rabit_tracker.py
|
"""
Tracker script for rabit
Implements the tracker control protocol
- start rabit jobs
- help nodes to establish links with each other
Tianqi Chen
"""
import sys
import os
import socket
import struct
import subprocess
import random
import time
from threading import Thread
"""
Extension of socket to handle recv and send of special data
"""
class ExSocket:
def __init__(self, sock):
self.sock = sock
def recvall(self, nbytes):
res = []
sock = self.sock
nread = 0
while nread < nbytes:
chunk = self.sock.recv(min(nbytes - nread, 1024))
nread += len(chunk)
res.append(chunk)
return ''.join(res)
def recvint(self):
return struct.unpack('@i', self.recvall(4))[0]
def sendint(self, n):
self.sock.sendall(struct.pack('@i', n))
def sendstr(self, s):
self.sendint(len(s))
self.sock.sendall(s)
def recvstr(self):
slen = self.recvint()
return self.recvall(slen)
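# Framing illustration (not part of the original script): sendstr() writes the
# string length as a native-endian 4-byte int followed by the payload, and
# recvstr() reads it back, so for two connected ExSocket ends `a` and `b`:
#   a.sendstr('hello')            # sends struct.pack('@i', 5) + 'hello'
#   assert b.recvstr() == 'hello'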
# magic number used to verify existence of data
kMagic = 0xff99
class SlaveEntry:
def __init__(self, sock, s_addr):
slave = ExSocket(sock)
self.sock = slave
self.host = socket.gethostbyname(s_addr[0])
magic = slave.recvint()
assert magic == kMagic, 'invalid magic number=%d from %s' % (magic, self.host)
slave.sendint(kMagic)
self.rank = slave.recvint()
self.world_size = slave.recvint()
self.jobid = slave.recvstr()
self.cmd = slave.recvstr()
def decide_rank(self, job_map):
if self.rank >= 0:
return self.rank
if self.jobid != 'NULL' and self.jobid in job_map:
return job_map[self.jobid]
return -1
def assign_rank(self, rank, wait_conn, tree_map, parent_map, ring_map):
self.rank = rank
nnset = set(tree_map[rank])
rprev, rnext = ring_map[rank]
self.sock.sendint(rank)
# send parent rank
self.sock.sendint(parent_map[rank])
# send world size
self.sock.sendint(len(tree_map))
self.sock.sendint(len(nnset))
# send the rprev and next link
for r in nnset:
self.sock.sendint(r)
# send prev link
if rprev != -1 and rprev != rank:
nnset.add(rprev)
self.sock.sendint(rprev)
else:
self.sock.sendint(-1)
# send next link
if rnext != -1 and rnext != rank:
nnset.add(rnext)
self.sock.sendint(rnext)
else:
self.sock.sendint(-1)
while True:
ngood = self.sock.recvint()
goodset = set([])
for i in xrange(ngood):
goodset.add(self.sock.recvint())
assert goodset.issubset(nnset)
badset = nnset - goodset
conset = []
for r in badset:
if r in wait_conn:
conset.append(r)
self.sock.sendint(len(conset))
self.sock.sendint(len(badset) - len(conset))
for r in conset:
self.sock.sendstr(wait_conn[r].host)
self.sock.sendint(wait_conn[r].port)
self.sock.sendint(r)
nerr = self.sock.recvint()
if nerr != 0:
continue
self.port = self.sock.recvint()
rmset = []
# all connections were successfully set up
for r in conset:
wait_conn[r].wait_accept -= 1
if wait_conn[r].wait_accept == 0:
rmset.append(r)
for r in rmset:
wait_conn.pop(r, None)
self.wait_accept = len(badset) - len(conset)
return rmset
class Tracker:
def __init__(self, port = 9091, port_end = 9999, verbose = True, hostIP = 'auto'):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
for port in range(port, port_end):
try:
sock.bind(('', port))
self.port = port
break
except socket.error:
continue
sock.listen(128)
self.sock = sock
self.verbose = verbose
if hostIP == 'auto':
hostIP = 'ip'
self.hostIP = hostIP
self.log_print('start listen on %s:%d' % (socket.gethostname(), self.port), 1)
def __del__(self):
self.sock.close()
def slave_envs(self):
"""
get environment variables for slaves
can be passed in as args or envs
"""
if self.hostIP == 'dns':
host = socket.gethostname()
elif self.hostIP == 'ip':
host = socket.gethostbyname(socket.getfqdn())
else:
host = self.hostIP
return {'rabit_tracker_uri': host,
'rabit_tracker_port': self.port}
def get_neighbor(self, rank, nslave):
rank = rank + 1
ret = []
if rank > 1:
ret.append(rank / 2 - 1)
if rank * 2 - 1 < nslave:
ret.append(rank * 2 - 1)
if rank * 2 < nslave:
ret.append(rank * 2)
return ret
def get_tree(self, nslave):
tree_map = {}
parent_map = {}
for r in range(nslave):
tree_map[r] = self.get_neighbor(r, nslave)
parent_map[r] = (r + 1) / 2 - 1
return tree_map, parent_map
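# Illustrative example (not part of the original script): ranks are laid out as a
# binary heap, so get_tree(5) yields
#   tree_map   = {0: [1, 2], 1: [0, 3, 4], 2: [0], 3: [1], 4: [1]}
#   parent_map = {0: -1, 1: 0, 2: 0, 3: 1, 4: 1}
# i.e. rank 0 is the root with children 1 and 2, and rank 1 has children 3 and 4.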
def find_share_ring(self, tree_map, parent_map, r):
"""
get a ring structure that tends to share nodes with the tree
return a list starting from r
"""
nset = set(tree_map[r])
cset = nset - set([parent_map[r]])
if len(cset) == 0:
return [r]
rlst = [r]
cnt = 0
for v in cset:
vlst = self.find_share_ring(tree_map, parent_map, v)
cnt += 1
if cnt == len(cset):
vlst.reverse()
rlst += vlst
return rlst
def get_ring(self, tree_map, parent_map):
"""
get a ring connection used to recover local data
"""
assert parent_map[0] == -1
rlst = self.find_share_ring(tree_map, parent_map, 0)
assert len(rlst) == len(tree_map)
ring_map = {}
nslave = len(tree_map)
for r in range(nslave):
rprev = (r + nslave - 1) % nslave
rnext = (r + 1) % nslave
ring_map[rlst[r]] = (rlst[rprev], rlst[rnext])
return ring_map
def get_link_map(self, nslave):
"""
get the link map, this is a bit hacky, call for better algorithm
to place similar nodes together
"""
tree_map, parent_map = self.get_tree(nslave)
ring_map = self.get_ring(tree_map, parent_map)
rmap = {0 : 0}
k = 0
for i in range(nslave - 1):
k = ring_map[k][1]
rmap[k] = i + 1
ring_map_ = {}
tree_map_ = {}
parent_map_ ={}
for k, v in ring_map.items():
ring_map_[rmap[k]] = (rmap[v[0]], rmap[v[1]])
for k, v in tree_map.items():
tree_map_[rmap[k]] = [rmap[x] for x in v]
for k, v in parent_map.items():
if k != 0:
parent_map_[rmap[k]] = rmap[v]
else:
parent_map_[rmap[k]] = -1
return tree_map_, parent_map_, ring_map_
def handle_print(self,slave, msg):
sys.stdout.write(msg)
def log_print(self, msg, level):
if level == 1:
if self.verbose:
sys.stderr.write(msg + '\n')
else:
sys.stderr.write(msg + '\n')
def accept_slaves(self, nslave):
# set of nodes that have finished the job
shutdown = {}
# set of nodes that are waiting for connections
wait_conn = {}
# maps job id to rank
job_map = {}
# list of workers that are pending rank assignment
pending = []
# lazy initialize tree_map
tree_map = None
while len(shutdown) != nslave:
fd, s_addr = self.sock.accept()
s = SlaveEntry(fd, s_addr)
if s.cmd == 'print':
msg = s.sock.recvstr()
self.handle_print(s, msg)
continue
if s.cmd == 'shutdown':
assert s.rank >= 0 and s.rank not in shutdown
assert s.rank not in wait_conn
shutdown[s.rank] = s
self.log_print('Receive %s signal from %d' % (s.cmd, s.rank), 1)
continue
assert s.cmd == 'start' or s.cmd == 'recover'
# lazily initialize the slaves
if tree_map is None:
assert s.cmd == 'start'
if s.world_size > 0:
nslave = s.world_size
tree_map, parent_map, ring_map = self.get_link_map(nslave)
# set of nodes that is pending for getting up
todo_nodes = range(nslave)
else:
assert s.world_size == -1 or s.world_size == nslave
if s.cmd == 'recover':
assert s.rank >= 0
rank = s.decide_rank(job_map)
# batch assignment of ranks
if rank == -1:
assert len(todo_nodes) != 0
pending.append(s)
if len(pending) == len(todo_nodes):
pending.sort(key = lambda x : x.host)
for s in pending:
rank = todo_nodes.pop(0)
if s.jobid != 'NULL':
job_map[s.jobid] = rank
s.assign_rank(rank, wait_conn, tree_map, parent_map, ring_map)
if s.wait_accept > 0:
wait_conn[rank] = s
self.log_print('Receive %s signal from %s; assign rank %d' % (s.cmd, s.host, s.rank), 1)
if len(todo_nodes) == 0:
self.log_print('@tracker All of %d nodes getting started' % nslave, 2)
self.start_time = time.time()
else:
s.assign_rank(rank, wait_conn, tree_map, parent_map, ring_map)
self.log_print('Receive %s signal from %d' % (s.cmd, s.rank), 1)
if s.wait_accept > 0:
wait_conn[rank] = s
self.log_print('@tracker All nodes finished the job', 2)
self.end_time = time.time()
self.log_print('@tracker %s secs between node start and job finish' % str(self.end_time - self.start_time), 2)
def submit(nslave, args, fun_submit, verbose, hostIP = 'auto'):
master = Tracker(verbose = verbose, hostIP = hostIP)
submit_thread = Thread(target = fun_submit, args = (nslave, args, master.slave_envs()))
submit_thread.daemon = True
submit_thread.start()
master.accept_slaves(nslave)
submit_thread.join()
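# Minimal usage sketch (illustrative only; 'worker.py' and the way the tracker
# environment is forwarded are assumptions, not part of this script). fun_submit
# receives the worker count, the extra args, and the env dict from slave_envs():
#
#   def demo_submit(nslave, args, envs):
#       env = os.environ.copy()
#       env.update({str(k): str(v) for k, v in envs.items()})
#       for _ in range(nslave):
#           subprocess.Popen(['python', 'worker.py'] + list(args), env=env)
#
#   submit(nslave=4, args=[], fun_submit=demo_submit, verbose=True)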
|
metrics.py
|
import threading
import time
class Message:
def __init__(self, host, raw):
self.created = time.time()
self.host = host
self.raw = raw
if raw is None:
self.size = 0
else:
self.size = len(raw)
class MessageList:
def __init__(self):
self.bytes = 0
self.errors = 0
self.messages = []
class Totals:
def __init__(self):
self.received = MessageList()
self.sent = MessageList()
class Summary:
def __init__(self, cleaner_wait=1, timeout=60):
self.received = MessageList()
self.sent = MessageList()
self.timeout = timeout
self.runnable = True
self.thread = threading.Thread(name="cleaner", target=self.cleaner)
self.thread.start()
self.totals = Totals()
def receive(self, message):
self.received.messages.append(message)
def send(self, message):
self.sent.messages.append(message)
def cleaner(self, wait=1):
while self.runnable:
time.sleep(wait)
old = time.time() - self.timeout
# drop messages that have aged out of the timeout window; checking the list
# before indexing avoids the IndexError the old loop hit after popping the last message
while self.received.messages and self.received.messages[0].created < old:
self.received.messages.pop(0)
while self.sent.messages and self.sent.messages[0].created < old:
self.sent.messages.pop(0)
errors = 0
bytes = 0
for message in self.received.messages:
if message.size < 1:
errors += 1
bytes += message.size
self.received.errors = errors
self.received.bytes = bytes
errors = 0
bytes = 0
for message in self.sent.messages:
if message.size < 1:
errors += 1
bytes += message.size
self.sent.errors = errors
self.sent.bytes = bytes
def stop(self):
self.runnable = False
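# Minimal usage sketch (illustrative, not part of the module): record traffic and
# read back the rolling counters maintained by the cleaner thread.
#
#   summary = Summary(timeout=60)
#   summary.receive(Message('10.0.0.1', b'payload'))
#   summary.send(Message('10.0.0.2', None))   # size 0 counts as an error
#   time.sleep(2)                             # let the cleaner recompute
#   print(summary.received.bytes, summary.sent.errors)
#   summary.stop()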
|
ircbot.py
|
'''Todo:
* Add multiple thread support for async_process functions
* Potentially thread each handler function? idk
'''
import sys
import socket
import re
import threading
import logging
import time
if sys.hexversion < 0x03000000:
#Python 2
import Queue as queue
BlockingIOError = socket.error
else:
import queue
from .ircclient import IRCClient
#Somewhat complex regex that accurately matches nick!username@host, with named groups for easy parsing and usage
user_re = re.compile(r'(?P<nick>[\w\d<-\[\]\^\{\}]+)!(?P<user>[\w\d<-\[\]\^\{\}]+)@(?P<host>.+)')
class IRCBot(IRCClient):
'''See `IRCClient` for basic client usage, here is usage for the bot system
Handler notation:
on_join(self, nick, host, channel)
on_topic(self, nick, host, channel, topic)
on_part(self, nick, host, channel, message)
on_msg(self, nick, host, channel, message)
on_privmsg(self, nick, host, message)
on_chanmsg(self, nick, host, channel, message)
on_notice(self, nick, host, channel, message)
on_nick(self, nick, new_nick, host)
on_whois_start(self, nick)    - whois reply 311
on_whois_logged(self, nick, msg)    - whois reply 330
on_whois_end(self, nick)    - whois reply 318
'''
_handlers = {
'join': [],
'part': [],
'kick': [],
'topic': [],
'msg': [],
'privmsg': [],
'chanmsg': [],
'notice': [],
'nick': [],
'whois_start': [], #whois start 311
'whois_logged': [], #whois is logged 330
'whois_end': [], #whois end 318
'acc': []
}
_process_thread = None
def _async_process(self):
while not self._stop_event.is_set():
time.sleep(0.01)
try:
args = self._in_queue.get_nowait()
#These "msg"s will be raw irc received lines, which have several forms
# basically, we should be looking for
# :User!Name@host COMMAND <ARGS>
logging.debug(args)
userhost = user_re.search(args[0][1:])
#print(args)
if userhost:
nick, user, host = userhost.groups() #named groups appear in (nick, user, host) order
command = args[1]
if command == 'JOIN':
channel = args[2][1:] #JOIN Channels are : prefixed
for handler in self._handlers['join']:
handler(self, nick, host, channel)
elif command == 'TOPIC':
channel = args[2]
topic = ' '.join(args[3:])
for handler in self._handlers['topic']:
handler(self, nick, host, channel, topic)
elif command == 'PART':
channel = args[2]
message = ' '.join(args[3:])
for handler in self._handlers['part']:
handler(self, nick, host, channel, message)
elif command == 'PRIVMSG':
channel = args[2]
message = ' '.join(args[3:])[1:]
for handler in self._handlers['msg']:
handler(self, nick, host, channel, message)
if channel[0] == '#':
#this is a channel
for handler in self._handlers['chanmsg']:
handler(self, nick, host, channel, message)
else:
#private message
for handler in self._handlers['privmsg']:
handler(self, nick, host, message)
elif command == 'KICK':
channel = args[2]
kicked_nick = args[3]
reason = ' '.join(args[4:])[1:]
for handler in self._handlers['kick']:
handler(self, nick, host, channel, kicked_nick, reason)
elif command == 'NICK':
new_nick = args[2][1:]
for handler in self._handlers['nick']:
handler(self, nick, new_nick, host)
elif command == 'NOTICE':
#:nick!user@host NOTICE <userchan> :message
channel = args[2]
if args[4] == 'ACC':
nick = args[3][1:]
code = args[5]
for handler in self._handlers['acc']:
handler(self, nick, code)
else:
message = ' '.join(args[3:])
for handler in self._handlers['notice']:
handler(self, nick, host, channel, message)
else:
logging.warning("Unhandled command %s" % command)
else: #whois replies
if args[1] == '311':
nick = args[3]
for handler in self._handlers['whois_start']:
handler(self, nick)
elif args[1] == '330':
nick = args[3]
msg = ' '.join(args[4:])
for handler in self._handlers['whois_logged']:
handler(self, nick, msg)
elif args[1] == '318':
nick = args[3]
for handler in self._handlers['whois_end']:
handler(self, nick)
self._in_queue.task_done()
except queue.Empty as e: pass
except Exception as e:
logging.debug(e.args)
def start(self):
IRCClient.start(self)
self._process_thread = threading.Thread(target=self._async_process)
self._process_thread.start()
def on(self, type):
'''Decorator function'''
def decorator(func):
'''decorated functions should be written as class methods
@on('join')
def on_join(self, channel):
print("Joined channel %s" % channel)
'''
self._handlers[type].append(func)
return func
return decorator
def on_acc(self, func):
self._handlers['acc'].append(func)
return func
def on_whois_start(self, func):
self._handlers['whois_start'].append(func)
return func
def on_whois_logged(self, func):
self._handlers['whois_logged'].append(func)
return func
def on_whois_end(self, func):
self._handlers['whois_end'].append(func)
return func
def on_join(self, func):
self._handlers['join'].append(func)
return func
def on_part(self, func):
self._handlers['part'].append(func)
return func
def on_kick(self, func):
self._handlers['kick'].append(func)
return func
def on_msg(self, func):
self._handlers['msg'].append(func)
return func
def on_privmsg(self, func):
self._handlers['privmsg'].append(func)
return func
def on_chanmsg(self, func):
self._handlers['chanmsg'].append(func)
return func
def on_notice(self, func):
self._handlers['notice'].append(func)
return func
def on_nick(self, func):
self._handlers['nick'].append(func)
return func
__all__ = ['IRCBot']
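# Minimal usage sketch (illustrative; the IRCClient constructor arguments shown
# here are assumptions and may differ from the actual ircclient module):
#
#   bot = IRCBot('irc.example.net', port=6667, nick='demobot')
#
#   @bot.on_chanmsg
#   def greet(bot, nick, host, channel, message):
#       print('%s said "%s" in %s' % (nick, message, channel))
#
#   bot.start()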
|
train.py
|
#!/usr/bin/env python
import os
import json
import tensorflow.contrib.slim as slim
import datetime
import random
import time
import argparse
import threading
from scipy import misc
import tensorflow as tf
import numpy as np
from distutils.version import LooseVersion
if LooseVersion(tf.__version__) >= LooseVersion('1.0'):
rnn_cell = tf.contrib.rnn
else:
try:
from tensorflow.models.rnn import rnn_cell
except ImportError:
rnn_cell = tf.nn.rnn_cell
random.seed(0)
np.random.seed(0)
from tensorboxresnet.utils import train_utils, googlenet_load, tf_concat
def build_overfeat_inner(H, lstm_input):
'''
build simple overfeat decoder
'''
if H['rnn_len'] > 1:
raise ValueError('rnn_len > 1 only supported with use_lstm == True')
outputs = []
initializer = tf.random_uniform_initializer(-0.1, 0.1)
with tf.variable_scope('Overfeat', initializer=initializer):
w = tf.get_variable(
'ip', shape=[H['later_feat_channels'], H['lstm_size']]
)
outputs.append(tf.matmul(lstm_input, w))
return outputs
def deconv(x, output_shape, channels):
k_h = 2
k_w = 2
w = tf.get_variable(
'w_deconv',
initializer=tf.random_normal_initializer(stddev=0.01),
shape=[k_h, k_w, channels[1], channels[0]]
)
y = tf.nn.conv2d_transpose(
x, w, output_shape, strides=[1, k_h, k_w, 1], padding='VALID'
)
return y
def rezoom(
H, pred_boxes, early_feat, early_feat_channels, w_offsets, h_offsets
):
'''
Rezoom into a feature map at multiple interpolation points in a grid.
If the predicted object center is at X, len(w_offsets) == 3, and len(h_offsets) == 5,
the rezoom grid will look as follows:
[o o o]
[o o o]
[o X o]
[o o o]
[o o o]
Where each letter indexes into the feature map with bilinear interpolation
'''
grid_size = H['grid_width'] * H['grid_height']
outer_size = grid_size * H['batch_size']
indices = []
for w_offset in w_offsets:
for h_offset in h_offsets:
indices.append(
train_utils.bilinear_select(
H, pred_boxes, early_feat, early_feat_channels, w_offset,
h_offset
)
)
interp_indices = tf_concat(0, indices)
rezoom_features = train_utils.interp(
early_feat, interp_indices, early_feat_channels
)
rezoom_features_r = tf.reshape(
rezoom_features, [
len(w_offsets) * len(h_offsets), outer_size, H['rnn_len'],
early_feat_channels
]
)
rezoom_features_t = tf.transpose(rezoom_features_r, [1, 2, 0, 3])
return tf.reshape(
rezoom_features_t, [
outer_size, H['rnn_len'],
len(w_offsets) * len(h_offsets) * early_feat_channels
]
)
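# Shape illustration (example numbers only, not from a real config): with
# grid_width=20, grid_height=15, batch_size=10, rnn_len=1, early_feat_channels=256
# and 3 x 5 offsets, outer_size is 20*15*10 = 3000 and the returned tensor has
# shape [3000, 1, 15 * 256] = [3000, 1, 3840].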
def build_forward(H, x, phase, reuse):
'''
Construct the forward model
'''
grid_size = H['grid_width'] * H['grid_height']
outer_size = grid_size * H['batch_size']
input_mean = 117.
x -= input_mean
cnn, early_feat = googlenet_load.model(x, H, reuse)
early_feat_channels = H['early_feat_channels']
early_feat = early_feat[:, :, :, :early_feat_channels]
if H['deconv']:
size = 3
stride = 2
pool_size = 5
with tf.variable_scope("deconv", reuse=reuse):
w = tf.get_variable(
'conv_pool_w',
shape=[
size, size, H['later_feat_channels'],
H['later_feat_channels']
],
initializer=tf.random_normal_initializer(stddev=0.01)
)
cnn_s = tf.nn.conv2d(
cnn, w, strides=[1, stride, stride, 1], padding='SAME'
)
cnn_s_pool = tf.nn.avg_pool(
cnn_s[:, :, :, :256],
ksize=[1, pool_size, pool_size, 1],
strides=[1, 1, 1, 1],
padding='SAME'
)
cnn_s_with_pool = tf_concat(3, [cnn_s_pool, cnn_s[:, :, :, 256:]])
cnn_deconv = deconv(
cnn_s_with_pool,
output_shape=[
H['batch_size'], H['grid_height'], H['grid_width'], 256
],
channels=[H['later_feat_channels'], 256]
)
cnn = tf_concat(3, (cnn_deconv, cnn[:, :, :, 256:]))
elif H['avg_pool_size'] > 1:
pool_size = H['avg_pool_size']
cnn1 = cnn[:, :, :, :700]
cnn2 = cnn[:, :, :, 700:]
cnn2 = tf.nn.avg_pool(
cnn2,
ksize=[1, pool_size, pool_size, 1],
strides=[1, 1, 1, 1],
padding='SAME'
)
cnn = tf_concat(3, [cnn1, cnn2])
cnn = tf.reshape(
cnn, [
H['batch_size'] * H['grid_width'] * H['grid_height'],
H['later_feat_channels']
]
)
initializer = tf.random_uniform_initializer(-0.1, 0.1)
with tf.variable_scope('decoder', reuse=reuse, initializer=initializer):
scale_down = 0.01
lstm_input = tf.reshape(
cnn * scale_down,
(H['batch_size'] * grid_size, H['later_feat_channels'])
)
if H['use_lstm']:
lstm_outputs = build_lstm_inner(H, lstm_input)
else:
lstm_outputs = build_overfeat_inner(H, lstm_input)
pred_boxes = []
pred_logits = []
for k in range(H['rnn_len']):
output = lstm_outputs[k]
if phase == 'train':
output = tf.nn.dropout(output, 0.5)
box_weights = tf.get_variable(
'box_ip%d' % k, shape=(H['lstm_size'], 4)
)
conf_weights = tf.get_variable(
'conf_ip%d' % k, shape=(H['lstm_size'], H['num_classes'])
)
pred_boxes_step = tf.reshape(
tf.matmul(output, box_weights) * 50, [outer_size, 1, 4]
)
pred_boxes.append(pred_boxes_step)
pred_logits.append(
tf.reshape(
tf.matmul(output, conf_weights),
[outer_size, 1, H['num_classes']]
)
)
pred_boxes = tf_concat(1, pred_boxes)
pred_logits = tf_concat(1, pred_logits)
pred_logits_squash = tf.reshape(
pred_logits, [outer_size * H['rnn_len'], H['num_classes']]
)
pred_confidences_squash = tf.nn.softmax(pred_logits_squash)
pred_confidences = tf.reshape(
pred_confidences_squash,
[outer_size, H['rnn_len'], H['num_classes']]
)
if H['use_rezoom']:
pred_confs_deltas = []
pred_boxes_deltas = []
w_offsets = H['rezoom_w_coords']
h_offsets = H['rezoom_h_coords']
num_offsets = len(w_offsets) * len(h_offsets)
rezoom_features = rezoom(
H, pred_boxes, early_feat, early_feat_channels, w_offsets,
h_offsets
)
if phase == 'train':
rezoom_features = tf.nn.dropout(rezoom_features, 0.5)
for k in range(H['rnn_len']):
delta_features = tf_concat(
1, [lstm_outputs[k], rezoom_features[:, k, :] / 1000.]
)
dim = 128
delta_weights1 = tf.get_variable(
'delta_ip1%d' % k,
shape=[
H['lstm_size'] + early_feat_channels * num_offsets, dim
]
)
ip1 = tf.nn.relu(tf.matmul(delta_features, delta_weights1))
if phase == 'train':
ip1 = tf.nn.dropout(ip1, 0.5)
delta_confs_weights = tf.get_variable(
'delta_ip2%d' % k, shape=[dim, H['num_classes']]
)
if H['reregress']:
delta_boxes_weights = tf.get_variable(
'delta_ip_boxes%d' % k, shape=[dim, 4]
)
pred_boxes_deltas.append(
tf.reshape(
tf.matmul(ip1, delta_boxes_weights) * 5,
[outer_size, 1, 4]
)
)
scale = H.get('rezoom_conf_scale', 50)
pred_confs_deltas.append(
tf.reshape(
tf.matmul(ip1, delta_confs_weights) * scale,
[outer_size, 1, H['num_classes']]
)
)
pred_confs_deltas = tf_concat(1, pred_confs_deltas)
if H['reregress']:
pred_boxes_deltas = tf_concat(1, pred_boxes_deltas)
return pred_boxes, pred_logits, pred_confidences, pred_confs_deltas, pred_boxes_deltas
return pred_boxes, pred_logits, pred_confidences
def build_forward_backward(H, x, phase, boxes, flags):
'''
Call build_forward() and then setup the loss functions
'''
grid_size = H['grid_width'] * H['grid_height']
outer_size = grid_size * H['batch_size']
reuse = {'train': None, 'test': True}[phase]
if H['use_rezoom']:
(
pred_boxes, pred_logits, pred_confidences, pred_confs_deltas,
pred_boxes_deltas
) = build_forward(H, x, phase, reuse)
else:
pred_boxes, pred_logits, pred_confidences = build_forward(
H, x, phase, reuse
)
with tf.variable_scope(
'decoder', reuse={'train': None,
'test': True}[phase]
):
outer_boxes = tf.reshape(boxes, [outer_size, H['rnn_len'], 4])
outer_flags = tf.cast(
tf.reshape(flags, [outer_size, H['rnn_len']]), 'int32'
)
if H['use_lstm']:
hungarian_module = tf.load_op_library(
'utils/hungarian/hungarian.so'
)
assignments, classes, perm_truth, pred_mask = (
hungarian_module.hungarian(
pred_boxes, outer_boxes, outer_flags,
H['solver']['hungarian_iou']
)
)
else:
classes = tf.reshape(flags, (outer_size, 1))
perm_truth = tf.reshape(outer_boxes, (outer_size, 1, 4))
pred_mask = tf.reshape(
tf.cast(tf.greater(classes, 0), 'float32'), (outer_size, 1, 1)
)
true_classes = tf.reshape(
tf.cast(tf.greater(classes, 0), 'int64'),
[outer_size * H['rnn_len']]
)
pred_logit_r = tf.reshape(
pred_logits, [outer_size * H['rnn_len'], H['num_classes']]
)
confidences_loss = (
tf.reduce_sum(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=pred_logit_r, labels=true_classes
)
)
) / outer_size * H['solver']['head_weights'][0]
residual = tf.reshape(
perm_truth - pred_boxes * pred_mask, [outer_size, H['rnn_len'], 4]
)
boxes_loss = tf.reduce_sum(
tf.abs(residual)
) / outer_size * H['solver']['head_weights'][1]
if H['use_rezoom']:
if H['rezoom_change_loss'] == 'center':
error = (perm_truth[:, :, 0:2] - pred_boxes[:, :, 0:2]
) / tf.maximum(perm_truth[:, :, 2:4], 1.)
square_error = tf.reduce_sum(tf.square(error), 2)
inside = tf.reshape(
tf.to_int64(
tf.logical_and(
tf.less(square_error, 0.2**2),
tf.greater(classes, 0)
)
), [-1]
)
elif H['rezoom_change_loss'] == 'iou':
iou = train_utils.iou(
train_utils.to_x1y1x2y2(tf.reshape(pred_boxes, [-1, 4])),
train_utils.to_x1y1x2y2(tf.reshape(perm_truth, [-1, 4]))
)
inside = tf.reshape(tf.to_int64(tf.greater(iou, 0.5)), [-1])
else:
assert H['rezoom_change_loss'] == False
inside = tf.reshape(
tf.to_int64((tf.greater(classes, 0))), [-1]
)
new_confs = tf.reshape(
pred_confs_deltas,
[outer_size * H['rnn_len'], H['num_classes']]
)
delta_confs_loss = tf.reduce_sum(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=new_confs, labels=inside
)
) / outer_size * H['solver']['head_weights'][0] * 0.1
pred_logits_squash = tf.reshape(
new_confs, [outer_size * H['rnn_len'], H['num_classes']]
)
pred_confidences_squash = tf.nn.softmax(pred_logits_squash)
pred_confidences = tf.reshape(
pred_confidences_squash,
[outer_size, H['rnn_len'], H['num_classes']]
)
loss = confidences_loss + boxes_loss + delta_confs_loss
if H['reregress']:
delta_residual = tf.reshape(
perm_truth - (pred_boxes + pred_boxes_deltas) * pred_mask,
[outer_size, H['rnn_len'], 4]
)
delta_boxes_loss = (
tf.reduce_sum(
tf.minimum(tf.square(delta_residual), 10.**2)
) / outer_size * H['solver']['head_weights'][1] * 0.03
)
boxes_loss = delta_boxes_loss
tf.summary.histogram(
phase + '/delta_hist0_x', pred_boxes_deltas[:, 0, 0]
)
tf.summary.histogram(
phase + '/delta_hist0_y', pred_boxes_deltas[:, 0, 1]
)
tf.summary.histogram(
phase + '/delta_hist0_w', pred_boxes_deltas[:, 0, 2]
)
tf.summary.histogram(
phase + '/delta_hist0_h', pred_boxes_deltas[:, 0, 3]
)
loss += delta_boxes_loss
else:
loss = confidences_loss + boxes_loss
return pred_boxes, pred_confidences, loss, confidences_loss, boxes_loss
def build(H, q):
'''
Build full model for training, including forward / backward passes,
optimizers, and summary statistics.
'''
arch = H
solver = H["solver"]
os.environ['CUDA_VISIBLE_DEVICES'] = str(solver.get('gpu', ''))
gpu_options = tf.GPUOptions()
config = tf.ConfigProto(gpu_options=gpu_options)
learning_rate = tf.placeholder(tf.float32)
if solver['opt'] == 'RMS':
opt = tf.train.RMSPropOptimizer(
learning_rate=learning_rate, decay=0.9, epsilon=solver['epsilon']
)
elif solver['opt'] == 'Adam':
opt = tf.train.AdamOptimizer(
learning_rate=learning_rate, epsilon=solver['epsilon']
)
elif solver['opt'] == 'SGD':
opt = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
else:
raise ValueError('Unrecognized opt type')
loss, accuracy, confidences_loss, boxes_loss = {}, {}, {}, {}
for phase in ['train', 'test']:
# generate predictions and losses from forward pass
x, confidences, boxes = q[phase].dequeue_many(arch['batch_size'])
flags = tf.argmax(confidences, 3)
grid_size = H['grid_width'] * H['grid_height']
(
pred_boxes, pred_confidences, loss[phase], confidences_loss[phase],
boxes_loss[phase]
) = build_forward_backward(H, x, phase, boxes, flags)
pred_confidences_r = tf.reshape(
pred_confidences,
[H['batch_size'], grid_size, H['rnn_len'], arch['num_classes']]
)
pred_boxes_r = tf.reshape(
pred_boxes, [H['batch_size'], grid_size, H['rnn_len'], 4]
)
# Set up summary operations for tensorboard
a = tf.equal(
tf.argmax(confidences[:, :, 0, :], 2),
tf.argmax(pred_confidences_r[:, :, 0, :], 2)
)
accuracy[phase] = tf.reduce_mean(
tf.cast(a, 'float32'), name=phase + '/accuracy'
)
if phase == 'train':
global_step = tf.Variable(0, trainable=False)
tvars = tf.trainable_variables()
if H['clip_norm'] <= 0:
grads = tf.gradients(loss['train'], tvars)
else:
grads, norm = tf.clip_by_global_norm(
tf.gradients(loss['train'], tvars), H['clip_norm']
)
train_op = opt.apply_gradients(
zip(grads, tvars), global_step=global_step
)
elif phase == 'test':
moving_avg = tf.train.ExponentialMovingAverage(0.95)
smooth_op = moving_avg.apply(
[
accuracy['train'],
accuracy['test'],
confidences_loss['train'],
boxes_loss['train'],
confidences_loss['test'],
boxes_loss['test'],
]
)
for p in ['train', 'test']:
tf.summary.scalar('%s/accuracy' % p, accuracy[p])
tf.summary.scalar(
'%s/accuracy/smooth' % p, moving_avg.average(accuracy[p])
)
tf.summary.scalar(
"%s/confidences_loss" % p, confidences_loss[p]
)
tf.summary.scalar(
"%s/confidences_loss/smooth" % p,
moving_avg.average(confidences_loss[p])
)
tf.summary.scalar("%s/regression_loss" % p, boxes_loss[p])
tf.summary.scalar(
"%s/regression_loss/smooth" % p,
moving_avg.average(boxes_loss[p])
)
if phase == 'test':
test_image = x
# show ground truth to verify labels are correct
test_true_confidences = confidences[0, :, :, :]
test_true_boxes = boxes[0, :, :, :]
# show predictions to visualize training progress
test_pred_confidences = pred_confidences_r[0, :, :, :]
test_pred_boxes = pred_boxes_r[0, :, :, :]
def log_image(
np_img, np_confidences, np_boxes, np_global_step, pred_or_true
):
if np_img.shape[2] == 4:
np_img = np_img[:, :, [0, 1, 3]]
merged = train_utils.add_rectangles(
H,
np_img,
np_confidences,
np_boxes,
use_stitching=True,
rnn_len=H['rnn_len']
)[0]
num_images = 5000
img_path = os.path.join(
H['save_dir'], '%s_%s.jpg' % (
(np_global_step // H['logging']['display_iter']
) % num_images, pred_or_true
)
)
misc.imsave(img_path, merged)  # imsave comes from the scipy.misc import at the top of the file
return merged
pred_log_img = tf.py_func(
log_image, [
test_image, test_pred_confidences, test_pred_boxes,
global_step, 'pred'
], [tf.float32]
)
true_log_img = tf.py_func(
log_image, [
test_image, test_true_confidences, test_true_boxes,
global_step, 'true'
], [tf.float32]
)
tf.summary.image(
phase + '/pred_boxes', pred_log_img, max_outputs=10
)
tf.summary.image(
phase + '/true_boxes', true_log_img, max_outputs=10
)
summary_op = tf.summary.merge_all()
return (
config, loss, accuracy, summary_op, train_op, smooth_op, global_step,
learning_rate
)
def train(H, test_images):
'''
Setup computation graph, run 2 prefetch data threads, and then run the main loop
'''
if not os.path.exists(H['save_dir']): os.makedirs(H['save_dir'])
ckpt_file = H['save_dir'] + '/save.ckpt'
with open(H['save_dir'] + '/hypes.json', 'w') as f:
json.dump(H, f, indent=4)
x_in = tf.placeholder(tf.float32)
confs_in = tf.placeholder(tf.float32)
boxes_in = tf.placeholder(tf.float32)
q = {}
enqueue_op = {}
for phase in ['train', 'test']:
dtypes = [tf.float32, tf.float32, tf.float32]
grid_size = H['grid_width'] * H['grid_height']
channels = H.get('image_channels', 3)
print('Image channels: %d' % channels)
shapes = (
[H['image_height'], H['image_width'],
channels], [grid_size, H['rnn_len'], H['num_classes']],
[grid_size, H['rnn_len'], 4],
)
q[phase] = tf.FIFOQueue(capacity=30, dtypes=dtypes, shapes=shapes)
enqueue_op[phase] = q[phase].enqueue((x_in, confs_in, boxes_in))
def make_feed(d):
return {
x_in: d['image'],
confs_in: d['confs'],
boxes_in: d['boxes'],
learning_rate: H['solver']['learning_rate']
}
def thread_loop(sess, enqueue_op, phase, gen):
for d in gen:
sess.run(enqueue_op[phase], feed_dict=make_feed(d))
(
config, loss, accuracy, summary_op, train_op, smooth_op, global_step,
learning_rate
) = build(H, q)
saver = tf.train.Saver(max_to_keep=None)
writer = tf.summary.FileWriter(logdir=H['save_dir'], flush_secs=10)
with tf.Session(config=config) as sess:
tf.train.start_queue_runners(sess=sess)
for phase in ['train', 'test']:
# enqueue once manually to avoid thread start delay
gen = train_utils.load_data_gen(
H, phase, jitter=H['solver']['use_jitter']
)
d = next(gen)
sess.run(enqueue_op[phase], feed_dict=make_feed(d))
t = threading.Thread(
target=thread_loop, args=(sess, enqueue_op, phase, gen)
)
t.daemon = True
t.start()
tf.set_random_seed(H['solver']['rnd_seed'])
sess.run(tf.global_variables_initializer())
writer.add_graph(sess.graph)
weights_str = H['solver']['weights']
if len(weights_str) > 0:
print('Restoring from: %s' % weights_str)
saver.restore(sess, weights_str)
elif H['slim_ckpt'] == '':
sess.run(
tf.variables_initializer(
[
x for x in tf.global_variables()
if x.name.startswith(H['slim_basename']) and
H['solver']['opt'] not in x.name
]
)
)
else:
init_fn = slim.assign_from_checkpoint_fn(
'%s/data/%s' %
(os.path.dirname(os.path.realpath(__file__)),
H['slim_ckpt']), [
x for x in tf.global_variables()
if x.name.startswith(H['slim_basename']) and
H['solver']['opt'] not in x.name
]
)
init_fn(sess)
# train model for N iterations
start = time.time()
max_iter = H['solver'].get('max_iter', 10000000)
for i in range(max_iter):
display_iter = H['logging']['display_iter']
adjusted_lr = (
H['solver']['learning_rate'] * 0.5**
max(0, (i // H['solver']['learning_rate_step']) - 2)
)
lr_feed = {learning_rate: adjusted_lr}
if i % display_iter != 0:
# train network
batch_loss_train, _ = sess.run(
[loss['train'], train_op], feed_dict=lr_feed
)
else:
# test network every N iterations; log additional info
if i > 0:
dt = (time.time() - start
) / (H['batch_size'] * display_iter)
start = time.time()
(train_loss, test_accuracy, summary_str, _, _) = sess.run(
[
loss['train'],
accuracy['test'],
summary_op,
train_op,
smooth_op,
],
feed_dict=lr_feed
)
writer.add_summary(summary_str, global_step=global_step.eval())
print_str = ', '.join(
[
'Step: %d',
'lr: %f',
'Train Loss: %.2f',
'Softmax Test Accuracy: %.1f%%',
'Time/image (ms): %.1f',
]
)
print(
print_str % (
i, adjusted_lr, train_loss, test_accuracy * 100,
dt * 1000 if i > 0 else 0
)
)
if global_step.eval() % H['logging'][
'save_iter'
] == 0 or global_step.eval() == max_iter - 1:
saver.save(sess, ckpt_file, global_step=global_step)
def main():
'''
Parse command line arguments and return the hyperparameter dictionary H.
H first loads the --hypes hypes.json file and is further updated with
additional arguments as needed.
'''
parser = argparse.ArgumentParser()
parser.add_argument('--weights', default=None, type=str)
parser.add_argument('--gpu', default=None, type=int)
parser.add_argument('--hypes', required=True, type=str)
parser.add_argument('--max_iter', required=False, type=int, default=None)
parser.add_argument('--logdir', default='output', type=str)
args = parser.parse_args()
with open(args.hypes, 'r') as f:
H = json.load(f)
if args.gpu is not None:
H['solver']['gpu'] = args.gpu
if args.max_iter is not None:
H['solver']['max_iter'] = args.max_iter
if len(H.get('exp_name', '')) == 0:
H['exp_name'] = args.hypes.split('/')[-1].replace('.json', '')
H['save_dir'] = args.logdir + '/%s_%s' % (
H['exp_name'], datetime.datetime.now().strftime('%Y_%m_%d_%H.%M')
)
if args.weights is not None:
H['solver']['weights'] = args.weights
train(H, test_images=[])
if __name__ == '__main__':
main()
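# Example invocation (illustrative; the hypes file name is an assumption):
#   python train.py --hypes hypes/overfeat_rezoom.json --gpu 0 --logdir output
# This loads the JSON hyperparameters, optionally overrides gpu/max_iter/weights
# from the command line, and writes checkpoints and TensorBoard summaries under
# --logdir/<exp_name>_<timestamp>.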
|
imagewriter.py
|
import os
from queue import Queue
from threading import Thread
import cv2 as cv
import numpy as np
from vision.utils import box_utils_numpy as box_utils
class ImageWriter:
def __init__(
self,
video_path,
queue_sz,
output_path,
create_output_folder=True,
extension='png',
):
self.stopped = False
self.queue = Queue(maxsize=queue_sz)
self.frame_count = 0
self.ext = extension.lower()
self.out = output_path
if not os.path.isdir(self.out):
if create_output_folder:
os.makedirs(self.out)
else:
raise RuntimeError('The output folder is invalid!')
self.basename_template = (
os.path.basename(video_path).replace('.', '_')
+ '_frame_{}_box_{}.'
+ self.ext
)
def start(self):
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
return self
def update(self):
while True:
if self.stopped:
return
img, boxes, ids = self.queue.get(block=True)
if boxes is None:
continue
for boxid, box in zip(ids, boxes):
box = box.astype(int)  # np.int is deprecated in recent NumPy
if box_utils.area_of(box[:2], box[2:]) == 0:
continue
(xmin, ymin, xmax, ymax) = box  # already converted to int above
wagon = img[ymin:ymax, xmin:xmax, :]
full_path = os.path.join(
self.out, self.basename_template.format(self.frame_count, boxid)
)
cv.imwrite(full_path, wagon)
self.frame_count += 1
def stop(self):
self.stopped = True
def __call__(self, img, boxes, ids):
self.queue.put((img, boxes, ids), block=True)
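# Minimal usage sketch (illustrative; the video path, output folder, and box
# values are placeholders, not part of the module):
#
#   writer = ImageWriter('input.mp4', queue_sz=64, output_path='crops').start()
#   frame = np.zeros((480, 640, 3), dtype=np.uint8)
#   boxes = np.array([[10, 10, 100, 100]], dtype=np.float32)
#   writer(frame, boxes, ids=[0])   # enqueue one frame; crops are written asynchronously
#   writer.stop()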
|
updater_long.py
|
import os
import sys
import time
import sqlite3
import zipfile
import pythoncom
import pandas as pd
from PyQt5 import QtWidgets
from PyQt5.QAxContainer import QAxWidget
from multiprocessing import Process, Queue, Lock
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from login.manuallogin import find_window, manual_login
from utility.static import strf_time, now, telegram_msg
from utility.setting import openapi_path, sn_brrq, db_day, db_stg
app = QtWidgets.QApplication(sys.argv)
class UpdaterLong:
def __init__(self, gubun, queryQQ, lockk):
self.gubun = gubun
self.queryQ = queryQQ
self.lock = lockk
self.str_trname = None
self.str_tday = strf_time('%Y%m%d')
self.df_tr = None
self.dict_tritems = None
self.dict_bool = {
'로그인': False,
'TR수신': False
}
self.ocx = QAxWidget('KHOPENAPI.KHOpenAPICtrl.1')
self.ocx.OnEventConnect.connect(self.OnEventConnect)
self.ocx.OnReceiveTrData.connect(self.OnReceiveTrData)
self.Start()
def Start(self):
self.CommConnect()
self.Updater()
def CommConnect(self):
self.ocx.dynamicCall('CommConnect()')
while not self.dict_bool['로그인']:
pythoncom.PumpWaitingMessages()
def Updater(self):
con = sqlite3.connect(db_stg)
df = pd.read_sql('SELECT * FROM long', con)
con.close()
df = df.set_index('index')
codes = list(df.index)
codes = [code for i, code in enumerate(codes) if i % 4 == self.gubun]
count = len(codes)
for i, code in enumerate(codes):
time.sleep(3.6)
self.lock.acquire()
df = self.Block_Request('opt10081', 종목코드=code, 기준일자=self.str_tday, 수정주가구분=1,
output='주식일봉차트조회', next=0)
self.lock.release()
df = df.set_index('일자')
df[['현재가']] = df[['현재가']].astype(int).abs()
df = df[::-1]
if df['현재가'][-1] >= df['현재가'][-20:].mean() * 1.05:
prelong = 1
else:
prelong = 0
tc19 = df['현재가'][-19:].sum()
self.queryQ.put([code, prelong, tc19])
print(f'[{now()}] {self.gubun} updating data ... [{i + 1}/{count}]')
if self.gubun == 3:
self.queryQ.put('업데이트완료')
sys.exit()
def OnEventConnect(self, err_code):
if err_code == 0:
self.dict_bool['로그인'] = True
def OnReceiveTrData(self, screen, rqname, trcode, record, nnext):
if screen == '' and record == '':
return
items = None
self.dict_bool['TR다음'] = True if nnext == '2' else False
for output in self.dict_tritems['output']:
record = list(output.keys())[0]
items = list(output.values())[0]
if record == self.str_trname:
break
rows = self.ocx.dynamicCall('GetRepeatCnt(QString, QString)', trcode, rqname)
if rows == 0:
rows = 1
df2 = []
for row in range(rows):
row_data = []
for item in items:
data = self.ocx.dynamicCall('GetCommData(QString, QString, int, QString)', trcode, rqname, row, item)
row_data.append(data.strip())
df2.append(row_data)
df = pd.DataFrame(data=df2, columns=items)
self.df_tr = df
self.dict_bool['TR수신'] = True
def Block_Request(self, *args, **kwargs):
trcode = args[0].lower()
liness = self.ReadEnc(trcode)
self.dict_tritems = self.ParseDat(trcode, liness)
self.str_trname = kwargs['output']
nnext = kwargs['next']
for i in kwargs:
if i.lower() != 'output' and i.lower() != 'next':
self.ocx.dynamicCall('SetInputValue(QString, QString)', i, kwargs[i])
self.dict_bool['TR수신'] = False
self.ocx.dynamicCall('CommRqData(QString, QString, int, QString)', self.str_trname, trcode, nnext, sn_brrq)
while not self.dict_bool['TR수신']:
pythoncom.PumpWaitingMessages()
return self.df_tr
# noinspection PyMethodMayBeStatic
def ReadEnc(self, trcode):
enc = zipfile.ZipFile(f'{openapi_path}/data/{trcode}.enc')
liness = enc.read(trcode.upper() + '.dat').decode('cp949')
return liness
# noinspection PyMethodMayBeStatic
def ParseDat(self, trcode, liness):
liness = liness.split('\n')
start = [i for i, x in enumerate(liness) if x.startswith('@START')]
end = [i for i, x in enumerate(liness) if x.startswith('@END')]
block = zip(start, end)
enc_data = {'trcode': trcode, 'input': [], 'output': []}
for start, end in block:
block_data = liness[start - 1:end + 1]
block_info = block_data[0]
block_type = 'input' if 'INPUT' in block_info else 'output'
record_line = block_data[1]
tokens = record_line.split('_')[1].strip()
record = tokens.split('=')[0]
fields = block_data[2:-1]
field_name = []
for line in fields:
field = line.split('=')[0].strip()
field_name.append(field)
fields = {record: field_name}
enc_data['input'].append(fields) if block_type == 'input' else enc_data['output'].append(fields)
return enc_data
class Query:
def __init__(self, queryQQ):
self.queryQ = queryQQ
self.con = sqlite3.connect(db_day)
self.Start()
def __del__(self):
self.con.close()
def Start(self):
df_long = pd.DataFrame(columns=['prelong', 'tc19'])
while True:
data = self.queryQ.get()
if data != '업데이트완료':
df_long.at[data[0]] = data[1], data[2]
else:
break
df_long[['prelong', 'tc19']] = df_long[['prelong', 'tc19']].astype(int)
con = sqlite3.connect(db_stg)
df = pd.read_sql('SELECT * FROM long', con)
df = df.set_index('index')
df['prelong'] = df_long['prelong']
df['tc19'] = df_long['tc19']
df.to_sql('long', con, if_exists='replace', chunksize=1000)
con.close()
telegram_msg('Updated the long DB.')
sys.exit()
if __name__ == '__main__':
queryQ = Queue()
lock = Lock()
login_info = f'{openapi_path}/system/Autologin.dat'
if os.path.isfile(login_info):
os.remove(f'{openapi_path}/system/Autologin.dat')
Process(target=Query, args=(queryQ,)).start()
Process(target=UpdaterLong, args=(0, queryQ, lock)).start()
while find_window('Open API login') == 0:
time.sleep(1)
time.sleep(5)
manual_login(1)
while find_window('Open API login') != 0:
time.sleep(1)
Process(target=UpdaterLong, args=(1, queryQ, lock)).start()
while find_window('Open API login') == 0:
time.sleep(1)
time.sleep(5)
manual_login(2)
while find_window('Open API login') != 0:
time.sleep(1)
Process(target=UpdaterLong, args=(2, queryQ, lock)).start()
while find_window('Open API login') == 0:
time.sleep(1)
time.sleep(5)
manual_login(3)
while find_window('Open API login') != 0:
time.sleep(1)
Process(target=UpdaterLong, args=(3, queryQ, lock)).start()
while find_window('Open API login') == 0:
time.sleep(1)
time.sleep(5)
manual_login(4)
while find_window('Open API login') != 0:
time.sleep(1)
|
test_event_ping_no_response.py
|
# -*- coding: utf-8 -*-
__author__ = 'Marcin Usielski'
__copyright__ = 'Copyright (C) 2020, Nokia'
__email__ = 'marcin.usielski@nokia.com'
import time
from moler.events.unix.ping_no_response import PingNoResponse
from moler.util.moler_test import MolerTest
import datetime
def test_event_ping_no_response(buffer_connection):
counter = dict()
counter['nr'] = 0
sleep_time = 0.4
max_timeout = 5.0
def callback_fun(param):
param['nr'] += 1
output = "From 192.168.255.126 icmp_seq=1 Destination Host Unreachable"
event = PingNoResponse(connection=buffer_connection.moler_connection, till_occurs_times=2)
event.add_event_occurred_callback(callback=callback_fun, callback_params={'param': counter})
assert 0 == counter['nr']
event.start()
buffer_connection.moler_connection.data_received(output.encode("utf-8"), datetime.datetime.now())
start_time = time.time()
while time.time() - start_time <= max_timeout:
if 1 == counter['nr']:
break
MolerTest.sleep(sleep_time)
assert 1 == counter['nr']
event.pause()
buffer_connection.moler_connection.data_received(output.encode("utf-8"), datetime.datetime.now())
MolerTest.sleep(sleep_time)
assert 1 == counter['nr']
event.resume()
buffer_connection.moler_connection.data_received(output.encode("utf-8"), datetime.datetime.now())
event.await_done()
start_time = time.time()
while time.time() - start_time <= max_timeout:
if 2 == counter['nr']:
break
MolerTest.sleep(sleep_time)
assert 2 == counter['nr']
assert event.done() is True
def test_erase_not_full_line_on_pause(buffer_connection):
output = "From 192.168.255.126 icmp_seq=1 Destination Host Unreachable"
sleep_time = 0.0005
processed = {'process': 0}
class PingNoResponseDelay(PingNoResponse):
def _process_line_from_output(self, current_chunk, line, is_full_line):
processed['process'] += 1
MolerTest.sleep(seconds=sleep_time)
super(PingNoResponseDelay, self)._process_line_from_output(current_chunk=current_chunk,
line=line, is_full_line=is_full_line)
event = PingNoResponseDelay(connection=buffer_connection.moler_connection, till_occurs_times=2)
event.start()
run = True
def feed_in_separate_thread():
while run:
buffer_connection.moler_connection.data_received("abcde\nfghi\njkl".encode("utf-8"), datetime.datetime.now())
MolerTest.sleep(sleep_time/10)
from threading import Thread
tf = Thread(target=feed_in_separate_thread)
tf.daemon = True
tf.start()
start_time = time.time()
while time.time() - start_time < 4 or processed['process'] < 300:
event.pause()
MolerTest.sleep(sleep_time)
event.resume()
MolerTest.sleep(sleep_time)
event.resume()
run = False
MolerTest.sleep(0.2)
buffer_connection.moler_connection.data_received(output.encode("utf-8"), datetime.datetime.now())
buffer_connection.moler_connection.data_received(output.encode("utf-8"), datetime.datetime.now())
event.await_done(timeout=1)
assert event.done() is True
|
queue.py
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
"""
* @ Scout for Python
##############################################################################
# Author: YWJT / ZhiQiang Koo #
# Modify: 2020-03-13 #
##############################################################################
# This program is distributed under the "Artistic License" Agreement #
# The LICENSE file is located in the same directory as this program. Please #
# read the LICENSE file before you make copies or distribute this program #
##############################################################################
"""
import sys
sys.path.append("..")
import os
import time
import psutil
from threading import Thread
from multiprocessing import Process
from multiprocessing import Queue
from collections import deque
from cache.cache import CacheServer
from base import Loger
from base import ScoutBase
class PQueues(ScoutBase):
def __init__(self):
ScoutBase.__init__(self)
self.TCP_DQ = deque(maxlen=500)
self.UDP_DQ = deque(maxlen=500)
"""Instant a CacheServer
exptime:
expireAfterSeconds: <int> Used to create an expiring (TTL) collection.
MongoDB will automatically delete documents from this collection after <int> seconds.
The indexed field must be a UTC datetime or the data will not expire.
"""
__Cache=CacheServer().create_or_connect_cache()
self.TCP=__Cache["TCP"]
self.UDP=__Cache["UDP"]
CacheServer().create_index(self.TCP, "exptime", self.avr['expire_after_seconds'])
CacheServer().create_index(self.UDP, "exptime", self.avr['expire_after_seconds'])
def Qset(self, q=None):
self.q = q
def saveCache(self, bolt, stdout):
try:
obj = getattr(self, str(bolt))
if type(stdout)==list:
CacheServer().insert_many(obj, stdout)
else:
CacheServer().insert_one(obj, stdout)
except Exception as e:
Loger().ERROR("failed to cache %s record(s), Error: %s" % (str(bolt), str(e)))
def Qpush(self, value):
self.q.put(value)
def Qdeque(self, dq, collection, value):
if len(dq) == dq.maxlen:
self.saveCache(collection, list(dq))
dq.clear()
time.sleep(0.1)
dq.append(value)
def Qsave(self):
while 1:
DQ=self.q.get()
if DQ:
_dq_handle = getattr(self, "%s_DQ" % str(DQ["proto"]))
self.Qdeque(_dq_handle, DQ["proto"], DQ)
else:
time.sleep(1)
def createThread(self, func, *args):
t = Thread(target=func, args=args)
t.start()
return t
def createProcess(self, func, *args):
p = Process(target=func, args=args)
p.start()
return p
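# Minimal usage sketch (illustrative only; every field except "proto" is an
# assumption about what the capture side pushes onto the queue):
#
#   pq = PQueues()
#   pq.Qset(Queue())
#   pq.createThread(pq.Qsave)        # drain the queue into the TCP/UDP deques
#   pq.Qpush({'proto': 'TCP', 'saddr': '10.0.0.1', 'dport': 443})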
|
test_io.py
|
"""Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
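# For example, a new test case references the implementation only through
# attributes and is then bound to both modules by a pair of subclasses (sketch
# only; in the full test file these attributes are supplied per implementation):
#
#   class ExampleIOTest(unittest.TestCase):
#       def test_getvalue(self):
#           f = self.BytesIO()
#           f.write(b"data")
#           self.assertEqual(f.getvalue(), b"data")
#
#   class CExampleIOTest(ExampleIOTest):
#       BytesIO = io.BytesIO
#
#   class PyExampleIOTest(ExampleIOTest):
#       BytesIO = pyio.BytesIO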
import abc
import array
import errno
import locale
import os
import pickle
import random
import signal
import sys
import sysconfig
import threading
import time
import unittest
import warnings
import weakref
from collections import deque, UserList
from itertools import cycle, count
from test import support
from test.support.script_helper import assert_python_ok, run_python_until_end
from test.support import FakePath
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
if '__pypy__' in sys.builtin_module_names:
raise ImportError # don't use ctypes, missing ctypes.resize()
import ctypes
except ImportError:
def byteslike(*pos, **kw):
return array.array("b", bytes(*pos, **kw))
else:
def byteslike(*pos, **kw):
"""Create a bytes-like object having no string or sequence methods"""
data = bytes(*pos, **kw)
obj = EmptyStruct()
ctypes.resize(obj, len(data))
memoryview(obj).cast("B")[:] = data
return obj
class EmptyStruct(ctypes.Structure):
pass
_cflags = sysconfig.get_config_var('CFLAGS') or ''
_config_args = sysconfig.get_config_var('CONFIG_ARGS') or ''
MEMORY_SANITIZER = (
'-fsanitize=memory' in _cflags or
'--with-memory-sanitizer' in _config_args
)
# Does io.IOBase finalizer log the exception if the close() method fails?
# The exception is ignored silently by default in release build.
IOBASE_EMITS_UNRAISABLE = (hasattr(sys, "gettotalrefcount") or sys.flags.dev_mode)
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with open(__file__, "r", encoding="latin-1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
"""A RawIO implementation without read(), so as to exercise the default
RawIO.read() which calls readinto()."""
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
self._extraneous_reads = 0
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
return 0 # wrong but we gotta return something
def tell(self):
return 0 # same comment as above
def readinto(self, buf):
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
self._extraneous_reads += 1
return 0
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
pass
class MockRawIO(MockRawIOWithoutRead):
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
self._extraneous_reads += 1
return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return super().write(b) * 2
def read(self, n=None):
return super().read(n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
super().readinto(buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class SlowFlushRawIO(MockRawIO):
def __init__(self):
super().__init__()
self.in_flush = threading.Event()
def flush(self):
self.in_flush.set()
time.sleep(0.25)
class CSlowFlushRawIO(SlowFlushRawIO, io.RawIOBase):
pass
class PySlowFlushRawIO(SlowFlushRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise OSError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super().__init__(data)
def read(self, n=None):
res = super().read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super().readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
class MockUnseekableIO:
def seekable(self):
return False
def seek(self, *args):
raise self.UnsupportedOperation("not seekable")
def tell(self, *args):
raise self.UnsupportedOperation("not seekable")
def truncate(self, *args):
raise self.UnsupportedOperation("not seekable")
class CMockUnseekableIO(MockUnseekableIO, io.BytesIO):
UnsupportedOperation = io.UnsupportedOperation
class PyMockUnseekableIO(MockUnseekableIO, pyio.BytesIO):
UnsupportedOperation = pyio.UnsupportedOperation
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b"".join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence=0):
# naive implementation, enough for tests
return 0
def writable(self):
return True
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
if n > 0:
# write data up to the first blocker
self._write_stack.append(b[:n])
return n
else:
# cancel blocker and indicate would block
self._blocker_char = None
return None
self._write_stack.append(b)
return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
def setUp(self):
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
buffer = bytearray(b" world\n\n\n")
self.assertEqual(f.write(buffer), 9)
buffer[:] = b"*" * 9 # Overwrite our copy of the data
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = byteslike(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(bytes(data), b" worl")
data = bytearray(5)
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(byteslike(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(byteslike()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
f.seek(0)
data = byteslike(5)
self.assertEqual(f.readinto1(data), 5)
self.assertEqual(bytes(data), b"hello")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
try:
self.assertEqual(f.seek(self.LARGE), self.LARGE)
except (OverflowError, ValueError):
self.skipTest("no largefile support")
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
exc = self.UnsupportedOperation
for mode in ("w", "wb"):
with self.open(support.TESTFN, mode) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "wb", buffering=0) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "rb", buffering=0) as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "rb") as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "r") as fp:
self.assertRaises(exc, fp.write, "blah")
self.assertRaises(exc, fp.writelines, ["blah\n"])
# Non-zero seeking from current or end pos
self.assertRaises(exc, fp.seek, 1, self.SEEK_CUR)
self.assertRaises(exc, fp.seek, -1, self.SEEK_END)
def test_optional_abilities(self):
# Test for OSError when optional APIs are not supported
# The purpose of this test is to try fileno(), reading, writing and
# seeking operations with various objects that indicate they do not
# support these operations.
def pipe_reader():
[r, w] = os.pipe()
os.close(w) # So that read() is harmless
return self.FileIO(r, "r")
def pipe_writer():
[r, w] = os.pipe()
self.addCleanup(os.close, r)
# Guarantee that we can write into the pipe without blocking
thread = threading.Thread(target=os.read, args=(r, 100))
thread.start()
self.addCleanup(thread.join)
return self.FileIO(w, "w")
def buffered_reader():
return self.BufferedReader(self.MockUnseekableIO())
def buffered_writer():
return self.BufferedWriter(self.MockUnseekableIO())
def buffered_random():
return self.BufferedRandom(self.BytesIO())
def buffered_rw_pair():
return self.BufferedRWPair(self.MockUnseekableIO(),
self.MockUnseekableIO())
def text_reader():
class UnseekableReader(self.MockUnseekableIO):
writable = self.BufferedIOBase.writable
write = self.BufferedIOBase.write
return self.TextIOWrapper(UnseekableReader(), "ascii")
def text_writer():
class UnseekableWriter(self.MockUnseekableIO):
readable = self.BufferedIOBase.readable
read = self.BufferedIOBase.read
return self.TextIOWrapper(UnseekableWriter(), "ascii")
tests = (
(pipe_reader, "fr"), (pipe_writer, "fw"),
(buffered_reader, "r"), (buffered_writer, "w"),
(buffered_random, "rws"), (buffered_rw_pair, "rw"),
(text_reader, "r"), (text_writer, "w"),
(self.BytesIO, "rws"), (self.StringIO, "rws"),
)
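# Ability letters used above: "f" = has a usable fileno(), "r" = readable,
# "w" = writable, "s" = seekable.  The loop below exercises each one.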
for [test, abilities] in tests:
with self.subTest(test), test() as obj:
readable = "r" in abilities
self.assertEqual(obj.readable(), readable)
writable = "w" in abilities
self.assertEqual(obj.writable(), writable)
if isinstance(obj, self.TextIOBase):
data = "3"
elif isinstance(obj, (self.BufferedIOBase, self.RawIOBase)):
data = b"3"
else:
self.fail("Unknown base class")
if "f" in abilities:
obj.fileno()
else:
self.assertRaises(OSError, obj.fileno)
if readable:
obj.read(1)
obj.read()
else:
self.assertRaises(OSError, obj.read, 1)
self.assertRaises(OSError, obj.read)
if writable:
obj.write(data)
else:
self.assertRaises(OSError, obj.write, data)
if sys.platform.startswith("win") and test in (
pipe_reader, pipe_writer):
# Pipes seem to appear as seekable on Windows
continue
seekable = "s" in abilities
self.assertEqual(obj.seekable(), seekable)
if seekable:
obj.tell()
obj.seek(0)
else:
self.assertRaises(OSError, obj.tell)
self.assertRaises(OSError, obj.seek, 0)
if writable and seekable:
obj.truncate()
obj.truncate(0)
else:
self.assertRaises(OSError, obj.truncate)
self.assertRaises(OSError, obj.truncate, 0)
def test_open_handles_NUL_chars(self):
fn_with_NUL = 'foo\0bar'
self.assertRaises(ValueError, self.open, fn_with_NUL, 'w')
bytes_fn = bytes(fn_with_NUL, 'ascii')
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertRaises(ValueError, self.open, bytes_fn, 'w')
def test_raw_file_io(self):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo\x00bar\n")
self.assertEqual(f.readline(None), b"another line")
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(support.TESTFN, "r") as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_readline_nonsizeable(self):
# Issue #30061
# Crash when readline() returns an object without __len__
class R(self.IOBase):
def readline(self):
return None
self.assertRaises((TypeError, StopIteration), next, R())
def test_next_nonsizeable(self):
# Issue #30061
# Crash when __next__() returns an object without __len__
class R(self.IOBase):
def __next__(self):
return None
self.assertRaises(TypeError, R().readlines, 1)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
# On Windows and macOS this test consumes large resources; it takes
# a long time to build the >2 GiB file and needs >2 GiB of disk space,
# so the 'largefile' resource must be enabled to run this test.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
support.requires(
'largefile',
'test requires %s bytes and a long time to run' % self.LARGE)
with self.open(support.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(support.TESTFN, "w+b") as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 100):
f = None
with self.open(support.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with self.open(support.TESTFN, "wb", bufsize) as f:
1/0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1/0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "a") as f:
self.assertGreater(f.tell(), 0)
def test_destructor(self):
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
with support.check_warnings(('', ResourceWarning)):
f = MyFileIO(support.TESTFN, "wb")
f.write(b"xxx")
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
# This exercises the availability of attributes on object
# destruction.
# (in the C version, close() is called by the tp_dealloc
# function, not by __del__)
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super().close()
def flush(self):
record.append(self.on_flush)
super().flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def test_array_writes(self):
a = array.array('i', range(10))
n = len(a.tobytes())
def check(f):
with f:
self.assertEqual(f.write(a), n)
f.writelines((a,))
check(self.BytesIO())
check(self.FileIO(support.TESTFN, "w"))
check(self.BufferedWriter(self.MockRawIO()))
check(self.BufferedRandom(self.MockRawIO()))
check(self.BufferedRWPair(self.MockRawIO(), self.MockRawIO()))
def test_closefd(self):
self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
closefd=False)
def test_read_closed(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
with self.open(support.TESTFN, "r") as f:
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
with self.open(support.TESTFN, "rb") as f:
file = self.open(f.fileno(), "rb", closefd=False)
self.assertEqual(file.read()[:3], b"egg")
file.close()
self.assertRaises(ValueError, file.readinto, bytearray(1))
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False)
def test_closefd_attr(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"egg\n")
with self.open(support.TESTFN, "r") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
def test_garbage_collection(self):
# FileIO objects are collected, and collecting them flushes
# all data to disk.
with support.check_warnings(('', ResourceWarning)):
f = self.FileIO(support.TESTFN, "wb")
f.write(b"abcxxx")
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
# Issue #1174606: reading from an unbounded stream such as /dev/zero.
zero = "/dev/zero"
if not os.path.exists(zero):
self.skipTest("{0} does not exist".format(zero))
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
self.skipTest("test requires at least 2 GiB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "r") as f:
self.assertRaises(OverflowError, f.read)
def check_flush_error_on_close(self, *args, **kwargs):
# Test that the file is closed despite a failed flush
# and that flush() is called before the file is closed.
f = self.open(*args, **kwargs)
closed = []
def bad_flush():
closed[:] = [f.closed]
raise OSError()
f.flush = bad_flush
self.assertRaises(OSError, f.close) # exception not swallowed
self.assertTrue(f.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
f.flush = lambda: None # break reference loop
def test_flush_error_on_close(self):
# raw file
# Issue #5700: io.FileIO calls flush() after file closed
self.check_flush_error_on_close(support.TESTFN, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0, closefd=False)
os.close(fd)
# buffered io
self.check_flush_error_on_close(support.TESTFN, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', closefd=False)
os.close(fd)
# text io
self.check_flush_error_on_close(support.TESTFN, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w', closefd=False)
os.close(fd)
def test_multi_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
# Exercise the default limited RawIOBase.read(n) implementation (which
# calls readinto() internally).
rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
self.assertEqual(rawio.read(2), b"ab")
self.assertEqual(rawio.read(2), b"c")
self.assertEqual(rawio.read(2), b"d")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"ef")
self.assertEqual(rawio.read(2), b"g")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"")
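# A rough sketch (not part of the test suite) of the fallback exercised
# above: the base class implements read(n) approximately as
#
#     def read(self, size):      # for size >= 0; size < 0 delegates to readall()
#         b = bytearray(size)
#         n = self.readinto(b)
#         if n is None:          # raw stream would block
#             return None
#         return bytes(b[:n])
#
# which is why MockRawIOWithoutRead, which defines readinto() but no
# read(), still supports read() here.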
def test_types_have_dict(self):
test = (
self.IOBase(),
self.RawIOBase(),
self.TextIOBase(),
self.StringIO(),
self.BytesIO()
)
for obj in test:
self.assertTrue(hasattr(obj, "__dict__"))
def test_opener(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
fd = os.open(support.TESTFN, os.O_RDONLY)
def opener(path, flags):
return fd
with self.open("non-existent", "r", opener=opener) as f:
self.assertEqual(f.read(), "egg\n")
def test_bad_opener_negative_1(self):
# Issue #27066.
def badopener(fname, flags):
return -1
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -1')
def test_bad_opener_other_negative(self):
# Issue #27066.
def badopener(fname, flags):
return -2
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -2')
def test_fileio_closefd(self):
# Issue #4841
with self.open(__file__, 'rb') as f1, \
self.open(__file__, 'rb') as f2:
fileio = self.FileIO(f1.fileno(), closefd=False)
# .__init__() must not close f1
fileio.__init__(f2.fileno(), closefd=False)
f1.readline()
# .close() must not close f2
fileio.close()
f2.readline()
def test_nonbuffered_textio(self):
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', buffering=0)
def test_invalid_newline(self):
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', newline='invalid')
def test_buffered_readinto_mixin(self):
# Test the implementation provided by BufferedIOBase
class Stream(self.BufferedIOBase):
def read(self, size):
return b"12345"
read1 = read
stream = Stream()
for method in ("readinto", "readinto1"):
with self.subTest(method):
buffer = byteslike(5)
self.assertEqual(getattr(stream, method)(buffer), 5)
self.assertEqual(bytes(buffer), b"12345")
def test_fspath_support(self):
def check_path_succeeds(path):
with self.open(path, "w") as f:
f.write("egg\n")
with self.open(path, "r") as f:
self.assertEqual(f.read(), "egg\n")
check_path_succeeds(FakePath(support.TESTFN))
check_path_succeeds(FakePath(os.fsencode(support.TESTFN)))
with self.open(support.TESTFN, "w") as f:
bad_path = FakePath(f.fileno())
with self.assertRaises(TypeError):
self.open(bad_path, 'w')
bad_path = FakePath(None)
with self.assertRaises(TypeError):
self.open(bad_path, 'w')
bad_path = FakePath(FloatingPointError)
with self.assertRaises(FloatingPointError):
self.open(bad_path, 'w')
# ensure that refcounting is correct with some error conditions
with self.assertRaisesRegex(ValueError, 'read/write/append mode'):
self.open(FakePath(support.TESTFN), 'rwxa')
def test_RawIOBase_readall(self):
# Exercise the default unlimited RawIOBase.read() and readall()
# implementations.
rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
self.assertEqual(rawio.read(), b"abcdefg")
rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
self.assertEqual(rawio.readall(), b"abcdefg")
def test_BufferedIOBase_readinto(self):
# Exercise the default BufferedIOBase.readinto() and readinto1()
# implementations (which call read() or read1() internally).
class Reader(self.BufferedIOBase):
def __init__(self, avail):
self.avail = avail
def read(self, size):
result = self.avail[:size]
self.avail = self.avail[size:]
return result
def read1(self, size):
"""Returns no more than 5 bytes at once"""
return self.read(min(size, 5))
tests = (
# (test method, total data available, read buffer size, expected
# read size)
("readinto", 10, 5, 5),
("readinto", 10, 6, 6), # More than read1() can return
("readinto", 5, 6, 5), # Buffer larger than total available
("readinto", 6, 7, 6),
("readinto", 10, 0, 0), # Empty buffer
("readinto1", 10, 5, 5), # Result limited to single read1() call
("readinto1", 10, 6, 5), # Buffer larger than read1() can return
("readinto1", 5, 6, 5), # Buffer larger than total available
("readinto1", 6, 7, 5),
("readinto1", 10, 0, 0), # Empty buffer
)
UNUSED_BYTE = 0x81
for test in tests:
with self.subTest(test):
method, avail, request, result = test
reader = Reader(bytes(range(avail)))
buffer = bytearray((UNUSED_BYTE,) * request)
method = getattr(reader, method)
self.assertEqual(method(buffer), result)
self.assertEqual(len(buffer), request)
self.assertSequenceEqual(buffer[:result], range(result))
unused = (UNUSED_BYTE,) * (request - result)
self.assertSequenceEqual(buffer[result:], unused)
self.assertEqual(len(reader.avail), avail - result)
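# The defaults exercised above are, approximately:
#
#     def readinto(self, b):          # BufferedIOBase default
#         data = self.read(len(b))
#         b[:len(data)] = data
#         return len(data)
#
# with readinto1() doing the same via read1().  That single read1() call,
# capped at 5 bytes by Reader.read1(), explains the readinto1 expectations
# in the table above.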
def test_close_assert(self):
class R(self.IOBase):
def __setattr__(self, name, value):
pass
def flush(self):
raise OSError()
f = R()
# This would cause an assertion failure.
self.assertRaises(OSError, f.close)
# Silence destructor error
R.flush = lambda self: None
class CIOTest(IOTest):
def test_IOBase_finalize(self):
# Issue #12149: segmentation fault on _PyIOBase_finalize when both a
# class which inherits IOBase and an object of this class are caught
# in a reference cycle and close() is already in the method cache.
class MyIO(self.IOBase):
def close(self):
pass
# create an instance to populate the method cache
MyIO()
obj = MyIO()
obj.obj = obj
wr = weakref.ref(obj)
del MyIO
del obj
support.gc_collect()
self.assertIsNone(wr(), wr)
class PyIOTest(IOTest):
pass
@support.cpython_only
class APIMismatchTest(unittest.TestCase):
def test_RawIOBase_io_in_pyio_match(self):
"""Test that pyio RawIOBase class has all c RawIOBase methods"""
mismatch = support.detect_api_mismatch(pyio.RawIOBase, io.RawIOBase,
ignore=('__weakref__',))
self.assertEqual(mismatch, set(), msg='Python RawIOBase does not have all C RawIOBase methods')
def test_RawIOBase_pyio_in_io_match(self):
"""Test that c RawIOBase class has all pyio RawIOBase methods"""
mismatch = support.detect_api_mismatch(io.RawIOBase, pyio.RawIOBase)
self.assertEqual(mismatch, set(), msg='C RawIOBase does not have all Python RawIOBase methods')
class CommonBufferedTests:
# Tests common to BufferedReader, BufferedWriter and BufferedRandom
def test_detach(self):
raw = self.MockRawIO()
buf = self.tp(raw)
self.assertIs(buf.detach(), raw)
self.assertRaises(ValueError, buf.detach)
repr(buf) # Should still work
def test_fileno(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertEqual(42, bufio.fileno())
def test_invalid_args(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
# Invalid whence
self.assertRaises(ValueError, bufio.seek, 0, -1)
self.assertRaises(ValueError, bufio.seek, 0, 9)
def test_override_destructor(self):
tp = self.tp
record = []
class MyBufferedIO(tp):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
rawio = self.MockRawIO()
bufio = MyBufferedIO(rawio)
del bufio
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_context_manager(self):
# Test usability as a context manager
rawio = self.MockRawIO()
bufio = self.tp(rawio)
def _with():
with bufio:
pass
_with()
# bufio should now be closed, and using it a second time should raise
# a ValueError.
self.assertRaises(ValueError, _with)
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
with support.catch_unraisable_exception() as cm:
with self.assertRaises(AttributeError):
self.tp(rawio).xyzzy
if not IOBASE_EMITS_UNRAISABLE:
self.assertIsNone(cm.unraisable)
elif cm.unraisable is not None:
self.assertEqual(cm.unraisable.exc_type, OSError)
def test_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
clsname = r"(%s\.)?%s" % (self.tp.__module__, self.tp.__qualname__)
self.assertRegex(repr(b), "<%s>" % clsname)
raw.name = "dummy"
self.assertRegex(repr(b), "<%s name='dummy'>" % clsname)
raw.name = b"dummy"
self.assertRegex(repr(b), "<%s name=b'dummy'>" % clsname)
def test_recursive_repr(self):
# Issue #25455
raw = self.MockRawIO()
b = self.tp(raw)
with support.swap_attr(raw, 'name', b):
try:
repr(b) # Should not crash
except RuntimeError:
pass
def test_flush_error_on_close(self):
# Test that the buffered file is closed despite a failed flush
# and that flush() is called before the file is closed.
raw = self.MockRawIO()
closed = []
def bad_flush():
closed[:] = [b.closed, raw.closed]
raise OSError()
raw.flush = bad_flush
b = self.tp(raw)
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
self.assertTrue(raw.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
raw.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
b.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(b.closed)
# Silence destructor error
raw.close = lambda: None
b.flush = lambda: None
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
raw = self.MockRawIO()
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
b.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(b.closed)
# Silence destructor error
b.flush = lambda: None
raw.close = lambda: None
def test_multi_close(self):
raw = self.MockRawIO()
b = self.tp(raw)
b.close()
b.close()
b.close()
self.assertRaises(ValueError, b.flush)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
def test_readonly_attributes(self):
raw = self.MockRawIO()
buf = self.tp(raw)
x = self.MockRawIO()
with self.assertRaises(AttributeError):
buf.raw = x
class SizeofTest:
@support.cpython_only
def test_sizeof(self):
bufsize1 = 4096
bufsize2 = 8192
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize1)
size = sys.getsizeof(bufio) - bufsize1
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize2)
self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
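# The subtraction above isolates the fixed per-object overhead, so two
# instances differing only in buffer_size should differ in size by exactly
# (bufsize2 - bufsize1) bytes.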
@support.cpython_only
def test_buffer_freeing(self):
bufsize = 4096
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize)
size = sys.getsizeof(bufio) - bufsize
bufio.close()
self.assertEqual(sys.getsizeof(bufio), size)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = "rb"
def test_constructor(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(b"abc", bufio.read())
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
rawio = self.MockRawIO([b"abc"])
bufio.__init__(rawio)
self.assertEqual(b"abc", bufio.read())
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.read, 0)
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.read(0), b'')
def test_read(self):
for arg in (None, 7):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(arg))
# Invalid args
self.assertRaises(ValueError, bufio.read, -2)
def test_read1(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"b", bufio.read1(1))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"", bufio.read1(0))
self.assertEqual(b"c", bufio.read1(100))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"d", bufio.read1(100))
self.assertEqual(rawio._reads, 2)
self.assertEqual(b"efg", bufio.read1(100))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1(100))
self.assertEqual(rawio._reads, 4)
def test_read1_arbitrary(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"bc", bufio.read1())
self.assertEqual(b"d", bufio.read1())
self.assertEqual(b"efg", bufio.read1(-1))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1())
self.assertEqual(rawio._reads, 4)
def test_readinto(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
b = bytearray(2)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"cd")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ef")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"gf")
self.assertEqual(bufio.readinto(b), 0)
self.assertEqual(b, b"gf")
rawio = self.MockRawIO((b"abc", None))
bufio = self.tp(rawio)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"cb")
def test_readinto1(self):
buffer_size = 10
rawio = self.MockRawIO((b"abc", b"de", b"fgh", b"jkl"))
bufio = self.tp(rawio, buffer_size=buffer_size)
b = bytearray(2)
self.assertEqual(bufio.peek(3), b'abc')
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 1)
self.assertEqual(b[:1], b"c")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"de")
self.assertEqual(rawio._reads, 2)
b = bytearray(2*buffer_size)
self.assertEqual(bufio.peek(3), b'fgh')
self.assertEqual(rawio._reads, 3)
self.assertEqual(bufio.readinto1(b), 6)
self.assertEqual(b[:6], b"fghjkl")
self.assertEqual(rawio._reads, 4)
def test_readinto_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readinto1_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto1(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readlines(self):
def bufio():
rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
return self.tp(rawio)
self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
def test_buffering(self):
data = b"abcdefghi"
dlen = len(data)
tests = [
[ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
[ 100, [ 3, 3, 3], [ dlen ] ],
[ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
]
for bufsize, buf_read_sizes, raw_read_sizes in tests:
rawio = self.MockFileIO(data)
bufio = self.tp(rawio, buffer_size=bufsize)
pos = 0
for nbytes in buf_read_sizes:
self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
pos += nbytes
# this is mildly implementation-dependent
self.assertEqual(rawio.read_history, raw_read_sizes)
def test_read_non_blocking(self):
# Inject some None's in there to simulate EWOULDBLOCK
rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
bufio = self.tp(rawio)
self.assertEqual(b"abcd", bufio.read(6))
self.assertEqual(b"e", bufio.read(1))
self.assertEqual(b"fg", bufio.read())
self.assertEqual(b"", bufio.peek(1))
self.assertIsNone(bufio.read())
self.assertEqual(b"", bufio.read())
rawio = self.MockRawIO((b"a", None, None))
self.assertEqual(b"a", rawio.readall())
self.assertIsNone(rawio.readall())
def test_read_past_eof(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(9000))
def test_read_all(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read())
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes with exactly the same number of 0's,
# 1's... 255's. This will help us check that concurrent reading
# doesn't duplicate or forget contents.
N = 1000
l = list(range(256)) * N
random.shuffle(l)
s = bytes(bytearray(l))
with self.open(support.TESTFN, "wb") as f:
f.write(s)
with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
results = []
def f():
try:
# Intra-buffer read then buffer-flushing read
for n in cycle([1, 19]):
s = bufio.read(n)
if not s:
break
# list.append() is atomic
results.append(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
s = b''.join(results)
for i in range(256):
c = bytes(bytearray([i]))
self.assertEqual(s.count(c), N)
finally:
support.unlink(support.TESTFN)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
bufio.read(1)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
self.assertRaises(self.UnsupportedOperation, bufio.tell)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
# Silence destructor error
bufio.close = lambda: None
def test_no_extraneous_read(self):
# Issue #9550; when the raw IO object has satisfied the read request,
# we should not issue any additional reads, otherwise it may block
# (e.g. socket).
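# Put differently: once a raw read has returned enough bytes to satisfy
# the caller, the buffered layer must not issue a speculative follow-up
# raw read; rawio._extraneous_reads counts any such reads.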
bufsize = 16
for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
rawio = self.MockRawIO([b"x" * n])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
# Simple case: one raw read is enough to satisfy the request.
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
# A more complex case where two raw reads are needed to satisfy
# the request.
rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
def test_read_on_closed(self):
# Issue #23796
b = io.BufferedReader(io.BytesIO(b"12"))
b.read(1)
b.close()
self.assertRaises(ValueError, b.peek)
self.assertRaises(ValueError, b.read1, 1)
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
tp = io.BufferedReader
@unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.read)
def test_misbehaved_io_read(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
# _pyio.BufferedReader seems to implement reading differently, so
# checking this is not so easy.
self.assertRaises(OSError, bufio.read, 10)
def test_garbage_collection(self):
# C BufferedReader objects are collected.
# The Python version has __del__, so it would end up in gc.garbage instead.
self.addCleanup(support.unlink, support.TESTFN)
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedReader|__init__"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedReaderTest(BufferedReaderTest):
tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(3, bufio.write(b"abc"))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEqual(3, bufio.write(b"ghi"))
bufio.flush()
self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.write, b'')
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.write(b''), 0)
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
buffer = bytearray(b"def")
bufio.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
bufio.flush()
self.assertEqual(b"".join(writer._write_stack), b"abcdef")
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes, test the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
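# e.g. the first 18 sizes are [1] * 15 followed by [2] * 3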
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents, b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
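# Note: BlockingIOError.characters_written reports how many of the 18
# requested bytes were accepted before the raw stream blocked: per the
# comment above, 8 written to the raw stream plus 8 buffered, i.e. 16.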
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_writelines(self):
l = [b'ab', b'cd', b'ef']
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_userlist(self):
l = UserList([b'ab', b'cd', b'ef'])
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_error(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
self.assertRaises(TypeError, bufio.writelines, None)
self.assertRaises(TypeError, bufio.writelines, 'abc')
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
del bufio
support.gc_collect()
self.assertEqual(b"abc", writer._write_stack[0])
def test_truncate(self):
# Truncate implicitly flushes the buffer.
self.addCleanup(support.unlink, support.TESTFN)
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 6)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.read(), b"abc")
def test_truncate_after_write(self):
# Ensure that truncate preserves the file position after
# writes longer than the buffer size.
# Issue: https://bugs.python.org/issue32228
self.addCleanup(support.unlink, support.TESTFN)
with self.open(support.TESTFN, "wb") as f:
# Fill the file with some initial data
f.write(b'\x00' * 10000)
buffer_sizes = [8192, 4096, 200]
for buffer_size in buffer_sizes:
with self.open(support.TESTFN, "r+b", buffering=buffer_size) as f:
f.write(b'\x00' * (buffer_size + 1))
# After the write, write_pos and write_end are reset to 0
f.read(1)
# The read operation ensures that pos != raw_pos
f.truncate()
self.assertEqual(f.tell(), buffer_size + 2)
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes from many threads and test they were
# all flushed.
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n+size])
n += size
del contents
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
bufio.close()
with self.open(support.TESTFN, "rb") as f:
s = f.read()
for i in range(256):
self.assertEqual(s.count(bytes([i])), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
self.assertRaises(OSError, bufio.write, b"abcdef")
# Silence destructor error
bufio.close = lambda: None
def test_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), 8, 12)
def test_write_error_on_close(self):
raw = self.MockRawIO()
def bad_write(b):
raise OSError()
raw.write = bad_write
b = self.tp(raw)
b.write(b'spam')
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
def test_slow_close_from_thread(self):
# Issue #31976
rawio = self.SlowFlushRawIO()
bufio = self.tp(rawio, 8)
t = threading.Thread(target=bufio.close)
t.start()
rawio.in_flush.wait()
self.assertRaises(ValueError, bufio.write, b'spam')
self.assertTrue(bufio.closed)
t.join()
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
tp = io.BufferedWriter
@unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_constructor(self):
BufferedWriterTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.write, b"def")
def test_garbage_collection(self):
# C BufferedWriter objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it would end up in gc.garbage instead.
self.addCleanup(support.unlink, support.TESTFN)
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.write(b"123xxx")
f.x = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"123xxx")
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedWriter|__init__"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedWriterTest(BufferedWriterTest):
tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
def test_constructor(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
def test_uninitialized(self):
pair = self.tp.__new__(self.tp)
del pair
pair = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.read, 0)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.write, b'')
pair.__init__(self.MockRawIO(), self.MockRawIO())
self.assertEqual(pair.read(0), b'')
self.assertEqual(pair.write(b''), 0)
def test_detach(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertRaises(self.UnsupportedOperation, pair.detach)
def test_constructor_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
def test_constructor_with_not_readable(self):
class NotReadable(MockRawIO):
def readable(self):
return False
self.assertRaises(OSError, self.tp, NotReadable(), self.MockRawIO())
def test_constructor_with_not_writeable(self):
class NotWriteable(MockRawIO):
def writable(self):
return False
self.assertRaises(OSError, self.tp, self.MockRawIO(), NotWriteable())
def test_read(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read(3), b"abc")
self.assertEqual(pair.read(1), b"d")
self.assertEqual(pair.read(), b"ef")
pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
self.assertEqual(pair.read(None), b"abc")
def test_readlines(self):
pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
def test_read1(self):
# .read1() is delegated to the underlying reader object, so this test
# can be shallow.
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read1(3), b"abc")
self.assertEqual(pair.read1(), b"def")
def test_readinto(self):
for method in ("readinto", "readinto1"):
with self.subTest(method):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
data = byteslike(b'\0' * 5)
self.assertEqual(getattr(pair, method)(data), 5)
self.assertEqual(bytes(data), b"abcde")
def test_write(self):
w = self.MockRawIO()
pair = self.tp(self.MockRawIO(), w)
pair.write(b"abc")
pair.flush()
buffer = bytearray(b"def")
pair.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
pair.flush()
self.assertEqual(w._write_stack, [b"abc", b"def"])
def test_peek(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertTrue(pair.peek(3).startswith(b"abc"))
self.assertEqual(pair.read(3), b"abc")
def test_readable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.readable())
def test_writeable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.writable())
def test_seekable(self):
# BufferedRWPairs are never seekable, even if their readers and writers
# are.
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.seekable())
# .flush() is delegated to the underlying writer object and has been
# tested in the test_write method.
def test_close_and_closed(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
pair.close()
self.assertTrue(pair.closed)
def test_reader_close_error_on_close(self):
def reader_close():
reader_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertTrue(pair.closed)
self.assertFalse(reader.closed)
self.assertTrue(writer.closed)
# Silence destructor error
reader.close = lambda: None
def test_writer_close_error_on_close(self):
def writer_close():
writer_non_existing
reader = self.MockRawIO()
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('writer_non_existing', str(err.exception))
self.assertFalse(pair.closed)
self.assertTrue(reader.closed)
self.assertFalse(writer.closed)
# Silence destructor error
writer.close = lambda: None
writer = None
# Ignore BufferedWriter (of the BufferedRWPair) unraisable exception
with support.catch_unraisable_exception():
# Ignore BufferedRWPair unraisable exception
with support.catch_unraisable_exception():
pair = None
support.gc_collect()
support.gc_collect()
def test_reader_writer_close_error_on_close(self):
def reader_close():
reader_non_existing
def writer_close():
writer_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('writer_non_existing', str(err.exception.__context__))
self.assertFalse(pair.closed)
self.assertFalse(reader.closed)
self.assertFalse(writer.closed)
# Silence destructor error
reader.close = lambda: None
writer.close = lambda: None
def test_isatty(self):
class SelectableIsAtty(MockRawIO):
def __init__(self, isatty):
MockRawIO.__init__(self)
self._isatty = isatty
def isatty(self):
return self._isatty
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
self.assertFalse(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
def test_weakref_clearing(self):
brw = self.tp(self.MockRawIO(), self.MockRawIO())
ref = weakref.ref(brw)
brw = None
ref = None # Shouldn't segfault.
class CBufferedRWPairTest(BufferedRWPairTest):
tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
read_mode = "rb+"
write_mode = "wb+"
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
def test_uninitialized(self):
BufferedReaderTest.test_uninitialized(self)
BufferedWriterTest.test_uninitialized(self)
def test_read_and_write(self):
raw = self.MockRawIO((b"asdf", b"ghjk"))
rw = self.tp(raw, 8)
self.assertEqual(b"as", rw.read(2))
rw.write(b"ddd")
rw.write(b"eee")
self.assertFalse(raw._write_stack) # Buffer writes
self.assertEqual(b"ghjk", rw.read())
self.assertEqual(b"dddeee", raw._write_stack[0])
def test_seek_and_tell(self):
raw = self.BytesIO(b"asdfghjkl")
rw = self.tp(raw)
self.assertEqual(b"as", rw.read(2))
self.assertEqual(2, rw.tell())
rw.seek(0, 0)
self.assertEqual(b"asdf", rw.read(4))
rw.write(b"123f")
rw.seek(0, 0)
self.assertEqual(b"asdf123fl", rw.read())
self.assertEqual(9, rw.tell())
rw.seek(-4, 2)
self.assertEqual(5, rw.tell())
rw.seek(2, 1)
self.assertEqual(7, rw.tell())
self.assertEqual(b"fl", rw.read(11))
rw.flush()
self.assertEqual(b"asdf123fl", raw.getvalue())
self.assertRaises(TypeError, rw.seek, 0.0)
def check_flush_and_read(self, read_func):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
self.assertEqual(b"ab", read_func(bufio, 2))
bufio.write(b"12")
self.assertEqual(b"ef", read_func(bufio, 2))
self.assertEqual(6, bufio.tell())
bufio.flush()
self.assertEqual(6, bufio.tell())
self.assertEqual(b"ghi", read_func(bufio))
raw.seek(0, 0)
raw.write(b"XYZ")
# flush() resets the read buffer
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"XYZ", read_func(bufio, 3))
def test_flush_and_read(self):
self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
def test_flush_and_readinto(self):
def _readinto(bufio, n=-1):
b = bytearray(n if n >= 0 else 9999)
n = bufio.readinto(b)
return bytes(b[:n])
self.check_flush_and_read(_readinto)
def test_flush_and_peek(self):
def _peek(bufio, n=-1):
# This relies on the fact that the buffer can hold the whole
# raw stream; otherwise peek() may return fewer bytes.
b = bufio.peek(n)
if n != -1:
b = b[:n]
bufio.seek(len(b), 1)
return b
self.check_flush_and_read(_peek)
def test_flush_and_write(self):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
bufio.write(b"123")
bufio.flush()
bufio.write(b"45")
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"12345fghi", raw.getvalue())
self.assertEqual(b"12345fghi", bufio.read())
def test_threads(self):
BufferedReaderTest.test_threads(self)
BufferedWriterTest.test_threads(self)
def test_writes_and_peek(self):
def _peek(bufio):
bufio.peek(1)
self.check_writes(_peek)
def _peek(bufio):
pos = bufio.tell()
bufio.seek(-1, 1)
bufio.peek(1)
bufio.seek(pos, 0)
self.check_writes(_peek)
def test_writes_and_reads(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.read(1)
self.check_writes(_read)
def test_writes_and_read1s(self):
def _read1(bufio):
bufio.seek(-1, 1)
bufio.read1(1)
self.check_writes(_read1)
def test_writes_and_readintos(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.readinto(bytearray(1))
self.check_writes(_read)
def test_write_after_readahead(self):
# Issue #6629: writing after the buffer was filled by readahead should
# first rewind the raw stream.
for overwrite_size in [1, 5]:
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 4)
# Trigger readahead
self.assertEqual(bufio.read(1), b"A")
self.assertEqual(bufio.tell(), 1)
# Overwriting should rewind the raw stream if needed
bufio.write(b"B" * overwrite_size)
self.assertEqual(bufio.tell(), overwrite_size + 1)
# If the write was smaller than the buffer, flush() and check that
# the rewind still happened.
bufio.flush()
self.assertEqual(bufio.tell(), overwrite_size + 1)
s = raw.getvalue()
self.assertEqual(s,
b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
def test_write_rewind_write(self):
# Various combinations of reading / writing / seeking backwards / writing again
def mutate(bufio, pos1, pos2):
assert pos2 >= pos1
# Fill the buffer
bufio.seek(pos1)
bufio.read(pos2 - pos1)
bufio.write(b'\x02')
# This writes earlier than the previous write, but still inside
# the buffer.
bufio.seek(pos1)
bufio.write(b'\x01')
b = b"\x80\x81\x82\x83\x84"
for i in range(0, len(b)):
for j in range(i, len(b)):
raw = self.BytesIO(b)
bufio = self.tp(raw, 100)
mutate(bufio, i, j)
bufio.flush()
expected = bytearray(b)
expected[j] = 2
expected[i] = 1
self.assertEqual(raw.getvalue(), expected,
"failed result for i=%d, j=%d" % (i, j))
def test_truncate_after_read_or_write(self):
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 100)
self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
self.assertEqual(bufio.truncate(), 2)
self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
self.assertEqual(bufio.truncate(), 4)
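# truncate() with no argument truncates at the current position and
# returns the new size, hence 2 after the 2-byte read and 4 after the
# 2-byte write above.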
def test_misbehaved_io(self):
BufferedReaderTest.test_misbehaved_io(self)
BufferedWriterTest.test_misbehaved_io(self)
def test_interleaved_read_write(self):
# Test for issue #12213
with self.BytesIO(b'abcdefgh') as raw:
with self.tp(raw, 100) as f:
f.write(b"1")
self.assertEqual(f.read(1), b'b')
f.write(b'2')
self.assertEqual(f.read1(1), b'd')
f.write(b'3')
buf = bytearray(1)
f.readinto(buf)
self.assertEqual(buf, b'f')
f.write(b'4')
self.assertEqual(f.peek(1), b'h')
f.flush()
self.assertEqual(raw.getvalue(), b'1b2d3f4h')
with self.BytesIO(b'abc') as raw:
with self.tp(raw, 100) as f:
self.assertEqual(f.read(1), b'a')
f.write(b"2")
self.assertEqual(f.read(1), b'c')
f.flush()
self.assertEqual(raw.getvalue(), b'a2c')
def test_interleaved_readline_write(self):
with self.BytesIO(b'ab\ncdef\ng\n') as raw:
with self.tp(raw) as f:
f.write(b'1')
self.assertEqual(f.readline(), b'b\n')
f.write(b'2')
self.assertEqual(f.readline(), b'def\n')
f.write(b'3')
self.assertEqual(f.readline(), b'\n')
f.flush()
self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
# You can't construct a BufferedRandom over a non-seekable stream.
test_unseekable = None
class CBufferedRandomTest(BufferedRandomTest, SizeofTest):
tp = io.BufferedRandom
@unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_constructor(self):
BufferedRandomTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_garbage_collection(self):
CBufferedReaderTest.test_garbage_collection(self)
CBufferedWriterTest.test_garbage_collection(self)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedRandom|__init__"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedRandomTest(BufferedRandomTest):
tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
"""
For testing seek/tell behavior with a stateful, buffering decoder.
Input is a sequence of words. Words may be fixed-length (length set
by input) or variable-length (period-terminated). In variable-length
mode, extra periods are ignored. Possible words are:
- 'i' followed by a number sets the input length, I (maximum 99).
When I is set to 0, words are period-terminated (variable-length mode).
- 'o' followed by a number sets the output length, O (maximum 99).
- Any other word is converted into a word followed by a period on
the output. The output word consists of the input word truncated
or padded out with hyphens to make its length equal to O. If O
is 0, the word is output verbatim without truncating or padding.
I and O are initially set to 1. When I changes, any buffered input is
re-scanned according to the new I. EOF also terminates the last word.
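For illustration (an assumed example, not one of the test cases below):
decoding b'i.o4.ab.cdef.' first sets I to 0 ('i' with no digits), then
sets O to 4, and produces 'ab--.cdef.' -- 'ab' is padded with hyphens and
truncated to the output length 4, while 'cdef' already fits exactly.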
"""
def __init__(self, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors)
self.reset()
def __repr__(self):
return '<SID %x>' % id(self)
def reset(self):
self.i = 1
self.o = 1
self.buffer = bytearray()
def getstate(self):
i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset()
return bytes(self.buffer), i*100 + o
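# getstate() packs (i, o) into a single int as i*100 + o after XOR-ing each
# with 1, so a freshly reset decoder (i == o == 1) reports flags == 0;
# setstate() below reverses both steps.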
def setstate(self, state):
buffer, io = state
self.buffer = bytearray(buffer)
i, o = divmod(io, 100)
self.i, self.o = i ^ 1, o ^ 1
def decode(self, input, final=False):
output = ''
for b in input:
if self.i == 0: # variable-length, terminated with period
if b == ord('.'):
if self.buffer:
output += self.process_word()
else:
self.buffer.append(b)
else: # fixed-length, terminate after self.i bytes
self.buffer.append(b)
if len(self.buffer) == self.i:
output += self.process_word()
if final and self.buffer: # EOF terminates the last word
output += self.process_word()
return output
def process_word(self):
output = ''
if self.buffer[0] == ord('i'):
self.i = min(99, int(self.buffer[1:] or 0)) # set input length
elif self.buffer[0] == ord('o'):
self.o = min(99, int(self.buffer[1:] or 0)) # set output length
else:
output = self.buffer.decode('ascii')
if len(output) < self.o:
output += '-'*self.o # pad out with hyphens
if self.o:
output = output[:self.o] # truncate to output length
output += '.'
self.buffer = bytearray()
return output
codecEnabled = False
@classmethod
def lookupTestDecoder(cls, name):
if cls.codecEnabled and name == 'test_decoder':
latin1 = codecs.lookup('latin-1')
return codecs.CodecInfo(
name='test_decoder', encode=latin1.encode, decode=None,
incrementalencoder=None,
streamreader=None, streamwriter=None,
incrementaldecoder=cls)
# Register the previous decoder for testing.
# Disabled by default, tests will enable it.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
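Each test case below is a (input bytes, final flag passed to decode(),
expected decoded output) triple.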
"""
test_cases = [
# I=1, O=1 (fixed-length input == fixed-length output)
(b'abcd', False, 'a.b.c.d.'),
# I=0, O=0 (variable-length input, variable-length output)
(b'oiabcd', True, 'abcd.'),
# I=0, O=0 (should ignore extra periods)
(b'oi...abcd...', True, 'abcd.'),
# I=0, O=6 (variable-length input, fixed-length output)
(b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
# I=2, O=6 (fixed-length input < fixed-length output)
(b'i.i2.o6xyz', True, 'xy----.z-----.'),
# I=6, O=3 (fixed-length input > fixed-length output)
(b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
# I=0, then 3; O=29, then 15 (with longer output)
(b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
'a----------------------------.' +
'b----------------------------.' +
'cde--------------------------.' +
'abcdefghijabcde.' +
'a.b------------.' +
'.c.------------.' +
'd.e------------.' +
'k--------------.' +
'l--------------.' +
'm--------------.')
]
def test_decoder(self):
# Try a few one-shot test cases.
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(input, eof), output)
# Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(b'oiabcd'), '')
self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def test_constructor(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
t.__init__(b, encoding="latin-1", newline="\r\n")
self.assertEqual(t.encoding, "latin-1")
self.assertEqual(t.line_buffering, False)
t.__init__(b, encoding="utf-8", line_buffering=True)
self.assertEqual(t.encoding, "utf-8")
self.assertEqual(t.line_buffering, True)
self.assertEqual("\xe9\n", t.readline())
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
def test_uninitialized(self):
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
del t
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
t.read, 0)
t.__init__(self.MockRawIO())
self.assertEqual(t.read(0), '')
def test_non_text_encoding_codecs_are_rejected(self):
# Ensure the constructor complains if passed a codec that isn't
# marked as a text encoding
# http://bugs.python.org/issue20404
r = self.BytesIO()
b = self.BufferedWriter(r)
with self.assertRaisesRegex(LookupError, "is not a text encoding"):
self.TextIOWrapper(b, encoding="hex")
def test_detach(self):
r = self.BytesIO()
b = self.BufferedWriter(r)
t = self.TextIOWrapper(b)
self.assertIs(t.detach(), b)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("howdy")
self.assertFalse(r.getvalue())
t.detach()
self.assertEqual(r.getvalue(), b"howdy")
self.assertRaises(ValueError, t.detach)
# Operations independent of the detached stream should still work
repr(t)
self.assertEqual(t.encoding, "ascii")
self.assertEqual(t.errors, "strict")
self.assertFalse(t.line_buffering)
self.assertFalse(t.write_through)
def test_repr(self):
raw = self.BytesIO("hello".encode("utf-8"))
b = self.BufferedReader(raw)
t = self.TextIOWrapper(b, encoding="utf-8")
modname = self.TextIOWrapper.__module__
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper encoding='utf-8'>" % modname)
raw.name = "dummy"
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
t.mode = "r"
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper name='dummy' mode='r' encoding='utf-8'>" % modname)
raw.name = b"dummy"
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper name=b'dummy' mode='r' encoding='utf-8'>" % modname)
t.buffer.detach()
repr(t) # Should not raise an exception
def test_recursive_repr(self):
# Issue #25455
raw = self.BytesIO()
t = self.TextIOWrapper(raw)
with support.swap_attr(raw, 'name', t):
try:
repr(t) # Should not crash
except RuntimeError:
pass
def test_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
t.write("X")
self.assertEqual(r.getvalue(), b"") # No flush happened
t.write("Y\nZ")
self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed
t.write("A\rB")
self.assertEqual(r.getvalue(), b"XY\nZA\rB")
def test_reconfigure_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=False)
t.write("AB\nC")
self.assertEqual(r.getvalue(), b"")
t.reconfigure(line_buffering=True) # implicit flush
self.assertEqual(r.getvalue(), b"AB\nC")
t.write("DEF\nG")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
t.write("H")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
t.reconfigure(line_buffering=False) # implicit flush
self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")
t.write("IJ")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")
# Keeping default value
t.reconfigure()
t.reconfigure(line_buffering=None)
self.assertEqual(t.line_buffering, False)
t.reconfigure(line_buffering=True)
t.reconfigure()
t.reconfigure(line_buffering=None)
self.assertEqual(t.line_buffering, True)
@unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled")
def test_default_encoding(self):
old_environ = dict(os.environ)
try:
# try to get a user-preferred encoding different from the current
# locale encoding, to check that TextIOWrapper() uses the current
# locale encoding and not the user-preferred encoding
for key in ('LC_ALL', 'LANG', 'LC_CTYPE'):
if key in os.environ:
del os.environ[key]
current_locale_encoding = locale.getpreferredencoding(False)
b = self.BytesIO()
t = self.TextIOWrapper(b)
self.assertEqual(t.encoding, current_locale_encoding)
finally:
os.environ.clear()
os.environ.update(old_environ)
@support.cpython_only
@unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled")
def test_device_encoding(self):
# Issue 15989
import _testcapi
b = self.BytesIO()
b.fileno = lambda: _testcapi.INT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
b.fileno = lambda: _testcapi.UINT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
def test_encoding(self):
# Check the encoding attribute is always set, and valid
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="utf-8")
self.assertEqual(t.encoding, "utf-8")
t = self.TextIOWrapper(b)
self.assertIsNotNone(t.encoding)
codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
# (1) default
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.read)
# (2) explicit strict
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.read)
# (3) ignore
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
self.assertEqual(t.read(), "abc\n\n")
# (4) replace
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
self.assertEqual(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
# (1) default
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.write, "\xff")
# (2) explicit strict
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.write, "\xff")
# (3) ignore
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abcdef\n")
# (4) replace
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abc?def\n")
def test_newlines(self):
input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
tests = [
[ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
[ '', input_lines ],
[ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
[ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
[ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
]
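# Each [newline, exp_lines] pair above gives the newline argument passed to
# TextIOWrapper and the line splitting expected for input_lines in that mode.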
encodings = (
'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
# Try a range of buffer sizes to test the case where \r is the last
# character in TextIOWrapper._pending_line.
for encoding in encodings:
# XXX: str.encode() should return bytes
data = bytes(''.join(input_lines).encode(encoding))
for do_reads in (False, True):
for bufsize in range(1, 10):
for newline, exp_lines in tests:
bufio = self.BufferedReader(self.BytesIO(data), bufsize)
textio = self.TextIOWrapper(bufio, newline=newline,
encoding=encoding)
if do_reads:
got_lines = []
while True:
c2 = textio.read(2)
if c2 == '':
break
self.assertEqual(len(c2), 2)
got_lines.append(c2 + textio.readline())
else:
got_lines = list(textio)
for got_line, exp_line in zip(got_lines, exp_lines):
self.assertEqual(got_line, exp_line)
self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
for newline, expected in [
(None, normalized.decode("ascii").splitlines(keepends=True)),
("", testdata.decode("ascii").splitlines(keepends=True)),
("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
]:
buf = self.BytesIO(testdata)
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
self.assertEqual(txt.readlines(), expected)
txt.seek(0)
self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
testdict = {
"": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
"\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
}
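# With newline=None, written '\n' characters are translated to os.linesep,
# so the expected bytes are those of the matching testdict entry.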
tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
for newline, expected in tests:
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
txt.write("AAA\nB")
txt.write("BB\nCCC\n")
txt.write("X\rY\r\nZ")
txt.flush()
self.assertEqual(buf.closed, False)
self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
l = []
base = self.BytesIO
class MyBytesIO(base):
def close(self):
l.append(self.getvalue())
base.close(self)
b = MyBytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
t.write("abc")
del t
support.gc_collect()
self.assertEqual([b"abc"], l)
def test_override_destructor(self):
record = []
class MyTextIO(self.TextIOWrapper):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
b = self.BytesIO()
t = MyTextIO(b, encoding="ascii")
del t
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
with support.catch_unraisable_exception() as cm:
with self.assertRaises(AttributeError):
self.TextIOWrapper(rawio).xyzzy
if not IOBASE_EMITS_UNRAISABLE:
self.assertIsNone(cm.unraisable)
elif cm.unraisable is not None:
self.assertEqual(cm.unraisable.exc_type, OSError)
# Systematic tests of the text I/O API
def test_basic_io(self):
for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
for enc in "ascii", "latin-1", "utf-8":  # "utf-16-be" and "utf-16-le" are skipped
f = self.open(support.TESTFN, "w+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.write("abc"), 3)
f.close()
f = self.open(support.TESTFN, "r+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(), "abc")
cookie = f.tell()
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(None), "abc")
f.seek(0)
self.assertEqual(f.read(2), "ab")
self.assertEqual(f.read(1), "c")
self.assertEqual(f.read(1), "")
self.assertEqual(f.read(), "")
self.assertEqual(f.tell(), cookie)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.seek(0, 2), cookie)
self.assertEqual(f.write("def"), 3)
self.assertEqual(f.seek(cookie), cookie)
self.assertEqual(f.read(), "def")
if enc.startswith("utf"):
self.multi_line_test(f, enc)
f.close()
def multi_line_test(self, f, enc):
f.seek(0)
f.truncate()
sample = "s\xff\u0fff\uffff"
wlines = []
for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
chars = []
for i in range(size):
chars.append(sample[i % len(sample)])
line = "".join(chars) + "\n"
wlines.append((f.tell(), line))
f.write(line)
f.seek(0)
rlines = []
while True:
pos = f.tell()
line = f.readline()
if not line:
break
rlines.append((pos, line))
self.assertEqual(rlines, wlines)
def test_telling(self):
f = self.open(support.TESTFN, "w+", encoding="utf-8")
p0 = f.tell()
f.write("\xff\n")
p1 = f.tell()
f.write("\xff\n")
p2 = f.tell()
f.seek(0)
self.assertEqual(f.tell(), p0)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p1)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p2)
f.seek(0)
for line in f:
self.assertEqual(line, "\xff\n")
self.assertRaises(OSError, f.tell)
self.assertEqual(f.tell(), p2)
f.close()
def test_seeking(self):
chunk_size = _default_chunk_size()
prefix_size = chunk_size - 2
u_prefix = "a" * prefix_size
prefix = bytes(u_prefix.encode("utf-8"))
self.assertEqual(len(u_prefix), len(prefix))
u_suffix = "\u8888\n"
suffix = bytes(u_suffix.encode("utf-8"))
line = prefix + suffix
with self.open(support.TESTFN, "wb") as f:
f.write(line*2)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
s = f.read(prefix_size)
self.assertEqual(s, str(prefix, "ascii"))
self.assertEqual(f.tell(), prefix_size)
self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
# Regression test for a specific bug
data = b'\xe0\xbf\xbf\n'
with self.open(support.TESTFN, "wb") as f:
f.write(data)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
f._CHUNK_SIZE # Just test that it exists
f._CHUNK_SIZE = 2
f.readline()
f.tell()
def test_seek_and_tell(self):
# Test seek/tell using the StatefulIncrementalDecoder.
# Make test faster by doing smaller seeks
CHUNK_SIZE = 128
def test_seek_and_tell_with_data(data, min_pos=0):
"""Tell/seek to various points within a data stream and ensure
that the decoded data returned by read() is consistent."""
f = self.open(support.TESTFN, 'wb')
f.write(data)
f.close()
f = self.open(support.TESTFN, encoding='test_decoder')
f._CHUNK_SIZE = CHUNK_SIZE
decoded = f.read()
f.close()
for i in range(min_pos, len(decoded) + 1): # seek positions
for j in [1, 5, len(decoded) - i]: # read lengths
f = self.open(support.TESTFN, encoding='test_decoder')
self.assertEqual(f.read(i), decoded[:i])
cookie = f.tell()
self.assertEqual(f.read(j), decoded[i:i + j])
f.seek(cookie)
self.assertEqual(f.read(), decoded[i:])
f.close()
# Enable the test decoder.
StatefulIncrementalDecoder.codecEnabled = 1
# Run the tests.
try:
# Try each test case.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
test_seek_and_tell_with_data(input)
# Position each test case so that it crosses a chunk boundary.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
offset = CHUNK_SIZE - len(input)//2
prefix = b'.'*offset
# Don't bother seeking into the prefix (takes too long).
min_pos = offset*2
test_seek_and_tell_with_data(prefix + input, min_pos)
# Ensure our test decoder won't interfere with subsequent tests.
finally:
StatefulIncrementalDecoder.codecEnabled = 0
def test_multibyte_seek_and_tell(self):
f = self.open(support.TESTFN, "w", encoding="euc_jp")
f.write("AB\n\u3046\u3048\n")
f.close()
f = self.open(support.TESTFN, "r", encoding="euc_jp")
self.assertEqual(f.readline(), "AB\n")
p0 = f.tell()
self.assertEqual(f.readline(), "\u3046\u3048\n")
p1 = f.tell()
f.seek(p0)
self.assertEqual(f.readline(), "\u3046\u3048\n")
self.assertEqual(f.tell(), p1)
f.close()
def test_seek_with_encoder_state(self):
f = self.open(support.TESTFN, "w", encoding="euc_jis_2004")
f.write("\u00e6\u0300")
p0 = f.tell()
f.write("\u00e6")
f.seek(p0)
f.write("\u0300")
f.close()
f = self.open(support.TESTFN, "r", encoding="euc_jis_2004")
self.assertEqual(f.readline(), "\u00e6\u0300\u0300")
f.close()
def test_encoded_writes(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
for encoding in tests:
buf = self.BytesIO()
f = self.TextIOWrapper(buf, encoding=encoding)
# Check that the BOM is written only once (see issue #1753).
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
class UnReadable(self.BytesIO):
def readable(self):
return False
txt = self.TextIOWrapper(UnReadable())
self.assertRaises(OSError, txt.read)
def test_read_one_by_one(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, "AA\nBB")
def test_readlines(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
# read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128.
def test_read_by_chunk(self):
# make sure "\r\n" straddles 128 char boundary.
txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
reads = ""
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEqual(reads, "A"*127+"\nB")
def test_writelines(self):
l = ['ab', 'cd', 'ef']
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_userlist(self):
l = UserList(['ab', 'cd', 'ef'])
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_error(self):
txt = self.TextIOWrapper(self.BytesIO())
self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
self.assertRaises(TypeError, txt.writelines, None)
self.assertRaises(TypeError, txt.writelines, b'abc')
def test_issue1395_1(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
# read one char at a time
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_2(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = ""
while True:
c = txt.read(4)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read(4)
reads += txt.readline()
reads += txt.readline()
reads += txt.readline()
self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read()
self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
buffer = self.BytesIO(self.testdata)
txt = self.TextIOWrapper(buffer, encoding="ascii")
self.assertEqual(buffer.seekable(), txt.seekable())
def test_append_bom(self):
# The BOM is not written again when appending to a non-empty file
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaa'.encode(charset))
with self.open(filename, 'a', encoding=charset) as f:
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_seek_bom(self):
# Same test, but when seeking manually
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'r+', encoding=charset) as f:
f.seek(pos)
f.write('zzz')
f.seek(0)
f.write('bbb')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
def test_seek_append_bom(self):
# Same test, but first seek to the start and then to the end
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
with self.open(filename, 'a', encoding=charset) as f:
f.seek(0)
f.seek(0, self.SEEK_END)
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_errors_property(self):
with self.open(support.TESTFN, "w") as f:
self.assertEqual(f.errors, "strict")
with self.open(support.TESTFN, "w", errors="replace") as f:
self.assertEqual(f.errors, "replace")
@support.no_tracing
def test_threads_write(self):
# Issue6750: concurrent writes could duplicate data
event = threading.Event()
with self.open(support.TESTFN, "w", buffering=1) as f:
def run(n):
text = "Thread%03d\n" % n
event.wait()
f.write(text)
threads = [threading.Thread(target=run, args=(x,))
for x in range(20)]
with support.start_threads(threads, event.set):
time.sleep(0.02)
with self.open(support.TESTFN) as f:
content = f.read()
for n in range(20):
self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
# Test that the text file is closed despite the failed flush
# and that flush() is called before the file is closed.
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
closed = []
def bad_flush():
closed[:] = [txt.closed, txt.buffer.closed]
raise OSError()
txt.flush = bad_flush
self.assertRaises(OSError, txt.close) # exception not swallowed
self.assertTrue(txt.closed)
self.assertTrue(txt.buffer.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
txt.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
txt.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(txt.closed)
# Silence destructor error
buffer.close = lambda: None
txt.flush = lambda: None
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
txt.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(txt.closed)
# Silence destructor error
buffer.close = lambda: None
txt.flush = lambda: None
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt.close()
txt.close()
txt.close()
self.assertRaises(ValueError, txt.flush)
def test_unseekable(self):
txt = self.TextIOWrapper(self.MockUnseekableIO(self.testdata))
self.assertRaises(self.UnsupportedOperation, txt.tell)
self.assertRaises(self.UnsupportedOperation, txt.seek, 0)
def test_readonly_attributes(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
buf = self.BytesIO(self.testdata)
with self.assertRaises(AttributeError):
txt.buffer = buf
def test_rawio(self):
# Issue #12591: TextIOWrapper must work with raw I/O objects, so
# that subprocess.Popen() can have the required unbuffered
# semantics with universal_newlines=True.
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
# Reads
self.assertEqual(txt.read(4), 'abcd')
self.assertEqual(txt.readline(), 'efghi\n')
self.assertEqual(list(txt), ['jkl\n', 'opq\n'])
def test_rawio_write_through(self):
# Issue #12591: with write_through=True, writes don't need a flush
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n',
write_through=True)
txt.write('1')
txt.write('23\n4')
txt.write('5')
self.assertEqual(b''.join(raw._write_stack), b'123\n45')
def test_bufio_write_through(self):
# Issue #21396: write_through=True doesn't force a flush()
# on the underlying binary buffered object.
flush_called, write_called = [], []
class BufferedWriter(self.BufferedWriter):
def flush(self, *args, **kwargs):
flush_called.append(True)
return super().flush(*args, **kwargs)
def write(self, *args, **kwargs):
write_called.append(True)
return super().write(*args, **kwargs)
rawio = self.BytesIO()
data = b"a"
bufio = BufferedWriter(rawio, len(data)*2)
textio = self.TextIOWrapper(bufio, encoding='ascii',
write_through=True)
# write to the buffered io but don't overflow the buffer
text = data.decode('ascii')
textio.write(text)
# buffer.flush is not called with write_through=True
self.assertFalse(flush_called)
# buffer.write *is* called with write_through=True
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), b"") # no flush
write_called = [] # reset
textio.write(text * 10) # total content is larger than bufio buffer
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), data * 11) # all flushed
def test_reconfigure_write_through(self):
raw = self.MockRawIO([])
t = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
t.write('1')
t.reconfigure(write_through=True) # implied flush
self.assertEqual(t.write_through, True)
self.assertEqual(b''.join(raw._write_stack), b'1')
t.write('23')
self.assertEqual(b''.join(raw._write_stack), b'123')
t.reconfigure(write_through=False)
self.assertEqual(t.write_through, False)
t.write('45')
t.flush()
self.assertEqual(b''.join(raw._write_stack), b'12345')
# Keeping default value
t.reconfigure()
t.reconfigure(write_through=None)
self.assertEqual(t.write_through, False)
t.reconfigure(write_through=True)
t.reconfigure()
t.reconfigure(write_through=None)
self.assertEqual(t.write_through, True)
def test_read_nonbytes(self):
# Issue #17106
# Crash when underlying read() returns non-bytes
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read, 1)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.readline)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read)
def test_illegal_encoder(self):
# Issue #31271: calling write() when the encoder's encode() returns an
# invalid value shouldn't cause an assertion failure.
rot13 = codecs.lookup("rot13")
with support.swap_attr(rot13, '_is_text_encoding', True):
t = io.TextIOWrapper(io.BytesIO(b'foo'), encoding="rot13")
self.assertRaises(TypeError, t.write, 'bar')
def test_illegal_decoder(self):
# Issue #17106
# Bypass the early encoding check added in issue 20404
def _make_illegal_wrapper():
quopri = codecs.lookup("quopri")
quopri._is_text_encoding = True
try:
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'),
newline='\n', encoding="quopri")
finally:
quopri._is_text_encoding = False
return t
# Crash when decoder returns non-string
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read, 1)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.readline)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read)
# Issue #31243: calling read() when the decoder's getstate() returns an
# invalid value should neither crash the interpreter nor raise a
# SystemError.
def _make_very_illegal_wrapper(getstate_ret_val):
class BadDecoder:
def getstate(self):
return getstate_ret_val
def _get_bad_decoder(dummy):
return BadDecoder()
quopri = codecs.lookup("quopri")
with support.swap_attr(quopri, 'incrementaldecoder',
_get_bad_decoder):
return _make_illegal_wrapper()
t = _make_very_illegal_wrapper(42)
self.assertRaises(TypeError, t.read, 42)
t = _make_very_illegal_wrapper(())
self.assertRaises(TypeError, t.read, 42)
t = _make_very_illegal_wrapper((1, 2))
self.assertRaises(TypeError, t.read, 42)
@support.impl_detail("PyPy does not call __del__ at shutdown", pypy=False)
def _check_create_at_shutdown(self, **kwargs):
# Issue #20037: creating a TextIOWrapper at shutdown
# shouldn't crash the interpreter.
iomod = self.io.__name__
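# The repr of the kwargs dict is pasted verbatim into the template below,
# so **{kwargs} expands to **{...} keyword arguments in the generated
# child-process code.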
code = """if 1:
import codecs
import {iomod} as io
# Avoid looking up codecs at shutdown
codecs.lookup('utf-8')
class C:
def __init__(self):
self.buf = io.BytesIO()
def __del__(self):
io.TextIOWrapper(self.buf, **{kwargs})
print("ok")
c = C()
""".format(iomod=iomod, kwargs=kwargs)
return assert_python_ok("-c", code)
@support.requires_type_collecting
def test_create_at_shutdown_without_encoding(self):
rc, out, err = self._check_create_at_shutdown()
if err:
# Can error out with a RuntimeError if the module state
# isn't found.
self.assertIn(self.shutdown_error, err.decode())
else:
self.assertEqual("ok", out.decode().strip())
@support.requires_type_collecting
def test_create_at_shutdown_with_encoding(self):
rc, out, err = self._check_create_at_shutdown(encoding='utf-8',
errors='strict')
self.assertFalse(err)
self.assertEqual("ok", out.decode().strip())
def test_read_byteslike(self):
r = MemviewBytesIO(b'Just some random string\n')
t = self.TextIOWrapper(r, 'utf-8')
# TextIOWrapper will not read the full string, because
# we truncate it to a multiple of the native int size
# so that we can construct a more complex memoryview.
bytes_val = _to_memoryview(r.getvalue()).tobytes()
self.assertEqual(t.read(200), bytes_val.decode('utf-8'))
def test_issue22849(self):
class F(object):
def readable(self): return True
def writable(self): return True
def seekable(self): return True
for i in range(10):
try:
self.TextIOWrapper(F(), encoding='utf-8')
except Exception:
pass
F.tell = lambda x: 0
t = self.TextIOWrapper(F(), encoding='utf-8')
def test_reconfigure_encoding_read(self):
# latin-1 -> utf-8
# (latin-1 can decode any byte sequence, including UTF-8 encoded data)
data = 'abc\xe9\n'.encode('latin1') + 'd\xe9f\n'.encode('utf8')
raw = self.BytesIO(data)
txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n')
self.assertEqual(txt.readline(), 'abc\xe9\n')
with self.assertRaises(self.UnsupportedOperation):
txt.reconfigure(encoding='utf-8')
with self.assertRaises(self.UnsupportedOperation):
txt.reconfigure(newline=None)
def test_reconfigure_write_fromascii(self):
# ascii has a specific encodefunc in the C implementation,
# but utf-8-sig does not. Make sure that we get rid of the
# cached encodefunc when we switch encoders.
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('foo\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('\xe9\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'foo\n\xc3\xa9\n')
def test_reconfigure_write(self):
# latin -> utf8
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n')
txt.write('abc\xe9\n')
txt.reconfigure(encoding='utf-8')
self.assertEqual(raw.getvalue(), b'abc\xe9\n')
txt.write('d\xe9f\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'abc\xe9\nd\xc3\xa9f\n')
# ascii -> utf-8-sig: ensure that no BOM is written in the middle of
# the file
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('abc\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('d\xe9f\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'abc\nd\xc3\xa9f\n')
def test_reconfigure_write_non_seekable(self):
raw = self.BytesIO()
raw.seekable = lambda: False
raw.seek = None
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('abc\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('d\xe9f\n')
txt.flush()
# If the raw stream is not seekable, there'll be a BOM
self.assertEqual(raw.getvalue(), b'abc\n\xef\xbb\xbfd\xc3\xa9f\n')
def test_reconfigure_defaults(self):
txt = self.TextIOWrapper(self.BytesIO(), 'ascii', 'replace', '\n')
txt.reconfigure(encoding=None)
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'replace')
txt.write('LF\n')
txt.reconfigure(newline='\r\n')
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'replace')
txt.reconfigure(errors='ignore')
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'ignore')
txt.write('CRLF\n')
txt.reconfigure(encoding='utf-8', newline=None)
self.assertEqual(txt.errors, 'strict')
txt.seek(0)
self.assertEqual(txt.read(), 'LF\nCRLF\n')
self.assertEqual(txt.detach().getvalue(), b'LF\nCRLF\r\n')
def test_reconfigure_newline(self):
raw = self.BytesIO(b'CR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline=None)
self.assertEqual(txt.readline(), 'CR\n')
raw = self.BytesIO(b'CR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline='')
self.assertEqual(txt.readline(), 'CR\r')
raw = self.BytesIO(b'CR\rLF\nEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\r')
txt.reconfigure(newline='\n')
self.assertEqual(txt.readline(), 'CR\rLF\n')
raw = self.BytesIO(b'LF\nCR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline='\r')
self.assertEqual(txt.readline(), 'LF\nCR\r')
raw = self.BytesIO(b'CR\rCRLF\r\nEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\r')
txt.reconfigure(newline='\r\n')
self.assertEqual(txt.readline(), 'CR\rCRLF\r\n')
txt = self.TextIOWrapper(self.BytesIO(), 'ascii', newline='\r')
txt.reconfigure(newline=None)
txt.write('linesep\n')
txt.reconfigure(newline='')
txt.write('LF\n')
txt.reconfigure(newline='\n')
txt.write('LF\n')
txt.reconfigure(newline='\r')
txt.write('CR\n')
txt.reconfigure(newline='\r\n')
txt.write('CRLF\n')
expected = 'linesep' + os.linesep + 'LF\nLF\nCR\rCRLF\r\n'
self.assertEqual(txt.detach().getvalue().decode('ascii'), expected)
def test_issue25862(self):
# Assertion failures occurred in tell() after read() and write().
t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
t.read(1)
t.read()
t.tell()
t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
t.read(1)
t.write('x')
t.tell()
class MemviewBytesIO(io.BytesIO):
'''A BytesIO object whose read method returns memoryviews
rather than bytes'''
def read1(self, len_):
return _to_memoryview(super().read1(len_))
def read(self, len_):
return _to_memoryview(super().read(len_))
def _to_memoryview(buf):
'''Convert bytes-object *buf* to a non-trivial memoryview'''
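# array('i').frombytes() requires a byte count that is a multiple of the
# item size, so any trailing remainder bytes are dropped; the resulting
# memoryview therefore has a non-byte ('i') format.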
arr = array.array('i')
idx = len(buf) - len(buf) % arr.itemsize
arr.frombytes(buf[:idx])
return memoryview(arr)
class CTextIOWrapperTest(TextIOWrapperTest):
io = io
shutdown_error = "RuntimeError: could not find io module state"
def test_initialization(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
self.assertRaises(ValueError, t.read)
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
def test_garbage_collection(self):
# C TextIOWrapper objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends in gc.garbage instead.
with support.check_warnings(('', ResourceWarning)):
rawio = io.FileIO(support.TESTFN, "wb")
b = self.BufferedWriter(rawio)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("456def")
t.x = t
wr = weakref.ref(t)
del t
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"456def")
def test_rwpair_cleared_before_textio(self):
# Issue 13070: TextIOWrapper's finalization would crash when called
# after the reference to the underlying BufferedRWPair's writer got
# cleared by the GC.
for i in range(1000):
b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t1 = self.TextIOWrapper(b1, encoding="ascii")
b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t2 = self.TextIOWrapper(b2, encoding="ascii")
# circular references
t1.buddy = t2
t2.buddy = t1
support.gc_collect()
def test_del__CHUNK_SIZE_SystemError(self):
t = self.TextIOWrapper(self.BytesIO(), encoding='ascii')
with self.assertRaises(AttributeError):
del t._CHUNK_SIZE
def test_internal_buffer_size(self):
# bpo-43260: TextIOWrapper's internal buffer should not store
# data larger than chunk size.
chunk_size = 8192 # default chunk size, updated later
class MockIO(self.MockRawIO):
def write(self, data):
if len(data) > chunk_size:
raise RuntimeError
return super().write(data)
buf = MockIO()
t = self.TextIOWrapper(buf, encoding="ascii")
chunk_size = t._CHUNK_SIZE
t.write("abc")
t.write("def")
# The default chunk size is 8192 bytes, so t doesn't write data to buf yet.
self.assertEqual([], buf._write_stack)
with self.assertRaises(RuntimeError):
t.write("x"*(chunk_size+1))
self.assertEqual([b"abcdef"], buf._write_stack)
t.write("ghi")
t.write("x"*chunk_size)
self.assertEqual([b"abcdef", b"ghi", b"x"*chunk_size], buf._write_stack)
class PyTextIOWrapperTest(TextIOWrapperTest):
io = pyio
shutdown_error = "LookupError: unknown encoding: ascii"
class IncrementalNewlineDecoderTest(unittest.TestCase):
def check_newline_decoding_utf8(self, decoder):
# UTF-8 specific tests for a newline decoder
def _check_decode(b, s, **kwargs):
# We exercise getstate() / setstate() as well as decode()
state = decoder.getstate()
self.assertEqual(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEqual(decoder.decode(b, **kwargs), s)
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
_check_decode(b'\n', "\n")
_check_decode(b'\r', "")
_check_decode(b'', "\n", final=True)
_check_decode(b'\r', "\n", final=True)
_check_decode(b'\r', "")
_check_decode(b'a', "\na")
_check_decode(b'\r\r\n', "\n\n")
_check_decode(b'\r', "")
_check_decode(b'\r', "\n")
_check_decode(b'\na', "\na")
_check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\n', "\n")
_check_decode(b'\xe8\xa2\x88\r', "\u8888")
_check_decode(b'\n', "\n")
def check_newline_decoding(self, decoder, encoding):
result = []
if encoding is not None:
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
# Decode one byte at a time
for b in encoder.encode(s):
result.append(decoder.decode(bytes([b])))
else:
encoder = None
def _decode_bytewise(s):
# Decode one char at a time
for c in s:
result.append(decoder.decode(c))
self.assertEqual(decoder.newlines, None)
_decode_bytewise("abc\n\r")
self.assertEqual(decoder.newlines, '\n')
_decode_bytewise("\nabc")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc")
self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
decoder.reset()
input = "abc"
if encoder is not None:
encoder.reset()
input = encoder.encode(input)
self.assertEqual(decoder.decode(input), "abc")
self.assertEqual(decoder.newlines, None)
def test_newline_decoder(self):
encodings = (
# None meaning the IncrementalNewlineDecoder takes unicode input
# rather than bytes input
None, 'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
for enc in encodings:
decoder = enc and codecs.getincrementaldecoder(enc)()
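# When enc is None, "enc and ..." leaves decoder as None, so the
# IncrementalNewlineDecoder is fed str input directly (see the comment above).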
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding(decoder, enc)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding_utf8(decoder)
self.assertRaises(TypeError, decoder.setstate, 42)
def test_newline_bytes(self):
# Issue 5433: Excessive optimization in IncrementalNewlineDecoder
def _check(dec):
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0D00"), "\u0D00")
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0A00"), "\u0A00")
self.assertEqual(dec.newlines, None)
dec = self.IncrementalNewlineDecoder(None, translate=False)
_check(dec)
dec = self.IncrementalNewlineDecoder(None, translate=True)
_check(dec)
def test_translate(self):
# issue 35062
for translate in (-2, -1, 1, 2):
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate)
self.check_newline_decoding_utf8(decoder)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=0)
self.assertEqual(decoder.decode(b"\r\r\n"), "\r\r\n")
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def test___all__(self):
for name in self.io.__all__:
obj = getattr(self.io, name, None)
self.assertIsNotNone(obj, name)
if name in ("open", "open_code"):
continue
elif "error" in name.lower() or name == "UnsupportedOperation":
self.assertTrue(issubclass(obj, Exception), name)
elif not name.startswith("SEEK_"):
self.assertTrue(issubclass(obj, self.IOBase))
def test_attributes(self):
f = self.open(support.TESTFN, "wb", buffering=0)
self.assertEqual(f.mode, "wb")
f.close()
with support.check_warnings(('', DeprecationWarning)):
f = self.open(support.TESTFN, "U")
self.assertEqual(f.name, support.TESTFN)
self.assertEqual(f.buffer.name, support.TESTFN)
self.assertEqual(f.buffer.raw.name, support.TESTFN)
self.assertEqual(f.mode, "U")
self.assertEqual(f.buffer.mode, "rb")
self.assertEqual(f.buffer.raw.mode, "rb")
f.close()
f = self.open(support.TESTFN, "w+")
self.assertEqual(f.mode, "w+")
self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
self.assertEqual(f.buffer.raw.mode, "rb+")
g = self.open(f.fileno(), "wb", closefd=False)
self.assertEqual(g.mode, "wb")
self.assertEqual(g.raw.mode, "wb")
self.assertEqual(g.name, f.fileno())
self.assertEqual(g.raw.name, f.fileno())
f.close()
g.close()
def test_open_pipe_with_append(self):
# bpo-27805: Ignore ESPIPE from lseek() in open().
r, w = os.pipe()
self.addCleanup(os.close, r)
f = self.open(w, 'a')
self.addCleanup(f.close)
# Check that the file is marked non-seekable. On Windows, however, lseek
# somehow succeeds on pipes.
if sys.platform != 'win32':
self.assertFalse(f.seekable())
def test_io_after_close(self):
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "w", "buffering": 1},
{"mode": "w", "buffering": 2},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "r", "buffering": 1},
{"mode": "r", "buffering": 2},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+", "buffering": 1},
{"mode": "w+", "buffering": 2},
{"mode": "w+b", "buffering": 0},
]:
f = self.open(support.TESTFN, **kwargs)
f.close()
self.assertRaises(ValueError, f.flush)
self.assertRaises(ValueError, f.fileno)
self.assertRaises(ValueError, f.isatty)
self.assertRaises(ValueError, f.__iter__)
if hasattr(f, "peek"):
self.assertRaises(ValueError, f.peek, 1)
self.assertRaises(ValueError, f.read)
if hasattr(f, "read1"):
self.assertRaises(ValueError, f.read1, 1024)
self.assertRaises(ValueError, f.read1)
if hasattr(f, "readall"):
self.assertRaises(ValueError, f.readall)
if hasattr(f, "readinto"):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
if hasattr(f, "readinto1"):
self.assertRaises(ValueError, f.readinto1, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
self.assertRaises(ValueError, f.readlines, 1)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
self.assertRaises(ValueError, f.write,
b"" if "b" in kwargs['mode'] else "")
self.assertRaises(ValueError, f.writelines, [])
self.assertRaises(ValueError, next, f)
def test_blockingioerror(self):
# Various BlockingIOError issues
class C(str):
pass
c = C("")
b = self.BlockingIOError(1, c)
c.b = b
b.c = c
wr = weakref.ref(c)
del c, b
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_abcs(self):
# Test the visible base classes are ABCs.
self.assertIsInstance(self.IOBase, abc.ABCMeta)
self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
self.assertIsInstance(self.TextIOBase, abc.ABCMeta)
def _check_abc_inheritance(self, abcmodule):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "wb") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "w") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertIsInstance(f, abcmodule.TextIOBase)
def test_abc_inheritance(self):
# Test implementations inherit from their respective ABCs
self._check_abc_inheritance(self)
def test_abc_inheritance_official(self):
# Test implementations inherit from the official ABCs of the
# baseline "io" module.
self._check_abc_inheritance(io)
def _check_warn_on_dealloc(self, *args, **kwargs):
f = open(*args, **kwargs)
r = repr(f)
with self.assertWarns(ResourceWarning) as cm:
f = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_warn_on_dealloc(self):
self._check_warn_on_dealloc(support.TESTFN, "wb", buffering=0)
self._check_warn_on_dealloc(support.TESTFN, "wb")
self._check_warn_on_dealloc(support.TESTFN, "w")
def _check_warn_on_dealloc_fd(self, *args, **kwargs):
fds = []
def cleanup_fds():
for fd in fds:
try:
os.close(fd)
except OSError as e:
if e.errno != errno.EBADF:
raise
self.addCleanup(cleanup_fds)
r, w = os.pipe()
fds += r, w
self._check_warn_on_dealloc(r, *args, **kwargs)
# When using closefd=False, there's no warning
r, w = os.pipe()
fds += r, w
with support.check_no_resource_warning(self):
open(r, *args, closefd=False, **kwargs)
def test_warn_on_dealloc_fd(self):
self._check_warn_on_dealloc_fd("rb", buffering=0)
self._check_warn_on_dealloc_fd("rb")
self._check_warn_on_dealloc_fd("r")
def test_pickling(self):
# Pickling file objects is forbidden
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+b", "buffering": 0},
]:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
with self.open(support.TESTFN, **kwargs) as f:
self.assertRaises(TypeError, pickle.dumps, f, protocol)
def test_nonblock_pipe_write_bigbuf(self):
self._test_nonblock_pipe_write(16*1024)
def test_nonblock_pipe_write_smallbuf(self):
self._test_nonblock_pipe_write(1024)
@unittest.skipUnless(hasattr(os, 'set_blocking'),
'os.set_blocking() required for this test')
def _test_nonblock_pipe_write(self, bufsize):
sent = []
received = []
r, w = os.pipe()
os.set_blocking(r, False)
os.set_blocking(w, False)
# To exercise all code paths in the C implementation we need
# to play with buffer sizes. For instance, if we choose a
# buffer size less than or equal to _PIPE_BUF (4096 on Linux)
# then we will never get a partial write of the buffer.
rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)
with rf, wf:
for N in 9999, 73, 7574:
try:
i = 0
while True:
msg = bytes([i % 26 + 97]) * N
sent.append(msg)
wf.write(msg)
i += 1
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
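# Only e.characters_written bytes of the last message were actually written
# before blocking; trim the bookkeeping copy to match what the reader sees.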
sent[-1] = sent[-1][:e.characters_written]
received.append(rf.read())
msg = b'BLOCKED'
wf.write(msg)
sent.append(msg)
while True:
try:
wf.flush()
break
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
self.assertEqual(e.characters_written, 0)
received.append(rf.read())
received += iter(rf.read, None)
sent, received = b''.join(sent), b''.join(received)
self.assertEqual(sent, received)
self.assertTrue(wf.closed)
self.assertTrue(rf.closed)
def test_create_fail(self):
# 'x' mode fails if the file already exists
with self.open(support.TESTFN, 'w'):
pass
self.assertRaises(FileExistsError, self.open, support.TESTFN, 'x')
def test_create_writes(self):
# 'x' mode opens for writing
with self.open(support.TESTFN, 'xb') as f:
f.write(b"spam")
with self.open(support.TESTFN, 'rb') as f:
self.assertEqual(b"spam", f.read())
def test_open_allargs(self):
# there used to be a buffer overflow in the parser for rawmode
self.assertRaises(ValueError, self.open, support.TESTFN, 'rwax+')
class CMiscIOTest(MiscIOTest):
io = io
def test_readinto_buffer_overflow(self):
# Issue #18025
class BadReader(self.io.BufferedIOBase):
def read(self, n=-1):
return b'x' * 10**6
bufio = BadReader()
b = bytearray(2)
self.assertRaises(ValueError, bufio.readinto, b)
def check_daemon_threads_shutdown_deadlock(self, stream_name):
# Issue #23309: deadlocks at shutdown should be avoided when a
# daemon thread and the main thread both write to a file.
code = """if 1:
import sys
import time
import threading
from test.support import SuppressCrashReport
file = sys.{stream_name}
def run():
while True:
file.write('.')
file.flush()
crash = SuppressCrashReport()
crash.__enter__()
# don't call __exit__(): the crash occurs at Python shutdown
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
time.sleep(0.5)
file.write('!')
file.flush()
""".format_map(locals())
res, _ = run_python_until_end("-c", code)
err = res.err.decode()
if res.rc != 0:
# Failure: should be a fatal error
pattern = (r"Fatal Python error: could not acquire lock "
r"for <(_io\.)?BufferedWriter name='<{stream_name}>'> "
r"at interpreter shutdown, possibly due to "
r"daemon threads".format_map(locals()))
self.assertRegex(err, pattern)
else:
self.assertFalse(err.strip('.!'))
def test_daemon_threads_shutdown_stdout_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stdout')
def test_daemon_threads_shutdown_stderr_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stderr')
class PyMiscIOTest(MiscIOTest):
io = pyio
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
def setUp(self):
self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
def tearDown(self):
signal.signal(signal.SIGALRM, self.oldalrm)
def alarm_interrupt(self, sig, frame):
1/0
def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
"""Check that a partial write, when it gets interrupted, properly
invokes the signal handler, and bubbles up the exception raised
in the latter."""
# XXX This test has three flaws that appear when objects are
# XXX not reference counted.
# - if wio.write() happens to trigger a garbage collection,
# the signal exception may be raised when some __del__
# method is running; it will not reach the assertRaises()
# call.
# - more subtle, if the wio object is not destroyed at once
# and survives this function, the next opened file is likely
# to have the same fileno (since the file descriptor was
# actively closed). When wio.__del__ is finally called, it
# will close the other's test file... To trigger this with
# CPython, try adding "global wio" in this function.
# - This happens only for streams created by the _pyio module,
# because a wio.close() that fails still considers that the
# file needs to be closed again. You can try adding an
# "assert wio.closed" at the end of the function.
# Fortunately, a little gc.collect() seems to be enough to
# work around all these issues.
support.gc_collect()
read_results = []
def _read():
s = os.read(r, 1)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
large_data = item * (support.PIPE_MAX_SIZE // len(item) + 1)
try:
wio = self.io.open(w, **fdopen_kwargs)
if hasattr(signal, 'pthread_sigmask'):
# create the thread with SIGALRM signal blocked
signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGALRM])
t.start()
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGALRM])
else:
t.start()
# Fill the pipe enough that the write will be blocking.
# It will be interrupted by the timer armed just below. Since the
# other thread has read one byte, the low-level write will
# return with a successful (partial) result rather than an EINTR.
# The buffered IO layer must check for pending signal
# handlers, which in this case will invoke alarm_interrupt().
signal.alarm(1)
try:
self.assertRaises(ZeroDivisionError, wio.write, large_data)
finally:
signal.alarm(0)
t.join()
# We got one byte, get another one and check that it isn't a
# repeat of the first one.
read_results.append(os.read(r, 1))
self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
finally:
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and block again.
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_unbuffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)
def test_interrupted_write_buffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb")
def test_interrupted_write_text(self):
self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")
@support.no_tracing
def check_reentrant_write(self, data, **fdopen_kwargs):
def on_alarm(*args):
# Will be called reentrantly from the same thread
wio.write(data)
1/0
signal.signal(signal.SIGALRM, on_alarm)
r, w = os.pipe()
wio = self.io.open(w, **fdopen_kwargs)
try:
signal.alarm(1)
# Either the reentrant call to wio.write() fails with RuntimeError,
# or the signal handler raises ZeroDivisionError.
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
while 1:
for i in range(100):
wio.write(data)
wio.flush()
# Make sure the buffer doesn't fill up and block further writes
os.read(r, len(data) * 100)
exc = cm.exception
if isinstance(exc, RuntimeError):
self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
finally:
signal.alarm(0)
wio.close()
os.close(r)
def test_reentrant_write_buffered(self):
self.check_reentrant_write(b"xy", mode="wb")
def test_reentrant_write_text(self):
self.check_reentrant_write("xy", mode="w", encoding="ascii")
def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
"""Check that a buffered read, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
def alarm_handler(sig, frame):
os.write(w, b"bar")
signal.signal(signal.SIGALRM, alarm_handler)
try:
rio = self.io.open(r, **fdopen_kwargs)
os.write(w, b"foo")
signal.alarm(1)
# Expected behaviour:
# - first raw read() returns partial b"foo"
# - second raw read() returns EINTR
# - third raw read() returns b"bar"
self.assertEqual(decode(rio.read(6)), "foobar")
finally:
signal.alarm(0)
rio.close()
os.close(w)
os.close(r)
def test_interrupted_read_retry_buffered(self):
self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
mode="rb")
def test_interrupted_read_retry_text(self):
self.check_interrupted_read_retry(lambda x: x,
mode="r")
def check_interrupted_write_retry(self, item, **fdopen_kwargs):
"""Check that a buffered write, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
select = support.import_module("select")
# A quantity that exceeds the buffer size of an anonymous pipe's
# write end.
N = support.PIPE_MAX_SIZE
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
# We need a separate thread to read from the pipe and allow the
# write() to finish. This thread is started after the SIGALRM is
# received (forcing a first EINTR in write()).
read_results = []
write_finished = False
error = None
def _read():
try:
while not write_finished:
while r in select.select([r], [], [], 1.0)[0]:
s = os.read(r, 1024)
read_results.append(s)
except BaseException as exc:
nonlocal error
error = exc
t = threading.Thread(target=_read)
t.daemon = True
def alarm1(sig, frame):
signal.signal(signal.SIGALRM, alarm2)
signal.alarm(1)
def alarm2(sig, frame):
t.start()
large_data = item * N
signal.signal(signal.SIGALRM, alarm1)
try:
wio = self.io.open(w, **fdopen_kwargs)
signal.alarm(1)
# Expected behaviour:
# - first raw write() is partial (because of the limited pipe buffer
# and the first alarm)
# - second raw write() returns EINTR (because of the second alarm)
# - subsequent write()s are successful (either partial or complete)
written = wio.write(large_data)
self.assertEqual(N, written)
wio.flush()
write_finished = True
t.join()
self.assertIsNone(error)
self.assertEqual(N, sum(len(x) for x in read_results))
finally:
signal.alarm(0)
write_finished = True
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and could block (in case of failure).
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_retry_buffered(self):
self.check_interrupted_write_retry(b"x", mode="wb")
def test_interrupted_write_retry_text(self):
self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
io = io
class PySignalsTest(SignalsTest):
io = pyio
# Handling reentrancy issues would slow down _pyio even more, so the
# tests are disabled.
test_reentrant_write_buffered = None
test_reentrant_write_text = None
def load_tests(*args):
tests = (CIOTest, PyIOTest, APIMismatchTest,
CBufferedReaderTest, PyBufferedReaderTest,
CBufferedWriterTest, PyBufferedWriterTest,
CBufferedRWPairTest, PyBufferedRWPairTest,
CBufferedRandomTest, PyBufferedRandomTest,
StatefulIncrementalDecoderTest,
CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
CTextIOWrapperTest, PyTextIOWrapperTest,
CMiscIOTest, PyMiscIOTest,
CSignalsTest, PySignalsTest,
)
# Put the namespaces of the IO module we are testing and some useful mock
# classes in the __dict__ of each test.
mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
MockNonBlockWriterIO, MockUnseekableIO, MockRawIOWithoutRead,
SlowFlushRawIO)
all_members = io.__all__ + ["IncrementalNewlineDecoder"]
c_io_ns = {name : getattr(io, name) for name in all_members}
py_io_ns = {name : getattr(pyio, name) for name in all_members}
globs = globals()
c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
# Avoid turning open into a bound method.
py_io_ns["open"] = pyio.OpenWrapper
for test in tests:
if test.__name__.startswith("C"):
for name, obj in c_io_ns.items():
setattr(test, name, obj)
elif test.__name__.startswith("Py"):
for name, obj in py_io_ns.items():
setattr(test, name, obj)
suite = unittest.TestSuite([unittest.makeSuite(test) for test in tests])
return suite
if __name__ == "__main__":
unittest.main()
|
vectorized_env_executor.py
|
import numpy as np
import pickle as pickle
from multiprocessing import Process, Pipe
import copy
from simAdapter import terrainRLSim
from meta_policy_search.envs.normalized_env import normalize
class MetaIterativeEnvExecutor(object):
"""
Wraps multiple environments of the same kind and provides functionality to reset / step the environments
in a vectorized manner. Internally, the environments are executed iteratively.
Args:
env (meta_policy_search.envs.base.MetaEnv): meta environment object
meta_batch_size (int): number of meta tasks
envs_per_task (int): number of environments per meta task
max_path_length (int): maximum length of sampled environment paths - if the max_path_length is reached,
the respective environment is reset
"""
def __init__(self, env, meta_batch_size, envs_per_task, max_path_length):
self.envs = []
if (env is None):
for _ in range(meta_batch_size * envs_per_task):
env = terrainRLSim.getEnv(env_name="PD_Humanoid_3D_GRF_Mixed_1Sub_Imitate_30FPS_DenseState_v0", render=True)
# env = globals()[config['env']]() # instantiate env
env = normalize(env) # apply normalize wrapper to env
self.envs.append(env)
else:
self.envs = np.asarray([copy.deepcopy(env) for _ in range(meta_batch_size * envs_per_task)])
self.ts = np.zeros(len(self.envs), dtype='int') # time steps
self.max_path_length = max_path_length
def step(self, actions):
"""
Steps the wrapped environments with the provided actions
Args:
actions (list): lists of actions, of length meta_batch_size x envs_per_task
Returns
(tuple): a length 4 tuple of lists, containing obs (np.array), rewards (float), dones (bool),
env_infos (dict). Each list is of length meta_batch_size x envs_per_task
(assumes that every task has same number of envs)
"""
assert len(actions) == self.num_envs
all_results = [env.step(a) for (a, env) in zip(actions, self.envs)]
# stack results split to obs, rewards, ...
obs, rewards, dones, env_infos = list(map(list, zip(*all_results)))
# reset env when done or max_path_length reached
dones = np.asarray(dones)
self.ts += 1
dones = np.logical_or(self.ts >= self.max_path_length, dones)
for i in np.argwhere(dones).flatten():
obs[i] = self.envs[i].reset()
self.ts[i] = 0
return obs, rewards, dones, env_infos
def set_tasks(self, tasks):
"""
Sets a list of tasks to each environment
Args:
tasks (list): list of the tasks for each environment
"""
envs_per_task = np.split(self.envs, len(tasks))
for task, envs in zip(tasks, envs_per_task):
for env in envs:
env.set_task(task)
def reset(self):
"""
Resets the environments
Returns:
(list): list of (np.ndarray) with the new initial observations.
"""
obses = [env.reset() for env in self.envs]
self.ts[:] = 0
return obses
@property
def num_envs(self):
"""
Number of environments
Returns:
(int): number of environments
"""
return len(self.envs)
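# Illustrative usage sketch (hypothetical helper, not part of the original API):
# how either executor is typically driven by a sampler. Construct, set tasks,
# reset, then step with one action per environment. It assumes the wrapped
# MetaEnv exposes sample_tasks(n) and action_space.sample(), which is an
# assumption about the environment interface, not something this module checks.
def _example_vectorized_rollout_step(env, meta_batch_size=2, envs_per_task=1, max_path_length=50):
    vec_env = MetaIterativeEnvExecutor(env, meta_batch_size, envs_per_task, max_path_length)
    vec_env.set_tasks(env.sample_tasks(meta_batch_size))  # one task per meta-batch slot
    obs = vec_env.reset()
    actions = [env.action_space.sample() for _ in range(vec_env.num_envs)]
    # MetaParallelEnvExecutor (below) exposes the same set_tasks/reset/step interface
    return vec_env.step(actions)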
class MetaParallelEnvExecutor(object):
"""
Wraps multiple environments of the same kind and provides functionality to reset / step the environments
in a vectorized manner. The environments are distributed among meta_batch_size processes and
executed in parallel.
Args:
env (meta_policy_search.envs.base.MetaEnv): meta environment object
meta_batch_size (int): number of meta tasks
envs_per_task (int): number of environments per meta task
max_path_length (int): maximum length of sampled environment paths - if the max_path_length is reached,
the respective environment is reset
"""
def __init__(self, env, meta_batch_size, envs_per_task, max_path_length):
self.n_envs = meta_batch_size * envs_per_task
self.meta_batch_size = meta_batch_size
self.envs_per_task = envs_per_task
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(meta_batch_size)])
seeds = np.random.choice(range(10**6), size=meta_batch_size, replace=False)
# print ("Env:", env)
self.ps = [
Process(target=worker, args=(work_remote, remote, pickle.dumps(env), envs_per_task, max_path_length, seed))
for (work_remote, remote, seed) in zip(self.work_remotes, self.remotes, seeds)] # Why pass work remotes?
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
def step(self, actions):
"""
Executes actions on each env
Args:
actions (list): lists of actions, of length meta_batch_size x envs_per_task
Returns
(tuple): a length 4 tuple of lists, containing obs (np.array), rewards (float), dones (bool), env_infos (dict)
each list is of length meta_batch_size x envs_per_task (assumes that every task has same number of envs)
"""
assert len(actions) == self.num_envs
# split list of actions in list of list of actions per meta tasks
chunks = lambda l, n: [l[x: x + n] for x in range(0, len(l), n)]
actions_per_meta_task = chunks(actions, self.envs_per_task)
# step remote environments
for remote, action_list in zip(self.remotes, actions_per_meta_task):
remote.send(('step', action_list))
results = [remote.recv() for remote in self.remotes]
obs, rewards, dones, env_infos = map(lambda x: sum(x, []), zip(*results))
return obs, rewards, dones, env_infos
def reset(self):
"""
Resets the environments of each worker
Returns:
(list): list of (np.ndarray) with the new initial observations.
"""
for remote in self.remotes:
remote.send(('reset', None))
return sum([remote.recv() for remote in self.remotes], [])
def set_tasks(self, tasks=None):
"""
Sets a list of tasks to each worker
Args:
tasks (list): list of the tasks for each worker
"""
for remote, task in zip(self.remotes, tasks):
remote.send(('set_task', task))
for remote in self.remotes:
remote.recv()
@property
def num_envs(self):
"""
Number of environments
Returns:
(int): number of environments
"""
return self.n_envs
def worker(remote, parent_remote, env_pickle, n_envs, max_path_length, seed):
"""
Instantiation of a parallel worker for collecting samples. It loops continually checking the task that the remote
sends to it.
Args:
remote (multiprocessing.Connection):
parent_remote (multiprocessing.Connection):
env_pickle (pkl): pickled environment
n_envs (int): number of environments per worker
max_path_length (int): maximum path length of the task
seed (int): random seed for the worker
"""
parent_remote.close()
# print ("env_pickle: ", env_pickle)
# sys.exit()
envs = []
env_ = pickle.loads(env_pickle)
if type(env_) is tuple:
for _ in range(n_envs):
if (env_[0] == 'terrianrlSim'):
env = terrainRLSim.getEnv(env_name=env_[1], render=False)
# env = globals()[config['env']]() # instantiate env
env = normalize(env) # apply normalize wrapper to env
envs.append(env)
else:
envs = [pickle.loads(env_pickle) for _ in range(n_envs)]
np.random.seed(seed)
ts = np.zeros(n_envs, dtype='int')
while True:
# receive command and data from the remote
cmd, data = remote.recv()
# do a step in each of the environment of the worker
if cmd == 'step':
all_results = [env.step(a) for (a, env) in zip(data, envs)]
obs, rewards, dones, infos = map(list, zip(*all_results))
ts += 1
for i in range(n_envs):
if dones[i] or (ts[i] >= max_path_length):
dones[i] = True
obs[i] = envs[i].reset()
ts[i] = 0
remote.send((obs, rewards, dones, infos))
# reset all the environments of the worker
elif cmd == 'reset':
obs = [env.reset() for env in envs]
ts[:] = 0
remote.send(obs)
# set the specified task for each of the environments of the worker
elif cmd == 'set_task':
for env in envs:
env.set_task(data)
remote.send(None)
# close the remote and stop the worker
elif cmd == 'close':
remote.close()
break
else:
raise NotImplementedError
|
exchange_rate.py
|
from datetime import datetime
import inspect
import requests
import sys
from threading import Thread
import time
import csv
from decimal import Decimal
from .bitcoin import COIN
from .i18n import _
from .util import PrintError, ThreadJob
# See https://en.wikipedia.org/wiki/ISO_4217
CCY_PRECISIONS = {'BHD': 3, 'BIF': 0, 'BYR': 0, 'CLF': 4, 'CLP': 0,
'CVE': 0, 'DJF': 0, 'GNF': 0, 'IQD': 3, 'ISK': 0,
'JOD': 3, 'JPY': 0, 'KMF': 0, 'KRW': 0, 'KWD': 3,
'LYD': 3, 'MGA': 1, 'MRO': 1, 'OMR': 3, 'PYG': 0,
'RWF': 0, 'TND': 3, 'UGX': 0, 'UYI': 0, 'VND': 0,
'VUV': 0, 'XAF': 0, 'XAU': 4, 'XOF': 0, 'XPF': 0}
class ExchangeBase(PrintError):
def __init__(self, on_quotes, on_history):
self.history = {}
self.quotes = {}
self.on_quotes = on_quotes
self.on_history = on_history
def get_json(self, site, get_string):
# APIs must have https
url = ''.join(['https://', site, get_string])
response = requests.request('GET', url, headers={'User-Agent' : 'Electrum'})
return response.json()
def get_csv(self, site, get_string):
url = ''.join(['https://', site, get_string])
response = requests.request('GET', url, headers={'User-Agent' : 'Electrum'})
reader = csv.DictReader(response.content.decode().split('\n'))
return list(reader)
def name(self):
return self.__class__.__name__
def update_safe(self, ccy):
try:
self.print_error("getting fx quotes for", ccy)
self.quotes = self.get_rates(ccy)
self.print_error("received fx quotes")
except BaseException as e:
self.print_error("failed fx quotes:", e)
self.on_quotes()
def update(self, ccy):
t = Thread(target=self.update_safe, args=(ccy,))
t.daemon = True
t.start()
def get_historical_rates_safe(self, ccy):
try:
self.print_error("requesting fx history for", ccy)
self.history[ccy] = self.historical_rates(ccy)
self.print_error("received fx history for", ccy)
self.on_history()
except BaseException as e:
self.print_error("failed fx history:", e)
def get_historical_rates(self, ccy):
result = self.history.get(ccy)
if not result and ccy in self.history_ccys():
t = Thread(target=self.get_historical_rates_safe, args=(ccy,))
t.daemon = True
t.start()
return result
def history_ccys(self):
return []
def historical_rate(self, ccy, d_t):
return self.history.get(ccy, {}).get(d_t.strftime('%Y-%m-%d'))
def get_currencies(self):
rates = self.get_rates('')
return sorted([str(a) for (a, b) in rates.items() if b is not None and len(a) in [3,4]])
class CryptoCompare(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('min-api.cryptocompare.com',
"/data/pricehistorical?fsym=ZCL&tsyms=USD")
return {'USD': Decimal(json['ZCL']['USD'])}
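# Illustrative sketch (hypothetical backend, not part of the original module): what a
# second exchange would look like. The host and endpoint are made up; a real backend
# only has to return a {currency: Decimal} mapping from get_rates(). The class is kept
# inside a function on purpose, so that the module-level discovery performed by
# get_exchanges_and_currencies() below does not pick it up and query a fake URL.
def _example_exchange_backend():
    class ExampleExchange(ExchangeBase):
        def get_rates(self, ccy):
            # hypothetical endpoint and JSON shape
            json = self.get_json('api.example.com', '/v1/price?symbol=ZCL')
            return {'USD': Decimal(str(json['price']))}
    return ExampleExchange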
def dictinvert(d):
inv = {}
for k, vlist in d.items():
for v in vlist:
keys = inv.setdefault(v, [])
keys.append(k)
return inv
def get_exchanges_and_currencies():
import os, json
path = os.path.join(os.path.dirname(__file__), 'currencies.json')
try:
with open(path, 'r') as f:
return json.loads(f.read())
except:
pass
d = {}
is_exchange = lambda obj: (inspect.isclass(obj)
and issubclass(obj, ExchangeBase)
and obj != ExchangeBase)
exchanges = dict(inspect.getmembers(sys.modules[__name__], is_exchange))
for name, klass in exchanges.items():
exchange = klass(None, None)
try:
d[name] = exchange.get_currencies()
except:
continue
with open(path, 'w') as f:
f.write(json.dumps(d, indent=4, sort_keys=True))
return d
CURRENCIES = get_exchanges_and_currencies()
def get_exchanges_by_ccy(history=True):
if not history:
return dictinvert(CURRENCIES)
d = {}
exchanges = CURRENCIES.keys()
for name in exchanges:
klass = globals()[name]
exchange = klass(None, None)
d[name] = exchange.history_ccys()
return dictinvert(d)
class FxThread(ThreadJob):
def __init__(self, config, network):
self.config = config
self.network = network
self.ccy = self.get_currency()
self.history_used_spot = False
self.ccy_combo = None
self.hist_checkbox = None
self.set_exchange(self.config_exchange())
def get_currencies(self, h):
d = get_exchanges_by_ccy(h)
return sorted(d.keys())
def get_exchanges_by_ccy(self, ccy, h):
d = get_exchanges_by_ccy(h)
return d.get(ccy, [])
def ccy_amount_str(self, amount, commas):
prec = CCY_PRECISIONS.get(self.ccy, 2)
fmt_str = "{:%s.%df}" % ("," if commas else "", max(0, prec))
return fmt_str.format(round(amount, prec))
def run(self):
# This runs from the plugins thread which catches exceptions
if self.is_enabled():
if self.timeout == 0 and self.show_history():
self.exchange.get_historical_rates(self.ccy)
if self.timeout <= time.time():
self.timeout = time.time() + 150
self.exchange.update(self.ccy)
def is_enabled(self):
return bool(self.config.get('use_exchange_rate'))
def set_enabled(self, b):
return self.config.set_key('use_exchange_rate', bool(b))
def get_history_config(self):
return bool(self.config.get('history_rates'))
def set_history_config(self, b):
self.config.set_key('history_rates', bool(b))
def get_fiat_address_config(self):
return bool(self.config.get('fiat_address'))
def set_fiat_address_config(self, b):
self.config.set_key('fiat_address', bool(b))
def get_currency(self):
# Use when dynamic fetching is needed
return self.config.get('currency', 'USD')
def config_exchange(self):
return self.config.get('use_exchange', 'CryptoCompare')
def show_history(self):
return self.is_enabled() and self.get_history_config() and self.ccy in self.exchange.history_ccys()
def set_currency(self, ccy):
self.ccy = ccy
self.config.set_key('currency', ccy, True)
self.timeout = 0 # Because self.ccy changes
self.on_quotes()
def set_exchange(self, name):
class_ = globals().get(name, CryptoCompare)
self.print_error("using exchange", name)
if self.config_exchange() != name:
self.config.set_key('use_exchange', name, True)
self.exchange = class_(self.on_quotes, self.on_history)
# A new exchange means new fx quotes, initially empty. Force
# a quote refresh
self.timeout = 0
def on_quotes(self):
self.network.trigger_callback('on_quotes')
def on_history(self):
self.network.trigger_callback('on_history')
def exchange_rate(self):
'''Returns None, or the exchange rate as a Decimal'''
rate = self.exchange.quotes.get(self.ccy)
if rate:
return Decimal(rate)
def format_amount_and_units(self, btc_balance):
rate = self.exchange_rate()
return '' if rate is None else "%s %s" % (self.value_str(btc_balance, rate), self.ccy)
def get_fiat_status_text(self, btc_balance, base_unit, decimal_point):
rate = self.exchange_rate()
return _(" (No exchange rate available)") if rate is None else " 1 %s=%s %s" % (base_unit,
self.value_str(COIN / (10**(8 - decimal_point)), rate), self.ccy)
def value_str(self, satoshis, rate):
if satoshis is None: # Can happen with incomplete history
return _("Unknown")
if rate:
value = Decimal(satoshis) / COIN * Decimal(rate)
return "%s" % (self.ccy_amount_str(value, True))
return _("No data")
def history_rate(self, d_t):
rate = self.exchange.historical_rate(self.ccy, d_t)
# Frequently there is no rate for today, until tomorrow :)
# Use spot quotes in that case
if rate is None and (datetime.today().date() - d_t.date()).days <= 2:
rate = self.exchange.quotes.get(self.ccy)
self.history_used_spot = True
return rate
def historical_value_str(self, satoshis, d_t):
rate = self.history_rate(d_t)
return self.value_str(satoshis, rate)
|
main.py
|
import sys
import os
import shutil
import random
from multiprocessing import Process, Manager
import pandas as pd
from parse import *
from pyswip import Prolog
from commits import *
APP_NAME = "museuMonitor"
DAP = "dap(A,P,R,Infs,Time).".format(APP_NAME)
DAP_CR = "dapCR(A,NP,NR,Infs,Time).".format(APP_NAME)
INFR_FILE = "code/infra.pl"
APP_FILE = "code/app.pl"
NODE = "node({}, {}, ({}, {}, {}), {}, {}).\n"
LINK = "link({}, {}, {}, {}).\n"
HW_CAPS = {}
QOS_CAPS = {}
FAIL_PROB = 0.02
SEED = 33
def my_query(s, p):
q = p.query(s)
result = next(q)
return result
def no_cr_process(res):
p = Prolog()
p.consult("code/dap.pl")
try:
no_cr = my_query(DAP, p)
res.append({"Infs": no_cr["Infs"], "Time": no_cr["Time"]})
except StopIteration:
print(" - fault")
res.append({})
def fail():
return random.random() < FAIL_PROB
def save_caps():
global HW_CAPS, QOS_CAPS
f_r = open(INFR_FILE, "r")
infr = f_r.readlines()
nodes = [i for i in infr if i.startswith("node(")]
links = [i for i in infr if i.startswith("link(")]
for n in nodes:
name, _, _, ram, hdd, _, _ = parse(NODE, n)
HW_CAPS[name] = {'ram': float(ram), 'hdd': float(hdd)}
for ln in links:
n1, n2, lat, bw = parse(LINK, ln)
QOS_CAPS[(n1, n2)] = {'lat': float(lat), 'bw': float(bw)}
def change_nodes(nodes, prob):
res = []
for n in nodes:
if random.random() < prob:
name, sw, cpu, _, _, sec, iot = parse(NODE, n)
ram = HW_CAPS[name]['ram']
hdd = HW_CAPS[name]['hdd']
new_ram = round(random.uniform(ram//10, ram * 1.1), 2) if not fail() else 0
new_hdd = round(random.uniform(hdd//10, hdd * 1.2), 2) if not fail() else 0
n = NODE.format(name, sw, cpu, new_ram, new_hdd, sec, iot)
res.append(n)
return res
def change_links(links, prob):
res = []
for ln in links:
if random.random() < prob:
n1, n2, _, _ = parse(LINK, ln)
lat = QOS_CAPS[(n1, n2)]['lat']
bw = QOS_CAPS[(n1, n2)]['bw']
new_lat = round(random.uniform(lat // 2, lat * 1.5), 2) if not fail() else 1000
new_bw = round(random.uniform(bw // 2, bw * 1.1), 2) if not fail() else 0
ln = LINK.format(n1, n2, new_lat, new_bw)
res.append(ln)
return res
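# Illustrative sketch (the fact below is made up, not read from INFR_FILE): the NODE
# and LINK templates above double as parsers (via parse()) and as renderers (via
# str.format()), which is what save_caps/change_nodes/change_links rely on.
def _example_node_roundtrip():
    fact = NODE.format("n1", "[ubuntu]", 4, 8.0, 64.0, "top", "[]")
    name, sw, cpu, ram, hdd, sec, iot = parse(NODE, fact)
    # re-render the same node with half of the parsed RAM
    return NODE.format(name, sw, cpu, float(ram) / 2, hdd, sec, iot)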
def change_infr(prob):
f_r = open(INFR_FILE, "r")
infr = f_r.readlines()
nodes = [i for i in infr if i.startswith("node(")]
links = [i for i in infr if i.startswith("link(")]
for n in nodes:
infr.remove(n)
for ln in links:
infr.remove(ln)
f_r.close()
new_nodes = change_nodes(nodes, prob)
new_links = change_links(links, prob)
infr.extend(new_nodes)
infr.extend(new_links)
f_w = open(INFR_FILE, "w")
f_w.writelines(infr)
f_w.close()
def change_app(epoch):
if epoch % 100 == 0:
commit_no = epoch // 100
eval("commit_{}()".format(commit_no))
def get_data(n):
if os.path.exists(APP_FILE):
os.remove(APP_FILE)
if os.path.exists(INFR_FILE):
os.remove(INFR_FILE)
shutil.copy("infra/app.pl", APP_FILE)
shutil.copy(f"infra/infra_{n}.pl", INFR_FILE)
def main(n=16, prob=0.1, epochs=600):
mgr = Manager()
res_no_cr = mgr.list()
res_cr = []
# preliminary operation
random.seed(SEED)
get_data(n)
p_cr = Prolog()
p_cr.consult("code/dap2.pl")
i = 1
while i <= epochs:
if i == 1:
save_caps()
# NO CR PROCESS
process = Process(target=no_cr_process, args=(res_no_cr,))
process.start()
process.join()
# CR PROCESS
try:
my_query("make", p_cr)
cr = my_query(DAP_CR, p_cr)
res_cr.append({"InfsCR": cr["Infs"], "TimeCR": cr["Time"]})
except StopIteration:
print("CR fault")
res_cr.append({})
sys.stdout.write("\r")
sys.stdout.flush()
sys.stdout.write("Done {}/{}".format(i, epochs))
i += 1
change_app(i)
change_infr(prob)
print("\n")
res_no_cr = list(res_no_cr)
df_no_cr = pd.DataFrame.from_records(res_no_cr, columns=["Infs", "Time"])
df_cr = pd.DataFrame.from_records(res_cr, columns=["InfsCR", "TimeCR"])
df = pd.concat([df_no_cr, df_cr], axis=1, ignore_index=True)
df.columns = ["Infs", "Time", "InfsCR", "TimeCR"]
avg_time = df['Time'].mean()
avg_time_cr = df['TimeCR'].mean()
avg_infs = df['Infs'].mean()
avg_infs_cr = df['InfsCR'].mean()
print("Done for {} prob\n".format(prob))
print("TIME: {:.6f} \t TIME_CR: {:.6f}".format(avg_time, avg_time_cr))
print("INFS: {} \t INFS_CR: {}".format(avg_infs, avg_infs_cr))
filename = "results_{}_{}.csv".format(n, prob)
df.index.name = "Epoch"
df.to_csv("csv/{}".format(filename))
if __name__ == '__main__':
PROBS = [0.1, 0.2, 0.4, 0.5]
NODES = [16, 32, 64, 128, 256, 512]
for n in NODES:
for p in PROBS:
main(n=n, prob=p, epochs=600)
|
vegeta_stress.py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
import infra.network
import infra.e2e_args
import subprocess
import threading
import time
import generate_vegeta_targets as TargetGenerator
from loguru import logger as LOG
VEGETA_BIN = "/opt/vegeta/vegeta"
def print_memory_stats(node, shutdown_event):
with node.client() as c:
while not shutdown_event.is_set():
r = c.get("/node/memory")
LOG.warning(r.body.json())
time.sleep(10)
def run(args, additional_attack_args):
# Test that vegeta is available
subprocess.run([VEGETA_BIN, "-version"], capture_output=True, check=True)
with infra.network.network(
args.nodes,
args.binary_dir,
args.debug_nodes,
args.perf_nodes,
pdb=args.pdb,
) as network:
network.start_and_join(args)
primary, _ = network.find_primary()
primary_hostname = f"{primary.pubhost}:{primary.pubport}"
vegeta_targets = "vegeta_targets"
with open(vegeta_targets, "w") as f:
for i in range(10):
TargetGenerator.write_vegeta_target_line(
f,
primary_hostname,
"/app/log/private",
body={"id": i, "msg": f"Private message: {i}"},
)
for i in range(10):
TargetGenerator.write_vegeta_target_line(
f, primary_hostname, f"/app/log/private?id={i}", method="GET"
)
for i in range(10):
TargetGenerator.write_vegeta_target_line(
f,
primary_hostname,
"/app/log/public",
body={"id": i, "msg": f"Public message: {i}"},
)
for i in range(10):
TargetGenerator.write_vegeta_target_line(
f, primary_hostname, f"/app/log/public?id={i}", method="GET"
)
attack_cmd = [VEGETA_BIN, "attack"]
attack_cmd += ["--targets", vegeta_targets]
attack_cmd += ["--format", "json"]
attack_cmd += ["--duration", "10s"]
sa = primary.session_auth("user0")
attack_cmd += ["--cert", sa["session_auth"].cert]
attack_cmd += ["--key", sa["session_auth"].key]
attack_cmd += ["--root-certs", sa["ca"]]
attack_cmd += additional_attack_args
attack_cmd_s = " ".join(attack_cmd)
LOG.warning(f"Starting: {attack_cmd_s}")
vegeta_run = subprocess.Popen(attack_cmd, stdout=subprocess.PIPE)
tee_split = subprocess.Popen(
["tee", "vegeta_results.bin"],
stdin=vegeta_run.stdout,
stdout=subprocess.PIPE,
)
report_cmd = [VEGETA_BIN, "report", "--every", "5s"]
vegeta_report = subprocess.Popen(report_cmd, stdin=tee_split.stdout)
# Start a second thread which will print the primary's memory stats at regular intervals
shutdown_event = threading.Event()
memory_thread = threading.Thread(
target=print_memory_stats, args=(primary, shutdown_event)
)
memory_thread.start()
LOG.info("Waiting for completion...")
vegeta_report.communicate()
LOG.info("Shutting down...")
shutdown_event.set()
memory_thread.join()
LOG.success("Done!")
if __name__ == "__main__":
def add(parser):
pass
args, unknown_args = infra.e2e_args.cli_args(add=add, accept_unknown=True)
args.package = "liblogging"
args.nodes = infra.e2e_args.min_nodes(args, f=1)
run(args, unknown_args)
|
main_test.py
|
import unittest
import multiprocessing
import time
import server, client
class MainTest (unittest.TestCase):
"""
This test runs the server in a shell subprocess and after a second
it runs the client in another shell subprocess. Afterwards, it kills
both processes and run assertions on the output.
The testing is a little bit brutal.
"""
def test_run(self):
sp = multiprocessing.Process(target=server.run, args=())
cp = multiprocessing.Process(target=client.run, args=())
sp.start()
time.sleep(1)
cp.start()
time.sleep(1)
sp.terminate()
f = open('client.log')
output = [line.rstrip() for line in f.readlines()]
f.close()
print(output)
self.assertEqual(output, ['OUT:hello', 'IN:hello', 'OUT:from', 'IN:from', 'OUT:twisted', 'IN:twisted', 'OUT:client', 'IN:client', 'OUT:exit', 'IN:exit'])
|
asyn.py
|
import asyncio
import asyncio.events
import functools
import inspect
import io
import os
import re
import sys
import threading
from contextlib import contextmanager
from glob import has_magic
from .callbacks import _DEFAULT_CALLBACK
from .exceptions import FSTimeoutError
from .spec import AbstractBufferedFile, AbstractFileSystem
from .utils import is_exception, other_paths
private = re.compile("_[^_]")
iothread = [None] # dedicated fsspec IO thread
loop = [None] # global event loop for any non-async instance
_lock = None # global lock placeholder
def get_lock():
"""Allocate or return a threading lock.
The lock is allocated on first use to allow setting one lock per forked process.
"""
global _lock
if not _lock:
_lock = threading.Lock()
return _lock
def reset_lock():
"""Reset the global lock.
This should be called only on the init of a forked process to reset the lock to
None, enabling the new forked process to get a new lock.
"""
global _lock
iothread[0] = None
loop[0] = None
_lock = None
async def _runner(event, coro, result, timeout=None):
timeout = timeout if timeout else None # convert 0 or 0.0 to None
if timeout is not None:
coro = asyncio.wait_for(coro, timeout=timeout)
try:
result[0] = await coro
except Exception as ex:
result[0] = ex
finally:
event.set()
def sync(loop, func, *args, timeout=None, **kwargs):
"""
Make loop run coroutine until it returns. Runs in other thread
"""
timeout = timeout if timeout else None # convert 0 or 0.0 to None
# NB: if the loop is not running *yet*, it is OK to submit work
# and we will wait for it
if loop is None or loop.is_closed():
raise RuntimeError("Loop is not running")
try:
loop0 = asyncio.events.get_running_loop()
if loop0 is loop:
raise NotImplementedError("Calling sync() from within a running loop")
except RuntimeError:
pass
coro = func(*args, **kwargs)
result = [None]
event = threading.Event()
asyncio.run_coroutine_threadsafe(_runner(event, coro, result, timeout), loop)
while True:
# this loops allows thread to get interrupted
if event.wait(1):
break
if timeout is not None:
timeout -= 1
if timeout < 0:
raise FSTimeoutError
return_result = result[0]
if isinstance(return_result, asyncio.TimeoutError):
# suppress asyncio.TimeoutError, raise FSTimeoutError
raise FSTimeoutError from return_result
elif isinstance(return_result, BaseException):
raise return_result
else:
return return_result
def sync_wrapper(func, obj=None):
"""Given a function, make so can be called in async or blocking contexts
Leave obj=None if defining within a class. Pass the instance if attaching
as an attribute of the instance.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
self = obj or args[0]
return sync(self.loop, func, *args, **kwargs)
return wrapper
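# Illustrative sketch (DemoClient is hypothetical): how sync_wrapper is meant to be
# used on a class that owns an event loop; the filesystem classes further down follow
# the same pattern.
def _example_sync_wrapper_usage():
    class DemoClient:
        def __init__(self):
            self.loop = get_loop()  # dedicated IO loop running in a daemon thread

        async def _ping(self, value):
            await asyncio.sleep(0)
            return value

        # blocking counterpart generated from the coroutine above
        ping = sync_wrapper(_ping)

    return DemoClient().ping("pong")  # returns "pong" without awaiting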
@contextmanager
def _selector_policy():
original_policy = asyncio.get_event_loop_policy()
try:
if (
sys.version_info >= (3, 8)
and os.name == "nt"
and hasattr(asyncio, "WindowsSelectorEventLoopPolicy")
):
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
yield
finally:
asyncio.set_event_loop_policy(original_policy)
def get_running_loop():
if hasattr(asyncio, "get_running_loop"):
return asyncio.get_running_loop()
else:
loop = asyncio._get_running_loop()
if loop is None:
raise RuntimeError("no running event loop")
else:
return loop
def get_loop():
"""Create or return the default fsspec IO loop
The loop will be running on a separate thread.
"""
if loop[0] is None:
with get_lock():
# repeat the check just in case the loop got filled between the
# previous two calls from another thread
if loop[0] is None:
with _selector_policy():
loop[0] = asyncio.new_event_loop()
th = threading.Thread(target=loop[0].run_forever, name="fsspecIO")
th.daemon = True
th.start()
iothread[0] = th
return loop[0]
@contextmanager
def fsspec_loop():
"""Temporarily switch the current event loop to the fsspec's
own loop, and then revert it back after the context gets
terminated.
"""
try:
original_loop = get_running_loop()
except RuntimeError:
original_loop = None
fsspec_loop = get_loop()
try:
asyncio._set_running_loop(fsspec_loop)
yield fsspec_loop
finally:
asyncio._set_running_loop(original_loop)
try:
import resource
except ImportError:
resource = None
ResourceError = OSError
else:
ResourceError = resource.error
_DEFAULT_BATCH_SIZE = 128
_NOFILES_DEFAULT_BATCH_SIZE = 1280
def _get_batch_size(nofiles=False):
from fsspec.config import conf
if nofiles:
if "nofiles_gather_batch_size" in conf:
return conf["nofiles_gather_batch_size"]
else:
if "gather_batch_size" in conf:
return conf["gather_batch_size"]
if nofiles:
return _NOFILES_DEFAULT_BATCH_SIZE
if resource is None:
return _DEFAULT_BATCH_SIZE
try:
soft_limit, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
except (ImportError, ValueError, ResourceError):
return _DEFAULT_BATCH_SIZE
if soft_limit == resource.RLIM_INFINITY:
return -1
else:
return soft_limit // 8
async def _run_coros_in_chunks(
coros,
batch_size=None,
callback=_DEFAULT_CALLBACK,
timeout=None,
return_exceptions=False,
nofiles=False,
):
"""Run the given coroutines in chunks.
Parameters
----------
coros: list of coroutines to run
batch_size: int or None
Number of coroutines to submit/wait on simultaneously.
If -1, then it will not be any throttling. If
None, it will be inferred from _get_batch_size()
callback: fsspec.callbacks.Callback instance
Gets a relative_update when each coroutine completes
timeout: number or None
If given, each coroutine times out after this time. Note that, since
there are multiple batches, the total run time of this function will in
general be longer
return_exceptions: bool
Same meaning as in asyncio.gather
nofiles: bool
If inferring the batch_size, does this operation involve local files?
If yes, you normally expect smaller batches.
"""
if batch_size is None:
batch_size = _get_batch_size(nofiles=nofiles)
if batch_size == -1:
batch_size = len(coros)
assert batch_size > 0
results = []
for start in range(0, len(coros), batch_size):
chunk = [
asyncio.Task(asyncio.wait_for(c, timeout=timeout))
for c in coros[start : start + batch_size]
]
if callback is not _DEFAULT_CALLBACK:
[
t.add_done_callback(lambda *_, **__: callback.relative_update(1))
for t in chunk
]
results.extend(
await asyncio.gather(*chunk, return_exceptions=return_exceptions),
)
return results
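# Illustrative sketch (trivial placeholder coroutines): driving _run_coros_in_chunks
# from blocking code through the shared IO loop. With batch_size=4, at most four
# coroutines are awaited at any one time.
def _example_run_coros_in_chunks():
    async def _square(i):
        await asyncio.sleep(0)
        return i * i

    coros = [_square(i) for i in range(10)]
    return sync(get_loop(), _run_coros_in_chunks, coros, batch_size=4)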
# these methods should be implemented as async by any async-able backend
async_methods = [
"_ls",
"_cat_file",
"_get_file",
"_put_file",
"_rm_file",
"_cp_file",
"_pipe_file",
"_expand_path",
"_info",
"_isfile",
"_isdir",
"_exists",
"_walk",
"_glob",
"_find",
"_du",
"_size",
"_mkdir",
"_makedirs",
]
class AsyncFileSystem(AbstractFileSystem):
"""Async file operations, default implementations
Passes bulk operations to asyncio.gather for concurrent operation.
Implementations that have concurrent batch operations and/or async methods
should inherit from this class instead of AbstractFileSystem. Docstrings are
copied from the un-underscored method in AbstractFileSystem, if not given.
"""
# note that methods do not have docstring here; they will be copied
# for _* methods and inferred for overridden methods.
async_impl = True
disable_throttling = False
def __init__(self, *args, asynchronous=False, loop=None, batch_size=None, **kwargs):
self.asynchronous = asynchronous
self._pid = os.getpid()
if not asynchronous:
self._loop = loop or get_loop()
else:
self._loop = None
self.batch_size = batch_size
super().__init__(*args, **kwargs)
@property
def loop(self):
if self._pid != os.getpid():
raise RuntimeError("This class is not fork-safe")
return self._loop
async def _rm_file(self, path, **kwargs):
raise NotImplementedError
async def _rm(self, path, recursive=False, batch_size=None, **kwargs):
# TODO: implement on_error
batch_size = batch_size or self.batch_size
path = await self._expand_path(path, recursive=recursive)
return await _run_coros_in_chunks(
[self._rm_file(p, **kwargs) for p in path],
batch_size=batch_size,
nofiles=True,
)
async def _cp_file(self, path1, path2, **kwargs):
raise NotImplementedError
async def _copy(
self,
path1,
path2,
recursive=False,
on_error=None,
maxdepth=None,
batch_size=None,
**kwargs,
):
if on_error is None and recursive:
on_error = "ignore"
elif on_error is None:
on_error = "raise"
paths = await self._expand_path(path1, maxdepth=maxdepth, recursive=recursive)
path2 = other_paths(paths, path2)
batch_size = batch_size or self.batch_size
coros = [self._cp_file(p1, p2, **kwargs) for p1, p2 in zip(paths, path2)]
result = await _run_coros_in_chunks(
coros, batch_size=batch_size, return_exceptions=True, nofiles=True
)
for ex in filter(is_exception, result):
if on_error == "ignore" and isinstance(ex, FileNotFoundError):
continue
raise ex
async def _pipe(self, path, value=None, batch_size=None, **kwargs):
if isinstance(path, str):
path = {path: value}
batch_size = batch_size or self.batch_size
return await _run_coros_in_chunks(
[self._pipe_file(k, v, **kwargs) for k, v in path.items()],
batch_size=batch_size,
nofiles=True,
)
async def _process_limits(self, url, start, end):
"""Helper for "Range"-based _cat_file"""
size = None
suff = False
if start is not None and start < 0:
# if start is negative and end None, end is the "suffix length"
if end is None:
end = -start
start = ""
suff = True
else:
size = size or (await self._info(url))["size"]
start = size + start
elif start is None:
start = 0
if not suff:
if end is not None and end < 0:
if start is not None:
size = size or (await self._info(url))["size"]
end = size + end
elif end is None:
end = ""
if isinstance(end, int):
end -= 1 # bytes range is inclusive
return "bytes=%s-%s" % (start, end)
async def _cat_file(self, path, start=None, end=None, **kwargs):
raise NotImplementedError
async def _cat(
self, path, recursive=False, on_error="raise", batch_size=None, **kwargs
):
paths = await self._expand_path(path, recursive=recursive)
coros = [self._cat_file(path, **kwargs) for path in paths]
batch_size = batch_size or self.batch_size
out = await _run_coros_in_chunks(
coros, batch_size=batch_size, nofiles=True, return_exceptions=True
)
if on_error == "raise":
ex = next(filter(is_exception, out), False)
if ex:
raise ex
if (
len(paths) > 1
or isinstance(path, list)
or paths[0] != self._strip_protocol(path)
):
return {
k: v
for k, v in zip(paths, out)
if on_error != "omit" or not is_exception(v)
}
else:
return out[0]
async def _cat_ranges(
self, paths, starts, ends, max_gap=None, batch_size=None, **kwargs
):
# TODO: on_error
if max_gap is not None:
# use utils.merge_offset_ranges
raise NotImplementedError
if not isinstance(paths, list):
raise TypeError
if not isinstance(starts, list):
starts = [starts] * len(paths)
if not isinstance(ends, list):
ends = [ends] * len(paths)
if len(starts) != len(paths) or len(ends) != len(paths):
raise ValueError
coros = [
self._cat_file(p, start=s, end=e, **kwargs)
for p, s, e in zip(paths, starts, ends)
]
batch_size = batch_size or self.batch_size
return await _run_coros_in_chunks(coros, batch_size=batch_size, nofiles=True)
async def _put_file(self, lpath, rpath, **kwargs):
raise NotImplementedError
async def _put(
self,
lpath,
rpath,
recursive=False,
callback=_DEFAULT_CALLBACK,
batch_size=None,
**kwargs,
):
"""Copy file(s) from local.
Copies a specific file or tree of files (if recursive=True). If rpath
ends with a "/", it will be assumed to be a directory, and target files
will go within.
The put_file method will be called concurrently on a batch of files. The
batch_size option can configure the amount of futures that can be executed
at the same time. If it is -1, then all the files will be uploaded concurrently.
The default can be set for this instance by passing "batch_size" in the
constructor, or for all instances by setting the "gather_batch_size" key
in ``fsspec.config.conf``, falling back to 1/8th of the system limit.
"""
from .implementations.local import LocalFileSystem, make_path_posix
rpath = self._strip_protocol(rpath)
if isinstance(lpath, str):
lpath = make_path_posix(lpath)
fs = LocalFileSystem()
lpaths = fs.expand_path(lpath, recursive=recursive)
rpaths = other_paths(
lpaths, rpath, exists=isinstance(rpath, str) and await self._isdir(rpath)
)
is_dir = {l: os.path.isdir(l) for l in lpaths}
rdirs = [r for l, r in zip(lpaths, rpaths) if is_dir[l]]
file_pairs = [(l, r) for l, r in zip(lpaths, rpaths) if not is_dir[l]]
await asyncio.gather(*[self._makedirs(d, exist_ok=True) for d in rdirs])
batch_size = batch_size or self.batch_size
coros = []
callback.set_size(len(file_pairs))
for lfile, rfile in file_pairs:
callback.branch(lfile, rfile, kwargs)
coros.append(self._put_file(lfile, rfile, **kwargs))
return await _run_coros_in_chunks(
coros, batch_size=batch_size, callback=callback
)
async def _get_file(self, rpath, lpath, **kwargs):
raise NotImplementedError
async def _get(
self, rpath, lpath, recursive=False, callback=_DEFAULT_CALLBACK, **kwargs
):
"""Copy file(s) to local.
Copies a specific file or tree of files (if recursive=True). If lpath
ends with a "/", it will be assumed to be a directory, and target files
will go within. Can submit a list of paths, which may be glob-patterns
and will be expanded.
The get_file method will be called concurrently on a batch of files. The
batch_size option can configure the amount of futures that can be executed
at the same time. If it is -1, then all the files will be downloaded concurrently.
The default can be set for this instance by passing "batch_size" in the
constructor, or for all instances by setting the "gather_batch_size" key
in ``fsspec.config.conf``, falling back to 1/8th of the system limit .
"""
from fsspec.implementations.local import make_path_posix
rpath = self._strip_protocol(rpath)
lpath = make_path_posix(lpath)
rpaths = await self._expand_path(rpath, recursive=recursive)
lpaths = other_paths(rpaths, lpath)
[os.makedirs(os.path.dirname(lp), exist_ok=True) for lp in lpaths]
batch_size = kwargs.pop("batch_size", self.batch_size)
coros = []
callback.set_size(len(lpaths))
for lpath, rpath in zip(lpaths, rpaths):
callback.branch(rpath, lpath, kwargs)
coros.append(self._get_file(rpath, lpath, **kwargs))
return await _run_coros_in_chunks(
coros, batch_size=batch_size, callback=callback
)
async def _isfile(self, path):
try:
return (await self._info(path))["type"] == "file"
except: # noqa: E722
return False
async def _isdir(self, path):
try:
return (await self._info(path))["type"] == "directory"
except IOError:
return False
async def _size(self, path):
return (await self._info(path)).get("size", None)
async def _sizes(self, paths, batch_size=None):
batch_size = batch_size or self.batch_size
return await _run_coros_in_chunks(
[self._size(p) for p in paths], batch_size=batch_size
)
async def _exists(self, path):
try:
await self._info(path)
return True
except FileNotFoundError:
return False
async def _info(self, path, **kwargs):
raise NotImplementedError
async def _ls(self, path, detail=True, **kwargs):
raise NotImplementedError
async def _walk(self, path, maxdepth=None, **kwargs):
path = self._strip_protocol(path)
full_dirs = {}
dirs = {}
files = {}
detail = kwargs.pop("detail", False)
try:
listing = await self._ls(path, detail=True, **kwargs)
except (FileNotFoundError, IOError):
if detail:
yield path, {}, {}
else:
yield path, [], []
return
for info in listing:
# each info name must be at least [path]/part , but here
# we check also for names like [path]/part/
pathname = info["name"].rstrip("/")
name = pathname.rsplit("/", 1)[-1]
if info["type"] == "directory" and pathname != path:
# do not include "self" path
full_dirs[pathname] = info
dirs[name] = info
elif pathname == path:
# file-like with same name as given path
files[""] = info
else:
files[name] = info
if detail:
yield path, dirs, files
else:
yield path, list(dirs), list(files)
if maxdepth is not None:
maxdepth -= 1
if maxdepth < 1:
return
for d in full_dirs:
async for _ in self._walk(d, maxdepth=maxdepth, detail=detail, **kwargs):
yield _
async def _glob(self, path, **kwargs):
import re
ends = path.endswith("/")
path = self._strip_protocol(path)
indstar = path.find("*") if path.find("*") >= 0 else len(path)
indques = path.find("?") if path.find("?") >= 0 else len(path)
indbrace = path.find("[") if path.find("[") >= 0 else len(path)
ind = min(indstar, indques, indbrace)
detail = kwargs.pop("detail", False)
if not has_magic(path):
root = path
depth = 1
if ends:
path += "/*"
elif await self._exists(path):
if not detail:
return [path]
else:
return {path: await self._info(path)}
else:
if not detail:
return [] # glob of non-existent returns empty
else:
return {}
elif "/" in path[:ind]:
ind2 = path[:ind].rindex("/")
root = path[: ind2 + 1]
depth = None if "**" in path else path[ind2 + 1 :].count("/") + 1
else:
root = ""
depth = None if "**" in path else path[ind + 1 :].count("/") + 1
allpaths = await self._find(
root, maxdepth=depth, withdirs=True, detail=True, **kwargs
)
# Escape characters special to python regex, leaving our supported
# special characters in place.
# See https://www.gnu.org/software/bash/manual/html_node/Pattern-Matching.html
# for shell globbing details.
pattern = (
"^"
+ (
path.replace("\\", r"\\")
.replace(".", r"\.")
.replace("+", r"\+")
.replace("//", "/")
.replace("(", r"\(")
.replace(")", r"\)")
.replace("|", r"\|")
.replace("^", r"\^")
.replace("$", r"\$")
.replace("{", r"\{")
.replace("}", r"\}")
.rstrip("/")
.replace("?", ".")
)
+ "$"
)
pattern = re.sub("[*]{2}", "=PLACEHOLDER=", pattern)
pattern = re.sub("[*]", "[^/]*", pattern)
pattern = re.compile(pattern.replace("=PLACEHOLDER=", ".*"))
out = {
p: allpaths[p]
for p in sorted(allpaths)
if pattern.match(p.replace("//", "/").rstrip("/"))
}
if detail:
return out
else:
return list(out)
async def _du(self, path, total=True, maxdepth=None, **kwargs):
sizes = {}
# async for?
for f in await self._find(path, maxdepth=maxdepth, **kwargs):
info = await self._info(f)
sizes[info["name"]] = info["size"]
if total:
return sum(sizes.values())
else:
return sizes
async def _find(self, path, maxdepth=None, withdirs=False, **kwargs):
path = self._strip_protocol(path)
out = dict()
detail = kwargs.pop("detail", False)
# async for?
async for _, dirs, files in self._walk(path, maxdepth, detail=True, **kwargs):
if withdirs:
files.update(dirs)
out.update({info["name"]: info for name, info in files.items()})
if not out and (await self._isfile(path)):
# walk works on directories, but find should also return [path]
# when path happens to be a file
out[path] = {}
names = sorted(out)
if not detail:
return names
else:
return {name: out[name] for name in names}
async def _expand_path(self, path, recursive=False, maxdepth=None):
if isinstance(path, str):
out = await self._expand_path([path], recursive, maxdepth)
else:
# reduce depth on each recursion level unless None or 0
maxdepth = maxdepth if not maxdepth else maxdepth - 1
out = set()
path = [self._strip_protocol(p) for p in path]
for p in path: # can gather here
if has_magic(p):
bit = set(await self._glob(p))
out |= bit
if recursive:
out |= set(
await self._expand_path(
list(bit), recursive=recursive, maxdepth=maxdepth
)
)
continue
elif recursive:
rec = set(await self._find(p, maxdepth=maxdepth, withdirs=True))
out |= rec
if p not in out and (recursive is False or (await self._exists(p))):
# should only check once, for the root
out.add(p)
if not out:
raise FileNotFoundError(path)
return list(sorted(out))
async def _mkdir(self, path, create_parents=True, **kwargs):
pass # not necessary to implement, may not have directories
async def _makedirs(self, path, exist_ok=False):
pass # not necessary to implement, may not have directories
async def open_async(self, path, mode="rb", **kwargs):
if "b" not in mode or kwargs.get("compression"):
raise ValueError
raise NotImplementedError
def mirror_sync_methods(obj):
"""Populate sync and async methods for obj
For each method will create a sync version if the name refers to an async method
(coroutine) and there is no override in the child class; will create an async
method for the corresponding sync method if there is no implementation.
Uses the methods specified in
- async_methods: the set that an implementation is expected to provide
- default_async_methods: that can be derived from their sync version in
AbstractFileSystem
- AsyncFileSystem: async-specific default coroutines
"""
from fsspec import AbstractFileSystem
for method in async_methods + dir(AsyncFileSystem):
if not method.startswith("_"):
continue
smethod = method[1:]
if private.match(method):
isco = inspect.iscoroutinefunction(getattr(obj, method, None))
unsync = getattr(getattr(obj, smethod, False), "__func__", None)
is_default = unsync is getattr(AbstractFileSystem, smethod, "")
if isco and is_default:
mth = sync_wrapper(getattr(obj, method), obj=obj)
setattr(obj, smethod, mth)
if not mth.__doc__:
mth.__doc__ = getattr(
getattr(AbstractFileSystem, smethod, None), "__doc__", ""
)
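# Illustrative sketch (EchoFS is hypothetical): a minimal async backend whose blocking
# methods are generated from its coroutines. Depending on the library version,
# instantiation may already apply the mirroring, in which case the explicit call below
# is a harmless no-op.
def _example_mirror_sync_methods():
    class EchoFS(AsyncFileSystem):
        async def _cat_file(self, path, start=None, end=None, **kwargs):
            return b"contents of " + path.encode()

    fs = EchoFS()
    mirror_sync_methods(fs)
    return fs.cat_file("some/key")  # blocking call, delegates to _cat_file via the loop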
class FSSpecCoroutineCancel(Exception):
pass
def _dump_running_tasks(
printout=True, cancel=True, exc=FSSpecCoroutineCancel, with_task=False
):
import traceback
tasks = [t for t in asyncio.tasks.all_tasks(loop[0]) if not t.done()]
if printout:
[task.print_stack() for task in tasks]
out = [
{
"locals": task._coro.cr_frame.f_locals,
"file": task._coro.cr_frame.f_code.co_filename,
"firstline": task._coro.cr_frame.f_code.co_firstlineno,
"linelo": task._coro.cr_frame.f_lineno,
"stack": traceback.format_stack(task._coro.cr_frame),
"task": task if with_task else None,
}
for task in tasks
]
if cancel:
for t in tasks:
cbs = t._callbacks
t.cancel()
asyncio.futures.Future.set_exception(t, exc)
asyncio.futures.Future.cancel(t)
[cb[0](t) for cb in cbs] # cancels any dependent concurrent.futures
try:
t._coro.throw(exc) # exits coro, unless explicitly handled
except exc:
pass
return out
class AbstractAsyncStreamedFile(AbstractBufferedFile):
# no read buffering, and always auto-commit
# TODO: readahead might still be useful here, but needs async version
async def read(self, length=-1):
"""
Return data from cache, or fetch pieces as necessary
Parameters
----------
length: int (-1)
Number of bytes to read; if <0, all remaining bytes.
"""
length = -1 if length is None else int(length)
if self.mode != "rb":
raise ValueError("File not in read mode")
if length < 0:
length = self.size - self.loc
if self.closed:
raise ValueError("I/O operation on closed file.")
if length == 0:
# don't even bother calling fetch
return b""
out = await self._fetch_range(self.loc, self.loc + length)
self.loc += len(out)
return out
async def write(self, data):
"""
Write data to buffer.
Buffer only sent on flush() or if buffer is greater than
or equal to blocksize.
Parameters
----------
data: bytes
Set of bytes to be written.
"""
if self.mode not in {"wb", "ab"}:
raise ValueError("File not in write mode")
if self.closed:
raise ValueError("I/O operation on closed file.")
if self.forced:
raise ValueError("This file has been force-flushed, can only close")
out = self.buffer.write(data)
self.loc += out
if self.buffer.tell() >= self.blocksize:
await self.flush()
return out
async def close(self):
"""Close file
Finalizes writes, discards cache
"""
if getattr(self, "_unclosable", False):
return
if self.closed:
return
if self.mode == "rb":
self.cache = None
else:
if not self.forced:
await self.flush(force=True)
if self.fs is not None:
self.fs.invalidate_cache(self.path)
self.fs.invalidate_cache(self.fs._parent(self.path))
self.closed = True
async def flush(self, force=False):
if self.closed:
raise ValueError("Flush on closed file")
if force and self.forced:
raise ValueError("Force flush cannot be called more than once")
if force:
self.forced = True
if self.mode not in {"wb", "ab"}:
# no-op to flush on read-mode
return
if not force and self.buffer.tell() < self.blocksize:
# Defer write on small block
return
if self.offset is None:
# Initialize a multipart upload
self.offset = 0
try:
await self._initiate_upload()
except: # noqa: E722
self.closed = True
raise
if await self._upload_chunk(final=force) is not False:
self.offset += self.buffer.seek(0, 2)
self.buffer = io.BytesIO()
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
async def _fetch_range(self, start, end):
raise NotImplementedError
async def _initiate_upload(self):
raise NotImplementedError
async def _upload_chunk(self, final=False):
raise NotImplementedError
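# Illustrative sketch (assumes a backend that actually implements open_async() and
# _fetch_range(); the classes above only provide the scaffolding): a streamed file is
# obtained from the backend and used as an async context manager.
async def _example_streamed_read(fs, path):
    async with await fs.open_async(path, "rb") as f:
        # read() with the default length returns all remaining bytes
        return await f.read()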
|
wrapper_window.py
|
import abc
from typing import Any
from gbvision.constants.types import Frame
from .window import Window
from gbvision.models.system import EMPTY_PIPELINE
from threading import Thread
class WrapperWindow(Window, abc.ABC):
"""
A window class that uses an object (wrap_object) and reads frames from it to display its feed
:param wrap_object: an object to read frames from, can be of any type
"""
def __init__(self, window_name: str, wrap_object: Any, drawing_pipeline=EMPTY_PIPELINE):
Window.__init__(self, window_name=window_name, drawing_pipeline=drawing_pipeline)
self.wrap_object = wrap_object
def show_and_get_frame(self) -> Frame:
"""
shows one frame and returns it
:return: the frame if the window was not closed, None otherwise
"""
frame = self._get_frame()
if self.show_frame(frame):
return frame
return None
def show(self):
"""
reads from the wrap object and shows the frame until the window is closed
"""
while True:
if self.show_and_get_frame() is None:
return
@abc.abstractmethod
def _get_frame(self) -> Frame:
"""
unsafely reads a frame from the wrapped object and returns the read frame
:return: the read frame
"""
pass
def show_async(self):
"""
opens the video stream window on another thread
"""
Thread(target=self.show).start()
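# Illustrative sketch (CameraFrameReader is hypothetical and cv2 is an assumed
# dependency): the only thing a subclass must supply is _get_frame(). Whether the class
# is directly instantiable depends on the display methods of the Window base class; in
# practice it would be combined with an OpenCV-backed window class from the library.
def _example_camera_wrapper():
    import cv2  # local import so the sketch does not affect module import

    class CameraFrameReader(WrapperWindow):
        def _get_frame(self) -> Frame:
            ok, frame = self.wrap_object.read()
            return frame if ok else None

    # usage sketch: CameraFrameReader('camera', cv2.VideoCapture(0)).show()
    return CameraFrameReader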
|
dbapi_test.py
|
from datetime import datetime, date, timezone
from numpy.random import randint, uniform
from math import floor
from queue import Queue
from subprocess import Popen
from time import sleep
import threading, sys, os
sys.path.append(os.path.abspath(__file__).rsplit('tests/', 1)[0] + '/pysqream/')
import dbapi, pytest
q = Queue()
varchar_length = 10
nvarchar_length = 10
max_bigint = sys.maxsize if sys.platform not in ('win32', 'cygwin') else 2147483647
def generate_varchar(length):
return ''.join(chr(num) for num in randint(32, 128, length))
def print_test(test_desc):
print (f'\033[94mTest: {test_desc}\033[0m')
col_types = {'bool', 'tinyint', 'smallint', 'int', 'bigint', 'real', 'double', 'date', 'datetime', 'varchar({})'.format(varchar_length), 'nvarchar({})'.format(varchar_length)}
pos_test_vals = {'bool': (0, 1, True, False, 2, 3.6, 'test', (1997, 5, 9), (1997, 12, 12, 10, 10, 10)),
'tinyint': (randint(0, 255), randint(0, 255), 0, 255, True, False),
'smallint': (randint(-32768, 32767), 0, -32768, 32767, True, False),
'int': (randint(-2147483648, 2147483647), 0, -2147483648, 2147483647, True, False),
'bigint': (randint(1-max_bigint, max_bigint), 0, 1-max_bigint, max_bigint, True, False),
'real': (float('inf'), float('-inf'), float('+0'), float('-0'), round(uniform(1e-6, 1e6), 5), 837326.52428, True, False), # float('nan')
'double': (float('inf'), float('-inf'), float('+0'), float('-0'), uniform(1e-6, 1e6), True, False), # float('nan')
'date': (date(1998, 9, 24), date(2020, 12, 1), date(1997, 5, 9), date(1993, 7, 13), date(1001, 1, 1)),
'datetime': (datetime(1001, 1, 1, 10, 10, 10), datetime(1997, 11, 30, 10, 10, 10), datetime(1987, 7, 27, 20, 15, 45), datetime(1993, 12, 20, 17, 25, 46)),
'varchar': (generate_varchar(varchar_length), generate_varchar(varchar_length), generate_varchar(varchar_length), 'b '),
'nvarchar': ('א', 'א ', '', 'ab א')}
neg_test_vals = {'tinyint': (258, 3.6, 'test', (1997, 5, 9), (1997, 12, 12, 10, 10, 10)),
'smallint': (40000, 3.6, 'test', (1997, 5, 9), (1997, 12, 12, 10, 10, 10)),
'int': (9999999999, 3.6, 'test', (1997, 5, 9), (1997, 12, 12, 10, 10, 10)),
'bigint': (92233720368547758070, 3.6, 'test', (1997, 12, 12, 10, 10, 10)),
'real': ('test', (1997, 12, 12, 10, 10, 10)),
'double': ('test', (1997, 12, 12, 10, 10, 10)),
'date': (5, 3.6, (-8, 9, 1), (2012, 15, 6), (2012, 9, 45), 'test', False, True),
'datetime': (5, 3.6, (-8, 9, 1, 0, 0, 0), (2012, 15, 6, 0, 0, 0), (2012, 9, 45, 0, 0, 0), (2012, 9, 14, 26, 0, 0), (2012, 9, 14, 13, 89, 0), 'test', False, True),
'varchar': (5, 3.6, (1, 2), (1997, 12, 12, 10, 10, 10), False, True),
'nvarchar': (5, 3.6, (1, 2), (1997, 12, 12, 10, 10, 10), False, True)}
def start_stop(op = 'start', build_dir=None, ip=None):
Popen(('killall', '-9', 'sqreamd'))
sleep(5)
Popen(('killall', '-9', 'server_picker'))
sleep(5)
Popen(('killall', '-9', 'metadata_server'))
sleep(5)
if op =='start':
Popen((build_dir + 'metadata_server'))
sleep(5)
Popen((build_dir + 'server_picker', ip, '3105'))
sleep(5)
Popen((build_dir + 'sqreamd' ))
sleep(5)
sleep(5)
# @pytest.fixture(scope = 'module')
def connect_dbapi(clustered=False, use_ssl=False):
args = sys.argv
ip = args[1] if len(args) > 1 else '127.0.0.1'
port = (3109 if use_ssl else 3108) if clustered else (5001 if use_ssl else 5000)
return dbapi.connect(ip, port, 'master', 'sqream', 'sqream', clustered, use_ssl)
def connect_and_execute(num, cursor=False):
    con = connect_dbapi()
    if cursor:
        cur = con.cursor()
        cur.execute("select {}".format(num))
        res = cur.fetchall()
    else:
        con.execute("select {}".format(num))
        res = con.fetchall()
    q.put(res)
pytest.con = connect_dbapi()
pytest.ip = '127.0.0.1'
pytest.connect_and_execute = connect_and_execute
class TestPositive:
def test_positive(self):
print('positive tests')
for col_type in col_types:
trimmed_col_type = col_type.split('(')[0]
print(f'Inserted values test for column type {col_type}')
pytest.con.execute(f"create or replace table test (t_{trimmed_col_type} {col_type})")
for val in pos_test_vals[trimmed_col_type]:
pytest.con.execute('truncate table test')
rows = [(val,)]
pytest.con.executemany("insert into test values (?)", rows)
res = pytest.con.execute("select * from test").fetchall()[0][0]
                # compare the inserted value with the value read back
assert (
val == res or
(val != res and trimmed_col_type == 'bool' and val != 0 and res == True) or
(val != res and trimmed_col_type == 'varchar' and val != 0 and val.strip() == res) or
(val != res and trimmed_col_type == 'real' and val != 0 and abs(res-val) <= 0.1)
)
print(f'Null test for column type: {col_type}')
pytest.con.execute("create or replace table test (t_{} {})".format(trimmed_col_type, col_type))
pytest.con.executemany('insert into test values (?)', [(None,)])
res = pytest.con.execute('select * from test').fetchall()[0][0]
            assert res is None
def test_nulls(self):
print_test("Case statement with nulls")
pytest.con.execute("create or replace table test (xint int)")
pytest.con.executemany('insert into test values (?)', [(5,), (None,), (6,), (7,), (None,), (8,), (None,)])
pytest.con.executemany("select case when xint is null then 1 else 0 end from test")
expected_list = [0, 1, 0, 0, 1, 0, 1]
res_list = []
res_list += [x[0] for x in pytest.con.fetchall()]
assert expected_list == res_list
def test_bool(self):
print_test("Testing select true/false")
pytest.con.execute("select false")
res = pytest.con.fetchall()[0][0]
assert res == 0
pytest.con.execute("select true")
res = pytest.con.fetchall()[0][0]
assert res == 1
def test_when_running(self):
print_test("Running a statement when there is an open statement")
pytest.con.execute("select 1")
sleep(10)
res = pytest.con.execute("select 1").fetchall()[0][0]
assert res == 1
class TestNegative:
''' Negative Set/Get tests '''
def test_negative(self):
print_test('Negative tests')
for col_type in col_types:
if col_type == 'bool':
continue
trimmed_col_type = col_type.split('(')[0]
pytest.con.execute("create or replace table test (t_{} {})".format(trimmed_col_type, col_type))
for val in neg_test_vals[trimmed_col_type]:
rows = [(val,)]
with pytest.raises(Exception) as e:
pytest.con.executemany("insert into test values (?)", rows)
assert "Error packing columns. Check that all types match the respective column types" in str(e.value)
    def test_inconsistent_sizes(self):
print_test("Inconsistent sizes test")
pytest.con.execute("create or replace table test (xint int, yint int)")
with pytest.raises(Exception) as e:
pytest.con.executemany('insert into test values (?, ?)', [(5,), (6, 9), (7, 8)])
assert "Incosistent data sequences passed for inserting. Please use rows/columns of consistent length" in str(e.value)
def test_varchar_conversion(self):
print_test("Varchar - Conversion of a varchar to a smaller length")
pytest.con.execute("create or replace table test (test varchar(10))")
with pytest.raises(Exception) as e:
pytest.con.executemany("insert into test values ('aa12345678910')")
assert "expected response statementPrepared but got" in str(e.value)
def test_nvarchar_conversion(self):
print_test("Nvarchar - Conversion of a varchar to a smaller length")
pytest.con.execute("create or replace table test (test nvarchar(10))")
with pytest.raises(Exception) as e:
pytest.con.executemany("insert into test values ('aa12345678910')")
assert "expected response executed but got" in str(e.value)
def test_incorrect_fetchmany(self):
print_test("Incorrect usage of fetchmany - fetch without a statement")
pytest.con.execute("create or replace table test (xint int)")
with pytest.raises(Exception) as e:
pytest.con.fetchmany(2)
assert "No open statement while attempting fetch operation" in str(e.value)
def test_incorrect_fetchall(self):
print_test("Incorrect usage of fetchall")
pytest.con.execute("create or replace table test (xint int)")
pytest.con.executemany("select * from test")
with pytest.raises(Exception) as e:
pytest.con.fetchall(5)
assert "Bad argument to fetchall" in str(e.value)
def test_incorrect_fetchone(self):
print_test("Incorrect usage of fetchone")
pytest.con.execute("create or replace table test (xint int)")
pytest.con.executemany("select * from test")
with pytest.raises(Exception) as e:
pytest.con.fetchone(5)
assert "Bad argument to fetchone" in str(e.value)
def test_multi_statement(self):
print_test("Multi statements test")
with pytest.raises(Exception) as e:
pytest.con.execute("select 1; select 1;")
assert "expected one statement, got 2" in str(e.value)
def test_parametered_query(self):
print_test("Parametered query tests")
params = 6
pytest.con.execute("create or replace table test (xint int)")
pytest.con.executemany('insert into test values (?)', [(5,), (6,), (7,)])
with pytest.raises(Exception) as e:
pytest.con.execute('select * from test where xint > ?', str(params))
assert "Parametered queries not supported" in str(e.value)
def test_execute_closed_cursor(self):
print_test("running execute on a closed cursor")
cur = pytest.con.cursor()
cur.close()
with pytest.raises(Exception) as e:
cur.execute("select 1")
assert "Cursor has been closed" in str(e.value)
class TestFetch:
def test_fetch(self):
pytest.con.execute("create or replace table test (xint int)")
pytest.con.executemany('insert into test values (?)', [(1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,)])
# fetchmany(1) vs fetchone()
pytest.con.execute("select * from test")
res = pytest.con.fetchmany(1)[0][0]
pytest.con.execute("select * from test")
res2 = pytest.con.fetchone()[0]
assert res == res2
# fetchmany(-1) vs fetchall()
pytest.con.execute("select * from test")
res3 = pytest.con.fetchmany(-1)
pytest.con.execute("select * from test")
res4 = pytest.con.fetchall()
assert res3 == res4
# fetchone() loop
pytest.con.execute("select * from test")
for i in range(1, 11):
x = pytest.con.fetchone()[0]
assert x == i
def test_combined_fetch(self):
pytest.con.execute("create or replace table test (xint int)")
pytest.con.executemany('insert into test values (?)', [(1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,)])
pytest.con.execute("select * from test")
expected_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
res_list = []
res_list.append(pytest.con.fetchone()[0])
res_list += [x[0] for x in pytest.con.fetchmany(2)]
res_list.append(pytest.con.fetchone()[0])
res_list += [x[0] for x in pytest.con.fetchall()]
        assert expected_list == res_list
def test_fetch_after_data_read(self):
pytest.con.execute("create or replace table test (xint int)")
pytest.con.executemany('insert into test values (?)', [(1,)])
pytest.con.execute("select * from test")
x = pytest.con.fetchone()[0]
res = pytest.con.fetchone()
assert res is None
res = pytest.con.fetchall()
assert res == []
res = pytest.con.fetchmany(1)
assert res == []
class TestCursor:
def test_cursor_through_clustered(self):
con_clustered = dbapi.connect(pytest.ip, 3108, 'master', 'sqream', 'sqream', clustered=True)
cur = con_clustered.cursor()
assert cur.execute("select 1").fetchall()[0][0] == 1
def test_two_statements_same_cursor(self):
vals = [1]
cur = pytest.con.cursor()
cur.execute("select 1")
res1 = cur.fetchall()[0][0]
vals.append(res1)
cur.execute("select 1")
res2 = cur.fetchall()[0][0]
vals.append(res2)
assert all(x == vals[0] for x in vals)
def test_cursor_when_open_statement(self):
cur = pytest.con.cursor()
cur.execute("select 1")
sleep(10)
cur.execute("select 1")
res = cur.fetchall()[0][0]
assert res == 1
def test_fetch_after_all_read(self):
cur = pytest.con.cursor()
cur.execute("create or replace table test (xint int)")
cur.executemany('insert into test values (?)', [(1,)])
cur.execute("select * from test")
x = cur.fetchone()[0]
res = cur.fetchone()
assert res is None
res = pytest.con.fetchall()
assert res == []
res = pytest.con.fetchmany(1)
assert res == []
class TestString:
def test_insert_return_utf8(self):
pytest.con.execute("create or replace table test (xvarchar varchar(20))")
pytest.con.executemany('insert into test values (?)', [(u"hello world",), ("hello world",)])
pytest.con.execute("select * from test")
res = pytest.con.fetchall()
assert res[0][0] == res[1][0]
def test_strings_with_escaped_chars(self):
pytest.con.execute("create or replace table test (xvarchar varchar(20))")
values = [("\t",), ("\n",), ("\\n",), ("\\\n",), (" \\",), ("\\\\",), (" \nt",), ("'abd''ef'",), ("abd""ef",), ("abd\"ef",)]
pytest.con.executemany('insert into test values (?)', values)
pytest.con.executemany("select * from test")
expected_list = ['', '', '\\n', '\\', ' \\', '\\\\', ' \nt', "'abd''ef'", 'abdef', 'abd"ef']
res_list = []
res_list += [x[0] for x in pytest.con.fetchall()]
assert expected_list == res_list
class TestDatetime:
def test_different_timezones(self):
t1 = datetime.strptime(datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M"), '%Y-%m-%d %H:%M')
t2 = datetime.strptime(datetime.now().strftime("%Y-%m-%d %H:%M"), '%Y-%m-%d %H:%M')
pytest.con.execute("create or replace table test (xdatetime datetime)")
pytest.con.executemany('insert into test values (?)', [(t1,), (t2,)])
pytest.con.execute("select * from test")
res = pytest.con.fetchall()
assert res[0][0] != res[1][0]
def test_datetimes_with_microseconds(self):
t1 = datetime(1997, 5, 9, 4, 30, 10, 123456)
t2 = datetime(1997, 5, 9, 4, 30, 10, 987654)
pytest.con.execute("create or replace table test (xdatetime datetime)")
pytest.con.executemany('insert into test values (?)', [(t1,), (t2,)])
class TestThreads:
def test_concurrent_insert(self):
t1 = threading.Thread(target=pytest.connect_and_execute, args=(3, ))
t2 = threading.Thread(target=pytest.connect_and_execute, args=(3, ))
t1.start()
t2.start()
t1.join()
t2.join()
res1 = q.get()[0][0]
res2 = q.get()[0][0]
assert res1 == res2
def test_concurrent_insert_through_cursor(self):
t1 = threading.Thread(target=pytest.connect_and_execute, args=(5, True))
t2 = threading.Thread(target=pytest.connect_and_execute, args=(5, True))
t1.start()
t2.start()
t1.join()
t2.join()
res1 = q.get()[0][0]
res2 = q.get()[0][0]
assert res1 == res2
|
client.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2009 Christopher Lenz
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
from datetime import datetime
import os
import os.path
import shutil
import time
import tempfile
import threading
import unittest
from couchdb import client, http, util
from couchdb.tests import testutil
class ServerTestCase(testutil.TempDatabaseMixin, unittest.TestCase):
def test_init_with_resource(self):
sess = http.Session()
res = http.Resource(client.DEFAULT_BASE_URL, sess)
serv = client.Server(url=res)
serv.config()
def test_init_with_session(self):
sess = http.Session()
serv = client.Server(client.DEFAULT_BASE_URL, session=sess)
serv.config()
self.assertTrue(serv.resource.session is sess)
def test_exists(self):
self.assertTrue(client.Server(client.DEFAULT_BASE_URL))
self.assertFalse(client.Server('http://localhost:9999'))
def test_repr(self):
repr(self.server)
def test_server_vars(self):
version = self.server.version()
self.assertTrue(isinstance(version, util.strbase))
config = self.server.config()
self.assertTrue(isinstance(config, dict))
tasks = self.server.tasks()
self.assertTrue(isinstance(tasks, list))
def test_server_stats(self):
stats = self.server.stats()
self.assertTrue(isinstance(stats, dict))
stats = self.server.stats('httpd/requests')
self.assertTrue(isinstance(stats, dict))
self.assertTrue(len(stats) == 1 and len(stats['httpd']) == 1)
def test_get_db_missing(self):
self.assertRaises(http.ResourceNotFound,
lambda: self.server['couchdb-python/missing'])
def test_create_db_conflict(self):
name, db = self.temp_db()
self.assertRaises(http.PreconditionFailed, self.server.create,
name)
def test_delete_db(self):
name, db = self.temp_db()
assert name in self.server
self.del_db(name)
assert name not in self.server
def test_delete_db_missing(self):
self.assertRaises(http.ResourceNotFound, self.server.delete,
'couchdb-python/missing')
def test_replicate(self):
aname, a = self.temp_db()
bname, b = self.temp_db()
id, rev = a.save({'test': 'a'})
result = self.server.replicate(aname, bname)
self.assertEqual(result['ok'], True)
self.assertEqual(b[id]['test'], 'a')
doc = b[id]
doc['test'] = 'b'
b.update([doc])
self.server.replicate(bname, aname)
self.assertEqual(a[id]['test'], 'b')
self.assertEqual(b[id]['test'], 'b')
def test_replicate_continuous(self):
aname, a = self.temp_db()
bname, b = self.temp_db()
result = self.server.replicate(aname, bname, continuous=True)
self.assertEqual(result['ok'], True)
version = tuple(int(i) for i in self.server.version().split('.')[:2])
if version >= (0, 10):
self.assertTrue('_local_id' in result)
def test_iter(self):
aname, a = self.temp_db()
bname, b = self.temp_db()
dbs = list(self.server)
self.assertTrue(aname in dbs)
self.assertTrue(bname in dbs)
def test_len(self):
self.temp_db()
self.temp_db()
self.assertTrue(len(self.server) >= 2)
def test_uuids(self):
ls = self.server.uuids()
assert type(ls) == list
ls = self.server.uuids(count=10)
assert type(ls) == list and len(ls) == 10
def test_235_unicode_server(self):
url = client.DEFAULT_BASE_URL
if not isinstance(url, util.utype):
url = url.decode('utf-8')
server = client.Server(url)
dbname = 'couchdb-python/test-235-unicode-server'
db = server.create(dbname)
try:
db.update([{'foo': u'\ua000'}])
finally:
server.delete(dbname)
def test_basic_auth(self):
url = "http://root:password@localhost:5984/"
server = client.Server(url)
dbname = 'couchdb-python/test_basic_auth'
self.assertRaises(http.Unauthorized, server.create, dbname)
class DatabaseTestCase(testutil.TempDatabaseMixin, unittest.TestCase):
def test_save_new(self):
doc = {'foo': 'bar'}
id, rev = self.db.save(doc)
self.assertTrue(id is not None)
self.assertTrue(rev is not None)
self.assertEqual((id, rev), (doc['_id'], doc['_rev']))
doc = self.db.get(id)
self.assertEqual(doc['foo'], 'bar')
def test_save_new_with_id(self):
doc = {'_id': 'foo'}
id, rev = self.db.save(doc)
self.assertTrue(doc['_id'] == id == 'foo')
self.assertEqual(doc['_rev'], rev)
def test_save_existing(self):
doc = {}
id_rev_old = self.db.save(doc)
doc['foo'] = True
id_rev_new = self.db.save(doc)
self.assertTrue(doc['_rev'] == id_rev_new[1])
self.assertTrue(id_rev_old[1] != id_rev_new[1])
def test_save_new_batch(self):
doc = {'_id': 'foo'}
id, rev = self.db.save(doc, batch='ok')
self.assertTrue(rev is None)
self.assertTrue('_rev' not in doc)
def test_save_existing_batch(self):
doc = {'_id': 'foo'}
self.db.save(doc)
id_rev_old = self.db.save(doc)
id_rev_new = self.db.save(doc, batch='ok')
self.assertTrue(id_rev_new[1] is None)
self.assertEqual(id_rev_old[1], doc['_rev'])
def test_exists(self):
self.assertTrue(self.db)
self.assertFalse(client.Database('couchdb-python/missing'))
def test_name(self):
# Access name assigned during creation.
name, db = self.temp_db()
self.assertTrue(db.name == name)
        # Access lazily loaded name.
self.assertTrue(client.Database(db.resource.url).name == name)
def test_commit(self):
self.assertTrue(self.db.commit()['ok'] == True)
def test_create_large_doc(self):
        self.db['foo'] = {'data': '0123456789' * 110 * 1024}  # ~1 MB
self.assertEqual('foo', self.db['foo']['_id'])
def test_doc_id_quoting(self):
self.db['foo/bar'] = {'foo': 'bar'}
self.assertEqual('bar', self.db['foo/bar']['foo'])
del self.db['foo/bar']
self.assertEqual(None, self.db.get('foo/bar'))
def test_unicode(self):
self.db[u'føø'] = {u'bår': u'Iñtërnâtiônàlizætiøn', 'baz': 'ASCII'}
self.assertEqual(u'Iñtërnâtiônàlizætiøn', self.db[u'føø'][u'bår'])
self.assertEqual(u'ASCII', self.db[u'føø'][u'baz'])
def test_disallow_nan(self):
try:
self.db['foo'] = {'number': float('nan')}
self.fail('Expected ValueError')
except ValueError:
pass
def test_disallow_none_id(self):
deldoc = lambda: self.db.delete({'_id': None, '_rev': None})
self.assertRaises(ValueError, deldoc)
def test_doc_revs(self):
doc = {'bar': 42}
self.db['foo'] = doc
old_rev = doc['_rev']
doc['bar'] = 43
self.db['foo'] = doc
new_rev = doc['_rev']
new_doc = self.db.get('foo')
self.assertEqual(new_rev, new_doc['_rev'])
new_doc = self.db.get('foo', rev=new_rev)
self.assertEqual(new_rev, new_doc['_rev'])
old_doc = self.db.get('foo', rev=old_rev)
self.assertEqual(old_rev, old_doc['_rev'])
revs = [i for i in self.db.revisions('foo')]
self.assertEqual(revs[0]['_rev'], new_rev)
self.assertEqual(revs[1]['_rev'], old_rev)
gen = self.db.revisions('crap')
self.assertRaises(StopIteration, lambda: next(gen))
self.assertTrue(self.db.compact())
while self.db.info()['compact_running']:
pass
# 0.10 responds with 404, 0.9 responds with 500, same content
doc = 'fail'
try:
doc = self.db.get('foo', rev=old_rev)
except http.ServerError:
doc = None
assert doc is None
def test_attachment_crud(self):
doc = {'bar': 42}
self.db['foo'] = doc
old_rev = doc['_rev']
self.db.put_attachment(doc, 'Foo bar', 'foo.txt', 'text/plain')
self.assertNotEqual(old_rev, doc['_rev'])
doc = self.db['foo']
attachment = doc['_attachments']['foo.txt']
self.assertEqual(len('Foo bar'), attachment['length'])
self.assertEqual('text/plain', attachment['content_type'])
self.assertEqual(b'Foo bar',
self.db.get_attachment(doc, 'foo.txt').read())
self.assertEqual(b'Foo bar',
self.db.get_attachment('foo', 'foo.txt').read())
old_rev = doc['_rev']
self.db.delete_attachment(doc, 'foo.txt')
self.assertNotEqual(old_rev, doc['_rev'])
self.assertEqual(None, self.db['foo'].get('_attachments'))
def test_attachment_crud_with_files(self):
doc = {'bar': 42}
self.db['foo'] = doc
old_rev = doc['_rev']
fileobj = util.StringIO(b'Foo bar baz')
self.db.put_attachment(doc, fileobj, 'foo.txt')
self.assertNotEqual(old_rev, doc['_rev'])
doc = self.db['foo']
attachment = doc['_attachments']['foo.txt']
self.assertEqual(len('Foo bar baz'), attachment['length'])
self.assertEqual('text/plain', attachment['content_type'])
self.assertEqual(b'Foo bar baz',
self.db.get_attachment(doc, 'foo.txt').read())
self.assertEqual(b'Foo bar baz',
self.db.get_attachment('foo', 'foo.txt').read())
old_rev = doc['_rev']
self.db.delete_attachment(doc, 'foo.txt')
self.assertNotEqual(old_rev, doc['_rev'])
self.assertEqual(None, self.db['foo'].get('_attachments'))
def test_empty_attachment(self):
doc = {}
self.db['foo'] = doc
old_rev = doc['_rev']
self.db.put_attachment(doc, '', 'empty.txt')
self.assertNotEqual(old_rev, doc['_rev'])
doc = self.db['foo']
attachment = doc['_attachments']['empty.txt']
self.assertEqual(0, attachment['length'])
def test_default_attachment(self):
doc = {}
self.db['foo'] = doc
self.assertTrue(self.db.get_attachment(doc, 'missing.txt') is None)
sentinel = object()
self.assertTrue(self.db.get_attachment(doc, 'missing.txt', sentinel) is sentinel)
def test_attachment_from_fs(self):
tmpdir = tempfile.mkdtemp()
tmpfile = os.path.join(tmpdir, 'test.txt')
f = open(tmpfile, 'w')
f.write('Hello!')
f.close()
doc = {}
self.db['foo'] = doc
with open(tmpfile) as f:
self.db.put_attachment(doc, f)
doc = self.db.get('foo')
self.assertTrue(doc['_attachments']['test.txt']['content_type'] == 'text/plain')
shutil.rmtree(tmpdir)
def test_attachment_no_filename(self):
doc = {}
self.db['foo'] = doc
self.assertRaises(ValueError, self.db.put_attachment, doc, '')
def test_json_attachment(self):
doc = {}
self.db['foo'] = doc
self.db.put_attachment(doc, '{}', 'test.json', 'application/json')
self.assertEqual(self.db.get_attachment(doc, 'test.json').read(), b'{}')
def test_include_docs(self):
doc = {'foo': 42, 'bar': 40}
self.db['foo'] = doc
rows = list(self.db.query(
'function(doc) { emit(doc._id, null); }',
include_docs=True
))
self.assertEqual(1, len(rows))
self.assertEqual(doc, rows[0].doc)
def test_query_multi_get(self):
for i in range(1, 6):
self.db.save({'i': i})
res = list(self.db.query('function(doc) { emit(doc.i, null); }',
keys=list(range(1, 6, 2))))
self.assertEqual(3, len(res))
for idx, i in enumerate(range(1, 6, 2)):
self.assertEqual(i, res[idx].key)
def test_bulk_update_conflict(self):
docs = [
dict(type='Person', name='John Doe'),
dict(type='Person', name='Mary Jane'),
dict(type='City', name='Gotham City')
]
self.db.update(docs)
# update the first doc to provoke a conflict in the next bulk update
doc = docs[0].copy()
self.db[doc['_id']] = doc
results = self.db.update(docs)
self.assertEqual(False, results[0][0])
assert isinstance(results[0][2], http.ResourceConflict)
def test_bulk_update_all_or_nothing(self):
docs = [
dict(type='Person', name='John Doe'),
dict(type='Person', name='Mary Jane'),
dict(type='City', name='Gotham City')
]
self.db.update(docs)
# update the first doc to provoke a conflict in the next bulk update
doc = docs[0].copy()
doc['name'] = 'Jane Doe'
self.db[doc['_id']] = doc
results = self.db.update(docs, all_or_nothing=True)
self.assertEqual(True, results[0][0])
doc = self.db.get(doc['_id'], conflicts=True)
assert '_conflicts' in doc
revs = self.db.get(doc['_id'], open_revs='all')
assert len(revs) == 2
def test_bulk_update_bad_doc(self):
self.assertRaises(TypeError, self.db.update, [object()])
def test_copy_doc(self):
self.db['foo'] = {'status': 'testing'}
result = self.db.copy('foo', 'bar')
self.assertEqual(result, self.db['bar'].rev)
def test_copy_doc_conflict(self):
self.db['bar'] = {'status': 'idle'}
self.db['foo'] = {'status': 'testing'}
self.assertRaises(http.ResourceConflict, self.db.copy, 'foo', 'bar')
def test_copy_doc_overwrite(self):
self.db['bar'] = {'status': 'idle'}
self.db['foo'] = {'status': 'testing'}
result = self.db.copy('foo', self.db['bar'])
doc = self.db['bar']
self.assertEqual(result, doc.rev)
self.assertEqual('testing', doc['status'])
def test_copy_doc_srcobj(self):
self.db['foo'] = {'status': 'testing'}
self.db.copy(self.db['foo'], 'bar')
self.assertEqual('testing', self.db['bar']['status'])
def test_copy_doc_destobj_norev(self):
self.db['foo'] = {'status': 'testing'}
self.db.copy('foo', {'_id': 'bar'})
self.assertEqual('testing', self.db['bar']['status'])
def test_copy_doc_src_dictlike(self):
class DictLike(object):
def __init__(self, doc):
self.doc = doc
def items(self):
return self.doc.items()
self.db['foo'] = {'status': 'testing'}
self.db.copy(DictLike(self.db['foo']), 'bar')
self.assertEqual('testing', self.db['bar']['status'])
def test_copy_doc_dest_dictlike(self):
class DictLike(object):
def __init__(self, doc):
self.doc = doc
def items(self):
return self.doc.items()
self.db['foo'] = {'status': 'testing'}
self.db['bar'] = {}
self.db.copy('foo', DictLike(self.db['bar']))
self.assertEqual('testing', self.db['bar']['status'])
def test_copy_doc_src_baddoc(self):
self.assertRaises(TypeError, self.db.copy, object(), 'bar')
def test_copy_doc_dest_baddoc(self):
self.assertRaises(TypeError, self.db.copy, 'foo', object())
def test_changes(self):
self.db['foo'] = {'bar': True}
self.assertEqual(self.db.changes(since=0)['last_seq'], 1)
first = next(self.db.changes(feed='continuous'))
self.assertEqual(first['seq'], 1)
self.assertEqual(first['id'], 'foo')
def test_changes_releases_conn(self):
# Consume an entire changes feed to read the whole response, then check
# that the HTTP connection made it to the pool.
list(self.db.changes(feed='continuous', timeout=0))
scheme, netloc = util.urlsplit(client.DEFAULT_BASE_URL)[:2]
self.assertTrue(self.db.resource.session.connection_pool.conns[(scheme, netloc)])
def test_changes_releases_conn_when_lastseq(self):
# Consume a changes feed, stopping at the 'last_seq' item, i.e. don't
# let the generator run any further, then check the connection made it
# to the pool.
for obj in self.db.changes(feed='continuous', timeout=0):
if 'last_seq' in obj:
break
scheme, netloc = util.urlsplit(client.DEFAULT_BASE_URL)[:2]
self.assertTrue(self.db.resource.session.connection_pool.conns[(scheme, netloc)])
def test_changes_conn_usable(self):
# Consume a changes feed to get a used connection in the pool.
list(self.db.changes(feed='continuous', timeout=0))
# Try using the connection again to make sure the connection was left
# in a good state from the previous request.
self.assertTrue(self.db.info()['doc_count'] == 0)
def test_changes_heartbeat(self):
def wakeup():
time.sleep(.3)
self.db.save({})
threading.Thread(target=wakeup).start()
for change in self.db.changes(feed='continuous', heartbeat=100):
break
def test_purge(self):
doc = {'a': 'b'}
self.db['foo'] = doc
self.assertEqual(self.db.purge([doc])['purge_seq'], 1)
def test_json_encoding_error(self):
doc = {'now': datetime.now()}
self.assertRaises(TypeError, self.db.save, doc)
def test_security(self):
security = self.db.security
self.assertEqual(security, {})
security['members'] = {'names': ['test'], 'roles': []}
self.db.security = security
class ViewTestCase(testutil.TempDatabaseMixin, unittest.TestCase):
def test_row_object(self):
row = list(self.db.view('_all_docs', keys=['blah']))[0]
self.assertEqual(row.id, None)
self.assertEqual(row.key, 'blah')
self.assertEqual(row.value, None)
self.assertEqual(row.error, 'not_found')
self.db.save({'_id': 'xyz', 'foo': 'bar'})
row = list(self.db.view('_all_docs', keys=['xyz']))[0]
self.assertEqual(row.id, 'xyz')
self.assertEqual(row.key, 'xyz')
self.assertEqual(list(row.value.keys()), ['rev'])
self.assertEqual(row.error, None)
def test_view_multi_get(self):
for i in range(1, 6):
self.db.save({'i': i})
self.db['_design/test'] = {
'language': 'javascript',
'views': {
'multi_key': {'map': 'function(doc) { emit(doc.i, null); }'}
}
}
res = list(self.db.view('test/multi_key', keys=list(range(1, 6, 2))))
self.assertEqual(3, len(res))
for idx, i in enumerate(range(1, 6, 2)):
self.assertEqual(i, res[idx].key)
def test_ddoc_info(self):
self.db['_design/test'] = {
'language': 'javascript',
'views': {
'test': {'map': 'function(doc) { emit(doc.type, null); }'}
}
}
info = self.db.info('test')
self.assertEqual(info['view_index']['compact_running'], False)
def test_view_compaction(self):
for i in range(1, 6):
self.db.save({'i': i})
self.db['_design/test'] = {
'language': 'javascript',
'views': {
'multi_key': {'map': 'function(doc) { emit(doc.i, null); }'}
}
}
self.db.view('test/multi_key')
self.assertTrue(self.db.compact('test'))
def test_view_cleanup(self):
for i in range(1, 6):
self.db.save({'i': i})
self.db['_design/test'] = {
'language': 'javascript',
'views': {
'multi_key': {'map': 'function(doc) { emit(doc.i, null); }'}
}
}
self.db.view('test/multi_key')
ddoc = self.db['_design/test']
ddoc['views'] = {
'ids': {'map': 'function(doc) { emit(doc._id, null); }'}
}
self.db.update([ddoc])
self.db.view('test/ids')
self.assertTrue(self.db.cleanup())
def test_view_function_objects(self):
if 'python' not in self.server.config()['query_servers']:
return
for i in range(1, 4):
self.db.save({'i': i, 'j':2*i})
def map_fun(doc):
yield doc['i'], doc['j']
res = list(self.db.query(map_fun, language='python'))
self.assertEqual(3, len(res))
for idx, i in enumerate(range(1,4)):
self.assertEqual(i, res[idx].key)
self.assertEqual(2*i, res[idx].value)
def reduce_fun(keys, values):
return sum(values)
res = list(self.db.query(map_fun, reduce_fun, 'python'))
self.assertEqual(1, len(res))
self.assertEqual(12, res[0].value)
def test_init_with_resource(self):
self.db['foo'] = {}
view = client.PermanentView(self.db.resource('_all_docs').url, '_all_docs')
self.assertEqual(len(list(view())), 1)
def test_iter_view(self):
self.db['foo'] = {}
view = client.PermanentView(self.db.resource('_all_docs').url, '_all_docs')
self.assertEqual(len(list(view)), 1)
def test_update_seq(self):
self.db['foo'] = {}
rows = self.db.view('_all_docs', update_seq=True)
self.assertEqual(rows.update_seq, 1)
def test_tmpview_repr(self):
mapfunc = "function(doc) {emit(null, null);}"
view = client.TemporaryView(self.db.resource('_temp_view'), mapfunc)
self.assertTrue('TemporaryView' in repr(view))
self.assertTrue(mapfunc in repr(view))
def test_wrapper_iter(self):
class Wrapper(object):
def __init__(self, doc):
pass
self.db['foo'] = {}
self.assertTrue(isinstance(list(self.db.view('_all_docs', wrapper=Wrapper))[0], Wrapper))
def test_wrapper_rows(self):
class Wrapper(object):
def __init__(self, doc):
pass
self.db['foo'] = {}
self.assertTrue(isinstance(self.db.view('_all_docs', wrapper=Wrapper).rows[0], Wrapper))
def test_properties(self):
for attr in ['rows', 'total_rows', 'offset']:
self.assertTrue(getattr(self.db.view('_all_docs'), attr) is not None)
def test_rowrepr(self):
self.db['foo'] = {}
rows = list(self.db.query("function(doc) {emit(null, 1);}"))
self.assertTrue('Row' in repr(rows[0]))
self.assertTrue('id' in repr(rows[0]))
rows = list(self.db.query("function(doc) {emit(null, 1);}", "function(keys, values, combine) {return sum(values);}"))
self.assertTrue('Row' in repr(rows[0]))
self.assertTrue('id' not in repr(rows[0]))
class ShowListTestCase(testutil.TempDatabaseMixin, unittest.TestCase):
show_func = """
function(doc, req) {
return {"body": req.id + ":" + (req.query.r || "<default>")};
}
"""
list_func = """
function(head, req) {
start({headers: {'Content-Type': 'text/csv'}});
if (req.query.include_header) {
send('id' + '\\r\\n');
}
var row;
while (row = getRow()) {
send(row.id + '\\r\\n');
}
}
"""
design_doc = {'_id': '_design/foo',
'shows': {'bar': show_func},
'views': {'by_id': {'map': "function(doc) {emit(doc._id, null)}"},
'by_name': {'map': "function(doc) {emit(doc.name, null)}"}},
'lists': {'list': list_func}}
def setUp(self):
super(ShowListTestCase, self).setUp()
# Workaround for possible bug in CouchDB. Adding a timestamp avoids a
# 409 Conflict error when pushing the same design doc that existed in a
# now deleted database.
design_doc = dict(self.design_doc)
design_doc['timestamp'] = time.time()
self.db.save(design_doc)
self.db.update([{'_id': '1', 'name': 'one'}, {'_id': '2', 'name': 'two'}])
def test_show_urls(self):
self.assertEqual(self.db.show('_design/foo/_show/bar')[1].read(), b'null:<default>')
self.assertEqual(self.db.show('foo/bar')[1].read(), b'null:<default>')
def test_show_docid(self):
self.assertEqual(self.db.show('foo/bar')[1].read(), b'null:<default>')
self.assertEqual(self.db.show('foo/bar', '1')[1].read(), b'1:<default>')
self.assertEqual(self.db.show('foo/bar', '2')[1].read(), b'2:<default>')
def test_show_params(self):
self.assertEqual(self.db.show('foo/bar', r='abc')[1].read(), b'null:abc')
def test_list(self):
self.assertEqual(self.db.list('foo/list', 'foo/by_id')[1].read(), b'1\r\n2\r\n')
self.assertEqual(self.db.list('foo/list', 'foo/by_id', include_header='true')[1].read(), b'id\r\n1\r\n2\r\n')
def test_list_keys(self):
self.assertEqual(self.db.list('foo/list', 'foo/by_id', keys=['1'])[1].read(), b'1\r\n')
def test_list_view_params(self):
self.assertEqual(self.db.list('foo/list', 'foo/by_name', startkey='o', endkey='p')[1].read(), b'1\r\n')
self.assertEqual(self.db.list('foo/list', 'foo/by_name', descending=True)[1].read(), b'2\r\n1\r\n')
class UpdateHandlerTestCase(testutil.TempDatabaseMixin, unittest.TestCase):
update_func = """
function(doc, req) {
if (!doc) {
if (req.id) {
return [{_id : req.id}, "new doc"]
}
return [null, "empty doc"];
}
doc.name = "hello";
return [doc, "hello doc"];
}
"""
design_doc = {'_id': '_design/foo',
'language': 'javascript',
'updates': {'bar': update_func}}
def setUp(self):
super(UpdateHandlerTestCase, self).setUp()
# Workaround for possible bug in CouchDB. Adding a timestamp avoids a
# 409 Conflict error when pushing the same design doc that existed in a
# now deleted database.
design_doc = dict(self.design_doc)
design_doc['timestamp'] = time.time()
self.db.save(design_doc)
self.db.update([{'_id': 'existed', 'name': 'bar'}])
def test_empty_doc(self):
self.assertEqual(self.db.update_doc('foo/bar')[1].read(), b'empty doc')
def test_new_doc(self):
self.assertEqual(self.db.update_doc('foo/bar', 'new')[1].read(), b'new doc')
def test_update_doc(self):
self.assertEqual(self.db.update_doc('foo/bar', 'existed')[1].read(), b'hello doc')
class ViewIterationTestCase(testutil.TempDatabaseMixin, unittest.TestCase):
num_docs = 100
def docfromnum(self, num):
return {'_id': util.utype(num), 'num': int(num / 2)}
def docfromrow(self, row):
return {'_id': row['id'], 'num': row['key']}
def setUp(self):
super(ViewIterationTestCase, self).setUp()
design_doc = {'_id': '_design/test',
'views': {'nums': {'map': 'function(doc) {emit(doc.num, null);}'},
'nulls': {'map': 'function(doc) {emit(null, null);}'}}}
self.db.save(design_doc)
self.db.update([self.docfromnum(num) for num in range(self.num_docs)])
def test_allrows(self):
rows = list(self.db.iterview('test/nums', 10))
self.assertEqual(len(rows), self.num_docs)
self.assertEqual([self.docfromrow(row) for row in rows],
[self.docfromnum(num) for num in range(self.num_docs)])
def test_batchsizes(self):
# Check silly _batch values.
self.assertRaises(ValueError, lambda: next(self.db.iterview('test/nums', 0)))
self.assertRaises(ValueError, lambda: next(self.db.iterview('test/nums', -1)))
# Test various _batch sizes that are likely to cause trouble.
self.assertEqual(len(list(self.db.iterview('test/nums', 1))), self.num_docs)
self.assertEqual(len(list(self.db.iterview('test/nums', int(self.num_docs / 2)))), self.num_docs)
self.assertEqual(len(list(self.db.iterview('test/nums', self.num_docs * 2))), self.num_docs)
self.assertEqual(len(list(self.db.iterview('test/nums', self.num_docs - 1))), self.num_docs)
self.assertEqual(len(list(self.db.iterview('test/nums', self.num_docs))), self.num_docs)
self.assertEqual(len(list(self.db.iterview('test/nums', self.num_docs + 1))), self.num_docs)
def test_batchsizes_with_skip(self):
self.assertEqual(
len(list(self.db.iterview('test/nums', self.num_docs // 10, skip=self.num_docs // 2))),
self.num_docs // 2)
def test_limit(self):
# limit=0 doesn't make sense for iterview.
self.assertRaises(ValueError, lambda: next(self.db.iterview('test/nums', 10, limit=0)))
# Test various limit sizes that are likely to cause trouble.
for limit in [1, int(self.num_docs / 4), self.num_docs - 1, self.num_docs,
self.num_docs + 1]:
self.assertEqual([self.docfromrow(doc) for doc in self.db.iterview('test/nums', 10, limit=limit)],
[self.docfromnum(x) for x in range(min(limit, self.num_docs))])
# Test limit same as batch size, in case of weird edge cases.
limit = int(self.num_docs / 4)
self.assertEqual([self.docfromrow(doc) for doc in self.db.iterview('test/nums', limit, limit=limit)],
[self.docfromnum(x) for x in range(limit)])
def test_descending(self):
self.assertEqual([self.docfromrow(doc) for doc in self.db.iterview('test/nums', 10, descending=True)],
[self.docfromnum(x) for x in range(self.num_docs - 1, -1, -1)])
self.assertEqual([self.docfromrow(doc) for doc in self.db.iterview('test/nums', 10, limit=int(self.num_docs / 4), descending=True)],
[self.docfromnum(x) for x in range(self.num_docs - 1, int(self.num_docs * 3 / 4) - 1, -1)])
def test_startkey(self):
self.assertEqual([self.docfromrow(doc) for doc in self.db.iterview('test/nums', 10, startkey=int(self.num_docs / 2) - 1)],
[self.docfromnum(x) for x in range(self.num_docs - 2, self.num_docs)])
self.assertEqual([self.docfromrow(doc) for doc in self.db.iterview('test/nums', 10, startkey=1, descending=True)],
[self.docfromnum(x) for x in range(3, -1, -1)])
def test_nullkeys(self):
self.assertEqual(len(list(self.db.iterview('test/nulls', 10))), self.num_docs)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ServerTestCase, 'test'))
suite.addTest(unittest.makeSuite(DatabaseTestCase, 'test'))
suite.addTest(unittest.makeSuite(ViewTestCase, 'test'))
suite.addTest(unittest.makeSuite(ShowListTestCase, 'test'))
suite.addTest(unittest.makeSuite(UpdateHandlerTestCase, 'test'))
suite.addTest(unittest.makeSuite(ViewIterationTestCase, 'test'))
suite.addTest(testutil.doctest_suite(client))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
zip_crack-GIL-Thread-Effect.py
|
#!/usr/bin/python
# Created by c@caine
# On: 13/02/2017
# --- Preamble --- #
import sys
import zipfile
from threading import Thread
# --- Declarations --- #
zip_file = sys.argv[1]
unzip_me = zipfile.ZipFile(zip_file)
word_list = sys.argv[2]
# --- Functions --- #
def extract_file(unzip_me, password):
    try:
        # ZipFile.extractall expects the password as bytes
        unzip_me.extractall(pwd=password.encode('utf-8'))
        print('[+] Password = ' + password + '\n')
    except Exception:
        pass
def main ():
with open (word_list) as pass_file:
for line in pass_file.readlines():
password = line.strip('\n')
t = Thread (target=extract_file, args=(unzip_me, password))
t.start()
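# Sketch (not part of the original script): main() above starts one thread per
# candidate password, which can exhaust resources on a large wordlist and,
# since the extraction work is CPU-bound under the GIL, yields little speedup
# anyway. A bounded pool is a common alternative; max_workers=8 is an
# arbitrary illustrative choice.
from concurrent.futures import ThreadPoolExecutor

def main_pooled(max_workers=8):
    with open(word_list) as pass_file, ThreadPoolExecutor(max_workers=max_workers) as pool:
        for line in pass_file:
            pool.submit(extract_file, unzip_me, line.strip('\n'))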
# --- Main --- #
if __name__ == "__main__":
main()
|
musiccontrol.py
|
# Music Control...
# Music Player:
# from glob import glob
from re import I
from tkinter import *
from tkinter import filedialog
from traceback import *
from win32com.client import Dispatch
import time,eyed3,threading
import os, sys
from sys import *
from os import *
import time
# from time import *
# do not use 'from time import *' here: the code below calls time.sleep(), so the module must stay bound to the name 'time'.
# global variables.
name = []
list_name = None
var_scale = None
progress_scal = None
vio_scale = None
root = None
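# Note (added comment): openfile() below uses a mutable default argument,
# index=[1], as a persistent counter across calls; the last element of that
# list numbers the next entry appended to the playlist text widget.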
def openfile(index = [1]):
"""
open the files.
return none.
"""
global total,name, list_name, wmp
filenames = filedialog.askopenfilenames(title = "SelectingMusicPlayer.",filetypes =[("mp3Files.","*.mp3"),("WMAFiles.","*.wma"),("WAVFiles.","*.wav")])
if filenames:
for i in range(len(filenames)):
media = wmp.newMedia(filenames[i])
wmp.currentPlaylist.appendItem(media)
print(filenames[i])
coco = eyed3.load(filenames[i])
# eyed3 module read mp3 messages..
total = int(coco.info.time_secs)
minute = int(coco.info.time_secs)//60
sec = int(coco.info.time_secs)%60
length = int(coco.info.time_secs)
name = filenames[i].split("/")
i =index[-1]
list_name.insert(END,str(i)+"."+name[-1])
list_name.insert(END," "*6)
if sec >=10:
list_name.insert(END,"0%d:%d" %(minute,sec)+ "\n")
else:
list_name.insert(END,"0%s:0%d" %(minute,sec)+ "\n")
i = i +1
index.append(i)
def play(event = None):
"""
play the song.
return none.
"""
global wmp
# root.title("%s" % name[-1])
per_thread = threading.Thread(target = per)
    per_thread.daemon = True
wmp.controls.play()
per_thread.start()
# print(wmp.currentMedia.duration)
def per():
"""
reset the processing.
return none.
"""
global total, progress_scal, root, wmp
while wmp.playState !=1:
progress_scal.set(int(wmp.controls.currentPosition))
progress_scal.config(label = wmp.controls.currentPositionString)
progress_scal.config(to = total,tickinterval = 50)
time.sleep(1)
root.title("%s" % wmp.currentMedia.name)
def stop():
"""
stop the player.
return none.
"""
global wmp
wmp.controls.stop()
def pause(event = None):
"""
pause the player.
return none.
"""
global wmp
wmp.controls.pause()
def uselist():
"""
none.
return none.
"""
pass
def fullscr():
"""
none.
return none.
"""
pass
def exitit():
"""
destroy the ui player.
return none.
"""
global root
root.destroy()
def Previous_it():
"""
the prior song.
return none.
"""
global wmp
wmp.controls.previous()
def Next_it():
"""
the next song.
return none.
"""
global wmp
wmp.controls.next()
def Volume_ctr(none):
"""
get the volume.
return none.
"""
global wmp
wmp.settings.Volume = vio_scale.get()
def Volume_add(i=[0]):
"""
volume up.
return none.
"""
global vio_scale, wmp
wmp.settings.Volume =wmp.settings.Volume+5
i.append(wmp.settings.Volume)
vio_scale.set(wmp.settings.Volume)
def Volume_minus(i=[0]):
"""
volume down.
return none.
"""
global vio_scale, wmp
wmp.settings.Volume = wmp.settings.Volume -5
i.append(wmp.settings.Volume)
vio_scale.set(wmp.settings.Volume)
def Scale_ctr(none):
"""
get the current position.
return none.
"""
global wmp
wmp.controls.currentPosition = var_scale.get()
print(wmp.currentMedia.duration)
def Clear_list():
"""
clear the song list.
return none.
"""
global list_name, name, index, wmp
wmp.currentPlaylist.clear()
list_name.delete(1.0,END)
name = []
index = []
def List_random():
"""
random playing.
return none.
"""
global wmp
wmp.settings.setMode("shuffle",True)
play()
def List_loop():
"""
loop playing.
return none.
"""
global wmp
wmp.settings.setMode("loop",True)
play()
# global variable...
wmp = None
# a global variable.
def main_music_playing():
"""
the method to control the playing of the music..
just like the main method.....
"""
global wmp, list_name, var_scale, name, progress_scal, vio_scale, root
root =Tk()
root.title('MusicPlayer.')
root.geometry("704x467")
# set the size of the video player.
wmp = Dispatch("WMPlayer.OCX")
canvas = Canvas(root,width =220,height = 150,bg = "#DCDCDC")
img = PhotoImage(file =os.getcwd() + '\\\\pythonfiles\\\\music_control\\\\111.jpg')
    img_id = canvas.create_image((110, 77), image=img)
    canvas.place(x=0, y=0)
    canvas.coords(img_id, 100, 50)
    canvas.grid(row=0, column=0, sticky="nw", rowspan=2)
progress_lab = LabelFrame(root,text = "PlayingProcess.")
progress_lab.grid(row =2,column =0,sticky = "we",rowspan = 2)
var_scale = DoubleVar()
progress_scal = Scale(progress_lab,orient = HORIZONTAL,showvalue = 0,length =180,variable = var_scale)
progress_scal.bind("<Button-1>",pause)
progress_scal.bind("")
progress_scal.bind("<ButtonRelease-1>",play)
progress_scal.grid(row =3,column =0)
modee_lab = LabelFrame(root,text = "PlayingMethod.")
modee_lab.grid(row =4,column =0,rowspan =2,sticky = "ws")
var_mode = IntVar()
randomradio = Radiobutton(modee_lab,variable = var_mode,value = 1,text ="RandomPlay.",command =List_random )
randomradio.grid(row =4,column =2)
# below the two are bonded..
inturnradio = Radiobutton(modee_lab,variable = var_mode,value =2,text= "OneByOnePlay.",command = play)
inturnradio.grid(row=4,column =3)
alloop = Radiobutton(modee_lab,variable = var_mode,value =2,text = "AllCirclePlay.",command = List_loop)
alloop.grid(row =5,column = 2)
sinloop = Radiobutton(modee_lab,variable = var_mode,value =3,text = "OneCirclePlay.")
sinloop.grid(row =5,column =3)
previous_play = Button(modee_lab,text = "<<=PriorSong",command = Previous_it)
previous_play.grid(row =6,column =2,rowspan =2,pady =10)
next_play = Button(modee_lab,text = "NextSong=>>",command = Next_it)
next_play.grid(row =6,column =3,rowspan =2,pady =10)
var_volume = IntVar()
vioce_lab = LabelFrame(root,text = "VolumeControl.")
vioce_lab.grid(row =8,column =0,sticky = "wes")
vio_scale = Scale(vioce_lab,orient = HORIZONTAL,length =170,variable = var_volume,command =Volume_ctr)
vio_scale.set(30)
vio_scale.grid(row =8,column =0)
    vio_down = Button(vioce_lab, width=8, text="Volume -", command=Volume_minus)
    vio_down.grid(row=9, column=0, sticky="w")
    vio_up = Button(vioce_lab, width=8, text="Volume +", command=Volume_add)
    vio_up.grid(row=9, column=0, sticky="e")
ctr_lab = LabelFrame(root,text = "PlayingControl.",height =130)
ctr_lab.grid(row =0,column =1,rowspan =12,sticky = "ns")
btn_open = Button(ctr_lab,text ="SelectMusicFiles",width =15,command = openfile, height=3)
btn_open.grid(row=0,column =1)
btn_play = Button(ctr_lab,text ="PlayMusic",width =15,command = play, height=3)
btn_play.grid(row =1,column =1,pady =5)
btn_stop = Button(ctr_lab,text ="StopMusic",width =15,command = stop, height=3)
btn_stop.grid(row =2,column =1,pady =5)
btn_pause = Button(ctr_lab,text ="PauseMusic",width =15,command = pause, height=3)
btn_pause.grid(row =3,column =1,pady =5)
listdel_all = Button(ctr_lab,width =15,text = "ClearMusicList",command = Clear_list, height=3)
listdel_all.grid(row =8,column =1,sticky ="nw",pady =5)
min_btn = Button(ctr_lab,width =15,text = "MinmizeWindow",command = root.iconify, height=3)
min_btn.grid(row =13,column =1)
list_name = Text(root,height =28,width =50)
list_name.grid(row =0,column =2,sticky = "n",rowspan =6)
root.mainloop()
# Reference: https://www.jb51.net/article/86641.htm
# main - > test.
# if __name__ == "__main__":
# """
# test the method of controlling the music playing..
# main method..
# """
# need to run....
# main_music_playing()
#
# pass
|
pixiv.py
|
#!/usr/bin/env python3
"""
pixiv
Usage:
pixiv.py
pixiv.py <id>...
pixiv.py -r [-d | --date=<date>]
pixiv.py -u
Arguments:
<id> user_ids
Options:
-r Download by ranking
-d <date> --date <date> Target date
-u Update exist folder
-h --help Show this screen
-v --version Show version
Examples:
pixiv.py 7210261 1980643
pixiv.py -r -d 2016-09-24
"""
import datetime
import math
import os
import queue
import re
import sys
import threading
import time
import requests
from docopt import docopt
from tqdm import tqdm
from api import PixivApi
from i18n import i18n as _
from model import PixivIllustModel
_THREADING_NUMBER = 10
_finished_download = 0
_CREATE_FOLDER_LOCK = threading.Lock()
_PROGRESS_LOCK = threading.Lock()
_SPEED_LOCK = threading.Lock()
_Global_Download = 0
_error_count = {}
_fast_mode_size = 20
_MAX_ERROR_COUNT = 5
def get_default_save_path():
current_path = os.path.dirname(os.path.abspath(sys.argv[0]))
filepath = os.path.join(current_path, 'illustrations')
if not os.path.exists(filepath):
with _CREATE_FOLDER_LOCK:
if not os.path.exists(os.path.dirname(filepath)):
os.makedirs(os.path.dirname(filepath))
os.makedirs(filepath)
return filepath
def get_speed(elapsed):
"""Get current download speed"""
with _SPEED_LOCK:
global _Global_Download
down = _Global_Download
_Global_Download = 0
speed = down / elapsed
if speed == 0:
return '%8.2f /s' % 0
units = [' B', 'KB', 'MB', 'GB', 'TB', 'PB']
unit = math.floor(math.log(speed, 1024.0))
speed /= math.pow(1024.0, unit)
return '%6.2f %s/s' % (speed, units[unit])
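# Worked example (comment only): if _Global_Download is 3_145_728 bytes and
# elapsed is 1.0 s, then speed = 3_145_728 B/s, floor(log(speed, 1024)) = 2,
# so the value is divided by 1024**2 and formatted as '  3.00 MB/s'.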
def print_progress(max_size):
global _finished_download
pbar = tqdm(total=max_size)
last = 0
while _finished_download != max_size:
pbar.update(_finished_download - last)
last = _finished_download
time.sleep(0.5)
pbar.update(_finished_download - last)
pbar.close()
def download_file(url, filepath):
headers = {'Referer': 'http://www.pixiv.net/'}
r = requests.get(url, headers=headers, stream=True, timeout=PixivApi.timeout)
if r.status_code == requests.codes.ok:
total_length = r.headers.get('content-length')
if total_length:
data = []
for chunk in r.iter_content(1024 * 16):
data.append(chunk)
with _SPEED_LOCK:
global _Global_Download
_Global_Download += len(chunk)
with open(filepath, 'wb') as f:
list(map(f.write, data))
else:
raise ConnectionError('\r', _('Connection error: %s') % r.status_code)
def download_threading(download_queue):
global _finished_download
while not download_queue.empty():
illustration = download_queue.get()
filepath = illustration['path']
filename = illustration['file']
url = illustration['url']
count = _error_count.get(url, 0)
if count < _MAX_ERROR_COUNT:
if not os.path.exists(filepath):
with _CREATE_FOLDER_LOCK:
if not os.path.exists(os.path.dirname(filepath)):
os.makedirs(os.path.dirname(filepath))
try:
download_file(url, filepath)
with _PROGRESS_LOCK:
_finished_download += 1
except Exception as e:
if count < _MAX_ERROR_COUNT:
print(_('%s => %s download error, retry') % (e, filename))
download_queue.put(illustration)
_error_count[url] = count + 1
else:
print(url, 'reach max retries, canceled')
with _PROGRESS_LOCK:
_finished_download += 1
download_queue.task_done()
def start_and_wait_download_threading(download_queue, count):
"""start download threading and wait till complete"""
progress_t = threading.Thread(target=print_progress, args=(count,))
progress_t.daemon = True
progress_t.start()
for i in range(_THREADING_NUMBER):
download_t = threading.Thread(target=download_threading, args=(download_queue,))
download_t.daemon = True
download_t.start()
progress_t.join()
download_queue.join()
def get_filepath(url, illustration, save_path='.', add_user_folder=False, add_rank=False):
"""return (filename,filepath)"""
if add_user_folder:
user_id = illustration.user_id
user_name = illustration.user_name
current_path = get_default_save_path()
cur_dirs = list(filter(os.path.isdir, [os.path.join(current_path, i) for i in os.listdir(current_path)]))
cur_user_ids = [os.path.basename(cur_dir).split()[0] for cur_dir in cur_dirs]
if user_id not in cur_user_ids:
dir_name = re.sub(r'[<>:"/\\|\?\*]', ' ', user_id + ' ' + user_name)
else:
dir_name = list(i for i in cur_dirs if os.path.basename(i).split()[0] == user_id)[0]
save_path = os.path.join(save_path, dir_name)
filename = url.split('/')[-1]
# name, ext = os.path.splitext(filename)
if add_rank:
# name = illustration.rank + ' - ' + name
filename = illustration.rank + ' - ' + filename
# filename = name + ' - ' + illustration.title + ext
filepath = os.path.join(save_path, filename)
return filename, filepath
def check_files(illustrations, save_path='.', add_user_folder=False, add_rank=False):
download_queue = queue.Queue()
index_list = []
count = 0
if illustrations:
last_i = -1
for index, illustration in enumerate(illustrations):
if not illustration.image_urls:
continue
elif illustration.type == 'ugoira':
continue
else:
for url in illustration.image_urls:
filename, filepath = get_filepath(url, illustration, save_path, add_user_folder, add_rank)
if os.path.exists(filepath):
continue
else:
if last_i != index:
last_i = index
index_list.append(index)
download_queue.put({'url': url, 'file': filename, 'path': filepath})
count += 1
return download_queue, count, index_list
def count_illustrations(illustrations):
return sum(len(i.image_urls) for i in illustrations)
def is_manga(illustrate):
    return bool(illustrate.is_manga or illustrate.type == 'manga')
def download_illustrations(user, data_list, save_path='.', add_user_folder=False, add_rank=False, skip_manga=False):
"""Download illustratons
Args:
user: PixivApi()
data_list: json
save_path: str, download path of the illustrations
        add_user_folder: bool, whether to put the illustration into a per-user folder
        add_rank: bool, add illustration rank at the beginning of filename
        skip_manga: bool, skip illustrations detected as manga
"""
illustrations = PixivIllustModel.from_data(data_list, user)
if skip_manga:
manga_number = sum([is_manga(i) for i in illustrations])
if manga_number:
print('skip', manga_number, 'manga')
illustrations = list(filter(lambda x: not is_manga(x), illustrations))
download_queue, count = check_files(illustrations, save_path, add_user_folder, add_rank)[0:2]
if count > 0:
print(_('Start download, total illustrations '), count)
global _finished_download, _Global_Download
_finished_download = 0
_Global_Download = 0
start_and_wait_download_threading(download_queue, count)
print()
else:
print(_('There is no new illustration need to download'))
def download_by_user_id(user, user_ids=None):
save_path = get_default_save_path()
if not user_ids:
user_ids = input(_('Input the artist\'s id:(separate with space)')).split(' ')
for user_id in user_ids:
print(_('Artists %s') % user_id)
data_list = user.get_user_illustrations(user_id)
download_illustrations(user, data_list, save_path, add_user_folder=True)
def download_by_ranking(user):
today = str(datetime.date.today())
save_path = os.path.join(get_default_save_path(), today + ' ranking')
data_list = user.get_ranking_illustrations(per_page=100, mode='daily')
download_illustrations(user, data_list, save_path, add_rank=True)
def download_by_history_ranking(user, date=''):
if not date:
date = input(_('Input the date:(eg:2015-07-10)'))
    if not (re.search(r"^\d{4}-\d{2}-\d{2}", date)):
print(_('[invalid]'))
date = str(datetime.date.today())
save_path = os.path.join(get_default_save_path(), date + ' ranking')
data_list = user.get_ranking_illustrations(date=date, per_page=100, mode='daily')
download_illustrations(user, data_list, save_path, add_rank=True)
def artist_folder_scanner(user, user_id_list, save_path, final_list, fast):
while not user_id_list.empty():
user_info = user_id_list.get()
user_id = user_info['id']
folder = user_info['folder']
try:
per_page = 9999
if fast:
per_page = _fast_mode_size
data_list = user.get_user_illustrations(user_id, per_page=per_page)
if len(data_list) > 0:
file_path = os.path.join(save_path, folder, data_list[-1]['image_urls']['large'].split('/')[-1])
while not os.path.exists(file_path) and per_page <= len(data_list):
per_page += _fast_mode_size
data_list = user.get_user_illustrations(user_id, per_page=per_page)
file_path = os.path.join(save_path, folder, data_list[-1]['image_urls']['large'].split('/')[-1])
else:
data_list = user.get_user_illustrations(user_id, per_page=per_page)
illustrations = PixivIllustModel.from_data(data_list, user)
count, checked_list = check_files(illustrations, save_path, add_user_folder=True, add_rank=False)[1:3]
if len(sys.argv) < 2 or count:
try:
print(_('Artists %s [%s]') % (folder, count))
except UnicodeError:
print(_('Artists %s ?? [%s]') % (user_id, count))
with _PROGRESS_LOCK:
for index in checked_list:
final_list.append(data_list[index])
except Exception as e:
print(e)
user_id_list.task_done()
def update_exist(user, fast=True):
current_path = get_default_save_path()
final_list = []
user_id_list = queue.Queue()
for folder in os.listdir(current_path):
if os.path.isdir(os.path.join(current_path, folder)):
            user_id = re.search(r'^(\d+) ', folder)
if user_id:
user_id = user_id.group(1)
user_id_list.put({'id': user_id, 'folder': folder})
for i in range(_THREADING_NUMBER):
scan_t = threading.Thread(target=artist_folder_scanner,
args=(user, user_id_list, current_path, final_list, fast,))
scan_t.daemon = True
scan_t.start()
user_id_list.join()
download_illustrations(user, final_list, current_path, add_user_folder=True)
def remove_repeat(user):
"""Delete xxxxx.img if xxxxx_p0.img exist"""
choice = input(_('Dangerous Action: continue?(y/n)'))
if choice == 'y':
illust_path = get_default_save_path()
for folder in os.listdir(illust_path):
if os.path.isdir(os.path.join(illust_path, folder)):
                if re.search(r'^(\d+) ', folder):
path = os.path.join(illust_path, folder)
for file_name in os.listdir(path):
                        illustration_id = re.search(r'^\d+\.', file_name)
if illustration_id:
if os.path.isfile(os.path.join(path
, illustration_id.string.replace('.', '_p0.'))):
os.remove(os.path.join(path, file_name))
print('Delete', os.path.join(path, file_name))
def main():
user = PixivApi()
if len(sys.argv) > 1:
print(datetime.datetime.now().strftime('%X %x'))
ids = arguments['<id>']
is_rank = arguments['-r']
date = arguments['--date']
is_update = arguments['-u']
if ids:
download_by_user_id(user, ids)
elif is_rank:
if date:
date = date[0]
download_by_history_ranking(user, date)
else:
download_by_ranking(user)
elif is_update:
update_exist(user)
print(datetime.datetime.now().strftime('%X %x'))
else:
print(_(' Pixiv Downloader 2.4 ').center(77, '#'))
options = {
'1': download_by_user_id,
'2': download_by_ranking,
'3': download_by_history_ranking,
'4': update_exist,
'5': remove_repeat
}
while True:
print(_('Which do you want to:'))
for i in sorted(options.keys()):
print('\t %s %s' % (i, _(options[i].__name__).replace('_', ' ')))
choose = input('\t e %s \n:' % _('exit'))
if choose in [str(i) for i in range(1, len(options) + 1)]:
print((' ' + _(options[choose].__name__).replace('_', ' ') + ' ').center(60, '#') + '\n')
                if choose == '4':
options[choose](user, False)
else:
options[choose](user)
print('\n' + (' ' + _(options[choose].__name__).replace('_', ' ') + _(' finished ')).center(60,
'#') + '\n')
elif choose == 'e':
break
else:
print(_('Wrong input!'))
if __name__ == '__main__':
arguments = docopt(__doc__, version='pixiv 2.4')
sys.exit(main())
|
axel.py
|
# axel.py
#
# Copyright (C) 2010 Adrian Cristea adrian dot cristea at gmail dotcom
#
# Based on an idea by Peter Thatcher, found on
# http://www.valuedlessons.com/2008/04/events-in-python.html
#
# This module is part of Axel and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
#
# Source: http://pypi.python.org/pypi/axel
# Docs: http://packages.python.org/axel
import sys, threading
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty
class Event(object):
"""
Event object inspired by C# events. Handlers can be registered and
unregistered using += and -= operators. Execution and result are
influenced by the arguments passed to the constructor and += method.
from axel import Event
event = Event()
def on_event(*args, **kw):
return (args, kw)
event += on_event # handler registration
print(event(10, 20, y=30))
>> ((True, ((10, 20), {'y': 30}), <function on_event at 0x00BAA270>),)
event -= on_event # handler is unregistered
print(event(10, 20, y=30))
>> None
class Mouse(object):
def __init__(self):
self.click = Event(self)
self.click += self.on_click # handler registration
def on_click(self, sender, *args, **kw):
assert isinstance(sender, Mouse), 'Wrong sender'
return (args, kw)
mouse = Mouse()
print(mouse.click(10, 20))
>> ((True, ((10, 20), {}),
>> <bound method Mouse.on_click of <__main__.Mouse object at 0x00B6F470>>),)
mouse.click -= mouse.on_click # handler is unregistered
print(mouse.click(10, 20))
>> None
"""
def __init__(self, sender=None, asynch=False, exc_info=False, lock=None, threads=3, traceback=False):
""" Creates an event
asynch
if True, handlers are executed asynchronously
exc_info
if True, result will contain sys.exc_info()[:2] on error
lock
threading.RLock used to synchronize execution
sender
event's sender. The sender is passed as the first argument to the
handler, only if it is not None. In this case the handler must have
a placeholder in the arguments to receive the sender
threads
maximum number of threads that will be started
traceback
if True, the execution result will contain sys.exc_info()
on error. exc_info must also be True to get the traceback
hash = hash(handler)
Handlers are stored in a dictionary that has as keys the handler's hash
handlers = {
hash : (handler, memoize, timeout),
hash : (handler, memoize, timeout), ...
}
The execution result is cached using the following structure
memoize = {
hash : ((args, kw, result), (args, kw, result), ...),
hash : ((args, kw, result), ...), ...
}
The execution result is returned as a tuple having this structure
exec_result = (
(True, result, handler), # on success
(False, error_info, handler), # on error
(None, None, handler), ... # asynchronous execution
)
"""
self.asynchronous = asynch
self.exc_info = exc_info
self.lock = lock
self.sender = sender
self.threads = threads
self.traceback = traceback
self.handlers = {}
self.memoize = {}
def handle(self, handler):
""" Registers a handler. The handler can be transmitted together
with two arguments as a list or dictionary. The arguments are:
memoize
if True, the execution result will be cached in self.memoize
timeout
will allocate a predefined time interval for the execution
If arguments are provided as a list, they are considered to have
this sequence: (handler, memoize, timeout)
Examples:
event += handler
event += (handler, True, 1.5)
event += {'handler':handler, 'memoize':True, 'timeout':1.5}
"""
handler_, memoize, timeout = self._extract(handler)
self.handlers[hash(handler_)] = (handler_, memoize, timeout)
return self
def unhandle(self, handler):
""" Unregisters a handler """
h, _, _ = self._extract(handler)
key = hash(h)
if not key in self.handlers:
raise ValueError('Handler "%s" was not found' % str(h))
del self.handlers[key]
return self
def fire(self, *args, **kw):
""" Stores all registered handlers in a queue for processing """
queue = Queue()
result = []
def _execute(*args, **kw):
""" Executes all handlers stored in the queue """
while True:
try:
# handler, memoize, timeout
h, m, t = self.handlers[queue.get()]
if isinstance(self.lock, threading._RLock):
self.lock.acquire() # synchronisation
try:
r = self._memoize(h, m, t, *args, **kw)
if not self.asynchronous:
result.append(tuple(r))
except:
if not self.asynchronous:
result.append((False, self._error(sys.exc_info()), h))
finally:
if isinstance(self.lock, threading._RLock):
self.lock.release()
if not self.asynchronous:
queue.task_done()
except Empty:
break
if self.handlers:
for _ in range(self._threads()):
t = threading.Thread(target=_execute, args=args, kwargs=kw)
t.daemon = True
t.start()
for handler in self.handlers:
queue.put(handler)
if self.asynchronous:
h, _, _ = self.handlers[handler]
result.append((None, None, h))
if not self.asynchronous:
queue.join()
return tuple(result) or None
def count(self):
""" Returns the count of registered handlers """
return len(self.handlers)
def clear(self):
""" Discards all registered handlers and cached results """
self.handlers.clear()
self.memoize.clear()
def _extract(self, queue_item):
""" Extracts a handler and handler's arguments that can be provided
as list or dictionary. If arguments are provided as list, they are
considered to have this sequence: (handler, memoize, timeout)
Examples:
event += handler
event += (handler, True, 1.5)
event += {'handler':handler, 'memoize':True, 'timeout':1.5}
"""
if not queue_item:
raise ValueError('Invalid list of arguments')
handler = None
memoize = False
timeout = 0
if not isinstance(queue_item, (list, tuple, dict)):
handler = queue_item
elif isinstance(queue_item, (list, tuple)):
if len(queue_item) == 3:
handler, memoize, timeout = queue_item
elif len(queue_item) == 2:
handler, memoize, = queue_item
elif len(queue_item) == 1:
handler = queue_item[0]
elif isinstance(queue_item, dict):
handler = queue_item.get('handler')
memoize = queue_item.get('memoize', False)
timeout = queue_item.get('timeout', 0)
return (handler, bool(memoize), float(timeout))
def _memoize(self, handler, memoize, timeout, *args, **kw):
""" Caches the execution result of successful executions
hash = hash(handler)
memoize = {
hash : ((args, kw, result), (args, kw, result), ...),
hash : ((args, kw, result), ...), ...
}
"""
if not isinstance(handler, Event) and self.sender is not None:
args = list(args)[:]
args.insert(0, self.sender)
if not memoize:
if timeout <= 0: # no time restriction
return [True, handler(*args, **kw), handler]
result = self._timeout(timeout, handler, *args, **kw)
if isinstance(result, tuple) and len(result) == 3:
if isinstance(result[1], Exception): # error occurred
return [False, self._error(result), handler]
return [True, result, handler]
else:
hash_ = hash(handler)
if hash_ in self.memoize:
for args_, kwargs_, result in self.memoize[hash_]:
if args_ == args and kwargs_ == kw:
return [True, result, handler]
if timeout <= 0: # no time restriction
result = handler(*args, **kw)
else:
result = self._timeout(timeout, handler, *args, **kw)
if isinstance(result, tuple) and len(result) == 3:
if isinstance(result[1], Exception): # error occurred
return [False, self._error(result), handler]
lock = threading.RLock()
lock.acquire()
try:
if hash_ not in self.memoize:
self.memoize[hash_] = []
self.memoize[hash_].append((args, kw, result))
return [True, result, handler]
finally:
lock.release()
def _timeout(self, timeout, handler, *args, **kw):
""" Controls the time allocated for the execution of a method """
t = spawn_thread(target=handler, args=args, kw=kw)
t.daemon = True
t.start()
t.join(timeout)
if not t.is_alive():
if t.exc_info:
return t.exc_info
return t.result
else:
try:
msg = '[%s] Execution was forcefully terminated'
raise RuntimeError(msg % t.name)
except:
return sys.exc_info()
def _threads(self):
""" Calculates maximum number of threads that will be started """
if self.threads < len(self.handlers):
return self.threads
return len(self.handlers)
def _error(self, exc_info):
""" Retrieves the error info """
if self.exc_info:
if self.traceback:
return exc_info
return exc_info[:2]
return exc_info[1]
__iadd__ = handle
__isub__ = unhandle
__call__ = fire
__len__ = count
class spawn_thread(threading.Thread):
""" Spawns a new thread and returns the execution result """
def __init__(self, target, args=(), kw={}, default=None):
threading.Thread.__init__(self)
self._target = target
self._args = args
self._kwargs = kw
self.result = default
self.exc_info = None
def run(self):
try:
self.result = self._target(*self._args, **self._kwargs)
except:
self.exc_info = sys.exc_info()
finally:
del self._target, self._args, self._kwargs
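# A minimal usage sketch (not part of the original axel module): it assumes only
# the Event and spawn_thread definitions above and shows the tuple registration
# form documented in handle() -- (handler, memoize, timeout) -- plus the
# (success_flag, result, handler) structure of the execution result.
if __name__ == '__main__':
    def slow_square(x):
        return x * x

    demo = Event(threads=1)
    # register with memoize=True and a 2 second timeout, as described in handle()
    demo += (slow_square, True, 2.0)
    print(demo(4))   # ((True, 16, <function slow_square ...>),)
    print(demo(4))   # same call again: the result now comes from the memoize cache
    demo -= slow_square
    print(demo(4))   # None, no handlers left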
|
tcr.py
|
# -*- coding: utf-8 -*-
import LINETCR
from LINETCR.lib.curve.ttypes import *
from datetime import datetime
import time,random,sys,json,codecs,threading,glob,re
cl = LINETCR.LINE()
#cl.login(qr=True)
cl.login(token="token")
cl.loginResult()
ki = LINETCR.LINE()
#ki.login(qr=True)
ki.login(token="token")
ki.loginResult()
kk = LINETCR.LINE()
#kk.login(qr=True)
kk.login(token="token")
kk.loginResult()
kc = LINETCR.LINE()
#kc.login(qr=True)
kc.login(token="token")
kc.loginResult()
print "登入成功"
reload(sys)
sys.setdefaultencoding('utf-8')
helpMessage ="""DPK BOT
DPK BOT
[Id]
[Mid]
[Me]
[TL:「Text」]
[Mc 「mid」]
[K on/off]
[Join on/off]
[Gcancel:「Number of people」]
[Cancelall]
[Leave on/off]
[Add on/off]
[Share on/off]
[Message change:「text」]
[Message check]
[Confirm]
[Jam on/off]
[Change clock:「name」]
[Up]
[DPK join]
DPK BOT
[*] Command in the groups [*]
DPK BOT
[Curl]
[Ourl]
[url]
[url:「Group ID」]
[Invite:「mid」]
[Kick:「mid」]
[Ginfo]
[jointicket]
[Cancel]
[Gn 「group name」]
[Nk 「name」]
DPK BOT
[*] Command kicker only [*]
DPK BOT
[Bye]
[Kill ban]
[Kill 「@」]
[Ban 「@」] By Tag
[Unban 「@」] By Tag
[Ban] Share Contact
[Unban] Share Contact
[Banlist]
[Cek ban]
[DPK mid]
[DPK invite:「mid」]
[DPK rename:「name」]
[DPK gift]
[Respon]
[Bot cancel]
[Title:]
DPK BOT
DPK BOT"""
KAC=[cl,ki,kk,kc]
profile = cl.getProfile()
mid = profile.mid
Amid = ki.getProfile().mid
Bmid = kk.getProfile().mid
Cmid = kc.getProfile().mid
Bots=[mid,Amid,Bmid,Cmid]
admin=["uf4924f41633f72613bb0990f38a93da0"]
wait = {
'contact':False,
'autoJoin':True,
'autoCancel':{"on":True,"members":5},
'leaveRoom':True,
'timeline':True,
'autoAdd':True,
'message':"感謝您加我好友",
"lang":"JP",
"comment":"感謝您加我好友",
"commentOn":True,
"commentBlack":{},
"wblack":False,
"dblack":False,
"clock":True,
"cName":"DPK BOT",
"blacklist":{},
"wblacklist":False,
"dblacklist":False,
"protectionOn":True,
"atjointicket":False
}
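# The wait dict above holds the bot's runtime switches: the auto-join / auto-add /
# auto-leave toggles, the auto-cancel member threshold, the greeting and comment
# templates, the clock name prefix, and the black/white lists consulted by the
# kick-protection logic in bot() below.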
wait2 = {
'readPoint':{},
'readMember':{},
'setTime':{},
'ROM':{}
}
setTime = {}
setTime = wait2['setTime']
def sendMessage(to, text, contentMetadata={}, contentType=0):
mes = Message()
mes.to, mes.from_ = to, profile.mid
mes.text = text
mes.contentType, mes.contentMetadata = contentType, contentMetadata
cl.sendMessage(mes)
def NOTIFIED_READ_MESSAGE(op):
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n・" + Name
wait2['ROM'][op.param1][op.param2] = "・" + Name
else:
pass
except:
pass
def bot(op):
try:
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
cl.findAndAddContactsByMid(op.param1)
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
if op.type == 13:
if op.param3 in mid:
if op.param2 in Amid:
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
if op.param3 in Amid:
if op.param2 in Bmid:
X = kk.getGroup(op.param1)
X.preventJoinByTicket = False
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
if op.param3 in Bmid:
if op.param2 in Cmid:
X = kc.getGroup(op.param1)
X.preventJoinByTicket = False
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
if op.param3 in Cmid:
if op.param2 in mid:
X = cl.getGroup(op.param1)
X.preventJoinByTicket = False
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
if op.type == 13:
print op.param1
print op.param2
print op.param3
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
if op.type == 19:
if mid in op.param3:
if op.param2 in Bots:
pass
try:
ki.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ti = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
X = cl.getGroup(op.param1)
X.preventJoinByTicket = True
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Amid in op.param3:
if op.param2 in Bots:
pass
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("clientが蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kk.getGroup(op.param1)
X.preventJoinByTicket = False
cl.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
G = ki.getGroup(op.param1)
G.preventJoinByTicket = True
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Bmid in op.param3:
if op.param2 in Bots:
pass
try:
kc.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("clientが蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kc.getGroup(op.param1)
X.preventJoinByTicket = False
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
G = kk.getGroup(op.param1)
G.preventJoinByTicket = True
kk.updateGroup(G)
Ticket = kk.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Cmid in op.param3:
if op.param2 in Bots:
pass
try:
cl.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("clientが蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = cl.getGroup(op.param1)
X.preventJoinByTicket = False
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
G = kc.getGroup(op.param1)
G.preventJoinByTicket = True
kc.updateGroup(G)
Ticket = kc.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if op.type == 13:
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 26:
msg = op.message
if msg.toType == 0:
msg.to = msg.from_
if msg.from_ == profile.mid:
if "join:" in msg.text:
list_ = msg.text.split(":")
try:
cl.acceptGroupInvitationByTicket(list_[1],list_[2])
X = cl.getGroup(list_[1])
X.preventJoinByTicket = True
cl.updateGroup(X)
except:
cl.sendText(msg.to,"error")
if msg.toType == 1:
if wait["leaveRoom"] == True:
cl.leaveRoom(msg.to)
if msg.contentType == 16:
url = msg.contentMetadata["postEndUrl"]
cl.like(url[25:58], url[66:], likeType=1001)
if op.type == 26:
msg = op.message
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
cl.sendText(msg.to,"already")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
cl.sendText(msg.to,"decided not to comment")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"deleted")
ki.sendText(msg.to,"deleted")
kk.sendText(msg.to,"deleted")
kc.sendText(msg.to,"deleted")
wait["dblack"] = False
else:
wait["dblack"] = False
cl.sendText(msg.to,"It is not in the black list")
ki.sendText(msg.to,"It is not in the black list")
kk.sendText(msg.to,"It is not in the black list")
kc.sendText(msg.to,"It is not in the black list")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"already")
ki.sendText(msg.to,"already")
kk.sendText(msg.to,"already")
kc.sendText(msg.to,"already")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"aded")
ki.sendText(msg.to,"aded")
kk.sendText(msg.to,"aded")
kc.sendText(msg.to,"aded")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"deleted")
ki.sendText(msg.to,"deleted")
kk.sendText(msg.to,"deleted")
kc.sendText(msg.to,"deleted")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"It is not in the black list")
ki.sendText(msg.to,"It is not in the black list")
kk.sendText(msg.to,"It is not in the black list")
kc.sendText(msg.to,"It is not in the black list")
elif wait["contact"] == True:
msg.contentType = 0
cl.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
elif msg.contentType == 16:
if wait["timeline"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = "URL→\n" + msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text in ["Key","help","Help"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,helpMessage)
else:
cl.sendText(msg.to,helpMessage)
elif ("Gn " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Gn ","")
cl.updateGroup(X)
else:
cl.sendText(msg.to,"It can't be used besides the group.")
elif ("DPK1 gn " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("DPK1 gn ","")
ki.updateGroup(X)
else:
ki.sendText(msg.to,"It can't be used besides the group.")
elif ("DPK2 gn " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("DPK2 gn ","")
kk.updateGroup(X)
else:
kk.sendText(msg.to,"It can't be used besides the group.")
elif ("DPK3 gn " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("DPK3 gn ","")
kc.updateGroup(X)
else:
kc.sendText(msg.to,"It can't be used besides the group.")
elif "Kick " in msg.text:
midd = msg.text.replace("Kick ","")
cl.kickoutFromGroup(msg.to,[midd])
elif "DPK1 kick " in msg.text:
midd = msg.text.replace("DPK1 kick ","")
ki.kickoutFromGroup(msg.to,[midd])
elif "DPK2 kick " in msg.text:
midd = msg.text.replace("DPK2 kick ","")
kk.kickoutFromGroup(msg.to,[midd])
elif "DPK3 kick " in msg.text:
midd = msg.text.replace("DPK3 kick ","")
kc.kickoutFromGroup(msg.to,[midd])
elif "Invite " in msg.text:
midd = msg.text.replace("Invite ","")
cl.findAndAddContactsByMid(midd)
cl.inviteIntoGroup(msg.to,[midd])
elif "DPK1 invite " in msg.text:
midd = msg.text.replace("DPK1 invite ","")
ki.findAndAddContactsByMid(midd)
ki.inviteIntoGroup(msg.to,[midd])
elif "DPK2 invite " in msg.text:
midd = msg.text.replace("DPK2 invite ","")
kk.findAndAddContactsByMid(midd)
kk.inviteIntoGroup(msg.to,[midd])
elif "DPK3 invite " in msg.text:
midd = msg.text.replace("DPK3 invite ","")
kc.findAndAddContactsByMid(midd)
kc.inviteIntoGroup(msg.to,[midd])
elif msg.text in ["Me"]:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
elif msg.text in ["DPK1"]:
msg.contentType = 13
msg.contentMetadata = {'mid': Amid}
ki.sendMessage(msg)
elif msg.text in ["DPK2"]:
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
kk.sendMessage(msg)
elif msg.text in ["æ„›ã®ãƒ—レゼント","Gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
cl.sendMessage(msg)
elif msg.text in ["æ„›ã®ãƒ—レゼント","DPK1 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '6'}
msg.text = None
ki.sendMessage(msg)
elif msg.text in ["æ„›ã®ãƒ—レゼント","DPK2 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '8'}
msg.text = None
kk.sendMessage(msg)
elif msg.text in ["æ„›ã®ãƒ—レゼント","DPK3 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '10'}
msg.text = None
kc.sendMessage(msg)
elif msg.text in ["æ„›ã®ãƒ—レゼント","All gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '12'}
msg.text = None
ki.sendMessage(msg)
kk.sendMessage(msg)
kc.sendMessage(msg)
elif msg.text in ["cancel","Cancel"]:
if msg.toType == 2:
X = cl.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
cl.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"No one is inviting")
else:
cl.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["DPK cancel","Bot cancel"]:
if msg.toType == 2:
# "k3" was never defined in this script; the kc client is used here instead (assumed intent)
G = kc.getGroup(msg.to)
if G.invitee is not None:
gInviMids = [contact.mid for contact in G.invitee]
kc.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
kc.sendText(msg.to,"No one is inviting")
else:
kc.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
kc.sendText(msg.to,"Can not be used outside the group")
else:
kc.sendText(msg.to,"Not for use less than group")
#elif "gurl" == msg.text:
#print cl.getGroup(msg.to)
##cl.sendMessage(msg)
elif msg.text in ["Ourl","Link on"]:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["DPK1 ourl","DPK1 link on"]:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
ki.updateGroup(X)
if wait["lang"] == "JP":
ki.sendText(msg.to,"Done BOSS")
else:
ki.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["DPK2 ourl","DPK2 link on"]:
if msg.toType == 2:
X = kk.getGroup(msg.to)
X.preventJoinByTicket = False
kk.updateGroup(X)
if wait["lang"] == "JP":
kk.sendText(msg.to,"Done BOSS")
else:
kk.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
kk.sendText(msg.to,"Can not be used outside the group")
else:
kk.sendText(msg.to,"Not for use less than group")
elif msg.text in ["DPK3 ourl","DPK3 link on"]:
if msg.toType == 2:
X = kc.getGroup(msg.to)
X.preventJoinByTicket = False
kc.updateGroup(X)
if wait["lang"] == "JP":
kc.sendText(msg.to,"Done BOSS")
else:
kc.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
kc.sendText(msg.to,"Can not be used outside the group")
else:
kc.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Curl","Link off"]:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = True
cl.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["DPK1 curl","DPK1 link off"]:
if msg.toType == 2:
X = ki.getGroup(msg.to)
X.preventJoinByTicket = True
ki.updateGroup(X)
if wait["lang"] == "JP":
ki.sendText(msg.to,"Done BOSS")
else:
ki.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"Can not be used outside the group")
else:
ki.sendText(msg.to,"Not for use less than group")
elif msg.text in ["DPK2 curl","DPK2 link off"]:
if msg.toType == 2:
X = kk.getGroup(msg.to)
X.preventJoinByTicket = True
kk.updateGroup(X)
if wait["lang"] == "JP":
kk.sendText(msg.to,"Done BOSS")
else:
kk.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
kk.sendText(msg.to,"Can not be used outside the group")
else:
kk.sendText(msg.to,"Not for use less than group")
elif msg.text in ["DPK3 curl","DPK3 link off"]:
if msg.toType == 2:
X = kc.getGroup(msg.to)
X.preventJoinByTicket = True
kc.updateGroup(X)
if wait["lang"] == "JP":
kc.sendText(msg.to,"Done BOSS")
else:
kc.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
kc.sendText(msg.to,"Can not be used outside the group")
else:
kc.sendText(msg.to,"Not for use less than group")
elif "jointicket " in msg.text.lower():
rplace=msg.text.lower().replace("jointicket ","")
if rplace == "on":
wait["atjointicket"]=True
elif rplace == "off":
wait["atjointicket"]=False
cl.sendText(msg.to,"Auto Join Group by Ticket is %s" % str(wait["atjointicket"]))
elif '/ti/g/' in msg.text.lower():
link_re = re.compile('(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
links = link_re.findall(msg.text)
n_links=[]
for l in links:
if l not in n_links:
n_links.append(l)
for ticket_id in n_links:
if wait["atjointicket"] == True:
group=cl.findGroupByTicket(ticket_id)
cl.acceptGroupInvitationByTicket(group.id,ticket_id)
cl.sendText(msg.to,"Sukses join ke grup %s" % str(group.name))
elif msg.text == "Ginfo":
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
if ginfo.preventJoinByTicket == True:
u = "close"
else:
u = "open"
cl.sendText(msg.to,"[group name]\n" + str(ginfo.name) + "\n[gid]\n" + msg.to + "\n[group creator]\n" + gCreator + "\n[profile status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\nmembers:" + str(len(ginfo.members)) + "members\npending:" + sinvitee + "people\nURL:" + u + "it is inside")
else:
cl.sendText(msg.to,"[group name]\n" + str(ginfo.name) + "\n[gid]\n" + msg.to + "\n[group creator]\n" + gCreator + "\n[profile status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif "Id" == msg.text:
cl.sendText(msg.to,msg.to)
elif "All mid" == msg.text:
cl.sendText(msg.to,mid)
ki.sendText(msg.to,Amid)
kk.sendText(msg.to,Bmid)
kc.sendText(msg.to,Cmid)
elif "Mid" == msg.text:
cl.sendText(msg.to,mid)
elif "DPK1 mid" == msg.text:
ki.sendText(msg.to,Amid)
elif "DPK2 mid" == msg.text:
kk.sendText(msg.to,Bmid)
elif "DPK3 mid" == msg.text:
kc.sendText(msg.to,Cmid)
elif msg.text in ["Wkwk"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "100",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Hehehe"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "10",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Galon"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "9",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["You"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "7",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Hadeuh"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "6",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Please"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "4",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Haaa"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "3",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Lol"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "110",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Hmmm"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "101",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
elif msg.text in ["Welcome"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "247",
"STKPKGID": "3",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["TL:"]:
tl_text = msg.text.replace("TL:","")
cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
elif msg.text in ["Cn "]:
string = msg.text.replace("Cn ","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"name " + string + " done")
elif msg.text in ["DPK1 rename "]:
string = msg.text.replace("DPK1 rename ","")
if len(string.decode('utf-8')) <= 20:
profile_B = ki.getProfile()
profile_B.displayName = string
ki.updateProfile(profile_B)
ki.sendText(msg.to,"name " + string + " done")
elif msg.text in ["DPK2 rename "]:
string = msg.text.replace("DPK2 rename ","")
if len(string.decode('utf-8')) <= 20:
profile_B = kk.getProfile()
profile_B.displayName = string
kk.updateProfile(profile_B)
kk.sendText(msg.to,"name " + string + " done")
elif msg.text in ["Mc "]:
mmid = msg.text.replace("Mc ","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
elif msg.text in ["連絡先:オン","K on","Contact on","顯示:開"]:
if wait["contact"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["contact"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["連絡先:オフ","K off","Contact off","顯示:關"]:
if wait["contact"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done ")
else:
wait["contact"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["自動å‚åŠ :オン","Join on","Auto join:on","自動åƒåŠ ï¼šé–‹"]:
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["自動å‚åŠ :オフ","Join off","Auto join:off","自動åƒåŠ ï¼šé—œ"]:
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Gcancel:"]:
try:
strnum = msg.text.replace("Gcancel:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invitation refused turned off\nTo turn on please specify the number of people and send")
else:
cl.sendText(msg.to,"关了邀请拒ç»ã€‚è¦æ—¶å¼€è¯·æŒ‡å®šäººæ•°å‘é€")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,strnum + "The group of people and below decided to automatically refuse invitation")
else:
cl.sendText(msg.to,strnum + "使人以下的å°ç»„用自动邀请拒ç»")
except:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Value is wrong")
else:
cl.sendText(msg.to,"Bizarre ratings")
elif msg.text in ["強制自動退出:オン","Leave on","Auto leave:on","強制自動退出:開"]:
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å¼€ã€‚")
elif msg.text in ["強制自動退出:オフ","Leave off","Auto leave:off","強制自動退出:關"]:
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already")
elif msg.text in ["共有:オン","Share on","Share on"]:
if wait["timeline"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å¼€ã€‚")
elif msg.text in ["共有:オフ","Share off","Share off"]:
if wait["timeline"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å…³æ–。")
elif msg.text in ["Set"]:
md = ""
if wait["contact"] == True: md+=" Contact : on\n"
else: md+=" Contact : off\n"
if wait["autoJoin"] == True: md+=" Auto join : on\n"
else: md +=" Auto join : off\n"
if wait["autoCancel"]["on"] == True:md+=" Group cancel :" + str(wait["autoCancel"]["members"]) + "\n"
else: md+= " Group cancel : off\n"
if wait["leaveRoom"] == True: md+=" Auto leave : on\n"
else: md+=" Auto leave : off\n"
if wait["timeline"] == True: md+=" Share : on\n"
else:md+=" Share : off\n"
if wait["autoAdd"] == True: md+=" Auto add : on\n"
else:md+=" Auto add : off\n"
if wait["commentOn"] == True: md+=" Comment : on\n"
else:md+=" Comment : off\n"
if wait["atjointicket"] == True: md+=" Auto Join Group by Ticket : on\n"
else:md+=" Auto Join Group by Ticket : off\n"
cl.sendText(msg.to,md)
elif "album merit " in msg.text:
gid = msg.text.replace("album merit ","")
album = cl.getAlbum(gid)
if album["result"]["items"] == []:
if wait["lang"] == "JP":
cl.sendText(msg.to,"There is no album")
else:
cl.sendText(msg.to,"相册没在。")
else:
if wait["lang"] == "JP":
mg = "The following is the target album"
else:
mg = "以下是对象的相册"
for y in album["result"]["items"]:
if "photoCount" in y:
mg += str(y["title"]) + ":" + str(y["photoCount"]) + "sheet\n"
else:
mg += str(y["title"]) + ":0sheet\n"
cl.sendText(msg.to,mg)
elif "album " in msg.text:
gid = msg.text.replace("album ","")
album = cl.getAlbum(gid)
if album["result"]["items"] == []:
if wait["lang"] == "JP":
cl.sendText(msg.to,"There is no album")
else:
cl.sendText(msg.to,"相册没在。")
else:
if wait["lang"] == "JP":
mg = "The following is the target album"
else:
mg = "以下是对象的相册"
for y in album["result"]["items"]:
if "photoCount" in y:
mg += str(y["title"]) + ":" + str(y["photoCount"]) + "sheet\n"
else:
mg += str(y["title"]) + ":0sheet\n"
elif "album remove " in msg.text:
gid = msg.text.replace("album remove ","")
albums = cl.getAlbum(gid)["result"]["items"]
i = 0
if albums != []:
for album in albums:
cl.deleteAlbum(gid,album["id"])
i += 1
if wait["lang"] == "JP":
cl.sendText(msg.to,str(i) + "Deleted albums")
else:
cl.sendText(msg.to,str(i) + "åˆ é™¤äº†äº‹çš„ç›¸å†Œã€‚")
elif msg.text in ["Group id","群組全id"]:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:%s\n" % (cl.getGroup(i).name,i)
cl.sendText(msg.to,h)
elif msg.text in ["Cancelall"]:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"All invitations have been refused")
else:
cl.sendText(msg.to,"æ‹’ç»äº†å…¨éƒ¨çš„邀请。")
elif "album remove→" in msg.text:
gid = msg.text.replace("album remove→","")
albums = cl.getAlbum(gid)["result"]["items"]
i = 0
if albums != []:
for album in albums:
cl.deleteAlbum(gid,album["id"])
i += 1
if wait["lang"] == "JP":
cl.sendText(msg.to,str(i) + "Albums deleted")
else:
cl.sendText(msg.to,str(i) + "åˆ é™¤äº†äº‹çš„ç›¸å†Œã€‚")
elif msg.text in ["è‡ªå‹•è¿½åŠ :オン","Add on","Auto add:on","è‡ªå‹•è¿½åŠ ï¼šé–‹"]:
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å¼€ã€‚")
elif msg.text in ["è‡ªå‹•è¿½åŠ :オフ","Add off","Auto add:off","è‡ªå‹•è¿½åŠ ï¼šé—œ"]:
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å…³æ–。")
elif "Message change: " in msg.text:
wait["message"] = msg.text.replace("Message change: ","")
cl.sendText(msg.to,"message changed")
elif "Message add: " in msg.text:
wait["message"] = msg.text.replace("Message add: ","")
if wait["lang"] == "JP":
cl.sendText(msg.to,"message changed")
else:
cl.sendText(msg.to,"done。")
elif msg.text in ["Message","è‡ªå‹•è¿½åŠ å•候語確èª"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,"message change to\n\n" + wait["message"])
else:
cl.sendText(msg.to,"The automatic appending information is set as follows。\n\n" + wait["message"])
elif "Comment:" in msg.text:
c = msg.text.replace("Comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"message changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
elif "Add comment:" in msg.text:
c = msg.text.replace("Add comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"String that can not be changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
elif msg.text in ["コメント:オン","Comment on","Comment:on","自動首é 留言:開"]:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already on")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å¼€ã€‚")
elif msg.text in ["コメント:オフ","Comment on","Comment off","自動首é 留言:關"]:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already off")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å…³æ–。")
elif msg.text in ["Comment","留言確èª"]:
cl.sendText(msg.to,"message changed to\n\n" + str(wait["comment"]))
elif msg.text in ["Gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
cl.updateGroup(x)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["DPK1 gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
ki.updateGroup(x)
gurl = ki.reissueGroupTicket(msg.to)
ki.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["DPK2 gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
kk.updateGroup(x)
gurl = kk.reissueGroupTicket(msg.to)
kk.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["DPK3 gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
kc.updateGroup(x)
gurl = kc.reissueGroupTicket(msg.to)
kc.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Comment bl "]:
wait["wblack"] = True
cl.sendText(msg.to,"add to comment bl")
elif msg.text in ["Comment wl "]:
wait["dblack"] = True
cl.sendText(msg.to,"wl to comment bl")
elif msg.text in ["Comment bl confirm"]:
if wait["commentBlack"] == {}:
cl.sendText(msg.to,"confirmed")
else:
cl.sendText(msg.to,"Blacklist")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif msg.text in ["Jam on"]:
if wait["clock"] == True:
cl.sendText(msg.to,"already on")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"done")
elif msg.text in ["Jam off"]:
if wait["clock"] == False:
cl.sendText(msg.to,"already off")
else:
wait["clock"] = False
cl.sendText(msg.to,"done")
elif msg.text in ["Change clock "]:
n = msg.text.replace("Change clock ","")
if len(n.decode("utf-8")) > 13:
cl.sendText(msg.to,"changed")
else:
wait["cName"] = n
cl.sendText(msg.to,"changed to\n\n" + n)
elif msg.text in ["Up"]:
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"Jam Update")
else:
cl.sendText(msg.to,"Please turn on the name clock")
elif msg.text == "$set":
cl.sendText(msg.to, "Check sider")
ki.sendText(msg.to, "Check sider")
kk.sendText(msg.to, "Check sider")
kc.sendText(msg.to, "Check sider")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['ROM'][msg.to] = {}
wait2['setTime'][msg.to] = datetime.now().strftime("%Y/%m/%d %H:%M:%S")
print wait2
elif msg.text == "$read":
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
print rom
chiya += rom[1] + "\n"
cl.sendText(msg.to, "People who readed %s\nthat's it\n\nPeople who have ignored reads\n%sIt is abnormal ♪\n\nReading point creation date n time:\n[%s]" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
else:
cl.sendText(msg.to, "An already read point has not been set.\n「set」you can send ♪ read point will be created ♪")
#-----------------------------------------------
#-----------------------------------------------
elif msg.text in ["All join"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
kk.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
kc.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
print "kicker ok"
G.preventJoinByTicket = True
ki.updateGroup(G)
elif msg.text in ["DPK1 join"]:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ti)
G = kk.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
Ticket = kk.reissueGroupTicket(msg.to)
elif msg.text in ["DPK2 join"]:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
kk.acceptGroupInvitationByTicket(msg.to,Ti)
G = ki.getGroup(msg.to)
G.preventJoinByTicket = True
kk.updateGroup(G)
Ticket = kk.reissueGroupTicket(msg.to)
#-----------------------------------------------
#.acceptGroupInvitationByTicket(msg.to,Ticket)
elif msg.text in ["DPK3 join"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
kc.acceptGroupInvitationByTicket(msg.to,Ticket)
print "kicker ok"
G.preventJoinByTicket = True
kc.updateGroup(G)
#-----------------------------------------------
elif msg.text in ["Bye all"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
kk.leaveGroup(msg.to)
kc.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Bye 1"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Bye 2"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
kk.leaveGroup(msg.to)
except:
pass
elif msg.text in ["DPK1 @bye"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
except:
pass
elif msg.text in ["DPK2 @bye"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kk.leaveGroup(msg.to)
except:
pass
elif msg.text in ["DPK3 @bye"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kc.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif msg.text in ["Tg","Tag all"]:
group = cl.getGroup(msg.to)
jw = [contact.mid for contact in group.members]
cb = ""
cb2 = ""
strt = int(0)
akh = int(0)
for rs in jw:
xname = cl.getContact(rs).displayName
xlen = int(len('x')+1)
akh = akh + xlen
cb += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(rs)+"},"""
strt = strt + int(len('x')+3)
akh = akh + 2
cb2 += "@x \n"
cb = (cb[:int(len(cb)-1)])
msg.contentType = 0
msg.text = cb2
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'d'}
try:
cl.sendMessage(msg)
except Exception as error:
print error
#-----------------------------------------------
elif msg.text in ["Kill"]:
if msg.toType == 2:
group = ki.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
kk.sendText(msg.to,"Fuck You")
kc.sendText(msg.to,"Fuck You")
return
for jj in matched_list:
try:
klist=[ki,kk,kc]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
elif "Cleanse" in msg.text:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Cleanse","")
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
ki.sendText(msg.to,"Just some casual cleansing ô")
kk.sendText(msg.to,"Group cleansed.")
kc.sendText(msg.to,"Fuck You All")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Not found.")
kk.sendText(msg.to,"Not found.")
kc.sendText(msg.to,"Not found.")
else:
for target in targets:
try:
klist=[ki,kk,kc]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
ki.sendText(msg.to,"Group cleanse")
kk.sendText(msg.to,"Group cleanse")
kc.sendText(msg.to,"Group cleanse")
elif "Nk " in msg.text:
if msg.from_ in admin:
nk0 = msg.text.replace("Nk ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
klist=[cl,ki,kk,kc]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
ki.sendText(msg.to,"Succes BOS")
kk.sendText(msg.to,"Fuck You")
elif "Blacklist @ " in msg.text:
_name = msg.text.replace("Blacklist @ ","")
_kicktarget = _name.rstrip(' ')
gs = ki.getGroup(msg.to)
targets = []
for g in gs.members:
if _kicktarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Succes BOS")
except:
ki.sendText(msg.to,"error")
elif "Ban @" in msg.text:
if msg.toType == 2:
print "[Ban]ok"
_name = msg.text.replace("Ban @","")
_nametarget = _name.rstrip(' ')
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Not found BOS")
kk.sendText(msg.to,"Not found BOS")
kc.sendText(msg.to,"Not found BOS")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Succes BOS")
ki.sendText(msg.to,"Succes BOS")
kk.sendText(msg.to,"Succes BOS")
kc.sendText(msg.to,"Succes BOS")
except:
ki.sendText(msg.to,"Error")
kk.sendText(msg.to,"Error")
kc.sendText(msg.to,"Error")
elif "Unban @" in msg.text:
if msg.toType == 2:
print "[Unban]ok"
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip(' ')
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Not found BOS")
kk.sendText(msg.to,"Not found BOS")
kc.sendText(msg.to,"Not found BOS")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Succes BOS")
ki.sendText(msg.to,"Succes BOS")
kk.sendText(msg.to,"Succes BOS")
kc.sendText(msg.to,"Succes BOS")
except:
ki.sendText(msg.to,"Succes BOS")
kk.sendText(msg.to,"Succes BOS")
kc.sendText(msg.to,"Succes BOS")
#-----------------------------------------------
elif msg.text in ["Test"]:
ki.sendText(msg.to,"Ok BOS double thumbs up")
kk.sendText(msg.to,"Ok BOS double thumbs up")
kc.sendText(msg.to,"Ok BOS double thumbs up")
#-----------------------------------------------
elif "Bc " in msg.text:
bctxt = msg.text.replace("Bc ","")
ki.sendText(msg.to,(bctxt))
kk.sendText(msg.to,(bctxt))
kc.sendText(msg.to,(bctxt))
#-----------------------------------------------
elif msg.text in ["BOS say hi"]:
ki.sendText(msg.to,"Hi buddy Har Har")
kk.sendText(msg.to,"Hi buddy Har Har")
kc.sendText(msg.to,"Hi buddy Har Har")
#-----------------------------------------------
elif msg.text in ["BOS say hinata pekok"]:
ki.sendText(msg.to,"Hinata pekok Har Har")
kk.sendText(msg.to,"Hinata pekok Har Har")
kc.sendText(msg.to,"Hinata pekok Har Har")
elif msg.text in ["BOS say didik pekok"]:
ki.sendText(msg.to,"Didik pekok Har Har")
kk.sendText(msg.to,"Didik pekok Har Har")
kc.sendText(msg.to,"Didik pekok Har Har")
elif msg.text in ["BOS say bobo ah","Bobo dulu ah"]:
ki.sendText(msg.to,"Have a nice dream BOS Har Har")
kk.sendText(msg.to,"Have a nice dream BOS Har Har")
kc.sendText(msg.to,"Have a nice dream BOS Har Har")
elif msg.text in ["BOS say chomel pekok"]:
ki.sendText(msg.to,"Chomel pekok Har Har")
kk.sendText(msg.to,"Chomel pekok Har Har")
kc.sendText(msg.to,"Chomel pekok Har Har")
elif msg.text in ["#welcome"]:
ki.sendText(msg.to,"Selamat datang di Family Room")
kk.sendText(msg.to,"Jangan nakal ok!")
#-----------------------------------------------
elif msg.text in ["PING","Ping","ping"]:
ki.sendText(msg.to,"PONG double thumbs upHar Har")
kk.sendText(msg.to,"PONG double thumbs upHar Har")
kc.sendText(msg.to,"PONG double thumbs upHar Har")
#-----------------------------------------------
elif msg.text in ["Respon","respon"]:
ki.sendText(msg.to,"1")
kk.sendText(msg.to,"2")
kc.sendText(msg.to,"3")
#-----------------------------------------------
elif msg.text in ["Sp","Speed","speed"]:
start = time.time()
cl.sendText(msg.to, "Progress...")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
ki.sendText(msg.to, "%sseconds" % (elapsed_time))
kk.sendText(msg.to, "%sseconds" % (elapsed_time))
kc.sendText(msg.to, "%sseconds" % (elapsed_time))
#------------------------------------------------------------------
elif "Steal home @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Steal home @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
cl.sendImageWithURL(msg.to, path)
except:
pass
print "[Command]dp executed"
#------------------------------------------------------------------
elif "Steal dp @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Steal dp @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendImageWithURL(msg.to, path)
except:
pass
print "[Command]dp executed"
#------------------------------------------------------------------
elif msg.text in ["Ban"]:
wait["wblacklist"] = True
cl.sendText(msg.to,"send contact")
ki.sendText(msg.to,"send contact")
kk.sendText(msg.to,"send contact")
kc.sendText(msg.to,"send contact")
elif msg.text in ["Unban"]:
wait["dblacklist"] = True
cl.sendText(msg.to,"send contact")
ki.sendText(msg.to,"send contact")
kk.sendText(msg.to,"send contact")
kc.sendText(msg.to,"send contact")
elif msg.text in ["Banlist"]:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"nothing")
ki.sendText(msg.to,"nothing")
kk.sendText(msg.to,"nothing")
kc.sendText(msg.to,"nothing")
else:
cl.sendText(msg.to,"Blacklist user")
mc = ""
for mi_d in wait["blacklist"]:
mc += "->" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
ki.sendText(msg.to,mc)
kk.sendText(msg.to,mc)
kc.sendText(msg.to,mc)
elif msg.text in ["Cek ban"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
cocoa = ""
for mm in matched_list:
cocoa += mm + "\n"
cl.sendText(msg.to,cocoa + "")
elif msg.text in ["Kill ban"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
cl.sendText(msg.to,"There was no blacklist user")
ki.sendText(msg.to,"There was no blacklist user")
kk.sendText(msg.to,"There was no blacklist user")
kc.sendText(msg.to,"There was no blacklist user")
return
for jj in matched_list:
cl.kickoutFromGroup(msg.to,[jj])
ki.kickoutFromGroup(msg.to,[jj])
kk.kickoutFromGroup(msg.to,[jj])
kc.kickoutFromGroup(msg.to,[jj])
cl.sendText(msg.to,"Blacklist emang pantas tuk di usir")
ki.sendText(msg.to,"Blacklist emang pantas tuk di usir")
kk.sendText(msg.to,"Blacklist emang pantas tuk di usir")
kc.sendText(msg.to,"Blacklist emang pantas tuk di usir")
elif msg.text in ["Clear"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
cl.cancelGroupInvitation(msg.to,[_mid])
cl.sendText(msg.to,"I pretended to cancel and canceled.")
elif "random:" in msg.text:
if msg.toType == 2:
strnum = msg.text.replace("random:","")
source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
try:
num = int(strnum)
group = cl.getGroup(msg.to)
for var in range(0,num):
name = "".join([random.choice(source_str) for x in xrange(10)])
time.sleep(0.01)
group.name = name
cl.updateGroup(group)
except:
cl.sendText(msg.to,"Error")
elif "album→" in msg.text:
try:
albumtags = msg.text.replace("album→","")
gid = albumtags[:6]
name = albumtags.replace(albumtags[:34],"")
cl.createAlbum(gid,name)
cl.sendText(msg.to,name + "created an album")
except:
cl.sendText(msg.to,"Error")
elif "fakec→" in msg.text:
try:
source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
name = "".join([random.choice(source_str) for x in xrange(10)])
anu = msg.text.replace("fakec→","")
cl.sendText(msg.to,str(cl.channel.createAlbum(msg.to,name,anu)))
except Exception as e:
try:
cl.sendText(msg.to,str(e))
except:
pass
if op.type == 59:
print op
except Exception as error:
print error
def a2():
now2 = datetime.now()
nowT = datetime.strftime(now2,"%M")
if nowT in ["10","20","30","40","50","00"]:
return False
else:
return True
def nameUpdate():
while True:
try:
#while a2():
#pass
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
time.sleep(600)
except:
pass
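# Note: while wait["clock"] is True, the display name is rebuilt as wait["cName"]
# plus the current time, e.g. "MyBot (14:05)" (the prefix here is only illustrative),
# and refreshed roughly every 10 minutes by the loop above.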
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()
while True:
try:
Ops = cl.fetchOps(cl.Poll.rev, 5)
except EOFError:
raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
for Op in Ops:
if (Op.type != OpType.END_OF_OPERATION):
cl.Poll.rev = max(cl.Poll.rev, Op.revision)
bot(Op)
|
verify.py
|
from tkinter import *
from tkinter.ttk import *
import time
from queue import Queue
from threading import Thread
from views import ViewHasBackButton
from statement import HistoryStatement
from rsa import RSAKeypair
from hash import sha256
from common import signed_message_to_bytes
ZEROS_32 = bytearray([0 for _ in range(32)])
# Returns a deserialised HistoryStatement or false
def verify_statement(statement, key, alleged_username, hashPrev=ZEROS_32):
# Decrypt it
decrypted = key.decrypt_signed(statement['payload'])
# If it fails, this is invalid
if decrypted == False:
return False
# Deserialise it
hs = HistoryStatement.from_bytes(decrypted)
# Make sure previous hash matches what's expected
if hashPrev != hs.hashPrev:
return False
# Make sure username matches what it should be
if alleged_username != hs.username:
return False
# Looks valid
return hs
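# Chain property used below: each statement's hashPrev must equal the SHA-256 of the
# previous statement's signed payload bytes, so altering any earlier entry breaks
# verification of every later one (see verify_history).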
# TODO: Error handling
def verify_history(app, file, queue):
# Get a list of users so we have all public keys on hand
app.ensure_authorised()
res = app.get('directory')
res = res.json()
if not res['success']:
raise Exception(res['message'])
public_keys = {}
for row in res['users']:
public_keys[row['name']] = RSAKeypair.deserialise(row['public_key'])
# Get all history statements
res = app.get('file/getHistory?id=%s' % file['id'])
res = res.json()
if not res['success']:
raise Exception(res['message'])
# For each one,
for index, statement in enumerate(res['file']['history']):
# Find the correct public key to use
key = public_keys[statement['alleged_username']]
if index > 0:
prev_statement = res['file']['history'][index - 1]
hs = verify_statement(statement, key, statement['alleged_username'],
sha256(signed_message_to_bytes(prev_statement['payload'])))
else:
hs = verify_statement(statement, key, statement['alleged_username'])
if hs != False:
# Add it to the queue so it's displayed
queue.put({
'valid': 'Y',
'user': hs.username,
'comment': hs.comment,
'time': statement['created_at']
})
else:
# It was invalid so add some row to the table saying so
queue.put({
'valid': 'N',
'user': '-',
'comment': '-',
'time': statement['created_at']
})
queue.put('END')
class VerifyHistoryView(ViewHasBackButton):
def __init__(self, app, frame, file):
self.app = app
self.frame = frame
self.file = file
# Initialise widgets
## Tree
self.tree = Treeview(self.frame, selectmode="browse")
self.tree['columns'] = ('time', 'user', 'valid')
### Column configuration
self.tree.column('#0', width=500, minwidth=100)
self.tree.heading('#0', text='Comment')
self.tree.column('time', minwidth=100)
self.tree.heading('time', text='Time')
self.tree.column('user', minwidth=100)
self.tree.heading('user', text='Username')
self.tree.column('valid', minwidth=100)
self.tree.heading('valid', text='Valid?')
self.tree.grid(row=0, column=0)
## Progress bar
self.progress_bar = Progressbar(self.frame, orient="horizontal", mode="indeterminate")
self.progress_bar.grid(row=1, column=0, sticky=N+S+E+W, pady=5)
## Back button
self.add_back_button(row=2, column=0)
# Start thread to verify history
self.queue = Queue()
self.worker_thread = Thread(target=verify_history, args=(self.app, self.file, self.queue), daemon=True)
self.worker_thread.start()
# Start polling the queue for new entries
self.scheduled_task = self.app.tk.after(20, self.poll_queue)
def poll_queue(self):
self.progress_bar['value'] = (self.progress_bar['value'] + 1) % 100
while not self.queue.empty():
item = self.queue.get()
if item == "END":
self.worker_thread.join()
del self.worker_thread
del self.queue
self.progress_bar.destroy()
del self.progress_bar
self.scheduled_task = None
return
self.tree.insert("", 'end', text=item['comment'], values=(
item['time'],
item['user'],
item['valid']
))
self.scheduled_task = self.app.tk.after(1000, self.poll_queue)
def back_home(self):
if self.scheduled_task != None:
self.app.tk.after_cancel(self.scheduled_task)
super().back_home()
|
__main__.py
|
# Copyright (c) 2020 Andika Wasisto
# Modified by Tobias Raayoni Last
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
import threading
import json
import socket
import requests
from flask import Flask, request, Response
from flask_sockets import Sockets
from gevent import pywsgi
from geventwebsocket.handler import WebSocketHandler
from quanttp.data.qng_wrapper_linux import QngWrapperLinux
from quanttp.data.qng_wrapper_windows import QngWrapperWindows
app = Flask(__name__)
sockets = Sockets(app)
qng_wrapper = QngWrapperWindows() if (os.name == 'nt') else QngWrapperLinux()
def main():
# Commandline Arguments (servername, port)
argNo = len(sys.argv) - 1
if argNo < 2:
print("--------------------------------------------")
print("Please provide arguments: <servername> <port>")
print("--------------------------------------------")
else:
servername = sys.argv[1]
port = int(sys.argv[2])
ip = requests.get('https://api.ipify.org').text
id = str(qng_wrapper.deviceId())
print("----------------------------------------------------------------------------------------")
print("Serving Entropy from TRNG ", id, " as pod \"", servername, "\" on http://", ip, ":", port, "/api/...", sep='')
print("----------------------------------------------------------------------------------------")
register = requests.post('https://webhook.site/02e1d079-ab19-4e29-9e13-1aacb17161ad', data = {'name': servername, 'device': id, 'ip': ip, 'port': port })
print(register)
serve(servername, port, id)
def serve(servername, port, id):
# Original API ----------------------------------------------
@app.route('/api/randint32')
def randint32():
return Response(str(qng_wrapper.randint32()), content_type='text/plain')
@app.route('/api/randuniform')
def randuniform():
return Response(str(qng_wrapper.randuniform()), content_type='text/plain')
@app.route('/api/randnormal')
def randnormal():
return Response(str(qng_wrapper.randnormal()), content_type='text/plain')
@app.route('/api/randhex')
def randhex():
try:
length = int(request.args.get('length'))
if length < 1:
return Response('length must be greater than 0', status=400, content_type='text/plain')
return Response(qng_wrapper.randbytes(length).hex(), content_type='text/plain')
except (TypeError, ValueError) as e:
return Response(str(e), status=400, content_type='text/plain')
@app.route('/api/randbytes')
def randbytes():
try:
length = int(request.args.get('length'))
if length < 1:
return Response('length must be greater than 0', status=400, content_type='text/plain')
return Response(qng_wrapper.randbytes(length), content_type='application/octet-stream')
except (TypeError, ValueError) as e:
return Response(str(e), status=400, content_type='text/plain')
# JSON API ----------------------------------------------
@app.route('/api/json/randint32')
def randjsonint32():
try:
length = int(request.args.get('length'))
if length < 1:
return Response(json.dumps({"error": 'length must be greater than 0', "success":"false"}), status=400, content_type='text/plain')
int32array = []
for x in range(0, length):
int32array.append(qng_wrapper.randint32())
return Response(json.dumps({"server" : servername, "device": id, "type": "string", "format": "int32", "length":length, "data": int32array, "success": "true"}), content_type='text/plain')
except (TypeError, ValueError) as e:
return Response(json.dumps({"error": str(e), "success":"false"}), status=400, content_type='text/plain')
@app.route('/api/json/randuniform')
def randjsonuniform():
try:
length = int(request.args.get('length'))
if length < 1:
return Response(json.dumps({"error": 'length must be greater than 0', "success":"false"}), status=400, content_type='text/plain')
uniformarray = []
for x in range(0, length):
uniformarray.append(qng_wrapper.randuniform())
return Response(json.dumps({"server" : servername, "device": id, "type": "string", "format": "uniform", "length":length, "data": uniformarray, "success": "true"}), content_type='text/plain')
except (TypeError, ValueError) as e:
return Response(json.dumps({"error": str(e), "success":"false"}), status=400, content_type='text/plain')
@app.route('/api/json/randnormal')
def randjsonnormal():
try:
length = int(request.args.get('length'))
if length < 1:
return Response(json.dumps({"error": 'length must be greater than 0', "success":"false"}), status=400, content_type='text/plain')
normarray = []
for x in range(0, length):
normarray.append(qng_wrapper.randnormal())
return Response(json.dumps({"server" : servername, "device": id, "type": "string", "format": "normal", "length":length, "data": normarray, "success": "true"}), content_type='text/plain')
except (TypeError, ValueError) as e:
return Response(json.dumps({"error": str(e), "success":"false"}), status=400, content_type='text/plain')
@app.route('/api/json/randhex')
def randjsonhex():
try:
length = int(request.args.get('length'))
size = int(request.args.get('size'))
if length < 1:
return Response(json.dumps({"error": 'length must be greater than 0', "success":"false"}), status=400, content_type='text/plain')
if size < 1:
return Response(json.dumps({"error": 'size must be greater than 0', "success":"false"}), status=400, content_type='text/plain')
hexarray = []
for x in range(0, length):
hexarray.append(qng_wrapper.randbytes(size).hex())
return Response(json.dumps({"server" : servername, "device": id, "type": "string", "format": "hex", "length":length, "size": size, "data": hexarray, "success": "true"}), content_type='text/plain')
except (TypeError, ValueError) as e:
return Response(json.dumps({"error": str(e), "success":"false"}), status=400, content_type='text/plain')
@app.route('/api/clear')
def clear():
qng_wrapper.clear()
return Response(status=204)
# Websockets ----------------------------------------------
@sockets.route('/ws')
def ws(websocket):
subscribed = [False]
while not websocket.closed:
threading.Thread(target=handle_ws_message, args=(websocket.receive(), websocket, subscribed)).start()
def handle_ws_message(message, websocket, subscribed):
try:
split_message = message.strip().upper().split()
if split_message[0] == 'RANDINT32':
websocket.send(str(qng_wrapper.randint32()))
elif split_message[0] == 'RANDUNIFORM':
websocket.send(str(qng_wrapper.randuniform()))
elif split_message[0] == 'RANDNORMAL':
websocket.send(str(qng_wrapper.randnormal()))
elif split_message[0] == 'RANDBYTES':
length = int(split_message[1])
if length < 1:
raise ValueError()
websocket.send(qng_wrapper.randbytes(length))
elif split_message[0] == 'SUBSCRIBEINT32':
if not subscribed[0]:
subscribed[0] = True
while subscribed[0] and not websocket.closed:
websocket.send(str(qng_wrapper.randint32()))
elif split_message[0] == 'SUBSCRIBEUNIFORM':
if not subscribed[0]:
subscribed[0] = True
while subscribed[0] and not websocket.closed:
websocket.send(str(qng_wrapper.randuniform()))
elif split_message[0] == 'SUBSCRIBENORMAL':
if not subscribed[0]:
subscribed[0] = True
while subscribed[0] and not websocket.closed:
websocket.send(str(qng_wrapper.randnormal()))
elif split_message[0] == 'SUBSCRIBEBYTES':
chunk = int(split_message[1])
if chunk < 1:
raise ValueError()
if not subscribed[0]:
subscribed[0] = True
while subscribed[0] and not websocket.closed:
websocket.send(qng_wrapper.randbytes(chunk))
elif split_message[0] == 'SUBSCRIBEHEX':
chunk = int(split_message[1])
if chunk < 1:
raise ValueError()
if not subscribed[0]:
subscribed[0] = True
while subscribed[0] and not websocket.closed:
websocket.send(qng_wrapper.randbytes(chunk).hex())
elif split_message[0] == 'UNSUBSCRIBE':
subscribed[0] = False
websocket.send('UNSUBSCRIBED')
elif split_message[0] == 'CLEAR':
qng_wrapper.clear()
except (IndexError, ValueError, BlockingIOError):
pass
except Exception as e:
websocket.close(code=1011, message=str(e))
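# Illustrative websocket exchange handled above (one text frame per command):
#   "RANDBYTES 16"     -> 16 raw random bytes
#   "SUBSCRIBEINT32"   -> a continuous stream of int32 values
#   "UNSUBSCRIBE"      -> "UNSUBSCRIBED" (stops the stream)
#   "CLEAR"            -> clears the generator buffer, no reply is sent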
@app.errorhandler(Exception)
def handle_exception(e):
return Response(e.description, status=e.code, content_type='text/plain')
server = pywsgi.WSGIServer(('0.0.0.0', port), application=app, handler_class=WebSocketHandler)
server.serve_forever()
if __name__ == "__main__":
main()
|
recipe-577083.py
|
#! /usr/bin/env python
"""
silence.py
Peter Waller
March 2010
"""
from __future__ import with_statement
from contextlib import contextmanager, nested
from threading import Thread
from tempfile import mkdtemp
from os.path import join as pjoin
from os import (dup, fdopen, open as osopen, O_NONBLOCK, O_RDONLY, remove,
rmdir, mkfifo)
from fcntl import fcntl, F_GETFL, F_SETFL
from select import select
from sys import stdout, stderr
from ctypes import PyDLL, CDLL, c_void_p, c_char_p, py_object
pyapi = PyDLL(None)
this_exe = CDLL(None)
def make_fn(what, res, *args):
what.restype = res
what.argtypes = args
return what
FILE_p = c_void_p
PyFile_AsFile = make_fn(pyapi.PyFile_AsFile, FILE_p, py_object)
freopen = make_fn(this_exe.freopen, FILE_p, c_char_p, c_char_p, FILE_p)
@contextmanager
def fifo():
"""
Create a fifo in a temporary place.
"""
tmpdir = mkdtemp()
filename = pjoin(tmpdir, 'myfifo')
try:
mkfifo(filename)
except OSError, e:
print >>stderr, "Failed to create FIFO: %s" % e
raise
else:
yield filename
remove(filename)
rmdir(tmpdir)
def reader_thread_func(filename, filter_, real_stdout):
"""
Sit there, reading lines from the pipe `filename`, sending those for which
`filter_()` returns False to `real_stdout`
"""
with fdopen(osopen(filename, O_NONBLOCK | O_RDONLY)) as fd:
while True:
rlist, _, _ = select([fd], [], [])
line = fd.readline()
if not line:
break
elif not filter_(line):
real_stdout.write(line)
@contextmanager
def threaded_file_reader(*args):
"""
Operate a read_thread_func in another thread. Block with statement exit
until the function completes.
"""
reader_thread = Thread(target=reader_thread_func, args=args)
reader_thread.start()
try:
yield
finally:
reader_thread.join()
@contextmanager
def silence(filter_=lambda line: True, file_=stdout):
"""
Prevent lines matching `filter_` ending up on `file_` (defaults to stdout)
"""
if not filter_:
yield
return
saved_stdout = dup(file_.fileno())
stdout_file = PyFile_AsFile(file_)
with nested(fdopen(saved_stdout, "w"), fifo()) as (real_stdout, filename):
with threaded_file_reader(filename, filter_, real_stdout):
# Redirect stdout to pipe
freopen(filename, "w", stdout_file)
try:
yield
finally:
# Redirect stdout back to its original place
freopen("/dev/fd/%i" % saved_stdout, "w", stdout_file)
def test():
def filter_stupid(line):
if line.startswith("Stupid"):
return True
print "Before with block.."
with silence(filter_stupid):
print "Stupid output from a C library I don't want to hear"
print "Sensible stuff!"
print "After the silence block"
if __name__ == "__main__":
test()
|
8_rms_wait_die_Speak.py
|
# Author Emeka Ugwuanyi Emmanuel
from functools import reduce
from sys import *
import numpy as np
import random as r
import ping_code as pc
import socket
import struct
import subprocess as sp
from threading import Thread
import threading
import ast
import time
import os
import psutil
import datetime as dt
import getpass as gp
import paho.mqtt.client as mqtt
from netifaces import interfaces, ifaddresses, AF_INET
import smtplib
import config
import paramiko
hosts = {} # {hostname: ip}
_tasks = {'t1': {'wcet': 3, 'period': 20, 'deadline': 15},
't2': {'wcet': 1, 'period': 5, 'deadline': 4},
't3': {'wcet': 2, 'period': 10, 'deadline': 8},
't4': {'wcet': 1, 'period': 10, 'deadline': 9},
't5': {'wcet': 3, 'period': 15, 'deadline': 12}
}
# mat = {'p0': ['cpu', 'mem', 'storage']}
_need = {
't1': [7, 4, 3],
't2': [1, 2, 2],
't3': [6, 0, 0],
't4': [0, 1, 1],
't5': [4, 3, 1]
}
allocation = {
't1': [0, 1, 0],
't2': [2, 0, 0],
't3': [3, 0, 2],
't4': [2, 1, 1],
't5': [0, 0, 2]
}
_cpu = [] # cpu plot list
prev_t = 0 # variable for cpu util
_off_mec = 0 # used to keep a count of tasks offloaded from local mec to another mec
_off_cloud = 0 # used to keep a count of tasks offloaded to cloud
_loc = 0 # used to keep a count of tasks executed locally
_inward_mec = 0 # used to keep a count of tasks offloaded from another mec to local mec
deadlock = [1] # keeps count of how many deadlock is resolved
mec_waiting_time = {} # {ip : [moving (waiting time + rtt)]}
memory = []
mec_rtt = {} # {ip: [RTT]}
offload_register = {} # {task: host_ip} to keep track of tasks sent to mec for offload
reoffload_list = [[], {}] # [[task_list],{wait_time}] => records that’s re-offloaded to mec to execute.
discovering = 0 # if discovering == 0 update host
test = []
_time = []
_pos = 0
# received_task_queue = [] # [[(task_list,wait_time), host_ip], ....]
received_task_queue = [] # [(task_list,wait_time), ....]
thread_record = []
port = 65000
_port_ = 64000
cloud_register = {} # ={client_id:client_ip} keeps address of task offloaded to cloud
cloud_port = 63000
stop = 0
t_track = 1
shared_resource_lock = threading.Lock()
def discovering_group():
global sock1
multicast_group = '224.3.29.71'
server_address = ('', 10000)
# Create the socket
sock1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to the server address
sock1.bind(server_address)
# Tell the operating system to add the socket to the multicast group
# on all interfaces.
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock1.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
def offloading_group():
global sock2
multicast_group = '224.5.5.55'
server_address = ('', 20000)
# Create the socket
sock2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to the server address
sock2.bind(server_address)
# Tell the operating system to add the socket to the multicast group
# on all interfaces.
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock2.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
def ip_address():
try:
cmd = ['ifconfig eth1 | grep inet | cut -d ":" -f 2 | cut -d " " -f 1']
address = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
if len(address.strip().split('.')) == 4:
return address.strip()
else:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
except Exception as e:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
def _memory():
global memory
memory.append(round(algo.memory_percent(), 4))
def get_mec_rtts():
for i in mec_rtt:
mec_rtt[i].append(get_rtt(i))
def m_cpu():
global prev_t
# get cpu
next_t = psutil.cpu_percent(percpu=False)
delta = abs(prev_t - next_t)
prev_t = next_t
_cpu.append(round(delta, 4))
def generate_results():
_memory()
m_cpu()
get_mec_rtts()
def host_ip_set():
global ip_set
ip_set = set()
for ifaceName in interfaces():
addresses = [i['addr'] for i in ifaddresses(ifaceName).setdefault(AF_INET, [{'addr': 'No IP addr'}])]
ip_set.add(', '.join(addresses))
def get_time():
_time_ = []
d = str(dt.datetime.utcnow()).split()
_time_ += d[0].split('-')
g = d[1].split('.')
_time_ += g[0].split(':')
_time_.append(g[1])
return _time_
def get_rtt(host):
rtt = pc.verbose_ping(host)
return round(rtt, 4)
def gcd(a, b):
if b == 0:
return a
return gcd(b, a % b)
def _lcm(a, b):
return int(a * b / gcd(a, b))
def lcm(_list):
return reduce(_lcm, _list)
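# Worked example: for the sample periods above, [20, 5, 10, 10, 15],
# lcm([20, 5, 10, 10, 15]) == 60 -- the hyperperiod that load_tasks() uses as the
# schedule length (and as the wcet/period of the injected idle task).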
def gosh_dist(_range):
return ((23 ** r.randrange(1, 1331)) % r.randrange(1, 1777)) % _range
def on_connect(connect_client, userdata, flags, rc):
# print("Connected with Code :" +str(rc))
# Subscribe Topic from here
connect_client.subscribe(node_id)
connect_client.subscribe('mec')
# Callback Function on Receiving the Subscribed Topic/Message
def on_message(message_client, userdata, msg):
data = str(msg.payload, 'utf-8')
if data[0] == 'c': # receive from cloud
data = data[2:]
received_task = ast.literal_eval(data)
# send_client({received_task: get_time()}, cloud_register[received_task.split('.')[2]])
_client.publish(received_task.split('.')[2], str({received_task: get_time()}))
elif data[0] == 't': # receive from client
received_task = ast.literal_eval(data[2:])
received_task_queue.append(received_task)
'''
else:
print('data: ', data)
elif data[0] == 't':
print('send: ', data[2:])
'''
def connect_to_broker():
global _client
global broker_ip
global topic
username = 'mec'
password = 'password'
broker_ip = 'localhost'
broker_port_no = 1883
topic = 'mec' # topic used to exchange mec details to clients
_client = mqtt.Client()
_client.on_connect = on_connect
_client.on_message = on_message
_client.username_pw_set(username, password)
_client.connect(broker_ip, broker_port_no, 60)
_client.loop_forever()
def load_tasks():
global tasks
period_list = [tasks[i]['period'] for i in tasks]
lcm_period = lcm(period_list)
# insert idle task
tasks['idle'] = {'wcet': lcm_period, 'period': lcm_period + 1}
return lcm_period
def scheduler(_lcm_): # RMS algorithm
queue = list(tasks.keys()) # initialize task queue
schedule = []
rms = []
curr = '' # current task
prev = '' # previous task
tmp = {}
for task in tasks.keys():
tmp[task] = {} # temporary data for each task
tmp[task]['deadline'] = tasks[task]['period']
tmp[task]['executed'] = 0
# start scheduling...
# proceed by one timestamp to handle preemption
for _time_ in range(_lcm_):
# insert new tasks into the queue
for t in tmp.keys():
if _time_ == tmp[t]['deadline']:
if tasks[t]['wcet'] > tmp[t]['executed']:
# print('Scheduling Failed at %d' % time)
exit(1)
else:
tmp[t]['deadline'] += tasks[t]['period']
tmp[t]['executed'] = 0
queue.append(t)
# select next task to be scheduled
_min_ = _lcm_ * 2
for task in queue:
if tmp[task]['deadline'] < _min_:
_min_ = tmp[task]['deadline']
curr = task
tmp[curr]['executed'] += 1
# print(time, queue, curr)
# dequeue the execution-completed task
if tmp[curr]['executed'] == tasks[curr]['wcet']:
for i in range(len(queue)):
if curr == queue[i]:
del queue[i]
break
# record to the schedule trace
if prev != curr:
if prev in queue and prev != 'idle': # previous task is preempted..
s = schedule.pop()
schedule.append([s[0], s[1], '*'])
rms.append(s[1])
schedule.append([_time_, curr])
if curr != 'idle':
rms.append(curr)
prev = curr
return rms
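# Illustrative use (tasks and t_time are set from a received task batch in start_loop):
#   hyperperiod = load_tasks()
#   rms_order = scheduler(hyperperiod)
# scheduler() returns the non-idle execution order over one hyperperiod; with the sample
# task set above, the shortest-period task (period 5) is scheduled first.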
# generate execution sequence using wait_die algorithm
def wait_die(processes, avail, n_need, allocat):
global deadlock
offload = []
# To store execution sequence
exec_seq = []
# Per-process status flags: 0 = pending, 'w' = waiting, 1 = finished or offloaded
work = [0] * len(processes)
# While all processes are not finished
# or system is not in safe state.
while 'w' in work or 0 in work:
if 0 in work:
ind = work.index(0)
i = processes[ind]
elif 'w' in work:
# print('wk: ', work)
ind = work.index('w')
i = processes[ind]
else:
break
# print('comparing| process: ', i, n_need[i], 'work: ', avail)
if not (False in list(np.greater_equal(avail, n_need[i]))):
exec_seq.append(i)
avail = np.add(avail, allocat[i])
work[ind] = 1
# print('added: ', exec_seq)
else:
a = list(set(processes) - set(exec_seq) - set(offload))
n = {}
for j in a:
n[j] = sum(allocat[j])
_max = max(n, key=n.get)
# print('work: ', work, 'need: ', n_need[_max])
if processes.index(_max) > processes.index(i): # if true, i is older
# if process is already waiting then offload process
if work[ind] == 'w':
offload.append(i)
avail = np.array(avail) + np.array(allocat[i])
work[processes.index(i)] = 1
# print('offload reentry: ', i, offload)
else:
# wait put process to waiting
work[processes.index(i)] = 'w'
# print('waiting: ', i)
else:
# abort i
offload.append(i)
avail = np.array(avail) + np.array(allocat[i])
work[processes.index(i)] = 1
# print('offload: ', i)
if len(offload) > 0:
print('offloading tasks: ', offload)
cooperative_mec(offload)
deadlock[0] += 1
print('Execution seq: ', exec_seq)
return exec_seq
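# Wait-die summary (as implemented above): a task that cannot be satisfied from the
# available resources is compared with _max, the contender holding the most allocated
# resources. An older task (lower index) is put into the waiting state once and offloaded
# if it would have to wait again; a younger task is offloaded ("dies") immediately.
# Offloaded tasks go to cooperative_mec(), and deadlock[0] counts the resolved deadlocks.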
def get_exec_seq(pro):
# Number of processes
p = len(pro)
processes = ['{}_{}'.format(pro[i], i) for i in range(p)]
# Available instances of resources
avail = [6, 5, 5]
n_need = {i: _need[i[:2]] for i in processes}
# print('need', n_need)
# Resources allocated to processes
allot = {i: allocation[i[:2]] for i in processes}
# return execution sequence
return wait_die(processes, avail, n_need, allot)
def calc_wait_time(list_seq):
pre = 0
time_dic = {}
for i in list_seq:
j = i.split('_')[0]
time_dic[i] = round(t_time[j][0] + pre, 3)
pre += t_time[j][0]
# advertised waiting time = (cumulative waiting time of the whole batch) / 2; using the full average might be too tight
w_send = round(time_dic[list(time_dic.keys())[-1]] / 2, 3)
send_message('wt {} {}'.format(ip_address(), str(w_send))) # multi-casting waiting time to cooperative MECs
return time_dic
def compare_local_mec(list_seq):
time_compare_dict = {i: t_time[i.split('_')[0]][1] > list_seq[i] for i in list_seq}
print('local vs MEC comparison: ', time_compare_dict)
execute_mec = []
execute_locally = []
for i in time_compare_dict:
if time_compare_dict[i]:
execute_locally.append(i)
else:
execute_mec.append(i)
return execute_mec, execute_locally
def calculate_mov_avg(ma1, a1):
if ma1 in mec_waiting_time:
_count = len(mec_waiting_time[ma1])
avg1 = mec_waiting_time[ma1][-1]
else:
_count = 0
avg1 = 0
_count += 1
avg1 = ((_count - 1) * avg1 + a1) / _count
# ma1.append(avg1) #cumulative average formula
# μ_n=((n-1) μ_(n-1) + x_n)/n
return round(avg1, 4)
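# Worked example: if a MEC already has 3 recorded samples with a last average of 4.0
# and a new sample a1 = 8.0 arrives, the update gives ((4 - 1) * 4.0 + 8.0) / 4 = 5.0.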
def algo_id():
no = int(os.path.basename(__file__)[0])
if no <= 2:
return 2
elif no <= 4:
return 3
elif no <= 7:
return 7
elif no <= 10:
return 10
elif no <= 13:
return 12
else:
return 16
def send_message(mg):
_multicast_group = ('224.3.29.71', 10000)
try:
# Send data to the multicast group
if mg == 'hello':
smg = mg + ' ' + str(['speaker', ip_address()])
sock1.sendto(str.encode(smg), _multicast_group)
print('\nHello message sent')
elif mg == 'update':
ho = hosts.copy()
ho[get_hostname()] = host_ip
smg = mg + ' ' + str(ho)
sock1.sendto(str.encode(smg), _multicast_group)
# print('\n===**====**==update message sent===**======**=========')
elif mg == 'client':
ho = hosts.copy()
ho[get_hostname()] = host_ip
smg = f'm {ho}_{algo_id()}'
_client.publish(topic, smg, retain=True)
else:
sock1.sendto(str.encode(mg), _multicast_group)
except Exception as e:
print(e)
def get_hostname():
cmd = ['cat /etc/hostname']
hostname = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
return hostname
def receive_message():
global hosts
while True:
if stop == 1:
print('Stopped : receive_message()')
break
else:
data, address = sock1.recvfrom(1024)
_d = data.decode()
if _d[:5] == 'hello':
_data = ast.literal_eval(_d[6:])
hosts[_data[0]] = _data[1]
# print('received: ', hosts)
if _data[1] != host_ip:
mec_rtt[_data[1]] = []
elif (_d[:6] == 'update') and (discovering == 0):
hosts = ast.literal_eval(_d[7:])
# print('received: ', hosts)
elif _d[:2] == 'wt':
split_data = _d.split()
if split_data[1] != host_ip:
w_time = calculate_mov_avg(split_data[1], float(split_data[2]) + get_rtt(
address[0]))  # calculate moving average of MEC wait time => w_time = wait time + rtt
if split_data[1] in mec_waiting_time:
mec_waiting_time[split_data[1]].append(w_time)
else:
mec_waiting_time[split_data[1]] = [w_time]
elif data.decode().strip() == 'user':
send_message('update')
def mec_comparison():
# returns min average waiting for all mecs
if len(mec_waiting_time) == 0:
return 0
min_mec = {i: mec_waiting_time[i][-1] for i in mec_waiting_time}
min_wt = min(min_mec, key=min_mec.get)
return min_wt
def cooperative_mec(mec_list):
global _off_cloud
global _off_mec
for i in mec_list:
_host = mec_comparison()
if _host == 0:
# send_cloud([i.split('_')[0], t_time[i.split('_')[0]][0]]) # [task_id,exec_time]
_client.publish(cloud_ip, str([i.split('_')[0], t_time[i.split('_')[0]][0]]))
_off_cloud += 1
# cloud_register[i.split('_')[0].split('.')[2]] = send_back_host
print('\n=========SENDING {} TO CLOUD==========='.format(i))
else:
j = i.split('_')[0]
_max = np.array([6, 5, 5])
send = 'false'
if not (False in list(np.greater_equal(_max, _need[j[:2]]))):
send = 'true'
# CHECK IF THE MINIMUM MEC WAIT TIME IS LESS THAN LATENCY
if mec_waiting_time[_host][-1] < t_time[j][1] and send == 'true':
send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [j, t_time[j][0]]))
_off_mec += 1
# SENDS TASK TO MEC FOR EXECUTION
mec_waiting_time[_host].append(
round(mec_waiting_time[_host][-1] + (t_time[j][0]) / 2, 3)) # adds a new average waiting time
print('\n======SENDING {} TO MEC {}========='.format(i, _host))
else:
_client.publish(cloud_ip, str([j, t_time[j][0]]))
_off_cloud += 1
# send_cloud([j, t_time[j][0]]) # # [task_id,exec_time]
# cloud_register[j.split('.')[2]] = send_back_host
print('\n=========SENDING {} TO CLOUD==========='.format(i))
def execute_re_offloaded_task(offloaded_task):
exec_list = get_exec_seq(offloaded_task[0])
for i in exec_list:
j = i.split('_')[0]
time.sleep(offloaded_task[1][j] / 2)
send_offloaded_task_mec('{} {}'.format(j.split('.')[1], i.split('*')[0]))
def execute(local):
print('\nExecuting :', local)
for i in local:
j = i.split('_')[0]
time.sleep(t_time[j][0] / 2)
print('#' * ((local.index(i) + 1) * 3), ' Executed: ', i)
if j.split('.')[1] != node_id:
send_offloaded_task_mec('{} {}'.format(j.split('.')[1], j))
elif j.split('.')[1] == node_id:
# send_client({j: get_time()}, send_back_host)
_client.publish(j.split('.')[2], str({j: get_time()}))
print('============== EXECUTION DONE ===============')
def receive_offloaded_task_mec(): # run as a thread
global _inward_mec
global t_track
while True:
if stop == 1:
print('Stopped: receive_offloaded_task_mec()')
break
else:
data, address = sock2.recvfrom(1024)
if len(data.decode()) > 0:
da = data.decode().split(' ')
if (address[0] not in ip_set) and da[0] == node_id: # send back to client
# send_client({da[1]: get_time()}, offload_register[da[1]]) # send back to client
_client.publish(da[1].split('.')[2], str({da[1]: get_time()}))
elif (address[0] not in ip_set) and da[0] == 'ex' and da[1] == node_id:
_received = ast.literal_eval(da[2] + da[3])
shared_resource_lock.acquire()
task = _received[0] + '*{}'.format(t_track)
reoffload_list[0].append(task)
reoffload_list[1][task] = _received[1]
shared_resource_lock.release()
t_track += 1
_inward_mec += 1
def call_execute_re_offload():
global reoffload_list
while True:
if stop == 1:
print('Stopped: call_execute_re_offload()')
break
else:
if len(reoffload_list[0]) == 1:
t = reoffload_list[0][-1]
time.sleep(reoffload_list[1][t] / 2)
shared_resource_lock.acquire()
reoffload_list[0].remove(t)
del reoffload_list[1][t]
shared_resource_lock.release()
send_offloaded_task_mec('{} {}'.format(t.split('.')[1], t.split('*')[0]))
elif len(reoffload_list[0]) > 1:
o = reoffload_list.copy()
execute_re_offloaded_task(o)
for i in o[0]:
shared_resource_lock.acquire()
reoffload_list[0].remove(i)
del reoffload_list[1][i]
shared_resource_lock.release()
time.sleep(1)
def send_email(msg):
try:
server = smtplib.SMTP_SSL('smtp.gmail.com')
server.ehlo()
server.login(config.email_address, config.password)
subject = 'Deadlock results rms+wait_die {}'.format(get_hostname())
# msg = 'Attendance done for {}'.format(_timer)
_message = 'Subject: {}\n\n{}\n\n SENT BY RIHANNA \n\n'.format(subject, msg)
server.sendmail(config.email_address, config.send_email, _message)
server.quit()
print("Email sent!")
except Exception as e:
print(e)
def send_offloaded_task_mec(msg):
_multicast_group = ('224.5.5.55', 20000)
try:
sock2.sendto(str.encode(msg), _multicast_group)
except Exception as e:
print(e)
def mec_id(client_ip):
_id = client_ip.split('.')[-1]
if len(_id) == 1:
return '00' + _id
elif len(_id) == 2:
return '0' + _id
else:
return _id
def run_me():
global discovering
initialization()
while True:
if len(hosts) == mec_no:
print('MEC Details: ', hosts)
del hosts['speaker']
discovering = 1
break
time.sleep(2)
speak = Thread(target=speaking_node)
thread_record.append(speak)
speak.daemon = True
speak.start()
start_loop()
def send_result(host_, data):
try:
c = paramiko.SSHClient()
un = 'mec'
pw = 'password'
s_port = 22
c.set_missing_host_key_policy(paramiko.AutoAddPolicy())
c.connect(host_, s_port, un, pw)
for i in data:
cmd = ('echo "{}" >> /home/mec/result/data.py'.format(i)) # task share : host ip task
stdin, stdout, stderr = c.exec_command(cmd)
except Exception as e:
print(e)
def save_and_abort():
global stop
_id_ = get_hostname()[-1]
result = f"wt{_id_}_10_{mec_no} = {mec_waiting_time} " \
f"\nrtt{_id_}_10_{mec_no} = {mec_rtt} \ncpu{_id_}_10_{mec_no} = {_cpu} " \
f"\noff_mec{_id_}_10_{mec_no} = {_off_mec} " \
f"\noff_cloud{_id_}_10_{mec_no} = {_off_cloud} " \
f"\ninward_mec{_id_}_10_{mec_no} = {_inward_mec}" \
f"\nloc{_id_}_10_{mec_no} = {_loc} " \
f"\ndeadlock{_id_}_10_{mec_no} = {deadlock} \nmemory{_id_}_10_{mec_no} = {memory}"
list_result = [
f"wt{_id_}_10_{mec_no} = {mec_waiting_time} ",
f"\nrtt{_id_}_10_{mec_no} = {mec_rtt} \ncpu{_id_}_10_{mec_no} = {_cpu} ",
f"\no_mec{_id_}_10_{mec_no} = {_off_mec} \no_cloud{_id_}_10_{mec_no} = {_off_cloud} ",
f"\ninward_mec{_id_}_10_{mec_no} = {_inward_mec}",
f"\nloc{_id_}_10_{mec_no} = {_loc} ",
f"\ndeadlock{_id_}_10_{mec_no} = {deadlock} \nmemory{_id_}_10_{mec_no} = {memory}"
]
for i in list_result:
cmd = 'echo "{}" >> data.py'.format(i)
os.system(cmd)
send_result(hosts['osboxes-0'], list_result)
send_email(result)
stop += 1
'''
for i in thread_record:
i.join()
'''
_client.loop_stop()
time.sleep(1)
print('done')
os.system('kill -9 {}'.format(os.getpid()))
def start_loop():
global _loc
global tasks
global t_time
global node_id
global stop
print('\n============* WELCOME TO THE DEADLOCK EMULATION PROGRAM *=============\n')
node_id = mec_id(ip_address())
# print('node id: ', node_id)
_threads_ = [receive_offloaded_task_mec, call_execute_re_offload, connect_to_broker]
for i in _threads_:
worker = Thread(target=i)
worker.daemon = True
worker.start()
time.sleep(2)
send_message('client') # send mec details to clients
x = gp.getpass('Press any key to Start...').lower()
if x != 'exit':
print('========= Waiting for tasks ==========')
_time_ = dt.datetime.now()
while True:
try:
if len(received_task_queue) > 0:
info = received_task_queue.pop(0)
tasks, t_time = info
print('EDF List of Processes: ', tasks, '\n')
print('\n========= Running Deadlock Algorithm ===========')
a = load_tasks()
list_seq = get_exec_seq(scheduler(a))
if len(list_seq) > 0: # do only when there is a task in safe sequence
wait_list = calc_wait_time(list_seq)
print('\nWaiting Time List: ', wait_list)
compare_result = compare_local_mec(wait_list)
print('\nExecute Locally: ', compare_result[1])
_loc += len(compare_result[1]) # total number of tasks to be executed locally
print('\nExecute in MEC: ', compare_result[0])
print('\nSending to cooperative platform')
if len(compare_result[0]) > 0:
cooperative_mec(compare_result[0])
execute(compare_result[1])
generate_results()
_time_ = dt.datetime.now()
else:
send_message(str('wt {} 0.0'.format(ip_address())))
time.sleep(.5)
now = dt.datetime.now()
delta = now - _time_
if delta > dt.timedelta(minutes=3):
print('terminating programme 3 mins elapsed')
save_and_abort()
break
except KeyboardInterrupt:
print('\nProgramme Terminated')
save_and_abort()
break
def speaking_node():
global mec_no
while True:
if len(hosts) > (mec_no - 1):
send_message('update')
mec_no = len(hosts) + 1
time.sleep(2)
def initialization():
global mec_no
global host_ip
global cloud_ip
host_ip = ip_address()
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
print('Broker IP: ', s.getsockname()[0])
try:
mec_no = int(input('Number of MECs: ').strip())
cloud_ip = input('Cloud Server IP: ').strip()
print('\nCompiling MEC Details')
h1 = Thread(target=receive_message)
h2 = Thread(target=receive_offloaded_task_mec)
h1.daemon = True
h2.daemon = True
h1.start()
h2.start()
time.sleep(1.5)
while True:
b = input('Send Hello Message (Y/N): ').strip().lower()
if b == 'y':
send_message('hello')
break
else:
print('\nPlease Type "y" to send Hello message\n')
except KeyboardInterrupt:
print('\nProgramme Terminated')
exit(0)
def main():
global algo
os.system('clear')
print('mec ip: ', ip_address())
algo = psutil.Process()
discovering_group()
offloading_group()
host_ip_set()
run_me()
if __name__ == "__main__":
main()
|
multiprocessor.py
|
from concurrent.futures import ThreadPoolExecutor
from .utils import CountUpDownLatch
import threading
import logging
import multiprocessing
import os
import logging.handlers
from .exceptions import FileNotFoundError
try:
from queue import Empty # Python 3
import _thread
except ImportError:
from Queue import Empty # Python 2
import thread
WORKER_THREAD_PER_PROCESS = 50
QUEUE_BUCKET_SIZE = 10
END_QUEUE_SENTINEL = [None, None]
GLOBAL_EXCEPTION = None
GLOBAL_EXCEPTION_LOCK = threading.Lock()
def monitor_exception(exception_queue, process_ids):
global GLOBAL_EXCEPTION
logger = logging.getLogger(__name__)
while True:
try:
local_exception = exception_queue.get(timeout=0.1)
if local_exception == END_QUEUE_SENTINEL:
break
logger.log(logging.DEBUG, "Setting global exception")
GLOBAL_EXCEPTION_LOCK.acquire()
GLOBAL_EXCEPTION = local_exception
GLOBAL_EXCEPTION_LOCK.release()
logger.log(logging.DEBUG, "Closing processes")
for p in process_ids:
p.terminate()
logger.log(logging.DEBUG, "Joining processes")
for p in process_ids:
p.join()
logger.log(logging.DEBUG, "Interrupting main")
raise Exception(local_exception)
except Empty:
pass
def log_listener_process(queue):
while True:
try:
record = queue.get(timeout=0.1)
queue.task_done()
if record == END_QUEUE_SENTINEL: # We send this as a sentinel to tell the listener to quit.
break
logger = logging.getLogger(record.name)
logger.handlers.clear()
logger.handle(record) # No level or filter logic applied - just do it!
except Empty: # Try again
pass
except Exception as e:
import sys, traceback
print('Problems in logging')
traceback.print_exc(file=sys.stderr)
def multi_processor_change_acl(adl, path=None, method_name="", acl_spec="", number_of_sub_process=None):
logger = logging.getLogger(__name__)
def launch_processes(number_of_processes):
if number_of_processes is None:
number_of_processes = max(2, multiprocessing.cpu_count() - 1)
process_list = []
for i in range(number_of_processes):
process_list.append(multiprocessing.Process(target=processor,
args=(adl, file_path_queue, finish_queue_processing_flag,
method_name, acl_spec, log_queue, exception_queue)))
process_list[-1].start()
return process_list
def walk(walk_path):
try:
paths = []
all_files = adl.ls(path=walk_path, detail=True)
for files in all_files:
if files['type'] == 'DIRECTORY':
dir_processed_counter.increment() # A new directory to process
walk_thread_pool.submit(walk, files['name'])
paths.append((files['name'], files['type'] == 'FILE'))
if len(paths) == QUEUE_BUCKET_SIZE:
file_path_queue.put(list(paths))
paths = []
if paths != []:
file_path_queue.put(list(paths)) # For leftover paths < bucket_size
except FileNotFoundError:
pass # Continue in case the file was deleted in between
except Exception:
import traceback
logger.exception("Failed to walk for path: " + str(walk_path) + ". Exiting!")
exception_queue.put(traceback.format_exc())
finally:
dir_processed_counter.decrement() # Processing complete for this directory
# Initialize concurrency primitives
log_queue = multiprocessing.JoinableQueue()
exception_queue = multiprocessing.Queue()
finish_queue_processing_flag = multiprocessing.Event()
file_path_queue = multiprocessing.JoinableQueue()
dir_processed_counter = CountUpDownLatch()
# Start relevant threads and processes
log_listener = threading.Thread(target=log_listener_process, args=(log_queue,))
log_listener.start()
child_processes = launch_processes(number_of_sub_process)
exception_monitor_thread = threading.Thread(target=monitor_exception, args=(exception_queue, child_processes))
exception_monitor_thread.start()
walk_thread_pool = ThreadPoolExecutor(max_workers=WORKER_THREAD_PER_PROCESS)
# Root directory needs to be explicitly passed
file_path_queue.put([(path, False)])
dir_processed_counter.increment()
# Processing starts here
walk(path)
if dir_processed_counter.is_zero(): # Done processing all directories. Blocking call.
walk_thread_pool.shutdown()
file_path_queue.close() # No new elements to add
file_path_queue.join() # Wait for operations to be done
logger.log(logging.DEBUG, "file path queue closed")
finish_queue_processing_flag.set() # Set flag to break loop of child processes
for child in child_processes: # Wait for all child process to finish
logger.log(logging.DEBUG, "Joining process: "+str(child.pid))
child.join()
# Cleanup
logger.log(logging.DEBUG, "Sending exception sentinel")
exception_queue.put(END_QUEUE_SENTINEL)
exception_monitor_thread.join()
logger.log(logging.DEBUG, "Exception monitor thread finished")
logger.log(logging.DEBUG, "Sending logger sentinel")
log_queue.put(END_QUEUE_SENTINEL)
log_queue.join()
log_queue.close()
logger.log(logging.DEBUG, "Log queue closed")
log_listener.join()
logger.log(logging.DEBUG, "Log thread finished")
def processor(adl, file_path_queue, finish_queue_processing_flag, method_name, acl_spec, log_queue, exception_queue):
logger = logging.getLogger(__name__)
removed_default_acl_spec = ",".join([x for x in acl_spec.split(',') if not x.lower().startswith("default")])
try:
logger.addHandler(logging.handlers.QueueHandler(log_queue))
logger.propagate = False # Prevents double logging
except AttributeError:
# Python 2 doesn't have Queue Handler. Default to best effort logging.
pass
try:
func_table = {"mod_acl": adl.modify_acl_entries, "set_acl": adl.set_acl, "rem_acl": adl.remove_acl_entries}
function_thread_pool = ThreadPoolExecutor(max_workers=WORKER_THREAD_PER_PROCESS)
adl_function = func_table[method_name]
logger.log(logging.DEBUG, "Started processor pid:"+str(os.getpid()))
def func_wrapper(func, path, spec):
try:
func(path=path, acl_spec=spec)
except FileNotFoundError:
logger.exception("File "+str(path)+" not found")
# Complete Exception is being logged in the relevant acl method. Don't print exception here
except Exception as e:
logger.exception("File " + str(path) + " not set. Exception "+str(e))
logger.log(logging.DEBUG, "Completed running on path:" + str(path))
while finish_queue_processing_flag.is_set() == False:
try:
file_paths = file_path_queue.get(timeout=0.1)
file_path_queue.task_done() # Will not be called if empty
for file_path in file_paths:
is_file = file_path[1]
if is_file:
spec = removed_default_acl_spec
else:
spec = acl_spec
logger.log(logging.DEBUG, "Starting on path:" + str(file_path))
function_thread_pool.submit(func_wrapper, adl_function, file_path[0], spec)
except Empty:
pass
except Exception as e:
import traceback
logger.exception("Exception in pid "+str(os.getpid())+"Exception: " + str(e))
exception_queue.put(traceback.format_exc())
finally:
function_thread_pool.shutdown() # Blocking call. Will wait till all threads are done executing.
logger.log(logging.DEBUG, "Finished processor pid: " + str(os.getpid()))
|
test_redrock.py
|
import redis
import time
import threading
import random
import threading
POOL = redis.ConnectionPool(host='127.0.0.1',
port='6379',
db=0,
decode_responses=True,
encoding='utf-8',
socket_connect_timeout=2)
LIST_VAL_LEN = 100
SET_VAL_LEN = 1000
HASH_VAL_LEN = 1000
ZSET_VAL_LEN = 100
# ./redis-server --maxmemory 200m --enable-rocksdb-feature yes --maxmemory-only-for-rocksdb yes --save ""
# or
# ./redis-server --maxmemory 200m --enable-rocksdb-feature yes --maxmemory-only-for-rocksdb no --save "" --maxmemory-policy allkeys-random
# after it succeeds, run redis-cli and issue the command 'rock keyreport'
# to see how many keys are on disk; you can then check one rock key from the report with the 'get' command
# the value of such a key looks like '01234567....'
# try 1, 2, 3 or 4 million keys for the two situations above
# a possible error: OOM command not allowed when used memory > 'maxmemory'
# on linux you may need sudo and --bind 0.0.0.0
def _warm_up_with_string(max_keys: int = 1_000_000):
r = redis.StrictRedis(connection_pool=POOL)
r.flushall()
val = ''
for i in range(2_000):
val += str(i)
print(f'value length = {len(val)}')
start = time.time()
for i in range(max_keys):
if i % 100_000 == 0:
print(f"i = {i}, at time = {int(time.time())}")
r.set(i, val)
end = time.time()
print(f'Success! Warm up for total keys = {max_keys}, duration = {int(end-start)} seconds')
# after _warm_up_with_string, using this to check all key's value even with most value in rocksdb
def _check_all_key_in_string(max_keys: int = 1_000_000):
r = redis.StrictRedis(connection_pool=POOL)
val = ''
for i in range(2_000):
val += str(i)
start = time.time()
for i in range(max_keys):
if i % 100_000 == 0:
print(f'i = {i}, time = {int(time.time())}')
db_val = r.get(str(i))
if db_val is None:
print(f'None until {i}')
return
if db_val != val:
print(f'wrong value, key = {i}, db val = {db_val}')
return
end = time.time()
print(f'Success! all key values check out correctly! latency = {int(end-start)} seconds, avg = {int(max_keys/(end-start))} rps')
# ./redis-server --maxmemory 200m --enable-rocksdb-feature yes --maxmemory-only-for-rocksdb yes --save ""
# include every datatype
# string: 0123...1999 string value
# list: 0,1,100 list
# set: 0,1,1000
# hashset: field: 0, 1000, val: 0123..99
# Zset: score 0-999, field 0-999
def _warm_up_with_all_data_types(max_keys: int = 5_000):
r = redis.StrictRedis(connection_pool=POOL)
r.flushall()
string_val = ''
for i in range(2_000):
string_val += str(i)
hash_field_val = ''
for i in range(100):
hash_field_val += str(i)
start = time.time()
for i in range(max_keys):
if i % 1000 == 0:
print(f"i = {i}, at time = {int(time.time())}")
data_type = i % 8
if data_type == 0:
# String
r.set(i, string_val)
elif data_type == 1:
# list
for j in range(LIST_VAL_LEN):
r.lpush(i, j)
elif data_type == 2:
# set
for j in range(SET_VAL_LEN):
r.sadd(i, j)
elif data_type == 3:
# hash
for j in range(HASH_VAL_LEN):
r.hset(i, j, hash_field_val)
elif data_type == 4:
# zset
for j in range(ZSET_VAL_LEN):
r.zadd(i, {j:j})
elif data_type == 5:
# Geo
r.geoadd(i, 13.361389, 38.115556, "Palermo", 15.087269, 37.502669, "Catania")
elif data_type == 6:
# HyperLogLog
r.pfadd(i, 'a', 'b', 'c', 'd', 'e', 'f', 'g')
elif data_type == 7:
# Stream
r.xadd(i, fields={'field1':'value1', 'field2':'value2'})
else:
raise AssertionError
end = time.time()
print(f'Success! Warm up for total keys = {max_keys}, duration = {int(end-start)} seconds')
def _check_all_key_in_data_types(max_keys: int = 5_000):
r = redis.StrictRedis(connection_pool=POOL)
string_check = ''
for i in range(2_000):
string_check += str(i)
list_check = []
for i in range(LIST_VAL_LEN):
list_check.append(str(i))
list_check.sort()
set_check = set()
for i in range(SET_VAL_LEN):
set_check.add(str(i))
hash_field_val = ''
for i in range(100):
hash_field_val += str(i)
hash_check = dict()
for i in range(HASH_VAL_LEN):
hash_check[str(i)] = hash_field_val
zset_check = []
for i in range(ZSET_VAL_LEN):
zset_check.append((str(i), float(i)))
start = time.time()
for i in range(max_keys):
if i % 1_000 == 0:
print(f"i = {i}, at time = {int(time.time())}")
data_type = i % 8
if data_type == 0:
# String
val = r.get(i)
if val is None:
raise Exception("None for string")
if val != string_check:
raise Exception("String value wrong")
elif data_type == 1:
# list
val = r.lrange(i, 0, -1)
if val is None:
raise Exception("None for list")
val.sort()
if val != list_check:
raise Exception("List value wrong")
elif data_type == 2:
# set
val = r.smembers(i)
if val is None:
raise Exception("None for set")
if val != set_check:
raise Exception("Set value wrong")
elif data_type == 3:
# hash
val = r.hgetall(i)
if val is None:
raise Exception("None for hash")
if val != hash_check:
raise Exception("Hash value wrong")
elif data_type == 4:
# zset
val = r.zrange(i, 0, -1, withscores=True)
if val is None:
raise Exception("None for hash")
if val != zset_check:
raise Exception("Zset value wrong")
elif data_type == 5:
# Geo
val = r.geohash(i, 'Catania', 'Palermo')
if val is None:
raise Exception("None for Geo")
if val != ['sqdtr74hyu0', 'sqc8b49rny0']:
raise Exception("Geo value wrong")
elif data_type == 6:
# HyperLogLog
val = r.pfcount(i)
if val is None:
raise Exception("None for HyperLogLog")
if val != 7:
raise Exception("HyperLogLog value wrong")
elif data_type == 7:
# Stream
val = r.xrange(i)
if val is None:
raise Exception("None for Stream")
if val[0][1] != {'field1':'value1', 'field2':'value2'}:
raise Exception("HyperLogLog value wrong")
else:
raise AssertionError
end = time.time()
print(f'Success! all key values of all types check out correctly! latency = {int(end-start)} seconds, avg = {int(max_keys/(end-start))} rps')
# please run _warm_up_with_string() first
def _check_pipeline(max_keys: int = 1_000_000):
r = redis.StrictRedis(connection_pool=POOL)
val = ''
for i in range(2_000):
val += str(i)
for _ in range(1_000):
with r.pipeline(transaction=False) as pipe:
for _ in range(100):
random_key = random.randint(0, max_keys-1)
pipe.get(random_key)
batch = pipe.execute()
for i in range(100):
if batch is None or batch[i] != val:
raise Exception("pipeline return wrong values")
print("Success for pipeline!")
# please run _warm_up_with_string() first
def _check_transaction(max_keys: int = 1_000_000):
r = redis.StrictRedis(connection_pool=POOL)
basic_val = ''
for i in range(2_000):
basic_val += str(i)
basic_len = len(basic_val)
for i in range(10_000):
if i % 1_000 == 0:
print(f"transction i = {i}, at time = {int(time.time())}")
with r.pipeline(transaction=True) as pipe:
random_key1 = random.randint(0, max_keys-1)
pipe.get(random_key1)
random_key2 = random.randint(0, max_keys-1)
pipe.get(random_key2)
pipe.append(random_key2, basic_val)
pipe.execute()
multi_count = 0
for i in range(max_keys):
if i % 10_000 == 0:
print(f"transaction check i = {i}, at time = {int(time.time())}")
val = r.get(i)
multi = int(len(val)/basic_len)
if multi != 1:
multi_count += 1
if val != basic_val*multi:
raise Exception("transaction error")
print(f"Success for transaction! multi count = {multi_count}")
# ./redis-server --maxmemory 100m --enable-rocksdb-feature yes --maxmemory-only-for-rocksdb yes --save ""
def _warm_up_for_block(max_keys: int = 50_000):
r = redis.StrictRedis(connection_pool=POOL)
r.flushall()
string_val = ''
for i in range(2_000):
string_val += str(i)
list_val = []
for i in range(1_000):
list_val.append(i)
for i in range(max_keys):
if i % 1_000 == 0:
print(f'i = {i}, time = {int(time.time())}')
is_list = (i % 2 == 0)
if is_list:
r.rpush(i, *list_val)
else:
r.set(i, string_val)
# run _warm_up_for_block() first
def _check_block(max_keys: int = 50_000):
r = redis.StrictRedis(connection_pool=POOL)
for i in range(max_keys):
if i % 1_000 == 0:
print(f'i = {i}, time = {int(time.time())}')
is_list = (i % 2 == 0)
if is_list:
val = r.blpop(i)
if val is None or val != (str(i), '0'):
raise Exception(f"error for blpop, val = {val}")
print(f"Success for block!")
def _check_rdb_or_aof():
# first, run _warm_up_with_string() with
# ./redis-server --maxmemory 200m --enable-rocksdb-feature yes --maxmemory-only-for-rocksdb yes --save ""
# then in redis-cli run BGSAVE; it takes time (check for the success message, i.e. "DB saved on disk",
# and you will see two processes named redis-server, one using a lot of memory)
# then ctrl-c (twice) to stop the server
# then, ./redis-server --maxmemory 200m --enable-rocksdb-feature yes --maxmemory-only-for-rocksdb yes
# then _check_all_key_in_string()
# repeat above but use save (not fork())
# for aof, use BGREWRITEAOF to create appendonly.aof, then
# ./redis-server --maxmemory 200m --enable-rocksdb-feature yes --maxmemory-only-for-rocksdb yes --save "" --appendonly yes
pass
def _check_replication():
# run first redis-server,
# ./redis-server --maxmemory 200m --enable-rocksdb-feature yes --maxmemory-only-for-rocksdb yes --save ""
# run _warm_up_with_string() to ingest data to the first redis-server
# run a second redis-server (if it is on the same machine, as here, use a different port and a different folder, as in the following)
# ./redis-server --port 6380 --maxmemory 200m --enable-rocksdb-feature yes --maxmemory-only-for-rocksdb yes --rockdbdir /opt/redrock_rocksdb2/ --save ""
# redis-cli -p 6380 to connect to the second redis-server, use replicaof 127.0.0.1 6379 command
# it will take a long time ...
# use _check_all_key_in_string() but change the redis POOL config to use second port 6380 (you can test first 6379 also)
pass
# run _warm_up_with_string() first
def _check_lua1(max_keys: int = 1_000_000):
r = redis.StrictRedis(connection_pool=POOL)
check_val = ''
for i in range(2_000):
check_val += str(i)
script = """
local val = redis.call('GET', KEYS[1])
return val
"""
func = r.register_script(script)
for _ in range(10_000):
rand_key = str(random.randint(0, max_keys-1))
res = func(keys=[rand_key])
if res != check_val:
raise Exception("val not correct!")
print(f"Success for lua1!")
# run _warm_up_with_string() first
def _check_lua2(max_keys: int = 1_000_000):
r = redis.StrictRedis(connection_pool=POOL)
check_val = ''
for i in range(2_000):
check_val += str(i)
script = """
local vals = {}
local total = ARGV[1]
for i = 1, total, 1
do
local val = redis.call('GET', KEYS[i])
vals[i] = val
end
return vals
"""
lua_func = r.register_script(script)
TOTAL_THREAD_NUMBER = 10
thread_return_strings = []
for _ in range(TOTAL_THREAD_NUMBER):
thread_return_strings.append('')
def thread_func(tid: int, key: str):
r_thread = redis.StrictRedis(connection_pool=POOL)
thread_val = r_thread.get(key)
if thread_val is None:
raise Exception("thread failed for Nil")
thread_return_strings[tid] = thread_val
for _ in range(1000):
random_keys = []
for _ in range(TOTAL_THREAD_NUMBER):
random_keys.append(str(random.randint(0, max_keys-1)))
ts = []
for i in range(TOTAL_THREAD_NUMBER):
t = threading.Thread(target=thread_func, args=(i, random_keys[i],))
ts.append(t)
t.start()
lua_return_strings = lua_func(keys=random_keys, args=[TOTAL_THREAD_NUMBER])
for i in range(TOTAL_THREAD_NUMBER):
ts[i].join()
# check the value
for i in range(TOTAL_THREAD_NUMBER):
if lua_return_strings[i] != check_val:
raise Exception("lua return value not correct!")
if thread_return_strings[i] != check_val:
raise Exception("thread value not correct!")
print(f"Success for lua2!")
# run
# ./redis-server --maxmemory 200m --enable-rocksdb-feature yes --maxmemory-only-for-rocksdb no --save "" --maxmemory-policy allkeys-lfu
def _warm_lfu_for_eviction_check():
r = redis.StrictRedis(connection_pool=POOL)
r.flushall()
val = ''
for i in range(2_000):
val += str(i)
touch_keys = []
for i in range(1_000):
touch_keys.append(str(i))
def refresh_key_for_lfu():
r.touch(*touch_keys)
start = time.time()
for i in range(3_000_000):
if i >= 1_000 and i % 1_000 == 0:
refresh_key_for_lfu()
if i % 100_000 == 0:
print(f"i = {i}, at time = {int(time.time())}")
r.set(i, val)
end = time.time()
print(f'Success! Warm up for total keys = 3_000_000, duration = {int(end-start)} seconds')
# please run _warm_lfu_for_eviction_check() first
def _check_lfu_for_eviction():
r = redis.StrictRedis(connection_pool=POOL)
val = ''
for i in range(2_000):
val += str(i)
# check
evict_total = 0
evict_count_lfu = 0
err_count = 0
for i in range(3_000_000):
if i % 100_000 == 0:
print(f'i = {i}, time = {int(time.time())}')
check_val = r.get(i)
if check_val is None:
if i < 1000:
evict_count_lfu += 1
evict_total += 1
elif check_val != val:
err_count += 1
print(f'eviction total = {evict_total}, eviction of lfu = {evict_count_lfu}, err = {err_count}')
print(f'eviction Per mille = {int(1000*evict_total/3_000_000)} , '
f'eviction lfu Per mille = {int(1000*evict_count_lfu/1_000)}')
def _main():
_warm_up_with_string()
_check_all_key_in_string()
#_warm_up_with_all_data_types()
#_check_all_key_in_data_types()
#_check_pipeline()
#_warm_up_for_block()
#_check_block()
#_check_transaction()
#_check_lua1()
#_check_lua2()
#_warm_lfu_for_eviction_check()
#_check_lfu_for_eviction()
pass
if __name__ == '__main__':
_main()
|
postproc.py
|
#!/usr/bin/python3 -OO
# Copyright 2007-2019 The SABnzbd-Team <team@sabnzbd.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
sabnzbd.postproc - threaded post-processing of jobs
"""
import os
import logging
import sabnzbd
import xml.sax.saxutils
import functools
import time
import re
import queue
from sabnzbd.newsunpack import unpack_magic, par2_repair, external_processing, \
sfv_check, build_filelists, rar_sort
from threading import Thread
from sabnzbd.misc import on_cleanup_list
from sabnzbd.filesystem import real_path, get_unique_path, move_to_path, \
make_script_path, long_path, clip_path, renamer, remove_dir, globber, \
globber_full, set_permissions, cleanup_empty_directories, fix_unix_encoding, \
sanitize_and_trim_path, sanitize_files_in_folder, remove_file, recursive_listdir, setname_from_path, \
create_all_dirs, get_unique_filename
from sabnzbd.sorting import Sorter
from sabnzbd.constants import REPAIR_PRIORITY, TOP_PRIORITY, POSTPROC_QUEUE_FILE_NAME, \
POSTPROC_QUEUE_VERSION, sample_match, JOB_ADMIN, Status, VERIFIED_FILE
from sabnzbd.rating import Rating
import sabnzbd.emailer as emailer
import sabnzbd.dirscanner as dirscanner
import sabnzbd.downloader
import sabnzbd.config as config
import sabnzbd.cfg as cfg
import sabnzbd.nzbqueue
import sabnzbd.database as database
import sabnzbd.notifier as notifier
import sabnzbd.utils.rarfile as rarfile
import sabnzbd.utils.rarvolinfo as rarvolinfo
import sabnzbd.utils.checkdir
MAX_FAST_JOB_COUNT = 3
# Match samples
RE_SAMPLE = re.compile(sample_match, re.I)
class PostProcessor(Thread):
""" PostProcessor thread, designed as Singleton """
do = None # Link to instance of the thread
def __init__(self):
""" Initialize PostProcessor thread """
Thread.__init__(self)
# This history queue is simply used to keep track of which active items to display in the web_ui
self.load()
if self.history_queue is None:
self.history_queue = []
# Fast-queue for jobs already finished by DirectUnpack
self.fast_queue = queue.Queue()
# Regular queue for jobs that might need more attention
self.slow_queue = queue.Queue()
# Load all old jobs
for nzo in self.history_queue:
self.process(nzo)
# Counter to not only process fast-jobs
self.__fast_job_count = 0
# State variables
self.__stop = False
self.__busy = False
self.paused = False
PostProcessor.do = self
def save(self):
""" Save postproc queue """
logging.info("Saving postproc queue")
sabnzbd.save_admin((POSTPROC_QUEUE_VERSION, self.history_queue), POSTPROC_QUEUE_FILE_NAME)
def load(self):
""" Save postproc queue """
self.history_queue = []
logging.info("Loading postproc queue")
data = sabnzbd.load_admin(POSTPROC_QUEUE_FILE_NAME)
if data is None:
return
try:
version, history_queue = data
if POSTPROC_QUEUE_VERSION != version:
logging.warning(T('Old queue detected, use Status->Repair to convert the queue'))
elif isinstance(history_queue, list):
self.history_queue = [nzo for nzo in history_queue if os.path.exists(nzo.downpath)]
except:
logging.info('Corrupt %s file, discarding', POSTPROC_QUEUE_FILE_NAME)
logging.info("Traceback: ", exc_info=True)
def delete(self, nzo_id, del_files=False):
""" Remove a job from the post processor queue """
for nzo in self.history_queue:
if nzo.nzo_id == nzo_id:
if nzo.status in (Status.FAILED, Status.COMPLETED):
nzo.to_be_removed = True
elif nzo.status in (Status.DOWNLOADING, Status.QUEUED):
self.remove(nzo)
nzo.purge_data(delete_all_data=del_files)
logging.info('Removed job %s from postproc queue', nzo.final_name)
nzo.work_name = '' # Mark as deleted job
break
def process(self, nzo):
""" Push on finished job in the queue """
if nzo not in self.history_queue:
self.history_queue.append(nzo)
# Fast-track if it has DirectUnpacked jobs or if it's still going
if nzo.direct_unpacker and (nzo.direct_unpacker.success_sets or not nzo.direct_unpacker.killed):
self.fast_queue.put(nzo)
else:
self.slow_queue.put(nzo)
self.save()
sabnzbd.history_updated()
def remove(self, nzo):
""" Remove given nzo from the queue """
try:
self.history_queue.remove(nzo)
except:
pass
self.save()
sabnzbd.history_updated()
def stop(self):
""" Stop thread after finishing running job """
self.__stop = True
self.slow_queue.put(None)
self.fast_queue.put(None)
def cancel_pp(self, nzo_id):
""" Change the status, so that the PP is canceled """
for nzo in self.history_queue:
if nzo.nzo_id == nzo_id:
nzo.abort_direct_unpacker()
if nzo.pp_active:
nzo.pp_active = False
return True
return None
def empty(self):
""" Return True if pp queue is empty """
return self.slow_queue.empty() and self.fast_queue.empty() and not self.__busy
def get_queue(self):
""" Return list of NZOs that still need to be processed """
return [nzo for nzo in self.history_queue if nzo.work_name]
def get_path(self, nzo_id):
""" Return download path for given nzo_id or None when not found """
for nzo in self.history_queue:
if nzo.nzo_id == nzo_id:
return nzo.downpath
return None
def run(self):
""" Postprocessor loop """
# First we do a dircheck
complete_dir = sabnzbd.cfg.complete_dir.get_path()
if sabnzbd.utils.checkdir.isFAT(complete_dir):
logging.warning(T('Completed Download Folder %s is on FAT file system, limiting maximum file size to 4GB') % complete_dir)
else:
logging.info("Completed Download Folder %s is not on FAT", complete_dir)
# Start looping
check_eoq = False
while not self.__stop:
self.__busy = False
if self.paused:
time.sleep(5)
continue
# Something in the fast queue?
try:
# Every few fast-jobs we should allow a
# slow job so that they don't wait forever
if self.__fast_job_count >= MAX_FAST_JOB_COUNT and self.slow_queue.qsize():
raise queue.Empty
nzo = self.fast_queue.get(timeout=2)
self.__fast_job_count += 1
except queue.Empty:
# Try the slow queue
try:
nzo = self.slow_queue.get(timeout=2)
# Reset fast-counter
self.__fast_job_count = 0
except queue.Empty:
# Check for empty queue
if check_eoq:
check_eoq = False
handle_empty_queue()
# No fast or slow jobs, better luck next loop!
continue
# Stop job
if not nzo:
continue
# Job was already deleted.
if not nzo.work_name:
check_eoq = True
continue
# Flag NZO as being processed
nzo.pp_active = True
# Pause downloader, if the user wants that
if cfg.pause_on_post_processing():
sabnzbd.downloader.Downloader.do.wait_for_postproc()
self.__busy = True
process_job(nzo)
if nzo.to_be_removed:
history_db = database.HistoryDB()
history_db.remove_history(nzo.nzo_id)
history_db.close()
nzo.purge_data()
# Processing done
nzo.pp_active = False
self.remove(nzo)
check_eoq = True
# Allow download to proceed
sabnzbd.downloader.Downloader.do.resume_from_postproc()
def process_job(nzo):
""" Process one job """
start = time.time()
# keep track of whether we can continue
all_ok = True
# keep track of par problems
par_error = False
# keep track of any unpacking errors
unpack_error = False
# Signal empty download, for when 'empty_postproc' is enabled
empty = False
nzb_list = []
# These need to be initialized in case of a crash
workdir_complete = ''
script_log = ''
script_line = ''
# Get the job flags
nzo.save_attribs()
flag_repair, flag_unpack, flag_delete = nzo.repair_opts
# Normalize PP
if flag_delete:
flag_unpack = True
if flag_unpack:
flag_repair = True
# Get the NZB name
filename = nzo.final_name
if nzo.fail_msg: # Special case: aborted due to too much missing data
nzo.status = Status.FAILED
nzo.save_attribs()
all_ok = False
par_error = True
unpack_error = 1
try:
# Get the folder containing the download result
workdir = nzo.downpath
tmp_workdir_complete = None
# if no files are present (except __admin__), fail the job
if all_ok and len(globber(workdir)) < 2:
if nzo.precheck:
_enough, ratio = nzo.check_availability_ratio()
req_ratio = float(cfg.req_completion_rate()) / 100.0
# Make sure that rounded ratio doesn't equal required ratio
# when it is actually below required
if (ratio < req_ratio) and (req_ratio - ratio) < 0.001:
ratio = req_ratio - 0.001
emsg = '%.1f%%' % (ratio * 100.0)
emsg2 = '%.1f%%' % float(cfg.req_completion_rate())
emsg = T('Download might fail, only %s of required %s available') % (emsg, emsg2)
else:
emsg = T('Download failed - Not on your server(s)')
empty = True
emsg += ' - https://sabnzbd.org/not-complete'
nzo.fail_msg = emsg
nzo.set_unpack_info('Fail', emsg)
nzo.status = Status.FAILED
# do not run unpacking or parity verification
flag_repair = flag_unpack = False
all_ok = cfg.empty_postproc() and empty
if not all_ok:
par_error = True
unpack_error = 1
script = nzo.script
logging.info('Starting Post-Processing on %s' +
' => Repair:%s, Unpack:%s, Delete:%s, Script:%s, Cat:%s',
filename, flag_repair, flag_unpack, flag_delete, script, nzo.cat)
# Set complete dir to workdir in case we need to abort
workdir_complete = workdir
# Par processing, if enabled
if all_ok and flag_repair:
par_error, re_add = parring(nzo, workdir)
if re_add:
# Try to get more par files
return False
# If we don't need extra par2, we can disconnect
if sabnzbd.nzbqueue.NzbQueue.do.actives(grabs=False) == 0 and cfg.autodisconnect():
# This was the last job, close server connections
sabnzbd.downloader.Downloader.do.disconnect()
# Sanitize the resulting files
if sabnzbd.WIN32:
sanitize_files_in_folder(workdir)
# Check if user allows unsafe post-processing
if flag_repair and cfg.safe_postproc():
all_ok = all_ok and not par_error
if all_ok:
# Fix encodings
fix_unix_encoding(workdir)
# Use dirs generated by direct-unpacker
if nzo.direct_unpacker and nzo.direct_unpacker.unpack_dir_info:
tmp_workdir_complete, workdir_complete, file_sorter, one_folder, marker_file = nzo.direct_unpacker.unpack_dir_info
else:
# Generate extraction path
tmp_workdir_complete, workdir_complete, file_sorter, one_folder, marker_file = prepare_extraction_path(nzo)
newfiles = []
# Run Stage 2: Unpack
if flag_unpack:
# Set the current nzo status to "Extracting...". Used in History
nzo.status = Status.EXTRACTING
logging.info("Running unpack_magic on %s", filename)
unpack_error, newfiles = unpack_magic(nzo, workdir, tmp_workdir_complete, flag_delete, one_folder, (), (), (), (), ())
logging.info("Unpacked files %s", newfiles)
if sabnzbd.WIN32:
# Sanitize the resulting files
newfiles = sanitize_files_in_folder(tmp_workdir_complete)
logging.info("Finished unpack_magic on %s", filename)
if cfg.safe_postproc():
all_ok = all_ok and not unpack_error
if all_ok:
# Move any (left-over) files to destination
nzo.status = Status.MOVING
nzo.set_action_line(T('Moving'), '...')
for root, _dirs, files in os.walk(workdir):
if not root.endswith(JOB_ADMIN):
for file_ in files:
path = os.path.join(root, file_)
new_path = path.replace(workdir, tmp_workdir_complete)
ok, new_path = move_to_path(path, new_path)
if new_path:
newfiles.append(new_path)
if not ok:
nzo.set_unpack_info('Unpack', T('Failed moving %s to %s') % (path, new_path))
all_ok = False
break
# Set permissions right
set_permissions(tmp_workdir_complete)
if all_ok and marker_file:
del_marker(os.path.join(tmp_workdir_complete, marker_file))
remove_from_list(marker_file, newfiles)
if all_ok:
# Remove files matching the cleanup list
cleanup_list(tmp_workdir_complete, True)
# Check if this is an NZB-only download, if so redirect to queue
# except when PP was Download-only
if flag_repair:
nzb_list = nzb_redirect(tmp_workdir_complete, nzo.final_name, nzo.pp, script, nzo.cat, priority=nzo.priority)
else:
nzb_list = None
if nzb_list:
nzo.set_unpack_info('Download', T('Sent %s to queue') % nzb_list)
cleanup_empty_directories(tmp_workdir_complete)
else:
cleanup_list(tmp_workdir_complete, False)
script_output = ''
script_ret = 0
if not nzb_list:
# Give destination its final name
if cfg.folder_rename() and tmp_workdir_complete and not one_folder:
if all_ok:
try:
newfiles = rename_and_collapse_folder(tmp_workdir_complete, workdir_complete, newfiles)
except:
logging.error(T('Error renaming "%s" to "%s"'), clip_path(tmp_workdir_complete), clip_path(workdir_complete))
logging.info('Traceback: ', exc_info=True)
# Better disable sorting because filenames are all off now
file_sorter.sort_file = None
else:
workdir_complete = tmp_workdir_complete.replace('_UNPACK_', '_FAILED_')
workdir_complete = get_unique_path(workdir_complete, n=0, create_dir=False)
if empty:
job_result = -1
else:
job_result = int(par_error) + int(bool(unpack_error)) * 2
if cfg.ignore_samples():
remove_samples(workdir_complete)
# TV/Movie/Date Renaming code part 2 - rename and move files to parent folder
if all_ok and file_sorter.sort_file:
if newfiles:
file_sorter.rename(newfiles, workdir_complete)
workdir_complete, ok = file_sorter.move(workdir_complete)
else:
workdir_complete, ok = file_sorter.rename_with_ext(workdir_complete)
if not ok:
nzo.set_unpack_info('Unpack', T('Failed to move files'))
all_ok = False
# Run the user script
script_path = make_script_path(script)
if (all_ok or not cfg.safe_postproc()) and (not nzb_list) and script_path:
# Set the current nzo status to "Ext Script...". Used in History
nzo.status = Status.RUNNING
nzo.set_action_line(T('Running script'), script)
nzo.set_unpack_info('Script', T('Running user script %s') % script, unique=True)
script_log, script_ret = external_processing(script_path, nzo, clip_path(workdir_complete),
nzo.final_name, job_result)
script_line = get_last_line(script_log)
if script_log:
script_output = nzo.nzo_id
if script_line:
nzo.set_unpack_info('Script', script_line, unique=True)
else:
nzo.set_unpack_info('Script', T('Ran %s') % script, unique=True)
else:
script = ""
script_line = ""
script_ret = 0
# Maybe bad script result should fail job
if script_ret and cfg.script_can_fail():
script_error = True
all_ok = False
nzo.fail_msg = T('Script exit code is %s') % script_ret
else:
script_error = False
# Email the results
if (not nzb_list) and cfg.email_endjob():
if (cfg.email_endjob() == 1) or (cfg.email_endjob() == 2 and (unpack_error or par_error or script_error)):
emailer.endjob(nzo.final_name, nzo.cat, all_ok, workdir_complete, nzo.bytes_downloaded,
nzo.fail_msg, nzo.unpack_info, script, script_log, script_ret)
if script_output:
# Can do this only now, otherwise it would show up in the email
if script_ret:
script_ret = 'Exit(%s) ' % script_ret
else:
script_ret = ''
if len(script_log.rstrip().split('\n')) > 1:
nzo.set_unpack_info('Script',
'%s%s <a href="./scriptlog?name=%s">(%s)</a>' % (script_ret, script_line,
xml.sax.saxutils.escape(script_output), T('More')), unique=True)
else:
# No '(more)' button needed
nzo.set_unpack_info('Script', '%s%s ' % (script_ret, script_line), unique=True)
# Cleanup again, including NZB files
if all_ok:
cleanup_list(workdir_complete, False)
# Force error for empty result
all_ok = all_ok and not empty
# Update indexer with results
if cfg.rating_enable():
if nzo.encrypted > 0:
Rating.do.update_auto_flag(nzo.nzo_id, Rating.FLAG_ENCRYPTED)
if empty:
hosts = [s.host for s in sabnzbd.downloader.Downloader.do.nzo_servers(nzo)]
if not hosts:
hosts = [None]
for host in hosts:
Rating.do.update_auto_flag(nzo.nzo_id, Rating.FLAG_EXPIRED, host)
except:
logging.error(T('Post Processing Failed for %s (%s)'), filename, T('see logfile'))
logging.info("Traceback: ", exc_info=True)
nzo.fail_msg = T('PostProcessing was aborted (%s)') % T('see logfile')
notifier.send_notification(T('Download Failed'), filename, 'failed', nzo.cat)
nzo.status = Status.FAILED
par_error = True
all_ok = False
if cfg.email_endjob():
emailer.endjob(nzo.final_name, nzo.cat, all_ok, clip_path(workdir_complete), nzo.bytes_downloaded,
nzo.fail_msg, nzo.unpack_info, '', '', 0)
if all_ok:
# If the folder only contains one file OR folder, have that as the path
# Be aware that series/generic/date sorting may move a single file into a folder containing other files
workdir_complete = one_file_or_folder(workdir_complete)
workdir_complete = os.path.normpath(workdir_complete)
# Clean up the NZO data
try:
nzo.purge_data(delete_all_data=all_ok)
except:
logging.error(T('Cleanup of %s failed.'), nzo.final_name)
logging.info("Traceback: ", exc_info=True)
# Use automatic retry link on par2 errors and encrypted/bad RARs
if par_error or unpack_error in (2, 3):
try_alt_nzb(nzo)
# Show final status in history
if all_ok:
notifier.send_notification(T('Download Completed'), filename, 'complete', nzo.cat)
nzo.status = Status.COMPLETED
else:
notifier.send_notification(T('Download Failed'), filename, 'failed', nzo.cat)
nzo.status = Status.FAILED
# Log the overall time taken for postprocessing
postproc_time = int(time.time() - start)
# Create the history DB instance
history_db = database.HistoryDB()
# Add the nzo to the database. Only the path, script and time taken is passed
# Other information is obtained from the nzo
history_db.add_history_db(nzo, clip_path(workdir_complete), nzo.downpath, postproc_time, script_log, script_line)
# Purge items
history_db.auto_history_purge()
# The connection is only used once, so close it here
history_db.close()
sabnzbd.history_updated()
return True
def prepare_extraction_path(nzo):
""" Based on the information that we have, generate
the extraction path and create the directory.
Separated so it can be called from DirectUnpacker
"""
one_folder = False
marker_file = None
# Determine class directory
catdir = config.get_categories(nzo.cat).dir()
if catdir.endswith('*'):
catdir = catdir.strip('*')
one_folder = True
complete_dir = real_path(cfg.complete_dir.get_path(), catdir)
complete_dir = long_path(complete_dir)
# TV/Movie/Date Renaming code part 1 - detect and construct paths
if cfg.enable_meta():
file_sorter = Sorter(nzo, nzo.cat)
else:
file_sorter = Sorter(None, nzo.cat)
complete_dir = file_sorter.detect(nzo.final_name, complete_dir)
if file_sorter.sort_file:
one_folder = False
complete_dir = sanitize_and_trim_path(complete_dir)
if one_folder:
workdir_complete = create_all_dirs(complete_dir, umask=True)
else:
workdir_complete = get_unique_path(os.path.join(complete_dir, nzo.final_name), create_dir=True)
marker_file = set_marker(workdir_complete)
if not workdir_complete or not os.path.exists(workdir_complete):
logging.error(T('Cannot create final folder %s') % os.path.join(complete_dir, nzo.final_name))
raise IOError
if cfg.folder_rename() and not one_folder:
prefixed_path = prefix(workdir_complete, '_UNPACK_')
tmp_workdir_complete = get_unique_path(prefix(workdir_complete, '_UNPACK_'), create_dir=False)
try:
renamer(workdir_complete, tmp_workdir_complete)
except:
pass # On failure, just use the original name
# Is the unique path different? Then we also need to modify the final path
if prefixed_path != tmp_workdir_complete:
workdir_complete = workdir_complete + os.path.splitext(tmp_workdir_complete)[1]
else:
tmp_workdir_complete = workdir_complete
return tmp_workdir_complete, workdir_complete, file_sorter, one_folder, marker_file
def parring(nzo, workdir):
""" Perform par processing. Returns: (par_error, re_add) """
filename = nzo.final_name
notifier.send_notification(T('Post-processing'), filename, 'pp', nzo.cat)
logging.info('Starting verification and repair of %s', filename)
# Get verification status of sets
verified = sabnzbd.load_data(VERIFIED_FILE, nzo.workpath, remove=False) or {}
repair_sets = list(nzo.extrapars.keys())
re_add = False
par_error = False
single = len(repair_sets) == 1
if repair_sets:
for setname in repair_sets:
if cfg.ignore_samples() and RE_SAMPLE.search(setname.lower()):
continue
if not verified.get(setname, False):
logging.info("Running verification and repair on set %s", setname)
parfile_nzf = nzo.partable[setname]
# Check if file maybe wasn't deleted and if we maybe have more files in the parset
if os.path.exists(os.path.join(nzo.downpath, parfile_nzf.filename)) or nzo.extrapars[setname]:
need_re_add, res = par2_repair(parfile_nzf, nzo, workdir, setname, single=single)
# Was it aborted?
if not nzo.pp_active:
re_add = False
par_error = True
break
re_add = re_add or need_re_add
verified[setname] = res
else:
continue
par_error = par_error or not res
else:
# We must not have found any par2..
logging.info("No par2 sets for %s", filename)
nzo.set_unpack_info('Repair', T('[%s] No par2 sets') % filename)
if cfg.sfv_check() and not verified.get('', False):
par_error = not try_sfv_check(nzo, workdir)
verified[''] = not par_error
# If still no success, do RAR-check or RAR-rename
if not par_error and cfg.enable_unrar():
_, _, rars, _, _ = build_filelists(workdir)
# If there are no RARs, they might be super-obfuscated
if not rars:
# Returns the number of renamed RARs
if rar_renamer(nzo, workdir):
# Re-parse the files so we can do RAR-check
_, _, rars, _, _ = build_filelists(workdir)
if rars:
par_error = not try_rar_check(nzo, rars)
verified[''] = not par_error
if re_add:
logging.info('Re-added %s to queue', filename)
if nzo.priority != TOP_PRIORITY:
nzo.priority = REPAIR_PRIORITY
nzo.status = Status.FETCHING
sabnzbd.nzbqueue.NzbQueue.do.add(nzo)
sabnzbd.downloader.Downloader.do.resume_from_postproc()
sabnzbd.save_data(verified, VERIFIED_FILE, nzo.workpath)
logging.info('Verification and repair finished for %s', filename)
return par_error, re_add
def try_sfv_check(nzo, workdir):
""" Attempt to verify set using SFV file
Return True if verified, False when failed
"""
# Get list of SFV names; shortest name first, which minimizes the chance of a mismatch
sfvs = globber_full(workdir, '*.sfv')
sfvs.sort(key=lambda x: len(x))
par_error = False
found = False
for sfv in sfvs:
found = True
setname = setname_from_path(sfv)
nzo.status = Status.VERIFYING
nzo.set_unpack_info('Repair', T('Trying SFV verification'), setname)
nzo.set_action_line(T('Trying SFV verification'), '...')
failed = sfv_check(sfv)
if failed:
fail_msg = T('Some files failed to verify against "%s"') % setname
msg = fail_msg + '; '
msg += '; '.join(failed)
nzo.set_unpack_info('Repair', msg, setname)
par_error = True
else:
nzo.set_unpack_info('Repair', T('Verified successfully using SFV files'), setname)
# Show error in GUI
if found and par_error:
nzo.status = Status.FAILED
nzo.fail_msg = fail_msg
return False
# Success or just no SFV's
return True
def try_rar_check(nzo, rars):
""" Attempt to verify set using the RARs
Return True if verified, False when failed
When setname is '', all RAR files will be used, otherwise only the matching one
If no RARs are found, returns True
"""
# Sort for better processing
rars.sort(key=functools.cmp_to_key(rar_sort))
# Test
if rars:
setname = setname_from_path(rars[0])
nzo.status = Status.VERIFYING
nzo.set_unpack_info('Repair', T('Trying RAR-based verification'), setname)
nzo.set_action_line(T('Trying RAR-based verification'), '...')
try:
# Set path to unrar and open the file
# Requires de-unicode for RarFile to work!
rarfile.UNRAR_TOOL = sabnzbd.newsunpack.RAR_COMMAND
zf = rarfile.RarFile(rars[0])
# Skip if it's encrypted
if zf.needs_password():
msg = T('[%s] RAR-based verification failed: %s') % (setname, T('Passworded'))
nzo.set_unpack_info('Repair', msg)
return True
# Will throw exception if something is wrong
zf.testrar()
# Success!
msg = T('RAR files verified successfully')
nzo.set_unpack_info('Repair', msg, setname)
logging.info(msg)
return True
except rarfile.Error as e:
nzo.fail_msg = T('RAR files failed to verify')
msg = T('[%s] RAR-based verification failed: %s') % (setname, e)
nzo.set_unpack_info('Repair', msg, setname)
logging.info(msg)
return False
else:
# No rar-files, so just continue
return True
def rar_renamer(nzo, workdir):
""" Try to use the the header information to give RAR-files decent names """
nzo.status = Status.VERIFYING
nzo.set_unpack_info('Repair', T('Trying RAR-based verification'))
nzo.set_action_line(T('Trying RAR-based verification'), '...')
renamed_files = 0
workdir_files = recursive_listdir(workdir)
for file_to_check in workdir_files:
# The function will check if it's a RAR-file
# We do a sanity-check for the returned number
rar_vol, new_extension = rarvolinfo.get_rar_extension(file_to_check)
if 0 < rar_vol < 1000:
logging.debug("Detected volume-number %s from RAR-header: %s ", rar_vol, file_to_check)
new_rar_name = "%s.%s" % (nzo.final_name, new_extension)
new_rar_name = os.path.join(workdir, new_rar_name)
# Right now we don't support multiple sets inside the same NZB
# So we have to make sure the name is unique
new_rar_name = get_unique_filename(new_rar_name)
renamer(file_to_check, new_rar_name)
renamed_files += 1
else:
logging.debug("No RAR-volume-number found in %s", file_to_check)
return renamed_files
def handle_empty_queue():
""" Check if empty queue calls for action """
if sabnzbd.nzbqueue.NzbQueue.do.actives() == 0:
sabnzbd.save_state()
# Perform end-of-queue action when one is set
if sabnzbd.QUEUECOMPLETEACTION:
logging.info("Queue has finished, launching: %s (%s)",
sabnzbd.QUEUECOMPLETEACTION, sabnzbd.QUEUECOMPLETEARG)
if sabnzbd.QUEUECOMPLETEARG:
sabnzbd.QUEUECOMPLETEACTION(sabnzbd.QUEUECOMPLETEARG)
else:
Thread(target=sabnzbd.QUEUECOMPLETEACTION).start()
sabnzbd.change_queue_complete_action(cfg.queue_complete(), new=False)
def cleanup_list(wdir, skip_nzb):
""" Remove all files whose extension matches the cleanup list,
optionally ignoring the nzb extension
"""
if cfg.cleanup_list():
try:
files = os.listdir(wdir)
except:
files = ()
for filename in files:
path = os.path.join(wdir, filename)
if os.path.isdir(path):
cleanup_list(path, skip_nzb)
else:
if on_cleanup_list(filename, skip_nzb):
try:
logging.info("Removing unwanted file %s", path)
remove_file(path)
except:
logging.error(T('Removing %s failed'), clip_path(path))
logging.info("Traceback: ", exc_info=True)
if files:
try:
remove_dir(wdir)
except:
pass
def prefix(path, pre):
""" Apply prefix to last part of path
'/my/path' and 'hi_' will give '/my/hi_path'
"""
p, d = os.path.split(path)
return os.path.join(p, pre + d)
def nzb_redirect(wdir, nzbname, pp, script, cat, priority):
""" Check if this job contains only NZB files,
if so send to queue and remove if on clean-up list
Returns list of processed NZB's
"""
files = recursive_listdir(wdir)
for file_ in files:
if os.path.splitext(file_)[1].lower() != '.nzb':
return None
# For multiple NZBs, cannot use the current job name
if len(files) != 1:
nzbname = None
# Process all NZB files
for nzb_file in files:
dirscanner.process_single_nzb(os.path.split(nzb_file)[1], nzb_file, pp, script, cat,
priority=priority, keep=False, dup_check=False, nzbname=nzbname)
return files
def one_file_or_folder(folder):
""" If the dir only contains one file or folder, join that file/folder onto the path """
if os.path.exists(folder) and os.path.isdir(folder):
try:
cont = os.listdir(folder)
if len(cont) == 1:
folder = os.path.join(folder, cont[0])
folder = one_file_or_folder(folder)
except WindowsError:
# Can occur on paths it doesn't like, for example "C:"
pass
return folder
TAG_RE = re.compile(r'<[^>]+>')
def get_last_line(txt):
""" Return last non-empty line of a text, trim to 150 max """
# First we remove HTML code in a basic way
txt = TAG_RE.sub(' ', txt)
# Then we get the last line
lines = txt.split('\n')
n = len(lines) - 1
while n >= 0 and not lines[n].strip('\r\t '):
n = n - 1
line = lines[n].strip('\r\t ')
if len(line) >= 150:
line = line[:147] + '...'
return line
def remove_samples(path):
""" Remove all files that match the sample pattern
Skip deleting if it matches all files or there is only 1 file
"""
files_to_delete = []
nr_files = 0
for root, _dirs, files in os.walk(path):
for file_to_match in files:
nr_files += 1
if RE_SAMPLE.search(file_to_match):
files_to_delete.append(os.path.join(root, file_to_match))
# Make sure we skip false-positives
if len(files_to_delete) < nr_files:
for path in files_to_delete:
try:
logging.info("Removing unwanted sample file %s", path)
remove_file(path)
except:
logging.error(T('Removing %s failed'), clip_path(path))
logging.info("Traceback: ", exc_info=True)
else:
logging.info("Skipping sample-removal, false-positive")
def rename_and_collapse_folder(oldpath, newpath, files):
""" Rename folder, collapsing when there's just a single subfolder
oldpath --> newpath OR oldpath/subfolder --> newpath
Modify list of filenames accordingly
"""
orgpath = oldpath
items = globber(oldpath)
if len(items) == 1:
folder = items[0]
folder_path = os.path.join(oldpath, folder)
if os.path.isdir(folder_path) and folder not in ('VIDEO_TS', 'AUDIO_TS'):
logging.info('Collapsing %s', os.path.join(newpath, folder))
oldpath = folder_path
oldpath = os.path.normpath(oldpath)
newpath = os.path.normpath(newpath)
files = [os.path.normpath(f).replace(oldpath, newpath) for f in files]
renamer(oldpath, newpath)
try:
remove_dir(orgpath)
except:
pass
return files
def set_marker(folder):
""" Set marker file and return name """
name = cfg.marker_file()
if name:
path = os.path.join(folder, name)
logging.debug('Create marker file %s', path)
try:
fp = open(path, 'w')
fp.close()
except:
logging.info('Cannot create marker file %s', path)
logging.info("Traceback: ", exc_info=True)
name = None
return name
def del_marker(path):
""" Remove marker file """
if path and os.path.exists(path):
logging.debug('Removing marker file %s', path)
try:
remove_file(path)
except:
logging.info('Cannot remove marker file %s', path)
logging.info("Traceback: ", exc_info=True)
def remove_from_list(name, lst):
if name:
for n in range(len(lst)):
if lst[n].endswith(name):
logging.debug('Popping %s', lst[n])
lst.pop(n)
return
def try_alt_nzb(nzo):
""" Try to get a new NZB if available """
url = nzo.nzo_info.get('failure')
if url and cfg.new_nzb_on_failure():
sabnzbd.add_url(url, nzo.pp, nzo.script, nzo.cat, nzo.priority)
|
trans_client.py
|
# coding:utf-8
"""
UDP client that tries to connect to the server.
Once the connection is established, the specified data is sent.
"""
import asyncio
import hashlib
import json
import os
import random
import threading
import time
class FilePart:
"""文件信息类"""
def __init__(self, name, size, part, total, data):
self.name = name
self.size = size
self.part = part
self.total = total
self.data = data
class ClientProtocol(asyncio.DatagramProtocol):
"""客户端主控类。"""
def __init__(self, fstream, que, tc, loop):
self.fstream = fstream # file stream
self.que = que # client message queue
self.tc = tc # threading control (a semaphore)
self.loop = loop
self.transport = None
self.on_con_lost = loop.create_future()
self.time_counter = self.loop.call_later(10, self.on_con_lost.set_result, True)
if fstream and tc: # determine whether an abort message should be sent
self.path = fstream.name
size = os.path.getsize(self.path)
total = size // 65000 + 1
self.gener = (FilePart(os.path.split(self.path)[1], size, i, total, fstream.read(65000))
for i in range(total))
self.now = next(self.gener)
self.md5 = None
self.thread_md5 = None # MD5 calculation thread
def connection_made(self, transport):
"""连接建立时的行为。"""
self.transport = transport
if self.fstream and self.tc: # when sending a file, start computing the MD5 value
msg = json.dumps({'type': 'message', 'data': 'established', 'name': os.path.split(self.path)[1]}).encode()
self.thread_md5 = threading.Thread(target=self.md5_gener)
self.thread_md5.start()
else: # send an abort packet
msg = json.dumps({'type': 'message', 'data': 'abort'}).encode()
time.sleep(0.5) # in case the server has not created its timer instance yet
self.message_sender(msg)
def datagram_received(self, data, addr):
"""接收数据报时的行为。"""
message = json.loads(data)
if message['type'] == 'message':
if message['data'] == 'complete':
if message['name'] == self.now.name:
self.time_counter.cancel()
elif message['data'] == 'MD5_passed': # pass the MD5 result to the main process and release the lock
self.que.put({'type': 'info', 'name': message['name'], 'message': 'MD5_passed'})
msg = json.dumps({'type': 'message', 'data': 'terminated'}).encode()
self.transport.sendto(msg)
self.tc.release()
self.fstream.close()
elif message['data'] == 'MD5_failed':
self.que.put({'type': 'info', 'name': message['name'], 'message': 'MD5_failed'})
msg = json.dumps({'type': 'message', 'data': 'terminated'}).encode()
self.transport.sendto(msg)
self.tc.release()
self.fstream.close()
elif message['data'] == 'get':
if message['part'] == self.now.part and message['name'] == self.now.name:
# on a successful ack, update the progress message and send the next chunk
self.time_counter.cancel()
self.que.put({'type': 'prog', 'name': message['name'], 'part': message['part']})
try:
self.file_sender()
self.now = next(self.gener)
except StopIteration:
self.fstream.close()
elif message['data'] == 'aborted':
self.time_counter.cancel()
self.que.put({'type': 'info', 'message': 'aborted', 'name': 'None'})
self.transport.close()
def connection_lost(self, exc):
"""连接断开时的行为。"""
if self.fstream:
self.fstream.close()
self.on_con_lost.set_result(True)
def file_sender(self):
"""数据报的发送行为。"""
if self.md5:
raw_msg = {'type': 'data', 'name': self.now.name, 'size': self.now.size, 'part': self.now.part,
'all': self.now.total, 'md5': self.md5}
else:
# If the MD5 is not ready yet, check whether this is the last chunk:
# if so, wait for the MD5 thread; otherwise keep sending
if self.now.part + 1 == self.now.total:
self.thread_md5.join()
raw_msg = {'type': 'data', 'name': self.now.name, 'size': self.now.size, 'part': self.now.part,
'all': self.now.total, 'md5': self.md5}
else:
raw_msg = {'type': 'data', 'name': self.now.name, 'size': self.now.size, 'part': self.now.part,
'all': self.now.total}
fdata = json.dumps(raw_msg).encode() + b'---+++data+++---' + self.now.data
self.message_sender(fdata)
def message_sender(self, message):
"""
Message sending with a built-in random-delay resend mechanism (0.1-0.3 s).
The argument passed in must already be JSON-encoded.
"""
self.time_counter.cancel()
self.transport.sendto(message)
self.time_counter = self.loop.call_later(random.uniform(0.1, 0.3), self.message_sender, message)
def md5_gener(self):
md5 = hashlib.md5()
with open(self.path, 'rb') as f:
for line in f:
md5.update(line)
self.md5 = md5.hexdigest()
async def main(host, port, path, threading_controller, que):
"""传输控制主函数,传输端点在此关闭。"""
loop = asyncio.get_running_loop()
if path and threading_controller: # normal transfer
threading_controller.acquire()
fstream = open(path, 'rb')
transport, protocol = await loop.create_datagram_endpoint(
lambda: ClientProtocol(fstream, que, threading_controller, loop),
remote_addr=(host, port))
else: # abort transfer
transport, protocol = await loop.create_datagram_endpoint(
lambda: ClientProtocol(None, que, None, loop),
remote_addr=(host, port))
try:
await protocol.on_con_lost
finally:
transport.close()
def starter(host, port, file, file_at_same_time, que):
"""传输线程启动函数。"""
if file and file_at_same_time:
threading_controller = threading.BoundedSemaphore(value=file_at_same_time)
for path in file:
thread_asyncio = threading.Thread(target=asyncio.run,
args=(main(host, port, path, threading_controller, que),))
thread_asyncio.start()
else:
thread_asyncio = threading.Thread(target=asyncio.run,
args=(main(host, port, [], None, que),))
thread_asyncio.start()
thread_asyncio.join()
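# Minimal usage sketch (assumptions only: a matching UDP server from the companion
# trans_server.py is listening on 127.0.0.1:12345, and '/tmp/demo.bin' is a
# placeholder path). starter() pushes progress/info dicts onto the queue, which a
# GUI or CLI front end would normally consume.
if __name__ == '__main__':
    import queue
    msg_que = queue.Queue()
    starter('127.0.0.1', 12345, ['/tmp/demo.bin'], 2, msg_que)
    while True:
        msg = msg_que.get()
        print(msg)
        if msg['type'] == 'info':  # MD5_passed / MD5_failed / aborted ends the run
            break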
|
utils_test.py
|
from __future__ import annotations
import asyncio
import copy
import functools
import gc
import inspect
import io
import logging
import logging.config
import os
import queue
import re
import shutil
import signal
import socket
import subprocess
import sys
import tempfile
import threading
import uuid
import warnings
import weakref
from collections import defaultdict
from collections.abc import Callable
from contextlib import contextmanager, nullcontext, suppress
from glob import glob
from itertools import count
from time import sleep
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from typing_extensions import Literal
from distributed.compatibility import MACOS
from distributed.scheduler import Scheduler
try:
import ssl
except ImportError:
ssl = None # type: ignore
import pytest
import yaml
from tlz import assoc, memoize, merge
from tornado import gen
from tornado.ioloop import IOLoop
import dask
from distributed.comm.tcp import TCP
from . import system
from . import versions as version_module
from .client import Client, _global_clients, default_client
from .comm import Comm
from .compatibility import WINDOWS
from .config import initialize_logging
from .core import CommClosedError, ConnectionPool, Status, connect, rpc
from .deploy import SpecCluster
from .diagnostics.plugin import WorkerPlugin
from .metrics import time
from .nanny import Nanny
from .node import ServerNode
from .proctitle import enable_proctitle_on_children
from .security import Security
from .utils import (
DequeHandler,
TimeoutError,
_offload_executor,
get_ip,
get_ipv6,
iscoroutinefunction,
log_errors,
mp_context,
reset_logger_locks,
sync,
)
from .worker import RUNNING, Worker
try:
import dask.array # register config
except ImportError:
pass
logger = logging.getLogger(__name__)
logging_levels = {
name: logger.level
for name, logger in logging.root.manager.loggerDict.items()
if isinstance(logger, logging.Logger)
}
_TEST_TIMEOUT = 30
_offload_executor.submit(lambda: None).result() # create thread during import
@pytest.fixture(scope="session")
def valid_python_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp("data").join("file.py")
local_file.write("print('hello world!')")
return local_file
@pytest.fixture(scope="session")
def client_contract_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp("data").join("distributed_script.py")
lines = (
"from distributed import Client",
"e = Client('127.0.0.1:8989')",
"print(e)",
)
local_file.write("\n".join(lines))
return local_file
@pytest.fixture(scope="session")
def invalid_python_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp("data").join("file.py")
local_file.write("a+1")
return local_file
async def cleanup_global_workers():
for worker in Worker._instances:
await worker.close(report=False, executor_wait=False)
@pytest.fixture
def loop():
with check_instances():
with pristine_loop() as loop:
# Monkey-patch IOLoop.start to wait for loop stop
orig_start = loop.start
is_stopped = threading.Event()
is_stopped.set()
def start():
is_stopped.clear()
try:
orig_start()
finally:
is_stopped.set()
loop.start = start
yield loop
# Stop the loop in case it's still running
try:
sync(loop, cleanup_global_workers, callback_timeout=0.500)
loop.add_callback(loop.stop)
except RuntimeError as e:
if not re.match("IOLoop is clos(ed|ing)", str(e)):
raise
except TimeoutError:
pass
else:
is_stopped.wait()
@pytest.fixture
def loop_in_thread():
with pristine_loop() as loop:
thread = threading.Thread(target=loop.start, name="test IOLoop")
thread.daemon = True
thread.start()
loop_started = threading.Event()
loop.add_callback(loop_started.set)
loop_started.wait()
yield loop
loop.add_callback(loop.stop)
thread.join(timeout=5)
@pytest.fixture
def zmq_ctx():
import zmq
ctx = zmq.Context.instance()
yield ctx
ctx.destroy(linger=0)
@contextmanager
def pristine_loop():
IOLoop.clear_instance()
IOLoop.clear_current()
loop = IOLoop()
loop.make_current()
assert IOLoop.current() is loop
try:
yield loop
finally:
try:
loop.close(all_fds=True)
except (KeyError, ValueError):
pass
IOLoop.clear_instance()
IOLoop.clear_current()
@contextmanager
def mock_ipython():
from unittest import mock
from distributed._ipython_utils import remote_magic
ip = mock.Mock()
ip.user_ns = {}
ip.kernel = None
def get_ip():
return ip
with mock.patch("IPython.get_ipython", get_ip), mock.patch(
"distributed._ipython_utils.get_ipython", get_ip
):
yield ip
# cleanup remote_magic client cache
for kc in remote_magic._clients.values():
kc.stop_channels()
remote_magic._clients.clear()
original_config = copy.deepcopy(dask.config.config)
def reset_config():
dask.config.config.clear()
dask.config.config.update(copy.deepcopy(original_config))
def nodebug(func):
"""
A decorator to disable debug facilities during timing-sensitive tests.
Warning: this doesn't affect already created IOLoops.
"""
@functools.wraps(func)
def wrapped(*args, **kwargs):
old_asyncio_debug = os.environ.get("PYTHONASYNCIODEBUG")
if old_asyncio_debug is not None:
del os.environ["PYTHONASYNCIODEBUG"]
try:
return func(*args, **kwargs)
finally:
if old_asyncio_debug is not None:
os.environ["PYTHONASYNCIODEBUG"] = old_asyncio_debug
return wrapped
def nodebug_setup_module(module):
"""
A setup_module() that you can install in a test module to disable
debug facilities.
"""
module._old_asyncio_debug = os.environ.get("PYTHONASYNCIODEBUG")
if module._old_asyncio_debug is not None:
del os.environ["PYTHONASYNCIODEBUG"]
def nodebug_teardown_module(module):
"""
A teardown_module() that you can install in a test module to reenable
debug facilities.
"""
if module._old_asyncio_debug is not None:
os.environ["PYTHONASYNCIODEBUG"] = module._old_asyncio_debug
def inc(x):
return x + 1
def dec(x):
return x - 1
def mul(x, y):
return x * y
def div(x, y):
return x / y
def deep(n):
if n > 0:
return deep(n - 1)
else:
return True
def throws(x):
raise RuntimeError("hello!")
def double(x):
return x * 2
def slowinc(x, delay=0.02):
sleep(delay)
return x + 1
def slowdec(x, delay=0.02):
sleep(delay)
return x - 1
def slowdouble(x, delay=0.02):
sleep(delay)
return 2 * x
def randominc(x, scale=1):
from random import random
sleep(random() * scale)
return x + 1
def slowadd(x, y, delay=0.02):
sleep(delay)
return x + y
def slowsum(seq, delay=0.02):
sleep(delay)
return sum(seq)
def slowidentity(*args, **kwargs):
delay = kwargs.get("delay", 0.02)
sleep(delay)
if len(args) == 1:
return args[0]
else:
return args
class _UnhashableCallable:
# FIXME https://github.com/python/mypy/issues/4266
__hash__ = None # type: ignore
def __call__(self, x):
return x + 1
def run_for(duration, timer=time):
"""
Burn CPU for *duration* seconds.
"""
deadline = timer() + duration
while timer() <= deadline:
pass
# This dict grows at every varying() invocation
_varying_dict: defaultdict[str, int] = defaultdict(int)
_varying_key_gen = count()
class _ModuleSlot:
def __init__(self, modname, slotname):
self.modname = modname
self.slotname = slotname
def get(self):
return getattr(sys.modules[self.modname], self.slotname)
def varying(items):
"""
Return a function that returns a result (or raises an exception)
from *items* at each call.
"""
# cloudpickle would serialize the *values* of all globals
# used by *func* below, so we can't use `global <something>`.
# Instead look up the module by name to get the original namespace
# and not a copy.
slot = _ModuleSlot(__name__, "_varying_dict")
key = next(_varying_key_gen)
def func():
dct = slot.get()
i = dct[key]
if i == len(items):
raise IndexError
else:
x = items[i]
dct[key] = i + 1
if isinstance(x, Exception):
raise x
else:
return x
return func
def map_varying(itemslists):
"""
Like *varying*, but return the full specification for a map() call
on multiple items lists.
"""
def apply(func, *args, **kwargs):
return func(*args, **kwargs)
return apply, list(map(varying, itemslists))
async def geninc(x, delay=0.02):
await asyncio.sleep(delay)
return x + 1
async def asyncinc(x, delay=0.02):
await asyncio.sleep(delay)
return x + 1
_readone_queues: dict[Any, asyncio.Queue] = {}
async def readone(comm):
"""
Read one message at a time from a comm that reads lists of
messages.
"""
try:
q = _readone_queues[comm]
except KeyError:
q = _readone_queues[comm] = asyncio.Queue()
async def background_read():
while True:
try:
messages = await comm.read()
except CommClosedError:
break
for msg in messages:
q.put_nowait(msg)
q.put_nowait(None)
del _readone_queues[comm]
background_read()
msg = await q.get()
if msg is None:
raise CommClosedError
else:
return msg
def run_scheduler(q, nputs, config, port=0, **kwargs):
with dask.config.set(config):
from distributed import Scheduler
# On Python 2.7 and Unix, fork() is used to spawn child processes,
# so avoid inheriting the parent's IO loop.
with pristine_loop() as loop:
async def _():
scheduler = await Scheduler(
validate=True, host="127.0.0.1", port=port, **kwargs
)
for i in range(nputs):
q.put(scheduler.address)
await scheduler.finished()
try:
loop.run_sync(_)
finally:
loop.close(all_fds=True)
def run_worker(q, scheduler_q, config, **kwargs):
with dask.config.set(config):
from distributed import Worker
reset_logger_locks()
with log_errors():
with pristine_loop() as loop:
scheduler_addr = scheduler_q.get()
async def _():
worker = await Worker(scheduler_addr, validate=True, **kwargs)
q.put(worker.address)
await worker.finished()
try:
loop.run_sync(_)
finally:
loop.close(all_fds=True)
def run_nanny(q, scheduler_q, config, **kwargs):
with dask.config.set(config):
with log_errors():
with pristine_loop() as loop:
scheduler_addr = scheduler_q.get()
async def _():
worker = await Nanny(scheduler_addr, validate=True, **kwargs)
q.put(worker.address)
await worker.finished()
try:
loop.run_sync(_)
finally:
loop.close(all_fds=True)
@contextmanager
def check_active_rpc(loop, active_rpc_timeout=1):
active_before = set(rpc.active)
yield
# Some streams can take a bit of time to notice their peer
# has closed, and keep a coroutine (*) waiting for a CommClosedError
# before calling close_rpc() after a CommClosedError.
# This would happen especially if a non-localhost address is used,
# as Nanny does.
# (*) (example: gather_from_workers())
def fail():
pytest.fail(
"some RPCs left active by test: %s" % (set(rpc.active) - active_before)
)
async def wait():
await async_wait_for(
lambda: len(set(rpc.active) - active_before) == 0,
timeout=active_rpc_timeout,
fail_func=fail,
)
loop.run_sync(wait)
@pytest.fixture
def cluster_fixture(loop):
with cluster() as (scheduler, workers):
yield (scheduler, workers)
@pytest.fixture
def s(cluster_fixture):
scheduler, workers = cluster_fixture
return scheduler
@pytest.fixture
def a(cluster_fixture):
scheduler, workers = cluster_fixture
return workers[0]
@pytest.fixture
def b(cluster_fixture):
scheduler, workers = cluster_fixture
return workers[1]
@pytest.fixture
def client(loop, cluster_fixture):
scheduler, workers = cluster_fixture
with Client(scheduler["address"], loop=loop) as client:
yield client
# Compatibility. A lot of tests simply use `c` as fixture name
c = client
@pytest.fixture
def client_secondary(loop, cluster_fixture):
scheduler, workers = cluster_fixture
with Client(scheduler["address"], loop=loop) as client:
yield client
@contextmanager
def tls_cluster_context(
worker_kwargs=None, scheduler_kwargs=None, security=None, **kwargs
):
security = security or tls_only_security()
worker_kwargs = assoc(worker_kwargs or {}, "security", security)
scheduler_kwargs = assoc(scheduler_kwargs or {}, "security", security)
with cluster(
worker_kwargs=worker_kwargs, scheduler_kwargs=scheduler_kwargs, **kwargs
) as (s, workers):
yield s, workers
@pytest.fixture
def tls_cluster(loop, security):
with tls_cluster_context(security=security) as (scheduler, workers):
yield (scheduler, workers)
@pytest.fixture
def tls_client(tls_cluster, loop, security):
s, workers = tls_cluster
with Client(s["address"], security=security, loop=loop) as client:
yield client
@pytest.fixture
def security():
return tls_only_security()
@contextmanager
def cluster(
nworkers=2,
nanny=False,
worker_kwargs={},
active_rpc_timeout=10,
disconnect_timeout=20,
scheduler_kwargs={},
config={},
):
ws = weakref.WeakSet()
enable_proctitle_on_children()
with clean(timeout=active_rpc_timeout, threads=False) as loop:
if nanny:
_run_worker = run_nanny
else:
_run_worker = run_worker
# The scheduler queue will receive the scheduler's address
scheduler_q = mp_context.Queue()
# Launch scheduler
scheduler = mp_context.Process(
name="Dask cluster test: Scheduler",
target=run_scheduler,
args=(scheduler_q, nworkers + 1, config),
kwargs=scheduler_kwargs,
)
ws.add(scheduler)
scheduler.daemon = True
scheduler.start()
# Launch workers
workers = []
for i in range(nworkers):
q = mp_context.Queue()
fn = "_test_worker-%s" % uuid.uuid4()
kwargs = merge(
{
"nthreads": 1,
"local_directory": fn,
"memory_limit": system.MEMORY_LIMIT,
},
worker_kwargs,
)
proc = mp_context.Process(
name="Dask cluster test: Worker",
target=_run_worker,
args=(q, scheduler_q, config),
kwargs=kwargs,
)
ws.add(proc)
workers.append({"proc": proc, "queue": q, "dir": fn})
for worker in workers:
worker["proc"].start()
try:
for worker in workers:
worker["address"] = worker["queue"].get(timeout=5)
except queue.Empty:
pytest.xfail("Worker failed to start in test")
saddr = scheduler_q.get()
start = time()
try:
try:
security = scheduler_kwargs["security"]
rpc_kwargs = {"connection_args": security.get_connection_args("client")}
except KeyError:
rpc_kwargs = {}
with rpc(saddr, **rpc_kwargs) as s:
while True:
nthreads = loop.run_sync(s.ncores)
if len(nthreads) == nworkers:
break
if time() - start > 5:
raise Exception("Timeout on cluster creation")
# avoid sending processes down to function
yield {"address": saddr}, [
{"address": w["address"], "proc": weakref.ref(w["proc"])}
for w in workers
]
finally:
logger.debug("Closing out test cluster")
loop.run_sync(
lambda: disconnect_all(
[w["address"] for w in workers],
timeout=disconnect_timeout,
rpc_kwargs=rpc_kwargs,
)
)
loop.run_sync(
lambda: disconnect(
saddr, timeout=disconnect_timeout, rpc_kwargs=rpc_kwargs
)
)
scheduler.terminate()
scheduler_q.close()
scheduler_q._reader.close()
scheduler_q._writer.close()
for w in workers:
w["proc"].terminate()
w["queue"].close()
w["queue"]._reader.close()
w["queue"]._writer.close()
scheduler.join(2)
del scheduler
for proc in [w["proc"] for w in workers]:
proc.join(timeout=30)
with suppress(UnboundLocalError):
del worker, w, proc
del workers[:]
for fn in glob("_test_worker-*"):
with suppress(OSError):
shutil.rmtree(fn)
try:
client = default_client()
except ValueError:
pass
else:
client.close()
start = time()
while any(proc.is_alive() for proc in ws):
text = str(list(ws))
sleep(0.2)
assert time() < start + 5, ("Workers still around after five seconds", text)
async def disconnect(addr, timeout=3, rpc_kwargs=None):
rpc_kwargs = rpc_kwargs or {}
async def do_disconnect():
with rpc(addr, **rpc_kwargs) as w:
# If the worker was killed hard (e.g. sigterm) during test runtime,
# we do not know at this point and may not be able to connect
with suppress(EnvironmentError, CommClosedError):
# Do not request a reply since comms will be closed by the
# worker before a reply can be made and we will always trigger
# the timeout
await w.terminate(reply=False)
await asyncio.wait_for(do_disconnect(), timeout=timeout)
async def disconnect_all(addresses, timeout=3, rpc_kwargs=None):
await asyncio.gather(*(disconnect(addr, timeout, rpc_kwargs) for addr in addresses))
def gen_test(timeout: float = _TEST_TIMEOUT) -> Callable[[Callable], Callable]:
"""Coroutine test
@pytest.mark.parametrize("param", [1, 2, 3])
@gen_test(timeout=5)
async def test_foo(param):
await ... # use tornado coroutines
@gen_test(timeout=5)
async def test_foo():
await ... # use tornado coroutines
"""
assert timeout, (
"timeout should always be set and it should be smaller than the global one from"
"pytest-timeout"
)
def _(func):
def test_func(*args, **kwargs):
with clean() as loop:
injected_func = functools.partial(func, *args, **kwargs)
if iscoroutinefunction(func):
cor = injected_func
else:
cor = gen.coroutine(injected_func)
loop.run_sync(cor, timeout=timeout)
# Patch the signature so pytest can inject fixtures
test_func.__signature__ = inspect.signature(func)
return test_func
return _
async def start_cluster(
nthreads: list[tuple[str, int] | tuple[str, int, dict]],
scheduler_addr: str,
loop: IOLoop,
security: Security | dict[str, Any] | None = None,
Worker: type[ServerNode] = Worker,
scheduler_kwargs: dict[str, Any] = {},
worker_kwargs: dict[str, Any] = {},
) -> tuple[Scheduler, list[ServerNode]]:
s = await Scheduler(
loop=loop,
validate=True,
security=security,
port=0,
host=scheduler_addr,
**scheduler_kwargs,
)
workers = [
Worker(
s.address,
nthreads=ncore[1],
name=i,
security=security,
loop=loop,
validate=True,
host=ncore[0],
**(
merge(worker_kwargs, ncore[2]) # type: ignore
if len(ncore) > 2
else worker_kwargs
),
)
for i, ncore in enumerate(nthreads)
]
await asyncio.gather(*workers)
start = time()
while (
len(s.workers) < len(nthreads)
or any(ws.status != Status.running for ws in s.workers.values())
or any(comm.comm is None for comm in s.stream_comms.values())
):
await asyncio.sleep(0.01)
if time() > start + 30:
await asyncio.gather(*(w.close(timeout=1) for w in workers))
await s.close(fast=True)
raise TimeoutError("Cluster creation timeout")
return s, workers
async def end_cluster(s, workers):
logger.debug("Closing out test cluster")
async def end_worker(w):
with suppress(TimeoutError, CommClosedError, EnvironmentError):
await w.close(report=False)
await asyncio.gather(*(end_worker(w) for w in workers))
await s.close() # wait until scheduler stops completely
s.stop()
def gen_cluster(
nthreads: list[tuple[str, int] | tuple[str, int, dict]] = [
("127.0.0.1", 1),
("127.0.0.1", 2),
],
ncores: None = None, # deprecated
scheduler="127.0.0.1",
timeout: float = _TEST_TIMEOUT,
security: Security | dict[str, Any] | None = None,
Worker: type[ServerNode] = Worker,
client: bool = False,
scheduler_kwargs: dict[str, Any] = {},
worker_kwargs: dict[str, Any] = {},
client_kwargs: dict[str, Any] = {},
active_rpc_timeout: float = 1,
config: dict[str, Any] = {},
clean_kwargs: dict[str, Any] = {},
allow_unclosed: bool = False,
cluster_dump_directory: str | Literal[False] = "test_cluster_dump",
) -> Callable[[Callable], Callable]:
from distributed import Client
""" Coroutine test with small cluster
@gen_cluster()
async def test_foo(scheduler, worker1, worker2):
await ... # use tornado coroutines
@pytest.mark.parametrize("param", [1, 2, 3])
@gen_cluster()
async def test_foo(scheduler, worker1, worker2, param):
await ... # use tornado coroutines
@gen_cluster()
async def test_foo(scheduler, worker1, worker2, pytest_fixture_a, pytest_fixture_b):
await ... # use tornado coroutines
See also:
start
end
"""
assert timeout, (
"timeout should always be set and it should be smaller than the global one from"
"pytest-timeout"
)
if ncores is not None:
warnings.warn("ncores= has moved to nthreads=", stacklevel=2)
nthreads = ncores
scheduler_kwargs = merge(
{"dashboard": False, "dashboard_address": ":0"}, scheduler_kwargs
)
worker_kwargs = merge(
{"memory_limit": system.MEMORY_LIMIT, "death_timeout": 15}, worker_kwargs
)
def _(func):
if not iscoroutinefunction(func):
raise RuntimeError("gen_cluster only works for coroutine functions.")
@functools.wraps(func)
def test_func(*outer_args, **kwargs):
result = None
workers = []
with clean(timeout=active_rpc_timeout, **clean_kwargs) as loop:
async def coro():
with dask.config.set(config):
s = False
for _ in range(60):
try:
s, ws = await start_cluster(
nthreads,
scheduler,
loop,
security=security,
Worker=Worker,
scheduler_kwargs=scheduler_kwargs,
worker_kwargs=worker_kwargs,
)
except Exception as e:
logger.error(
"Failed to start gen_cluster: "
f"{e.__class__.__name__}: {e}; retrying",
exc_info=True,
)
await asyncio.sleep(1)
else:
workers[:] = ws
args = [s] + workers
break
if s is False:
raise Exception("Could not start cluster")
if client:
c = await Client(
s.address,
loop=loop,
security=security,
asynchronous=True,
**client_kwargs,
)
args = [c] + args
try:
coro = func(*args, *outer_args, **kwargs)
task = asyncio.create_task(coro)
coro2 = asyncio.wait_for(asyncio.shield(task), timeout)
result = await coro2
if s.validate:
s.validate_state()
except asyncio.TimeoutError:
assert task
buffer = io.StringIO()
# This stack indicates where the coro/test is suspended
task.print_stack(file=buffer)
if cluster_dump_directory:
await dump_cluster_state(
s,
ws,
output_dir=cluster_dump_directory,
func_name=func.__name__,
)
task.cancel()
while not task.cancelled():
await asyncio.sleep(0.01)
# Remove as much of the traceback as possible; it's
# uninteresting boilerplate from utils_test and asyncio and
# not from the code being tested.
raise TimeoutError(
f"Test timeout after {timeout}s.\n"
"========== Test stack trace starts here ==========\n"
f"{buffer.getvalue()}"
) from None
except pytest.xfail.Exception:
raise
except Exception:
if cluster_dump_directory and not has_pytestmark(
test_func, "xfail"
):
await dump_cluster_state(
s,
ws,
output_dir=cluster_dump_directory,
func_name=func.__name__,
)
raise
finally:
if client and c.status not in ("closing", "closed"):
await c._close(fast=s.status == Status.closed)
await end_cluster(s, workers)
await asyncio.wait_for(cleanup_global_workers(), 1)
try:
c = await default_client()
except ValueError:
pass
else:
await c._close(fast=True)
def get_unclosed():
return [c for c in Comm._instances if not c.closed()] + [
c
for c in _global_clients.values()
if c.status != "closed"
]
try:
start = time()
while time() < start + 60:
gc.collect()
if not get_unclosed():
break
await asyncio.sleep(0.05)
else:
if allow_unclosed:
print(f"Unclosed Comms: {get_unclosed()}")
else:
raise RuntimeError("Unclosed Comms", get_unclosed())
finally:
Comm._instances.clear()
_global_clients.clear()
return result
result = loop.run_sync(
coro, timeout=timeout * 2 if timeout else timeout
)
for w in workers:
if getattr(w, "data", None):
try:
w.data.clear()
except OSError:
# zict backends can fail if their storage directory
# was already removed
pass
del w.data
return result
# Patch the signature so pytest can inject fixtures
orig_sig = inspect.signature(func)
args = [None] * (1 + len(nthreads)) # scheduler, *workers
if client:
args.insert(0, None)
bound = orig_sig.bind_partial(*args)
test_func.__signature__ = orig_sig.replace(
parameters=[
p
for name, p in orig_sig.parameters.items()
if name not in bound.arguments
]
)
return test_func
return _
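# A minimal usage sketch, assuming the conventional fixture names: with client=True
# the decorated coroutine receives the Client first, then the scheduler and workers.
#
# @gen_cluster(client=True)
# async def test_with_client(c, s, a, b):
#     future = c.submit(lambda x: x + 1, 1)
#     assert await future == 2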
async def dump_cluster_state(
s: Scheduler, ws: list[ServerNode], output_dir: str, func_name: str
) -> None:
"""A variant of Client.dump_cluster_state, which does not rely on any of the below
to work:
- Having a client at all
- Client->Scheduler comms
- Scheduler->Worker comms (unless using Nannies)
"""
scheduler_info = s._to_dict()
workers_info: dict[str, Any]
versions_info = version_module.get_versions()
if not ws or isinstance(ws[0], Worker):
workers_info = {w.address: w._to_dict() for w in ws}
else:
workers_info = await s.broadcast(msg={"op": "dump_state"}, on_error="return")
workers_info = {
k: repr(v) if isinstance(v, Exception) else v
for k, v in workers_info.items()
}
state = {
"scheduler": scheduler_info,
"workers": workers_info,
"versions": versions_info,
}
os.makedirs(output_dir, exist_ok=True)
fname = os.path.join(output_dir, func_name) + ".yaml"
with open(fname, "w") as fh:
yaml.safe_dump(state, fh) # Automatically convert tuples to lists
print(f"Dumped cluster state to {fname}")
def raises(func, exc=Exception):
try:
func()
return False
except exc:
return True
def terminate_process(proc):
if proc.poll() is None:
if sys.platform.startswith("win"):
proc.send_signal(signal.CTRL_BREAK_EVENT)
else:
proc.send_signal(signal.SIGINT)
try:
proc.wait(30)
finally:
# Make sure we don't leave the process lingering around
with suppress(OSError):
proc.kill()
@contextmanager
def popen(args, **kwargs):
kwargs["stdout"] = subprocess.PIPE
kwargs["stderr"] = subprocess.PIPE
if sys.platform.startswith("win"):
# Allow using CTRL_C_EVENT / CTRL_BREAK_EVENT
kwargs["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP
dump_stdout = False
args = list(args)
if sys.platform.startswith("win"):
args[0] = os.path.join(sys.prefix, "Scripts", args[0])
else:
args[0] = os.path.join(
os.environ.get("DESTDIR", "") + sys.prefix, "bin", args[0]
)
proc = subprocess.Popen(args, **kwargs)
try:
yield proc
except Exception:
dump_stdout = True
raise
finally:
try:
terminate_process(proc)
finally:
# XXX Also dump stdout if return code != 0 ?
out, err = proc.communicate()
if dump_stdout:
print("\n\nPrint from stderr\n %s\n=================\n" % args[0][0])
print(err.decode())
print("\n\nPrint from stdout\n=================\n")
print(out.decode())
def wait_for_port(address, timeout=5):
assert isinstance(address, tuple)
deadline = time() + timeout
while True:
timeout = deadline - time()
if timeout < 0:
raise RuntimeError(f"Failed to connect to {address}")
try:
sock = socket.create_connection(address, timeout=timeout)
except OSError:
pass
else:
sock.close()
break
def wait_for(predicate, timeout, fail_func=None, period=0.001):
deadline = time() + timeout
while not predicate():
sleep(period)
if time() > deadline:
if fail_func is not None:
fail_func()
pytest.fail(f"condition not reached until {timeout} seconds")
async def async_wait_for(predicate, timeout, fail_func=None, period=0.001):
deadline = time() + timeout
while not predicate():
await asyncio.sleep(period)
if time() > deadline:
if fail_func is not None:
fail_func()
pytest.fail(f"condition not reached until {timeout} seconds")
@memoize
def has_ipv6():
"""
Return whether IPv6 is locally functional. This doesn't guarantee IPv6
is properly configured outside of localhost.
"""
if os.getenv("DISABLE_IPV6") == "1":
return False
serv = cli = None
try:
serv = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
serv.bind(("::", 0))
serv.listen(5)
cli = socket.create_connection(serv.getsockname()[:2])
return True
except OSError:
return False
finally:
if cli is not None:
cli.close()
if serv is not None:
serv.close()
if has_ipv6():
def requires_ipv6(test_func):
return test_func
else:
requires_ipv6 = pytest.mark.skip("ipv6 required")
async def assert_can_connect(addr, timeout=0.5, **kwargs):
"""
Check that it is possible to connect to the distributed *addr*
within the given *timeout*.
"""
comm = await connect(addr, timeout=timeout, **kwargs)
comm.abort()
async def assert_cannot_connect(
addr, timeout=0.5, exception_class=EnvironmentError, **kwargs
):
"""
Check that it is impossible to connect to the distributed *addr*
within the given *timeout*.
"""
with pytest.raises(exception_class):
comm = await connect(addr, timeout=timeout, **kwargs)
comm.abort()
async def assert_can_connect_from_everywhere_4_6(port, protocol="tcp", **kwargs):
"""
Check that the local *port* is reachable from all IPv4 and IPv6 addresses.
"""
futures = [
assert_can_connect("%s://127.0.0.1:%d" % (protocol, port), **kwargs),
assert_can_connect("%s://%s:%d" % (protocol, get_ip(), port), **kwargs),
]
if has_ipv6():
futures += [
assert_can_connect("%s://[::1]:%d" % (protocol, port), **kwargs),
assert_can_connect("%s://[%s]:%d" % (protocol, get_ipv6(), port), **kwargs),
]
await asyncio.gather(*futures)
async def assert_can_connect_from_everywhere_4(port, protocol="tcp", **kwargs):
"""
Check that the local *port* is reachable from all IPv4 addresses.
"""
futures = [
assert_can_connect("%s://127.0.0.1:%d" % (protocol, port), **kwargs),
assert_can_connect("%s://%s:%d" % (protocol, get_ip(), port), **kwargs),
]
if has_ipv6():
futures += [
assert_cannot_connect("%s://[::1]:%d" % (protocol, port), **kwargs),
assert_cannot_connect(
"%s://[%s]:%d" % (protocol, get_ipv6(), port), **kwargs
),
]
await asyncio.gather(*futures)
async def assert_can_connect_locally_4(port, **kwargs):
"""
Check that the local *port* is only reachable from local IPv4 addresses.
"""
futures = [assert_can_connect("tcp://127.0.0.1:%d" % port, **kwargs)]
if get_ip() != "127.0.0.1": # No outside IPv4 connectivity?
futures += [assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), **kwargs)]
if has_ipv6():
futures += [
assert_cannot_connect("tcp://[::1]:%d" % port, **kwargs),
assert_cannot_connect("tcp://[%s]:%d" % (get_ipv6(), port), **kwargs),
]
await asyncio.gather(*futures)
async def assert_can_connect_from_everywhere_6(port, **kwargs):
"""
Check that the local *port* is reachable from all IPv6 addresses.
"""
assert has_ipv6()
futures = [
assert_cannot_connect("tcp://127.0.0.1:%d" % port, **kwargs),
assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), **kwargs),
assert_can_connect("tcp://[::1]:%d" % port, **kwargs),
assert_can_connect("tcp://[%s]:%d" % (get_ipv6(), port), **kwargs),
]
await asyncio.gather(*futures)
async def assert_can_connect_locally_6(port, **kwargs):
"""
Check that the local *port* is only reachable from local IPv6 addresses.
"""
assert has_ipv6()
futures = [
assert_cannot_connect("tcp://127.0.0.1:%d" % port, **kwargs),
assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), **kwargs),
assert_can_connect("tcp://[::1]:%d" % port, **kwargs),
]
if get_ipv6() != "::1": # No outside IPv6 connectivity?
futures += [
assert_cannot_connect("tcp://[%s]:%d" % (get_ipv6(), port), **kwargs)
]
await asyncio.gather(*futures)
@contextmanager
def captured_logger(logger, level=logging.INFO, propagate=None):
"""Capture output from the given Logger."""
if isinstance(logger, str):
logger = logging.getLogger(logger)
orig_level = logger.level
orig_handlers = logger.handlers[:]
if propagate is not None:
orig_propagate = logger.propagate
logger.propagate = propagate
sio = io.StringIO()
logger.handlers[:] = [logging.StreamHandler(sio)]
logger.setLevel(level)
try:
yield sio
finally:
logger.handlers[:] = orig_handlers
logger.setLevel(orig_level)
if propagate is not None:
logger.propagate = orig_propagate
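# Minimal usage sketch (logger name and message are illustrative):
#
# with captured_logger("distributed.scheduler") as sio:
#     ...  # exercise code that logs through that logger
# assert "expected message" in sio.getvalue()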
@contextmanager
def captured_handler(handler):
"""Capture output from the given logging.StreamHandler."""
assert isinstance(handler, logging.StreamHandler)
orig_stream = handler.stream
handler.stream = io.StringIO()
try:
yield handler.stream
finally:
handler.stream = orig_stream
@contextmanager
def new_config(new_config):
"""
Temporarily change configuration dictionary.
"""
from .config import defaults
config = dask.config.config
orig_config = copy.deepcopy(config)
try:
config.clear()
config.update(copy.deepcopy(defaults))
dask.config.update(config, new_config)
initialize_logging(config)
yield
finally:
config.clear()
config.update(orig_config)
initialize_logging(config)
@contextmanager
def new_environment(changes):
saved_environ = os.environ.copy()
os.environ.update(changes)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
@contextmanager
def new_config_file(c):
"""
Temporarily change configuration file to match dictionary *c*.
"""
import yaml
old_file = os.environ.get("DASK_CONFIG")
fd, path = tempfile.mkstemp(prefix="dask-config")
try:
with os.fdopen(fd, "w") as f:
f.write(yaml.dump(c))
os.environ["DASK_CONFIG"] = path
try:
yield
finally:
if old_file:
os.environ["DASK_CONFIG"] = old_file
else:
del os.environ["DASK_CONFIG"]
finally:
os.remove(path)
certs_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "tests"))
def get_cert(filename):
"""
Get the path to one of the test TLS certificates.
"""
path = os.path.join(certs_dir, filename)
assert os.path.exists(path), path
return path
def tls_config():
"""
A functional TLS configuration with our test certs.
"""
ca_file = get_cert("tls-ca-cert.pem")
keycert = get_cert("tls-key-cert.pem")
return {
"distributed": {
"comm": {
"tls": {
"ca-file": ca_file,
"client": {"cert": keycert},
"scheduler": {"cert": keycert},
"worker": {"cert": keycert},
}
}
}
}
def tls_only_config():
"""
A functional TLS configuration with our test certs, disallowing
plain TCP communications.
"""
c = tls_config()
c["distributed"]["comm"]["require-encryption"] = True
return c
def tls_security():
"""
A Security object with proper TLS configuration.
"""
with new_config(tls_config()):
sec = Security()
return sec
def tls_only_security():
"""
A Security object with proper TLS configuration and disallowing plain
TCP communications.
"""
with new_config(tls_only_config()):
sec = Security()
assert sec.require_encryption
return sec
def get_server_ssl_context(
certfile="tls-cert.pem", keyfile="tls-key.pem", ca_file="tls-ca-cert.pem"
):
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH, cafile=get_cert(ca_file))
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_cert_chain(get_cert(certfile), get_cert(keyfile))
return ctx
def get_client_ssl_context(
certfile="tls-cert.pem", keyfile="tls-key.pem", ca_file="tls-ca-cert.pem"
):
ctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH, cafile=get_cert(ca_file))
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_cert_chain(get_cert(certfile), get_cert(keyfile))
return ctx
def bump_rlimit(limit, desired):
resource = pytest.importorskip("resource")
try:
soft, hard = resource.getrlimit(limit)
if soft < desired:
resource.setrlimit(limit, (desired, max(hard, desired)))
except Exception as e:
pytest.skip(f"rlimit too low ({soft}) and can't be increased: {e}")
def gen_tls_cluster(**kwargs):
kwargs.setdefault("nthreads", [("tls://127.0.0.1", 1), ("tls://127.0.0.1", 2)])
return gen_cluster(
scheduler="tls://127.0.0.1", security=tls_only_security(), **kwargs
)
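# Minimal usage sketch: gen_tls_cluster behaves like gen_cluster, but scheduler,
# workers and (optionally) the client talk over TLS using the test certificates.
#
# @gen_tls_cluster(client=True)
# async def test_tls(c, s, a, b):
#     assert s.address.startswith("tls://")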
@contextmanager
def save_sys_modules():
    # Snapshot copies so that entries added inside the context can be detected
    old_modules = sys.modules.copy()
    old_path = sys.path[:]
    try:
        yield
    finally:
        # Iterate in reverse so deleting by index does not shift later indices
        for i, elem in reversed(list(enumerate(sys.path))):
            if elem not in old_path:
                del sys.path[i]
        for elem in list(sys.modules):
            if elem not in old_modules:
                del sys.modules[elem]
@contextmanager
def check_thread_leak():
"""Context manager to ensure we haven't leaked any threads"""
active_threads_start = threading.enumerate()
yield
start = time()
while True:
bad_threads = [
thread
for thread in threading.enumerate()
if thread not in active_threads_start
and "Threaded" not in thread.name
and "watch message" not in thread.name
and "TCP-Executor" not in thread.name
# TODO: Make sure profile thread is cleaned up
# and remove the line below
and "Profile" not in thread.name
# asyncio default executor thread pool is not shut down until loop
# is shut down
and "asyncio_" not in thread.name
]
if not bad_threads:
break
else:
sleep(0.01)
if time() > start + 5:
# Raise an error with information about leaked threads
from distributed import profile
bad_thread = bad_threads[0]
call_stacks = profile.call_stack(sys._current_frames()[bad_thread.ident])
assert False, (bad_thread, call_stacks)
@contextmanager
def check_process_leak(check=True):
for proc in mp_context.active_children():
proc.terminate()
yield
if check:
for i in range(200):
if not set(mp_context.active_children()):
break
else:
sleep(0.2)
else:
assert not mp_context.active_children()
for proc in mp_context.active_children():
proc.terminate()
@contextmanager
def check_instances():
Client._instances.clear()
Worker._instances.clear()
Scheduler._instances.clear()
SpecCluster._instances.clear()
Worker._initialized_clients.clear()
# assert all(n.status == "closed" for n in Nanny._instances), {
# n: n.status for n in Nanny._instances
# }
Nanny._instances.clear()
_global_clients.clear()
Comm._instances.clear()
yield
start = time()
while set(_global_clients):
sleep(0.1)
assert time() < start + 10
_global_clients.clear()
for w in Worker._instances:
with suppress(RuntimeError): # closed IOLoop
w.loop.add_callback(w.close, report=False, executor_wait=False)
if w.status in RUNNING:
w.loop.add_callback(w.close)
Worker._instances.clear()
start = time()
while any(c.status != "closed" for c in Worker._initialized_clients):
sleep(0.1)
assert time() < start + 10
Worker._initialized_clients.clear()
for i in range(5):
if all(c.closed() for c in Comm._instances):
break
else:
sleep(0.1)
else:
L = [c for c in Comm._instances if not c.closed()]
Comm._instances.clear()
print("Unclosed Comms", L)
# raise ValueError("Unclosed Comms", L)
assert all(
n.status == Status.closed or n.status == Status.init for n in Nanny._instances
), {n: n.status for n in Nanny._instances}
# assert not list(SpecCluster._instances) # TODO
assert all(c.status == Status.closed for c in SpecCluster._instances), list(
SpecCluster._instances
)
SpecCluster._instances.clear()
Nanny._instances.clear()
DequeHandler.clear_all_instances()
@contextmanager
def clean(threads=not WINDOWS, instances=True, timeout=1, processes=True):
with check_thread_leak() if threads else nullcontext():
with pristine_loop() as loop:
with check_process_leak(check=processes):
with check_instances() if instances else nullcontext():
with check_active_rpc(loop, timeout):
reset_config()
dask.config.set({"distributed.comm.timeouts.connect": "5s"})
# Restore default logging levels
# XXX use pytest hooks/fixtures instead?
for name, level in logging_levels.items():
logging.getLogger(name).setLevel(level)
yield loop
@pytest.fixture
def cleanup():
with clean():
yield
class TaskStateMetadataPlugin(WorkerPlugin):
"""WorkPlugin to populate TaskState.metadata"""
def setup(self, worker):
self.worker = worker
def transition(self, key, start, finish, **kwargs):
ts = self.worker.tasks[key]
if start == "ready" and finish == "executing":
ts.metadata["start_time"] = time()
elif start == "executing" and finish == "memory":
ts.metadata["stop_time"] = time()
class LockedComm(TCP):
def __init__(self, comm, read_event, read_queue, write_event, write_queue):
self.write_event = write_event
self.write_queue = write_queue
self.read_event = read_event
self.read_queue = read_queue
self.comm = comm
assert isinstance(comm, TCP)
def __getattr__(self, name):
return getattr(self.comm, name)
async def write(self, msg, serializers=None, on_error="message"):
if self.write_queue:
await self.write_queue.put((self.comm.peer_address, msg))
if self.write_event:
await self.write_event.wait()
return await self.comm.write(msg, serializers=serializers, on_error=on_error)
async def read(self, deserializers=None):
msg = await self.comm.read(deserializers=deserializers)
if self.read_queue:
await self.read_queue.put((self.comm.peer_address, msg))
if self.read_event:
await self.read_event.wait()
return msg
class _LockedCommPool(ConnectionPool):
"""A ConnectionPool wrapper to intercept network traffic between servers
This wrapper can be attached to a running server to intercept outgoing read or write requests in test environments.
Examples
--------
>>> w = await Worker(...)
>>> read_event = asyncio.Event()
>>> read_queue = asyncio.Queue()
>>> w.rpc = _LockedCommPool(
w.rpc,
read_event=read_event,
read_queue=read_queue,
)
# It might be necessary to remove all existing comms
# if the wrapped pool has been used before
>>> w.remove(remote_address)
>>> async def ping_pong():
return await w.rpc(remote_address).ping()
>>> with pytest.raises(asyncio.TimeoutError):
>>> await asyncio.wait_for(ping_pong(), 0.01)
>>> read_event.set()
>>> await ping_pong()
"""
def __init__(
self, pool, read_event=None, read_queue=None, write_event=None, write_queue=None
):
self.write_event = write_event
self.write_queue = write_queue
self.read_event = read_event
self.read_queue = read_queue
self.pool = pool
def __getattr__(self, name):
return getattr(self.pool, name)
async def connect(self, *args, **kwargs):
comm = await self.pool.connect(*args, **kwargs)
return LockedComm(
comm, self.read_event, self.read_queue, self.write_event, self.write_queue
)
def xfail_ssl_issue5601():
"""Work around https://github.com/dask/distributed/issues/5601 where any test that
inits Security.temporary() crashes on MacOS GitHub Actions CI
"""
pytest.importorskip("cryptography")
try:
Security.temporary()
except ImportError:
if MACOS:
pytest.xfail(reason="distributed#5601")
raise
def assert_worker_story(
story: list[tuple], expect: list[tuple], *, strict: bool = False
) -> None:
"""Test the output of ``Worker.story``
Parameters
==========
story: list[tuple]
Output of Worker.story
expect: list[tuple]
        Expected events. Each expected event must contain exactly two fewer fields
        than the actual events (the last two fields of an actual event are always
        the stimulus_id and the timestamp).
Elements of the expect tuples can be
- callables, which accept a single element of the event tuple as argument and
return True for match and False for no match;
- arbitrary objects, which are compared with a == b
e.g.
.. code-block:: python
expect=[
("x", "missing", "fetch", "fetch", {}),
("gather-dependencies", worker_addr, lambda set_: "x" in set_),
]
strict: bool, optional
If True, the story must contain exactly as many events as expect.
If False (the default), the story may contain more events than expect; extra
events are ignored.
"""
now = time()
prev_ts = 0.0
for ev in story:
try:
assert len(ev) > 2
assert isinstance(ev, tuple)
assert isinstance(ev[-2], str) and ev[-2] # stimulus_id
assert isinstance(ev[-1], float) # timestamp
assert prev_ts <= ev[-1] # Timestamps are monotonic ascending
# Timestamps are within the last hour. It's been observed that a timestamp
# generated in a Nanny process can be a few milliseconds in the future.
assert now - 3600 < ev[-1] <= now + 1
prev_ts = ev[-1]
except AssertionError:
raise AssertionError(
f"Malformed story event: {ev}\nin story:\n{_format_story(story)}"
)
try:
if strict and len(story) != len(expect):
raise StopIteration()
story_it = iter(story)
for ev_expect in expect:
while True:
event = next(story_it)
# Ignore (stimulus_id, timestamp)
event = event[:-2]
if len(event) == len(ev_expect) and all(
ex(ev) if callable(ex) else ev == ex
for ev, ex in zip(event, ev_expect)
):
break
except StopIteration:
raise AssertionError(
f"assert_worker_story(strict={strict}) failed\n"
f"story:\n{_format_story(story)}\n"
f"expect:\n{_format_story(expect)}"
) from None
def _format_story(story: list[tuple]) -> str:
if not story:
return "(empty story)"
return "- " + "\n- ".join(str(ev) for ev in story)
class BrokenComm(Comm):
peer_address = ""
local_address = ""
def close(self):
pass
def closed(self):
return True
def abort(self):
pass
def read(self, deserializers=None):
raise OSError()
def write(self, msg, serializers=None, on_error=None):
raise OSError()
def has_pytestmark(test_func: Callable, name: str) -> bool:
"""Return True if the test function is marked by the given @pytest.mark.<name>;
False otherwise.
FIXME doesn't work with individually marked parameters inside
@pytest.mark.parametrize
"""
marks = getattr(test_func, "pytestmark", [])
return any(mark.name == name for mark in marks)
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import electrum
from electrum.bitcoin import TYPE_ADDRESS
from electrum import WalletStorage, Wallet
from electrum_gui.kivy.i18n import _
from electrum.paymentrequest import InvoiceStore
from electrum.util import profiler, InvalidPassword
from electrum.plugins import run_hook
from electrum.util import format_satoshis, format_satoshis_plain
from electrum.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum_gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum_gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum_gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum_gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble
from .uix.dialogs import OutputList, OutputItem
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# register widget cache for keeping memory down timeout to forever to cache
# the data
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum_gui.kivy.uix.screens')
# Register fonts without this you won't be able to use bold/italic...
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum.util import base_units
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
blockchain_checkpoint = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
host, port, protocol, proxy, auto_connect = self.network.get_parameters()
self.network.set_parameters(host, port, protocol, proxy, self.auto_connect)
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = 's'
def cb2(host):
from electrum.bitcoin import NetworkConstants
pp = servers.get(host, NetworkConstants.DEFAULT_PORTS)
port = pp.get(protocol, '')
popup.ids.host.text = host
popup.ids.port.text = port
servers = self.network.get_servers()
ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
for index, b in self.network.blockchains.items():
if name == self.network.get_blockchain_name(b):
self.network.follow_chain(index)
#self.block
names = [self.network.blockchains[b].get_name() for b in chains]
if len(names) >1:
ChoiceDialog(_('Choose your chain'), names, '', cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
self.electrum_config.set_key('use_change', self.use_change, True)
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
if intent.getScheme() != 'bitcoin':
return
uri = intent.getDataString()
self.set_URI(uri)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
self._trigger_update_history()
def _get_bu(self):
return self.electrum_config.get('base_unit', 'mBTC')
def _set_bu(self, value):
assert value in base_units.keys()
self.electrum_config.set_key('base_unit', value, True)
self._trigger_update_status()
self._trigger_update_history()
base_unit = AliasProperty(_get_bu, _set_bu)
status = StringProperty('')
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
rate = self.fx.exchange_rate()
if not rate:
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if not rate:
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
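    # Worked example with illustrative numbers: at an exchange rate of 100 fiat/BTC,
    # fiat_to_btc("1") yields int(1e8 * 1 / 100) == 1_000_000 satoshis, which is then
    # formatted in the currently selected base unit.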
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
    '''The orientation the app window is currently displayed in.
    Can be one of `landscape` or `portrait`.
    :data:`orientation` is a read only `AliasProperty`. Defaults to 'landscape'.
    '''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
    '''Tries to ascertain the kind of device the app is running on.
    Can be one of `tablet` or `phone`.
    :data:`ui_mode` is a read only `AliasProperty`. Defaults to 'phone'.
    '''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None
App.__init__(self)#, **kwargs)
title = _('Electrum App')
self.electrum_config = config = kwargs.get('config', None)
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None)
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
host, port, protocol, proxy_config, auto_connect = self.network.get_parameters()
self.server_host = host
self.server_port = port
self.auto_connect = auto_connect
self.proxy_config = proxy_config if proxy_config else {}
self.plugins = kwargs.get('plugins', [])
self.gui_object = kwargs.get('gui_object', None)
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', False)
self.use_change = config.get('use_change', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
        # create triggers so that updates happen at most twice a second
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
# cached dialogs
self._settings_dialog = None
self._password_dialog = None
def wallet_name(self):
return os.path.basename(self.wallet.storage.path) if self.wallet else ' '
def on_pr(self, pr):
if pr.verify(self.wallet.contacts):
key = self.wallet.invoices.add(pr)
if self.invoices_screen:
self.invoices_screen.update()
status = self.wallet.invoices.get_status(key)
if status == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
else:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum.bitcoin import base_decode, is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('bitcoin:'):
self.set_URI(data)
return
# try to decode transaction
from electrum.transaction import Transaction
from electrum.util import bh2u
try:
text = bh2u(base_decode(data, None, base=43))
tx = Transaction(text)
tx.deserialize()
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'requests']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
if s is None:
s = self.tabs.ids[name + '_screen']
s.load_screen()
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, addr):
self.switch_to('receive')
self.receive_screen.screen.address = addr
def show_pr_details(self, req, status, is_invoice):
from electrum.util import format_time
requestor = req.get('requestor')
exp = req.get('exp')
memo = req.get('memo')
amount = req.get('amount')
fund = req.get('fund')
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.is_invoice = is_invoice
popup.amount = amount
popup.requestor = requestor if is_invoice else req.get('address')
popup.exp = format_time(exp) if exp else ''
popup.description = memo if memo else ''
popup.signature = req.get('signature', '')
popup.status = status
popup.fund = fund if fund else 0
txid = req.get('txid')
popup.tx_hash = txid or ''
popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
popup.export = self.export_private_keys
popup.open()
def show_addr_details(self, req, status):
from electrum.util import format_time
fund = req.get('fund')
isaddr = 'y'
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.isaddr = isaddr
popup.is_invoice = False
popup.status = status
popup.requestor = req.get('address')
popup.fund = fund if fund else 0
popup.export = self.export_private_keys
popup.open()
def qr_dialog(self, title, data, show_text=False):
from .uix.dialogs.qr_dialog import QRDialog
popup = QRDialog(title, data, show_text)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
Intent = autoclass('android.content.Intent')
intent = Intent("com.google.zxing.client.android.SCAN")
intent.putExtra("SCAN_MODE", "QR_CODE_MODE")
def on_qr_result(requestCode, resultCode, intent):
if requestCode == 0:
if resultCode == -1: # RESULT_OK:
contents = intent.getStringExtra("SCAN_RESULT")
if intent.getStringExtra("SCAN_RESULT_FORMAT") == 'QR_CODE':
on_complete(contents)
else:
self.show_error("wrong format " + intent.getStringExtra("SCAN_RESULT_FORMAT"))
activity.bind(on_activity_result=on_qr_result)
try:
PythonActivity.mActivity.startActivityForResult(intent, 0)
except:
self.show_error(_('Could not start Barcode Scanner.') + ' ' + _('Please install the Barcode Scanner app from ZXing'))
def scan_qr_zxing(self, on_complete):
# uses zxing embedded lib
if platform != 'android':
return
from jnius import autoclass
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
IntentIntegrator = autoclass('com.google.zxing.integration.android.IntentIntegrator')
integrator = IntentIntegrator(PythonActivity.mActivity)
def on_qr_result(requestCode, resultCode, intent):
if requestCode == 0:
if resultCode == -1: # RESULT_OK:
contents = intent.getStringExtra("SCAN_RESULT")
if intent.getStringExtra("SCAN_RESULT_FORMAT") == 'QR_CODE':
on_complete(contents)
else:
self.show_error("wrong format " + intent.getStringExtra("SCAN_RESULT_FORMAT"))
activity.bind(on_activity_result=on_qr_result)
integrator.initiateScan()
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
Logger.info('Time to on_start: {} <<<<<<<<'.format(time.clock()))
win = Window
win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
win.bind(on_key_down=self.on_key_down)
#win.softinput_mode = 'below_target'
self.on_size(win, win.size)
self.init_ui()
self.load_wallet_by_name(self.electrum_config.get_wallet_path())
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for bitcoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['updated', 'status', 'new_transaction', 'verified', 'interfaces']
self.network.register_callback(self.on_network_event, interests)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, instance, wallet):
if wallet:
wallet.start_threads(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
self.on_resume()
def load_wallet_by_name(self, path):
if not path:
return
wallet = self.daemon.load_wallet(path, None)
if wallet:
if wallet != self.wallet:
self.stop_wallet()
self.load_wallet(wallet)
self.on_resume()
else:
Logger.debug('Electrum: Wallet not found. Launching install wizard')
storage = WalletStorage(path)
wizard = Factory.InstallWizard(self.electrum_config, storage)
wizard.bind(on_wizard_complete=self.on_wizard_complete)
action = wizard.storage.get_action()
wizard.run(action)
def on_stop(self):
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
from .uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.open()
else:
popup = Builder.load_file('gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
''' Initialize The Ux part of electrum. This function performs the basic
tasks of setting up the ui.
'''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum_gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum_gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "icons/electrum.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_checkpoint = chain.get_checkpoint()
self.blockchain_name = chain.get_name()
if self.network.interface:
self.server_host = self.network.interface.host
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'interfaces':
self._trigger_update_interfaces()
elif event == 'updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet):
self.wallet = wallet
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
def update_status(self, *dt):
self.num_blocks = self.network.get_local_height()
if not self.wallet:
self.status = _("No Wallet")
return
if self.network is None or not self.network.is_running():
status = _("Offline")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
if not self.wallet.up_to_date or server_height == 0:
status = _("Synchronizing...")
elif server_lag > 1:
status = _("Server lagging (%d blocks)"%server_lag)
else:
c, u, x = self.wallet.get_balance()
text = self.format_amount(c+x+u)
status = str(text.strip() + ' ' + self.base_unit)
else:
status = _("Disconnected")
n = self.wallet.basename()
self.status = '[size=15dp]%s[/size]\n%s' %(n, status)
#fiat_balance = self.fx.format_amount_and_units(c+u+x) or ''
def get_max_amount(self):
inputs = self.wallet.get_spendable_coins(None, self.electrum_config)
addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
outputs = [(TYPE_ADDRESS, addr, '!')]
tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_config)
amount = tx.output_value()
return format_satoshis_plain(amount, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, is_diff, 0, self.decimal_point(), whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
            Logger.error('Notification: needs plyer; `sudo pip install plyer`')
def on_pause(self):
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
if self.nfcscanner:
self.nfcscanner.nfc_enable()
# workaround p4a bug:
# show an empty info bubble, to refresh the display
self.show_info_bubble('', duration=0.1, pos=(0,0), width=1, arrow_pos=None)
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
if label.touched:
label.touched = False
self.qr_dialog(label.name, label.data, True)
else:
label.touched = True
self._clipboard.copy(label.data)
Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
def set_send(self, address, amount, label, message):
self.send_payment(address, amount=amount, label=label, message=message)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://gui/kivy/theming/light/error', duration=0,
modal=False):
        ''' Show an error Message Bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
        ''' Show an Info Message Bubble.
'''
self.show_error(error, icon='atlas://gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show a Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
ok, txid = self.network.broadcast(tx)
Clock.schedule_once(lambda dt: on_complete(ok, txid))
def broadcast(self, tx, pr=None):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
if pr:
self.wallet.invoices.set_paid(pr, tx.txid())
self.wallet.invoices.save()
self.update_tab('invoices')
else:
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
@profiler
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def protected(self, msg, f, args):
if self.wallet.has_password():
self.password_dialog(msg, f, args)
else:
f(*(args + (None,)))
def delete_wallet(self):
from .uix.dialogs.question import Question
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = os.path.basename(self.wallet.storage.path)
self.protected(_("Enter your PIN code to confirm deletion of %s") % basename, self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
dirname = os.path.dirname(wallet_path)
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error("Wallet removed:" + basename)
d = os.listdir(dirname)
name = 'default_wallet'
new_path = os.path.join(dirname, name)
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except:
self.show_error("Invalid PIN")
return
label.text = _('Seed') + ':\n' + seed
if passphrase:
label.text += '\n\n' + _('Passphrase') + ': ' + passphrase
def change_password(self, cb):
if self.wallet.has_password():
self.protected(_("Changing PIN code.") + '\n' + _("Enter your current PIN:"), self._change_password, (cb,))
else:
self._change_password(cb, None)
def _change_password(self, cb, old_password):
if self.wallet.has_password():
if old_password is None:
return
try:
self.wallet.check_password(old_password)
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.password_dialog(_('Enter new PIN'), self._change_password2, (cb, old_password,))
def _change_password2(self, cb, old_password, new_password):
self.password_dialog(_('Confirm new PIN'), self._change_password3, (cb, old_password, new_password))
def _change_password3(self, cb, old_password, new_password, confirmed_password):
if new_password == confirmed_password:
self.wallet.update_password(old_password, new_password)
cb()
else:
self.show_error("PIN numbers do not match")
def password_dialog(self, msg, f, args):
from .uix.dialogs.password_dialog import PasswordDialog
def callback(pw):
Clock.schedule_once(lambda x: f(*(args + (pw,))), 0.1)
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
self._password_dialog.init(msg, callback)
self._password_dialog.open()
def export_private_keys(self, pk_label, addr):
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
key = str(self.wallet.export_private_key(addr, password)[0])
pk_label.data = key
self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))
|
offic.py
|
import cv2 as cv
import time
import numpy as np
import multiprocessing as mp
from multiprocessing import Process
from math import sqrt
def st(z):
    # square a complex number represented as a (real, imag) tuple
    r = z[0]**2-(z[1]**2)
    i = z[0]*z[1]*2
    return r, i
def f(z, c):
    # one Mandelbrot iteration: z**2 + c on (real, imag) tuples
    s = st(z)
    r = s[0]+c[0]
    i = s[1]+c[1]
    return r, i
def mon(r, i, n=100):
    # return True if c = r + i*1j stays bounded after n iterations of z**2 + c;
    # an OverflowError means the orbit escaped, so the point is outside the set
    z = (0, 0)
    c = (r, i)
    try:
        for _ in range(n):
            z = f(z, c)
    except OverflowError:
        p = False
    else:
        p = True
    return p
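# Illustrative checks (hypothetical inputs): the origin stays bounded, while a point
# far outside the set overflows within a few iterations and escapes.
# mon(0.0, 0.0) -> True
# mon(2.0, 0.0) -> False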
def drmon(qlt, devi1, dell, part, qq=0):
    # Compute one of `dell` horizontal bands of the point grid at quality `qlt`.
    # `devi1` (the image mode) shrinks the computed width when only the mirrored
    # half is needed; `qq`, if given, is a multiprocessing queue for the result.
    part -= 1
    # qlt*=2
    sqq = sqrt(qlt)  # currently unused
    de = 100*qlt
h1 = de*-1.25
v1 = de*-2.1 # qlt*185
hr, vr = (250/devi1)*qlt+1, (265/dell)*qlt
v1 += vr*part
#print(-h1+(h1+hr), -v1+(v1+vr), qlt, devi1, dell, part)
h1, v1, de, hr, vr = int(h1), int(v1), int(de), int(hr), int(vr)
#print(-h1+(h1+hr), -v1+(v1+vr), qlt, devi1, dell, part)
ww = [[[None, i/de, j/de] for j in range(h1, h1+hr)] for i in range(v1, v1+vr)]
for i in ww:
for j in i:
j[0] = mon(j[1], j[2])
if qq != 0:
qq.put(ww)
else:
return ww
def funccol(qlt, mode, dell):
ran = range(1, dell+1)
qq, pr, w, wg = {}, {}, {}, []
for i in ran:
qq[i] = mp.Queue()
pr[i] = Process(target=drmon, args=([qlt, mode, dell, i, qq[i]]))
pr[i].start()
for i in ran:
w[i] = qq[i].get()
wg += w[i]
pr[i].join()
return wg
def myar_to_img(w, fcolor, scolor, mode):
hei = len(w)*1
wid = len(w[0])*1
ar1 = np.zeros((hei, wid, 3), np.uint8)
ar1[:] = fcolor
ar = np.zeros((hei*2, wid*2, 3), np.uint8)
ii = -1
for i in w:
ii += 1
jj = -1
for j in i:
jj += 1
if j[0] is True:
ar1[ii][jj] = scolor
ar2 = np.copy(ar1)
for i in range(len(ar2)):
ar2[i][::] = ar2[i][::-1]
ar1 = ar1[0:len(ar1), 0:len(ar1[0])-1]
ar = np.concatenate((ar1, ar2), axis=1)
if mode == 1:
return ar2
elif mode == 2:
return ar
else:
return "error"
if __name__ == '__main__':
#for i in range(1,11):
# drmon(2**i,2,1,1)
factor = 0
while 1:
factor += 1 # quality factor
start_0 = time.time()
qual = 2**factor # quality
processes_num = 8 # number of processes used in multiprocessing
mode = 2 # when “1” calculates the whole image,
# when “2” calculates the mirror half; only affects performance
# h1,v1 = 50,50
myar = funccol(qual, mode, processes_num) # multiprocessing
img = myar_to_img(myar, (255, 255, 255), (0, 0, 0), mode)
end_0 = time.time() - start_0
print(end_0, "sec")
# cv.namedWindow ( "b" , cv.WINDOW_NORMAL)
cv.imshow(f"mon_img_{qual}", img)
cv.imwrite(f"mon_img_{qual}.png", img)
cv.waitKey(0)
cv.destroyAllWindows()
|
formats.py
|
from __future__ import print_function, division, absolute_import
import os
import stat
import struct
import sys
import tarfile
import threading
import time
import zipfile
import zlib
from contextlib import closing
from io import BytesIO
from multiprocessing.pool import ThreadPool
from .compat import Queue, on_win
from .core import CondaPackException
def _parse_n_threads(n_threads=1):
if n_threads == -1:
from multiprocessing import cpu_count
return cpu_count()
if n_threads < 1:
raise CondaPackException("n-threads must be >= 1, or -1 for all cores")
return n_threads
def archive(fileobj, arcroot, format, compress_level=4, zip_symlinks=False,
zip_64=True, n_threads=1):
n_threads = _parse_n_threads(n_threads)
if format == 'zip':
return ZipArchive(fileobj, arcroot, zip_symlinks=zip_symlinks,
zip_64=zip_64)
# Tar archives
if format in ('tar.gz', 'tgz'):
if n_threads == 1:
mode = 'w:gz'
close_file = False
else:
mode = 'w'
close_file = True
fileobj = ParallelGzipFileWriter(fileobj, compresslevel=compress_level,
n_threads=n_threads)
elif format in ('tar.bz2', 'tbz2'):
if n_threads == 1:
mode = 'w:bz2'
close_file = False
else:
mode = 'w'
close_file = True
fileobj = ParallelBZ2FileWriter(fileobj, compresslevel=compress_level,
n_threads=n_threads)
else: # format == 'tar'
mode = 'w'
close_file = False
return TarArchive(fileobj, arcroot, close_file=close_file,
mode=mode, compresslevel=compress_level)
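# Minimal usage sketch (file name, arcroot and member paths are assumptions), relying
# only on the signature above and on the archive classes being context managers:
#
# with open("env.tar.gz", "wb") as f:
#     with archive(f, "env", "tar.gz", compress_level=4, n_threads=4) as arc:
#         arc.add("/path/to/bin/python", "bin/python")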
class ParallelFileWriter(object):
def __init__(self, fileobj, compresslevel=9, n_threads=1):
self.fileobj = fileobj
self.compresslevel = compresslevel
self.n_threads = n_threads
# Initialize file state
self.size = 0
self._init_state()
self._write_header()
# Parallel initialization
self.buffers = []
self.buffer_length = 0
self.pool = ThreadPool(n_threads)
self.compress_queue = Queue(maxsize=n_threads)
self._consumer_thread = threading.Thread(target=self._consumer)
self._consumer_thread.daemon = True
self._consumer_thread.start()
def tell(self):
return self.size
def write(self, data):
if not isinstance(data, bytes):
data = memoryview(data)
n = len(data)
if n > 0:
self._per_buffer_op(data)
self.size += n
self.buffer_length += n
self.buffers.append(data)
if self.buffer_length > self._block_size:
self.compress_queue.put(self.buffers)
self.buffers = []
self.buffer_length = 0
return n
def _consumer(self):
with closing(self.pool):
for buffers in self.pool.imap(
self._compress, iter(self.compress_queue.get, None)):
for buf in buffers:
if len(buf):
self.fileobj.write(buf)
def _compress(self, in_bufs):
out_bufs = []
compressor = self._new_compressor()
for data in in_bufs:
out_bufs.append(compressor.compress(data))
out_bufs.append(self._flush_compressor(compressor))
return out_bufs
def close(self):
if self.fileobj is None:
return
# Flush any waiting buffers
if self.buffers:
self.compress_queue.put(self.buffers)
# Wait for all work to finish
self.compress_queue.put(None)
self._consumer_thread.join()
# Write the closing bytes
self._write_footer()
# Flush fileobj
self.fileobj.flush()
# Cache shutdown state
self.compress_queue = None
self.pool = None
self.fileobj = None
class ParallelGzipFileWriter(ParallelFileWriter):
# Since it's hard for us to keep a running dictionary (a serial operation)
    # with parallel compression of blocks, we use a block size a few factors
    # bigger than the max dict size (32 KiB). In practice this is fine - we
# only lose out by a small factor of unneeded redundancy, and real files
# often lack enough redundant byte sequences to make this significant. Pigz
# uses 128 KiB, but does more work to keep a running dict.
_block_size = 256 * 2**10
def _init_state(self):
self.crc = zlib.crc32(b"") & 0xffffffff
def _new_compressor(self):
return zlib.compressobj(self.compresslevel, zlib.DEFLATED,
-zlib.MAX_WBITS, zlib.DEF_MEM_LEVEL, 0)
def _per_buffer_op(self, buffer):
self.crc = zlib.crc32(buffer, self.crc) & 0xffffffff
def _write32u(self, value):
self.fileobj.write(struct.pack("<L", value))
def _write_header(self):
self.fileobj.write(b'\037\213\010')
self.fileobj.write(b'\x00')
self._write32u(int(time.time()))
self.fileobj.write(b'\002\377')
def _write_footer(self):
self.fileobj.write(self._new_compressor().flush(zlib.Z_FINISH))
self._write32u(self.crc)
self._write32u(self.size & 0xffffffff)
def _flush_compressor(self, compressor):
return compressor.flush(zlib.Z_FULL_FLUSH)
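# --- Illustrative sketch (not part of the original module): the parallel gzip
# writer can also be used directly as a write-only, file-like object. The output
# path is hypothetical.
def _example_parallel_gzip_writer():
    with open("payload.gz", "wb") as raw:
        writer = ParallelGzipFileWriter(raw, compresslevel=6, n_threads=2)
        writer.write(b"hello world\n" * 1000)
        writer.close()  # flushes queued blocks and writes the gzip footer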
class ParallelBZ2FileWriter(ParallelFileWriter):
def _init_state(self):
# bzip2 compresslevel dictates its blocksize of 100 - 900 kb
self._block_size = self.compresslevel * 100 * 2**10
def _new_compressor(self):
import bz2
return bz2.BZ2Compressor(self.compresslevel)
def _per_buffer_op(self, buffer):
pass
def _write_header(self):
pass
def _write_footer(self):
pass
def _flush_compressor(self, compressor):
return compressor.flush()
class ArchiveBase(object):
def add(self, source, target):
target = os.path.join(self.arcroot, target)
self._add(source, target)
def add_bytes(self, source, sourcebytes, target):
target = os.path.join(self.arcroot, target)
self._add_bytes(source, sourcebytes, target)
class TarArchive(ArchiveBase):
def __init__(self, fileobj, arcroot, close_file=False,
mode='w', compresslevel=4):
self.fileobj = fileobj
self.arcroot = arcroot
self.close_file = close_file
self.mode = mode
self.compresslevel = compresslevel
def __enter__(self):
kwargs = {'compresslevel': self.compresslevel} if self.mode != 'w' else {}
# Hard links seem to throw off the tar file format on windows.
# Revisit when libarchive is used.
self.archive = tarfile.open(fileobj=self.fileobj,
dereference=on_win,
mode=self.mode,
**kwargs)
return self
def __exit__(self, *args):
self.archive.close()
if self.close_file:
self.fileobj.close()
def _add(self, source, target):
self.archive.add(source, target, recursive=False)
def _add_bytes(self, source, sourcebytes, target):
info = self.archive.gettarinfo(source, target)
info.size = len(sourcebytes)
self.archive.addfile(info, BytesIO(sourcebytes))
class ZipArchive(ArchiveBase):
def __init__(self, fileobj, arcroot, zip_symlinks=False, zip_64=True):
self.fileobj = fileobj
self.arcroot = arcroot
self.zip_symlinks = zip_symlinks
self.zip_64 = zip_64
def __enter__(self):
self.archive = zipfile.ZipFile(self.fileobj, "w",
allowZip64=self.zip_64,
compression=zipfile.ZIP_DEFLATED)
return self
def __exit__(self, type, value, traceback):
self.archive.close()
if isinstance(value, zipfile.LargeZipFile):
raise CondaPackException(
"Large Zip File: ZIP64 extensions required "
"but were disabled")
def _add(self, source, target):
try:
st = os.lstat(source)
is_link = stat.S_ISLNK(st.st_mode)
except (OSError, AttributeError):
is_link = False
if is_link:
if self.zip_symlinks:
info = zipfile.ZipInfo(target)
info.create_system = 3
info.external_attr = (st.st_mode & 0xFFFF) << 16
if os.path.isdir(source):
info.external_attr |= 0x10 # MS-DOS directory flag
self.archive.writestr(info, os.readlink(source))
else:
if os.path.isdir(source):
for root, dirs, files in os.walk(source, followlinks=True):
root2 = os.path.join(target, os.path.relpath(root, source))
for fil in files:
self.archive.write(os.path.join(root, fil),
os.path.join(root2, fil))
if not dirs and not files:
# root is an empty directory, write it now
self.archive.write(root, root2)
else:
self.archive.write(source, target)
else:
self.archive.write(source, target)
def _add_bytes(self, source, sourcebytes, target):
info = zipinfo_from_file(source, target)
self.archive.writestr(info, sourcebytes)
if sys.version_info >= (3, 6):
zipinfo_from_file = zipfile.ZipInfo.from_file
else: # pragma: no cover
# Backported from python 3.6
def zipinfo_from_file(filename, arcname=None):
st = os.stat(filename)
isdir = stat.S_ISDIR(st.st_mode)
mtime = time.localtime(st.st_mtime)
date_time = mtime[0:6]
# Create ZipInfo instance to store file information
if arcname is None:
arcname = filename
arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
while arcname[0] in (os.sep, os.altsep):
arcname = arcname[1:]
if isdir:
arcname += '/'
zinfo = zipfile.ZipInfo(arcname, date_time)
zinfo.external_attr = (st.st_mode & 0xFFFF) << 16 # Unix attributes
if isdir:
zinfo.file_size = 0
zinfo.external_attr |= 0x10 # MS-DOS directory flag
else:
zinfo.file_size = st.st_size
return zinfo
|
tutorial_performancesettings.py
|
"""
mss.tutorials.tutorial_performancesettings
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This python script generates an automatic demonstration of how to change the performance of flight track in table
view such as managing fuel capacity, etc.
This file is part of mss.
:copyright: Copyright 2021 Hrithik Kumar Verma
:copyright: Copyright 2021-2022 by the mss team, see AUTHORS.
:license: APACHE-2.0, see LICENSE for details.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pyautogui as pag
import multiprocessing
import sys
import os.path
from sys import platform
from pyscreeze import ImageNotFoundException
from tutorials import screenrecorder as sr
from mslib.msui import mss_pyui
def initial_ops():
"""
Executes the initial operations such as closing all opened windows and showing the desktop.
"""
pag.sleep(5)
if platform == "linux" or platform == "linux2":
pag.hotkey('winleft', 'd')
print("\n INFO : Automation is running on Linux system..\n")
elif platform == "darwin":
pag.hotkey('option', 'command', 'm')
print("\n INFO : Automation is running on Mac OS..\n")
elif platform == "win32":
pag.hotkey('win', 'd')
print("\nINFO : Automation is running on Windows OS..\n")
else:
pag.alert(text="Sorry, no support on this platform!", title="Platform Exception", button='OK')
def call_recorder():
"""
Calls the screen recorder class to start the recording of the automation.
"""
rec = sr.ScreenRecorder(80, 80, int(pag.size()[0]) - 400, int(pag.size()[1]) - 150)
rec.capture()
rec.stop_capture()
def call_mss():
"""
Calls the main MSS GUI window since operations are to be performed on it only.
"""
mss_pyui.main()
def automate_performance():
"""
    This is the main automation script for the performance settings of the table view tutorial. It is recorded and
    saved to a file whose name carries a date-time stamp and a .mp4 extension (codec).
"""
# Giving time for loading of the MSS GUI.
pag.sleep(5)
# Platform specific things
if platform == 'linux' or platform == 'linux2':
enter = 'enter'
wms_path = 'pictures/tutorial_wms/linux/'
ps_path = 'pictures/performance_settings/linux/'
win = 'winleft'
ctrl = 'ctrl'
elif platform == 'win32':
enter = 'enter'
wms_path = 'pictures/tutorial_wms/win32/'
ps_path = 'pictures/performance_settings/linux/'
win = 'win'
ctrl = 'ctrl'
elif platform == 'darwin':
enter = 'return'
wms_path = 'pictures/tutorial_wms/linux/'
ps_path = 'pictures/performance_settings/linux/'
ctrl = 'command'
# Satellite Predictor file path
path = os.path.normpath(os.getcwd() + os.sep + os.pardir)
ps_file_path = os.path.join(path, 'docs/samples/config/mss/performance_simple.json')
# Maximizing the window
try:
pag.hotkey('ctrl', 'command', 'f') if platform == 'darwin' else pag.hotkey(win, 'up')
except Exception:
print("\nException : Enable Shortcuts for your system or try again!")
pag.sleep(2)
pag.hotkey('ctrl', 't')
pag.sleep(3)
# Opening Performance Settings dockwidget
try:
x, y = pag.locateCenterOnScreen(f'{wms_path}selecttoopencontrol.png')
# Relocating the table view window
pag.moveTo(x, y - 462, duration=1)
if platform == 'linux' or platform == 'linux2':
pag.dragRel(10, 100, duration=3)
elif platform == 'win32' or platform == 'darwin':
pag.dragRel(10, 10, duration=2)
pag.sleep(2)
x, y = pag.locateCenterOnScreen(f'{wms_path}selecttoopencontrol.png')
pag.click(x, y, interval=2)
pag.sleep(1)
pag.press('down', presses=2, interval=1)
pag.sleep(1)
pag.press(enter)
pag.sleep(2)
except (ImageNotFoundException, OSError, Exception):
print("\nException :\'select to open control\' button/option not found on the screen.")
# Exploring through the file system and loading the performance settings json file for a dummy aircraft.
try:
x, y = pag.locateCenterOnScreen(f'{ps_path}select.png')
pag.click(x, y, duration=2)
pag.sleep(1)
pag.typewrite(ps_file_path, interval=0.1)
pag.sleep(1)
pag.press(enter)
pag.sleep(2)
except (ImageNotFoundException, OSError, Exception):
print("\nException :\'Select\' button (for loading performance_settings.json file) not found on the screen.")
# Checking the Show Performance checkbox to display the settings file in the table view
try:
x, y = pag.locateCenterOnScreen(f'{ps_path}show_performance.png')
pag.click(x, y, duration=2)
pag.sleep(3)
except (ImageNotFoundException, OSError, Exception):
print("\nException :\'Show Performance\' checkbox not found on the screen.")
# Changing the maximum take off weight
try:
x, y = pag.locateCenterOnScreen(f'{ps_path}maximum_takeoff_weight.png')
pag.click(x + 318, y, duration=2)
pag.sleep(4)
pag.hotkey(ctrl, 'a')
pag.sleep(1)
pag.typewrite('87000', interval=0.3)
pag.sleep(1)
pag.press(enter)
pag.sleep(2)
except (ImageNotFoundException, OSError, Exception):
print("\nException :\'Maximum Takeoff Weight\' fill box not found on the screen.")
# Changing the aircraft weight of the dummy aircraft
try:
x, y = pag.locateCenterOnScreen(f'{ps_path}aircraft_weight.png')
pag.click(x + 300, y, duration=2)
pag.sleep(4)
pag.hotkey(ctrl, 'a')
pag.sleep(1)
pag.typewrite('48000', interval=0.3)
pag.sleep(1)
pag.press(enter)
pag.sleep(2)
except (ImageNotFoundException, OSError, Exception):
print("\nException :\'Aircraft weight\' fill box not found on the screen.")
# Changing the take off time of the dummy aircraft
try:
x, y = pag.locateCenterOnScreen(f'{ps_path}take_off_time.png')
pag.click(x + 410, y, duration=2)
pag.sleep(4)
pag.hotkey(ctrl, 'a')
pag.sleep(1)
for _ in range(5):
pag.press('up')
pag.sleep(2)
pag.typewrite('04', interval=0.5)
pag.press(enter)
pag.sleep(2)
except (ImageNotFoundException, OSError, Exception):
print("\nException :\'Take off time\' fill box not found on the screen.")
# Showing and hiding the performance settings
try:
x, y = pag.locateCenterOnScreen(f'{ps_path}show_performance.png')
pag.click(x, y, duration=2)
pag.sleep(3)
pag.click(x, y, duration=2)
pag.sleep(3)
pag.click(x, y, duration=2)
pag.sleep(3)
except (ImageNotFoundException, OSError, Exception):
print("\nException :\'Show Performance\' checkbox not found on the screen.")
print("\nAutomation is over for this tutorial. Watch next tutorial for other functions.")
# Close Everything!
try:
if platform == 'linux' or platform == 'linux2':
for _ in range(2):
pag.hotkey('altleft', 'f4')
pag.sleep(3)
pag.press('left')
pag.sleep(3)
pag.press('enter')
pag.sleep(2)
pag.keyDown('altleft')
pag.press('tab')
pag.press('left')
pag.keyUp('altleft')
pag.press('q')
        elif platform == 'win32':
for _ in range(2):
pag.hotkey('alt', 'f4')
pag.sleep(3)
pag.press('left')
pag.sleep(3)
pag.press('enter')
pag.sleep(2)
pag.hotkey('alt', 'tab')
pag.press('q')
elif platform == 'darwin':
for _ in range(2):
pag.hotkey('command', 'w')
pag.sleep(3)
pag.press('left')
pag.sleep(3)
pag.press('return')
pag.sleep(2)
pag.hotkey('command', 'tab')
pag.press('q')
except Exception:
print("Cannot automate : Enable Shortcuts for your system or try again")
def main():
"""
This function runs the above functions as different processes at the same time and can be
controlled from here. (This is the main process.)
"""
p1 = multiprocessing.Process(target=call_mss)
p2 = multiprocessing.Process(target=automate_performance)
p3 = multiprocessing.Process(target=call_recorder)
print("\nINFO : Starting Automation.....\n")
p3.start()
pag.sleep(3)
initial_ops()
p1.start()
p2.start()
p2.join()
p1.join()
p3.join()
print("\n\nINFO : Automation Completes Successfully!")
sys.exit()
if __name__ == '__main__':
main()
|
greatfet_uart.py
|
#!/usr/bin/env python3
#
# This file is part of GreatFET.
#
from __future__ import print_function, absolute_import
import os
import sys
import time
import queue
import select
import threading
import greatfet
from greatfet import GreatFET
from greatfet.utils import from_eng_notation, GreatFETArgumentParser
from greatfet.util.console import Console
from greatfet.peripherals.uart import UART
console = None
input_thread = None
termination_request = None
last_keycodes = bytearray()
def input_handler(console, input_queue, termination_request):
""" Thread body that gathers input from the user and enqueues it for processing. """
def should_check_for_data():
if os.name == 'posix':
return select.select([sys.stdin], [], [], 0) != ([], [], [])
else:
return True
while not termination_request.is_set():
# If we don't have data waiting, skip this iteration.
# This prevents us from entering a blocking read and sticking there
# after termination is desired.
if not should_check_for_data():
time.sleep(0.01)
continue
key = console.getkey()
input_queue.put(key)
def exit(code):
termination_request.set()
input_thread.join()
console.cleanup()
sys.exit(code)
def handle_special_functions(keycode):
""" Handles any special functions associated with the relevant key. """
global last_keycodes
    # Keep track of the last two keycodes.
    # Add any new keycodes to our list, deleting any existing keys that would push us past 2.
last_keycodes.extend(keycode)
while len(last_keycodes) > 2:
last_keycodes.pop(0)
# If the user's entered CTRL+A, CTRL+C, exit.
if last_keycodes.endswith(b"\x01\x03"):
exit(0)
def main():
""" Core command. """
global input_thread, termination_request, console
parity_modes = {
'none': UART.PARITY_NONE,
'odd': UART.PARITY_ODD,
'even': UART.PARITY_EVEN,
'one': UART.PARITY_STUCK_AT_ONE,
'zero': UART.PARITY_STUCK_AT_ZERO
}
# Set up a simple argument parser.
# TODO: support configurations such as '8n1'
parser = GreatFETArgumentParser(description="Simple GreatFET UART monitor.")
parser.add_argument('baud', nargs='?', type=from_eng_notation, default=115200, help="Baud rate; in symbols/second. Defaults to 115200.")
parser.add_argument('-d', '--data', type=int, default=8, help="The number of data bits per frame.")
parser.add_argument('-S', '--stop', type=int, default=1, help="The number of stop bits per frame.")
    parser.add_argument('-P', '--parity', choices=parity_modes, default='none', help="The type of parity to use.")
parser.add_argument('-E', '--echo', action='store_true', help="If provided, local echo will be enabled.")
parser.add_argument('-N', '--no-newline-translation', action='store_false', dest='tr_newlines',
help="Provide this option to disable newline translation.")
args = parser.parse_args()
device = parser.find_specified_device()
# Grab our log functions.
log_function, log_error = parser.get_log_functions()
# Configure our UART.
if not hasattr(device, 'uart'):
log_error("This device doesn't appear to support the UART API. Perhaps it needs a firmware upgrade?")
sys.exit(-1)
# Notify the user that we're entering monitor mode.
log_function("Entering monitor mode. To terminate, type CTRL+A, then CTRL+C.")
# Create a console object.
console = Console()
console.setup()
# Create a thread to capture input data into a locally-processed queue.
input_queue = queue.Queue()
termination_request = threading.Event()
input_thread = threading.Thread(target=input_handler, args=(console, input_queue, termination_request))
input_thread.start()
# Configure our UART parameters.
    # Map the parity name onto the corresponding UART parity constant.
    device.uart.update_parameters(baud=args.baud, data_bits=args.data, stop_bits=args.stop,
                                  parity=parity_modes[args.parity])
# Generate our UART monitor.
while True:
# Grab any data from the serial port, and print it to the screen.
data = device.uart.read()
        # If we're performing newline translation, prepend a "\r" to any newline.
if args.tr_newlines and (data == b"\n"):
console.write_bytes(b"\r")
# Stick the UART data onscreen.
console.write_bytes(data)
# Grab any data from the user, and send it via serial.
try:
new_key = input_queue.get_nowait()
handle_special_functions(new_key)
# If local echo is on, print the character to our local console.
if args.echo:
sys.stdout.buffer.write(new_key)
if args.tr_newlines and (new_key == b"\n"):
device.uart.write(b"\r")
device.uart.write(new_key)
except queue.Empty:
pass
if __name__ == '__main__':
main()
|
utils.py
|
import jwtoken
import threading
m3ustr = "#EXTM3U \n\n"
# Lock guarding the shared m3ustr string, which worker threads append to concurrently.
m3ustr_lock = threading.Lock()
kodiPropLicenseType = "#KODIPROP:inputstream.adaptive.license_type=com.widevine.alpha"
def processTokenChunks(channelList):
global m3ustr
kodiPropLicenseUrl = ""
if not channelList:
print("Channel List is empty ..Exiting")
exit(1)
for channel in channelList:
ls_session_key = jwtoken.generateJWT(channel['channel_id'], iterative=False)
if ls_session_key != "":
licenseUrl = channel['channel_license_url'] + "&ls_session=" + ls_session_key
kodiPropLicenseUrl = "#KODIPROP:inputstream.adaptive.license_key=" + licenseUrl
else:
print("Didn't get license for channel: Id: {0} Name:{1}".format(channel['channel_id'],
channel['channel_name']))
print('Continuing...Please get license manually for channel :', channel['channel_name'])
        # Append this channel's entry under the lock so concurrent threads
        # cannot interleave or drop each other's updates.
        with m3ustr_lock:
            m3ustr += kodiPropLicenseType + "\n" + kodiPropLicenseUrl + "\n" + "#EXTINF:-1 "
            m3ustr += "tvg-id=" + "\"" + channel['channel_id'] + "\" " + "tvg-logo=\"" + channel[
                'channel_logo'] + "\" ," + channel['channel_name'] + "\n" + channel['channel_url'] + "\n\n"
def m3ugen():
ts = []
global m3ustr
channelList = jwtoken.getUserChannelSubscribedList()
for i in range(0, len(channelList), 5):
        t = threading.Thread(target=processTokenChunks, args=(channelList[i:i + 5],))
ts.append(t)
t.start()
for t in ts:
t.join()
print("Found total {0} channels subscribed by user \n Saving them to m3u file".format(len(channelList)))
saveM3ustringtofile(m3ustr)
def saveM3ustringtofile(m3ustr):
with open("allChannelPlaylist.m3u", "w") as allChannelPlaylistFile:
allChannelPlaylistFile.write(m3ustr)
if __name__ == '__main__':
m3ugen()
|
sim.py
|
import copy
import inspect
import itertools
from functools import partial
import numpy as np
import os
import random
import threading
import time as ttime
import uuid
import weakref
import warnings
from collections import deque, OrderedDict
from tempfile import mkdtemp
from .signal import Signal, EpicsSignal, EpicsSignalRO
from .areadetector.base import EpicsSignalWithRBV
from .status import DeviceStatus, StatusBase
from .device import (Device, Component as Cpt,
DynamicDeviceComponent as DDCpt, Kind)
from types import SimpleNamespace
from .pseudopos import (PseudoPositioner, PseudoSingle,
real_position_argument, pseudo_position_argument)
from .positioner import SoftPositioner
from .utils import ReadOnlyError, LimitError
from .log import logger
# two convenience functions 'vendored' from bluesky.utils
def new_uid():
return str(uuid.uuid4())
def short_uid(label=None, truncate=6):
"Return a readable but unique id like 'label-fjfi5a'"
if label:
return '-'.join([label, new_uid()[:truncate]])
else:
return new_uid()[:truncate]
class NullStatus(StatusBase):
"A simple Status object that is always immediately done, successfully."
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.set_finished()
class EnumSignal(Signal):
def __init__(self, *args, value=0, enum_strings, **kwargs):
super().__init__(*args, value=0, **kwargs)
self._enum_strs = tuple(enum_strings)
self._metadata['enum_strs'] = tuple(enum_strings)
self.put(value)
def put(self, value, **kwargs):
if value in self._enum_strs:
value = self._enum_strs.index(value)
elif isinstance(value, str):
err = f'{value} not in enum strs {self._enum_strs}'
raise ValueError(err)
return super().put(value, **kwargs)
def get(self, *, as_string=True, **kwargs):
"""
Implement getting as enum strings
"""
value = super().get()
if as_string:
if self._enum_strs is not None and isinstance(value, int):
return self._enum_strs[value]
elif value is not None:
return str(value)
return value
def describe(self):
desc = super().describe()
desc[self.name]['enum_strs'] = self._enum_strs
return desc
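# --- Illustrative sketch (not part of the original module): EnumSignal accepts
# either an enum string or its integer index on put(), and reports the string on
# get(). The signal name and enum strings below are hypothetical.
def _example_enum_signal():
    shutter = EnumSignal(name='shutter', value='closed',
                         enum_strings=('closed', 'open'))
    shutter.put('open')
    return shutter.get()  # -> 'open'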
class SynSignal(Signal):
"""
A synthetic Signal that evaluates a Python function when triggered.
Parameters
----------
func : callable, optional
This function sets the signal to a new value when it is triggered.
Expected signature: ``f() -> value``.
By default, triggering the signal does not change the value.
name : string, keyword only
exposure_time : number, optional
Seconds of delay when triggered (simulated 'exposure time'). Default is
0.
precision : integer, optional
Digits of precision. Default is 3.
parent : Device, optional
Used internally if this Signal is made part of a larger Device.
    kind : a member of the Kind IntEnum (or equivalent integer), optional
Default is Kind.normal. See Kind for options.
"""
# This signature is arranged to mimic the signature of EpicsSignal, where
# the Python function (func) takes the place of the PV.
def __init__(self, func=None, *,
name, # required, keyword-only
exposure_time=0,
precision=3,
parent=None,
labels=None,
kind=None,
**kwargs):
if func is None:
# When triggered, just put the current value.
func = self.get
# Initialize readback with 0.
self._readback = 0
sentinel = object()
loop = kwargs.pop('loop', sentinel)
if loop is not sentinel:
warnings.warn(
f"{self.__class__} no longer takes a loop as input. "
"Your input will be ignored and may raise in the future",
stacklevel=2
)
self._func = func
self.exposure_time = exposure_time
self.precision = precision
super().__init__(value=self._func(), timestamp=ttime.time(), name=name,
parent=parent, labels=labels, kind=kind, **kwargs)
self._metadata.update(
connected=True,
)
def describe(self):
res = super().describe()
# There should be only one key here, but for the sake of generality....
for k in res:
res[k]['precision'] = self.precision
return res
def trigger(self):
st = DeviceStatus(device=self)
delay_time = self.exposure_time
if delay_time:
def sleep_and_finish():
self.log.info('sleep_and_finish %s', self)
ttime.sleep(delay_time)
self.put(self._func())
st.set_finished()
threading.Thread(target=sleep_and_finish, daemon=True).start()
else:
self.put(self._func())
st.set_finished()
return st
def sim_set_func(self, func):
"""
Update the SynSignal function to set a new value on trigger.
"""
self._func = func
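# --- Illustrative sketch (not part of the original module): a SynSignal
# recomputes its value from the supplied Python function on each trigger.
# The signal name is hypothetical.
def _example_syn_signal():
    sig = SynSignal(func=random.random, name='sig')
    sig.trigger()      # exposure_time defaults to 0, so this completes immediately
    return sig.read()  # -> {'sig': {'value': <float>, 'timestamp': <float>}}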
class SynSignalRO(SynSignal):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._metadata.update(
connected=True,
write_access=False,
)
def put(self, value, *, timestamp=None, force=False):
msg = f"{self}.put(value={value}, timestamp={timestamp}, force={force})"
self.log.error(msg)
raise ReadOnlyError(msg)
def set(self, value, *, timestamp=None, force=False):
msg = f"{self} is readonly"
self.log.error(msg)
raise ReadOnlyError(msg)
class SynPeriodicSignal(SynSignal):
"""
A synthetic Signal that evaluates a Python function periodically.
The signal value is updated in a background thread. To start the thread,
call the `start_simulation()` method before the beginning of simulation.
Parameters
----------
func : callable, optional
This function sets the signal to a new value when it is triggered.
Expected signature: ``f() -> value``.
By default, triggering the signal generates white noise on [0, 1].
name : string, keyword only
period : number, optional
How often the Signal's value is updated in the background. Default is
1 second.
period_jitter : number, optional
Random Gaussian variation of the period. Default is 1 second.
exposure_time : number, optional
Seconds of delay when triggered (simulated 'exposure time'). Default is
0.
parent : Device, optional
Used internally if this Signal is made part of a larger Device.
    kind : a member of the Kind IntEnum (or equivalent integer), optional
Default is Kind.normal. See Kind for options.
"""
def __init__(self, func=None, *,
name, # required, keyword-only
period=1, period_jitter=1,
exposure_time=0,
parent=None,
labels=None,
kind=None,
**kwargs):
if func is None:
func = np.random.rand
self._period = period
self._period_jitter = period_jitter
super().__init__(name=name, func=func,
exposure_time=exposure_time,
parent=parent, labels=labels, kind=kind,
**kwargs)
self.__thread = None
def start_simulation(self):
"""
Start background thread that performs periodic value updates. The method
should be called at least once before the beginning of simulation. Multiple
calls to the method are ignored.
"""
if self.__thread is None:
def periodic_update(ref, period, period_jitter):
while True:
signal = ref()
if not signal:
# Our target Signal has been garbage collected. Shut
# down the Thread.
return
signal.put(signal._func())
del signal
# Sleep for period +/- period_jitter.
ttime.sleep(
max(self._period + self._period_jitter * np.random.randn(), 0))
self.__thread = threading.Thread(target=periodic_update,
daemon=True,
args=(weakref.ref(self),
self._period,
self._period_jitter))
self.__thread.start()
def _start_simulation_deprecated(self):
"""Call `start_simulation` and print deprecation warning."""
if self.__thread is None:
msg = ("Deprecated API: Objects of SynPeriodicSignal must be initialized before simulation\n"
"by calling 'start_simulation()' method. Two such objects ('rand' and 'rand2') are\n"
"created by 'ophyd.sim' module. Call\n"
" rand.start_simulation() or rand2.start_simulation()\n"
"before the object is used.")
self.log.warning(msg)
self.start_simulation()
def trigger(self):
self._start_simulation_deprecated()
return super().trigger()
def get(self, **kwargs):
self._start_simulation_deprecated()
return super().get(**kwargs)
def put(self, *args, **kwargs):
self._start_simulation_deprecated()
super().put(*args, **kwargs)
def set(self, *args, **kwargs):
self._start_simulation_deprecated()
return super().set(*args, **kwargs)
def read(self):
self._start_simulation_deprecated()
return super().read()
def subscribe(self, *args, **kwargs):
self._start_simulation_deprecated()
return super().subscribe(*args, **kwargs)
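# --- Illustrative sketch (not part of the original module): per the docstring
# above, the background update thread must be started explicitly. The signal
# name and period are hypothetical.
def _example_periodic_signal():
    rand_sig = SynPeriodicSignal(name='rand_sig', period=0.5, period_jitter=0.1)
    rand_sig.start_simulation()  # begins periodic updates in a daemon thread
    return rand_sig.get()        # white noise on [0, 1] by default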
class _ReadbackSignal(Signal):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._metadata.update(
connected=True,
write_access=False,
)
def get(self):
self._readback = self.parent.sim_state['readback']
return self._readback
def describe(self):
res = super().describe()
# There should be only one key here, but for the sake of
# generality....
for k in res:
res[k]['precision'] = self.parent.precision
return res
@property
def timestamp(self):
'''Timestamp of the readback value'''
return self.parent.sim_state['readback_ts']
def put(self, value, *, timestamp=None, force=False):
raise ReadOnlyError("The signal {} is readonly.".format(self.name))
def set(self, value, *, timestamp=None, force=False):
raise ReadOnlyError("The signal {} is readonly.".format(self.name))
class _SetpointSignal(Signal):
def put(self, value, *, timestamp=None, force=False):
self._readback = float(value)
self.parent.set(float(value))
def get(self):
self._readback = self.parent.sim_state['setpoint']
return self.parent.sim_state['setpoint']
def describe(self):
res = super().describe()
# There should be only one key here, but for the sake of generality....
for k in res:
res[k]['precision'] = self.parent.precision
return res
@property
def timestamp(self):
'''Timestamp of the readback value'''
return self.parent.sim_state['setpoint_ts']
class SynAxis(Device):
"""
    A synthetic, settable Device that mimics any 1D axis (position, temperature).
Parameters
----------
name : string, keyword only
readback_func : callable, optional
When the Device is set to ``x``, its readback will be updated to
``f(x)``. This can be used to introduce random noise or a systematic
offset.
Expected signature: ``f(x) -> value``.
value : object, optional
The initial value. Default is 0.
delay : number, optional
Simulates how long it takes the device to "move". Default is 0 seconds.
precision : integer, optional
Digits of precision. Default is 3.
parent : Device, optional
Used internally if this Signal is made part of a larger Device.
    kind : a member of the Kind IntEnum (or equivalent integer), optional
Default is Kind.normal. See Kind for options.
"""
readback = Cpt(_ReadbackSignal, value=0, kind='hinted')
setpoint = Cpt(_SetpointSignal, value=0, kind='normal')
velocity = Cpt(Signal, value=1, kind='config')
acceleration = Cpt(Signal, value=1, kind='config')
unused = Cpt(Signal, value=1, kind='omitted')
SUB_READBACK = 'readback'
_default_sub = SUB_READBACK
def __init__(self, *,
name,
readback_func=None, value=0, delay=0,
precision=3,
parent=None,
labels=None,
kind=None,
**kwargs):
if readback_func is None:
def readback_func(x):
return x
sentinel = object()
loop = kwargs.pop('loop', sentinel)
if loop is not sentinel:
warnings.warn(
f"{self.__class__} no longer takes a loop as input. "
"Your input will be ignored and may raise in the future",
stacklevel=2
)
self.sim_state = {}
self._readback_func = readback_func
self.delay = delay
self.precision = precision
# initialize values
self.sim_state['setpoint'] = value
self.sim_state['setpoint_ts'] = ttime.time()
self.sim_state['readback'] = readback_func(value)
self.sim_state['readback_ts'] = ttime.time()
super().__init__(name=name, parent=parent, labels=labels, kind=kind,
**kwargs)
self.readback.name = self.name
def set(self, value):
old_setpoint = self.sim_state['setpoint']
self.sim_state['setpoint'] = value
self.sim_state['setpoint_ts'] = ttime.time()
self.setpoint._run_subs(sub_type=self.setpoint.SUB_VALUE,
old_value=old_setpoint,
value=self.sim_state['setpoint'],
timestamp=self.sim_state['setpoint_ts'])
def update_state():
old_readback = self.sim_state['readback']
self.sim_state['readback'] = self._readback_func(value)
self.sim_state['readback_ts'] = ttime.time()
self.readback._run_subs(sub_type=self.readback.SUB_VALUE,
old_value=old_readback,
value=self.sim_state['readback'],
timestamp=self.sim_state['readback_ts'])
self._run_subs(sub_type=self.SUB_READBACK,
old_value=old_readback,
value=self.sim_state['readback'],
timestamp=self.sim_state['readback_ts'])
st = DeviceStatus(device=self)
if self.delay:
def sleep_and_finish():
ttime.sleep(self.delay)
update_state()
st.set_finished()
threading.Thread(target=sleep_and_finish, daemon=True).start()
else:
update_state()
st.set_finished()
return st
@property
def position(self):
return self.readback.get()
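# --- Illustrative sketch (not part of the original module): moving a SynAxis and
# reading it back. With delay=0 the move completes synchronously. The axis name
# is hypothetical.
def _example_syn_axis():
    axis = SynAxis(name='axis', delay=0)
    axis.set(1.5)         # updates setpoint and readback immediately
    return axis.position  # -> 1.5 (readback_func defaults to the identity)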
class SynAxisEmptyHints(SynAxis):
@property
def hints(self):
return {}
class SynAxisNoHints(SynAxis):
readback = Cpt(_ReadbackSignal, value=0, kind='omitted')
@property
def hints(self):
raise AttributeError
class SynGauss(Device):
"""
Evaluate a point on a Gaussian based on the value of a motor.
Parameters
----------
name : string
motor : Device
motor_field : string
center : number
center of peak
Imax : number
max intensity of peak
sigma : number, optional
Default is 1.
noise : {'poisson', 'uniform', None}, optional
Add noise to the gaussian peak.
noise_multiplier : float, optional
Only relevant for 'uniform' noise. Multiply the random amount of
noise by 'noise_multiplier'
random_state : numpy random state object, optional
        e.g. np.random.RandomState(0), to generate random numbers with a given seed
Example
-------
motor = SynAxis(name='motor')
det = SynGauss('det', motor, 'motor', center=0, Imax=1, sigma=1)
"""
def _compute(self):
m = self._motor.read()[self._motor_field]['value']
# we need to do this one at a time because
# - self.read() may be screwed with by the user
# - self.get() would cause infinite recursion
Imax = self.Imax.get()
center = self.center.get()
sigma = self.sigma.get()
noise = self.noise.get()
noise_multiplier = self.noise_multiplier.get()
v = Imax * np.exp(-(m - center) ** 2 /
(2 * sigma ** 2))
if noise == 'poisson':
v = int(self.random_state.poisson(np.round(v), 1))
elif noise == 'uniform':
v += self.random_state.uniform(-1, 1) * noise_multiplier
return v
val = Cpt(SynSignal, kind='hinted')
Imax = Cpt(Signal, value=10, kind='config')
center = Cpt(Signal, value=0, kind='config')
sigma = Cpt(Signal, value=1, kind='config')
noise = Cpt(EnumSignal, value='none', kind='config',
enum_strings=('none', 'poisson', 'uniform'))
noise_multiplier = Cpt(Signal, value=1, kind='config')
def __init__(self, name, motor, motor_field, center, Imax,
*, random_state=None,
**kwargs):
set_later = {}
for k in ('sigma', 'noise', 'noise_multiplier'):
v = kwargs.pop(k, None)
if v is not None:
set_later[k] = v
super().__init__(name=name, **kwargs)
self._motor = motor
self._motor_field = motor_field
self.center.put(center)
self.Imax.put(Imax)
self.random_state = random_state or np.random
self.val.name = self.name
self.val.sim_set_func(self._compute)
for k, v in set_later.items():
getattr(self, k).put(v)
self.trigger()
def subscribe(self, *args, **kwargs):
return self.val.subscribe(*args, **kwargs)
def clear_sub(self, cb, event_type=None):
return self.val.clear_sub(cb, event_type=event_type)
def unsubscribe(self, cid):
return self.val.unsubscribe(cid)
def unsubscribe_all(self):
return self.val.unsubscribe_all()
def trigger(self, *args, **kwargs):
return self.val.trigger(*args, **kwargs)
@property
def precision(self):
return self.val.precision
@precision.setter
def precision(self, v):
self.val.precision = v
@property
def exposure_time(self):
return self.val.exposure_time
@exposure_time.setter
def exposure_time(self, v):
self.val.exposure_time = v
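# --- Illustrative sketch (not part of the original module): stepping a motor and
# triggering a SynGauss traces out the simulated peak. Names are hypothetical.
def _example_syn_gauss_scan():
    motor = SynAxis(name='motor')
    det = SynGauss('det', motor, 'motor', center=0, Imax=1, sigma=1)
    readings = []
    for pos in (-1, 0, 1):
        motor.set(pos)
        det.trigger()
        readings.append(det.val.get())  # largest reading at pos == 0
    return readings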
class Syn2DGauss(Device):
"""
    Evaluate a point on a 2-D Gaussian based on the values of two motors.
Parameters
----------
name : str
The name of the detector
motor0 : SynAxis
The 'x' coordinate of the 2-D gaussian blob
motor_field0 : str
The name field of the motor. Should be the key in motor0.describe()
motor1 : SynAxis
The 'y' coordinate of the 2-D gaussian blob
motor_field1 : str
The name field of the motor. Should be the key in motor1.describe()
center : iterable, optional
The center of the gaussian blob
Defaults to (0,0)
Imax : float, optional
The intensity at `center`
Defaults to 1
sigma : float, optional
Standard deviation for gaussian blob
Defaults to 1
noise : {'poisson', 'uniform', None}, optional
        Add noise to the gaussian peak.
Defaults to None
noise_multiplier : float, optional
Only relevant for 'uniform' noise. Multiply the random amount of
noise by 'noise_multiplier'
Defaults to 1
random_state : numpy random state object, optional
        e.g. np.random.RandomState(0), to generate random numbers with a given seed
    Example
    -------
    motor1 = SynAxis(name='motor1')
    motor2 = SynAxis(name='motor2')
    det = Syn2DGauss('det', motor1, 'motor1', motor2, 'motor2',
                     center=(0, 0), Imax=1)
"""
val = Cpt(SynSignal, kind='hinted')
Imax = Cpt(Signal, value=10, kind='config')
center = Cpt(Signal, value=0, kind='config')
sigma = Cpt(Signal, value=1, kind='config')
noise = Cpt(EnumSignal, value='none', kind='config',
enum_strings=('none', 'poisson', 'uniform'))
noise_multiplier = Cpt(Signal, value=1, kind='config')
def _compute(self):
x = self._motor0.read()[self._motor_field0]['value']
y = self._motor1.read()[self._motor_field1]['value']
m = np.array([x, y])
Imax = self.Imax.get()
center = self.center.get()
sigma = self.sigma.get()
noise = self.noise.get()
noise_multiplier = self.noise_multiplier.get()
v = Imax * np.exp(-np.sum((m - center) ** 2) / (2 * sigma ** 2))
if noise == 'poisson':
v = int(self.random_state.poisson(np.round(v), 1))
elif noise == 'uniform':
v += self.random_state.uniform(-1, 1) * noise_multiplier
return v
def __init__(self, name, motor0, motor_field0, motor1, motor_field1,
center, Imax, sigma=1, noise="none", noise_multiplier=1,
random_state=None, **kwargs):
super().__init__(name=name, **kwargs)
self._motor0 = motor0
self._motor1 = motor1
self._motor_field0 = motor_field0
self._motor_field1 = motor_field1
self.center.put(center)
self.Imax.put(Imax)
self.sigma.put(sigma)
self.noise.put(noise)
self.noise_multiplier.put(noise_multiplier)
if random_state is None:
random_state = np.random
self.random_state = random_state
self.val.name = self.name
self.val.sim_set_func(self._compute)
self.trigger()
def trigger(self, *args, **kwargs):
return self.val.trigger(*args, **kwargs)
class TrivialFlyer:
"""Trivial flyer that complies to the API but returns empty data."""
name = 'trivial_flyer'
parent = None
def kickoff(self):
return NullStatus()
def describe_collect(self):
return {'stream_name': {}}
def read_configuration(self):
return OrderedDict()
def describe_configuration(self):
return OrderedDict()
def complete(self):
return NullStatus()
def collect(self):
for i in range(100):
yield {'data': {}, 'timestamps': {}, 'time': i, 'seq_num': i}
def stop(self, *, success=False):
pass
class NewTrivialFlyer(TrivialFlyer):
"""
The old-style API inserted Resource and Datum documents into a database
directly. The new-style API only caches the documents and provides an
interface (collect_asset_docs) for accessing that cache. This change was
part of the "asset refactor" that changed that way Resource and Datum
documents flowed through ophyd, bluesky, and databroker. Trivial flyer that
complies to the API but returns empty data.
"""
name = 'new_trivial_flyer'
def collect_asset_docs(self):
for _ in ():
yield _
class MockFlyer:
"""
Class for mocking a flyscan API implemented with stepper motors.
"""
def __init__(self, name, detector, motor, start, stop, num, **kwargs):
self.name = name
self.parent = None
self._mot = motor
self._detector = detector
self._steps = np.linspace(start, stop, num)
self._data = deque()
self._completion_status = None
sentinel = object()
loop = kwargs.pop('loop', sentinel)
if loop is not sentinel:
warnings.warn(
f"{self.__class__} no longer takes a loop as input. "
"Your input will be ignored and may raise in the future",
stacklevel=2
)
if kwargs:
raise TypeError(
f'{self.__class__}.__init__ got unexpected '
f'keyword arguments {list(kwargs)}')
def __setstate__(self, val):
name, detector, motor, steps = val
self.name = name
self.parent = None
self._mot = motor
self._detector = detector
self._steps = steps
self._completion_status = None
def __getstate__(self):
return (self.name, self._detector, self._mot, self._steps)
def read_configuration(self):
return OrderedDict()
def describe_configuration(self):
return OrderedDict()
def describe_collect(self):
dd = dict()
dd.update(self._mot.describe())
dd.update(self._detector.describe())
return {'stream_name': dd}
def complete(self):
if self._completion_status is None:
raise RuntimeError("No collection in progress")
return self._completion_status
def kickoff(self):
if self._completion_status is not None:
raise RuntimeError("Already kicked off.")
self._data = deque()
st = DeviceStatus(device=self)
self._completion_status = st
def flyer_worker():
self._scan()
st.set_finished()
threading.Thread(target=flyer_worker, daemon=True).start()
return st
def collect(self):
if self._completion_status is None or not self._completion_status.done:
raise RuntimeError("No reading until done!")
self._completion_status = None
yield from self._data
def _scan(self):
"This will be run on a separate thread, started in self.kickoff()"
ttime.sleep(.1)
for p in self._steps:
stat = self._mot.set(p)
stat.wait()
stat = self._detector.trigger()
stat.wait()
event = dict()
event['time'] = ttime.time()
event['data'] = dict()
event['timestamps'] = dict()
for r in [self._mot, self._detector]:
d = r.read()
for k, v in d.items():
event['data'][k] = v['value']
event['timestamps'][k] = v['timestamp']
self._data.append(event)
def stop(self, *, success=False):
pass
class SynSignalWithRegistry(SynSignal):
"""
A SynSignal integrated with databroker.assets
Parameters
----------
func : callable, optional
This function sets the signal to a new value when it is triggered.
Expected signature: ``f() -> value``.
By default, triggering the signal does not change the value.
name : string, keyword only
exposure_time : number, optional
Seconds of delay when triggered (simulated 'exposure time'). Default is
0.
parent : Device, optional
Used internally if this Signal is made part of a larger Device.
reg : Registry, optional
DEPRECATED. If used, this is ignored and a warning is issued. In a
future release, this parameter will be removed.
save_path : str, optional
Path to save files to, if None make a temp dir, defaults to None.
save_func : function, optional
The function to save the data, function signature must be:
`func(file_path, array)`, defaults to np.save
save_spec : str, optional
The spec for the save function, defaults to 'RWFS_NPY'
save_ext : str, optional
The extension to add to the file name, defaults to '.npy'
"""
def __init__(self, *args, save_path=None,
save_func=partial(np.save, allow_pickle=False),
save_spec='NPY_SEQ', save_ext='npy', **kwargs):
super().__init__(*args, **kwargs)
self.save_func = save_func
self.save_ext = save_ext
self._resource_uid = None
self._datum_counter = None
self._asset_docs_cache = deque()
if save_path is None:
self.save_path = mkdtemp()
else:
self.save_path = save_path
self._spec = save_spec # spec name stored in resource doc
self._file_stem = None
self._path_stem = None
self._result = {}
def stage(self):
self._file_stem = short_uid()
self._datum_counter = itertools.count()
self._path_stem = os.path.join(self.save_path, self._file_stem)
# This is temporarily more complicated than it will be in the future.
# It needs to support old configurations that have a registry.
resource = {'spec': self._spec,
'root': self.save_path,
'resource_path': self._file_stem,
'resource_kwargs': {},
'path_semantics': {'posix': 'posix', 'nt': 'windows'}[os.name]}
self._resource_uid = new_uid()
resource['uid'] = self._resource_uid
self._asset_docs_cache.append(('resource', resource))
def trigger(self):
super().trigger()
# save file stash file name
self._result.clear()
for idx, (name, reading) in enumerate(super().read().items()):
# Save the actual reading['value'] to disk. For a real detector,
# this part would be done by the detector IOC, not by ophyd.
data_counter = next(self._datum_counter)
self.save_func('{}_{}.{}'.format(self._path_stem, data_counter,
self.save_ext), reading['value'])
# This is temporarily more complicated than it will be in the
# future. It needs to support old configurations that have a
# registry.
datum = {'resource': self._resource_uid,
'datum_kwargs': dict(index=data_counter)}
# If a Registry is not set, we need to generate the datum_id.
datum_id = '{}/{}'.format(self._resource_uid,
data_counter)
datum['datum_id'] = datum_id
self._asset_docs_cache.append(('datum', datum))
# And now change the reading in place, replacing the value with
# a reference to Registry.
reading['value'] = datum_id
self._result[name] = reading
return NullStatus()
def read(self):
return self._result
def describe(self):
res = super().describe()
for key in res:
res[key]['external'] = "FILESTORE"
return res
def collect_asset_docs(self):
items = list(self._asset_docs_cache)
self._asset_docs_cache.clear()
for item in items:
yield item
def unstage(self):
self._resource_uid = None
self._datum_counter = None
self._asset_docs_cache.clear()
self._file_stem = None
self._path_stem = None
self._result.clear()
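# --- Illustrative sketch (not part of the original module): the registry-backed
# signal saves its value to disk on trigger, reports datum references from
# read(), and exposes Resource/Datum documents via collect_asset_docs(). The
# detector function and name are hypothetical.
def _example_registry_signal():
    det = SynSignalWithRegistry(func=lambda: np.ones((4, 4)), name='img')
    det.stage()
    det.trigger()
    reading = det.read()                    # values are datum_id references
    docs = list(det.collect_asset_docs())   # ('resource', ...) then ('datum', ...)
    det.unstage()
    return reading, docs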
class NumpySeqHandler:
specs = {'NPY_SEQ'}
def __init__(self, filename, root=''):
self._name = os.path.join(root, filename)
def __call__(self, index):
return np.load('{}_{}.npy'.format(self._name, index),
allow_pickle=False)
def get_file_list(self, datum_kwarg_gen):
"This method is optional. It is not needed for access, but for export."
return ['{name}_{index}.npy'.format(name=self._name, **kwargs)
for kwargs in datum_kwarg_gen]
class ABDetector(Device):
a = Cpt(SynSignal, func=random.random, kind=Kind.hinted)
b = Cpt(SynSignal, func=random.random)
def trigger(self):
return self.a.trigger() & self.b.trigger()
class DetWithCountTime(Device):
intensity = Cpt(SynSignal, func=lambda: 0, kind=Kind.hinted)
count_time = Cpt(Signal)
class DetWithConf(Device):
a = Cpt(SynSignal, func=lambda: 1, kind=Kind.hinted)
b = Cpt(SynSignal, func=lambda: 2, kind=Kind.hinted)
c = Cpt(SynSignal, func=lambda: 3)
d = Cpt(SynSignal, func=lambda: 4)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.read_attrs = ['a', 'b']
self.configuration_attrs = ['c', 'd']
def trigger(self):
return self.a.trigger() & self.b.trigger()
class InvariantSignal(SynSignal):
# Always returns the same reading, including timestamp.
def read(self):
res = super().read()
for k in res:
res[k]['timestamp'] = 0
return res
def __repr__(self):
return "<INVARIANT REPR>"
class SPseudo3x3(PseudoPositioner):
pseudo1 = Cpt(PseudoSingle, limits=(-10, 10), egu='a', kind=Kind.hinted)
pseudo2 = Cpt(PseudoSingle, limits=(-10, 10), egu='b', kind=Kind.hinted)
pseudo3 = Cpt(PseudoSingle, limits=None, egu='c', kind=Kind.hinted)
real1 = Cpt(SoftPositioner, init_pos=0)
real2 = Cpt(SoftPositioner, init_pos=0)
real3 = Cpt(SoftPositioner, init_pos=0)
sig = Cpt(Signal, value=0)
@pseudo_position_argument
def forward(self, pseudo_pos):
pseudo_pos = self.PseudoPosition(*pseudo_pos)
# logger.debug('forward %s', pseudo_pos)
return self.RealPosition(real1=-pseudo_pos.pseudo1,
real2=-pseudo_pos.pseudo2,
real3=-pseudo_pos.pseudo3)
@real_position_argument
def inverse(self, real_pos):
real_pos = self.RealPosition(*real_pos)
# logger.debug('inverse %s', real_pos)
return self.PseudoPosition(pseudo1=-real_pos.real1,
pseudo2=-real_pos.real2,
pseudo3=-real_pos.real3)
class SPseudo1x3(PseudoPositioner):
pseudo1 = Cpt(PseudoSingle, limits=(-10, 10), kind=Kind.hinted)
real1 = Cpt(SoftPositioner, init_pos=0)
real2 = Cpt(SoftPositioner, init_pos=0)
real3 = Cpt(SoftPositioner, init_pos=0)
@pseudo_position_argument
def forward(self, pseudo_pos):
pseudo_pos = self.PseudoPosition(*pseudo_pos)
# logger.debug('forward %s', pseudo_pos)
return self.RealPosition(real1=-pseudo_pos.pseudo1,
real2=-pseudo_pos.pseudo1,
real3=-pseudo_pos.pseudo1)
@real_position_argument
def inverse(self, real_pos):
real_pos = self.RealPosition(*real_pos)
# logger.debug('inverse %s', real_pos)
return self.PseudoPosition(pseudo1=-real_pos.real1)
class SynAxisNoPosition(SynAxis):
@property
def position(self):
raise AttributeError
def make_fake_device(cls):
"""
Inspect cls and construct a fake device that has the same structure.
This works by replacing EpicsSignal with FakeEpicsSignal and EpicsSignalRO
with FakeEpicsSignalRO. The fake class will be a subclass of the real
class.
This assumes that EPICS connections are done entirely in EpicsSignal and
EpicsSignalRO subcomponents. If this is not true, this will fail silently
on class construction and loudly when manipulating an object.
Parameters
----------
cls : Device
A real Device class to inspect and create a fake Device class from
Returns
-------
fake_device : Device
The resulting fake Device class
"""
# Cache to avoid repeating work.
# EpicsSignal and EpicsSignalRO begin in the cache.
if cls not in fake_device_cache:
if not issubclass(cls, Device):
# Ignore non-devices and non-epics-signals
logger.debug('Ignore cls=%s, bases are %s', cls, cls.__bases__)
fake_device_cache[cls] = cls
return cls
fake_dict = {}
# Update all the components recursively
for cpt_name in cls.component_names:
cpt = getattr(cls, cpt_name)
if isinstance(cpt, DDCpt):
# Make a regular Cpt out of the DDC, as it already has
# been generated
fake_cpt = Cpt(cls=cpt.cls, suffix=cpt.suffix,
lazy=cpt.lazy,
trigger_value=cpt.trigger_value,
kind=cpt.kind, add_prefix=cpt.add_prefix,
doc=cpt.doc, **cpt.kwargs,
)
else:
fake_cpt = copy.copy(cpt)
fake_cpt.cls = make_fake_device(cpt.cls)
logger.debug('switch cpt_name=%s to cls=%s', cpt_name,
fake_cpt.cls)
fake_dict[cpt_name] = fake_cpt
fake_class = type('Fake{}'.format(cls.__name__), (cls,), fake_dict)
fake_device_cache[cls] = fake_class
logger.debug('fake_device_cache[%s] = %s', cls, fake_class)
return fake_device_cache[cls]
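# --- Illustrative sketch (not part of the original module): building a fake
# class from an EpicsSignal-based Device and driving it without any EPICS
# connection. The device class and PV prefix are hypothetical.
def _example_make_fake_device():
    class Diode(Device):
        current = Cpt(EpicsSignal, ':CURRENT')

    FakeDiode = make_fake_device(Diode)
    fake = FakeDiode('SIM:DIODE', name='fake_diode')
    fake.current.sim_put(3.14)  # set the fake PV's value locally
    return fake.current.get()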
def clear_fake_device(dev, *, default_value=0, default_string_value='',
ignore_exceptions=False):
'''Clear a fake device by setting all signals to a specific value
Parameters
----------
dev : Device
The fake device
default_value : any, optional
The value to put to non-string components
default_string_value : any, optional
The value to put to components determined to be strings
ignore_exceptions : bool, optional
Ignore any exceptions raised by `sim_put`
Returns
-------
all_values : list
List of all (signal_instance, value) that were set
'''
all_values = []
for walk in dev.walk_signals(include_lazy=True):
sig = walk.item
if not hasattr(sig, 'sim_put'):
continue
try:
string = getattr(sig, 'as_string', False)
value = (default_string_value
if string
else default_value)
sig.sim_put(value)
except Exception:
if not ignore_exceptions:
raise
else:
all_values.append((sig, value))
return all_values
def instantiate_fake_device(dev_cls, *, name=None, prefix='_prefix',
**specified_kw):
'''Instantiate a fake device, optionally specifying some initializer kwargs
If unspecified, all initializer keyword arguments will default to the
string f"_{argument_name}_".
Parameters
----------
dev_cls : class
The device class to instantiate. This is allowed to be a regular
device, as `make_fake_device` will be called on it first.
name : str, optional
The instantiated device name
prefix : str, optional
The instantiated device prefix
**specified_kw :
Keyword arguments to override with a specific value
Returns
-------
dev : dev_cls instance
The instantiated fake device
'''
dev_cls = make_fake_device(dev_cls)
sig = inspect.signature(dev_cls)
ignore_kw = {'kind', 'read_attrs', 'configuration_attrs', 'parent',
'args', 'name', 'prefix'}
def get_kwarg(name, param):
default = param.default
if default == param.empty:
# NOTE: could check param.annotation here
default = '_{}_'.format(param.name)
return specified_kw.get(name, default)
kwargs = {name: get_kwarg(name, param)
for name, param in sig.parameters.items()
if param.kind != param.VAR_KEYWORD and
name not in ignore_kw
}
kwargs['name'] = (name if name is not None else dev_cls.__name__)
kwargs['prefix'] = prefix
return dev_cls(**kwargs)
class FakeEpicsSignal(SynSignal):
"""
Fake version of EpicsSignal that's really just a SynSignal.
    Whereas SynSignal is generally used to test plans, FakeEpicsSignal is
generally used in conjunction with make_fake_device to test any logic
inside of a Device subclass.
Unlike in SynSignal, this class is generally instantiated inside of a
subcomponent generated automatically by make_fake_device. This means we
need extra hooks for modifying the signal's properties after the class
instantiates.
We can emulate EpicsSignal features here. We currently emulate the put
limits and some enum handling.
"""
def __init__(self, read_pv, write_pv=None, *, put_complete=False,
string=False, limits=False, auto_monitor=False, name=None,
**kwargs):
"""
Mimic EpicsSignal signature
"""
self.as_string = string
self._enum_strs = None
super().__init__(name=name, **kwargs)
self._use_limits = limits
self._put_func = None
self._limits = None
self._metadata.update(
connected=True,
)
def describe(self):
desc = super().describe()
if self._enum_strs is not None:
desc[self.name]['enum_strs'] = self.enum_strs
return desc
def sim_set_putter(self, putter):
"""
        Define arbitrary behavior on signal put.
This can be used to emulate basic IOC behavior.
"""
self._put_func = putter
def get(self, *, as_string=None, connection_timeout=1.0, **kwargs):
"""
Implement getting as enum strings
"""
if as_string is None:
as_string = self.as_string
value = super().get()
if as_string:
if self.enum_strs is not None and isinstance(value, int):
return self.enum_strs[value]
elif value is not None:
return str(value)
return value
def put(self, value, *args, **kwargs):
"""
Implement putting as enum strings and put functions
"""
if self.enum_strs is not None:
if value in self.enum_strs:
value = self.enum_strs.index(value)
elif isinstance(value, str):
err = '{} not in enum strs {}'.format(value, self.enum_strs)
raise ValueError(err)
if self._put_func is not None:
return self._put_func(value, *args, **kwargs)
return super().put(value, *args, **kwargs)
def sim_put(self, *args, **kwargs):
"""
Update the read-only signal's value.
Implement here instead of FakeEpicsSignalRO so you can call it with
every fake signal.
"""
force = kwargs.pop('force', True)
# The following will emit SUB_VALUE:
ret = Signal.put(self, *args, force=force, **kwargs)
# Also, ensure that SUB_META has been emitted:
self._run_subs(sub_type=self.SUB_META, **self._metadata)
return ret
@property
def enum_strs(self):
"""
Simulated enum strings.
Use sim_set_enum_strs during setup to set the enum strs.
"""
return self._enum_strs
def sim_set_enum_strs(self, enums):
"""
Set the enum_strs for a fake device
Parameters
----------
enums: list or tuple of str
The enums will be accessed by array index, e.g. the first item in
enums will be 0, the next will be 1, etc.
"""
self._enum_strs = tuple(enums)
self._metadata['enum_strs'] = tuple(enums)
self._run_subs(sub_type=self.SUB_META, **self._metadata)
@property
def limits(self):
return self._limits
def sim_set_limits(self, limits):
"""
Set the fake signal's limits.
"""
self._limits = limits
def check_value(self, value):
"""
Implement some of the checks from EpicsSignal
"""
super().check_value(value)
if value is None:
raise ValueError('Cannot write None to EPICS PVs')
if self._use_limits and not self.limits[0] <= value <= self.limits[1]:
raise LimitError(f'value={value} not within limits {self.limits}')
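# --- Illustrative sketch (not part of the original module): emulating an EPICS
# enum record with a FakeEpicsSignal, as described in the class docstring above.
# The PV name is hypothetical.
def _example_fake_enum_signal():
    sig = FakeEpicsSignal('SIM:MODE', name='mode', string=True)
    sig.sim_set_enum_strs(('off', 'on'))
    sig.sim_put(1)
    return sig.get()  # -> 'on' (string=True requests enum-string readback)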
class FakeEpicsSignalRO(SynSignalRO, FakeEpicsSignal):
"""
Read-only FakeEpicsSignal
"""
pass
class FakeEpicsSignalWithRBV(FakeEpicsSignal):
"""
FakeEpicsSignal with PV and PV_RBV; used in the AreaDetector PV naming
scheme
"""
def __init__(self, prefix, **kwargs):
super().__init__(prefix + '_RBV', write_pv=prefix, **kwargs)
fake_device_cache = {EpicsSignal: FakeEpicsSignal,
EpicsSignalRO: FakeEpicsSignalRO,
EpicsSignalWithRBV: FakeEpicsSignalWithRBV,
}
class DirectImage(Device):
img = Cpt(SynSignal, kind='hinted')
def __init__(self, *args, func=None, **kwargs):
super().__init__(*args, **kwargs)
if func is not None:
self.img.sim_set_func(func)
def trigger(self):
return self.img.trigger()
def hw(save_path=None):
"Build a set of synthetic hardware (hence the abbreviated name, hw)"
motor = SynAxis(name='motor', labels={'motors'})
motor1 = SynAxis(name='motor1', labels={'motors'})
motor2 = SynAxis(name='motor2', labels={'motors'})
motor3 = SynAxis(name='motor3', labels={'motors'})
jittery_motor1 = SynAxis(name='jittery_motor1',
readback_func=lambda x: x + np.random.rand(),
labels={'motors'})
jittery_motor2 = SynAxis(name='jittery_motor2',
readback_func=lambda x: x + np.random.rand(),
labels={'motors'})
noisy_det = SynGauss('noisy_det', motor, 'motor', center=0, Imax=1,
noise='uniform', sigma=1, noise_multiplier=0.1,
labels={'detectors'})
det = SynGauss('det', motor, 'motor', center=0, Imax=1, sigma=1,
labels={'detectors'})
identical_det = SynGauss('det', motor, 'motor', center=0, Imax=1, sigma=1,
labels={'detectors'})
det1 = SynGauss('det1', motor1, 'motor1', center=0, Imax=5, sigma=0.5,
labels={'detectors'})
det2 = SynGauss('det2', motor2, 'motor2', center=1, Imax=2, sigma=2,
labels={'detectors'})
det3 = SynGauss('det3', motor3, 'motor3', center=-1, Imax=2, sigma=1,
labels={'detectors'})
det4 = Syn2DGauss('det4', motor1, 'motor1', motor2, 'motor2',
center=(0, 0), Imax=1, labels={'detectors'})
det5 = Syn2DGauss('det5', jittery_motor1, 'jittery_motor1', jittery_motor2,
'jittery_motor2', center=(0, 0), Imax=1,
labels={'detectors'})
flyer1 = MockFlyer('flyer1', det, motor, 1, 5, 20)
flyer2 = MockFlyer('flyer2', det, motor, 1, 5, 10)
trivial_flyer = TrivialFlyer()
new_trivial_flyer = NewTrivialFlyer()
ab_det = ABDetector(name='det', labels={'detectors'})
# area detector that directly stores image data in Event
direct_img = DirectImage(func=lambda: np.array(np.ones((10, 10))),
name='direct', labels={'detectors'})
direct_img.img.name = 'img'
direct_img_list = DirectImage(func=lambda: [[1] * 10] * 10,
name='direct', labels={'detectors'})
direct_img_list.img.name = 'direct_img_list'
# area detector that stores data in a file
img = SynSignalWithRegistry(func=lambda: np.array(np.ones((10, 10))),
name='img', labels={'detectors'},
save_path=save_path)
invariant1 = InvariantSignal(func=lambda: 0, name='invariant1',
labels={'detectors'})
invariant2 = InvariantSignal(func=lambda: 0, name='invariant2',
labels={'detectors'})
det_with_conf = DetWithConf(name='det', labels={'detectors'})
det_with_count_time = DetWithCountTime(name='det', labels={'detectors'})
rand = SynPeriodicSignal(name='rand', labels={'detectors'})
rand2 = SynPeriodicSignal(name='rand2', labels={'detectors'})
motor_no_pos = SynAxisNoPosition(name='motor', labels={'motors'})
bool_sig = Signal(value=False, name='bool_sig', labels={'detectors'})
motor_empty_hints1 = SynAxisEmptyHints(name='motor1', labels={'motors'})
motor_empty_hints2 = SynAxisEmptyHints(name='motor2', labels={'motors'})
motor_no_hints1 = SynAxisNoHints(name='motor1', labels={'motors'})
motor_no_hints2 = SynAxisNoHints(name='motor2', labels={'motors'})
# Because some of these reference one another we must define them (above)
# before we pack them into a namespace (below).
signal = SynSignal(name='signal')
return SimpleNamespace(
motor=motor,
motor1=motor1,
motor2=motor2,
motor3=motor3,
jittery_motor1=jittery_motor1,
jittery_motor2=jittery_motor2,
noisy_det=noisy_det,
det=det,
identical_det=identical_det,
det1=det1,
det2=det2,
det3=det3,
det4=det4,
det5=det5,
flyer1=flyer1,
flyer2=flyer2,
trivial_flyer=trivial_flyer,
new_trivial_flyer=new_trivial_flyer,
ab_det=ab_det,
direct_img=direct_img,
direct_img_list=direct_img_list,
img=img,
invariant1=invariant1,
invariant2=invariant2,
pseudo3x3=SPseudo3x3(name='pseudo3x3'),
pseudo1x3=SPseudo1x3(name='pseudo1x3'),
sig=Signal(name='sig', value=0),
det_with_conf=det_with_conf,
det_with_count_time=det_with_count_time,
rand=rand,
rand2=rand2,
motor_no_pos=motor_no_pos,
motor_empty_hints1=motor_empty_hints1,
motor_empty_hints2=motor_empty_hints2,
motor_no_hints1=motor_no_hints1,
motor_no_hints2=motor_no_hints2,
bool_sig=bool_sig,
signal=signal,
)
# Dump instances of the example hardware generated by hw() into the global
# namespace for convenience and back-compat.
globals().update(hw().__dict__)
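# Illustrative sketch (an assumption, not part of this module): typical use of
# the hw() namespace from a test or an interactive session.
def _example_hw_usage(save_path=None):
    ns = hw(save_path=save_path)
    ns.motor.set(1)           # move the synthetic axis
    return ns.det.read()      # Gaussian detector coupled to 'motor'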
|
iptables_cmd_invoke.py
|
import os
import time
import json
import socket
import requests
import threading
import pandas as pd
from configparser import ConfigParser
start_global = 0.0
api_invoke_add = []
api_invoke_delete = []
rule_apply = []
total_time = []
global_time = []
type_test = []
def change_format_float_list(old_list):
new_list = list()
for flt in old_list:
new_list.append(str(flt).replace('.', ','))
return new_list
def check_port_connection(host, port):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(0.05)
s.connect((host, int(port)))
s.shutdown(socket.SHUT_RDWR)
return True
    except OSError:
return False
def take_time(destination_net, action):
global start_global
global rule_apply
global global_time
# accept case
response = False
escape = True
if action == 'DROP':
print("in DROP")
response = not response
escape = not escape
start_apply = time.monotonic()
while response != escape:
response = check_port_connection(destination_net[:-3], 80)
end_apply = time.monotonic()
print(f"Wait_apply_rule_any: start {start_apply} - end {end_apply} Time: {end_apply - start_apply}")
print(f"Global_time_rule_any: start {start_global} - end {end_apply} Time: {end_apply - start_global}")
rule_apply.append(end_apply - start_apply)
global_time.append(end_apply - start_global)
def add_rule(uri_path, action, source_net, destination_net, protocol, saveTime=False):
global start_global
global api_invoke_add
global type_test
data = dict(rule=dict(src=source_net, table="FORWARD", protocol=protocol,
dst=destination_net, action=action, extra_flag="--destination-port 80"))
start_send = time.monotonic()
try:
r = requests.post(uri_path, verify=False, json=data['rule'])
if r.status_code == 201:
end_send = time.monotonic()
print(f"Add_rule: start {start_send} - end {end_send} Time: {end_send - start_send}")
if saveTime:
api_invoke_add.append(end_send - start_send)
else:
print("Issues with rule entry\n")
response = json.loads(r.text)
print(response)
exit(1)
    except requests.exceptions.HTTPError as errh:
        print("Http Error:", errh)
    except requests.exceptions.ConnectionError as errc:
        print("Error Connecting:", errc)
    except requests.exceptions.Timeout as errt:
        print("Timeout Error:", errt)
    except requests.exceptions.RequestException as err:
        print("Oops: something else went wrong", err)
        print("Server not active")
        exit(3)
def delete_rule(uri_path, saveTime=False):
global start_global
    global api_invoke_delete
global type_test
data = dict(delete="delete")
start_send = time.monotonic()
r = None
try:
r = requests.post(uri_path, verify=False, json=data['delete'])
if r.status_code == 201:
end_send = time.monotonic()
print(f"Delete rule."
f"Time: {end_send - start_send}")
if saveTime:
api_invoke_delete.append(end_send - start_send)
else:
print("Issues with rule entry\n")
exit(1)
    except requests.exceptions.HTTPError as errh:
        print("Http Error:", errh)
    except requests.exceptions.ConnectionError as errc:
        print("Error Connecting:", errc)
    except requests.exceptions.Timeout as errt:
        print("Timeout Error:", errt)
    except requests.exceptions.RequestException as err:
        print("Oops: something else went wrong", err)
        print("Server not active")
        exit(3)
def add_rule_and_take_application_time(uri_path, source_net, destination_net, protocol, action, saveTime=False):
    # Hand Thread the callables plus their arguments; calling them inline would
    # run them sequentially in the main thread instead of concurrently.
    threading.Thread(target=add_rule,
                     kwargs=dict(uri_path=uri_path, action=action, source_net=source_net,
                                 destination_net=destination_net, protocol=protocol, saveTime=saveTime)).start()
    threading.Thread(target=take_time, args=(destination_net, action)).start()
def print_test_port_80_results():
df = pd.DataFrame(
{
"API Invoke delete": change_format_float_list(api_invoke_delete),
"API Invoke Add": change_format_float_list(api_invoke_add),
"Rule apply": change_format_float_list(rule_apply),
"Total Time": change_format_float_list(total_time),
"Global Time": change_format_float_list(global_time)
}
)
print(df)
return df
def execute_test(remote_base_uri, source_net, destination_net, protocol):
global start_global
iptables_api_add = "add/rule"
uri_path_add = os.path.join(remote_base_uri, iptables_api_add)
iptables_api_delete = "delete/rule"
uri_path_delete = os.path.join(remote_base_uri, iptables_api_delete)
start_send = time.monotonic()
start_global = start_send
print(start_global)
delete_rule(uri_path_delete, True)
print(f"Eliminated the rule that blocks IP on port 80")
add_rule_and_take_application_time(uri_path_add, source_net, destination_net, protocol=protocol, action='ACCEPT', saveTime=True)
print("Add accept rule destination IP on port 80")
def calculate_total_time(i):
global api_invoke_add
global api_invoke_delete
global rule_apply
global total_time
total_time.append(api_invoke_add[i] + api_invoke_delete[i] + rule_apply[i])
print(total_time)
def remove_all_rules(remote_base_uri):
iptables_api_delete = "delete/rule"
uri_path_delete = os.path.join(remote_base_uri, iptables_api_delete)
for _ in range(2):
delete_rule(uri_path_delete)
if __name__ == '__main__':
# Read configuration file
configuration = ConfigParser()
abs_folder_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
configuration.read(os.path.join(abs_folder_path, 'configuration.ini'))
# Parameters
protocol = configuration['IPTABLES']['protocol']
port = configuration['IPTABLES']['port']
gateway_address = configuration['IPTABLES']['gateway_address']
remote_base_uri = f"{protocol}://{gateway_address}:{port}"
source_net = configuration['IPTABLES']['source_net']
destination_net = configuration['IPTABLES']['destination_net']
n_rules = int(configuration['IPTABLES']['n_rules'])
iptables_api_add = "add/rule"
uri_path_add = os.path.join(remote_base_uri, iptables_api_add)
for i in range(n_rules):
remove_all_rules(remote_base_uri)
time.sleep(4)
add_rule(uri_path=uri_path_add, action='DROP', source_net=source_net,
destination_net=destination_net, protocol='tcp')
time.sleep(4)
add_rule(uri_path=uri_path_add, action='DROP', source_net=source_net,
destination_net=destination_net, protocol='tcp')
time.sleep(4)
execute_test(remote_base_uri, source_net, destination_net, 'tcp')
time.sleep(4)
calculate_total_time(i)
time.sleep(4)
df = print_test_port_80_results()
df.to_csv(os.path.join(os.path.dirname(abs_folder_path), 'iptables.csv'), sep=';', index=False)
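# Illustrative sketch of the configuration.ini layout this script reads. The
# values below are made-up placeholders; only the section and key names come
# from the code above:
#
#     [IPTABLES]
#     protocol = http
#     port = 5000
#     gateway_address = 192.168.1.100
#     source_net = 10.0.0.0/24
#     destination_net = 10.0.1.0/24
#     n_rules = 10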
|
net_engine.py
|
import marshal
import random
import select
import socket
import threading
import time
import urllib.error
import urllib.request
import stun
import env
def poll(sock):
return select.select([sock], [], [], 0)[0] != []
def any_actions(actions):
return any(acts for _, acts in actions)
class NetEngine:
latency = 5
replay_max_wait = 30
def __init__(self, game_model):
self.game = game_model
self.socket = None
self.threads = []
self.reset()
self.game.my_id = random.randrange(2**64)
def reset(self):
self.peers = []
self.address = None
self.last_comm_time = None
self.comm_gap_msg_at = 10
self.should_start_replay = False
self.iter_actions = {}
def start(self):
self.game.reset()
self.should_stop = False
net_thread = threading.Thread(target=self.net_thread_go)
self.threads.append(net_thread)
if not env.dev_mode:
net_thread.start()
def net_thread_go(self):
self.setup_socket()
if self.should_stop:
return
self.setup_addr_name()
if self.should_stop:
return
self.wait_for_connections()
def setup_socket(self):
while True:
local_port = random.randint(1024, 65535)
try:
if self.should_stop:
return
_nat_type, gamehost, gameport = stun.get_ip_info('0.0.0.0', local_port)
if gameport is None:
print('retrying stun connection')
continue
self.my_addr = (gamehost, gameport)
print('external host %s:%d' % self.my_addr)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('', local_port))
print('listening on port %d' % local_port)
except socket.error:
print('retrying establishing server')
continue
break
self.socket = sock
def setup_addr_name(self):
url = 'http://game-match.herokuapp.com/register/chesschase0/%s/%d/' % self.my_addr
print('registering at %s' % url)
self.address = urllib.request.urlopen(url).read().decode('utf-8')
self.game.add_message('')
self.game.add_message('Your address is:')
self.game.add_message(self.address.upper())
self.game.add_message('')
self.game.add_message('Type the address of a friend to play with them')
def wait_for_connections(self):
while not self.peers:
time.sleep(5)
if self.should_stop:
return
url = 'http://game-match.herokuapp.com/lookup/chesschase0/%s/' % self.address.replace(' ', '%20')
print('checking game at %s' % url)
self.add_peers(urllib.request.urlopen(url).read().decode('utf-8'))
def connect(self, address):
connect_thread = threading.Thread(target=self.connect_thread_go, args=(address, ))
self.threads.append(connect_thread)
connect_thread.start()
def connect_thread_go(self, addr):
self.game.add_message('Establishing connection with: %s' % addr)
while self.address is None:
# Net thread didn't finish
time.sleep(1)
url = 'http://game-match.herokuapp.com/connect/chesschase0/%s/%s/' % (self.address.replace(' ', '%20'), addr.lower().replace(' ', '%20'))
print('looking up host at %s' % url)
try:
response = urllib.request.urlopen(url).read()
except urllib.error.HTTPError as err:
if err.code == 404:
self.game.add_message('No such game: %s' % addr)
else:
self.game.add_message('Server error when looking up game: %s' % addr)
return
self.add_peers(response.decode('utf-8'))
def add_peers(self, peers_str):
for x in peers_str.split():
host, port_str = x.split(':')
port = int(port_str)
if (host, port) == self.my_addr:
continue
if (host, port) in self.peers:
continue
print('established connection with %s:%d' % (host, port))
self.peers.append((host, port))
self.game.messages.clear()
self.game.add_message('')
self.game.add_message('Connection successful!')
self.game.add_message('THE GAME BEGINS!')
self.game.mode = 'play'
self.game.init()
self.last_comm_time = time.time()
self.comm_gap_msg_at = 10
def communicate(self):
if self.socket is None:
return
packet = marshal.dumps((
self.game.my_id,
[(i, self.iter_actions.setdefault(i, {}).setdefault(self.game.my_id, []))
for i in
range(
max(0, self.game.counter-self.latency),
self.game.counter+self.latency)]))
for peer in self.peers:
self.socket.sendto(packet, 0, peer)
while poll(self.socket):
self.last_comm_time = time.time()
packet, peer = self.socket.recvfrom(0x1000)
peer_id, peer_iter_actions = marshal.loads(packet)
for i, actions in peer_iter_actions:
acts = self.iter_actions.setdefault(i, {})
if peer_id in acts:
assert acts[peer_id] == actions, '%s %s' % (acts[peer_id], actions)
else:
acts[peer_id] = actions
if self.last_comm_time is None:
return
time_since_comm = time.time() - self.last_comm_time
if time_since_comm >= self.comm_gap_msg_at:
self.game.add_message('No communication for %d seconds' % self.comm_gap_msg_at)
self.comm_gap_msg_at += 5
elif time_since_comm < 5:
self.comm_gap_msg_at = 5
def get_replay_actions(self):
return sorted(self.iter_actions.get(self.game.counter, {}).items())
def act(self):
if self.game.mode == 'replay':
all_actions = self.get_replay_actions()
if any_actions(all_actions):
self.replay_wait = 0
else:
self.game.counter += 1
all_actions = self.get_replay_actions()
self.replay_wait += 1
if self.replay_wait == self.replay_max_wait:
self.replay_wait = 0
while not any_actions(all_actions) and self.game.counter+1 < self.replay_stop:
self.game.counter += 1
all_actions = self.get_replay_actions()
elif self.game.active():
if self.game.counter < self.latency:
self.game.counter += 1
return
if len(self.iter_actions.get(self.game.counter, {})) <= len(self.peers):
# We haven't got communications from all peers for this iteration.
# So we'll wait.
return
all_actions = sorted(self.iter_actions[self.game.counter].items())
else:
return
if self.game.counter == self.latency:
# Assign players
for player, i in enumerate(i for i, _ in all_actions):
self.game.players[i] = player
for i, actions in all_actions:
for action_type, params in actions:
action_func = getattr(self.game, 'action_'+action_type, None)
if action_func is None:
self.game.add_message(action_type + ': no such action')
else:
prev_messages = len(self.game.messages)
if env.dev_mode:
action_func(i, *params)
else:
try:
action_func(i, *params)
                        except Exception:
self.game.add_message('action ' + action_type + ' failed')
if prev_messages == len(self.game.messages) and not getattr(action_func, 'quiet', False):
self.game.add_message('%s did %s' % (self.game.nick(i), action_type.upper()))
self.game.counter += 1
if self.game.mode == 'replay' and self.game.counter == self.replay_stop:
self.game.mode = 'play'
self.game.last_start = self.game.counter
self.game.init()
assert not self.game.mode == 'replay' or self.game.counter < self.replay_stop
if self.should_start_replay:
self.should_start_replay = False
print('start replay!')
self.game.mode = 'replay'
self.replay_stop = self.game.counter
self.game.counter = self.game.last_start
self.replay_wait = 0
self.game.init()
def iteration(self):
self.communicate()
if self.game.mode != 'replay' and self.game.my_id not in self.iter_actions.setdefault(self.game.counter+self.latency, {}):
self.iter_actions[self.game.counter+self.latency][self.game.my_id] = self.game.cur_actions
self.game.cur_actions = []
self.act()
def start_replay(self):
self.should_start_replay = True
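# Illustrative sketch (not used by the engine): the datagrams exchanged by
# communicate() are just marshalled (peer_id, [(iteration, actions), ...])
# tuples, so a packet can be round-tripped like this.
def _example_packet_roundtrip():
    packet = marshal.dumps((12345, [(0, [('move', (1, 2))]), (1, [])]))
    peer_id, iter_actions = marshal.loads(packet)
    return peer_id, dict(iter_actions)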
|
oclogs.py
|
#!/usr/bin/env python3
import logging
import os
import json
import arrow
import requests
import crayons
import click
import time
import random
from threading import Thread
try:
from slackclient import SlackClient
except Exception:
pass
logging.basicConfig()
COLOR_KEYS = ('red', 'green', 'blue', 'yellow', 'cyan', 'magenta', 'white', 'black')
COLORS = [getattr(crayons, c) for c in COLOR_KEYS]
DATE_FORMAT = "YYYY-MM-DD HH:mm:ss"
def colorit(name):
return COLORS[sum(map(ord, name)) % len(COLORS)](name)
class Resource(object):
name = None
namespace = None
last_seen = None
def __init__(self, d):
self.data = d
self.metadata = d["metadata"]
class Container(Resource):
def __init__(self, name, d):
"""
        A Container is passed the entire pod JSON and plucks its own data out
        based on the name parameter.
"""
super().__init__(d)
self.name = name
self.pluck_data()
self.namespace = d["metadata"]["namespace"]
if self.status:
self.state = list(self.status["state"].keys())[0]
self.state_data = self.status["state"][self.state]
else:
self.state = self.state_data = None
    def pluck_data(self):
        # Default to None so callers can rely on both attributes existing even
        # when no container with the given name is found.
        self.spec = None
        self.status = None
        for c in self.data["spec"]["containers"]:
            if c["name"] == self.name:
                self.spec = c
                break
        for c in self.data["status"].get("containerStatuses", []):
            if c["name"] == self.name:
                self.status = c
                break
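# Illustrative sketch (a made-up, minimal pod document): how Container plucks
# its spec out of the pod JSON it is handed.
def _example_container_pluck():
    pod_json = {
        "metadata": {"namespace": "demo", "name": "demo-pod",
                     "creationTimestamp": "2021-01-01T00:00:00Z"},
        "spec": {"containers": [{"name": "app", "image": "busybox"}]},
        "status": {"phase": "Running"},
    }
    c = Container("app", pod_json)
    return c.name, c.namespace, c.state  # ('app', 'demo', None)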
class Pod(Resource):
def __init__(self, d):
super().__init__(d)
md = self.metadata
self.namespace = md["namespace"]
self.name = md["name"]
self.status = d["status"]["phase"]
self.started = arrow.get(md["creationTimestamp"])
self.containers = list(self.populate_containers())
def populate_containers(self):
for name in (c["name"] for c in self.data["spec"]["containers"]):
yield Container(name, self.data)
def __eq__(self, o):
return o is not None and \
o.namespace == self.namespace and \
o.name == self.name and \
o.status == self.status
def __repr__(self):
return "%s %s: [%s] %s" % (
self.started.format(DATE_FORMAT),
colorit(self.namespace),
self.status,
crayons.white(self.name)
)
class Event(Resource):
"""
count: how many times has this event been seen
first_seen: when was this event first seen
kind: type of target resource (e.g. pod)
namespace: namespace of target resource
name: name of target resource
last_seen: when was this event last seen
message: specific info about the event
metadata: typical metadata on any kubernetes resource
reason: event type (e.g. SystemOOM, Created, etc)
source: node hostname
type: logging level, e.g. Warning, Normal, etc
"""
def __init__(self, d):
super().__init__(d)
self.count = d["count"]
self.first_seen = arrow.get(d["firstTimestamp"])
self.last_seen = arrow.get(d["lastTimestamp"])
self.obj = d["involvedObject"]
self.message = d["message"]
self.reason = d["reason"]
self.component = d["source"]["component"]
self.node = d["source"]["host"] if self.component == "kubelet" else None
self.namespace = self.obj.get("namespace", "???")
self.name = self.obj.get("name")
self.kind = self.obj.get("kind")
def __repr__(self):
return "%s %s: [%s] on %s - %s" % (
self.last_seen.format(DATE_FORMAT),
colorit(self.namespace),
self.reason,
crayons.white(f"{self.kind}/{self.name}"),
self.message
)
class Observer(object):
def __init__(self, since=arrow.now().shift(minutes=-1), slack=None):
self.since = since
self.slack = slack
self.seen_messages = {}
def has_been_seen(self, msg_key, now):
return (msg_key in self.seen_messages and
self.seen_messages[msg_key] > self.since)
def clear_seen_messages(self):
if random.randint(0, 100) == 1:
hour_ago = arrow.now().shift(hours=-1)
for k, v in list(self.seen_messages.items()):
if v < hour_ago:
del self.seen_messages[k]
def observe(self, resource, feed):
pass
class Console(Observer):
def observe(self, resource, feed):
self.clear_seen_messages()
msg = repr(resource)
now = arrow.now()
if self.has_been_seen(msg, now):
return
self.seen_messages[msg] = now
if resource.last_seen is None or resource.last_seen > self.since:
print(msg)
class SystemOOM(Observer):
def observe(self, resource, feed):
self.clear_seen_messages()
if type(resource) == Event and resource.reason == "SystemOOM":
now = arrow.now()
if self.has_been_seen(resource.node, now) or resource.last_seen < self.since:
return
self.seen_messages[resource.node] = now
print(crayons.white("{:*^80}".format("SYSTEM OOM")))
print(f"Node: {resource.node}")
print(f"Killed: {resource.last_seen.format(DATE_FORMAT)}")
print(crayons.white("*" * 80))
msg = "\n".join([
":rotating_light: *System OOM* :rotating_light:",
f"Node: {resource.node}",
])
self.slack.send_message(msg)
class FailedPodKill(Observer):
def observe(self, resource, feed):
self.clear_seen_messages()
if type(resource) == Event and resource.reason == "FailedKillPod":
msg_key = resource.namespace + resource.name
now = arrow.now()
if self.has_been_seen(msg_key, now) or resource.last_seen < self.since:
return
self.seen_messages[msg_key] = now
print(crayons.white("{:*^80}".format("Failed to kill pod")))
print(f"Pod: {resource.name}")
print(f"Killed: {resource.last_seen.format(DATE_FORMAT)}")
print(resource.message)
print(crayons.white("*" * 80))
msg = "\n".join([
":super_saiyan: *Failed to kill pod* :super_saiyan:",
f"Namespace: {resource.namespace}",
f"Pod: {resource.name}",
"```%s```" % " ".join(resource.message.split("\n"))
])
self.slack.send_message(msg)
class PodOOM(Observer):
def observe(self, resource, feed):
if type(resource) != Pod:
return
for c in resource.containers:
if c.state == "terminated" and c.state_data.get("reason") == "OOMKilled":
killed = arrow.get(c.state_data.get("finishedAt"))
if killed > self.since:
self.console(resource, c, killed)
self.send_slack(resource, c, killed)
def send_slack(self, p, c, killed):
msg = "\n".join([
":dead-docker: *POD OOM* :dead-docker:",
f"Namespace: {p.namespace}",
f"Pod: {p.name}",
f"Container: {c.name}"
])
self.slack.send_message(msg)
def console(self, p, c, killed):
print(crayons.white("{:*^80}".format("OOM KILLED")))
print(f"Pod: {p.name}")
print(f"Container: {c.name}")
print(f"Killed: {killed.format(DATE_FORMAT)}")
print(crayons.white("*" * 80))
class OpenshiftFeed(object):
resource = None
api_suffix = None
def __init__(self, api, headers, namespace, observers, ca_store):
self.api = api
self.headers = headers
self.namespace = namespace
self.observers = observers
self.ca_store = ca_store
self.resources = {}
def fetch_loop(self):
while True:
try:
ns_url = f"namespaces/{self.namespace}/" if self.namespace else ""
kwargs = {"headers": self.headers, "stream": True}
if self.ca_store:
kwargs["verify"] = self.ca_store
r = requests.get(f"{self.api}/watch/{ns_url}{self.api_suffix}", **kwargs)
if r.status_code != 200:
print(f"Invalid status from server: %s\n%s" % (
r.status_code,
r.json()['message']
))
return
for l in r.iter_lines():
d = json.loads(l)
resource = self.resource(d["object"])
self.resources[resource.name] = resource
for o in self.observers:
o.observe(resource, self)
except Exception:
logging.exception("Failed connection")
print("Reconnecting...")
time.sleep(1)
class PodFeed(OpenshiftFeed):
resource = Pod
api_suffix = "pods"
class EventFeed(OpenshiftFeed):
resource = Event
api_suffix = "events"
class Slack(object):
def __init__(self):
try:
token = os.environ["SLACK_TOKEN"]
self.channel = os.environ["SLACK_CHANNEL"]
self.client = SlackClient(token)
except Exception:
self.client = self.channel = None
def send_message(self, msg):
if self.client:
self.client.api_call("chat.postMessage", channel=self.channel, text=msg)
def disable_color():
global COLORS
global crayons
from collections import namedtuple
COLORS = [lambda *a, **k: a[0] for c in COLOR_KEYS]
crayons_tuple = namedtuple("crayons", COLOR_KEYS)
crayons = crayons_tuple(*COLORS)
@click.command()
@click.option("--token", default=os.path.expanduser("~/token"))
@click.option("--api")
@click.option("-n", "--namespace")
@click.option("--color/--no-color", default=True)
@click.option("--ca-store")
def main(token, api, namespace, color, ca_store):
if not api:
print("Please specify valid api hostname using --api")
return
if not color:
disable_color()
API = f"https://{api}/api/v1"
with open(token) as fp:
token = fp.read().strip()
headers = {
"Authorization": f"Bearer {token}",
"Accept": "application/json"
}
slack = Slack()
observers = (Console(slack=slack), PodOOM(slack=slack), SystemOOM(slack=slack), FailedPodKill(slack=slack))
for cls in (PodFeed, EventFeed):
feed = cls(API, headers, namespace, observers, ca_store)
Thread(target=feed.fetch_loop).start()
if __name__ == "__main__":
main(auto_envvar_prefix="OCLOGS")
|
bot.py
|
"""
AUTO_BOT bot file
Developers: Andrey Kozlovsky, Stanislav Ermokhin
License information for external libraries is provided in a separate text file.
"""
import datetime
import schedule
import time
import telebot
from telebot import types
from threading import Thread
from config import API_TELEGRAM_TOKEN as TOKEN
import local_en as local
import wrapper_functions
import weather
SECONDS_TO_FETCH = 7
all_seconds = [i for i in range(SECONDS_TO_FETCH)]
# Notify at 20:02 during any of the first SECONDS_TO_FETCH seconds so the
# once-per-second scheduler cannot miss the window.
TIMES = {(20, 2, second) for second in all_seconds}
bot = telebot.TeleBot(TOKEN)
user_in_database = wrapper_functions.user_in_database
process_functions_names = ['add_model', 'add_brand', 'add_year',
'add_oil', 'add_insurance', 'add_filter',
'add_tire', 'add_tech']
info_process_functions_names = ['add_username', 'add_city', 'add_email', 'add_password', 'add_phone']
my_commands = dict(zip(local.buttons_add_keyboard, process_functions_names))
user_info_commands = dict(zip(local.buttons_info_keyboard, info_process_functions_names))
def check_errors(msg, func):
x = func(msg)
if x[0]:
bot.send_message(msg.chat.id, local.success)
else:
bot.send_message(msg.chat.id, x[1])
def get_weather_info_on_id(chat_id):
city = wrapper_functions.get_city_by_chat(chat_id)
weather_object = weather.CityInfo(city=city)
dic = weather_object.weather_forecast()
weather_description = dic['weather'][0]['description']
if weather_object.description_bad.intersection(weather_description):
msg_to_send = local.weather_warning + weather_description
bot.send_message(chat_id=chat_id, text=msg_to_send)
def manual_get_weather_info_on_id(chat_id):
city = wrapper_functions.get_city_by_chat(chat_id)
weather_object = weather.CityInfo(city=city)
dic = weather_object.weather_forecast()
weather_description = dic['weather'][0]['description']
msg_to_send = local.weather_warning + weather_description
bot.send_message(chat_id=chat_id, text=msg_to_send)
def send_weather_notification(chat=None):
if not chat:
chat_ids = wrapper_functions.get_all_chat_ids()
for chat_id in chat_ids:
get_weather_info_on_id(chat_id)
else:
manual_get_weather_info_on_id(chat)
@bot.message_handler(commands=['weather'])
def send_on_help(message):
send_weather_notification(message.chat.id)
@bot.message_handler(commands=['start'])
def send_on_start(message):
if user_in_database(message.chat.username):
bot.reply_to(message, local.welcome_back_message1+message.chat.username+local.welcome_back_message2)
else:
action = wrapper_functions.add_username(message.chat)
if action[0]:
bot.reply_to(message, local.start_response_text + local.success)
else:
bot.reply_to(message, local.start_response_text + local.error)
@bot.message_handler(commands=['add'])
def send_on_add(message):
if user_in_database(message.chat.username):
markup = types.InlineKeyboardMarkup(row_width=1)
keyboard_buttons = [types.InlineKeyboardButton(item,
callback_data=item)
for item in local.buttons_add_keyboard]
for obj in keyboard_buttons:
markup.add(obj)
bot.send_message(message.chat.id,
local.explain_add_response,
reply_markup=markup)
else:
bot.reply_to(message, local.error_not_in_database)
@bot.message_handler(commands=['my_info'])
def send_on_info(message):
if user_in_database(message.chat.username):
markup = types.InlineKeyboardMarkup(row_width=1)
keyboard_buttons = [types.InlineKeyboardButton(item,
callback_data=item)
for item in local.buttons_info_keyboard]
for obj in keyboard_buttons:
markup.add(obj)
bot.send_message(message.chat.id,
local.explain_info_response,
reply_markup=markup)
else:
bot.reply_to(message, local.error_not_in_database)
@bot.callback_query_handler(func=lambda call: call.data in my_commands or
call.data in user_info_commands or local.okay in call.data)
def get_on_add(call):
try:
if call.message:
if call.data in my_commands:
msg = bot.reply_to(call.message, text=local.give_value)
result_function = getattr(wrapper_functions, my_commands[call.data])
bot.register_next_step_handler(msg, lambda m: check_errors(m, result_function))
elif call.data in user_info_commands:
msg = bot.reply_to(call.message, local.give_value)
result_function = getattr(wrapper_functions, user_info_commands[call.data])
bot.register_next_step_handler(msg, lambda m: check_errors(m, result_function))
elif local.okay in call.data:
data = call.data
to_find = local.okay + ' '
key = data[len(to_find):]
bot.send_message(call.message.chat.id,
text=local.okay_response+': '+key)
x = wrapper_functions.update_after_notification(call.message, [key])
if x:
bot.send_message(call.message.chat.id,
text=local.success)
else:
bot.send_message(call.message.chat.id, text=local.error)
else:
raise Exception('call.message is None/False') # debugging
except Exception as e:
# sending error message to bot for debugging ----------------
bot.send_message(call.message.chat.id, local.error+'\n'+str(e))
# -----------------------------------------------------------
def schedule_checker():
while True:
schedule.run_pending()
time.sleep(1)
def schedule_checker_weather():
while True:
schedule.run_pending()
time.sleep(10)
def send_notification():
# time check -----
now_time = datetime.datetime.now().timetuple()
current_time = (now_time.tm_hour, now_time.tm_min, now_time.tm_sec)
if current_time in TIMES: # time check -----
chat_ids = wrapper_functions.get_all_chat_ids()
for chat_id in chat_ids:
dic = wrapper_functions.check_notification(chat_id=chat_id)
            if any(t != '' for t in dic['type']):  # at least one non-empty type
markup_okay = types.InlineKeyboardMarkup(row_width=1)
for item in dic['type']:
i = local.types_dict[item]
markup_okay.add(types.InlineKeyboardButton(text=local.okay+' '+i,
callback_data=local.okay+' '+i))
bot.send_message(chat_id=chat_id,
reply_markup=markup_okay,
text=local.notify_okay)
schedule.every(1).seconds.do(send_notification) # (every 1 second) or (every 24 hours and clear time check)
t1 = Thread(target=schedule_checker)
t1.daemon = True
t1.start()
schedule.every(10).minutes.do(send_weather_notification) # weather API limitation
t2 = Thread(target=schedule_checker_weather)
t2.daemon = True
t2.start()
schedule.run_all()
@bot.message_handler(content_types=['text'])
def text_test_run(message):
# debugging and test --------------------------------------------
bot.send_message(message.chat.id, 'Reached function text_test_run')
# ---------------------------------------------------------------
bot.polling()
|
scrubdash.py
|
#!/usr/bin/env python3
"""
This module contains the entry point for the CLI script and the PyPI
console script.
"""
import argparse
import yaml
import logging
from multiprocessing import Process, Queue
from scrubdash.asyncio_server.asyncio_server import AsyncioServer
from scrubdash.dash_server.dash_server import start_dash
parser = argparse.ArgumentParser()
parser.add_argument('config_filename')
parser.add_argument('-c', '--continue', dest='cont', action='store_true')
args = parser.parse_args()
CONFIG_FILE = args.config_filename
CONTINUE_RUN = args.cont
logging.basicConfig(level=logging.INFO,
format='[%(levelname)s] %(message)s (%(name)s)')
log = logging.getLogger('main')
with open(CONFIG_FILE) as f:
configs = yaml.load(f, Loader=yaml.SafeLoader)
def main():
asyncio_to_dash_queue = Queue()
asyncio_server = AsyncioServer(configs,
asyncio_to_dash_queue,
CONTINUE_RUN)
# Start the asyncio server in a different process
asyncio = Process(target=asyncio_server.start_server)
asyncio.start()
# Start the dash server in a different process
dash = Process(target=start_dash, args=(configs,
asyncio_to_dash_queue))
dash.start()
try:
asyncio.join()
dash.join()
except KeyboardInterrupt:
# Wait for asyncio server and dash server to shut down
        asyncio.join()
        dash.join()
log.info('Successfully shut down scrubdash.')
if __name__ == "__main__":
main()
|
test_ftplib.py
|
"""Test script for ftplib module."""
# Modified by Giampaolo Rodola' to test FTP class, IPv6 and TLS
# environment
import ftplib
import asyncore
import asynchat
import socket
import io
import errno
import os
import threading
import time
try:
import ssl
except ImportError:
ssl = None
from unittest import TestCase, skipUnless
from test import support
from test.support import socket_helper
from test.support.socket_helper import HOST, HOSTv6
TIMEOUT = support.LOOPBACK_TIMEOUT
DEFAULT_ENCODING = 'utf-8'
# the dummy data returned by server over the data channel when
# RETR, LIST, NLST, MLSD commands are issued
RETR_DATA = 'abcde12345\r\n' * 1000 + 'non-ascii char \xAE\r\n'
LIST_DATA = 'foo\r\nbar\r\n non-ascii char \xAE\r\n'
NLST_DATA = 'foo\r\nbar\r\n non-ascii char \xAE\r\n'
MLSD_DATA = ("type=cdir;perm=el;unique==keVO1+ZF4; test\r\n"
"type=pdir;perm=e;unique==keVO1+d?3; ..\r\n"
"type=OS.unix=slink:/foobar;perm=;unique==keVO1+4G4; foobar\r\n"
"type=OS.unix=chr-13/29;perm=;unique==keVO1+5G4; device\r\n"
"type=OS.unix=blk-11/108;perm=;unique==keVO1+6G4; block\r\n"
"type=file;perm=awr;unique==keVO1+8G4; writable\r\n"
"type=dir;perm=cpmel;unique==keVO1+7G4; promiscuous\r\n"
"type=dir;perm=;unique==keVO1+1t2; no-exec\r\n"
"type=file;perm=r;unique==keVO1+EG4; two words\r\n"
"type=file;perm=r;unique==keVO1+IH4; leading space\r\n"
"type=file;perm=r;unique==keVO1+1G4; file1\r\n"
"type=dir;perm=cpmel;unique==keVO1+7G4; incoming\r\n"
"type=file;perm=r;unique==keVO1+1G4; file2\r\n"
"type=file;perm=r;unique==keVO1+1G4; file3\r\n"
"type=file;perm=r;unique==keVO1+1G4; file4\r\n"
"type=dir;perm=cpmel;unique==SGP1; dir \xAE non-ascii char\r\n"
"type=file;perm=r;unique==SGP2; file \xAE non-ascii char\r\n")
class DummyDTPHandler(asynchat.async_chat):
dtp_conn_closed = False
def __init__(self, conn, baseclass):
asynchat.async_chat.__init__(self, conn)
self.baseclass = baseclass
self.baseclass.last_received_data = ''
self.encoding = baseclass.encoding
def handle_read(self):
new_data = self.recv(1024).decode(self.encoding, 'replace')
self.baseclass.last_received_data += new_data
def handle_close(self):
# XXX: this method can be called many times in a row for a single
# connection, including in clear-text (non-TLS) mode.
# (behaviour witnessed with test_data_connection)
if not self.dtp_conn_closed:
self.baseclass.push('226 transfer complete')
self.close()
self.dtp_conn_closed = True
def push(self, what):
if self.baseclass.next_data is not None:
what = self.baseclass.next_data
self.baseclass.next_data = None
if not what:
return self.close_when_done()
super(DummyDTPHandler, self).push(what.encode(self.encoding))
def handle_error(self):
raise Exception
class DummyFTPHandler(asynchat.async_chat):
dtp_handler = DummyDTPHandler
def __init__(self, conn, encoding=DEFAULT_ENCODING):
asynchat.async_chat.__init__(self, conn)
# tells the socket to handle urgent data inline (ABOR command)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_OOBINLINE, 1)
self.set_terminator(b"\r\n")
self.in_buffer = []
self.dtp = None
self.last_received_cmd = None
self.last_received_data = ''
self.next_response = ''
self.next_data = None
self.rest = None
self.next_retr_data = RETR_DATA
self.push('220 welcome')
self.encoding = encoding
# We use this as the string IPv4 address to direct the client
# to in response to a PASV command. To test security behavior.
# https://bugs.python.org/issue43285/.
self.fake_pasv_server_ip = '252.253.254.255'
def collect_incoming_data(self, data):
self.in_buffer.append(data)
def found_terminator(self):
line = b''.join(self.in_buffer).decode(self.encoding)
self.in_buffer = []
if self.next_response:
self.push(self.next_response)
self.next_response = ''
cmd = line.split(' ')[0].lower()
self.last_received_cmd = cmd
space = line.find(' ')
if space != -1:
arg = line[space + 1:]
else:
arg = ""
if hasattr(self, 'cmd_' + cmd):
method = getattr(self, 'cmd_' + cmd)
method(arg)
else:
self.push('550 command "%s" not understood.' %cmd)
def handle_error(self):
raise Exception
def push(self, data):
asynchat.async_chat.push(self, data.encode(self.encoding) + b'\r\n')
def cmd_port(self, arg):
addr = list(map(int, arg.split(',')))
ip = '%d.%d.%d.%d' %tuple(addr[:4])
port = (addr[4] * 256) + addr[5]
s = socket.create_connection((ip, port), timeout=TIMEOUT)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_pasv(self, arg):
with socket.create_server((self.socket.getsockname()[0], 0)) as sock:
sock.settimeout(TIMEOUT)
port = sock.getsockname()[1]
ip = self.fake_pasv_server_ip
            ip = ip.replace('.', ',')
            p1, p2 = divmod(port, 256)
self.push('227 entering passive mode (%s,%d,%d)' %(ip, p1, p2))
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
def cmd_eprt(self, arg):
af, ip, port = arg.split(arg[0])[1:-1]
port = int(port)
s = socket.create_connection((ip, port), timeout=TIMEOUT)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_epsv(self, arg):
with socket.create_server((self.socket.getsockname()[0], 0),
family=socket.AF_INET6) as sock:
sock.settimeout(TIMEOUT)
port = sock.getsockname()[1]
self.push('229 entering extended passive mode (|||%d|)' %port)
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
def cmd_echo(self, arg):
# sends back the received string (used by the test suite)
self.push(arg)
def cmd_noop(self, arg):
self.push('200 noop ok')
def cmd_user(self, arg):
self.push('331 username ok')
def cmd_pass(self, arg):
self.push('230 password ok')
def cmd_acct(self, arg):
self.push('230 acct ok')
def cmd_rnfr(self, arg):
self.push('350 rnfr ok')
def cmd_rnto(self, arg):
self.push('250 rnto ok')
def cmd_dele(self, arg):
self.push('250 dele ok')
def cmd_cwd(self, arg):
self.push('250 cwd ok')
def cmd_size(self, arg):
self.push('250 1000')
def cmd_mkd(self, arg):
self.push('257 "%s"' %arg)
def cmd_rmd(self, arg):
self.push('250 rmd ok')
def cmd_pwd(self, arg):
self.push('257 "pwd ok"')
def cmd_type(self, arg):
self.push('200 type ok')
def cmd_quit(self, arg):
self.push('221 quit ok')
self.close()
def cmd_abor(self, arg):
self.push('226 abor ok')
def cmd_stor(self, arg):
self.push('125 stor ok')
def cmd_rest(self, arg):
self.rest = arg
self.push('350 rest ok')
def cmd_retr(self, arg):
self.push('125 retr ok')
if self.rest is not None:
offset = int(self.rest)
else:
offset = 0
self.dtp.push(self.next_retr_data[offset:])
self.dtp.close_when_done()
self.rest = None
def cmd_list(self, arg):
self.push('125 list ok')
self.dtp.push(LIST_DATA)
self.dtp.close_when_done()
def cmd_nlst(self, arg):
self.push('125 nlst ok')
self.dtp.push(NLST_DATA)
self.dtp.close_when_done()
def cmd_opts(self, arg):
self.push('200 opts ok')
def cmd_mlsd(self, arg):
self.push('125 mlsd ok')
self.dtp.push(MLSD_DATA)
self.dtp.close_when_done()
def cmd_setlongretr(self, arg):
# For testing. Next RETR will return long line.
self.next_retr_data = 'x' * int(arg)
self.push('125 setlongretr ok')
class DummyFTPServer(asyncore.dispatcher, threading.Thread):
handler = DummyFTPHandler
def __init__(self, address, af=socket.AF_INET, encoding=DEFAULT_ENCODING):
threading.Thread.__init__(self)
asyncore.dispatcher.__init__(self)
self.daemon = True
self.create_socket(af, socket.SOCK_STREAM)
self.bind(address)
self.listen(5)
self.active = False
self.active_lock = threading.Lock()
self.host, self.port = self.socket.getsockname()[:2]
self.handler_instance = None
self.encoding = encoding
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
while self.active and asyncore.socket_map:
self.active_lock.acquire()
asyncore.loop(timeout=0.1, count=1)
self.active_lock.release()
asyncore.close_all(ignore_all=True)
def stop(self):
assert self.active
self.active = False
self.join()
def handle_accepted(self, conn, addr):
self.handler_instance = self.handler(conn, encoding=self.encoding)
def handle_connect(self):
self.close()
handle_read = handle_connect
def writable(self):
return 0
def handle_error(self):
raise Exception
if ssl is not None:
CERTFILE = os.path.join(os.path.dirname(__file__), "keycert3.pem")
CAFILE = os.path.join(os.path.dirname(__file__), "pycacert.pem")
class SSLConnection(asyncore.dispatcher):
"""An asyncore.dispatcher subclass supporting TLS/SSL."""
_ssl_accepting = False
_ssl_closing = False
def secure_connection(self):
context = ssl.SSLContext()
context.load_cert_chain(CERTFILE)
socket = context.wrap_socket(self.socket,
suppress_ragged_eofs=False,
server_side=True,
do_handshake_on_connect=False)
self.del_channel()
self.set_socket(socket)
self._ssl_accepting = True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
elif err.args[0] == ssl.SSL_ERROR_EOF:
return self.handle_close()
# TODO: SSLError does not expose alert information
elif "SSLV3_ALERT_BAD_CERTIFICATE" in err.args[1]:
return self.handle_close()
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def _do_ssl_shutdown(self):
self._ssl_closing = True
try:
self.socket = self.socket.unwrap()
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
except OSError:
# Any "socket error" corresponds to a SSL_ERROR_SYSCALL return
# from OpenSSL's SSL_shutdown(), corresponding to a
# closed socket condition. See also:
# http://www.mail-archive.com/openssl-users@openssl.org/msg60710.html
pass
self._ssl_closing = False
            if getattr(self, '_ccc', False) is False:
                super(SSLConnection, self).close()
def handle_read_event(self):
if self._ssl_accepting:
self._do_ssl_handshake()
elif self._ssl_closing:
self._do_ssl_shutdown()
else:
super(SSLConnection, self).handle_read_event()
def handle_write_event(self):
if self._ssl_accepting:
self._do_ssl_handshake()
elif self._ssl_closing:
self._do_ssl_shutdown()
else:
super(SSLConnection, self).handle_write_event()
def send(self, data):
try:
return super(SSLConnection, self).send(data)
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN,
ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return 0
raise
def recv(self, buffer_size):
try:
return super(SSLConnection, self).recv(buffer_size)
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return b''
if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN):
self.handle_close()
return b''
raise
def handle_error(self):
raise Exception
def close(self):
if (isinstance(self.socket, ssl.SSLSocket) and
self.socket._sslobj is not None):
self._do_ssl_shutdown()
else:
super(SSLConnection, self).close()
class DummyTLS_DTPHandler(SSLConnection, DummyDTPHandler):
"""A DummyDTPHandler subclass supporting TLS/SSL."""
def __init__(self, conn, baseclass):
DummyDTPHandler.__init__(self, conn, baseclass)
if self.baseclass.secure_data_channel:
self.secure_connection()
class DummyTLS_FTPHandler(SSLConnection, DummyFTPHandler):
"""A DummyFTPHandler subclass supporting TLS/SSL."""
dtp_handler = DummyTLS_DTPHandler
def __init__(self, conn, encoding=DEFAULT_ENCODING):
DummyFTPHandler.__init__(self, conn, encoding=encoding)
self.secure_data_channel = False
self._ccc = False
def cmd_auth(self, line):
"""Set up secure control channel."""
self.push('234 AUTH TLS successful')
self.secure_connection()
def cmd_ccc(self, line):
self.push('220 Reverting back to clear-text')
self._ccc = True
self._do_ssl_shutdown()
def cmd_pbsz(self, line):
"""Negotiate size of buffer for secure data transfer.
For TLS/SSL the only valid value for the parameter is '0'.
Any other value is accepted but ignored.
"""
self.push('200 PBSZ=0 successful.')
def cmd_prot(self, line):
"""Setup un/secure data channel."""
arg = line.upper()
if arg == 'C':
self.push('200 Protection set to Clear')
self.secure_data_channel = False
elif arg == 'P':
self.push('200 Protection set to Private')
self.secure_data_channel = True
else:
self.push("502 Unrecognized PROT type (use C or P).")
class DummyTLS_FTPServer(DummyFTPServer):
handler = DummyTLS_FTPHandler
class TestFTPClass(TestCase):
def setUp(self, encoding=DEFAULT_ENCODING):
self.server = DummyFTPServer((HOST, 0), encoding=encoding)
self.server.start()
self.client = ftplib.FTP(timeout=TIMEOUT, encoding=encoding)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
# Explicitly clear the attribute to prevent dangling thread
self.server = None
asyncore.close_all(ignore_all=True)
def check_data(self, received, expected):
self.assertEqual(len(received), len(expected))
self.assertEqual(received, expected)
def test_getwelcome(self):
self.assertEqual(self.client.getwelcome(), '220 welcome')
def test_sanitize(self):
self.assertEqual(self.client.sanitize('foo'), repr('foo'))
self.assertEqual(self.client.sanitize('pass 12345'), repr('pass *****'))
self.assertEqual(self.client.sanitize('PASS 12345'), repr('PASS *****'))
def test_exceptions(self):
self.assertRaises(ValueError, self.client.sendcmd, 'echo 40\r\n0')
self.assertRaises(ValueError, self.client.sendcmd, 'echo 40\n0')
self.assertRaises(ValueError, self.client.sendcmd, 'echo 40\r0')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 400')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 499')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 500')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 599')
self.assertRaises(ftplib.error_proto, self.client.sendcmd, 'echo 999')
def test_all_errors(self):
exceptions = (ftplib.error_reply, ftplib.error_temp, ftplib.error_perm,
ftplib.error_proto, ftplib.Error, OSError,
EOFError)
for x in exceptions:
try:
raise x('exception not included in all_errors set')
except ftplib.all_errors:
pass
def test_set_pasv(self):
# passive mode is supposed to be enabled by default
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(True)
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(False)
self.assertFalse(self.client.passiveserver)
def test_voidcmd(self):
self.client.voidcmd('echo 200')
self.client.voidcmd('echo 299')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 199')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 300')
def test_login(self):
self.client.login()
def test_acct(self):
self.client.acct('passwd')
def test_rename(self):
self.client.rename('a', 'b')
self.server.handler_instance.next_response = '200'
self.assertRaises(ftplib.error_reply, self.client.rename, 'a', 'b')
def test_delete(self):
self.client.delete('foo')
self.server.handler_instance.next_response = '199'
self.assertRaises(ftplib.error_reply, self.client.delete, 'foo')
def test_size(self):
self.client.size('foo')
def test_mkd(self):
dir = self.client.mkd('/foo')
self.assertEqual(dir, '/foo')
def test_rmd(self):
self.client.rmd('foo')
def test_cwd(self):
dir = self.client.cwd('/foo')
self.assertEqual(dir, '250 cwd ok')
def test_pwd(self):
dir = self.client.pwd()
self.assertEqual(dir, 'pwd ok')
def test_quit(self):
self.assertEqual(self.client.quit(), '221 quit ok')
# Ensure the connection gets closed; sock attribute should be None
self.assertEqual(self.client.sock, None)
def test_abort(self):
self.client.abort()
def test_retrbinary(self):
def callback(data):
received.append(data.decode(self.client.encoding))
received = []
self.client.retrbinary('retr', callback)
self.check_data(''.join(received), RETR_DATA)
def test_retrbinary_rest(self):
def callback(data):
received.append(data.decode(self.client.encoding))
for rest in (0, 10, 20):
received = []
self.client.retrbinary('retr', callback, rest=rest)
self.check_data(''.join(received), RETR_DATA[rest:])
def test_retrlines(self):
received = []
self.client.retrlines('retr', received.append)
self.check_data(''.join(received), RETR_DATA.replace('\r\n', ''))
def test_storbinary(self):
f = io.BytesIO(RETR_DATA.encode(self.client.encoding))
self.client.storbinary('stor', f)
self.check_data(self.server.handler_instance.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storbinary('stor', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_storbinary_rest(self):
data = RETR_DATA.replace('\r\n', '\n').encode(self.client.encoding)
f = io.BytesIO(data)
for r in (30, '30'):
f.seek(0)
self.client.storbinary('stor', f, rest=r)
self.assertEqual(self.server.handler_instance.rest, str(r))
def test_storlines(self):
data = RETR_DATA.replace('\r\n', '\n').encode(self.client.encoding)
f = io.BytesIO(data)
self.client.storlines('stor', f)
self.check_data(self.server.handler_instance.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storlines('stor foo', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
f = io.StringIO(RETR_DATA.replace('\r\n', '\n'))
# storlines() expects a binary file, not a text file
with support.check_warnings(('', BytesWarning), quiet=True):
self.assertRaises(TypeError, self.client.storlines, 'stor foo', f)
def test_nlst(self):
self.client.nlst()
self.assertEqual(self.client.nlst(), NLST_DATA.split('\r\n')[:-1])
def test_dir(self):
l = []
self.client.dir(lambda x: l.append(x))
self.assertEqual(''.join(l), LIST_DATA.replace('\r\n', ''))
def test_mlsd(self):
list(self.client.mlsd())
list(self.client.mlsd(path='/'))
list(self.client.mlsd(path='/', facts=['size', 'type']))
ls = list(self.client.mlsd())
for name, facts in ls:
self.assertIsInstance(name, str)
self.assertIsInstance(facts, dict)
self.assertTrue(name)
self.assertIn('type', facts)
self.assertIn('perm', facts)
self.assertIn('unique', facts)
def set_data(data):
self.server.handler_instance.next_data = data
def test_entry(line, type=None, perm=None, unique=None, name=None):
type = 'type' if type is None else type
perm = 'perm' if perm is None else perm
unique = 'unique' if unique is None else unique
name = 'name' if name is None else name
set_data(line)
_name, facts = next(self.client.mlsd())
self.assertEqual(_name, name)
self.assertEqual(facts['type'], type)
self.assertEqual(facts['perm'], perm)
self.assertEqual(facts['unique'], unique)
# plain
test_entry('type=type;perm=perm;unique=unique; name\r\n')
# "=" in fact value
test_entry('type=ty=pe;perm=perm;unique=unique; name\r\n', type="ty=pe")
test_entry('type==type;perm=perm;unique=unique; name\r\n', type="=type")
test_entry('type=t=y=pe;perm=perm;unique=unique; name\r\n', type="t=y=pe")
test_entry('type=====;perm=perm;unique=unique; name\r\n', type="====")
# spaces in name
test_entry('type=type;perm=perm;unique=unique; na me\r\n', name="na me")
test_entry('type=type;perm=perm;unique=unique; name \r\n', name="name ")
test_entry('type=type;perm=perm;unique=unique; name\r\n', name=" name")
test_entry('type=type;perm=perm;unique=unique; n am e\r\n', name="n am e")
# ";" in name
test_entry('type=type;perm=perm;unique=unique; na;me\r\n', name="na;me")
test_entry('type=type;perm=perm;unique=unique; ;name\r\n', name=";name")
test_entry('type=type;perm=perm;unique=unique; ;name;\r\n', name=";name;")
test_entry('type=type;perm=perm;unique=unique; ;;;;\r\n', name=";;;;")
# case sensitiveness
set_data('Type=type;TyPe=perm;UNIQUE=unique; name\r\n')
_name, facts = next(self.client.mlsd())
for x in facts:
self.assertTrue(x.islower())
# no data (directory empty)
set_data('')
self.assertRaises(StopIteration, next, self.client.mlsd())
set_data('')
for x in self.client.mlsd():
self.fail("unexpected data %s" % x)
def test_makeport(self):
with self.client.makeport():
# IPv4 is in use, just make sure send_eprt has not been used
self.assertEqual(self.server.handler_instance.last_received_cmd,
'port')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), timeout=TIMEOUT)
conn.close()
# IPv4 is in use, just make sure send_epsv has not been used
self.assertEqual(self.server.handler_instance.last_received_cmd, 'pasv')
def test_makepasv_issue43285_security_disabled(self):
"""Test the opt-in to the old vulnerable behavior."""
self.client.trust_server_pasv_ipv4_address = True
bad_host, port = self.client.makepasv()
self.assertEqual(
bad_host, self.server.handler_instance.fake_pasv_server_ip)
# Opening and closing a connection keeps the dummy server happy
# instead of timing out on accept.
socket.create_connection((self.client.sock.getpeername()[0], port),
timeout=TIMEOUT).close()
def test_makepasv_issue43285_security_enabled_default(self):
self.assertFalse(self.client.trust_server_pasv_ipv4_address)
trusted_host, port = self.client.makepasv()
self.assertNotEqual(
trusted_host, self.server.handler_instance.fake_pasv_server_ip)
# Opening and closing a connection keeps the dummy server happy
# instead of timing out on accept.
socket.create_connection((trusted_host, port), timeout=TIMEOUT).close()
def test_with_statement(self):
self.client.quit()
def is_client_connected():
if self.client.sock is None:
return False
try:
self.client.sendcmd('noop')
except (OSError, EOFError):
return False
return True
# base test
with ftplib.FTP(timeout=TIMEOUT) as self.client:
self.client.connect(self.server.host, self.server.port)
self.client.sendcmd('noop')
self.assertTrue(is_client_connected())
self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
self.assertFalse(is_client_connected())
# QUIT sent inside the with block
with ftplib.FTP(timeout=TIMEOUT) as self.client:
self.client.connect(self.server.host, self.server.port)
self.client.sendcmd('noop')
self.client.quit()
self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
self.assertFalse(is_client_connected())
# force a wrong response code to be sent on QUIT: error_perm
# is expected and the connection is supposed to be closed
try:
with ftplib.FTP(timeout=TIMEOUT) as self.client:
self.client.connect(self.server.host, self.server.port)
self.client.sendcmd('noop')
self.server.handler_instance.next_response = '550 error on quit'
except ftplib.error_perm as err:
self.assertEqual(str(err), '550 error on quit')
else:
self.fail('Exception not raised')
# needed to give the threaded server some time to set the attribute
# which otherwise would still be == 'noop'
time.sleep(0.1)
self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
self.assertFalse(is_client_connected())
def test_source_address(self):
self.client.quit()
port = socket_helper.find_unused_port()
try:
self.client.connect(self.server.host, self.server.port,
source_address=(HOST, port))
self.assertEqual(self.client.sock.getsockname()[1], port)
self.client.quit()
except OSError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to port %d" % port)
raise
def test_source_address_passive_connection(self):
port = socket_helper.find_unused_port()
self.client.source_address = (HOST, port)
try:
with self.client.transfercmd('list') as sock:
self.assertEqual(sock.getsockname()[1], port)
except OSError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to port %d" % port)
raise
def test_parse257(self):
self.assertEqual(ftplib.parse257('257 "/foo/bar"'), '/foo/bar')
self.assertEqual(ftplib.parse257('257 "/foo/bar" created'), '/foo/bar')
self.assertEqual(ftplib.parse257('257 ""'), '')
self.assertEqual(ftplib.parse257('257 "" created'), '')
self.assertRaises(ftplib.error_reply, ftplib.parse257, '250 "/foo/bar"')
# The 257 response is supposed to include the directory
# name and in case it contains embedded double-quotes
# they must be doubled (see RFC-959, chapter 7, appendix 2).
self.assertEqual(ftplib.parse257('257 "/foo/b""ar"'), '/foo/b"ar')
self.assertEqual(ftplib.parse257('257 "/foo/b""ar" created'), '/foo/b"ar')
def test_line_too_long(self):
self.assertRaises(ftplib.Error, self.client.sendcmd,
'x' * self.client.maxline * 2)
def test_retrlines_too_long(self):
self.client.sendcmd('SETLONGRETR %d' % (self.client.maxline * 2))
received = []
self.assertRaises(ftplib.Error,
self.client.retrlines, 'retr', received.append)
def test_storlines_too_long(self):
f = io.BytesIO(b'x' * self.client.maxline * 2)
self.assertRaises(ftplib.Error, self.client.storlines, 'stor', f)
def test_encoding_param(self):
encodings = ['latin-1', 'utf-8']
for encoding in encodings:
with self.subTest(encoding=encoding):
self.tearDown()
self.setUp(encoding=encoding)
self.assertEqual(encoding, self.client.encoding)
self.test_retrbinary()
self.test_storbinary()
self.test_retrlines()
new_dir = self.client.mkd('/non-ascii dir \xAE')
self.check_data(new_dir, '/non-ascii dir \xAE')
# Check default encoding
client = ftplib.FTP(timeout=TIMEOUT)
self.assertEqual(DEFAULT_ENCODING, client.encoding)
@skipUnless(socket_helper.IPV6_ENABLED, "IPv6 not enabled")
class TestIPv6Environment(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOSTv6, 0),
af=socket.AF_INET6,
encoding=DEFAULT_ENCODING)
self.server.start()
self.client = ftplib.FTP(timeout=TIMEOUT, encoding=DEFAULT_ENCODING)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
# Explicitly clear the attribute to prevent dangling thread
self.server = None
asyncore.close_all(ignore_all=True)
def test_af(self):
self.assertEqual(self.client.af, socket.AF_INET6)
def test_makeport(self):
with self.client.makeport():
self.assertEqual(self.server.handler_instance.last_received_cmd,
'eprt')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), timeout=TIMEOUT)
conn.close()
self.assertEqual(self.server.handler_instance.last_received_cmd, 'epsv')
def test_transfer(self):
def retr():
def callback(data):
received.append(data.decode(self.client.encoding))
received = []
self.client.retrbinary('retr', callback)
self.assertEqual(len(''.join(received)), len(RETR_DATA))
self.assertEqual(''.join(received), RETR_DATA)
self.client.set_pasv(True)
retr()
self.client.set_pasv(False)
retr()
@skipUnless(ssl, "SSL not available")
class TestTLS_FTPClassMixin(TestFTPClass):
"""Repeat TestFTPClass tests starting the TLS layer for both control
and data connections first.
"""
def setUp(self, encoding=DEFAULT_ENCODING):
self.server = DummyTLS_FTPServer((HOST, 0), encoding=encoding)
self.server.start()
self.client = ftplib.FTP_TLS(timeout=TIMEOUT, encoding=encoding)
self.client.connect(self.server.host, self.server.port)
# enable TLS
self.client.auth()
self.client.prot_p()
@skipUnless(ssl, "SSL not available")
class TestTLS_FTPClass(TestCase):
"""Specific TLS_FTP class tests."""
def setUp(self, encoding=DEFAULT_ENCODING):
self.server = DummyTLS_FTPServer((HOST, 0), encoding=encoding)
self.server.start()
self.client = ftplib.FTP_TLS(timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
# Explicitly clear the attribute to prevent dangling thread
self.server = None
asyncore.close_all(ignore_all=True)
def test_control_connection(self):
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.auth()
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
def test_data_connection(self):
# clear text
with self.client.transfercmd('list') as sock:
self.assertNotIsInstance(sock, ssl.SSLSocket)
self.assertEqual(sock.recv(1024),
LIST_DATA.encode(self.client.encoding))
self.assertEqual(self.client.voidresp(), "226 transfer complete")
# secured, after PROT P
self.client.prot_p()
with self.client.transfercmd('list') as sock:
self.assertIsInstance(sock, ssl.SSLSocket)
# consume from SSL socket to finalize handshake and avoid
# "SSLError [SSL] shutdown while in init"
self.assertEqual(sock.recv(1024),
LIST_DATA.encode(self.client.encoding))
self.assertEqual(self.client.voidresp(), "226 transfer complete")
# PROT C is issued, the connection must be in cleartext again
self.client.prot_c()
with self.client.transfercmd('list') as sock:
self.assertNotIsInstance(sock, ssl.SSLSocket)
self.assertEqual(sock.recv(1024),
LIST_DATA.encode(self.client.encoding))
self.assertEqual(self.client.voidresp(), "226 transfer complete")
def test_login(self):
# login() is supposed to implicitly secure the control connection
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.login()
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
# make sure that AUTH TLS doesn't get issued again
self.client.login()
def test_auth_issued_twice(self):
self.client.auth()
self.assertRaises(ValueError, self.client.auth)
def test_context(self):
self.client.quit()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
self.assertRaises(ValueError, ftplib.FTP_TLS, keyfile=CERTFILE,
context=ctx)
self.assertRaises(ValueError, ftplib.FTP_TLS, certfile=CERTFILE,
context=ctx)
self.assertRaises(ValueError, ftplib.FTP_TLS, certfile=CERTFILE,
keyfile=CERTFILE, context=ctx)
self.client = ftplib.FTP_TLS(context=ctx, timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.auth()
self.assertIs(self.client.sock.context, ctx)
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
self.client.prot_p()
with self.client.transfercmd('list') as sock:
self.assertIs(sock.context, ctx)
self.assertIsInstance(sock, ssl.SSLSocket)
def test_ccc(self):
self.assertRaises(ValueError, self.client.ccc)
self.client.login(secure=True)
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
self.client.ccc()
self.assertRaises(ValueError, self.client.sock.unwrap)
@skipUnless(False, "FIXME: bpo-32706")
def test_check_hostname(self):
self.client.quit()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.check_hostname, True)
ctx.load_verify_locations(CAFILE)
self.client = ftplib.FTP_TLS(context=ctx, timeout=TIMEOUT)
# 127.0.0.1 doesn't match SAN
self.client.connect(self.server.host, self.server.port)
with self.assertRaises(ssl.CertificateError):
self.client.auth()
# exception quits connection
self.client.connect(self.server.host, self.server.port)
self.client.prot_p()
with self.assertRaises(ssl.CertificateError):
with self.client.transfercmd("list") as sock:
pass
self.client.quit()
self.client.connect("localhost", self.server.port)
self.client.auth()
self.client.quit()
self.client.connect("localhost", self.server.port)
self.client.prot_p()
with self.client.transfercmd("list") as sock:
pass
class TestTimeouts(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(20)
self.port = socket_helper.bind_port(self.sock)
self.server_thread = threading.Thread(target=self.server)
self.server_thread.daemon = True
self.server_thread.start()
# Wait for the server to be ready.
self.evt.wait()
self.evt.clear()
self.old_port = ftplib.FTP.port
ftplib.FTP.port = self.port
def tearDown(self):
ftplib.FTP.port = self.old_port
self.server_thread.join()
# Explicitly clear the attribute to prevent dangling thread
self.server_thread = None
def server(self):
        # This method sets the evt twice:
        #  1) when the connection is ready to be accepted
        #  2) when it is safe for the caller to close the connection
self.sock.listen()
# (1) Signal the caller that we are ready to accept the connection.
self.evt.set()
try:
conn, addr = self.sock.accept()
except socket.timeout:
pass
else:
conn.sendall(b"1 Hola mundo\n")
conn.shutdown(socket.SHUT_WR)
# (2) Signal the caller that it is safe to close the socket.
self.evt.set()
conn.close()
finally:
self.sock.close()
def testTimeoutDefault(self):
# default -- use global socket timeout
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP(HOST)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutNone(self):
# no timeout -- do not use global socket timeout
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP(HOST, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(ftp.sock.gettimeout())
self.evt.wait()
ftp.close()
def testTimeoutValue(self):
# a value
ftp = ftplib.FTP(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
# bpo-39259
with self.assertRaises(ValueError):
ftplib.FTP(HOST, timeout=0)
def testTimeoutConnect(self):
ftp = ftplib.FTP()
ftp.connect(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDifferentOrder(self):
ftp = ftplib.FTP(timeout=30)
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDirectAccess(self):
ftp = ftplib.FTP()
ftp.timeout = 30
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
class MiscTestCase(TestCase):
def test__all__(self):
blacklist = {'MSG_OOB', 'FTP_PORT', 'MAXLINE', 'CRLF', 'B_CRLF',
'Error', 'parse150', 'parse227', 'parse229', 'parse257',
'print_line', 'ftpcp', 'test'}
support.check__all__(self, ftplib, blacklist=blacklist)
def test_main():
tests = [TestFTPClass, TestTimeouts,
TestIPv6Environment,
TestTLS_FTPClassMixin, TestTLS_FTPClass,
MiscTestCase]
thread_info = support.threading_setup()
try:
support.run_unittest(*tests)
finally:
support.threading_cleanup(*thread_info)
if __name__ == '__main__':
test_main()
|
environment.py
|
# coding: utf-8
from user_service import app, init_app
import threading
from sqlalchemy import create_engine
from user_service.core import BaseModel
from user_service import db
from user_service.models import User
from user_service.tests.integration.config import basedir
import os
# Tornado imports
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
def create_database(app):
engine = create_engine(app.config['SQLALCHEMY_DATABASE_URI'], convert_unicode=True)
BaseModel.metadata.create_all(bind=engine)
def shutdown_server():
    # Schedule stop() on the IOLoop's own thread (add_callback is thread-safe).
    ioloop = IOLoop.instance()
    ioloop.add_callback(ioloop.stop)
def after_scenario(context, scenario):
print("Clearing DB")
try:
User.query.filter_by(id=context.last_user_id).delete()
db.session.commit()
print("DB Cleared - User Gone")
except AttributeError:
pass
def before_all(context):
init_app(settings='user_service.tests.integration.config')
create_database(app)
http_server = HTTPServer(WSGIContainer(app))
http_server.listen(5000)
ioloop = IOLoop.instance()
context.server = http_server
context.thread = threading.Thread(target=ioloop.start)
context.thread.start()
def after_all(context):
shutdown_server()
    if not os.environ.get("MYSQL_HOST"):
os.remove(os.path.join(basedir, 'users.db'))
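# Illustrative sketch only (not invoked by behave automatically): after_scenario()
# above expects `context.last_user_id` to have been set by a step implementation.
# Assuming, hypothetically, that the create-user endpoint returns JSON containing
# an 'id' field, a step could record the id for cleanup with a helper like this.
def _record_created_user(context, response):
    """Remember the id of the user created in this scenario for later cleanup."""
    context.last_user_id = response.json().get('id')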
|
shell.py
|
import sys
import shlex
import os
SHELL_STATUS_RUN = 0
SHELL_STATUS_STOP = 1
def tokenize(string):
return shlex.split(string)
def execute(cmd_tokens):
    if not cmd_tokens:
        return SHELL_STATUS_RUN
    pid = os.fork()
    if pid == 0:
        # Child: replace this process image with the requested command.
        print('Running child process %s (%s)...' % (cmd_tokens[0], os.getpid()))
        try:
            os.execvp(cmd_tokens[0], cmd_tokens)
        except FileNotFoundError:
            print('%s: command not found' % cmd_tokens[0])
            os._exit(1)
    elif pid > 0:
        # Parent: wait until the child exits or is killed by a signal.
        while True:
            wpid, status = os.waitpid(pid, 0)
            if os.WIFEXITED(status) or os.WIFSIGNALED(status):
                break
return SHELL_STATUS_RUN
def shell_loop():
status = SHELL_STATUS_RUN
while status == SHELL_STATUS_RUN:
sys.stdout.write("> ")
        sys.stdout.flush()
        cmd = sys.stdin.readline()
        if not cmd:
            break  # EOF (Ctrl-D) terminates the shell
cmd_tokens = tokenize(cmd)
status = execute(cmd_tokens)
def main():
shell_loop()
if __name__ == "__main__":
main()
|
connection_test.py
|
import demistomock as demisto
from Active_Directory_Query import main
import socket
import ssl
from threading import Thread
import time
import os
import pytest
import json
from IAMApiModule import *
from unittest.mock import patch
BASE_TEST_PARAMS = {
'server_ip': '127.0.0.1',
'secure_connection': 'None',
'page_size': '500',
'credentials': {'identifier': 'bad', 'password': 'bad'}
}
RETURN_ERROR_TARGET = 'Active_Directory_Query.return_error'
def test_bad_host_no_ssl(mocker):
mocker.patch.object(demisto, 'params',
return_value=BASE_TEST_PARAMS)
return_error_mock = mocker.patch(RETURN_ERROR_TARGET)
# validate our mock of params
assert demisto.params().get('server_ip') == '127.0.0.1'
main()
assert return_error_mock.call_count == 1
# call_args last call with a tuple of args list and kwargs
err_msg = return_error_mock.call_args[0][0]
assert len(err_msg) < 100
assert 'Failed to access' in err_msg
@pytest.mark.filterwarnings("ignore::ResourceWarning")
def test_bad_ssl(mocker):
params = BASE_TEST_PARAMS.copy()
params['server_ip'] = '185.199.108.153' # disable-secrets-detection
params['secure_connection'] = 'SSL'
params['port'] = 443
mocker.patch.object(demisto, 'params',
return_value=params)
return_error_mock = mocker.patch(RETURN_ERROR_TARGET)
demisto_info_mock = mocker.patch.object(demisto, "info")
# validate our mock of params
assert demisto.params().get('secure_connection') == 'SSL'
main()
assert return_error_mock.call_count == 1
# call_args last call with a tuple of args list and kwargs
err_msg = return_error_mock.call_args[0][0]
assert len(err_msg) < 100
assert 'Failed to access' in err_msg
assert 'SSL error' in err_msg
# call_args_list holds all calls (we need the first) with a tuple of args list and kwargs
info_msg = demisto_info_mock.call_args_list[0][0][0]
# ip is not in the certificate. so it should fail on host match
assert "doesn't match any name" in info_msg
def ssl_bad_socket_server(port):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# cert and keyfile generated with
# openssl req -x509 -nodes -days 3000 -newkey rsa:2048 -keyout key.pem -out cert.pem
try:
context.load_cert_chain('cert.pem', 'key.pem')
with socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) as sock:
sock.bind(('127.0.0.1', port))
sock.listen(5)
with context.wrap_socket(sock, server_side=True) as ssock:
try:
conn, addr = ssock.accept()
except ssl.SSLError as err:
if 'TLSV1_ALERT_UNKNOWN_CA' in str(err):
# all is ok. client refused our cert
return
raise
conn.recv(32)
msg = b'THIS IS A TEST SERVER WHICH IGNORES PROTOCOL\n\n'
for x in range(10):
msg += msg
conn.send(msg)
conn.shutdown(socket.SHUT_RDWR)
conn.close()
except Exception as ex:
pytest.fail("Failed starting ssl_bad_socket_server: {}".format(ex))
raise
@pytest.mark.filterwarnings("ignore::ResourceWarning")
def test_faulty_server(mocker):
port = 9638
t = Thread(target=ssl_bad_socket_server, args=(port,))
t.start()
time.sleep(1) # wait for socket server to startup
params = BASE_TEST_PARAMS.copy()
params['server_ip'] = '127.0.0.1' # disable-secrets-detection
params['secure_connection'] = 'SSL'
params['unsecure'] = True
params['port'] = port
mocker.patch.object(demisto, 'params',
return_value=params)
return_error_mock = mocker.patch(RETURN_ERROR_TARGET)
# validate our mock of params
assert demisto.params().get('secure_connection') == 'SSL'
main()
t.join(5)
assert return_error_mock.call_count == 1
# call_args last call with a tuple of args list and kwargs
err_msg = return_error_mock.call_args[0][0]
assert len(err_msg) < 100
assert 'Failed to access' in err_msg
def test_ssl_custom_cert(mocker, request):
ENV_KEY = 'SSL_CERT_FILE'
os.environ[ENV_KEY] = 'cert.pem'
def cleanup():
os.environ.pop(ENV_KEY)
request.addfinalizer(cleanup)
port = 9637
t = Thread(target=ssl_bad_socket_server, args=(port,))
t.start()
time.sleep(1) # wait for socket server to startup
params = BASE_TEST_PARAMS.copy()
params['server_ip'] = '127.0.0.1' # disable-secrets-detection
params['secure_connection'] = 'SSL'
params['port'] = port
mocker.patch.object(demisto, 'params',
return_value=params)
return_error_mock = mocker.patch(RETURN_ERROR_TARGET)
# validate our mock of params
assert demisto.params().get('secure_connection') == 'SSL'
main()
t.join(5)
assert return_error_mock.call_count == 1
# call_args last call with a tuple of args list and kwargs
err_msg = return_error_mock.call_args[0][0]
assert len(err_msg) < 100
assert 'Failed to access' in err_msg
assert 'SSL error' not in err_msg
def test_endpoint_entry():
"""
Given:
Custom attributes to filter the computer object entry.
When:
The function filters the computer object according to the custom attributes.
Then:
The function will return all the computer object entry because custom attributes contain '*'.
"""
from Active_Directory_Query import endpoint_entry
custom_attributes_with_asterisk = endpoint_entry({'dn': 'dn', 'name': 'name', 'memberOf': 'memberOf'}, ['*'])
assert custom_attributes_with_asterisk == {'Groups': 'memberOf', 'Hostname': 'name', 'ID': 'dn', 'Type': 'AD'}
def get_outputs_from_user_profile(user_profile):
entry_context = user_profile.to_entry()
outputs = entry_context.get('Contents')
return outputs
def test_create_user_iam(mocker):
"""
Given:
A valid user profile with valid mapping
When:
Running the `create_user_iam` command
Then:
The user was created successfully in AD.
"""
import Active_Directory_Query
add_args, add_kwargs = [], {}
class ConnectionMocker:
entries = []
result = {'controls': {'1.2.840.113556.1.4.319': {'value': {'cookie': '<cookie>'}}}}
def search(self, *args, **kwargs):
return
def add(self, *args, **kwargs):
nonlocal add_args, add_kwargs
add_args, add_kwargs = args, kwargs
return True
Active_Directory_Query.conn = ConnectionMocker()
args = {"user-profile": json.dumps({"email": "test@paloaltonetworks.com", "username": "test",
"locationregion": "Americas"})}
mocker.patch('Active_Directory_Query.check_if_user_exists_by_attribute', return_value=False)
mocker.patch.object(IAMUserProfile, 'map_object', return_value={'cn': 'test', 'mail': 'test@paloaltonetworks.com',
'samaccountname': 'test',
'userPrincipalName': 'test',
"ou": "OU=Americas,OU=Demisto"})
user_profile = Active_Directory_Query.create_user_iam('', args, 'mapper_out', '')
outputs = get_outputs_from_user_profile(user_profile)
assert outputs.get('action') == IAMActions.CREATE_USER
assert outputs.get('success') is True
assert outputs.get('active') is False
assert outputs.get('email') == 'test@paloaltonetworks.com'
def test_unsuccessful_create_user_iam_missing_ou(mocker):
"""
Given:
A valid user profile with missing ou in the mapping
When:
Running the `create_user_iam` command
Then:
- The user was not created in AD.
- An error message was returned.
"""
import Active_Directory_Query
add_args, add_kwargs = [], {}
class ConnectionMocker:
entries = []
result = {'controls': {'1.2.840.113556.1.4.319': {'value': {'cookie': '<cookie>'}}}}
def search(self, *args, **kwargs):
return
def add(self, *args, **kwargs):
nonlocal add_args, add_kwargs
add_args, add_kwargs = args, kwargs
return True
Active_Directory_Query.conn = ConnectionMocker()
args = {"user-profile": json.dumps({"email": "test@paloaltonetworks.com", "username": "test",
"locationregion": "Americas"})}
mocker.patch('Active_Directory_Query.check_if_user_exists_by_attribute', return_value=False)
mocker.patch.object(IAMUserProfile, 'map_object', return_value={'cn': 'test', 'mail': 'test@paloaltonetworks.com',
'samaccountname': 'test',
'userPrincipalName': 'test'})
user_profile = Active_Directory_Query.create_user_iam('', args, 'mapper_out', '')
outputs = get_outputs_from_user_profile(user_profile)
assert outputs.get('action') == IAMActions.CREATE_USER
assert outputs.get('success') is False
assert outputs.get('email') == 'test@paloaltonetworks.com'
assert 'User must have an Organizational Unit (OU)' in outputs.get('errorMessage')
def test_unsuccessful_create_user_iam_missing_samaccountname(mocker):
"""
Given:
A valid user profile with missing samaccountname in the mapping
When:
Running the `create_user_iam` command
Then:
- The user was not created in AD.
- An error message was returned.
"""
import Active_Directory_Query
add_args, add_kwargs = [], {}
class ConnectionMocker:
entries = []
result = {'controls': {'1.2.840.113556.1.4.319': {'value': {'cookie': '<cookie>'}}}}
def search(self, *args, **kwargs):
return
def add(self, *args, **kwargs):
nonlocal add_args, add_kwargs
add_args, add_kwargs = args, kwargs
return True
Active_Directory_Query.conn = ConnectionMocker()
args = {"user-profile": json.dumps({"email": "test@paloaltonetworks.com", "username": "test",
"locationregion": "Americas"})}
mocker.patch('Active_Directory_Query.check_if_user_exists_by_attribute', return_value=False)
mocker.patch.object(IAMUserProfile, 'map_object', return_value={'cn': 'test', 'mail': 'test@paloaltonetworks.com',
"ou": "OU=Americas,OU=Demisto",
'userPrincipalName': 'test'})
user_profile = Active_Directory_Query.create_user_iam('', args, 'mapper_out', '')
outputs = get_outputs_from_user_profile(user_profile)
assert outputs.get('action') == IAMActions.CREATE_USER
assert outputs.get('success') is False
assert outputs.get('email') == 'test@paloaltonetworks.com'
assert 'User must have a sAMAccountName' in outputs.get('errorMessage')
def test_group_entry_no_custom_attributes():
"""
Given:
Custom attributes to filter the group object entry.
When:
The function filters the group object according to the custom attributes.
Then:
The function will return all the group object entry because custom attributes contain '*'.
"""
from Active_Directory_Query import group_entry
custom_attributes_with_asterisk = group_entry({'dn': 'dn', 'name': 'name', 'memberOf': 'memberOf'}, ['*'])
assert custom_attributes_with_asterisk == {'Groups': 'memberOf', 'ID': 'dn', 'Name': 'name', 'Type': 'AD'}
def test_group_entry():
"""
Given:
Custom attributes to filter the group object entry.
When:
The function filters the group object according to the custom attributes.
Then:
The function will return all the group object entry because custom attributes contain '*'.
"""
from Active_Directory_Query import group_entry
custom_attributes_with_asterisk = group_entry({'dn': 'dn', 'name': 'name', 'memberOf': 'memberOf',
'displayName': 'display name'}, ['displayName'])
assert custom_attributes_with_asterisk == {'Groups': 'memberOf', 'ID': 'dn', 'Name': 'name', 'Type': 'AD',
'displayName': 'display name'}
def test_search_group_members(mocker):
"""
sanity test for search_group_members method
"""
import Active_Directory_Query
class EntryMocker:
def entry_to_json(self):
return '{"dn": "dn","attributes": {"memberOf": ["memberOf"], "name": ["name"]}}'
class ConnectionMocker:
entries = [EntryMocker()]
result = {'controls': {'1.2.840.113556.1.4.319': {'value': {'cookie': '<cookie>'}}}}
def search(self, *args, **kwargs):
return
expected_results = {'ContentsFormat': 'json', 'Type': 1,
'Contents': [{'dn': 'dn', 'attributes': {'memberOf': ['memberOf'], 'name': ['name']}}],
'ReadableContentsFormat': 'markdown',
'HumanReadable': '### Active Directory - Get Group Members\n|'
'dn|memberOf|name|\n|---|---|---|\n| dn | memberOf | name |\n',
'EntryContext': {'ActiveDirectory.Groups(obj.dn ==dn)': {'dn': 'dn', 'members': [
{'dn': 'dn', 'category': 'group'}]}, 'ActiveDirectory.Groups(obj.dn == val.dn)':
[{'dn': 'dn', 'memberOf': ['memberOf'], 'name': ['name']}], 'Group':
[{'Type': 'AD', 'ID': 'dn', 'Name': ['name'], 'Groups': ['memberOf']}]}}
expected_results = f'demisto results: {json.dumps(expected_results, indent=4, sort_keys=True)}'
mocker.patch.object(demisto, 'args',
return_value={'member-type': 'group', 'group-dn': 'dn'})
Active_Directory_Query.conn = ConnectionMocker()
with patch('logging.Logger.info') as mock:
Active_Directory_Query.search_group_members('dc', 1)
mock.assert_called_with(expected_results)
|
scripts.py
|
import netaddr
from scapy.all import *
# Explicit imports for names used below; do not rely on scapy's wildcard import
# to provide them.
import socket
import logging
from datetime import datetime
import time
import threading
import paramiko
linux_host = []
lock = threading.Lock()
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
def find_linux_hosts(ip_addr):
    '''Send a TCP SYN to port 22 of the destination host with scapy; if a reply
    arrives with TTL 64, treat the host as Linux and add it to the inventory.'''
ssh_packet = IP(dst=str(ip_addr)) / TCP(dport=[22], flags="S")
ssh_check_result = sr1(ssh_packet, timeout=1, verbose=False)
if ssh_check_result:
if ssh_check_result.getlayer(IP).ttl == 64:
lock.acquire()
linux_host.append({})
linux_host[len(linux_host) - 1]["ip"] = ip_addr
try:
try:
hostname = socket.gethostbyaddr(ip_addr)
linux_host[len(linux_host) - 1]["hostname"] = hostname[0]
except socket.herror as e:
linux_host[len(linux_host) - 1]["last_status"] = "FAILED"
linux_host[len(linux_host) - 1]["hostname"] = "UNKNOWN"
for command in ["uptime", "cat /etc/redhat-release"]:
output = execute_command(command, ip_addr)
if output:
for line in output.readlines():
linux_host[len(linux_host) - 1][command] = line.rstrip("\r\n")
client.close()
finally:
lock.release()
def execute_command(command,ip_addr):
try:
        client.connect(ip_addr, port=22, username="rkurdukar", password="India@1", timeout=2)
stdin, stdout, stderr = client.exec_command(command, get_pty=True, timeout=300)
return stdout
except paramiko.ssh_exception.BadAuthenticationType as e:
linux_host[len(linux_host) - 1][command] = "BadAuthenticationType"
linux_host[len(linux_host) - 1]["last_status"] = "FAILED"
logging.error(e)
except paramiko.ssh_exception.AuthenticationException as e:
linux_host[len(linux_host) - 1][command] = "AuthenticationFailed"
linux_host[len(linux_host) - 1]["last_status"] = "FAILED"
logging.error(e)
except Exception as e:
return e
def scan(range):
logging.info("--------- Finding linux host from range {} -----------".format(range))
try:
range_obj = netaddr.IPNetwork(range)
if len(list(range_obj.iter_hosts())) <= 512:
host_list = range_obj.iter_hosts()
threads = []
t1 = time.time()
for ip_addr in host_list:
thread = threading.Thread(target=find_linux_hosts, args=[str(ip_addr)])
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
logging.info("Total linux host found {}".format(len(linux_host)))
logging.info("Total time taken {}".format(time.time() - t1))
else:
return "Too long range provided , please limit range till /23 only "
except Exception as e:
logging.error("[ERROR:] Exception occured {}".format(e))
return e
logging.info("------------- Scanning for hosts completed --------------- ")
for each in linux_host:
if "last_status" not in each:
each['last_status'] = "PASSED"
each['last_time_checked'] = datetime.now()
return linux_host
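# Illustrative usage sketch: the CIDR below is hypothetical, and logging is
# configured here because this module only emits log records without ever
# installing a handler itself.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO,
                        format="%(asctime)s %(levelname)s %(message)s")
    result = scan("192.168.1.0/24")  # any range of /23 or smaller
    if isinstance(result, list):
        for host in result:
            print(host["ip"], host.get("hostname", "UNKNOWN"), host["last_status"])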
|
test_wrapper.py
|
# Copyright 2017 The Nuclio Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import functools
import json
import logging
import operator
import os
import socket
import struct
import sys
import tempfile
import threading
import time
import unittest
import _nuclio_wrapper as wrapper
import msgpack
import nuclio_sdk
import nuclio_sdk.helpers
if nuclio_sdk.helpers.PYTHON3:
from socketserver import UnixStreamServer, BaseRequestHandler
from unittest import mock
import http.client as httpclient
else:
from SocketServer import UnixStreamServer, BaseRequestHandler
import mock
import httplib as httpclient
class TestSubmitEvents(unittest.TestCase):
def setUp(self):
self._temp_path = tempfile.mkdtemp(prefix='nuclio-test-py-wrapper')
# write handler to temp path
self._handler_path = self._write_handler(self._temp_path)
# set PYTHONPATH to include temp path
sys.path.append(self._temp_path)
# generate socket path
self._socket_path = os.path.join(self._temp_path, 'nuclio.sock')
# create transport
self._unix_stream_server = self._create_unix_stream_server(self._socket_path)
# create logger
self._logger = nuclio_sdk.Logger(logging.DEBUG)
self._logger.set_handler('test-default', sys.stdout, nuclio_sdk.logger.HumanReadableFormatter())
# create a wrapper
self._wrapper = wrapper.Wrapper(self._logger, 'reverser:handler', self._socket_path, 'test')
def tearDown(self):
sys.path.remove(self._temp_path)
self._wrapper._processor_sock.close()
self._unix_stream_server.server_close()
self._unix_stream_server.shutdown()
self._unix_stream_server_thread.join()
def test_non_utf8_headers(self):
self._wait_for_socket_creation()
self._wrapper._entrypoint = lambda context, event: event.body
events = [
json.loads(nuclio_sdk.Event(_id=str(i), body='e{0}'.format(i)).to_json())
for i in range(3)
]
# middle event is malformed
events[len(events) // 2]['headers']['x-nuclio'] = b'\xda'
# send events
t = threading.Thread(target=self._send_events, args=(events,))
t.start()
self._wrapper.serve_requests(num_requests=len(events))
t.join()
# processor start
# if python 2 then: deprecation note
# duration
# function response
# malformed log line (wrapper)
# malformed response
# duration
# function response
expected_messages = 7
if nuclio_sdk.helpers.PYTHON2:
expected_messages += 1
self._wait_until_received_messages(expected_messages)
malformed_response = self._unix_stream_server._messages[-3]['body']
self.assertEqual(httpclient.INTERNAL_SERVER_ERROR, malformed_response['status_code'])
# ensure messages coming after malformed request are still valid
last_function_response = self._unix_stream_server._messages[-1]['body']
self.assertEqual(httpclient.OK, last_function_response['status_code'])
self.assertEqual(events[-1]['body'], last_function_response['body'])
def test_bad_function_code(self):
def raise_exception(ctx, event):
raise RuntimeError(error_message)
error_message = 'Im a bad entrypoint'
self._wait_for_socket_creation()
self._send_event(nuclio_sdk.Event(_id='1'))
self._wrapper._entrypoint = raise_exception
self._wrapper.serve_requests(num_requests=1)
# processor start, function log line, response body
self._wait_until_received_messages(3)
# extract the response
response = next(message['body']
for message in self._unix_stream_server._messages
if message['type'] == 'r')
response_body = response['body']
self.assertIn(error_message, response_body)
def test_event_illegal_message_size(self):
def _send_illegal_message_size():
self._unix_stream_server._connection_socket.sendall(struct.pack(">I", 0))
self._wait_for_socket_creation()
t = threading.Thread(target=_send_illegal_message_size)
t.start()
self._wrapper._entrypoint = mock.MagicMock()
self._wrapper._entrypoint.assert_not_called()
with self.assertRaises(SystemExit):
self._wrapper.serve_requests(num_requests=1)
t.join()
def test_single_event(self):
reverse_text = 'reverse this'
# send the event
self._wait_for_socket_creation()
t = threading.Thread(target=self._send_event, args=(nuclio_sdk.Event(_id=1, body=reverse_text),))
t.start()
self._wrapper.serve_requests(num_requests=1)
t.join()
# processor start, function log line, response body, duration messages
self._wait_until_received_messages(4)
# extract the response
response = next(message['body']
for message in self._unix_stream_server._messages
if message['type'] == 'r')
response_body = response['body'][::-1]
# blame is on nuclio_sdk/event.py:80
if sys.version_info[:2] < (3, 0):
response_body = base64.b64decode(response_body)
self.assertEqual(reverse_text, response_body)
def test_blast_events(self):
"""Test when many >> 10 events are being sent in parallel"""
def record_event(recorded_events, ctx, event):
recorded_events.add(event.id)
recorded_event_ids = set()
expected_events_length = 10000
events = (
nuclio_sdk.Event(_id=i, body='e{}'.format(i))
for i in range(expected_events_length)
)
t = threading.Thread(target=self._send_events, args=(events,))
t.start()
self._wrapper._entrypoint = functools.partial(record_event, recorded_event_ids)
self._wrapper.serve_requests(num_requests=expected_events_length)
t.join()
# record incoming events
self.assertEqual(expected_events_length, len(recorded_event_ids), 'Wrong number of events')
def test_multi_event(self):
"""Test when two events fit inside on TCP packet"""
recorded_events = []
def event_recorder(ctx, event):
recorded_events.append(event)
return 'OK'
num_of_events = 10
events = (
nuclio_sdk.Event(_id=i, body='e{}'.format(i))
for i in range(num_of_events)
)
self._send_events(events)
self._wrapper._entrypoint = event_recorder
self._wrapper.serve_requests(num_of_events)
self.assertEqual(num_of_events, len(recorded_events), 'wrong number of events')
for recorded_event_index, recorded_event in enumerate(sorted(recorded_events, key=operator.attrgetter('id'))):
self.assertEqual(recorded_event_index, recorded_event.id)
response_body = recorded_event.body
if sys.version_info[:2] < (3, 0):
# blame is on nuclio_sdk/event.py:80
response_body = base64.b64decode(response_body)
self.assertEqual('e{}'.format(recorded_event_index), response_body)
# # to run memory profiling test, uncomment the test below
# # and from terminal run with
# # > mprof run python -m py.test test_wrapper.py::TestSubmitEvents::test_memory_profiling_100_<num>
# # and to get its plot use:
# # > mprof plot --backend agg --output <filename>.png
# def test_memory_profiling_100(self):
# self._run_memory_profiling(100)
#
# def test_memory_profiling_1k(self):
# self._run_memory_profiling(1000)
#
# def test_memory_profiling_10k(self):
# self._run_memory_profiling(10000)
#
# def test_memory_profiling_100k(self):
# self._run_memory_profiling(100000)
#
# def _run_memory_profiling(self, num_of_events):
# self._wrapper._entrypoint = mock.MagicMock()
# self._wrapper._entrypoint.return_value = {}
# threading.Thread(target=self._send_events, args=(num_of_events,)).start()
# with open('test_memory_profiling_{0}.txt'.format(num_of_events), 'w') as f:
# profiled_serve_requests_func = memory_profiler.profile(self._wrapper.serve_requests,
# precision=4,
# stream=f)
# profiled_serve_requests_func(num_requests=num_of_events)
# self.assertEqual(num_of_events, self._wrapper._entrypoint.call_count, 'Received unexpected number of events')
def _send_event(self, event):
if not isinstance(event, dict):
event = self._event_to_dict(event)
# pack exactly as processor or wrapper explodes
body = msgpack.Packer().pack(event)
# big endian body len
body_len = struct.pack(">I", len(body))
# first write body length
self._unix_stream_server._connection_socket.sendall(body_len)
# then write body content
self._unix_stream_server._connection_socket.sendall(body)
def _get_packed_event_body_len(self, event):
return len(msgpack.Packer().pack(self._event_to_dict(event)))
def _event_to_dict(self, event):
return json.loads(event.to_json())
def _send_events(self, events):
self._wait_for_socket_creation()
for event in events:
self._send_event(event)
def _wait_for_socket_creation(self, timeout=10, interval=0.1):
# wait for socket connection
while self._unix_stream_server._connection_socket is None and timeout > 0:
time.sleep(interval)
timeout -= interval
def _wait_until_received_messages(self, minimum_messages_length, timeout=10, interval=1):
while timeout > 0:
time.sleep(interval)
current_messages_length = len(self._unix_stream_server._messages)
if current_messages_length >= minimum_messages_length:
return
self._logger.debug_with('Waiting for messages to arrive',
current_messages_length=current_messages_length,
minimum_messages_length=minimum_messages_length)
timeout -= interval
raise RuntimeError('Failed waiting for messages')
def _create_unix_stream_server(self, socket_path):
unix_stream_server = _SingleConnectionUnixStreamServer(socket_path, _Connection)
# create a thread and listen forever on server
self._unix_stream_server_thread = threading.Thread(target=unix_stream_server.serve_forever)
self._unix_stream_server_thread.daemon = True
self._unix_stream_server_thread.start()
return unix_stream_server
def _write_handler(self, temp_path):
handler_code = '''import sys
is_py2 = sys.version_info[:2] < (3, 0)
def handler(ctx, event):
"""Return reversed body as string"""
body = event.body
if not is_py2 and isinstance(body, bytes):
body = body.decode('utf-8')
ctx.logger.warn('the end is nigh')
return body[::-1]
'''
handler_path = os.path.join(temp_path, 'reverser.py')
with open(handler_path, 'w') as out:
out.write(handler_code)
return handler_path
class _SingleConnectionUnixStreamServer(UnixStreamServer):
def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True):
UnixStreamServer.__init__(self, server_address, RequestHandlerClass, bind_and_activate)
self._connection_socket = None # type: socket.socket
self._messages = []
class _Connection(BaseRequestHandler):
def handle(self):
self.request.settimeout(1)
# make a file from the socket so we can readln
socket_file = self.request.makefile('r')
# save the connection socket
self.server._connection_socket = self.request
# while the server isn't shut down
while not self.server._BaseServer__shutdown_request:
try:
line = socket_file.readline()
if not line:
continue
message = {
'type': line[0],
'body': json.loads(line[1:]) if line[0] != 's' else ''
}
self.server._messages.append(message)
except:
pass
class TestCallFunction(unittest.TestCase):
def setUp(self):
# provided by _connection_provider
self._mockConnection = mock.MagicMock()
def test_call_json_body(self):
self._platform = nuclio_sdk.Platform('local', 'somens', self._connection_provider)
# prepare an event to send
event = nuclio_sdk.Event(method='GET', path='path', body={'a': 'some_body'})
# prepare a responder
connection_response = mock.MagicMock()
connection_response.status = httpclient.NO_CONTENT
connection_response.getheaders = lambda: [('Content-Type', 'application/json')]
connection_response.read = mock.MagicMock(return_value='{"b": "some_response"}')
self._mockConnection.getresponse = mock.MagicMock(return_value=connection_response)
# send the event
response = self._platform.call_function('function-name', event)
self.assertEqual(self._mockConnection.url, 'nuclio-somens-function-name:8080')
self._mockConnection.request.assert_called_with(event.method,
event.path,
body=json.dumps({'a': 'some_body'}),
headers={
'Content-Type': 'application/json',
'X-Nuclio-Target': 'function-name'
})
self.assertEqual({'b': 'some_response'}, response.body)
self.assertEqual('application/json', response.content_type)
self.assertEqual(httpclient.NO_CONTENT, response.status_code)
def test_get_function_url(self):
self.assertEqual(nuclio_sdk.Platform('local', 'ns')._get_function_url('function-name'),
'nuclio-ns-function-name:8080')
self.assertEqual(nuclio_sdk.Platform('kube', 'ns')._get_function_url('function-name'),
'nuclio-function-name:8080')
def _connection_provider(self, url, timeout=None):
self._mockConnection.url = url
return self._mockConnection
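# Illustrative sketch of the wire format exercised by _send_event() above: each
# frame is a 4-byte big-endian length prefix followed by a msgpack-encoded body.
# These helpers (unused by the tests) show how a receiver could read one frame;
# they rely on the `struct` and `msgpack` imports at the top of this module.
def _recv_exact(connection, nbytes):
    """Receive exactly nbytes from a connected socket or raise EOFError."""
    data = b''
    while len(data) < nbytes:
        chunk = connection.recv(nbytes - len(data))
        if not chunk:
            raise EOFError('connection closed mid-frame')
        data += chunk
    return data


def _read_one_frame(connection):
    """Read a single length-prefixed msgpack frame and return the decoded body."""
    (body_len,) = struct.unpack('>I', _recv_exact(connection, 4))
    return msgpack.unpackb(_recv_exact(connection, body_len), raw=False)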
|
run_enso.py
|
#! /usr/bin/env python
# vim:set tabstop=4 shiftwidth=4 expandtab:
# -*- coding: utf-8 -*-
__updated__ = "2019-05-14"
import atexit
atexit_register = atexit.register
atexit_functions = []
def my_atexit_register(func, *args, **kwargs):
global atexit_functions
atexit_functions.append((func, args, kwargs))
atexit_register(func, *args, **kwargs)
def run_all_exitfunctions():
global atexit_functions
for func, args, kwargs in atexit_functions:
try:
print "Running exit function: ", func.__name__
func(*args, **kwargs)
except:
pass
atexit.register = my_atexit_register
atexit.run_all_exitfunctions = run_all_exitfunctions
import logging
import os
import socket
import sys
import threading
import click
import enso.config
import enso.version
from enso._version_local import VERSION
_ = enso.version # Keep pyLint happy
ENSO_DIR = os.path.realpath(os.path.join(os.path.dirname(sys.argv[0]), ".."))
# Disable IPV6 address lookup
# http://stackoverflow.com/questions/2014534/force-python-mechanize-urllib2-to-only-use-a-requests
# FIXME: Resolve this based on current location and user configuration
# FIXME: Move this somewhere else
# This hack will force IPV4 DNS lookups only.
origGetAddrInfo = socket.getaddrinfo
def getAddrInfoWrapper(host, port, family=0, socktype=0, proto=0, flags=0):
return origGetAddrInfo(host, port, socket.AF_INET, socktype, proto, flags)
# replace the original socket.getaddrinfo by our version
#socket.getaddrinfo = getAddrInfoWrapper
def change_color_scheme(color):
"""Change Enso color scheme"""
if not hasattr(enso.config, "COLOR_SCHEMES"):
print "No COLOR_SCHEMES setting found in config.py. Color scheme will not be changed."
return
if color not in enso.config.COLOR_SCHEMES:
print "Unknown color scheme '%s'. Leaving defaults." % color
return
from enso.quasimode import layout
scheme = enso.config.COLOR_SCHEMES[color]
layout.WHITE = scheme[0]
layout.DESIGNER_GREEN = scheme[1]
layout.DARK_GREEN = scheme[2]
layout.BLACK = scheme[3]
layout.DESCRIPTION_BACKGROUND_COLOR = layout.COLOR_DESIGNER_GREEN + "cc"
layout.MAIN_BACKGROUND_COLOR = layout.COLOR_BLACK + "d8"
class LoggingDebugFilter(logging.Filter):
def filter(self, record):
"""
Determine if the specified record is to be logged.
Is the specified record to be logged? Returns 0 for no, nonzero for
yes. If deemed appropriate, the record may be modified in-place.
"""
res = logging.Filter.filter(self, record)
if res == 0:
return res
if record.module == "inotify_buffer" and record.funcName == "run":
return 0
return res
class LogLevelFilter(logging.Filter, object):
"""Filters (lets through) all messages with level <= LEVEL"""
# http://stackoverflow.com/a/24956305/408556
def __init__(self, name, passlevel, reject):
super(LogLevelFilter, self).__init__(name)
self.passlevel = passlevel
self.reject = reject
def filter(self, record):
passed = super(LogLevelFilter, self).filter(record)
if self.reject:
return passed and (record.levelno > self.passlevel)
else:
return passed and (record.levelno <= self.passlevel)
@click.command(context_settings=dict(help_option_names=['-h', '--help']))
@click.option('-l', '--log-level', default="ERROR",
type=click.Choice(['CRITICAL', 'ERROR', 'INFO', 'WARNING', 'DEBUG']),
show_default=True, help='Log level.')
@click.option('-n', '--no-splash', is_flag=True, help='Do not show splash window.')
@click.option('-c', '--no-console', is_flag=True, help='Do not show console window.')
@click.option('-q', '--quiet', is_flag=True,
help='No information windows are shown on startup/shutdown.')
@click.option('-i', '--ignore-config', is_flag=True, help='Ignore .ensorc file.')
@click.option('-k', '--hotkey',
type=click.Choice(['CAPITAL', 'LSHIFT', 'RSHIFT', 'LCONTROL',
'RCONTROL', 'LWIN', 'RWIN']),
help="Override the hotkey to invoke Enso interface set in .ensorc.")
@click.option("--commands-dir",
help="Override name of the subdirectory in user home directory that stores custom commands (used for development)")
@click.option("--color-scheme",
type=click.Choice(enso.config.COLOR_SCHEMES.keys()[1:]),
help="Override default color scheme (used for development).")
@click.option("-t", "--no-tray-icon", is_flag=True, help="Hide tray icon (only on Windows)")
@click.option("--get-config-var", hidden=True)
@click.version_option(version=VERSION)
def main(log_level, no_splash, no_console, quiet, ignore_config, hotkey,
commands_dir, color_scheme, no_tray_icon, get_config_var):
"""
Enso: Linguistic command-line launcher
"""
if not ignore_config:
# Load custom user config first
enso.config.load_ensorc()
else:
logging.info("Ignoring your .ensorc startup script")
if get_config_var is not None:
if get_config_var == "list":
for v in (v for v in dir(enso.config) if not v.startswith("__") and v[0].isupper()):
print v
elif get_config_var == "all":
for v in (v for v in dir(enso.config) if not v.startswith("__") and v[0].isupper()):
try:
print("%s=%s" % (v, getattr(enso.config, v)))
except AttributeError:
print("%s=<invalid>" % v)
else:
try:
print(getattr(enso.config, get_config_var))
except AttributeError:
print("<notfound>")
return
enso.config.CMDLINE_OPTIONS = {
'log_level': log_level,
'no_splash': no_splash,
'no_console': no_console,
'quiet': quiet,
'ignore_config': ignore_config,
'hotkey': hotkey,
'commands_dir': commands_dir,
'color_scheme': color_scheme,
'no_tray_icon': no_tray_icon,
}
logformat = "%(levelname)-9s%(asctime)s %(pathname)s[%(funcName)s:%(lineno)d]: %(message)s"
loglevel = {
'CRITICAL': logging.CRITICAL,
'ERROR': logging.ERROR,
'WARNING': logging.WARNING,
'INFO': logging.INFO,
'DEBUG': logging.DEBUG,
}.get(log_level, logging.NOTSET)
if not no_console:
MIN_LEVEL = loglevel
STDOUT_MAX_LEVEL = logging.WARNING
stdout_hdlr = logging.StreamHandler(sys.stdout)
stdout_hdlr.addFilter(LogLevelFilter('', STDOUT_MAX_LEVEL, False))
stdout_hdlr.setFormatter(logging.Formatter(logformat))
stdout_hdlr.setLevel(MIN_LEVEL)
stderr_hdlr = logging.StreamHandler(sys.stderr)
stderr_hdlr.addFilter(LogLevelFilter('', STDOUT_MAX_LEVEL, True))
stderr_hdlr.setFormatter(logging.Formatter(logformat))
stderr_hdlr.setLevel(MIN_LEVEL)
rootLogger = logging.getLogger()
rootLogger.addHandler(stdout_hdlr)
rootLogger.addHandler(stderr_hdlr)
rootLogger.setLevel(MIN_LEVEL)
else:
click.echo("Logging into '%s'" % os.path.join(ENSO_DIR, "enso.log"))
sys.stdout = open("stdout.log", "w", 0) # NullDevice()
sys.stderr = open("stderr.log", "w", 0) # NullDevice()
logging.basicConfig(
filename=os.path.join(ENSO_DIR, "enso.log"),
level=loglevel,
format=logformat)
if loglevel == logging.DEBUG:
pass
assert logging.debug("default options set:" + repr(enso.config.CMDLINE_OPTIONS)) or True
assert logging.debug("command-line args:" + repr(enso.config.CMDLINE_OPTIONS)) or True
if hotkey:
#contents += "enso.config.QUASIMODE_START_KEY = \"KEYCODE_%s\"\n" % opts.hotkey
enso.config.QUASIMODE_START_KEY = "KEYCODE_%s" % hotkey
logging.info("Enso hotkey has been set to %s" % hotkey)
# Can't display message at this phase as on Linux the gtk loop is not active yet
# at this point and that causes screen artifacts. Will be displayed in the init
    # handler instead (initialized in enso.run()).
# if not opts.quiet and opts.show_splash:
# displayMessage("<p><command>Enso</command> is starting...</p>")
enso.config.SHOW_SPLASH = not quiet and not no_splash
if sys.platform.startswith("win"):
# Add tray-icon support for win32 platform
if not no_tray_icon:
# tray-icon code must be run in separate thread otherwise it blocks
# current thread (using PumpMessages() )
try:
import enso.platform.win32.taskbar as taskbar
threading.Thread(
target=taskbar.systray, args=(enso.config,)).start()
except Exception as e:
logging.error("Error initializing taskbar systray icon: %s", e)
if commands_dir:
logging.info(
"Default commands directory changed to \"%s\"" % commands_dir)
enso.config.SCRIPTS_FOLDER_NAME = commands_dir
if color_scheme:
logging.info("Changing color scheme to %s" % color_scheme)
change_color_scheme(color_scheme)
l = logging.getLogger()
if l.isEnabledFor(logging.DEBUG):
try:
l.addFilter(LoggingDebugFilter())
except:
pass
# Execute main Enso loop
enso.run()
import time
time.sleep(10)
import traceback
thread_names = dict([(t.ident, t.name) for t in threading.enumerate()])
for thread_id, frame in sys._current_frames().iteritems():
print("Thread %s:" % thread_names.get(thread_id, thread_id))
traceback.print_stack(frame)
print()
return 0
if __name__ == "__main__":
#sys.exit(main(sys.argv))
main()
|
initialize_nodes.py
|
from http.server import HTTPServer
from node import get_handler
from threading import Thread
import time
import requests
# The condition argument is for you to know when everything is running
def do_stuff(host, nodes, edges, condition_ready=None, condition_done=None):
servers = list(HTTPServer((host, port), get_handler()) for port in nodes)
threads = list(Thread(target=server.serve_forever) for server in servers)
try:
for t in threads:
t.start()
def add(x, y):
_ = requests.get(f'http://{host}:{x}/new?port={y}')
_ = requests.get(f'http://{host}:{y}/new?port={x}')
for v0, v1 in edges:
add(v0, v1)
add(v1, v0)
# This is here for you so you know when stuff is ready
if condition_ready is not None:
with condition_ready:
condition_ready.notify()
if condition_done:
with condition_done:
condition_done.wait()
else:
while True:
time.sleep(5)
except KeyboardInterrupt:
pass
for server in servers:
server.shutdown()
server.server_close()
for t in threads:
t.join()
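# Illustrative sketch of driving do_stuff() from a test harness via the two
# condition arguments (the edge set below is hypothetical): do_stuff() notifies
# `ready` once every node has been wired up, then blocks on `done` until the
# caller signals that the servers may be shut down.
def run_graph_for_test(host="localhost", edges=((8030, 8031),)):
    from threading import Condition
    ready, done = Condition(), Condition()
    nodes = {port for edge in edges for port in edge}
    worker = Thread(target=do_stuff, args=(host, nodes, edges, ready, done))
    with ready:
        worker.start()
        ready.wait(timeout=10)   # released once all edges have been registered
    # ... exercise the nodes over HTTP here ...
    while worker.is_alive():     # keep notifying in case do_stuff is not waiting yet
        with done:
            done.notify()        # ask do_stuff() to shut the servers down
        worker.join(timeout=0.5)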
if __name__ == "__main__":
HOST = "localhost"
graph_base = 8030
graph = {(0, 1), (1, 2), (1, 3), (1, 4), (3, 4), (4, 5)}
graph = {(graph_base + x, graph_base + y) for x, y in graph}
_nodes = {x for y in graph for x in y}
do_stuff(HOST, _nodes, graph)
|
algo_six.py
|
from functools import reduce
import numpy as np
import random as r
import socket
import struct
import subprocess as sp
import threading
from threading import Thread
import ast
import time
import datetime as dt
import os
import psutil
from netifaces import interfaces, ifaddresses, AF_INET
import paho.mqtt.client as mqtt
import smtplib
import config
import paramiko
hosts = {} # {hostname: ip}
_tasks = {'t1': {'wcet': 3, 'period': 20, 'deadline': 15},
't2': {'wcet': 1, 'period': 5, 'deadline': 4},
't3': {'wcet': 2, 'period': 10, 'deadline': 8},
't4': {'wcet': 1, 'period': 10, 'deadline': 9},
't5': {'wcet': 3, 'period': 15, 'deadline': 12}
}
# mat = {'p0': ['cpu', 'mem', 'storage']}
_need = {
't1': [7, 4, 3],
't2': [1, 2, 2],
't3': [6, 0, 0],
't4': [0, 1, 1],
't5': [4, 3, 1]
}
allocation = {
't1': [0, 1, 0],
't2': [2, 0, 0],
't3': [3, 0, 2],
't4': [2, 1, 1],
't5': [0, 0, 2]
}
_cpu = [] # cpu plot list
prev_t = 0 # variable for cpu util
_off_mec = 0 # used to keep a count of tasks offloaded from local mec to another mec
_off_cloud = 0 # used to keep a count of tasks offloaded to cloud
_loc = 0 # used to keep a count of tasks executed locally
_inward_mec = 0 # used to keep a count of tasks offloaded from another mec to local mec
deadlock = [1] # keeps count of how many deadlock is resolved
memory = []
mec_waiting_time = {} # {ip : [moving (waiting time + rtt)]}
mec_rtt = {} # {ip: [RTT]}
offload_register = {} # {task: host_ip} to keep track of tasks sent to mec for offload
reoffload_list = [[], {}]  # [[task_list], {wait_time}] => records tasks re-offloaded to this MEC for execution
discovering = 0 # if discovering == 0 update host
test = []
_time = []
_pos = 0
received_task_queue = [] # [[(task_list,wait_time), host_ip], ....]
thread_record = []
_port_ = 64000
cloud_register = {} # ={client_id:client_ip} keeps address of task offloaded to cloud
cloud_port = 63000
received_time = []
task_record = {} # keeps record of task reoffloaded
task_id = 0 # id for each task reoffloaded
shared_resource_lock = threading.Lock()
t_track = 1
def ping(host):
cmd = [f'ping -c 1 {host}']
output = str(sp.check_output(cmd, shell=True), 'utf-8').split('\n')
try:
value = float(output[-2].split('=')[-1].split('/')[0])
except ValueError:
value = None
return value
def discovering_group():
global sock1
multicast_group = '224.3.29.71'
server_address = ('', 10000)
# Create the socket
sock1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to the server address
sock1.bind(server_address)
# Tell the operating system to add the socket to the multicast group
# on all interfaces.
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock1.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
def offloading_group():
global sock2
multicast_group = '224.5.5.55'
server_address = ('', 20000)
# Create the socket
sock2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to the server address
sock2.bind(server_address)
# Tell the operating system to add the socket to the multicast group
# on all interfaces.
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock2.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
def ip_address():
try:
# cmd = ['ifconfig eth1 | grep inet | cut -d ":" -f 2 | cut -d " " -f 1']
cmd = ['ifconfig ens4 | grep inet | head -n 1 | cut -d "t" -f 2 | cut -d " " -f 2']
address = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
if len(address.strip().split('.')) == 4:
return address.strip()
else:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
except Exception as e:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
def _memory():
global memory
memory.append(round(my_algo.memory_percent(), 4))
def m_cpu():
global prev_t
# get cpu
next_t = psutil.cpu_percent(percpu=False)
delta = abs(prev_t - next_t)
prev_t = next_t
_cpu.append(round(delta, 4))
def get_mec_rtts():
for i in mec_rtt:
mec_rtt[i].append(get_rtt(i))
def generate_results():
_memory()
m_cpu()
get_mec_rtts()
def host_ip_set():
global ip_set
ip_set = set()
for ifaceName in interfaces():
addresses = [i['addr'] for i in ifaddresses(ifaceName).setdefault(AF_INET, [{'addr': 'No IP addr'}])]
ip_set.add(', '.join(addresses))
def get_time():
_time_ = []
d = str(dt.datetime.utcnow()).split()
_time_ += d[0].split('-')
g = d[1].split('.')
_time_ += g[0].split(':')
_time_.append(g[1])
return _time_
def get_rtt(host):
rtt = ping(host)
if rtt:
return round(rtt, 4)
else:
return get_rtt(host)
def gcd(a, b):
if b == 0:
return a
return gcd(b, a % b)
def _lcm(a, b):
return int(a * b / gcd(a, b))
def lcm(_list):
return reduce(_lcm, _list)
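# Worked example with the periods defined in _tasks above: the EDF hyperperiod is
# lcm([20, 5, 10, 10, 15]) == 60, so the schedule computed in edf() below repeats
# every 60 time units, with 60/20 = 3 releases of t1, 60/5 = 12 of t2, and so on.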
def gosh_dist(_range):
return ((23 ** r.randrange(1, 1331)) % r.randrange(1, 1777)) % _range
def on_connect(connect_client, userdata, flags, rc):
# print("Connected with Code :" +str(rc))
# Subscribe Topic from here
connect_client.subscribe(node_id)
# Callback Function on Receiving the Subscribed Topic/Message
def on_message(message_client, userdata, msg):
global run
data = str(msg.payload, 'utf-8')
if data[0] == 'c': # receive from cloud
received_task = data[2:]
# send_client({received_task: get_time()}, cloud_register[received_task.split('.')[2]])
if received_task in task_record:
del task_record[received_task]
received_task = '.'.join(received_task.split('.')[:-1])
_client.publish(topic=received_task.split('.')[2], payload=str({received_task: get_time() + ['cloud']}), )
cooperate['cloud'] += 1
count_task_sent(received_task)
elif data[0] == 't': # receive from client
received_task = ast.literal_eval(data[2:])
received_task_queue.append(received_task)
received_time.append(time.time())
elif data.strip() == 'stop': # stop {hostname: ip}
print('sending stop alert')
run = 0
def connect_to_broker(stop):
global _client
username = 'mec'
password = 'password'
broker_port_no = 1883
_client = mqtt.Client()
_client.on_connect = on_connect
_client.on_message = on_message
_client.username_pw_set(username, password)
_client.connect(broker_ip, broker_port_no, 60)
_client.loop_start()
while True:
if stop():
_client.loop_stop()
_client.disconnect()
print('broker loop terminated')
break
def task_time_map(seq, process):
exe_seq = []
capacity_sum = 0
for job in process:
capacity_sum += process[job]['wcet']
while capacity_sum > 0:
for job in seq:
if process[job]['wcet'] > 0:
exe_seq.append(job)
process[job]['wcet'] -= 1
capacity_sum -= 1
return exe_seq
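# Worked example: task_time_map(['t2', 't1'], {'t1': {'wcet': 2}, 't2': {'wcet': 1}})
# walks the sequence round-robin until every wcet is consumed and returns
# ['t2', 't1', 't1']. Note that it decrements the 'wcet' fields of the dict it is
# given, so callers passing a shared task table see those counters reduced to zero.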
total_received_task = 0
def edf():
global total_received_task
t_lcm = lcm([tasks[i]['period'] for i in tasks])
t_dead = {i: tasks[i]['deadline'] for i in tasks}
sorted_dead = sorted(t_dead.items(), key=lambda kv: (kv[1], kv[0]))
# print(sorted_dead)
ready_task = []
for i in sorted_dead:
period = tasks[i[0]]['period']
# print('lcm: ', t_lcm, ' period: ', period)
t_range = int(t_lcm / period)
last_dead = 0
for j in range(t_range):
ready_task.append((i[0], last_dead + tasks[i[0]]['deadline']))
last_dead += period
ready_task = sorted(ready_task, key=lambda t: t[1])
print(ready_task)
t_time_ = 0
schedule = []
missed = []
register = {i: 0 for i in tasks.keys()} # {ti : amount executed}
for i in ready_task:
if (t_time_ // tasks[i[0]]['period']) + 1 <= register[i[0]]:
while (t_time_ // tasks[i[0]]['period']) + 1 <= register[i[0]]:
t_time_ += 1
# schedule.append(('idle', t_time))
if (t_time_ // tasks[i[0]]['period']) + 1 > register[i[0]]:
if t_time_ + tasks[i[0]]['wcet'] <= i[1]:
register[i[0]] += 1
t_time_ += tasks[i[0]]['wcet']
schedule.append(i[0])
else:
print('Deadline missed: ', i)
missed.append(i[0])
# print('s : ', schedule)
# print('r: ', register)
if len(missed) > 0:
# print('missed deadline: ', missed)
cooperative_mec(missed)
_edf_ = task_time_map(schedule, tasks)
total_received_task += len(_edf_)
return _edf_
# generate execution sequence
def wait_die(processes, avail, n_need, allocat):
global deadlock
offload = []
# To store execution sequence
exec_seq = []
    # Per-process progress marker: 0 = not yet scheduled, 'w' = waiting, 1 = done
    work = [0] * len(processes)
    # Loop while some process is still unscheduled or waiting
    while 'w' in work or 0 in work:
if 0 in work:
ind = work.index(0)
i = processes[ind]
elif 'w' in work:
# print('wk: ', work)
ind = work.index('w')
i = processes[ind]
else:
break
# print('comparing| process: ', i, _need[i], 'work: ', avail)
if not (False in list(np.greater_equal(avail, n_need[i]))):
exec_seq.append(i)
avail = np.add(avail, allocat[i])
work[ind] = 1
# print('added: ', exec_seq)
else:
a = list(set(processes) - set(exec_seq) - set(offload))
n = {}
for j in a:
n[j] = sum(allocat[j])
_max = max(n, key=n.get)
# print('work: ', work, 'need: ', _need[_max])
if processes.index(_max) > processes.index(i): # if true, i is older
# if process is already waiting then offload process
if work[ind] == 'w':
offload.append(i)
avail = np.array(avail) + np.array(allocat[i])
work[processes.index(i)] = 1
# print('offload reentry: ', i, offload)
else:
# wait put process to waiting
work[processes.index(i)] = 'w'
# print('waiting: ', i)
else:
# abort i
offload.append(i)
avail = np.array(avail) + np.array(allocat[i])
work[processes.index(i)] = 1
# print('offload: ', i)
if len(offload) > 0:
# print('offloading tasks: ', offload)
cooperative_mec(offload)
deadlock[0] += 1
# print('Execution seq: ', exec_seq)
return exec_seq
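# Minimal sketch (hypothetical values): with avail = [6, 5, 5],
#   n_need  = {'p1': [1, 1, 1], 'p2': [2, 1, 0]}
#   allocat = {'p1': [1, 0, 0], 'p2': [0, 1, 0]}
# every request fits into the available vector, so
# wait_die(['p1', 'p2'], avail, n_need, allocat) returns ['p1', 'p2'] and nothing
# is offloaded; a younger process that cannot be satisfied is offloaded ("die"),
# while an older one is put into the waiting state ("wait").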
def get_exec_seq(pro):
processes = ['{}_{}'.format(pro[i], i) for i in range(len(pro))]
# Available instances of resources
avail = [6, 5, 5]
n_need = {i: _need[i[:2]] for i in processes}
# print('need', n_need)
# Resources allocated to processes
allot = {i: allocation[i[:2]] for i in processes}
# return execution sequence
return wait_die(processes, avail, n_need, allot)
def calc_wait_time(list_seq):
pre = 0
time_dic = {}
for i in list_seq:
j = i.split('_')[0]
time_dic[i] = round(t_time[j][0] + pre, 3)
pre += t_time[j][0]
    # waiting time = (total waiting time) / 2; the plain average waiting time might be too tight
w_send = round(time_dic[list(time_dic.keys())[-1]] / 2, 3)
send_message('wt {} {}'.format(ip_address(), str(w_send))) # Broadcasting waiting time to cooperative MECs
return time_dic
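# Worked sketch (hypothetical timings): with t_time = {'t1': [0.4, 1.0], 't2': [0.2, 0.5]}
# and list_seq = ['t1_0', 't2_1'], calc_wait_time() returns
# {'t1_0': 0.4, 't2_1': 0.6} (cumulative completion offsets) and broadcasts
# w_send = 0.6 / 2 = 0.3 to the other MECs.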
def compare_local_mec(list_seq):
time_compare_dict = {i: t_time[i.split('_')[0]][1] > list_seq[i] for i in list_seq}
print('local vs MEC comparison: ', time_compare_dict)
execute_mec = []
execute_locally = []
for i in time_compare_dict:
if time_compare_dict[i]:
execute_locally.append(i)
else:
execute_mec.append(i)
return execute_mec, execute_locally
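# Continuing the sketch above (hypothetical values): with
#   t_time = {'t1': [0.4, 1.0], 't2': [0.2, 0.5]} and wait_list = {'t1_0': 0.4, 't2_1': 0.6},
# 't1_0' stays local because 1.0 > 0.4, while 't2_1' is queued for the cooperative
# MECs because its bound 0.5 does not exceed its local waiting time 0.6.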
def calculate_mov_avg(ma1, a1):
if ma1 in mec_waiting_time:
_count = len(mec_waiting_time[ma1])
avg1 = mec_waiting_time[ma1][-1]
else:
_count = 0
avg1 = 0
_count += 1
avg1 = ((_count - 1) * avg1 + a1) / _count
# ma1.append(avg1) #cumulative average formula
# μ_n=((n-1) μ_(n-1) + x_n)/n
return round(avg1, 4)
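# Worked arithmetic (hypothetical history): if mec_waiting_time['10.0.0.2'] == [0.5],
# then calculate_mov_avg('10.0.0.2', 0.7) computes ((2 - 1) * 0.5 + 0.7) / 2 = 0.6,
# i.e. the cumulative-mean update written in the comment above.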
def send_message(mg):
_multicast_group = ('224.3.29.71', 10000)
try:
# Send data to the multicast group
if mg == 'hello':
smg = mg + ' ' + str([get_hostname(), ip_address()])
sock1.sendto(str.encode(smg), _multicast_group)
print('\nHello message sent')
else:
sock1.sendto(str.encode(mg), _multicast_group)
except Exception as e:
print(e)
def get_hostname():
cmd = ['cat /etc/hostname']
hostname = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
return hostname
def receive_message(stop): # used for multi-cast message exchange among MEC
global hosts
while True:
if stop():
print('Stopped: receive_message()')
break
else:
data, address = sock1.recvfrom(1024)
_d = data.decode()
if _d[:5] == 'hello':
_data = ast.literal_eval(_d[6:])
hosts[_data[0]] = _data[1]
if _data[1] != host_ip:
mec_rtt[_data[1]] = []
elif (_d[:6] == 'update') and (discovering == 0):
hosts = ast.literal_eval(_d[7:])
# print('received: ', hosts)
for i in hosts:
if i != host_ip:
mec_rtt[i] = []
elif _d[:2] == 'wt':
split_data = _d.split()
if split_data[1] != host_ip:
w_time = calculate_mov_avg(split_data[1], float(split_data[2]) + get_rtt(
                        address[0]))  # calculate moving average of MEC wait time => w_time = wait time + rtt
if split_data[1] in mec_waiting_time:
mec_waiting_time[split_data[1]].append(w_time)
else:
mec_waiting_time[split_data[1]] = [w_time]
def mec_comparison():
    # returns the MEC with the minimum (last recorded) average waiting time, or 0 if none is known yet
if len(mec_waiting_time) == 0:
return 0
min_mec = {i: mec_waiting_time[i][-1] for i in mec_waiting_time}
min_wt = min(min_mec, key=min_mec.get)
return min_wt
def cooperative_mec(mec_list):
global _off_cloud
global _off_mec
global task_id, task_record
for i in mec_list:
_host = mec_comparison()
if _host == 0:
# send_cloud([i.split('_')[0], t_time[i.split('_')[0]][0]]) # [task_id,exec_time]
_send_task = f"{i.split('_')[0]}.{task_id}"
_client.publish(cloud_ip, str([_send_task, t_time[i.split('_')[0]][0]]), )
task_record[_send_task] = 'cloud'
task_id += 1
_off_cloud += 1
# cloud_register[i.split('_')[0].split('.')[2]] = send_back_host
print('\n=========SENDING {} TO CLOUD==========='.format(i))
else:
j = i.split('_')[0]
_max = np.array([6, 5, 5])
send = 'false'
if not (False in list(np.greater_equal(_max, _need[j[:2]]))):
send = 'true'
# CHECK IF THE MINIMUM MEC WAIT TIME IS LESS THAN LATENCY
if mec_waiting_time[_host][-1] < t_time[j][1] and send == 'true':
_send_task = f"{j}.{task_id}"
send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]]))
task_record[_send_task] = 'mec'
task_id += 1
_off_mec += 1
# SENDS TASK TO MEC FOR EXECUTION
w_send = mec_waiting_time[_host][-1] + 0.001
mec_waiting_time[_host].append(w_send) # adds a new average waiting time
print('\n======SENDING {} TO MEC {}========='.format(i, _host))
elif send == 'true' and (get_rtt(_host) < get_rtt(cloud_ip)):
_send_task = f"{j}.{task_id}"
send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]]))
task_record[_send_task] = 'mec'
task_id += 1
_off_mec += 1
# SENDS TASK TO MEC FOR EXECUTION
w_send = mec_waiting_time[_host][-1] + 0.001
mec_waiting_time[_host].append(w_send) # adds a new average waiting time
print('\n======SENDING {} TO MEC {}========='.format(i, _host))
else:
_send_task = f"{j}.{task_id}"
_client.publish(cloud_ip, str([_send_task, t_time[j][0]]), )
task_record[_send_task] = 'cloud'
task_id += 1
_off_cloud += 1
# send_cloud([j, t_time[j][0]]) # # [task_id,exec_time]
# cloud_register[j.split('.')[2]] = send_back_host
print('\n=========SENDING {} TO CLOUD==========='.format(i))
outward_mec = 0
offload_check = [0, 0]
def execute_re_offloaded_task(offloaded_task):
global outward_mec, offload_check
exec_list = get_exec_seq(offloaded_task[0])
outward_mec += len(exec_list)
for i in offloaded_task[0]: # i = 't1.1.2.3*1_3'
j = i.split('_')[0]
time.sleep(offloaded_task[1][j] / 2)
# print('j task: ', j)
send_offloaded_task_mec('{} {}'.format(j.split('.')[1], i.split('*')[0]))
clients_record = {}
def count_task_sent(task):
global clients_record
c_id = task.split('.')[2]
if c_id in clients_record:
clients_record[c_id] += 1
else:
clients_record[c_id] = 1
def execute(local):
print('\nExecuting :', local)
for i in local:
j = i.split('_')[0]
_t = t_time[j][0] / 2
time.sleep(_t)
print('#{}'.format(local.index(i) + 1), ' Executed: ', i)
_client.publish(j.split('.')[2], str({j: get_time() + ['local']}), )
count_task_sent(j)
print('============== EXECUTION DONE ===============')
cooperate = {'mec': 0, 'cloud': 0}
def receive_offloaded_task_mec(stop): # run as a thread
global _inward_mec
global t_track
while True:
if stop():
print('Stopped: receive_offloaded_task_mec()')
break
else:
data, address = sock2.recvfrom(1024)
if len(data.decode()) > 0:
da = data.decode().split(' ')
if (address[0] not in ip_set) and (da[0] == node_id): # send back to client
# send_client({da[1]: get_time()}, offload_register[da[1]]) # send back to client
if da[1] in task_record:
del task_record[da[1]]
task_new = '.'.join(da[1].split('.')[:-1])
_client.publish(da[1].split('.')[2], str({task_new: get_time() + ['mec']}), )
count_task_sent(da[1])
cooperate['mec'] += 1
else:
print('*' * 30 + f'\n{da[1]} Not in Task Record\n' + '*' * 30)
elif (address[0] not in ip_set) and (da[0] == 'ex') and (da[1] == node_id):
_received = ast.literal_eval(da[2] + da[3])
shared_resource_lock.acquire()
task = _received[0] + '*{}'.format(t_track)
reoffload_list[0].append(task)
reoffload_list[1][task] = _received[1]
shared_resource_lock.release()
t_track += 1
_inward_mec += 1
def call_execute_re_offload(stop):
global reoffload_list, outward_mec
global offload_check
while True:
if stop():
print('Stopped: call_execute_re_offload()')
break
else:
if len(reoffload_list[0]) == 1:
t = reoffload_list[0][-1]
time.sleep(reoffload_list[1][t] / 2)
shared_resource_lock.acquire()
reoffload_list[0].remove(t)
del reoffload_list[1][t]
shared_resource_lock.release()
send_offloaded_task_mec('{} {}'.format(t.split('.')[1], t.split('*')[0]))
outward_mec += 1
offload_check[0] += 1
elif len(reoffload_list[0]) > 1:
o = reoffload_list.copy()
                offload_check[1] += len(o[0])  # count queued tasks (o is [task_list, wait_times])
execute_re_offloaded_task(o)
for i in o[0]:
shared_resource_lock.acquire()
reoffload_list[0].remove(i)
del reoffload_list[1][i]
shared_resource_lock.release()
def send_email(msg, send_path):
try:
server = smtplib.SMTP_SSL('smtp.gmail.com')
server.ehlo()
server.login(config.email_address, config.password)
subject = 'Deadlock results edf+wait-die {} {}'.format(get_hostname(), send_path)
# msg = 'Attendance done for {}'.format(_timer)
_message = 'Subject: {}\n\n{}\n\n SENT BY RIHANNA \n\n'.format(subject, msg)
server.sendmail(config.email_address, config.send_email, _message)
server.quit()
print("Email sent!")
except Exception as e:
print(e)
def send_offloaded_task_mec(msg):
_multicast_group = ('224.5.5.55', 20000)
try:
sock2.sendto(str.encode(msg), _multicast_group)
except Exception as e:
print(e)
def mec_id(client_ip):
_id = client_ip.split('.')[-1]
if len(_id) == 1:
return '00' + _id
elif len(_id) == 2:
return '0' + _id
else:
return _id
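# Examples (hypothetical addresses): mec_id('192.168.0.5') -> '005',
# mec_id('192.168.0.45') -> '045', mec_id('192.168.0.145') -> '145';
# the last octet is zero-padded to three characters so node ids align.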
def send_result(host_, data):
try:
c = paramiko.SSHClient()
un = 'mec'
pw = 'password'
port = 22
c.set_missing_host_key_policy(paramiko.AutoAddPolicy())
c.connect(host_, port, un, pw)
for i in data:
cmd = ('echo "{}" >> /home/mec/result/data.py'.format(i)) # task share : host ip task
stdin, stdout, stderr = c.exec_command(cmd)
except Exception as e:
print(e)
def save_and_send(send_path):
_id_ = get_hostname()[-1]
result = f"\nwt{_id_}_16_{mec_no} = {mec_waiting_time} " \
f"\nrtt{_id_}_16_{mec_no} = {mec_rtt} \ncpu{_id_}_16_{mec_no} = {_cpu} " \
f"\noff_mec{_id_}_16_{mec_no} = {_off_mec} " \
f"\noff_cloud{_id_}_16_{mec_no} = {_off_cloud} " \
f"\ninward_mec{_id_}_16_{mec_no} = {_inward_mec}" \
f"\nloc{_id_}_16_{mec_no} = {_loc} " \
f"\ndeadlock{_id_}_16_{mec_no} = {deadlock} \nmemory{_id_}_16_{mec_no} = {memory}" \
f"\ntask_received = {total_received_task} \nsent_t = {clients_record}" \
f"\ncooperate{_id_}_16_{mec_no} = {cooperate} \ntask_record{_id_}_16_{mec_no} = {task_record}" \
f"\noutward_mec{_id_}_16_{mec_no} = {outward_mec}" \
f"\noffload_check{_id_}_16_{mec_no} = {offload_check}"
list_result = [
f"\nwt{_id_}_16_{mec_no} = {mec_waiting_time} ",
f"\nrtt{_id_}_16_{mec_no} = {mec_rtt} \ncpu{_id_}_16_{mec_no} = {_cpu} ",
f"\noff_mec{_id_}_16_{mec_no} = {_off_mec} \noff_cloud{_id_}_16_{mec_no} = {_off_cloud} ",
f"\ninward_mec{_id_}_16_{mec_no} = {_inward_mec}",
f"\nloc{_id_}_16_{mec_no} = {_loc} ",
f"\ndeadlock{_id_}_16_{mec_no} = {deadlock} \nmemory{_id_}_16_{mec_no} = {memory}",
f"\ntask_received{_id_}_16_{mec_no} = {total_received_task} \nsent_t{_id_}_16_{mec_no} = {clients_record}",
f"\ncooperate{_id_}_16_{mec_no} = {cooperate} \ntask_record{_id_}_16_{mec_no} = {task_record} "
f"\noutward_mec{_id_}_16_{mec_no} = {outward_mec}",
f"\noffload_check{_id_}_16_{mec_no} = {offload_check}"
]
path_ = 'data/raw/'
if os.path.exists(path_):
cmd = f"echo '' > {path_}{_id_}_16_{mec_no}datal.py"
os.system(cmd)
cmd = f"echo '' > {path_}{_id_}_16_{mec_no}datap.py"
os.system(cmd)
else:
os.mkdir(path_)
cmd = f"echo '' > {path_}{_id_}_16_{mec_no}datal.py"
os.system(cmd)
cmd = f"echo '' > {path_}{_id_}_16_{mec_no}datap.py"
os.system(cmd)
file_ = open(f'{path_}{_id_}_16_{mec_no}datap.py', 'w')
for i in list_result:
cmd = f'echo "{i}" >> {path_}{_id_}_16_{mec_no}datal.py'
file_.write(i)
os.system(cmd)
file_.close()
sp.run(
["scp", f"{path_}{_id_}_16_{mec_no}datap.py", f"mec@{hosts['osboxes-0']}:{send_path}"])
send_result(hosts['osboxes-0'], list_result)
send_email(result, send_path)
if len(task_record) > 0:
for _task_ in task_record:
task_new = '.'.join(_task_.split('.')[:-1])
_client.publish(task_new.split('.')[2], str({task_new: get_time() + [task_record[_task_]]}), )
def terminate_process():
global prev_t, _loc, _off_mec, _off_cloud, _inward_mec, outward_mec, deadlock, memory, mec_waiting_time, mec_rtt
global offload_register, reoffload_list, discovering, test, _time, _pos, received_task_queue, received_time
global cloud_register, t_track, task_record, task_id, cooperate, clients_record, offload_check
global timed_out_tasks, total_received_task, _cpu
# reinitialize #
_cpu = [] # cpu plot list
prev_t = 0 # variable for cpu util
_off_mec = 0 # used to keep a count of tasks offloaded from local mec to another mec
_off_cloud = 0 # used to keep a count of tasks offloaded to cloud
_loc = 0 # used to keep a count of tasks executed locally
_inward_mec = 0 # used to keep a count of tasks offloaded from another mec to local mec
outward_mec = 0 # keeps count of tasks sent back to another mec after executing
    deadlock = [1]  # keeps count of how many deadlocks have been resolved
memory = []
mec_waiting_time = {} # {ip : [moving (waiting time + rtt)]}
mec_rtt = {} # {ip: [RTT]}
offload_register = {} # {task: host_ip} to keep track of tasks sent to mec for offload
    reoffload_list = [[], {}]  # [[task_list], {task: wait_time}] => records tasks re-offloaded to this MEC for execution
discovering = 0 # if discovering == 0 update host
test = []
_time = []
_pos = 0
received_task_queue = [] # [[(task_list,wait_time), host_ip], ....]
received_time = []
cloud_register = {} # ={client_id:client_ip} keeps address of task offloaded to cloud
t_track = 1
task_record = {} # keeps record of task reoffloaded
task_id = 0 # id for each task reoffloaded
cooperate = {'mec': 0, 'cloud': 0}
clients_record = {}
offload_check = [0, 0]
timed_out_tasks = 0
total_received_task = 0
time.sleep(1)
run = 1 # tell agents child when to stop
def start_loop():
global _loc
global tasks
global t_time
global node_id
global run
print('\n============* WELCOME TO THE DEADLOCK EMULATION PROGRAM *=============\n')
node_id = mec_id(ip_address())
# print('node id: ', node_id)
func_to_thread = [receive_message, receive_offloaded_task_mec, call_execute_re_offload, connect_to_broker]
threads_ = []
stop = False
for i in func_to_thread:
threads_.append(Thread(target=i, args=(lambda: stop,)))
threads_[-1].daemon = True
threads_[-1].start()
print('algorithm is starting....')
print('========= Waiting for tasks ==========')
while run == 1:
try:
if len(received_task_queue) > 0:
info = received_task_queue.pop(0)
tasks, t_time = info
print('EDF List of Processes: ', tasks, '\n')
print('\n========= Running Deadlock Algorithm ===========')
list_seq = get_exec_seq(edf())
if len(list_seq) > 0: # do only when there is a task in safe sequence
wait_list = calc_wait_time(list_seq)
print('\nWaiting Time List: ', wait_list)
compare_result = compare_local_mec(wait_list)
print('\nExecute Locally: ', compare_result[1])
_loc += len(compare_result[1]) # total number of tasks to be executed locally
print('\nExecute in MEC: ', compare_result[0])
print('\nSending to cooperative platform')
if len(compare_result[0]) > 0:
cooperative_mec(compare_result[0])
execute(compare_result[1])
generate_results()
_time_ = dt.datetime.now()
else:
send_message(str('wt {} 0.0'.format(ip_address())))
time.sleep(.4)
except KeyboardInterrupt:
                print('\nProgramme Terminated')
                stop = True  # signal worker threads to exit before joining them
for th in threads_:
th.join()
time.sleep(1)
print('done')
break
print('algo stopped!')
run = 1
stop = True
time.sleep(20)
for th in threads_:
th.join()
def run_me(hosts_, mec_no_, cloud_ip_, send_path, broker_ip_): # call this from agent
global discovering
global hosts
global mec_no
global host_ip
global cloud_ip
global my_algo
global broker_ip
print('mec ip: ', ip_address())
my_algo = psutil.Process()
discovering_group()
offloading_group()
host_ip_set()
hosts = hosts_
mec_no = mec_no_
cloud_ip = cloud_ip_
broker_ip = broker_ip_
host_ip = ip_address()
print('MEC Details: ', hosts)
discovering = 1
time.sleep(2)
for host in hosts:
if hosts[host] != host_ip:
mec_rtt[hosts[host]] = []
start_loop()
print('saving data')
save_and_send(send_path)
print('Terminating process')
terminate_process()
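# Hypothetical invocation from an agent script (host names, IPs and paths below are
# illustrative only, not taken from the deployment):
#   run_me(hosts_={'osboxes-0': '192.168.122.10', 'osboxes-1': '192.168.122.11'},
#          mec_no_=2, cloud_ip_='192.168.122.100',
#          send_path='/home/mec/result/', broker_ip_='192.168.122.100')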
|
fake.py
|
# This file is Copyright (c) 2010 by the GPSD project
# BSD terms apply: see the file COPYING in the distribution root for details.
"""
gpsfake.py -- classes for creating a controlled test environment around gpsd.
The gpsfake(1) regression tester shipped with GPSD is a trivial wrapper
around this code. For a more interesting usage example, see the
valgrind-audit script shipped with the GPSD code.
To use this code, start by instantiating a TestSession class. Use the
prefix argument if you want to run the daemon under some kind of run-time
monitor like valgrind or gdb. Here are some particularly useful possibilities:
valgrind --tool=memcheck --gen-suppressions=yes --leak-check=yes
Run under Valgrind, checking for malloc errors and memory leaks.
xterm -e gdb -tui --args
Run under gdb, controlled from a new xterm.
You can use the options argument to pass in daemon options; normally you will
use this to set the debug-logging level.
On initialization, the test object spawns an instance of gpsd with no
devices or clients attached, connected to a control socket.
TestSession has methods to attach and detach fake GPSes. The
TestSession class simulates GPS devices for you with objects composed
from a pty and a class instance that cycles sentences into the master side
from some specified logfile; gpsd reads the slave side. A fake GPS is
identified by the string naming its slave device.
TestSession also has methods to start and end client sessions. Daemon
responses to a client are fed to a hook function which, by default,
discards them. You can change the hook to sys.stdout.write() to dump
responses to standard output (this is what the gpsfake executable
does) or do something more exotic. A client session is identified by a
small integer that counts the number of client session starts.
There are a couple of convenience methods. TestSession.wait() does nothing,
allowing a specified number of seconds to elapse. TestSession.send()
ships commands to an open client session.
TestSession does not currently capture the daemon's log output. It is
run with -N, so the output will go to stderr (along with, for example,
Valgrind notifications).
Each FakeGPS instance tries to packetize the data from the logfile it
is initialized with. It uses the same packet-getter as the daemon.
Exception: if there is a Delay-Cookie line in a header comment, that
delimiter is used to split up the test load.
The TestSession code maintains a run queue of FakeGPS and gps.gps
(client-session) objects. It repeatedly cycles through the run queue.
For each client session object in the queue, it tries to read data
from gpsd. For each fake GPS, it sends one line or packet of stored
data. When a fake-GPS's go predicate becomes false, the fake GPS is
removed from the run queue.
There are two ways to use this code. The more deterministic is
non-threaded mode: set up your client sessions and fake GPS devices,
then call the run() method. The run() method will terminate when
there are no more objects in the run queue. Note, you must have
created at least one fake client or fake GPS before calling run(),
otherwise it will terminate immediately.
To allow for adding and removing clients while the test is running,
run in threaded mode by calling the start() method. This simply calls
the run method in a subthread, with locking of critical regions.
"""
import os, sys, time, signal, pty, termios # fcntl, array, struct
import exceptions, threading, socket, select
import gps
import packet as sniffer
import stat
import Queue
# The magic number below has to be derived from observation. If
# it's too high you'll slow the tests down a lot. If it's too low
# you'll get regression tests timing out.
# WRITE_PAD: Define a per-line delay on writes so we won't spam the
# buffers in the pty layer or gpsd itself. Values smaller than the
# system timer tick don't make any difference here. Can be set from
# WRITE_PAD in the environment.
if sys.platform.startswith("linux"):
WRITE_PAD = 0.0
elif sys.platform.startswith("freebsd"):
WRITE_PAD = 0.01
elif sys.platform.startswith("netbsd5"):
WRITE_PAD = 0.200
elif sys.platform.startswith("netbsd"):
WRITE_PAD = 0.004
elif sys.platform.startswith("darwin"):
# darwin Darwin-13.4.0-x86_64-i386-64bit
WRITE_PAD = 0.03
else:
WRITE_PAD = 0.004
# Make it easier to test pad values
if os.getenv("WRITE_PAD"):
WRITE_PAD = eval(os.getenv("WRITE_PAD"))
# Additional delays in slow mode
WRITE_PAD_SLOWDOWN = 0.01
# If a test takes longer than this, we deem it to have timed out
TEST_TIMEOUT = 60
class TestLoadError(exceptions.Exception):
def __init__(self, msg):
exceptions.Exception.__init__(self)
self.msg = msg
class TestLoad:
"Digest a logfile into a list of sentences we can cycle through."
def __init__(self, logfp, predump=False, slow=False, oneshot=False):
self.sentences = [] # This is the interesting part
if type(logfp) == type(""):
logfp = open(logfp, "r")
self.name = logfp.name
self.logfp = logfp
self.predump = predump
self.type = None
self.sourcetype = "pty"
self.serial = None
self.delay = WRITE_PAD
if slow:
self.delay += WRITE_PAD_SLOWDOWN
self.delimiter = None
# Stash away a copy in case we need to resplit
text = logfp.read()
logfp = open(logfp.name)
# Grab the packets in the normal way
getter = sniffer.new()
#gps.packet.register_report(reporter)
type_latch = None
commentlen = 0
while True:
(plen, ptype, packet, _counter) = getter.get(logfp.fileno())
if plen <= 0:
break
elif ptype == sniffer.COMMENT_PACKET:
commentlen += len(packet)
# Some comments are magic
if "Serial:" in packet:
# Change serial parameters
packet = packet[1:].strip()
try:
(_xx, baud, params) = packet.split()
baud = int(baud)
if params[0] in ('7', '8'):
databits = int(params[0])
else:
raise ValueError
if params[1] in ('N', 'O', 'E'):
parity = params[1]
else:
raise ValueError
if params[2] in ('1', '2'):
stopbits = int(params[2])
else:
raise ValueError
except (ValueError, IndexError):
raise TestLoadError("bad serial-parameter spec in %s"%\
self.name)
self.serial = (baud, databits, parity, stopbits)
elif "Transport: UDP" in packet:
self.sourcetype = "UDP"
elif "Transport: TCP" in packet:
self.sourcetype = "TCP"
elif "Delay-Cookie:" in packet:
if packet.startswith("#"):
packet = packet[1:]
try:
(_dummy, self.delimiter, delay) = packet.strip().split()
self.delay = float(delay)
except ValueError:
raise TestLoadError("bad Delay-Cookie line in %s"%\
self.name)
self.resplit = True
else:
if type_latch is None:
type_latch = ptype
if self.predump:
print repr(packet)
if not packet:
raise TestLoadError("zero-length packet from %s"%\
self.name)
self.sentences.append(packet)
# Look at the first packet to grok the GPS type
self.textual = (type_latch == sniffer.NMEA_PACKET)
if self.textual:
self.legend = "gpsfake: line %d: "
else:
self.legend = "gpsfake: packet %d"
# Maybe this needs to be split on different delimiters?
if self.delimiter is not None:
self.sentences = text[commentlen:].split(self.delimiter)
# Do we want single-shot operation?
if oneshot:
self.sentences.append("# EOF\n")
class PacketError(exceptions.Exception):
def __init__(self, msg):
exceptions.Exception.__init__(self)
self.msg = msg
class FakeGPS:
def __init__(self, testload, progress=None):
self.testload = testload
self.progress = progress
self.go_predicate = lambda: True
self.readers = 0
self.index = 0
self.progress("gpsfake: %s provides %d sentences\n" % (self.testload.name, len(self.testload.sentences)))
def write(self, line):
"Throw an error if this superclass is ever instantiated."
raise ValueError, line
def feed(self, stop, manual):
"Feed a line from the contents of the GPS log to the daemon."
line = self.testload.sentences[self.index % len(self.testload.sentences)]
if "%Delay:" in line:
# Delay specified number of seconds
delay = line.split()[1]
time.sleep(int(delay))
# self.write has to be set by the derived class
self.write(line)
time.sleep(self.testload.delay)
##print "Feed stop? %r" % stop
if not stop and not manual:
self.index += 1
class FakePTY(FakeGPS):
"A FakePTY is a pty with a test log ready to be cycled to it."
def __init__(self, testload,
speed=4800, databits=8, parity='N', stopbits=1,
progress=None):
FakeGPS.__init__(self, testload, progress)
# Allow Serial: header to be overridden by explicit speed.
if self.testload.serial:
(speed, databits, parity, stopbits) = self.testload.serial
self.speed = speed
baudrates = {
0: termios.B0,
50: termios.B50,
75: termios.B75,
110: termios.B110,
134: termios.B134,
150: termios.B150,
200: termios.B200,
300: termios.B300,
600: termios.B600,
1200: termios.B1200,
1800: termios.B1800,
2400: termios.B2400,
4800: termios.B4800,
9600: termios.B9600,
19200: termios.B19200,
38400: termios.B38400,
57600: termios.B57600,
115200: termios.B115200,
230400: termios.B230400,
}
(self.fd, self.slave_fd) = pty.openpty()
self.byname = os.ttyname(self.slave_fd)
os.chmod( self.byname, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH | stat.S_IWOTH )
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self.slave_fd)
cc[termios.VMIN] = 1
cflag &= ~(termios.PARENB | termios.PARODD | termios.CRTSCTS)
cflag |= termios.CREAD | termios.CLOCAL
iflag = oflag = lflag = 0
iflag &=~ (termios.PARMRK | termios.INPCK)
cflag &=~ (termios.CSIZE | termios.CSTOPB | termios.PARENB | termios.PARODD)
if databits == 7:
cflag |= termios.CS7
else:
cflag |= termios.CS8
if stopbits == 2:
cflag |= termios.CSTOPB
# Warning: attempting to set parity makes Fedora lose its cookies
if parity == 'E':
iflag |= termios.INPCK
cflag |= termios.PARENB
elif parity == 'O':
iflag |= termios.INPCK
cflag |= termios.PARENB | termios.PARODD
ispeed = ospeed = baudrates[speed]
try:
termios.tcsetattr(self.slave_fd, termios.TCSANOW,
[iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
except termios.error:
raise TestLoadError("error attempting to set serial mode to %s %s%s%s" \
% (speed, databits, parity, stopbits))
def read(self):
"Discard control strings written by gpsd."
# A tcflush implementation works on Linux but fails on OpenBSD 4.
termios.tcflush(self.fd, termios.TCIFLUSH)
# Alas, the FIONREAD version also works on Linux and fails on OpenBSD.
#try:
# buf = array.array('i', [0])
# fcntl.ioctl(self.master_fd, termios.FIONREAD, buf, True)
# n = struct.unpack('i', buf)[0]
# os.read(self.master_fd, n)
#except IOError:
# pass
def write(self, line):
self.progress("gpsfake: %s writes %d=%s\n" % (self.testload.name, len(line), repr(line)))
os.write(self.fd, line)
def drain(self):
"Wait for the associated device to drain (e.g. before closing)."
termios.tcdrain(self.fd)
def cleansocket(host, port):
"Get a socket that we can re-use cleanly after it's closed."
cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# This magic prevents "Address already in use" errors after
# we release the socket.
cs.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
cs.bind((host, port))
return cs
class FakeTCP(FakeGPS):
"A TCP serverlet with a test log ready to be cycled to it."
def __init__(self, testload,
host, port,
progress=None):
FakeGPS.__init__(self, testload, progress)
self.host = host
self.port = int(port)
self.byname = "tcp://" + host + ":" + str(port)
self.dispatcher = cleansocket(self.host, self.port)
self.dispatcher.listen(5)
self.readables = [self.dispatcher]
def read(self):
"Handle connection requests and data."
readable, _writable, _errored = select.select(self.readables, [], [], 0)
for s in readable:
if s == self.dispatcher: # Connection request
client_socket, _address = s.accept()
self.readables = [client_socket]
self.dispatcher.close()
else: # Incoming data
data = s.recv(1024)
if not data:
s.close()
self.readables.remove(s)
def write(self, line):
"Send the next log packet to everybody connected."
self.progress("gpsfake: %s writes %d=%s\n" % (self.testload.name, len(line), repr(line)))
for s in self.readables:
if s != self.dispatcher:
s.send(line)
def drain(self):
"Wait for the associated device(s) to drain (e.g. before closing)."
for s in self.readables:
if s != self.dispatcher:
s.shutdown(socket.SHUT_RDWR)
class FakeUDP(FakeGPS):
"A UDP broadcaster with a test log ready to be cycled to it."
def __init__(self, testload,
ipaddr, port,
progress=None):
FakeGPS.__init__(self, testload, progress)
self.ipaddr = ipaddr
self.port = port
self.byname = "udp://" + ipaddr + ":" + str(port)
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def read(self):
"Discard control strings written by gpsd."
pass
def write(self, line):
self.progress("gpsfake: %s writes %d=%s\n" % (self.testload.name, len(line), repr(line)))
self.sock.sendto(line, (self.ipaddr, int(self.port)))
def drain(self):
"Wait for the associated device to drain (e.g. before closing)."
pass # shutdown() fails on UDP
class DaemonError(exceptions.Exception):
def __init__(self, msg):
exceptions.Exception.__init__(self)
self.msg = msg
def __str__(self):
return repr(self.msg)
class DaemonInstance:
"Control a gpsd instance."
def __init__(self, control_socket=None):
self.sockfile = None
self.pid = None
self.tmpdir = os.environ.get('TMPDIR', '/tmp')
if control_socket:
self.control_socket = control_socket
else:
self.control_socket = "%s/gpsfake-%d.sock" % (self.tmpdir, os.getpid())
self.pidfile = "%s/gpsfake-%d.pid" % (self.tmpdir, os.getpid())
def spawn(self, options, port, background=False, prefix=""):
"Spawn a daemon instance."
self.spawncmd = None
# Look for gpsd in GPSD_HOME env variable
if os.environ.get('GPSD_HOME'):
for path in os.environ['GPSD_HOME'].split(':'):
_spawncmd = "%s/gpsd" % path
if os.path.isfile(_spawncmd) and os.access(_spawncmd, os.X_OK):
self.spawncmd = _spawncmd
break
# if we could not find it yet try PATH env variable for it
if not self.spawncmd:
if not '/usr/sbin' in os.environ['PATH']:
os.environ['PATH']=os.environ['PATH'] + ":/usr/sbin"
for path in os.environ['PATH'].split(':'):
_spawncmd = "%s/gpsd" % path
if os.path.isfile(_spawncmd) and os.access(_spawncmd, os.X_OK):
self.spawncmd = _spawncmd
break
if not self.spawncmd:
raise DaemonError("Cannot execute gpsd: executable not found. Set GPSD_HOME env variable")
# The -b option to suppress hanging on probe returns is needed to cope
# with OpenBSD (and possibly other non-Linux systems) that don't support
# anything we can use to implement the FakeGPS.read() method
self.spawncmd += " -b -N -S %s -F %s -P %s %s" % (port, self.control_socket, self.pidfile, options)
if prefix:
self.spawncmd = prefix + " " + self.spawncmd.strip()
if background:
self.spawncmd += " &"
status = os.system(self.spawncmd)
if os.WIFSIGNALED(status) or os.WEXITSTATUS(status):
raise DaemonError("daemon exited with status %d" % status)
def wait_pid(self):
"Wait for the daemon, get its PID and a control-socket connection."
while True:
try:
fp = open(self.pidfile)
except IOError:
time.sleep(0.1)
continue
try:
fp.seek(0)
pidstr = fp.read()
self.pid = int(pidstr)
except ValueError:
time.sleep(0.5)
continue # Avoid race condition -- PID not yet written
fp.close()
break
def __get_control_socket(self):
# Now we know it's running, get a connection to the control socket.
if not os.path.exists(self.control_socket):
return None
try:
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
self.sock.connect(self.control_socket)
except socket.error:
if self.sock:
self.sock.close()
self.sock = None
return self.sock
def is_alive(self):
"Is the daemon still alive?"
try:
os.kill(self.pid, 0)
return True
except OSError:
return False
def add_device(self, path):
"Add a device to the daemon's internal search list."
if self.__get_control_socket():
self.sock.sendall("+%s\r\n\x00" % path)
self.sock.recv(12)
self.sock.close()
def remove_device(self, path):
"Remove a device from the daemon's internal search list."
if self.__get_control_socket():
self.sock.sendall("-%s\r\n\x00" % path)
self.sock.recv(12)
self.sock.close()
def kill(self):
"Kill the daemon instance."
if self.pid:
try:
os.kill(self.pid, signal.SIGTERM)
# Raises an OSError for ESRCH when we've killed it.
while True:
os.kill(self.pid, signal.SIGTERM)
time.sleep(0.01)
except OSError:
pass
self.pid = None
class TestSessionError(exceptions.Exception):
def __init__(self, msg):
exceptions.Exception.__init__(self)
self.msg = msg
class TestSession:
"Manage a session including a daemon with fake GPSes and clients."
def __init__(self, prefix=None, port=None, options=None, verbose=0, predump=False, udp=False, tcp=False, slow=False, queue=None):
"Initialize the test session by launching the daemon."
self.prefix = prefix
self.port = port
self.options = options
self.verbose = verbose
self.predump = predump
self.udp = udp
self.tcp = tcp
self.slow = slow
self.daemon = DaemonInstance()
self.fakegpslist = {}
self.client_id = 0
self.readers = 0
self.writers = 0
self.runqueue = []
self.index = 0
        self.baseport = 49194  # In the IANA private port range
if port:
self.port = port
else:
# Magic way to get a socket with an unused port number
s = cleansocket("localhost", 0)
self.port = s.getsockname()[1]
s.close()
self.progress = lambda x: None
self.reporter = lambda x: None
self.default_predicate = None
self.fd_set = []
self.threadlock = None
self.queue = queue
self.stop = False
self.manual = False
def spawn(self):
for sig in (signal.SIGQUIT, signal.SIGINT, signal.SIGTERM):
signal.signal(sig, lambda unused, dummy: self.cleanup())
self.daemon.spawn(background=True, prefix=self.prefix, port=self.port, options=self.options)
self.daemon.wait_pid()
def set_predicate(self, pred):
"Set a default go predicate for the session."
self.default_predicate = pred
def gps_add(self, logfile, speed=19200, pred=None, oneshot=False):
"Add a simulated GPS being fed by the specified logfile."
self.progress("gpsfake: gps_add(%s, %d)\n" % (logfile, speed))
if logfile not in self.fakegpslist:
testload = TestLoad(logfile, predump=self.predump, slow=self.slow, oneshot=oneshot)
if testload.sourcetype == "UDP" or self.udp:
newgps = FakeUDP(testload, ipaddr="127.0.0.1",
port=self.baseport,
progress=self.progress)
self.baseport += 1
elif testload.sourcetype == "TCP" or self.tcp:
newgps = FakeTCP(testload, host="127.0.0.1",
port=self.baseport,
progress=self.progress)
self.baseport += 1
else:
newgps = FakePTY(testload, speed=speed,
progress=self.progress)
if pred:
newgps.go_predicate = pred
elif self.default_predicate:
newgps.go_predicate = self.default_predicate
self.fakegpslist[newgps.byname] = newgps
self.append(newgps)
newgps.exhausted = 0
self.daemon.add_device(newgps.byname)
return newgps.byname
def gps_remove(self, name):
"Remove a simulated GPS from the daemon's search list."
self.progress("gpsfake: gps_remove(%s)\n" % name)
self.fakegpslist[name].drain()
self.remove(self.fakegpslist[name])
self.daemon.remove_device(name)
del self.fakegpslist[name]
def client_add(self, commands):
"Initiate a client session and force connection to a fake GPS."
self.progress("gpsfake: client_add()\n")
newclient = gps.gps(port=self.port, verbose=self.verbose)
self.append(newclient)
newclient.id = self.client_id + 1
self.client_id += 1
self.progress("gpsfake: client %d has %s\n" % (self.client_id,newclient.device))
if commands:
self.initialize(newclient, commands)
return self.client_id
def client_remove(self, cid):
"Terminate a client session."
self.progress("gpsfake: client_remove(%d)\n" % cid)
for obj in self.runqueue:
if isinstance(obj, gps.gps) and obj.id == cid:
self.remove(obj)
return True
return False
def wait(self, seconds):
"Wait, doing nothing."
self.progress("gpsfake: wait(%d)\n" % seconds)
time.sleep(seconds)
def gather(self, seconds):
"Wait, doing nothing but watching for sentences."
self.progress("gpsfake: gather(%d)\n" % seconds)
#mark = time.time()
time.sleep(seconds)
def cleanup(self):
"We're done, kill the daemon."
self.progress("gpsfake: cleanup()\n")
if self.daemon:
self.daemon.kill()
self.daemon = None
def processingcommand(self):
command = self.queue.get()
if "stop" in command:
print "Stop track cycle"
self.stop = True
elif "go" in command:
self.stop = False
print "Go track again"
elif "file" in command:
print "New track processing..."
path = command.split(",")
newtestload = TestLoad(path[1], predump=self.predump, slow=self.slow)
for obj in self.runqueue:
obj.testload.sentences = newtestload.sentences
obj.index = 0
elif "manual-begin" in command:
print "man begin"
self.manual = True
self.stop = True
return True
elif "manual-end" in command:
self.manual = False
elif self.manual and ("GPGGA" in command or "GPRMC" in command):
if self.stop:
for obj in self.runqueue:
obj.testload.sentences = []
obj.index = 0
self.stop = False
for obj in self.runqueue:
obj.testload.sentences.append(command)
obj.index = len(obj.testload.sentences) - 1
return False
def run(self, withoutfile):
"Run the tests."
try:
self.progress("gpsfake: test loop begins\n")
commandarrived = -1
while self.daemon:
# We have to read anything that gpsd might have tried
# to send to the GPS here -- under OpenBSD the
# TIOCDRAIN will hang, otherwise.
if not self.queue.empty():
#print "Queue not empty"
withoutfile = self.processingcommand()
#commandarrived = commandarrived + 1
#if commandarrived > 1 and withoutfile:
#withoutfile = False
if not withoutfile:
for device in self.runqueue:
if isinstance(device, FakeGPS):
device.read()
had_output = False
chosen = self.choose()
if isinstance(chosen, FakeGPS):
if chosen.exhausted and (time.time() - chosen.exhausted > TEST_TIMEOUT) and chosen.byname in self.fakegpslist:
sys.stderr.write("Test timed out: increase WRITE_PAD = %s\n" % WRITE_PAD)
raise SystemExit, 1
elif not chosen.go_predicate(chosen.index, chosen):
if chosen.exhausted == 0:
chosen.exhausted = time.time()
self.progress("gpsfake: GPS %s ran out of input\n" % chosen.byname)
else:
chosen.feed(self.stop, self.manual)
elif isinstance(chosen, gps.gps):
if chosen.enqueued:
chosen.send(chosen.enqueued)
chosen.enqueued = ""
while chosen.waiting():
chosen.read()
if chosen.valid & gps.PACKET_SET:
self.reporter(chosen.response)
if chosen.data["class"] == "DEVICE" and chosen.data["activated"] == 0 and chosen.data["path"] in self.fakegpslist:
self.gps_remove(chosen.data["path"])
self.progress("gpsfake: GPS %s removed (notification)\n" % chosen.data["path"])
had_output = True
else:
raise TestSessionError("test object of unknown type")
if not self.writers and not had_output:
self.progress("gpsfake: no writers and no output\n")
break
self.progress("gpsfake: test loop ends\n")
finally:
self.cleanup()
# All knowledge about locks and threading is below this line,
# except for the bare fact that self.threadlock is set to None
# in the class init method.
def append(self, obj):
"Add a producer or consumer to the object list."
if self.threadlock:
self.threadlock.acquire()
self.runqueue.append(obj)
if isinstance(obj, FakeGPS):
self.writers += 1
elif isinstance(obj, gps.gps):
self.readers += 1
if self.threadlock:
self.threadlock.release()
def remove(self, obj):
"Remove a producer or consumer from the object list."
if self.threadlock:
self.threadlock.acquire()
self.runqueue.remove(obj)
if isinstance(obj, FakeGPS):
self.writers -= 1
elif isinstance(obj, gps.gps):
self.readers -= 1
self.index = min(len(self.runqueue)-1, self.index)
if self.threadlock:
self.threadlock.release()
def choose(self):
"Atomically get the next object scheduled to do something."
if self.threadlock:
self.threadlock.acquire()
chosen = self.index
self.index += 1
self.index %= len(self.runqueue)
if self.threadlock:
self.threadlock.release()
return self.runqueue[chosen]
def initialize(self, client, commands):
"Arrange for client to ship specified commands when it goes active."
client.enqueued = ""
if not self.threadlock:
client.send(commands)
else:
client.enqueued = commands
def start(self):
self.threadlock = threading.Lock()
        # Start run() in a subthread; withoutfile=False is assumed as the default streaming mode.
        threading.Thread(target=self.run, args=(False,)).start()
# End
|
train.py
|
#!/usr/bin/env python
"""Train models."""
import os
import signal
import torch
import onmt.opts as opts
import onmt.utils.distributed
from onmt.utils.logging import logger
from onmt.train_single import main as single_main
from onmt.utils.parse import ArgumentParser
def main(opt):
ArgumentParser.validate_train_opts(opt)
ArgumentParser.update_model_opts(opt)
ArgumentParser.validate_model_opts(opt)
nb_gpu = len(opt.gpu_ranks)
if opt.world_size > 1:
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
procs = []
for device_id in range(nb_gpu):
procs.append(mp.Process(target=run, args=(
opt, device_id, error_queue, ), daemon=True))
procs[device_id].start()
logger.info(" Starting process pid: %d " % procs[device_id].pid)
error_handler.add_child(procs[device_id].pid)
for p in procs:
p.join()
elif nb_gpu == 1: # case 1 GPU only
single_main(opt, 0)
else: # case only CPU
single_main(opt, -1)
def run(opt, device_id, error_queue):
""" run process """
try:
gpu_rank = onmt.utils.distributed.multi_init(opt, device_id)
if gpu_rank != opt.gpu_ranks[device_id]:
raise AssertionError("An error occurred in \
Distributed initialization")
single_main(opt, device_id)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((opt.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(
target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = """\n\n-- Tracebacks above this line can probably
be ignored --\n\n"""
msg += original_trace
raise Exception(msg)
def _get_parser():
parser = ArgumentParser(description='train.py')
opts.config_opts(parser)
opts.model_opts(parser)
opts.train_opts(parser)
opts.gcn_opts(parser)
return parser
if __name__ == "__main__":
parser = _get_parser()
opt = parser.parse_args()
main(opt)
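# Hypothetical command line (flag values are illustrative, not taken from the
# project documentation):
#   python train.py -data data/demo -save_model demo-model \
#       -world_size 2 -gpu_ranks 0 1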
|
runCtaTrading.py
|
# encoding: UTF-8
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import multiprocessing
from time import sleep
from datetime import datetime, time
from vnpy.event import EventEngine2
from vnpy.trader.vtEvent import EVENT_LOG
from vnpy.trader.vtEngine import MainEngine, LogEngine
from vnpy.trader.gateway import ctpGateway
from vnpy.trader.app import ctaStrategy
from vnpy.trader.app.ctaStrategy.ctaBase import EVENT_CTA_LOG
#----------------------------------------------------------------------
def runChildProcess():
"""子进程运行函数"""
print '-'*20
    # Create the log engine
le = LogEngine()
le.setLogLevel(le.LEVEL_INFO)
le.addConsoleHandler()
le.addFileHandler()
    le.info(u'Starting CTA strategy child process')
ee = EventEngine2()
    le.info(u'Event engine created')
me = MainEngine(ee)
me.addGateway(ctpGateway)
me.addApp(ctaStrategy)
    le.info(u'Main engine created')
ee.register(EVENT_LOG, le.processLogEvent)
ee.register(EVENT_CTA_LOG, le.processLogEvent)
    le.info(u'Registered log event listeners')
me.connect('CTP')
    le.info(u'Connecting to the CTP gateway')
    sleep(10)  # wait for the CTP gateway to finish initializing
cta = me.getApp(ctaStrategy.appName)
cta.loadSetting()
    le.info(u'CTA strategy settings loaded')
cta.initAll()
    le.info(u'CTA strategies initialized')
cta.startAll()
    le.info(u'CTA strategies started')
while True:
sleep(1)
#----------------------------------------------------------------------
def runParentProcess():
"""父进程运行函数"""
# 创建日志引擎
le = LogEngine()
le.setLogLevel(le.LEVEL_INFO)
le.addConsoleHandler()
    le.info(u'Starting CTA strategy watchdog parent process')
    DAY_START = time(8, 45)  # day-session start and stop times
    DAY_END = time(15, 30)
    NIGHT_START = time(20, 45)  # night-session start and stop times
    NIGHT_END = time(2, 45)
    p = None  # child-process handle
while True:
currentTime = datetime.now().time()
recording = False
        # Determine which trading session the current time falls in
if ((currentTime >= DAY_START and currentTime <= DAY_END) or
(currentTime >= NIGHT_START) or
(currentTime <= NIGHT_END)):
recording = True
        # During a trading session, make sure the child process is running
        if recording and p is None:
            le.info(u'Starting child process')
p = multiprocessing.Process(target=runChildProcess)
p.start()
            le.info(u'Child process started')
        # Outside trading hours, shut the child process down
        if not recording and p is not None:
            le.info(u'Stopping child process')
p.terminate()
p.join()
p = None
            le.info(u'Child process stopped')
sleep(5)
if __name__ == '__main__':
runChildProcess()
    # Although this also enables unattended operation, a manual check at every daily start-up is strongly recommended; you are responsible for your own PnL.
#runParentProcess()
|
debugger_v2_plugin_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Debugger V2 Plugin."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import os
import six
import socket
import threading
import tensorflow as tf
from werkzeug import test as werkzeug_test # pylint: disable=wrong-import-order
from werkzeug import wrappers
from tensorboard.backend import application
from tensorboard.plugins import base_plugin
from tensorboard.plugins.debugger_v2 import debug_data_multiplexer
from tensorboard.plugins.debugger_v2 import debugger_v2_plugin
from tensorboard.util import test_util
mock = tf.compat.v1.test.mock
_HOST_NAME = socket.gethostname()
_CURRENT_FILE_FULL_PATH = os.path.abspath(__file__)
def _generate_tfdbg_v2_data(
logdir, tensor_debug_mode="NO_TENSOR", logarithm_times=None
):
"""Generate a simple dump of tfdbg v2 data by running a TF2 program.
The run is instrumented by the enable_dump_debug_info() API.
The instrumented program is intentionally diverse in:
- Execution paradigm: eager + tf.function
- Control flow (TF while loop)
- dtype and shape
    in order to facilitate testing.
Args:
logdir: Logdir to write the debugger data to.
tensor_debug_mode: Mode for dumping debug tensor values, as an optional
string. See the documentation of
`tf.debugging.experimental.enable_dump_debug_info()` for details.
      logarithm_times: Optionally take the logarithm of the final `x` tensor this
        many times iteratively, in order to produce NaNs.
"""
writer = tf.debugging.experimental.enable_dump_debug_info(
logdir, circular_buffer_size=-1, tensor_debug_mode=tensor_debug_mode
)
try:
@tf.function
def unstack_and_sum(x):
elements = tf.unstack(x)
return elements[0] + elements[1] + elements[2] + elements[3]
@tf.function
def repeated_add(x, times):
sum = tf.constant(0, dtype=x.dtype)
i = tf.constant(0, dtype=tf.int32)
while tf.less(i, times):
sum += x
i += 1
return sum
@tf.function
def my_function(x):
times = tf.constant(3, dtype=tf.int32)
return repeated_add(unstack_and_sum(x), times)
x = tf.constant([1, 3, 3, 7], dtype=tf.float32)
for i in range(3):
assert my_function(x).numpy() == 42.0
logarithm_times = 0 if logarithm_times is None else logarithm_times
for i in range(logarithm_times):
x = tf.math.log(x)
# Expected iteration results:
# [0. 1.0986123 1.0986123 1.9459102]
# [-inf 0.09404784 0.09404784 0.6657298 ]
# [nan -2.3639517 -2.3639517 -0.40687138]
# [nan nan nan nan]
# [nan nan nan nan]
# [nan nan nan nan]
# ...
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
finally:
tf.debugging.experimental.disable_dump_debug_info()
_ROUTE_PREFIX = "/data/plugin/debugger-v2"
_DEFAULT_DEVICE_SUFFIX = "GPU:0" if tf.test.is_gpu_available() else "CPU:0"
@test_util.run_v2_only("tfdbg2 is not available in r1.")
class DebuggerV2PluginTest(tf.test.TestCase):
def setUp(self):
super(DebuggerV2PluginTest, self).setUp()
self.logdir = self.get_temp_dir()
context = base_plugin.TBContext(logdir=self.logdir)
self.plugin = debugger_v2_plugin.DebuggerV2Plugin(context)
wsgi_app = application.TensorBoardWSGI([self.plugin])
self.server = werkzeug_test.Client(wsgi_app, wrappers.BaseResponse)
# The multiplexer reads data asynchronously on a separate thread, so
# as not to block the main thread of the TensorBoard backend. During
# unit test, we disable the asynchronous behavior, so that we can
# load the debugger data synchronously on the main thread and get
        # deterministic behavior in the tests.
def run_repeatedly_in_background_mock(target, interval_sec):
del interval_sec # Unused in this mock.
target()
return None, None
self.run_in_background_patch = tf.compat.v1.test.mock.patch.object(
debug_data_multiplexer,
"run_repeatedly_in_background",
run_repeatedly_in_background_mock,
)
self.run_in_background_patch.start()
def tearDown(self):
self.run_in_background_patch.stop()
super(DebuggerV2PluginTest, self).tearDown()
def _getExactlyOneRun(self):
"""Assert there is exactly one DebuggerV2 run and get its ID."""
run_listing = json.loads(
self.server.get(_ROUTE_PREFIX + "/runs").get_data()
)
self.assertLen(run_listing, 1)
return list(run_listing.keys())[0]
def testPluginIsNotActiveByDefault(self):
self.assertFalse(self.plugin.is_active())
def testPluginIsActiveWithDataExists(self):
_generate_tfdbg_v2_data(self.logdir)
self.assertTrue(self.plugin.is_active())
def testConcurrentCallsToPluginIsActiveWhenNotActive(self):
results = collections.deque()
def query_is_active():
results.append(self.plugin.is_active())
threads = [threading.Thread(target=query_is_active) for _ in range(4)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertEqual(list(results), [False] * 4)
def testConcurrentCallsToPluginIsActiveWhenActive(self):
_generate_tfdbg_v2_data(self.logdir)
results = collections.deque()
def query_is_active():
results.append(self.plugin.is_active())
threads = [threading.Thread(target=query_is_active) for _ in range(4)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertEqual(list(results), [True] * 4)
def testServeRunsWithoutExistingRuns(self):
response = self.server.get(_ROUTE_PREFIX + "/runs")
self.assertEqual(200, response.status_code)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
self.assertEqual(json.loads(response.get_data()), dict())
def testServeRunsWithExistingRuns(self):
_generate_tfdbg_v2_data(self.logdir)
response = self.server.get(_ROUTE_PREFIX + "/runs")
self.assertEqual(200, response.status_code)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
data = json.loads(response.get_data())
self.assertEqual(list(data.keys()), ["__default_debugger_run__"])
run = data["__default_debugger_run__"]
self.assertIsInstance(run["start_time"], float)
self.assertGreater(run["start_time"], 0)
def testConcurrentServeRunsWithoutExistingRuns(self):
responses = collections.deque()
def get_runs():
responses.append(self.server.get(_ROUTE_PREFIX + "/runs"))
threads = [threading.Thread(target=get_runs) for _ in range(4)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertLen(responses, 4)
for response in responses:
self.assertEqual(200, response.status_code)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
self.assertEqual(json.loads(response.get_data()), dict())
def testConcurrentServeRunsWithExistingRuns(self):
_generate_tfdbg_v2_data(self.logdir)
responses = collections.deque()
def get_runs():
responses.append(self.server.get(_ROUTE_PREFIX + "/runs"))
threads = [threading.Thread(target=get_runs) for _ in range(4)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertLen(responses, 4)
for response in responses:
self.assertEqual(200, response.status_code)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
data = json.loads(response.get_data())
self.assertEqual(list(data.keys()), ["__default_debugger_run__"])
run = data["__default_debugger_run__"]
self.assertIsInstance(run["start_time"], float)
self.assertGreater(run["start_time"], 0)
def testAlertsWhenNoAlertExists(self):
_generate_tfdbg_v2_data(self.logdir)
run = self._getExactlyOneRun()
response = self.server.get(
_ROUTE_PREFIX + "/alerts?run=%s&begin=0&end=0" % run
)
self.assertEqual(200, response.status_code)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
data = json.loads(response.get_data())
self.assertEqual(
data,
{
"begin": 0,
"end": 0,
"num_alerts": 0,
"alerts_breakdown": {},
"per_type_alert_limit": 1000,
"alert_type": None,
"alerts": [],
},
)
def testGetAlertNumberOnlyWhenAlertExistsCurtHealthMode(self):
_generate_tfdbg_v2_data(
self.logdir, tensor_debug_mode="CURT_HEALTH", logarithm_times=4
)
run = self._getExactlyOneRun()
response = self.server.get(
_ROUTE_PREFIX + "/alerts?run=%s&begin=0&end=0" % run
)
self.assertEqual(200, response.status_code)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
data = json.loads(response.get_data())
self.assertEqual(
data,
{
"begin": 0,
"end": 0,
"num_alerts": 3,
"alerts_breakdown": {"InfNanAlert": 3,},
"per_type_alert_limit": 1000,
"alert_type": None,
"alerts": [],
},
)
def testGetAlertsContentWhenAlertExistsConciseHealthMode(self):
_generate_tfdbg_v2_data(
self.logdir, tensor_debug_mode="CONCISE_HEALTH", logarithm_times=4
)
run = self._getExactlyOneRun()
response = self.server.get(
_ROUTE_PREFIX + "/alerts?run=%s&begin=0&end=3" % run
)
self.assertEqual(200, response.status_code)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
data = json.loads(response.get_data())
self.assertEqual(data["begin"], 0)
self.assertEqual(data["end"], 3)
self.assertEqual(data["num_alerts"], 3)
self.assertEqual(data["alerts_breakdown"], {"InfNanAlert": 3})
self.assertEqual(data["per_type_alert_limit"], 1000)
alerts = data["alerts"]
self.assertLen(alerts, 3)
self.assertEqual(
alerts[0],
{
"alert_type": "InfNanAlert",
"op_type": "Log",
"output_slot": 0,
"size": 4.0,
"num_neg_inf": 1.0,
"num_pos_inf": 0.0,
"num_nan": 0.0,
"execution_index": 4,
"graph_execution_trace_index": None,
},
)
self.assertEqual(
alerts[1],
{
"alert_type": "InfNanAlert",
"op_type": "Log",
"output_slot": 0,
"size": 4.0,
"num_neg_inf": 0.0,
"num_pos_inf": 0.0,
"num_nan": 1.0,
"execution_index": 5,
"graph_execution_trace_index": None,
},
)
self.assertEqual(
alerts[2],
{
"alert_type": "InfNanAlert",
"op_type": "Log",
"output_slot": 0,
"size": 4.0,
"num_neg_inf": 0.0,
"num_pos_inf": 0.0,
"num_nan": 4.0,
"execution_index": 6,
"graph_execution_trace_index": None,
},
)
def testGetAlertsWithInvalidBeginOrEndWhenAlertExistsCurtHealthMode(self):
_generate_tfdbg_v2_data(
self.logdir, tensor_debug_mode="CURT_HEALTH", logarithm_times=4
)
run = self._getExactlyOneRun()
# begin = 0; end = 5
response = self.server.get(
_ROUTE_PREFIX + "/alerts?run=%s&begin=0&end=5" % run
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
self.assertEqual(
json.loads(response.get_data()),
{"error": "Invalid argument: end index (5) out of bounds (3)"},
)
# begin = -1; end = 2
response = self.server.get(
_ROUTE_PREFIX + "/alerts?run=%s&begin=-1&end=2" % run
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
self.assertEqual(
json.loads(response.get_data()),
{"error": "Invalid argument: Invalid begin index (-1)"},
)
# begin = 2; end = 1
response = self.server.get(
_ROUTE_PREFIX + "/alerts?run=%s&begin=2&end=1" % run
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
self.assertEqual(
json.loads(response.get_data()),
{
"error": "Invalid argument: "
"end index (1) is unexpectedly less than begin index (2)"
},
)
def testGetAlertsWithAlertType(self):
_generate_tfdbg_v2_data(
self.logdir, tensor_debug_mode="CONCISE_HEALTH", logarithm_times=4
)
run = self._getExactlyOneRun()
response = self.server.get(
_ROUTE_PREFIX
+ "/alerts?run=%s&alert_type=InfNanAlert&begin=1&end=-1" % run
)
self.assertEqual(200, response.status_code)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
data = json.loads(response.get_data())
self.assertEqual(data["begin"], 1)
self.assertEqual(data["end"], 3)
self.assertEqual(data["num_alerts"], 3)
self.assertEqual(data["alerts_breakdown"], {"InfNanAlert": 3})
self.assertEqual(data["alert_type"], "InfNanAlert")
alerts = data["alerts"]
self.assertLen(alerts, 2)
self.assertEqual(
alerts[0],
{
"alert_type": "InfNanAlert",
"op_type": "Log",
"output_slot": 0,
"size": 4.0,
"num_neg_inf": 0.0,
"num_pos_inf": 0.0,
"num_nan": 1.0,
"execution_index": 5,
"graph_execution_trace_index": None,
},
)
self.assertEqual(
alerts[1],
{
"alert_type": "InfNanAlert",
"op_type": "Log",
"output_slot": 0,
"size": 4.0,
"num_neg_inf": 0.0,
"num_pos_inf": 0.0,
"num_nan": 4.0,
"execution_index": 6,
"graph_execution_trace_index": None,
},
)
def testGetAlertsWithTypeFilterAndInvalidBeginOrEndWhenAlertsExist(self):
_generate_tfdbg_v2_data(
self.logdir, tensor_debug_mode="CURT_HEALTH", logarithm_times=4
)
run = self._getExactlyOneRun()
# begin = 0; end = 5
response = self.server.get(
_ROUTE_PREFIX
+ "/alerts?alert_type=InfNanAlert&run=%s&begin=0&end=5" % run
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
self.assertEqual(
json.loads(response.get_data()),
{"error": "Invalid argument: end index (5) out of bounds (3)"},
)
# begin = -1; end = 2
response = self.server.get(
_ROUTE_PREFIX
+ "/alerts?alert_type=InfNanAlert&run=%s&begin=-1&end=2" % run
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
self.assertEqual(
json.loads(response.get_data()),
{"error": "Invalid argument: Invalid begin index (-1)"},
)
# begin = 2; end = 1
response = self.server.get(
_ROUTE_PREFIX
+ "/alerts?alert_type=InfNanAlert&run=%s&begin=2&end=1" % run
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
self.assertEqual(
json.loads(response.get_data()),
{
"error": "Invalid argument: "
"end index (1) is unexpectedly less than begin index (2)"
},
)
def testGetAlertsWithNonexistentTypeFilterWhenAlertsExist(self):
_generate_tfdbg_v2_data(
self.logdir, tensor_debug_mode="CURT_HEALTH", logarithm_times=4
)
run = self._getExactlyOneRun()
response = self.server.get(
_ROUTE_PREFIX
+ "/alerts?alert_type=NonexistentAlert&run=%s&begin=0&end=-1" % run
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
self.assertEqual(
json.loads(response.get_data()),
{
"error": "Invalid argument: "
"Filtering of alerts failed: alert type NonexistentAlert "
"does not exist"
},
)
def testServeExecutionDigestsWithEqualBeginAndEnd(self):
_generate_tfdbg_v2_data(self.logdir)
run = self._getExactlyOneRun()
response = self.server.get(
_ROUTE_PREFIX + "/execution/digests?run=%s&begin=0&end=0" % run
)
self.assertEqual(200, response.status_code)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
data = json.loads(response.get_data())
self.assertEqual(
data,
{"begin": 0, "end": 0, "num_digests": 3, "execution_digests": [],},
)
def testServeExecutionDigestsWithEndGreaterThanBeginFullRange(self):
_generate_tfdbg_v2_data(self.logdir)
run = self._getExactlyOneRun()
response = self.server.get(
_ROUTE_PREFIX + "/execution/digests?run=%s&begin=0&end=3" % run
)
self.assertEqual(200, response.status_code)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
data = json.loads(response.get_data())
self.assertEqual(data["begin"], 0)
self.assertEqual(data["end"], 3)
self.assertEqual(data["num_digests"], 3)
execution_digests = data["execution_digests"]
self.assertLen(execution_digests, 3)
prev_wall_time = 0
for execution_digest in execution_digests:
self.assertGreaterEqual(
execution_digest["wall_time"], prev_wall_time
)
prev_wall_time = execution_digest["wall_time"]
self.assertStartsWith(
execution_digest["op_type"], "__inference_my_function"
)
def testServeExecutionDigestsWithImplicitFullRange(self):
_generate_tfdbg_v2_data(self.logdir)
run = self._getExactlyOneRun()
response = self.server.get(
_ROUTE_PREFIX + "/execution/digests?run=%s&begin=0" % run
)
self.assertEqual(200, response.status_code)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
data = json.loads(response.get_data())
self.assertEqual(data["begin"], 0)
self.assertEqual(data["end"], 3)
self.assertEqual(data["num_digests"], 3)
execution_digests = data["execution_digests"]
self.assertLen(execution_digests, 3)
prev_wall_time = 0
for execution_digest in execution_digests:
self.assertGreaterEqual(
execution_digest["wall_time"], prev_wall_time
)
prev_wall_time = execution_digest["wall_time"]
self.assertStartsWith(
execution_digest["op_type"], "__inference_my_function"
)
def testServeExecutionDigestsWithEndGreaterThanBeginPartialRange(self):
_generate_tfdbg_v2_data(self.logdir)
run = self._getExactlyOneRun()
response = self.server.get(
_ROUTE_PREFIX + "/execution/digests?run=%s&begin=0&end=2" % run
)
self.assertEqual(200, response.status_code)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
data = json.loads(response.get_data())
self.assertEqual(data["begin"], 0)
self.assertEqual(data["end"], 2)
self.assertEqual(data["num_digests"], 3)
execution_digests = data["execution_digests"]
self.assertLen(execution_digests, 2)
prev_wall_time = 0
for execution_digest in execution_digests:
self.assertGreaterEqual(
execution_digest["wall_time"], prev_wall_time
)
prev_wall_time = execution_digest["wall_time"]
self.assertStartsWith(
execution_digest["op_type"], "__inference_my_function"
)
def testServeExecutionDigestOutOfBoundsError(self):
_generate_tfdbg_v2_data(self.logdir)
run = self._getExactlyOneRun()
# begin = 0; end = 4
response = self.server.get(
_ROUTE_PREFIX + "/execution/digests?run=%s&begin=0&end=4" % run
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
self.assertEqual(
json.loads(response.get_data()),
{"error": "Invalid argument: end index (4) out of bounds (3)"},
)
# begin = -1; end = 2
response = self.server.get(
_ROUTE_PREFIX + "/execution/digests?run=%s&begin=-1&end=2" % run
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
self.assertEqual(
json.loads(response.get_data()),
{"error": "Invalid argument: Invalid begin index (-1)"},
)
# begin = 2; end = 1
response = self.server.get(
_ROUTE_PREFIX + "/execution/digests?run=%s&begin=2&end=1" % run
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
self.assertEqual(
json.loads(response.get_data()),
{
"error": "Invalid argument: "
"end index (1) is unexpectedly less than begin index (2)"
},
)
def testServeExecutionDigests400ResponseIfRunParamIsNotSpecified(self):
response = self.server.get(
# `run` parameter is not specified here.
_ROUTE_PREFIX
+ "/execution/digests?begin=0&end=0"
)
self.assertEqual(400, response.status_code)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
self.assertEqual(
json.loads(response.get_data()),
{"error": "run parameter is not provided"},
)
def testServeASingleExecutionDataObject(self):
_generate_tfdbg_v2_data(self.logdir, tensor_debug_mode="CONCISE_HEALTH")
run = self._getExactlyOneRun()
response = self.server.get(
_ROUTE_PREFIX + "/execution/data?run=%s&begin=0&end=1" % run
)
self.assertEqual(200, response.status_code)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
data = json.loads(response.get_data())
self.assertEqual(data["begin"], 0)
self.assertEqual(data["end"], 1)
self.assertLen(data["executions"], 1)
execution = data["executions"][0]
self.assertStartsWith(execution["op_type"], "__inference_my_function_")
self.assertLen(execution["output_tensor_device_ids"], 1)
self.assertEqual(execution["host_name"], _HOST_NAME)
self.assertTrue(execution["stack_frame_ids"])
self.assertLen(execution["input_tensor_ids"], 1)
self.assertLen(execution["output_tensor_ids"], 1)
self.assertTrue(execution["graph_id"])
# CONCISE_HEALTH mode:
# [[Unused tensor ID, #(elements), #(-inf), #(+inf), #(nan)]].
self.assertEqual(execution["tensor_debug_mode"], 3)
self.assertAllClose(
execution["debug_tensor_values"], [[-1.0, 1.0, 0.0, 0.0, 0.0]]
)
def testServeMultipleExecutionDataObject(self):
_generate_tfdbg_v2_data(self.logdir, tensor_debug_mode="CURT_HEALTH")
run = self._getExactlyOneRun()
response = self.server.get(
_ROUTE_PREFIX + "/execution/data?run=%s&begin=0&end=-1" % run
)
self.assertEqual(200, response.status_code)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
data = json.loads(response.get_data())
self.assertEqual(data["begin"], 0)
self.assertEqual(data["end"], 3)
self.assertLen(data["executions"], 3)
for i in range(3):
execution = data["executions"][i]
self.assertStartsWith(
execution["op_type"], "__inference_my_function_"
)
self.assertLen(execution["output_tensor_device_ids"], 1)
self.assertEqual(execution["host_name"], _HOST_NAME)
self.assertTrue(execution["stack_frame_ids"])
self.assertLen(execution["input_tensor_ids"], 1)
self.assertLen(execution["output_tensor_ids"], 1)
self.assertTrue(execution["graph_id"])
if i > 0:
self.assertEqual(
execution["input_tensor_ids"],
data["executions"][i - 1]["input_tensor_ids"],
)
self.assertEqual(
execution["graph_id"], data["executions"][i - 1]["graph_id"]
)
# CURT_HEALTH mode:
# [[Unused tensor ID, #(inf_or_nan)]].
self.assertEqual(execution["tensor_debug_mode"], 2)
self.assertAllClose(execution["debug_tensor_values"], [[-1.0, 0.0]])
def testServeExecutionDataObjectsOutOfBoundsError(self):
_generate_tfdbg_v2_data(self.logdir)
run = self._getExactlyOneRun()
# begin = 0; end = 4
response = self.server.get(
_ROUTE_PREFIX + "/execution/data?run=%s&begin=0&end=4" % run
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
self.assertEqual(
json.loads(response.get_data()),
{"error": "Invalid argument: end index (4) out of bounds (3)"},
)
# begin = -1; end = 2
response = self.server.get(
_ROUTE_PREFIX + "/execution/data?run=%s&begin=-1&end=2" % run
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
self.assertEqual(
json.loads(response.get_data()),
{"error": "Invalid argument: Invalid begin index (-1)"},
)
# begin = 2; end = 1
response = self.server.get(
_ROUTE_PREFIX + "/execution/data?run=%s&begin=2&end=1" % run
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
self.assertEqual(
json.loads(response.get_data()),
{
"error": "Invalid argument: "
"end index (1) is unexpectedly less than begin index (2)"
},
)
def testServeGraphExecutionDigestsPartialRange(self):
_generate_tfdbg_v2_data(self.logdir)
run = self._getExactlyOneRun()
response = self.server.get(
_ROUTE_PREFIX
+ "/graph_execution/digests?run=%s&begin=0&end=4" % run
)
self.assertEqual(200, response.status_code)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
data = json.loads(response.get_data())
self.assertEqual(data["begin"], 0)
self.assertEqual(data["end"], 4)
self.assertEqual(data["num_digests"], 219)
digests = data["graph_execution_digests"]
self.assertLen(digests, 4)
self.assertGreater(digests[0]["wall_time"], 0)
self.assertEqual(digests[0]["op_type"], "Placeholder")
self.assertEqual(digests[0]["output_slot"], 0)
self.assertTrue(digests[0]["op_name"])
self.assertTrue(digests[0]["graph_id"])
self.assertGreaterEqual(
digests[1]["wall_time"], digests[0]["wall_time"]
)
self.assertEqual(digests[1]["op_type"], "Const")
self.assertEqual(digests[1]["output_slot"], 0)
self.assertTrue(digests[1]["op_name"])
self.assertNotEqual(digests[1]["op_name"], digests[0]["op_name"])
self.assertTrue(digests[1]["graph_id"])
self.assertGreaterEqual(
digests[2]["wall_time"], digests[1]["wall_time"]
)
self.assertEqual(digests[2]["op_type"], "Placeholder")
self.assertEqual(digests[2]["output_slot"], 0)
self.assertTrue(digests[2]["op_name"])
self.assertTrue(digests[2]["graph_id"])
self.assertGreaterEqual(
digests[3]["wall_time"], digests[2]["wall_time"]
)
# The unstack() function uses the Unpack op under the hood.
self.assertEqual(digests[3]["op_type"], "Unpack")
self.assertEqual(digests[3]["output_slot"], 0)
self.assertTrue(digests[3]["op_name"])
self.assertTrue(digests[3]["graph_id"])
def testServeGraphExecutionDigestsImplicitFullRange(self):
_generate_tfdbg_v2_data(self.logdir)
run = self._getExactlyOneRun()
response = self.server.get(
_ROUTE_PREFIX + "/graph_execution/digests?run=%s" % run
)
self.assertEqual(200, response.status_code)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
data = json.loads(response.get_data())
self.assertEqual(data["begin"], 0)
self.assertEqual(data["end"], 219)
self.assertEqual(data["num_digests"], 219)
digests = data["graph_execution_digests"]
self.assertLen(digests, 219)
self.assertGreater(digests[-1]["wall_time"], 0)
# Due to the while loop in the tf.function, the last op executed
# is a Less op.
self.assertEqual(digests[-1]["op_type"], "Less")
self.assertEqual(digests[-1]["output_slot"], 0)
self.assertTrue(digests[-1]["op_name"])
self.assertTrue(digests[-1]["graph_id"])
def testServeGraphExecutionDigestOutOfBoundsError(self):
_generate_tfdbg_v2_data(self.logdir)
run = self._getExactlyOneRun()
# begin = 0; end = 300
response = self.server.get(
_ROUTE_PREFIX
+ "/graph_execution/digests?run=%s&begin=0&end=300" % run
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
self.assertEqual(
json.loads(response.get_data()),
{"error": "Invalid argument: end index (300) out of bounds (219)"},
)
# begin = -1; end = 2
response = self.server.get(
_ROUTE_PREFIX
+ "/graph_execution/digests?run=%s&begin=-1&end=2" % run
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
self.assertEqual(
json.loads(response.get_data()),
{"error": "Invalid argument: Invalid begin index (-1)"},
)
# begin = 2; end = 1
response = self.server.get(
_ROUTE_PREFIX
+ "/graph_execution/digests?run=%s&begin=2&end=1" % run
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
self.assertEqual(
json.loads(response.get_data()),
{
"error": "Invalid argument: "
"end index (1) is unexpectedly less than begin index (2)"
},
)
def testServeASingleGraphExecutionDataObject(self):
_generate_tfdbg_v2_data(self.logdir, tensor_debug_mode="CONCISE_HEALTH")
run = self._getExactlyOneRun()
response = self.server.get(
_ROUTE_PREFIX + "/graph_execution/data?run=%s&begin=0&end=1" % run
)
self.assertEqual(200, response.status_code)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
data = json.loads(response.get_data())
self.assertEqual(data["begin"], 0)
self.assertEqual(data["end"], 1)
self.assertLen(data["graph_executions"], 1)
graph_exec = data["graph_executions"][0]
self.assertStartsWith(graph_exec["op_type"], "Placeholder")
self.assertTrue(graph_exec["op_name"])
self.assertEqual(graph_exec["output_slot"], 0)
self.assertTrue(graph_exec["graph_id"])
self.assertGreaterEqual(len(graph_exec["graph_ids"]), 1)
self.assertEqual(graph_exec["graph_ids"][-1], graph_exec["graph_id"])
        # [tensor_id, element_count, neg_inf_count, pos_inf_count, nan_count].
self.assertEqual(
graph_exec["debug_tensor_value"], [1.0, 4.0, 0.0, 0.0, 0.0]
)
self.assertEndsWith(graph_exec["device_name"], _DEFAULT_DEVICE_SUFFIX)
def testServeMultipleGraphExecutionDataObjects(self):
_generate_tfdbg_v2_data(self.logdir, tensor_debug_mode="CONCISE_HEALTH")
run = self._getExactlyOneRun()
response = self.server.get(
_ROUTE_PREFIX + "/graph_execution/data?run=%s&begin=0&end=3" % run
)
self.assertEqual(200, response.status_code)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
data = json.loads(response.get_data())
self.assertEqual(data["begin"], 0)
self.assertEqual(data["end"], 3)
self.assertLen(data["graph_executions"], 3)
graph_exec = data["graph_executions"][0]
self.assertStartsWith(graph_exec["op_type"], "Placeholder")
self.assertTrue(graph_exec["op_name"])
self.assertEqual(graph_exec["output_slot"], 0)
self.assertTrue(graph_exec["graph_id"])
self.assertGreaterEqual(len(graph_exec["graph_ids"]), 1)
self.assertEqual(graph_exec["graph_ids"][-1], graph_exec["graph_id"])
        # [tensor_id, element_count, neg_inf_count, pos_inf_count, nan_count].
self.assertEqual(
graph_exec["debug_tensor_value"], [1.0, 4.0, 0.0, 0.0, 0.0]
)
self.assertEndsWith(graph_exec["device_name"], _DEFAULT_DEVICE_SUFFIX)
graph_exec = data["graph_executions"][1]
self.assertStartsWith(graph_exec["op_type"], "Placeholder")
self.assertTrue(graph_exec["op_name"])
self.assertEqual(graph_exec["output_slot"], 0)
self.assertTrue(graph_exec["graph_id"])
self.assertGreaterEqual(len(graph_exec["graph_ids"]), 1)
self.assertEqual(graph_exec["graph_ids"][-1], graph_exec["graph_id"])
self.assertEqual(
graph_exec["debug_tensor_value"], [3.0, 4.0, 0.0, 0.0, 0.0]
)
self.assertEndsWith(graph_exec["device_name"], _DEFAULT_DEVICE_SUFFIX)
graph_exec = data["graph_executions"][2]
# The unstack() function uses the Unpack op under the hood.
self.assertStartsWith(graph_exec["op_type"], "Unpack")
self.assertTrue(graph_exec["op_name"])
self.assertEqual(graph_exec["output_slot"], 0)
self.assertTrue(graph_exec["graph_id"])
self.assertGreaterEqual(len(graph_exec["graph_ids"]), 1)
self.assertEqual(graph_exec["graph_ids"][-1], graph_exec["graph_id"])
self.assertEqual(
graph_exec["debug_tensor_value"], [4.0, 1.0, 0.0, 0.0, 0.0]
)
self.assertEndsWith(graph_exec["device_name"], _DEFAULT_DEVICE_SUFFIX)
def testServeGraphExecutionDataObjectsOutOfBoundsError(self):
_generate_tfdbg_v2_data(self.logdir)
run = self._getExactlyOneRun()
        # _generate_tfdbg_v2_data() generates exactly 219 graph-execution
        # traces.
# begin = 0; end = 220
response = self.server.get(
_ROUTE_PREFIX + "/graph_execution/data?run=%s&begin=0&end=220" % run
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
self.assertEqual(
json.loads(response.get_data()),
{"error": "Invalid argument: end index (220) out of bounds (219)"},
)
# begin = -1; end = 2
response = self.server.get(
_ROUTE_PREFIX + "/graph_execution/data?run=%s&begin=-1&end=2" % run
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
self.assertEqual(
json.loads(response.get_data()),
{"error": "Invalid argument: Invalid begin index (-1)"},
)
# begin = 2; end = 1
response = self.server.get(
_ROUTE_PREFIX + "/graph_execution/data?run=%s&begin=2&end=1" % run
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
self.assertEqual(
json.loads(response.get_data()),
{
"error": "Invalid argument: "
"end index (1) is unexpectedly less than begin index (2)"
},
)
def testServeGraphInfo(self):
"""Get the op info of an op with both inputs and consumers."""
_generate_tfdbg_v2_data(self.logdir)
run = self._getExactlyOneRun()
# First, look up the graph_id of the 1st AddV2 op.
response = self.server.get(
_ROUTE_PREFIX + "/graph_execution/digests?run=%s" % run
)
data = json.loads(response.get_data())
digests = data["graph_execution_digests"]
op_types = [digest["op_type"] for digest in digests]
op_index = op_types.index("AddV2")
graph_id = digests[op_index]["graph_id"]
# Query the /graphs/graph_info route for the inner graph.
# This is the graph that contains the AddV2 op. It corresponds
# to the function "unstack_and_sum".
response = self.server.get(
_ROUTE_PREFIX
+ "/graphs/graph_info?run=%s&graph_id=%s" % (run, graph_id)
)
self.assertEqual(response.status_code, 200)
data = json.loads(response.get_data())
outer_graph_id = data["outer_graph_id"]
self.assertEqual(data["graph_id"], graph_id)
self.assertEqual(data["name"], "unstack_and_sum")
self.assertTrue(outer_graph_id)
self.assertIsInstance(outer_graph_id, str)
# The graph of unstack_and_sum has no inner graphs.
self.assertEqual(data["inner_graph_ids"], [])
# Query the /graphs/graph_info route for the outer graph.
# This corresponds to the function "my_function"
response = self.server.get(
_ROUTE_PREFIX
+ "/graphs/graph_info?run=%s&graph_id=%s" % (run, outer_graph_id)
)
self.assertEqual(response.status_code, 200)
data = json.loads(response.get_data())
outermost_graph_id = data["outer_graph_id"]
self.assertEqual(data["graph_id"], outer_graph_id)
self.assertEqual(data["name"], "my_function")
self.assertTrue(outermost_graph_id)
self.assertIsInstance(outermost_graph_id, str)
        # Besides unstack_and_sum, this outer graph contains another inner
        # graph (repeat_add), so there are two inner graphs in total.
self.assertLen(data["inner_graph_ids"], 2)
self.assertIn(graph_id, data["inner_graph_ids"])
# Query the /graphs/graph_info route for the outermost graph.
# This is an unnamed outermost graph.
response = self.server.get(
_ROUTE_PREFIX
+ "/graphs/graph_info?run=%s&graph_id=%s"
% (run, outermost_graph_id)
)
self.assertEqual(response.status_code, 200)
data = json.loads(response.get_data())
self.assertEqual(data["graph_id"], outermost_graph_id)
self.assertIsNone(data["name"])
self.assertIsNone(data["outer_graph_id"])
self.assertEqual(data["inner_graph_ids"], [outer_graph_id])
def testServeGraphInfoRaisesErrorForInvalidGraphId(self):
"""Get the op info of an op with both inputs and consumers."""
_generate_tfdbg_v2_data(self.logdir)
run = self._getExactlyOneRun()
response = self.server.get(
_ROUTE_PREFIX
+ "/graphs/graph_info?run=%s&graph_id=%s"
% (run, "nonsensical-graph-id")
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
json.loads(response.get_data()),
{
"error": 'Not found: There is no graph with ID "nonsensical-graph-id"'
},
)
def testServeGraphOpInfoForOpWithInputsAndConsumers(self):
"""Get the op info of an op with both inputs and consumers."""
_generate_tfdbg_v2_data(self.logdir)
run = self._getExactlyOneRun()
# First, look up the graph_id and name of the 1st AddV2 op.
response = self.server.get(
_ROUTE_PREFIX + "/graph_execution/digests?run=%s" % run
)
data = json.loads(response.get_data())
digests = data["graph_execution_digests"]
op_types = [digest["op_type"] for digest in digests]
op_index = op_types.index("AddV2")
graph_id = digests[op_index]["graph_id"]
op_name = digests[op_index]["op_name"]
# Actually query the /graphs/op_info route.
response = self.server.get(
_ROUTE_PREFIX
+ "/graphs/op_info?run=%s&graph_id=%s&op_name=%s"
% (run, graph_id, op_name)
)
self.assertEqual(response.status_code, 200)
data = json.loads(response.get_data())
# Check op's self properties.
self.assertEqual(data["op_type"], "AddV2")
self.assertEqual(data["op_name"], digests[op_index]["op_name"])
# TODO(cais): Assert on detailed device name when available.
self.assertIn("device_name", data)
# The op is inside a nested tf.function, so its graph stack must have a
# height > 1.
self.assertGreater(len(data["graph_ids"]), 1)
# All graph_ids should be non-empty strings.
self.assertTrue(all(data["graph_ids"]))
# All graph_ids should be unique (graph recursion is not currently
# allowed in TF.)
self.assertLen(set(data["graph_ids"]), len(data["graph_ids"]))
self.assertNotIn("graph_id", data)
self.assertEqual(data["graph_ids"][-1], digests[op_index]["graph_id"])
self.assertNotIn("input_names", data)
self.assertEqual(data["num_outputs"], 1)
self.assertLen(data["output_tensor_ids"], 1)
self.assertIsInstance(data["output_tensor_ids"][0], int)
self.assertEqual(data["host_name"], _HOST_NAME)
self.assertTrue(data["stack_frame_ids"])
# Check input op properties.
inputs = data["inputs"]
# The two input tensors to the AddV2 op are from the same Unpack
# (unstack) op that provides 4 outputs.
self.assertTrue(inputs[0]["op_name"])
self.assertEqual(inputs[0]["output_slot"], 0)
self.assertTrue(inputs[1]["op_name"])
self.assertEqual(inputs[1]["output_slot"], 1)
input0 = inputs[0]["data"]
input1 = inputs[1]["data"]
for inpt in (input0, input1):
self.assertEqual(inpt["op_type"], "Unpack")
self.assertNotIn("input_names", inpt)
self.assertEqual(inpt["num_outputs"], 4)
self.assertLen(inpt["output_tensor_ids"], 4)
self.assertEqual(inpt["host_name"], _HOST_NAME)
self.assertEqual(inpt["graph_ids"], data["graph_ids"])
self.assertLen(inpt["inputs"], 1)
self.assertTrue(inpt["inputs"][0]["op_name"])
self.assertIsInstance(inpt["inputs"][0]["op_name"], str)
self.assertEqual(inpt["inputs"][0]["output_slot"], 0)
self.assertNotIn("data", inpt["inputs"][0]["op_name"])
self.assertLen(inpt["consumers"], 4)
self.assertLen(inpt["consumers"][0], 1)
self.assertEqual(inpt["consumers"][0][0]["input_slot"], 0)
self.assertNotIn("data", inpt["consumers"][0][0])
self.assertLen(inpt["consumers"][1], 1)
self.assertEqual(inpt["consumers"][1][0]["input_slot"], 1)
self.assertNotIn("data", inpt["consumers"][1][0])
self.assertLen(inpt["consumers"][2], 1)
self.assertEqual(inpt["consumers"][2][0]["input_slot"], 1)
self.assertNotIn("data", inpt["consumers"][2][0])
self.assertLen(inpt["consumers"][3], 1)
self.assertEqual(inpt["consumers"][3][0]["input_slot"], 1)
self.assertNotIn("data", inpt["consumers"][3][0])
# Check consuming op properties.
self.assertLen(data["consumers"], 1)
self.assertLen(data["consumers"][0], 1)
# The AddV2 is consumed by another AddV2 op in the same graph.
self.assertTrue(data["consumers"][0][0]["op_name"])
self.assertIsInstance(data["consumers"][0][0]["op_name"], str)
self.assertEqual(data["consumers"][0][0]["input_slot"], 0)
consumer = data["consumers"][0][0]["data"]
self.assertEqual(consumer["op_type"], "AddV2")
self.assertTrue(consumer["op_name"])
self.assertNotEqual(consumer["op_name"], data["op_name"])
self.assertEqual(consumer["num_outputs"], 1)
self.assertLen(consumer["output_tensor_ids"], 1)
self.assertIsInstance(consumer["output_tensor_ids"][0], int)
self.assertEqual(consumer["host_name"], _HOST_NAME)
self.assertTrue(consumer["stack_frame_ids"])
self.assertLen(consumer["inputs"], 2)
self.assertEqual(consumer["inputs"][0]["op_name"], data["op_name"])
self.assertEqual(consumer["inputs"][0]["output_slot"], 0)
self.assertNotIn("data", consumer["inputs"][0])
self.assertEqual(consumer["inputs"][1]["output_slot"], 2)
self.assertNotIn("data", consumer["inputs"][1])
self.assertLen(consumer["consumers"], 1)
self.assertLen(consumer["consumers"][0], 1)
self.assertTrue(consumer["consumers"][0][0]["op_name"])
self.assertIsInstance(consumer["consumers"][0][0]["op_name"], str)
self.assertEqual(consumer["consumers"][0][0]["input_slot"], 0)
self.assertNotIn("data", consumer["consumers"][0][0])
def testServeGraphOpInfoForOpWithNoConsumers(self):
"""Get the op info of an op with no consumers in the same graph."""
_generate_tfdbg_v2_data(self.logdir)
run = self._getExactlyOneRun()
        # First, look up the graph_id and name of the Identity op in the
        # unstack_and_sum() graph. The Identity op marks the return value of
        # the tf.function and hence has no consumer.
response = self.server.get(
_ROUTE_PREFIX + "/graph_execution/digests?run=%s" % run
)
data = json.loads(response.get_data())
digests = data["graph_execution_digests"]
op_types = [digest["op_type"] for digest in digests]
add_index_0 = op_types.index("AddV2")
graph_id = digests[add_index_0]["graph_id"]
# Actually query the /graphs/op_info route.
response = self.server.get(
_ROUTE_PREFIX
+ "/graphs/op_info?run=%s&graph_id=%s&op_name=%s"
% (run, graph_id, "Identity")
)
self.assertEqual(response.status_code, 200)
data = json.loads(response.get_data())
# Check op's self properties.
self.assertEqual(data["op_type"], "Identity")
self.assertEqual(data["op_name"], "Identity")
# TODO(cais): Assert on detailed device name when available.
self.assertIn("device_name", data)
# The op is inside a nested tf.function, so its graph stack must have a height > 1.
self.assertGreater(len(data["graph_ids"]), 1)
self.assertEqual(data["graph_ids"][-1], graph_id)
self.assertNotIn("input_names", data)
self.assertEqual(data["num_outputs"], 1)
self.assertEqual(data["host_name"], _HOST_NAME)
self.assertTrue(data["stack_frame_ids"])
# Check input op properties.
self.assertLen(data["inputs"], 1)
self.assertTrue(data["inputs"][0]["op_name"])
self.assertIsInstance(data["inputs"][0]["op_name"], str)
self.assertEqual(data["inputs"][0]["output_slot"], 0)
input0 = data["inputs"][0]["data"]
self.assertEqual(input0["op_type"], "AddV2")
# Check consumers: There should be no consumers for this Identity op.
self.assertEqual(data["consumers"], [[]])
def testServeGraphOpInfoForOpWithNoInputs(self):
"""Get the op info of an op with no inputs."""
_generate_tfdbg_v2_data(self.logdir)
run = self._getExactlyOneRun()
# First, look up the graph_id and name of the Placeholder op in the
# same graph as the Unstack op. This Placeholder op has no inputs.
response = self.server.get(
_ROUTE_PREFIX + "/graph_execution/digests?run=%s" % run
)
data = json.loads(response.get_data())
digests = data["graph_execution_digests"]
op_types = [digest["op_type"] for digest in digests]
graph_ids = [digest["graph_id"] for digest in digests]
unpack_op_index = op_types.index("Unpack")
unpack_op_name = digests[unpack_op_index]["op_name"]
graph_id = digests[unpack_op_index]["graph_id"]
placeholder_op_index = list(zip(graph_ids, op_types)).index(
(graph_id, "Placeholder")
)
op_name = digests[placeholder_op_index]["op_name"]
# Actually query the /graphs/op_info route.
response = self.server.get(
_ROUTE_PREFIX
+ "/graphs/op_info?run=%s&graph_id=%s&op_name=%s"
% (run, graph_id, op_name)
)
self.assertEqual(response.status_code, 200)
data = json.loads(response.get_data())
# Check op's self properties.
self.assertEqual(data["op_type"], "Placeholder")
self.assertTrue(data["op_name"])
# TODO(cais): Assert on detailed device name when available.
self.assertIn("device_name", data)
# The op is inside a nested tf.function, so its graph stack must have a height > 1.
self.assertNotIn("graph_id", data)
self.assertGreater(len(data["graph_ids"]), 1)
self.assertEqual(data["graph_ids"][-1], graph_id)
self.assertNotIn("input_names", data)
self.assertEqual(data["num_outputs"], 1)
self.assertEqual(data["host_name"], _HOST_NAME)
self.assertTrue(data["stack_frame_ids"])
# Check input op properties: The Placeholder has no inputs.
self.assertEqual(data["inputs"], [])
# Check consumers.
self.assertLen(data["consumers"], 1)
self.assertLen(data["consumers"][0], 1)
self.assertEqual(data["consumers"][0][0]["op_name"], unpack_op_name)
self.assertEqual(data["consumers"][0][0]["input_slot"], 0)
consumer = data["consumers"][0][0]["data"]
self.assertEqual(consumer["op_type"], "Unpack")
self.assertEqual(consumer["op_name"], unpack_op_name)
def testServeGraphOpInfoWithInputsAndConsumerLookupFailures(self):
"""Get the op info of an op with both inputs and consumers."""
from tensorflow.python.debug.lib import debug_events_reader
_generate_tfdbg_v2_data(self.logdir)
run = self._getExactlyOneRun()
# First, look up the graph_id and name of the 1st AddV2 op.
response = self.server.get(
_ROUTE_PREFIX + "/graph_execution/digests?run=%s" % run
)
data = json.loads(response.get_data())
digests = data["graph_execution_digests"]
op_types = [digest["op_type"] for digest in digests]
op_index = op_types.index("AddV2")
graph_id = digests[op_index]["graph_id"]
add_v2_op_name = digests[op_index]["op_name"]
graph = self.plugin._data_provider._multiplexer._reader.graph_by_id(
graph_id
)
def fake_get_op_creation_digest(op_name):
if op_name == add_v2_op_name:
return debug_events_reader.GraphOpCreationDigest(
1234.0, # wall_time
777, # offset
graph_id,
"AddV2", # op_type
add_v2_op_name,
[12], # output_tensor_ids
"localhost", # host_name
["a1", "b2"], # stack_frame_ids
input_names=["add_v2_input:0"],
)
else:
raise KeyError()
with mock.patch.object(
graph, "get_op_creation_digest", fake_get_op_creation_digest
):
# Actually query the /graphs/op_info route.
response = self.server.get(
_ROUTE_PREFIX
+ "/graphs/op_info?run=%s&graph_id=%s&op_name=%s"
% (run, graph_id, add_v2_op_name)
)
self.assertEqual(response.status_code, 200)
data = json.loads(response.get_data())
self.assertNotIn("input_names", data)
self.assertEqual(
data["inputs"], [{"op_name": "add_v2_input", "output_slot": 0,}]
) # "data" is missing due to op lookup failure.
# Check the consumer op data, which should also be None due to the
# KeyError encountered during the retrieval of the data about the
# consumer op.
self.assertLen(data["consumers"], 1)
self.assertLen(data["consumers"][0], 1)
consumer_spec = data["consumers"][0][0]
self.assertTrue(consumer_spec["op_name"])
self.assertIsInstance(consumer_spec["op_name"], str)
self.assertEqual(consumer_spec["input_slot"], 0)
# NOTE: "data" is missing due to op lookup failure.
self.assertNotIn("data", consumer_spec)
def testServeGraphOpInfoRespondsWithErrorForInvalidGraphId(self):
_generate_tfdbg_v2_data(self.logdir)
run = self._getExactlyOneRun()
# Query the /graphs/op_info route with an invalid graph_id.
response = self.server.get(
_ROUTE_PREFIX
+ "/graphs/op_info?run=%s&graph_id=%s&op_name=%s"
% (run, "nonsensical-graph-id", "Placeholder")
)
self.assertEqual(400, response.status_code)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
self.assertEqual(
json.loads(response.get_data()),
{
"error": 'Not found: There is no graph with ID "nonsensical-graph-id"'
},
)
def testServeGraphOpInfoRespondsWithErrorForInvalidOpName(self):
"""Get the op info of an op with no inputs."""
_generate_tfdbg_v2_data(self.logdir)
run = self._getExactlyOneRun()
# First, look up the valid graph_id.
response = self.server.get(
_ROUTE_PREFIX + "/graph_execution/digests?run=%s" % run
)
data = json.loads(response.get_data())
digests = data["graph_execution_digests"]
op_types = [digest["op_type"] for digest in digests]
op_index = op_types.index("Placeholder")
graph_id = digests[op_index]["graph_id"]
        # Query the /graphs/op_info route with a valid graph_id and
        # a nonexistent op_name.
response = self.server.get(
_ROUTE_PREFIX
+ "/graphs/op_info?run=%s&graph_id=%s&op_name=%s"
% (run, graph_id, "nonexistent-op-name")
)
self.assertEqual(400, response.status_code)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
self.assertEqual(
json.loads(response.get_data()),
{
"error": 'Not found: There is no op named "nonexistent-op-name" '
'in graph with ID "%s"' % graph_id
},
)
def testServeSourceFileListIncludesThisTestFile(self):
_generate_tfdbg_v2_data(self.logdir)
run = self._getExactlyOneRun()
response = self.server.get(
_ROUTE_PREFIX + "/source_files/list?run=%s" % run
)
self.assertEqual(200, response.status_code)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
source_file_list = json.loads(response.get_data())
self.assertIsInstance(source_file_list, list)
self.assertIn([_HOST_NAME, _CURRENT_FILE_FULL_PATH], source_file_list)
def testServeSourceFileListWithoutRunParamErrors(self):
# Make request without run param.
response = self.server.get(_ROUTE_PREFIX + "/source_files/list")
self.assertEqual(400, response.status_code)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
self.assertEqual(
json.loads(response.get_data()),
{"error": "run parameter is not provided"},
)
def testServeSourceFileContentOfThisTestFile(self):
_generate_tfdbg_v2_data(self.logdir)
run = self._getExactlyOneRun()
# First, access the source file list, so we can get hold of the index
# for this file. The index is required for the request to the
# "/source_files/file" route below.
response = self.server.get(
_ROUTE_PREFIX + "/source_files/list?run=%s" % run
)
source_file_list = json.loads(response.get_data())
index = source_file_list.index([_HOST_NAME, _CURRENT_FILE_FULL_PATH])
response = self.server.get(
_ROUTE_PREFIX + "/source_files/file?run=%s&index=%d" % (run, index)
)
self.assertEqual(200, response.status_code)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
data = json.loads(response.get_data())
self.assertEqual(data["host_name"], _HOST_NAME)
self.assertEqual(data["file_path"], _CURRENT_FILE_FULL_PATH)
with open(__file__, "r") as f:
lines = f.read().split("\n")
self.assertEqual(data["lines"], lines)
def testServeSourceFileWithoutRunErrors(self):
# Make request without run param.
response = self.server.get(_ROUTE_PREFIX + "/source_files/file")
self.assertEqual(400, response.status_code)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
self.assertEqual(
json.loads(response.get_data()),
{"error": "run parameter is not provided"},
)
def testServeSourceFileWithOutOfBoundIndexErrors(self):
_generate_tfdbg_v2_data(self.logdir)
run = self._getExactlyOneRun()
# First, access the source file list, so we can get hold of the index
# for this file. The index is required for the request to the
# "/source_files/file" route below.
response = self.server.get(
_ROUTE_PREFIX + "/source_files/list?run=%s" % run
)
source_file_list = json.loads(response.get_data())
self.assertTrue(source_file_list)
# Use an out-of-bound index.
invalid_index = len(source_file_list)
response = self.server.get(
_ROUTE_PREFIX
+ "/source_files/file?run=%s&index=%d" % (run, invalid_index)
)
self.assertEqual(400, response.status_code)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
self.assertEqual(
json.loads(response.get_data()),
{
"error": "Not found: There is no source-code file at index %d"
% invalid_index
},
)
def testServeStackFrames(self):
_generate_tfdbg_v2_data(self.logdir)
run = self._getExactlyOneRun()
response = self.server.get(
_ROUTE_PREFIX + "/execution/data?run=%s&begin=0&end=1" % run
)
data = json.loads(response.get_data())
stack_frame_ids = data["executions"][0]["stack_frame_ids"]
self.assertIsInstance(stack_frame_ids, list)
self.assertTrue(stack_frame_ids)
response = self.server.get(
_ROUTE_PREFIX
+ "/stack_frames/stack_frames?run=%s&stack_frame_ids=%s"
% (run, ",".join(stack_frame_ids))
)
self.assertEqual(200, response.status_code)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
data = json.loads(response.get_data())
self.assertIsInstance(data, dict)
stack_frames = data["stack_frames"]
self.assertIsInstance(stack_frames, list)
self.assertLen(stack_frames, len(stack_frame_ids))
for item in stack_frames:
self.assertIsInstance(item, list)
self.assertLen(item, 4) # [host_name, file_path, lineno, function].
self.assertEqual(item[0], _HOST_NAME)
self.assertIsInstance(item[1], six.string_types)
self.assertTrue(item[1])
self.assertIsInstance(item[2], int)
self.assertGreaterEqual(item[2], 1)
self.assertIsInstance(item[3], six.string_types)
self.assertTrue(item[3])
# Assert that the current file and current function should be in the
# stack frames.
frames_for_this_function = list(
filter(
lambda frame: frame[0] == _HOST_NAME
and frame[1] == _CURRENT_FILE_FULL_PATH
and frame[3] == "testServeStackFrames",
stack_frames,
)
)
self.assertLen(frames_for_this_function, 1)
def testServeStackFramesWithMissingStackFrameIdParamErrors(self):
_generate_tfdbg_v2_data(self.logdir)
run = self._getExactlyOneRun()
response = self.server.get(
_ROUTE_PREFIX + "/stack_frames/stack_frames?run=%s" % run
)
self.assertEqual(400, response.status_code)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
self.assertEqual(
json.loads(response.get_data()),
{"error": "Missing stack_frame_ids parameter"},
)
    def testServeStackFramesWithEmptyStackFrameIdParamErrors(self):
_generate_tfdbg_v2_data(self.logdir)
run = self._getExactlyOneRun()
response = self.server.get(
# Use empty value for the stack_frame_ids parameter.
_ROUTE_PREFIX
+ "/stack_frames/stack_frames?run=%s&stack_frame_ids=" % run
)
self.assertEqual(400, response.status_code)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
self.assertEqual(
json.loads(response.get_data()),
{"error": "Empty stack_frame_ids parameter"},
)
    def testServeStackFramesWithNonexistentStackFrameIdErrors(self):
        _generate_tfdbg_v2_data(self.logdir)
        run = self._getExactlyOneRun()
        invalid_stack_frame_id = "nonsense-stack-frame-id"
        response = self.server.get(
            # Use a nonexistent ID for the stack_frame_ids parameter.
            _ROUTE_PREFIX
            + "/stack_frames/stack_frames?run=%s&stack_frame_ids=%s"
            % (run, invalid_stack_frame_id)
)
self.assertEqual(400, response.status_code)
self.assertEqual(
"application/json", response.headers.get("content-type")
)
self.assertRegexpMatches(
json.loads(response.get_data())["error"],
"Not found: Cannot find stack frame with ID"
".*nonsense-stack-frame-id.*",
)
if __name__ == "__main__":
tf.test.main()
|
webstreaming.py
|
"""
Web based Sign Language Predictor
USAGE:
python webstreaming.py --ip 127.0.0.1 --port 8000
"""
# import all the necessary packages
from ClassifierNet import Net
from imutils.video import VideoStream
from flask import Response
from flask import Flask
from flask import render_template
import threading
import argparse
import datetime
import time
import cv2
import numpy as np
import torch
from torch.autograd import Variable
import pymongo
# Initialize the output frame and a lock used to ensure thread-safe
# exchanges of the output frames (useful when multiple browsers/tabs
# are viewing the stream).
outputFrame = None
lock = threading.Lock()
# initialize a flask object
app = Flask(__name__)
# initialize the video stream and allow the camera sensor to warmup
vs = VideoStream(src=0).start()
time.sleep(2.0)
# Create Mongo Client using username and password
client = pymongo.MongoClient("mongodb+srv://meghana-urs:Project123@projectdb-uzrf2.mongodb.net/?retryWrites=true&w=majority")
@app.route("/")
def index():
""" return the rendered template """
return render_template("index.html")
@app.route("/practice")
def practice():
""" return the rendered template """
return render_template("practice.html")
def sign_prediction():
"""
The function predicts sign for every frame of the video using
the trained model. A square window which contains the hand
is cropped from each frame which is the input to the model.
The predicted letter is displayed on the output frame.
"""
# Global references to the video stream, output frame, and lock variables
global vs, outputFrame, lock
# Load model architecture, pretrained weights and set to eval mode
model = Net()
model.load_state_dict(torch.load('./checkpoint.pth'))
model.eval()
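    # 24 static letters (A-Y without J and Z): signs for J and Z involve
    # motion and cannot be classified from a single frame.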
index_to_letter = list('ABCDEFGHIKLMNOPQRSTUVWXY')
letter = index_to_letter[0]
print("Inference session")
previous_timestamp = datetime.datetime.now()
# loop over frames from the video stream
while True:
# read the next frame from the video stream
frame = vs.read()
        # frame.shape is (rows, cols, channels), so w holds the frame height
        # and h holds the frame width here.
        (w, h, c) = frame.shape
        s = round(h/2)  # side of the square crop window: half the frame width
frame = cv2.flip(frame, 1)
blank_image = np.zeros((w, h, c), np.uint8)
# Crop the window, convert the frame to grayscale, resize the image
blank_image[50: s+50 ,h-50-s: h-50, :] = frame[50: s+50 ,h-50-s: h-50, :]
model_input = frame[ 50: s+50 ,h-50-s: h-50, :]
model_input = cv2.flip(model_input, 1)
model_input = cv2.cvtColor(model_input, cv2.COLOR_RGB2GRAY)
x = cv2.resize(model_input, (28, 28))
x = (x - np.min(x)) / (np.max(x) - np.min(x))
x = x.reshape(1, 1, 28, 28).astype(np.float32)
# Convert input to Float Tensor and get predictions
x = torch.FloatTensor(x)
y = model(Variable(x))
pred = torch.argmax(y).cpu().numpy()
letter = index_to_letter[int(pred)]
timestamp = datetime.datetime.now()
# Post the prediction to the database after every 15 seconds
if datetime.datetime.now() >= previous_timestamp + datetime.timedelta(seconds=15):
previous_timestamp = datetime.datetime.now()
data = {
'Time Stamp' : timestamp,
'Prediction' : letter
}
with client:
db = client.Sign_Prediction
db.prediction.insert_one(data)
#Display predictions on the output frame
frame = cv2.addWeighted(frame, 0.3, blank_image, 1, 0)
cv2.putText(frame, letter, (100, 100), cv2.FONT_HERSHEY_SIMPLEX, 2.0, (255, 255, 255), thickness=2)
color = (0, 0, 255)
thickness = 2
cv2.rectangle(frame, (h-50-s,50), (h-50,s+50), color, thickness)
with lock:
outputFrame = frame.copy()
def generate():
""" Video streaming generator function """
# grab global references to the output frame and lock variables
global outputFrame, lock
while True:
with lock:
if outputFrame is None:
continue
(flag, encodedImage) = cv2.imencode(".jpg", outputFrame)
if not flag:
continue
yield(b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' +
bytearray(encodedImage) + b'\r\n')
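# Each yielded chunk above is one JPEG part of a multipart/x-mixed-replace
# response; the "--frame" marker must match the boundary declared in the
# video_feed() route below.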
@app.route("/video_feed")
def video_feed():
""" return the response generated along with the specific media
type (mime type)
"""
return Response(generate(),
mimetype = "multipart/x-mixed-replace; boundary=frame")
""" check to see if this is the main thread of execution """
if __name__ == '__main__':
# construct the argument parser and parse command line arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--ip", type=str, required=False,
help="ip address of the device")
ap.add_argument("-o", "--port", type=int, required=True,
help="ephemeral port number of the server (1024 to 65535)")
args = vars(ap.parse_args())
# start a thread that will perform sign prediction
t = threading.Thread(target=sign_prediction)
t.daemon = True
t.start()
# start the flask app
app.run(host=args["ip"], port=args["port"], debug=True,
threaded=True, use_reloader=False)
# release the video stream pointer
vs.stop()
|
thread_05_1.py
|
import threading
Q=1000000
thread_list = []
# Each worker decrements the shared counter Q `max` times.
def drink(max):
    global Q
    for i in range(0, max):
        Q -= 1
# Start two threads that each consume 500000 units from Q.
for i in range(0, 2):
    thread_inst = threading.Thread(target=drink, args=(500000,))
    thread_list.append(thread_inst)
    thread_inst.start()
# Wait for both threads to finish, then print the remaining amount.
for thread in thread_list:
    thread.join()
print(Q)
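# The print above usually shows a value other than 0: "Q -= 1" is a
# read-modify-write, so the two threads race on the shared counter. A minimal
# sketch of a race-free variant (not part of the original exercise; the names
# are illustrative) serializes the update with a lock:
lock = threading.Lock()
def drink_safely(max):
    global Q
    for i in range(0, max):
        with lock:  # the lock makes the read-modify-write atomic
            Q -= 1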
|
launch_experiment.py
|
import argparse
from datetime import datetime
import itertools
import multiprocessing
import os
import random
import sys
import time
import doodad as dd
import doodad.mount as mount
from doodad.easy_sweep.hyper_sweep import run_sweep_doodad, Sweeper
import doodad.easy_sweep.launcher as launcher
from experiment_utils import config
from experiment_utils.sweeper import generate_variants
from experiment_utils.utils import query_yes_no
from experiment_configs.base_experiment import experiment as run_experiment
def launch_experiment(
exp_name,
variant,
sweep_values=None,
num_seeds=1,
get_confirmation=True,
# arguments specifying where the code to run the experiment is
experiment_class=None,
get_config=None,
get_algorithm=None,
get_offline_algorithm=None,
load_config=None,
# misc arguments
instance_type='c4.2xlarge',
use_gpu=False,
include_date=True,
):
parser = argparse.ArgumentParser()
parser.add_argument('--mode', type=str, default='local',
help='Mode for running the experiments - local: runs on local machine, '
'ec2: runs on AWS ec2 cluster (requires a proper configuration file)')
parser.add_argument('--gpu_id', '-id', type=int, default=0,
help='GPU id for running experiments (if using single GPU)')
parser.add_argument('--num_gpu', '-g', type=int, default=3,
help='Number of GPUs to use for running the experiments')
parser.add_argument('--exps_per_gpu', '-e', type=int, default=1,
help='Number of experiments per GPU simultaneously')
parser.add_argument('--num_cpu', '-c', type=int, default=multiprocessing.cpu_count(),
help='Number of threads to use for running experiments')
parser.add_argument('--log_to_wandb', '-w', type=bool, default=False,
help='Whether or not to log to Weights and Biases')
args = parser.parse_args(sys.argv[1:])
"""
Generating experiment from specified functions:
If the user specifies experiment_class, it is assumed that if get_algorithm and/or
get_offline_algorithm are specified, then they are located there. This is mostly
just for backwards compatibility.
Otherwise, get_algorithm and get_offline_algorithm should be fed into launch_experiment,
which is generally more modular than specifying the class. get_config must be
specified, either in experiment_class or in the method call.
load_config is called after the initialization of the config dict, so it can modify any
values of the dict in place as needed, and must be fed directly.
"""
experiment_config = dict()
if experiment_class is not None:
experiment_config['get_config'] = experiment_class.get_config
if hasattr(experiment_class, 'get_algorithm'):
experiment_config['get_algorithm'] = experiment_class.get_algorithm
if hasattr(experiment_class, 'get_offline_algorithm'):
experiment_config['get_offline_algorithm'] = \
experiment_class.get_offline_algorithm
if get_config is not None:
experiment_config['get_config'] = get_config
if get_algorithm is not None:
experiment_config['get_algorithm'] = get_algorithm
if get_offline_algorithm is not None:
experiment_config['get_offline_algorithm'] = get_offline_algorithm
if load_config is not None:
experiment_config['load_config'] = load_config
if sweep_values is None:
variants = [variant]
else:
variants = generate_variants(variant, sweep_values, num_seeds=num_seeds)
"""
Setup in the form to feed into the doodad sweeper.
"""
if include_date:
timestamp = datetime.now().strftime('%m-%d')
exp_name = '%s-%s' % (timestamp, exp_name)
gpu_id = args.gpu_id
log_to_wandb = args.log_to_wandb
sweep_params = dict(
experiment_config=[experiment_config],
exp_prefix=[exp_name],
variant=variants,
gpu_kwargs=[{'mode': use_gpu, # don't use GPU with EC2
'gpu_id': gpu_id}],
log_to_wandb=[log_to_wandb],
)
"""
Confirmation
"""
print('\n')
print('=' * 50)
print('Launching experiment: %s' % exp_name)
print('num variants: %d, num seeds: %d' % (len(variants) // num_seeds, num_seeds))
print('About to launch %d total experiments' % (len(variants)))
print('=' * 50)
    # sweep_values may be None (the default); guard before iterating.
    if sweep_values:
        for k in sweep_values:
            print('%s:' % k, sweep_values[k])
print('=' * 50)
print('\n')
if get_confirmation and not query_yes_no('Confirm?'):
return
"""
Standard run_sweep
"""
local_mount = mount.MountLocal(local_dir=config.BASE_DIR, pythonpath=True)
docker_mount_point = os.path.join(config.DOCKER_MOUNT_DIR, exp_name)
sweeper = launcher.DoodadSweeper([local_mount], docker_img=config.DOCKER_IMAGE,
docker_output_dir=docker_mount_point,
local_output_dir=os.path.join(config.DATA_DIR, 'local', exp_name))
# it's annoying to have to set up s3 if we don't want to use it
# TODO: if you want to use S3, uncomment this
sweeper.mount_out_s3 = None # mount.MountS3(s3_path='', mount_point=docker_mount_point, output=True)
if args.mode == 'ec2':
print("\n" + "**********" * 10 + "\nexp_prefix: {}\nvariants: {}".format(exp_name, len(
list(itertools.product(*[value for value in sweep_params.values()])))))
if query_yes_no("Continue?"):
sweeper.run_sweep_ec2(run_experiment, sweep_params, bucket_name=config.S3_BUCKET_NAME,
instance_type=instance_type,
region='us-east-2', s3_log_name=exp_name, add_date_to_logname=False)
elif args.mode == 'local_docker':
mode_docker = dd.mode.LocalDocker(
image=sweeper.image,
)
run_sweep_doodad(run_experiment, sweep_params, run_mode=mode_docker,
mounts=sweeper.mounts)
elif args.mode == 'local':
sweeper.run_sweep_serial(run_experiment, sweep_params)
elif args.mode == 'local_par':
sweeper.run_sweep_parallel(run_experiment, sweep_params)
elif args.mode == 'multi_gpu':
run_sweep_multi_gpu(run_experiment, sweep_params, num_gpu=args.num_gpu, exps_per_gpu=args.exps_per_gpu)
else:
raise NotImplementedError('experiment run mode not recognized')
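# A hypothetical, minimal invocation of launch_experiment (the config helper
# names below are illustrative, not part of this module):
#
#   from experiment_configs.configs.my_config import get_config  # assumed helper
#
#   launch_experiment(
#       exp_name='my-experiment',
#       variant=dict(env_name='HalfCheetah-v2', seed=0),
#       sweep_values={'seed': [0, 1, 2]},
#       get_config=get_config,
#   )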
def run_sweep_multi_gpu(
run_method,
params,
repeat=1,
num_cpu=multiprocessing.cpu_count(),
num_gpu=2,
exps_per_gpu=2
):
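    """Runs the sweep's experiments in parallel across multiple local GPUs.

    Maintains num_gpu * exps_per_gpu worker slots. Before each launch, the
    target GPU is selected via CUDA_VISIBLE_DEVICES and a CPU affinity range
    is applied with taskset; queued configs start as soon as a slot frees up.
    """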
sweeper = Sweeper(params, repeat, include_name=False)
gpu_frac = 0.9 / exps_per_gpu
num_runs = num_gpu * exps_per_gpu
cpu_per_gpu = num_cpu / num_gpu
exp_args = []
for config in sweeper:
exp_args.append((config, run_method))
random.shuffle(exp_args)
processes = [None] * num_runs
run_info = [(i, (i * cpu_per_gpu, (i + 1) * cpu_per_gpu)) for i in range(num_gpu)] * exps_per_gpu
for kwarg, run in exp_args:
launched = False
while not launched:
for idx in range(num_runs):
if processes[idx] is None or not processes[idx].is_alive():
# kwarg['gpu_frac'] = gpu_frac
p = multiprocessing.Process(target=run, kwargs=kwarg)
os.environ["CUDA_VISIBLE_DEVICES"] = "%d" % run_info[idx][0]
os.system("taskset -p -c %d-%d %d" % (run_info[idx][1] + (os.getpid(),)))
p.start()
processes[idx] = p
launched = True
break
if not launched:
time.sleep(10)
|
bazel_build.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2016 The Tulsi Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bridge between Xcode and Bazel for the "build" action."""
import atexit
import errno
import fcntl
import hashlib
import inspect
import io
import json
import os
import pipes
import re
import shutil
import signal
import subprocess
import sys
import textwrap
import threading
import time
import zipfile
from apfs_clone_copy import CopyOnWrite
import bazel_build_events
import bazel_build_settings
import bazel_options
from bootstrap_lldbinit import BootstrapLLDBInit
from bootstrap_lldbinit import TULSI_LLDBINIT_FILE
import tulsi_logging
from update_symbol_cache import UpdateSymbolCache
# List of frameworks that Xcode injects into test host targets that should be
# re-signed when running the tests on devices.
XCODE_INJECTED_FRAMEWORKS = [
'libXCTestBundleInject.dylib',
'IDEBundleInjection.framework',
'XCTAutomationSupport.framework',
'XCTest.framework',
]
_logger = None
def _PrintUnbuffered(msg):
sys.stdout.write('%s\n' % msg)
sys.stdout.flush()
def _PrintXcodeWarning(msg):
sys.stdout.write(':: warning: %s\n' % msg)
sys.stdout.flush()
def _PrintXcodeError(msg):
sys.stderr.write(':: error: %s\n' % msg)
sys.stderr.flush()
def _Fatal(msg, fatal_frame=None):
"""Print a fatal error pointing to the failure line inside the script."""
if not fatal_frame:
fatal_frame = inspect.currentframe().f_back
filename, line_number, _, _, _ = inspect.getframeinfo(fatal_frame)
_PrintUnbuffered('%s:%d: error: %s' % (os.path.abspath(filename),
line_number, msg))
CLEANUP_BEP_FILE_AT_EXIT = False
# Function to be called atexit to clean up the BEP file if one is present.
# This is especially useful in cases of abnormal termination (such as what
# happens when Xcode is killed).
def _BEPFileExitCleanup(bep_file_path):
if not CLEANUP_BEP_FILE_AT_EXIT:
return
try:
os.remove(bep_file_path)
except OSError as e:
_PrintXcodeWarning('Failed to remove BEP file from %s. Error: %s' %
(bep_file_path, e.strerror))
def _InterruptHandler(signum, frame):
"""Gracefully exit on SIGINT."""
del signum, frame # Unused.
_PrintUnbuffered('Caught interrupt signal. Exiting...')
sys.exit(0)
class Timer(object):
"""Simple profiler."""
def __init__(self, action_name, action_id):
"""Creates a new Timer object.
Args:
action_name: A human-readable action name, shown in the build log.
action_id: A machine-readable action identifier, can be used for metrics.
Returns:
A Timer instance.
Raises:
RuntimeError: if Timer is created without initializing _logger.
"""
if _logger is None:
raise RuntimeError('Attempted to create Timer without a logger.')
self.action_name = action_name
self.action_id = action_id
self._start = None
def Start(self):
self._start = time.time()
return self
def End(self, log_absolute_times=False):
end = time.time()
seconds = end - self._start
if log_absolute_times:
_logger.log_action(self.action_name, self.action_id, seconds,
self._start, end)
else:
_logger.log_action(self.action_name, self.action_id, seconds)
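# Hypothetical usage of Timer (assumes the module-level _logger has already
# been initialized by the main entry point):
#   timer = Timer('running bazel build', 'bazel_build').Start()
#   ...  # run the Bazel invocation
#   timer.End()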
def _LockFileCreate():
# This relies on this script running at the root of the bazel workspace.
cwd = os.environ['PWD']
cwd_hash = hashlib.sha256(cwd.encode()).hexdigest()
return '/tmp/tulsi_bazel_build_{}.lock'.format(cwd_hash)
# Function to be called atexit to release the file lock on script termination.
def _LockFileExitCleanup(lock_file_handle):
lock_file_handle.close()
def _LockFileAcquire(lock_path):
"""Force script to wait on file lock to serialize build target actions.
Args:
lock_path: Path to the lock file.
"""
_PrintUnbuffered('Queuing Tulsi build...')
lockfile = open(lock_path, 'w')
# Register "fclose(...)" as early as possible, before acquiring lock.
atexit.register(_LockFileExitCleanup, lockfile)
while True:
try:
fcntl.lockf(lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
break
except IOError as err:
if err.errno != errno.EAGAIN:
raise
else:
time.sleep(0.1)
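# Hypothetical usage at script startup, pairing the two helpers above:
#   _LockFileAcquire(_LockFileCreate())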
class CodesignBundleAttributes(object):
"""Wrapper class for codesigning attributes of a signed bundle."""
# List of codesigning attributes that this script requires.
_ATTRIBUTES = ['Authority', 'Identifier', 'TeamIdentifier']
def __init__(self, codesign_output):
self.attributes = {}
pending_attributes = list(self._ATTRIBUTES)
for line in codesign_output.split('\n'):
if not pending_attributes:
break
for attribute in pending_attributes:
if line.startswith(attribute):
value = line[len(attribute) + 1:]
self.attributes[attribute] = value
pending_attributes.remove(attribute)
break
for attribute in self._ATTRIBUTES:
if attribute not in self.attributes:
_PrintXcodeError(
'Failed to extract %s from %s.\n' % (attribute, codesign_output))
def Get(self, attribute):
"""Returns the value for the given attribute, or None if it wasn't found."""
value = self.attributes.get(attribute)
if attribute not in self._ATTRIBUTES:
_PrintXcodeError(
'Attribute %s not declared to be parsed. ' % attribute +
'Available attributes are %s.\n' % self._ATTRIBUTES)
return value
class _OptionsParser(object):
"""Handles parsing script options."""
# List of all supported Xcode configurations.
KNOWN_CONFIGS = ['Debug', 'Release']
def __init__(self, build_settings, sdk_version, platform_name, arch):
self.targets = []
self.build_settings = build_settings
self.common_build_options = [
'--verbose_failures',
'--bes_outerr_buffer_size=0', # Don't buffer Bazel output.
]
self.sdk_version = sdk_version
self.platform_name = platform_name
if self.platform_name.startswith('watch'):
config_platform = 'watchos'
elif self.platform_name.startswith('iphone'):
config_platform = 'ios'
elif self.platform_name.startswith('macos'):
config_platform = 'macos'
elif self.platform_name.startswith('appletv'):
config_platform = 'tvos'
else:
self._WarnUnknownPlatform()
config_platform = 'ios'
self.bazel_build_config = '{}_{}'.format(config_platform, arch)
if self.bazel_build_config not in build_settings.platformConfigFlags:
_PrintXcodeError('Unknown active compilation target of "{}". '
'Please report a Tulsi bug.'
.format(self.bazel_build_config))
sys.exit(1)
self.verbose = 0
self.bazel_bin_path = 'bazel-bin'
self.bazel_executable = None
@staticmethod
def _UsageMessage():
"""Returns a usage message string."""
usage = textwrap.dedent("""\
Usage: %s <target> [<target2> ...] --bazel <bazel_binary_path> [options]
Where options are:
--verbose [-v]
Increments the verbosity of the script by one level. This argument
may be provided multiple times to enable additional output levels.
--bazel_bin_path <path>
Path at which Bazel-generated artifacts may be retrieved.
""" % sys.argv[0])
return usage
def ParseOptions(self, args):
"""Parses arguments, returning (message, exit_code)."""
bazel_executable_index = args.index('--bazel')
self.targets = args[:bazel_executable_index]
if not self.targets or len(args) < bazel_executable_index + 2:
return (self._UsageMessage(), 10)
self.bazel_executable = args[bazel_executable_index + 1]
return self._ParseVariableOptions(args[bazel_executable_index + 2:])
def GetBaseFlagsForTargets(self, config):
is_debug = config == 'Debug'
return self.build_settings.flags_for_target(
self.targets[0],
is_debug,
self.bazel_build_config)
def GetEnabledFeatures(self):
"""Returns a list of enabled Bazel features for the active target."""
return self.build_settings.features_for_target(self.targets[0])
def GetBazelOptions(self, config):
"""Returns the full set of build options for the given config."""
bazel, start_up, build = self.GetBaseFlagsForTargets(config)
all_build = []
all_build.extend(self.common_build_options)
all_build.extend(build)
xcode_version_flag = self._ComputeXcodeVersionFlag()
if xcode_version_flag:
all_build.append('--xcode_version=%s' % xcode_version_flag)
return bazel, start_up, all_build
def _WarnUnknownPlatform(self):
_PrintUnbuffered('Warning: unknown platform "%s" will be treated as '
'iOS' % self.platform_name)
def _ParseVariableOptions(self, args):
"""Parses flag-based args, returning (message, exit_code)."""
verbose_re = re.compile('-(v+)$')
while args:
arg = args[0]
args = args[1:]
if arg == '--bazel_bin_path':
if not args:
return ('Missing required parameter for %s' % arg, 2)
self.bazel_bin_path = args[0]
args = args[1:]
elif arg == '--verbose':
self.verbose += 1
else:
match = verbose_re.match(arg)
if match:
self.verbose += len(match.group(1))
else:
return ('Unknown option "%s"\n%s' % (arg, self._UsageMessage()), 1)
return (None, 0)
@staticmethod
def _GetXcodeBuildVersionString():
"""Returns Xcode build version from the environment as a string."""
return os.environ['XCODE_PRODUCT_BUILD_VERSION']
@staticmethod
def _GetXcodeVersionString():
"""Returns Xcode version info from the environment as a string."""
reported_version = os.environ['XCODE_VERSION_ACTUAL']
match = re.match(r'(\d{2})(\d)(\d)$', reported_version)
if not match:
_PrintUnbuffered('Warning: Failed to extract Xcode version from %s' % (
reported_version))
return None
major_version = int(match.group(1))
minor_version = int(match.group(2))
fix_version = int(match.group(3))
return '%d.%d.%d' % (major_version, minor_version, fix_version)
@staticmethod
def _ComputeXcodeVersionFlag():
"""Returns a string for the --xcode_version build flag, if any.
The flag should be used if the active Xcode version was not the same one
used during project generation.
Note this is a best-attempt only; it may not be accurate, as Bazel itself
caches the active DEVELOPER_DIR path and the user may have changed their
installed Xcode version.
"""
xcode_version = _OptionsParser._GetXcodeVersionString()
build_version = _OptionsParser._GetXcodeBuildVersionString()
if not xcode_version or not build_version:
return None
# Of the form Major.Minor.Fix.Build (new Bazel form) or Major.Min.Fix (old).
full_bazel_version = os.environ.get('TULSI_XCODE_VERSION')
if not full_bazel_version: # Unexpected: Tulsi gen didn't set the flag.
return xcode_version
# Newer Bazel versions specify the version as Major.Minor.Fix.Build.
if full_bazel_version.count('.') == 3:
components = full_bazel_version.rsplit('.', 1)
bazel_xcode_version = components[0]
bazel_build_version = components[1]
if (xcode_version != bazel_xcode_version
or build_version != bazel_build_version):
return '{}.{}'.format(xcode_version, build_version)
else:
return None
else: # Old version of Bazel. We need to use form Major.Minor.Fix.
return xcode_version if xcode_version != full_bazel_version else None
class BazelBuildBridge(object):
"""Handles invoking Bazel and unpacking generated binaries."""
BUILD_EVENTS_FILE = 'build_events.json'
def __init__(self, build_settings):
self.build_settings = build_settings
self.verbose = 0
self.build_path = None
self.bazel_bin_path = None
self.codesign_attributes = {}
self.codesigning_folder_path = os.environ['CODESIGNING_FOLDER_PATH']
self.xcode_action = os.environ['ACTION'] # The Xcode build action.
# When invoked as an external build system script, Xcode will set ACTION to
# an empty string.
if not self.xcode_action:
self.xcode_action = 'build'
if int(os.environ['XCODE_VERSION_MAJOR']) < 900:
xcode_build_version = os.environ['XCODE_PRODUCT_BUILD_VERSION']
_PrintXcodeWarning('Tulsi officially supports Xcode 9+. You are using an '
'earlier Xcode, build %s.' % xcode_build_version)
self.tulsi_version = os.environ.get('TULSI_VERSION', 'UNKNOWN')
# TODO(b/69857078): Remove this when wrapped_clang is updated.
self.direct_debug_prefix_map = False
self.normalized_prefix_map = False
self.update_symbol_cache = UpdateSymbolCache()
# Target architecture. Must be defined for correct setting of
# the --cpu flag. Note that Xcode will set multiple values in
# ARCHS when building for a Generic Device.
archs = os.environ.get('ARCHS')
if not archs:
_PrintXcodeError('Tulsi requires env variable ARCHS to be '
'set. Please file a bug against Tulsi.')
sys.exit(1)
self.arch = archs.split()[-1]
# Path into which generated artifacts should be copied.
self.built_products_dir = os.environ['BUILT_PRODUCTS_DIR']
# Path where Xcode expects generated sources to be placed.
self.derived_sources_folder_path = os.environ.get('DERIVED_SOURCES_DIR')
# Full name of the target artifact (e.g., "MyApp.app" or "Test.xctest").
self.full_product_name = os.environ['FULL_PRODUCT_NAME']
# Whether to generate runfiles for this target.
self.gen_runfiles = os.environ.get('GENERATE_RUNFILES')
# Target SDK version.
self.sdk_version = os.environ.get('SDK_VERSION')
# TEST_HOST for unit tests.
self.test_host_binary = os.environ.get('TEST_HOST')
# Whether this target is a test or not.
self.is_test = os.environ.get('WRAPPER_EXTENSION') == 'xctest'
# Target platform.
self.platform_name = os.environ['PLATFORM_NAME']
# Type of the target artifact.
self.product_type = os.environ['PRODUCT_TYPE']
# Path to the parent of the xcodeproj bundle.
self.project_dir = os.environ['PROJECT_DIR']
# Path to the xcodeproj bundle.
self.project_file_path = os.environ['PROJECT_FILE_PATH']
# Path to the directory containing the WORKSPACE file.
self.workspace_root = os.path.abspath(os.environ['TULSI_WR'])
# Set to the name of the generated bundle for bundle-type targets, None for
# single file targets (like static libraries).
self.wrapper_name = os.environ.get('WRAPPER_NAME')
self.wrapper_suffix = os.environ.get('WRAPPER_SUFFIX', '')
# Path where Xcode expects the artifacts to be written to. This is not the
# codesigning_path as device vs simulator builds have different signing
# requirements, so Xcode expects different paths to be signed. This is
# mostly apparent on XCUITests where simulator builds set the codesigning
# path to be the .xctest bundle, but for device builds it is actually the
# UI runner app (since it needs to be codesigned to run on the device.) The
# FULL_PRODUCT_NAME variable is a stable path on where to put the expected
# artifacts. For static libraries (objc_library, swift_library),
# FULL_PRODUCT_NAME corresponds to the .a file name, which coincides with
# the expected location for a single artifact output.
# TODO(b/35811023): Check these paths are still valid.
self.artifact_output_path = os.path.join(
os.environ['TARGET_BUILD_DIR'],
os.environ['FULL_PRODUCT_NAME'])
# Path to where Xcode expects the binary to be placed.
self.binary_path = os.path.join(
os.environ['TARGET_BUILD_DIR'], os.environ['EXECUTABLE_PATH'])
self.is_simulator = self.platform_name.endswith('simulator')
# Check to see if code signing actions should be skipped or not.
if self.is_simulator:
self.codesigning_allowed = False
else:
self.codesigning_allowed = os.environ.get('CODE_SIGNING_ALLOWED') == 'YES'
if self.codesigning_allowed:
platform_prefix = 'iOS'
if self.platform_name.startswith('macos'):
platform_prefix = 'macOS'
entitlements_filename = '%sXCTRunner.entitlements' % platform_prefix
self.runner_entitlements_template = os.path.join(self.project_file_path,
'.tulsi',
'Resources',
entitlements_filename)
self.bazel_executable = None
def Run(self, args):
"""Executes a Bazel build based on the environment and given arguments."""
if self.xcode_action != 'build':
sys.stderr.write('Xcode action is %s, ignoring.' % self.xcode_action)
return 0
parser = _OptionsParser(self.build_settings,
self.sdk_version,
self.platform_name,
self.arch)
timer = Timer('Parsing options', 'parsing_options').Start()
message, exit_code = parser.ParseOptions(args[1:])
timer.End()
if exit_code:
_PrintXcodeError('Option parsing failed: %s' % message)
return exit_code
self.verbose = parser.verbose
self.bazel_bin_path = os.path.abspath(parser.bazel_bin_path)
self.bazel_executable = parser.bazel_executable
self.bazel_exec_root = self.build_settings.bazelExecRoot
# Update feature flags.
features = parser.GetEnabledFeatures()
self.direct_debug_prefix_map = 'DirectDebugPrefixMap' in features
self.normalized_prefix_map = 'DebugPathNormalization' in features
self.build_path = os.path.join(self.bazel_bin_path,
os.environ.get('TULSI_BUILD_PATH', ''))
# The path to the Build Events JSON file includes the pid; the file is
# removed if the build is successful.
filename = '%d_%s' % (os.getpid(), BazelBuildBridge.BUILD_EVENTS_FILE)
self.build_events_file_path = os.path.join(
self.project_file_path,
'.tulsi',
filename)
(command, retval) = self._BuildBazelCommand(parser)
if retval:
return retval
timer = Timer('Running Bazel', 'running_bazel').Start()
exit_code, outputs = self._RunBazelAndPatchOutput(command)
timer.End()
if exit_code:
_Fatal('Bazel build failed with exit code %d. Please check the build '
'log in Report Navigator (⌘9) for more information.'
% exit_code)
return exit_code
post_bazel_timer = Timer('Total Tulsi Post-Bazel time', 'total_post_bazel')
post_bazel_timer.Start()
if not os.path.exists(self.bazel_exec_root):
_Fatal('No Bazel execution root was found at %r. Debugging experience '
'will be compromised. Please report a Tulsi bug.'
% self.bazel_exec_root)
return 404
# This needs to run after `bazel build`, since it depends on the Bazel
# workspace directory
exit_code = self._LinkTulsiWorkspace()
if exit_code:
return exit_code
exit_code, outputs_data = self._ExtractAspectOutputsData(outputs)
if exit_code:
return exit_code
# Generated headers are installed on a thread since we are launching
# a separate process to do so. This gives us clean timings.
install_thread = threading.Thread(
target=self._InstallGeneratedHeaders, args=(outputs,))
install_thread.start()
timer = Timer('Installing artifacts', 'installing_artifacts').Start()
exit_code = self._InstallArtifact(outputs_data)
timer.End()
install_thread.join()
if exit_code:
return exit_code
exit_code, dsym_paths = self._InstallDSYMBundles(
self.built_products_dir, outputs_data)
if exit_code:
return exit_code
if not dsym_paths:
# Clean any bundles from a previous build that can interfere with
# debugging in LLDB.
self._CleanExistingDSYMs()
else:
for path in dsym_paths:
# Starting with Xcode 9.x, a plist based remapping exists for dSYM
# bundles that works with Swift as well as (Obj-)C(++).
#
# This solution also works for Xcode 8.x for (Obj-)C(++) but not
# for Swift.
timer = Timer('Adding remappings as plists to dSYM',
'plist_dsym').Start()
exit_code = self._PlistdSYMPaths(path)
timer.End()
if exit_code:
_PrintXcodeError('Remapping dSYMs process returned %i, please '
'report a Tulsi bug and attach a full Xcode '
'build log.' % exit_code)
return exit_code
# Starting with Xcode 7.3, XCTests inject several supporting frameworks
# into the test host that need to be signed with the same identity as
# the host itself.
if (self.is_test and not self.platform_name.startswith('macos') and
self.codesigning_allowed):
exit_code = self._ResignTestArtifacts()
if exit_code:
return exit_code
# Starting with Xcode 8, .lldbinit files are honored during Xcode debugging
# sessions. This allows use of the target.source-map field to remap the
# debug symbol paths encoded in the binary to the paths expected by Xcode.
#
# This will not work with dSYM bundles, or a direct -fdebug-prefix-map from
# the Bazel-built locations to Xcode-visible sources.
timer = Timer('Updating .lldbinit', 'updating_lldbinit').Start()
clear_source_map = dsym_paths or self.direct_debug_prefix_map
exit_code = self._UpdateLLDBInit(clear_source_map)
timer.End()
if exit_code:
_PrintXcodeWarning('Updating .lldbinit action failed with code %d' %
exit_code)
post_bazel_timer.End(log_absolute_times=True)
return 0
def _BuildBazelCommand(self, options):
"""Builds up a commandline string suitable for running Bazel."""
configuration = os.environ['CONFIGURATION']
# Treat the special testrunner build config as a Debug compile.
test_runner_config_prefix = '__TulsiTestRunner_'
if configuration.startswith(test_runner_config_prefix):
configuration = configuration[len(test_runner_config_prefix):]
elif os.environ.get('TULSI_TEST_RUNNER_ONLY') == 'YES':
_PrintXcodeError('Building test targets with configuration "%s" is not '
'allowed. Please use the "Test" action or "Build for" > '
'"Testing" instead.' % configuration)
return (None, 1)
if configuration not in _OptionsParser.KNOWN_CONFIGS:
_PrintXcodeError('Unknown build configuration "%s"' % configuration)
return (None, 1)
bazel, start_up, build = options.GetBazelOptions(configuration)
bazel_command = [bazel]
bazel_command.extend(start_up)
bazel_command.append('build')
bazel_command.extend(build)
bazel_command.extend([
# The following flags are used by Tulsi to identify itself and read
# build information from Bazel. They should not affect Bazel analysis
# caching.
'--tool_tag=tulsi:bazel_build',
'--build_event_json_file=%s' % self.build_events_file_path,
'--noexperimental_build_event_json_file_path_conversion',
'--aspects', '@tulsi//:tulsi/tulsi_aspects.bzl%tulsi_outputs_aspect'])
if self.is_test and self.gen_runfiles:
bazel_command.append('--output_groups=+tulsi_outputs')
else:
bazel_command.append('--output_groups=tulsi_outputs,default')
bazel_command.extend(options.targets)
extra_options = bazel_options.BazelOptions(os.environ)
bazel_command.extend(extra_options.bazel_feature_flags())
return (bazel_command, 0)
def _RunBazelAndPatchOutput(self, command):
"""Runs subprocess command, patching output as it's received."""
self._PrintVerbose('Running "%s", patching output for workspace root at '
'"%s" with project path at "%s".' %
(' '.join([pipes.quote(x) for x in command]),
self.workspace_root,
self.project_dir))
# Xcode translates anything that looks like "<path>:<line>:" that is not
# followed by the word "warning" into an error. Bazel warnings and debug
# messages do not fit this scheme and must be patched here.
bazel_warning_line_regex = re.compile(
r'(?:DEBUG|WARNING): ([^:]+:\d+:(?:\d+:)?)\s+(.+)')
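# Illustrative example: a Bazel line such as
#   "WARNING: foo/defs.bzl:12:3: deprecated attribute"
# is rewritten to "foo/defs.bzl:12:3: warning: deprecated attribute" so that
# Xcode does not surface it as an error.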
def PatchBazelWarningStatements(output_line):
match = bazel_warning_line_regex.match(output_line)
if match:
output_line = '%s warning: %s' % (match.group(1), match.group(2))
return output_line
patch_xcode_parsable_line = PatchBazelWarningStatements
if self.workspace_root != self.project_dir:
# Match (likely) filename:line_number: lines.
xcode_parsable_line_regex = re.compile(r'([^/][^:]+):\d+:')
def PatchOutputLine(output_line):
output_line = PatchBazelWarningStatements(output_line)
if xcode_parsable_line_regex.match(output_line):
output_line = '%s/%s' % (self.workspace_root, output_line)
return output_line
patch_xcode_parsable_line = PatchOutputLine
def HandleOutput(output):
for line in output.splitlines():
_logger.log_bazel_message(patch_xcode_parsable_line(line))
def WatcherUpdate(watcher):
"""Processes any new events in the given watcher.
Args:
watcher: a BazelBuildEventsWatcher object.
Returns:
A list of new tulsiout file names seen.
"""
new_events = watcher.check_for_new_events()
new_outputs = []
for build_event in new_events:
if build_event.stderr:
HandleOutput(build_event.stderr)
if build_event.stdout:
HandleOutput(build_event.stdout)
if build_event.files:
outputs = [x for x in build_event.files if x.endswith('.tulsiouts')]
new_outputs.extend(outputs)
return new_outputs
def ReaderThread(file_handle, out_buffer):
out_buffer.append(file_handle.read())
file_handle.close()
# Make sure the BEP JSON file exists and is empty. We do this to prevent
# any sort of race between the watcher, bazel, and the old file contents.
open(self.build_events_file_path, 'w').close()
# Capture the stderr and stdout from Bazel. We only display it if we're
# unable to read any BEP events.
process = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=1)
# Register atexit function to clean up BEP file.
atexit.register(_BEPFileExitCleanup, self.build_events_file_path)
global CLEANUP_BEP_FILE_AT_EXIT
CLEANUP_BEP_FILE_AT_EXIT = True
# Start capturing output from Bazel.
reader_buffer = []
reader_thread = threading.Thread(target=ReaderThread,
args=(process.stdout, reader_buffer))
reader_thread.daemon = True
reader_thread.start()
with io.open(self.build_events_file_path, 'r', -1, 'utf-8', 'ignore'
) as bep_file:
watcher = bazel_build_events.BazelBuildEventsWatcher(bep_file,
_PrintXcodeWarning)
output_locations = []
while process.returncode is None:
output_locations.extend(WatcherUpdate(watcher))
time.sleep(0.1)
process.poll()
output_locations.extend(WatcherUpdate(watcher))
# If BEP JSON parsing failed, we should display the raw stdout and
# stderr from Bazel.
reader_thread.join()
if not watcher.has_read_events():
HandleOutput(reader_buffer[0])
if process.returncode == 0 and not output_locations:
CLEANUP_BEP_FILE_AT_EXIT = False
_PrintXcodeError('Unable to find location of the .tulsiouts file. '
'Please report this as a Tulsi bug, including the '
'contents of %s.' % self.build_events_file_path)
return 1, output_locations
return process.returncode, output_locations
def _ExtractAspectOutputsData(self, output_files):
"""Converts aspect output from paths to json to a list of dictionaries.
Args:
output_files: A list of strings to files representing Bazel aspect output
in UTF-8 JSON format.
Returns:
return_code, [dict]: A tuple with a return code as its first argument and
for its second argument, a list of dictionaries for
each output_file that could be interpreted as valid
JSON, representing the returned Bazel aspect
information.
return_code, None: If an error occurred while converting the list of
files into JSON.
"""
outputs_data = []
for output_file in output_files:
try:
output_data = json.load(open(output_file))
except (ValueError, IOError) as e:
_PrintXcodeError('Failed to load output map "%s". '
'%s' % (output_file, e))
return 600, None
outputs_data.append(output_data)
return 0, outputs_data
def _InstallArtifact(self, outputs_data):
"""Installs Bazel-generated artifacts into the Xcode output directory."""
xcode_artifact_path = self.artifact_output_path
if not outputs_data:
_PrintXcodeError('Failed to load top level output file.')
return 600
primary_output_data = outputs_data[0]
if 'artifact' not in primary_output_data:
_PrintXcodeError(
'Failed to find an output artifact for target %s in output map %r' %
(xcode_artifact_path, primary_output_data))
return 601
primary_artifact = primary_output_data['artifact']
artifact_archive_root = primary_output_data.get('archive_root')
bundle_name = primary_output_data.get('bundle_name')
# The PRODUCT_NAME used by the Xcode project is not trustable as it may be
# modified by the user and, more importantly, may have been modified by
# Tulsi to disambiguate multiple targets with the same name.
self.bazel_product_name = bundle_name
# We need to handle IPAs (from {ios, tvos}_application) differently from
# ZIPs (from the other bundled rules) because they output slightly different
# directory structures.
is_ipa = primary_artifact.endswith('.ipa')
is_zip = primary_artifact.endswith('.zip')
if is_ipa or is_zip:
expected_bundle_name = bundle_name + self.wrapper_suffix
# The directory structure within the IPA is then determined based on
# Bazel's package and/or product type.
if is_ipa:
bundle_subpath = os.path.join('Payload', expected_bundle_name)
else:
# If the artifact is a ZIP, assume that the bundle is the top-level
# directory (this is the way in which Skylark rules package artifacts
# that are not standalone IPAs).
bundle_subpath = expected_bundle_name
# Prefer to copy over files from the archive root instead of unzipping the
# ipa/zip in order to help preserve timestamps. Note that the archive root
# is only present for local builds; for remote builds we must extract from
# the zip file.
if self._IsValidArtifactArchiveRoot(artifact_archive_root, bundle_name):
source_location = os.path.join(artifact_archive_root, bundle_subpath)
exit_code = self._RsyncBundle(os.path.basename(primary_artifact),
source_location,
xcode_artifact_path)
else:
exit_code = self._UnpackTarget(primary_artifact,
xcode_artifact_path,
bundle_subpath)
if exit_code:
return exit_code
elif os.path.isfile(primary_artifact):
# Remove the old artifact before copying.
if os.path.isfile(xcode_artifact_path):
try:
os.remove(xcode_artifact_path)
except OSError as e:
_PrintXcodeError('Failed to remove stale output file "%s". '
'%s' % (xcode_artifact_path, e))
return 600
exit_code = self._CopyFile(os.path.basename(primary_artifact),
primary_artifact,
xcode_artifact_path)
if exit_code:
return exit_code
else:
self._RsyncBundle(os.path.basename(primary_artifact),
primary_artifact,
xcode_artifact_path)
# When the rules output a tree artifact, Tulsi will copy the bundle as is
# into the expected Xcode output location. But because they're copied as
# is from the bazel output, they come with bazel's permissions, which are
# read only. Here we set them to write as well, so Xcode can modify the
# bundle too (for example, for codesigning).
chmod_timer = Timer('Modifying permissions of output bundle',
'bundle_chmod').Start()
self._PrintVerbose('Spawning subprocess to add write permissions to '
'copied bundle...')
process = subprocess.Popen(['chmod', '-R', 'uga+w', xcode_artifact_path])
process.wait()
chmod_timer.End()
# No return code check as this is not an essential operation.
self._InstallEmbeddedBundlesIfNecessary(primary_output_data)
return 0
def _IsValidArtifactArchiveRoot(self, archive_root, bundle_name):
"""Returns true if the archive root is valid for use."""
if not archive_root or not os.path.isdir(archive_root):
return False
# The archive root will not be updated for any remote builds, but will be
# valid for local builds. We detect this by using an implementation detail
# of the rules_apple bundler: archives will always be transformed from
# <name>.unprocessed.zip (locally or remotely) to <name>.archive-root.
#
# Thus if the mod time on the archive root is not greater than the mod
# time on the zip, the archive root is not valid. Remote builds
# will end up copying the <name>.unprocessed.zip but not the
# <name>.archive-root, making this a valid temporary solution.
#
# In the future, it would be better to have this handled by the rules;
# until then this should suffice as a work around to improve build times.
unprocessed_zip = os.path.join(os.path.dirname(archive_root),
'%s.unprocessed.zip' % bundle_name)
if not os.path.isfile(unprocessed_zip):
return False
return os.path.getmtime(archive_root) > os.path.getmtime(unprocessed_zip)
def _InstallEmbeddedBundlesIfNecessary(self, output_data):
"""Install embedded bundles next to the current target's output."""
# In order to find and load symbols for the binary installed on device,
# Instruments needs to "see" it in Spotlight index somewhere on the local
# filesystem. This is only needed for on-device instrumentation.
#
# Unfortunately, it does not seem to be possible to detect when a build is
# being made for profiling, thus we can't exclude this step for on-device
# non-profiling builds.
if self.is_simulator or ('embedded_bundles' not in output_data):
return
timer = Timer('Installing embedded bundles',
'installing_embedded_bundles').Start()
for bundle_info in output_data['embedded_bundles']:
bundle_name = bundle_info['bundle_name']
bundle_extension = bundle_info['bundle_extension']
full_name = bundle_name + bundle_extension
output_path = os.path.join(self.built_products_dir, full_name)
# TODO(b/68936732): See if copying just the binary (not the whole bundle)
# is enough to make Instruments work.
if self._IsValidArtifactArchiveRoot(bundle_info['archive_root'],
bundle_name):
source_path = os.path.join(bundle_info['archive_root'], full_name)
self._RsyncBundle(full_name, source_path, output_path)
else:
# Try to find the embedded bundle within the installed main bundle.
bundle_path = self._FindEmbeddedBundleInMain(bundle_name,
bundle_extension)
if bundle_path:
self._RsyncBundle(full_name, bundle_path, output_path)
else:
_PrintXcodeWarning('Could not find bundle %s in main bundle. ' %
(bundle_name + bundle_extension) +
'Device-level Instruments debugging will be '
'disabled for this bundle. Please report a '
'Tulsi bug and attach a full Xcode build log.')
timer.End()
# Maps extensions to anticipated subfolders.
_EMBEDDED_BUNDLE_PATHS = {
'.appex': 'PlugIns',
'.framework': 'Frameworks'
}
def _FindEmbeddedBundleInMain(self, bundle_name, bundle_extension):
"""Retrieves the first embedded bundle found within our main bundle."""
main_bundle = os.environ.get('EXECUTABLE_FOLDER_PATH')
if not main_bundle:
return None
main_bundle_path = os.path.join(self.built_products_dir,
main_bundle)
return self._FindEmbeddedBundle(bundle_name,
bundle_extension,
main_bundle_path)
def _FindEmbeddedBundle(self, bundle_name, bundle_extension, bundle_path):
"""Retrieves the first embedded bundle found within this bundle path."""
embedded_subfolder = self._EMBEDDED_BUNDLE_PATHS.get(bundle_extension)
if not embedded_subfolder:
return None
projected_bundle_path = os.path.join(bundle_path,
embedded_subfolder,
bundle_name + bundle_extension)
if os.path.isdir(projected_bundle_path):
return projected_bundle_path
# For frameworks not in the main app bundle, and possibly other executable
# bundle content in the future, we recurse through every .appex in PlugIns
# to find those frameworks.
#
# This won't support frameworks that could potentially have the same name
# but are different between the app and extensions, but we intentionally
# choose not to handle that case. Xcode's build system only supports
# uniquely named frameworks, and we shouldn't confuse the dynamic loader
# with frameworks that have the same image names but different content.
appex_root_path = os.path.join(bundle_path, 'PlugIns')
if not os.path.isdir(appex_root_path):
return None
# Find each directory within appex_root_path and attempt to find a bundle.
# If one can't be found, return None.
appex_dirs = os.listdir(appex_root_path)
for appex_dir in appex_dirs:
appex_path = os.path.join(appex_root_path, appex_dir)
path = self._FindEmbeddedBundle(bundle_name,
bundle_extension,
appex_path)
if path:
return path
return None
def _InstallGeneratedHeaders(self, outputs):
"""Invokes install_genfiles.py to install generated Bazel files."""
genfiles_timer = Timer('Installing generated headers',
'installing_generated_headers').Start()
# Resolve the path to the install_genfiles.py script.
# It should be in the same directory as this script.
path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'install_genfiles.py')
args = [path, self.bazel_exec_root]
args.extend(outputs)
self._PrintVerbose('Spawning subprocess install_genfiles.py to copy '
'generated files in the background...')
process = subprocess.Popen(args)
process.wait()
genfiles_timer.End()
def _InstallBundle(self, source_path, output_path):
"""Copies the bundle at source_path to output_path."""
if not os.path.isdir(source_path):
return 0, None
if os.path.isdir(output_path):
try:
shutil.rmtree(output_path)
except OSError as e:
_PrintXcodeError('Failed to remove stale bundle "%s". '
'%s' % (output_path, e))
return 700, None
exit_code = self._CopyBundle(os.path.basename(source_path),
source_path,
output_path)
return exit_code, output_path
def _RsyncBundle(self, source_path, full_source_path, output_path):
"""Rsyncs the given bundle to the given expected output path."""
self._PrintVerbose('Rsyncing %s to %s' % (source_path, output_path))
# rsync behavior changes based on presence of a trailing slash.
if not full_source_path.endswith('/'):
full_source_path += '/'
try:
# Use -c to check differences by checksum, -v for verbose,
# and --delete to delete stale files.
# The rest of the flags are the same as -a but without preserving
# timestamps, which is done intentionally so the timestamp will
# only change when the file is changed.
subprocess.check_output(['rsync',
'-vcrlpgoD',
'--delete',
full_source_path,
output_path],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
_PrintXcodeError('Rsync failed. %s' % e)
return 650
return 0
def _CopyBundle(self, source_path, full_source_path, output_path):
"""Copies the given bundle to the given expected output path."""
self._PrintVerbose('Copying %s to %s' % (source_path, output_path))
try:
CopyOnWrite(full_source_path, output_path, tree=True)
except OSError as e:
_PrintXcodeError('Copy failed. %s' % e)
return 650
return 0
def _CopyFile(self, source_path, full_source_path, output_path):
"""Copies the given file to the given expected output path."""
self._PrintVerbose('Copying %s to %s' % (source_path, output_path))
output_path_dir = os.path.dirname(output_path)
if not os.path.exists(output_path_dir):
try:
os.makedirs(output_path_dir)
except OSError as e:
_PrintXcodeError('Failed to create output directory "%s". '
'%s' % (output_path_dir, e))
return 650
try:
CopyOnWrite(full_source_path, output_path)
except OSError as e:
_PrintXcodeError('Copy failed. %s' % e)
return 650
return 0
def _UnpackTarget(self, bundle_path, output_path, bundle_subpath):
"""Unpacks generated bundle into the given expected output path."""
self._PrintVerbose('Unpacking %s to %s' % (bundle_path, output_path))
if not os.path.isfile(bundle_path):
_PrintXcodeError('Generated bundle not found at "%s"' % bundle_path)
return 670
if os.path.isdir(output_path):
try:
shutil.rmtree(output_path)
except OSError as e:
_PrintXcodeError('Failed to remove stale output directory "%s". '
'%s' % (output_path, e))
return 600
# We need to handle IPAs (from {ios, tvos}_application) differently from
# ZIPs (from the other bundled rules) because they output slightly different
# directory structures.
is_ipa = bundle_path.endswith('.ipa')
with zipfile.ZipFile(bundle_path, 'r') as zf:
for item in zf.infolist():
filename = item.filename
# Support directories do not seem to be needed by the debugger and are
# skipped.
basedir = filename.split(os.sep)[0]
if basedir.endswith('Support') or basedir.endswith('Support2'):
continue
if len(filename) < len(bundle_subpath):
continue
attributes = (item.external_attr >> 16) & 0o777
self._PrintVerbose('Extracting %s (%o)' % (filename, attributes),
level=1)
if not filename.startswith(bundle_subpath):
_PrintXcodeWarning('Mismatched extraction path. Bundle content '
'at "%s" expected to have subpath of "%s"' %
(filename, bundle_subpath))
dir_components = self._SplitPathComponents(filename)
# Get the file's path, ignoring the payload components if the archive
# is an IPA.
if is_ipa:
subpath = os.path.join(*dir_components[2:])
else:
subpath = os.path.join(*dir_components[1:])
target_path = os.path.join(output_path, subpath)
# Ensure the target directory exists.
try:
target_dir = os.path.dirname(target_path)
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
except OSError as e:
_PrintXcodeError(
'Failed to create target path "%s" during extraction. %s' % (
target_path, e))
return 671
# If the archive item looks like a file, extract it.
if not filename.endswith(os.sep):
with zf.open(item) as src, open(target_path, 'wb') as dst:
shutil.copyfileobj(src, dst)
# Patch up the extracted file's attributes to match the zip content.
if attributes:
os.chmod(target_path, attributes)
return 0
def _InstallDSYMBundles(self, output_dir, outputs_data):
"""Copies any generated dSYM bundles to the given directory."""
# Indicates that our aspect reports a dSYM was generated for this build.
has_dsym = outputs_data[0]['has_dsym']
if not has_dsym:
return 0, None
# Start the timer now that we know we have dSYM bundles to install.
timer = Timer('Installing DSYM bundles', 'installing_dsym').Start()
# Declares the Xcode-generated name of our main target's dSYM.
# This environment variable is always set, for any possible Xcode output
# that could generate a dSYM bundle.
target_dsym = os.environ.get('DWARF_DSYM_FILE_NAME')
if target_dsym:
dsym_to_process = set([(self.build_path, target_dsym)])
# Collect additional dSYM bundles generated by the dependencies of this
# build such as extensions or frameworks.
child_dsyms = set()
for data in outputs_data:
for bundle_info in data.get('embedded_bundles', []):
if not bundle_info['has_dsym']:
continue
# Uses the parent of archive_root to find dSYM bundles associated with
# app/extension/df bundles. Currently hinges on implementation of the
# build rules.
dsym_path = os.path.dirname(bundle_info['archive_root'])
bundle_full_name = (bundle_info['bundle_name'] +
bundle_info['bundle_extension'])
dsym_filename = '%s.dSYM' % bundle_full_name
child_dsyms.add((dsym_path, dsym_filename))
dsym_to_process.update(child_dsyms)
dsyms_found = []
for dsym_path, dsym_filename in dsym_to_process:
input_dsym_full_path = os.path.join(dsym_path, dsym_filename)
output_full_path = os.path.join(output_dir, dsym_filename)
exit_code, path = self._InstallBundle(input_dsym_full_path,
output_full_path)
if exit_code:
_PrintXcodeWarning('Failed to install dSYM "%s" (%s)'
% (dsym_filename, exit_code))
elif path is None:
_PrintXcodeWarning('Could not find a dSYM bundle named "%s"'
% dsym_filename)
else:
dsyms_found.append(path)
timer.End()
return 0, dsyms_found
def _ResignBundle(self, bundle_path, signing_identity, entitlements=None):
"""Re-signs the bundle with the given signing identity and entitlements."""
if not self.codesigning_allowed:
return 0
timer = Timer('\tSigning ' + bundle_path, 'signing_bundle').Start()
command = [
'xcrun',
'codesign',
'-f',
'--timestamp=none',
'-s',
signing_identity,
]
if entitlements:
command.extend(['--entitlements', entitlements])
else:
command.append('--preserve-metadata=entitlements')
command.append(bundle_path)
returncode, output = self._RunSubprocess(command)
timer.End()
if returncode:
_PrintXcodeError('Re-sign command %r failed. %s' % (command, output))
return 800 + returncode
return 0
def _ResignTestArtifacts(self):
"""Resign test related artifacts that Xcode injected into the outputs."""
if not self.is_test:
return 0
# Extract the signing identity from the bundle at the expected output path
# since that's where the signed bundle from bazel was placed.
signing_identity = self._ExtractSigningIdentity(self.artifact_output_path)
if not signing_identity:
return 800
exit_code = 0
timer = Timer('Re-signing injected test host artifacts',
'resigning_test_host').Start()
if self.test_host_binary:
# For Unit tests, we need to resign the frameworks that Xcode injected
# into the test host bundle.
test_host_bundle = os.path.dirname(self.test_host_binary)
exit_code = self._ResignXcodeTestFrameworks(
test_host_bundle, signing_identity)
else:
# For UI tests, we need to resign the UI test runner app and the
# frameworks that Xcode injected into the runner app. The UI Runner app
# also needs to be signed with entitlements.
exit_code = self._ResignXcodeTestFrameworks(
self.codesigning_folder_path, signing_identity)
if exit_code == 0:
entitlements_path = self._InstantiateUIRunnerEntitlements()
if entitlements_path:
exit_code = self._ResignBundle(
self.codesigning_folder_path,
signing_identity,
entitlements_path)
else:
_PrintXcodeError('Could not instantiate UI runner entitlements.')
exit_code = 800
timer.End()
return exit_code
def _ResignXcodeTestFrameworks(self, bundle, signing_identity):
"""Re-signs the support frameworks injected by Xcode in the given bundle."""
if not self.codesigning_allowed:
return 0
for framework in XCODE_INJECTED_FRAMEWORKS:
framework_path = os.path.join(
bundle, 'Frameworks', framework)
if os.path.isdir(framework_path) or os.path.isfile(framework_path):
exit_code = self._ResignBundle(framework_path, signing_identity)
if exit_code != 0:
return exit_code
return 0
def _InstantiateUIRunnerEntitlements(self):
"""Substitute team and bundle identifiers into UI runner entitlements.
This method throws an IOError exception if the template wasn't found in
its expected location, or an OSError if the expected output folder could
not be created.
Returns:
The path to where the entitlements file was generated.
"""
if not self.codesigning_allowed:
return None
if not os.path.exists(self.derived_sources_folder_path):
os.makedirs(self.derived_sources_folder_path)
output_file = os.path.join(
self.derived_sources_folder_path,
self.bazel_product_name + '_UIRunner.entitlements')
if os.path.exists(output_file):
os.remove(output_file)
with open(self.runner_entitlements_template, 'r') as template:
contents = template.read()
contents = contents.replace(
'$(TeamIdentifier)',
self._ExtractSigningTeamIdentifier(self.artifact_output_path))
contents = contents.replace(
'$(BundleIdentifier)',
self._ExtractSigningBundleIdentifier(self.artifact_output_path))
with open(output_file, 'w') as output:
output.write(contents)
return output_file
def _ExtractSigningIdentity(self, signed_bundle):
"""Returns the identity used to sign the given bundle path."""
return self._ExtractSigningAttribute(signed_bundle, 'Authority')
def _ExtractSigningTeamIdentifier(self, signed_bundle):
"""Returns the team identifier used to sign the given bundle path."""
return self._ExtractSigningAttribute(signed_bundle, 'TeamIdentifier')
def _ExtractSigningBundleIdentifier(self, signed_bundle):
"""Returns the bundle identifier used to sign the given bundle path."""
return self._ExtractSigningAttribute(signed_bundle, 'Identifier')
def _ExtractSigningAttribute(self, signed_bundle, attribute):
"""Returns the attribute used to sign the given bundle path."""
if not self.codesigning_allowed:
return '<CODE_SIGNING_ALLOWED=NO>'
cached = self.codesign_attributes.get(signed_bundle)
if cached:
return cached.Get(attribute)
timer = Timer('\tExtracting signature for ' + signed_bundle,
'extracting_signature').Start()
output = subprocess.check_output(['xcrun',
'codesign',
'-dvv',
signed_bundle],
stderr=subprocess.STDOUT)
timer.End()
bundle_attributes = CodesignBundleAttributes(output)
self.codesign_attributes[signed_bundle] = bundle_attributes
return bundle_attributes.Get(attribute)
def _UpdateLLDBInit(self, clear_source_map=False):
"""Updates ~/.lldbinit-tulsiproj to enable debugging of Bazel binaries."""
# Make sure a reference to ~/.lldbinit-tulsiproj exists in ~/.lldbinit or
# ~/.lldbinit-Xcode. Priority is given to ~/.lldbinit-Xcode if it exists,
# otherwise the bootstrapping will be written to ~/.lldbinit.
BootstrapLLDBInit()
with open(TULSI_LLDBINIT_FILE, 'w') as out:
out.write('# This file is autogenerated by Tulsi and should not be '
'edited.\n')
if clear_source_map:
out.write('settings clear target.source-map\n')
return 0
if self.normalized_prefix_map:
source_map = ('./', self._NormalizePath(self.workspace_root))
out.write('# This maps the normalized root to that used by '
'%r.\n' % os.path.basename(self.project_file_path))
else:
# NOTE: settings target.source-map is different from
# DBGSourcePathRemapping; the former is an LLDB target-level
# remapping API that rewrites breakpoints, the latter is an LLDB
# module-level remapping API that changes DWARF debug info in memory.
#
# If we had multiple remappings, it would not make sense for the
# two APIs to share the same mappings. They have very different
# side-effects in how they individually handle debug information.
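# Illustrative example of the generated line (paths are made up):
#   settings set target.source-map "/private/var/tmp/_bazel/execroot/ws/" "/Users/dev/MyWorkspace/"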
source_map = self._ExtractTargetSourceMap()
out.write('# This maps Bazel\'s execution root to that used by '
'%r.\n' % os.path.basename(self.project_file_path))
out.write('settings set target.source-map "%s" "%s"\n' % source_map)
return 0
def _DWARFdSYMBinaries(self, dsym_bundle_path):
"""Returns an array of abs paths to DWARF binaries in the dSYM bundle.
Args:
dsym_bundle_path: absolute path to the dSYM bundle.
Returns:
str[]: a list of strings representing the absolute paths to each binary
found within the dSYM bundle.
"""
dwarf_dir = os.path.join(dsym_bundle_path,
'Contents',
'Resources',
'DWARF')
dsym_binaries = []
for f in os.listdir(dwarf_dir):
# Ignore hidden files, such as .DS_Store files.
if not f.startswith('.'):
# Append full path info.
dsym_binary = os.path.join(dwarf_dir, f)
dsym_binaries.append(dsym_binary)
return dsym_binaries
def _UUIDInfoForBinary(self, source_binary_path):
"""Returns exit code of dwarfdump along with every UUID + arch found.
Args:
source_binary_path: absolute path to the binary file.
Returns:
(Int, str[(str, str)]): a tuple containing the return code of dwarfdump
as its first element, and a list of strings
representing each UUID found for each given
binary slice found within the binary with its
given architecture, if no error has occurred.
"""
returncode, output = self._RunSubprocess([
'xcrun',
'dwarfdump',
'--uuid',
source_binary_path
])
if returncode:
_PrintXcodeWarning('dwarfdump returned %d while finding the UUID for %s'
% (returncode, source_binary_path))
return (returncode, [])
# All UUIDs for binary slices will be returned as the second from left,
# from output; "UUID: D4DE5AA2-79EE-36FE-980C-755AED318308 (x86_64)
# /Applications/Calendar.app/Contents/MacOS/Calendar"
uuids_found = []
for dwarfdump_output in output.split('\n'):
if not dwarfdump_output:
continue
found_output = re.match(r'^(?:UUID: )([^ ]+) \(([^)]+)', dwarfdump_output)
if not found_output:
continue
found_uuid = found_output.group(1)
if not found_uuid:
continue
found_arch = found_output.group(2)
if not found_arch:
continue
uuids_found.append((found_uuid, found_arch))
return (0, uuids_found)
def _CreateUUIDPlist(self, dsym_bundle_path, uuid, arch, source_maps):
"""Creates a UUID.plist in a dSYM bundle to redirect sources.
Args:
dsym_bundle_path: absolute path to the dSYM bundle.
uuid: string representing the UUID of the binary slice with paths to
remap in the dSYM bundle.
arch: the architecture of the binary slice.
source_maps: list of tuples representing all absolute paths to source
files compiled by Bazel as strings ($0) associated with the
paths to Xcode-visible sources used for the purposes of
Tulsi debugging as strings ($1).
Returns:
Bool: True if no error was found, or False, representing a failure to
write when creating the plist.
"""
# Create a UUID plist at (dsym_bundle_path)/Contents/Resources/.
remap_plist = os.path.join(dsym_bundle_path,
'Contents',
'Resources',
'%s.plist' % uuid)
# Via an XML plist, add the mappings from _ExtractTargetSourceMap().
try:
with open(remap_plist, 'w') as out:
out.write('<?xml version="1.0" encoding="UTF-8"?>\n'
'<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" '
'"http://www.apple.com/DTDs/PropertyList-1.0.dtd">\n'
'<plist version="1.0">\n'
'<dict>\n'
'<key>DBGSourcePathRemapping</key>\n'
'<dict>\n')
for source_map in source_maps:
# Add the mapping as a DBGSourcePathRemapping to the UUID plist here.
out.write('<key>%s</key>\n<string>%s</string>\n' % source_map)
# Make sure that we also set DBGVersion to 3.
out.write('</dict>\n'
'<key>DBGVersion</key>\n'
'<string>3</string>\n'
'</dict>\n'
'</plist>\n')
except OSError as e:
_PrintXcodeError('Failed to write %s, received error %s' %
(remap_plist, e))
return False
# Update the dSYM symbol cache with a reference to this dSYM bundle.
err_msg = self.update_symbol_cache.UpdateUUID(uuid,
dsym_bundle_path,
arch)
if err_msg:
_PrintXcodeWarning('Attempted to save (uuid, dsym_bundle_path, arch) '
'to DBGShellCommands\' dSYM cache, but got error '
'\"%s\".' % err_msg)
return True
def _CleanExistingDSYMs(self):
"""Clean dSYM bundles that were left over from a previous build."""
output_dir = self.built_products_dir
output_dir_list = os.listdir(output_dir)
for item in output_dir_list:
if item.endswith('.dSYM'):
shutil.rmtree(os.path.join(output_dir, item))
def _PlistdSYMPaths(self, dsym_bundle_path):
"""Adds Plists to a given dSYM bundle to redirect DWARF data."""
# Retrieve the paths that we are expected to remap.
# Always include a direct path from the execroot to Xcode-visible sources.
source_maps = [self._ExtractTargetSourceMap()]
# Remap relative paths from the workspace root.
if self.normalized_prefix_map:
# Take the normalized path and map that to Xcode-visible sources.
source_maps.append(('./', self._NormalizePath(self.workspace_root)))
# Find the binaries within the dSYM bundle. UUIDs will match that of the
# binary it was based on.
dsym_binaries = self._DWARFdSYMBinaries(dsym_bundle_path)
if not dsym_binaries:
_PrintXcodeWarning('Could not find the binaries that the dSYM %s was '
'based on to determine DWARF binary slices to patch. '
'Debugging will probably fail.' % (dsym_bundle_path))
return 404
# Find the binary slice UUIDs with dwarfdump from each binary.
for source_binary_path in dsym_binaries:
returncode, uuid_info_found = self._UUIDInfoForBinary(source_binary_path)
if returncode:
return returncode
# Create a plist per UUID, each indicating a binary slice to remap paths.
for uuid, arch in uuid_info_found:
plist_created = self._CreateUUIDPlist(dsym_bundle_path,
uuid,
arch,
source_maps)
if not plist_created:
return 405
return 0
def _NormalizePath(self, path):
"""Returns paths with a common form, normalized with a trailing slash.
Args:
path: a file system path given in the form of a string.
Returns:
str: a normalized string with a trailing slash, based on |path|.
"""
return os.path.normpath(path) + os.sep
def _ExtractTargetSourceMap(self, normalize=True):
"""Extracts the source path as a tuple associated with the WORKSPACE path.
Args:
normalize: Defines if all paths should be normalized. Preferred for APIs
like DBGSourcePathRemapping and target.source-map but won't
work for the purposes of -fdebug-prefix-map.
Returns:
None: if an error occurred.
(str, str): a single tuple representing all absolute paths to source
files compiled by Bazel as strings ($0) associated with
the paths to Xcode-visible sources used for the purposes
of Tulsi debugging as strings ($1).
"""
# All paths route to the "workspace root" for sources visible from Xcode.
sm_destpath = self.workspace_root
if normalize:
sm_destpath = self._NormalizePath(sm_destpath)
# Add a redirection for the Bazel execution root, the path where sources
# are referenced by Bazel.
sm_execroot = self.bazel_exec_root
if normalize:
sm_execroot = self._NormalizePath(sm_execroot)
return (sm_execroot, sm_destpath)
def _LinkTulsiWorkspace(self):
"""Links the Bazel Workspace to the Tulsi Workspace (`tulsi-workspace`)."""
tulsi_workspace = os.path.join(self.project_file_path,
'.tulsi',
'tulsi-workspace')
if os.path.islink(tulsi_workspace):
os.unlink(tulsi_workspace)
os.symlink(self.bazel_exec_root, tulsi_workspace)
if not os.path.exists(tulsi_workspace):
_PrintXcodeError(
'Linking Tulsi Workspace to %s failed.' % tulsi_workspace)
return -1
@staticmethod
def _SplitPathComponents(path):
"""Splits the given path into an array of all of its components."""
components = path.split(os.sep)
# Patch up the first component if path started with an os.sep
if not components[0]:
components[0] = os.sep
return components
def _RunSubprocess(self, cmd):
"""Runs the given command as a subprocess, returning (exit_code, output)."""
self._PrintVerbose('%r' % cmd, 1)
process = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output, _ = process.communicate()
return (process.returncode, output)
def _PrintVerbose(self, msg, level=0):
if self.verbose > level:
_PrintUnbuffered(msg)
def main(argv):
build_settings = bazel_build_settings.BUILD_SETTINGS
if build_settings is None:
_Fatal('Unable to resolve build settings. Please report a Tulsi bug.')
return 1
return BazelBuildBridge(build_settings).Run(argv)
if __name__ == '__main__':
_LockFileAcquire(_LockFileCreate())
_logger = tulsi_logging.Logger()
logger_warning = tulsi_logging.validity_check()
if logger_warning:
_PrintXcodeWarning(logger_warning)
_timer = Timer('Everything', 'complete_build').Start()
signal.signal(signal.SIGINT, _InterruptHandler)
_exit_code = main(sys.argv)
_timer.End()
sys.exit(_exit_code)
|
main.py
|
from pymodbus.client.sync import ModbusTcpClient
import sys
import time
from time import gmtime, strftime
import signal
import threading
import pandas as pd
import logging
logging.basicConfig()
log = logging.getLogger()
log.setLevel(logging.INFO)
class ProcessControlMonitor(object):
def __init__(self):
e = Engine()
# IP, data_address, slaveid, type, name
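# type is either "input_register" (a 16-bit register read with
# read_input_registers) or "coil" (a single boolean read with read_coils),
# matching the dispatch in Engine.run().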
# PLC1
e.addInstrument("10.6.0.1", 0, 1, "input_register", "reservoirsensor")
e.addInstrument("10.6.0.1", 0, 1, "coil", "pump1sensor")
e.addInstrument("10.6.0.1", 1, 1, "coil", "valve1sensor")
# PLC2
e.addInstrument("10.6.0.2", 0, 3, "coil", "valve2sensor")
e.addInstrument("10.6.0.2", 1, 3, "coil", "pump2sensor")
# PLC3
e.addInstrument("10.7.0.3", 0, 4, "coil", "valve3sensor")
e.addInstrument("10.7.0.3", 0, 4, "coil", "pump3sensor")
e.addInstrument("10.7.0.3", 0, 4, "input_register", "municipaltanksensor")
# # PLC4
e.addInstrument("10.7.0.4", 0, 2, "input_register", "chlorinetanksensor")
e.addInstrument("10.7.0.4", 0, 2, "coil", "chlorinevalvesensor")
e.addInstrument("10.7.0.4", 1, 2, "coil", "chlorinepumpsensor")
e.addInstrument("10.7.0.4", 1, 2, "input_register", "chlorinatorsensor")
e.start()
# Poll the Engine every 0.5s to get the latest statuses.
while True:
time.sleep(0.5)
for instrument in e.getInstruments():
log.info("{} \t {} \t {} \t {}".format(instrument.ip, instrument.data, instrument.name, instrument.last))
print ""
class MeasuredInstrument(object):
def __init__(self, ip, address, unit, type, name):
self.ip = ip
self.address = address
self.unit = unit
self.type = type
self.name = name
self.data = None
self.last = None
def __str__(self):
return "IP: {} Name: {} Value: '{}' Last: {}".format(self.ip, self.name, self.data, self.last)
def setData(self, data):
self.last = strftime("%Y-%m-%d %H:%M:%S", gmtime())
self.data = data
class Engine(object):
def __init__(self, tick = 1 ):
signal.signal(signal.SIGINT , self.sigHandler)
signal.signal(signal.SIGHUP , self.sigHandler)
self.running = True
self.measured_instruments = []
self.tick = tick
self.measurements = []
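# Each entry appended to self.measurements is a tuple of
# (unix timestamp, instrument IP, instrument name, latency in ms,
#  status: 1 = successful read, -2 = connection retry); see run() and flush().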
def sigHandler(self, num, frame):
self.stop()
def start(self):
for measured_instrument in self.measured_instruments:
engine_thread = threading.Thread(target=self.run, args=(measured_instrument,))
engine_thread.daemon = True
engine_thread.start()
# Every 30s write out a trace file.
write_thread = threading.Thread(target=self.writeMeasurements)
write_thread.daemon = True
write_thread.start()
def run(self, measured_instrument):
while self.running:
c = ModbusTcpClient(measured_instrument.ip)
start = time.time()
while not c.connect():
log.debug("Can't connect, retry in 0.5s")
self.measurements.append((time.time(), measured_instrument.ip, measured_instrument.name, ((time.time() - start)*1000), -2))
time.sleep(0.5)
if measured_instrument.type == "input_register":
response = c.read_input_registers(measured_instrument.address, 1, unit=measured_instrument.unit)
measured_instrument.setData(response.registers[0])
if measured_instrument.type == "coil":
response = c.read_coils(measured_instrument.address, 1, unit=measured_instrument.unit)
measured_instrument.setData(response.bits[0])
c.close()
self.measurements.append((time.time(), measured_instrument.ip, measured_instrument.name, ((time.time() - start)*1000), 1))
log.debug("Tick: {}".format(self.tick))
time.sleep(self.tick)
def stop(self):
self.running = False
log.info("Shutting down....")
time.sleep(20) # Wait for all connections
self.flush()
# TODO: Close any modbus connections.
sys.exit(0)
def addInstrument(self, ip, address, unit, type, name):
self.measured_instruments.append(MeasuredInstrument(ip, address, unit, type, name))
def getInstruments(self):
return self.measured_instruments
def flush(self):
pd.DataFrame(self.measurements, columns=['datetime', 'ip', 'name', 'latency', 'status']).to_csv("log/measurements-tcp-{}.csv".format(sys.argv[1]))
def writeMeasurements(self):
while self.running:
self.flush()
time.sleep(30)
if __name__ == '__main__':
if len(sys.argv) != 2:
sys.exit("Needs a mode argument e.g. {} [normal|lossy20]".format(sys.argv[0]))
ProcessControlMonitor()
|
main.py
|
# -----------------------------------------------------------
# Keroline Voice Assistant
#
# This module implements the functionality of the assistant
#
# Text_input() or voice_input() is called from app.py; search_command() then
# searches the input for keywords and calls the required method from the
# Commands and Mathematics classes.
#
# The Assistant class holds the parameters needed for some methods to work
# correctly. The Voice class provides speech synthesis and recognition methods.
#
# Author: Valerii Tsekhmaistruk
# GitHub: https://github.com/ValeriiTsekhmaistruk/Keroline-Voice-Assistant
# Email: valeriitseh1305@gmail.com
# -----------------------------------------------------------
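# A minimal sketch of the assumed call flow (app.py and the exact function
# names are not shown in this file and are taken from the description above):
#
#   command = Text_input()            # or voice_input()
#   search_command(command)           # keyword matching dispatches to methods
#                                     # of the Commands / Mathematics classes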
import os
import sys
import webbrowser
import math
import time
import random
import json
import pyttsx3
import threading
import speech_recognition
from collections import namedtuple
import locale
locale.setlocale(locale.LC_ALL, '')
class Assistant:
# Reading assistant parameters from config.json
with open("config.json", "r", encoding='utf-8') as file:
data = json.load(file)
city = data.get('city')
voice = bool(data.get('voice')) # on/off voice
spotify = data.get('spotify')
telegram = data.get('telegram')
browser = data.get('browser')
office = data.get('office')
answers = {
'hi': ('Здравствуй!', 'Приветствую!', 'Здравствуйте!', 'Доброго времени суток!'),
'bye': ('До встречи!', 'Прощайте!', 'До новых встреч!', 'До свидания!'),
'grat': ('Пожалуйста!', 'Всегда пожалуйста!', 'Обращайтесь!', 'Рада помочь!'),
'execution': ('Выполняю!', 'Прошу!', 'Конечно!')
}
answer = ''
class Voice:
# Speech recognition and synthesis
@staticmethod
def say(string):
# Speech synthesis
def engine_start(text):
if Assistant.voice:
engine = pyttsx3.init()
engine.say(text)
engine.runAndWait()
else:
return
start = threading.Thread(target=engine_start, args=[string])
start.start()
@staticmethod
def say_num(string):
# Speech synthesis for int and float
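# e.g. say_num(3.14159) speaks "3,14" and say_num(7.0) speaks "7".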
if string % 1 != 0:
result = round(string, 2)
split_num = str(result).split('.')
Voice.say(f'{split_num[0]},{split_num[1]}')
else:
Voice.say(str(int(string)))
@staticmethod
def listen():
# Capture audio from the microphone and recognize speech
mic = speech_recognition.Microphone()
recognizer = speech_recognition.Recognizer()
with mic:
try:
audio = recognizer.listen(mic, 5, 5)
result = recognizer.recognize_google(audio, language="ru-RU").lower()
return result
except speech_recognition.UnknownValueError:
Assistant.answer = 'Повторите пожалуйста'
Voice.say('Повторите пожалуйста')
return ''
except speech_recognition.RequestError:
Assistant.answer = 'Проверьте подключение к интернету'
Voice.say('Проверьте подключение к интернету')
return ''
except speech_recognition.WaitTimeoutError:
Assistant.answer = 'Проверьте микрофон'
Voice.say('Проверьте микрофон')
return ''
class Commands:
@staticmethod
def communication(text):
# Method for communicating with the user
hi_key_words = ('привет', 'приветствую')
bye_key_words = ('пока', 'прощай')
grat_key_words = ('спасибо', 'благодарю')
info_key_words = ('кто', ' ')
command_key_words = ('делаешь', 'умеешь', 'можешь', 'ты')
if bool(set(hi_key_words) & set(text)):
answer = random.choice(Assistant.answers.get('hi'))
Assistant.answer = answer
Voice.say(answer)
elif bool(set(bye_key_words) & set(text)):
answer = random.choice(Assistant.answers.get('bye'))
Voice.say(answer)
sys.exit()
elif bool(set(grat_key_words) & set(text)):
answer = random.choice(Assistant.answers.get('grat'))
Assistant.answer = answer
Voice.say(answer)
elif bool(set(info_key_words) & set(text)):
Assistant.answer = f'Я Кэролайн, ваш личный голосовой ассистент'
Voice.say(f'Я Кэролайн, ваш личный голосовой ассистент')
elif bool(set(command_key_words) & set(text)):
webbrowser.get().open('https://github.com/ValeriiTsekhmaistruk/Keroline-Voice-Assistant')
Assistant.answer = 'Всю информацию о моих возможностях вы можете найти здесь'
Voice.say('Всю информацию о моих возможностях вы можете найти здесь')
@staticmethod
def search_google(text):
# Google search
if len(text) == 1:
Assistant.answer = 'Что именно вы хотите найти?'
Voice.say('Что именно вы хотите найти?')
return
text.pop(0)
search_query = ' '.join(text)
url = 'https://google.com/search?q=' + search_query
answer = random.choice(Assistant.answers.get('execution'))
Assistant.answer = answer
Voice.say(answer)
webbrowser.get().open(url)
@staticmethod
def say_time(text):
# Date and time voice notification
time_words = ('время', 'времени', 'час', 'часов')
date_words = ('дата', 'дату')
if bool(set(time_words) & set(text)):
Assistant.answer = time.strftime('%H:%M', time.localtime())
Voice.say(time.strftime('%H:%M', time.localtime()))
elif bool(set(date_words) & set(text)):
Assistant.answer = time.strftime('%d.%m.%Y', time.localtime())
            Voice.say(time.strftime('%d.%m.%Y', time.localtime()))
elif 'день' in text:
Assistant.answer = time.strftime('%A', time.localtime())
Voice.say(time.strftime('%A', time.localtime()))
@staticmethod
def get_weather(text):
# Searches google weather forecast
black_list = ('покажи', 'прогноз', 'в', 'погода', 'погоду', 'погоды', 'пожалуйста', 'мне')
clear_text = list(filter(lambda item: item not in black_list, text)) # deleting words that are in black_list
if len(clear_text) == 0 and Assistant.city == '':
Assistant.answer = 'Укажите город'
Voice.say('Укажите город')
elif len(clear_text) == 0:
answer = random.choice(Assistant.answers.get('execution'))
Assistant.answer = answer
Voice.say(answer)
url = 'https://google.com/search?q=' + f'погода в {Assistant.city}'
webbrowser.get().open(url)
else:
answer = random.choice(Assistant.answers.get('execution'))
Assistant.answer = answer
Voice.say(answer)
url = 'https://google.com/search?q=' + f'погода в {clear_text[0]}'
webbrowser.get().open(url)
@staticmethod
def run_app(text):
# run app to path in config.json
black_list = ('открой', 'запусти', 'пожалуйста', 'будь', 'добра')
clear_text = list(filter(lambda item: item not in black_list, text))
if len(clear_text) == 0:
Assistant.answer = 'Укажите приложение'
Voice.say('Укажите приложение')
return
try:
spotify = ('spotify', Assistant.spotify)
telegram = ('telegram', Assistant.telegram)
browser = ('браузер', Assistant.browser)
office = ('офис', Assistant.office)
apps = (spotify, telegram, browser, office)
for app in apps: # search app by keyword
if clear_text[0] == app[0]:
if app[1] == '':
                        Assistant.answer = 'Приложение не установлено'
                        Voice.say('Приложение не установлено')
return
else:
answer = random.choice(Assistant.answers.get('execution'))
Assistant.answer = answer
Voice.say(answer)
os.startfile(app[1])
return
            Assistant.answer = 'Приложение не найдено'
            Voice.say('Приложение не найдено')
except FileNotFoundError:
Assistant.answer = 'Указан неверный путь к приложению'
Voice.say('Указан неверный путь к приложению')
@staticmethod
def coin(x):
# Coin flipping
flip = round(random.random())
if flip == 1:
Assistant.answer = 'Орёл'
Voice.say('Орёл')
else:
Assistant.answer = 'Решка'
Voice.say('Решка')
@staticmethod
def yes_or_no(x):
# Random answer yes or no
rand_answer = round(random.random())
if rand_answer == 1:
Assistant.answer = 'Да'
Voice.say('Да')
else:
Assistant.answer = 'Нет'
Voice.say('Нет')
class Mathematics:
@staticmethod
def simple_math(text):
# Calculation sum, subtraction, multiplication and division
        multi_word = ('умножь', 'умнож', 'умножить', '*', 'х', 'x')
div_word = ('подели', 'раздели', '/')
sum_word = ('плюс', '+')
sub_word = ('минус', '-')
num = list(map(lambda x: x.replace(',', '.'), text))
num = list(map(float, filter(lambda x: x.replace('.', '').isdigit(), num))) # converting str to float
if len(num) == 1:
Assistant.answer = 'Не хватает аргумента'
Voice.say('Не хватает аргумента')
return
if len(num) == 0:
Assistant.answer = 'Укажите аргументы'
Voice.say('Укажите аргументы')
return
result = ''
if bool(set(multi_word) & set(text)):
result = (num[0] * num[1])
elif bool(set(div_word) & set(text)):
if num[1] == 0:
Assistant.answer = 'Делить на 0 нельзя!!!'
Voice.say('Делить на 0 нельзя!')
return
else:
result = (num[0] / num[1])
elif bool(set(sum_word) & set(text)):
result = (num[0] + num[1])
elif bool(set(sub_word) & set(text)):
result = (num[0] - num[1])
if result % 1 == 0:
Assistant.answer = round(result)
else:
Assistant.answer = result
Voice.say_num(result)
@staticmethod
def math_sqrt(text):
# Calculation sqrt
try:
num = list(map(float, filter(lambda x: x.replace('.', '').isdigit(), text)))
result = math.sqrt(num[0])
if result % 1 == 0:
Assistant.answer = round(result)
else:
Assistant.answer = result
Voice.say_num(result)
except IndexError:
Assistant.answer = 'Укажите аргумент'
Voice.say('Укажите аргумент')
return
@staticmethod
def math_exp(text):
# Calculation exp
try:
num = list(map(float, filter(lambda x: x.replace('.', '').isdigit(), text)))
if 'квадрат' in text:
result = (num[0]**2)
elif 'куб' in text:
result = (num[0]**3)
else:
result = (num[0]**num[1])
if result % 1 == 0:
Assistant.answer = round(result)
else:
Assistant.answer = result
Voice.say_num(result)
except IndexError:
Assistant.answer = 'Укажите аргумент'
Voice.say('Укажите аргумент')
return
class KeyWord:
# Keywords and their methods
key_word = namedtuple('key_word', 'word func')
communication = key_word(('привет', 'приветствую', 'пока', 'прощай', 'спасибо', 'благодарю', 'делаешь',
'умеешь', 'можешь', 'кто', 'ты'), Commands.communication)
search_google = key_word(('найди', 'поищи'), Commands.search_google)
say_time = key_word(('время', 'времени', 'час', 'часов', 'дата', 'дату', 'день'), Commands.say_time)
get_weather = key_word(('погода', 'погоду', 'погоды', 'прогноз'), Commands.get_weather)
simple_math = key_word(('+', '-', '*', '/', 'плюс', 'минус', 'умножь', 'умножить',
'подели', 'раздели', 'х', 'x'), Mathematics.simple_math)
    math_sqrt = key_word(('корень',), Mathematics.math_sqrt)
math_exp = key_word(('степень', 'степени', 'квадрат', 'куб'), Mathematics.math_exp)
run_app = key_word(('включи', 'запусти', 'открой'), Commands.run_app)
coin = key_word(('монетка', 'монетку', 'монета', 'монету'), Commands.coin)
yes_or_no = key_word(('да', 'нет'), Commands.yes_or_no)
key_words = (search_google, simple_math, math_sqrt, math_exp, communication, say_time, get_weather, run_app, coin,
yes_or_no)
def search_command(command):
# Keyword search in string and method call
text = command.lower().split(" ")
if text[0] == '':
return
def search(words, i=0):
if i <= len(words) - 1:
if set(words[i][0]) & set(text):
words[i][1](text)
else:
search(words, i+1)
else:
Assistant.answer = 'Неизвестная команда'
Voice.say('Неизвестная команда')
return
search(KeyWord.key_words)
def input_text(user_command):
# Call search_command
if len(user_command.replace(' ', '')) == 0:
return
search_command(user_command)
return Assistant.answer
def input_voice():
# Speech recognize and call search_command
user_command = Voice.listen()
search_command(user_command)
return Assistant.answer
|
contextutil.py
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import logging
import os
import shutil
import signal
import sys
import tempfile
import termios
import threading
import time
import uuid
import zipfile
from contextlib import closing, contextmanager
from queue import Queue
from socketserver import TCPServer
from types import FrameType
from typing import IO, Any, Callable, Iterator, Mapping, Optional, Tuple, Type, Union, cast
from colors import green
from pants.util.dirutil import safe_delete
from pants.util.tarutil import TarFile
class InvalidZipPath(ValueError):
"""Indicates a bad zip file path."""
@contextmanager
def environment_as(**kwargs: Optional[str]) -> Iterator[None]:
"""Update the environment to the supplied values, for example:
with environment_as(PYTHONPATH='foo:bar:baz',
PYTHON='/usr/bin/python2.7'):
subprocess.Popen(foo).wait()
"""
new_environment = kwargs
old_environment = {}
def setenv(key: str, val: Optional[str]) -> None:
if val is not None:
os.environ[key] = val
else:
if key in os.environ:
del os.environ[key]
for key, val in new_environment.items():
old_environment[key] = os.environ.get(key)
setenv(key, val)
try:
yield
finally:
for key, val in old_environment.items():
setenv(key, val)
def _purge_env() -> None:
# N.B. Without the use of `del` here (which calls `os.unsetenv` under the hood), subprocess
# invokes or other things that may access the environment at the C level may not see the
# correct env vars (i.e. we can't just replace os.environ with an empty dict).
# See https://docs.python.org/3/library/os.html#os.unsetenv for more info.
#
# Wraps iterable in list() to make a copy and avoid issues with deleting while iterating.
for k in list(os.environ.keys()):
del os.environ[k]
def _restore_env(env: Mapping[str, str]) -> None:
for k, v in env.items():
os.environ[k] = v
@contextmanager
def hermetic_environment_as(**kwargs: Optional[str]) -> Iterator[None]:
"""Set the environment to the supplied values from an empty state."""
old_environment = os.environ.copy()
_purge_env()
try:
with environment_as(**kwargs):
yield
finally:
_purge_env()
_restore_env(old_environment)
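# A small usage sketch (not part of the original module): inside the block only
# the supplied variables are present; the prior environment is restored on exit.
def _hermetic_environment_as_example() -> None:
    with hermetic_environment_as(PATH="/usr/bin", LANG=None):
        assert set(os.environ) == {"PATH"}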
@contextmanager
def argv_as(args: Tuple[str, ...]) -> Iterator[None]:
"""Temporarily set `sys.argv` to the supplied value."""
old_args = sys.argv
try:
sys.argv = list(args)
yield
finally:
sys.argv = old_args
@contextmanager
def _stdio_stream_as(src_fd: int, dst_fd: int, dst_sys_attribute: str, mode: str) -> Iterator[None]:
"""Replace the given dst_fd and attribute on `sys` with an open handle to the given src_fd."""
if src_fd == -1:
src = open("/dev/null", mode)
src_fd = src.fileno()
# Capture the python and os level file handles.
old_dst = getattr(sys, dst_sys_attribute)
old_dst_fd = os.dup(dst_fd)
if src_fd != dst_fd:
os.dup2(src_fd, dst_fd)
# Open up a new file handle to temporarily replace the python-level io object, then yield.
new_dst = os.fdopen(dst_fd, mode)
is_atty = new_dst.isatty()
setattr(sys, dst_sys_attribute, new_dst)
try:
yield
finally:
try:
if is_atty:
termios.tcdrain(dst_fd)
else:
new_dst.flush()
new_dst.close()
except BaseException:
pass
# Restore the python and os level file handles.
os.dup2(old_dst_fd, dst_fd)
setattr(sys, dst_sys_attribute, old_dst)
@contextmanager
def stdio_as(stdout_fd: int, stderr_fd: int, stdin_fd: int) -> Iterator[None]:
"""Redirect sys.{stdout, stderr, stdin} to alternate file descriptors.
As a special case, if a given destination fd is `-1`, we will replace it with an open file handle
to `/dev/null`.
NB: If the filehandles for sys.{stdout, stderr, stdin} have previously been closed, it's
possible that the OS has repurposed fds `0, 1, 2` to represent other files or sockets. It's
impossible for this method to locate all python objects which refer to those fds, so it's up
to the caller to guarantee that `0, 1, 2` are safe to replace.
The streams expect unicode. To write and read bytes, access their buffer, e.g. `stdin.buffer.read()`.
"""
with _stdio_stream_as(stdin_fd, 0, "stdin", "r"), _stdio_stream_as(
stdout_fd, 1, "stdout", "w"
), _stdio_stream_as(stderr_fd, 2, "stderr", "w"):
yield
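# A minimal usage sketch (not part of the original module): send stdout/stderr
# to /dev/null for a block by passing -1, and point stdin at /dev/null too.
def _stdio_as_example() -> None:
    with stdio_as(stdout_fd=-1, stderr_fd=-1, stdin_fd=-1):
        print("swallowed by /dev/null")
    print("back on the real stdout")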
@contextmanager
def signal_handler_as(
sig: int, handler: Union[int, Callable[[int, FrameType], None]]
) -> Iterator[None]:
"""Temporarily replaces a signal handler for the given signal and restores the old handler.
:param sig: The target signal to replace the handler for (e.g. signal.SIGINT).
:param handler: The new temporary handler.
"""
old_handler = signal.signal(sig, handler)
try:
yield
finally:
signal.signal(sig, old_handler)
@contextmanager
def temporary_dir(
root_dir: Optional[str] = None,
cleanup: bool = True,
suffix: Optional[str] = None,
permissions: Optional[int] = None,
prefix: Optional[str] = tempfile.template,
) -> Iterator[str]:
"""A with-context that creates a temporary directory.
:API: public
You may specify the following keyword args:
:param root_dir: The parent directory to create the temporary directory.
:param cleanup: Whether or not to clean up the temporary directory.
:param permissions: If provided, sets the directory permissions to this mode.
"""
path = tempfile.mkdtemp(dir=root_dir, suffix=suffix, prefix=prefix)
try:
if permissions is not None:
os.chmod(path, permissions)
yield path
finally:
if cleanup:
shutil.rmtree(path, ignore_errors=True)
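# Usage sketch (not part of the original module): scratch space that disappears
# when the block exits because cleanup defaults to True.
def _temporary_dir_example() -> None:
    with temporary_dir() as tmp:
        scratch = os.path.join(tmp, "scratch.txt")
        with open(scratch, "w") as f:
            f.write("hello")
    assert not os.path.exists(scratch)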
@contextmanager
def temporary_file_path(
root_dir: Optional[str] = None,
cleanup: bool = True,
suffix: Optional[str] = None,
permissions: Optional[int] = None,
) -> Iterator[str]:
"""A with-context that creates a temporary file and returns its path.
:API: public
You may specify the following keyword args:
:param root_dir: The parent directory to create the temporary file.
:param cleanup: Whether or not to clean up the temporary file.
"""
with temporary_file(root_dir, cleanup=cleanup, suffix=suffix, permissions=permissions) as fd:
fd.close()
yield fd.name
@contextmanager
def temporary_file(
root_dir: Optional[str] = None,
cleanup: bool = True,
suffix: Optional[str] = None,
permissions: Optional[int] = None,
binary_mode: bool = True,
) -> Iterator[IO]:
"""A with-context that creates a temporary file and returns a writeable file descriptor to it.
You may specify the following keyword args:
:param root_dir: The parent directory to create the temporary file.
:param cleanup: Whether or not to clean up the temporary file.
:param suffix: If suffix is specified, the file name will end with that suffix.
Otherwise there will be no suffix.
mkstemp() does not put a dot between the file name and the suffix;
if you need one, put it at the beginning of suffix.
See :py:class:`tempfile.NamedTemporaryFile`.
:param permissions: If provided, sets the file to use these permissions.
:param binary_mode: Whether file opens in binary or text mode.
"""
mode = "w+b" if binary_mode else "w+" # tempfile's default is 'w+b'
with tempfile.NamedTemporaryFile(suffix=suffix, dir=root_dir, delete=False, mode=mode) as fd:
try:
if permissions is not None:
os.chmod(fd.name, permissions)
yield fd
finally:
if cleanup:
safe_delete(fd.name)
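# Usage sketch (not part of the original module): write text via the handle,
# close it, and re-open by name before the context deletes the file.
def _temporary_file_example() -> None:
    with temporary_file(binary_mode=False, suffix=".txt") as fp:
        fp.write("scratch data")
        fp.close()
        with open(fp.name) as reread:
            assert reread.read() == "scratch data"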
@contextmanager
def safe_file(path: str, suffix: Optional[str] = None, cleanup: bool = True) -> Iterator[str]:
"""A with-context that copies a file, and copies the copy back to the original file on success.
This is useful for doing work on a file but only changing its state on success.
:param suffix: Use this suffix to create the copy. Otherwise use a random string.
:param cleanup: Whether or not to clean up the copy.
"""
safe_path = f"{path}.{(suffix or uuid.uuid4())}"
if os.path.exists(path):
shutil.copy(path, safe_path)
try:
yield safe_path
if cleanup:
shutil.move(safe_path, path)
else:
shutil.copy(safe_path, path)
finally:
if cleanup:
safe_delete(safe_path)
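# Usage sketch (not part of the original module): edits land in a working copy
# and are only moved back over the original if the block completes successfully.
def _safe_file_example(path: str) -> None:
    with safe_file(path) as working_copy:
        with open(working_copy, "a") as f:
            f.write("\n# appended safely\n")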
@contextmanager
def pushd(directory: str) -> Iterator[str]:
"""A with-context that encapsulates pushd/popd."""
cwd = os.getcwd()
os.chdir(directory)
try:
yield directory
finally:
os.chdir(cwd)
@contextmanager
def open_zip(path_or_file: Union[str, Any], *args, **kwargs) -> Iterator[zipfile.ZipFile]:
"""A with-context for zip files.
Passes through *args and **kwargs to zipfile.ZipFile.
:API: public
:param path_or_file: Full path to zip file.
:param args: Any extra args accepted by `zipfile.ZipFile`.
:param kwargs: Any extra keyword args accepted by `zipfile.ZipFile`.
:raises: `InvalidZipPath` if path_or_file is invalid.
:raises: `zipfile.BadZipfile` if zipfile.ZipFile cannot open a zip at path_or_file.
"""
if not path_or_file:
raise InvalidZipPath(f"Invalid zip location: {path_or_file}")
if "allowZip64" not in kwargs:
kwargs["allowZip64"] = True
try:
zf = zipfile.ZipFile(path_or_file, *args, **kwargs)
except zipfile.BadZipfile as bze:
# Use the realpath in order to follow symlinks back to the problem source file.
raise zipfile.BadZipfile(f"Bad Zipfile {os.path.realpath(path_or_file)}: {bze}")
try:
yield zf
finally:
zf.close()
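# Usage sketch (not part of the original module): write a member into a fresh
# archive and read it back; allowZip64 is enabled by default above.
def _open_zip_example() -> None:
    with temporary_dir() as tmp:
        archive = os.path.join(tmp, "example.zip")
        with open_zip(archive, "w") as zf:
            zf.writestr("greeting.txt", "hello")
        with open_zip(archive) as zf:
            assert zf.read("greeting.txt") == b"hello"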
@contextmanager
def open_tar(path_or_file: Union[str, Any], *args, **kwargs) -> Iterator[TarFile]:
"""A with-context for tar files. Passes through positional and kwargs to tarfile.open.
If path_or_file is a file, caller must close it separately.
"""
(path, fileobj) = (
(path_or_file, None) if isinstance(path_or_file, str) else (None, path_or_file)
)
kwargs["fileobj"] = fileobj
with closing(TarFile.open(path, *args, **kwargs)) as tar:
# We must cast the normal tarfile.TarFile to our custom pants.util.tarutil.TarFile.
typed_tar = cast(TarFile, tar)
yield typed_tar
class Timer:
"""Very basic with-context to time operations.
Example usage:
>>> from pants.util.contextutil import Timer
>>> with Timer() as timer:
... time.sleep(2)
...
>>> timer.elapsed
2.0020849704742432
"""
def __init__(self, clock=time) -> None:
self._clock = clock
def __enter__(self) -> "Timer":
self.start: float = self._clock.time()
self.finish: Optional[float] = None
return self
@property
def elapsed(self) -> float:
end_time: float = self.finish if self.finish is not None else self._clock.time()
return end_time - self.start
def __exit__(self, typ, val, traceback):
self.finish = self._clock.time()
@contextmanager
def exception_logging(logger: logging.Logger, msg: str) -> Iterator[None]:
"""Provides exception logging via `logger.exception` for a given block of code.
:param logger: The `Logger` instance to use for logging.
:param msg: The message to emit before `logger.exception` emits the traceback.
"""
try:
yield
except Exception:
logger.exception(msg)
raise
@contextmanager
def maybe_profiled(profile_path: Optional[str]) -> Iterator[None]:
"""A profiling context manager.
:param profile_path: The path to write profile information to. If `None`, this will no-op.
"""
if not profile_path:
yield
return
import cProfile
profiler = cProfile.Profile()
try:
profiler.enable()
yield
finally:
profiler.disable()
profiler.dump_stats(profile_path)
view_cmd = green(
"gprof2dot -f pstats {path} | dot -Tpng -o {path}.png && open {path}.png".format(
path=profile_path
)
)
logging.getLogger().info(
f"Dumped profile data to: {profile_path}\nUse e.g. {view_cmd} to render and view."
)
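# Usage sketch (not part of the original module): profile a block only when a
# path is supplied; passing None (the default here) makes it a no-op.
def _maybe_profiled_example(profile_path: Optional[str] = None) -> None:
    with maybe_profiled(profile_path):
        sum(i * i for i in range(10000))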
@contextmanager
def http_server(handler_class: Type) -> Iterator[int]:
def serve(port_queue: "Queue[int]", shutdown_queue: "Queue[bool]") -> None:
httpd = TCPServer(("", 0), handler_class)
httpd.timeout = 0.1
port_queue.put(httpd.server_address[1])
while shutdown_queue.empty():
httpd.handle_request()
port_queue: "Queue[int]" = Queue()
shutdown_queue: "Queue[bool]" = Queue()
t = threading.Thread(target=lambda: serve(port_queue, shutdown_queue))
t.daemon = True
t.start()
try:
yield port_queue.get(block=True)
finally:
shutdown_queue.put(True)
t.join()
|
spam.py
|
import requests
import csv
import random
import datetime
import threading
names = []
with open("data/names.csv", newline='') as f:
for l in csv.reader(f, delimiter=','):
names.append(l[0])
passwords = []
with open("data/passwords.csv", newline='') as f:
for l in csv.reader(f, delimiter=','):
passwords.append(l[0])
user_agents = []
with open("data/user-agents.csv", newline='') as f:
for l in csv.reader(f, delimiter=','):
user_agents.append(l[0])
url = 'https://supportdanmarkstekniskeuniversitet.weebly.com/ajax/apps/formSubmitAjax.php'
cookies = {
'language': 'en',
}
proxies = {
"http": '5.186.155.151:21520',
"http": '78.156.109.221:14003',
"http": '176.20.154.49:10789',
"http": '37.97.23.100:63642',
"http": '176.20.154.49:17613',
"http": '95.154.20.222:26414',
"http": '95.154.20.222:22134',
"http": '37.97.23.100:17398',
"http": '95.154.20.222:33145',
"http": '84.238.33.155:10200',
"http": '5.186.155.151:53135',
"http": '5.186.155.151:22988',
"http": '37.97.23.100:61737',
"http": '95.154.20.222:19173',
"http": '176.20.154.49:47478',
"http": '95.154.20.222:31318',
"http": '176.20.154.49:57368',
"http": '176.20.154.49:52353',
"http": '178.157.228.96:19547',
"http": '176.20.154.49:52678',
"http": '5.103.137.240:1080',
"http": '185.89.43.41:1085',
"http": '93.176.85.240:39309',
"http": '93.164.33.114:41028',
"http": '185.89.42.53:1085',
"http": '5.103.139.93:1080',
"http": '185.89.42.91:1085',
"http": '89.239.212.208:1080',
"http": '45.159.115.62:1080',
"http": '80.63.107.91:4145',
"http": '185.89.43.225:1085',
"http": '45.159.115.60:1080',
"http": '185.89.42.102:1085',
"http": '77.68.237.158:47566',
"http": '93.163.52.152:4145',
"http": '87.60.31.9:4145',
"http": '93.167.67.69:4145',
"http": '185.89.42.47:1085',
}
def chill():
counter = 0
while True:
counter+=1
f_name = random.choice(names)
# Generate random ID
s_id = "s" + random.choice(["19", "20", "21"]) + str(random.randrange(1000,9999))
# Generate random birthday date
start_date = datetime.date(1975, 5, 4)
end_date = datetime.date(2001, 8, 6)
time_between_dates = end_date - start_date
days_between_dates = time_between_dates.days
random_number_of_days = random.randrange(days_between_dates)
birthday = start_date + datetime.timedelta(days=random_number_of_days)
#Generate random email
email = f'{f_name.lower()}{random.randrange(0, 99)}@{random.choice(["gmail.com", "outlook.com", "hotmail.com", "live.com", "sapo.pt", "connectdenmark.com", "forum.dk", "jubiipost.dk", "yahoo.com"])}'
email_dtu = s_id + "@student.dtu.dk"
files = {
'_u775833441838287881': (None, email_dtu ),
'_u127872288731110885': (None, random.choice(passwords)),
'_u698974889672718481': (None, email),
'_u187973938768131043': (None, random.choice(passwords)),
'_u979731754128240284': (None, str(birthday)),
'wsite_subject': (None, ''),
'form_version': (None, '2'),
'wsite_approved': (None, 'approved'),
'ucfid': (None, '516641276899099204'),
'recaptcha_token': (None, ''),
}
headers = {
'User-Agent': random.choice(user_agents)
}
response = requests.post(url, headers=headers, cookies=cookies, files=files ,proxies=proxies)
print(counter)
def send_them_to_mars():
NUMBER_OF_THREADS = 50
threads = []
for i in range(NUMBER_OF_THREADS):
t = threading.Thread(target=chill)
t.daemon = True
threads.append(t)
for i in range(NUMBER_OF_THREADS):
threads[i].start()
for i in range(NUMBER_OF_THREADS):
threads[i].join()
chill()
#send_them_to_mars()
|
main_class_based_backup.py
|
#!/usr/bin/python
import time
import threading
import time
import nmap
import multiprocessing
import os
import sys
import ConfigParser
#import mysql.connector
import MySQLdb
import atexit
import IPtable
import texttable as tt
import Simple_Logger
r = '\033[31m' #red
b = '\033[34m' #blue
g = '\033[32m' #green
y = '\033[33m' #yellow
m = '\033[34m' #magenta
c = '\033[36m' #magenta
e = '\033[0m' #end
def test():
print "\n\n\n Exiting Bye Bye !!!"
atexit.register(test)
class NmapScan:
def __init__(self):
self.IP=""
self.PORT=None
self.SWITCH=""
self.CURRENT_PROJECT_ID=""
self.takescan=""
self.N=4
self.Port_Divisior=7500
self.Pause_Flag=False
self.Stop_Flag=False
self.ipcount=0
self.IPtable=IPtable.IPtable()
self.method_id="INIT"
self.Thread_pool=[]
self.retry_count=0
self.max_retries=3
self.simple_logger=Simple_Logger.SimpleLogger()
self.lock=threading.Lock()
self.folder_name=os.path.join("Results","Data_")
    def generate_Error_log(self, status, ipx, portx, pid):
try:
print "Logged exception"
'''self.data_path=self.folder_name+str(self.pid)
error_file=str(project_id)+"_error.txt"
error_file_path = os.path.join(self.data_path, error_file)
self.lock.acquire()
simple_logger.log(error_file_path,"Error -->,Status:Error Complete,Host :"+str(ipx)+",Port:"+str(portx)+",Project id :"+str(pid)+"\n")
self.lock.release()'''
except Exception ,ee:
print "Exception while writing to error file :"+str(ee)
def portscanner(self,ipx,portx): #switch,current_project_id
nm=nmap.PortScanner()
try:
if portx=="top_ports":
nm.scan(ipx,None,self.SWITCH)
else:
nm.scan(ipx,portx,self.SWITCH)
except Exception ,ex:
self.seperator()
print r+"\n\nEXCEPTION in nmap built in utiliry--> "+str(ex) +e
self.seperator()
self.seperator()
print g+"\n\nRe-attempts made on this record :"+str(self.retry_count)+e
self.seperator()
self.retry_count =self.retry_count+1
if (self.retry_count < self.max_retries):
print g+"\n\nRe-attemting for the failed record"+e
self.IPtable.UpdateStatus('incomplete',ipx,portx,int(self.CURRENT_PROJECT_ID))
else:
print g+"\n\nMax re attempts exceeded - Updating status to ERror-complete"+e
print r+"\n\nPlease see the error log for further details.IT would mention the host for which the nmap module failed"+e
self.IPtable.UpdateStatus('error-complete',ipx,portx,int(self.CURRENT_PROJECT_ID))
self.generate_Error_log('error-complete',ipx,portx,int(self.CURRENT_PROJECT_ID))
return 0
try:
temp=nm.scanstats()['uphosts']
if (int(temp) != 0):
host=ipx
if 'tcp' in nm[host].all_protocols():
self.seperator()
print "Result for IP : " + host
print('Protocol : TCP' )
for kk in nm[host]['tcp'].keys():
if (nm[host]['tcp'][kk]['name'])=='':
nm[host]['tcp'][kk]['name']='unknown'
lport = nm[ipx]['tcp'].keys()
lport.sort()
for port in lport:
print b+'port : ' +y+str(port) + ' \t ' + g+ nm[host]['tcp'][port]['state'] +' \t' +r +'' + nm[host]['tcp'][port]['name'] +e
self.seperator()
sd=nm.csv()
#print "Reached at update point "
try :
self.IPtable.Update(sd,portx,ipx,int(self.CURRENT_PROJECT_ID))
except Exception ,ee :
self.print_Log("Exception in update "+str(ee))
print "EXception Update main "+str(ee)
if 'udp' in nm[host].all_protocols():
self.seperator()
#self.IPtable.Update(sd,portx,ipx,int(self.CURRENT_PROJECT_ID))
print "Result for IP : " + host
print('Protocol : UDP' )
lport = nm[ipx]['udp'].keys()
lport.sort()
                    for kk in nm[host]['udp'].keys():
                        if (nm[host]['udp'][kk]['name'])=='':
                            nm[host]['udp'][kk]['name']='unknown'
for port in lport:
print b+'port : ' +y+str(port) + ' \t ' + g+ nm[host]['udp'][port]['state'] +' \t' +r +'' + nm[host]['udp'][port]['name'] +e
self.seperator()
sd=nm.csv()
try :
self.IPtable.Update(sd,portx,ipx,int(self.CURRENT_PROJECT_ID))
except Exception ,ee :
print "EXception Update main "+str(ee)
self.print_Log("Exception in update "+str(ee))
status="complete"
#print "\n\n\n!!!Completed!!! Ip : "+ipx+"\n\n\n -Protocols ---> "+str(nm[host].all_protocols())+"\n\n"
try :
self.IPtable.UpdateStatus(status,ipx,portx,int(self.CURRENT_PROJECT_ID))
except Exception ,ee :
self.print_Log("Exception in update status "+str(ee))
else:
statuss="host-down"
try :
self.IPtable.UpdateStatus(statuss,ipx,portx,int(self.CURRENT_PROJECT_ID))
except Exception ,ee :
self.print_Log("Exception in update status host-down "+str(ee))
except Exception,exc:
self.print_Log("Parent exception : "+str(exc))
def ThreadEnd(self,ipl):
print "\n\nThread ended with host ip -"+str(ipl)+"\n\n"
#startProcessing(1)
def simplescanner(self,ipl):
self.method_id="Simple scanner"
self.print_Log("Started Simple acanner")
stport=0
lsport=0
port_list=[]
process_list=[]
try :
port_list=self.IPtable.getPorts(str(ipl),self.CURRENT_PROJECT_ID)
if(port_list):
for port in port_list:
fport=str(port[0]) #fport=1 -5001
#print "\n\nFport is :"+fport +" IP :" +str(ipl) +"id :" +str(self.CURRENT_PROJECT_ID)
time.sleep(10)
try :
self.IPtable.UpdateStatus('processing',ipl,fport,int(self.CURRENT_PROJECT_ID))
except Exception, ee:
print "EXception 13.01 : " +str(ee)
tp=multiprocessing.Process(target=self.portscanner,args=(ipl,fport)) #
process_list.append(tp)
tp.start()
#print "\n\nStarted subprocess for ip " +str(ipl) +" and port "+ str(port) +" and Process : "+str(tp)
for process_ in process_list:
process_.join()
print "\n\n Finished subprocess for ip " +str(ipl) +" and Process : "+str(process_)
else:
#print "The current ip address has all its ports scanned -->Must have not been there" +str(ipl)
self.print_Log("Some exception-->The current ip address has all its ports scanned -->Must have not been there" +str(ipl))
self.print_Log("Ended Simple acanner")
except Exception ,ee:
print "EXception 11" +str(ee)
self.print_Log("Exception inSimpleScanner-->"+str(ee))
self.ThreadEnd(ipl)
def topport_scan(self,ipls,portl): #this would be invoked if the given port list would be empty such that only the top ports would be scanned
tp=multiprocessing.Process(target=portscanner,args=(ipls,"top_ports"))
tp.start()
tp.join()
def getBulkInsertList_(self,start,end,iplist):
#print "About to make bulk enteries - #Ip:"+ str(len(iplist) )
BulkList=[]
counter=1
#global P
for ip in iplist:
x=int(start)
pnum=end-start+1 #First port number in the sequence say 1-10023 is the range ->pnum =10023
r=pnum%self.Port_Divisior #r = 10023 % 5000 --> r=23
q=pnum//self.Port_Divisior # Floor division ->q=quetient= 10023/5000 => 2.004 ,since floor ,thus q=2
check=q*self.Port_Divisior #check =2*5000 =>10,000
#x=int(start) #x=1
ip_list=[]
while check>0: #(1) check=10000 >0 (2) check=5000 > 0
for tport in range(x,x+self.Port_Divisior,self.Port_Divisior):
fport=str(tport)+'-' +str(tport+self.Port_Divisior) #fport=1 -5001
BulkList.append((self.CURRENT_PROJECT_ID,ip,fport,'incomplete'))
x=x+self.Port_Divisior
check=check-self.Port_Divisior # (A) 1 --> check=5000 , (B) 1 --> check =0
counter=counter+1
#By this time 1-10,000 ports would be scanned .The idea is to scan 5000 ports at 1 time.
#The number of ports left are 23
check=q*self.Port_Divisior #check =10,000
#print "\n\n\n\n check is "+str(check )+" Pnum is "+str(pnum)+"\n\n\n\n"
if check < end :
if pnum!=0 : #pnum=10023
print "Scanning remaining ports"
prange=str(start+check)+"-"+str(start+check+r-1) #prange= (100001-10,0023) -->Thus the remaining 23 ports are ranged out
print "Range is :"+ prange+"\n\n\n"
BulkList.append((self.CURRENT_PROJECT_ID,ip,prange,'incomplete'))
print "\n\nLoop executed : "+str(counter)
return BulkList;
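    # Worked example of the chunking above, using the default Port_Divisior of
    # 7500: start=1, end=10023 gives pnum=10023, q=1, r=2523, so the while-loop
    # queues the range "1-7501" and the remainder branch queues "7501-10023"
    # for every host in iplist (the boundary port 7501 appears in both chunks).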
def getBulkInsertList(self,all_ports,iplist):
print "(1)--About to make bulk enteries - #Ip:"+ str(len(iplist))
BulkList=[]
if (all_ports == None) :
print "in if(1)"
all_Ports_="top_ports"
for ip in iplist:
BulkList.append((self.CURRENT_PROJECT_ID,ip,all_Ports_,'incomplete'))
elif "-" in all_ports:
print "in elif(1)"
tlist=all_ports.split('-') #Split them and the list would be stored in variable named tlist
stport=int(tlist[0]) #First port
lsport=int(tlist[1])
if ((lsport-stport)< 5000):
for ip in iplist:
BulkList.append((self.CURRENT_PROJECT_ID,ip,all_ports,'incomplete'))
else :
BulkList=self.getBulkInsertList_(stport,lsport,iplist)
else :
print "in else"
for ip in iplist:
BulkList.append((self.CURRENT_PROJECT_ID,ip,all_ports,'incomplete'))
#print "\n\nBulk List is \n\n"
#print BulkList
return BulkList
def multiscan(self,start,end,ipls): #This would be invokd when the number of ports per host to be scanned exceed 5000
pnum=end-start+1 #First port number in the sequence say 1-10023 is the range ->pnum =10023
r=pnum%5000 #r = 10023 % 5000 --> r=23
q=pnum//5000 # Floor division ->q=quetient= 10023/5000 => 2.004 ,since floor ,thus q=2
check=q*5000 #check =2*5000 =>10,000
x=int(start) #x=1
while check>0: #(1) check=10000 >0 (2) check=5000 > 0
for tport in range(x,x+5000,5000):
fport=str(tport)+'-' +str(tport+5000) #fport=1 -5001
                tp=multiprocessing.Process(target=self.portscanner,args=(ipls,fport))
tp.start()
#tp.join()
x=x+5000 # (A) 1 --> x=5001 -->It will break from this loop (B) 1 --> x=10,001 -->it shall break the loop
# print "Scan from " + str(tport) + " till " + str(tport+5000)+ " Done"
check=check-5000 # (A) 1 --> check=5000 , (B) 1 --> check =0
#By this time 1-10,000 ports would be scanned .The idea is to scan 5000 ports at 1 time.
#The number of ports left are 23
check=q*5000 #check =10,000
if pnum!=0: #pnum=10023
# print "Scanning remaining ports"
prange=str(start+check)+"-"+str(start+check+r-1) #prange= (100001-10,0023) -->Thus the remaining 23 ports are ranged out
# print prange
            tp=multiprocessing.Process(target=self.portscanner,args=(ipls,prange)) #Finally invoking the portscanner for the remaining 23 ports with range (10,001-10,023)
tp.start()
#tp.join()
def singlescan(self,start,end,ipls):
#print "Single Scan"
prange=str(start)+"-"+str(end)
        tp=multiprocessing.Process(target=self.portscanner,args=(ipls,prange))
tp.start()
tp.join()
def numofips(self,iprange): #Converts CIDR notation as simple list
scanner=nmap.PortScanner()
IPlist=scanner.listscan(iprange)
return IPlist #Thus this wosuld be a list of IP addres
def banner(self,):
print g+" ################################################################# "+e
print g+" ###"+r+" __ "+g+"### "+e
print g+" ###"+r+" /\ \ \_ __ ___ __ _ _ __ "+g+"### "+e
print g+" ###"+r+" / \/ / '_ ` _ \ / _` | '_ \ "+g+"### "+e
print g+" ###"+r+"/ /\ /| | | | | | (_| | |_) | "+g+"### "+e
print g+" ###"+r+"\_\ \/ |_| |_| |_|\__,_| .__/ "+g+"### "+e
print g+" ###"+r+" |_| "+g+"### "+e
print g+" ###"+r+" _ _ "+g+"### "+e
print g+" ###"+r+" /_\ _ _| |_ ___ _ __ ___ __ _| |_(_) ___ _ __ "+g+"### "+e
print g+" ###"+r+" //_\\| | | | __/ _ \| '_ ` _ \ / _` | __| |/ _ \| '_ \ "+g+"### "+e
print g+" ###"+r+"/ _ \ |_| | || (_) | | | | | | (_| | |_| | (_) | | | | "+g+"### "+e
print g+" ###"+r+"\_/ \_/\__,_|\__\___/|_| |_| |_|\__,_|\__|_|\___/|_| |_| "+g+"### "+e
print g+" ###"+r+" "+g+"### "+e
print g+" ###"+r+" __ _ _ "+g+"### "+e
print g+" ###"+r+"/ _\ ___ _ __(_)_ __ | |_ "+g+"### "+e
print g+" ###"+r+"\ \ / __| '__| | '_ \| __| "+g+"### "+e
print g+" ###"+r+"_\ \ (__| | | | |_) | |_ "+g+"### "+e
print g+" ###"+r+"\__/\___|_| |_| .__/ \__| "+g+"### "+e
print g+" ###"+r+" |_| "+g+"### "+e
print g+" ###"+b+" Written by: M$P@T3L "+g+"### "+e
print g+" ################################################################# "+e
def seperator(self):
print r+ "----------------------------------------------" +e
def create_schema(self):
with open(schema_file, 'rt') as f:
schema = f.read()
conn.executescript(schema)
def prompt_project(self):
projectname=raw_input(b+"What is your Project name(no white spaces)? \n>"+y)
return projectname
def prompt_ips(self):
ips=raw_input(b+"Type the IP range: \n>"+y)
IP=ips
return ips
def prompt_ports(self):
ports=raw_input(b+"Enter the Port number or Ports range: \n>"+y)
#global PORT
if ports == "":
self.PORT=None
elif(ports=="*"):
self.PORT="1-65535"
else:
self.PORT=ports
return self.PORT
def print_Log(self,message):
print str(message)
def print_Error(self,message):
print str(message)
def db_projectname(self,projectname_db,IP_range,Port_range): # Store the project name and return the auto generated id
self.method_id="db_projectname"
self.print_Log("Method started")
print "Hello"
time.sleep(10)
try :
pid=self.IPtable.Insert(projectname_db,IP_range,Port_range)
if (pid !=-1):
self.CURRENT_PROJECT_ID=pid
else:
self.print_Log("Some error occured while storing !!" +str(pid))
self.print_Log("Method ended")
except Exception ,ee :
self.print_Error( "Exception in db_projectname "+str(ee))
#print self.CURRENT_PROJECT_ID
#print cursor.lastrowid
def scanbanner(self):
cp=ConfigParser.RawConfigParser() #parses config files
cppath="nmap.cfg" #This is the config file to be read.The config file would have various sections.Each section would be in [sq] beakets.each section would be having key/val pairs as conf setting options
cp.read(cppath) #Read the current file nmap.cfg.The file has got only 1 section given as :[Scantype]
#global self.SWITCH
#global self.takescan
print b+"SELECT THE TYPE OF SCAN: "
self.seperator()
print y+"1). Intense Scan"
print "2). Intense + UDP Scan"
print "3). Intense + TCP full Scan"
print "4). Intense + No Ping Scan"
print "5). TCP Ping Scan"
print "6). PCI Ping Sweep"
print "7). PCI full ports TCP"
print "8). PCI Top 200 UDP"
print "9). PCI Top 100 UDP"
print "10). PCI Top 1000 TCP"
self.takescan=raw_input(b+"Select the type of Scan:\n>"+y)
if self.takescan=="1":
self.SWITCH=cp.get('Scantype','Intense')
elif self.takescan == "2":
self.SWITCH=cp.get('Scantype','Intense_UDP') #-sU -T4 -A -n
elif self.takescan == "3":
self.SWITCH=cp.get('Scantype','Intense_TCPall') #-sS -T4 -A -n--max-rtt-timeout 500ms
elif self.takescan == "4":
self.SWITCH=cp.get('Scantype','Intense_NoPing') #T4 -A -v -Pn -n
elif self.takescan == "5":
self.SWITCH=cp.get('Scantype','Ping') #-PS
elif self.takescan == "6":
self.SWITCH=cp.get('Scantype','PCI_Ping_Sweep') #-PE -n -oA
elif self.takescan == "7":
self.SWITCH=cp.get('Scantype','PCI_Full_ports_TCP') #-Pn -sS -sV -n --max-retries 3 --max-rtt-timeout 1000ms --top-ports 1000
elif self.takescan == "8":
self.SWITCH=cp.get('Scantype','PCI_Top_200_UDP') #-Pn -sU -sV -n --max-retries 3 --max-rtt-timeout 100ms --top-ports 200
elif self.takescan == "9":
self.SWITCH=cp.get('Scantype','PCI_Top_100_UDP') #-Pn -sU -sV -n --max-retries 3 --max-rtt-timeout 100ms --top-ports 100
elif self.takescan == "10":
self.SWITCH=cp.get('Scantype','PCI_Top_1000_TCP') #-Pn -sS -sV -n --max-retries 3 --max-rtt-timeout 500ms
else:
print "Invalid value supplied"
print "Using Default(1)"
self.SWITCH=cp.get('Scantype','Intense')
def prompt_ProjectID(self): #would prompt the user with paused projects -->status=incomplete or paused in projects table
print "\n"
tab = tt.Texttable()
x = [[]] #multi dimension array
cursor=self.IPtable.getPausedScans()
if cursor:
print r+"List of Project with IDs"+e +"\n"
for row in cursor:
x.append([str(row[0]),str(row[1])]) #Place details in the array to display later
tab.add_rows(x) #thus the table would have all rows and 2 columns
tab.set_cols_align(['r','r'])
tab.header(['IDs','PROJECT_NAME']) #setting heder details for col
print tab.draw() #this would draw the table on the console
print "\n"
id_ = raw_input(b+"Enter The Project Id For Scanning :"+e)
try :
if(int(id_)):
return id_
except :
print "Exception 6-->Invalid Value"
return ""
else:
print "\n\nNo incomplete Projects\n\n";
time.sleep(1);
self.main()
def prompt_ScanType(self):
scanType=raw_input(b+"Enter Your choice: \n"+y +"\n(1) For Launching New Scan \n(2) For Launching Paused Scans\n "+e)
try:
if((int(scanType)<1)or(int(scanType) >2)):
return 1;
else :
return scanType;
except :
return 1;
def getHostPort(self,project_id):
try:
self.method_id="getHostPort()-->main"
self.print_Log("Started")
project_data=[]
project_data=self.IPtable.getHostPort(project_id)
self.method_id="getHostPort()-->main"
self.print_Log("Ended")
return project_data
except Exception ,ee:
print "Exception 14" +str(ee)
self.print_Error("Exception --getHostPort--"+str(ee))
return 0;
def launch_PausedScan(self,project_id):
print "Reached Here in Launch Paused Scan !!!\n";
self.method_id="LaunchPausedScan()"
self.print_Log( "Started Launch Paused ")
success=self.IPtable.MakeUpdate(project_id)
if(success==1):
self.startProcessing(self.N)
elif(success==2): #when its paused b4 making bulk entries
port_host=self.getHostPort(project_id)
if(port_host):
ip_range=port_host[0]
port_range=port_host[1]
listip=self.numofips(ip_range)
BulkEntries=self.makeBulkEnteries(listip,port_range)
#global N
self.startProcessing(self.N)
else:
print "The given project id is not present in Database :-->Kindly recheck "
self.print_Log("The given project id is not present in Database :-->Kindly recheck ")
else:
print "\n\nThe update method for status= incomplete has exception \n\n"
self.print_Log("The update method for status= incomplete has exception ")
def stop_all(self):
        os._exit(0)
def makeBulkEnteries(self,all_hosts,all_ports):
#print "In here !!1"
self.method_id="makeBulkEntries()"
self.print_Log("Started")
BulkList=[]
if 1:
BulkList=self.getBulkInsertList(all_ports,all_hosts)
self.method_id="makeBulkEntries()"
self.method_id="makeBulkEntries"
try:
status=self.IPtable.InsertAll(BulkList)
self.method_id="makeBulkEntries()"
if (status != 1):
print "Some error occured while bulk insertion"
except Exception ,ee :
print "EXception 9 "+str(ee)
self.print_Error("EXception make Bulk entries --> "+str(ee))
self.print_Log("Ended")
return BulkList;
def getAllDistinctHosts(self,n):
try :
self.method_id="getAllDistinctHost()"
self.print_Log("started")
iplist=[]
iplist=self.IPtable.DistinctHosts(self.CURRENT_PROJECT_ID,int(n))
self.method_id="getAllDistinctHost()"
self.print_Log("Ended")
return iplist
except Exception ,ee :
print "Exception 10 " +str (ee)
self.print_Error("Exception "+str(ee))
return 0
def start_Polling(self):
try:
stop_db_poll=False #use this logic to stop unnecessary db poll when all hosts finish
#global N
while 1:
time.sleep(5)
active_threads=threading.enumerate()
counter=len(active_threads)
print self.seperator()
print "Polling \n Threads remaining are :"+str(active_threads)+"\n"
print self.seperator()
                # If the only live thread is this polling (main) thread, no workers are
                # running, but records may still be marked 'incomplete' or 'processing'
                # (a worker may have been started but not yet scheduled, or may have
                # died). So re-check the project status here: if hosts are still pending
                # and the scan has not been paused, restart processing. If the user has
                # paused the scan intentionally, the projects table shows 'paused' and
                # the IP table may hold both 'processing' and 'incomplete' rows; that
                # case is ignored and the scan is allowed to come to an end.
if(counter==1):
status=self.IPtable.checkStatus(self.CURRENT_PROJECT_ID)
if(status):
processing_status=status[0]
pause_status=status[1]
if((processing_status) and (not (pause_status))):#will just check once
print "Still left with some hosts that display status as processing or incomplete "
time.sleep(10)#the reason for this delay is suppose some thread is fired but not scheduled yet and thus the status would show as incomplete and if we immidiately statprocessing,then 2 threads might point to 1 record
self.startProcessing(self.N)
#print "Main Thread--->Again Starting pooling in 50 sec :"
time.sleep(50)
else:
print "Active Threads are only 1 --Scan about to finish --Threads remaining are :"+str(active_threads)
self.print_Log("Active Threads are only 1 --Scan about to finish --Threads remaining are :"+str(active_threads))
break;
#include logic to stop unnecessary polling see count (*) where status=p if that=limit then dont poll
elif(counter <=(self.N+1)):
if(not(self.getPausedStatus(self.CURRENT_PROJECT_ID))):
limit=(self.N+1)-counter
if(limit != 0):
#print "\n\nLaunching :"+str(limit)+" Threads for hosts"
left_hosts=self.startProcessing(limit) #chk if its 0 then break or dont poll till current th fn
#print "Making main thread sleep for 1 seconds"
time.sleep(1)
#print "Waking main thread awake after 1 seconds"
else:
#print "Making main thread sleep for 1 seconds"
time.sleep(1)
#print "Waking main thread awake after 1 seconds"
else:
time.sleep(10)
else :
print "\n\n\n\n------FATEL ERROR-------\n\n\n"
print "Number of threads cant exceed : "+str(self.N+1)
except Exception ,ee:
print "Exception caught 15" +str(ee)
def StartThreads(self,hosts):
#print "\n In start thread method !!! \n"
self.method_id="Start THreads"
threads=[]
#print "Starting : "+str(len(hosts)) +"Threads for "+ str(hosts) +"Hosts :"
print "\n"
print self.seperator()
self.print_Log("Starting : "+str(len(hosts)) +"Threads for "+ str(hosts) +"Hosts" )
print self.seperator()
print "\n"
for host in hosts:
#print "host is "+str(host)
lk= threading.enumerate()
#print "\n Current thread count : "+str(len(lk))
#print "\n\nThe threads enumerate returned are : " +str(lk) +"\n\n"
self.print_Log(g+"******************************************************************************************************************************************\n"+e+"Current thread count : "+str(len(lk)))
self.print_Log("The threads enumerate returned are : " +str(lk)+g+"\n******************************************************************************************************************************************"+e)
if len(lk)<(self.N+1) :
currentIP= str(host)
obj=NmapScan()
obj.IP=self.IP
obj.PORT=self.PORT
obj.SWITCH=self.SWITCH
obj.CURRENT_PROJECT_ID=self.CURRENT_PROJECT_ID
obj.takescan=self.takescan
obj.N=self.N
obj.Port_Divisior=self.Port_Divisior
obj.Pause_Flag=self.Pause_Flag
obj.Stop_Flag=self.Stop_Flag
obj.ipcount=self.ipcount
obj.IPtable=IPtable.IPtable()
obj.simple_logger=self.simple_logger
#self.method_id="INIT"
t = threading.Thread(target=obj.simplescanner, args=([currentIP]))
threads.append(t)
#print "Starting thread for IP :"+str(host)
#self.print_Log("Starting thread for IP :"+str(host))
t.start()
self.Thread_pool.append(t)
#print "\n\n\nStarted thread for IP :"+str(host) + " --> Thread is : "+ str(t)
self.print_Log( "\nStarted thread for IP :"+str(host) + " --> Thread is : "+ str(t))
time.sleep(3)
def startProcessing(self,n):
try :
All_hosts=self.getAllDistinctHosts(n)
#print "Hosts to be given to thread : "+str(All_hosts)
if (All_hosts):
self.StartThreads(All_hosts)
else :
return;
except Exception ,ee :
print "Exception 12 " +str(ee)
def getPausedStatus(self,project_id):
try :
status=self.IPtable.getStatus(project_id)
return status
except Exception ,ee:
print "Exception getstatus " +str(ee)
return 0
def pause_scan(self):
global Pause
Pause =1
        self.stop_all()
def main(self,path='',targethosts='',targetports='',switch='',scan_type='',mode="c",project_id='',assessment_id='',app_id=''):
if (scan_type=="1"):
self.SWITCH=switch
self.PORT=targetports
print "The mode recieved is :" +str(mode)
if(mode=="c"):
self.db_projectname(path,targethosts,self.PORT)
self.seperator()
elif mode =="g-init":
if assessment_id =='':
return;
else:
self.db_projectname(path,targethosts,self.PORT)
self.IPtable.update_mapping(app_id,self.CURRENT_PROJECT_ID,assessment_id)
return self.CURRENT_PROJECT_ID
elif mode=="g-start":
self.CURRENT_PROJECT_ID=int(project_id)
x=333#gui mode
print b +"[+]" + "Starting SCAN" +e
#targethosts=['10.0.1.39','10.0.1.39','10.0.1.39','10.0.1.39']
ipcount=len(self.numofips(targethosts))
if (',' in targethosts):
listip=targethosts.split(',')
else:
listip=self.numofips(targethosts)
BulkEntries=self.makeBulkEnteries(listip,self.PORT)
#global N
self.startProcessing(self.N) #this is the part wher the prompt input finishes
#print "Main Thread Starting pooling in 50 sec :"
time.sleep(100)
# "**Pooling started **\n"
self.method_id="Main()"
self.print_Log("**Pooling started :**")
self.start_Polling()
#print "\n\n\n\n\nScan Finished\n\n\n\n\n "
else:
#global self.CURRENT_PROJECT_ID
if (mode=="c"):
self.CURRENT_PROJECT_ID=self.prompt_ProjectID()
else:
self.CURRENT_PROJECT_ID=int(project_id)
if (self.CURRENT_PROJECT_ID != ""):
self.launch_PausedScan(self.CURRENT_PROJECT_ID)
print "\n\nMain thread starting Polling .........\n\n"
print "Main Thread Starting pooling in 10 sec :"
time.sleep(100)
print "Pooling started :"
self.start_Polling()
def driver_main(self,ips='',project_name='',port='',scan_type='',switch='',project_id='',mode="c",assessment_id="",app_id=""):
try:
print ("("+ips,project_name,port,scan_type,switch,project_id,mode,assessment_id,app_id+")")
print "\n\n Hello world \n\n"
time.sleep(10)
start = time.time()
os.system('cls' if os.name == 'nt' else 'clear')
db_filename="nmapscan"
start = time.time()
#self.main()
#mode="c"path='',targethosts='',targetports='',switch='',scan_type='',mode="c",project_id=''):
self.main(project_name,ips,port,switch,scan_type,mode,project_id,assessment_id,app_id)
print "Reached here as well !!!"
if mode != "g-init" :
th_count=threading.enumerate()
print "# of threads Alive are :"+str(len(th_count))
#while (1) :
if 1:
if (len(th_count)==1):
print "\nNow stopping and saving Global Project Id : "+ str(self.CURRENT_PROJECT_ID)+"\n";
#global self.CURRENT_PROJECT_ID
if ((self.CURRENT_PROJECT_ID != "") and (self.CURRENT_PROJECT_ID is not None)):
                            status=self.IPtable.checkStatus(self.CURRENT_PROJECT_ID)  # same status/pause rationale as the comment in start_Polling()
if(status):
processing_status=status[0]
pause_status=status[1]
if((processing_status) and (not (pause_status))):#will just check once
print "Still left with some hosts that display status as processing !!!"
time.sleep(10)#the reason for this delay is suppose some thread is fired but not scheduled yet and thus the status would show as incomplete and if we immidiately statprocessing,then 2 threads might point to 1 record
self.startProcessing(self.N)
print "Main Thread--->Again Starting pooling in 50 sec :"
time.sleep(50)
print "Polling started-->again :"
self.start_Polling()
#xx=2
if ((not(processing_status)) and (not(pause_status))): #to update status from incompl to comp
print "Launching clear logs !!!"
self.IPtable.clearLogs(self.CURRENT_PROJECT_ID,'complete')
#else :
#clearLogs(self.CURRENT_PROJECT_ID,'complete')
end_time = time.time()
print "Time taken in seconds : "+str(end_time-start)
elif mode =="g-init":
print "\n\nPROPER\n\n"
return self.CURRENT_PROJECT_ID
except KeyboardInterrupt:
print c+"\n[*]"+g+" Scan is Aborted"+e
print c+"[*]"+g+" Stopping"+e
self.print_Log("\n[*]"+g+" Scan is Aborted")
time.sleep(1)
pass
except Exception ,ee:
self.print_Log("Exception in driver() "+str(ee))
#NmapScanObj=NmapScan()
#NmapScanObj.driver_main()
|
OSCSceneController.py
|
import yaml
import sys
import os
from threading import Timer, Thread
from pythonosc import osc_server, dispatcher, udp_client
import itertools
import argparse
import datetime
import time
parser = argparse.ArgumentParser(description='Route OSC packets corresponding to scenes')
parser.add_argument('--no-gui', action='store_true', help="Run the scene controller purely from the command line")
parser.add_argument("-s", "--scenes", help="Path to scenes.yaml", metavar="FILE")
parser.add_argument("-i", "--input-port", metavar='N', type=int, help="Port for OSC server to listen on (Default 8000)")
parser.add_argument("-o", "--output-address", help="IP address and port to send feedback traffic to")
args = parser.parse_args()
# Only import if needed
if not args.no_gui:
import signal
import json
import webbrowser
import appdirs
import tkinter as tk
from tkinter import ttk
from tkinter import filedialog
from tkinter import font
from tkinter import messagebox
from tkinter.scrolledtext import ScrolledText
class UserPreferences:
def __init__(self):
self.user_data_dir = appdirs.user_data_dir("OSCSceneController", "SteffeyDev")
self.data = {}
try:
if not os.path.exists(self.user_data_dir):
os.makedirs(self.user_data_dir)
self.preferences_file_path = self.user_data_dir + "/preferences.json"
print(self.preferences_file_path)
if os.path.exists(self.preferences_file_path):
with open(self.preferences_file_path, 'r') as preferencesFile:
self.data = json.loads(preferencesFile.read())
preferencesFile.close()
except:
pass
def get(self, name):
if name in self.data:
return self.data[name]
return None
def set(self, name, value):
self.data[name] = value
try:
with open(self.preferences_file_path, 'w') as preferencesFile:
preferencesFile.write(json.dumps(self.data))
preferencesFile.close()
except:
pass
debug = False
active_scene = None
log_data = []
class OSCMessage:
def __init__(self, message, args = None, *, delay = 0):
if debug:
print("Creating OSCMessage from message:", message)
self._prefix = message.split("/")[1]
self._addr = message.split(" ")[0]
self._delay = delay
self._args = []
if args is not None:
if type(args) is list:
self._args = args
elif type(args) is tuple:
self._args = list(itertools.chain.from_iterable([args])) # convert to array
else:
self._args = [args]
else:
# Go through each argument, parse it, and add it to the array in the correct type
for arg in message.split(" ")[1:]:
if self._prefix == "scene":
self._args.append(int(arg))
continue
# Check first if it is an int
if arg.isdigit():
self._args.append(int(arg))
continue
# Then see if if is a float
try:
self._args.append(float(arg))
continue
except ValueError:
pass
# Then see if it is a boolean type
if arg.lower() == "true":
self._args.append(True)
continue
if arg.lower() == "false":
self._args.append(False)
continue
# If all else fails, it must be a string
self._args.append(arg)
@property
def address(self):
return self._addr
@property
def arguments(self):
return self._args
@property
def delay(self):
return self._delay
@delay.setter
def delay(self, delay):
self._delay = delay
@property
def prefix(self):
return self._prefix
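# Example of the parsing above (the address is made up for illustration):
# OSCMessage("/proj/power 1 0.5 true label") yields address "/proj/power",
# prefix "proj" and arguments [1, 0.5, True, "label"], using the
# int/float/bool/string detection implemented in __init__.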
### Generate the OSC commands that need to be sent for each scene
# by parsing the YAML file
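# A minimal scenes.yaml sketch (illustrative only: the top-level keys are the
# ones read below, but the prefix, addresses and values are hypothetical):
#
#   endpoints:
#     - { prefix: proj, ip: 192.168.1.50, port: 7000 }
#   map:
#     projector:
#       "on":  "/proj/power 1"
#       "off": "/proj/power 0"
#   scenes:
#     - key: "1"
#       name: Presentation
#       projector: "on"
#
# With this file, an incoming /scene/1 resolves to the OSC message
# "/proj/power 1", which is routed to 192.168.1.50:7000 via the "proj" prefix.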
class SceneParser():
def __init__(self):
self.scene_map = None
self.midi_map = None
self.scene_names = None
self.loaded = False
def parseFromFile(self, filename):
        with open(filename, 'r') as f:
            config = yaml.safe_load(f)
scenes = config['scenes']
endpoints = config['endpoints']
mapping = config['map']
self.scene_map = {}
self.scene_names = {}
self.midi_map = {}
self.udp_clients = {}
self.udp_client_strings = {}
print("\nOutput Settings")
udp_clients = {}
for endpoint in endpoints:
self.udp_clients[endpoint['prefix']] = udp_client.SimpleUDPClient(endpoint['ip'], endpoint['port'], allow_broadcast=True)
self.udp_client_strings[endpoint['prefix']] = endpoint['ip'] + ":" + str(endpoint['port'])
print("Sending commands that start with /" + endpoint['prefix'] + " to " + endpoint['ip'] + ":" + str(endpoint['port']))
for scene in scenes:
arr = []
for key, value in scene.items():
if not (key == "key" or key == "name" or key == "midi"):
self.get_commands(key, value, mapping[key], arr)
if debug:
print("Array generated for scene " + scene['name'] + ":")
print(arr)
print()
if 'midi' in scene:
self.midi_map[scene['midi']] = scene['key']
self.scene_map[scene['key']] = arr
self.scene_names[scene['key']] = scene['name']
self.loaded = True
def is_osc_command(self, item):
return isinstance(item, str) and item.startswith("/") and len(item.split("/")) > 1
def get_commands(self, key, value, map_value, array):
def print_error(key, value, map_value):
print("Could not process item with key ", key, ", value:", value, ", and map value:", map_value)
log_data.append("\nConfiguration Warning - Could not process item with key \"" + key + "\", value: \"" + str(value) + "\", and map value: \"" + str(map_value) + "\"")
if debug:
print("Getting commands for key:", key, "and value:", value, "\nUsing map_value:", map_value)
if isinstance(value, dict):
for _key, _value in value.items():
self.get_commands(_key, _value, map_value[_key], array)
elif isinstance(value, list):
delay = 0
for item in value:
if 'delay' in item:
delay = int(item.split(" ")[1].replace("s", ""))
for map_key, map_val in map_value.items():
if map_key in value and map_key != "none":
if map_key in map_value and 'in' in map_value[map_key] and self.is_osc_command(map_value[map_key]['in']):
array.append(OSCMessage(map_value[map_key]['in'], delay=delay))
else:
print_error(key, value, map_value)
else:
if map_key in map_value and 'out' in map_value[map_key] and self.is_osc_command(map_value[map_key]['out']):
array.append(OSCMessage(map_value[map_key]['out'], delay=delay))
else:
print_error(key, value, map_value)
elif isinstance(value, str):
string = value.split(" ")[0]
delay = 0
if len(value.split(" ")) > 1:
delay = int(value.split(" ")[1].replace("s", ""))
if string in map_value and self.is_osc_command(map_value[string]):
array.append(OSCMessage(map_value[string], delay=delay))
else:
print_error(key, value, map_value)
elif isinstance(value, int):
array.append(OSCMessage(map_value.replace('x', str(value / 127))))
else:
print_error(key, value, map_value)
def getSceneMap(self):
return self.scene_map
def getSceneNames(self):
return self.scene_names
def getMidiMap(self):
return self.midi_map
def getUdpClients(self):
return self.udp_clients
def getUdpClientStrings(self):
return self.udp_client_strings
def isLoaded(self):
return self.loaded
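# A minimal sketch of the configuration shape that parseFromFile() above appears
# to expect: top-level 'endpoints', 'map', and 'scenes'. Every name, address and
# port below is an illustrative assumption, not taken from the project docs.
_EXAMPLE_SCENE_CONFIG = """
endpoints:
  - prefix: light
    ip: 192.168.1.50
    port: 9000
map:
  lights:
    wash:
      warm: /light/wash/warm
      cool: /light/wash/cool
scenes:
  - key: a
    name: Opening
    midi: 1
    lights:
      wash: warm
"""

def _example_parse_scene_config(path):
    # Hypothetical helper (never invoked here): write the sketch above to a file and parse it.
    with open(path, 'w') as config_file:
        config_file.write(_EXAMPLE_SCENE_CONFIG)
    parser = SceneParser()
    parser.parseFromFile(path)
    # parser.getSceneMap() -> {'a': [<OSCMessage for /light/wash/warm>]}
    # parser.getMidiMap()  -> {1: 'a'}
    return parser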
class OSCSceneController():
def __init__(self, parser):
self.parser = parser
self.server_thread = None
self.server = None
self.last_scene = None
self.running = False
self.output_client = None
def start(self, input_port):
if self.running:
self.stop()
if not self.parser.isLoaded():
log_data.append("No configuration loaded, once you load a configuration the server will start")
return
for key, string in self.parser.getUdpClientStrings().items():
if string.split(":")[1] == str(input_port):
log_data.append("Cannot start server because the input port {0} is the same as the the output port for prefix '{1}'. Please change the input port.".format(input_port, key))
return
try:
dispatch = dispatcher.Dispatcher()
for key in self.parser.getSceneMap():
dispatch.map("/scene/" + key, self.respond_to_scene)
for number in self.parser.getMidiMap():
dispatch.map("/midi-scene/" + str(round(number / 127, 2)), self.respond_to_scene)
dispatch.set_default_handler(self.route_message)
self.server = osc_server.BlockingOSCUDPServer(("0.0.0.0", input_port), dispatch)
self.server_thread = Thread(target=self.server.serve_forever)
self.server_thread.start()
log_data.append("\nServer started, listening on all interfaces on port {0}...\n".format(input_port))
self.running = True
except KeyboardInterrupt:
print("Exiting...")
sys.exit(0)
def stop(self):
if self.running:
if self.server is not None:
self.server.shutdown()
self.server = None
if self.server_thread is not None:
self.server_thread.join()
self.server_thread = None
self.running = False
log_data.append("\nServer stopped\n")
def isRunning(self):
return self.running
def route_message(self, addr, *args):
self.send_msg(OSCMessage(addr, args))
def respond_to_scene(self, addr, args = 1):
scene_map = self.parser.getSceneMap()
midi_map = self.parser.getMidiMap()
scene_names = self.parser.getSceneNames()
new_scene = ""
if addr.split("/")[1] == "scene":
new_scene = addr.split("/")[2]
elif addr.split("/")[1] == "midi-scene":
new_scene = midi_map[int(round(float(addr.split("/")[2]) * 127))]
else:
log_data.append("\nReceived invalid message: {0}".format(addr))
return
if new_scene not in scene_map:
log_data.append("\nReceived undefined scene '{0}'".format(new_scene))
return
# If we are receiving one of the turn-off signals that
# we are sending, ignore it (prevent feedback loop)
if args == 0 or args == 0.0:
if new_scene != self.last_scene:
self.send_msg(OSCMessage("/scene/" + new_scene, [0.0]))
self.send_msg(OSCMessage("/scene/" + new_scene, [0]))
return
# If we are trying to select the same scene, resend confirmation message but don't process again
if new_scene == self.last_scene:
if self.output_client is not None:
self.send_msg(OSCMessage("/scene/" + new_scene + " 1"))
return
log_data.append("")
log_data.append("Received: " + addr + " " + str(args))
# Only send outgoing messages if we know where to send them to
if self.output_client is not None:
### First we need to send message to turn on the new scene
self.send_msg(OSCMessage("/scene/" + new_scene + " 1"))
### Then we need to send message to turn off current scene
# If we know what the last scene is, deselect it
if self.last_scene is not None:
self.send_msg(OSCMessage("/scene/" + self.last_scene, 0))
self.send_msg(OSCMessage("/scene/" + self.last_scene, 0.0))
# If we don't know what the last scene is, turn them all off
# except for the current scene
else:
delay = 10
for key in scene_map:
if key != new_scene:
self.send_msg(OSCMessage("/scene/" + key, 0, delay=(delay/100)), quiet = True)
self.send_msg(OSCMessage("/scene/" + key, 0.0, delay=(delay/100)), quiet = True)
delay += 5
self.last_scene = new_scene
# Update GUI
global active_scene
active_scene = scene_names[new_scene]
### Finally we need to actually send the OSC messages that make up the scene change
for osc_command in scene_map[new_scene]:
self.send_msg(osc_command)
def send_msg(self, message, delay_bypass = False, quiet = False):
if message.delay == 0 or delay_bypass:
if message.prefix == "scene":
if self.output_client is not None:
self.output_client.send_message(message.address, message.arguments)
elif message.prefix in self.parser.getUdpClients():
self.parser.getUdpClients()[message.prefix].send_message(message.address, message.arguments)
else:
log_data.append("Prefix not recognized: {0}".format(message.prefix))
if not quiet:
log_data.append("Sending \"" + message.address + " " + " ".join([str(s) for s in message.arguments]) + "\" to " + self.parser.getUdpClientStrings()[message.prefix])
else:
wait = message.delay
if not quiet:
log_data.append("Scheduling \"{0}\" to be sent after {1} seconds".format(message.address, message.delay))
r = Timer(wait, self.send_msg, [message, True, quiet])
r.start()
def setOutputAddress(self, ip, port):
self.output_client = udp_client.SimpleUDPClient(ip, port, allow_broadcast=True)
if not args.no_gui:
class MyApp(tk.Tk):
def __init__(self, *args, **kwargs):
tk.Tk.__init__(self, *args, **kwargs)
self.withdraw() #hide window
self.filename = None
self.parser = SceneParser()
self.controller = OSCSceneController(self.parser)
self.log_data_len = None
self.output_port = None
self.output_ip_address = None
self.preferences = UserPreferences()
self.minsize(500, 430)
menubar = tk.Menu(self)
self.config(menu=menubar, background="white")
self.iconbitmap('app_icon.ico')
if (sys.platform == "darwin"):
self.createcommand('::tk::mac::ShowPreferences', self.quit)
self.build()
self.deiconify()
self.after(1000, self.updateGUI)
# Load data from preferences file
if self.preferences.get('output_ip_address') is not None:
self.output_ip_address = self.preferences.get('output_ip_address')
self.output_ip_text.set(self.output_ip_address)
if self.preferences.get('output_port') is not None:
self.output_port = self.preferences.get('output_port')
self.output_port_text.set(str(self.output_port))
if self.preferences.get('filename') is not None:
self.filename = self.preferences.get('filename')
self.scene_file_text.set(self.filename.split("/")[-1])
if self.preferences.get('input_port') is not None:
self.input_port_text.set(self.preferences.get('input_port'))
if self.output_ip_address is not None and self.output_port is not None:
self.controller.setOutputAddress(self.output_ip_address, self.output_port)
self.preferences.set('output_port', self.output_port)
self.log("Output port set, sending to {0}:{1}".format(self.output_ip_address, self.output_port))
if self.filename is not None:
self.parser.parseFromFile(self.filename)
self.scene_file_text.set(self.filename.split("/")[-1])
self.log("Successfully loaded configuration from file: {0}".format(self.filename))
self.controller.start(int(self.input_port_text.get()))
else:
self.log("To start, load a configuration (a YAML file with the scenes in it).")
# def quit(self):
# self.stop()
# self.destroy()
def stop(self):
self.controller.stop()
def updateGUI(self):
global active_scene
if (active_scene is not None):
self.active_scene_text.set(active_scene)
global log_data
if len(log_data) > 0:
log_data_store = log_data
log_data = []
self.log_text_box.configure(state='normal')
for item in log_data_store:
self.log_text_box.insert('end', item + '\n')
self.log_text_box.yview('end')
self.log_text_box.configure(state='disabled')
self.after(1000, self.updateGUI)
def reload_scene_handler(self):
self.focus()
if (self.filename is not None):
self.parser.parseFromFile(self.filename)
self.log("Reloaded configuration from file: {0}".format(self.filename))
else:
self.log("Cannot reload, no configuration loaded")
def load_from_file_handler(self):
self.focus()
new_filename = filedialog.askopenfilename()
if new_filename != "": # User did not click cancel button
if new_filename.split(".")[-1] != "yaml" and new_filename.split(".")[-1] != "yml":
messagebox.showerror("Invalid File", "Please select a Yaml configuration file with a '.yaml' extension. Open the documentation for more information")
else:
self.filename = new_filename
self.parser.parseFromFile(new_filename)
self.scene_file_text.set(new_filename.split("/")[-1])
self.log("Successfully loaded new configuration from file: {0}".format(new_filename))
self.controller.start(int(self.input_port_text.get()))
self.preferences.set('filename', self.filename)
def isPort(self, value_if_allowed, text):
if value_if_allowed == "":
return True
if text in '0123456789':
try:
return (int(value_if_allowed) < 65536)
except ValueError:
return False
else:
return False
def isIpAddress(self, value_if_allowed, text):
if value_if_allowed == "":
return True
if text in '0123456789.':
try:
parts = value_if_allowed.split('.')
if len(parts) > 4:
return False
for part in parts:
if part != "" and int(part) > 255:
return False
return True
except ValueError:
return False
else:
return False
def log(self, text):
self.log_text_box.configure(state='normal')
self.log_text_box.insert('end', '\n' + text + '\n')
self.log_text_box.configure(state='disabled')
self.log_text_box.yview('end')
def input_port_changed(self, text):
self.focus()
try:
self.controller.start(int(self.input_port_text.get()))
self.preferences.set('input_port', int(self.input_port_text.get()))
except PermissionError:
messagebox.showerror("Invalid Port", "It looks like that port is already in use or is reserved, try another one!")
def verifyIpAddress(self, text):
try:
parts = text.split('.')
for part in parts:
if int(part) > 255:
return False
return len(parts) == 4
except ValueError:
return False
def output_ip_changed(self, text):
if self.output_ip_text.get() == "":
self.output_ip_address = None
return
if (self.verifyIpAddress(self.output_ip_text.get())):
self.output_ip_address = self.output_ip_text.get()
self.preferences.set('output_ip_address', self.output_ip_address)
if self.output_port is not None:
self.controller.setOutputAddress(self.output_ip_address, self.output_port)
self.log("Output IP address changed, now sending to {0}:{1}".format(self.output_ip_address, self.output_port))
else:
messagebox.showerror("Invalid IP Address", "Please enter a valid IPv4 address")
self.outgoing_ip_entry.focus()
def output_port_changed(self, text):
if self.output_port_text.get() == "":
self.output_port = None
return
try:
self.output_port = int(self.output_port_text.get())
self.preferences.set('output_port', self.output_port)
if self.output_ip_address is not None and self.output_port is not None:
self.controller.setOutputAddress(self.output_ip_address, self.output_port)
self.log("Output port changed, now sending to {0}:{1}".format(self.output_ip_address, self.output_port))
except ValueError:
messagebox.showerror("Invalid Port", "Please entry a integer value in the range 1000-65535")
self.outgoing_port_entry.focus()
def focus_root(self, text):
self.focus()
def open_documentation(self, extra):
webbrowser.open_new("https://github.com/SteffeyDev/osc-scenes/blob/master/README.md")
def generateLine(self, rootComponent, width):
canvas = tk.Canvas(rootComponent, width=width, height=4, bg="white", bd=0, highlightthickness=0)
canvas.create_line(0, 3, width, 3, fill="gray")
canvas.pack()
def build(self):
style=ttk.Style()
style.theme_use('alt')
style.configure("TLabel", background="white")
style.configure("TFrame", background="white")
style.configure("TButton", relief="flat", background="lightgray")
style.map("TButton",
background=[('pressed', 'lightgray'), ('active', 'gray')],
relief=[('pressed', 'flat'), ('active', 'flat')]
)
style.configure("Link.TLabel", foreground="blue", cursor="hand2")
largeBoldFont = font.Font(size=25, weight='bold')
mediumBoldFont = font.Font(size=15, weight='bold')
smallBoldFont = font.Font(size=13, weight='bold')
isPortCommand = (self.register(self.isPort), '%P', '%S')
isIpAddressCommand = (self.register(self.isIpAddress), '%P', '%S')
split = ttk.Frame(self)
left_side = ttk.Frame(split, width=250, height=400, borderwidth=5)
active_scene_box = tk.Frame(left_side, bg="white") #ttk.Frame(left_side, style="Left.TFrame")
self.active_scene_text = tk.StringVar()
self.active_scene_text.set("None")
ttk.Label(active_scene_box, textvariable=self.active_scene_text, font=largeBoldFont).pack()
self.generateLine(active_scene_box, 100)
ttk.Label(active_scene_box, text="Current Scene").pack()
active_scene_box.pack(pady=10)
active_scene_box.config()
input_box = ttk.Frame(left_side)
self.input_port_text = tk.StringVar()
listening_address_entry = ttk.Entry(input_box, width=5, textvariable=self.input_port_text, font=largeBoldFont, justify="center", validate="key", validatecommand=isPortCommand)
listening_address_entry.bind('<Return>', self.input_port_changed)
listening_address_entry.pack()
self.input_port_text.set("8002")
self.generateLine(input_box, 100)
ttk.Label(input_box, text="Listening Port").pack()
input_box.pack(pady=15)
output_box = ttk.Frame(left_side)
output_address_box = ttk.Frame(output_box)
output_ip_text = tk.StringVar()
#output_ip_text.trace("w", lambda name, index, mode, output_ip_text=output_ip_text: self.output_ip_changed(output_ip_text))
outgoing_ip_entry = ttk.Entry(output_address_box, width=13, textvariable=output_ip_text, font=smallBoldFont, justify="center", validate="key", validatecommand=isIpAddressCommand)
outgoing_ip_entry.bind('<FocusOut>', self.output_ip_changed)
outgoing_ip_entry.bind('<Return>', self.focus_root)
outgoing_ip_entry.pack(side="left", padx=2)
ttk.Label(output_address_box, text=":", font=smallBoldFont).pack(side="left", pady=(0,6))
self.output_ip_text = output_ip_text
self.outgoing_ip_entry = outgoing_ip_entry
output_port_text = tk.StringVar()
#output_port_text.trace("w", lambda name, index, mode, output_port_text=output_port_text: self.output_port_changed(output_port_text))
outgoing_port_entry = ttk.Entry(output_address_box, width=5, textvariable=output_port_text, font=smallBoldFont, justify="center", validate="key", validatecommand=isPortCommand)
outgoing_port_entry.bind('<FocusOut>', self.output_port_changed)
outgoing_port_entry.bind('<Return>', self.focus_root)
outgoing_port_entry.pack(side="right", padx=2)
output_address_box.pack()
self.generateLine(output_box, 130)
ttk.Label(output_box, text="Outgoing Reply").pack()
output_box.pack(pady=15)
self.output_port_text = output_port_text
self.outgoing_port_entry = outgoing_port_entry
scene_box = ttk.Frame(left_side)
self.scene_file_text = tk.StringVar()
self.scene_file_text.set("None")
ttk.Label(scene_box, wraplength=210, font=mediumBoldFont, textvariable=self.scene_file_text).pack()
self.generateLine(scene_box, 140)
ttk.Label(scene_box, text="Loaded Configuration").pack()
scene_button_box = ttk.Frame(scene_box)
ttk.Button(scene_button_box, text='Reload', command=lambda: self.reload_scene_handler()).pack(side='left', padx=2)
ttk.Button(scene_button_box, text='Load from File', command=lambda: self.load_from_file_handler()).pack(side='right', padx=2)
scene_button_box.pack(pady=10)
scene_box.pack(pady=15)
docLabel = ttk.Label(left_side, text="Open Documentation", style="Link.TLabel", cursor="hand2", font=font.Font(underline=1))
docLabel.bind("<Button-1>", self.open_documentation)
docLabel.pack(side="bottom", anchor="s")
right_side = ttk.Frame(split)
self.log_text_box = ScrolledText(right_side, bg='lightgray', highlightthickness=10, highlightbackground='lightgray', highlightcolor='lightgray', borderwidth=0, wrap='word')
self.log_text_box.pack(side="left", expand=1, fill="both", padx=(5,0))
self.log_text_box.insert('insert', """
Welcome to the OSC Scene Controller.
You can send OSC messages in the form /scene/<key> or /midi-scene/<number> to trigger a scene change.
Upon receipt of a message, I'll automatically retransmit it and send out "/scene/<last_key> 0", where <last_key> is the key of the previously selected scene. If you want to receive these messages, set the Outgoing Reply IP address and port.
""")
self.log_text_box.configure(state='disabled')
left_side.pack(side="left", fill="y", padx=20)
right_side.pack(side="right", fill="both", expand=1)
split.grid(column=0, row=0, sticky='news')
self.grid_columnconfigure(0, weight=1)
self.grid_rowconfigure(0, weight=1)
class GracefulKiller:
def __init__(self, app):
self.app = app
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
def exit_gracefully(self, signum, frame):
self.app.destroy()
def macos_quit(self):
self.app.destroy()
if args.no_gui:
class CommandLineApp:
def __init__(self, args):
if args.scenes is None:
print("Fatal Error: The --scenes argument is required")
sys.exit(1)
parser = SceneParser()
parser.parseFromFile(args.scenes)
self.controller = OSCSceneController(parser)
self.input_port = args.input_port
# Load data from preferences file
if args.output_address is not None:
try:
output_ip_address = args.output_address.split(':')[0]
output_port = int(args.output_address.split(':')[1])
self.controller.setOutputAddress(output_ip_address, output_port)
self.log("Output port set, sending to {0}:{1}".format(output_ip_address, output_port))
except (IndexError, ValueError):
print("Fatal Error: Make sure the --output-address argument is in the same format as '192.168.1.1:9000'")
sys.exit(1)
self.log("Successfully loaded configuration from file: {0}".format(args.scenes))
def run(self):
port = self.input_port if args.input_port is not None else 8000
self.log("Starting OSC Server on port {}".format(port))
self.controller.start(port)
def stop(self):
self.log("Stopping OSC Server")
self.controller.stop()
def log(self, text):
print("{} - {}".format(datetime.datetime.now(), text))
if __name__ == "__main__":
if args.no_gui:
app = CommandLineApp(args)
app.run()
interrupt = False
while not interrupt:
try:
time.sleep(1)
if len(log_data) > 0:
[ app.log(text.strip()) for text in log_data if len(text) > 0 ]
log_data = []
except KeyboardInterrupt:
interrupt = True
app.stop()
else:
app = MyApp()
killer = GracefulKiller(app)
# Handle MacOS quit event
if (sys.platform == "darwin"):
app.createcommand('::tk::mac::Quit', killer.macos_quit)
app.title("OSC Scene Controller")
app.mainloop()
app.stop()
|
tts.py
|
import time
import pyttsx3
import queue
from threading import Thread
# using pyttsx3 for voices
# more info at: https://github.com/nateshmbhat/pyttsx3
class TalkBot:
def __init__(self, config=None):
if config is None:
config = {}
self.engine = pyttsx3.init()
self.engine.setProperty('voice', config.get('voice', 'english_rp+f3'))
self.engine.setProperty('rate', config.get('rate', 150))
self.kill_flag = False
self.queue = queue.Queue()
self.speaking_thread = Thread(target=self.read_msg, daemon=True)
self.speaking_thread.start()
def stop(self):
self.kill_flag = True
def add_msg_to_queue(self, msg):
self.queue.put(msg)
def read_msg(self):
while not self.kill_flag:
msg_to_speak = self.queue.get()
time.sleep(0.5)
self.engine.say(msg_to_speak)
self.engine.runAndWait()
self.queue.task_done()
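# Hedged usage sketch (not part of the original module): queue a couple of
# phrases and give the background thread time to speak them. The default voice
# name above is espeak-style and may not exist on every system, so a different
# voice/rate can be passed through the config dict.
def _example_talkbot_usage():
    bot = TalkBot({'rate': 170})
    bot.add_msg_to_queue("Hello from TalkBot")
    bot.add_msg_to_queue("Messages are spoken one at a time from the queue")
    time.sleep(8)  # the daemon thread drains the queue in the background
    bot.stop()     # sets kill_flag; the speaking thread is a daemon, so it won't block exit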
|
server.py
|
import os
import urllib
import posixpath
from abc import ABCMeta, abstractmethod
from threading import Thread
from SocketServer import ThreadingTCPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
from ModExternalMinimap.lib.websocket_server import WebsocketServer
class ConcurrentServer(object):
__metaclass__ = ABCMeta
def __init__(self, host='', port=8000):
self.host = host
self.port = port
self.__thread = None
def start(self):
if self.__thread is not None:
return
self.__thread = Thread(target=self._run_function)
self.__thread.start()
def stop(self):
self._close_function()
self.__thread.join()
self.__thread = None
@abstractmethod
def _run_function(self):
pass
@abstractmethod
def _close_function(self):
pass
class ConcurrentHTTPServer(ConcurrentServer):
def __init__(self, host='', port=8000, directory='.'):
super(ConcurrentHTTPServer, self).__init__(host=host, port=port)
class RequestHandler(SimpleHTTPRequestHandler):
def translate_path(self, path):
path = path.split('?', 1)[0]
path = path.split('#', 1)[0]
trailing_slash = path.rstrip().endswith('/')
path = posixpath.normpath(urllib.unquote(path))
words = path.split('/')
words = filter(None, words)
path = directory # patch SimpleHTTPRequestHandler to use different directory than working dir
for word in words:
if os.path.dirname(word) or word in (os.curdir, os.pardir):
continue
path = os.path.join(path, word)
if trailing_slash:
path += '/'
return path
self._request_handler = RequestHandler
self._server = None
def _run_function(self):
self._server = ThreadingTCPServer((self.host, self.port), self._request_handler)
self._server.daemon_threads = True
self._server.serve_forever()
def _close_function(self):
self._server.shutdown()
self._server.server_close()
self._server = None
class ConcurrentWebSocketServer(ConcurrentServer):
def __init__(self, host='', port=8001, allowed_origins=None):
super(ConcurrentWebSocketServer, self).__init__(host=host, port=port)
self._server = None
self._allowed_origins = allowed_origins
def _run_function(self):
self._server = WebsocketServer(host=self.host, port=self.port)
self._server.daemon_threads = True
self._server.set_fn_new_client(lambda c, _: self.on_client_connect(c))
self._server.set_fn_client_left(lambda c, _: self.on_client_disconnect(c))
self._server.set_fn_message_received(lambda c, _, m: self.on_message(c, m))
self._server.set_fn_allow_connection(self.allow_connection)
self._server.serve_forever()
def _close_function(self):
self._server.shutdown()
self._server.server_close()
self._server = None
def send_message(self, client, message):
if self._server:
self._server.send_message(client, message)
def broadcast(self, message):
if self._server:
self._server.send_message_to_all(message)
def on_client_connect(self, client):
pass
def on_client_disconnect(self, client):
pass
def on_message(self, client, message):
pass
def allow_connection(self, origin):
return self._allowed_origins is None or origin is None or origin in self._allowed_origins
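# Hedged usage sketch (not part of the original module): serve static files over
# HTTP and echo websocket messages back to the sender. The ports and directory
# are illustrative, and nothing here is invoked.
class _EchoWebSocketServer(ConcurrentWebSocketServer):
    def on_message(self, client, message):
        self.send_message(client, message)

def _example_servers():
    http_server = ConcurrentHTTPServer(host='', port=8000, directory='.')
    ws_server = _EchoWebSocketServer(host='', port=8001, allowed_origins=None)
    http_server.start()
    ws_server.start()
    # ... run until done, then tear both down ...
    ws_server.stop()
    http_server.stop()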
|
runner.py
|
# -*- coding: utf-8 -*-
#
# Finite State Machine
#
# Written in 2021 by Moky <albert.moky@gmail.com>
#
# ==============================================================================
# MIT License
#
# Copyright (c) 2021 Albert Moky
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
import time
from abc import ABC, abstractmethod
from threading import Thread
class Processor(ABC):
@abstractmethod
def process(self) -> bool:
"""
Do the job
:return: False on nothing to do
"""
raise NotImplementedError
class Handler(ABC):
@abstractmethod
def setup(self):
""" Prepare for Handling """
raise NotImplementedError
@abstractmethod
def handle(self):
""" Handling run loop """
raise NotImplementedError
@abstractmethod
def finish(self):
""" Cleanup after handled """
raise NotImplementedError
class Runnable(ABC):
@abstractmethod
def run(self):
""" Run in a thread """
raise NotImplementedError
class Runner(Runnable, Handler, Processor, ABC):
"""
Runner
~~~~~~
@abstract method:
- process()
"""
def __init__(self):
super().__init__()
self.__running = False
@property
def running(self) -> bool:
return self.__running
def stop(self):
self.__running = False
# Override
def run(self):
self.setup()
try:
self.handle()
finally:
self.finish()
# Override
def setup(self):
self.__running = True
# Override
def handle(self):
while self.running:
if not self.process():
self._idle()
# Override
def finish(self):
self.__running = False
# noinspection PyMethodMayBeStatic
def _idle(self):
time.sleep(0.125)
class Daemon:
def __init__(self, target, daemonic: bool = True):
super().__init__()
self.__target = target
self.__daemon = daemonic
self.__thread = None
self.__timeout = 1.0
@property
def timeout(self) -> float:
return self.__timeout
@timeout.setter
def timeout(self, waiting: float):
self.__timeout = waiting
@property
def alive(self) -> bool:
thr = self.__thread
return thr is not None and thr.is_alive()
def start(self) -> Thread:
self.__force_stop()
thr = Thread(target=self.__target, daemon=self.__daemon)
self.__thread = thr
thr.start()
return thr
def __force_stop(self):
thr: Thread = self.__thread
if thr is not None:
self.__thread = None
try:
thr.join(timeout=self.timeout)
except RuntimeError as error:
print('[ERROR] failed to join thread: %s' % error)
def stop(self):
self.__force_stop()
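# Hedged usage sketch (not part of the original module): a Runner only has to
# implement process(); Daemon then drives Runner.run() on a background thread.
# The item list and sleep below are illustrative.
class _PrintRunner(Runner):

    def __init__(self, items):
        super().__init__()
        self.__items = list(items)

    # Override
    def process(self) -> bool:
        if len(self.__items) == 0:
            return False  # nothing to do -> Runner._idle() sleeps briefly
        print(self.__items.pop(0))
        return True

def _example_runner_with_daemon():
    runner = _PrintRunner(['a', 'b', 'c'])
    daemon = Daemon(target=runner.run)
    daemon.start()      # spawns the thread and calls runner.run()
    time.sleep(1.0)     # let it drain the list
    runner.stop()       # the handle() loop exits on its next iteration
    daemon.stop()       # joins the thread (bounded by daemon.timeout)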
|
nb_inventory.py
|
# Copyright (c) 2018 Remy Leone
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
name: nb_inventory
plugin_type: inventory
author:
- Remy Leone (@sieben)
- Anthony Ruhier (@Anthony25)
- Nikhil Singh Baliyan (@nikkytub)
- Sander Steffann (@steffann)
- Douglas Heriot (@DouglasHeriot)
short_description: NetBox inventory source
description:
- Get inventory hosts from NetBox
extends_documentation_fragment:
- constructed
- inventory_cache
options:
plugin:
description: token that ensures this is a source file for the 'netbox' plugin.
required: True
choices: ['netbox.netbox.nb_inventory']
api_endpoint:
description: Endpoint of the NetBox API
required: True
env:
- name: NETBOX_API
validate_certs:
description:
- Allows connection when SSL certificates are not valid. Set to C(false) when certificates are not trusted.
default: True
type: boolean
cert:
description:
- Certificate path
default: False
key:
description:
- Certificate key path
default: False
ca_path:
description:
- CA path
default: False
follow_redirects:
description:
- Determine how redirects are followed.
- By default, I(follow_redirects) is set to C(urllib2), which uses urllib2's default behavior.
default: urllib2
choices: ['urllib2', 'all', 'yes', 'safe', 'none']
config_context:
description:
- If True, it adds config_context in host vars.
- Config-context enables the association of arbitrary data to devices and virtual machines grouped by
region, site, role, platform, and/or tenant. Please check official netbox docs for more info.
default: False
type: boolean
flatten_config_context:
description:
- If I(config_context) is enabled, by default it's added as a host var named config_context.
- If flatten_config_context is set to True, the config context variables will be added directly to the host instead.
default: False
type: boolean
version_added: "0.2.1"
flatten_local_context_data:
description:
- If I(local_context_data) is enabled, by default it's added as a host var named local_context_data.
- If flatten_local_context_data is set to True, the config context variables will be added directly to the host instead.
default: False
type: boolean
version_added: "0.3.0"
flatten_custom_fields:
description:
- By default, host custom fields are added as a dictionary host var named custom_fields.
- If flatten_custom_fields is set to True, the fields will be added directly to the host instead.
default: False
type: boolean
version_added: "0.2.1"
token:
required: False
description:
- NetBox API token to be able to read against NetBox.
- This may not be required depending on the NetBox setup.
env:
# in order of precedence
- name: NETBOX_TOKEN
- name: NETBOX_API_KEY
plurals:
description:
- If True, all host vars are contained inside single-element arrays for legacy compatibility with old versions of this plugin.
- Group names will be plural (i.e. "sites_mysite" instead of "site_mysite")
- The choices of I(group_by) will be changed by this option.
default: True
type: boolean
version_added: "0.2.1"
interfaces:
description:
- If True, it adds the device or virtual machine interface information in host vars.
default: False
type: boolean
version_added: "0.1.7"
services:
description:
- If True, it adds the device or virtual machine services information in host vars.
default: True
type: boolean
version_added: "0.2.0"
fetch_all:
description:
- By default, fetching interfaces and services will get all of the contents of NetBox regardless of query_filters applied to devices and VMs.
- When set to False, separate requests will be made fetching interfaces, services, and IP addresses for each device_id and virtual_machine_id.
- If you are using the various query_filters options to reduce the number of devices, you may find querying NetBox faster with fetch_all set to False.
- For efficiency, when False, these requests will be batched, for example /api/dcim/interfaces?limit=0&device_id=1&device_id=2&device_id=3
- These GET request URIs can become quite large for a large number of devices. If you run into HTTP 414 errors, you can adjust the max_uri_length option to suit your web server.
default: True
type: boolean
version_added: "0.2.1"
group_by:
description:
- Keys used to create groups. The I(plurals) option controls which of these are valid.
- I(rack_group) is supported on NetBox versions 2.10 or lower only
- I(location) is supported on NetBox versions 2.11 or higher only
type: list
choices:
- sites
- site
- location
- tenants
- tenant
- racks
- rack
- rack_group
- rack_role
- tags
- tag
- device_roles
- role
- device_types
- device_type
- manufacturers
- manufacturer
- platforms
- platform
- region
- cluster
- cluster_type
- cluster_group
- is_virtual
- services
- status
default: []
group_names_raw:
description: Will not add the group_by choice name to the group names
default: False
type: boolean
version_added: "0.2.0"
query_filters:
description: List of parameters passed to the query string for both devices and VMs (Multiple values may be separated by commas)
type: list
default: []
device_query_filters:
description: List of parameters passed to the query string for devices (Multiple values may be separated by commas)
type: list
default: []
vm_query_filters:
description: List of parameters passed to the query string for VMs (Multiple values may be separated by commas)
type: list
default: []
timeout:
description: Timeout for NetBox requests in seconds
type: int
default: 60
max_uri_length:
description:
- When fetch_all is False, GET requests to NetBox may become quite long and return a HTTP 414 (URI Too Long).
- You can adjust this option to be smaller to avoid 414 errors, or larger for a reduced number of requests.
type: int
default: 4000
version_added: "0.2.1"
virtual_chassis_name:
description:
- When a device is part of a virtual chassis, use the virtual chassis name as the Ansible inventory hostname.
- The host var values will be from the virtual chassis master.
type: boolean
default: False
dns_name:
description:
- Force IP Addresses to be fetched so that the dns_name for the primary_ip of each device or VM is set as a host_var.
- Setting interfaces will also fetch IP addresses and the dns_name host_var will be set.
type: boolean
default: False
ansible_host_dns_name:
description:
- If True, sets DNS Name (fetched from primary_ip) to be used in ansible_host variable, instead of IP Address.
type: boolean
default: False
compose:
description: List of custom ansible host vars to create from the device object fetched from NetBox
default: {}
type: dict
"""
EXAMPLES = """
# netbox_inventory.yml file in YAML format
# Example command line: ansible-inventory -v --list -i netbox_inventory.yml
plugin: netbox.netbox.nb_inventory
api_endpoint: http://localhost:8000
validate_certs: True
config_context: False
group_by:
- device_roles
query_filters:
- role: network-edge-router
device_query_filters:
- has_primary_ip: 'true'
# has_primary_ip is a useful way to filter out patch panels and other passive devices
# Query filters are passed directly as an argument to the fetching queries.
# You can repeat tags in the query string.
query_filters:
- role: server
- tag: web
- tag: production
# See the NetBox documentation at https://netbox.readthedocs.io/en/stable/rest-api/overview/
# the query_filters work as a logical **OR**
#
# Prefix any custom fields with cf_ and pass the field value with the regular NetBox query string
query_filters:
- cf_foo: bar
# NetBox inventory plugin also supports Constructable semantics
# You can fill your hosts vars using the compose option:
plugin: netbox.netbox.nb_inventory
compose:
foo: last_updated
bar: display_name
nested_variable: rack.display_name
# You can use keyed_groups to group on properties of devices or VMs.
# NOTE: It's only possible to key off direct items on the device/VM objects.
plugin: netbox.netbox.nb_inventory
keyed_groups:
- prefix: status
key: status.value
# For use in Ansible Tower (AWX), please see this blog from RedHat: https://www.ansible.com/blog/using-an-inventory-plugin-from-a-collection-in-ansible-tower
# The credential for NetBox will need to expose NETBOX_API and NETBOX_TOKEN as environment variables.
# Example Ansible Tower credential Input Configuration:
fields:
- id: NETBOX_API
type: string
label: NetBox Host URL
- id: NETBOX_TOKEN
type: string
label: NetBox API Token
secret: true
required:
- NETBOX_API
- NETBOX_TOKEN
# Example Ansible Tower credential Injector Configuration:
env:
NETBOX_API: '{{ NETBOX_API }}'
NETBOX_TOKEN: '{{ NETBOX_TOKEN }}'
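# Sketch (not from the upstream docs): fetch interfaces and services per device
# instead of everything at once, with a smaller max_uri_length to stay clear of
# HTTP 414 errors on long GET requests.
plugin: netbox.netbox.nb_inventory
api_endpoint: http://localhost:8000
interfaces: True
services: True
fetch_all: False
max_uri_length: 2000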
"""
import json
import uuid
import math
import os
from copy import deepcopy
from functools import partial
from sys import version as python_version
from threading import Thread
from typing import Iterable
from itertools import chain
from collections import defaultdict
from ipaddress import ip_interface
from packaging import specifiers, version
from ansible.constants import DEFAULT_LOCAL_TMP
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
from ansible.module_utils.ansible_release import __version__ as ansible_version
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_text, to_native
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib import error as urllib_error
from ansible.module_utils.six.moves.urllib.parse import urlencode
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
NAME = "netbox.netbox.nb_inventory"
def _fetch_information(self, url):
results = None
cache_key = self.get_cache_key(url)
# get the user's cache option to see if we should save the cache if it is changing
user_cache_setting = self.get_option("cache")
# read if the user has caching enabled and the cache isn't being refreshed
attempt_to_read_cache = user_cache_setting and self.use_cache
# attempt to read the cache if inventory isn't being refreshed and the user has caching enabled
if attempt_to_read_cache:
try:
results = self._cache[cache_key]
need_to_fetch = False
except KeyError:
# occurs if the cache_key is not in the cache or if the cache_key expired
# we need to fetch the URL now
need_to_fetch = True
else:
# not reading from cache so do fetch
need_to_fetch = True
if need_to_fetch:
self.display.v("Fetching: " + url)
try:
response = open_url(
url,
headers=self.headers,
timeout=self.timeout,
validate_certs=self.validate_certs,
follow_redirects=self.follow_redirects,
client_cert=self.cert,
client_key=self.key,
ca_path=self.ca_path,
)
except urllib_error.HTTPError as e:
"""This will return the response body when we encounter an error.
This is to help determine what might be the issue when encountering an error.
Please check issue #294 for more info.
"""
# Prevent inventory from failing completely if the token does not have the proper permissions for specific URLs
if e.code == 403:
self.display.display(
"Permission denied: {0}. This may impair functionality of the inventory plugin.".format(
url
),
color="red",
)
# Need to return mock response data that is empty to prevent any failures downstream
return {"results": [], "next": None}
raise AnsibleError(to_native(e.fp.read()))
try:
raw_data = to_text(response.read(), errors="surrogate_or_strict")
except UnicodeError:
raise AnsibleError(
"Incorrect encoding of fetched payload from NetBox API."
)
try:
results = json.loads(raw_data)
except ValueError:
raise AnsibleError("Incorrect JSON payload: %s" % raw_data)
# put result in cache if enabled
if user_cache_setting:
self._cache[cache_key] = results
return results
def get_resource_list(self, api_url):
"""Retrieves resource list from netbox API.
Returns:
A list of all resource from netbox API.
"""
if not api_url:
raise AnsibleError("Please check API URL in script configuration file.")
resources = []
# Handle pagination
while api_url:
api_output = self._fetch_information(api_url)
resources.extend(api_output["results"])
api_url = api_output["next"]
return resources
def get_resource_list_chunked(self, api_url, query_key, query_values):
# Make an API call for multiple specific IDs, like /api/ipam/ip-addresses?limit=0&device_id=1&device_id=2&device_id=3
# Drastically cuts down HTTP requests compared to 1 request per host, in the case where we don't want to fetch_all
# Make sure query_values is subscriptable
if not isinstance(query_values, list):
query_values = list(query_values)
def query_string(value, separator="&"):
return separator + query_key + "=" + str(value)
# Calculate how many queries we can do per API call to stay within max_url_length
largest_value = str(max(query_values, default=0)) # values are always id ints
length_per_value = len(query_string(largest_value))
chunk_size = math.floor((self.max_uri_length - len(api_url)) / length_per_value)
# Sanity check, for case where max_uri_length < (api_url + length_per_value)
if chunk_size < 1:
chunk_size = 1
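# For example (hypothetical numbers): with max_uri_length=4000, a 60-character
# api_url and 6-digit ids, length_per_value = len("&device_id=123456") = 17,
# so chunk_size = floor((4000 - 60) / 17) = 231 ids per request.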
if self.api_version in specifiers.SpecifierSet("~=2.6.0"):
# Issue netbox-community/netbox#3507 was fixed in v2.7.5
# If using NetBox v2.7.0-v2.7.4 will have to manually set max_uri_length to 0,
# but it's probably faster to keep fetch_all: True
# (You should really just upgrade your NetBox install)
chunk_size = 1
resources = []
for i in range(0, len(query_values), chunk_size):
chunk = query_values[i : i + chunk_size]
# process chunk of size <= chunk_size
url = api_url
for value in chunk:
url += query_string(value, "&" if "?" in url else "?")
resources.extend(self.get_resource_list(url))
return resources
@property
def group_extractors(self):
# List of group_by options and hostvars to extract
# Some keys are different depending on plurals option
extractors = {
"disk": self.extract_disk,
"memory": self.extract_memory,
"vcpus": self.extract_vcpus,
"status": self.extract_status,
"config_context": self.extract_config_context,
"local_context_data": self.extract_local_context_data,
"custom_fields": self.extract_custom_fields,
"region": self.extract_regions,
"cluster": self.extract_cluster,
"cluster_group": self.extract_cluster_group,
"cluster_type": self.extract_cluster_type,
"is_virtual": self.extract_is_virtual,
self._pluralize_group_by("site"): self.extract_site,
self._pluralize_group_by("tenant"): self.extract_tenant,
self._pluralize_group_by("rack"): self.extract_rack,
"rack_role": self.extract_rack_role,
self._pluralize_group_by("tag"): self.extract_tags,
self._pluralize_group_by("role"): self.extract_device_role,
self._pluralize_group_by("platform"): self.extract_platform,
self._pluralize_group_by("device_type"): self.extract_device_type,
self._pluralize_group_by("manufacturer"): self.extract_manufacturer,
}
# Locations were added in 2.11 replacing rack-groups.
if self.api_version >= version.parse("2.11"):
extractors.update(
{"location": self.extract_location,}
)
else:
extractors.update(
{"rack_group": self.extract_rack_group,}
)
if self.services:
extractors.update(
{"services": self.extract_services,}
)
if self.interfaces:
extractors.update(
{"interfaces": self.extract_interfaces,}
)
if self.interfaces or self.dns_name or self.ansible_host_dns_name:
extractors.update(
{"dns_name": self.extract_dns_name,}
)
return extractors
def _pluralize_group_by(self, group_by):
mapping = {
"site": "sites",
"tenant": "tenants",
"rack": "racks",
"tag": "tags",
"role": "device_roles",
"platform": "platforms",
"device_type": "device_types",
"manufacturer": "manufacturers",
}
if self.plurals:
mapped = mapping.get(group_by)
return mapped or group_by
else:
return group_by
def _pluralize(self, extracted_value):
# If plurals is enabled, wrap in a single-element list for backwards compatibility
if self.plurals:
return [extracted_value]
else:
return extracted_value
def _objects_array_following_parents(
self, initial_object_id, object_lookup, object_parent_lookup
):
objects = []
object_id = initial_object_id
# Keep looping until the object has no parent
while object_id is not None:
object_slug = object_lookup[object_id]
if object_slug in objects:
# Won't ever happen - defensively guard against infinite loop
break
objects.append(object_slug)
# Get the parent of this object
object_id = object_parent_lookup[object_id]
return objects
def extract_disk(self, host):
return host.get("disk")
def extract_vcpus(self, host):
return host.get("vcpus")
def extract_status(self, host):
return host["status"]
def extract_memory(self, host):
return host.get("memory")
def extract_platform(self, host):
try:
return self._pluralize(self.platforms_lookup[host["platform"]["id"]])
except Exception:
return
def extract_services(self, host):
try:
services_lookup = (
self.vm_services_lookup
if host["is_virtual"]
else self.device_services_lookup
)
return list(services_lookup[host["id"]].values())
except Exception:
return
def extract_device_type(self, host):
try:
return self._pluralize(self.device_types_lookup[host["device_type"]["id"]])
except Exception:
return
def extract_rack(self, host):
try:
return self._pluralize(self.racks_lookup[host["rack"]["id"]])
except Exception:
return
def extract_rack_group(self, host):
# A host may have a rack. A rack may have a rack_group. A rack_group may have a parent rack_group.
# Produce a list of rack_groups:
# - it will be empty if the device has no rack, or the rack has no rack_group
# - it will have 1 element if the rack's group has no parent
# - it will have multiple elements if the rack's group has a parent group
rack = host.get("rack", None)
if not isinstance(rack, dict):
# Device has no rack
return None
rack_id = rack.get("id", None)
if rack_id is None:
# Device has no rack
return None
return self._objects_array_following_parents(
initial_object_id=self.racks_group_lookup[rack_id],
object_lookup=self.rack_groups_lookup,
object_parent_lookup=self.rack_group_parent_lookup,
)
def extract_rack_role(self, host):
try:
return self.racks_role_lookup[host["rack"]["id"]]
except Exception:
return
def extract_site(self, host):
try:
return self._pluralize(self.sites_lookup[host["site"]["id"]])
except Exception:
return
def extract_tenant(self, host):
try:
return self._pluralize(self.tenants_lookup[host["tenant"]["id"]])
except Exception:
return
def extract_device_role(self, host):
try:
if "device_role" in host:
return self._pluralize(
self.device_roles_lookup[host["device_role"]["id"]]
)
elif "role" in host:
return self._pluralize(self.device_roles_lookup[host["role"]["id"]])
except Exception:
return
def extract_config_context(self, host):
try:
if self.flatten_config_context:
# Don't wrap in an array if we're about to flatten it to separate host vars
return host["config_context"]
else:
return self._pluralize(host["config_context"])
except Exception:
return
def extract_local_context_data(self, host):
try:
if self.flatten_local_context_data:
# Don't wrap in an array if we're about to flatten it to separate host vars
return host["local_context_data"]
else:
return self._pluralize(host["local_context_data"])
except Exception:
return
def extract_manufacturer(self, host):
try:
return self._pluralize(
self.manufacturers_lookup[host["device_type"]["manufacturer"]["id"]]
)
except Exception:
return
def extract_primary_ip(self, host):
try:
address = host["primary_ip"]["address"]
return str(ip_interface(address).ip)
except Exception:
return
def extract_primary_ip4(self, host):
try:
address = host["primary_ip4"]["address"]
return str(ip_interface(address).ip)
except Exception:
return
def extract_primary_ip6(self, host):
try:
address = host["primary_ip6"]["address"]
return str(ip_interface(address).ip)
except Exception:
return
def extract_tags(self, host):
try:
tag_zero = host["tags"][0]
# Check the type of the first element in the "tags" array.
# If a dictionary (NetBox >= 2.9), return an array of tags' slugs.
if isinstance(tag_zero, dict):
return list(sub["slug"] for sub in host["tags"])
# If a string (NetBox <= 2.8), return the original "tags" array.
elif isinstance(tag_zero, str):
return host["tags"]
# If the host has no tags, the lookup above raises and we fall through to return the original (empty) array.
except Exception:
return host["tags"]
def extract_interfaces(self, host):
try:
interfaces_lookup = (
self.vm_interfaces_lookup
if host["is_virtual"]
else self.device_interfaces_lookup
)
interfaces = deepcopy(list(interfaces_lookup[host["id"]].values()))
before_netbox_v29 = bool(self.ipaddresses_intf_lookup)
# Attach IP Addresses to their interface
for interface in interfaces:
if before_netbox_v29:
interface["ip_addresses"] = list(
self.ipaddresses_intf_lookup[interface["id"]].values()
)
else:
interface["ip_addresses"] = list(
self.vm_ipaddresses_intf_lookup[interface["id"]].values()
if host["is_virtual"]
else self.device_ipaddresses_intf_lookup[
interface["id"]
].values()
)
interface["tags"] = list(sub["slug"] for sub in interface["tags"])
return interfaces
except Exception:
return
def extract_custom_fields(self, host):
try:
return host["custom_fields"]
except Exception:
return
def extract_regions(self, host):
# A host may have a site. A site may have a region. A region may have a parent region.
# Produce a list of regions:
# - it will be empty if the device has no site, or the site has no region set
# - it will have 1 element if the site's region has no parent
# - it will have multiple elements if the site's region has a parent region
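# For example (hypothetical slugs): a site in region "row-1" whose parent region
# is "dc-east" (which itself has no parent) yields ["row-1", "dc-east"].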
site = host.get("site", None)
if not isinstance(site, dict):
# Device has no site
return []
site_id = site.get("id", None)
if site_id is None:
# Device has no site
return []
return self._objects_array_following_parents(
initial_object_id=self.sites_region_lookup[site_id],
object_lookup=self.regions_lookup,
object_parent_lookup=self.regions_parent_lookup,
)
def extract_location(self, host):
# A host may have a location. A location may have a parent location.
# Produce a list of locations:
# - it will be empty if the device has no location
# - it will have 1 element if the device's location has no parent
# - it will have multiple elements if the location has a parent location
try:
location_id = host["location"]["id"]
except (KeyError, TypeError):
# Device has no location
return []
return self._objects_array_following_parents(
initial_object_id=location_id,
object_lookup=self.locations_lookup,
object_parent_lookup=self.locations_parent_lookup,
)
def extract_cluster(self, host):
try:
# cluster does not have a slug
return host["cluster"]["name"]
except Exception:
return
def extract_cluster_group(self, host):
try:
return self.clusters_group_lookup[host["cluster"]["id"]]
except Exception:
return
def extract_cluster_type(self, host):
try:
return self.clusters_type_lookup[host["cluster"]["id"]]
except Exception:
return
def extract_is_virtual(self, host):
return host.get("is_virtual")
def extract_dns_name(self, host):
# No primary IP assigned
if not host.get("primary_ip"):
return None
before_netbox_v29 = bool(self.ipaddresses_lookup)
if before_netbox_v29:
ip_address = self.ipaddresses_lookup.get(host["primary_ip"]["id"])
else:
if host["is_virtual"]:
ip_address = self.vm_ipaddresses_lookup.get(host["primary_ip"]["id"])
else:
ip_address = self.device_ipaddresses_lookup.get(
host["primary_ip"]["id"]
)
# Don"t assign a host_var for empty dns_name
if ip_address.get("dns_name") == "":
return None
return ip_address.get("dns_name")
def refresh_platforms_lookup(self):
url = self.api_endpoint + "/api/dcim/platforms/?limit=0"
platforms = self.get_resource_list(api_url=url)
self.platforms_lookup = dict(
(platform["id"], platform["slug"]) for platform in platforms
)
def refresh_sites_lookup(self):
url = self.api_endpoint + "/api/dcim/sites/?limit=0"
sites = self.get_resource_list(api_url=url)
self.sites_lookup = dict((site["id"], site["slug"]) for site in sites)
def get_region_for_site(site):
# Will fail if site does not have a region defined in NetBox
try:
return (site["id"], site["region"]["id"])
except Exception:
return (site["id"], None)
# Dictionary of site id to region id
self.sites_region_lookup = dict(map(get_region_for_site, sites))
def refresh_regions_lookup(self):
url = self.api_endpoint + "/api/dcim/regions/?limit=0"
regions = self.get_resource_list(api_url=url)
self.regions_lookup = dict((region["id"], region["slug"]) for region in regions)
def get_region_parent(region):
# Will fail if region does not have a parent region
try:
return (region["id"], region["parent"]["id"])
except Exception:
return (region["id"], None)
# Dictionary of region id to parent region id
self.regions_parent_lookup = dict(
filter(lambda x: x is not None, map(get_region_parent, regions))
)
def refresh_locations_lookup(self):
# Locations were added in v2.11. Return empty lookups for previous versions.
if self.api_version < version.parse("2.11"):
return
url = self.api_endpoint + "/api/dcim/locations/?limit=0"
locations = self.get_resource_list(api_url=url)
self.locations_lookup = dict(
(location["id"], location["slug"]) for location in locations
)
def get_location_parent(location):
# Will fail if location does not have a parent location
try:
return (location["id"], location["parent"]["id"])
except Exception:
return (location["id"], None)
def get_location_site(location):
# Locations MUST be assigned to a site
return (location["id"], location["site"]["id"])
# Dictionary of location id to parent location id
self.locations_parent_lookup = dict(
filter(None, map(get_location_parent, locations))
)
# Location to site lookup
self.locations_site_lookup = dict(map(get_location_site, locations))
def refresh_tenants_lookup(self):
url = self.api_endpoint + "/api/tenancy/tenants/?limit=0"
tenants = self.get_resource_list(api_url=url)
self.tenants_lookup = dict((tenant["id"], tenant["slug"]) for tenant in tenants)
def refresh_racks_lookup(self):
url = self.api_endpoint + "/api/dcim/racks/?limit=0"
racks = self.get_resource_list(api_url=url)
self.racks_lookup = dict((rack["id"], rack["name"]) for rack in racks)
def get_group_for_rack(rack):
try:
return (rack["id"], rack["group"]["id"])
except Exception:
return (rack["id"], None)
def get_role_for_rack(rack):
try:
return (rack["id"], rack["role"]["slug"])
except Exception:
return (rack["id"], None)
self.racks_group_lookup = dict(map(get_group_for_rack, racks))
self.racks_role_lookup = dict(map(get_role_for_rack, racks))
def refresh_rack_groups_lookup(self):
# Locations were added in v2.11 replacing rack groups. Do nothing for 2.11+
if self.api_version >= version.parse("2.11"):
return
url = self.api_endpoint + "/api/dcim/rack-groups/?limit=0"
rack_groups = self.get_resource_list(api_url=url)
self.rack_groups_lookup = dict(
(rack_group["id"], rack_group["slug"]) for rack_group in rack_groups
)
def get_rack_group_parent(rack_group):
try:
return (rack_group["id"], rack_group["parent"]["id"])
except Exception:
return (rack_group["id"], None)
# Dictionary of rack group id to parent rack group id
self.rack_group_parent_lookup = dict(map(get_rack_group_parent, rack_groups))
def refresh_device_roles_lookup(self):
url = self.api_endpoint + "/api/dcim/device-roles/?limit=0"
device_roles = self.get_resource_list(api_url=url)
self.device_roles_lookup = dict(
(device_role["id"], device_role["slug"]) for device_role in device_roles
)
def refresh_device_types_lookup(self):
url = self.api_endpoint + "/api/dcim/device-types/?limit=0"
device_types = self.get_resource_list(api_url=url)
self.device_types_lookup = dict(
(device_type["id"], device_type["slug"]) for device_type in device_types
)
def refresh_manufacturers_lookup(self):
url = self.api_endpoint + "/api/dcim/manufacturers/?limit=0"
manufacturers = self.get_resource_list(api_url=url)
self.manufacturers_lookup = dict(
(manufacturer["id"], manufacturer["slug"]) for manufacturer in manufacturers
)
def refresh_clusters_lookup(self):
url = self.api_endpoint + "/api/virtualization/clusters/?limit=0"
clusters = self.get_resource_list(api_url=url)
def get_cluster_type(cluster):
# Will fail if cluster does not have a type (required property so should always be true)
try:
return (cluster["id"], cluster["type"]["slug"])
except Exception:
return (cluster["id"], None)
def get_cluster_group(cluster):
# Will fail if cluster does not have a group (group is optional)
try:
return (cluster["id"], cluster["group"]["slug"])
except Exception:
return (cluster["id"], None)
self.clusters_type_lookup = dict(map(get_cluster_type, clusters))
self.clusters_group_lookup = dict(map(get_cluster_group, clusters))
def refresh_services(self):
url = self.api_endpoint + "/api/ipam/services/?limit=0"
services = []
if self.fetch_all:
services = self.get_resource_list(url)
else:
device_services = self.get_resource_list_chunked(
api_url=url,
query_key="device_id",
query_values=self.devices_lookup.keys(),
)
vm_services = self.get_resource_list_chunked(
api_url=url,
query_key="virtual_machine_id",
query_values=self.vms_lookup.keys(),
)
services = chain(device_services, vm_services)
# Construct a dictionary of dictionaries, separately for devices and vms.
# Allows looking up services by device id or vm id
self.device_services_lookup = defaultdict(dict)
self.vm_services_lookup = defaultdict(dict)
for service in services:
service_id = service["id"]
if service.get("device"):
self.device_services_lookup[service["device"]["id"]][
service_id
] = service
if service.get("virtual_machine"):
self.vm_services_lookup[service["virtual_machine"]["id"]][
service_id
] = service
def refresh_interfaces(self):
url_device_interfaces = self.api_endpoint + "/api/dcim/interfaces/?limit=0"
url_vm_interfaces = (
self.api_endpoint + "/api/virtualization/interfaces/?limit=0"
)
device_interfaces = []
vm_interfaces = []
if self.fetch_all:
device_interfaces = self.get_resource_list(url_device_interfaces)
vm_interfaces = self.get_resource_list(url_vm_interfaces)
else:
device_interfaces = self.get_resource_list_chunked(
api_url=url_device_interfaces,
query_key="device_id",
query_values=self.devices_lookup.keys(),
)
vm_interfaces = self.get_resource_list_chunked(
api_url=url_vm_interfaces,
query_key="virtual_machine_id",
query_values=self.vms_lookup.keys(),
)
# Construct a dictionary of dictionaries, separately for devices and vms.
# For a given device id or vm id, get a lookup of interface id to interface
# This is because interfaces may be returned multiple times when querying for virtual chassis parent and child in separate queries
self.device_interfaces_lookup = defaultdict(dict)
self.vm_interfaces_lookup = defaultdict(dict)
# /dcim/interfaces gives count_ipaddresses per interface. /virtualization/interfaces does not
self.devices_with_ips = set()
for interface in device_interfaces:
interface_id = interface["id"]
device_id = interface["device"]["id"]
# Check if device_id is actually a device we've fetched, and was not filtered out by query_filters
if device_id not in self.devices_lookup:
continue
# Check if device_id is part of a virtual chassis
# If so, treat its interfaces as actually part of the master
device = self.devices_lookup[device_id]
virtual_chassis_master = self._get_host_virtual_chassis_master(device)
if virtual_chassis_master is not None:
device_id = virtual_chassis_master
self.device_interfaces_lookup[device_id][interface_id] = interface
# Keep track of what devices have interfaces with IPs, so if fetch_all is False we can avoid unnecessary queries
if interface["count_ipaddresses"] > 0:
self.devices_with_ips.add(device_id)
for interface in vm_interfaces:
interface_id = interface["id"]
vm_id = interface["virtual_machine"]["id"]
self.vm_interfaces_lookup[vm_id][interface_id] = interface
# Note: depends on the result of refresh_interfaces for self.devices_with_ips
def refresh_ipaddresses(self):
url = (
self.api_endpoint
+ "/api/ipam/ip-addresses/?limit=0&assigned_to_interface=true"
)
ipaddresses = []
if self.fetch_all:
ipaddresses = self.get_resource_list(url)
else:
device_ips = self.get_resource_list_chunked(
api_url=url,
query_key="device_id",
query_values=list(self.devices_with_ips),
)
vm_ips = self.get_resource_list_chunked(
api_url=url,
query_key="virtual_machine_id",
query_values=self.vms_lookup.keys(),
)
ipaddresses = chain(device_ips, vm_ips)
        # Construct a dictionary of dictionaries, to allow looking up ip addresses by interface id
# Note that interface ids share the same namespace for both devices and vms so this is a single dictionary
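        # Sketch of the resulting shape (hypothetical IDs, abridged fields):
        #   self.ipaddresses_intf_lookup[501] == {42: {"address": "10.0.0.5/24", ...}}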
self.ipaddresses_intf_lookup = defaultdict(dict)
# Construct a dictionary of the IP addresses themselves
self.ipaddresses_lookup = defaultdict(dict)
# NetBox v2.9 and onwards
self.vm_ipaddresses_intf_lookup = defaultdict(dict)
self.vm_ipaddresses_lookup = defaultdict(dict)
self.device_ipaddresses_intf_lookup = defaultdict(dict)
self.device_ipaddresses_lookup = defaultdict(dict)
for ipaddress in ipaddresses:
# As of NetBox v2.9 "assigned_object_x" replaces "interface"
if ipaddress.get("assigned_object_id"):
interface_id = ipaddress["assigned_object_id"]
ip_id = ipaddress["id"]
# We need to copy the ipaddress entry to preserve the original in case caching is used.
ipaddress_copy = ipaddress.copy()
if ipaddress["assigned_object_type"] == "virtualization.vminterface":
self.vm_ipaddresses_lookup[ip_id] = ipaddress_copy
self.vm_ipaddresses_intf_lookup[interface_id][
ip_id
] = ipaddress_copy
else:
self.device_ipaddresses_lookup[ip_id] = ipaddress_copy
self.device_ipaddresses_intf_lookup[interface_id][
ip_id
                    ] = ipaddress_copy
                # Remove "assigned_object_X" attributes, as that's redundant when ipaddress is added to an interface
del ipaddress_copy["assigned_object_id"]
del ipaddress_copy["assigned_object_type"]
del ipaddress_copy["assigned_object"]
continue
if not ipaddress.get("interface"):
continue
interface_id = ipaddress["interface"]["id"]
ip_id = ipaddress["id"]
# We need to copy the ipaddress entry to preserve the original in case caching is used.
ipaddress_copy = ipaddress.copy()
self.ipaddresses_intf_lookup[interface_id][ip_id] = ipaddress_copy
self.ipaddresses_lookup[ip_id] = ipaddress_copy
# Remove "interface" attribute, as that's redundant when ipaddress is added to an interface
del ipaddress_copy["interface"]
@property
def lookup_processes(self):
lookups = [
self.refresh_sites_lookup,
self.refresh_regions_lookup,
self.refresh_locations_lookup,
self.refresh_tenants_lookup,
self.refresh_racks_lookup,
self.refresh_rack_groups_lookup,
self.refresh_device_roles_lookup,
self.refresh_platforms_lookup,
self.refresh_device_types_lookup,
self.refresh_manufacturers_lookup,
self.refresh_clusters_lookup,
]
if self.interfaces:
lookups.append(self.refresh_interfaces)
if self.services:
lookups.append(self.refresh_services)
return lookups
@property
def lookup_processes_secondary(self):
lookups = []
# IP addresses are needed for either interfaces or dns_name options
if self.interfaces or self.dns_name or self.ansible_host_dns_name:
lookups.append(self.refresh_ipaddresses)
return lookups
def refresh_lookups(self, lookups):
# Exceptions that occur in threads by default are printed to stderr, and ignored by the main thread
# They need to be caught, and raised in the main thread to prevent further execution of this plugin
thread_exceptions = []
def handle_thread_exceptions(lookup):
def wrapper():
try:
lookup()
except Exception as e:
# Save for the main-thread to re-raise
# Also continue to raise on this thread, so the default handler can run to print to stderr
thread_exceptions.append(e)
raise e
return wrapper
thread_list = []
try:
for lookup in lookups:
thread = Thread(target=handle_thread_exceptions(lookup))
thread_list.append(thread)
thread.start()
for thread in thread_list:
thread.join()
# Wait till we've joined all threads before raising any exceptions
for exception in thread_exceptions:
raise exception
finally:
# Avoid retain cycles
thread_exceptions = None
def fetch_api_docs(self):
try:
status = self._fetch_information(self.api_endpoint + "/api/status")
netbox_api_version = ".".join(status["netbox-version"].split(".")[:2])
        except Exception:
netbox_api_version = 0
tmp_dir = os.path.split(DEFAULT_LOCAL_TMP)[0]
tmp_file = os.path.join(tmp_dir, "netbox_api_dump.json")
try:
with open(tmp_file) as file:
openapi = json.load(file)
        except Exception:
openapi = {}
cached_api_version = openapi.get("info", {}).get("version")
if netbox_api_version != cached_api_version:
openapi = self._fetch_information(
self.api_endpoint + "/api/docs/?format=openapi"
)
with open(tmp_file, "w") as file:
json.dump(openapi, file)
self.api_version = version.parse(openapi["info"]["version"])
self.allowed_device_query_parameters = [
p["name"] for p in openapi["paths"]["/dcim/devices/"]["get"]["parameters"]
]
self.allowed_vm_query_parameters = [
p["name"]
for p in openapi["paths"]["/virtualization/virtual-machines/"]["get"][
"parameters"
]
]
def validate_query_parameter(self, parameter, allowed_query_parameters):
if not (isinstance(parameter, dict) and len(parameter) == 1):
self.display.warning(
"Warning query parameters %s not a dict with a single key." % parameter
)
return None
k = tuple(parameter.keys())[0]
v = tuple(parameter.values())[0]
if not (k in allowed_query_parameters or k.startswith("cf_")):
msg = "Warning: %s not in %s or starting with cf (Custom field)" % (
k,
allowed_query_parameters,
)
self.display.warning(msg=msg)
return None
return k, v
def filter_query_parameters(self, parameters, allowed_query_parameters):
return filter(
lambda parameter: parameter is not None,
# For each element of query_filters, test if it's allowed
map(
# Create a partial function with the device-specific list of query parameters
partial(
self.validate_query_parameter,
allowed_query_parameters=allowed_query_parameters,
),
parameters,
),
)
def refresh_url(self):
device_query_parameters = [("limit", 0)]
vm_query_parameters = [("limit", 0)]
device_url = self.api_endpoint + "/api/dcim/devices/?"
vm_url = self.api_endpoint + "/api/virtualization/virtual-machines/?"
        # Add query_filters to both devices and vms query, if they're valid
if isinstance(self.query_filters, Iterable):
device_query_parameters.extend(
self.filter_query_parameters(
self.query_filters, self.allowed_device_query_parameters
)
)
vm_query_parameters.extend(
self.filter_query_parameters(
self.query_filters, self.allowed_vm_query_parameters
)
)
if isinstance(self.device_query_filters, Iterable):
device_query_parameters.extend(
self.filter_query_parameters(
self.device_query_filters, self.allowed_device_query_parameters
)
)
if isinstance(self.vm_query_filters, Iterable):
vm_query_parameters.extend(
self.filter_query_parameters(
self.vm_query_filters, self.allowed_vm_query_parameters
)
)
# When query_filters is Iterable, and is not empty:
# - If none of the filters are valid for devices, do not fetch any devices
# - If none of the filters are valid for VMs, do not fetch any VMs
# If either device_query_filters or vm_query_filters are set,
# device_query_parameters and vm_query_parameters will have > 1 element so will continue to be requested
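        # Illustrative example (assuming "role" is an allowed device filter): query_filters = [{"role": "leaf"}]
        # produces .../api/dcim/devices/?limit=0&role=leaf; if no filter were valid for VMs,
        # vm_query_parameters would stay at its single "limit" element and vm_url would be set to None below.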
if self.query_filters and isinstance(self.query_filters, Iterable):
if len(device_query_parameters) <= 1:
device_url = None
if len(vm_query_parameters) <= 1:
vm_url = None
# Append the parameters to the URLs
if device_url:
device_url = device_url + urlencode(device_query_parameters)
if vm_url:
vm_url = vm_url + urlencode(vm_query_parameters)
# Exclude config_context if not required
if not self.config_context:
if device_url:
device_url = device_url + "&exclude=config_context"
if vm_url:
vm_url = vm_url + "&exclude=config_context"
return device_url, vm_url
def fetch_hosts(self):
device_url, vm_url = self.refresh_url()
self.devices_list = []
self.vms_list = []
if device_url:
self.devices_list = self.get_resource_list(device_url)
if vm_url:
self.vms_list = self.get_resource_list(vm_url)
# Allow looking up devices/vms by their ids
self.devices_lookup = {device["id"]: device for device in self.devices_list}
self.vms_lookup = {vm["id"]: vm for vm in self.vms_list}
# There's nothing that explicitly says if a host is virtual or not - add in a new field
for host in self.devices_list:
host["is_virtual"] = False
for host in self.vms_list:
host["is_virtual"] = True
def extract_name(self, host):
        # A host in an Ansible inventory requires a hostname.
        # name is a unique but optional attribute for a device in NetBox
        # We default to a UUID for the hostname in case the name is not set in NetBox
# Use virtual chassis name if set by the user.
if self.virtual_chassis_name and self._get_host_virtual_chassis_master(host):
return host["virtual_chassis"]["name"] or str(uuid.uuid4())
else:
return host["name"] or str(uuid.uuid4())
def generate_group_name(self, grouping, group):
# Check for special case - if group is a boolean, just return grouping name instead
# eg. "is_virtual" - returns true for VMs, should put them in a group named "is_virtual", not "is_virtual_True"
if isinstance(group, bool):
if group:
return grouping
else:
# Don't create the inverse group
return None
# Special case. Extract name from service, which is a hash.
if grouping == "services":
group = group["name"]
grouping = "service"
if grouping == "status":
group = group["value"]
if self.group_names_raw:
return group
else:
return "_".join([grouping, group])
def add_host_to_groups(self, host, hostname):
site_group_by = self._pluralize_group_by("site")
for grouping in self.group_by:
# Don't handle regions here since no hosts are ever added to region groups
# Sites and locations are also specially handled in the main()
if grouping in ["region", site_group_by, "location"]:
continue
if grouping not in self.group_extractors:
raise AnsibleError(
'group_by option "%s" is not valid. Check group_by documentation or check the plurals option. It can determine what group_by options are valid.'
% grouping
)
groups_for_host = self.group_extractors[grouping](host)
if not groups_for_host:
continue
# Make groups_for_host a list if it isn't already
if not isinstance(groups_for_host, list):
groups_for_host = [groups_for_host]
for group_for_host in groups_for_host:
group_name = self.generate_group_name(grouping, group_for_host)
if not group_name:
continue
# Group names may be transformed by the ansible TRANSFORM_INVALID_GROUP_CHARS setting
# add_group returns the actual group name used
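                # e.g. a name like "rack_A-01" may come back as "rack_A_01", depending on that Ansible setting.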
transformed_group_name = self.inventory.add_group(group=group_name)
self.inventory.add_host(group=transformed_group_name, host=hostname)
def _add_site_groups(self):
# Map site id to transformed group names
self.site_group_names = dict()
for site_id, site_name in self.sites_lookup.items():
site_group_name = self.generate_group_name(
self._pluralize_group_by("site"), site_name
)
# Add the site group to get its transformed name
site_transformed_group_name = self.inventory.add_group(
group=site_group_name
)
self.site_group_names[site_id] = site_transformed_group_name
def _add_region_groups(self):
# Mapping of region id to group name
region_transformed_group_names = self._setup_nested_groups(
"region", self.regions_lookup, self.regions_parent_lookup
)
# Add site groups as children of region groups
for site_id in self.sites_lookup:
region_id = self.sites_region_lookup.get(site_id, None)
if region_id is None:
continue
self.inventory.add_child(
region_transformed_group_names[region_id],
self.site_group_names[site_id],
)
def _add_location_groups(self):
# Mapping of location id to group name
self.location_group_names = self._setup_nested_groups(
"location", self.locations_lookup, self.locations_parent_lookup
)
# Add location to site groups as children
for location_id, location_slug in self.locations_lookup.items():
if self.locations_parent_lookup.get(location_id, None):
# Only top level locations should be children of sites
continue
site_transformed_group_name = self.site_group_names[
self.locations_site_lookup[location_id]
]
self.inventory.add_child(
site_transformed_group_name, self.location_group_names[location_id]
)
def _setup_nested_groups(self, group, lookup, parent_lookup):
# Mapping of id to group name
transformed_group_names = dict()
# Create groups for each object
for obj_id in lookup:
group_name = self.generate_group_name(group, lookup[obj_id])
transformed_group_names[obj_id] = self.inventory.add_group(group=group_name)
# Now that all groups exist, add relationships between them
for obj_id in lookup:
group_name = transformed_group_names[obj_id]
parent_id = parent_lookup.get(obj_id, None)
if parent_id is not None and parent_id in transformed_group_names:
parent_name = transformed_group_names[parent_id]
self.inventory.add_child(parent_name, group_name)
return transformed_group_names
def _fill_host_variables(self, host, hostname):
extracted_primary_ip = self.extract_primary_ip(host=host)
if extracted_primary_ip:
self.inventory.set_variable(hostname, "ansible_host", extracted_primary_ip)
if self.ansible_host_dns_name:
extracted_dns_name = self.extract_dns_name(host=host)
if extracted_dns_name:
self.inventory.set_variable(
hostname, "ansible_host", extracted_dns_name
)
extracted_primary_ip4 = self.extract_primary_ip4(host=host)
if extracted_primary_ip4:
self.inventory.set_variable(hostname, "primary_ip4", extracted_primary_ip4)
extracted_primary_ip6 = self.extract_primary_ip6(host=host)
if extracted_primary_ip6:
self.inventory.set_variable(hostname, "primary_ip6", extracted_primary_ip6)
for attribute, extractor in self.group_extractors.items():
extracted_value = extractor(host)
            # Compare with None rather than truthiness - allow empty arrays, etc. to be host vars
if extracted_value is None:
continue
# Special case - all group_by options are single strings, but tag is a list of tags
# Keep the groups named singular "tag_sometag", but host attribute should be "tags":["sometag", "someothertag"]
if attribute == "tag":
attribute = "tags"
if attribute == "region":
attribute = "regions"
if attribute == "location":
attribute = "locations"
if attribute == "rack_group":
attribute = "rack_groups"
# Flatten the dict into separate host vars, if enabled
if isinstance(extracted_value, dict) and (
(attribute == "config_context" and self.flatten_config_context)
or (attribute == "custom_fields" and self.flatten_custom_fields)
or (
attribute == "local_context_data"
and self.flatten_local_context_data
)
):
for key, value in extracted_value.items():
self.inventory.set_variable(hostname, key, value)
else:
self.inventory.set_variable(hostname, attribute, extracted_value)
def _get_host_virtual_chassis_master(self, host):
virtual_chassis = host.get("virtual_chassis", None)
if not virtual_chassis:
return None
master = virtual_chassis.get("master", None)
if not master:
return None
return master.get("id", None)
def main(self):
# Get info about the API - version, allowed query parameters
self.fetch_api_docs()
self.fetch_hosts()
# Interface, and Service lookup will depend on hosts, if option fetch_all is false
self.refresh_lookups(self.lookup_processes)
# Looking up IP Addresses depends on the result of interfaces count_ipaddresses field
# - can skip any device/vm without any IPs
self.refresh_lookups(self.lookup_processes_secondary)
# If we're grouping by regions, hosts are not added to region groups
# If we're grouping by locations, hosts may be added to the site or location
# - the site groups are added as sub-groups of regions
# - the location groups are added as sub-groups of sites
# So, we need to make sure we're also grouping by sites if regions or locations are enabled
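        # Illustrative hierarchy (hypothetical names): region_emea > site_ams01 > location_row1 > hosts;
        # hosts are attached at the site or location level, never directly to a region group.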
site_group_by = self._pluralize_group_by("site")
if (
site_group_by in self.group_by
or "location" in self.group_by
or "region" in self.group_by
):
self._add_site_groups()
# Create groups for locations. Will be a part of site groups.
if "location" in self.group_by and self.api_version >= version.parse("2.11"):
self._add_location_groups()
# Create groups for regions, containing the site groups
if "region" in self.group_by:
self._add_region_groups()
for host in chain(self.devices_list, self.vms_list):
virtual_chassis_master = self._get_host_virtual_chassis_master(host)
if (
virtual_chassis_master is not None
and virtual_chassis_master != host["id"]
):
# Device is part of a virtual chassis, but is not the master
continue
hostname = self.extract_name(host=host)
self.inventory.add_host(host=hostname)
self._fill_host_variables(host=host, hostname=hostname)
strict = self.get_option("strict")
# Composed variables
self._set_composite_vars(
self.get_option("compose"), host, hostname, strict=strict
)
# Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
self._add_host_to_composed_groups(
self.get_option("groups"), host, hostname, strict=strict
)
# Create groups based on variable values and add the corresponding hosts to it
self._add_host_to_keyed_groups(
self.get_option("keyed_groups"), host, hostname, strict=strict
)
self.add_host_to_groups(host=host, hostname=hostname)
# Special processing for sites and locations as those groups were already created
if getattr(self, "location_group_names", None) and host.get("location"):
# Add host to location group when host is assigned to the location
self.inventory.add_host(
group=self.location_group_names[host["location"]["id"]],
host=hostname,
)
elif getattr(self, "site_group_names", None) and host.get("site"):
# Add host to site group when host is NOT assigned to a location
self.inventory.add_host(
group=self.site_group_names[host["site"]["id"]], host=hostname,
)
def parse(self, inventory, loader, path, cache=True):
super(InventoryModule, self).parse(inventory, loader, path)
self._read_config_data(path=path)
self.use_cache = cache
# NetBox access
token = self.get_option("token")
# Handle extra "/" from api_endpoint configuration and trim if necessary, see PR#49943
self.api_endpoint = self.get_option("api_endpoint").strip("/")
self.timeout = self.get_option("timeout")
self.max_uri_length = self.get_option("max_uri_length")
self.validate_certs = self.get_option("validate_certs")
self.follow_redirects = self.get_option("follow_redirects")
self.config_context = self.get_option("config_context")
self.flatten_config_context = self.get_option("flatten_config_context")
self.flatten_local_context_data = self.get_option("flatten_local_context_data")
self.flatten_custom_fields = self.get_option("flatten_custom_fields")
self.plurals = self.get_option("plurals")
self.interfaces = self.get_option("interfaces")
self.services = self.get_option("services")
self.fetch_all = self.get_option("fetch_all")
self.headers = {
"User-Agent": "ansible %s Python %s"
% (ansible_version, python_version.split(" ")[0]),
"Content-type": "application/json",
}
self.cert = self.get_option("cert")
self.key = self.get_option("key")
self.ca_path = self.get_option("ca_path")
if token:
self.headers.update({"Authorization": "Token %s" % token})
# Filter and group_by options
self.group_by = self.get_option("group_by")
self.group_names_raw = self.get_option("group_names_raw")
self.query_filters = self.get_option("query_filters")
self.device_query_filters = self.get_option("device_query_filters")
self.vm_query_filters = self.get_option("vm_query_filters")
self.virtual_chassis_name = self.get_option("virtual_chassis_name")
self.dns_name = self.get_option("dns_name")
self.ansible_host_dns_name = self.get_option("ansible_host_dns_name")
self.main()
|
qt_visualizer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Visualize the drone position using SPIRIT.
"""
from __future__ import division
from collections import deque
import os
import time
import threading
import numpy as np
from OpenGL import GL, GLU, GLUT
from PyQt5 import QtGui
import pygame as pg
import rospkg
import rospy
from geometry_msgs.msg import PoseStamped
from sensor_msgs.msg import Image
from std_msgs.msg import Bool
from helpers import Pose, Fov, Quat, d2r, unit_vector
from opengl_helpers import (gl_font, gl_flag, gl_ortho, gl_primitive,
new_matrix, new_state, Shape)
os.chdir(rospkg.RosPack().get_path("spirit"))
# Convenience
gl = GL
glu = GLU
glut = GLUT
class Drone(Shape):
"""
A shape representing the drone.
This assumes that the drone is square-shaped, with a set height. An arrow is
    drawn at the top of the drone and coloured with navigation lights (i.e.
    there is a red light on the left and a green light on the right).
Parameters
----------
    size : Optional[float]
        The length of each side of the drone's square faces. Default is 30 cm.
    height : Optional[float]
        The height of the drone. Default is 13 cm.
Attributes
----------
vertices
colours
edges
surfaces
arrow_vertices : Sequence[Sequence[float]]
A sequence of 3D coordinates representing the vertices on the arrow.
arrow_colours : Sequence[Sequence[float]]
A sequence of RGB values between 0 and 1, assigned to arrow vertices.
arrow_edges : Sequence[Sequence[int]]
        A sequence of 2-tuples representing the indices of the arrow vertices to
be joined.
arrow_surfaces : Sequence[Sequence[int]]
A sequence of the list of indices of vertices forming the arrow, in
order.
See Also
--------
Shape
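    Examples
    --------
    A minimal usage sketch (assumes an active OpenGL context, e.g. one created by Screen):
    >>> drone = Drone(size=0.3, height=0.13)
    >>> drone.draw(quaternion=(0, 0, 0, 1))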
"""
def __init__(self, size=0.3, height=0.13):
offset = [0, 0, size]
vertices = np.array([
(1, -1, -1), (1, 1, -1),
(-1, 1, -1), (-1, -1, -1),
(1, -1, 1), (1, 1, 1),
(-1, -1, 1), (-1, 1, 1),
]) * size
vertices += offset
vertices[:, 1] *= height
colours = (
(0.5, 0.5, 0.5),
)
edges = (
(0, 1), (0, 3), (0, 4),
(2, 1), (2, 3), (2, 7),
(6, 3), (6, 4), (6, 7),
(5, 1), (5, 4), (5, 7),
)
surfaces = (
# (0, 1, 2, 3),
# (3, 2, 7, 6),
# (6, 7, 5, 4),
# (4, 5, 1, 0),
# (1, 5, 7, 2),
# (4, 0, 3, 6),
)
super(Drone, self).__init__(vertices, colours, edges, surfaces)
self.arrow_vertices = np.array([
(-1, 1, 1), (0, 1, -1), (1, 1, 1), (0, 1, 0),
(-1, -1, 1), (0, -1, -1), (1, -1, 1), (0, -1, 0),
]) * size
self.arrow_vertices += offset
self.arrow_vertices[:, 1] *= height
self.arrow_colours = (
(1, 0, 0), # Red on left
(1, 1, 1), # White in front
(0, 1, 0), # Green on right
(1, 0.5, 0) # Orange in back
)
self.arrow_edges = (
(0, 1), (1, 2), (2, 3), (0, 3),
)
self.arrow_surfaces = (
(0, 1, 2, 3),
)
def draw(self, quaternion=(0, 0, 0, 1), edge_colour=(1, 1, 1)):
"""
Draw the drone.
Parameters
----------
quaternion : Optional[Sequence[float]]
The x, y, z, and w quaternion of the pose. Default is no rotation.
edge_colour : Optional[Sequence[float]]
The colour to draw the edges in. Default is white.
"""
super(Drone, self).draw(quaternion, edge_colour)
# Draw arrow
with new_matrix():
gl.glRotate(*Quat.to_axis(quaternion))
self._draw_components(self.arrow_vertices, self.arrow_colours,
self.arrow_edges, self.arrow_surfaces,
edge_colour)
class TexturesBase(object):
"""
Implements methods which allow usage of textures.
Attributes
----------
textures : Sequence[gl.GLuint]
A list of usable textures.
"""
def setup_textures(self):
"""
Set up texture variables.
"""
self.textures = deque(maxlen=2)
self._latest_texture = deque(maxlen=1)
def add_textures(self, *images):
"""
Add images to the list of usable textures.
Parameters
----------
images : Sequence[str | Image]
A list of filenames or images to load.
Raises
------
pygame.error
If the image cannot be loaded, or if the image format is not
supported.
TypeError
If the input type is unsupported.
"""
for texture_data, width, height in self.load_images(images):
self._latest_texture.append((texture_data, width, height))
def select_texture(self, texture_number=1):
"""
Bind a known texture for use.
Parameters
----------
texture_number : Optional[int]
The number of the texture, by the order it was added. Default is the
latest texture.
Raises
------
IndexError
If `texture_number` is larger than the number of available textures.
"""
gl.glBindTexture(gl.GL_TEXTURE_2D, self.textures[texture_number])
def init_texture(self, texture_data, width, height, texture_number=1):
"""
Initialize a texture for first use.
Parameters
----------
texture_data : Sequence
The image data.
width : int
The width of the image.
height : int
The height of the image.
texture_number : Optional[int]
The number of the texture, by the order it was added. Default is the
latest texture.
Raises
------
IndexError
If `texture_number` is larger than the number of available textures.
"""
self.textures.append(gl.glGenTextures(1))
self.select_texture(texture_number)
gl.glTexParameter(target=gl.GL_TEXTURE_2D,
pname=gl.GL_TEXTURE_MIN_FILTER,
parameter=gl.GL_LINEAR)
# Implementation does not accept kwargs. Order is target, level,
# internalFormat, width, height, border, format, type, and pixels.
gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGB, width, height, 0,
gl.GL_RGB, gl.GL_UNSIGNED_BYTE, texture_data)
def update_texture(self, texture_data, width, height, texture_number=1):
"""
Update a known texture.
Parameters
----------
texture_data : Sequence
The image data.
width : int
The width of the image.
height : int
The height of the image.
texture_number : Optional[int]
The number of the texture, by the order it was added. Default is the
latest texture.
Raises
------
IndexError
If `texture_number` is larger than the number of available textures.
"""
self.select_texture(texture_number)
# Implementation does not accept kwargs. Order is target, level,
# xoffset, yoffset, width, height, format, type, and pixels.
gl.glTexSubImage2D(gl.GL_TEXTURE_2D, 0, 0, 0, width, height,
gl.GL_RGB, gl.GL_UNSIGNED_BYTE, texture_data)
def load_images(self, images):
"""
Load images.
Parameters
----------
images : Sequence[str | Image]
A list of filenames or images to load.
Yields
------
str
The image data.
int
The width of the image.
int
The height of the image.
Raises
------
pygame.error
If the image cannot be loaded, or if the image format is not
supported.
TypeError
If the input type is unsupported.
"""
for image in images:
if isinstance(image, str):
yield self._load_image_from_file(image)
elif isinstance(image, Image):
yield self._load_image_from_ros(image)
else:
raise TypeError("Cannot load image.")
@staticmethod
def _load_image_from_file(filename):
"""
Load image from file.
Parameters
----------
filename : str
The name of the file to be loaded.
Returns
-------
str
The image data.
int
The width of the image.
int
The height of the image.
Raises
------
pygame.error
If the image cannot be loaded, or if the image format is not
supported.
"""
img = pg.image.load(filename)
texture_data = pg.image.tostring(img, "RGB", True)
return texture_data, img.get_width(), img.get_height()
def _load_image_from_ros(self, image):
"""
Load image from a ROS topic.
Parameters
----------
image : Image
The Image to be loaded.
Returns
-------
str
The image data.
int
The width of the image.
int
The height of the image.
"""
qt_image = QtGui.QPixmap.fromImage(
QtGui.QImage(image.data, image.width, image.height,
QtGui.QImage.Format_RGB888))
return qt_image, image.width, image.height
class RendererBase(TexturesBase):
"""
Implements methods which enable rendering the scene.
Attributes
----------
textures
size : np.ndarray[int]
The width and height of the display, in pixels.
width : int
The width of the display, in pixels.
height : int
The height of the display, in pixels.
model : Shape
The model to draw.
distance : float | None
The distance at which to draw. If provided, the visualization can be
zoomed in or out.
fov_x : float
The horizontal field of view, in degrees.
fov_y : float
The vertical field of view, in degrees.
"""
def setup_renderer(self, size, model, distance,
fov_diagonal=None, fov_vertical=None):
"""
Set up rendering parameters.
`fov_vertical` and `fov_diagonal` are mutually exclusive. If neither is
specified, the default vertical field of view is set to 45 degrees.
Parameters
----------
size : Sequence[int]
The width and height of the display, in pixels.
model : Shape
The model to be drawn.
distance : float | None
The distance at which to draw. If provided, the visualization can be
zoomed in or out.
fov_vertical : Optional[float]
The vertical size of the field of view, in degrees.
fov_diagonal : Optional[float]
The diagonal size of the field of view, in degrees.
Raises
------
TypeError
If both `fov_vertical` and `fov_diagonal` are provided.
"""
if fov_diagonal and fov_vertical:
raise TypeError("Enter only one value for field of view size.")
self.size = self.width, self.height = np.asarray(size)
self.aspect_ratio = self.width / self.height
self.model = model
self.distance = distance
self.text = {}
self.pose_cam = self.pose_drone = None
if fov_vertical is not None:
self.fov_y = fov_vertical
elif fov_diagonal is not None:
self.fov_y = Fov.d2v(fov_diagonal, self.aspect_ratio)
else:
self.fov_y = 45
self.fov_x = Fov.v2h(self.fov_y, self.aspect_ratio)
self._image_distance = self.height / (2 * np.tan(d2r(self.fov_y) / 2))
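        # Pinhole-camera relation (worked example): f = height / (2 * tan(fov_y / 2)),
        # so a 480 px high view with fov_y = 45 deg gives f ~= 480 / (2 * tan(22.5 deg)) ~= 579 px.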
def render(self, pose_cam, pose_drone):
"""
Render the scene.
Parameters
----------
pose_cam : Pose
The pose of the drone when the background image was taken.
pose_drone : Pose
The current pose of the drone.
"""
rel_pos, rot_cam, rot_drone = self._find_rel_pos(pose_cam, pose_drone)
rot_cam[0] *= -1
rot_cam[1], rot_cam[2] = rot_cam[2], rot_cam[1]
rot_drone[0] *= -1
rot_drone[1], rot_drone[2] = rot_drone[2], rot_drone[1]
# Temporarily turn off zooming.
if self.distance:
scale = np.linalg.norm(rel_pos) / self.distance
rel_pos = unit_vector(rel_pos) * self.distance
else:
scale = 1
centre = self._find_drone_on_image(rel_pos)
with new_matrix():
# Set camera orientation.
rot_cam[:3] *= -1 # z-axis is with respect to origin, not camera.
gl.glRotate(*Quat.to_axis(rot_cam))
# Set camera position.
# Convert position to OpenGL coordinate frame first.
rel_pos[1], rel_pos[2] = rel_pos[2], -rel_pos[1]
gl.glTranslate(*rel_pos)
self.draw_background(scale=scale, centre=centre)
self.model.draw(rot_drone)
# noinspection PyUnusedLocal
def draw_background(self, texture_number=1, scale=1, centre=None,
rotation=0):
"""
Draw the background image.
Parameters
----------
texture_number : Optional[int]
The number of the texture, by the order it was added. Default is
the latest texture added.
scale : Optional[float]
The amount of zoom applied to the image. Default is no zoom.
centre : Optional[tuple[int]]
The coordinates of the centre of zoom. Default is the centre of the
image.
rotation : Optional[float]
The amount of clockwise rotation, in degrees. Default is no
rotation.
"""
# TODO: Fix method for zoom.
def find_vertices(x, y):
# centre_x, centre_y = centre
# Temporarily turn off zooming
# scale = 1
centre_x, centre_y = self.size / 2
# Real zooming code.
vertex_x = self.width / 2 - scale * (centre_x - self.width * x)
vertex_y = self.height / 2 - scale * (centre_y - self.height * y)
return vertex_x, vertex_y
# Clear background
if texture_number != 0:
self.draw_background(texture_number=0)
try:
self.select_texture(texture_number)
if "no_texture" in self.text:
del self.text["no_texture"]
except IndexError:
self.text["no_texture"] = ("No textures yet", None, None, (1, 0, 0))
return
if centre is None:
centre = self.size / 2
with gl_flag(gl.GL_TEXTURE_2D):
with gl_ortho(self.width, self.height):
gl.glRotate(rotation, 0, 0, 1)
gl.glTranslate(-self.width / 2, -self.height / 2, 0)
with gl_primitive(gl.GL_QUADS):
for x, y in ((0, 0), (0, 1), (1, 1), (1, 0)):
gl.glTexCoord2f(x, y)
tx, ty = find_vertices(x, y)
gl.glVertex(tx, ty, 0)
def write_text(self, text, position=None, font=gl_font("fixed", 13),
colour=(0, 1, 0)):
"""
Write text on the screen.
Parameters
----------
text : str
The text to write.
position : Optional[Sequence[int]]
A sequence containing the horizontal and vertical positions, in
pixels, of the lower left pixel of the first line of the text.
Default is 40% of the screen right of centre, and 80% of the screen
above centre.
font : Optional[ctypes.c_void_p]
The font to use. Default is 13-point Fixed.
colour : Optional[Sequence[float]]
The text colour, as RGB values between 0 and 1. Default is green.
"""
if position is None:
x = self.width * 0.2
y = self.height * 0.4
else:
x, y = position
with gl_ortho(self.width, self.height):
with new_state():
gl.glColor3fv(colour)
gl.glRasterPos2f(x, y)
glut.glutBitmapString(font, text)
@staticmethod
def _find_rel_pos(pose_cam, pose_drone):
"""
Find the relative positions and orientations of the camera and the
drone.
Parameters
----------
pose_cam : Pose
The pose of the drone when the background image was taken.
pose_drone : Pose
The current pose of the drone.
Returns
-------
rel_pos : np.ndarray
A 3-array with the x, y, and z positions of the relative positions
of the drone, converted to the OpenGL coordinate system.
rot_cam : np.ndarray
A quaternion representing the orientation of the camera, in x, y, z,
w format.
rot_drone : np.ndarray
A quaternion representing the orientation of the drone, in x, y, z,
w format.
"""
rel_pos = pose_cam.position - pose_drone.position
rel_pos[2] *= -1
return rel_pos, pose_cam.orientation, pose_drone.orientation
def _find_drone_on_image(self, rel_pos):
"""
Find the location of the drone on the image.
Parameters
----------
rel_pos : np.ndarray
A 3-array with the x, y, and z positions of the relative positions
of the drone, without conversion.
Returns
-------
centre_x : float
The horizontal location of the drone, in pixels
centre_y : float
The vertical location of the drone, in pixels
"""
# TODO: Consider rotation of the camera
dx, dy, dz = rel_pos
centre_x = self._image_distance * dx / dy + self.width / 2
centre_y = self._image_distance * dz / dy + self.height / 2
return centre_x, centre_y
class Screen(RendererBase):
"""
Class for displaying and updating the screen.
`fov_vertical` and `fov_diagonal` are mutually exclusive. If neither is
specified, the default vertical field of view is set to 45 degrees.
Parameters
----------
size : Sequence[int]
The width and height of the display, in pixels.
model : Shape
The model to be drawn.
fov_vertical : Optional[float]
The vertical size of the field of view, in degrees.
fov_diagonal : Optional[float]
The diagonal size of the field of view, in degrees.
wait : Optional[int]
The time to wait before the next step, in milliseconds. Default is 10.
distance : Optional[float]
The distance at which to draw. If provided, the visualization can be
zoomed in or out. Default is to have no zoom.
Attributes
----------
textures
size
width
height
model
distance
fov_x
fov_y
wait : int
The time to wait before the next step, in milliseconds.
Raises
------
TypeError
If both `fov_vertical` and `fov_diagonal` are provided.
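    Examples
    --------
    A minimal sketch, mirroring how test_offline starts the display:
    >>> screen = Screen((640, 480), model=Drone(), fov_diagonal=92)
    >>> threading.Thread(target=screen.run).start()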
"""
def __init__(self, size, model, fov_vertical=None, fov_diagonal=None,
wait=10, distance=None):
# TODO: Make drone always horizontal? NO
# TODO: Keep drone in centre of image?
# TODO: Allow rotation of background?
# TODO: Keep image aligned with horizon?
# TODO: Zoom only in, or both in and out?
self.setup_textures()
self.setup_renderer(size, model, distance, fov_diagonal, fov_vertical)
self.wait = wait
self.is_active = True
self._bg_initialized = False
def run(self):
"""
Run the display.
"""
pg.init()
glut.glutInit()
pg.display.set_caption("Past Image Viewer")
pg.display.set_mode(self.size, pg.OPENGL)
self.set_perspective()
self.add_textures("media/blank.png")
self.init_texture(*self._latest_texture.pop(), texture_number=0)
while self.is_active:
try:
self.step()
except pg.error:
pg.quit()
self.is_active = False
pg.time.wait(self.wait)
def step(self):
"""
Show one frame.
"""
for event in pg.event.get():
if event.type == pg.QUIT:
pg.quit()
self.is_active = False
return
try:
if self._bg_initialized:
self.update_texture(*self._latest_texture.pop())
else:
self.init_texture(*self._latest_texture.pop())
self._bg_initialized = True
except IndexError:
pass
self.clear()
try:
self.render(self.pose_cam, self.pose_drone)
except AttributeError:
self.write_text("No data yet", colour=(1, 0, 0))
return
for text, position, font, colour in self.text.values():
kwargs = {"text": text,
"position": position,
"font": font,
"colour": colour}
self.write_text(**{k: v for k, v in kwargs.items()
if v is not None})
pg.display.flip()
def set_perspective(self, near=0.1, far=100):
"""
Set up the perspective projection matrix.
Parameters
----------
near : Optional[float]
The distance to the near clipping plane in the z-direction. Default
is 10 cm.
far : Optional[float]
The distance to the far clipping plane in the z-direction. Default
is 100 m.
"""
glu.gluPerspective(self.fov_y, self.aspect_ratio, near, far)
@staticmethod
def clear():
"""
Reset OpenGL buffers to preset values.
"""
gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
class VisualizerBase(object):
def bg_callback(self, background):
self.screen.add_textures(background)
def pose_cam_callback(self, pose_cam):
self.screen.pose_cam = Pose(pose_cam)
def pose_drone_callback(self, pose_drone):
self.screen.pose_drone = Pose(pose_drone)
def tracked_callback(self, tracked):
self.tracked = tracked.data
if not self.tracked:
self.screen.text["tracking"] = ("Tracking lost", None,
gl_font("helvetica", 18), (1, 0, 0))
elif "tracking" in self.screen.text:
del self.screen.text["tracking"]
def _start_screen(self, size):
self.screen = Screen(size, model=Drone(), fov_diagonal=92)
threading.Thread(target=self.screen.run).start()
@property
def is_active(self):
return self.screen.is_active
class Visualizer(VisualizerBase):
def __init__(self, size=(640, 480)):
self._start_screen(size)
rospy.Subscriber("/ardrone/past_image", Image, self.bg_callback,
queue_size=1)
rospy.Subscriber("/ardrone/past_pose", PoseStamped,
self.pose_cam_callback, queue_size=1)
rospy.Subscriber("/ardrone/pose", PoseStamped, self.pose_drone_callback,
queue_size=1)
rospy.Subscriber("/ardrone/tracked", Bool, self.tracked_callback,
queue_size=1)
class TestVisualizer(VisualizerBase):
def __init__(self, size=(640, 480)):
rospy.Subscriber("/ardrone/image_raw", Image, self.bg_callback,
queue_size=1)
self._start_screen(size)
pos_cam = [-1.5, -4, 4]
rot_cam = [-0, 0, 0, 1]
pos_drone = [-1.5, -1, 4]
rot_drone = [-0.3, 0, 0, 1]
self.pose_cam_callback(Pose.generate_stamped(pos_cam, rot_cam))
self.pose_drone_callback(Pose.generate_stamped(pos_drone, rot_drone))
def test_offline(size=(640, 480)):
screen = Screen(size, model=Drone(), fov_diagonal=92)
threading.Thread(target=screen.run).start()
pos_cam = [1, 0, 0]
rot_cam = [0, 0, 0, 1]
pos_drone = [0, -3, 0]
rot_drone = [0, 0, 0, 1]
# pos_cam = [-0.5700, 0.08365, 0.0837]
# rot_cam = [0.0006, 0.0042, 0.0166, 0.9999]
# pos_drone = [-0.4767, 1.3597, 0.0770]
# rot_drone = [0.0078, 0.0087, 0.0059, 0.9999]
screen.pose_cam = Pose.generate_stamped(pos_cam, rot_cam)
screen.pose_drone = Pose.generate_stamped(pos_drone, rot_drone)
time.sleep(3)
screen.add_textures("media/bird.jpg")
def shutdown_hook():
pg.quit()
def main():
rospy.init_node("visualizer", anonymous=True)
rospy.on_shutdown(shutdown_hook)
try:
debug = rospy.get_param("~debug")
except KeyError:
rospy.logwarn("Running offline test.")
debug = "offline"
if debug == "offline":
test_offline()
return
elif debug == "online":
visualizer = TestVisualizer((640, 360))
else:
visualizer = Visualizer((640, 360))
rospy.loginfo("Started visualizer")
while visualizer.is_active:
pass
rospy.signal_shutdown("Done!")
if __name__ == '__main__':
main()
|
basicswap.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019-2022 tecnovert
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
import os
import re
import sys
import zmq
import json
import time
import base64
import random
import shutil
import struct
import urllib.request
import hashlib
import secrets
import datetime as dt
import threading
import traceback
import sqlalchemy as sa
import collections
import concurrent.futures
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy.orm.session import close_all_sessions
from .interface_part import PARTInterface, PARTInterfaceAnon, PARTInterfaceBlind
from .interface_btc import BTCInterface
from .interface_ltc import LTCInterface
from .interface_nmc import NMCInterface
from .interface_xmr import XMRInterface
from .interface_passthrough_btc import PassthroughBTCInterface
from . import __version__
from .rpc_xmr import make_xmr_rpc2_func
from .util import (
TemporaryError,
pubkeyToAddress,
format_amount,
format_timestamp,
encodeAddress,
decodeAddress,
DeserialiseNum,
decodeWif,
toWIF,
getKeyID,
make_int,
getP2SHScriptForHash,
getP2WSH,
ensure,
)
from .chainparams import (
chainparams,
Coins,
)
from .script import (
OpCodes,
)
from .messages_pb2 import (
OfferMessage,
BidMessage,
BidAcceptMessage,
XmrBidMessage,
XmrBidAcceptMessage,
XmrSplitMessage,
XmrBidLockTxSigsMessage,
XmrBidLockSpendTxMessage,
XmrBidLockReleaseMessage,
OfferRevokeMessage,
)
from .db import (
CURRENT_DB_VERSION,
TableTypes,
Base,
DBKVInt,
DBKVString,
Offer,
Bid,
SwapTx,
PooledAddress,
SentOffer,
SmsgAddress,
EventQueue,
EventLog,
XmrOffer,
XmrSwap,
XmrSplitData,
Wallets,
KnownIdentity,
)
from .base import BaseApp
from .explorers import (
ExplorerInsight,
ExplorerBitAps,
ExplorerChainz,
)
import basicswap.config as cfg
import basicswap.network as bsn
import basicswap.protocols.atomic_swap_1 as atomic_swap_1
from .basicswap_util import (
KeyTypes,
TxLockTypes,
AddressTypes,
MessageTypes,
SwapTypes,
OfferStates,
BidStates,
TxStates,
TxTypes,
EventTypes,
EventLogTypes,
XmrSplitMsgTypes,
DebugTypes,
strBidState,
describeEventEntry,
getVoutByAddress,
getVoutByP2WSH,
replaceAddrPrefix,
getOfferProofOfFundsHash,
getLastBidState,
isActiveBidState)
from .protocols.xmr_swap_1 import (
addLockRefundSigs,
recoverNoScriptTxnWithKey)
non_script_type_coins = (Coins.XMR, Coins.PART_ANON)
def validOfferStateToReceiveBid(offer_state):
if offer_state == OfferStates.OFFER_RECEIVED:
return True
if offer_state == OfferStates.OFFER_SENT:
return True
return False
def zeroIfNone(value):
if value is None:
return 0
return value
def threadPollChainState(swap_client, coin_type):
while not swap_client.delay_event.is_set():
try:
ci = swap_client.ci(coin_type)
if coin_type == Coins.XMR:
new_height = ci.getChainHeight()
if new_height != swap_client.coin_clients[coin_type]['chain_height']:
swap_client.log.debug('New {} block at height: {}'.format(str(coin_type), new_height))
with swap_client.mxDB:
swap_client.coin_clients[coin_type]['chain_height'] = new_height
else:
chain_state = ci.getBlockchainInfo()
if chain_state['bestblockhash'] != swap_client.coin_clients[coin_type]['chain_best_block']:
swap_client.log.debug('New {} block at height: {}'.format(str(coin_type), chain_state['blocks']))
with swap_client.mxDB:
swap_client.coin_clients[coin_type]['chain_height'] = chain_state['blocks']
swap_client.coin_clients[coin_type]['chain_best_block'] = chain_state['bestblockhash']
swap_client.coin_clients[coin_type]['chain_median_time'] = chain_state['mediantime']
except Exception as e:
swap_client.log.warning('threadPollChainState error: {}'.format(str(e)))
swap_client.delay_event.wait(random.randrange(20, 30)) # random to stagger updates
class WatchedOutput(): # Watch for spends
__slots__ = ('bid_id', 'txid_hex', 'vout', 'tx_type', 'swap_type')
def __init__(self, bid_id, txid_hex, vout, tx_type, swap_type):
self.bid_id = bid_id
self.txid_hex = txid_hex
self.vout = vout
self.tx_type = tx_type
self.swap_type = swap_type
class WatchedTransaction():
# TODO
# Watch for presence in mempool (getrawtransaction)
def __init__(self, bid_id, txid_hex, tx_type, swap_type):
self.bid_id = bid_id
self.txid_hex = txid_hex
self.tx_type = tx_type
self.swap_type = swap_type
class BasicSwap(BaseApp):
def __init__(self, fp, data_dir, settings, chain, log_name='BasicSwap'):
super().__init__(fp, data_dir, settings, chain, log_name)
v = __version__.split('.')
self._version = struct.pack('>HHH', int(v[0]), int(v[1]), int(v[2]))
self.check_progress_seconds = self.settings.get('check_progress_seconds', 60)
self.check_watched_seconds = self.settings.get('check_watched_seconds', 60)
self.check_expired_seconds = self.settings.get('check_expired_seconds', 60 * 5)
self.check_events_seconds = self.settings.get('check_events_seconds', 10)
self.check_xmr_swaps_seconds = self.settings.get('check_xmr_swaps_seconds', 20)
        self.startup_tries = self.settings.get('startup_tries', 21) # Total seconds waited will be x * (x + 1) / 2
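        # For example, the default of 21 tries waits at most 21 * 22 / 2 = 231 seconds in total.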
self.debug_ui = self.settings.get('debug_ui', False)
self._last_checked_progress = 0
self._last_checked_watched = 0
self._last_checked_expired = 0
self._last_checked_events = 0
self._last_checked_xmr_swaps = 0
self._possibly_revoked_offers = collections.deque([], maxlen=48) # TODO: improve
self._updating_wallets_info = {}
self._last_updated_wallets_info = 0
# TODO: Adjust ranges
self.min_delay_event = self.settings.get('min_delay_event', 10)
self.max_delay_event = self.settings.get('max_delay_event', 60)
self.min_delay_retry = self.settings.get('min_delay_retry', 60)
self.max_delay_retry = self.settings.get('max_delay_retry', 5 * 60)
self.min_sequence_lock_seconds = self.settings.get('min_sequence_lock_seconds', 1 * 60 * 60)
self.max_sequence_lock_seconds = self.settings.get('max_sequence_lock_seconds', 96 * 60 * 60)
self._bid_expired_leeway = 5
self.swaps_in_progress = dict()
self.SMSG_SECONDS_IN_HOUR = 60 * 60 # Note: Set smsgsregtestadjust=0 for regtest
self.threads = []
self.thread_pool = concurrent.futures.ThreadPoolExecutor(max_workers=4, thread_name_prefix='bsp')
# Encode key to match network
wif_prefix = chainparams[Coins.PART][self.chain]['key_prefix']
self.network_key = toWIF(wif_prefix, decodeWif(self.settings['network_key']))
self.network_pubkey = self.settings['network_pubkey']
self.network_addr = pubkeyToAddress(chainparams[Coins.PART][self.chain]['pubkey_address'], bytes.fromhex(self.network_pubkey))
self.sqlite_file = os.path.join(self.data_dir, 'db{}.sqlite'.format('' if self.chain == 'mainnet' else ('_' + self.chain)))
db_exists = os.path.exists(self.sqlite_file)
# HACK: create_all hangs when using tox, unless create_engine is called with echo=True
if not db_exists:
if os.getenv('FOR_TOX'):
self.engine = sa.create_engine('sqlite:///' + self.sqlite_file, echo=True)
else:
self.engine = sa.create_engine('sqlite:///' + self.sqlite_file)
close_all_sessions()
Base.metadata.create_all(self.engine)
self.engine.dispose()
self.engine = sa.create_engine('sqlite:///' + self.sqlite_file)
self.session_factory = sessionmaker(bind=self.engine, expire_on_commit=False)
session = scoped_session(self.session_factory)
try:
self.db_version = session.query(DBKVInt).filter_by(key='db_version').first().value
except Exception:
self.log.info('First run')
self.db_version = CURRENT_DB_VERSION
session.add(DBKVInt(
key='db_version',
value=self.db_version
))
session.commit()
try:
self._contract_count = session.query(DBKVInt).filter_by(key='contract_count').first().value
except Exception:
self._contract_count = 0
session.add(DBKVInt(
key='contract_count',
value=self._contract_count
))
session.commit()
session.close()
session.remove()
self.zmqContext = zmq.Context()
self.zmqSubscriber = self.zmqContext.socket(zmq.SUB)
self.zmqSubscriber.connect(self.settings['zmqhost'] + ':' + str(self.settings['zmqport']))
self.zmqSubscriber.setsockopt_string(zmq.SUBSCRIBE, 'smsg')
for c in Coins:
if c in chainparams:
self.setCoinConnectParams(c)
if self.chain == 'mainnet':
self.coin_clients[Coins.PART]['explorers'].append(ExplorerInsight(
self, Coins.PART,
'https://explorer.particl.io/particl-insight-api'))
self.coin_clients[Coins.LTC]['explorers'].append(ExplorerBitAps(
self, Coins.LTC,
'https://api.bitaps.com/ltc/v1/blockchain'))
self.coin_clients[Coins.LTC]['explorers'].append(ExplorerChainz(
self, Coins.LTC,
'http://chainz.cryptoid.info/ltc/api.dws'))
elif self.chain == 'testnet':
self.coin_clients[Coins.PART]['explorers'].append(ExplorerInsight(
self, Coins.PART,
'https://explorer-testnet.particl.io/particl-insight-api'))
self.coin_clients[Coins.LTC]['explorers'].append(ExplorerBitAps(
self, Coins.LTC,
'https://api.bitaps.com/ltc/testnet/v1/blockchain'))
# non-segwit
# https://testnet.litecore.io/insight-api
random.seed(secrets.randbits(128))
def finalise(self):
self.log.info('Finalise')
with self.mxDB:
self.is_running = False
self.delay_event.set()
if self._network:
self._network.stopNetwork()
self._network = None
for t in self.threads:
t.join()
if sys.version_info[1] >= 9:
self.thread_pool.shutdown(cancel_futures=True)
else:
self.thread_pool.shutdown()
self.zmqContext.destroy()
close_all_sessions()
self.engine.dispose()
def setCoinConnectParams(self, coin):
# Set anything that does not require the daemon to be running
chain_client_settings = self.getChainClientSettings(coin)
bindir = os.path.expanduser(chain_client_settings.get('bindir', ''))
datadir = os.path.expanduser(chain_client_settings.get('datadir', os.path.join(cfg.TEST_DATADIRS, chainparams[coin]['name'])))
connection_type = chain_client_settings.get('connection_type', 'none')
rpcauth = None
if connection_type == 'rpc':
if 'rpcauth' in chain_client_settings:
rpcauth = chain_client_settings['rpcauth']
self.log.debug('Read %s rpc credentials from json settings', coin)
elif 'rpcpassword' in chain_client_settings:
rpcauth = chain_client_settings['rpcuser'] + ':' + chain_client_settings['rpcpassword']
self.log.debug('Read %s rpc credentials from json settings', coin)
session = scoped_session(self.session_factory)
try:
last_height_checked = session.query(DBKVInt).filter_by(key='last_height_checked_' + chainparams[coin]['name']).first().value
except Exception:
last_height_checked = 0
session.close()
session.remove()
self.coin_clients[coin] = {
'coin': coin,
'name': chainparams[coin]['name'],
'connection_type': connection_type,
'bindir': bindir,
'datadir': datadir,
'rpchost': chain_client_settings.get('rpchost', '127.0.0.1'),
'rpcport': chain_client_settings.get('rpcport', chainparams[coin][self.chain]['rpcport']),
'rpcauth': rpcauth,
'blocks_confirmed': chain_client_settings.get('blocks_confirmed', 6),
'conf_target': chain_client_settings.get('conf_target', 2),
'watched_outputs': [],
'last_height_checked': last_height_checked,
'use_segwit': chain_client_settings.get('use_segwit', False),
'use_csv': chain_client_settings.get('use_csv', True),
'core_version_group': chain_client_settings.get('core_version_group', 0),
'pid': None,
'core_version': None,
'explorers': [],
'chain_lookups': chain_client_settings.get('chain_lookups', 'local'),
'restore_height': chain_client_settings.get('restore_height', 0),
'fee_priority': chain_client_settings.get('fee_priority', 0),
# Chain state
'chain_height': None,
'chain_best_block': None,
'chain_median_time': None,
}
if coin == Coins.PART:
self.coin_clients[Coins.PART_ANON] = self.coin_clients[coin]
self.coin_clients[Coins.PART_BLIND] = self.coin_clients[coin]
if self.coin_clients[coin]['connection_type'] == 'rpc':
if coin == Coins.XMR:
if chain_client_settings.get('automatically_select_daemon', False):
self.selectXMRRemoteDaemon(coin)
self.coin_clients[coin]['walletrpchost'] = chain_client_settings.get('walletrpchost', '127.0.0.1')
self.coin_clients[coin]['walletrpcport'] = chain_client_settings.get('walletrpcport', chainparams[coin][self.chain]['walletrpcport'])
if 'walletrpcpassword' in chain_client_settings:
self.coin_clients[coin]['walletrpcauth'] = (chain_client_settings['walletrpcuser'], chain_client_settings['walletrpcpassword'])
else:
raise ValueError('Missing XMR wallet rpc credentials.')
def selectXMRRemoteDaemon(self, coin):
self.log.info('Selecting remote XMR daemon.')
chain_client_settings = self.getChainClientSettings(coin)
remote_daemon_urls = chain_client_settings.get('remote_daemon_urls', [])
rpchost = self.coin_clients[coin]['rpchost']
rpcport = self.coin_clients[coin]['rpcport']
current_daemon_url = f'{rpchost}:{rpcport}'
if current_daemon_url in remote_daemon_urls:
self.log.info(f'Trying last used url {rpchost}:{rpcport}.')
try:
rpc_cb2 = make_xmr_rpc2_func(rpcport, rpchost)
test = rpc_cb2('get_height', timeout=20)['height']
return True
except Exception as e:
self.log.warning(f'Failed to set XMR remote daemon to {rpchost}:{rpcport}, {e}')
random.shuffle(remote_daemon_urls)
for url in remote_daemon_urls:
self.log.info(f'Trying url {url}.')
try:
rpchost, rpcport = url.rsplit(':', 1)
rpc_cb2 = make_xmr_rpc2_func(rpcport, rpchost)
test = rpc_cb2('get_height', timeout=20)['height']
self.coin_clients[coin]['rpchost'] = rpchost
self.coin_clients[coin]['rpcport'] = rpcport
data = {
'rpchost': rpchost,
'rpcport': rpcport,
}
self.editSettings(self.coin_clients[coin]['name'], data)
return True
except Exception as e:
self.log.warning(f'Failed to set XMR remote daemon to {url}, {e}')
raise ValueError('Failed to select a working XMR daemon url.')
def ci(self, coin): # Coin interface
if coin == Coins.PART_ANON:
return self.coin_clients[Coins.PART]['interface_anon']
if coin == Coins.PART_BLIND:
return self.coin_clients[Coins.PART]['interface_blind']
return self.coin_clients[coin]['interface']
def createInterface(self, coin):
if coin == Coins.PART:
return PARTInterface(self.coin_clients[coin], self.chain, self)
elif coin == Coins.BTC:
return BTCInterface(self.coin_clients[coin], self.chain, self)
elif coin == Coins.LTC:
return LTCInterface(self.coin_clients[coin], self.chain, self)
elif coin == Coins.NMC:
return NMCInterface(self.coin_clients[coin], self.chain, self)
elif coin == Coins.XMR:
xmr_i = XMRInterface(self.coin_clients[coin], self.chain, self)
chain_client_settings = self.getChainClientSettings(coin)
xmr_i.setWalletFilename(chain_client_settings['walletfile'])
return xmr_i
else:
raise ValueError('Unknown coin type')
def createPassthroughInterface(self, coin):
if coin == Coins.BTC:
return PassthroughBTCInterface(self.coin_clients[coin], self.chain)
else:
raise ValueError('Unknown coin type')
def setCoinRunParams(self, coin):
cc = self.coin_clients[coin]
if coin == Coins.XMR:
return
if cc['connection_type'] == 'rpc' and cc['rpcauth'] is None:
chain_client_settings = self.getChainClientSettings(coin)
authcookiepath = os.path.join(self.getChainDatadirPath(coin), '.cookie')
pidfilename = cc['name']
if cc['name'] == 'bitcoin' or cc['name'] == 'litecoin' or cc['name'] == 'namecoin':
pidfilename += 'd'
pidfilepath = os.path.join(self.getChainDatadirPath(coin), pidfilename + '.pid')
self.log.debug('Reading %s rpc credentials from auth cookie %s', coin, authcookiepath)
# Wait for daemon to start
# Test pids to ensure authcookie is read for the correct process
datadir_pid = -1
for i in range(20):
try:
with open(pidfilepath, 'rb') as fp:
datadir_pid = int(fp.read().decode('utf-8'))
assert(datadir_pid == cc['pid']), 'Mismatched pid'
assert(os.path.exists(authcookiepath))
except Exception:
time.sleep(0.5)
try:
if os.name != 'nt' or cc['core_version_group'] > 17: # Litecoin on windows doesn't write a pid file
assert(datadir_pid == cc['pid']), 'Mismatched pid'
with open(authcookiepath, 'rb') as fp:
cc['rpcauth'] = fp.read().decode('utf-8')
except Exception as e:
self.log.error('Unable to read authcookie for %s, %s, datadir pid %d, daemon pid %s. Error: %s', str(coin), authcookiepath, datadir_pid, cc['pid'], str(e))
raise ValueError('Error, terminating')
def createCoinInterface(self, coin):
if self.coin_clients[coin]['connection_type'] == 'rpc':
self.coin_clients[coin]['interface'] = self.createInterface(coin)
if coin == Coins.PART:
self.coin_clients[coin]['interface_anon'] = PARTInterfaceAnon(self.coin_clients[coin], self.chain, self)
self.coin_clients[coin]['interface_blind'] = PARTInterfaceBlind(self.coin_clients[coin], self.chain, self)
elif self.coin_clients[coin]['connection_type'] == 'passthrough':
self.coin_clients[coin]['interface'] = self.createPassthroughInterface(coin)
def start(self):
self.log.info('Starting BasicSwap %s, database v%d\n\n', __version__, self.db_version)
self.log.info('sqlalchemy version %s', sa.__version__)
self.log.info('timezone offset: %d (%s)', time.timezone, time.tzname[0])
self.upgradeDatabase(self.db_version)
for c in Coins:
if c not in chainparams:
continue
self.setCoinRunParams(c)
self.createCoinInterface(c)
if self.coin_clients[c]['connection_type'] == 'rpc':
self.waitForDaemonRPC(c)
ci = self.ci(c)
core_version = ci.getDaemonVersion()
self.log.info('%s Core version %d', ci.coin_name(), core_version)
self.coin_clients[c]['core_version'] = core_version
t = threading.Thread(target=threadPollChainState, args=(self, c))
self.threads.append(t)
t.start()
if c == Coins.PART:
self.coin_clients[c]['have_spent_index'] = ci.haveSpentIndex()
try:
# Sanity checks
rv = self.callcoinrpc(c, 'extkey')
if 'result' in rv and 'No keys to list.' in rv['result']:
raise ValueError('No keys loaded.')
if self.callcoinrpc(c, 'getstakinginfo')['enabled'] is not False:
self.log.warning('%s staking is not disabled.', ci.coin_name())
except Exception as e:
self.log.error('Sanity checks failed: %s', str(e))
elif c == Coins.XMR:
ci.ensureWalletExists()
self.checkWalletSeed(c)
if 'p2p_host' in self.settings:
network_key = self.getNetworkKey(1)
self._network = bsn.Network(self.settings['p2p_host'], self.settings['p2p_port'], network_key, self)
self._network.startNetwork()
self.initialise()
def stopDaemon(self, coin):
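# Repeatedly issue the coin's cli stop command until the daemon stops responding, then wait for the .cookie file to be removed before returning. XMR daemons are not managed here.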
if coin == Coins.XMR:
return
num_tries = 10
authcookiepath = os.path.join(self.getChainDatadirPath(coin), '.cookie')
stopping = False
try:
for i in range(num_tries):
rv = self.callcoincli(coin, 'stop', timeout=10)
self.log.debug('Trying to stop %s', str(coin))
stopping = True
time.sleep(i + 1)
except Exception as ex:
if 'Could not connect' in str(ex):
if stopping:
for i in range(30):
# The lock file doesn't get deleted
# Using .cookie is a temporary workaround, will only work if rpc password is unset.
# TODO: Query lock on .lock properly
if os.path.exists(authcookiepath):
self.log.debug('Waiting on .cookie file %s', str(coin))
time.sleep(i + 1)
time.sleep(4) # Extra time to settle
return
self.log.error('stopDaemon %s', str(ex))
self.log.error(traceback.format_exc())
raise ValueError('Could not stop {}'.format(str(coin)))
def stopDaemons(self):
for c in Coins:
if c not in chainparams:
continue
chain_client_settings = self.getChainClientSettings(c)
if self.coin_clients[c]['connection_type'] == 'rpc' and chain_client_settings['manage_daemon'] is True:
self.stopDaemon(c)
def upgradeDatabase(self, db_version):
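# Apply schema migrations one version at a time until the database reaches CURRENT_DB_VERSION.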
if db_version >= CURRENT_DB_VERSION:
return
self.log.info('Upgrading database from version %d to %d.', db_version, CURRENT_DB_VERSION)
while True:
session = scoped_session(self.session_factory)
current_version = db_version
if current_version == 6:
session.execute('ALTER TABLE bids ADD COLUMN security_token BLOB')
session.execute('ALTER TABLE offers ADD COLUMN security_token BLOB')
db_version += 1
elif current_version == 7:
session.execute('ALTER TABLE transactions ADD COLUMN block_hash BLOB')
session.execute('ALTER TABLE transactions ADD COLUMN block_height INTEGER')
session.execute('ALTER TABLE transactions ADD COLUMN block_time INTEGER')
db_version += 1
elif current_version == 8:
session.execute('''
CREATE TABLE wallets (
record_id INTEGER NOT NULL,
coin_id INTEGER,
wallet_name VARCHAR,
balance_type INTEGER,
amount BIGINT,
updated_at BIGINT,
created_at BIGINT,
PRIMARY KEY (record_id))''')
db_version += 1
elif current_version == 9:
session.execute('ALTER TABLE wallets ADD COLUMN wallet_data VARCHAR')
db_version += 1
elif current_version == 10:
session.execute('ALTER TABLE smsgaddresses ADD COLUMN active_ind INTEGER')
session.execute('ALTER TABLE smsgaddresses ADD COLUMN created_at INTEGER')
session.execute('ALTER TABLE smsgaddresses ADD COLUMN note VARCHAR')
session.execute('ALTER TABLE smsgaddresses ADD COLUMN pubkey VARCHAR')
session.execute('UPDATE smsgaddresses SET active_ind = 1, created_at = 1')
session.execute('ALTER TABLE offers ADD COLUMN addr_to VARCHAR')
session.execute(f'UPDATE offers SET addr_to = "{self.network_addr}"')
db_version += 1
elif current_version == 11:
session.execute('ALTER TABLE bids ADD COLUMN chain_a_height_start INTEGER')
session.execute('ALTER TABLE bids ADD COLUMN chain_b_height_start INTEGER')
session.execute('ALTER TABLE bids ADD COLUMN protocol_version INTEGER')
session.execute('ALTER TABLE offers ADD COLUMN protocol_version INTEGER')
session.execute('ALTER TABLE transactions ADD COLUMN tx_data BLOB')
db_version += 1
elif current_version == 12:
session.execute('''
CREATE TABLE knownidentities (
record_id INTEGER NOT NULL,
address VARCHAR,
label VARCHAR,
publickey BLOB,
num_sent_bids_successful INTEGER,
num_recv_bids_successful INTEGER,
num_sent_bids_rejected INTEGER,
num_recv_bids_rejected INTEGER,
num_sent_bids_failed INTEGER,
num_recv_bids_failed INTEGER,
note VARCHAR,
updated_at BIGINT,
created_at BIGINT,
PRIMARY KEY (record_id))''')
session.execute('ALTER TABLE bids ADD COLUMN reject_code INTEGER')
session.execute('ALTER TABLE bids ADD COLUMN rate INTEGER')
session.execute('ALTER TABLE offers ADD COLUMN amount_negotiable INTEGER')
session.execute('ALTER TABLE offers ADD COLUMN rate_negotiable INTEGER')
db_version += 1
if current_version != db_version:
self.db_version = db_version
self.setIntKVInSession('db_version', db_version, session)
session.commit()
session.close()
session.remove()
self.log.info('Upgraded database to version {}'.format(self.db_version))
continue
session.close()
session.remove()
break
if db_version != CURRENT_DB_VERSION:
raise ValueError('Unable to upgrade database.')
def waitForDaemonRPC(self, coin_type):
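# Poll the coin interface's testDaemonRPC with an increasing delay until it succeeds, giving up (and stopping the process) after startup_tries attempts.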
for i in range(self.startup_tries):
if not self.is_running:
return
try:
self.coin_clients[coin_type]['interface'].testDaemonRPC()
return
except Exception as ex:
self.log.warning('Can\'t connect to %s RPC: %s. Trying again in %d second/s.', coin_type, str(ex), (1 + i))
time.sleep(1 + i)
self.log.error('Can\'t connect to %s RPC, exiting.', coin_type)
self.stopRunning(1) # systemd will try to restart the process if fail_code != 0
def checkSynced(self, coin_from, coin_to):
check_coins = (coin_from, coin_to)
for c in check_coins:
if self.coin_clients[c]['connection_type'] != 'rpc':
continue
if c == Coins.XMR:
continue # TODO
synced = round(self.ci(c).getBlockchainInfo()['verificationprogress'], 3)
if synced < 1.0:
raise ValueError('{} chain is still syncing, currently at {}.'.format(self.coin_clients[c]['name'], synced))
def initialiseWallet(self, coin_type):
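# Particl wallets are left untouched; other coin wallets are seeded from keys derived from the Particl master key, and a main address (XMR) or seed id is stored for later verification.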
if coin_type == Coins.PART:
return
ci = self.ci(coin_type)
self.log.info('Initialising {} wallet.'.format(ci.coin_name()))
if coin_type == Coins.XMR:
key_view = self.getWalletKey(coin_type, 1, for_ed25519=True)
key_spend = self.getWalletKey(coin_type, 2, for_ed25519=True)
ci.initialiseWallet(key_view, key_spend)
root_address = ci.getAddressFromKeys(key_view, key_spend)
key_str = 'main_wallet_addr_' + ci.coin_name().lower()
self.setStringKV(key_str, root_address)
return
root_key = self.getWalletKey(coin_type, 1)
root_hash = ci.getAddressHashFromKey(root_key)[::-1]
ci.initialiseWallet(root_key)
key_str = 'main_wallet_seedid_' + ci.coin_name().lower()
self.setStringKV(key_str, root_hash.hex())
def updateIdentityBidState(self, session, address, bid):
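# Create or update the KnownIdentity record for the peer address, incrementing its success or failure counters based on the bid's final state.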
identity_stats = session.query(KnownIdentity).filter_by(address=address).first()
if not identity_stats:
identity_stats = KnownIdentity(address=address, created_at=int(time.time()))
if bid.state == BidStates.SWAP_COMPLETED:
if bid.was_sent:
identity_stats.num_sent_bids_successful = zeroIfNone(identity_stats.num_sent_bids_successful) + 1
else:
identity_stats.num_recv_bids_successful = zeroIfNone(identity_stats.num_recv_bids_successful) + 1
elif bid.state in (BidStates.BID_ERROR, BidStates.XMR_SWAP_FAILED_REFUNDED, BidStates.XMR_SWAP_FAILED_SWIPED, BidStates.XMR_SWAP_FAILED):
if bid.was_sent:
identity_stats.num_sent_bids_failed = zeroIfNone(identity_stats.num_sent_bids_failed) + 1
else:
identity_stats.num_recv_bids_failed = zeroIfNone(identity_stats.num_recv_bids_failed) + 1
identity_stats.updated_at = int(time.time())
session.add(identity_stats)
def setIntKVInSession(self, str_key, int_val, session):
kv = session.query(DBKVInt).filter_by(key=str_key).first()
if not kv:
kv = DBKVInt(key=str_key, value=int_val)
else:
kv.value = int_val
session.add(kv)
def setIntKV(self, str_key, int_val):
self.mxDB.acquire()
try:
session = scoped_session(self.session_factory)
self.setIntKVInSession(str_key, int_val, session)
session.commit()
finally:
session.close()
session.remove()
self.mxDB.release()
def setStringKV(self, str_key, str_val):
with self.mxDB:
try:
session = scoped_session(self.session_factory)
kv = session.query(DBKVString).filter_by(key=str_key).first()
if not kv:
kv = DBKVString(key=str_key, value=str_val)
else:
kv.value = str_val
session.add(kv)
session.commit()
finally:
session.close()
session.remove()
def getStringKV(self, str_key):
self.mxDB.acquire()
try:
session = scoped_session(self.session_factory)
v = session.query(DBKVString).filter_by(key=str_key).first()
if not v:
return None
return v.value
finally:
session.close()
session.remove()
self.mxDB.release()
def activateBid(self, session, bid):
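# Load the bid's offer and stored txns, register the swap as in progress, watch the relevant outputs and seed last_height_checked from the stored tx heights if needed.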
if bid.bid_id in self.swaps_in_progress:
self.log.debug('Bid %s is already in progress', bid.bid_id.hex())
self.log.debug('Loading active bid %s', bid.bid_id.hex())
offer = session.query(Offer).filter_by(offer_id=bid.offer_id).first()
if not offer:
raise ValueError('Offer not found')
if offer.swap_type == SwapTypes.XMR_SWAP:
xmr_swap = session.query(XmrSwap).filter_by(bid_id=bid.bid_id).first()
self.loadBidTxns(bid, session)
self.watchXmrSwap(bid, offer, xmr_swap)
else:
bid.initiate_tx = session.query(SwapTx).filter(sa.and_(SwapTx.bid_id == bid.bid_id, SwapTx.tx_type == TxTypes.ITX)).first()
bid.participate_tx = session.query(SwapTx).filter(sa.and_(SwapTx.bid_id == bid.bid_id, SwapTx.tx_type == TxTypes.PTX)).first()
self.swaps_in_progress[bid.bid_id] = (bid, offer)
coin_from = Coins(offer.coin_from)
coin_to = Coins(offer.coin_to)
if bid.initiate_tx and bid.initiate_tx.txid:
self.addWatchedOutput(coin_from, bid.bid_id, bid.initiate_tx.txid.hex(), bid.initiate_tx.vout, BidStates.SWAP_INITIATED)
if bid.participate_tx and bid.participate_tx.txid:
self.addWatchedOutput(coin_to, bid.bid_id, bid.participate_tx.txid.hex(), bid.participate_tx.vout, BidStates.SWAP_PARTICIPATING)
if self.coin_clients[coin_from]['last_height_checked'] < 1:
if bid.initiate_tx and bid.initiate_tx.chain_height:
self.coin_clients[coin_from]['last_height_checked'] = bid.initiate_tx.chain_height
if self.coin_clients[coin_to]['last_height_checked'] < 1:
if bid.participate_tx and bid.participate_tx.chain_height:
self.coin_clients[coin_to]['last_height_checked'] = bid.participate_tx.chain_height
# TODO process addresspool if bid has previously been abandoned
def deactivateBid(self, session, offer, bid):
# Remove from in progress
self.log.debug('Removing bid from in-progress: %s', bid.bid_id.hex())
self.swaps_in_progress.pop(bid.bid_id, None)
bid.in_progress = 0
if session is None:
self.saveBid(bid.bid_id, bid)
# Remove any watched outputs
self.removeWatchedOutput(Coins(offer.coin_from), bid.bid_id, None)
self.removeWatchedOutput(Coins(offer.coin_to), bid.bid_id, None)
if bid.state == BidStates.BID_ABANDONED or bid.state == BidStates.SWAP_COMPLETED:
# Return unused addrs to pool
itx_state = bid.getITxState()
ptx_state = bid.getPTxState()
if itx_state is not None and itx_state != TxStates.TX_REDEEMED:
self.returnAddressToPool(bid.bid_id, TxTypes.ITX_REDEEM)
if itx_state is not None and itx_state != TxStates.TX_REFUNDED:
self.returnAddressToPool(bid.bid_id, TxTypes.ITX_REFUND)
if ptx_state is not None and ptx_state != TxStates.TX_REDEEMED:
self.returnAddressToPool(bid.bid_id, TxTypes.PTX_REDEEM)
if ptx_state is not None and ptx_state != TxStates.TX_REFUNDED:
self.returnAddressToPool(bid.bid_id, TxTypes.PTX_REFUND)
use_session = None
try:
if session:
use_session = session
else:
self.mxDB.acquire()
use_session = scoped_session(self.session_factory)
# Remove any delayed events
if self.debug:
use_session.execute('UPDATE eventqueue SET active_ind = 2 WHERE linked_id = x\'{}\' '.format(bid.bid_id.hex()))
else:
use_session.execute('DELETE FROM eventqueue WHERE linked_id = x\'{}\' '.format(bid.bid_id.hex()))
# Unlock locked inputs (TODO)
if offer.swap_type == SwapTypes.XMR_SWAP:
xmr_swap = use_session.query(XmrSwap).filter_by(bid_id=bid.bid_id).first()
if xmr_swap:
try:
self.ci(offer.coin_from).unlockInputs(xmr_swap.a_lock_tx)
except Exception as e:
self.log.debug('unlockInputs failed {}'.format(str(e)))
pass # Invalid parameter, unknown transaction
elif offer.swap_type == SwapTypes.SELLER_FIRST:
pass # No prevouts are locked
# Update identity stats
if bid.state in (BidStates.BID_ERROR, BidStates.XMR_SWAP_FAILED_REFUNDED, BidStates.XMR_SWAP_FAILED_SWIPED, BidStates.XMR_SWAP_FAILED, BidStates.SWAP_COMPLETED):
peer_address = offer.addr_from if bid.was_sent else bid.bid_addr
self.updateIdentityBidState(use_session, peer_address, bid)
finally:
if session is None:
use_session.commit()
use_session.close()
use_session.remove()
self.mxDB.release()
def loadFromDB(self):
self.log.info('Loading data from db')
self.mxDB.acquire()
try:
session = scoped_session(self.session_factory)
for bid in session.query(Bid):
if bid.in_progress == 1 or (bid.state and bid.state > BidStates.BID_RECEIVED and bid.state < BidStates.SWAP_COMPLETED):
try:
self.activateBid(session, bid)
except Exception as ex:
self.log.error('Failed to activate bid! Error: %s', str(ex))
if self.debug:
self.log.error(traceback.format_exc())
try:
bid.setState(BidStates.BID_ERROR, 'Failed to activate')
offer = session.query(Offer).filter_by(offer_id=bid.offer_id).first()
self.deactivateBid(session, offer, bid)
except Exception as ex:
self.log.error('Further error deactivating: %s', str(ex))
if self.debug:
self.log.error(traceback.format_exc())
finally:
session.close()
session.remove()
self.mxDB.release()
def initialise(self):
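# Import the network key into SMSG if it is not already present, reload active bids from the database and process any unread SMSG messages received while offline.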
self.log.debug('network_key %s\nnetwork_pubkey %s\nnetwork_addr %s',
self.network_key, self.network_pubkey, self.network_addr)
ro = self.callrpc('smsglocalkeys')
found = False
for k in ro['smsg_keys']:
if k['address'] == self.network_addr:
found = True
break
if not found:
self.log.info('Importing network key to SMSG')
self.callrpc('smsgimportprivkey', [self.network_key, 'basicswap offers'])
ro = self.callrpc('smsglocalkeys', ['anon', '-', self.network_addr])
ensure(ro['result'] == 'Success.', 'smsglocalkeys failed')
# TODO: Ensure smsg is enabled for the active wallet.
self.loadFromDB()
# Scan inbox
# TODO: Redundant? small window for zmq messages to go unnoticed during startup?
# options = {'encoding': 'hex'}
options = {'encoding': 'none'}
ro = self.callrpc('smsginbox', ['unread', '', options])
nm = 0
for msg in ro['messages']:
# TODO: Remove workaround for smsginbox bug
get_msg = self.callrpc('smsg', [msg['msgid'], {'encoding': 'hex', 'setread': True}])
self.processMsg(get_msg)
nm += 1
self.log.info('Scanned %d unread messages.', nm)
def validateSwapType(self, coin_from, coin_to, swap_type):
if coin_from == Coins.XMR:
raise ValueError('TODO: XMR coin_from')
if coin_to == Coins.XMR and swap_type != SwapTypes.XMR_SWAP:
raise ValueError('Invalid swap type for XMR')
if coin_from == Coins.PART_ANON:
raise ValueError('TODO: PART_ANON coin_from')
if coin_to == Coins.PART_ANON and swap_type != SwapTypes.XMR_SWAP:
raise ValueError('Invalid swap type for PART_ANON')
if (coin_from == Coins.PART_BLIND or coin_to == Coins.PART_BLIND) and swap_type != SwapTypes.XMR_SWAP:
raise ValueError('Invalid swap type for PART_BLIND')
def validateOfferAmounts(self, coin_from, coin_to, amount, rate, min_bid_amount):
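# Check the offered amount against min_bid_amount and both chains' min/max limits, including the derived amount on the to chain.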
ci_from = self.ci(coin_from)
ci_to = self.ci(coin_to)
ensure(amount >= min_bid_amount, 'amount < min_bid_amount')
ensure(amount > ci_from.min_amount(), 'From amount below min value for chain')
ensure(amount < ci_from.max_amount(), 'From amount above max value for chain')
amount_to = int((amount * rate) // ci_from.COIN())
ensure(amount_to > ci_to.min_amount(), 'To amount below min value for chain')
ensure(amount_to < ci_to.max_amount(), 'To amount above max value for chain')
def validateOfferLockValue(self, coin_from, coin_to, lock_type, lock_value):
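# Sequence (relative) locks require CSV support on both chains; absolute time/height locks are only allowed when at least one chain lacks CSV.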
if lock_type == OfferMessage.SEQUENCE_LOCK_TIME:
ensure(lock_value >= self.min_sequence_lock_seconds and lock_value <= self.max_sequence_lock_seconds, 'Invalid lock_value time')
ensure(self.coin_clients[coin_from]['use_csv'] and self.coin_clients[coin_to]['use_csv'], 'Both coins need CSV activated.')
elif lock_type == OfferMessage.SEQUENCE_LOCK_BLOCKS:
ensure(lock_value >= 5 and lock_value <= 1000, 'Invalid lock_value blocks')
ensure(self.coin_clients[coin_from]['use_csv'] and self.coin_clients[coin_to]['use_csv'], 'Both coins need CSV activated.')
elif lock_type == TxLockTypes.ABS_LOCK_TIME:
# TODO: range?
ensure(not self.coin_clients[coin_from]['use_csv'] or not self.coin_clients[coin_to]['use_csv'], 'Should use CSV.')
ensure(lock_value >= 4 * 60 * 60 and lock_value <= 96 * 60 * 60, 'Invalid lock_value time')
elif lock_type == TxLockTypes.ABS_LOCK_BLOCKS:
# TODO: range?
ensure(not self.coin_clients[coin_from]['use_csv'] or not self.coin_clients[coin_to]['use_csv'], 'Should use CSV.')
ensure(lock_value >= 10 and lock_value <= 1000, 'Invalid lock_value blocks')
else:
raise ValueError('Unknown locktype')
def validateOfferValidTime(self, offer_type, coin_from, coin_to, valid_for_seconds):
# TODO: adjust
if valid_for_seconds < 10 * 60:
raise ValueError('Offer TTL too low')
if valid_for_seconds > 48 * 60 * 60:
raise ValueError('Offer TTL too high')
def validateBidValidTime(self, offer_type, coin_from, coin_to, valid_for_seconds):
# TODO: adjust
if valid_for_seconds < 10 * 60:
raise ValueError('Bid TTL too low')
if valid_for_seconds > 24 * 60 * 60:
raise ValueError('Bid TTL too high')
def validateBidAmount(self, offer, bid_amount, bid_rate):
ensure(bid_amount >= offer.min_bid_amount, 'Bid amount below minimum')
ensure(bid_amount <= offer.amount_from, 'Bid amount above offer amount')
if not offer.amount_negotiable:
ensure(offer.amount_from == bid_amount, 'Bid amount must match offer amount.')
if not offer.rate_negotiable:
ensure(offer.rate == bid_rate, 'Bid rate must match offer rate.')
def getOfferAddressTo(self, extra_options):
if 'addr_send_to' in extra_options:
return extra_options['addr_send_to']
return self.network_addr
def postOffer(self, coin_from, coin_to, amount, rate, min_bid_amount, swap_type,
lock_type=TxLockTypes.SEQUENCE_LOCK_TIME, lock_value=48 * 60 * 60, auto_accept_bids=False, addr_send_from=None, extra_options={}):
# Offer to send offer.amount_from of coin_from in exchange for offer.amount_from * offer.rate of coin_to
ensure(coin_from != coin_to, 'coin_from == coin_to')
try:
coin_from_t = Coins(coin_from)
ci_from = self.ci(coin_from_t)
except Exception:
raise ValueError('Unknown coin from type')
try:
coin_to_t = Coins(coin_to)
ci_to = self.ci(coin_to_t)
except Exception:
raise ValueError('Unknown coin to type')
valid_for_seconds = extra_options.get('valid_for_seconds', 60 * 60)
self.validateSwapType(coin_from_t, coin_to_t, swap_type)
self.validateOfferAmounts(coin_from_t, coin_to_t, amount, rate, min_bid_amount)
self.validateOfferLockValue(coin_from_t, coin_to_t, lock_type, lock_value)
self.validateOfferValidTime(swap_type, coin_from_t, coin_to_t, valid_for_seconds)
offer_addr_to = self.getOfferAddressTo(extra_options)
self.mxDB.acquire()
session = None
try:
self.checkSynced(coin_from_t, coin_to_t)
offer_addr = self.newSMSGAddress(use_type=AddressTypes.OFFER)[0] if addr_send_from is None else addr_send_from
offer_created_at = int(time.time())
msg_buf = OfferMessage()
msg_buf.protocol_version = 1
msg_buf.coin_from = int(coin_from)
msg_buf.coin_to = int(coin_to)
msg_buf.amount_from = int(amount)
msg_buf.rate = int(rate)
msg_buf.min_bid_amount = int(min_bid_amount)
msg_buf.time_valid = valid_for_seconds
msg_buf.lock_type = lock_type
msg_buf.lock_value = lock_value
msg_buf.swap_type = swap_type
msg_buf.amount_negotiable = extra_options.get('amount_negotiable', False)
msg_buf.rate_negotiable = extra_options.get('rate_negotiable', False)
if msg_buf.amount_negotiable or msg_buf.rate_negotiable:
ensure(auto_accept_bids is False, 'Auto-accept unavailable when amount or rate are variable')
if 'from_fee_override' in extra_options:
msg_buf.fee_rate_from = make_int(extra_options['from_fee_override'], self.ci(coin_from).exp())
else:
# TODO: conf_target = ci_from.settings.get('conf_target', 2)
conf_target = 2
if 'from_fee_conf_target' in extra_options:
conf_target = extra_options['from_fee_conf_target']
fee_rate, fee_src = self.getFeeRateForCoin(coin_from, conf_target)
if 'from_fee_multiplier_percent' in extra_options:
fee_rate *= extra_options['from_fee_multiplier_percent'] / 100.0
msg_buf.fee_rate_from = make_int(fee_rate, self.ci(coin_from).exp())
if 'to_fee_override' in extra_options:
msg_buf.fee_rate_to = make_int(extra_options['to_fee_override'], self.ci(coin_to).exp())
else:
# TODO: conf_target = ci_to.settings.get('conf_target', 2)
conf_target = 2
if 'to_fee_conf_target' in extra_options:
conf_target = extra_options['to_fee_conf_target']
fee_rate, fee_src = self.getFeeRateForCoin(coin_to, conf_target)
if 'to_fee_multiplier_percent' in extra_options:
fee_rate *= extra_options['to_fee_multiplier_percent'] / 100.0
msg_buf.fee_rate_to = make_int(fee_rate, self.ci(coin_to).exp())
if swap_type == SwapTypes.XMR_SWAP:
xmr_offer = XmrOffer()
# Delay before the chain A lock refund tx can be mined
xmr_offer.lock_time_1 = ci_from.getExpectedSequence(lock_type, lock_value)
# Delay before the follower can spend from the chain A lock refund tx
xmr_offer.lock_time_2 = ci_from.getExpectedSequence(lock_type, lock_value)
xmr_offer.a_fee_rate = msg_buf.fee_rate_from
xmr_offer.b_fee_rate = msg_buf.fee_rate_to # Unused: TODO - Set priority?
proof_of_funds_hash = getOfferProofOfFundsHash(msg_buf, offer_addr)
proof_addr, proof_sig = self.getProofOfFunds(coin_from_t, int(amount), proof_of_funds_hash)
# TODO: For now proof_of_funds is just a client side check, may need to be sent with offers in future however.
offer_bytes = msg_buf.SerializeToString()
payload_hex = str.format('{:02x}', MessageTypes.OFFER) + offer_bytes.hex()
options = {'decodehex': True, 'ttl_is_seconds': True}
msg_valid = max(self.SMSG_SECONDS_IN_HOUR * 1, valid_for_seconds)
ro = self.callrpc('smsgsend', [offer_addr, offer_addr_to, payload_hex, False, msg_valid, False, options])
msg_id = ro['msgid']
offer_id = bytes.fromhex(msg_id)
security_token = extra_options.get('security_token', None)
if security_token is not None and len(security_token) != 20:
raise ValueError('Security token must be 20 bytes long.')
session = scoped_session(self.session_factory)
offer = Offer(
offer_id=offer_id,
active_ind=1,
protocol_version=msg_buf.protocol_version,
coin_from=msg_buf.coin_from,
coin_to=msg_buf.coin_to,
amount_from=msg_buf.amount_from,
rate=msg_buf.rate,
min_bid_amount=msg_buf.min_bid_amount,
time_valid=msg_buf.time_valid,
lock_type=int(msg_buf.lock_type),
lock_value=msg_buf.lock_value,
swap_type=msg_buf.swap_type,
amount_negotiable=msg_buf.amount_negotiable,
rate_negotiable=msg_buf.rate_negotiable,
addr_to=offer_addr_to,
addr_from=offer_addr,
created_at=offer_created_at,
expire_at=offer_created_at + msg_buf.time_valid,
was_sent=True,
auto_accept_bids=auto_accept_bids,
security_token=security_token)
offer.setState(OfferStates.OFFER_SENT)
if swap_type == SwapTypes.XMR_SWAP:
xmr_offer.offer_id = offer_id
session.add(xmr_offer)
session.add(offer)
session.add(SentOffer(offer_id=offer_id))
session.commit()
finally:
if session:
session.close()
session.remove()
self.mxDB.release()
self.log.info('Sent OFFER %s', offer_id.hex())
return offer_id
def revokeOffer(self, offer_id, security_token=None):
self.log.info('Revoking offer %s', offer_id.hex())
session = None
self.mxDB.acquire()
try:
session = scoped_session(self.session_factory)
offer = session.query(Offer).filter_by(offer_id=offer_id).first()
if offer.security_token is not None and offer.security_token != security_token:
raise ValueError('Mismatched security token')
msg_buf = OfferRevokeMessage()
msg_buf.offer_msg_id = offer_id
signature_enc = self.callcoinrpc(Coins.PART, 'signmessage', [offer.addr_from, offer_id.hex() + '_revoke'])
msg_buf.signature = base64.b64decode(signature_enc)
msg_bytes = msg_buf.SerializeToString()
payload_hex = str.format('{:02x}', MessageTypes.OFFER_REVOKE) + msg_bytes.hex()
options = {'decodehex': True, 'ttl_is_seconds': True}
ro = self.callrpc('smsgsend', [offer.addr_from, self.network_addr, payload_hex, False, offer.time_valid, False, options])
msg_id = ro['msgid']
finally:
if session:
session.close()
session.remove()
self.mxDB.release()
def grindForEd25519Key(self, coin_type, evkey, key_path_base):
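# Derive child keys at successive path indices until one passes ci.verifyKey, as needed for ed25519-based coins.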
ci = self.ci(coin_type)
nonce = 1
while True:
key_path = key_path_base + '/{}'.format(nonce)
extkey = self.callcoinrpc(Coins.PART, 'extkey', ['info', evkey, key_path])['key_info']['result']
privkey = decodeWif(self.callcoinrpc(Coins.PART, 'extkey', ['info', extkey])['key_info']['privkey'])
if ci.verifyKey(privkey):
return privkey
nonce += 1
if nonce > 1000:
raise ValueError('grindForEd25519Key failed')
def getWalletKey(self, coin_type, key_num, for_ed25519=False):
evkey = self.callcoinrpc(Coins.PART, 'extkey', ['account', 'default', 'true'])['evkey']
key_path_base = '44445555h/1h/{}/{}'.format(int(coin_type), key_num)
if not for_ed25519:
extkey = self.callcoinrpc(Coins.PART, 'extkey', ['info', evkey, key_path_base])['key_info']['result']
return decodeWif(self.callcoinrpc(Coins.PART, 'extkey', ['info', extkey])['key_info']['privkey'])
return self.grindForEd25519Key(coin_type, evkey, key_path_base)
def getPathKey(self, coin_from, coin_to, offer_created_at, contract_count, key_no, for_ed25519=False):
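# Derive a deterministic per-swap key from the Particl account extkey, with a path built from the coin ids, offer creation time, contract count and key number.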
evkey = self.callcoinrpc(Coins.PART, 'extkey', ['account', 'default', 'true'])['evkey']
ci = self.ci(coin_to)
days = offer_created_at // 86400
secs = offer_created_at - days * 86400
key_path_base = '44445555h/999999/{}/{}/{}/{}/{}/{}'.format(int(coin_from), int(coin_to), days, secs, contract_count, key_no)
if not for_ed25519:
extkey = self.callcoinrpc(Coins.PART, 'extkey', ['info', evkey, key_path_base])['key_info']['result']
return decodeWif(self.callcoinrpc(Coins.PART, 'extkey', ['info', extkey])['key_info']['privkey'])
return self.grindForEd25519Key(coin_to, evkey, key_path_base)
def getNetworkKey(self, key_num):
evkey = self.callcoinrpc(Coins.PART, 'extkey', ['account', 'default', 'true'])['evkey']
key_path = '44445556h/1h/{}'.format(int(key_num))
extkey = self.callcoinrpc(Coins.PART, 'extkey', ['info', evkey, key_path])['key_info']['result']
return decodeWif(self.callcoinrpc(Coins.PART, 'extkey', ['info', extkey])['key_info']['privkey'])
def getContractPubkey(self, date, contract_count):
account = self.callcoinrpc(Coins.PART, 'extkey', ['account'])
# Derive an address to use for a contract
evkey = self.callcoinrpc(Coins.PART, 'extkey', ['account', 'default', 'true'])['evkey']
# Should the coin path be included?
path = '44445555h'
path += '/' + str(date.year) + '/' + str(date.month) + '/' + str(date.day)
path += '/' + str(contract_count)
extkey = self.callcoinrpc(Coins.PART, 'extkey', ['info', evkey, path])['key_info']['result']
pubkey = self.callcoinrpc(Coins.PART, 'extkey', ['info', extkey])['key_info']['pubkey']
return bytes.fromhex(pubkey)
def getContractPrivkey(self, date, contract_count):
# Derive an address to use for a contract
evkey = self.callcoinrpc(Coins.PART, 'extkey', ['account', 'default', 'true'])['evkey']
path = '44445555h'
path += '/' + str(date.year) + '/' + str(date.month) + '/' + str(date.day)
path += '/' + str(contract_count)
extkey = self.callcoinrpc(Coins.PART, 'extkey', ['info', evkey, path])['key_info']['result']
privkey = self.callcoinrpc(Coins.PART, 'extkey', ['info', extkey])['key_info']['privkey']
raw = decodeAddress(privkey)[1:]
if len(raw) > 32:
raw = raw[:32]
return raw
def getContractSecret(self, date, contract_count):
# Derive a key to use for a contract secret
evkey = self.callcoinrpc(Coins.PART, 'extkey', ['account', 'default', 'true'])['evkey']
path = '44445555h/99999'
path += '/' + str(date.year) + '/' + str(date.month) + '/' + str(date.day)
path += '/' + str(contract_count)
return hashlib.sha256(bytes(self.callcoinrpc(Coins.PART, 'extkey', ['info', evkey, path])['key_info']['result'], 'utf-8')).digest()
def getReceiveAddressFromPool(self, coin_type, bid_id, tx_type):
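# Reuse an unassigned pooled address for the coin if one exists, otherwise generate a new receive address and add it to the pool, then assign it to the bid.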
self.log.debug('Get address from pool bid_id {}, type {}, coin {}'.format(bid_id.hex(), tx_type, coin_type))
self.mxDB.acquire()
try:
session = scoped_session(self.session_factory)
record = session.query(PooledAddress).filter(sa.and_(PooledAddress.coin_type == int(coin_type), PooledAddress.bid_id == None)).first() # noqa: E712,E711
if not record:
address = self.getReceiveAddressForCoin(coin_type)
record = PooledAddress(
addr=address,
coin_type=int(coin_type))
record.bid_id = bid_id
record.tx_type = tx_type
addr = record.addr
session.add(record)
session.commit()
finally:
session.close()
session.remove()
self.mxDB.release()
return addr
def returnAddressToPool(self, bid_id, tx_type):
self.log.debug('Return address to pool bid_id {}, type {}'.format(bid_id.hex(), tx_type))
self.mxDB.acquire()
try:
session = scoped_session(self.session_factory)
try:
record = session.query(PooledAddress).filter(sa.and_(PooledAddress.bid_id == bid_id, PooledAddress.tx_type == tx_type)).one()
self.log.debug('Returning address to pool addr {}'.format(record.addr))
record.bid_id = None
session.commit()
except Exception as ex:
self.log.debug('returnAddressToPool: %s', str(ex))
finally:
session.close()
session.remove()
self.mxDB.release()
def getReceiveAddressForCoin(self, coin_type):
new_addr = self.ci(coin_type).getNewAddress(self.coin_clients[coin_type]['use_segwit'])
self.log.debug('Generated new receive address %s for %s', new_addr, str(coin_type))
return new_addr
def getRelayFeeRateForCoin(self, coin_type):
return self.callcoinrpc(coin_type, 'getnetworkinfo')['relayfee']
def getFeeRateForCoin(self, coin_type, conf_target=2):
chain_client_settings = self.getChainClientSettings(coin_type)
override_feerate = chain_client_settings.get('override_feerate', None)
if override_feerate:
self.log.debug('Fee rate override used for %s: %f', str(coin_type), override_feerate)
return override_feerate, 'override_feerate'
return self.ci(coin_type).get_fee_rate(conf_target)
def estimateWithdrawFee(self, coin_type, fee_rate):
if coin_type == Coins.XMR:
self.log.error('TODO: estimateWithdrawFee XMR')
return None
tx_vsize = self.getContractSpendTxVSize(coin_type)
est_fee = (fee_rate * tx_vsize) / 1000
return est_fee
def withdrawCoin(self, coin_type, value, addr_to, subfee):
ci = self.ci(coin_type)
self.log.info('withdrawCoin %s %s to %s %s', value, ci.ticker(), addr_to, ' subfee' if subfee else '')
txid = ci.withdrawCoin(value, addr_to, subfee)
self.log.debug('In txn: {}'.format(txid))
return txid
def withdrawParticl(self, type_from, type_to, value, addr_to, subfee):
self.log.info('withdrawParticl %s %s to %s %s %s', value, type_from, type_to, addr_to, ' subfee' if subfee else '')
if type_from == 'plain':
type_from = 'part'
if type_to == 'plain':
type_to = 'part'
ci = self.ci(Coins.PART)
txid = ci.sendTypeTo(type_from, type_to, value, addr_to, subfee)
self.log.debug('In txn: {}'.format(txid))
return txid
def cacheNewAddressForCoin(self, coin_type):
self.log.debug('cacheNewAddressForCoin %s', coin_type)
key_str = 'receive_addr_' + chainparams[coin_type]['name']
addr = self.getReceiveAddressForCoin(coin_type)
self.setStringKV(key_str, addr)
return addr
def getCachedMainWalletAddress(self, ci):
db_key = 'main_wallet_addr_' + ci.coin_name().lower()
cached_addr = self.getStringKV(db_key)
if cached_addr is not None:
return cached_addr
self.log.warning(f'Setting {db_key}')
main_address = ci.getMainWalletAddress()
self.setStringKV(db_key, main_address)
return main_address
def checkWalletSeed(self, c):
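# Verify the coin wallet was derived from the expected swap seed by comparing the stored main address (XMR) or seed id against the wallet's current value.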
ci = self.ci(c)
if c == Coins.PART:
return True # TODO
if c == Coins.XMR:
expect_address = self.getCachedMainWalletAddress(ci)
if expect_address is None:
self.log.warning('Can\'t find expected main wallet address for coin {}'.format(ci.coin_name()))
return False
if expect_address == ci.getMainWalletAddress():
ci.setWalletSeedWarning(False)
return True
self.log.warning('Wallet for coin {} not derived from swap seed.'.format(ci.coin_name()))
return False
expect_seedid = self.getStringKV('main_wallet_seedid_' + ci.coin_name().lower())
if expect_seedid is None:
self.log.warning('Can\'t find expected wallet seed id for coin {}'.format(ci.coin_name()))
return False
if expect_seedid == ci.getWalletSeedID():
ci.setWalletSeedWarning(False)
return True
self.log.warning('Wallet for coin {} not derived from swap seed.'.format(ci.coin_name()))
return False
def reseedWallet(self, coin_type):
self.log.info('reseedWallet %s', coin_type)
ci = self.ci(coin_type)
if ci.knownWalletSeed():
raise ValueError('{} wallet seed is already derived from the particl mnemonic'.format(ci.coin_name()))
self.initialiseWallet(coin_type)
# TODO: How to scan pruned blocks?
if not self.checkWalletSeed(coin_type):
if coin_type == Coins.XMR:
raise ValueError('TODO: How to reseed XMR wallet?')
else:
raise ValueError('Wallet seed doesn\'t match expected.')
def getCachedAddressForCoin(self, coin_type):
self.log.debug('getCachedAddressForCoin %s', coin_type)
# TODO: auto refresh after used
key_str = 'receive_addr_' + chainparams[coin_type]['name']
self.mxDB.acquire()
try:
session = scoped_session(self.session_factory)
try:
addr = session.query(DBKVString).filter_by(key=key_str).first().value
except Exception:
addr = self.getReceiveAddressForCoin(coin_type)
session.add(DBKVString(
key=key_str,
value=addr
))
session.commit()
finally:
session.close()
session.remove()
self.mxDB.release()
return addr
def getCachedStealthAddressForCoin(self, coin_type):
self.log.debug('getCachedStealthAddressForCoin %s', coin_type)
ci = self.ci(coin_type)
key_str = 'stealth_addr_' + ci.coin_name().lower()
self.mxDB.acquire()
try:
session = scoped_session(self.session_factory)
try:
addr = session.query(DBKVString).filter_by(key=key_str).first().value
except Exception:
addr = ci.getNewStealthAddress()
self.log.info('Generated new stealth address for %s', coin_type)
session.add(DBKVString(
key=key_str,
value=addr
))
session.commit()
finally:
session.close()
session.remove()
self.mxDB.release()
return addr
def getCachedWalletRestoreHeight(self, ci):
self.log.debug('getCachedWalletRestoreHeight %s', ci.coin_name())
key_str = 'restore_height_' + ci.coin_name().lower()
self.mxDB.acquire()
try:
session = scoped_session(self.session_factory)
try:
wrh = session.query(DBKVInt).filter_by(key=key_str).first().value
except Exception:
wrh = ci.getWalletRestoreHeight()
self.log.info('Found restore height for %s, block %d', ci.coin_name(), wrh)
session.add(DBKVInt(
key=key_str,
value=wrh
))
session.commit()
finally:
session.close()
session.remove()
self.mxDB.release()
return wrh
def getWalletRestoreHeight(self, ci):
wrh = ci._restore_height
if wrh is not None:
return wrh
found_height = self.getCachedWalletRestoreHeight(ci)
ci.setWalletRestoreHeight(found_height)
return found_height
def getNewContractId(self):
self.mxDB.acquire()
try:
self._contract_count += 1
session = scoped_session(self.session_factory)
session.execute('UPDATE kv_int SET value = {} WHERE KEY="contract_count"'.format(self._contract_count))
session.commit()
finally:
session.close()
session.remove()
self.mxDB.release()
return self._contract_count
def getUnspentsByAddr(self, coin_type):
ci = self.ci(coin_type)
unspent_addr = dict()
unspent = self.callcoinrpc(coin_type, 'listunspent')
for u in unspent:
if u['spendable'] is not True:
continue
unspent_addr[u['address']] = unspent_addr.get(u['address'], 0) + ci.make_int(u['amount'], r=1)
return unspent_addr
def getProofOfFunds(self, coin_type, amount_for, extra_commit_bytes):
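# Find a wallet address holding at least amount_for and sign a message over it to prove control of the funds.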
ci = self.ci(coin_type)
self.log.debug('getProofOfFunds %s %s', ci.coin_name(), ci.format_amount(amount_for))
if self.coin_clients[coin_type]['connection_type'] != 'rpc':
return (None, None)
# TODO: Lock unspent and use same output/s to fund bid
unspent_addr = self.getUnspentsByAddr(coin_type)
sign_for_addr = None
for addr, value in unspent_addr.items():
if value >= amount_for:
sign_for_addr = addr
break
ensure(sign_for_addr is not None, 'Could not find address with enough funds for proof')
self.log.debug('sign_for_addr %s', sign_for_addr)
if self.coin_clients[coin_type]['use_segwit']: # TODO: Use isSegwitAddress when scantxoutset can use combo
# 'Address does not refer to key' for non p2pkh
addrinfo = self.callcoinrpc(coin_type, 'getaddressinfo', [sign_for_addr])
pkh = addrinfo['scriptPubKey'][4:]
sign_for_addr = encodeAddress(bytes((chainparams[coin_type][self.chain]['pubkey_address'],)) + bytes.fromhex(pkh))
self.log.debug('sign_for_addr converted %s', sign_for_addr)
signature = self.callcoinrpc(coin_type, 'signmessage', [sign_for_addr, sign_for_addr + '_swap_proof_' + extra_commit_bytes.hex()])
return (sign_for_addr, signature)
def saveBidInSession(self, bid_id, bid, session, xmr_swap=None, save_in_progress=None):
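# Add the bid and any attached transaction records to the session, optionally registering the swap as in progress.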
session.add(bid)
if bid.initiate_tx:
session.add(bid.initiate_tx)
if bid.participate_tx:
session.add(bid.participate_tx)
if bid.xmr_a_lock_tx:
session.add(bid.xmr_a_lock_tx)
if bid.xmr_a_lock_spend_tx:
session.add(bid.xmr_a_lock_spend_tx)
if bid.xmr_b_lock_tx:
session.add(bid.xmr_b_lock_tx)
for tx_type, tx in bid.txns.items():
session.add(tx)
if xmr_swap is not None:
session.add(xmr_swap)
if save_in_progress is not None:
if not isinstance(save_in_progress, Offer):
raise ValueError('Must specify offer for save_in_progress')
self.swaps_in_progress[bid_id] = (bid, save_in_progress) # (bid, offer)
def saveBid(self, bid_id, bid, xmr_swap=None):
self.mxDB.acquire()
try:
session = scoped_session(self.session_factory)
self.saveBidInSession(bid_id, bid, session, xmr_swap)
session.commit()
finally:
session.close()
session.remove()
self.mxDB.release()
def saveToDB(self, db_record):
self.mxDB.acquire()
try:
session = scoped_session(self.session_factory)
session.add(db_record)
session.commit()
finally:
session.close()
session.remove()
self.mxDB.release()
def createEventInSession(self, delay, event_type, linked_id, session):
self.log.debug('createEvent %d %s', event_type, linked_id.hex())
now = int(time.time())
event = EventQueue(
active_ind=1,
created_at=now,
trigger_at=now + delay,
event_type=event_type,
linked_id=linked_id)
session.add(event)
def createEvent(self, delay, event_type, linked_id):
# self.log.debug('createEvent %d %s', event_type, linked_id.hex())
self.mxDB.acquire()
try:
session = scoped_session(self.session_factory)
self.createEventInSession(delay, event_type, linked_id, session)
session.commit()
finally:
session.close()
session.remove()
self.mxDB.release()
def logBidEvent(self, bid_id, event_type, event_msg, session):
self.log.debug('logBidEvent %s %s', bid_id.hex(), event_type)
entry = EventLog(
active_ind=1,
created_at=int(time.time()),
linked_type=TableTypes.BID,
linked_id=bid_id,
event_type=int(event_type),
event_msg=event_msg)
if session is not None:
session.add(entry)
return
self.mxDB.acquire()
try:
session = scoped_session(self.session_factory)
session.add(entry)
session.commit()
finally:
session.close()
session.remove()
self.mxDB.release()
def countBidEvents(self, bid, event_type, session):
q = session.execute('SELECT COUNT(*) FROM eventlog WHERE linked_type = {} AND linked_id = x\'{}\' AND event_type = {}'.format(int(TableTypes.BID), bid.bid_id.hex(), int(event_type))).first()
return q[0]
def postBid(self, offer_id, amount, addr_send_from=None, extra_options={}):
# Bid to send bid.amount * bid.rate of coin_to in exchange for bid.amount of coin_from
self.log.debug('postBid %s', offer_id.hex())
offer = self.getOffer(offer_id)
ensure(offer, 'Offer not found: {}.'.format(offer_id.hex()))
ensure(offer.expire_at > int(time.time()), 'Offer has expired')
if offer.swap_type == SwapTypes.XMR_SWAP:
return self.postXmrBid(offer_id, amount, addr_send_from, extra_options)
valid_for_seconds = extra_options.get('valid_for_seconds', 60 * 10)
self.validateBidValidTime(offer.swap_type, offer.coin_from, offer.coin_to, valid_for_seconds)
bid_rate = extra_options.get('bid_rate', offer.rate)
self.validateBidAmount(offer, amount, bid_rate)
self.mxDB.acquire()
try:
msg_buf = BidMessage()
msg_buf.protocol_version = 1
msg_buf.offer_msg_id = offer_id
msg_buf.time_valid = valid_for_seconds
msg_buf.amount = int(amount) # amount of coin_from
msg_buf.rate = bid_rate
coin_from = Coins(offer.coin_from)
coin_to = Coins(offer.coin_to)
ci_from = self.ci(coin_from)
ci_to = self.ci(coin_to)
self.checkSynced(coin_from, coin_to)
amount_to = int((msg_buf.amount * bid_rate) // ci_from.COIN())
now = int(time.time())
if offer.swap_type == SwapTypes.SELLER_FIRST:
proof_addr, proof_sig = self.getProofOfFunds(coin_to, amount_to, offer_id)
msg_buf.proof_address = proof_addr
msg_buf.proof_signature = proof_sig
contract_count = self.getNewContractId()
msg_buf.pkhash_buyer = getKeyID(self.getContractPubkey(dt.datetime.fromtimestamp(now).date(), contract_count))
else:
raise ValueError('TODO')
bid_bytes = msg_buf.SerializeToString()
payload_hex = str.format('{:02x}', MessageTypes.BID) + bid_bytes.hex()
bid_addr = self.newSMSGAddress(use_type=AddressTypes.BID)[0] if addr_send_from is None else addr_send_from
options = {'decodehex': True, 'ttl_is_seconds': True}
msg_valid = max(self.SMSG_SECONDS_IN_HOUR * 1, valid_for_seconds)
ro = self.callrpc('smsgsend', [bid_addr, offer.addr_from, payload_hex, False, msg_valid, False, options])
msg_id = ro['msgid']
bid_id = bytes.fromhex(msg_id)
bid = Bid(
protocol_version=msg_buf.protocol_version,
active_ind=1,
bid_id=bid_id,
offer_id=offer_id,
amount=msg_buf.amount,
rate=msg_buf.rate,
pkhash_buyer=msg_buf.pkhash_buyer,
proof_address=msg_buf.proof_address,
created_at=now,
contract_count=contract_count,
amount_to=amount_to,
expire_at=now + msg_buf.time_valid,
bid_addr=bid_addr,
was_sent=True,
chain_a_height_start=ci_from.getChainHeight(),
chain_b_height_start=ci_to.getChainHeight(),
)
bid.setState(BidStates.BID_SENT)
try:
session = scoped_session(self.session_factory)
self.saveBidInSession(bid_id, bid, session)
session.commit()
finally:
session.close()
session.remove()
self.log.info('Sent BID %s', bid_id.hex())
return bid_id
finally:
self.mxDB.release()
def getOffer(self, offer_id, sent=False):
self.mxDB.acquire()
try:
session = scoped_session(self.session_factory)
return session.query(Offer).filter_by(offer_id=offer_id).first()
finally:
session.close()
session.remove()
self.mxDB.release()
def loadBidTxns(self, bid, session):
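# Attach the bid's stored swap transactions to the bid object, keyed by transaction type.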
bid.txns = {}
for stx in session.query(SwapTx).filter(sa.and_(SwapTx.bid_id == bid.bid_id)):
if stx.tx_type == TxTypes.ITX:
bid.initiate_tx = stx
elif stx.tx_type == TxTypes.PTX:
bid.participate_tx = stx
elif stx.tx_type == TxTypes.XMR_SWAP_A_LOCK:
bid.xmr_a_lock_tx = stx
elif stx.tx_type == TxTypes.XMR_SWAP_A_LOCK_SPEND:
bid.xmr_a_lock_spend_tx = stx
elif stx.tx_type == TxTypes.XMR_SWAP_B_LOCK:
bid.xmr_b_lock_tx = stx
else:
bid.txns[stx.tx_type] = stx
def getXmrBidFromSession(self, session, bid_id, sent=False):
bid = session.query(Bid).filter_by(bid_id=bid_id).first()
xmr_swap = None
if bid:
xmr_swap = session.query(XmrSwap).filter_by(bid_id=bid_id).first()
self.loadBidTxns(bid, session)
return bid, xmr_swap
def getXmrBid(self, bid_id, sent=False):
self.mxDB.acquire()
try:
session = scoped_session(self.session_factory)
return self.getXmrBidFromSession(session, bid_id, sent)
finally:
session.close()
session.remove()
self.mxDB.release()
def getXmrOfferFromSession(self, session, offer_id, sent=False):
offer = session.query(Offer).filter_by(offer_id=offer_id).first()
xmr_offer = None
if offer:
xmr_offer = session.query(XmrOffer).filter_by(offer_id=offer_id).first()
return offer, xmr_offer
def getXmrOffer(self, offer_id, sent=False):
self.mxDB.acquire()
try:
session = scoped_session(self.session_factory)
return self.getXmrOfferFromSession(session, offer_id, sent)
finally:
session.close()
session.remove()
self.mxDB.release()
def getBid(self, bid_id):
self.mxDB.acquire()
try:
session = scoped_session(self.session_factory)
bid = session.query(Bid).filter_by(bid_id=bid_id).first()
if bid:
self.loadBidTxns(bid, session)
return bid
finally:
session.close()
session.remove()
self.mxDB.release()
def getBidAndOffer(self, bid_id):
self.mxDB.acquire()
try:
session = scoped_session(self.session_factory)
bid = session.query(Bid).filter_by(bid_id=bid_id).first()
offer = None
if bid:
offer = session.query(Offer).filter_by(offer_id=bid.offer_id).first()
if offer and offer.swap_type == SwapTypes.XMR_SWAP:
self.loadBidTxns(bid, session)
else:
bid.initiate_tx = session.query(SwapTx).filter(sa.and_(SwapTx.bid_id == bid_id, SwapTx.tx_type == TxTypes.ITX)).first()
bid.participate_tx = session.query(SwapTx).filter(sa.and_(SwapTx.bid_id == bid_id, SwapTx.tx_type == TxTypes.PTX)).first()
return bid, offer
finally:
session.close()
session.remove()
self.mxDB.release()
def getXmrBidAndOffer(self, bid_id, list_events=True):
self.mxDB.acquire()
try:
session = scoped_session(self.session_factory)
xmr_swap = None
offer = None
xmr_offer = None
events = []
bid = session.query(Bid).filter_by(bid_id=bid_id).first()
if bid:
offer = session.query(Offer).filter_by(offer_id=bid.offer_id).first()
if offer and offer.swap_type == SwapTypes.XMR_SWAP:
xmr_swap = session.query(XmrSwap).filter_by(bid_id=bid.bid_id).first()
xmr_offer = session.query(XmrOffer).filter_by(offer_id=bid.offer_id).first()
self.loadBidTxns(bid, session)
else:
bid.initiate_tx = session.query(SwapTx).filter(sa.and_(SwapTx.bid_id == bid_id, SwapTx.tx_type == TxTypes.ITX)).first()
bid.participate_tx = session.query(SwapTx).filter(sa.and_(SwapTx.bid_id == bid_id, SwapTx.tx_type == TxTypes.PTX)).first()
if list_events:
events = self.list_bid_events(bid.bid_id, session)
return bid, xmr_swap, offer, xmr_offer, events
finally:
session.close()
session.remove()
self.mxDB.release()
def getIdentity(self, address):
self.mxDB.acquire()
try:
session = scoped_session(self.session_factory)
identity = session.query(KnownIdentity).filter_by(address=address).first()
return identity
finally:
session.close()
session.remove()
self.mxDB.release()
def updateIdentity(self, address, label):
self.mxDB.acquire()
try:
session = scoped_session(self.session_factory)
identity = session.query(KnownIdentity).filter_by(address=address).first()
if identity is None:
identity = KnownIdentity(address=address)
identity.label = label
session.add(identity)
session.commit()
finally:
session.close()
session.remove()
self.mxDB.release()
def list_bid_events(self, bid_id, session):
query_str = 'SELECT created_at, event_type, event_msg FROM eventlog ' + \
'WHERE active_ind = 1 AND linked_type = {} AND linked_id = x\'{}\' '.format(TableTypes.BID, bid_id.hex())
q = session.execute(query_str)
events = []
for row in q:
events.append({'at': row[0], 'desc': describeEventEntry(row[1], row[2])})
query_str = 'SELECT created_at, trigger_at FROM eventqueue ' + \
'WHERE active_ind = 1 AND linked_id = x\'{}\' '.format(bid_id.hex())
q = session.execute(query_str)
for row in q:
events.append({'at': row[0], 'desc': 'Delaying until: {}'.format(format_timestamp(row[1], with_seconds=True))})
return events
def acceptBid(self, bid_id):
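# For seller-first swaps: create and broadcast the initiate transaction, then send a BID_ACCEPT message to the bidder. XMR-style swaps are handled by acceptXmrBid.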
self.log.info('Accepting bid %s', bid_id.hex())
bid, offer = self.getBidAndOffer(bid_id)
ensure(bid, 'Bid not found')
ensure(offer, 'Offer not found')
# Ensure bid is still valid
now = int(time.time())
ensure(bid.expire_at > now, 'Bid expired')
ensure(bid.state == BidStates.BID_RECEIVED, 'Wrong bid state: {}'.format(str(BidStates(bid.state))))
if offer.swap_type == SwapTypes.XMR_SWAP:
return self.acceptXmrBid(bid_id)
if bid.contract_count is None:
bid.contract_count = self.getNewContractId()
coin_from = Coins(offer.coin_from)
ci_from = self.ci(coin_from)
bid_date = dt.datetime.fromtimestamp(bid.created_at).date()
secret = self.getContractSecret(bid_date, bid.contract_count)
secret_hash = hashlib.sha256(secret).digest()
pubkey_refund = self.getContractPubkey(bid_date, bid.contract_count)
pkhash_refund = getKeyID(pubkey_refund)
if bid.initiate_tx is not None:
self.log.warning('Initiate txn %s already exists for bid %s', bid.initiate_tx.txid, bid_id.hex())
txid = bid.initiate_tx.txid
script = bid.initiate_tx.script
else:
if offer.lock_type < TxLockTypes.ABS_LOCK_BLOCKS:
sequence = ci_from.getExpectedSequence(offer.lock_type, offer.lock_value)
script = atomic_swap_1.buildContractScript(sequence, secret_hash, bid.pkhash_buyer, pkhash_refund)
else:
if offer.lock_type == TxLockTypes.ABS_LOCK_BLOCKS:
lock_value = self.callcoinrpc(coin_from, 'getblockcount') + offer.lock_value
else:
lock_value = int(time.time()) + offer.lock_value
self.log.debug('Initiate %s lock_value %d %d', coin_from, offer.lock_value, lock_value)
script = atomic_swap_1.buildContractScript(lock_value, secret_hash, bid.pkhash_buyer, pkhash_refund, OpCodes.OP_CHECKLOCKTIMEVERIFY)
p2sh = self.callcoinrpc(Coins.PART, 'decodescript', [script.hex()])['p2sh']
bid.pkhash_seller = pkhash_refund
txn = self.createInitiateTxn(coin_from, bid_id, bid, script)
# Store the signed refund txn in case wallet is locked when refund is possible
refund_txn = self.createRefundTxn(coin_from, txn, offer, bid, script)
bid.initiate_txn_refund = bytes.fromhex(refund_txn)
txid = self.submitTxn(coin_from, txn)
self.log.debug('Submitted initiate txn %s to %s chain for bid %s', txid, ci_from.coin_name(), bid_id.hex())
bid.initiate_tx = SwapTx(
bid_id=bid_id,
tx_type=TxTypes.ITX,
txid=bytes.fromhex(txid),
tx_data=bytes.fromhex(txn),
script=script,
)
bid.setITxState(TxStates.TX_SENT)
# Check non-bip68 final
try:
txid = self.submitTxn(coin_from, bid.initiate_txn_refund.hex())
self.log.error('Submit refund_txn unexpectedly worked: ' + txid)
except Exception as ex:
if 'non-BIP68-final' not in str(ex) and 'non-final' not in str(ex):
self.log.error('Submit refund_txn unexpected error: ' + str(ex))
if txid is not None:
msg_buf = BidAcceptMessage()
msg_buf.bid_msg_id = bid_id
msg_buf.initiate_txid = bytes.fromhex(txid)
msg_buf.contract_script = bytes(script)
bid_bytes = msg_buf.SerializeToString()
payload_hex = str.format('{:02x}', MessageTypes.BID_ACCEPT) + bid_bytes.hex()
options = {'decodehex': True, 'ttl_is_seconds': True}
# TODO: set msg_valid based on bid / offer parameters
msg_valid = self.SMSG_SECONDS_IN_HOUR * 48
ro = self.callrpc('smsgsend', [offer.addr_from, bid.bid_addr, payload_hex, False, msg_valid, False, options])
msg_id = ro['msgid']
accept_msg_id = bytes.fromhex(msg_id)
bid.accept_msg_id = accept_msg_id
bid.setState(BidStates.BID_ACCEPTED)
self.log.info('Sent BID_ACCEPT %s', accept_msg_id.hex())
self.saveBid(bid_id, bid)
self.swaps_in_progress[bid_id] = (bid, offer)
def postXmrBid(self, offer_id, amount, addr_send_from=None, extra_options={}):
# Bid to send bid.amount * bid.rate of coin_to in exchange for bid.amount of coin_from
# Send MSG1L F -> L
self.log.debug('postXmrBid %s', offer_id.hex())
self.mxDB.acquire()
try:
offer, xmr_offer = self.getXmrOffer(offer_id)
ensure(offer, 'Offer not found: {}.'.format(offer_id.hex()))
ensure(xmr_offer, 'XMR offer not found: {}.'.format(offer_id.hex()))
ensure(offer.expire_at > int(time.time()), 'Offer has expired')
valid_for_seconds = extra_options.get('valid_for_seconds', 60 * 10)
self.validateBidValidTime(offer.swap_type, offer.coin_from, offer.coin_to, valid_for_seconds)
coin_from = Coins(offer.coin_from)
coin_to = Coins(offer.coin_to)
ci_from = self.ci(coin_from)
ci_to = self.ci(coin_to)
bid_rate = extra_options.get('bid_rate', offer.rate)
self.validateBidAmount(offer, amount, bid_rate)
self.checkSynced(coin_from, coin_to)
amount_to = int((int(amount) * bid_rate) // ci_from.COIN())
balance_to = ci_to.getSpendableBalance()
ensure(balance_to > amount_to, '{} spendable balance is too low: {}'.format(ci_to.coin_name(), ci_to.format_amount(balance_to)))
msg_buf = XmrBidMessage()
msg_buf.protocol_version = 1
msg_buf.offer_msg_id = offer_id
msg_buf.time_valid = valid_for_seconds
msg_buf.amount = int(amount) # Amount of coin_from
msg_buf.rate = bid_rate
address_out = self.getReceiveAddressFromPool(coin_from, offer_id, TxTypes.XMR_SWAP_A_LOCK)
if coin_from == Coins.PART_BLIND:
addrinfo = ci_from.rpc_callback('getaddressinfo', [address_out])
msg_buf.dest_af = bytes.fromhex(addrinfo['pubkey'])
else:
msg_buf.dest_af = ci_from.decodeAddress(address_out)
bid_created_at = int(time.time())
if offer.swap_type != SwapTypes.XMR_SWAP:
raise ValueError('TODO')
# Follower to leader
xmr_swap = XmrSwap()
xmr_swap.contract_count = self.getNewContractId()
xmr_swap.dest_af = msg_buf.dest_af
for_ed25519 = (coin_to == Coins.XMR)
kbvf = self.getPathKey(coin_from, coin_to, bid_created_at, xmr_swap.contract_count, KeyTypes.KBVF, for_ed25519)
kbsf = self.getPathKey(coin_from, coin_to, bid_created_at, xmr_swap.contract_count, KeyTypes.KBSF, for_ed25519)
kaf = self.getPathKey(coin_from, coin_to, bid_created_at, xmr_swap.contract_count, KeyTypes.KAF)
xmr_swap.vkbvf = kbvf
xmr_swap.pkbvf = ci_to.getPubkey(kbvf)
xmr_swap.pkbsf = ci_to.getPubkey(kbsf)
xmr_swap.pkaf = ci_from.getPubkey(kaf)
if coin_to == Coins.XMR:
xmr_swap.kbsf_dleag = ci_to.proveDLEAG(kbsf)
else:
xmr_swap.kbsf_dleag = xmr_swap.pkbsf
xmr_swap.pkasf = xmr_swap.kbsf_dleag[0: 33]
assert(xmr_swap.pkasf == ci_from.getPubkey(kbsf))
msg_buf.pkaf = xmr_swap.pkaf
msg_buf.kbvf = kbvf
if coin_to == Coins.XMR:
msg_buf.kbsf_dleag = xmr_swap.kbsf_dleag[:16000]
else:
msg_buf.kbsf_dleag = xmr_swap.kbsf_dleag
bid_bytes = msg_buf.SerializeToString()
payload_hex = str.format('{:02x}', MessageTypes.XMR_BID_FL) + bid_bytes.hex()
bid_addr = self.newSMSGAddress(use_type=AddressTypes.BID)[0] if addr_send_from is None else addr_send_from
options = {'decodehex': True, 'ttl_is_seconds': True}
msg_valid = max(self.SMSG_SECONDS_IN_HOUR * 1, valid_for_seconds)
ro = self.callrpc('smsgsend', [bid_addr, offer.addr_from, payload_hex, False, msg_valid, False, options])
xmr_swap.bid_id = bytes.fromhex(ro['msgid'])
if coin_to == Coins.XMR:
msg_buf2 = XmrSplitMessage(
msg_id=xmr_swap.bid_id,
msg_type=XmrSplitMsgTypes.BID,
sequence=2,
dleag=xmr_swap.kbsf_dleag[16000:32000]
)
msg_bytes = msg_buf2.SerializeToString()
payload_hex = str.format('{:02x}', MessageTypes.XMR_BID_SPLIT) + msg_bytes.hex()
ro = self.callrpc('smsgsend', [bid_addr, offer.addr_from, payload_hex, False, msg_valid, False, options])
xmr_swap.bid_msg_id2 = bytes.fromhex(ro['msgid'])
msg_buf3 = XmrSplitMessage(
msg_id=xmr_swap.bid_id,
msg_type=XmrSplitMsgTypes.BID,
sequence=3,
dleag=xmr_swap.kbsf_dleag[32000:]
)
msg_bytes = msg_buf3.SerializeToString()
payload_hex = str.format('{:02x}', MessageTypes.XMR_BID_SPLIT) + msg_bytes.hex()
ro = self.callrpc('smsgsend', [bid_addr, offer.addr_from, payload_hex, False, msg_valid, False, options])
xmr_swap.bid_msg_id3 = bytes.fromhex(ro['msgid'])
bid = Bid(
protocol_version=msg_buf.protocol_version,
active_ind=1,
bid_id=xmr_swap.bid_id,
offer_id=offer_id,
amount=msg_buf.amount,
rate=msg_buf.rate,
created_at=bid_created_at,
contract_count=xmr_swap.contract_count,
amount_to=(msg_buf.amount * msg_buf.rate) // ci_from.COIN(),
expire_at=bid_created_at + msg_buf.time_valid,
bid_addr=bid_addr,
was_sent=True,
)
bid.chain_a_height_start = ci_from.getChainHeight()
bid.chain_b_height_start = ci_to.getChainHeight()
wallet_restore_height = self.getWalletRestoreHeight(ci_to)
if bid.chain_b_height_start < wallet_restore_height:
bid.chain_b_height_start = wallet_restore_height
self.log.warning('XMR swap restore height clamped to {}'.format(wallet_restore_height))
bid.setState(BidStates.BID_SENT)
try:
session = scoped_session(self.session_factory)
self.saveBidInSession(xmr_swap.bid_id, bid, session, xmr_swap)
session.commit()
finally:
session.close()
session.remove()
self.log.info('Sent XMR_BID_FL %s', xmr_swap.bid_id.hex())
return xmr_swap.bid_id
finally:
self.mxDB.release()
def acceptXmrBid(self, bid_id):
# MSG1F and MSG2F L -> F
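# Derive the leader's key shares, combine them with the follower's, then build and verify the chain A lock, lock refund and lock refund spend txns before sending XMR_BID_ACCEPT_LF.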
self.log.info('Accepting xmr bid %s', bid_id.hex())
now = int(time.time())
self.mxDB.acquire()
try:
bid, xmr_swap = self.getXmrBid(bid_id)
ensure(bid, 'Bid not found: {}.'.format(bid_id.hex()))
ensure(xmr_swap, 'XMR swap not found: {}.'.format(bid_id.hex()))
ensure(bid.expire_at > now, 'Bid expired')
last_bid_state = bid.state
if last_bid_state == BidStates.SWAP_DELAYING:
last_bid_state = getLastBidState(bid.states)
ensure(last_bid_state == BidStates.BID_RECEIVED, 'Wrong bid state: {}'.format(str(BidStates(last_bid_state))))
offer, xmr_offer = self.getXmrOffer(bid.offer_id)
ensure(offer, 'Offer not found: {}.'.format(bid.offer_id.hex()))
ensure(xmr_offer, 'XMR offer not found: {}.'.format(bid.offer_id.hex()))
ensure(offer.expire_at > now, 'Offer has expired')
coin_from = Coins(offer.coin_from)
coin_to = Coins(offer.coin_to)
ci_from = self.ci(coin_from)
ci_to = self.ci(coin_to)
if xmr_swap.contract_count is None:
xmr_swap.contract_count = self.getNewContractId()
for_ed25519 = (coin_to == Coins.XMR)
kbvl = self.getPathKey(coin_from, coin_to, bid.created_at, xmr_swap.contract_count, KeyTypes.KBVL, for_ed25519)
kbsl = self.getPathKey(coin_from, coin_to, bid.created_at, xmr_swap.contract_count, KeyTypes.KBSL, for_ed25519)
kal = self.getPathKey(coin_from, coin_to, bid.created_at, xmr_swap.contract_count, KeyTypes.KAL)
xmr_swap.vkbvl = kbvl
xmr_swap.pkbvl = ci_to.getPubkey(kbvl)
xmr_swap.pkbsl = ci_to.getPubkey(kbsl)
xmr_swap.vkbv = ci_to.sumKeys(kbvl, xmr_swap.vkbvf)
ensure(ci_to.verifyKey(xmr_swap.vkbv), 'Invalid key, vkbv')
xmr_swap.pkbv = ci_to.sumPubkeys(xmr_swap.pkbvl, xmr_swap.pkbvf)
xmr_swap.pkbs = ci_to.sumPubkeys(xmr_swap.pkbsl, xmr_swap.pkbsf)
xmr_swap.pkal = ci_from.getPubkey(kal)
if coin_to == Coins.XMR:
xmr_swap.kbsl_dleag = ci_to.proveDLEAG(kbsl)
else:
xmr_swap.kbsl_dleag = xmr_swap.pkbsl
# MSG2F
xmr_swap.a_lock_tx, xmr_swap.a_lock_tx_script = ci_from.createScriptLockTx(
bid.amount,
xmr_swap.pkal, xmr_swap.pkaf, xmr_swap.vkbv
)
xmr_swap.a_lock_tx = ci_from.fundScriptLockTx(xmr_swap.a_lock_tx, xmr_offer.a_fee_rate, xmr_swap.vkbv)
xmr_swap.a_lock_tx_id = ci_from.getTxid(xmr_swap.a_lock_tx)
a_lock_tx_dest = ci_from.getScriptDest(xmr_swap.a_lock_tx_script)
xmr_swap.a_lock_refund_tx, xmr_swap.a_lock_refund_tx_script, xmr_swap.a_swap_refund_value = ci_from.createScriptLockRefundTx(
xmr_swap.a_lock_tx, xmr_swap.a_lock_tx_script,
xmr_swap.pkal, xmr_swap.pkaf,
xmr_offer.lock_time_1, xmr_offer.lock_time_2,
xmr_offer.a_fee_rate, xmr_swap.vkbv
)
xmr_swap.a_lock_refund_tx_id = ci_from.getTxid(xmr_swap.a_lock_refund_tx)
prevout_amount = ci_from.getLockTxSwapOutputValue(bid, xmr_swap)
xmr_swap.al_lock_refund_tx_sig = ci_from.signTx(kal, xmr_swap.a_lock_refund_tx, 0, xmr_swap.a_lock_tx_script, prevout_amount)
v = ci_from.verifyTxSig(xmr_swap.a_lock_refund_tx, xmr_swap.al_lock_refund_tx_sig, xmr_swap.pkal, 0, xmr_swap.a_lock_tx_script, prevout_amount)
ensure(v, 'Invalid coin A lock refund tx leader sig')
pkh_refund_to = ci_from.decodeAddress(self.getReceiveAddressForCoin(coin_from))
xmr_swap.a_lock_refund_spend_tx = ci_from.createScriptLockRefundSpendTx(
xmr_swap.a_lock_refund_tx, xmr_swap.a_lock_refund_tx_script,
pkh_refund_to,
xmr_offer.a_fee_rate, xmr_swap.vkbv
)
xmr_swap.a_lock_refund_spend_tx_id = ci_from.getTxid(xmr_swap.a_lock_refund_spend_tx)
# Double check txns before sending
self.log.debug('Bid: {} - Double checking chain A lock txns are valid before sending bid accept.'.format(bid_id.hex()))
check_lock_tx_inputs = False # TODO: check_lock_tx_inputs without txindex
_, xmr_swap.a_lock_tx_vout = ci_from.verifyLockTx(
xmr_swap.a_lock_tx,
xmr_swap.a_lock_tx_script,
bid.amount,
xmr_swap.pkal,
xmr_swap.pkaf,
xmr_offer.a_fee_rate,
check_lock_tx_inputs,
xmr_swap.vkbv)
_, _, lock_refund_vout = ci_from.verifyLockRefundTx(
xmr_swap.a_lock_refund_tx,
xmr_swap.a_lock_tx,
xmr_swap.a_lock_refund_tx_script,
xmr_swap.a_lock_tx_id,
xmr_swap.a_lock_tx_vout,
xmr_offer.lock_time_1,
xmr_swap.a_lock_tx_script,
xmr_swap.pkal,
xmr_swap.pkaf,
xmr_offer.lock_time_2,
bid.amount,
xmr_offer.a_fee_rate,
xmr_swap.vkbv)
ci_from.verifyLockRefundSpendTx(
xmr_swap.a_lock_refund_spend_tx, xmr_swap.a_lock_refund_tx,
xmr_swap.a_lock_refund_tx_id, xmr_swap.a_lock_refund_tx_script,
xmr_swap.pkal,
lock_refund_vout, xmr_swap.a_swap_refund_value, xmr_offer.a_fee_rate,
xmr_swap.vkbv)
msg_buf = XmrBidAcceptMessage()
msg_buf.bid_msg_id = bid_id
msg_buf.pkal = xmr_swap.pkal
msg_buf.kbvl = kbvl
if coin_to == Coins.XMR:
msg_buf.kbsl_dleag = xmr_swap.kbsl_dleag[:16000]
else:
msg_buf.kbsl_dleag = xmr_swap.kbsl_dleag
# MSG2F
msg_buf.a_lock_tx = xmr_swap.a_lock_tx
msg_buf.a_lock_tx_script = xmr_swap.a_lock_tx_script
msg_buf.a_lock_refund_tx = xmr_swap.a_lock_refund_tx
msg_buf.a_lock_refund_tx_script = xmr_swap.a_lock_refund_tx_script
msg_buf.a_lock_refund_spend_tx = xmr_swap.a_lock_refund_spend_tx
msg_buf.al_lock_refund_tx_sig = xmr_swap.al_lock_refund_tx_sig
msg_bytes = msg_buf.SerializeToString()
payload_hex = str.format('{:02x}', MessageTypes.XMR_BID_ACCEPT_LF) + msg_bytes.hex()
options = {'decodehex': True, 'ttl_is_seconds': True}
msg_valid = self.SMSG_SECONDS_IN_HOUR * 48
ro = self.callrpc('smsgsend', [offer.addr_from, bid.bid_addr, payload_hex, False, msg_valid, False, options])
msg_id = ro['msgid']
bid.accept_msg_id = bytes.fromhex(msg_id)
xmr_swap.bid_accept_msg_id = bid.accept_msg_id
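            # The DLEAG proof is too large for a single message; send the remainder
            # as two XmrSplitMessage parts (sequence 2 and 3).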
if coin_to == Coins.XMR:
msg_buf2 = XmrSplitMessage(
msg_id=bid_id,
msg_type=XmrSplitMsgTypes.BID_ACCEPT,
sequence=2,
dleag=xmr_swap.kbsl_dleag[16000:32000]
)
msg_bytes = msg_buf2.SerializeToString()
payload_hex = str.format('{:02x}', MessageTypes.XMR_BID_SPLIT) + msg_bytes.hex()
ro = self.callrpc('smsgsend', [offer.addr_from, bid.bid_addr, payload_hex, False, msg_valid, False, options])
xmr_swap.bid_accept_msg_id2 = bytes.fromhex(ro['msgid'])
msg_buf3 = XmrSplitMessage(
msg_id=bid_id,
msg_type=XmrSplitMsgTypes.BID_ACCEPT,
sequence=3,
dleag=xmr_swap.kbsl_dleag[32000:]
)
msg_bytes = msg_buf3.SerializeToString()
payload_hex = str.format('{:02x}', MessageTypes.XMR_BID_SPLIT) + msg_bytes.hex()
ro = self.callrpc('smsgsend', [offer.addr_from, bid.bid_addr, payload_hex, False, msg_valid, False, options])
xmr_swap.bid_accept_msg_id3 = bytes.fromhex(ro['msgid'])
bid.setState(BidStates.BID_ACCEPTED)
self.saveBid(bid_id, bid, xmr_swap=xmr_swap)
# Add to swaps_in_progress only when waiting on txns
self.log.info('Sent XMR_BID_ACCEPT_LF %s', bid_id.hex())
return bid_id
finally:
self.mxDB.release()
def abandonBid(self, bid_id):
self.log.info('Abandoning Bid %s', bid_id.hex())
self.mxDB.acquire()
try:
session = scoped_session(self.session_factory)
bid = session.query(Bid).filter_by(bid_id=bid_id).first()
ensure(bid, 'Bid not found')
offer = session.query(Offer).filter_by(offer_id=bid.offer_id).first()
ensure(offer, 'Offer not found')
# Mark bid as abandoned, no further processing will be done
bid.setState(BidStates.BID_ABANDONED)
self.deactivateBid(session, offer, bid)
session.add(bid)
session.commit()
finally:
session.close()
session.remove()
self.mxDB.release()
def setBidError(self, bid_id, bid, error_str, save_bid=True, xmr_swap=None):
self.log.error('Bid %s - Error: %s', bid_id.hex(), error_str)
bid.setState(BidStates.BID_ERROR)
bid.state_note = 'error msg: ' + error_str
if save_bid:
self.saveBid(bid_id, bid, xmr_swap=xmr_swap)
def createInitiateTxn(self, coin_type, bid_id, bid, initiate_script):
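        # Build, fund and sign the initiate transaction paying into the
        # P2SH/P2WSH wrapping the initiate contract script.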
if self.coin_clients[coin_type]['connection_type'] != 'rpc':
return None
ci = self.ci(coin_type)
if self.coin_clients[coin_type]['use_segwit']:
addr_to = ci.encode_p2wsh(getP2WSH(initiate_script))
else:
addr_to = ci.encode_p2sh(initiate_script)
self.log.debug('Create initiate txn for coin %s to %s for bid %s', str(coin_type), addr_to, bid_id.hex())
txn = self.callcoinrpc(coin_type, 'createrawtransaction', [[], {addr_to: ci.format_amount(bid.amount)}])
options = {
'lockUnspents': True,
'conf_target': self.coin_clients[coin_type]['conf_target'],
}
txn_funded = self.callcoinrpc(coin_type, 'fundrawtransaction', [txn, options])['hex']
txn_signed = self.callcoinrpc(coin_type, 'signrawtransactionwithwallet', [txn_funded])['hex']
return txn_signed
def deriveParticipateScript(self, bid_id, bid, offer):
self.log.debug('deriveParticipateScript for bid %s', bid_id.hex())
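        # Rebuild the participate contract script using the secret hash from the
        # initiate tx, with either a relative (sequence) lock or an absolute
        # (CHECKLOCKTIMEVERIFY) lock anchored to the initiate tx's block.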
coin_to = Coins(offer.coin_to)
ci_to = self.ci(coin_to)
bid_date = dt.datetime.fromtimestamp(bid.created_at).date()
secret_hash = atomic_swap_1.extractScriptSecretHash(bid.initiate_tx.script)
pkhash_seller = bid.pkhash_seller
pkhash_buyer_refund = bid.pkhash_buyer
# Participate txn is locked for half the time of the initiate txn
lock_value = offer.lock_value // 2
if offer.lock_type < TxLockTypes.ABS_LOCK_BLOCKS:
sequence = ci_to.getExpectedSequence(offer.lock_type, lock_value)
participate_script = atomic_swap_1.buildContractScript(sequence, secret_hash, pkhash_seller, pkhash_buyer_refund)
else:
# Lock from the height or time of the block containing the initiate txn
coin_from = Coins(offer.coin_from)
initiate_tx_block_hash = self.callcoinrpc(coin_from, 'getblockhash', [bid.initiate_tx.chain_height, ])
initiate_tx_block_time = int(self.callcoinrpc(coin_from, 'getblock', [initiate_tx_block_hash, ])['time'])
if offer.lock_type == TxLockTypes.ABS_LOCK_BLOCKS:
# Walk the coin_to chain back until block time matches
blockchaininfo = self.callcoinrpc(coin_to, 'getblockchaininfo')
cblock_hash = blockchaininfo['bestblockhash']
cblock_height = blockchaininfo['blocks']
max_tries = 1000
for i in range(max_tries):
prev_block = self.callcoinrpc(coin_to, 'getblock', [cblock_hash, ])
self.log.debug('prev_block %s', str(prev_block))
if prev_block['time'] <= initiate_tx_block_time:
break
# cblock_hash and height are out of step unless loop breaks
cblock_hash = prev_block['previousblockhash']
cblock_height = prev_block['height']
ensure(prev_block['time'] <= initiate_tx_block_time, 'Block not found for lock height')
self.log.debug('Setting lock value from height of block %s %s', coin_to, cblock_hash)
contract_lock_value = cblock_height + lock_value
else:
self.log.debug('Setting lock value from time of block %s %s', coin_from, initiate_tx_block_hash)
contract_lock_value = initiate_tx_block_time + lock_value
self.log.debug('participate %s lock_value %d %d', coin_to, lock_value, contract_lock_value)
participate_script = atomic_swap_1.buildContractScript(contract_lock_value, secret_hash, pkhash_seller, pkhash_buyer_refund, OpCodes.OP_CHECKLOCKTIMEVERIFY)
return participate_script
def createParticipateTxn(self, bid_id, bid, offer, participate_script):
self.log.debug('createParticipateTxn')
offer_id = bid.offer_id
coin_to = Coins(offer.coin_to)
if self.coin_clients[coin_to]['connection_type'] != 'rpc':
return None
ci = self.ci(coin_to)
amount_to = bid.amount_to
# Check required?
assert(amount_to == (bid.amount * bid.rate) // self.ci(offer.coin_from).COIN())
if bid.debug_ind == DebugTypes.MAKE_INVALID_PTX:
amount_to -= 1
self.log.debug('bid %s: Make invalid PTx for testing: %d.', bid_id.hex(), bid.debug_ind)
self.logBidEvent(bid.bid_id, EventLogTypes.DEBUG_TWEAK_APPLIED, 'ind {}'.format(bid.debug_ind), None)
if self.coin_clients[coin_to]['use_segwit']:
p2wsh = getP2WSH(participate_script)
addr_to = ci.encode_p2wsh(p2wsh)
else:
addr_to = ci.encode_p2sh(participate_script)
txn = self.callcoinrpc(coin_to, 'createrawtransaction', [[], {addr_to: ci.format_amount(amount_to)}])
options = {
'lockUnspents': True,
'conf_target': self.coin_clients[coin_to]['conf_target'],
}
txn_funded = self.callcoinrpc(coin_to, 'fundrawtransaction', [txn, options])['hex']
txn_signed = self.callcoinrpc(coin_to, 'signrawtransactionwithwallet', [txn_funded])['hex']
refund_txn = self.createRefundTxn(coin_to, txn_signed, offer, bid, participate_script, tx_type=TxTypes.PTX_REFUND)
bid.participate_txn_refund = bytes.fromhex(refund_txn)
chain_height = self.callcoinrpc(coin_to, 'getblockcount')
txjs = self.callcoinrpc(coin_to, 'decoderawtransaction', [txn_signed])
txid = txjs['txid']
if self.coin_clients[coin_to]['use_segwit']:
vout = getVoutByP2WSH(txjs, p2wsh.hex())
else:
vout = getVoutByAddress(txjs, addr_to)
self.addParticipateTxn(bid_id, bid, coin_to, txid, vout, chain_height)
bid.participate_tx.script = participate_script
bid.participate_tx.tx_data = bytes.fromhex(txn_signed)
return txn_signed
def getContractSpendTxVSize(self, coin_type, redeem=True):
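        # Rough vsize estimates used when calculating contract spend fees; redeem
        # spends are larger than refunds as their witness also carries the secret.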
        tx_vsize = 5  # Add a few bytes; the sequence in the script takes a variable number of bytes
if coin_type == Coins.PART:
tx_vsize += 204 if redeem else 187
if self.coin_clients[coin_type]['use_segwit']:
tx_vsize += 143 if redeem else 134
else:
tx_vsize += 323 if redeem else 287
return tx_vsize
def createRedeemTxn(self, coin_type, bid, for_txn_type='participate', addr_redeem_out=None, fee_rate=None):
self.log.debug('createRedeemTxn for coin %s', str(coin_type))
ci = self.ci(coin_type)
if for_txn_type == 'participate':
prev_txnid = bid.participate_tx.txid.hex()
prev_n = bid.participate_tx.vout
txn_script = bid.participate_tx.script
prev_amount = bid.amount_to
else:
prev_txnid = bid.initiate_tx.txid.hex()
prev_n = bid.initiate_tx.vout
txn_script = bid.initiate_tx.script
prev_amount = bid.amount
if self.coin_clients[coin_type]['use_segwit']:
prev_p2wsh = getP2WSH(txn_script)
script_pub_key = prev_p2wsh.hex()
else:
script_pub_key = getP2SHScriptForHash(getKeyID(txn_script)).hex()
prevout = {
'txid': prev_txnid,
'vout': prev_n,
'scriptPubKey': script_pub_key,
'redeemScript': txn_script.hex(),
'amount': ci.format_amount(prev_amount)}
bid_date = dt.datetime.fromtimestamp(bid.created_at).date()
wif_prefix = chainparams[Coins.PART][self.chain]['key_prefix']
pubkey = self.getContractPubkey(bid_date, bid.contract_count)
privkey = toWIF(wif_prefix, self.getContractPrivkey(bid_date, bid.contract_count))
secret = bid.recovered_secret
if secret is None:
secret = self.getContractSecret(bid_date, bid.contract_count)
ensure(len(secret) == 32, 'Bad secret length')
if self.coin_clients[coin_type]['connection_type'] != 'rpc':
return None
prevout_s = ' in={}:{}'.format(prev_txnid, prev_n)
if fee_rate is None:
fee_rate, fee_src = self.getFeeRateForCoin(coin_type)
tx_vsize = self.getContractSpendTxVSize(coin_type)
tx_fee = (fee_rate * tx_vsize) / 1000
self.log.debug('Redeem tx fee %s, rate %s', ci.format_amount(tx_fee, conv_int=True, r=1), str(fee_rate))
amount_out = prev_amount - ci.make_int(tx_fee, r=1)
ensure(amount_out > 0, 'Amount out <= 0')
if addr_redeem_out is None:
addr_redeem_out = self.getReceiveAddressFromPool(coin_type, bid.bid_id, TxTypes.PTX_REDEEM if for_txn_type == 'participate' else TxTypes.ITX_REDEEM)
assert(addr_redeem_out is not None)
if self.coin_clients[coin_type]['use_segwit']:
# Change to btc hrp
addr_redeem_out = self.encodeSegwit(Coins.PART, self.decodeSegwit(coin_type, addr_redeem_out))
else:
addr_redeem_out = replaceAddrPrefix(addr_redeem_out, Coins.PART, self.chain)
self.log.debug('addr_redeem_out %s', addr_redeem_out)
output_to = ' outaddr={}:{}'.format(ci.format_amount(amount_out), addr_redeem_out)
if coin_type == Coins.PART:
redeem_txn = self.calltx('-create' + prevout_s + output_to)
else:
redeem_txn = self.calltx('-btcmode -create nversion=2' + prevout_s + output_to)
options = {}
if self.coin_clients[coin_type]['use_segwit']:
options['force_segwit'] = True
redeem_sig = self.callcoinrpc(Coins.PART, 'createsignaturewithkey', [redeem_txn, prevout, privkey, 'ALL', options])
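        # Redeem spend stack: <sig> <pubkey> <secret> 1 <contract script>.
        # The 1 selects the redeem (OP_IF) branch of the contract script.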
if coin_type == Coins.PART or self.coin_clients[coin_type]['use_segwit']:
witness_stack = [
redeem_sig,
pubkey.hex(),
secret.hex(),
'01',
txn_script.hex()]
redeem_txn = self.calltx(redeem_txn + ' witness=0:' + ':'.join(witness_stack))
else:
script = format(len(redeem_sig) // 2, '02x') + redeem_sig
script += format(33, '02x') + pubkey.hex()
script += format(32, '02x') + secret.hex()
script += format(OpCodes.OP_1, '02x')
script += format(OpCodes.OP_PUSHDATA1, '02x') + format(len(txn_script), '02x') + txn_script.hex()
redeem_txn = self.calltx(redeem_txn + ' scriptsig=0:' + script)
ro = self.callcoinrpc(Coins.PART, 'verifyrawtransaction', [redeem_txn, [prevout]])
ensure(ro['inputs_valid'] is True, 'inputs_valid is false')
# outputs_valid will be false if not a Particl txn
# ensure(ro['complete'] is True, 'complete is false')
ensure(ro['validscripts'] == 1, 'validscripts != 1')
if self.debug:
# Check fee
if self.coin_clients[coin_type]['connection_type'] == 'rpc':
redeem_txjs = self.callcoinrpc(coin_type, 'decoderawtransaction', [redeem_txn])
self.log.debug('vsize paid, actual vsize %d %d', tx_vsize, redeem_txjs['vsize'])
ensure(tx_vsize >= redeem_txjs['vsize'], 'Underpaid fee')
redeem_txjs = self.callcoinrpc(Coins.PART, 'decoderawtransaction', [redeem_txn])
self.log.debug('Have valid redeem txn %s for contract %s tx %s', redeem_txjs['txid'], for_txn_type, prev_txnid)
return redeem_txn
def createRefundTxn(self, coin_type, txn, offer, bid, txn_script, addr_refund_out=None, tx_type=TxTypes.ITX_REFUND):
self.log.debug('createRefundTxn for coin %s', Coins(coin_type).name)
if self.coin_clients[coin_type]['connection_type'] != 'rpc':
return None
txjs = self.callcoinrpc(Coins.PART, 'decoderawtransaction', [txn])
if self.coin_clients[coin_type]['use_segwit']:
p2wsh = getP2WSH(txn_script)
vout = getVoutByP2WSH(txjs, p2wsh.hex())
else:
addr_to = self.ci(Coins.PART).encode_p2sh(txn_script)
vout = getVoutByAddress(txjs, addr_to)
bid_date = dt.datetime.fromtimestamp(bid.created_at).date()
wif_prefix = chainparams[Coins.PART][self.chain]['key_prefix']
pubkey = self.getContractPubkey(bid_date, bid.contract_count)
privkey = toWIF(wif_prefix, self.getContractPrivkey(bid_date, bid.contract_count))
prev_amount = txjs['vout'][vout]['value']
prevout = {
'txid': txjs['txid'],
'vout': vout,
'scriptPubKey': txjs['vout'][vout]['scriptPubKey']['hex'],
'redeemScript': txn_script.hex(),
'amount': prev_amount}
lock_value = DeserialiseNum(txn_script, 64)
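        # For relative locks the refund input's nSequence must equal the lock
        # value; for absolute locks a non-final sequence is used and nLockTime
        # is set on the txn below.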
if offer.lock_type < TxLockTypes.ABS_LOCK_BLOCKS:
sequence = lock_value
else:
sequence = 1
prevout_s = ' in={}:{}:{}'.format(txjs['txid'], vout, sequence)
fee_rate, fee_src = self.getFeeRateForCoin(coin_type)
tx_vsize = self.getContractSpendTxVSize(coin_type, False)
tx_fee = (fee_rate * tx_vsize) / 1000
ci = self.ci(coin_type)
self.log.debug('Refund tx fee %s, rate %s', ci.format_amount(tx_fee, conv_int=True, r=1), str(fee_rate))
amount_out = ci.make_int(prev_amount, r=1) - ci.make_int(tx_fee, r=1)
if amount_out <= 0:
raise ValueError('Refund amount out <= 0')
if addr_refund_out is None:
addr_refund_out = self.getReceiveAddressFromPool(coin_type, bid.bid_id, tx_type)
ensure(addr_refund_out is not None, 'addr_refund_out is null')
if self.coin_clients[coin_type]['use_segwit']:
# Change to btc hrp
addr_refund_out = self.encodeSegwit(Coins.PART, self.decodeSegwit(coin_type, addr_refund_out))
else:
addr_refund_out = replaceAddrPrefix(addr_refund_out, Coins.PART, self.chain)
self.log.debug('addr_refund_out %s', addr_refund_out)
output_to = ' outaddr={}:{}'.format(ci.format_amount(amount_out), addr_refund_out)
if coin_type == Coins.PART:
refund_txn = self.calltx('-create' + prevout_s + output_to)
else:
refund_txn = self.calltx('-btcmode -create nversion=2' + prevout_s + output_to)
if offer.lock_type == TxLockTypes.ABS_LOCK_BLOCKS or offer.lock_type == TxLockTypes.ABS_LOCK_TIME:
refund_txn = self.calltx('{} locktime={}'.format(refund_txn, lock_value))
options = {}
if self.coin_clients[coin_type]['use_segwit']:
options['force_segwit'] = True
refund_sig = self.callcoinrpc(Coins.PART, 'createsignaturewithkey', [refund_txn, prevout, privkey, 'ALL', options])
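        # Refund spend stack: <sig> <pubkey> <empty> <contract script>.
        # The empty element selects the refund (OP_ELSE) branch under MINIMALIF.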
if coin_type == Coins.PART or self.coin_clients[coin_type]['use_segwit']:
witness_stack = [
refund_sig,
pubkey.hex(),
'', # SCRIPT_VERIFY_MINIMALIF
txn_script.hex()]
refund_txn = self.calltx(refund_txn + ' witness=0:' + ':'.join(witness_stack))
else:
script = format(len(refund_sig) // 2, '02x') + refund_sig
script += format(33, '02x') + pubkey.hex()
script += format(OpCodes.OP_0, '02x')
script += format(OpCodes.OP_PUSHDATA1, '02x') + format(len(txn_script), '02x') + txn_script.hex()
refund_txn = self.calltx(refund_txn + ' scriptsig=0:' + script)
ro = self.callcoinrpc(Coins.PART, 'verifyrawtransaction', [refund_txn, [prevout]])
ensure(ro['inputs_valid'] is True, 'inputs_valid is false')
# outputs_valid will be false if not a Particl txn
# ensure(ro['complete'] is True, 'complete is false')
ensure(ro['validscripts'] == 1, 'validscripts != 1')
if self.debug:
# Check fee
if self.coin_clients[coin_type]['connection_type'] == 'rpc':
refund_txjs = self.callcoinrpc(coin_type, 'decoderawtransaction', [refund_txn])
self.log.debug('vsize paid, actual vsize %d %d', tx_vsize, refund_txjs['vsize'])
ensure(tx_vsize >= refund_txjs['vsize'], 'underpaid fee')
refund_txjs = self.callcoinrpc(Coins.PART, 'decoderawtransaction', [refund_txn])
self.log.debug('Have valid refund txn %s for contract tx %s', refund_txjs['txid'], txjs['txid'])
return refund_txn
def submitTxn(self, coin_type, txn):
# self.log.debug('submitTxn %s', str(coin_type))
if txn is None:
return None
if self.coin_clients[coin_type]['connection_type'] != 'rpc':
return None
return self.callcoinrpc(coin_type, 'sendrawtransaction', [txn])
def initiateTxnConfirmed(self, bid_id, bid, offer):
self.log.debug('initiateTxnConfirmed for bid %s', bid_id.hex())
bid.setState(BidStates.SWAP_INITIATED)
bid.setITxState(TxStates.TX_CONFIRMED)
if bid.debug_ind == DebugTypes.BUYER_STOP_AFTER_ITX:
self.log.debug('bid %s: Abandoning bid for testing: %d, %s.', bid_id.hex(), bid.debug_ind, DebugTypes(bid.debug_ind).name)
bid.setState(BidStates.BID_ABANDONED)
self.logBidEvent(bid.bid_id, EventLogTypes.DEBUG_TWEAK_APPLIED, 'ind {}'.format(bid.debug_ind), None)
return # Bid saved in checkBidState
# Seller first mode, buyer participates
participate_script = self.deriveParticipateScript(bid_id, bid, offer)
if bid.was_sent:
if bid.participate_tx is not None:
self.log.warning('Participate txn %s already exists for bid %s', bid.participate_tx.txid, bid_id.hex())
else:
self.log.debug('Preparing participate txn for bid %s', bid_id.hex())
coin_to = Coins(offer.coin_to)
txn = self.createParticipateTxn(bid_id, bid, offer, participate_script)
txid = self.submitTxn(coin_to, txn)
self.log.debug('Submitted participate txn %s to %s chain for bid %s', txid, chainparams[coin_to]['name'], bid_id.hex())
bid.setPTxState(TxStates.TX_SENT)
else:
bid.participate_tx = SwapTx(
bid_id=bid_id,
tx_type=TxTypes.PTX,
script=participate_script,
)
# Bid saved in checkBidState
def setLastHeightChecked(self, coin_type, tx_height):
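        # Maintain the height from which each chain is scanned for spends of
        # watched outputs, rewinding when a relevant tx appears below it.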
coin_name = self.ci(coin_type).coin_name()
if tx_height < 1:
tx_height = self.lookupChainHeight(coin_type)
if len(self.coin_clients[coin_type]['watched_outputs']) == 0:
self.coin_clients[coin_type]['last_height_checked'] = tx_height
self.log.debug('Start checking %s chain at height %d', coin_name, tx_height)
if self.coin_clients[coin_type]['last_height_checked'] > tx_height:
self.coin_clients[coin_type]['last_height_checked'] = tx_height
self.log.debug('Rewind checking of %s chain to height %d', coin_name, tx_height)
return tx_height
def addParticipateTxn(self, bid_id, bid, coin_type, txid_hex, vout, tx_height):
# TODO: Check connection type
participate_txn_height = self.setLastHeightChecked(coin_type, tx_height)
if bid.participate_tx is None:
bid.participate_tx = SwapTx(
bid_id=bid_id,
tx_type=TxTypes.PTX,
)
bid.participate_tx.txid = bytes.fromhex(txid_hex)
bid.participate_tx.vout = vout
bid.participate_tx.chain_height = participate_txn_height
# Start checking for spends of participate_txn before fully confirmed
self.log.debug('Watching %s chain for spend of output %s %d', chainparams[coin_type]['name'], txid_hex, vout)
self.addWatchedOutput(coin_type, bid_id, txid_hex, vout, BidStates.SWAP_PARTICIPATING)
def participateTxnConfirmed(self, bid_id, bid, offer):
self.log.debug('participateTxnConfirmed for bid %s', bid_id.hex())
bid.setState(BidStates.SWAP_PARTICIPATING)
bid.setPTxState(TxStates.TX_CONFIRMED)
# Seller redeems from participate txn
if bid.was_received:
coin_to = Coins(offer.coin_to)
txn = self.createRedeemTxn(coin_to, bid)
txid = self.submitTxn(coin_to, txn)
self.log.debug('Submitted participate redeem txn %s to %s chain for bid %s', txid, chainparams[coin_to]['name'], bid_id.hex())
# TX_REDEEMED will be set when spend is detected
# TODO: Wait for depth?
# bid saved in checkBidState
def getAddressBalance(self, coin_type, address):
if self.coin_clients[coin_type]['chain_lookups'] == 'explorer':
explorers = self.coin_clients[coin_type]['explorers']
# TODO: random offset into explorers, try blocks
for exp in explorers:
return exp.getBalance(address)
return self.lookupUnspentByAddress(coin_type, address, sum_output=True)
def lookupChainHeight(self, coin_type):
return self.callcoinrpc(coin_type, 'getblockcount')
def lookupUnspentByAddress(self, coin_type, address, sum_output=False, assert_amount=None, assert_txid=None):
ci = self.ci(coin_type)
if self.coin_clients[coin_type]['chain_lookups'] == 'explorer':
explorers = self.coin_clients[coin_type]['explorers']
# TODO: random offset into explorers, try blocks
for exp in explorers:
# TODO: ExplorerBitAps use only gettransaction if assert_txid is set
rv = exp.lookupUnspentByAddress(address)
if assert_amount is not None:
ensure(rv['value'] == int(assert_amount), 'Incorrect output amount in txn {}: {} != {}.'.format(assert_txid, rv['value'], int(assert_amount)))
if assert_txid is not None:
                    ensure(rv['txid'] == assert_txid, 'Incorrect txid')
return rv
raise ValueError('No explorer for lookupUnspentByAddress {}'.format(str(coin_type)))
if self.coin_clients[coin_type]['connection_type'] != 'rpc':
raise ValueError('No RPC connection for lookupUnspentByAddress {}'.format(str(coin_type)))
if assert_txid is not None:
try:
ro = self.callcoinrpc(coin_type, 'getmempoolentry', [assert_txid])
self.log.debug('Tx %s found in mempool, fee %s', assert_txid, ro['fee'])
# TODO: Save info
return None
except Exception:
pass
num_blocks = self.callcoinrpc(coin_type, 'getblockcount')
sum_unspent = 0
self.log.debug('[rm] scantxoutset start') # scantxoutset is slow
ro = self.callcoinrpc(coin_type, 'scantxoutset', ['start', ['addr({})'.format(address)]]) # TODO: Use combo(address) where possible
self.log.debug('[rm] scantxoutset end')
for o in ro['unspents']:
if assert_txid and o['txid'] != assert_txid:
continue
# Verify amount
if assert_amount:
ensure(make_int(o['amount']) == int(assert_amount), 'Incorrect output amount in txn {}: {} != {}.'.format(assert_txid, make_int(o['amount']), int(assert_amount)))
if not sum_output:
if o['height'] > 0:
n_conf = num_blocks - o['height']
else:
n_conf = -1
return {
'txid': o['txid'],
'index': o['vout'],
'height': o['height'],
'n_conf': n_conf,
'value': ci.make_int(o['amount']),
}
else:
sum_unspent += ci.make_int(o['amount'])
if sum_output:
return sum_unspent
return None
def checkXmrBidState(self, bid_id, bid, offer):
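        # Advance the state machine of an in-progress adaptor-sig (XMR) swap bid.
        # Returns True when the bid can be removed from swaps_in_progress.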
rv = False
ci_from = self.ci(Coins(offer.coin_from))
ci_to = self.ci(Coins(offer.coin_to))
session = None
try:
self.mxDB.acquire()
session = scoped_session(self.session_factory)
xmr_offer = session.query(XmrOffer).filter_by(offer_id=offer.offer_id).first()
ensure(xmr_offer, 'XMR offer not found: {}.'.format(offer.offer_id.hex()))
xmr_swap = session.query(XmrSwap).filter_by(bid_id=bid.bid_id).first()
ensure(xmr_swap, 'XMR swap not found: {}.'.format(bid.bid_id.hex()))
if TxTypes.XMR_SWAP_A_LOCK_REFUND in bid.txns:
refund_tx = bid.txns[TxTypes.XMR_SWAP_A_LOCK_REFUND]
if bid.was_received:
if bid.debug_ind == DebugTypes.BID_DONT_SPEND_COIN_A_LOCK_REFUND:
self.log.debug('XMR bid %s: Stalling bid for testing: %d.', bid_id.hex(), bid.debug_ind)
bid.setState(BidStates.BID_STALLED_FOR_TEST)
rv = True
self.saveBidInSession(bid_id, bid, session, xmr_swap)
self.logBidEvent(bid.bid_id, EventLogTypes.DEBUG_TWEAK_APPLIED, 'ind {}'.format(bid.debug_ind), session)
session.commit()
return rv
if TxTypes.XMR_SWAP_A_LOCK_REFUND_SPEND not in bid.txns:
try:
txid = ci_from.publishTx(xmr_swap.a_lock_refund_spend_tx)
self.logBidEvent(bid.bid_id, EventLogTypes.LOCK_TX_A_REFUND_SPEND_TX_PUBLISHED, '', session)
                            self.log.info('Submitted coin a lock refund spend tx for bid {}, txid {}'.format(bid_id.hex(), txid))
bid.txns[TxTypes.XMR_SWAP_A_LOCK_REFUND_SPEND] = SwapTx(
bid_id=bid_id,
tx_type=TxTypes.XMR_SWAP_A_LOCK_REFUND_SPEND,
txid=bytes.fromhex(txid),
)
self.saveBidInSession(bid_id, bid, session, xmr_swap)
session.commit()
except Exception as ex:
self.log.debug('Trying to publish coin a lock refund spend tx: %s', str(ex))
if bid.was_sent:
if xmr_swap.a_lock_refund_swipe_tx is None:
self.createCoinALockRefundSwipeTx(ci_from, bid, offer, xmr_swap, xmr_offer)
self.saveBidInSession(bid_id, bid, session, xmr_swap)
session.commit()
if TxTypes.XMR_SWAP_A_LOCK_REFUND_SWIPE not in bid.txns:
try:
txid = ci_from.publishTx(xmr_swap.a_lock_refund_swipe_tx)
self.logBidEvent(bid.bid_id, EventLogTypes.LOCK_TX_A_REFUND_SWIPE_TX_PUBLISHED, '', session)
self.log.info('Submitted coin a lock refund swipe tx for bid {}'.format(bid_id.hex()))
bid.txns[TxTypes.XMR_SWAP_A_LOCK_REFUND_SWIPE] = SwapTx(
bid_id=bid_id,
tx_type=TxTypes.XMR_SWAP_A_LOCK_REFUND_SWIPE,
txid=bytes.fromhex(txid),
)
self.saveBidInSession(bid_id, bid, session, xmr_swap)
session.commit()
except Exception as ex:
self.log.debug('Trying to publish coin a lock refund swipe tx: %s', str(ex))
if BidStates(bid.state) == BidStates.XMR_SWAP_NOSCRIPT_TX_RECOVERED:
txid_hex = bid.xmr_b_lock_tx.spend_txid.hex()
found_tx = ci_to.findTxnByHash(txid_hex)
if found_tx is not None:
self.log.info('Found coin b lock recover tx bid %s', bid_id.hex())
rv = True # Remove from swaps_in_progress
bid.setState(BidStates.XMR_SWAP_FAILED_REFUNDED)
self.saveBidInSession(bid_id, bid, session, xmr_swap)
session.commit()
return rv
else: # not XMR_SWAP_A_LOCK_REFUND in bid.txns
if len(xmr_swap.al_lock_refund_tx_sig) > 0 and len(xmr_swap.af_lock_refund_tx_sig) > 0:
try:
txid = ci_from.publishTx(xmr_swap.a_lock_refund_tx)
self.log.info('Submitted coin a lock refund tx for bid {}'.format(bid_id.hex()))
self.logBidEvent(bid.bid_id, EventLogTypes.LOCK_TX_A_REFUND_TX_PUBLISHED, '', session)
bid.txns[TxTypes.XMR_SWAP_A_LOCK_REFUND] = SwapTx(
bid_id=bid_id,
tx_type=TxTypes.XMR_SWAP_A_LOCK_REFUND,
txid=bytes.fromhex(txid),
)
self.saveBidInSession(bid_id, bid, session, xmr_swap)
session.commit()
return rv
except Exception as ex:
if 'Transaction already in block chain' in str(ex):
self.log.info('Found coin a lock refund tx for bid {}'.format(bid_id.hex()))
txid = ci_from.getTxid(xmr_swap.a_lock_refund_tx)
bid.txns[TxTypes.XMR_SWAP_A_LOCK_REFUND] = SwapTx(
bid_id=bid_id,
tx_type=TxTypes.XMR_SWAP_A_LOCK_REFUND,
txid=txid,
)
self.saveBidInSession(bid_id, bid, session, xmr_swap)
session.commit()
return rv
state = BidStates(bid.state)
if state == BidStates.SWAP_COMPLETED:
rv = True # Remove from swaps_in_progress
elif state == BidStates.XMR_SWAP_FAILED_REFUNDED:
rv = True # Remove from swaps_in_progress
elif state == BidStates.XMR_SWAP_FAILED_SWIPED:
rv = True # Remove from swaps_in_progress
elif state == BidStates.XMR_SWAP_HAVE_SCRIPT_COIN_SPEND_TX:
if bid.xmr_a_lock_tx is None:
return rv
# TODO: Timeout waiting for transactions
bid_changed = False
a_lock_tx_dest = ci_from.getScriptDest(xmr_swap.a_lock_tx_script)
# Changed from ci_from.getOutput(bid.xmr_a_lock_tx.txid, a_lock_tx_dest, bid.amount, xmr_swap)
p2wsh_addr = ci_from.encode_p2wsh(a_lock_tx_dest)
lock_tx_chain_info = ci_from.getLockTxHeight(bid.xmr_a_lock_tx.txid, p2wsh_addr, bid.amount, bid.chain_a_height_start)
if lock_tx_chain_info is None:
return rv
if not bid.xmr_a_lock_tx.chain_height and lock_tx_chain_info['height'] != 0:
self.logBidEvent(bid.bid_id, EventLogTypes.LOCK_TX_A_SEEN, '', session)
block_header = ci_from.getBlockHeaderFromHeight(lock_tx_chain_info['height'])
bid.xmr_a_lock_tx.block_hash = bytes.fromhex(block_header['hash'])
bid.xmr_a_lock_tx.block_height = block_header['height']
bid.xmr_a_lock_tx.block_time = block_header['time'] # Or median_time?
bid_changed = True
if bid.xmr_a_lock_tx.chain_height != lock_tx_chain_info['height'] and lock_tx_chain_info['height'] != 0:
bid.xmr_a_lock_tx.chain_height = lock_tx_chain_info['height']
bid_changed = True
if lock_tx_chain_info['depth'] >= ci_from.blocks_confirmed:
self.logBidEvent(bid.bid_id, EventLogTypes.LOCK_TX_A_CONFIRMED, '', session)
bid.xmr_a_lock_tx.setState(TxStates.TX_CONFIRMED)
bid.setState(BidStates.XMR_SWAP_SCRIPT_COIN_LOCKED)
bid_changed = True
if bid.was_sent:
delay = random.randrange(self.min_delay_event, self.max_delay_event)
self.log.info('Sending xmr swap chain B lock tx for bid %s in %d seconds', bid_id.hex(), delay)
self.createEventInSession(delay, EventTypes.SEND_XMR_SWAP_LOCK_TX_B, bid_id, session)
# bid.setState(BidStates.SWAP_DELAYING)
if bid_changed:
self.saveBidInSession(bid_id, bid, session, xmr_swap)
session.commit()
elif state == BidStates.XMR_SWAP_SCRIPT_COIN_LOCKED:
if bid.was_sent and bid.xmr_b_lock_tx is None:
return rv
bid_changed = False
# Have to use findTxB instead of relying on the first seen height to detect chain reorgs
found_tx = ci_to.findTxB(xmr_swap.vkbv, xmr_swap.pkbs, bid.amount_to, ci_to.blocks_confirmed, bid.chain_b_height_start, bid.was_sent)
if isinstance(found_tx, int) and found_tx == -1:
if self.countBidEvents(bid, EventLogTypes.LOCK_TX_B_INVALID, session) < 1:
self.logBidEvent(bid.bid_id, EventLogTypes.LOCK_TX_B_INVALID, 'Detected invalid lock tx B', session)
bid_changed = True
elif found_tx is not None:
if bid.xmr_b_lock_tx is None or not bid.xmr_b_lock_tx.chain_height:
self.logBidEvent(bid.bid_id, EventLogTypes.LOCK_TX_B_SEEN, '', session)
if bid.xmr_b_lock_tx is None:
self.log.debug('Found {} lock tx in chain'.format(ci_to.coin_name()))
b_lock_tx_id = bytes.fromhex(found_tx['txid'])
bid.xmr_b_lock_tx = SwapTx(
bid_id=bid_id,
tx_type=TxTypes.XMR_SWAP_B_LOCK,
txid=b_lock_tx_id,
chain_height=found_tx['height'],
)
bid_changed = True
else:
bid.xmr_b_lock_tx.chain_height = found_tx['height']
bid_changed = True
if bid.xmr_b_lock_tx and bid.xmr_b_lock_tx.chain_height is not None and bid.xmr_b_lock_tx.chain_height > 0:
chain_height = ci_to.getChainHeight()
if chain_height - bid.xmr_b_lock_tx.chain_height >= ci_to.blocks_confirmed:
self.logBidEvent(bid.bid_id, EventLogTypes.LOCK_TX_B_CONFIRMED, '', session)
bid.xmr_b_lock_tx.setState(TxStates.TX_CONFIRMED)
bid.setState(BidStates.XMR_SWAP_NOSCRIPT_COIN_LOCKED)
if bid.was_received:
delay = random.randrange(self.min_delay_event, self.max_delay_event)
self.log.info('Releasing xmr script coin lock tx for bid %s in %d seconds', bid_id.hex(), delay)
self.createEventInSession(delay, EventTypes.SEND_XMR_LOCK_RELEASE, bid_id, session)
if bid_changed:
self.saveBidInSession(bid_id, bid, session, xmr_swap)
session.commit()
elif state == BidStates.XMR_SWAP_LOCK_RELEASED:
# Wait for script spend tx to confirm
# TODO: Use explorer to get tx / block hash for getrawtransaction
if bid.was_received:
try:
txn_hex = ci_from.getMempoolTx(xmr_swap.a_lock_spend_tx_id)
self.log.info('Found lock spend txn in %s mempool, %s', ci_from.coin_name(), xmr_swap.a_lock_spend_tx_id.hex())
self.process_XMR_SWAP_A_LOCK_tx_spend(bid_id, xmr_swap.a_lock_spend_tx_id.hex(), txn_hex)
except Exception as e:
self.log.debug('getrawtransaction lock spend tx failed: %s', str(e))
elif state == BidStates.XMR_SWAP_SCRIPT_TX_REDEEMED:
if bid.was_received and self.countQueuedEvents(session, bid_id, EventTypes.REDEEM_XMR_SWAP_LOCK_TX_B) < 1:
bid.setState(BidStates.SWAP_DELAYING)
delay = random.randrange(self.min_delay_event, self.max_delay_event)
self.log.info('Redeeming coin b lock tx for bid %s in %d seconds', bid_id.hex(), delay)
self.createEventInSession(delay, EventTypes.REDEEM_XMR_SWAP_LOCK_TX_B, bid_id, session)
self.saveBidInSession(bid_id, bid, session, xmr_swap)
session.commit()
elif state == BidStates.XMR_SWAP_NOSCRIPT_TX_REDEEMED:
txid_hex = bid.xmr_b_lock_tx.spend_txid.hex()
found_tx = ci_to.findTxnByHash(txid_hex)
if found_tx is not None:
self.log.info('Found coin b lock spend tx bid %s', bid_id.hex())
rv = True # Remove from swaps_in_progress
bid.setState(BidStates.SWAP_COMPLETED)
self.saveBidInSession(bid_id, bid, session, xmr_swap)
session.commit()
elif state == BidStates.XMR_SWAP_SCRIPT_TX_PREREFUND:
if TxTypes.XMR_SWAP_A_LOCK_REFUND in bid.txns:
refund_tx = bid.txns[TxTypes.XMR_SWAP_A_LOCK_REFUND]
if refund_tx.block_time is None:
a_lock_refund_tx_dest = ci_from.getScriptDest(xmr_swap.a_lock_refund_tx_script)
p2wsh_addr = ci_from.encode_p2wsh(a_lock_refund_tx_dest)
lock_refund_tx_chain_info = ci_from.getLockTxHeight(refund_tx.txid, p2wsh_addr, 0, bid.chain_a_height_start)
if lock_refund_tx_chain_info is not None:
block_header = ci_from.getBlockHeaderFromHeight(lock_refund_tx_chain_info['height'])
refund_tx.block_hash = bytes.fromhex(block_header['hash'])
refund_tx.block_height = block_header['height']
refund_tx.block_time = block_header['time'] # Or median_time?
self.saveBidInSession(bid_id, bid, session, xmr_swap)
session.commit()
except Exception as ex:
raise ex
finally:
if session:
session.close()
session.remove()
self.mxDB.release()
return rv
def checkBidState(self, bid_id, bid, offer):
# assert(self.mxDB.locked())
# Return True to remove bid from in-progress list
state = BidStates(bid.state)
self.log.debug('checkBidState %s %s', bid_id.hex(), str(state))
if offer.swap_type == SwapTypes.XMR_SWAP:
return self.checkXmrBidState(bid_id, bid, offer)
save_bid = False
coin_from = Coins(offer.coin_from)
coin_to = Coins(offer.coin_to)
ci_from = self.ci(coin_from)
ci_to = self.ci(coin_to)
# TODO: Batch calls to scantxoutset
# TODO: timeouts
if state == BidStates.BID_ABANDONED:
self.log.info('Deactivating abandoned bid: %s', bid_id.hex())
return True # Mark bid for archiving
if state == BidStates.BID_ACCEPTED:
# Waiting for initiate txn to be confirmed in 'from' chain
initiate_txnid_hex = bid.initiate_tx.txid.hex()
p2sh = ci_from.encode_p2sh(bid.initiate_tx.script)
index = None
tx_height = None
last_initiate_txn_conf = bid.initiate_tx.conf
if coin_from == Coins.PART: # Has txindex
try:
initiate_txn = self.callcoinrpc(coin_from, 'getrawtransaction', [initiate_txnid_hex, True])
# Verify amount
vout = getVoutByAddress(initiate_txn, p2sh)
out_value = make_int(initiate_txn['vout'][vout]['value'])
ensure(out_value == int(bid.amount), 'Incorrect output amount in initiate txn {}: {} != {}.'.format(initiate_txnid_hex, out_value, int(bid.amount)))
bid.initiate_tx.conf = initiate_txn['confirmations']
try:
tx_height = initiate_txn['height']
except Exception:
tx_height = -1
index = vout
except Exception:
pass
else:
if self.coin_clients[coin_from]['use_segwit']:
addr = ci_from.encode_p2wsh(getP2WSH(bid.initiate_tx.script))
else:
addr = p2sh
ci_from = self.ci(coin_from)
found = ci_from.getLockTxHeight(bytes.fromhex(initiate_txnid_hex), addr, bid.amount, bid.chain_a_height_start, find_index=True)
if found:
bid.initiate_tx.conf = found['depth']
index = found['index']
tx_height = found['height']
if bid.initiate_tx.conf != last_initiate_txn_conf:
save_bid = True
if bid.initiate_tx.conf is not None:
self.log.debug('initiate_txnid %s confirms %d', initiate_txnid_hex, bid.initiate_tx.conf)
if bid.initiate_tx.vout is None:
bid.initiate_tx.vout = index
# Start checking for spends of initiate_txn before fully confirmed
bid.initiate_tx.chain_height = self.setLastHeightChecked(coin_from, tx_height)
self.addWatchedOutput(coin_from, bid_id, initiate_txnid_hex, bid.initiate_tx.vout, BidStates.SWAP_INITIATED)
if bid.getITxState() is None or bid.getITxState() < TxStates.TX_SENT:
bid.setITxState(TxStates.TX_SENT)
save_bid = True
if bid.initiate_tx.conf >= self.coin_clients[coin_from]['blocks_confirmed']:
self.initiateTxnConfirmed(bid_id, bid, offer)
save_bid = True
# Bid times out if buyer doesn't see tx in chain within INITIATE_TX_TIMEOUT seconds
if bid.initiate_tx is None and \
bid.state_time + atomic_swap_1.INITIATE_TX_TIMEOUT < int(time.time()):
self.log.info('Swap timed out waiting for initiate tx for bid %s', bid_id.hex())
bid.setState(BidStates.SWAP_TIMEDOUT, 'Timed out waiting for initiate tx')
self.saveBid(bid_id, bid)
return True # Mark bid for archiving
elif state == BidStates.SWAP_INITIATED:
# Waiting for participate txn to be confirmed in 'to' chain
if self.coin_clients[coin_to]['use_segwit']:
addr = ci_to.encode_p2wsh(getP2WSH(bid.participate_tx.script))
else:
addr = ci_to.encode_p2sh(bid.participate_tx.script)
ci_to = self.ci(coin_to)
participate_txid = None if bid.participate_tx is None or bid.participate_tx.txid is None else bid.participate_tx.txid
found = ci_to.getLockTxHeight(participate_txid, addr, bid.amount_to, bid.chain_b_height_start, find_index=True)
if found:
if bid.participate_tx.conf != found['depth']:
save_bid = True
bid.participate_tx.conf = found['depth']
index = found['index']
if bid.participate_tx is None or bid.participate_tx.txid is None:
self.log.debug('Found bid %s participate txn %s in chain %s', bid_id.hex(), found['txid'], coin_to)
self.addParticipateTxn(bid_id, bid, coin_to, found['txid'], found['index'], found['height'])
bid.setPTxState(TxStates.TX_SENT)
save_bid = True
if bid.participate_tx.conf is not None:
self.log.debug('participate txid %s confirms %d', bid.participate_tx.txid.hex(), bid.participate_tx.conf)
if bid.participate_tx.conf >= self.coin_clients[coin_to]['blocks_confirmed']:
self.participateTxnConfirmed(bid_id, bid, offer)
save_bid = True
elif state == BidStates.SWAP_PARTICIPATING:
# Waiting for initiate txn spend
pass
elif state == BidStates.BID_ERROR:
# Wait for user input
pass
else:
self.log.warning('checkBidState unknown state %s', state)
if state > BidStates.BID_ACCEPTED:
# Wait for spend of all known swap txns
if (bid.getITxState() is None or bid.getITxState() >= TxStates.TX_REDEEMED) \
and (bid.getPTxState() is None or bid.getPTxState() >= TxStates.TX_REDEEMED):
self.log.info('Swap completed for bid %s', bid_id.hex())
if bid.getITxState() == TxStates.TX_REDEEMED:
self.returnAddressToPool(bid_id, TxTypes.ITX_REFUND)
else:
self.returnAddressToPool(bid_id, TxTypes.ITX_REDEEM)
if bid.getPTxState() == TxStates.TX_REDEEMED:
self.returnAddressToPool(bid_id, TxTypes.PTX_REFUND)
else:
self.returnAddressToPool(bid_id, TxTypes.PTX_REDEEM)
bid.setState(BidStates.SWAP_COMPLETED)
self.saveBid(bid_id, bid)
return True # Mark bid for archiving
if save_bid:
self.saveBid(bid_id, bid)
# Try refund, keep trying until sent tx is spent
if (bid.getITxState() == TxStates.TX_SENT or bid.getITxState() == TxStates.TX_CONFIRMED) \
and bid.initiate_txn_refund is not None:
try:
txid = self.submitTxn(coin_from, bid.initiate_txn_refund.hex())
self.log.debug('Submitted initiate refund txn %s to %s chain for bid %s', txid, chainparams[coin_from]['name'], bid_id.hex())
# State will update when spend is detected
except Exception as ex:
if 'non-BIP68-final' not in str(ex) and 'non-final' not in str(ex):
self.log.warning('Error trying to submit initiate refund txn: %s', str(ex))
if (bid.getPTxState() == TxStates.TX_SENT or bid.getPTxState() == TxStates.TX_CONFIRMED) \
and bid.participate_txn_refund is not None:
try:
txid = self.submitTxn(coin_to, bid.participate_txn_refund.hex())
self.log.debug('Submitted participate refund txn %s to %s chain for bid %s', txid, chainparams[coin_to]['name'], bid_id.hex())
# State will update when spend is detected
except Exception as ex:
if 'non-BIP68-final' not in str(ex) and 'non-final' not in str(ex):
self.log.warning('Error trying to submit participate refund txn: %s', str(ex))
return False # Bid is still active
def extractSecret(self, coin_type, bid, spend_in):
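        # The secret is the third stack element of a redeem spend; refund spends
        # use a different stack layout, so extraction fails and None is returned.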
try:
if coin_type == Coins.PART or self.coin_clients[coin_type]['use_segwit']:
ensure(len(spend_in['txinwitness']) == 5, 'Bad witness size')
return bytes.fromhex(spend_in['txinwitness'][2])
else:
script_sig = spend_in['scriptSig']['asm'].split(' ')
                ensure(len(script_sig) == 5, 'Bad scriptSig size')
return bytes.fromhex(script_sig[2])
except Exception:
return None
def addWatchedOutput(self, coin_type, bid_id, txid_hex, vout, tx_type, swap_type=None):
self.log.debug('Adding watched output %s bid %s tx %s type %s', coin_type, bid_id.hex(), txid_hex, tx_type)
watched = self.coin_clients[coin_type]['watched_outputs']
for wo in watched:
if wo.bid_id == bid_id and wo.txid_hex == txid_hex and wo.vout == vout:
self.log.debug('Output already being watched.')
return
watched.append(WatchedOutput(bid_id, txid_hex, vout, tx_type, swap_type))
def removeWatchedOutput(self, coin_type, bid_id, txid_hex):
# Remove all for bid if txid is None
self.log.debug('removeWatchedOutput %s %s %s', str(coin_type), bid_id.hex(), txid_hex)
old_len = len(self.coin_clients[coin_type]['watched_outputs'])
for i in range(old_len - 1, -1, -1):
wo = self.coin_clients[coin_type]['watched_outputs'][i]
if wo.bid_id == bid_id and (txid_hex is None or wo.txid_hex == txid_hex):
del self.coin_clients[coin_type]['watched_outputs'][i]
self.log.debug('Removed watched output %s %s %s', str(coin_type), bid_id.hex(), wo.txid_hex)
def initiateTxnSpent(self, bid_id, spend_txid, spend_n, spend_txn):
self.log.debug('Bid %s initiate txn spent by %s %d', bid_id.hex(), spend_txid, spend_n)
if bid_id in self.swaps_in_progress:
bid = self.swaps_in_progress[bid_id][0]
offer = self.swaps_in_progress[bid_id][1]
bid.initiate_tx.spend_txid = bytes.fromhex(spend_txid)
bid.initiate_tx.spend_n = spend_n
spend_in = spend_txn['vin'][spend_n]
coin_from = Coins(offer.coin_from)
coin_to = Coins(offer.coin_to)
secret = self.extractSecret(coin_from, bid, spend_in)
if secret is None:
self.log.info('Bid %s initiate txn refunded by %s %d', bid_id.hex(), spend_txid, spend_n)
# TODO: Wait for depth?
bid.setITxState(TxStates.TX_REFUNDED)
else:
self.log.info('Bid %s initiate txn redeemed by %s %d', bid_id.hex(), spend_txid, spend_n)
# TODO: Wait for depth?
bid.setITxState(TxStates.TX_REDEEMED)
self.removeWatchedOutput(coin_from, bid_id, bid.initiate_tx.txid.hex())
self.saveBid(bid_id, bid)
def participateTxnSpent(self, bid_id, spend_txid, spend_n, spend_txn):
self.log.debug('Bid %s participate txn spent by %s %d', bid_id.hex(), spend_txid, spend_n)
# TODO: More SwapTypes
if bid_id in self.swaps_in_progress:
bid = self.swaps_in_progress[bid_id][0]
offer = self.swaps_in_progress[bid_id][1]
bid.participate_tx.spend_txid = bytes.fromhex(spend_txid)
bid.participate_tx.spend_n = spend_n
spend_in = spend_txn['vin'][spend_n]
coin_from = Coins(offer.coin_from)
coin_to = Coins(offer.coin_to)
secret = self.extractSecret(coin_to, bid, spend_in)
if secret is None:
self.log.info('Bid %s participate txn refunded by %s %d', bid_id.hex(), spend_txid, spend_n)
# TODO: Wait for depth?
bid.setPTxState(TxStates.TX_REFUNDED)
else:
self.log.debug('Secret %s extracted from participate spend %s %d', secret.hex(), spend_txid, spend_n)
bid.recovered_secret = secret
# TODO: Wait for depth?
bid.setPTxState(TxStates.TX_REDEEMED)
if bid.was_sent:
txn = self.createRedeemTxn(coin_from, bid, for_txn_type='initiate')
if bid.debug_ind == DebugTypes.DONT_SPEND_ITX:
self.log.debug('bid %s: Abandoning bid for testing: %d, %s.', bid_id.hex(), bid.debug_ind, DebugTypes(bid.debug_ind).name)
bid.setState(BidStates.BID_ABANDONED)
self.logBidEvent(bid.bid_id, EventLogTypes.DEBUG_TWEAK_APPLIED, 'ind {}'.format(bid.debug_ind), None)
else:
txid = self.submitTxn(coin_from, txn)
bid.initiate_tx.spend_txid = bytes.fromhex(txid)
# bid.initiate_txn_redeem = bytes.fromhex(txn) # Worth keeping?
self.log.debug('Submitted initiate redeem txn %s to %s chain for bid %s', txid, chainparams[coin_from]['name'], bid_id.hex())
# TODO: Wait for depth? new state SWAP_TXI_REDEEM_SENT?
self.removeWatchedOutput(coin_to, bid_id, bid.participate_tx.txid.hex())
self.saveBid(bid_id, bid)
def process_XMR_SWAP_A_LOCK_tx_spend(self, bid_id, spend_txid_hex, spend_txn_hex):
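        # The chain A lock output was spent: either by the expected lock spend tx
        # (swap proceeding) or by the lock refund tx (swap being backed out);
        # any other spender is treated as an error.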
self.log.debug('Detected spend of XMR swap coin a lock tx for bid %s', bid_id.hex())
self.mxDB.acquire()
try:
session = scoped_session(self.session_factory)
bid, xmr_swap = self.getXmrBidFromSession(session, bid_id)
ensure(bid, 'Bid not found: {}.'.format(bid_id.hex()))
ensure(xmr_swap, 'XMR swap not found: {}.'.format(bid_id.hex()))
offer, xmr_offer = self.getXmrOfferFromSession(session, bid.offer_id, sent=False)
ensure(offer, 'Offer not found: {}.'.format(bid.offer_id.hex()))
ensure(xmr_offer, 'XMR offer not found: {}.'.format(bid.offer_id.hex()))
coin_from = Coins(offer.coin_from)
coin_to = Coins(offer.coin_to)
state = BidStates(bid.state)
spending_txid = bytes.fromhex(spend_txid_hex)
if spending_txid == xmr_swap.a_lock_spend_tx_id:
if state == BidStates.XMR_SWAP_LOCK_RELEASED:
xmr_swap.a_lock_spend_tx = bytes.fromhex(spend_txn_hex)
bid.setState(BidStates.XMR_SWAP_SCRIPT_TX_REDEEMED) # TODO: Wait for confirmation?
if not bid.was_received:
bid.setState(BidStates.SWAP_COMPLETED)
else:
# Could already be processed if spend was detected in the mempool
self.log.warning('Coin a lock tx spend ignored due to bid state for bid {}'.format(bid_id.hex()))
elif spending_txid == xmr_swap.a_lock_refund_tx_id:
self.log.debug('Coin a lock tx spent by lock refund tx.')
bid.setState(BidStates.XMR_SWAP_SCRIPT_TX_PREREFUND)
self.logBidEvent(bid.bid_id, EventLogTypes.LOCK_TX_A_REFUND_TX_SEEN, '', session)
else:
self.setBidError(bid.bid_id, bid, 'Unexpected txn spent coin a lock tx: {}'.format(spend_txid_hex), save_bid=False)
self.saveBidInSession(bid_id, bid, session, xmr_swap, save_in_progress=offer)
session.commit()
except Exception as ex:
self.log.error('process_XMR_SWAP_A_LOCK_tx_spend %s', str(ex))
if self.debug:
self.log.error(traceback.format_exc())
finally:
session.close()
session.remove()
self.mxDB.release()
def process_XMR_SWAP_A_LOCK_REFUND_tx_spend(self, bid_id, spend_txid_hex, spend_txn):
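        # The chain A lock refund output was spent: either by the refund spend tx
        # or, after the second timelock, by a swipe tx from the other party.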
self.log.debug('Detected spend of XMR swap coin a lock refund tx for bid %s', bid_id.hex())
self.mxDB.acquire()
try:
session = scoped_session(self.session_factory)
bid, xmr_swap = self.getXmrBidFromSession(session, bid_id)
ensure(bid, 'Bid not found: {}.'.format(bid_id.hex()))
ensure(xmr_swap, 'XMR swap not found: {}.'.format(bid_id.hex()))
offer, xmr_offer = self.getXmrOfferFromSession(session, bid.offer_id, sent=False)
ensure(offer, 'Offer not found: {}.'.format(bid.offer_id.hex()))
ensure(xmr_offer, 'XMR offer not found: {}.'.format(bid.offer_id.hex()))
coin_from = Coins(offer.coin_from)
coin_to = Coins(offer.coin_to)
state = BidStates(bid.state)
spending_txid = bytes.fromhex(spend_txid_hex)
if spending_txid == xmr_swap.a_lock_refund_spend_tx_id:
self.log.info('Found coin a lock refund spend tx, bid {}'.format(bid_id.hex()))
self.logBidEvent(bid.bid_id, EventLogTypes.LOCK_TX_A_REFUND_SPEND_TX_SEEN, '', session)
if bid.was_sent:
xmr_swap.a_lock_refund_spend_tx = bytes.fromhex(spend_txn['hex']) # Replace with fully signed tx
if TxTypes.XMR_SWAP_A_LOCK_REFUND_SPEND not in bid.txns:
bid.txns[TxTypes.XMR_SWAP_A_LOCK_REFUND_SPEND] = SwapTx(
bid_id=bid_id,
tx_type=TxTypes.XMR_SWAP_A_LOCK_REFUND_SPEND,
txid=xmr_swap.a_lock_refund_spend_tx_id,
)
if bid.xmr_b_lock_tx is not None:
delay = random.randrange(self.min_delay_event, self.max_delay_event)
self.log.info('Recovering xmr swap chain B lock tx for bid %s in %d seconds', bid_id.hex(), delay)
self.createEventInSession(delay, EventTypes.RECOVER_XMR_SWAP_LOCK_TX_B, bid_id, session)
else:
bid.setState(BidStates.XMR_SWAP_FAILED_REFUNDED)
if bid.was_received:
if not bid.was_sent:
bid.setState(BidStates.XMR_SWAP_FAILED_REFUNDED)
else:
self.log.info('Coin a lock refund spent by unknown tx, bid {}'.format(bid_id.hex()))
bid.setState(BidStates.XMR_SWAP_FAILED_SWIPED)
self.saveBidInSession(bid_id, bid, session, xmr_swap, save_in_progress=offer)
session.commit()
except Exception as ex:
self.log.error('process_XMR_SWAP_A_LOCK_REFUND_tx_spend %s', str(ex))
if self.debug:
self.log.error(traceback.format_exc())
finally:
session.close()
session.remove()
self.mxDB.release()
def processSpentOutput(self, coin_type, watched_output, spend_txid_hex, spend_n, spend_txn):
if watched_output.swap_type == SwapTypes.XMR_SWAP:
if watched_output.tx_type == TxTypes.XMR_SWAP_A_LOCK:
self.process_XMR_SWAP_A_LOCK_tx_spend(watched_output.bid_id, spend_txid_hex, spend_txn['hex'])
elif watched_output.tx_type == TxTypes.XMR_SWAP_A_LOCK_REFUND:
self.process_XMR_SWAP_A_LOCK_REFUND_tx_spend(watched_output.bid_id, spend_txid_hex, spend_txn)
self.removeWatchedOutput(coin_type, watched_output.bid_id, watched_output.txid_hex)
return
if watched_output.tx_type == BidStates.SWAP_PARTICIPATING:
self.participateTxnSpent(watched_output.bid_id, spend_txid_hex, spend_n, spend_txn)
else:
self.initiateTxnSpent(watched_output.bid_id, spend_txid_hex, spend_n, spend_txn)
def checkForSpends(self, coin_type, c):
# assert(self.mxDB.locked())
self.log.debug('checkForSpends %s', coin_type)
# TODO: Check for spends on watchonly txns where possible
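        # Prefer the node's spent index when available (Particl); otherwise scan
        # each new block from last_height_checked for spends of watched outputs.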
if 'have_spent_index' in self.coin_clients[coin_type] and self.coin_clients[coin_type]['have_spent_index']:
# TODO: batch getspentinfo
for o in c['watched_outputs']:
found_spend = None
try:
found_spend = self.callcoinrpc(Coins.PART, 'getspentinfo', [{'txid': o.txid_hex, 'index': o.vout}])
except Exception as ex:
if 'Unable to get spent info' not in str(ex):
self.log.warning('getspentinfo %s', str(ex))
if found_spend is not None:
self.log.debug('Found spend in spentindex %s %d in %s %d', o.txid_hex, o.vout, found_spend['txid'], found_spend['index'])
spend_txid = found_spend['txid']
spend_n = found_spend['index']
spend_txn = self.callcoinrpc(Coins.PART, 'getrawtransaction', [spend_txid, True])
self.processSpentOutput(coin_type, o, spend_txid, spend_n, spend_txn)
else:
chain_blocks = self.callcoinrpc(coin_type, 'getblockcount')
last_height_checked = c['last_height_checked']
self.log.debug('chain_blocks, last_height_checked %s %s', chain_blocks, last_height_checked)
while last_height_checked < chain_blocks:
block_hash = self.callcoinrpc(coin_type, 'getblockhash', [last_height_checked + 1])
try:
block = self.callcoinrpc(coin_type, 'getblock', [block_hash, 2])
except Exception as e:
if 'Block not available (pruned data)' in str(e):
# TODO: Better solution?
bci = self.callcoinrpc(coin_type, 'getblockchaininfo')
self.log.error('Coin %s last_height_checked %d set to pruneheight %d', self.ci(coin_type).coin_name(), last_height_checked, bci['pruneheight'])
last_height_checked = bci['pruneheight']
continue
for tx in block['tx']:
for i, inp in enumerate(tx['vin']):
for o in c['watched_outputs']:
inp_txid = inp.get('txid', None)
if inp_txid is None: # Coinbase
continue
if inp_txid == o.txid_hex and inp['vout'] == o.vout:
self.log.debug('Found spend from search %s %d in %s %d', o.txid_hex, o.vout, tx['txid'], i)
self.processSpentOutput(coin_type, o, tx['txid'], i, tx)
last_height_checked += 1
if c['last_height_checked'] != last_height_checked:
c['last_height_checked'] = last_height_checked
self.setIntKV('last_height_checked_' + chainparams[coin_type]['name'], last_height_checked)
def expireMessages(self):
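        # Delete smsg inbox messages whose TTL has elapsed.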
self.mxDB.acquire()
try:
now = int(time.time())
options = {'encoding': 'none'}
ro = self.callrpc('smsginbox', ['all', '', options])
num_messages = 0
num_removed = 0
for msg in ro['messages']:
num_messages += 1
expire_at = msg['sent'] + msg['ttl']
if expire_at < now:
options = {'encoding': 'none', 'delete': True}
del_msg = self.callrpc('smsg', [msg['msgid'], options])
num_removed += 1
if num_messages + num_removed > 0:
self.log.info('Expired {} / {} messages.'.format(num_removed, num_messages))
self.log.debug('TODO: Expire records from db')
finally:
self.mxDB.release()
def countQueuedEvents(self, session, bid_id, event_type):
q = session.query(EventQueue).filter(sa.and_(EventQueue.active_ind == 1, EventQueue.linked_id == bid_id, EventQueue.event_type == event_type))
return q.count()
def checkEvents(self):
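        # Run queued events whose trigger time has passed, then clear them
        # (kept with active_ind = 2 in debug mode for inspection).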
self.mxDB.acquire()
now = int(time.time())
session = None
try:
session = scoped_session(self.session_factory)
q = session.query(EventQueue).filter(sa.and_(EventQueue.active_ind == 1, EventQueue.trigger_at <= now))
for row in q:
try:
if row.event_type == EventTypes.ACCEPT_BID:
self.acceptBid(row.linked_id)
elif row.event_type == EventTypes.ACCEPT_XMR_BID:
self.acceptXmrBid(row.linked_id)
elif row.event_type == EventTypes.SIGN_XMR_SWAP_LOCK_TX_A:
self.sendXmrBidTxnSigsFtoL(row.linked_id, session)
elif row.event_type == EventTypes.SEND_XMR_SWAP_LOCK_TX_A:
self.sendXmrBidCoinALockTx(row.linked_id, session)
elif row.event_type == EventTypes.SEND_XMR_SWAP_LOCK_TX_B:
self.sendXmrBidCoinBLockTx(row.linked_id, session)
elif row.event_type == EventTypes.SEND_XMR_LOCK_RELEASE:
self.sendXmrBidLockRelease(row.linked_id, session)
elif row.event_type == EventTypes.REDEEM_XMR_SWAP_LOCK_TX_A:
self.redeemXmrBidCoinALockTx(row.linked_id, session)
elif row.event_type == EventTypes.REDEEM_XMR_SWAP_LOCK_TX_B:
self.redeemXmrBidCoinBLockTx(row.linked_id, session)
elif row.event_type == EventTypes.RECOVER_XMR_SWAP_LOCK_TX_B:
self.recoverXmrBidCoinBLockTx(row.linked_id, session)
else:
self.log.warning('Unknown event type: %d', row.event_type)
except Exception as ex:
if self.debug:
self.log.error(traceback.format_exc())
self.log.error('checkEvents failed: {}'.format(str(ex)))
if self.debug:
session.execute('UPDATE eventqueue SET active_ind = 2 WHERE trigger_at <= {}'.format(now))
else:
session.execute('DELETE FROM eventqueue WHERE trigger_at <= {}'.format(now))
session.commit()
finally:
if session:
session.close()
session.remove()
self.mxDB.release()
def checkXmrSwaps(self):
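        # Assemble bids and bid accepts received as split messages, and expire
        # partially received ones after ttl_xmr_split_messages seconds.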
self.mxDB.acquire()
now = int(time.time())
ttl_xmr_split_messages = 60 * 60
session = None
try:
session = scoped_session(self.session_factory)
q = session.query(Bid).filter(Bid.state == BidStates.BID_RECEIVING)
for bid in q:
q = session.execute('SELECT COUNT(*) FROM xmr_split_data WHERE bid_id = x\'{}\' AND msg_type = {}'.format(bid.bid_id.hex(), XmrSplitMsgTypes.BID)).first()
num_segments = q[0]
if num_segments > 1:
try:
self.receiveXmrBid(bid, session)
except Exception as ex:
self.log.info('Verify xmr bid {} failed: {}'.format(bid.bid_id.hex(), str(ex)))
bid.setState(BidStates.BID_ERROR, 'Failed validation: ' + str(ex))
session.add(bid)
self.updateBidInProgress(bid)
continue
if bid.created_at + ttl_xmr_split_messages < now:
self.log.debug('Expiring partially received bid: {}'.format(bid.bid_id.hex()))
bid.setState(BidStates.BID_ERROR, 'Timed out')
session.add(bid)
q = session.query(Bid).filter(Bid.state == BidStates.BID_RECEIVING_ACC)
for bid in q:
q = session.execute('SELECT COUNT(*) FROM xmr_split_data WHERE bid_id = x\'{}\' AND msg_type = {}'.format(bid.bid_id.hex(), XmrSplitMsgTypes.BID_ACCEPT)).first()
num_segments = q[0]
if num_segments > 1:
try:
self.receiveXmrBidAccept(bid, session)
except Exception as ex:
self.log.info('Verify xmr bid accept {} failed: {}'.format(bid.bid_id.hex(), str(ex)))
bid.setState(BidStates.BID_ERROR, 'Failed accept validation: ' + str(ex))
session.add(bid)
self.updateBidInProgress(bid)
continue
if bid.created_at + ttl_xmr_split_messages < now:
self.log.debug('Expiring partially received bid accept: {}'.format(bid.bid_id.hex()))
bid.setState(BidStates.BID_ERROR, 'Timed out')
session.add(bid)
# Expire old records
q = session.query(XmrSplitData).filter(XmrSplitData.created_at + ttl_xmr_split_messages < now)
q.delete(synchronize_session=False)
session.commit()
finally:
if session:
session.close()
session.remove()
self.mxDB.release()
def processOffer(self, msg):
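# Decode and validate an incoming offer message, then store it as a new Offer
# record (plus an XmrOffer record for XMR_SWAP offers).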
offer_bytes = bytes.fromhex(msg['hex'][2:-2])
offer_data = OfferMessage()
offer_data.ParseFromString(offer_bytes)
# Validate data
now = int(time.time())
coin_from = Coins(offer_data.coin_from)
ci_from = self.ci(coin_from)
coin_to = Coins(offer_data.coin_to)
ci_to = self.ci(coin_to)
ensure(offer_data.coin_from != offer_data.coin_to, 'coin_from == coin_to')
self.validateSwapType(coin_from, coin_to, offer_data.swap_type)
self.validateOfferAmounts(coin_from, coin_to, offer_data.amount_from, offer_data.rate, offer_data.min_bid_amount)
self.validateOfferLockValue(coin_from, coin_to, offer_data.lock_type, offer_data.lock_value)
self.validateOfferValidTime(offer_data.swap_type, coin_from, coin_to, offer_data.time_valid)
ensure(msg['sent'] + offer_data.time_valid >= now, 'Offer expired')
if offer_data.swap_type == SwapTypes.SELLER_FIRST:
ensure(len(offer_data.proof_address) == 0, 'Unexpected data')
ensure(len(offer_data.proof_signature) == 0, 'Unexpected data')
ensure(len(offer_data.pkhash_seller) == 0, 'Unexpected data')
ensure(len(offer_data.secret_hash) == 0, 'Unexpected data')
elif offer_data.swap_type == SwapTypes.BUYER_FIRST:
raise ValueError('TODO')
elif offer_data.swap_type == SwapTypes.XMR_SWAP:
ensure(coin_from not in non_script_type_coins, 'Invalid coin from type')
ensure(coin_to in non_script_type_coins, 'Invalid coin to type')
self.log.debug('TODO - More restrictions')
else:
raise ValueError('Unknown swap type {}.'.format(offer_data.swap_type))
offer_id = bytes.fromhex(msg['msgid'])
if self.isOfferRevoked(offer_id, msg['from']):
raise ValueError('Offer has been revoked {}.'.format(offer_id.hex()))
session = scoped_session(self.session_factory)
try:
# Offers must be received on the public network_addr or manually created addresses
if msg['to'] != self.network_addr:
# Double check active_ind, shouldn't be possible to receive message if not active
query_str = 'SELECT COUNT(addr_id) FROM smsgaddresses WHERE addr = "{}" AND use_type = {} AND active_ind = 1'.format(msg['to'], AddressTypes.RECV_OFFER)
rv = session.execute(query_str).first()
if rv[0] < 1:
raise ValueError('Offer received on incorrect address')
# Check whether the offer already exists locally (e.g. sent from this node)
existing_offer = self.getOffer(offer_id)
if existing_offer is None:
offer = Offer(
offer_id=offer_id,
active_ind=1,
protocol_version=offer_data.protocol_version,
coin_from=offer_data.coin_from,
coin_to=offer_data.coin_to,
amount_from=offer_data.amount_from,
rate=offer_data.rate,
min_bid_amount=offer_data.min_bid_amount,
time_valid=offer_data.time_valid,
lock_type=int(offer_data.lock_type),
lock_value=offer_data.lock_value,
swap_type=offer_data.swap_type,
amount_negotiable=offer_data.amount_negotiable,
rate_negotiable=offer_data.rate_negotiable,
addr_to=msg['to'],
addr_from=msg['from'],
created_at=msg['sent'],
expire_at=msg['sent'] + offer_data.time_valid,
was_sent=False)
offer.setState(OfferStates.OFFER_RECEIVED)
session.add(offer)
if offer.swap_type == SwapTypes.XMR_SWAP:
xmr_offer = XmrOffer()
xmr_offer.offer_id = offer_id
xmr_offer.lock_time_1 = ci_from.getExpectedSequence(offer_data.lock_type, offer_data.lock_value)
xmr_offer.lock_time_2 = ci_from.getExpectedSequence(offer_data.lock_type, offer_data.lock_value)
xmr_offer.a_fee_rate = offer_data.fee_rate_from
xmr_offer.b_fee_rate = offer_data.fee_rate_to
session.add(xmr_offer)
self.log.debug('Received new offer %s', offer_id.hex())
else:
existing_offer.setState(OfferStates.OFFER_RECEIVED)
session.add(existing_offer)
session.commit()
finally:
session.close()
session.remove()
def processOfferRevoke(self, msg):
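# Verify the revoke signature against the offer's sending address and
# deactivate the offer; store the revoke if the offer is not yet known.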
ensure(msg['to'] == self.network_addr, 'Message received on wrong address')
msg_bytes = bytes.fromhex(msg['hex'][2:-2])
msg_data = OfferRevokeMessage()
msg_data.ParseFromString(msg_bytes)
now = int(time.time())
self.mxDB.acquire()
session = None
try:
session = scoped_session(self.session_factory)
if len(msg_data.offer_msg_id) != 28:
raise ValueError('Invalid msg_id length')
if len(msg_data.signature) != 65:
raise ValueError('Invalid signature length')
offer = session.query(Offer).filter_by(offer_id=msg_data.offer_msg_id).first()
if offer is None:
self.storeOfferRevoke(msg_data.offer_msg_id, msg_data.signature)
raise ValueError('Offer not found: {}'.format(msg_data.offer_msg_id.hex()))
if offer.expire_at <= now:
raise ValueError('Offer already expired: {}'.format(msg_data.offer_msg_id.hex()))
signature_enc = base64.b64encode(msg_data.signature).decode('utf-8')
passed = self.callcoinrpc(Coins.PART, 'verifymessage', [offer.addr_from, signature_enc, msg_data.offer_msg_id.hex() + '_revoke'])
ensure(passed is True, 'Signature invalid')
offer.active_ind = 2
# TODO: Remove message, or wait for expire
session.add(offer)
session.commit()
finally:
if session:
session.close()
session.remove()
self.mxDB.release()
def processBid(self, msg):
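# Validate a bid received for an offer sent from this node: check expiry,
# amounts and (for SELLER_FIRST) proof of funds, then store the bid and
# optionally queue an auto-accept event.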
self.log.debug('Processing bid msg %s', msg['msgid'])
now = int(time.time())
bid_bytes = bytes.fromhex(msg['hex'][2:-2])
bid_data = BidMessage()
bid_data.ParseFromString(bid_bytes)
# Validate data
ensure(len(bid_data.offer_msg_id) == 28, 'Bad offer_id length')
offer_id = bid_data.offer_msg_id
offer = self.getOffer(offer_id, sent=True)
ensure(offer and offer.was_sent, 'Unknown offer')
ensure(offer.state == OfferStates.OFFER_RECEIVED, 'Bad offer state')
ensure(msg['to'] == offer.addr_from, 'Received on incorrect address')
ensure(now <= offer.expire_at, 'Offer expired')
self.validateBidValidTime(offer.swap_type, offer.coin_from, offer.coin_to, bid_data.time_valid)
ensure(now <= msg['sent'] + bid_data.time_valid, 'Bid expired')
self.validateBidAmount(offer, bid_data.amount, bid_data.rate)
# TODO: Allow higher bids
# assert(bid_data.rate != offer['data'].rate), 'Bid rate mismatch'
coin_to = Coins(offer.coin_to)
ci_from = self.ci(offer.coin_from)
ci_to = self.ci(coin_to)
amount_to = int((bid_data.amount * bid_data.rate) // ci_from.COIN())
swap_type = offer.swap_type
if swap_type == SwapTypes.SELLER_FIRST:
ensure(len(bid_data.pkhash_buyer) == 20, 'Bad pkhash_buyer length')
# Verify proof of funds
bid_proof_address = replaceAddrPrefix(bid_data.proof_address, Coins.PART, self.chain)
mm = chainparams[coin_to]['message_magic']
passed = self.ci(Coins.PART).verifyMessage(bid_proof_address, bid_data.proof_address + '_swap_proof_' + offer_id.hex(), bid_data.proof_signature, mm)
ensure(passed is True, 'Proof of funds signature invalid')
if self.coin_clients[coin_to]['use_segwit']:
addr_search = self.encodeSegwit(coin_to, decodeAddress(bid_data.proof_address)[1:])
else:
addr_search = bid_data.proof_address
sum_unspent = self.getAddressBalance(coin_to, addr_search)
self.log.debug('Proof of funds %s %s', bid_data.proof_address, self.ci(coin_to).format_amount(sum_unspent))
ensure(sum_unspent >= amount_to, 'Proof of funds failed')
elif swap_type == SwapTypes.BUYER_FIRST:
raise ValueError('TODO')
else:
raise ValueError('Unknown swap type {}.'.format(swap_type))
bid_id = bytes.fromhex(msg['msgid'])
bid = self.getBid(bid_id)
if bid is None:
bid = Bid(
active_ind=1,
bid_id=bid_id,
offer_id=offer_id,
protocol_version=bid_data.protocol_version,
amount=bid_data.amount,
rate=bid_data.rate,
pkhash_buyer=bid_data.pkhash_buyer,
created_at=msg['sent'],
amount_to=amount_to,
expire_at=msg['sent'] + bid_data.time_valid,
bid_addr=msg['from'],
was_received=True,
chain_a_height_start=ci_from.getChainHeight(),
chain_b_height_start=ci_to.getChainHeight(),
)
else:
ensure(bid.state == BidStates.BID_SENT, 'Wrong bid state: {}'.format(str(BidStates(bid.state))))
bid.created_at = msg['sent']
bid.expire_at = msg['sent'] + bid_data.time_valid
bid.was_received = True
if len(bid_data.proof_address) > 0:
bid.proof_address = bid_data.proof_address
bid.setState(BidStates.BID_RECEIVED)
self.log.info('Received valid bid %s for offer %s', bid_id.hex(), bid_data.offer_msg_id.hex())
self.saveBid(bid_id, bid)
# Auto accept the bid if enabled and no other bid for this offer has already been accepted
if offer.auto_accept_bids:
if self.countAcceptedBids(offer_id) > 0:
self.log.info('Not auto accepting bid %s, already have', bid_id.hex())
elif bid_data.amount != offer.amount_from:
self.log.info('Not auto accepting bid %s, want exact amount match', bid_id.hex())
else:
delay = random.randrange(self.min_delay_event, self.max_delay_event)
self.log.info('Auto accepting bid %s in %d seconds', bid_id.hex(), delay)
self.createEvent(delay, EventTypes.ACCEPT_BID, bid_id)
def processBidAccept(self, msg):
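# Validate a bid accept message: decode the contract script and check the
# secret hash, buyer pubkey hash and lock value before marking the bid
# accepted and adding the swap to swaps_in_progress.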
self.log.debug('Processing bid accepted msg %s', msg['msgid'])
now = int(time.time())
bid_accept_bytes = bytes.fromhex(msg['hex'][2:-2])
bid_accept_data = BidAcceptMessage()
bid_accept_data.ParseFromString(bid_accept_bytes)
ensure(len(bid_accept_data.bid_msg_id) == 28, 'Bad bid_msg_id length')
ensure(len(bid_accept_data.initiate_txid) == 32, 'Bad initiate_txid length')
ensure(len(bid_accept_data.contract_script) < 100, 'Bad contract_script length')
self.log.debug('for bid %s', bid_accept_data.bid_msg_id.hex())
bid_id = bid_accept_data.bid_msg_id
bid, offer = self.getBidAndOffer(bid_id)
ensure(bid is not None and bid.was_sent is True, 'Unknown bidid')
ensure(offer, 'Offer not found ' + bid.offer_id.hex())
coin_from = Coins(offer.coin_from)
ci_from = self.ci(coin_from)
ensure(bid.expire_at > now + self._bid_expired_leeway, 'Bid expired')
if bid.state >= BidStates.BID_ACCEPTED:
if bid.was_received: # Sent to self
self.log.info('Received valid bid accept %s for bid %s sent to self', bid.accept_msg_id.hex(), bid_id.hex())
return
raise ValueError('Wrong bid state: {}'.format(str(BidStates(bid.state))))
use_csv = offer.lock_type < TxLockTypes.ABS_LOCK_BLOCKS
# TODO: Verify script without decoding?
decoded_script = self.callcoinrpc(Coins.PART, 'decodescript', [bid_accept_data.contract_script.hex()])
lock_check_op = 'OP_CHECKSEQUENCEVERIFY' if use_csv else 'OP_CHECKLOCKTIMEVERIFY'
prog = re.compile(r'OP_IF OP_SIZE 32 OP_EQUALVERIFY OP_SHA256 (\w+) OP_EQUALVERIFY OP_DUP OP_HASH160 (\w+) OP_ELSE (\d+) {} OP_DROP OP_DUP OP_HASH160 (\w+) OP_ENDIF OP_EQUALVERIFY OP_CHECKSIG'.format(lock_check_op))
rr = prog.match(decoded_script['asm'])
if not rr:
raise ValueError('Bad script')
scriptvalues = rr.groups()
ensure(len(scriptvalues[0]) == 64, 'Bad secret_hash length')
ensure(bytes.fromhex(scriptvalues[1]) == bid.pkhash_buyer, 'pkhash_buyer mismatch')
script_lock_value = int(scriptvalues[2])
if use_csv:
expect_sequence = ci_from.getExpectedSequence(offer.lock_type, offer.lock_value)
ensure(script_lock_value == expect_sequence, 'sequence mismatch')
else:
if offer.lock_type == TxLockTypes.ABS_LOCK_BLOCKS:
self.log.warning('TODO: validate absolute lock values')
else:
ensure(script_lock_value <= bid.created_at + offer.lock_value + atomic_swap_1.INITIATE_TX_TIMEOUT, 'script lock time too high')
ensure(script_lock_value >= bid.created_at + offer.lock_value, 'script lock time too low')
ensure(len(scriptvalues[3]) == 40, 'pkhash_refund bad length')
ensure(bid.accept_msg_id is None, 'Bid already accepted')
bid.accept_msg_id = bytes.fromhex(msg['msgid'])
bid.initiate_tx = SwapTx(
bid_id=bid_id,
tx_type=TxTypes.ITX,
txid=bid_accept_data.initiate_txid,
script=bid_accept_data.contract_script,
)
bid.pkhash_seller = bytes.fromhex(scriptvalues[3])
bid.setState(BidStates.BID_ACCEPTED)
bid.setITxState(TxStates.TX_NONE)
self.log.info('Received valid bid accept %s for bid %s', bid.accept_msg_id.hex(), bid_id.hex())
self.saveBid(bid_id, bid)
self.swaps_in_progress[bid_id] = (bid, offer)
def receiveXmrBid(self, bid, session):
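# Offerer completing a received xmr bid: concatenate any DLEAG split data,
# verify the proof, extract the follower's pubkeys and optionally queue an
# auto-accept event.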
self.log.debug('Receiving xmr bid %s', bid.bid_id.hex())
now = int(time.time())
offer, xmr_offer = self.getXmrOfferFromSession(session, bid.offer_id, sent=True)
ensure(offer and offer.was_sent, 'Offer not found: {}.'.format(bid.offer_id.hex()))
ensure(xmr_offer, 'XMR offer not found: {}.'.format(bid.offer_id.hex()))
xmr_swap = session.query(XmrSwap).filter_by(bid_id=bid.bid_id).first()
ensure(xmr_swap, 'XMR swap not found: {}.'.format(bid.bid_id.hex()))
ci_from = self.ci(Coins(offer.coin_from))
ci_to = self.ci(Coins(offer.coin_to))
if offer.coin_to == Coins.XMR:
if len(xmr_swap.kbsf_dleag) < ci_to.lengthDLEAG():
q = session.query(XmrSplitData).filter(sa.and_(XmrSplitData.bid_id == bid.bid_id, XmrSplitData.msg_type == XmrSplitMsgTypes.BID)).order_by(XmrSplitData.msg_sequence.asc())
for row in q:
xmr_swap.kbsf_dleag += row.dleag
if not ci_to.verifyDLEAG(xmr_swap.kbsf_dleag):
raise ValueError('Invalid DLEAG proof.')
# Extract pubkeys from MSG1L DLEAG
xmr_swap.pkasf = xmr_swap.kbsf_dleag[0: 33]
if not ci_from.verifyPubkey(xmr_swap.pkasf):
raise ValueError('Invalid coin a pubkey.')
xmr_swap.pkbsf = xmr_swap.kbsf_dleag[33: 33 + 32]
if not ci_to.verifyPubkey(xmr_swap.pkbsf):
raise ValueError('Invalid coin b pubkey.')
else:
xmr_swap.pkasf = xmr_swap.kbsf_dleag[0: 33]
if not ci_from.verifyPubkey(xmr_swap.pkasf):
raise ValueError('Invalid coin a pubkey.')
xmr_swap.pkbsf = xmr_swap.pkasf
ensure(ci_to.verifyKey(xmr_swap.vkbvf), 'Invalid key, vkbvf')
ensure(ci_from.verifyPubkey(xmr_swap.pkaf), 'Invalid pubkey, pkaf')
self.log.info('Received valid bid %s for xmr offer %s', bid.bid_id.hex(), bid.offer_id.hex())
bid.setState(BidStates.BID_RECEIVED)
# Auto accept the bid if enabled and no other bid for this offer has already been accepted
if offer.auto_accept_bids:
if self.countAcceptedBids(bid.offer_id) > 0:
self.log.info('Not auto accepting bid %s, already have', bid.bid_id.hex())
elif bid.amount != offer.amount_from:
self.log.info('Not auto accepting bid %s, want exact amount match', bid.bid_id.hex())
else:
delay = random.randrange(self.min_delay_event, self.max_delay_event)
self.log.info('Auto accepting xmr bid %s in %d seconds', bid.bid_id.hex(), delay)
self.createEventInSession(delay, EventTypes.ACCEPT_XMR_BID, bid.bid_id, session)
bid.setState(BidStates.SWAP_DELAYING)
self.saveBidInSession(bid.bid_id, bid, session, xmr_swap)
def receiveXmrBidAccept(self, bid, session):
# Follower receiving MSG1F and MSG2F
self.log.debug('Receiving xmr bid accept %s', bid.bid_id.hex())
now = int(time.time())
offer, xmr_offer = self.getXmrOffer(bid.offer_id, sent=True)
ensure(offer, 'Offer not found: {}.'.format(bid.offer_id.hex()))
ensure(xmr_offer, 'XMR offer not found: {}.'.format(bid.offer_id.hex()))
xmr_swap = session.query(XmrSwap).filter_by(bid_id=bid.bid_id).first()
ensure(xmr_swap, 'XMR swap not found: {}.'.format(bid.bid_id.hex()))
ci_from = self.ci(offer.coin_from)
ci_to = self.ci(offer.coin_to)
if offer.coin_to == Coins.XMR:
if len(xmr_swap.kbsl_dleag) < ci_to.lengthDLEAG():
q = session.query(XmrSplitData).filter(sa.and_(XmrSplitData.bid_id == bid.bid_id, XmrSplitData.msg_type == XmrSplitMsgTypes.BID_ACCEPT)).order_by(XmrSplitData.msg_sequence.asc())
for row in q:
xmr_swap.kbsl_dleag += row.dleag
if not ci_to.verifyDLEAG(xmr_swap.kbsl_dleag):
raise ValueError('Invalid DLEAG proof.')
# Extract pubkeys from MSG1F DLEAG
xmr_swap.pkasl = xmr_swap.kbsl_dleag[0: 33]
if not ci_from.verifyPubkey(xmr_swap.pkasl):
raise ValueError('Invalid coin a pubkey.')
xmr_swap.pkbsl = xmr_swap.kbsl_dleag[33: 33 + 32]
if not ci_to.verifyPubkey(xmr_swap.pkbsl):
raise ValueError('Invalid coin b pubkey.')
else:
xmr_swap.pkasl = xmr_swap.kbsl_dleag[0: 33]
if not ci_from.verifyPubkey(xmr_swap.pkasl):
raise ValueError('Invalid coin a pubkey.')
xmr_swap.pkbsl = xmr_swap.pkasl
# vkbv and vkbvl are verified in processXmrBidAccept
xmr_swap.pkbv = ci_to.sumPubkeys(xmr_swap.pkbvl, xmr_swap.pkbvf)
xmr_swap.pkbs = ci_to.sumPubkeys(xmr_swap.pkbsl, xmr_swap.pkbsf)
if not ci_from.verifyPubkey(xmr_swap.pkal):
raise ValueError('Invalid pubkey.')
if xmr_swap.pkbvl == xmr_swap.pkbvf:
raise ValueError('Duplicate scriptless view pubkey.')
if xmr_swap.pkbsl == xmr_swap.pkbsf:
raise ValueError('Duplicate scriptless spend pubkey.')
if xmr_swap.pkal == xmr_swap.pkaf:
raise ValueError('Duplicate script spend pubkey.')
bid.setState(BidStates.SWAP_DELAYING)
self.saveBidInSession(bid.bid_id, bid, session, xmr_swap)
delay = random.randrange(self.min_delay_event, self.max_delay_event)
self.log.info('Responding to xmr bid accept %s in %d seconds', bid.bid_id.hex(), delay)
self.createEventInSession(delay, EventTypes.SIGN_XMR_SWAP_LOCK_TX_A, bid.bid_id, session)
def processXmrBid(self, msg):
# MSG1L
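# Validate an incoming xmr bid against an offer sent from this node, create
# the Bid and XmrSwap records, then complete it via receiveXmrBid or wait for
# DLEAG split messages.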
self.log.debug('Processing xmr bid msg %s', msg['msgid'])
now = int(time.time())
bid_bytes = bytes.fromhex(msg['hex'][2:-2])
bid_data = XmrBidMessage()
bid_data.ParseFromString(bid_bytes)
# Validate data
ensure(len(bid_data.offer_msg_id) == 28, 'Bad offer_id length')
offer_id = bid_data.offer_msg_id
offer, xmr_offer = self.getXmrOffer(offer_id, sent=True)
ensure(offer and offer.was_sent, 'Offer not found: {}.'.format(offer_id.hex()))
ensure(xmr_offer, 'XMR offer not found: {}.'.format(offer_id.hex()))
ci_from = self.ci(offer.coin_from)
ci_to = self.ci(offer.coin_to)
if not validOfferStateToReceiveBid(offer.state):
raise ValueError('Bad offer state')
ensure(msg['to'] == offer.addr_from, 'Received on incorrect address')
ensure(now <= offer.expire_at, 'Offer expired')
self.validateBidValidTime(offer.swap_type, offer.coin_from, offer.coin_to, bid_data.time_valid)
ensure(now <= msg['sent'] + bid_data.time_valid, 'Bid expired')
self.validateBidAmount(offer, bid_data.amount, bid_data.rate)
ensure(ci_to.verifyKey(bid_data.kbvf), 'Invalid chain B follower view key')
ensure(ci_from.verifyPubkey(bid_data.pkaf), 'Invalid chain A follower public key')
bid_id = bytes.fromhex(msg['msgid'])
bid, xmr_swap = self.getXmrBid(bid_id)
if bid is None:
bid = Bid(
active_ind=1,
bid_id=bid_id,
offer_id=offer_id,
protocol_version=bid_data.protocol_version,
amount=bid_data.amount,
rate=bid_data.rate,
created_at=msg['sent'],
amount_to=(bid_data.amount * bid_data.rate) // ci_from.COIN(),
expire_at=msg['sent'] + bid_data.time_valid,
bid_addr=msg['from'],
was_received=True,
chain_a_height_start=ci_from.getChainHeight(),
chain_b_height_start=ci_to.getChainHeight(),
)
xmr_swap = XmrSwap(
bid_id=bid_id,
dest_af=bid_data.dest_af,
pkaf=bid_data.pkaf,
vkbvf=bid_data.kbvf,
pkbvf=ci_to.getPubkey(bid_data.kbvf),
kbsf_dleag=bid_data.kbsf_dleag,
)
wallet_restore_height = self.getWalletRestoreHeight(ci_to)
if bid.chain_b_height_start < wallet_restore_height:
bid.chain_b_height_start = wallet_restore_height
self.log.warning('XMR swap restore height clamped to {}'.format(wallet_restore_height))
else:
ensure(bid.state == BidStates.BID_SENT, 'Wrong bid state: {}'.format(str(BidStates(bid.state))))
bid.created_at = msg['sent']
bid.expire_at = msg['sent'] + bid_data.time_valid
bid.was_received = True
bid.setState(BidStates.BID_RECEIVING)
self.log.info('Receiving xmr bid %s for offer %s', bid_id.hex(), bid_data.offer_msg_id.hex())
self.saveBid(bid_id, bid, xmr_swap=xmr_swap)
if offer.coin_to != Coins.XMR:
with self.mxDB:
try:
session = scoped_session(self.session_factory)
self.receiveXmrBid(bid, session)
session.commit()
finally:
session.close()
session.remove()
def processXmrBidAccept(self, msg):
# Follower receiving MSG1F and MSG2F
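# Verify the leader's chain A lock, lock refund and lock refund spend txns and
# the leader's refund tx signature, then complete the accept directly or wait
# for DLEAG split messages.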
self.log.debug('Processing xmr bid accept msg %s', msg['msgid'])
now = int(time.time())
msg_bytes = bytes.fromhex(msg['hex'][2:-2])
msg_data = XmrBidAcceptMessage()
msg_data.ParseFromString(msg_bytes)
ensure(len(msg_data.bid_msg_id) == 28, 'Bad bid_msg_id length')
self.log.debug('for bid %s', msg_data.bid_msg_id.hex())
bid, xmr_swap = self.getXmrBid(msg_data.bid_msg_id)
ensure(bid, 'Bid not found: {}.'.format(msg_data.bid_msg_id.hex()))
ensure(xmr_swap, 'XMR swap not found: {}.'.format(msg_data.bid_msg_id.hex()))
offer, xmr_offer = self.getXmrOffer(bid.offer_id, sent=True)
ensure(offer, 'Offer not found: {}.'.format(bid.offer_id.hex()))
ensure(xmr_offer, 'XMR offer not found: {}.'.format(bid.offer_id.hex()))
ci_from = self.ci(offer.coin_from)
ci_to = self.ci(offer.coin_to)
try:
xmr_swap.pkal = msg_data.pkal
xmr_swap.vkbvl = msg_data.kbvl
ensure(ci_to.verifyKey(xmr_swap.vkbvl), 'Invalid key, vkbvl')
xmr_swap.vkbv = ci_to.sumKeys(xmr_swap.vkbvl, xmr_swap.vkbvf)
ensure(ci_to.verifyKey(xmr_swap.vkbv), 'Invalid key, vkbv')
xmr_swap.pkbvl = ci_to.getPubkey(msg_data.kbvl)
xmr_swap.kbsl_dleag = msg_data.kbsl_dleag
xmr_swap.a_lock_tx = msg_data.a_lock_tx
xmr_swap.a_lock_tx_script = msg_data.a_lock_tx_script
xmr_swap.a_lock_refund_tx = msg_data.a_lock_refund_tx
xmr_swap.a_lock_refund_tx_script = msg_data.a_lock_refund_tx_script
xmr_swap.a_lock_refund_spend_tx = msg_data.a_lock_refund_spend_tx
xmr_swap.a_lock_refund_spend_tx_id = ci_from.getTxid(xmr_swap.a_lock_refund_spend_tx)
xmr_swap.al_lock_refund_tx_sig = msg_data.al_lock_refund_tx_sig
# TODO: check_lock_tx_inputs without txindex
check_a_lock_tx_inputs = False
xmr_swap.a_lock_tx_id, xmr_swap.a_lock_tx_vout = ci_from.verifyLockTx(
xmr_swap.a_lock_tx, xmr_swap.a_lock_tx_script,
bid.amount,
xmr_swap.pkal, xmr_swap.pkaf,
xmr_offer.a_fee_rate,
check_a_lock_tx_inputs, xmr_swap.vkbv)
a_lock_tx_dest = ci_from.getScriptDest(xmr_swap.a_lock_tx_script)
xmr_swap.a_lock_refund_tx_id, xmr_swap.a_swap_refund_value, lock_refund_vout = ci_from.verifyLockRefundTx(
xmr_swap.a_lock_refund_tx, xmr_swap.a_lock_tx, xmr_swap.a_lock_refund_tx_script,
xmr_swap.a_lock_tx_id, xmr_swap.a_lock_tx_vout, xmr_offer.lock_time_1, xmr_swap.a_lock_tx_script,
xmr_swap.pkal, xmr_swap.pkaf,
xmr_offer.lock_time_2,
bid.amount, xmr_offer.a_fee_rate, xmr_swap.vkbv)
ci_from.verifyLockRefundSpendTx(
xmr_swap.a_lock_refund_spend_tx, xmr_swap.a_lock_refund_tx,
xmr_swap.a_lock_refund_tx_id, xmr_swap.a_lock_refund_tx_script,
xmr_swap.pkal,
lock_refund_vout, xmr_swap.a_swap_refund_value, xmr_offer.a_fee_rate, xmr_swap.vkbv)
self.log.info('Checking leader\'s lock refund tx signature')
prevout_amount = ci_from.getLockTxSwapOutputValue(bid, xmr_swap)
v = ci_from.verifyTxSig(xmr_swap.a_lock_refund_tx, xmr_swap.al_lock_refund_tx_sig, xmr_swap.pkal, 0, xmr_swap.a_lock_tx_script, prevout_amount)
ensure(v, 'Invalid coin A lock refund tx leader sig')
bid.setState(BidStates.BID_RECEIVING_ACC)
self.saveBid(bid.bid_id, bid, xmr_swap=xmr_swap)
if offer.coin_to != Coins.XMR:
with self.mxDB:
try:
session = scoped_session(self.session_factory)
self.receiveXmrBidAccept(bid, session)
session.commit()
finally:
session.close()
session.remove()
except Exception as ex:
if self.debug:
self.log.error(traceback.format_exc())
self.setBidError(bid.bid_id, bid, str(ex), xmr_swap=xmr_swap)
def watchXmrSwap(self, bid, offer, xmr_swap):
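# Add the swap to swaps_in_progress and register the chain A lock and lock
# refund outputs with the output watcher.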
self.log.debug('XMR swap in progress, bid %s', bid.bid_id.hex())
self.swaps_in_progress[bid.bid_id] = (bid, offer)
coin_from = Coins(offer.coin_from)
self.setLastHeightChecked(coin_from, bid.chain_a_height_start)
self.addWatchedOutput(coin_from, bid.bid_id, bid.xmr_a_lock_tx.txid.hex(), bid.xmr_a_lock_tx.vout, TxTypes.XMR_SWAP_A_LOCK, SwapTypes.XMR_SWAP)
lock_refund_vout = self.ci(coin_from).getLockRefundTxSwapOutput(xmr_swap)
self.addWatchedOutput(coin_from, bid.bid_id, xmr_swap.a_lock_refund_tx_id.hex(), lock_refund_vout, TxTypes.XMR_SWAP_A_LOCK_REFUND, SwapTypes.XMR_SWAP)
bid.in_progress = 1
def sendXmrBidTxnSigsFtoL(self, bid_id, session):
# F -> L: Sending MSG3L
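# Follower signs the refund path (adaptor sig for the lock refund spend tx,
# plain sig for the lock refund tx), sends MSG3L to the leader and starts
# watching for the chain A lock tx.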
self.log.debug('Signing xmr bid lock txns %s', bid_id.hex())
bid, xmr_swap = self.getXmrBidFromSession(session, bid_id)
ensure(bid, 'Bid not found: {}.'.format(bid_id.hex()))
ensure(xmr_swap, 'XMR swap not found: {}.'.format(bid_id.hex()))
offer, xmr_offer = self.getXmrOfferFromSession(session, bid.offer_id, sent=False)
ensure(offer, 'Offer not found: {}.'.format(bid.offer_id.hex()))
ensure(xmr_offer, 'XMR offer not found: {}.'.format(bid.offer_id.hex()))
coin_from = Coins(offer.coin_from)
coin_to = Coins(offer.coin_to)
ci_from = self.ci(coin_from)
ci_to = self.ci(coin_to)
try:
kaf = self.getPathKey(coin_from, coin_to, bid.created_at, xmr_swap.contract_count, KeyTypes.KAF)
prevout_amount = ci_from.getLockRefundTxSwapOutputValue(bid, xmr_swap)
xmr_swap.af_lock_refund_spend_tx_esig = ci_from.signTxOtVES(kaf, xmr_swap.pkasl, xmr_swap.a_lock_refund_spend_tx, 0, xmr_swap.a_lock_refund_tx_script, prevout_amount)
prevout_amount = ci_from.getLockTxSwapOutputValue(bid, xmr_swap)
xmr_swap.af_lock_refund_tx_sig = ci_from.signTx(kaf, xmr_swap.a_lock_refund_tx, 0, xmr_swap.a_lock_tx_script, prevout_amount)
addLockRefundSigs(self, xmr_swap, ci_from)
msg_buf = XmrBidLockTxSigsMessage(
bid_msg_id=bid_id,
af_lock_refund_spend_tx_esig=xmr_swap.af_lock_refund_spend_tx_esig,
af_lock_refund_tx_sig=xmr_swap.af_lock_refund_tx_sig
)
msg_bytes = msg_buf.SerializeToString()
payload_hex = str.format('{:02x}', MessageTypes.XMR_BID_TXN_SIGS_FL) + msg_bytes.hex()
options = {'decodehex': True, 'ttl_is_seconds': True}
# TODO: set msg_valid based on bid / offer parameters
msg_valid = self.SMSG_SECONDS_IN_HOUR * 48
ro = self.callrpc('smsgsend', [bid.bid_addr, offer.addr_from, payload_hex, False, msg_valid, False, options])
xmr_swap.coin_a_lock_tx_sigs_l_msg_id = bytes.fromhex(ro['msgid'])
self.log.info('Sent XMR_BID_TXN_SIGS_FL %s', xmr_swap.coin_a_lock_tx_sigs_l_msg_id.hex())
a_lock_tx_id = ci_from.getTxid(xmr_swap.a_lock_tx)
a_lock_tx_vout = ci_from.getTxOutputPos(xmr_swap.a_lock_tx, xmr_swap.a_lock_tx_script)
self.log.debug('Waiting for lock txn %s to %s chain for bid %s', a_lock_tx_id.hex(), ci_from.coin_name(), bid_id.hex())
bid.xmr_a_lock_tx = SwapTx(
bid_id=bid_id,
tx_type=TxTypes.XMR_SWAP_A_LOCK,
txid=a_lock_tx_id,
vout=a_lock_tx_vout,
)
bid.xmr_a_lock_tx.setState(TxStates.TX_NONE)
bid.setState(BidStates.BID_ACCEPTED) # XMR
self.watchXmrSwap(bid, offer, xmr_swap)
self.saveBidInSession(bid_id, bid, session, xmr_swap)
except Exception as ex:
if self.debug:
self.log.error(traceback.format_exc())
def sendXmrBidCoinALockTx(self, bid_id, session):
# Send coin A lock tx and MSG4F L -> F
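# Leader creates the lock spend tx and its adaptor sig, sends the unsigned
# spend tx and key ownership proof to the follower (MSG4F), then signs and
# broadcasts the chain A lock tx.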
self.log.debug('Sending coin A lock tx for xmr bid %s', bid_id.hex())
bid, xmr_swap = self.getXmrBidFromSession(session, bid_id)
ensure(bid, 'Bid not found: {}.'.format(bid_id.hex()))
ensure(xmr_swap, 'XMR swap not found: {}.'.format(bid_id.hex()))
offer, xmr_offer = self.getXmrOfferFromSession(session, bid.offer_id, sent=False)
ensure(offer, 'Offer not found: {}.'.format(bid.offer_id.hex()))
ensure(xmr_offer, 'XMR offer not found: {}.'.format(bid.offer_id.hex()))
coin_from = Coins(offer.coin_from)
coin_to = Coins(offer.coin_to)
ci_from = self.ci(coin_from)
ci_to = self.ci(coin_to)
kal = self.getPathKey(coin_from, coin_to, bid.created_at, xmr_swap.contract_count, KeyTypes.KAL)
xmr_swap.a_lock_spend_tx = ci_from.createScriptLockSpendTx(
xmr_swap.a_lock_tx, xmr_swap.a_lock_tx_script,
xmr_swap.dest_af,
xmr_offer.a_fee_rate, xmr_swap.vkbv)
xmr_swap.a_lock_spend_tx_id = ci_from.getTxid(xmr_swap.a_lock_spend_tx)
prevout_amount = ci_from.getLockTxSwapOutputValue(bid, xmr_swap)
xmr_swap.al_lock_spend_tx_esig = ci_from.signTxOtVES(kal, xmr_swap.pkasf, xmr_swap.a_lock_spend_tx, 0, xmr_swap.a_lock_tx_script, prevout_amount)
# Prove leader can sign for kal
xmr_swap.kal_sig = ci_from.signCompact(kal, 'proof key owned for swap')
msg_buf = XmrBidLockSpendTxMessage(
bid_msg_id=bid_id,
a_lock_spend_tx=xmr_swap.a_lock_spend_tx,
kal_sig=xmr_swap.kal_sig)
msg_bytes = msg_buf.SerializeToString()
payload_hex = str.format('{:02x}', MessageTypes.XMR_BID_LOCK_SPEND_TX_LF) + msg_bytes.hex()
options = {'decodehex': True, 'ttl_is_seconds': True}
# TODO: set msg_valid based on bid / offer parameters
msg_valid = self.SMSG_SECONDS_IN_HOUR * 48
ro = self.callrpc('smsgsend', [offer.addr_from, bid.bid_addr, payload_hex, False, msg_valid, False, options])
xmr_swap.coin_a_lock_refund_spend_tx_msg_id = bytes.fromhex(ro['msgid'])
# TODO: Separate MSG4F and txn sending
# Publish the chain A lock tx
lock_tx_signed = ci_from.signTxWithWallet(xmr_swap.a_lock_tx)
txid_hex = ci_from.publishTx(lock_tx_signed)
vout_pos = ci_from.getTxOutputPos(xmr_swap.a_lock_tx, xmr_swap.a_lock_tx_script)
self.log.debug('Submitted lock txn %s to %s chain for bid %s', txid_hex, ci_from.coin_name(), bid_id.hex())
bid.xmr_a_lock_tx = SwapTx(
bid_id=bid_id,
tx_type=TxTypes.XMR_SWAP_A_LOCK,
txid=bytes.fromhex(txid_hex),
vout=vout_pos,
)
bid.xmr_a_lock_tx.setState(TxStates.TX_SENT)
bid.setState(BidStates.XMR_SWAP_HAVE_SCRIPT_COIN_SPEND_TX)
self.watchXmrSwap(bid, offer, xmr_swap)
self.logBidEvent(bid.bid_id, EventLogTypes.LOCK_TX_A_PUBLISHED, '', session)
self.saveBidInSession(bid_id, bid, session, xmr_swap)
def sendXmrBidCoinBLockTx(self, bid_id, session):
# Follower sending coin B lock tx
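# Publish the chain B lock tx, retrying on transient errors, and record it
# against the bid. Debug flags can stall the bid or reduce the lock amount.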
self.log.debug('Sending coin B lock tx for xmr bid %s', bid_id.hex())
bid, xmr_swap = self.getXmrBidFromSession(session, bid_id)
ensure(bid, 'Bid not found: {}.'.format(bid_id.hex()))
ensure(xmr_swap, 'XMR swap not found: {}.'.format(bid_id.hex()))
offer, xmr_offer = self.getXmrOfferFromSession(session, bid.offer_id, sent=False)
ensure(offer, 'Offer not found: {}.'.format(bid.offer_id.hex()))
ensure(xmr_offer, 'XMR offer not found: {}.'.format(bid.offer_id.hex()))
coin_from = Coins(offer.coin_from)
coin_to = Coins(offer.coin_to)
ci_from = self.ci(coin_from)
ci_to = self.ci(coin_to)
if bid.debug_ind == DebugTypes.BID_STOP_AFTER_COIN_A_LOCK:
self.log.debug('XMR bid %s: Stalling bid for testing: %d.', bid_id.hex(), bid.debug_ind)
bid.setState(BidStates.BID_STALLED_FOR_TEST)
self.saveBidInSession(bid_id, bid, session, xmr_swap, save_in_progress=offer)
self.logBidEvent(bid.bid_id, EventLogTypes.DEBUG_TWEAK_APPLIED, 'ind {}'.format(bid.debug_ind), session)
return
if bid.debug_ind == DebugTypes.CREATE_INVALID_COIN_B_LOCK:
bid.amount_to -= int(bid.amount_to * 0.1)
self.log.debug('XMR bid %s: Debug %d - Reducing lock b txn amount by 10%% to %s.', bid_id.hex(), bid.debug_ind, ci_to.format_amount(bid.amount_to))
self.logBidEvent(bid.bid_id, EventLogTypes.DEBUG_TWEAK_APPLIED, 'ind {}'.format(bid.debug_ind), session)
try:
b_lock_tx_id = ci_to.publishBLockTx(xmr_swap.pkbv, xmr_swap.pkbs, bid.amount_to, xmr_offer.b_fee_rate)
except Exception as ex:
error_msg = 'publishBLockTx failed for bid {} with error {}'.format(bid_id.hex(), str(ex))
num_retries = self.countBidEvents(bid, EventLogTypes.FAILED_TX_B_LOCK_PUBLISH, session)
if num_retries > 0:
error_msg += ', retry no. {}'.format(num_retries)
self.log.error(error_msg)
if num_retries < 5 and (ci_to.is_transient_error(ex) or self.is_transient_error(ex)):
delay = random.randrange(self.min_delay_retry, self.max_delay_retry)
self.log.info('Retrying sending xmr swap chain B lock tx for bid %s in %d seconds', bid_id.hex(), delay)
self.createEventInSession(delay, EventTypes.SEND_XMR_SWAP_LOCK_TX_B, bid_id, session)
else:
self.setBidError(bid_id, bid, 'publishBLockTx failed: ' + str(ex), save_bid=False)
self.saveBidInSession(bid_id, bid, session, xmr_swap, save_in_progress=offer)
self.logBidEvent(bid.bid_id, EventLogTypes.FAILED_TX_B_LOCK_PUBLISH, str(ex), session)
return
self.log.debug('Submitted lock txn %s to %s chain for bid %s', b_lock_tx_id.hex(), ci_to.coin_name(), bid_id.hex())
bid.xmr_b_lock_tx = SwapTx(
bid_id=bid_id,
tx_type=TxTypes.XMR_SWAP_B_LOCK,
txid=b_lock_tx_id,
)
bid.xmr_b_lock_tx.setState(TxStates.TX_NONE)
self.logBidEvent(bid.bid_id, EventLogTypes.LOCK_TX_B_PUBLISHED, '', session)
self.saveBidInSession(bid_id, bid, session, xmr_swap, save_in_progress=offer)
def sendXmrBidLockRelease(self, bid_id, session):
# Leader releasing the coin A lock tx by sending its adaptor sig for the lock spend tx (MSG5F)
self.log.debug('Sending bid secret for xmr bid %s', bid_id.hex())
bid, xmr_swap = self.getXmrBidFromSession(session, bid_id)
ensure(bid, 'Bid not found: {}.'.format(bid_id.hex()))
ensure(xmr_swap, 'XMR swap not found: {}.'.format(bid_id.hex()))
offer, xmr_offer = self.getXmrOfferFromSession(session, bid.offer_id, sent=False)
ensure(offer, 'Offer not found: {}.'.format(bid.offer_id.hex()))
ensure(xmr_offer, 'XMR offer not found: {}.'.format(bid.offer_id.hex()))
coin_from = Coins(offer.coin_from)
coin_to = Coins(offer.coin_to)
msg_buf = XmrBidLockReleaseMessage(
bid_msg_id=bid_id,
al_lock_spend_tx_esig=xmr_swap.al_lock_spend_tx_esig)
msg_bytes = msg_buf.SerializeToString()
payload_hex = str.format('{:02x}', MessageTypes.XMR_BID_LOCK_RELEASE_LF) + msg_bytes.hex()
options = {'decodehex': True, 'ttl_is_seconds': True}
# TODO: set msg_valid based on bid / offer parameters
msg_valid = self.SMSG_SECONDS_IN_HOUR * 48
ro = self.callrpc('smsgsend', [offer.addr_from, bid.bid_addr, payload_hex, False, msg_valid, False, options])
xmr_swap.coin_a_lock_refund_spend_tx_msg_id = bytes.fromhex(ro['msgid'])
bid.setState(BidStates.XMR_SWAP_LOCK_RELEASED)
self.saveBidInSession(bid_id, bid, session, xmr_swap, save_in_progress=offer)
def redeemXmrBidCoinALockTx(self, bid_id, session):
# Follower redeeming A lock tx
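# Follower decrypts the leader's adaptor sig with kbsf, adds its own signature
# and broadcasts the chain A lock spend tx.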
self.log.debug('Redeeming coin A lock tx for xmr bid %s', bid_id.hex())
bid, xmr_swap = self.getXmrBidFromSession(session, bid_id)
ensure(bid, 'Bid not found: {}.'.format(bid_id.hex()))
ensure(xmr_swap, 'XMR swap not found: {}.'.format(bid_id.hex()))
offer, xmr_offer = self.getXmrOfferFromSession(session, bid.offer_id, sent=False)
ensure(offer, 'Offer not found: {}.'.format(bid.offer_id.hex()))
ensure(xmr_offer, 'XMR offer not found: {}.'.format(bid.offer_id.hex()))
coin_from = Coins(offer.coin_from)
coin_to = Coins(offer.coin_to)
ci_from = self.ci(coin_from)
ci_to = self.ci(coin_to)
for_ed25519 = coin_to == Coins.XMR
kbsf = self.getPathKey(coin_from, coin_to, bid.created_at, xmr_swap.contract_count, KeyTypes.KBSF, for_ed25519)
kaf = self.getPathKey(coin_from, coin_to, bid.created_at, xmr_swap.contract_count, KeyTypes.KAF)
al_lock_spend_sig = ci_from.decryptOtVES(kbsf, xmr_swap.al_lock_spend_tx_esig)
prevout_amount = ci_from.getLockTxSwapOutputValue(bid, xmr_swap)
v = ci_from.verifyTxSig(xmr_swap.a_lock_spend_tx, al_lock_spend_sig, xmr_swap.pkal, 0, xmr_swap.a_lock_tx_script, prevout_amount)
ensure(v, 'Invalid coin A lock tx spend tx leader sig')
af_lock_spend_sig = ci_from.signTx(kaf, xmr_swap.a_lock_spend_tx, 0, xmr_swap.a_lock_tx_script, prevout_amount)
v = ci_from.verifyTxSig(xmr_swap.a_lock_spend_tx, af_lock_spend_sig, xmr_swap.pkaf, 0, xmr_swap.a_lock_tx_script, prevout_amount)
ensure(v, 'Invalid coin A lock tx spend tx follower sig')
witness_stack = [
b'',
al_lock_spend_sig,
af_lock_spend_sig,
xmr_swap.a_lock_tx_script,
]
xmr_swap.a_lock_spend_tx = ci_from.setTxSignature(xmr_swap.a_lock_spend_tx, witness_stack)
txid = bytes.fromhex(ci_from.publishTx(xmr_swap.a_lock_spend_tx))
self.log.debug('Submitted lock spend txn %s to %s chain for bid %s', txid.hex(), ci_from.coin_name(), bid_id.hex())
self.logBidEvent(bid.bid_id, EventLogTypes.LOCK_TX_A_SPEND_TX_PUBLISHED, '', session)
bid.xmr_a_lock_spend_tx = SwapTx(
bid_id=bid_id,
tx_type=TxTypes.XMR_SWAP_A_LOCK_SPEND,
txid=txid,
)
bid.xmr_a_lock_spend_tx.setState(TxStates.TX_NONE)
self.saveBidInSession(bid_id, bid, session, xmr_swap, save_in_progress=offer)
def redeemXmrBidCoinBLockTx(self, bid_id, session):
# Leader redeeming B lock tx
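# Once the chain B lock tx has enough depth, the leader recovers kbsf by
# comparing the adaptor sig with the decrypted sig from the chain A lock spend
# tx, sums it with kbsl and spends the chain B lock output to its own wallet.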
self.log.debug('Redeeming coin B lock tx for xmr bid %s', bid_id.hex())
bid, xmr_swap = self.getXmrBidFromSession(session, bid_id)
ensure(bid, 'Bid not found: {}.'.format(bid_id.hex()))
ensure(xmr_swap, 'XMR swap not found: {}.'.format(bid_id.hex()))
offer, xmr_offer = self.getXmrOfferFromSession(session, bid.offer_id, sent=False)
ensure(offer, 'Offer not found: {}.'.format(bid.offer_id.hex()))
ensure(xmr_offer, 'XMR offer not found: {}.'.format(bid.offer_id.hex()))
coin_from = Coins(offer.coin_from)
coin_to = Coins(offer.coin_to)
ci_from = self.ci(coin_from)
ci_to = self.ci(coin_to)
try:
chain_height = ci_to.getChainHeight()
lock_tx_depth = (chain_height - bid.xmr_b_lock_tx.chain_height) + 1
if lock_tx_depth < ci_to.depth_spendable():
raise TemporaryError(f'Chain B lock tx depth {lock_tx_depth} < required for spending.')
# Extract the leader's decrypted signature from the chain A lock spend tx and use it to recover the follower's key share, kbsf
xmr_swap.al_lock_spend_tx_sig = ci_from.extractLeaderSig(xmr_swap.a_lock_spend_tx)
kbsf = ci_from.recoverEncKey(xmr_swap.al_lock_spend_tx_esig, xmr_swap.al_lock_spend_tx_sig, xmr_swap.pkasf)
assert kbsf is not None
for_ed25519 = coin_to == Coins.XMR
kbsl = self.getPathKey(coin_from, coin_to, bid.created_at, xmr_swap.contract_count, KeyTypes.KBSL, for_ed25519)
vkbs = ci_to.sumKeys(kbsl, kbsf)
if coin_to == Coins.XMR:
address_to = self.getCachedMainWalletAddress(ci_to)
else:
address_to = self.getCachedStealthAddressForCoin(coin_to)
txid = ci_to.spendBLockTx(xmr_swap.b_lock_tx_id, address_to, xmr_swap.vkbv, vkbs, bid.amount_to, xmr_offer.b_fee_rate, bid.chain_b_height_start)
self.log.debug('Submitted lock B spend txn %s to %s chain for bid %s', txid.hex(), ci_to.coin_name(), bid_id.hex())
self.logBidEvent(bid.bid_id, EventLogTypes.LOCK_TX_B_SPEND_TX_PUBLISHED, '', session)
except Exception as ex:
error_msg = 'spendBLockTx failed for bid {} with error {}'.format(bid_id.hex(), str(ex))
num_retries = self.countBidEvents(bid, EventLogTypes.FAILED_TX_B_SPEND, session)
if num_retries > 0:
error_msg += ', retry no. {}'.format(num_retries)
self.log.error(error_msg)
if num_retries < 100 and (ci_to.is_transient_error(ex) or self.is_transient_error(ex)):
delay = random.randrange(self.min_delay_retry, self.max_delay_retry)
self.log.info('Retrying sending xmr swap chain B spend tx for bid %s in %d seconds', bid_id.hex(), delay)
self.createEventInSession(delay, EventTypes.REDEEM_XMR_SWAP_LOCK_TX_B, bid_id, session)
else:
self.setBidError(bid_id, bid, 'spendBLockTx failed: ' + str(ex), save_bid=False)
self.saveBidInSession(bid_id, bid, session, xmr_swap, save_in_progress=offer)
self.logBidEvent(bid.bid_id, EventLogTypes.FAILED_TX_B_SPEND, str(ex), session)
return
bid.xmr_b_lock_tx.spend_txid = txid
bid.setState(BidStates.XMR_SWAP_NOSCRIPT_TX_REDEEMED)
# TODO: Why does using bid.txns error here?
self.saveBidInSession(bid_id, bid, session, xmr_swap, save_in_progress=offer)
def recoverXmrBidCoinBLockTx(self, bid_id, session):
# Follower recovering B lock tx
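# Follower recovers kbsl from the leader's spend of the lock refund output,
# sums it with kbsf and refunds the chain B lock output to its own wallet,
# retrying on transient errors.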
self.log.debug('Recovering coin B lock tx for xmr bid %s', bid_id.hex())
bid, xmr_swap = self.getXmrBidFromSession(session, bid_id)
ensure(bid, 'Bid not found: {}.'.format(bid_id.hex()))
ensure(xmr_swap, 'XMR swap not found: {}.'.format(bid_id.hex()))
offer, xmr_offer = self.getXmrOfferFromSession(session, bid.offer_id, sent=False)
ensure(offer, 'Offer not found: {}.'.format(bid.offer_id.hex()))
ensure(xmr_offer, 'XMR offer not found: {}.'.format(bid.offer_id.hex()))
coin_from = Coins(offer.coin_from)
coin_to = Coins(offer.coin_to)
ci_from = self.ci(coin_from)
ci_to = self.ci(coin_to)
# Extract the follower's decrypted signature from the chain A lock refund spend tx and use it to recover the leader's key share, kbsl
af_lock_refund_spend_tx_sig = ci_from.extractFollowerSig(xmr_swap.a_lock_refund_spend_tx)
kbsl = ci_from.recoverEncKey(xmr_swap.af_lock_refund_spend_tx_esig, af_lock_refund_spend_tx_sig, xmr_swap.pkasl)
assert kbsl is not None
for_ed25519 = coin_to == Coins.XMR
kbsf = self.getPathKey(coin_from, coin_to, bid.created_at, xmr_swap.contract_count, KeyTypes.KBSF, for_ed25519)
vkbs = ci_to.sumKeys(kbsl, kbsf)
try:
if offer.coin_to == Coins.XMR:
address_to = self.getCachedMainWalletAddress(ci_to)
else:
address_to = self.getCachedStealthAddressForCoin(coin_to)
txid = ci_to.spendBLockTx(xmr_swap.b_lock_tx_id, address_to, xmr_swap.vkbv, vkbs, bid.amount_to, xmr_offer.b_fee_rate, bid.chain_b_height_start)
self.log.debug('Submitted lock B refund txn %s to %s chain for bid %s', txid.hex(), ci_to.coin_name(), bid_id.hex())
self.logBidEvent(bid.bid_id, EventLogTypes.LOCK_TX_B_REFUND_TX_PUBLISHED, '', session)
except Exception as ex:
# TODO: Make min-conf 10?
error_msg = 'spendBLockTx refund failed for bid {} with error {}'.format(bid_id.hex(), str(ex))
num_retries = self.countBidEvents(bid, EventLogTypes.FAILED_TX_B_REFUND, session)
if num_retries > 0:
error_msg += ', retry no. {}'.format(num_retries)
self.log.error(error_msg)
str_error = str(ex)
if num_retries < 100 and (ci_to.is_transient_error(ex) or self.is_transient_error(ex)):
delay = random.randrange(self.min_delay_retry, self.max_delay_retry)
self.log.info('Retrying sending xmr swap chain B refund tx for bid %s in %d seconds', bid_id.hex(), delay)
self.createEventInSession(delay, EventTypes.RECOVER_XMR_SWAP_LOCK_TX_B, bid_id, session)
else:
self.setBidError(bid_id, bid, 'spendBLockTx for refund failed: ' + str(ex), save_bid=False)
self.saveBidInSession(bid_id, bid, session, xmr_swap, save_in_progress=offer)
self.logBidEvent(bid.bid_id, EventLogTypes.FAILED_TX_B_REFUND, str_error, session)
return
bid.xmr_b_lock_tx.spend_txid = txid
bid.setState(BidStates.XMR_SWAP_NOSCRIPT_TX_RECOVERED)
self.saveBidInSession(bid_id, bid, session, xmr_swap, save_in_progress=offer)
def processXmrBidCoinALockSigs(self, msg):
# Leader processing MSG3L
self.log.debug('Processing xmr coin a follower lock sigs msg %s', msg['msgid'])
now = int(time.time())
msg_bytes = bytes.fromhex(msg['hex'][2:-2])
msg_data = XmrBidLockTxSigsMessage()
msg_data.ParseFromString(msg_bytes)
ensure(len(msg_data.bid_msg_id) == 28, 'Bad bid_msg_id length')
bid_id = msg_data.bid_msg_id
bid, xmr_swap = self.getXmrBid(bid_id)
ensure(bid, 'Bid not found: {}.'.format(bid_id.hex()))
ensure(xmr_swap, 'XMR swap not found: {}.'.format(bid_id.hex()))
offer, xmr_offer = self.getXmrOffer(bid.offer_id, sent=False)
ensure(offer, 'Offer not found: {}.'.format(bid.offer_id.hex()))
ensure(xmr_offer, 'XMR offer not found: {}.'.format(bid.offer_id.hex()))
coin_from = Coins(offer.coin_from)
coin_to = Coins(offer.coin_to)
ci_from = self.ci(coin_from)
ci_to = self.ci(coin_to)
try:
xmr_swap.af_lock_refund_spend_tx_esig = msg_data.af_lock_refund_spend_tx_esig
xmr_swap.af_lock_refund_tx_sig = msg_data.af_lock_refund_tx_sig
for_ed25519 = coin_to == Coins.XMR
kbsl = self.getPathKey(coin_from, coin_to, bid.created_at, xmr_swap.contract_count, KeyTypes.KBSL, for_ed25519)
kal = self.getPathKey(coin_from, coin_to, bid.created_at, xmr_swap.contract_count, KeyTypes.KAL)
xmr_swap.af_lock_refund_spend_tx_sig = ci_from.decryptOtVES(kbsl, xmr_swap.af_lock_refund_spend_tx_esig)
prevout_amount = ci_from.getLockRefundTxSwapOutputValue(bid, xmr_swap)
al_lock_refund_spend_tx_sig = ci_from.signTx(kal, xmr_swap.a_lock_refund_spend_tx, 0, xmr_swap.a_lock_refund_tx_script, prevout_amount)
self.log.debug('Setting lock refund spend tx sigs')
witness_stack = [
b'',
al_lock_refund_spend_tx_sig,
xmr_swap.af_lock_refund_spend_tx_sig,
bytes((1,)),
xmr_swap.a_lock_refund_tx_script,
]
signed_tx = ci_from.setTxSignature(xmr_swap.a_lock_refund_spend_tx, witness_stack)
ensure(signed_tx, 'setTxSignature failed')
xmr_swap.a_lock_refund_spend_tx = signed_tx
v = ci_from.verifyTxSig(xmr_swap.a_lock_refund_spend_tx, xmr_swap.af_lock_refund_spend_tx_sig, xmr_swap.pkaf, 0, xmr_swap.a_lock_refund_tx_script, prevout_amount)
ensure(v, 'Invalid signature for lock refund spend txn')
addLockRefundSigs(self, xmr_swap, ci_from)
delay = random.randrange(self.min_delay_event, self.max_delay_event)
self.log.info('Sending coin A lock tx for xmr bid %s in %d seconds', bid_id.hex(), delay)
self.createEvent(delay, EventTypes.SEND_XMR_SWAP_LOCK_TX_A, bid_id)
bid.setState(BidStates.SWAP_DELAYING)
self.saveBid(bid_id, bid, xmr_swap=xmr_swap)
except Exception as ex:
if self.debug:
self.log.error(traceback.format_exc())
self.setBidError(bid_id, bid, str(ex))
def processXmrBidLockSpendTx(self, msg):
# Follower receiving MSG4F
self.log.debug('Processing xmr bid lock spend tx msg %s', msg['msgid'])
now = int(time.time())
msg_bytes = bytes.fromhex(msg['hex'][2:-2])
msg_data = XmrBidLockSpendTxMessage()
msg_data.ParseFromString(msg_bytes)
ensure(len(msg_data.bid_msg_id) == 28, 'Bad bid_msg_id length')
bid_id = msg_data.bid_msg_id
bid, xmr_swap = self.getXmrBid(bid_id)
ensure(bid, 'Bid not found: {}.'.format(bid_id.hex()))
ensure(xmr_swap, 'XMR swap not found: {}.'.format(bid_id.hex()))
offer, xmr_offer = self.getXmrOffer(bid.offer_id, sent=False)
ensure(offer, 'Offer not found: {}.'.format(bid.offer_id.hex()))
ensure(xmr_offer, 'XMR offer not found: {}.'.format(bid.offer_id.hex()))
ci_from = self.ci(Coins(offer.coin_from))
ci_to = self.ci(Coins(offer.coin_to))
try:
xmr_swap.a_lock_spend_tx = msg_data.a_lock_spend_tx
xmr_swap.a_lock_spend_tx_id = ci_from.getTxid(xmr_swap.a_lock_spend_tx)
xmr_swap.kal_sig = msg_data.kal_sig
ci_from.verifyLockSpendTx(
xmr_swap.a_lock_spend_tx,
xmr_swap.a_lock_tx, xmr_swap.a_lock_tx_script,
xmr_swap.dest_af, xmr_offer.a_fee_rate, xmr_swap.vkbv)
ci_from.verifyCompact(xmr_swap.pkal, 'proof key owned for swap', xmr_swap.kal_sig)
bid.setState(BidStates.XMR_SWAP_HAVE_SCRIPT_COIN_SPEND_TX)
self.saveBid(bid_id, bid, xmr_swap=xmr_swap)
except Exception as ex:
if self.debug:
self.log.error(traceback.format_exc())
self.setBidError(bid_id, bid, str(ex))
# Update copy of bid in swaps_in_progress
self.swaps_in_progress[bid_id] = (bid, offer)
def processXmrSplitMessage(self, msg):
self.log.debug('Processing xmr split msg %s', msg['msgid'])
now = int(time.time())
msg_bytes = bytes.fromhex(msg['hex'][2:-2])
msg_data = XmrSplitMessage()
msg_data.ParseFromString(msg_bytes)
# Validate data
ensure(len(msg_data.msg_id) == 28, 'Bad msg_id length')
if msg_data.msg_type == XmrSplitMsgTypes.BID or msg_data.msg_type == XmrSplitMsgTypes.BID_ACCEPT:
try:
session = scoped_session(self.session_factory)
q = session.execute('SELECT COUNT(*) FROM xmr_split_data WHERE bid_id = x\'{}\' AND msg_type = {} AND msg_sequence = {}'.format(msg_data.msg_id.hex(), msg_data.msg_type, msg_data.sequence)).first()
num_exists = q[0]
if num_exists > 0:
self.log.warning('Ignoring duplicate xmr_split_data entry: ({}, {}, {})'.format(msg_data.msg_id.hex(), msg_data.msg_type, msg_data.sequence))
return
dbr = XmrSplitData()
dbr.bid_id = msg_data.msg_id
dbr.msg_type = msg_data.msg_type
dbr.msg_sequence = msg_data.sequence
dbr.dleag = msg_data.dleag
dbr.created_at = now
session.add(dbr)
session.commit()
finally:
session.close()
session.remove()
def processXmrLockReleaseMessage(self, msg):
self.log.debug('Processing xmr secret msg %s', msg['msgid'])
now = int(time.time())
msg_bytes = bytes.fromhex(msg['hex'][2:-2])
msg_data = XmrBidLockReleaseMessage()
msg_data.ParseFromString(msg_bytes)
# Validate data
ensure(len(msg_data.bid_msg_id) == 28, 'Bad msg_id length')
bid_id = msg_data.bid_msg_id
bid, xmr_swap = self.getXmrBid(bid_id)
ensure(bid, 'Bid not found: {}.'.format(bid_id.hex()))
ensure(xmr_swap, 'XMR swap not found: {}.'.format(bid_id.hex()))
offer, xmr_offer = self.getXmrOffer(bid.offer_id, sent=False)
ensure(offer, 'Offer not found: {}.'.format(bid.offer_id.hex()))
ensure(xmr_offer, 'XMR offer not found: {}.'.format(bid.offer_id.hex()))
ci_from = self.ci(Coins(offer.coin_from))
xmr_swap.al_lock_spend_tx_esig = msg_data.al_lock_spend_tx_esig
try:
prevout_amount = ci_from.getLockTxSwapOutputValue(bid, xmr_swap)
v = ci_from.verifyTxOtVES(
xmr_swap.a_lock_spend_tx, xmr_swap.al_lock_spend_tx_esig,
xmr_swap.pkal, xmr_swap.pkasf, 0, xmr_swap.a_lock_tx_script, prevout_amount)
ensure(v, 'verifyTxOtVES failed for chain a lock tx leader esig')
except Exception as ex:
if self.debug:
self.log.error(traceback.format_exc())
self.setBidError(bid_id, bid, str(ex))
self.swaps_in_progress[bid_id] = (bid, offer)
return
delay = random.randrange(self.min_delay_event, self.max_delay_event)
self.log.info('Redeeming coin A lock tx for xmr bid %s in %d seconds', bid_id.hex(), delay)
self.createEvent(delay, EventTypes.REDEEM_XMR_SWAP_LOCK_TX_A, bid_id)
bid.setState(BidStates.XMR_SWAP_LOCK_RELEASED)
self.saveBid(bid_id, bid, xmr_swap=xmr_swap)
self.swaps_in_progress[bid_id] = (bid, offer)
def processMsg(self, msg):
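# Dispatch a received smsg by type: the first byte of the hex payload selects
# the handler, the remainder holds the serialized protobuf message.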
self.mxDB.acquire()
try:
msg_type = int(msg['hex'][:2], 16)
rv = None
if msg_type == MessageTypes.OFFER:
self.processOffer(msg)
elif msg_type == MessageTypes.BID:
self.processBid(msg)
elif msg_type == MessageTypes.BID_ACCEPT:
self.processBidAccept(msg)
elif msg_type == MessageTypes.XMR_BID_FL:
self.processXmrBid(msg)
elif msg_type == MessageTypes.XMR_BID_ACCEPT_LF:
self.processXmrBidAccept(msg)
elif msg_type == MessageTypes.XMR_BID_TXN_SIGS_FL:
self.processXmrBidCoinALockSigs(msg)
elif msg_type == MessageTypes.XMR_BID_LOCK_SPEND_TX_LF:
self.processXmrBidLockSpendTx(msg)
elif msg_type == MessageTypes.XMR_BID_SPLIT:
self.processXmrSplitMessage(msg)
elif msg_type == MessageTypes.XMR_BID_LOCK_RELEASE_LF:
self.processXmrLockReleaseMessage(msg)
elif msg_type == MessageTypes.OFFER_REVOKE:
self.processOfferRevoke(msg)
except Exception as ex:
self.log.error('processMsg %s', str(ex))
if self.debug:
self.log.error(traceback.format_exc())
finally:
self.mxDB.release()
def processZmqSmsg(self):
message = self.zmqSubscriber.recv()
clear = self.zmqSubscriber.recv()
if message[0] == 3: # Paid smsg
return # TODO: Switch to paid?
msg_id = message[2:]
options = {'encoding': 'hex', 'setread': True}
num_tries = 5
for i in range(num_tries + 1):
try:
msg = self.callrpc('smsg', [msg_id.hex(), options])
break
except Exception as e:
if 'Unknown message id' in str(e) and i < num_tries:
time.sleep(1)
else:
raise e
self.processMsg(msg)
def update(self):
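# Main polling step: read any queued smsg zmq notification, then run the
# periodic checks (bid progress, watched outputs, message expiry, event queue,
# xmr swaps) once their intervals have elapsed.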
try:
# while True:
message = self.zmqSubscriber.recv(flags=zmq.NOBLOCK)
if message == b'smsg':
self.processZmqSmsg()
except zmq.Again as ex:
pass
except Exception as ex:
self.log.error('smsg zmq %s', str(ex))
if self.debug:
self.log.error(traceback.format_exc())
self.mxDB.acquire()
try:
# TODO: Wait for blocks / txns, would need to check multiple coins
now = int(time.time())
if now - self._last_checked_progress >= self.check_progress_seconds:
to_remove = []
for bid_id, v in self.swaps_in_progress.items():
try:
if self.checkBidState(bid_id, v[0], v[1]) is True:
to_remove.append((bid_id, v[0], v[1]))
except Exception as ex:
if self.debug:
self.log.error('checkBidState %s', traceback.format_exc())
if self.is_transient_error(ex):
self.log.warning('checkBidState %s %s', bid_id.hex(), str(ex))
self.logBidEvent(bid_id, EventLogTypes.SYSTEM_WARNING, 'No connection to daemon', session=None)
else:
self.log.error('checkBidState %s %s', bid_id.hex(), str(ex))
self.setBidError(bid_id, v[0], str(ex))
for bid_id, bid, offer in to_remove:
self.deactivateBid(None, offer, bid)
self._last_checked_progress = now
if now - self._last_checked_watched >= self.check_watched_seconds:
for k, c in self.coin_clients.items():
if k == Coins.PART_ANON or k == Coins.PART_BLIND:
continue
if len(c['watched_outputs']) > 0:
self.checkForSpends(k, c)
self._last_checked_watched = now
if now - self._last_checked_expired >= self.check_expired_seconds:
self.expireMessages()
self._last_checked_expired = now
if now - self._last_checked_events >= self.check_events_seconds:
self.checkEvents()
self._last_checked_events = now
if now - self._last_checked_xmr_swaps >= self.check_xmr_swaps_seconds:
self.checkXmrSwaps()
self._last_checked_xmr_swaps = now
except Exception as ex:
self.log.error('update %s', str(ex))
if self.debug:
self.log.error(traceback.format_exc())
finally:
self.mxDB.release()
def manualBidUpdate(self, bid_id, data):
self.log.info('Manually updating bid %s', bid_id.hex())
self.mxDB.acquire()
try:
bid, offer = self.getBidAndOffer(bid_id)
ensure(bid, 'Bid not found {}'.format(bid_id.hex()))
ensure(offer, 'Offer not found {}'.format(bid.offer_id.hex()))
has_changed = False
if bid.state != data['bid_state']:
bid.setState(data['bid_state'])
self.log.debug('Set state to %s', strBidState(bid.state))
has_changed = True
if bid.debug_ind != data['debug_ind']:
if bid.debug_ind is None and data['debug_ind'] == -1:
pass # Already unset
else:
self.log.debug('Bid %s Setting debug flag: %s', bid_id.hex(), data['debug_ind'])
bid.debug_ind = data['debug_ind']
has_changed = True
if data['kbs_other'] is not None:
return recoverNoScriptTxnWithKey(self, bid_id, data['kbs_other'])
if has_changed:
session = scoped_session(self.session_factory)
try:
activate_bid = False
if offer.swap_type == SwapTypes.SELLER_FIRST:
if bid.state and bid.state > BidStates.BID_RECEIVED and bid.state < BidStates.SWAP_COMPLETED:
activate_bid = True
else:
self.log.debug('TODO - determine in-progress for manualBidUpdate')
if offer.swap_type == SwapTypes.XMR_SWAP:
if bid.state and isActiveBidState(bid.state):
activate_bid = True
if activate_bid:
self.activateBid(session, bid)
else:
self.deactivateBid(session, offer, bid)
self.saveBidInSession(bid_id, bid, session)
session.commit()
finally:
session.close()
session.remove()
else:
raise ValueError('No changes')
finally:
self.mxDB.release()
def editSettings(self, coin_name, data):
self.log.info('Updating settings %s', coin_name)
with self.mxDB:
settings_cc = self.settings['chainclients'][coin_name]
settings_changed = False
suggest_reboot = False
if 'lookups' in data:
if settings_cc.get('chain_lookups', 'local') != data['lookups']:
settings_changed = True
settings_cc['chain_lookups'] = data['lookups']
for coin, cc in self.coin_clients.items():
if cc['name'] == coin_name:
cc['chain_lookups'] = data['lookups']
break
for setting in ('manage_daemon', 'rpchost', 'rpcport', 'automatically_select_daemon'):
if setting not in data:
continue
if settings_cc.get(setting) != data[setting]:
settings_changed = True
suggest_reboot = True
settings_cc[setting] = data[setting]
if 'remotedaemonurls' in data:
remotedaemonurls_in = data['remotedaemonurls'].split('\n')
remotedaemonurls = set()
for url in remotedaemonurls_in:
if url.count(':') > 0:
remotedaemonurls.add(url.strip())
if set(settings_cc.get('remote_daemon_urls', [])) != remotedaemonurls:
settings_cc['remote_daemon_urls'] = list(remotedaemonurls)
settings_changed = True
suggest_reboot = True
if 'fee_priority' in data:
new_fee_priority = data['fee_priority']
ensure(new_fee_priority >= 0 and new_fee_priority < 4, 'Invalid priority')
if settings_cc.get('fee_priority', 0) != new_fee_priority:
settings_changed = True
settings_cc['fee_priority'] = new_fee_priority
for coin, cc in self.coin_clients.items():
if cc['name'] == coin_name:
cc['fee_priority'] = new_fee_priority
self.ci(coin).setFeePriority(new_fee_priority)
break
if 'conf_target' in data:
new_conf_target = data['conf_target']
ensure(new_conf_target >= 1 and new_conf_target < 33, 'Invalid conf_target')
if settings_cc.get('conf_target', 2) != new_conf_target:
settings_changed = True
settings_cc['conf_target'] = new_conf_target
for coin, cc in self.coin_clients.items():
if cc['name'] == coin_name:
cc['conf_target'] = new_conf_target
self.ci(coin).setConfTarget(new_conf_target)
break
if settings_changed:
settings_path = os.path.join(self.data_dir, cfg.CONFIG_FILENAME)
shutil.copyfile(settings_path, settings_path + '.last')
with open(settings_path, 'w') as fp:
json.dump(self.settings, fp, indent=4)
return settings_changed, suggest_reboot
def enableCoin(self, coin_name):
self.log.info('Enabling coin %s', coin_name)
coin_id = self.getCoinIdFromName(coin_name)
if coin_id in (Coins.PART, Coins.PART_BLIND, Coins.PART_ANON):
raise ValueError('Invalid coin')
settings_cc = self.settings['chainclients'][coin_name]
if 'connection_type_prev' not in settings_cc:
raise ValueError('Can\'t find previous value.')
settings_cc['connection_type'] = settings_cc['connection_type_prev']
del settings_cc['connection_type_prev']
if 'manage_daemon_prev' in settings_cc:
settings_cc['manage_daemon'] = settings_cc['manage_daemon_prev']
del settings_cc['manage_daemon_prev']
if 'manage_wallet_daemon_prev' in settings_cc:
settings_cc['manage_wallet_daemon'] = settings_cc['manage_wallet_daemon_prev']
del settings_cc['manage_wallet_daemon_prev']
settings_path = os.path.join(self.data_dir, cfg.CONFIG_FILENAME)
shutil.copyfile(settings_path, settings_path + '.last')
with open(settings_path, 'w') as fp:
json.dump(self.settings, fp, indent=4)
# Client must be restarted
def disableCoin(self, coin_name):
self.log.info('Disabling coin %s', coin_name)
coin_id = self.getCoinIdFromName(coin_name)
if coin_id in (Coins.PART, Coins.PART_BLIND, Coins.PART_ANON):
raise ValueError('Invalid coin')
settings_cc = self.settings['chainclients'][coin_name]
if settings_cc['connection_type'] != 'rpc':
raise ValueError('Already disabled.')
settings_cc['manage_daemon_prev'] = settings_cc['manage_daemon']
settings_cc['manage_daemon'] = False
settings_cc['connection_type_prev'] = settings_cc['connection_type']
settings_cc['connection_type'] = 'none'
if 'manage_wallet_daemon' in settings_cc:
settings_cc['manage_wallet_daemon_prev'] = settings_cc['manage_wallet_daemon']
settings_cc['manage_wallet_daemon'] = False
settings_path = os.path.join(self.data_dir, cfg.CONFIG_FILENAME)
shutil.copyfile(settings_path, settings_path + '.last')
with open(settings_path, 'w') as fp:
json.dump(self.settings, fp, indent=4)
# Client must be restarted
def getSummary(self, opts=None):
num_watched_outputs = 0
for c, v in self.coin_clients.items():
if c in (Coins.PART_ANON, Coins.PART_BLIND):
continue
num_watched_outputs += len(v['watched_outputs'])
bids_sent = 0
bids_received = 0
q = self.engine.execute('SELECT was_sent, was_received, COUNT(*) FROM bids GROUP BY was_sent, was_received ')
for r in q:
if r[0]:
bids_sent += r[2]
if r[1]:
bids_received += r[2]
now = int(time.time())
q = self.engine.execute('SELECT COUNT(*) FROM offers WHERE active_ind = 1 AND expire_at > {}'.format(now)).first()
num_offers = q[0]
q = self.engine.execute('SELECT COUNT(*) FROM offers WHERE was_sent = 1').first()
num_sent_offers = q[0]
rv = {
'network': self.chain,
'num_swapping': len(self.swaps_in_progress),
'num_network_offers': num_offers,
'num_sent_offers': num_sent_offers,
'num_recv_bids': bids_received,
'num_sent_bids': bids_sent,
'num_watched_outputs': num_watched_outputs,
}
return rv
def getWalletInfo(self, coin):
ci = self.ci(coin)
blockchaininfo = ci.getBlockchainInfo()
walletinfo = ci.getWalletInfo()
scale = chainparams[coin]['decimal_places']
rv = {
'version': self.coin_clients[coin]['core_version'],
'deposit_address': self.getCachedAddressForCoin(coin),
'name': ci.coin_name(),
'blocks': blockchaininfo['blocks'],
'balance': format_amount(make_int(walletinfo['balance'], scale), scale),
'unconfirmed': format_amount(make_int(walletinfo.get('unconfirmed_balance'), scale), scale),
'synced': '{0:.2f}'.format(round(blockchaininfo['verificationprogress'], 2)),
'expected_seed': ci.knownWalletSeed(),
}
if coin == Coins.PART:
rv['stealth_address'] = self.getCachedStealthAddressForCoin(Coins.PART)
rv['anon_balance'] = walletinfo['anon_balance']
rv['anon_pending'] = walletinfo['unconfirmed_anon'] + walletinfo['immature_anon_balance']
rv['blind_balance'] = walletinfo['blind_balance']
rv['blind_unconfirmed'] = walletinfo['unconfirmed_blind']
elif coin == Coins.XMR:
rv['main_address'] = self.getCachedMainWalletAddress(ci)
return rv
def updateWalletInfo(self, coin):
wi = self.getWalletInfo(coin)
# Store wallet info to db so it's available after startup
self.mxDB.acquire()
try:
rv = []
now = int(time.time())
session = scoped_session(self.session_factory)
session.add(Wallets(coin_id=coin, wallet_data=json.dumps(wi), created_at=now))
coin_id = int(coin)
query_str = f'DELETE FROM wallets WHERE coin_id = {coin_id} AND record_id NOT IN (SELECT record_id FROM wallets WHERE coin_id = {coin_id} ORDER BY created_at DESC LIMIT 3 )'
session.execute(query_str)
session.commit()
except Exception as e:
self.log.error(f'updateWalletInfo {e}')
finally:
session.close()
session.remove()
self._updating_wallets_info[int(coin)] = False
self.mxDB.release()
def updateWalletsInfo(self, force_update=False, only_coin=None):
now = int(time.time())
if not force_update and now - self._last_updated_wallets_info < 30:
return
for c in Coins:
if only_coin is not None and c != only_coin:
continue
if c not in chainparams:
continue
if self.coin_clients[c]['connection_type'] == 'rpc':
self._updating_wallets_info[int(c)] = True
self.thread_pool.submit(self.updateWalletInfo, c)
if only_coin is None:
self._last_updated_wallets_info = int(time.time())
def getWalletsInfo(self, opts=None):
rv = {}
for c in Coins:
if c not in chainparams:
continue
if self.coin_clients[c]['connection_type'] == 'rpc':
try:
rv[c] = self.getWalletInfo(c)
except Exception as ex:
rv[c] = {'name': chainparams[c]['name'].capitalize(), 'error': str(ex)}
return rv
def getCachedWalletsInfo(self, opts=None):
rv = {}
# Requires? self.mxDB.acquire()
try:
session = scoped_session(self.session_factory)
where_str = ''
if opts is not None and 'coin_id' in opts:
where_str = 'WHERE coin_id = {}'.format(opts['coin_id'])
inner_str = f'SELECT coin_id, MAX(created_at) as max_created_at FROM wallets {where_str} GROUP BY coin_id'
query_str = 'SELECT a.coin_id, wallet_data, created_at FROM wallets a, ({}) b WHERE a.coin_id = b.coin_id AND a.created_at = b.max_created_at'.format(inner_str)
q = session.execute(query_str)
for row in q:
coin_id = row[0]
if self.coin_clients[coin_id]['connection_type'] != 'rpc':
# Skip cached info if coin was disabled
continue
wallet_data = json.loads(row[1])
wallet_data['lastupdated'] = row[2]
wallet_data['updating'] = self._updating_wallets_info.get(coin_id, False)
# Ensure the latest deposit address is displayed
q = session.execute('SELECT value FROM kv_string WHERE key = "receive_addr_{}"'.format(chainparams[coin_id]['name']))
for row in q:
wallet_data['deposit_address'] = row[0]
rv[coin_id] = wallet_data
finally:
session.close()
session.remove()
if opts is not None and 'coin_id' in opts:
return rv
for c in Coins:
if c not in chainparams:
continue
if self.coin_clients[c]['connection_type'] == 'rpc':
coin_id = int(c)
if coin_id not in rv:
rv[coin_id] = {
'name': chainparams[c]['name'].capitalize(),
'updating': self._updating_wallets_info.get(coin_id, False),
}
return rv
def countAcceptedBids(self, offer_id=None):
self.mxDB.acquire()
try:
session = scoped_session(self.session_factory)
if offer_id:
q = session.execute('SELECT COUNT(*) FROM bids WHERE state >= {} AND offer_id = x\'{}\''.format(BidStates.BID_ACCEPTED, offer_id.hex())).first()
else:
q = session.execute('SELECT COUNT(*) FROM bids WHERE state >= {}'.format(BidStates.BID_ACCEPTED)).first()
return q[0]
finally:
session.close()
session.remove()
self.mxDB.release()
def listOffers(self, sent=False, filters={}):
self.mxDB.acquire()
try:
rv = []
now = int(time.time())
session = scoped_session(self.session_factory)
if sent:
q = session.query(Offer).filter(Offer.was_sent == True) # noqa: E712
else:
q = session.query(Offer).filter(sa.and_(Offer.expire_at > now, Offer.active_ind == 1))
filter_offer_id = filters.get('offer_id', None)
if filter_offer_id is not None:
q = q.filter(Offer.offer_id == filter_offer_id)
filter_coin_from = filters.get('coin_from', None)
if filter_coin_from and filter_coin_from > -1:
q = q.filter(Offer.coin_from == int(filter_coin_from))
filter_coin_to = filters.get('coin_to', None)
if filter_coin_to and filter_coin_to > -1:
q = q.filter(Offer.coin_to == int(filter_coin_to))
order_dir = filters.get('sort_dir', 'desc')
order_by = filters.get('sort_by', 'created_at')
if order_by == 'created_at':
q = q.order_by(Offer.created_at.desc() if order_dir == 'desc' else Offer.created_at.asc())
elif order_by == 'rate':
q = q.order_by(Offer.rate.desc() if order_dir == 'desc' else Offer.rate.asc())
limit = filters.get('limit', None)
if limit is not None:
q = q.limit(limit)
offset = filters.get('offset', None)
if offset is not None:
q = q.offset(offset)
for row in q:
# Show offers for enabled coins only
try:
ci_from = self.ci(row.coin_from)
ci_to = self.ci(row.coin_to)
except Exception as e:
continue
rv.append(row)
return rv
finally:
session.close()
session.remove()
self.mxDB.release()
def listBids(self, sent=False, offer_id=None, for_html=False, filters={}, with_identity_info=False):
self.mxDB.acquire()
try:
rv = []
now = int(time.time())
session = scoped_session(self.session_factory)
identity_fields = ''
query_str = 'SELECT bids.created_at, bids.expire_at, bids.bid_id, bids.offer_id, bids.amount, bids.state, bids.was_received, tx1.state, tx2.state, offers.coin_from, bids.rate, bids.bid_addr {} FROM bids '.format(identity_fields) + \
'LEFT JOIN offers ON offers.offer_id = bids.offer_id ' + \
'LEFT JOIN transactions AS tx1 ON tx1.bid_id = bids.bid_id AND tx1.tx_type = {} '.format(TxTypes.ITX) + \
'LEFT JOIN transactions AS tx2 ON tx2.bid_id = bids.bid_id AND tx2.tx_type = {} '.format(TxTypes.PTX)
query_str += 'WHERE bids.active_ind = 1 '
filter_bid_id = filters.get('bid_id', None)
if filter_bid_id is not None:
query_str += 'AND bids.bid_id = x\'{}\' '.format(filter_bid_id.hex())
if offer_id is not None:
query_str += 'AND bids.offer_id = x\'{}\' '.format(offer_id.hex())
elif sent:
query_str += 'AND bids.was_sent = 1 '
else:
query_str += 'AND bids.was_received = 1 '
sort_dir = filters.get('sort_dir', 'DESC').upper()
sort_by = filters.get('sort_by', 'created_at')
query_str += f' ORDER BY bids.{sort_by} {sort_dir}'
limit = filters.get('limit', None)
if limit is not None:
query_str += f' LIMIT {limit}'
offset = filters.get('offset', None)
if offset is not None:
query_str += f' OFFSET {offset}'
q = session.execute(query_str)
for row in q:
rv.append(row)
return rv
finally:
session.close()
session.remove()
self.mxDB.release()
def listSwapsInProgress(self, for_html=False):
self.mxDB.acquire()
try:
rv = []
for k, v in self.swaps_in_progress.items():
rv.append((k, v[0].offer_id.hex(), v[0].state, v[0].getITxState(), v[0].getPTxState()))
return rv
finally:
self.mxDB.release()
def listWatchedOutputs(self):
self.mxDB.acquire()
try:
rv = []
rv_heights = []
for c, v in self.coin_clients.items():
if c in (Coins.PART_ANON, Coins.PART_BLIND): # exclude duplicates
continue
if self.coin_clients[c]['connection_type'] == 'rpc':
rv_heights.append((c, v['last_height_checked']))
for o in v['watched_outputs']:
rv.append((c, o.bid_id, o.txid_hex, o.vout, o.tx_type))
return (rv, rv_heights)
finally:
self.mxDB.release()
def listAllSMSGAddresses(self, addr_id=None):
filters = ''
if addr_id is not None:
filters += f' WHERE addr_id = {addr_id} '
self.mxDB.acquire()
try:
session = scoped_session(self.session_factory)
rv = []
query_str = f'SELECT addr_id, addr, use_type, active_ind, created_at, note, pubkey FROM smsgaddresses {filters} ORDER BY created_at'
q = session.execute(query_str)
for row in q:
rv.append({
'id': row[0],
'addr': row[1],
'type': row[2],
'active_ind': row[3],
'created_at': row[4],
'note': row[5],
'pubkey': row[6],
})
return rv
finally:
session.close()
session.remove()
self.mxDB.release()
def newSMSGAddress(self, use_type=AddressTypes.RECV_OFFER, addressnote=None, session=None):
now = int(time.time())
use_session = None
try:
if session:
use_session = session
else:
self.mxDB.acquire()
use_session = scoped_session(self.session_factory)
v = use_session.query(DBKVString).filter_by(key='smsg_chain_id').first()
if not v:
smsg_account = self.callrpc('extkey', ['deriveAccount', 'smsg keys', '78900'])
smsg_account_id = smsg_account['account']
self.log.info(f'Creating smsg keys account {smsg_account_id}')
extkey = self.callrpc('extkey')
# Disable receiving on all chains
smsg_chain_id = None
extkey = self.callrpc('extkey', ['account', smsg_account_id])
for c in extkey['chains']:
rv = self.callrpc('extkey', ['options', c['id'], 'receive_on', 'false'])
if c['function'] == 'active_external':
smsg_chain_id = c['id']
if not smsg_chain_id:
raise ValueError('External chain not found.')
use_session.add(DBKVString(
key='smsg_chain_id',
value=smsg_chain_id))
else:
smsg_chain_id = v.value
smsg_chain = self.callrpc('extkey', ['key', smsg_chain_id])
num_derives = int(smsg_chain['num_derives'])
new_addr = self.callrpc('deriverangekeys', [num_derives, num_derives, smsg_chain_id, False, True])[0]
num_derives += 1
rv = self.callrpc('extkey', ['options', smsg_chain_id, 'num_derives', str(num_derives)])
addr_info = self.callrpc('getaddressinfo', [new_addr])
self.callrpc('smsgaddlocaladdress', [new_addr]) # Enable receiving smsgs
use_session.add(SmsgAddress(addr=new_addr, use_type=use_type, active_ind=1, created_at=now, note=addressnote, pubkey=addr_info['pubkey']))
return new_addr, addr_info['pubkey']
finally:
if session is None:
use_session.commit()
use_session.close()
use_session.remove()
self.mxDB.release()
def addSMSGAddress(self, pubkey_hex, addressnote=None):
self.mxDB.acquire()
try:
session = scoped_session(self.session_factory)
now = int(time.time())
ci = self.ci(Coins.PART)
add_addr = ci.pubkey_to_address(bytes.fromhex(pubkey_hex))
self.callrpc('smsgaddaddress', [add_addr, pubkey_hex])
session.add(SmsgAddress(addr=add_addr, use_type=AddressTypes.SEND_OFFER, active_ind=1, created_at=now, note=addressnote, pubkey=pubkey_hex))
session.commit()
return add_addr
finally:
session.close()
session.remove()
self.mxDB.release()
def editSMSGAddress(self, address, active_ind, addressnote):
self.mxDB.acquire()
try:
session = scoped_session(self.session_factory)
mode = '-' if active_ind == 0 else '+'
self.callrpc('smsglocalkeys', ['recv', mode, address])
session.execute('UPDATE smsgaddresses SET active_ind = {}, note = "{}" WHERE addr = "{}"'.format(active_ind, addressnote, address))
session.commit()
finally:
session.close()
session.remove()
self.mxDB.release()
def listSmsgAddresses(self, use_type_str):
if use_type_str == 'offer_send_from':
use_type = AddressTypes.OFFER
elif use_type_str == 'offer_send_to':
use_type = AddressTypes.SEND_OFFER
elif use_type_str == 'bid':
use_type = AddressTypes.BID
else:
raise ValueError('Unknown address type')
self.mxDB.acquire()
try:
session = scoped_session(self.session_factory)
rv = []
q = session.execute('SELECT sa.addr, ki.label FROM smsgaddresses AS sa LEFT JOIN knownidentities AS ki ON sa.addr = ki.address WHERE sa.use_type = {} AND sa.active_ind = 1 ORDER BY sa.addr_id DESC'.format(use_type))
for row in q:
rv.append((row[0], row[1]))
return rv
finally:
session.close()
session.remove()
self.mxDB.release()
def createCoinALockRefundSwipeTx(self, ci, bid, offer, xmr_swap, xmr_offer):
self.log.debug('Creating %s lock refund swipe tx', ci.coin_name())
pkh_dest = ci.decodeAddress(self.getReceiveAddressForCoin(ci.coin_type()))
spend_tx = ci.createScriptLockRefundSpendToFTx(
xmr_swap.a_lock_refund_tx, xmr_swap.a_lock_refund_tx_script,
pkh_dest,
xmr_offer.a_fee_rate, xmr_swap.vkbv)
vkaf = self.getPathKey(offer.coin_from, offer.coin_to, bid.created_at, xmr_swap.contract_count, KeyTypes.KAF)
prevout_amount = ci.getLockRefundTxSwapOutputValue(bid, xmr_swap)
sig = ci.signTx(vkaf, spend_tx, 0, xmr_swap.a_lock_refund_tx_script, prevout_amount)
witness_stack = [
sig,
b'',
xmr_swap.a_lock_refund_tx_script,
]
xmr_swap.a_lock_refund_swipe_tx = ci.setTxSignature(spend_tx, witness_stack)
def setBidDebugInd(self, bid_id, debug_ind):
self.log.debug('Bid %s Setting debug flag: %s', bid_id.hex(), debug_ind)
bid = self.getBid(bid_id)
bid.debug_ind = debug_ind
self.saveBid(bid_id, bid)
def storeOfferRevoke(self, offer_id, sig):
self.log.debug('Storing revoke request for offer: %s', offer_id.hex())
for pair in self._possibly_revoked_offers:
if offer_id == pair[0]:
return False
self._possibly_revoked_offers.appendleft((offer_id, sig))
return True
def isOfferRevoked(self, offer_id, offer_addr_from):
for pair in self._possibly_revoked_offers:
if offer_id == pair[0]:
signature_enc = base64.b64encode(pair[1]).decode('utf-8')
passed = self.callcoinrpc(Coins.PART, 'verifymessage', [offer_addr_from, signature_enc, offer_id.hex() + '_revoke'])
return True if passed is True else False # _possibly_revoked_offers should not contain duplicates
return False
def updateBidInProgress(self, bid):
swap_in_progress = self.swaps_in_progress.get(bid.bid_id, None)
if swap_in_progress is None:
return
self.swaps_in_progress[bid.bid_id] = (bid, swap_in_progress[1])
def getAddressLabel(self, addresses):
self.mxDB.acquire()
try:
session = scoped_session(self.session_factory)
rv = []
for a in addresses:
v = session.query(KnownIdentity).filter_by(address=a).first()
rv.append('' if (not v or not v.label) else v.label)
return rv
finally:
session.close()
session.remove()
self.mxDB.release()
def add_connection(self, host, port, peer_pubkey):
self.log.info('add_connection %s %d %s', host, port, peer_pubkey.hex())
self._network.add_connection(host, port, peer_pubkey)
def get_network_info(self):
if not self._network:
return {'Error': 'Not Initialised'}
return self._network.get_info()
def lookupRates(self, coin_from, coin_to):
self.log.debug('lookupRates {}, {}'.format(coin_from, coin_to))
rv = {}
ci_from = self.ci(int(coin_from))
ci_to = self.ci(int(coin_to))
headers = {'Connection': 'close'}
name_from = ci_from.chainparams()['name']
name_to = ci_to.chainparams()['name']
url = 'https://api.coingecko.com/api/v3/simple/price?ids={},{}&vs_currencies=usd'.format(name_from, name_to)
start = time.time()
req = urllib.request.Request(url, headers=headers)
js = json.loads(urllib.request.urlopen(req, timeout=10).read())
js['time_taken'] = time.time() - start
rate = float(js[name_from]['usd']) / float(js[name_to]['usd'])
js['rate_inferred'] = ci_to.format_amount(rate, conv_int=True, r=1)
rv['coingecko'] = js
ticker_from = ci_from.chainparams()['ticker']
ticker_to = ci_to.chainparams()['ticker']
if ci_from.coin_type() == Coins.BTC:
pair = '{}-{}'.format(ticker_from, ticker_to)
url = 'https://api.bittrex.com/api/v1.1/public/getticker?market=' + pair
start = time.time()
req = urllib.request.Request(url, headers=headers)
js = json.loads(urllib.request.urlopen(req, timeout=10).read())
js['time_taken'] = time.time() - start
js['pair'] = pair
try:
rate_inverted = ci_from.make_int(1.0 / float(js['result']['Last']), r=1)
js['rate_inferred'] = ci_to.format_amount(rate_inverted)
except Exception as e:
self.log.warning('lookupRates error: %s', str(e))
js['rate_inferred'] = 'error'
rv['bittrex'] = js
elif ci_to.coin_type() == Coins.BTC:
pair = '{}-{}'.format(ticker_to, ticker_from)
url = 'https://api.bittrex.com/api/v1.1/public/getticker?market=' + pair
start = time.time()
req = urllib.request.Request(url, headers=headers)
js = json.loads(urllib.request.urlopen(req, timeout=10).read())
js['time_taken'] = time.time() - start
js['pair'] = pair
js['rate_last'] = js['result']['Last']
rv['bittrex'] = js
else:
pair = 'BTC-{}'.format(ticker_from)
url = 'https://api.bittrex.com/api/v1.1/public/getticker?market=' + pair
start = time.time()
req = urllib.request.Request(url, headers=headers)
js_from = json.loads(urllib.request.urlopen(req, timeout=10).read())
js_from['time_taken'] = time.time() - start
js_from['pair'] = pair
pair = 'BTC-{}'.format(ticker_to)
url = 'https://api.bittrex.com/api/v1.1/public/getticker?market=' + pair
start = time.time()
req = urllib.request.Request(url, headers=headers)
js_to = json.loads(urllib.request.urlopen(req, timeout=10).read())
js_to['time_taken'] = time.time() - start
js_to['pair'] = pair
try:
rate_inferred = float(js_from['result']['Last']) / float(js_to['result']['Last'])
rate_inferred = ci_to.format_amount(rate_inferred, conv_int=True, r=1)
except Exception as e:
rate_inferred = 'error'
rv['bittrex'] = {'from': js_from, 'to': js_to, 'rate_inferred': rate_inferred}
return rv
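# A minimal standalone sketch of the cross-rate arithmetic used above
# (illustrative only, not part of basicswap): the coingecko branch infers the
# rate by dividing the two USD prices, and the bittrex fallback divides the
# two BTC-quoted last prices. The helper name below is hypothetical.
def _inferred_cross_rate(price_from, price_to):
    # price_from and price_to are quotes for the two coins in a common
    # currency (USD or BTC); the inferred cross rate is simply their ratio.
    return float(price_from) / float(price_to)
# Example: 60000 USD/BTC and 150 USD/XMR imply a rate of 400 XMR per BTC:
# _inferred_cross_rate(60000.0, 150.0) == 400.0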
|
base.py
|
import ast
import os
import time
import atexit
from logging import StreamHandler
from logging.handlers import SocketHandler
import threading
import warnings
from terra import settings
import terra.compute.utils
from terra.executor import Executor
from terra.logger import (
getLogger, LogRecordSocketReceiver, SkipStdErrAddFilter
)
logger = getLogger(__name__)
class ServiceRunFailed(Exception):
''' Exception thrown when a service runner returns non-zero
'''
class BaseService:
'''
The base class for all Terra Service definitions
``super().__init__`` should be called when inheriting a :class:`BaseService`
class's ``__init__``
Service definitions can define a ``pre_{command}`` and ``post_{command}``
function that will be called before and after a ``{command}Service`` call,
if they exist
'''
def __init__(self):
self.env = os.environ.copy()
self.volumes = []
''' A copy of the process's environment variables, local to a service '''
def _env_array(self, key):
'''
Recover array environment variables
For example, define the following in ``terra.env``
.. code-block:: bash
SOMETHING=( "hello" "there" )
array_to_python_ast_list_of_strings SOMETHING_AST "${SOMETHING[@]}"
Services can recover the environment variable as a python compatible
array via
.. code-block:: python
self._env_array('SOMETHING_AST')
'''
return ast.literal_eval(self.env[key])
def _validate_volume(self, local, remote,
check_remote=True,
local_must_exist=False):
'''
Validate volume inputs. Raise a :class:`ValueError` under any of the
following conditions:
- ``local`` is empty or None
- ``check_remote`` is True and ``remote`` is empty or None
- ``local_must_exist`` is True and ``local`` file/folder does not exist
Raises
------
ValueError
see conditions above
'''
if not local:
raise ValueError('local file/folder must be specified')
elif check_remote and not remote:
raise ValueError('remote file/folder must be specified')
elif local_must_exist and not os.path.exists(local):
raise ValueError('local file/folder does not exist {}'
.format(local))
def add_volume(self, local, remote, flags=None, prefix=None,
local_must_exist=False):
'''
Add a volume to the service
'''
self._validate_volume(local, remote, local_must_exist=local_must_exist)
self.volumes.append((local, remote))
def pre_run(self):
'''
A function that runs before the run service
All service classes should implement at least ``run_service``, as this is
the quintessential call in running a service. ``pre_run`` in
:class:`terra.compute.base.BaseService` is mainly responsible for handling
Executors that need a separate volume translation
'''
# The executor volume map is calculated on the host side, where all the
# information is available. For example, if using docker and celery, then
# docker config needs to be run to get the container volumes, and that has
# to be run on the host machine. So this is calculated here.
settings.executor.volume_map = Executor.configuration_map(self)
logger.debug4("Executor Volume map: %s", settings.executor.volume_map)
def post_run(self):
pass
class AlreadyRegisteredException(Exception):
'''
Exception thrown if a function has already been registered
'''
class BaseCompute:
'''
The base class for all Terra Service Compute Arches
'''
@classmethod
def register(cls, service):
'''
Used to register a function for a particular service using a specific
compute
'''
service_name = f'{service.__module__}.{service.__qualname__}'
def wrapper(impl):
if service_name not in services:
services[service_name] = {}
if cls in services[service_name]:
raise AlreadyRegisteredException(f'Service {service_name} already '
'registered')
services[service_name][cls] = impl
return impl
return wrapper
def __getattr__(self, name):
implementation = name + '_service'
# Default implementation caller
try:
# super hasattr
self.__getattribute__(implementation)
except AttributeError:
raise AttributeError(f'Compute command "{name}" does not have a service '
f'implementation "{implementation}"') from None
def defaultCommand(self, service_class, *args, **kwargs):
service_info = terra.compute.utils.load_service(service_class)
# Check and call pre_ call
pre_call = getattr(service_info, 'pre_' + name, None)
if pre_call:
pre_call(*args, **kwargs)
# Call command implementation
rv = self.__getattribute__(implementation)(
service_info, *args, **kwargs)
# Check and call post_ call
post_call = getattr(service_info, 'post_' + name, None)
if post_call:
post_call(*args, **kwargs)
return rv
defaultCommand.__doc__ = f'''The {name} command for {__class__.__qualname__}
The {name} command will call a service's pre_{name} if it has one,
followed by the {implementation}, and then the service's post_{name} if
it has one.
Calls {implementation}''' # noqa
defaultCommand.__name__ = name
defaultCommand.__qualname__ = type(self).__qualname__ + '.' + name
# bind function and return it
return defaultCommand.__get__(self, type(self))
def get_volume_map(self, config, service_info):
return []
def run_service(self, *args, **kwargs):
'''
Place holder for code to run an instance in the compute. Runs
``create`` and then runs and returns ``start`` by default.
'''
self.create(*args, **kwargs)
return self.start(*args, **kwargs)
def configuration_map_service(self, service_info):
'''
Returns the mapping of volumes from the host to the remote
Returns
-------
list
Return a list of tuple pairs [(host, remote), ... ] of the volumes
mounted from the host to remote
'''
return service_info.volumes
@staticmethod
def configure_logger(sender, **kwargs):
if settings.terra.zone == 'controller':
# Setup log file for use in configure
if settings.logging.log_file:
os.makedirs(os.path.dirname(settings.logging.log_file), exist_ok=True)
sender._log_file = settings.logging.log_file
else:
sender._log_file = os.devnull
os.makedirs(settings.processing_dir, exist_ok=True)
sender._log_file = open(sender._log_file, 'a')
sender.main_log_handler = StreamHandler(stream=sender._log_file)
sender.root_logger.addHandler(sender.main_log_handler)
# setup the TCP socket listener
sender.tcp_logging_server = LogRecordSocketReceiver(
settings.logging.server.listen_address,
settings.logging.server.port)
listener_thread = threading.Thread(
target=sender.tcp_logging_server.serve_until_stopped)
listener_thread.daemon = True
listener_thread.start()
# Wait up to a second, to make sure the thread started
for _ in range(1000):
if sender.tcp_logging_server.ready:
break
time.sleep(0.001)
else: # pragma: no cover
warnings.warn("TCP Logging server thread did not startup. "
"This is probably not a problem, unless logging isn't "
"working.", RuntimeWarning)
# Auto cleanup
@atexit.register
def cleanup_thread():
sender.tcp_logging_server.abort = 1
listener_thread.join(timeout=5)
if listener_thread.is_alive(): # pragma: no cover
warnings.warn("TCP Logger Server Thread did not shut down "
"gracefully. Attempting to exit anyways.",
RuntimeWarning)
elif settings.terra.zone == 'runner':
sender.main_log_handler = SocketHandler(
settings.logging.server.hostname, settings.logging.server.port)
# By default, all runners have access to the master controllers stderr,
# so there is no need for the master controller to echo out the log
# messages a second time.
sender.main_log_handler.addFilter(SkipStdErrAddFilter())
sender.root_logger.addHandler(sender.main_log_handler)
@staticmethod
def reconfigure_logger(sender, **kwargs):
# sender is logger in this case
#
# The default logging handler is a StreamHandler. This will reconfigure its
# output stream
if settings.terra.zone == 'controller':
if settings.logging.log_file:
os.makedirs(os.path.dirname(settings.logging.log_file), exist_ok=True)
log_file = settings.logging.log_file
else:
log_file = os.devnull
# Check to see if _log_file is unset. If it is, this is due to
# reconfigure_logger being called without configure_logger having been
# called first. While it is not important that this works, it mostly
# matters for unit testing
# if not os.path.samefile(log_file, sender._log_file.name):
if getattr(sender, '_log_file', None) is not None and \
log_file != sender._log_file.name:
os.makedirs(settings.processing_dir, exist_ok=True)
sender._log_file.close()
sender._log_file = open(log_file, 'a')
elif settings.terra.zone == 'runner':
# Only if it's changed
if settings.logging.server.hostname != sender.main_log_handler.host or \
settings.logging.server.port != sender.main_log_handler.port:
# Reconnect Socket Handler
sender.main_log_handler.close()
try:
sender.root_logger.removeHandler(sender.main_log_handler)
except ValueError: # pragma: no cover
pass
sender.main_log_handler = SocketHandler(
settings.logging.server.hostname, settings.logging.server.port)
sender.root_logger.addHandler(sender.main_log_handler)
services = {}
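# A brief sketch of how the registry above is intended to be used (names are
# illustrative, not part of terra): a concrete compute registers its
# implementation of a service definition, and defaultCommand (built in
# BaseCompute.__getattr__) later resolves the service via
# terra.compute.utils.load_service and wraps the call with the service's
# pre_/post_ hooks.
class _DemoCompute(BaseCompute):
    def run_service(self, service_info, *args, **kwargs):
        # Stand-in implementation; a real compute would start the service here.
        return service_info
class _DemoService(BaseService):
    pass
@_DemoCompute.register(_DemoService)
class _DemoService_demo(_DemoService):
    pass
# After registration, services['<this module>._DemoService'][_DemoCompute]
# refers to _DemoService_demo.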
|
activity.py
|
# -*- coding: utf-8 -*-
"""
Activity
========
Activities are self generated classes to which you can pass an identifier,
and a list of tasks to perform. The activities are in between the decider and
the tasks.
For ease, two types of task runners are available: Sync and Async. If
you need something more specific, you should either create your own runner, or
you should create a main task that will then split the work.
Create an activity::
import boto3
from garcon import activity
# First step is to create the workflow on a specific domain.
client = boto3.client('swf')
create = activity.create(client, 'domain', 'workflow-name')
initial_activity = create(
# Name of your activity
name='activity_name',
# List of tasks to run (here we use the Sync runner)
run=runner.Sync(task1),
# No requires since it's the first one. Later in your flow, if you have
# a dependency, just use the variable that contains the activity.
requires=[],
# If the activity fails, number of times you want to retry.
retry=0,
# If you want to run the activity `n` times, you can use a generator.
generator=[generator_name])
"""
from botocore import exceptions
import itertools
import json
import threading
import backoff
from garcon import log
from garcon import utils
from garcon import runner
ACTIVITY_STANDBY = 0
ACTIVITY_SCHEDULED = 1
ACTIVITY_COMPLETED = 2
ACTIVITY_FAILED = 3
DEFAULT_ACTIVITY_SCHEDULE_TO_START = 600 # 10 minutes
class ActivityInstanceNotReadyException(Exception):
"""Exception when an activity instance is not ready.
Activity instances that are considered not ready are instances that have
not completed.
"""
pass
class ActivityInstance:
def __init__(
self, activity_worker, local_context=None, execution_context=None):
"""Activity Instance.
In SWF, Activity is a worker: it will get information from the context,
and will launch activity instances (only one, unless you have a
generator.) The activity instance generates its key (visible in the SWF
console) from the local context. Activity instances are owned by an
execution.
Args:
activity_worker (ActivityWorker): The activity worker that owns
this specific Activity Instance.
local_context (dict): the local context of the activity (it does
not include the execution context.) Most times the context will
be empty since it is only filled with data that comes from the
generators.
execution_context (dict): the execution context of when an activity
will be scheduled with.
"""
self.activity_worker = activity_worker
self.execution_context = execution_context or dict()
self.local_context = local_context or dict()
self.global_context = dict(
list(self.execution_context.items()) +
list(self.local_context.items()))
@property
def activity_name(self):
"""Return the activity name of the worker.
"""
return self.activity_worker.name
@property
def retry(self):
"""Return the number of retries allowed (matches the worker.)
"""
return self.activity_worker.retry
@property
def id(self):
"""Generate the id of the activity.
The id is crucial (not just important): it allows identifying the
state of the activity instance in the event history (if it has failed,
been executed, or marked as completed.)
Return:
str: composed of the activity name (task list), and the activity
id.
"""
if not self.local_context:
activity_id = 1
else:
activity_id = utils.create_dictionary_key(self.local_context)
return '{name}-{id}'.format(
name=self.activity_name,
id=activity_id)
@property
def schedule_to_start(self):
"""Return the schedule to start timeout.
The schedule to start timeout assumes that only one activity worker is
available (since swf does not provide a count of available workers). So
if the default value is 5 minutes, and you have 10 instances: the
schedule to start will be 50 minutes for all instances.
Return:
int: Schedule to start timeout.
"""
return (
self.activity_worker.pool_size *
self.activity_worker.schedule_to_start_timeout)
@property
def schedule_to_close(self):
"""Return the schedule to close timeout.
The schedule to close timeout is a simple calculation that defines when
an activity (from the moment it has been scheduled) should end. It is
a calculation between the schedule to start timeout and the activity
timeout.
Return:
int: Schedule to close timeout.
"""
return self.schedule_to_start + self.timeout
@property
def timeout(self):
"""Return the timeout in seconds.
This timeout corresponds on when the activity has started and when we
assume the activity has ended (which corresponds in boto to
start_to_close_timeout.)
Return:
int: Task list timeout.
"""
return self.runner.timeout(self.global_context)
@property
def heartbeat_timeout(self):
"""Return the heartbeat in seconds.
This heartbeat corresponds on when an activity needs to send a signal
to swf that it is still running. This will set the value when the
activity is scheduled.
Return:
int: Task list timeout.
"""
return self.runner.heartbeat(self.global_context)
@property
def runner(self):
"""Shortcut to get access to the runner.
Raises:
runner.RunnerMissing: an activity should always have a runner,
if the runner is missing an exception is raised (we will not
be able to calculate values such as timeouts without a runner.)
Return:
Runner: the activity runner.
"""
activity_runner = getattr(self.activity_worker, 'runner', None)
if not activity_runner:
raise runner.RunnerMissing()
return activity_runner
def create_execution_input(self):
"""Create the input of the activity from the context.
AWS has a limit on the number of characters that can be used (32k). If
you use the `task.decorate`, the data sent to the activity is optimized
to match the values of the context as well as the execution context.
Return:
dict: the input to send to the activity.
"""
activity_input = dict()
try:
for requirement in self.runner.requirements(self.global_context):
value = self.global_context.get(requirement)
if value is not None:
activity_input.update({requirement: value})
activity_input.update({
'execution.domain': self.global_context.get('execution.domain'),
'execution.run_id': self.global_context.get('execution.run_id'),
'execution.workflow_id': self.global_context.get(
'execution.workflow_id')
})
except runner.NoRunnerRequirementsFound:
return self.global_context
return activity_input
class Activity(log.GarconLogger):
version = '1.0'
task_list = None
def __init__(self, client):
"""Instantiates an activity.
Args:
client: the boto client used for this activity.
"""
self.client = client
self.name = None
self.domain = None
self.task_list = None
@backoff.on_exception(
backoff.expo,
exceptions.ClientError,
max_tries=5,
giveup=utils.non_throttle_error,
on_backoff=utils.throttle_backoff_handler,
jitter=backoff.full_jitter)
def poll_for_activity(self, identity=None):
"""Runs Activity Poll.
If a SWF throttling exception is raised during a poll, the poll will
be retried up to 5 times using an exponential backoff algorithm.
Upgrading to boto3 would make this retry logic redundant.
Args:
identity (str): Identity of the worker making the request, which
is recorded in the ActivityTaskStarted event in the AWS
console. This enables diagnostic tracing when problems arise.
Return:
ActivityExecution: activity execution.
"""
additional_params = {}
if identity:
additional_params.update(identity=identity)
execution_definition = self.client.poll_for_activity_task(
domain=self.domain, taskList=dict(name=self.task_list),
**additional_params)
return ActivityExecution(
self.client, execution_definition.get('activityId'),
execution_definition.get('taskToken'),
execution_definition.get('input'))
def run(self, identity=None):
"""Activity Runner.
Information is pulled down from SWF and it checks if the Activity
can be run. As part of the information provided, the input of the
previous activity is consumed (context).
Args:
identity (str): Identity of the worker making the request, which
is recorded in the ActivityTaskStarted event in the AWS
console. This enables diagnostic tracing when problems arise.
"""
try:
if identity:
self.logger.debug('Polling with {}'.format(identity))
execution = self.poll_for_activity(identity)
except Exception as error:
# Catch exceptions raised during poll() to avoid an Activity thread
# dying & worker daemon unable to process the affected Activity.
# AWS api limits on SWF calls are a common source of such
# exceptions (see https://github.com/xethorn/garcon/pull/75)
# on_exception() can be overriden by the flow to send an alert
# when an exception occurs.
if self.on_exception:
self.on_exception(self, error)
self.logger.error(error, exc_info=True)
return True
self.set_log_context(execution.context)
if execution.activity_id:
try:
context = self.execute_activity(execution)
execution.complete(context)
except Exception as error:
# If the workflow has been stopped, it is not possible for the
# activity to be updated – it throws an exception which stops
# the worker immediately.
try:
execution.fail(str(error)[:255])
if self.on_exception:
self.on_exception(self, error)
except Exception as error2: # noqa: E722
if self.on_exception:
self.on_exception(self, error2)
self.unset_log_context()
return True
def execute_activity(self, activity):
"""Execute the runner.
Args:
activity (ActivityExecution): the activity execution.
Return:
dict: The result of the operation.
"""
return self.runner.execute(activity, activity.context)
def hydrate(self, data):
"""Hydrate the task with information provided.
Args:
data (dict): the data to use (if defined.)
"""
self.pool_size = 0
self.version = data.get('version') or self.version
self.name = self.name or data.get('name')
self.domain = getattr(self, 'domain', '') or data.get('domain')
self.requires = getattr(self, 'requires', []) or data.get('requires')
self.retry = getattr(self, 'retry', None) or data.get('retry', 0)
self.task_list = self.task_list or data.get('task_list')
self.on_exception = (
getattr(self, 'on_exception', None) or data.get('on_exception'))
# The start timeout is how long it will take between the scheduling
# of the activity and the start of the activity.
self.schedule_to_start_timeout = (
getattr(self, 'schedule_to_start_timeout', None) or
data.get('schedule_to_start') or
DEFAULT_ACTIVITY_SCHEDULE_TO_START)
# The previous way to create an activity was to fill a `tasks` param,
# which is not `run`.
self.runner = (
getattr(self, 'runner', None) or
data.get('run') or data.get('tasks'))
self.generators = getattr(
self, 'generators', None) or data.get('generators')
def instances(self, context):
"""Get all instances for one activity based on the current context.
There are two scenarios: when the activity worker has a generator and
when it does not. When it doesn't (the most simple case), there will
always be one instance returned.
Generators will however consume the context to calculate how many
instances of the activity are needed – and it will generate them
(regardless of their state.)
Args:
context (dict): the current context.
Return:
list: all the instances of the activity (for a current workflow
execution.)
"""
if not self.generators:
self.pool_size = 1
yield ActivityInstance(self, execution_context=context)
return
generator_values = []
for generator in self.generators:
generator_values.append(generator(context))
contexts = list(itertools.product(*generator_values))
self.pool_size = len(contexts)
for generator_contexts in contexts:
# Each generator returns a context, merge all the contexts
# to only be one - which can be used to 1/ create the id of the
# activity and 2/ be passed as a local context.
instance_context = dict()
for current_generator_context in generator_contexts:
instance_context.update(current_generator_context.items())
yield ActivityInstance(
self, execution_context=context,
local_context=instance_context)
class ExternalActivity(Activity):
"""External activity
One of the main advantages of SWF is the ability to write a workflow that
has activities written in any languages. The external activity class allows
to write the workflow in Garcon and benefit from some features (timeout
calculation among other things, sending context data.)
"""
def __init__(self, timeout=None, heartbeat=None):
"""Create the External Activity.
Args:
timeout (int): activity timeout in seconds (mandatory)
heartbeat (int): heartbeat timeout in seconds, if not defined, it
will be equal to the timeout.
"""
Activity.__init__(self, client=None)
self.runner = runner.External(timeout=timeout, heartbeat=heartbeat)
def run(self):
"""Run the external activity.
This activity is handled outside, so the run method should remain
unimplemented and return False (so the run loop stops.)
"""
return False
class ActivityExecution:
def __init__(self, client, activity_id, task_token, context):
"""Create an an activity execution.
Args:
client (boto3.client): the boto client (for easy access if needed).
activity_id (str): the activity id.
task_token (str): the task token.
context (str): data for the execution.
"""
self.client = client
self.activity_id = activity_id
self.task_token = task_token
self.context = context and json.loads(context) or dict()
def heartbeat(self, details=None):
"""Create a task heartbeat.
Args:
details (str): details to add to the heartbeat.
"""
self.client.record_activity_task_heartbeat(taskToken=self.task_token,
details=details or '')
def fail(self, reason=None):
"""Mark the activity execution as failed.
Args:
reason (str): optional reason for the failure.
"""
self.client.respond_activity_task_failed(
taskToken=self.task_token,
reason=reason or '')
def complete(self, context=None):
"""Mark the activity execution as completed.
Args:
context (str or dict): the context result of the operation.
"""
self.client.respond_activity_task_completed(
taskToken=self.task_token,
result=json.dumps(context))
class ActivityWorker():
def __init__(self, flow, activities=None):
"""Initiate an activity worker.
The activity worker takes into consideration all the activities from a
flow, or specific activities. Some activities (tasks) might require
more power than others, and may then be launched on different machines.
If a list of activities is passed, the worker will be focused on
completing those and will ignore all the others.
Args:
flow (module): the flow module.
activities (list): the list of activities that this worker should
handle.
"""
self.flow = flow
self.activities = find_workflow_activities(self.flow)
self.worker_activities = activities
def run(self):
"""Run the activities.
"""
for activity in self.activities:
if (self.worker_activities and
activity.name not in self.worker_activities):
continue
threading.Thread(target=worker_runner, args=(activity,)).start()
class ActivityState:
"""
Activity State
==============
Provides information about a specific activity instance state (if the
instance is already scheduled, has failed, or has been completed.) Along
with the default values, this class also provides additional metadata such
as the result of an activity instance.
"""
def __init__(self, activity_id):
"""Create a State.
Args:
activity_id (str): the activity id.
"""
self.activity_id = activity_id
self._result = None
self.states = []
@property
def result(self):
"""Get the result.
"""
if not self.ready:
raise ActivityInstanceNotReadyException()
return self._result
@property
def ready(self):
"""Check if an activity is ready.
"""
return self.get_last_state() == ACTIVITY_COMPLETED
def get_last_state(self):
"""Get the last state of the activity execution.
Return:
int: the state of the activity (see: activity.py)
"""
if len(self.states):
return self.states[-1]
return None
def add_state(self, state):
"""Add a state in the activity execution.
Args:
state (int): the state of the activity to add (see activity.py)
"""
self.states.append(state)
def set_result(self, result):
"""Set the result of the activity.
This method sometimes throws an exception: an activity id can only have
one result.
Args:
result (dict): Result of the activity.
"""
if self._result:
raise Exception('Result is immutable – it should not be changed.')
self._result = result
def wait(self):
"""Wait until ready.
"""
if not self.ready:
raise ActivityInstanceNotReadyException()
def worker_runner(worker):
"""Run indefinitely the worker.
Args:
worker (object): the Activity worker.
"""
while (worker.run()):
continue
def create(client, domain, workflow_name, version='1.0', on_exception=None):
"""Helper method to create Activities.
The helper method simplifies the creation of an activity by setting the
domain, the task list, and the activity dependencies (what other
activities need to be completed before this one can run).
Note:
The task list is generated based on the domain and the name of the
activity. Always make sure your activity name is unique.
Args:
client (boto3.client): the boto3 client.
domain (str): the domain name.
workflow_name (str): workflow name.
version (str): activity version.
on_exception (callable): the error handler.
Return:
callable: activity generator.
"""
def wrapper(**options):
activity = Activity(client)
if options.get('external'):
activity = ExternalActivity(
timeout=options.get('timeout'),
heartbeat=options.get('heartbeat'))
activity_name = '{name}_{activity}'.format(
name=workflow_name,
activity=options.get('name'))
activity.hydrate(dict(
domain=domain,
version=version,
name=activity_name,
generators=options.get('generators', []),
requires=options.get('requires', []),
retry=options.get('retry'),
task_list=activity_name,
tasks=options.get('tasks'),
run=options.get('run'),
schedule_to_start=options.get('schedule_to_start'),
on_exception=options.get('on_exception') or on_exception))
return activity
return wrapper
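# An illustrative sketch (not part of garcon) of how ``generators`` multiply a
# single activity into several instances: each generator maps the execution
# context to a list of local contexts, and ``instances`` yields an
# ActivityInstance per element of their cartesian product. The names below are
# hypothetical, and no runner or client is attached, so this only exercises
# instance creation, not execution.
def _example_generator_instances():
    def per_file(context):
        return [dict(file=name) for name in ('a.csv', 'b.csv')]
    demo = Activity(client=None)
    demo.hydrate(dict(
        domain='demo_domain',
        name='demo_activity',
        task_list='demo_activity',
        generators=[per_file]))
    # Two instances are produced, one per generated local context.
    return [instance.id for instance in demo.instances(dict())]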
def find_available_activities(flow, history, context):
"""Find all available activity instances of a flow.
The history contains all the information of our activities (their state).
This method focuses on finding all the activities that need to run.
Args:
flow (module): the flow module.
history (dict): the history information.
context (dict): from the context find the available activities.
"""
for instance in find_activities(flow, context):
# If an event is already available for the activity, it means it is
# not in standby anymore, it's either processing or has been completed.
# The activity is thus not available anymore.
states = history.get(instance.activity_name, {}).get(instance.id)
if states:
if states.get_last_state() != ACTIVITY_FAILED:
continue
elif (not instance.retry or
instance.retry < count_activity_failures(states)):
raise Exception(
'The activity has exceeded its retry limit.')
can_yield = True
for requirement in instance.activity_worker.requires:
require_history = history.get(requirement.name)
if not require_history:
can_yield = False
break
for requirement_states in require_history.values():
if ACTIVITY_COMPLETED not in requirement_states.states:
can_yield = False
break
if can_yield:
yield instance
def find_uncomplete_activities(flow, history, context):
"""Find uncomplete activity instances.
Uncomplete activities are all the activities that are not marked as
completed.
Args:
flow (module): the flow module.
history (dict): the history information.
context (dict): from the context find the available activities.
Yield:
activity: The available activity.
"""
for instance in find_activities(flow, context):
states = history.get(instance.activity_name, {}).get(instance.id)
if not states or ACTIVITY_COMPLETED not in states.states:
yield instance
def find_workflow_activities(flow):
"""Retrieves all the activities from a flow
Args:
flow (module): the flow module.
Return:
list: all the activities.
"""
activities = []
for module_attribute in dir(flow):
current_activity = getattr(flow, module_attribute)
if isinstance(current_activity, Activity):
activities.append(current_activity)
return activities
def find_activities(flow, context):
"""Retrieves all the activities from a flow.
Args:
flow (module): the flow module.
context (dict): the current context.
Return:
list: All the activity instances for the flow.
"""
activities = []
for module_attribute in dir(flow):
current_activity = getattr(flow, module_attribute)
if isinstance(current_activity, Activity):
for activity_instance in current_activity.instances(context):
activities.append(activity_instance)
return activities
def count_activity_failures(states):
"""Count the number of times an activity has failed.
Args:
states (dict): list of activity states.
Return:
int: The number of times an activity has failed.
"""
return len([evt for evt in states.states if evt == ACTIVITY_FAILED])
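# An illustrative sketch (not part of garcon) of the ActivityState lifecycle
# the helpers above rely on: states accumulate as events arrive, the result is
# only readable once the last state is ACTIVITY_COMPLETED, and
# count_activity_failures simply counts ACTIVITY_FAILED entries.
def _example_activity_state():
    state = ActivityState('demo_activity-1')
    state.add_state(ACTIVITY_SCHEDULED)
    state.add_state(ACTIVITY_FAILED)
    state.add_state(ACTIVITY_COMPLETED)
    state.set_result(dict(answer=42))
    assert state.ready
    assert state.result == dict(answer=42)
    assert count_activity_failures(state) == 1
    return state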
|
tree-height.py
|
# python3
import sys
import threading
sys.setrecursionlimit(10**7) # max depth of recursion
threading.stack_size(2**27) # new thread will get stack of such size
class TreeHeight:
def read(self):
self.n = int(sys.stdin.readline())
self.parent = list(map(int, sys.stdin.readline().split()))
self.nodes = {}
for i in range(self.n):
self.nodes[i] = []
for i in range(self.n):
if self.parent[i] == -1:
pass
else:
self.nodes[self.parent[i]] += [i]
def compute_height(self):
root = None
try:
root = self.parent.index(-1)
except ValueError:
return 0
queue = []
queue.append(root)
height = 0
while True:
node_count = len(queue)
if node_count == 0:
return height
height += 1
while node_count > 0:
node = queue[0]
queue.pop(0)
if self.nodes[node]:
for v in self.nodes[node]:
queue.append(v)
node_count -= 1
def main():
tree = TreeHeight()
tree.read()
print(tree.compute_height())
threading.Thread(target=main).start()
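# A behavior-equivalent sketch of the BFS above (not part of the original
# solution): collections.deque gives O(1) pops from the front, whereas
# queue.pop(0) on a list is O(n) per dequeue. ``parent`` and ``nodes`` are the
# same structures built by TreeHeight.read().
from collections import deque
def compute_height_bfs(parent, nodes):
    try:
        root = parent.index(-1)
    except ValueError:
        return 0
    queue, height = deque([root]), 0
    while queue:
        height += 1
        # Process one level per outer iteration.
        for _ in range(len(queue)):
            queue.extend(nodes[queue.popleft()])
    return height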
|
test_asyncore.py
|
import asyncore
import unittest
import select
import os
import socket
import sys
import time
import warnings
import errno
from test import support
from test.support import TESTFN, run_unittest, unlink
from io import BytesIO
from io import StringIO
try:
import threading
except ImportError:
threading = None
HOST = support.HOST
class dummysocket:
def __init__(self):
self.closed = False
def close(self):
self.closed = True
def fileno(self):
return 42
class dummychannel:
def __init__(self):
self.socket = dummysocket()
def close(self):
self.socket.close()
class exitingdummy:
def __init__(self):
pass
def handle_read_event(self):
raise asyncore.ExitNow()
handle_write_event = handle_read_event
handle_close = handle_read_event
handle_expt_event = handle_read_event
class crashingdummy:
def __init__(self):
self.error_handled = False
def handle_read_event(self):
raise Exception()
handle_write_event = handle_read_event
handle_close = handle_read_event
handle_expt_event = handle_read_event
def handle_error(self):
self.error_handled = True
# used when testing senders; just collects what it gets until newline is sent
def capture_server(evt, buf, serv):
try:
serv.listen(5)
conn, addr = serv.accept()
except socket.timeout:
pass
else:
n = 200
while n > 0:
r, w, e = select.select([conn], [], [])
if r:
data = conn.recv(10)
# keep everything except for the newline terminator
buf.write(data.replace(b'\n', b''))
if b'\n' in data:
break
n -= 1
time.sleep(0.01)
conn.close()
finally:
serv.close()
evt.set()
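# A minimal sketch (not part of the test suite) of the client side that
# capture_server expects: a dispatcher_with_send subclass connects, sends a
# newline-terminated payload, and pumps the asyncore loop until its output
# buffer drains. The names here are illustrative only.
def _example_capture_client(host, port, payload=b'spam\n'):
    class _Client(asyncore.dispatcher_with_send):
        def readable(self):
            return False
        def handle_connect(self):
            pass
    client = _Client()
    client.create_socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((host, port))
    client.send(payload)
    while client.out_buffer:
        asyncore.poll(timeout=0.01)
    client.close()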
class HelperFunctionTests(unittest.TestCase):
def test_readwriteexc(self):
# Check exception handling behavior of read, write and _exception
# check that ExitNow exceptions in the object handler method
# bubble all the way up through asyncore read/write/_exception calls
tr1 = exitingdummy()
self.assertRaises(asyncore.ExitNow, asyncore.read, tr1)
self.assertRaises(asyncore.ExitNow, asyncore.write, tr1)
self.assertRaises(asyncore.ExitNow, asyncore._exception, tr1)
# check that an exception other than ExitNow in the object handler
# method causes the handle_error method to get called
tr2 = crashingdummy()
asyncore.read(tr2)
self.assertEqual(tr2.error_handled, True)
tr2 = crashingdummy()
asyncore.write(tr2)
self.assertEqual(tr2.error_handled, True)
tr2 = crashingdummy()
asyncore._exception(tr2)
self.assertEqual(tr2.error_handled, True)
# asyncore.readwrite uses constants in the select module that
# are not present in Windows systems (see this thread:
# http://mail.python.org/pipermail/python-list/2001-October/109973.html)
# These constants should be present as long as poll is available
@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
def test_readwrite(self):
# Check that correct methods are called by readwrite()
attributes = ('read', 'expt', 'write', 'closed', 'error_handled')
expected = (
(select.POLLIN, 'read'),
(select.POLLPRI, 'expt'),
(select.POLLOUT, 'write'),
(select.POLLERR, 'closed'),
(select.POLLHUP, 'closed'),
(select.POLLNVAL, 'closed'),
)
class testobj:
def __init__(self):
self.read = False
self.write = False
self.closed = False
self.expt = False
self.error_handled = False
def handle_read_event(self):
self.read = True
def handle_write_event(self):
self.write = True
def handle_close(self):
self.closed = True
def handle_expt_event(self):
self.expt = True
def handle_error(self):
self.error_handled = True
for flag, expectedattr in expected:
tobj = testobj()
self.assertEqual(getattr(tobj, expectedattr), False)
asyncore.readwrite(tobj, flag)
# Only the attribute modified by the routine we expect to be
# called should be True.
for attr in attributes:
self.assertEqual(getattr(tobj, attr), attr==expectedattr)
# check that ExitNow exceptions in the object handler method
# bubble all the way up through the asyncore readwrite call
tr1 = exitingdummy()
self.assertRaises(asyncore.ExitNow, asyncore.readwrite, tr1, flag)
# check that an exception other than ExitNow in the object handler
# method causes the handle_error method to get called
tr2 = crashingdummy()
self.assertEqual(tr2.error_handled, False)
asyncore.readwrite(tr2, flag)
self.assertEqual(tr2.error_handled, True)
def test_closeall(self):
self.closeall_check(False)
def test_closeall_default(self):
self.closeall_check(True)
def closeall_check(self, usedefault):
# Check that close_all() closes everything in a given map
l = []
testmap = {}
for i in range(10):
c = dummychannel()
l.append(c)
self.assertEqual(c.socket.closed, False)
testmap[i] = c
if usedefault:
socketmap = asyncore.socket_map
try:
asyncore.socket_map = testmap
asyncore.close_all()
finally:
testmap, asyncore.socket_map = asyncore.socket_map, socketmap
else:
asyncore.close_all(testmap)
self.assertEqual(len(testmap), 0)
for c in l:
self.assertEqual(c.socket.closed, True)
def test_compact_traceback(self):
try:
raise Exception("I don't like spam!")
except:
real_t, real_v, real_tb = sys.exc_info()
r = asyncore.compact_traceback()
else:
self.fail("Expected exception")
(f, function, line), t, v, info = r
self.assertEqual(os.path.split(f)[-1], 'test_asyncore.py')
self.assertEqual(function, 'test_compact_traceback')
self.assertEqual(t, real_t)
self.assertEqual(v, real_v)
self.assertEqual(info, '[%s|%s|%s]' % (f, function, line))
class DispatcherTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
asyncore.close_all()
def test_basic(self):
d = asyncore.dispatcher()
self.assertEqual(d.readable(), True)
self.assertEqual(d.writable(), True)
def test_repr(self):
d = asyncore.dispatcher()
self.assertEqual(repr(d), '<asyncore.dispatcher at %#x>' % id(d))
def test_log(self):
d = asyncore.dispatcher()
# capture output of dispatcher.log() (to stderr)
fp = StringIO()
stderr = sys.stderr
l1 = "Lovely spam! Wonderful spam!"
l2 = "I don't like spam!"
try:
sys.stderr = fp
d.log(l1)
d.log(l2)
finally:
sys.stderr = stderr
lines = fp.getvalue().splitlines()
self.assertEqual(lines, ['log: %s' % l1, 'log: %s' % l2])
def test_log_info(self):
d = asyncore.dispatcher()
# capture output of dispatcher.log_info() (to stdout via print)
fp = StringIO()
stdout = sys.stdout
l1 = "Have you got anything without spam?"
l2 = "Why can't she have egg bacon spam and sausage?"
l3 = "THAT'S got spam in it!"
try:
sys.stdout = fp
d.log_info(l1, 'EGGS')
d.log_info(l2)
d.log_info(l3, 'SPAM')
finally:
sys.stdout = stdout
lines = fp.getvalue().splitlines()
expected = ['EGGS: %s' % l1, 'info: %s' % l2, 'SPAM: %s' % l3]
self.assertEqual(lines, expected)
def test_unhandled(self):
d = asyncore.dispatcher()
d.ignore_log_types = ()
# capture output of dispatcher.log_info() (to stdout via print)
fp = StringIO()
stdout = sys.stdout
try:
sys.stdout = fp
d.handle_expt()
d.handle_read()
d.handle_write()
d.handle_connect()
finally:
sys.stdout = stdout
lines = fp.getvalue().splitlines()
expected = ['warning: unhandled incoming priority event',
'warning: unhandled read event',
'warning: unhandled write event',
'warning: unhandled connect event']
self.assertEqual(lines, expected)
def test_issue_8594(self):
# XXX - this test is supposed to be removed in next major Python
# version
d = asyncore.dispatcher(socket.socket())
# make sure the error message no longer refers to the socket
# object but the dispatcher instance instead
self.assertRaisesRegex(AttributeError, 'dispatcher instance',
getattr, d, 'foo')
# cheap inheritance with the underlying socket is supposed
# to still work but a DeprecationWarning is expected
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
family = d.family
self.assertEqual(family, socket.AF_INET)
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[0].category, DeprecationWarning))
def test_strerror(self):
# refers to bug #8573
err = asyncore._strerror(errno.EPERM)
if hasattr(os, 'strerror'):
self.assertEqual(err, os.strerror(errno.EPERM))
err = asyncore._strerror(-1)
self.assertIn("unknown error", err.lower())
class dispatcherwithsend_noread(asyncore.dispatcher_with_send):
def readable(self):
return False
def handle_connect(self):
pass
class DispatcherWithSendTests(unittest.TestCase):
usepoll = False
def setUp(self):
pass
def tearDown(self):
asyncore.close_all()
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.reap_threads
def test_send(self):
evt = threading.Event()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(3)
port = support.bind_port(sock)
cap = BytesIO()
args = (evt, cap, sock)
t = threading.Thread(target=capture_server, args=args)
t.start()
try:
# wait a little longer for the server to initialize (it sometimes
# refuses connections on slow machines without this wait)
time.sleep(0.2)
data = b"Suppose there isn't a 16-ton weight?"
d = dispatcherwithsend_noread()
d.create_socket(socket.AF_INET, socket.SOCK_STREAM)
d.connect((HOST, port))
# give time for socket to connect
time.sleep(0.1)
d.send(data)
d.send(data)
d.send(b'\n')
n = 1000
while d.out_buffer and n > 0:
asyncore.poll()
n -= 1
evt.wait()
self.assertEqual(cap.getvalue(), data*2)
finally:
t.join()
class DispatcherWithSendTests_UsePoll(DispatcherWithSendTests):
usepoll = True
@unittest.skipUnless(hasattr(asyncore, 'file_wrapper'),
'asyncore.file_wrapper required')
class FileWrapperTest(unittest.TestCase):
def setUp(self):
self.d = b"It's not dead, it's sleeping!"
with open(TESTFN, 'wb') as file:
file.write(self.d)
def tearDown(self):
unlink(TESTFN)
def test_recv(self):
fd = os.open(TESTFN, os.O_RDONLY)
w = asyncore.file_wrapper(fd)
os.close(fd)
self.assertNotEqual(w.fd, fd)
self.assertNotEqual(w.fileno(), fd)
self.assertEqual(w.recv(13), b"It's not dead")
self.assertEqual(w.read(6), b", it's")
w.close()
self.assertRaises(OSError, w.read, 1)
def test_send(self):
d1 = b"Come again?"
d2 = b"I want to buy some cheese."
fd = os.open(TESTFN, os.O_WRONLY | os.O_APPEND)
w = asyncore.file_wrapper(fd)
os.close(fd)
w.write(d1)
w.send(d2)
w.close()
with open(TESTFN, 'rb') as file:
self.assertEqual(file.read(), self.d + d1 + d2)
@unittest.skipUnless(hasattr(asyncore, 'file_dispatcher'),
'asyncore.file_dispatcher required')
def test_dispatcher(self):
fd = os.open(TESTFN, os.O_RDONLY)
data = []
class FileDispatcher(asyncore.file_dispatcher):
def handle_read(self):
data.append(self.recv(29))
s = FileDispatcher(fd)
os.close(fd)
asyncore.loop(timeout=0.01, use_poll=True, count=2)
self.assertEqual(b"".join(data), self.d)
class BaseTestHandler(asyncore.dispatcher):
def __init__(self, sock=None):
asyncore.dispatcher.__init__(self, sock)
self.flag = False
def handle_accept(self):
raise Exception("handle_accept not supposed to be called")
def handle_accepted(self):
raise Exception("handle_accepted not supposed to be called")
def handle_connect(self):
raise Exception("handle_connect not supposed to be called")
def handle_expt(self):
raise Exception("handle_expt not supposed to be called")
def handle_close(self):
raise Exception("handle_close not supposed to be called")
def handle_error(self):
raise
class TCPServer(asyncore.dispatcher):
"""A server which listens on an address and dispatches the
connection to a handler.
"""
def __init__(self, handler=BaseTestHandler, host=HOST, port=0):
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind((host, port))
self.listen(5)
self.handler = handler
@property
def address(self):
return self.socket.getsockname()[:2]
def handle_accepted(self, sock, addr):
self.handler(sock)
def handle_error(self):
raise
class BaseClient(BaseTestHandler):
def __init__(self, address):
BaseTestHandler.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.connect(address)
def handle_connect(self):
pass
class BaseTestAPI(unittest.TestCase):
def tearDown(self):
asyncore.close_all()
    def loop_waiting_for_flag(self, instance, timeout=5):
        # Run asyncore.loop() in up to 100 short passes, checking instance.flag
        # between passes; fail only if the handler never sets the flag in time.
        timeout = float(timeout) / 100
        count = 100
while asyncore.socket_map and count > 0:
asyncore.loop(timeout=0.01, count=1, use_poll=self.use_poll)
if instance.flag:
return
count -= 1
time.sleep(timeout)
self.fail("flag not set")
def test_handle_connect(self):
# make sure handle_connect is called on connect()
class TestClient(BaseClient):
def handle_connect(self):
self.flag = True
server = TCPServer()
client = TestClient(server.address)
self.loop_waiting_for_flag(client)
def test_handle_accept(self):
# make sure handle_accept() is called when a client connects
class TestListener(BaseTestHandler):
def __init__(self):
BaseTestHandler.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.bind((HOST, 0))
self.listen(5)
self.address = self.socket.getsockname()[:2]
def handle_accept(self):
self.flag = True
server = TestListener()
client = BaseClient(server.address)
self.loop_waiting_for_flag(server)
def test_handle_accepted(self):
# make sure handle_accepted() is called when a client connects
class TestListener(BaseTestHandler):
def __init__(self):
BaseTestHandler.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.bind((HOST, 0))
self.listen(5)
self.address = self.socket.getsockname()[:2]
def handle_accept(self):
asyncore.dispatcher.handle_accept(self)
def handle_accepted(self, sock, addr):
sock.close()
self.flag = True
server = TestListener()
client = BaseClient(server.address)
self.loop_waiting_for_flag(server)
def test_handle_read(self):
# make sure handle_read is called on data received
class TestClient(BaseClient):
def handle_read(self):
self.flag = True
class TestHandler(BaseTestHandler):
def __init__(self, conn):
BaseTestHandler.__init__(self, conn)
self.send(b'x' * 1024)
server = TCPServer(TestHandler)
client = TestClient(server.address)
self.loop_waiting_for_flag(client)
def test_handle_write(self):
# make sure handle_write is called
class TestClient(BaseClient):
def handle_write(self):
self.flag = True
server = TCPServer()
client = TestClient(server.address)
self.loop_waiting_for_flag(client)
def test_handle_close(self):
# make sure handle_close is called when the other end closes
# the connection
class TestClient(BaseClient):
def handle_read(self):
# in order to make handle_close be called we are supposed
# to make at least one recv() call
self.recv(1024)
def handle_close(self):
self.flag = True
self.close()
class TestHandler(BaseTestHandler):
def __init__(self, conn):
BaseTestHandler.__init__(self, conn)
self.close()
server = TCPServer(TestHandler)
client = TestClient(server.address)
self.loop_waiting_for_flag(client)
@unittest.skipIf(sys.platform.startswith("sunos"),
"OOB support is broken on Solaris")
def test_handle_expt(self):
# Make sure handle_expt is called on OOB data received.
# Note: this might fail on some platforms as OOB data is
# tenuously supported and rarely used.
class TestClient(BaseClient):
def handle_expt(self):
self.flag = True
class TestHandler(BaseTestHandler):
def __init__(self, conn):
BaseTestHandler.__init__(self, conn)
self.socket.send(bytes(chr(244), 'latin-1'), socket.MSG_OOB)
server = TCPServer(TestHandler)
client = TestClient(server.address)
self.loop_waiting_for_flag(client)
def test_handle_error(self):
class TestClient(BaseClient):
def handle_write(self):
                1.0 / 0   # deliberately raise ZeroDivisionError so handle_error() runs
def handle_error(self):
self.flag = True
try:
raise
except ZeroDivisionError:
pass
else:
raise Exception("exception not raised")
server = TCPServer()
client = TestClient(server.address)
self.loop_waiting_for_flag(client)
def test_connection_attributes(self):
server = TCPServer()
client = BaseClient(server.address)
# we start disconnected
self.assertFalse(server.connected)
self.assertTrue(server.accepting)
# this can't be taken for granted across all platforms
#self.assertFalse(client.connected)
self.assertFalse(client.accepting)
# execute some loops so that client connects to server
asyncore.loop(timeout=0.01, use_poll=self.use_poll, count=100)
self.assertFalse(server.connected)
self.assertTrue(server.accepting)
self.assertTrue(client.connected)
self.assertFalse(client.accepting)
# disconnect the client
client.close()
self.assertFalse(server.connected)
self.assertTrue(server.accepting)
self.assertFalse(client.connected)
self.assertFalse(client.accepting)
# stop serving
server.close()
self.assertFalse(server.connected)
self.assertFalse(server.accepting)
def test_create_socket(self):
s = asyncore.dispatcher()
s.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.assertEqual(s.socket.family, socket.AF_INET)
SOCK_NONBLOCK = getattr(socket, 'SOCK_NONBLOCK', 0)
self.assertEqual(s.socket.type, socket.SOCK_STREAM | SOCK_NONBLOCK)
def test_bind(self):
s1 = asyncore.dispatcher()
s1.create_socket(socket.AF_INET, socket.SOCK_STREAM)
s1.bind((HOST, 0))
s1.listen(5)
port = s1.socket.getsockname()[1]
s2 = asyncore.dispatcher()
s2.create_socket(socket.AF_INET, socket.SOCK_STREAM)
# EADDRINUSE indicates the socket was correctly bound
self.assertRaises(socket.error, s2.bind, (HOST, port))
def test_set_reuse_addr(self):
sock = socket.socket()
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
except socket.error:
            self.skipTest("SO_REUSEADDR not supported on this platform")
else:
# if SO_REUSEADDR succeeded for sock we expect asyncore
# to do the same
s = asyncore.dispatcher(socket.socket())
self.assertFalse(s.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR))
s.socket.close()
s.create_socket(socket.AF_INET, socket.SOCK_STREAM)
s.set_reuse_addr()
self.assertTrue(s.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR))
finally:
sock.close()
class TestAPI_UseSelect(BaseTestAPI):
use_poll = False
@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
class TestAPI_UsePoll(BaseTestAPI):
use_poll = True
def test_main():
tests = [HelperFunctionTests, DispatcherTests, DispatcherWithSendTests,
DispatcherWithSendTests_UsePoll, TestAPI_UseSelect,
TestAPI_UsePoll, FileWrapperTest]
run_unittest(*tests)
if __name__ == "__main__":
test_main()
|
dvrk_mtm_test.py
|
#!/usr/bin/env python
# Author: Anton Deguet
# Date: 2017-07-22
# (C) Copyright 2017-2022 Johns Hopkins University (JHU), All Rights Reserved.
# --- begin cisst license - do not edit ---
# This software is provided "as is" under an open source license, with
# no warranty. The complete license can be found in license.txt and
# http://www.cisst.org/cisst/license.txt.
# --- end cisst license ---
# Start a single arm using
# > rosrun dvrk_robot dvrk_console_json -j <console-file>
# To communicate with the arm using ROS topics, see the python based example dvrk_arm_test.py:
# > rosrun dvrk_python dvrk_arm_test.py <arm-name>
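# Example invocation of this test (illustrative only; adapt to however dvrk_python nodes are
# launched in your ROS workspace), using the -a/--arm and -i/--interval options parsed below:
# > dvrk_mtm_test.py -a MTMR -i 0.01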
import argparse
import sys
import time
import threading
import rclpy
import dvrk
import math
import numpy
import PyKDL
from sensor_msgs.msg import Joy
# example of application using arm.py
class example_application:
# configuration
def configure(self, node, expected_interval):
print('configuring dvrk_mtm_test for node %s using namespace %s' % (node.get_name(), node.get_namespace()))
self.expected_interval = expected_interval
self.arm = dvrk.mtm(arm_name = node.get_namespace(),
ros_node = node,
expected_interval = expected_interval)
self.coag_event = threading.Event()
node.create_subscription(Joy, '/footpedals/coag',
self.coag_event_cb, 10)
# homing example
def home(self):
print('starting enable')
if not self.arm.enable(10):
sys.exit('failed to enable within 10 seconds')
print('starting home')
if not self.arm.home(10):
sys.exit('failed to home within 10 seconds')
# get current joints just to set size
print('move to starting position')
goal = numpy.copy(self.arm.setpoint_jp())
        # go to zero position
goal.fill(0)
self.arm.move_jp(goal).wait()
# foot pedal callback
def coag_event_cb(self, data):
if data.buttons[0] == 1:
self.coag_event.set()
# wait for foot pedal
def wait_for_coag(self):
self.coag_event.clear()
self.coag_event.wait(100000)
# tests
def tests(self):
# turn on gravity compensation
self.arm.use_gravity_compensation(True)
print('press COAG pedal to move to the next test')
print('arm will go limp, hold it and press coag')
self.wait_for_coag()
self.arm.body.servo_cf(numpy.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0]))
print('keep holding arm, press coag, a force in body frame will be applied (direction depends on wrist orientation)')
self.wait_for_coag()
self.arm.body_set_cf_orientation_absolute(False)
self.arm.body.servo_cf(numpy.array([0.0, 0.0, -3.0, 0.0, 0.0, 0.0]))
print('keep holding arm, press coag, a force in world frame will be applied (fixed direction)')
self.wait_for_coag()
self.arm.body_set_cf_orientation_absolute(True)
self.arm.body.servo_cf(numpy.array([0.0, 0.0, -3.0, 0.0, 0.0, 0.0]))
print('keep holding arm, press coag, orientation will be locked')
self.wait_for_coag()
self.arm.lock_orientation_as_is()
print('keep holding arm, press coag, force will be removed')
self.wait_for_coag()
self.arm.body.servo_cf(numpy.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0]))
print('keep holding arm, press coag, orientation will be unlocked')
self.wait_for_coag()
self.arm.unlock_orientation()
print('keep holding arm, press coag, arm will freeze in position')
self.wait_for_coag()
self.arm.move_jp(self.arm.measured_jp()).wait()
print('press coag to end')
self.wait_for_coag()
# main method
def run(self):
self.home()
self.tests()
if __name__ == '__main__':
# ros init node so we can use default ros arguments (e.g. __ns:= for namespace)
rclpy.init(args = sys.argv)
# parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--arm', type=str, required=True,
choices=['MTML', 'MTMR'],
help = 'arm name corresponding to ROS topics without namespace. Use __ns:= to specify the namespace')
parser.add_argument('-i', '--interval', type=float, default=0.01,
help = 'expected interval in seconds between messages sent by the device')
args = parser.parse_args(sys.argv[1:]) # skip argv[0], script name
node = rclpy.create_node('dvrk_mtm_test', namespace = args.arm)
application = example_application()
application.configure(node, args.interval)
executor = rclpy.executors.MultiThreadedExecutor()
executor.add_node(node)
executor_thread = threading.Thread(target = executor.spin, daemon = True)
executor_thread.start()
try:
application.run()
except KeyboardInterrupt:
pass
print('stopping ROS thread')
rclpy.shutdown()
executor_thread.join()
node.destroy_node()
|
context.py
|
import argparse
import atexit
import datetime
import logging
import os
import shlex
import signal
import sys
import threading
import time
from typing import Dict, Optional
import requests
from monitoring.monitorlib import ids, rid, scd, versioning
from monitoring.tracer.resources import ResourceSet
ENV_OPTIONS = 'TRACER_OPTIONS'
RID_SUBSCRIPTION_ID_CODE = 'tracer RID Subscription'
SCD_SUBSCRIPTION_ID_CODE = 'tracer SCD Subscription'
logging.basicConfig()
_logger = logging.getLogger('tracer.context')
_logger.setLevel(logging.DEBUG)
resources: Optional[ResourceSet] = None
class SubscriptionManagementError(RuntimeError):
def __init__(self, msg):
super(SubscriptionManagementError, self).__init__(msg)
def init() -> None:
if not os.environ.get(ENV_OPTIONS, None):
raise ValueError('{} environment variable must be specified'.format(ENV_OPTIONS))
parser = argparse.ArgumentParser(description="Subscribe to changes in DSS-tracked Entity status")
ResourceSet.add_arguments(parser)
parser.add_argument('--base-url', help='Base URL at which this server may be reached externally')
parser.add_argument('--monitor-rid', action='store_true', default=False, help='If specified, monitor ISA activity per the remote ID standard')
parser.add_argument('--monitor-scd', action='store_true', default=False, help='If specified, monitor Operation and Constraint activity per the strategic deconfliction standard')
args = parser.parse_args(shlex.split(os.environ[ENV_OPTIONS]))
global resources
resources = ResourceSet.from_arguments(args)
config = vars(args)
config['code_version'] = versioning.get_code_version()
resources.logger.logconfig(config)
try:
_logger.info('Establishing Subscriptions from PID {} at {}...'.format(os.getpid(), datetime.datetime.utcnow()))
_subscribe(resources, args.base_url, args.monitor_rid, args.monitor_scd)
_logger.info('Subscriptions established.')
except SubscriptionManagementError as e:
msg = 'Failed to initialize: {}'.format(e)
_logger.error(msg)
sys.stderr.write(msg)
sys.exit(-1)
cleanup = {
'lock': threading.Lock(),
'complete': False,
}
def shutdown(signal_number, stack_frame) -> None:
with cleanup['lock']:
if cleanup['complete']:
return
_logger.info('Cleaning up Subscriptions from PID {} at {}...'.format(os.getpid(), datetime.datetime.utcnow()))
_unsubscribe(resources, args.monitor_rid, args.monitor_scd)
_logger.info('Subscription cleanup complete.')
cleanup['complete'] = True
atexit.register(shutdown, None, None)
for sig in (signal.SIGABRT, signal.SIGINT, signal.SIGTERM):
signal.signal(sig, shutdown)
dt = (resources.end_time - datetime.datetime.utcnow()).total_seconds()
def terminate_at_expiration():
time.sleep(dt)
_logger.info('Terminating server at expiration of Subscription(s)')
os.kill(os.getpid(), signal.SIGINT)
threading.Thread(target=terminate_at_expiration, daemon=True).start()
def _subscribe(resources: ResourceSet, base_url: str, monitor_rid: bool, monitor_scd: bool) -> None:
if base_url.endswith('/'):
base_url = base_url[0:-1]
if monitor_rid:
_subscribe_rid(resources, base_url)
if monitor_scd:
_subscribe_scd(resources, base_url)
def _unsubscribe(resources: ResourceSet, monitor_rid: bool, monitor_scd: bool) -> None:
if monitor_rid:
_clear_existing_rid_subscription(resources, 'cleanup')
if monitor_scd:
_clear_existing_scd_subscription(resources, 'cleanup')
def _describe_response(resp: requests.Response, description: str) -> Dict:
info = {
'description': description,
'url': resp.url,
'code': resp.status_code,
}
try:
info['json'] = resp.json()
except ValueError:
info['body'] = resp.content
return info
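# Illustrative shape of the record produced by _describe_response() above (values are
# hypothetical; 'json' is replaced by a raw 'body' field when the response is not JSON):
#   {'description': 'Failed to create RID Subscription',
#    'url': 'https://dss.example.com/v1/dss/subscriptions/<id>',
#    'code': 409,
#    'json': {'message': 'Subscription already exists'}}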
def _rid_subscription_url():
sub_id = ids.make_id(RID_SUBSCRIPTION_ID_CODE)
return '/v1/dss/subscriptions/{}'.format(sub_id)
def _subscribe_rid(resources: ResourceSet, callback_url: str) -> None:
_clear_existing_rid_subscription(resources, 'old')
body = {
'extents': {
'spatial_volume': {
'footprint': {
'vertices': rid.vertices_from_latlng_rect(resources.area)
},
'altitude_lo': 0,
'altitude_hi': 3048,
},
'time_start': resources.start_time.strftime(rid.DATE_FORMAT),
'time_end': resources.end_time.strftime(rid.DATE_FORMAT),
},
'callbacks': {
'identification_service_area_url': callback_url
},
}
resp = resources.dss_client.put(_rid_subscription_url(), json=body, scope=rid.SCOPE_READ)
if resp.status_code != 200:
msg = 'Failed to create RID Subscription'
msg += ' -> ' + resources.logger.log_new('ridsubscription', _describe_response(resp, msg))
raise SubscriptionManagementError(msg)
msg = 'Created RID Subscription successfully'
resources.logger.log_new('ridsubscription', _describe_response(resp, msg))
def _clear_existing_rid_subscription(resources: ResourceSet, suffix: str):
url = _rid_subscription_url()
resp = resources.dss_client.get(url, scope=rid.SCOPE_READ)
if resp.status_code == 404:
return # This is the expected condition (no pre-existing Subscription)
elif resp.status_code == 200:
# There is a pre-existing Subscription; delete it
try:
resp_json = resp.json()
except ValueError:
msg = 'Response to get existing RID Subscription did not return valid JSON'
msg += ' -> ' + resources.logger.log_new('ridsubscription_{}'.format(suffix), _describe_response(resp, msg))
raise SubscriptionManagementError(msg)
version = resp_json.get('subscription', {}).get('version', None)
if not version:
msg = 'Response to get existing RID Subscription did not include a version'
      msg += ' -> ' + resources.logger.log_new('ridsubscription_{}'.format(suffix), _describe_response(resp, msg))
raise SubscriptionManagementError(msg)
resources.logger.log_new('ridsubscription_{}'.format(suffix), _describe_response(resp, 'RID Subscription retrieved successfully'))
del_url = url + '/{}'.format(resp_json['subscription']['version'])
resp = resources.dss_client.delete(del_url, scope=rid.SCOPE_READ)
if resp.status_code != 200:
msg = 'Response to delete existing RID Subscription indicated {}'.format(resp.status_code)
msg += ' -> ' + resources.logger.log_new('ridsubscription_{}_del'.format(suffix), _describe_response(resp, msg))
raise SubscriptionManagementError(msg)
resources.logger.log_new('ridsubscription_{}_del'.format(suffix), _describe_response(resp, 'RID Subscription deleted successfully'))
else:
# We expected to get a 200 or 404 but got something else instead
msg = 'Response to get existing RID Subscription did not return 200 or 404'
msg += ' -> ' + resources.logger.log_new('ridsubscription_{}'.format(suffix), _describe_response(resp, msg))
raise SubscriptionManagementError(msg)
def _scd_subscription_url():
sub_id = ids.make_id(SCD_SUBSCRIPTION_ID_CODE)
return '/dss/v1/subscriptions/{}'.format(sub_id)
def _subscribe_scd(resources: ResourceSet, base_url: str) -> None:
_clear_existing_scd_subscription(resources, 'old')
body = {
'extents': scd.make_vol4(
resources.start_time, resources.end_time, 0, 3048,
polygon=scd.make_polygon(latlngrect=resources.area)),
'old_version': 0,
'uss_base_url': base_url,
'notify_for_operations': True,
'notify_for_constraints': True,
}
resp = resources.dss_client.put(_scd_subscription_url(), json=body, scope=scd.SCOPE_SC)
if resp.status_code != 200:
msg = 'Failed to create SCD Subscription'
msg += ' -> ' + resources.logger.log_new('scdsubscription', _describe_response(resp, msg))
raise SubscriptionManagementError(msg)
msg = 'Created SCD Subscription successfully'
resources.logger.log_new('scdsubscription', _describe_response(resp, msg))
def _clear_existing_scd_subscription(resources: ResourceSet, suffix: str):
url = _scd_subscription_url()
resp = resources.dss_client.get(url, scope=scd.SCOPE_SC)
if resp.status_code == 404:
return # This is the expected condition (no pre-existing Subscription)
elif resp.status_code == 200:
# There is a pre-existing Subscription; delete it
try:
resp_json = resp.json()
except ValueError:
msg = 'Response to get existing SCD Subscription did not return valid JSON'
msg += ' -> ' + resources.logger.log_new('scdsubscription_{}'.format(suffix), _describe_response(resp, msg))
raise SubscriptionManagementError(msg)
version = resp_json.get('subscription', {}).get('version', None)
if version is None:
msg = 'Response to get existing SCD Subscription did not include a version'
      msg += ' -> ' + resources.logger.log_new('scdsubscription_{}'.format(suffix), _describe_response(resp, msg))
raise SubscriptionManagementError(msg)
resources.logger.log_new('scdsubscription_{}'.format(suffix), _describe_response(resp, 'SCD Subscription retrieved successfully'))
resp = resources.dss_client.delete(url, scope=scd.SCOPE_SC)
if resp.status_code != 200:
msg = 'Response to delete existing SCD Subscription indicated {}'.format(resp.status_code)
msg += ' -> ' + resources.logger.log_new('scdsubscription_{}_del'.format(suffix), _describe_response(resp, msg))
raise SubscriptionManagementError(msg)
resources.logger.log_new('scdsubscription_{}_del'.format(suffix), _describe_response(resp, 'SCD Subscription deleted successfully'))
else:
# We expected to get a 200 or 404 but got something else instead
msg = 'Response to get existing SCD Subscription did not return 200 or 404'
msg += ' -> ' + resources.logger.log_new('scdsubscription_{}'.format(suffix), _describe_response(resp, msg))
raise SubscriptionManagementError(msg)
|
cpuinfo.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2014-2021 Matthew Brennan Jones <matthew.brennan.jones@gmail.com>
# Py-cpuinfo gets CPU info with pure Python 2 & 3
# It uses the MIT License
# It is hosted at: https://github.com/workhorsy/py-cpuinfo
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
CPUINFO_VERSION = (8, 0, 0)
CPUINFO_VERSION_STRING = '.'.join([str(n) for n in CPUINFO_VERSION])
import os, sys
import platform
import multiprocessing
import ctypes
IS_PY2 = sys.version_info[0] == 2
CAN_CALL_CPUID_IN_SUBPROCESS = True
g_trace = None
class Trace(object):
def __init__(self, is_active, is_stored_in_string):
self._is_active = is_active
if not self._is_active:
return
from datetime import datetime
if IS_PY2:
from cStringIO import StringIO
else:
from io import StringIO
if is_stored_in_string:
self._output = StringIO()
else:
date = datetime.now().strftime("%Y-%m-%d_%H-%M-%S-%f")
self._output = open('cpuinfo_trace_{0}.trace'.format(date), 'w')
self._stdout = StringIO()
self._stderr = StringIO()
self._err = None
def header(self, msg):
if not self._is_active: return
from inspect import stack
frame = stack()[1]
file = frame[1]
line = frame[2]
self._output.write("{0} ({1} {2})\n".format(msg, file, line))
self._output.flush()
def success(self):
if not self._is_active: return
from inspect import stack
frame = stack()[1]
file = frame[1]
line = frame[2]
self._output.write("Success ... ({0} {1})\n\n".format(file, line))
self._output.flush()
def fail(self, msg):
if not self._is_active: return
from inspect import stack
frame = stack()[1]
file = frame[1]
line = frame[2]
if isinstance(msg, str):
msg = ''.join(['\t' + line for line in msg.split('\n')]) + '\n'
self._output.write(msg)
self._output.write("Failed ... ({0} {1})\n\n".format(file, line))
self._output.flush()
elif isinstance(msg, Exception):
from traceback import format_exc
err_string = format_exc()
self._output.write("\tFailed ... ({0} {1})\n".format(file, line))
self._output.write(''.join(['\t\t{0}\n'.format(n) for n in err_string.split('\n')]) + '\n')
self._output.flush()
def command_header(self, msg):
if not self._is_active: return
from inspect import stack
frame = stack()[3]
file = frame[1]
line = frame[2]
self._output.write("\t{0} ({1} {2})\n".format(msg, file, line))
self._output.flush()
def command_output(self, msg, output):
if not self._is_active: return
self._output.write("\t\t{0}\n".format(msg))
self._output.write(''.join(['\t\t\t{0}\n'.format(n) for n in output.split('\n')]) + '\n')
self._output.flush()
def keys(self, keys, info, new_info):
if not self._is_active: return
from inspect import stack
frame = stack()[2]
file = frame[1]
line = frame[2]
# List updated keys
self._output.write("\tChanged keys ({0} {1})\n".format(file, line))
changed_keys = [key for key in keys if key in info and key in new_info and info[key] != new_info[key]]
if changed_keys:
for key in changed_keys:
self._output.write('\t\t{0}: {1} to {2}\n'.format(key, info[key], new_info[key]))
else:
self._output.write('\t\tNone\n')
# List new keys
self._output.write("\tNew keys ({0} {1})\n".format(file, line))
new_keys = [key for key in keys if key in new_info and key not in info]
if new_keys:
for key in new_keys:
self._output.write('\t\t{0}: {1}\n'.format(key, new_info[key]))
else:
self._output.write('\t\tNone\n')
self._output.write('\n')
self._output.flush()
def write(self, msg):
if not self._is_active: return
self._output.write(msg + '\n')
self._output.flush()
def to_dict(self, info, is_fail):
return {
'output' : self._output.getvalue(),
'stdout' : self._stdout.getvalue(),
'stderr' : self._stderr.getvalue(),
'info' : info,
'err' : self._err,
'is_fail' : is_fail
}
class DataSource(object):
bits = platform.architecture()[0]
cpu_count = multiprocessing.cpu_count()
is_windows = platform.system().lower() == 'windows'
arch_string_raw = platform.machine()
uname_string_raw = platform.uname()[5]
can_cpuid = True
@staticmethod
def has_proc_cpuinfo():
return os.path.exists('/proc/cpuinfo')
@staticmethod
def has_dmesg():
return len(_program_paths('dmesg')) > 0
@staticmethod
def has_var_run_dmesg_boot():
uname = platform.system().strip().strip('"').strip("'").strip().lower()
return 'linux' in uname and os.path.exists('/var/run/dmesg.boot')
@staticmethod
def has_cpufreq_info():
return len(_program_paths('cpufreq-info')) > 0
@staticmethod
def has_sestatus():
return len(_program_paths('sestatus')) > 0
@staticmethod
def has_sysctl():
return len(_program_paths('sysctl')) > 0
@staticmethod
def has_isainfo():
return len(_program_paths('isainfo')) > 0
@staticmethod
def has_kstat():
return len(_program_paths('kstat')) > 0
@staticmethod
def has_sysinfo():
uname = platform.system().strip().strip('"').strip("'").strip().lower()
is_beos = 'beos' in uname or 'haiku' in uname
return is_beos and len(_program_paths('sysinfo')) > 0
@staticmethod
def has_lscpu():
return len(_program_paths('lscpu')) > 0
@staticmethod
def has_ibm_pa_features():
return len(_program_paths('lsprop')) > 0
@staticmethod
def has_wmic():
returncode, output = _run_and_get_stdout(['wmic', 'os', 'get', 'Version'])
return returncode == 0 and len(output) > 0
@staticmethod
def cat_proc_cpuinfo():
return _run_and_get_stdout(['cat', '/proc/cpuinfo'])
@staticmethod
def cpufreq_info():
return _run_and_get_stdout(['cpufreq-info'])
@staticmethod
def sestatus_b():
return _run_and_get_stdout(['sestatus', '-b'])
@staticmethod
def dmesg_a():
return _run_and_get_stdout(['dmesg', '-a'])
@staticmethod
def cat_var_run_dmesg_boot():
return _run_and_get_stdout(['cat', '/var/run/dmesg.boot'])
@staticmethod
def sysctl_machdep_cpu_hw_cpufrequency():
return _run_and_get_stdout(['sysctl', 'machdep.cpu', 'hw.cpufrequency'])
@staticmethod
def isainfo_vb():
return _run_and_get_stdout(['isainfo', '-vb'])
@staticmethod
def kstat_m_cpu_info():
return _run_and_get_stdout(['kstat', '-m', 'cpu_info'])
@staticmethod
def sysinfo_cpu():
return _run_and_get_stdout(['sysinfo', '-cpu'])
@staticmethod
def lscpu():
return _run_and_get_stdout(['lscpu'])
@staticmethod
def ibm_pa_features():
import glob
ibm_features = glob.glob('/proc/device-tree/cpus/*/ibm,pa-features')
if ibm_features:
return _run_and_get_stdout(['lsprop', ibm_features[0]])
@staticmethod
def wmic_cpu():
return _run_and_get_stdout(['wmic', 'cpu', 'get', 'Name,CurrentClockSpeed,L2CacheSize,L3CacheSize,Description,Caption,Manufacturer', '/format:list'])
@staticmethod
def winreg_processor_brand():
processor_brand = _read_windows_registry_key(r"Hardware\Description\System\CentralProcessor\0", "ProcessorNameString")
return processor_brand.strip()
@staticmethod
def winreg_vendor_id_raw():
vendor_id_raw = _read_windows_registry_key(r"Hardware\Description\System\CentralProcessor\0", "VendorIdentifier")
return vendor_id_raw
@staticmethod
def winreg_arch_string_raw():
arch_string_raw = _read_windows_registry_key(r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment", "PROCESSOR_ARCHITECTURE")
return arch_string_raw
@staticmethod
def winreg_hz_actual():
hz_actual = _read_windows_registry_key(r"Hardware\Description\System\CentralProcessor\0", "~Mhz")
hz_actual = _to_decimal_string(hz_actual)
return hz_actual
@staticmethod
def winreg_feature_bits():
feature_bits = _read_windows_registry_key(r"Hardware\Description\System\CentralProcessor\0", "FeatureSet")
return feature_bits
def _program_paths(program_name):
paths = []
	# Materialize the extension list so it can be re-used for every PATH entry;
	# a bare filter() iterator would be exhausted after the first directory on Python 3.
	exts = [e for e in os.environ.get('PATHEXT', '').split(os.pathsep) if e]
	path = os.environ['PATH']
	for p in path.split(os.pathsep):
p = os.path.join(p, program_name)
if os.access(p, os.X_OK):
paths.append(p)
for e in exts:
pext = p + e
if os.access(pext, os.X_OK):
paths.append(pext)
return paths
def _run_and_get_stdout(command, pipe_command=None):
from subprocess import Popen, PIPE
p1, p2, stdout_output, stderr_output = None, None, None, None
g_trace.command_header('Running command "' + ' '.join(command) + '" ...')
# Run the command normally
if not pipe_command:
p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
# Run the command and pipe it into another command
else:
p2 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
p1 = Popen(pipe_command, stdin=p2.stdout, stdout=PIPE, stderr=PIPE)
p2.stdout.close()
# Get the stdout and stderr
stdout_output, stderr_output = p1.communicate()
if not IS_PY2:
stdout_output = stdout_output.decode(encoding='UTF-8')
stderr_output = stderr_output.decode(encoding='UTF-8')
# Send the result to the logger
g_trace.command_output('return code:', str(p1.returncode))
g_trace.command_output('stdout:', stdout_output)
# Return the return code and stdout
return p1.returncode, stdout_output
def _read_windows_registry_key(key_name, field_name):
g_trace.command_header('Reading Registry key "{0}" field "{1}" ...'.format(key_name, field_name))
try:
import _winreg as winreg
except ImportError as err:
try:
import winreg
except ImportError as err:
pass
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, key_name)
value = winreg.QueryValueEx(key, field_name)[0]
winreg.CloseKey(key)
g_trace.command_output('value:', str(value))
return value
# Make sure we are running on a supported system
def _check_arch():
arch, bits = _parse_arch(DataSource.arch_string_raw)
if not arch in ['X86_32', 'X86_64', 'ARM_7', 'ARM_8',
'PPC_64', 'S390X', 'MIPS_32', 'MIPS_64',
'RISCV_32', 'RISCV_64', 'LOONG_32', 'LOONG_64']:
raise Exception("py-cpuinfo currently only works on X86 "
"and some ARM/PPC/S390X/MIPS/RISCV/LOONG CPUs.")
def _obj_to_b64(thing):
import pickle
import base64
a = thing
b = pickle.dumps(a)
c = base64.b64encode(b)
d = c.decode('utf8')
return d
def _b64_to_obj(thing):
import pickle
import base64
try:
a = base64.b64decode(thing)
b = pickle.loads(a)
return b
except:
return {}
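def _example_b64_roundtrip():
	# Illustrative only (not part of the original py-cpuinfo module): the two helpers
	# above are inverses, so a pickled-and-encoded object comes back unchanged.
	original = {'arch': 'X86_64', 'bits': 64}
	assert _b64_to_obj(_obj_to_b64(original)) == original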
def _utf_to_str(input):
if IS_PY2 and isinstance(input, unicode):
return input.encode('utf-8')
elif isinstance(input, list):
return [_utf_to_str(element) for element in input]
elif isinstance(input, dict):
return {_utf_to_str(key): _utf_to_str(value)
for key, value in input.items()}
else:
return input
def _copy_new_fields(info, new_info):
keys = [
'vendor_id_raw', 'hardware_raw', 'brand_raw', 'hz_advertised_friendly', 'hz_actual_friendly',
'hz_advertised', 'hz_actual', 'arch', 'bits', 'count',
'arch_string_raw', 'uname_string_raw',
'l2_cache_size', 'l2_cache_line_size', 'l2_cache_associativity',
'stepping', 'model', 'family',
'processor_type', 'flags',
'l3_cache_size', 'l1_data_cache_size', 'l1_instruction_cache_size'
]
g_trace.keys(keys, info, new_info)
# Update the keys with new values
for key in keys:
if new_info.get(key, None) and not info.get(key, None):
info[key] = new_info[key]
elif key == 'flags' and new_info.get('flags'):
for f in new_info['flags']:
if f not in info['flags']: info['flags'].append(f)
info['flags'].sort()
def _get_field_actual(cant_be_number, raw_string, field_names):
for line in raw_string.splitlines():
for field_name in field_names:
field_name = field_name.lower()
if ':' in line:
left, right = line.split(':', 1)
left = left.strip().lower()
right = right.strip()
if left == field_name and len(right) > 0:
if cant_be_number:
if not right.isdigit():
return right
else:
return right
return None
def _get_field(cant_be_number, raw_string, convert_to, default_value, *field_names):
retval = _get_field_actual(cant_be_number, raw_string, field_names)
# Convert the return value
if retval and convert_to:
try:
retval = convert_to(retval)
except:
retval = default_value
# Return the default if there is no return value
if retval is None:
retval = default_value
return retval
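def _example_get_field_usage():
	# Illustrative only (not part of the original py-cpuinfo module): shows how _get_field
	# pulls values out of colon-separated text such as /proc/cpuinfo output. The CPU name
	# below is made up.
	raw = 'model name : Example CPU @ 2.80GHz\ncpu MHz : 2793.652'
	assert _get_field(False, raw, None, '', 'model name') == 'Example CPU @ 2.80GHz'
	assert _get_field(False, raw, float, 0.0, 'cpu mhz') == 2793.652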
def _to_decimal_string(ticks):
try:
# Convert to string
ticks = '{0}'.format(ticks)
# Sometimes ',' is used as a decimal separator
ticks = ticks.replace(',', '.')
# Strip off non numbers and decimal places
ticks = "".join(n for n in ticks if n.isdigit() or n=='.').strip()
if ticks == '':
ticks = '0'
# Add decimal if missing
if '.' not in ticks:
ticks = '{0}.0'.format(ticks)
# Remove trailing zeros
ticks = ticks.rstrip('0')
# Add one trailing zero for empty right side
if ticks.endswith('.'):
ticks = '{0}0'.format(ticks)
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
return ticks
except:
return '0.0'
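def _example_to_decimal_string_usage():
	# Illustrative only (not part of the original py-cpuinfo module): clock-speed fragments
	# are normalized into a plain decimal string, tolerating comma separators and junk input.
	assert _to_decimal_string('2,80') == '2.8'
	assert _to_decimal_string(2793) == '2793.0'
	assert _to_decimal_string('garbage') == '0.0'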
def _hz_short_to_full(ticks, scale):
try:
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
# Scale the numbers
hz = ticks.lstrip('0')
old_index = hz.index('.')
hz = hz.replace('.', '')
hz = hz.ljust(scale + old_index+1, '0')
new_index = old_index + scale
hz = '{0}.{1}'.format(hz[:new_index], hz[new_index:])
left, right = hz.split('.')
left, right = int(left), int(right)
return (left, right)
except:
return (0, 0)
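def _example_hz_short_to_full_usage():
	# Illustrative only (not part of the original py-cpuinfo module): a decimal string plus a
	# power-of-ten scale (9 = GHz, 6 = MHz) becomes a (whole Hz, fractional Hz) pair.
	assert _hz_short_to_full('2.8', 9) == (2800000000, 0)
	assert _hz_short_to_full('1500.0', 6) == (1500000000, 0)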
def _hz_friendly_to_full(hz_string):
try:
hz_string = hz_string.strip().lower()
hz, scale = (None, None)
if hz_string.endswith('ghz'):
scale = 9
elif hz_string.endswith('mhz'):
scale = 6
elif hz_string.endswith('hz'):
scale = 0
hz = "".join(n for n in hz_string if n.isdigit() or n=='.').strip()
if not '.' in hz:
hz += '.0'
hz, scale = _hz_short_to_full(hz, scale)
return (hz, scale)
except:
return (0, 0)
def _hz_short_to_friendly(ticks, scale):
try:
# Get the raw Hz as a string
left, right = _hz_short_to_full(ticks, scale)
result = '{0}.{1}'.format(left, right)
# Get the location of the dot, and remove said dot
dot_index = result.index('.')
result = result.replace('.', '')
# Get the Hz symbol and scale
symbol = "Hz"
scale = 0
if dot_index > 9:
symbol = "GHz"
scale = 9
elif dot_index > 6:
symbol = "MHz"
scale = 6
elif dot_index > 3:
symbol = "KHz"
scale = 3
# Get the Hz with the dot at the new scaled point
result = '{0}.{1}'.format(result[:-scale-1], result[-scale-1:])
# Format the ticks to have 4 numbers after the decimal
# and remove any superfluous zeroes.
result = '{0:.4f} {1}'.format(float(result), symbol)
result = result.rstrip('0')
return result
except:
return '0.0000 Hz'
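def _example_hz_short_to_friendly_usage():
	# Illustrative only (not part of the original py-cpuinfo module): the same (ticks, scale)
	# inputs rendered as human readable strings.
	assert _hz_short_to_friendly('2.8', 9) == '2.8000 GHz'
	assert _hz_short_to_friendly('1500.0', 6) == '1.5000 GHz'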
def _to_friendly_bytes(input):
import re
if not input:
return input
input = "{0}".format(input)
formats = {
r"^[0-9]+B$" : 'B',
r"^[0-9]+K$" : 'KB',
r"^[0-9]+M$" : 'MB',
r"^[0-9]+G$" : 'GB'
}
for pattern, friendly_size in formats.items():
if re.match(pattern, input):
return "{0} {1}".format(input[ : -1].strip(), friendly_size)
return input
def _friendly_bytes_to_int(friendly_bytes):
input = friendly_bytes.lower()
formats = {
'gb' : 1024 * 1024 * 1024,
'mb' : 1024 * 1024,
'kb' : 1024,
'g' : 1024 * 1024 * 1024,
'm' : 1024 * 1024,
'k' : 1024,
'b' : 1,
}
try:
for pattern, multiplier in formats.items():
if input.endswith(pattern):
return int(input.split(pattern)[0].strip()) * multiplier
except Exception as err:
pass
return friendly_bytes
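def _example_friendly_bytes_usage():
	# Illustrative only (not part of the original py-cpuinfo module): bare size suffixes are
	# expanded to friendly strings, and friendly strings are converted back to byte counts.
	assert _to_friendly_bytes('256K') == '256 KB'
	assert _friendly_bytes_to_int('8 MB') == 8 * 1024 * 1024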
def _parse_cpu_brand_string(cpu_string):
# Just return 0 if the processor brand does not have the Hz
if not 'hz' in cpu_string.lower():
return ('0.0', 0)
hz = cpu_string.lower()
scale = 0
if hz.endswith('mhz'):
scale = 6
elif hz.endswith('ghz'):
scale = 9
if '@' in hz:
hz = hz.split('@')[1]
else:
hz = hz.rsplit(None, 1)[1]
hz = hz.rstrip('mhz').rstrip('ghz').strip()
hz = _to_decimal_string(hz)
return (hz, scale)
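def _example_parse_cpu_brand_string_usage():
	# Illustrative only (not part of the original py-cpuinfo module; the brand strings are
	# made up): the advertised frequency and its scale (9 = GHz, 6 = MHz) are pulled out of a
	# typical "... CPU @ X.XXGHz" brand string, with ('0.0', 0) returned when no Hz is present.
	assert _parse_cpu_brand_string('Example(R) CPU @ 2.80GHz') == ('2.8', 9)
	assert _parse_cpu_brand_string('Some CPU with no frequency') == ('0.0', 0)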
def _parse_cpu_brand_string_dx(cpu_string):
import re
# Find all the strings inside brackets ()
starts = [m.start() for m in re.finditer(r"\(", cpu_string)]
ends = [m.start() for m in re.finditer(r"\)", cpu_string)]
insides = {k: v for k, v in zip(starts, ends)}
insides = [cpu_string[start+1 : end] for start, end in insides.items()]
# Find all the fields
vendor_id, stepping, model, family = (None, None, None, None)
for inside in insides:
for pair in inside.split(','):
pair = [n.strip() for n in pair.split(':')]
if len(pair) > 1:
name, value = pair[0], pair[1]
if name == 'origin':
vendor_id = value.strip('"')
elif name == 'stepping':
stepping = int(value.lstrip('0x'), 16)
elif name == 'model':
model = int(value.lstrip('0x'), 16)
elif name in ['fam', 'family']:
family = int(value.lstrip('0x'), 16)
# Find the Processor Brand
# Strip off extra strings in brackets at end
brand = cpu_string.strip()
is_working = True
while is_working:
is_working = False
for inside in insides:
full = "({0})".format(inside)
if brand.endswith(full):
brand = brand[ :-len(full)].strip()
is_working = True
# Find the Hz in the brand string
hz_brand, scale = _parse_cpu_brand_string(brand)
# Find Hz inside brackets () after the brand string
if hz_brand == '0.0':
for inside in insides:
hz = inside
for entry in ['GHz', 'MHz', 'Hz']:
if entry in hz:
hz = "CPU @ " + hz[ : hz.find(entry) + len(entry)]
hz_brand, scale = _parse_cpu_brand_string(hz)
break
return (hz_brand, scale, brand, vendor_id, stepping, model, family)
def _parse_dmesg_output(output):
try:
# Get all the dmesg lines that might contain a CPU string
lines = output.split(' CPU0:')[1:] + \
output.split(' CPU1:')[1:] + \
output.split(' CPU:')[1:] + \
output.split('\nCPU0:')[1:] + \
output.split('\nCPU1:')[1:] + \
output.split('\nCPU:')[1:]
lines = [l.split('\n')[0].strip() for l in lines]
# Convert the lines to CPU strings
cpu_strings = [_parse_cpu_brand_string_dx(l) for l in lines]
# Find the CPU string that has the most fields
best_string = None
highest_count = 0
for cpu_string in cpu_strings:
count = sum([n is not None for n in cpu_string])
if count > highest_count:
highest_count = count
best_string = cpu_string
# If no CPU string was found, return {}
if not best_string:
return {}
hz_actual, scale, processor_brand, vendor_id, stepping, model, family = best_string
# Origin
if ' Origin=' in output:
fields = output[output.find(' Origin=') : ].split('\n')[0]
fields = fields.strip().split()
fields = [n.strip().split('=') for n in fields]
fields = [{n[0].strip().lower() : n[1].strip()} for n in fields]
for field in fields:
name = list(field.keys())[0]
value = list(field.values())[0]
if name == 'origin':
vendor_id = value.strip('"')
elif name == 'stepping':
stepping = int(value.lstrip('0x'), 16)
elif name == 'model':
model = int(value.lstrip('0x'), 16)
elif name in ['fam', 'family']:
family = int(value.lstrip('0x'), 16)
# Features
flag_lines = []
for category in [' Features=', ' Features2=', ' AMD Features=', ' AMD Features2=']:
if category in output:
flag_lines.append(output.split(category)[1].split('\n')[0])
flags = []
for line in flag_lines:
line = line.split('<')[1].split('>')[0].lower()
for flag in line.split(','):
flags.append(flag)
flags.sort()
# Convert from GHz/MHz string to Hz
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
# If advertised hz not found, use the actual hz
if hz_advertised == '0.0':
scale = 6
hz_advertised = _to_decimal_string(hz_actual)
info = {
'vendor_id_raw' : vendor_id,
'brand_raw' : processor_brand,
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
if hz_advertised and hz_advertised != '0.0':
info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, scale)
if hz_advertised and hz_advertised != '0.0':
info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
info['hz_actual'] = _hz_short_to_full(hz_actual, scale)
return {k: v for k, v in info.items() if v}
except Exception as err:
g_trace.fail(err)
#raise
pass
return {}
def _parse_arch(arch_string_raw):
import re
arch, bits = None, None
arch_string_raw = arch_string_raw.lower()
# X86
if re.match(r'^i\d86$|^x86$|^x86_32$|^i86pc$|^ia32$|^ia-32$|^bepc$', arch_string_raw):
arch = 'X86_32'
bits = 32
elif re.match(r'^x64$|^x86_64$|^x86_64t$|^i686-64$|^amd64$|^ia64$|^ia-64$', arch_string_raw):
arch = 'X86_64'
bits = 64
# ARM
elif re.match(r'^armv8-a|aarch64|arm64$', arch_string_raw):
arch = 'ARM_8'
bits = 64
elif re.match(r'^armv7$|^armv7[a-z]$|^armv7-[a-z]$|^armv6[a-z]$', arch_string_raw):
arch = 'ARM_7'
bits = 32
elif re.match(r'^armv8$|^armv8[a-z]$|^armv8-[a-z]$', arch_string_raw):
arch = 'ARM_8'
bits = 32
# PPC
elif re.match(r'^ppc32$|^prep$|^pmac$|^powermac$', arch_string_raw):
arch = 'PPC_32'
bits = 32
elif re.match(r'^powerpc$|^ppc64$|^ppc64le$', arch_string_raw):
arch = 'PPC_64'
bits = 64
# SPARC
elif re.match(r'^sparc32$|^sparc$', arch_string_raw):
arch = 'SPARC_32'
bits = 32
elif re.match(r'^sparc64$|^sun4u$|^sun4v$', arch_string_raw):
arch = 'SPARC_64'
bits = 64
# S390X
elif re.match(r'^s390x$', arch_string_raw):
arch = 'S390X'
bits = 64
# MIPS
elif re.match('^mips$', arch_string_raw):
arch = 'MIPS_32'
bits = 32
elif re.match('^mips64$', arch_string_raw):
arch = 'MIPS_64'
bits = 64
# RISCV
elif re.match(r'^riscv$|^riscv32$|^riscv32be$', arch_string_raw):
arch = 'RISCV_32'
bits = 32
elif re.match(r'^riscv64$|^riscv64be$', arch_string_raw):
arch = 'RISCV_64'
bits = 64
# LOONG
elif re.match('^loongarch32$', arch_string_raw):
arch = 'LOONG_32'
bits = 32
elif re.match('^loongarch64$', arch_string_raw):
arch = 'LOONG_64'
bits = 64
return (arch, bits)
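def _example_parse_arch_usage():
	# Illustrative only (not part of the original py-cpuinfo module): raw platform.machine()
	# strings map to canonical (arch, bits) pairs, and unknown strings map to (None, None).
	assert _parse_arch('x86_64') == ('X86_64', 64)
	assert _parse_arch('aarch64') == ('ARM_8', 64)
	assert _parse_arch('not-a-real-arch') == (None, None)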
def _is_bit_set(reg, bit):
mask = 1 << bit
is_set = reg & mask > 0
return is_set
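def _example_is_bit_set_usage():
	# Illustrative only (not part of the original py-cpuinfo module): _is_bit_set reads one
	# bit of a CPUID register value; 0b1010 has bits 1 and 3 set and bits 0 and 2 clear.
	assert _is_bit_set(0b1010, 1) is True
	assert _is_bit_set(0b1010, 0) is False
	assert _is_bit_set(0b1010, 3) is True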
def _is_selinux_enforcing(trace):
# Just return if the SE Linux Status Tool is not installed
if not DataSource.has_sestatus():
trace.fail('Failed to find sestatus.')
return False
# Run the sestatus, and just return if it failed to run
returncode, output = DataSource.sestatus_b()
if returncode != 0:
trace.fail('Failed to run sestatus. Skipping ...')
return False
# Figure out if explicitly in enforcing mode
for line in output.splitlines():
line = line.strip().lower()
if line.startswith("current mode:"):
if line.endswith("enforcing"):
return True
else:
return False
# Figure out if we can execute heap and execute memory
can_selinux_exec_heap = False
can_selinux_exec_memory = False
for line in output.splitlines():
line = line.strip().lower()
if line.startswith("allow_execheap") and line.endswith("on"):
can_selinux_exec_heap = True
elif line.startswith("allow_execmem") and line.endswith("on"):
can_selinux_exec_memory = True
trace.command_output('can_selinux_exec_heap:', can_selinux_exec_heap)
trace.command_output('can_selinux_exec_memory:', can_selinux_exec_memory)
return (not can_selinux_exec_heap or not can_selinux_exec_memory)
def _filter_dict_keys_with_empty_values(info):
# Filter out None, 0, "", (), {}, []
info = {k: v for k, v in info.items() if v}
# Filter out (0, 0)
info = {k: v for k, v in info.items() if v != (0, 0)}
# Filter out strings that start with "0.0"
info = {k: v for k, v in info.items() if not (type(v) == str and v.startswith('0.0'))}
return info
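def _example_filter_dict_usage():
	# Illustrative only (not part of the original py-cpuinfo module): falsy values, (0, 0)
	# pairs and '0.0...' strings are all dropped from the info dict.
	info = {'brand_raw': 'Example CPU', 'hz_advertised': (0, 0), 'flags': [], 'hz_actual_friendly': '0.0000 Hz'}
	assert _filter_dict_keys_with_empty_values(info) == {'brand_raw': 'Example CPU'}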
class ASM(object):
def __init__(self, restype=None, argtypes=(), machine_code=[]):
self.restype = restype
self.argtypes = argtypes
self.machine_code = machine_code
self.prochandle = None
self.mm = None
self.func = None
self.address = None
self.size = 0
def compile(self):
machine_code = bytes.join(b'', self.machine_code)
self.size = ctypes.c_size_t(len(machine_code))
if DataSource.is_windows:
# Allocate a memory segment the size of the machine code, and make it executable
size = len(machine_code)
# Alloc at least 1 page to ensure we own all pages that we want to change protection on
if size < 0x1000: size = 0x1000
MEM_COMMIT = ctypes.c_ulong(0x1000)
PAGE_READWRITE = ctypes.c_ulong(0x4)
pfnVirtualAlloc = ctypes.windll.kernel32.VirtualAlloc
pfnVirtualAlloc.restype = ctypes.c_void_p
self.address = pfnVirtualAlloc(None, ctypes.c_size_t(size), MEM_COMMIT, PAGE_READWRITE)
if not self.address:
raise Exception("Failed to VirtualAlloc")
# Copy the machine code into the memory segment
memmove = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)(ctypes._memmove_addr)
if memmove(self.address, machine_code, size) < 0:
raise Exception("Failed to memmove")
# Enable execute permissions
PAGE_EXECUTE = ctypes.c_ulong(0x10)
old_protect = ctypes.c_ulong(0)
pfnVirtualProtect = ctypes.windll.kernel32.VirtualProtect
res = pfnVirtualProtect(ctypes.c_void_p(self.address), ctypes.c_size_t(size), PAGE_EXECUTE, ctypes.byref(old_protect))
if not res:
raise Exception("Failed VirtualProtect")
# Flush Instruction Cache
# First, get process Handle
if not self.prochandle:
pfnGetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
pfnGetCurrentProcess.restype = ctypes.c_void_p
self.prochandle = ctypes.c_void_p(pfnGetCurrentProcess())
# Actually flush cache
res = ctypes.windll.kernel32.FlushInstructionCache(self.prochandle, ctypes.c_void_p(self.address), ctypes.c_size_t(size))
if not res:
raise Exception("Failed FlushInstructionCache")
else:
from mmap import mmap, MAP_PRIVATE, MAP_ANONYMOUS, PROT_WRITE, PROT_READ, PROT_EXEC
# Allocate a private and executable memory segment the size of the machine code
machine_code = bytes.join(b'', self.machine_code)
self.size = len(machine_code)
self.mm = mmap(-1, self.size, flags=MAP_PRIVATE | MAP_ANONYMOUS, prot=PROT_WRITE | PROT_READ | PROT_EXEC)
# Copy the machine code into the memory segment
self.mm.write(machine_code)
self.address = ctypes.addressof(ctypes.c_int.from_buffer(self.mm))
# Cast the memory segment into a function
functype = ctypes.CFUNCTYPE(self.restype, *self.argtypes)
self.func = functype(self.address)
def run(self):
# Call the machine code like a function
retval = self.func()
return retval
def free(self):
# Free the function memory segment
if DataSource.is_windows:
MEM_RELEASE = ctypes.c_ulong(0x8000)
ctypes.windll.kernel32.VirtualFree(ctypes.c_void_p(self.address), ctypes.c_size_t(0), MEM_RELEASE)
else:
self.mm.close()
self.prochandle = None
self.mm = None
self.func = None
self.address = None
self.size = 0
class CPUID(object):
def __init__(self, trace=None):
if trace == None:
trace = Trace(False, False)
# Figure out if SE Linux is on and in enforcing mode
self.is_selinux_enforcing = _is_selinux_enforcing(trace)
def _asm_func(self, restype=None, argtypes=(), machine_code=[]):
asm = ASM(restype, argtypes, machine_code)
asm.compile()
return asm
def _run_asm(self, *machine_code):
asm = ASM(ctypes.c_uint32, (), machine_code)
asm.compile()
retval = asm.run()
asm.free()
return retval
# http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
def get_vendor_id(self):
# EBX
ebx = self._run_asm(
b"\x31\xC0", # xor eax,eax
b"\x0F\xA2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
b"\x31\xC0", # xor eax,eax
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# EDX
edx = self._run_asm(
b"\x31\xC0", # xor eax,eax
b"\x0f\xa2" # cpuid
b"\x89\xD0" # mov ax,dx
b"\xC3" # ret
)
		# Each byte of the EBX, EDX and ECX registers is an ASCII character of the vendor ID string
vendor_id = []
for reg in [ebx, edx, ecx]:
for n in [0, 8, 16, 24]:
vendor_id.append(chr((reg >> n) & 0xFF))
vendor_id = ''.join(vendor_id)
return vendor_id
# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
def get_info(self):
# EAX
eax = self._run_asm(
b"\xB8\x01\x00\x00\x00", # mov eax,0x1"
b"\x0f\xa2" # cpuid
b"\xC3" # ret
)
# Get the CPU info
stepping_id = (eax >> 0) & 0xF # 4 bits
model = (eax >> 4) & 0xF # 4 bits
family_id = (eax >> 8) & 0xF # 4 bits
processor_type = (eax >> 12) & 0x3 # 2 bits
extended_model_id = (eax >> 16) & 0xF # 4 bits
extended_family_id = (eax >> 20) & 0xFF # 8 bits
family = 0
if family_id in [15]:
family = extended_family_id + family_id
else:
family = family_id
if family_id in [6, 15]:
model = (extended_model_id << 4) + model
return {
'stepping' : stepping_id,
'model' : model,
'family' : family,
'processor_type' : processor_type
}
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000000h:_Get_Highest_Extended_Function_Supported
def get_max_extension_support(self):
# Check for extension support
max_extension_support = self._run_asm(
b"\xB8\x00\x00\x00\x80" # mov ax,0x80000000
b"\x0f\xa2" # cpuid
b"\xC3" # ret
)
return max_extension_support
# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
def get_flags(self, max_extension_support):
# EDX
edx = self._run_asm(
b"\xB8\x01\x00\x00\x00", # mov eax,0x1"
b"\x0f\xa2" # cpuid
b"\x89\xD0" # mov ax,dx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
b"\xB8\x01\x00\x00\x00", # mov eax,0x1"
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# Get the CPU flags
flags = {
'fpu' : _is_bit_set(edx, 0),
'vme' : _is_bit_set(edx, 1),
'de' : _is_bit_set(edx, 2),
'pse' : _is_bit_set(edx, 3),
'tsc' : _is_bit_set(edx, 4),
'msr' : _is_bit_set(edx, 5),
'pae' : _is_bit_set(edx, 6),
'mce' : _is_bit_set(edx, 7),
'cx8' : _is_bit_set(edx, 8),
'apic' : _is_bit_set(edx, 9),
#'reserved1' : _is_bit_set(edx, 10),
'sep' : _is_bit_set(edx, 11),
'mtrr' : _is_bit_set(edx, 12),
'pge' : _is_bit_set(edx, 13),
'mca' : _is_bit_set(edx, 14),
'cmov' : _is_bit_set(edx, 15),
'pat' : _is_bit_set(edx, 16),
'pse36' : _is_bit_set(edx, 17),
'pn' : _is_bit_set(edx, 18),
'clflush' : _is_bit_set(edx, 19),
#'reserved2' : _is_bit_set(edx, 20),
'dts' : _is_bit_set(edx, 21),
'acpi' : _is_bit_set(edx, 22),
'mmx' : _is_bit_set(edx, 23),
'fxsr' : _is_bit_set(edx, 24),
'sse' : _is_bit_set(edx, 25),
'sse2' : _is_bit_set(edx, 26),
'ss' : _is_bit_set(edx, 27),
'ht' : _is_bit_set(edx, 28),
'tm' : _is_bit_set(edx, 29),
'ia64' : _is_bit_set(edx, 30),
'pbe' : _is_bit_set(edx, 31),
'pni' : _is_bit_set(ecx, 0),
'pclmulqdq' : _is_bit_set(ecx, 1),
'dtes64' : _is_bit_set(ecx, 2),
'monitor' : _is_bit_set(ecx, 3),
'ds_cpl' : _is_bit_set(ecx, 4),
'vmx' : _is_bit_set(ecx, 5),
'smx' : _is_bit_set(ecx, 6),
'est' : _is_bit_set(ecx, 7),
'tm2' : _is_bit_set(ecx, 8),
'ssse3' : _is_bit_set(ecx, 9),
'cid' : _is_bit_set(ecx, 10),
#'reserved3' : _is_bit_set(ecx, 11),
'fma' : _is_bit_set(ecx, 12),
'cx16' : _is_bit_set(ecx, 13),
'xtpr' : _is_bit_set(ecx, 14),
'pdcm' : _is_bit_set(ecx, 15),
#'reserved4' : _is_bit_set(ecx, 16),
'pcid' : _is_bit_set(ecx, 17),
'dca' : _is_bit_set(ecx, 18),
'sse4_1' : _is_bit_set(ecx, 19),
'sse4_2' : _is_bit_set(ecx, 20),
'x2apic' : _is_bit_set(ecx, 21),
'movbe' : _is_bit_set(ecx, 22),
'popcnt' : _is_bit_set(ecx, 23),
'tscdeadline' : _is_bit_set(ecx, 24),
'aes' : _is_bit_set(ecx, 25),
'xsave' : _is_bit_set(ecx, 26),
'osxsave' : _is_bit_set(ecx, 27),
'avx' : _is_bit_set(ecx, 28),
'f16c' : _is_bit_set(ecx, 29),
'rdrnd' : _is_bit_set(ecx, 30),
'hypervisor' : _is_bit_set(ecx, 31)
}
# Get a list of only the flags that are true
flags = [k for k, v in flags.items() if v]
# http://en.wikipedia.org/wiki/CPUID#EAX.3D7.2C_ECX.3D0:_Extended_Features
if max_extension_support >= 7:
# EBX
ebx = self._run_asm(
b"\x31\xC9", # xor ecx,ecx
b"\xB8\x07\x00\x00\x00" # mov eax,7
b"\x0f\xa2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
b"\x31\xC9", # xor ecx,ecx
b"\xB8\x07\x00\x00\x00" # mov eax,7
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# Get the extended CPU flags
extended_flags = {
#'fsgsbase' : _is_bit_set(ebx, 0),
#'IA32_TSC_ADJUST' : _is_bit_set(ebx, 1),
'sgx' : _is_bit_set(ebx, 2),
'bmi1' : _is_bit_set(ebx, 3),
'hle' : _is_bit_set(ebx, 4),
'avx2' : _is_bit_set(ebx, 5),
#'reserved' : _is_bit_set(ebx, 6),
'smep' : _is_bit_set(ebx, 7),
'bmi2' : _is_bit_set(ebx, 8),
'erms' : _is_bit_set(ebx, 9),
'invpcid' : _is_bit_set(ebx, 10),
'rtm' : _is_bit_set(ebx, 11),
'pqm' : _is_bit_set(ebx, 12),
#'FPU CS and FPU DS deprecated' : _is_bit_set(ebx, 13),
'mpx' : _is_bit_set(ebx, 14),
'pqe' : _is_bit_set(ebx, 15),
'avx512f' : _is_bit_set(ebx, 16),
'avx512dq' : _is_bit_set(ebx, 17),
'rdseed' : _is_bit_set(ebx, 18),
'adx' : _is_bit_set(ebx, 19),
'smap' : _is_bit_set(ebx, 20),
'avx512ifma' : _is_bit_set(ebx, 21),
'pcommit' : _is_bit_set(ebx, 22),
'clflushopt' : _is_bit_set(ebx, 23),
'clwb' : _is_bit_set(ebx, 24),
'intel_pt' : _is_bit_set(ebx, 25),
'avx512pf' : _is_bit_set(ebx, 26),
'avx512er' : _is_bit_set(ebx, 27),
'avx512cd' : _is_bit_set(ebx, 28),
'sha' : _is_bit_set(ebx, 29),
'avx512bw' : _is_bit_set(ebx, 30),
'avx512vl' : _is_bit_set(ebx, 31),
'prefetchwt1' : _is_bit_set(ecx, 0),
'avx512vbmi' : _is_bit_set(ecx, 1),
'umip' : _is_bit_set(ecx, 2),
'pku' : _is_bit_set(ecx, 3),
'ospke' : _is_bit_set(ecx, 4),
#'reserved' : _is_bit_set(ecx, 5),
'avx512vbmi2' : _is_bit_set(ecx, 6),
#'reserved' : _is_bit_set(ecx, 7),
'gfni' : _is_bit_set(ecx, 8),
'vaes' : _is_bit_set(ecx, 9),
'vpclmulqdq' : _is_bit_set(ecx, 10),
'avx512vnni' : _is_bit_set(ecx, 11),
'avx512bitalg' : _is_bit_set(ecx, 12),
#'reserved' : _is_bit_set(ecx, 13),
'avx512vpopcntdq' : _is_bit_set(ecx, 14),
#'reserved' : _is_bit_set(ecx, 15),
#'reserved' : _is_bit_set(ecx, 16),
#'mpx0' : _is_bit_set(ecx, 17),
#'mpx1' : _is_bit_set(ecx, 18),
#'mpx2' : _is_bit_set(ecx, 19),
#'mpx3' : _is_bit_set(ecx, 20),
#'mpx4' : _is_bit_set(ecx, 21),
'rdpid' : _is_bit_set(ecx, 22),
#'reserved' : _is_bit_set(ecx, 23),
#'reserved' : _is_bit_set(ecx, 24),
#'reserved' : _is_bit_set(ecx, 25),
#'reserved' : _is_bit_set(ecx, 26),
#'reserved' : _is_bit_set(ecx, 27),
#'reserved' : _is_bit_set(ecx, 28),
#'reserved' : _is_bit_set(ecx, 29),
'sgx_lc' : _is_bit_set(ecx, 30),
#'reserved' : _is_bit_set(ecx, 31)
}
# Get a list of only the flags that are true
extended_flags = [k for k, v in extended_flags.items() if v]
flags += extended_flags
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000001h:_Extended_Processor_Info_and_Feature_Bits
if max_extension_support >= 0x80000001:
# EBX
ebx = self._run_asm(
b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
b"\x0f\xa2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# Get the extended CPU flags
extended_flags = {
'fpu' : _is_bit_set(edx, 0),
'vme' : _is_bit_set(edx, 1),
'de' : _is_bit_set(edx, 2),
'pse' : _is_bit_set(edx, 3),
'tsc' : _is_bit_set(edx, 4),
'msr' : _is_bit_set(edx, 5),
'pae' : _is_bit_set(edx, 6),
'mce' : _is_bit_set(edx, 7),
'cx8' : _is_bit_set(edx, 8),
'apic' : _is_bit_set(edx, 9),
#'reserved' : _is_bit_set(edx, 10),
'syscall' : _is_bit_set(edx, 11),
'mtrr' : _is_bit_set(edx, 12),
'pge' : _is_bit_set(edx, 13),
'mca' : _is_bit_set(edx, 14),
'cmov' : _is_bit_set(edx, 15),
'pat' : _is_bit_set(edx, 16),
'pse36' : _is_bit_set(edx, 17),
#'reserved' : _is_bit_set(edx, 18),
'mp' : _is_bit_set(edx, 19),
'nx' : _is_bit_set(edx, 20),
#'reserved' : _is_bit_set(edx, 21),
'mmxext' : _is_bit_set(edx, 22),
'mmx' : _is_bit_set(edx, 23),
'fxsr' : _is_bit_set(edx, 24),
'fxsr_opt' : _is_bit_set(edx, 25),
'pdpe1gb' : _is_bit_set(edx, 26),
'rdtscp' : _is_bit_set(edx, 27),
#'reserved' : _is_bit_set(edx, 28),
'lm' : _is_bit_set(edx, 29),
'3dnowext' : _is_bit_set(edx, 30),
'3dnow' : _is_bit_set(edx, 31),
'lahf_lm' : _is_bit_set(ecx, 0),
'cmp_legacy' : _is_bit_set(ecx, 1),
'svm' : _is_bit_set(ecx, 2),
'extapic' : _is_bit_set(ecx, 3),
'cr8_legacy' : _is_bit_set(ecx, 4),
'abm' : _is_bit_set(ecx, 5),
'sse4a' : _is_bit_set(ecx, 6),
'misalignsse' : _is_bit_set(ecx, 7),
'3dnowprefetch' : _is_bit_set(ecx, 8),
'osvw' : _is_bit_set(ecx, 9),
'ibs' : _is_bit_set(ecx, 10),
'xop' : _is_bit_set(ecx, 11),
'skinit' : _is_bit_set(ecx, 12),
'wdt' : _is_bit_set(ecx, 13),
#'reserved' : _is_bit_set(ecx, 14),
'lwp' : _is_bit_set(ecx, 15),
'fma4' : _is_bit_set(ecx, 16),
'tce' : _is_bit_set(ecx, 17),
#'reserved' : _is_bit_set(ecx, 18),
'nodeid_msr' : _is_bit_set(ecx, 19),
#'reserved' : _is_bit_set(ecx, 20),
'tbm' : _is_bit_set(ecx, 21),
'topoext' : _is_bit_set(ecx, 22),
'perfctr_core' : _is_bit_set(ecx, 23),
'perfctr_nb' : _is_bit_set(ecx, 24),
#'reserved' : _is_bit_set(ecx, 25),
'dbx' : _is_bit_set(ecx, 26),
'perftsc' : _is_bit_set(ecx, 27),
'pci_l2i' : _is_bit_set(ecx, 28),
#'reserved' : _is_bit_set(ecx, 29),
#'reserved' : _is_bit_set(ecx, 30),
#'reserved' : _is_bit_set(ecx, 31)
}
# Get a list of only the flags that are true
extended_flags = [k for k, v in extended_flags.items() if v]
flags += extended_flags
flags.sort()
return flags
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000002h.2C80000003h.2C80000004h:_Processor_Brand_String
def get_processor_brand(self, max_extension_support):
processor_brand = ""
# Processor brand string
if max_extension_support >= 0x80000004:
instructions = [
b"\xB8\x02\x00\x00\x80", # mov ax,0x80000002
b"\xB8\x03\x00\x00\x80", # mov ax,0x80000003
b"\xB8\x04\x00\x00\x80" # mov ax,0x80000004
]
for instruction in instructions:
# EAX
eax = self._run_asm(
instruction, # mov ax,0x8000000?
b"\x0f\xa2" # cpuid
b"\x89\xC0" # mov ax,ax
b"\xC3" # ret
)
# EBX
ebx = self._run_asm(
instruction, # mov ax,0x8000000?
b"\x0f\xa2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
instruction, # mov ax,0x8000000?
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# EDX
edx = self._run_asm(
instruction, # mov ax,0x8000000?
b"\x0f\xa2" # cpuid
b"\x89\xD0" # mov ax,dx
b"\xC3" # ret
)
# Combine each of the 4 bytes in each register into the string
for reg in [eax, ebx, ecx, edx]:
for n in [0, 8, 16, 24]:
processor_brand += chr((reg >> n) & 0xFF)
# Strip off any trailing NULL terminators and white space
processor_brand = processor_brand.strip("\0").strip()
return processor_brand
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000006h:_Extended_L2_Cache_Features
def get_cache(self, max_extension_support):
cache_info = {}
# Just return if the cache feature is not supported
if max_extension_support < 0x80000006:
return cache_info
# ECX
ecx = self._run_asm(
b"\xB8\x06\x00\x00\x80" # mov ax,0x80000006
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# CPUID leaf 0x80000006 packs the L2 details into ECX: bits 31:16 hold the
# cache size in KB, bits 15:12 the associativity code, and bits 7:0 the line size in bytes
cache_info = {
'size_b' : ((ecx >> 16) & 0xFFFF) * 1024,
'associativity' : (ecx >> 12) & 0xF,
'line_size_b' : ecx & 0xFF
}
return cache_info
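# Worked example (hypothetical values, added for clarity): if CPUID leaf
# 0x80000006 returned ECX = 0x02006140, the decode above would report a
# 512 KB L2 cache (0x0200 KB * 1024), associativity code 0x6, and a
# 64-byte cache line (0x40).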
def get_ticks_func(self):
retval = None
if DataSource.bits == '32bit':
# Works on x86_32
restype = None
argtypes = (ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
get_ticks_x86_32 = self._asm_func(restype, argtypes,
[
b"\x55", # push bp
b"\x89\xE5", # mov bp,sp
b"\x31\xC0", # xor ax,ax
b"\x0F\xA2", # cpuid
b"\x0F\x31", # rdtsc
b"\x8B\x5D\x08", # mov bx,[di+0x8]
b"\x8B\x4D\x0C", # mov cx,[di+0xc]
b"\x89\x13", # mov [bp+di],dx
b"\x89\x01", # mov [bx+di],ax
b"\x5D", # pop bp
b"\xC3" # ret
]
)
# Monkey patch func to combine high and low args into one return
old_func = get_ticks_x86_32.func
def new_func():
# Pass two uint32s into function
high = ctypes.c_uint32(0)
low = ctypes.c_uint32(0)
old_func(ctypes.byref(high), ctypes.byref(low))
# Shift the two uint32s into one uint64
retval = ((high.value << 32) & 0xFFFFFFFF00000000) | low.value
return retval
get_ticks_x86_32.func = new_func
retval = get_ticks_x86_32
elif DataSource.bits == '64bit':
# Works on x86_64
restype = ctypes.c_uint64
argtypes = ()
get_ticks_x86_64 = self._asm_func(restype, argtypes,
[
b"\x48", # dec ax
b"\x31\xC0", # xor ax,ax
b"\x0F\xA2", # cpuid
b"\x0F\x31", # rdtsc
b"\x48", # dec ax
b"\xC1\xE2\x20", # shl dx,byte 0x20
b"\x48", # dec ax
b"\x09\xD0", # or ax,dx
b"\xC3", # ret
]
)
retval = get_ticks_x86_64
return retval
def get_raw_hz(self):
from time import sleep
ticks_fn = self.get_ticks_func()
start = ticks_fn.func()
sleep(1)
end = ticks_fn.func()
ticks = (end - start)
ticks_fn.free()
return ticks
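# Note: get_raw_hz() above estimates ticks-per-second by sampling the TSC via
# rdtsc before and after a one-second sleep. On CPUs with an invariant TSC this
# approximates the base clock rather than the momentary boost clock (an
# interpretation added for clarity, not a claim made by the original source).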
def _get_cpu_info_from_cpuid_actual():
'''
Warning! This function has the potential to crash the Python runtime.
Do not call it directly. Use the _get_cpu_info_from_cpuid function instead.
It will safely call this function in another process.
'''
if IS_PY2:
from cStringIO import StringIO
else:
from io import StringIO
trace = Trace(True, True)
info = {}
# Pipe stdout and stderr to strings
sys.stdout = trace._stdout
sys.stderr = trace._stderr
try:
# Get the CPU arch and bits
arch, bits = _parse_arch(DataSource.arch_string_raw)
# Return none if this is not an X86 CPU
if not arch in ['X86_32', 'X86_64']:
trace.fail('Not running on X86_32 or X86_64. Skipping ...')
return trace.to_dict(info, True)
# Return none if SE Linux is in enforcing mode
cpuid = CPUID(trace)
if cpuid.is_selinux_enforcing:
trace.fail('SELinux is enforcing. Skipping ...')
return trace.to_dict(info, True)
# Get the cpu info from the CPUID register
max_extension_support = cpuid.get_max_extension_support()
cache_info = cpuid.get_cache(max_extension_support)
info = cpuid.get_info()
processor_brand = cpuid.get_processor_brand(max_extension_support)
# Get the Hz and scale
hz_actual = cpuid.get_raw_hz()
hz_actual = _to_decimal_string(hz_actual)
# Get the Hz and scale
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
info = {
'vendor_id_raw' : cpuid.get_vendor_id(),
'hardware_raw' : '',
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, 0),
'l2_cache_size' : cache_info['size_b'],
'l2_cache_line_size' : cache_info['line_size_b'],
'l2_cache_associativity' : cache_info['associativity'],
'stepping' : info['stepping'],
'model' : info['model'],
'family' : info['family'],
'processor_type' : info['processor_type'],
'flags' : cpuid.get_flags(max_extension_support)
}
info = _filter_dict_keys_with_empty_values(info)
trace.success()
except Exception as err:
from traceback import format_exc
err_string = format_exc()
trace._err = ''.join(['\t\t{0}\n'.format(n) for n in err_string.split('\n')]) + '\n'
return trace.to_dict(info, True)
return trace.to_dict(info, False)
def _get_cpu_info_from_cpuid_subprocess_wrapper(queue):
orig_stdout = sys.stdout
orig_stderr = sys.stderr
output = _get_cpu_info_from_cpuid_actual()
sys.stdout = orig_stdout
sys.stderr = orig_stderr
queue.put(_obj_to_b64(output))
def _get_cpu_info_from_cpuid():
'''
Returns the CPU info gathered by querying the X86 cpuid register in a new process.
Returns {} on non X86 cpus.
Returns {} if SELinux is in enforcing mode.
'''
g_trace.header('Trying to get info from CPUID ...')
from multiprocessing import Process, Queue
# Return {} if can't cpuid
if not DataSource.can_cpuid:
g_trace.fail('Can\'t CPUID. Skipping ...')
return {}
# Get the CPU arch and bits
arch, bits = _parse_arch(DataSource.arch_string_raw)
# Return {} if this is not an X86 CPU
if not arch in ['X86_32', 'X86_64']:
g_trace.fail('Not running on X86_32 or X86_64. Skipping ...')
return {}
try:
if CAN_CALL_CPUID_IN_SUBPROCESS:
# Start running the function in a subprocess
queue = Queue()
p = Process(target=_get_cpu_info_from_cpuid_subprocess_wrapper, args=(queue,))
p.start()
# Wait for the process to end, while it is still alive
while p.is_alive():
p.join(0)
# Return {} if it failed
if p.exitcode != 0:
g_trace.fail('Failed to run CPUID in process. Skipping ...')
return {}
# Return {} if no results
if queue.empty():
g_trace.fail('Failed to get anything from CPUID process. Skipping ...')
return {}
# Return the result, only if there is something to read
else:
output = _b64_to_obj(queue.get())
import pprint
pp = pprint.PrettyPrinter(indent=4)
#pp.pprint(output)
if 'output' in output and output['output']:
g_trace.write(output['output'])
if 'stdout' in output and output['stdout']:
sys.stdout.write('{0}\n'.format(output['stdout']))
sys.stdout.flush()
if 'stderr' in output and output['stderr']:
sys.stderr.write('{0}\n'.format(output['stderr']))
sys.stderr.flush()
if 'is_fail' not in output:
g_trace.fail('Failed to get is_fail from CPUID process. Skipping ...')
return {}
# Fail if there was an exception
if 'err' in output and output['err']:
g_trace.fail('Failed to run CPUID in process. Skipping ...')
g_trace.write(output['err'])
g_trace.write('Failed ...')
return {}
if 'is_fail' in output and output['is_fail']:
g_trace.write('Failed ...')
return {}
if 'info' not in output or not output['info']:
g_trace.fail('Failed to get return info from CPUID process. Skipping ...')
return {}
return output['info']
else:
# FIXME: This should write the values like in the above call to actual
orig_stdout = sys.stdout
orig_stderr = sys.stderr
output = _get_cpu_info_from_cpuid_actual()
sys.stdout = orig_stdout
sys.stderr = orig_stderr
g_trace.success()
return output['info']
except Exception as err:
g_trace.fail(err)
pass
# Return {} if everything failed
return {}
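# --- Illustrative sketch (added for clarity; not part of the original module) ---
# _get_cpu_info_from_cpuid() isolates the crash-prone CPUID call in a child
# process and ships the result back through a Queue, so a segfault in the child
# cannot take down the caller. A minimal, hypothetical version of that pattern
# is sketched below; the helper names are placeholders, not library API.
def _example_cpuid_worker(queue):
    # Child-process side: run the risky call and report the raw result.
    queue.put(_get_cpu_info_from_cpuid_actual())

def _example_run_cpuid_isolated():
    from multiprocessing import Process, Queue
    queue = Queue()
    p = Process(target=_example_cpuid_worker, args=(queue,))
    p.start()
    p.join()
    # A non-zero exit code means the child crashed before it could report back.
    if p.exitcode != 0 or queue.empty():
        return None
    return queue.get()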
def _get_cpu_info_from_proc_cpuinfo():
'''
Returns the CPU info gathered from /proc/cpuinfo.
Returns {} if /proc/cpuinfo is not found.
'''
g_trace.header('Trying to get info from /proc/cpuinfo ...')
try:
# Just return {} if there is no cpuinfo
if not DataSource.has_proc_cpuinfo():
g_trace.fail('Failed to find /proc/cpuinfo. Skipping ...')
return {}
returncode, output = DataSource.cat_proc_cpuinfo()
if returncode != 0:
g_trace.fail('Failed to run cat /proc/cpuinfo. Skipping ...')
return {}
# Various fields
vendor_id = _get_field(False, output, None, '', 'vendor_id', 'vendor id', 'vendor')
processor_brand = _get_field(True, output, None, None, 'model name','cpu', 'processor')
cache_size = _get_field(False, output, None, '', 'cache size')
stepping = _get_field(False, output, int, 0, 'stepping')
model = _get_field(False, output, int, 0, 'model')
family = _get_field(False, output, int, 0, 'cpu family')
hardware = _get_field(False, output, None, '', 'Hardware')
# Flags
flags = _get_field(False, output, None, None, 'flags', 'Features', 'ASEs implemented')
if flags:
flags = flags.split()
flags.sort()
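# Hypothetical example of the alternate cache format handled below (seen on
# some platforms such as s390x), where each cacheN line is a set of key=value pairs:
#   cache3 : level=3 scope=Shared size=16384K line_size=256 associativity=16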
# Check for other cache format
if not cache_size:
try:
for i in range(0, 10):
name = "cache{0}".format(i)
value = _get_field(False, output, None, None, name)
if value:
value = [entry.split('=') for entry in value.split(' ')]
value = dict(value)
if 'level' in value and value['level'] == '3' and 'size' in value:
cache_size = value['size']
break
except Exception:
pass
# Convert from MHz string to Hz
hz_actual = _get_field(False, output, None, '', 'cpu MHz', 'cpu speed', 'clock', 'cpu MHz dynamic', 'cpu MHz static')
hz_actual = hz_actual.lower().rstrip('mhz').strip()
hz_actual = _to_decimal_string(hz_actual)
# Convert from GHz/MHz string to Hz
hz_advertised, scale = (None, 0)
try:
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
except Exception:
pass
info = {
'hardware_raw' : hardware,
'brand_raw' : processor_brand,
'l3_cache_size' : _friendly_bytes_to_int(cache_size),
'flags' : flags,
'vendor_id_raw' : vendor_id,
'stepping' : stepping,
'model' : model,
'family' : family,
}
# Make the Hz the same for actual and advertised if missing any
if not hz_advertised or hz_advertised == '0.0':
hz_advertised = hz_actual
scale = 6
elif not hz_actual or hz_actual == '0.0':
hz_actual = hz_advertised
# Add the Hz if there is one
if _hz_short_to_full(hz_advertised, scale) > (0, 0):
info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
if _hz_short_to_full(hz_actual, scale) > (0, 0):
info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, 6)
info['hz_actual'] = _hz_short_to_full(hz_actual, 6)
info = _filter_dict_keys_with_empty_values(info)
g_trace.success()
return info
except Exception as err:
g_trace.fail(err)
#raise # NOTE: To have this throw on error, uncomment this line
return {}
def _get_cpu_info_from_cpufreq_info():
'''
Returns the CPU info gathered from cpufreq-info.
Returns {} if cpufreq-info is not found.
'''
g_trace.header('Trying to get info from cpufreq-info ...')
try:
hz_brand, scale = '0.0', 0
if not DataSource.has_cpufreq_info():
g_trace.fail('Failed to find cpufreq-info. Skipping ...')
return {}
returncode, output = DataSource.cpufreq_info()
if returncode != 0:
g_trace.fail('Failed to run cpufreq-info. Skipping ...')
return {}
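# Hypothetical example of the cpufreq-info line parsed below:
#   "current CPU frequency is 2.67 GHz."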
hz_brand = output.split('current CPU frequency is')[1].split('\n')[0]
i = hz_brand.find('Hz')
assert(i != -1)
hz_brand = hz_brand[0 : i+2].strip().lower()
if hz_brand.endswith('mhz'):
scale = 6
elif hz_brand.endswith('ghz'):
scale = 9
hz_brand = hz_brand.rstrip('mhz').rstrip('ghz').strip()
hz_brand = _to_decimal_string(hz_brand)
info = {
'hz_advertised_friendly' : _hz_short_to_friendly(hz_brand, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_brand, scale),
'hz_advertised' : _hz_short_to_full(hz_brand, scale),
'hz_actual' : _hz_short_to_full(hz_brand, scale),
}
info = _filter_dict_keys_with_empty_values(info)
g_trace.success()
return info
except Exception as err:
g_trace.fail(err)
#raise # NOTE: To have this throw on error, uncomment this line
return {}
def _get_cpu_info_from_lscpu():
'''
Returns the CPU info gathered from lscpu.
Returns {} if lscpu is not found.
'''
g_trace.header('Trying to get info from lscpu ...')
try:
if not DataSource.has_lscpu():
g_trace.fail('Failed to find lscpu. Skipping ...')
return {}
returncode, output = DataSource.lscpu()
if returncode != 0:
g_trace.fail('Failed to run lscpu. Skipping ...')
return {}
info = {}
new_hz = _get_field(False, output, None, None, 'CPU max MHz', 'CPU MHz')
if new_hz:
new_hz = _to_decimal_string(new_hz)
scale = 6
info['hz_advertised_friendly'] = _hz_short_to_friendly(new_hz, scale)
info['hz_actual_friendly'] = _hz_short_to_friendly(new_hz, scale)
info['hz_advertised'] = _hz_short_to_full(new_hz, scale)
info['hz_actual'] = _hz_short_to_full(new_hz, scale)
new_hz = _get_field(False, output, None, None, 'CPU dynamic MHz', 'CPU static MHz')
if new_hz:
new_hz = _to_decimal_string(new_hz)
scale = 6
info['hz_advertised_friendly'] = _hz_short_to_friendly(new_hz, scale)
info['hz_actual_friendly'] = _hz_short_to_friendly(new_hz, scale)
info['hz_advertised'] = _hz_short_to_full(new_hz, scale)
info['hz_actual'] = _hz_short_to_full(new_hz, scale)
vendor_id = _get_field(False, output, None, None, 'Vendor ID')
if vendor_id:
info['vendor_id_raw'] = vendor_id
brand = _get_field(False, output, None, None, 'Model name')
if brand:
info['brand_raw'] = brand
else:
brand = _get_field(False, output, None, None, 'Model')
if brand and not brand.isdigit():
info['brand_raw'] = brand
family = _get_field(False, output, None, None, 'CPU family')
if family and family.isdigit():
info['family'] = int(family)
stepping = _get_field(False, output, None, None, 'Stepping')
if stepping and stepping.isdigit():
info['stepping'] = int(stepping)
model = _get_field(False, output, None, None, 'Model')
if model and model.isdigit():
info['model'] = int(model)
l1_data_cache_size = _get_field(False, output, None, None, 'L1d cache')
if l1_data_cache_size:
info['l1_data_cache_size'] = _friendly_bytes_to_int(l1_data_cache_size)
l1_instruction_cache_size = _get_field(False, output, None, None, 'L1i cache')
if l1_instruction_cache_size:
info['l1_instruction_cache_size'] = _friendly_bytes_to_int(l1_instruction_cache_size)
l2_cache_size = _get_field(False, output, None, None, 'L2 cache', 'L2d cache')
if l2_cache_size:
info['l2_cache_size'] = _friendly_bytes_to_int(l2_cache_size)
l3_cache_size = _get_field(False, output, None, None, 'L3 cache')
if l3_cache_size:
info['l3_cache_size'] = _friendly_bytes_to_int(l3_cache_size)
# Flags
flags = _get_field(False, output, None, None, 'flags', 'Features', 'ASEs implemented')
if flags:
flags = flags.split()
flags.sort()
info['flags'] = flags
info = _filter_dict_keys_with_empty_values(info)
g_trace.success()
return info
except Exception as err:
g_trace.fail(err)
#raise # NOTE: To have this throw on error, uncomment this line
return {}
def _get_cpu_info_from_dmesg():
'''
Returns the CPU info gathered from dmesg.
Returns {} if dmesg is not found or does not have the desired info.
'''
g_trace.header('Trying to get info from the dmesg ...')
# Just return {} if this arch has an unreliable dmesg log
arch, bits = _parse_arch(DataSource.arch_string_raw)
if arch in ['S390X']:
g_trace.fail('Running on S390X. Skipping ...')
return {}
# Just return {} if there is no dmesg
if not DataSource.has_dmesg():
g_trace.fail('Failed to find dmesg. Skipping ...')
return {}
# If dmesg fails return {}
returncode, output = DataSource.dmesg_a()
if output == None or returncode != 0:
g_trace.fail('Failed to run \"dmesg -a\". Skipping ...')
return {}
info = _parse_dmesg_output(output)
g_trace.success()
return info
# https://openpowerfoundation.org/wp-content/uploads/2016/05/LoPAPR_DRAFT_v11_24March2016_cmt1.pdf
# page 767
def _get_cpu_info_from_ibm_pa_features():
'''
Returns the CPU info gathered from lsprop /proc/device-tree/cpus/*/ibm,pa-features
Returns {} if lsprop is not found or ibm,pa-features does not have the desired info.
'''
g_trace.header('Trying to get info from lsprop ...')
try:
# Just return {} if there is no lsprop
if not DataSource.has_ibm_pa_features():
g_trace.fail('Failed to find lsprop. Skipping ...')
return {}
# If ibm,pa-features fails return {}
returncode, output = DataSource.ibm_pa_features()
if output == None or returncode != 0:
g_trace.fail('Failed to glob /proc/device-tree/cpus/*/ibm,pa-features. Skipping ...')
return {}
# Filter out invalid characters from output
value = output.split("ibm,pa-features")[1].lower()
value = [s for s in value if s in list('0123456789abcdef')]
value = ''.join(value)
# Get data converted to Uint32 chunks
left = int(value[0 : 8], 16)
right = int(value[8 : 16], 16)
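# Hypothetical example: a filtered hex string beginning with "3ff60000c70080c0"
# would yield left = 0x3ff60000 and right = 0xc70080c0 for the bit tests below.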
# Get the CPU flags
flags = {
# Byte 0
'mmu' : _is_bit_set(left, 0),
'fpu' : _is_bit_set(left, 1),
'slb' : _is_bit_set(left, 2),
'run' : _is_bit_set(left, 3),
#'reserved' : _is_bit_set(left, 4),
'dabr' : _is_bit_set(left, 5),
'ne' : _is_bit_set(left, 6),
'wtr' : _is_bit_set(left, 7),
# Byte 1
'mcr' : _is_bit_set(left, 8),
'dsisr' : _is_bit_set(left, 9),
'lp' : _is_bit_set(left, 10),
'ri' : _is_bit_set(left, 11),
'dabrx' : _is_bit_set(left, 12),
'sprg3' : _is_bit_set(left, 13),
'rislb' : _is_bit_set(left, 14),
'pp' : _is_bit_set(left, 15),
# Byte 2
'vpm' : _is_bit_set(left, 16),
'dss_2.05' : _is_bit_set(left, 17),
#'reserved' : _is_bit_set(left, 18),
'dar' : _is_bit_set(left, 19),
#'reserved' : _is_bit_set(left, 20),
'ppr' : _is_bit_set(left, 21),
'dss_2.02' : _is_bit_set(left, 22),
'dss_2.06' : _is_bit_set(left, 23),
# Byte 3
'lsd_in_dscr' : _is_bit_set(left, 24),
'ugr_in_dscr' : _is_bit_set(left, 25),
#'reserved' : _is_bit_set(left, 26),
#'reserved' : _is_bit_set(left, 27),
#'reserved' : _is_bit_set(left, 28),
#'reserved' : _is_bit_set(left, 29),
#'reserved' : _is_bit_set(left, 30),
#'reserved' : _is_bit_set(left, 31),
# Byte 4
'sso_2.06' : _is_bit_set(right, 0),
#'reserved' : _is_bit_set(right, 1),
#'reserved' : _is_bit_set(right, 2),
#'reserved' : _is_bit_set(right, 3),
#'reserved' : _is_bit_set(right, 4),
#'reserved' : _is_bit_set(right, 5),
#'reserved' : _is_bit_set(right, 6),
#'reserved' : _is_bit_set(right, 7),
# Byte 5
'le' : _is_bit_set(right, 8),
'cfar' : _is_bit_set(right, 9),
'eb' : _is_bit_set(right, 10),
'lsq_2.07' : _is_bit_set(right, 11),
#'reserved' : _is_bit_set(right, 12),
#'reserved' : _is_bit_set(right, 13),
#'reserved' : _is_bit_set(right, 14),
#'reserved' : _is_bit_set(right, 15),
# Byte 6
'dss_2.07' : _is_bit_set(right, 16),
#'reserved' : _is_bit_set(right, 17),
#'reserved' : _is_bit_set(right, 18),
#'reserved' : _is_bit_set(right, 19),
#'reserved' : _is_bit_set(right, 20),
#'reserved' : _is_bit_set(right, 21),
#'reserved' : _is_bit_set(right, 22),
#'reserved' : _is_bit_set(right, 23),
# Byte 7
#'reserved' : _is_bit_set(right, 24),
#'reserved' : _is_bit_set(right, 25),
#'reserved' : _is_bit_set(right, 26),
#'reserved' : _is_bit_set(right, 27),
#'reserved' : _is_bit_set(right, 28),
#'reserved' : _is_bit_set(right, 29),
#'reserved' : _is_bit_set(right, 30),
#'reserved' : _is_bit_set(right, 31),
}
# Get a list of only the flags that are true
flags = [k for k, v in flags.items() if v]
flags.sort()
info = {
'flags' : flags
}
info = _filter_dict_keys_with_empty_values(info)
g_trace.success()
return info
except Exception as err:
g_trace.fail(err)
return {}
def _get_cpu_info_from_cat_var_run_dmesg_boot():
'''
Returns the CPU info gathered from /var/run/dmesg.boot.
Returns {} if dmesg is not found or does not have the desired info.
'''
g_trace.header('Trying to get info from the /var/run/dmesg.boot log ...')
# Just return {} if there is no /var/run/dmesg.boot
if not DataSource.has_var_run_dmesg_boot():
g_trace.fail('Failed to find /var/run/dmesg.boot file. Skipping ...')
return {}
# If dmesg.boot fails return {}
returncode, output = DataSource.cat_var_run_dmesg_boot()
if output == None or returncode != 0:
g_trace.fail('Failed to run \"cat /var/run/dmesg.boot\". Skipping ...')
return {}
info = _parse_dmesg_output(output)
g_trace.success()
return info
def _get_cpu_info_from_sysctl():
'''
Returns the CPU info gathered from sysctl.
Returns {} if sysctl is not found.
'''
g_trace.header('Trying to get info from sysctl ...')
try:
# Just return {} if there is no sysctl
if not DataSource.has_sysctl():
g_trace.fail('Failed to find sysctl. Skipping ...')
return {}
# If sysctl fails return {}
returncode, output = DataSource.sysctl_machdep_cpu_hw_cpufrequency()
if output == None or returncode != 0:
g_trace.fail('Failed to run \"sysctl machdep.cpu hw.cpufrequency\". Skipping ...')
return {}
# Various fields
vendor_id = _get_field(False, output, None, None, 'machdep.cpu.vendor')
processor_brand = _get_field(True, output, None, None, 'machdep.cpu.brand_string')
cache_size = _get_field(False, output, int, 0, 'machdep.cpu.cache.size')
stepping = _get_field(False, output, int, 0, 'machdep.cpu.stepping')
model = _get_field(False, output, int, 0, 'machdep.cpu.model')
family = _get_field(False, output, int, 0, 'machdep.cpu.family')
# Flags
flags = _get_field(False, output, None, '', 'machdep.cpu.features').lower().split()
flags.extend(_get_field(False, output, None, '', 'machdep.cpu.leaf7_features').lower().split())
flags.extend(_get_field(False, output, None, '', 'machdep.cpu.extfeatures').lower().split())
flags.sort()
# Convert from GHz/MHz string to Hz
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
hz_actual = _get_field(False, output, None, None, 'hw.cpufrequency')
hz_actual = _to_decimal_string(hz_actual)
info = {
'vendor_id_raw' : vendor_id,
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, 0),
'l2_cache_size' : int(cache_size) * 1024,
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
info = _filter_dict_keys_with_empty_values(info)
g_trace.success()
return info
except Exception as err:
g_trace.fail(err)
return {}
def _get_cpu_info_from_sysinfo():
'''
Returns the CPU info gathered from sysinfo.
Returns {} if sysinfo is not found.
'''
info = _get_cpu_info_from_sysinfo_v1()
info.update(_get_cpu_info_from_sysinfo_v2())
return info
def _get_cpu_info_from_sysinfo_v1():
'''
Returns the CPU info gathered from sysinfo.
Returns {} if sysinfo is not found.
'''
g_trace.header('Trying to get info from sysinfo version 1 ...')
try:
# Just return {} if there is no sysinfo
if not DataSource.has_sysinfo():
g_trace.fail('Failed to find sysinfo. Skipping ...')
return {}
# If sysinfo fails return {}
returncode, output = DataSource.sysinfo_cpu()
if output == None or returncode != 0:
g_trace.fail('Failed to run \"sysinfo -cpu\". Skipping ...')
return {}
# Various fields
vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
stepping = int(output.split(', stepping ')[1].split(',')[0].strip())
model = int(output.split(', model ')[1].split(',')[0].strip())
family = int(output.split(', family ')[1].split(',')[0].strip())
# Flags
flags = []
for line in output.split('\n'):
if line.startswith('\t\t'):
for flag in line.strip().lower().split():
flags.append(flag)
flags.sort()
# Convert from GHz/MHz string to Hz
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
hz_actual = hz_advertised
info = {
'vendor_id_raw' : vendor_id,
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, scale),
'l2_cache_size' : _to_friendly_bytes(cache_size),
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
info = _filter_dict_keys_with_empty_values(info)
g_trace.success()
return info
except Exception as err:
g_trace.fail(err)
#raise # NOTE: To have this throw on error, uncomment this line
return {}
def _get_cpu_info_from_sysinfo_v2():
'''
Returns the CPU info gathered from sysinfo.
Returns {} if sysinfo is not found.
'''
g_trace.header('Trying to get info from sysinfo version 2 ...')
try:
# Just return {} if there is no sysinfo
if not DataSource.has_sysinfo():
g_trace.fail('Failed to find sysinfo. Skipping ...')
return {}
# If sysinfo fails return {}
returncode, output = DataSource.sysinfo_cpu()
if output == None or returncode != 0:
g_trace.fail('Failed to run \"sysinfo -cpu\". Skipping ...')
return {}
# Various fields
vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
signature = output.split('Signature:')[1].split('\n')[0].strip()
#
stepping = int(signature.split('stepping ')[1].split(',')[0].strip())
model = int(signature.split('model ')[1].split(',')[0].strip())
family = int(signature.split('family ')[1].split(',')[0].strip())
# Flags
def get_subsection_flags(output):
retval = []
for line in output.split('\n')[1:]:
if not line.startswith(' ') and not line.startswith(' '): break
for entry in line.strip().lower().split(' '):
retval.append(entry)
return retval
flags = get_subsection_flags(output.split('Features: ')[1]) + \
get_subsection_flags(output.split('Extended Features (0x00000001): ')[1]) + \
get_subsection_flags(output.split('Extended Features (0x80000001): ')[1])
flags.sort()
# Convert from GHz/MHz string to Hz
lines = [n for n in output.split('\n') if n]
raw_hz = lines[0].split('running at ')[1].strip().lower()
hz_advertised = raw_hz.rstrip('mhz').rstrip('ghz').strip()
hz_advertised = _to_decimal_string(hz_advertised)
hz_actual = hz_advertised
scale = 0
if raw_hz.endswith('mhz'):
scale = 6
elif raw_hz.endswith('ghz'):
scale = 9
info = {
'vendor_id_raw' : vendor_id,
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, scale),
'l2_cache_size' : _to_friendly_bytes(cache_size),
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
info = _filter_dict_keys_with_empty_values(info)
g_trace.success()
return info
except Exception as err:
g_trace.fail(err)
#raise # NOTE: To have this throw on error, uncomment this line
return {}
def _get_cpu_info_from_wmic():
'''
Returns the CPU info gathered from WMI.
Returns {} if not on Windows, or wmic is not installed.
'''
g_trace.header('Trying to get info from wmic ...')
try:
# Just return {} if not Windows or there is no wmic
if not DataSource.is_windows or not DataSource.has_wmic():
g_trace.fail('Failed to find WMIC, or not on Windows. Skipping ...')
return {}
returncode, output = DataSource.wmic_cpu()
if output == None or returncode != 0:
g_trace.fail('Failed to run wmic. Skipping ...')
return {}
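# The wmic output parsed below is a series of KEY=VALUE lines, e.g. (hypothetical):
#   Name=Intel(R) Core(TM) i7-4770 CPU @ 3.40GHz
#   CurrentClockSpeed=3401
#   L2CacheSize=1024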
# Break the output into key/value pairs
value = output.split("\n")
value = [s.rstrip().split('=') for s in value if '=' in s]
value = {k: v for k, v in value if v}
# Get the advertised MHz
processor_brand = value.get('Name')
hz_advertised, scale_advertised = _parse_cpu_brand_string(processor_brand)
# Get the actual MHz
hz_actual = value.get('CurrentClockSpeed')
scale_actual = 6
if hz_actual:
hz_actual = _to_decimal_string(hz_actual)
# Get cache sizes
l2_cache_size = value.get('L2CacheSize') # NOTE: L2CacheSize is in kilobytes
if l2_cache_size:
l2_cache_size = int(l2_cache_size) * 1024
l3_cache_size = value.get('L3CacheSize') # NOTE: L3CacheSize is in kilobytes
if l3_cache_size:
l3_cache_size = int(l3_cache_size) * 1024
# Get family, model, and stepping
family, model, stepping = '', '', ''
description = value.get('Description') or value.get('Caption')
entries = description.split(' ')
if 'Family' in entries and entries.index('Family') < len(entries)-1:
i = entries.index('Family')
family = int(entries[i + 1])
if 'Model' in entries and entries.index('Model') < len(entries)-1:
i = entries.index('Model')
model = int(entries[i + 1])
if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:
i = entries.index('Stepping')
stepping = int(entries[i + 1])
info = {
'vendor_id_raw' : value.get('Manufacturer'),
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale_advertised),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale_actual),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale_advertised),
'hz_actual' : _hz_short_to_full(hz_actual, scale_actual),
'l2_cache_size' : l2_cache_size,
'l3_cache_size' : l3_cache_size,
'stepping' : stepping,
'model' : model,
'family' : family,
}
info = _filter_dict_keys_with_empty_values(info)
g_trace.success()
return info
except Exception as err:
g_trace.fail(err)
#raise # NOTE: To have this throw on error, uncomment this line
return {}
def _get_cpu_info_from_registry():
'''
Returns the CPU info gathered from the Windows Registry.
Returns {} if not on Windows.
'''
g_trace.header('Trying to get info from Windows registry ...')
try:
# Just return {} if not on Windows
if not DataSource.is_windows:
g_trace.fail('Not running on Windows. Skipping ...')
return {}
# Get the CPU name
processor_brand = DataSource.winreg_processor_brand().strip()
# Get the CPU vendor id
vendor_id = DataSource.winreg_vendor_id_raw()
# Get the CPU arch and bits
arch_string_raw = DataSource.winreg_arch_string_raw()
arch, bits = _parse_arch(arch_string_raw)
# Get the actual CPU Hz
hz_actual = DataSource.winreg_hz_actual()
hz_actual = _to_decimal_string(hz_actual)
# Get the advertised CPU Hz
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
# If advertised hz not found, use the actual hz
if hz_advertised == '0.0':
scale = 6
hz_advertised = _to_decimal_string(hz_actual)
# Get the CPU features
feature_bits = DataSource.winreg_feature_bits()
def is_set(bit):
mask = 0x80000000 >> bit
retval = mask & feature_bits > 0
return retval
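# Note (added for clarity): is_set() treats the registry's feature bitfield as
# most-significant-bit first, so is_set(0) tests mask 0x80000000 and is_set(31)
# tests mask 0x1.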
# http://en.wikipedia.org/wiki/CPUID
# http://unix.stackexchange.com/questions/43539/what-do-the-flags-in-proc-cpuinfo-mean
# http://www.lohninger.com/helpcsuite/public_constants_cpuid.htm
flags = {
'fpu' : is_set(0), # Floating Point Unit
'vme' : is_set(1), # V86 Mode Extensions
'de' : is_set(2), # Debug Extensions - I/O breakpoints supported
'pse' : is_set(3), # Page Size Extensions (4 MB pages supported)
'tsc' : is_set(4), # Time Stamp Counter and RDTSC instruction are available
'msr' : is_set(5), # Model Specific Registers
'pae' : is_set(6), # Physical Address Extensions (36 bit address, 2MB pages)
'mce' : is_set(7), # Machine Check Exception supported
'cx8' : is_set(8), # Compare Exchange Eight Byte instruction available
'apic' : is_set(9), # Local APIC present (multiprocessor operation support)
'sepamd' : is_set(10), # Fast system calls (AMD only)
'sep' : is_set(11), # Fast system calls
'mtrr' : is_set(12), # Memory Type Range Registers
'pge' : is_set(13), # Page Global Enable
'mca' : is_set(14), # Machine Check Architecture
'cmov' : is_set(15), # Conditional MOVe instructions
'pat' : is_set(16), # Page Attribute Table
'pse36' : is_set(17), # 36 bit Page Size Extensions
'serial' : is_set(18), # Processor Serial Number
'clflush' : is_set(19), # Cache Flush
#'reserved1' : is_set(20), # reserved
'dts' : is_set(21), # Debug Trace Store
'acpi' : is_set(22), # ACPI support
'mmx' : is_set(23), # MultiMedia Extensions
'fxsr' : is_set(24), # FXSAVE and FXRSTOR instructions
'sse' : is_set(25), # SSE instructions
'sse2' : is_set(26), # SSE2 (WNI) instructions
'ss' : is_set(27), # self snoop
#'reserved2' : is_set(28), # reserved
'tm' : is_set(29), # Automatic clock control
'ia64' : is_set(30), # IA64 instructions
'3dnow' : is_set(31) # 3DNow! instructions available
}
# Get a list of only the flags that are true
flags = [k for k, v in flags.items() if v]
flags.sort()
info = {
'vendor_id_raw' : vendor_id,
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 6),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, 6),
'flags' : flags
}
info = _filter_dict_keys_with_empty_values(info)
g_trace.success()
return info
except Exception as err:
g_trace.fail(err)
return {}
def _get_cpu_info_from_kstat():
'''
Returns the CPU info gathered from isainfo and kstat.
Returns {} if isainfo or kstat are not found.
'''
g_trace.header('Trying to get info from kstat ...')
try:
# Just return {} if there is no isainfo or kstat
if not DataSource.has_isainfo() or not DataSource.has_kstat():
g_trace.fail('Failed to find isainfo or kstat. Skipping ...')
return {}
# If isainfo fails return {}
returncode, flag_output = DataSource.isainfo_vb()
if flag_output == None or returncode != 0:
g_trace.fail('Failed to run \"isainfo -vb\". Skipping ...')
return {}
# If kstat fails return {}
returncode, kstat = DataSource.kstat_m_cpu_info()
if kstat == None or returncode != 0:
g_trace.fail('Failed to run \"kstat -m cpu_info\". Skipping ...')
return {}
# Various fields
vendor_id = kstat.split('\tvendor_id ')[1].split('\n')[0].strip()
processor_brand = kstat.split('\tbrand ')[1].split('\n')[0].strip()
stepping = int(kstat.split('\tstepping ')[1].split('\n')[0].strip())
model = int(kstat.split('\tmodel ')[1].split('\n')[0].strip())
family = int(kstat.split('\tfamily ')[1].split('\n')[0].strip())
# Flags
flags = flag_output.strip().split('\n')[-1].strip().lower().split()
flags.sort()
# Convert from GHz/MHz string to Hz
scale = 6
hz_advertised = kstat.split('\tclock_MHz ')[1].split('\n')[0].strip()
hz_advertised = _to_decimal_string(hz_advertised)
# Convert from GHz/MHz string to Hz
hz_actual = kstat.split('\tcurrent_clock_Hz ')[1].split('\n')[0].strip()
hz_actual = _to_decimal_string(hz_actual)
info = {
'vendor_id_raw' : vendor_id,
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, 0),
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
info = _filter_dict_keys_with_empty_values(info)
g_trace.success()
return info
except Exception as err:
g_trace.fail(err)
return {}
def _get_cpu_info_from_platform_uname():
g_trace.header('Trying to get info from platform.uname ...')
try:
uname = DataSource.uname_string_raw.split(',')[0]
family, model, stepping = (None, None, None)
entries = uname.split(' ')
if 'Family' in entries and entries.index('Family') < len(entries)-1:
i = entries.index('Family')
family = int(entries[i + 1])
if 'Model' in entries and entries.index('Model') < len(entries)-1:
i = entries.index('Model')
model = int(entries[i + 1])
if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:
i = entries.index('Stepping')
stepping = int(entries[i + 1])
info = {
'family' : family,
'model' : model,
'stepping' : stepping
}
info = _filter_dict_keys_with_empty_values(info)
g_trace.success()
return info
except Exception as err:
g_trace.fail(err)
return {}
def _get_cpu_info_internal():
'''
Returns the CPU info by using the best sources of information for your OS.
Returns {} if nothing is found.
'''
g_trace.write('!' * 80)
# Get the CPU arch and bits
arch, bits = _parse_arch(DataSource.arch_string_raw)
friendly_maxsize = { 2**31-1: '32 bit', 2**63-1: '64 bit' }.get(sys.maxsize) or 'unknown bits'
friendly_version = "{0}.{1}.{2}.{3}.{4}".format(*sys.version_info)
PYTHON_VERSION = "{0} ({1})".format(friendly_version, friendly_maxsize)
info = {
'python_version' : PYTHON_VERSION,
'cpuinfo_version' : CPUINFO_VERSION,
'cpuinfo_version_string' : CPUINFO_VERSION_STRING,
'arch' : arch,
'bits' : bits,
'count' : DataSource.cpu_count,
'arch_string_raw' : DataSource.arch_string_raw,
}
g_trace.write("python_version: {0}".format(info['python_version']))
g_trace.write("cpuinfo_version: {0}".format(info['cpuinfo_version']))
g_trace.write("arch: {0}".format(info['arch']))
g_trace.write("bits: {0}".format(info['bits']))
g_trace.write("count: {0}".format(info['count']))
g_trace.write("arch_string_raw: {0}".format(info['arch_string_raw']))
# Try the Windows wmic
_copy_new_fields(info, _get_cpu_info_from_wmic())
# Try the Windows registry
_copy_new_fields(info, _get_cpu_info_from_registry())
# Try /proc/cpuinfo
_copy_new_fields(info, _get_cpu_info_from_proc_cpuinfo())
# Try cpufreq-info
_copy_new_fields(info, _get_cpu_info_from_cpufreq_info())
# Try LSCPU
_copy_new_fields(info, _get_cpu_info_from_lscpu())
# Try sysctl
_copy_new_fields(info, _get_cpu_info_from_sysctl())
# Try kstat
_copy_new_fields(info, _get_cpu_info_from_kstat())
# Try dmesg
_copy_new_fields(info, _get_cpu_info_from_dmesg())
# Try /var/run/dmesg.boot
_copy_new_fields(info, _get_cpu_info_from_cat_var_run_dmesg_boot())
# Try lsprop ibm,pa-features
_copy_new_fields(info, _get_cpu_info_from_ibm_pa_features())
# Try sysinfo
_copy_new_fields(info, _get_cpu_info_from_sysinfo())
# Try querying the CPU cpuid register
# FIXME: This should print stdout and stderr to trace log
_copy_new_fields(info, _get_cpu_info_from_cpuid())
# Try platform.uname
_copy_new_fields(info, _get_cpu_info_from_platform_uname())
g_trace.write('!' * 80)
return info
def get_cpu_info_json():
'''
Returns the CPU info by using the best sources of information for your OS.
Returns the result in a json string
'''
import json
output = None
# If running under pyinstaller, run normally
if getattr(sys, 'frozen', False):
info = _get_cpu_info_internal()
output = json.dumps(info)
output = "{0}".format(output)
# if not running under pyinstaller, run in another process.
# This is done because multiprocessing has a design flaw that
# causes non-main programs to run multiple times on Windows.
else:
from subprocess import Popen, PIPE
command = [sys.executable, __file__, '--json']
p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
output = p1.communicate()[0]
if p1.returncode != 0:
return "{}"
if not IS_PY2:
output = output.decode(encoding='UTF-8')
return output
def get_cpu_info():
'''
Returns the CPU info by using the best sources of information for your OS.
Returns the result in a dict
'''
import json
output = get_cpu_info_json()
# Convert JSON to Python with non unicode strings
output = json.loads(output, object_hook = _utf_to_str)
return output
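# Example usage of the public API (illustrative):
#
#   info = get_cpu_info()
#   print(info.get('brand_raw'), info.get('hz_actual_friendly'))
#   print(', '.join(info.get('flags', [])))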
def main():
from argparse import ArgumentParser
import json
# Parse args
parser = ArgumentParser(description='Gets CPU info with pure Python 2 & 3')
parser.add_argument('--json', action='store_true', help='Return the info in JSON format')
parser.add_argument('--version', action='store_true', help='Return the version of py-cpuinfo')
parser.add_argument('--trace', action='store_true', help='Traces code paths used to find CPU info to file')
args = parser.parse_args()
global g_trace
g_trace = Trace(args.trace, False)
try:
_check_arch()
except Exception as err:
sys.stderr.write(str(err) + "\n")
sys.exit(1)
info = _get_cpu_info_internal()
if not info:
sys.stderr.write("Failed to find cpu info\n")
sys.exit(1)
if args.json:
print(json.dumps(info))
elif args.version:
print(CPUINFO_VERSION_STRING)
else:
print('Python Version: {0}'.format(info.get('python_version', '')))
print('Cpuinfo Version: {0}'.format(info.get('cpuinfo_version_string', '')))
print('Vendor ID Raw: {0}'.format(info.get('vendor_id_raw', '')))
print('Hardware Raw: {0}'.format(info.get('hardware_raw', '')))
print('Brand Raw: {0}'.format(info.get('brand_raw', '')))
print('Hz Advertised Friendly: {0}'.format(info.get('hz_advertised_friendly', '')))
print('Hz Actual Friendly: {0}'.format(info.get('hz_actual_friendly', '')))
print('Hz Advertised: {0}'.format(info.get('hz_advertised', '')))
print('Hz Actual: {0}'.format(info.get('hz_actual', '')))
print('Arch: {0}'.format(info.get('arch', '')))
print('Bits: {0}'.format(info.get('bits', '')))
print('Count: {0}'.format(info.get('count', '')))
print('Arch String Raw: {0}'.format(info.get('arch_string_raw', '')))
print('L1 Data Cache Size: {0}'.format(info.get('l1_data_cache_size', '')))
print('L1 Instruction Cache Size: {0}'.format(info.get('l1_instruction_cache_size', '')))
print('L2 Cache Size: {0}'.format(info.get('l2_cache_size', '')))
print('L2 Cache Line Size: {0}'.format(info.get('l2_cache_line_size', '')))
print('L2 Cache Associativity: {0}'.format(info.get('l2_cache_associativity', '')))
print('L3 Cache Size: {0}'.format(info.get('l3_cache_size', '')))
print('Stepping: {0}'.format(info.get('stepping', '')))
print('Model: {0}'.format(info.get('model', '')))
print('Family: {0}'.format(info.get('family', '')))
print('Processor Type: {0}'.format(info.get('processor_type', '')))
print('Flags: {0}'.format(', '.join(info.get('flags', ''))))
if __name__ == '__main__':
main()
else:
g_trace = Trace(False, False)
_check_arch()
|
test_double_spend.py
|
# Copyright BigchainDB GmbH and BigchainDB contributors
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
# # Double Spend testing
# This test challenges the system with double spends.
import os
from uuid import uuid4
from threading import Thread
import queue
import bigchaindb_driver.exceptions
from bigchaindb_driver import BigchainDB
from bigchaindb_driver.crypto import generate_keypair
def test_double_create():
bdb = BigchainDB(os.environ.get('BIGCHAINDB_ENDPOINT'))
alice = generate_keypair()
results = queue.Queue()
tx = bdb.transactions.fulfill(
bdb.transactions.prepare(
operation='CREATE',
signers=alice.public_key,
asset={'data': {'uuid': str(uuid4())}}),
private_keys=alice.private_key)
def send_and_queue(tx):
try:
bdb.transactions.send_commit(tx)
results.put('OK')
except bigchaindb_driver.exceptions.TransportError as e:
results.put('FAIL')
t1 = Thread(target=send_and_queue, args=(tx, ))
t2 = Thread(target=send_and_queue, args=(tx, ))
t1.start()
t2.start()
results = [results.get(timeout=2), results.get(timeout=2)]
assert results.count('OK') == 1
assert results.count('FAIL') == 1
|
rdma.py
|
# Windows Azure Linux Agent
#
# Copyright 2016 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Handle packages and modules to enable RDMA for IB networking
"""
import os
import re
import time
import threading
import azurelinuxagent.common.conf as conf
import azurelinuxagent.common.logger as logger
import azurelinuxagent.common.utils.fileutil as fileutil
import azurelinuxagent.common.utils.shellutil as shellutil
from azurelinuxagent.common.utils.textutil import parse_doc, find, getattrib
from azurelinuxagent.common.protocol.wire import SHARED_CONF_FILE_NAME
dapl_config_paths = [
'/etc/dat.conf',
'/etc/rdma/dat.conf',
'/usr/local/etc/dat.conf'
]
def setup_rdma_device():
logger.verbose("Parsing SharedConfig XML contents for RDMA details")
xml_doc = parse_doc(
fileutil.read_file(os.path.join(conf.get_lib_dir(), SHARED_CONF_FILE_NAME)))
if xml_doc is None:
logger.error("Could not parse SharedConfig XML document")
return
instance_elem = find(xml_doc, "Instance")
if not instance_elem:
logger.error("Could not find <Instance> in SharedConfig document")
return
rdma_ipv4_addr = getattrib(instance_elem, "rdmaIPv4Address")
if not rdma_ipv4_addr:
logger.error(
"Could not find rdmaIPv4Address attribute on Instance element of SharedConfig.xml document")
return
rdma_mac_addr = getattrib(instance_elem, "rdmaMacAddress")
if not rdma_mac_addr:
logger.error(
"Could not find rdmaMacAddress attribute on Instance element of SharedConfig.xml document")
return
# add colons to the MAC address (e.g. 00155D33FF1D ->
# 00:15:5D:33:FF:1D)
rdma_mac_addr = ':'.join([rdma_mac_addr[i:i+2]
for i in range(0, len(rdma_mac_addr), 2)])
logger.info("Found RDMA details. IPv4={0} MAC={1}".format(
rdma_ipv4_addr, rdma_mac_addr))
# Set up the RDMA device with the collected information
RDMADeviceHandler(rdma_ipv4_addr, rdma_mac_addr).start()
logger.info("RDMA: device is set up")
return
class RDMAHandler(object):
driver_module_name = 'hv_network_direct'
@staticmethod
def get_rdma_version():
"""Retrieve the firmware version information from the system.
This depends on information provided by the Linux kernel."""
driver_info_source = '/var/lib/hyperv/.kvp_pool_0'
base_kernel_err_msg = 'Kernel does not provide the necessary '
base_kernel_err_msg += 'information or the kvp daemon is not running.'
if not os.path.isfile(driver_info_source):
error_msg = 'RDMA: Source file "%s" does not exist. '
error_msg += base_kernel_err_msg
logger.error(error_msg % driver_info_source)
return
lines = open(driver_info_source).read()
if not lines:
error_msg = 'RDMA: Source file "%s" is empty. '
error_msg += base_kernel_err_msg
logger.error(error_msg % driver_info_source)
return
r = re.search("NdDriverVersion\0+(\d\d\d\.\d)", lines)
if r:
NdDriverVersion = r.groups()[0]
return NdDriverVersion
else:
error_msg = 'RDMA: NdDriverVersion not found in "%s"'
logger.error(error_msg % driver_info_source)
return
@staticmethod
def is_kvp_daemon_running():
"""Look for kvp daemon names in ps -ef output and return True/False
"""
# for centos, the hypervkvpd and the hv_kvp_daemon both are ok.
# for suse, it uses hv_kvp_daemon
kvp_daemon_names = ['hypervkvpd', 'hv_kvp_daemon']
exitcode, ps_out = shellutil.run_get_output("ps -ef")
if exitcode != 0:
raise Exception('RDMA: ps -ef failed: %s' % ps_out)
for n in kvp_daemon_names:
if n in ps_out:
logger.info('RDMA: kvp daemon (%s) is running' % n)
return True
else:
logger.verbose('RDMA: kvp daemon (%s) is not running' % n)
return False
def load_driver_module(self):
"""Load the kernel driver, this depends on the proper driver
to be installed with the install_driver() method"""
logger.info("RDMA: probing module '%s'" % self.driver_module_name)
result = shellutil.run('modprobe --first-time %s' % self.driver_module_name)
if result != 0:
error_msg = 'Could not load "%s" kernel module. '
error_msg += 'Run "modprobe --first-time %s" as root for more details'
logger.error(
error_msg % (self.driver_module_name, self.driver_module_name)
)
return False
logger.info('RDMA: Loaded the kernel driver successfully.')
return True
def install_driver(self):
"""Install the driver. This is distribution specific and must
be overwritten in the child implementation."""
logger.error('RDMAHandler.install_driver not implemented')
def is_driver_loaded(self):
"""Check if the network module is loaded in kernel space"""
cmd = 'lsmod | grep ^%s' % self.driver_module_name
status, loaded_modules = shellutil.run_get_output(cmd)
logger.info('RDMA: Checking if the module is loaded.')
if loaded_modules:
logger.info('RDMA: module loaded.')
return True
logger.info('RDMA: module not loaded.')
return False
def reboot_system(self):
"""Reboot the system. This is required as the kernel module for
the rdma driver cannot be unloaded with rmmod"""
logger.info('RDMA: Rebooting system.')
ret = shellutil.run('shutdown -r now')
if ret != 0:
logger.error('RDMA: Failed to reboot the system')
dapl_config_paths = [
'/etc/dat.conf', '/etc/rdma/dat.conf', '/usr/local/etc/dat.conf']
class RDMADeviceHandler(object):
"""
Responsible for writing RDMA IP and MAC address to the /dev/hvnd_rdma
interface.
"""
rdma_dev = '/dev/hvnd_rdma'
device_check_timeout_sec = 120
device_check_interval_sec = 1
ipv4_addr = None
mac_adr = None
def __init__(self, ipv4_addr, mac_addr):
self.ipv4_addr = ipv4_addr
self.mac_addr = mac_addr
def start(self):
"""
Start a background thread to process the RDMA tasks and return.
"""
logger.info("RDMA: starting device processing in the background.")
threading.Thread(target=self.process).start()
def process(self):
try:
RDMADeviceHandler.update_dat_conf(dapl_config_paths, self.ipv4_addr)
skip_rdma_device = False
retcode,out = shellutil.run_get_output("modinfo hv_network_direct")
if retcode == 0:
version = re.search("version:\s+(\d+)\.(\d+)\.(\d+)\D", out, re.IGNORECASE)
if version:
v1 = int(version.groups(0)[0])
v2 = int(version.groups(0)[1])
if v1>4 or v1==4 and v2>0:
logger.info("Skip setting /dev/hvnd_rdma on 4.1 or later")
skip_rdma_device = True
else:
logger.info("RDMA: hv_network_direct driver version not present, assuming 4.0.x or older.")
else:
logger.warn("RDMA: failed to get module info on hv_network_direct.")
if not skip_rdma_device:
RDMADeviceHandler.wait_rdma_device(
self.rdma_dev, self.device_check_timeout_sec, self.device_check_interval_sec)
RDMADeviceHandler.write_rdma_config_to_device(
self.rdma_dev, self.ipv4_addr, self.mac_addr)
RDMADeviceHandler.update_network_interface(self.mac_addr, self.ipv4_addr)
except Exception as e:
logger.error("RDMA: device processing failed: {0}".format(e))
@staticmethod
def update_dat_conf(paths, ipv4_addr):
"""
Looks through the given paths for a dat.conf file and updates the IP
address for the InfiniBand interface.
"""
logger.info("Updating DAPL configuration file")
for f in paths:
logger.info("RDMA: trying {0}".format(f))
if not os.path.isfile(f):
logger.info(
"RDMA: DAPL config not found at {0}".format(f))
continue
logger.info("RDMA: DAPL config is at: {0}".format(f))
cfg = fileutil.read_file(f)
new_cfg = RDMADeviceHandler.replace_dat_conf_contents(
cfg, ipv4_addr)
fileutil.write_file(f, new_cfg)
logger.info("RDMA: DAPL configuration is updated")
return
raise Exception("RDMA: DAPL configuration file not found at predefined paths")
@staticmethod
def replace_dat_conf_contents(cfg, ipv4_addr):
old = "ofa-v2-ib0 u2.0 nonthreadsafe default libdaplofa.so.2 dapl.2.0 \"\S+ 0\""
new = "ofa-v2-ib0 u2.0 nonthreadsafe default libdaplofa.so.2 dapl.2.0 \"{0} 0\"".format(
ipv4_addr)
return re.sub(old, new, cfg)
@staticmethod
def write_rdma_config_to_device(path, ipv4_addr, mac_addr):
data = RDMADeviceHandler.generate_rdma_config(ipv4_addr, mac_addr)
logger.info(
"RDMA: Updating device with configuration: {0}".format(data))
with open(path, "w") as f:
logger.info("RDMA: Device opened for writing")
f.write(data)
logger.info("RDMA: Updated device with IPv4/MAC addr successfully")
@staticmethod
def generate_rdma_config(ipv4_addr, mac_addr):
return 'rdmaMacAddress="{0}" rdmaIPv4Address="{1}"'.format(mac_addr, ipv4_addr)
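# Example (hypothetical addresses): generate_rdma_config('10.0.0.5', '00:15:5D:33:FF:1D')
# returns 'rdmaMacAddress="00:15:5D:33:FF:1D" rdmaIPv4Address="10.0.0.5"', which is
# what write_rdma_config_to_device() writes to /dev/hvnd_rdma.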
@staticmethod
def wait_rdma_device(path, timeout_sec, check_interval_sec):
logger.info("RDMA: waiting for device={0} timeout={1}s".format(path, timeout_sec))
total_retries = timeout_sec/check_interval_sec
n = 0
while n < total_retries:
if os.path.exists(path):
logger.info("RDMA: device ready")
return
logger.verbose(
"RDMA: device not ready, sleep {0}s".format(check_interval_sec))
time.sleep(check_interval_sec)
n += 1
logger.error("RDMA device wait timed out")
raise Exception("The device did not show up in {0} seconds ({1} retries)".format(
timeout_sec, total_retries))
@staticmethod
def update_network_interface(mac_addr, ipv4_addr):
netmask=16
logger.info("RDMA: will update the network interface with IPv4/MAC")
if_name=RDMADeviceHandler.get_interface_by_mac(mac_addr)
logger.info("RDMA: network interface found: {0}", if_name)
logger.info("RDMA: bringing network interface up")
if shellutil.run("ifconfig {0} up".format(if_name)) != 0:
raise Exception("Could not bring up RMDA interface: {0}".format(if_name))
logger.info("RDMA: configuring IPv4 addr and netmask on interface")
addr = '{0}/{1}'.format(ipv4_addr, netmask)
if shellutil.run("ifconfig {0} {1}".format(if_name, addr)) != 0:
raise Exception("Could set addr to {1} on {0}".format(if_name, addr))
logger.info("RDMA: network address and netmask configured on interface")
@staticmethod
def get_interface_by_mac(mac):
ret, output = shellutil.run_get_output("ifconfig -a")
if ret != 0:
raise Exception("Failed to list network interfaces")
output = output.replace('\n', '')
match = re.search(r"(eth\d).*(HWaddr|ether) {0}".format(mac),
output, re.IGNORECASE)
if match is None:
raise Exception("Failed to get ifname with mac: {0}".format(mac))
output = match.group(0)
eths = re.findall(r"eth\d", output)
if eths is None or len(eths) == 0:
raise Exception("ifname with mac: {0} not found".format(mac))
return eths[-1]
|
test_ctypes.py
|
from ctypes import *
import sys
import threading
import numpy as np
from numba import unittest_support as unittest
from numba.compiler import compile_isolated
from numba import jit, types, errors
from numba.typing import ctypes_utils
from .support import MemoryLeakMixin, tag, TestCase
from .ctypes_usecases import *
class TestCTypesTypes(TestCase):
def _conversion_tests(self, check):
check(c_double, types.float64)
check(c_int, types.intc)
check(c_uint16, types.uint16)
check(c_size_t, types.uintp)
check(c_ssize_t, types.intp)
check(c_void_p, types.voidptr)
check(POINTER(c_float), types.CPointer(types.float32))
check(POINTER(POINTER(c_float)),
types.CPointer(types.CPointer(types.float32)))
check(None, types.void)
def test_from_ctypes(self):
"""
Test converting a ctypes type to a Numba type.
"""
def check(cty, ty):
got = ctypes_utils.from_ctypes(cty)
self.assertEqual(got, ty)
self._conversion_tests(check)
# An unsupported type
with self.assertRaises(TypeError) as raises:
ctypes_utils.from_ctypes(c_wchar_p)
self.assertIn("Unsupported ctypes type", str(raises.exception))
def test_to_ctypes(self):
"""
Test converting a Numba type to a ctypes type.
"""
def check(cty, ty):
got = ctypes_utils.to_ctypes(ty)
self.assertEqual(got, cty)
self._conversion_tests(check)
# An unsupported type
with self.assertRaises(TypeError) as raises:
ctypes_utils.to_ctypes(types.ellipsis)
self.assertIn("Cannot convert Numba type '...' to ctypes type",
str(raises.exception))
class TestCTypesUseCases(MemoryLeakMixin, TestCase):
def test_c_sin(self):
pyfunc = use_c_sin
cres = compile_isolated(pyfunc, [types.double])
cfunc = cres.entry_point
x = 3.14
self.assertEqual(pyfunc(x), cfunc(x))
def test_two_funcs(self):
# Check that two constant functions don't get mixed up.
pyfunc = use_two_funcs
cres = compile_isolated(pyfunc, [types.double])
cfunc = cres.entry_point
x = 3.14
self.assertEqual(pyfunc(x), cfunc(x))
@unittest.skipUnless(is_windows, "Windows-specific test")
def test_stdcall(self):
# Just check that it doesn't crash
cres = compile_isolated(use_c_sleep, [types.uintc])
cfunc = cres.entry_point
cfunc(1)
def test_ctype_wrapping(self):
pyfunc = use_ctype_wrapping
cres = compile_isolated(pyfunc, [types.double])
cfunc = cres.entry_point
x = 3.14
self.assertEqual(pyfunc(x), cfunc(x))
def test_ctype_voidptr(self):
pyfunc = use_c_pointer
# pyfunc will segfault if called
cres = compile_isolated(pyfunc, [types.int32])
cfunc = cres.entry_point
x = 123
self.assertEqual(cfunc(x), x + 1)
def test_function_pointer(self):
pyfunc = use_func_pointer
cfunc = jit(nopython=True)(pyfunc)
for (fa, fb, x) in [
(c_sin, c_cos, 1.0),
(c_sin, c_cos, -1.0),
(c_cos, c_sin, 1.0),
(c_cos, c_sin, -1.0)]:
expected = pyfunc(fa, fb, x)
got = cfunc(fa, fb, x)
self.assertEqual(got, expected)
# A single specialization was compiled for all calls
self.assertEqual(len(cfunc.overloads), 1, cfunc.overloads)
def test_untyped_function(self):
with self.assertRaises(TypeError) as raises:
compile_isolated(use_c_untyped, [types.double])
self.assertIn("ctypes function '_numba_test_exp' doesn't define its argument types",
str(raises.exception))
def test_python_call_back(self):
mydct = {'what': 1232121}
def call_me_maybe(arr):
return mydct[arr[0].decode('ascii')]
# Create a callback into the python interpreter
py_call_back = CFUNCTYPE(c_int, py_object)(call_me_maybe)
def pyfunc(a):
what = py_call_back(a)
return what
cfunc = jit(nopython=True, nogil=True)(pyfunc)
arr = np.array(["what"], dtype='S10')
self.assertEqual(pyfunc(arr), cfunc(arr))
def test_python_call_back_threaded(self):
def pyfunc(a, repeat):
out = 0
for _ in range(repeat):
out += py_call_back(a)
return out
cfunc = jit(nopython=True, nogil=True)(pyfunc)
arr = np.array(["what"], dtype='S10')
repeat = 1000
expected = pyfunc(arr, repeat)
outputs = []
# Warm up
cfunc(arr, repeat)
# Test the function in multiple threads to exercise the
# GIL ensure/release code
def run(func, arr, repeat):
outputs.append(func(arr, repeat))
threads = [threading.Thread(target=run, args=(cfunc, arr, repeat))
for _ in range(10)]
# Start threads
for th in threads:
th.start()
# End threads
for th in threads:
th.join()
# Check results
for got in outputs:
self.assertEqual(expected, got)
def test_passing_array_ctypes_data(self):
"""
Test the ".ctypes.data" attribute of an array can be passed
as a "void *" parameter.
"""
def pyfunc(arr):
return c_take_array_ptr(arr.ctypes.data)
cfunc = jit(nopython=True, nogil=True)(pyfunc)
arr = np.arange(5)
expected = pyfunc(arr)
got = cfunc(arr)
self.assertEqual(expected, got)
def check_array_ctypes(self, pyfunc):
cfunc = jit(nopython=True)(pyfunc)
arr = np.linspace(0, 10, 5)
expected = arr ** 2.0
got = cfunc(arr)
self.assertPreciseEqual(expected, got)
return cfunc
def test_passing_array_ctypes_voidptr(self):
"""
Test the ".ctypes" attribute of an array can be passed
as a "void *" parameter.
"""
self.check_array_ctypes(use_c_vsquare)
def test_passing_array_ctypes_voidptr_pass_ptr(self):
"""
Test the ".ctypes" attribute of an array can be passed
as a pointer parameter of the right type.
"""
cfunc = self.check_array_ctypes(use_c_vcube)
# Non-compatible pointers are not accepted (here float32* vs. float64*)
with self.assertRaises(errors.TypingError) as raises:
cfunc(np.float32([0.0]))
self.assertIn("Invalid use of ExternalFunctionPointer",
str(raises.exception))
def test_storing_voidptr_to_int_array(self):
# Make C callback that returns a void*
cproto = CFUNCTYPE(c_void_p)
@cproto
def get_voidstar():
return 0xdeadbeef
# Make python functions that use the C callback
def pyfunc(a):
ptr = get_voidstar()
a[0] = ptr
return ptr
# Compile it
cres = compile_isolated(pyfunc, [types.uintp[::1]])
cfunc = cres.entry_point
# Setup inputs
arr_got = np.zeros(1, dtype=np.uintp)
arr_expect = arr_got.copy()
# Run functions
ret_got = cfunc(arr_got)
ret_expect = pyfunc(arr_expect)
# Check
self.assertEqual(ret_expect, 0xdeadbeef)
self.assertPreciseEqual(ret_got, ret_expect)
self.assertPreciseEqual(arr_got, arr_expect)
if __name__ == '__main__':
unittest.main()
|
uavcan_ip_interface.py
|
#!/usr/bin/env python3
"""
UAVCAN to TUN network adapter.
"""
import argparse
import os
import struct
import sys
import fcntl
import uavcan
import subprocess
import time
import logging
from queue import Queue, Empty
import threading
DSDL_DIR = os.path.join(os.path.dirname(__file__), "../uavcan_data_types/cvra")
def parse_args():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--interface",
"-i",
help="CAN Interface to use (e.g. can0 or /dev/ttyUSB0",
required=True,
)
parser.add_argument(
"--ip-address",
"-a",
default="10.0.0.1/24",
help="IP address of this interface (default 10.0.0.1/24)",
)
parser.add_argument(
"--packets-per-second",
type=int,
default=1000,
help="Max number of packet per second to transmit (protects the CAN bus).",
)
parser.add_argument("--dsdl", help="Path to DSDL directory", default=DSDL_DIR)
parser.add_argument(
"--verbose", "-v", action="store_true", help="Enable debug output."
)
return parser.parse_args()
def open_tun_interface(ip_addr):
if sys.platform == "linux":
fd = os.open("/dev/net/tun", os.O_RDWR)
# Values obtained with a test C program
IFF_TAP = 0x2
IFF_NO_PI = 4096
TUNSETIFF = 0x400454CA
# See man netdevice for struct ifreq
val = struct.pack("16sh15x", "uavcan0".encode(), IFF_TAP | IFF_NO_PI)
fcntl.ioctl(fd, TUNSETIFF, val)
subprocess.check_call("ip link set dev uavcan0 up".split())
subprocess.check_call("ip addr add dev uavcan0 {}".format(ip_addr).split())
return fd
elif sys.platform == "darwin": # macOS
tap = "tap0"
fd = os.open("/dev/" + tap, os.O_RDWR)
subprocess.call("ifconfig {} {}".format(tap, ip_addr).split())
return fd
else:
raise RuntimeError("supports mac and linux only")
class RateLimiter:
"""Simple rate limiter.
See https://stackoverflow.com/questions/667508/whats-a-good-rate-limiting-algorithm
"""
def __init__(self, max_rate):
self.max_rate = max_rate
self.quota = max_rate
self.last_time = time.time()
def check(self) -> bool:
"""Checks if we are allowed to proceed based on max rate."""
t = time.time()
dt, self.last_time = t - self.last_time, t
self.quota += self.max_rate * dt
self.quota = min(self.quota, self.max_rate)
# If we don't have quota left, forbid the transaction
if self.quota <= 1.0:
return False
# If we still have quota, take one from it and allow the transaction
self.quota -= 1.0
return True
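# Illustrative sketch (not part of the original adapter): exercising the
# token-bucket RateLimiter defined above. The helper name and numbers are
# assumptions for demonstration only.
def _demo_rate_limiter(max_rate=5, burst=20):
    limiter = RateLimiter(max_rate)
    # The quota starts full (max_rate tokens), so only the first few packets of a
    # burst are admitted; the rest are dropped until the quota refills over time
    # at max_rate tokens per second.
    return sum(1 for _ in range(burst) if limiter.check())  # roughly max_rate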
def rx_thread(tun_fd, queue, max_packet_per_second):
limiter = RateLimiter(max_packet_per_second)
while True:
packet = os.read(tun_fd, 1500)
if limiter.check():
queue.put(packet)
else:
logging.debug("Dropped packet")
def node_thread(tun_fd, node, can_to_tap, tap_to_can):
def msg_callback(event):
msg = event.message
can_to_tap.put(msg.data)
node.add_handler(uavcan.thirdparty.cvra.uwb_beacon.DataPacket, msg_callback)
while True:
# A timeout of 0 means only process frames that are immediately
# available
try:
node.spin(timeout=0)
except uavcan.transport.TransferError:
logging.warning("uavcan exception, ignoring...")
pass
try:
packet = tap_to_can.get(block=False)
except Empty:
continue
# Checks that the packet fits in a UWB frame
assert len(packet) < 1024
# Finally send it over CAN
msg = uavcan.thirdparty.cvra.uwb_beacon.DataPacket()
msg.dst_addr = 0xFFFF # broadcast
msg.data = list(packet)
node.broadcast(msg)
def tx_thread(tun_fd, queue):
while True:
packet = queue.get()
os.write(tun_fd, bytes(packet))
def main():
args = parse_args()
level = logging.INFO
if args.verbose:
level = logging.DEBUG
logging.basicConfig(level=level)
if os.getuid() != 0:
logging.error("must run as root.")
sys.exit(1)
uavcan.load_dsdl(args.dsdl)
tun_fd = open_tun_interface(args.ip_address)
node = uavcan.make_node(args.interface, node_id=42)
tap_to_can = Queue()
can_to_tap = Queue()
logging.info("waiting for packets, press 3x Ctrl-C to stop...")
rx_thd = threading.Thread(
target=rx_thread, args=(tun_fd, tap_to_can, args.packets_per_second)
)
tx_thd = threading.Thread(target=tx_thread, args=(tun_fd, can_to_tap))
node_thd = threading.Thread(
target=node_thread, args=(tun_fd, node, can_to_tap, tap_to_can)
)
rx_thd.start()
tx_thd.start()
node_thd.start()
node_thd.join()
rx_thd.join()
tx_thd.join()
if __name__ == "__main__":
main()
|
alert_events.py
|
import pika
import json
import iot_logging
from threading import Thread
from datetime import datetime
import boto3
from flask import render_template
from flask_mail import Message
from iot_api import app
from iot_api import mail
from iot_api import rabbit_parameters
from iot_api.user_api.model import User, Alert, AlertType
from iot_api.user_api.models import (
Notification, NotificationData, NotificationPreferences, NotificationDataCollectorSettings,
NotificationAlertSettings, NotificationAssetImportance, NotificationAdditionalEmail,
NotificationAdditionalTelephoneNumber, NotificationAssetTag
)
from iot_api.user_api.repository import AssetRepository, DeviceRepository, GatewayRepository
#from iot_api.user_api.enums import WebUrl
from iot_api.user_api.websocket.notifications import emit_notification_event
from iot_api.user_api.websocket.alerts import emit_alert_event
from iot_api import config
import smtplib
import email.utils
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
LOG = iot_logging.getLogger(__name__)
if config.SEND_SMS:
try:
sns = boto3.client('sns')
    except Exception:
LOG.error("Unable to connect with Amazon Web Services")
def subscribe_alert_consumers():
thread = Thread(target = consumer)
thread.setDaemon(True)
thread.start()
def consumer():
queue="alert_events"
while(True):
try:
LOG.debug('Creating new connection to queue alert_events')
connection = pika.BlockingConnection(rabbit_parameters)
channel = connection.channel()
channel.queue_declare(queue=queue)
channel.basic_consume(on_message_callback=handle_alert_events, queue=queue, auto_ack=True)
channel.start_consuming()
except Exception as e:
LOG.error(f"Error on connection to queue alert_events:\n{e}")
def handle_alert_events(ch, method, properties, body):
event = None
try:
event = json.loads(body)
except Exception:
LOG.error("Couldn't deserialize event")
if not event:
return
alert_id = event.get('alert_id')
organization_id = event.get('organization_id')
data_collector_id = event.get('data_collector_id')
event_type = event.get('event_type')
alert_type_code = event.get('alert_type')
phones = []
emails = []
if event_type == 'NEW':
alert_type = AlertType.find_one(alert_type_code)
users = User.find_all_user_by_organization_id(organization_id)
users = list(filter(lambda x:(x.blocked==False and x.deleted==False and x.active == True),users))
emit_alert_event({'alert_id': alert_id}, organization_id)
try:
alert = Alert.find_one(alert_id)
device = None
gateway = None
if alert and alert.device_id:
device = AssetRepository.get_with(alert.device_id, "device")
if alert and alert.gateway_id:
gateway = AssetRepository.get_with(alert.gateway_id, "gateway")
except Exception as e:
LOG.error(f"Error {e} on alert assets search {alert}. Ignoring device/gateway related preferences")
device = None
gateway = None
for user in users:
alert_settings = NotificationAlertSettings.find_one(user.id)
dc_settings = NotificationDataCollectorSettings.find_one(user_id = user.id, data_collector_id = data_collector_id)
preferences = NotificationPreferences.find_one(user.id)
# Check whether the alert assets are important for the user or not
try:
asset_importance = NotificationAssetImportance.get_with(user.id)
is_important_for_user = False
if asset_importance and device is not None:
is_important_for_user = getattr(asset_importance, device.importance.value.lower())
elif asset_importance:
is_important_for_user = getattr(asset_importance, gateway.importance.value.lower())
except Exception as e:
LOG.error(f"Error {e} on NotificationAssetImportance search for user {user.id}. Ignoring asset_importance preference")
is_important_for_user = True
# Check whether the alert assets contain all the tags in user notification preferences or not
try:
asset_tags = NotificationAssetTag.find_all_with(user_id = user.id)
tag_id_list = [asset_tag.tag_id for asset_tag in asset_tags]
                if device:
                    has_all_tags = DeviceRepository.has_all_tags(device.id, tag_id_list)
                elif gateway:
                    has_all_tags = GatewayRepository.has_all_tags(gateway.id, tag_id_list)
                else:
                    has_all_tags = True
except Exception as e:
LOG.error(f"Error {e} on handling NotificationAssetTag preferences for user {user.id}. Ignoring this preference")
has_all_tags = True
if alert_settings and getattr(alert_settings, alert_type.risk.lower()) and is_important_for_user and has_all_tags and dc_settings and dc_settings.enabled:
data = NotificationData.find_one(user.id)
notification = Notification(type = 'NEW_ALERT', alert_id = alert_id, user_id=user.id, created_at = datetime.now())
notification.save()
if data and data.ws_sid and preferences and preferences.push:
emit_notification_event(notification.to_dict(), data.ws_sid)
if preferences:
if preferences.sms:
if user.phone and not user.phone in phones:
phones.append(user.phone)
additional = NotificationAdditionalTelephoneNumber.find(user_id = user.id)
for item in additional:
if item.active and not item.phone in phones:
phones.append(item.phone)
if preferences.email:
if user.email and not user.email in emails:
emails.append(user.email)
additional = NotificationAdditionalEmail.find(user_id = user.id)
for item in additional:
if item.active and not item.email in emails:
emails.append(item.email)
# Send a SMS message to the specified phone number
for phone in phones:
if config.SEND_SMS:
sns.publish(
PhoneNumber=phone,
Message=f'New notification from {config.BRAND_NAME}. There\'s a new alert: {alert_type.name}. You can check this accessing to {config.BRAND_URL}',
)
if len(emails) > 0:
with app.app_context():
msg = MIMEMultipart('alternative')
msg['Subject'] = f"New {config.BRAND_NAME} Notification"
msg['From'] = email.utils.formataddr((config.SMTP_SENDER_NAME, config.SMTP_SENDER))
part = MIMEText(
render_template(
'notification.html',
brand_name=config.BRAND_NAME,
full_url=config.BRAND_URL,
alert_type=alert_type.name
), 'html'
)
msg.attach(part)
server = smtplib.SMTP(config.SMTP_HOST, config.SMTP_PORT)
#server.set_debuglevel(1)
server.ehlo()
server.starttls()
#stmplib docs recommend calling ehlo() before & after starttls()
server.ehlo()
server.login(config.SMTP_USERNAME, config.SMTP_PASSWORD)
                for email_user in emails:
                    try:
                        # Message headers append rather than replace, so drop any
                        # previously set recipient before adding the next one.
                        del msg['To']
                        msg['To'] = email_user
                        server.sendmail(config.SMTP_SENDER, email_user, msg.as_string())
                    except Exception as exc:
                        server.close()
                        print(exc)
server.close()
subscribe_alert_consumers()
|
scratch_to_bot.py
|
#! /usr/bin/python
# Connect to Scratch, read any broadcasts and handle any that are intended for the Camera bot
import time
import py_websockets_bot
import scratch
import socket
import platform
import subprocess
import psutil
import threading
import logging
logging.basicConfig(level=logging.DEBUG,
format='(%(threadName)-10s) %(message)s',
)
DEBUG = False
ROBOT_HOSTNAME = "robot.local"
VLC_ATTRIBUTES = 'http://robot.local:8080/?action=stream'
VLC_PROCESS_NAME = "VLC"
TIME_BETWEEN_SCRATCH_RECONNECTION_ATTEMPTS = 1.0
ROBOT_UPDATE_SLEEP = 0.1
SPEED_FACTOR = 10.0 # Converts cm/s to motor speed
DEFAULT_SPEED = 4.0 # cm/s
DEFAULT_DISTANCE = 4.0
MOVEMENT_SLEEP = 0.1 # Seconds to sleep between setting motors
ANGULAR_VELOCITY = 180.0 # Degrees per second on turn
ANGULAR_SPEED_FACTOR = 8.0 # Converts degrees per second to motor speed
ANGULAR_SLEEP = 0.05 # Seconds to sleep between setting motors
NECK_CENTRAL_ANGLE = 90.0 # Pan offset from centre in degrees
NECK_LIMIT = 80 # Maximum angle neck can pan from adjusted centre
HEAD_CENTRAL_ANGLE = 0.0 # Tilt offset from centre in degrees
HEAD_LIMIT = 80 # Maximum angle head can tilt from adjusted centre
Input_buffer = []
Stop_motors_immediately = False
MAX_LENGTH_INPUT_BUFFER = 100
Exit_program = False
############################# ScratchReader ########################################
#
# This class runs in a separate thread and reads in commands from Scratch, adding
# them to Input_buffer, a global list that is shared with the main thread.
# When a "robot stop" command is received from Scratch, this is placed at the
# the front of the list, and any subsequent motor control commands are cancelled.
#
# If the buffer gets full, then subsequent commands are dropped.
#
class ScratchReader(threading.Thread):
def __init__(self, buffer_lock, stop_motors_lock):
self.scratch = scratch.Scratch()
self.buffer_lock = buffer_lock
self.stop_motors_lock = stop_motors_lock
self.exit_reader = False
print "ScratchReader connected to Scratch"
threading.Thread.__init__(self, name="ScratchReader")
self.daemon = True
def run(self):
while not self.exit_reader:
raw_message = self.scratch.receive()
print "ScratchReader read message", raw_message
self.add_message_to_buffer(raw_message)
print "Scratch Reader finished"
def add_message_to_buffer(self, raw_message):
global Stop_motors_immediately
stop_motors = False
tokens = split_into_tokens(raw_message)
print "Tokenized message", raw_message
with self.buffer_lock:
if self.message_requires_immediate_execution(tokens):
self.schedule_for_immediate_execution(tokens)
stop_motors = True
else:
self.schedule_at_end_of_queue(tokens)
if stop_motors:
with self.stop_motors_lock:
Stop_motors_immediately = True
def message_requires_immediate_execution(self, tokens):
if len(tokens) < 3:
return False
return ((tokens[1]=='robot') and (tokens[2]=='stop'))
def message_is_a_movement_command(self, tokens):
return ((tokens[1]=='robot') and ((tokens[2]=='forward') or (tokens[2]=='backward') or (tokens[2]=='turn') or (tokens[2]=='neck') or (tokens[2]=='head')))
def schedule_at_end_of_queue(self, tokens):
global Input_buffer
if len(Input_buffer) < MAX_LENGTH_INPUT_BUFFER:
Input_buffer.append(tokens)
def schedule_for_immediate_execution(self, tokens):
global Input_buffer
Input_buffer.insert(0, tokens)
i = 0
print "Purging movement commands from buffer length", len(Input_buffer)
while (i < len(Input_buffer)):
print "Checking", Input_buffer[i]
if self.message_is_a_movement_command(Input_buffer[i]):
print "deleting"
del Input_buffer[i]
else:
i = i + 1
def sensor_update(self, params):
print "updating sensors"
self.scratch.sensorupdate(params)
def shutdown(self):
print "Shutting down Scratch Reader"
self.exit_reader = True
############################# End of ScratchReader #################################
############################# BufferReader #########################################
#
# An object created with this class runs on the main thread and reads in buffered
# Scratch commands one at a time. It uses a lock on the globally scoped input buffer
# to coordinate with ScratchReader which is operating on a separate thread.
#
class BufferReader():
def __init__(self, buffer_lock):
self.lock = buffer_lock
def get_next_command(self):
command_tokens = None
with self.lock:
if len(Input_buffer) > 0:
command_tokens = Input_buffer[0]
del Input_buffer[0]
return command_tokens
############################# End of BufferReader ##################################
############################# Robot Class ##########################################
class Robot:
def __init__(self, stop_motors_lock, sensor_update ):
print "Initializing robot class"
hostname = ROBOT_HOSTNAME
self.speed = DEFAULT_SPEED
self.neck_angle = 0
self.head_angle = 0
self.stop_motors_lock = stop_motors_lock
self.sensor_update_function = sensor_update
if not DEBUG:
self.bot = py_websockets_bot.WebsocketsBot( hostname )
print "Connected to robot"
self.set_neck_and_head()
self.set_sensor_config()
self.exit_update = False
self.update_thread = threading.Thread(target=update_robot_loop, args=(self,))
self.update_thread.start()
def __enter__(self):
print "__enter__"
    def __exit__(self, exc_type, exc_value, traceback):
if not DEBUG: self.bot.disconnect()
def set_sensor_config(self):
print "Setting sensors"
if not DEBUG:
# Configure the sensors on the robot
sensorConfiguration = py_websockets_bot.mini_driver.SensorConfiguration(
configD12=py_websockets_bot.mini_driver.PIN_FUNC_ULTRASONIC_READ)
# We set the sensor configuration by getting the current robot configuration and modifying it.
# In this way we don't trample on any other configuration settings
robot_config = self.bot.get_robot_config()
robot_config.miniDriverSensorConfiguration = sensorConfiguration
self.bot.set_robot_config( robot_config )
def extract_distance(self, tokens):
distance = to_integer(tokens[3]) if len(tokens)>=4 else DEFAULT_DISTANCE
return distance
def extract_angle(self, tokens):
angle = 0.0
print tokens
if len(tokens)>=3:
parameter = tokens[2]
print "parameter", parameter
if parameter=='left':
angle = -90.0
elif parameter=='right':
angle = 90.0
elif (parameter=='centre') or (parameter=='center'):
angle = 0.0
else:
angle = to_integer(parameter)
return angle
def extract_on_or_off(self, tokens, position=2):
result = None
if (len(tokens)>=position+1):
on_or_off = tokens[position]
if (on_or_off=='on') or (on_or_off=='off'):
result = on_or_off
return result
def robot_forward(self, distance):
global Stop_motors_immediately
if distance < 0:
direction = -1
else:
direction = 1
print "Move robot forward ", distance, " cm"
if self.speed != 0:
print "Speed", self.speed
motor_time = abs(distance / float(self.speed))
print "Loop for ", motor_time, " seconds"
timeout = time.time() + motor_time
while True:
if time.time() > timeout:
break
motor_speed = self.speed * SPEED_FACTOR * direction
stop_motors = False
with self.stop_motors_lock:
if Stop_motors_immediately:
Stop_motors_immediately = False
stop_motors = True
if stop_motors:
print "Stopping motors immediately"
self.robot_set_motor_speeds(0.0, 0.0)
break
else:
self.robot_set_motor_speeds(motor_speed, motor_speed)
time.sleep(MOVEMENT_SLEEP)
print "Finished robot_forward ", distance
self.robot_set_motor_speeds( 0.0, 0.0 )
def robot_set_motor_speeds(self, left_motor_speed, right_motor_speed):
print "robot_set_motor_speeds", left_motor_speed, right_motor_speed
if not DEBUG:
self.bot.set_motor_speeds( left_motor_speed, right_motor_speed )
def robot_stop(self):
print "Stop robot motors"
self.robot_set_motor_speeds( 0.0, 0.0)
def robot_turn(self, angle):
print "Robot turn ", angle
motor_time = abs(angle / ANGULAR_VELOCITY)
print "Loop for ", motor_time, " seconds"
timeout = time.time() + motor_time
print "timeout ", timeout
motor_speed = self.speed * ANGULAR_SPEED_FACTOR
if angle <= 0:
left_motor_speed = -motor_speed
right_motor_speed = motor_speed
else:
left_motor_speed = motor_speed
right_motor_speed = -motor_speed
while True:
print "loop ", time.time()
if time.time() > timeout:
print "BREAK ************"
break
print "set_motor_speeds to ", left_motor_speed, right_motor_speed
if not DEBUG:
self.bot.set_motor_speeds( left_motor_speed, right_motor_speed )
time.sleep(ANGULAR_SLEEP)
if not DEBUG:
self.bot.set_motor_speeds( 0, 0 )
    def adjust_angle(self, angle, centre_adjust, limit):
if angle < -limit:
angle = -limit
elif angle > limit:
angle = limit
angle = angle + centre_adjust
return angle
def robot_neck(self, angle):
self.neck_angle = angle
print "Robot neck ", angle
self.set_neck_and_head()
def robot_head(self, angle):
self.head_angle = angle
print "Robot head ", angle
self.set_neck_and_head()
def set_neck_and_head(self):
adjusted_neck_angle = self.adjust_angle(self.neck_angle, NECK_CENTRAL_ANGLE, NECK_LIMIT)
print "Neck angle ", self.neck_angle, " Adjusted ", adjusted_neck_angle
adjusted_head_angle = self.adjust_angle(self.head_angle, HEAD_CENTRAL_ANGLE, HEAD_LIMIT)
print "Pan / tilt set to ", adjusted_neck_angle, adjusted_head_angle
if not DEBUG:
self.bot.set_neck_angles( pan_angle_degrees=adjusted_neck_angle, tilt_angle_degrees=adjusted_head_angle)
def handle_robot_command(self, tokens=[]):
print tokens
if len(tokens) > 1:
robot_command = tokens[1]
if robot_command=='forward':
self.robot_forward(self.extract_distance(tokens))
elif robot_command=='backward':
self.robot_forward(-self.extract_distance(tokens))
elif robot_command=='turn':
self.robot_turn(self.extract_angle(tokens))
elif robot_command=='neck':
self.robot_neck(self.extract_angle(tokens))
elif robot_command=='head':
self.robot_head(self.extract_angle(tokens))
elif robot_command=='stop':
self.robot_stop()
elif robot_command=='camera':
self.robot_camera(self.extract_on_or_off(tokens))
else:
print "Unknown command for robot"
else:
print "Empty command sent to robot"
def robot_camera(self, on_or_off):
print "on_or_off", on_or_off
if on_or_off=='on':
self.open_camera_window()
self.start_streaming_images()
elif on_or_off=='off':
self.stop_streaming_images()
self.close_camera_window()
def open_camera_window(self):
print "Open camera window"
open_vlc_instance()
def close_camera_window(self):
print "Close camera window"
kill_all_vlc_instances()
def start_streaming_images(self):
print "Start streaming images"
if not DEBUG:
self.bot.start_streaming_camera_images()
def stop_streaming_images(self):
print "Stop streaming images"
if not DEBUG:
self.bot.stop_streaming_camera_images()
def update(self):
print "update"
if not DEBUG:
#self.bot.update()
#bot.update() is not compatible with Mac OSX
self.bot._update_camera_keep_alive()
self.update_sensors()
def update_sensors(self):
if not DEBUG:
# Read sensors
status_dict, read_time = self.bot.get_robot_status_dict()
sensor_dict = status_dict[ "sensors" ]
ultrasonic = sensor_dict["ultrasonic"]
ultrasonic_distance = ultrasonic["data"]
print "Ultrasonic Distance ", ultrasonic_distance
self.sensor_update_function({'distance' : ultrasonic_distance})
def shutdown(self):
print "Shutting down robot"
self.exit_update = True
self.update_thread.join()
print "Robot shut down"
def exit_update_now(self):
return self.exit_update
############################# End of Robot Class ###################################
############################# Utility robot functions ##############################
def update_robot_loop(robot):
while (robot.exit_update_now()== False):
try:
robot.update()
time.sleep(ROBOT_UPDATE_SLEEP)
except KeyboardInterrupt:
print "Update thread received keyboard interrupt"
break
# except:
# print "update failed"
# break
print "Exiting robot update thread"
############################# End of Utility robot functions #######################
def to_integer(string):
try:
return int(string)
except ValueError:
return 0
def split_into_tokens(message):
command_type = message[0].lower()
scratch_command = message[1].lower()
tokens = [command_type] + scratch_command.split()
return tokens
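# Illustrative sketch (not part of the original script): a Scratch broadcast
# arrives as a (message type, command string) pair and is tokenized before being
# handled. The sample message and helper name are assumptions for demonstration.
def _demo_split_into_tokens():
    sample_message = ('broadcast', 'robot forward 10')
    # Yields ['broadcast', 'robot', 'forward', '10'], which the main loop then
    # routes to Robot.handle_robot_command.
    return split_into_tokens(sample_message)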
############################# Platform utility functions ###########################
def open_vlc_instance():
global VLC
kill_all_vlc_instances()
subprocess.Popen([VLC, VLC_ATTRIBUTES], stdout=subprocess.PIPE)
def kill_all_vlc_instances():
print "Preparing to kill open VLC windows"
for proc in psutil.process_iter():
if proc.name() == VLC_PROCESS_NAME:
print "Killing!"
proc.kill()
def adjust_to_platform():
global VLC
if platform.system()=='Darwin':
print "Running on Mac"
VLC = "/Applications/VLC.app/Contents/MacOS/VLC"
else:
print "Running on ", platform.system()
VLC = "vlc"
############################# Legacy functions ###################################
############################# End of Legacy functions ############################
############################# The Main Loop ######################################
if __name__ == "__main__":
adjust_to_platform()
while True:
try:
buffer_lock = threading.Lock()
stop_motors_lock = threading.Lock()
# Read Scratch commands on a separate thread and insert them into the Input_buffer
scratch_reader = ScratchReader(buffer_lock, stop_motors_lock)
scratch_reader.start()
# BufferProcessor pulls commands off the queue
buffer_reader = BufferReader(buffer_lock)
# This is the Robot
robot = Robot(stop_motors_lock, scratch_reader.sensor_update)
while True:
# Interpret Scratch commands on main thread
next_command = buffer_reader.get_next_command()
if next_command != None:
if next_command[0] == 'broadcast':
if (len(next_command) > 1) and (next_command[1] == 'robot'):
robot_tokens = next_command
del robot_tokens[0]
robot.handle_robot_command(robot_tokens)
elif next_command[0] == 'sensor-update':
print "TO DO: sensor update code"
else:
time.sleep(0.05)
except scratch.ScratchError:
print "No connection to Scratch."
time.sleep(TIME_BETWEEN_SCRATCH_RECONNECTION_ATTEMPTS)
except socket.error:
print "Socket error connecting to robot"
except KeyboardInterrupt:
print "User requested break"
break
print "Shutting down"
scratch_reader.shutdown()
robot.shutdown()
|
stim_thread.py
|
from time import time, sleep
from ble_client import StimClient
from threading import Thread
from multiprocessing import Process, Manager
CHUNK_SIZE = 20 # samples
STIM_FREQUENCY = 20 # Hz
STIM_PERIOD = 1 / STIM_FREQUENCY
class StimThread(Thread):
def run(self):
"""
Play stim by buffering & sending track over BLE.
To maintain calibration, play a zero-stim track while stim buffer is empty.
Sends ~1-second chunks each ~1-second
"""
self.msg_q = msg_q = Manager().Queue()
self.client_bt_process = Process(target=ble_process, args=[msg_q])
self.client_bt_process.start()
self.stim_buffer = []
self.ble_buffer = []
self.active = True
start_time = time()
n_sent = 0
n_added = 0
try:
# Maintain a constant zero-stim stream to keep timing aligned
while self.active:
sample_num = int((time() - start_time) * STIM_FREQUENCY)
# Add stim data or zero-voltage to ble buffer
n_to_add = sample_num - n_added
for _ in range(n_to_add):
self.ble_buffer.append(
self.stim_buffer.pop(0) if self.stim_buffer else 0.5
)
n_added += 1
# Send chunks to stimulator
n_to_send = sample_num - n_sent
if n_to_send >= CHUNK_SIZE:
to_send = self.ble_buffer[:n_to_send]
del self.ble_buffer[:n_to_send]
msg_q.put(encode(to_send))
n_sent += n_to_send
sleep(STIM_PERIOD)
except KeyboardInterrupt:
print("exiting stim thread")
self.stop()
def stop(self):
self.active = False
self.msg_q.put("close")
def stimulate(self, track):
self.stim_buffer += track
def reconnect(self):
self.msg_q.put("reconnect")
def config(self):
self.msg_q.put("config")
def stim_mode(self):
self.msg_q.put("stim")
def ble_process(msg_q):
client = StimClient(msg_q)
client.connect()
client.async_sender()
def encode(stim_data):
chunk_string = ""
# incoming is floats form 0 to 1, convert this to integer 0-255, then to hex string
for sample in stim_data:
transform_sample = int(sample * 255)
hex_sample_string = hex(transform_sample)[2:]
if len(hex_sample_string) < 2: # we want all hex numbers to have 2 digits
hex_sample_string = "0" + hex_sample_string
chunk_string += hex_sample_string
return chunk_string
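# Illustrative sketch (not part of the original module): encode() maps floats in
# [0, 1] to two-digit hex bytes, so a chunk of samples becomes one compact string.
# The helper name is an assumption for demonstration only.
def _demo_encode():
    # 0.0 -> "00", 0.5 -> "7f" (int(0.5 * 255) == 127), 1.0 -> "ff"
    return encode([0.0, 0.5, 1.0])  # -> "007fff"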
|
test_pye_performance.py
|
# Copyright 2015 Falldog Hsieh <falldog7@gmail.com>
# Modifications copyright 2021 Andrey Martyanov <andrey@martyanov.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import time
import logging
import unittest
from zipfile import ZipFile
from multiprocessing import Process, Queue
from os.path import dirname, abspath, join
from test import base
CUR_DIR = abspath(dirname(__file__))
ROOT_DIR = abspath(join(CUR_DIR, '..'))
DATA_DIR = join(CUR_DIR, 'data')
REQUEST_ZIP = join(DATA_DIR, 'requests-2.12.4.zip')
REQUEST_MAIN = join(DATA_DIR, 'main_requests.py')
PYADMIN_PATH = join(ROOT_DIR, 'pyconcrete-admin.py')
RUN_COUNT = int(os.environ.get('TEST_PYE_PERFORMANCE_COUNT', '5'))
logger = logging.getLogger('pyconcrete')
def main_requests(import_concrete, q):
"""
testing main function for multiprocessing
purpose: testing import without exception
"""
if import_concrete:
import pyconcrete
t = time.time()
import requests
from requests.adapters import HTTPAdapter
from requests.auth import HTTPDigestAuth, _basic_auth_str
from requests.compat import (Morsel, cookielib, getproxies, str, urljoin, urlparse, is_py3, builtin_str)
from requests.cookies import cookiejar_from_dict, morsel_to_cookie
from requests.exceptions import (
ConnectionError, ConnectTimeout, InvalidSchema, InvalidURL, MissingSchema, ReadTimeout, Timeout, RetryError
)
from requests.models import PreparedRequest
from requests.structures import CaseInsensitiveDict
from requests.sessions import SessionRedirectMixin
from requests.models import urlencode
from requests.hooks import default_hooks
t = time.time() - t
q.put(requests.__file__)
q.put(t)
@unittest.skipIf(not os.path.exists(REQUEST_ZIP), "requests zip file doesn't exist")
class TestPerformance(base.TestPyConcreteBase):
def setUp(self):
super(TestPerformance, self).setUp()
zip = ZipFile(REQUEST_ZIP)
zip.extractall(self.tmp_dir)
zip.close()
self.req_dir = join(self.tmp_dir, 'requests')
base.touch(join(self.req_dir, '__init__.py'))
def _test_requests(self, import_concrete):
sys.path.insert(0, self.req_dir)
q = Queue()
p = Process(target=main_requests, args=(import_concrete, q))
p.start()
path = q.get(timeout=5)
t = q.get(timeout=2)
p.join()
self.assertTrue(path.startswith(self.req_dir), "wrong import path of requests = %s" % path)
return t
def test_requests_pye(self):
self.lib_compile_pye(self.req_dir, remove_py=True, remove_pyc=True)
t = 0.0
for i in range(RUN_COUNT):
t += self._test_requests(True)
logger.info('test import request (pye) [count=%d] total time = %.2f, avg time = %.2f' % (RUN_COUNT, t, t/RUN_COUNT))
def test_requests_pyc(self):
self.lib_compile_pyc(self.req_dir, remove_py=True)
t = 0.0
for i in range(RUN_COUNT):
t += self._test_requests(False)
logger.info('test import request (pyc) [count=%d] total time = %.2f, avg time = %.2f' % (RUN_COUNT, t, t/RUN_COUNT))
def test_requests_pyc_with_import_hooker(self):
self.lib_compile_pyc(self.req_dir, remove_py=True)
t = 0.0
for i in range(RUN_COUNT):
t += self._test_requests(True)
logger.info('test import request (pyc) (import hooker) [count=%d] total time = %.2f, avg time = %.2f' % (RUN_COUNT, t, t/RUN_COUNT))
def test_requests_py(self):
t = 0.0
for i in range(RUN_COUNT):
t += self._test_requests(False)
logger.info('test import request (py) [count=%d] total time = %.2f, avg time = %.2f' % (RUN_COUNT, t, t/RUN_COUNT))
def test_requests_py_with_import_hooker(self):
t = 0.0
for i in range(RUN_COUNT):
t += self._test_requests(True)
logger.info('test import request (py) (import hooker) [count=%d] total time = %.2f, avg time = %.2f' % (RUN_COUNT, t, t/RUN_COUNT))
if __name__ == '__main__':
unittest.main()
|
dispatcher.py
|
# Copyright (c) 2019-2020 SAP SE or an SAP affiliate company. All rights reserved. This file is
# licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import functools
import logging
import random
import threading
import time
import typing
import traceback
import ccc.elasticsearch
import ccc.github
import ccc.secrets_server
import ci.util
import concourse.client
from github3.exceptions import NotFoundError
from .pipelines import update_repository_pipelines
from concourse.enumerator import JobMappingNotFoundError
from github.util import GitHubRepositoryHelper
from model import ConfigFactory
from model.base import ConfigElementNotFoundError
from model.webhook_dispatcher import WebhookDispatcherConfig
from concourse.client.util import (
determine_jobs_to_be_triggered,
jobs_not_triggered,
pin_resource_and_trigger_build,
PinningFailedError,
PinningUnnecessary,
)
from concourse.client.model import (
ResourceType,
)
from concourse.model.job import AbortObsoleteJobs
from .model import (
AbortConfig,
Pipeline,
PullRequestAction,
PullRequestEvent,
PushEvent,
RefType,
)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class GithubWebhookDispatcher:
def __init__(
self,
cfg_set,
whd_cfg: WebhookDispatcherConfig
):
self.cfg_set = cfg_set
self.whd_cfg = whd_cfg
self.cfg_factory = ci.util.ctx().cfg_factory()
logger.info(f'github-whd initialised for cfg-set: {cfg_set.name()}')
def concourse_clients(self):
for concourse_config_name in self.whd_cfg.concourse_config_names():
concourse_cfg = self.cfg_factory.concourse(concourse_config_name)
concourse_uam_cfg = self.cfg_factory.concourse_uam(concourse_cfg.concourse_uam_config())
job_mapping_set = self.cfg_factory.job_mapping(concourse_cfg.job_mapping_cfg_name())
for job_mapping in job_mapping_set.job_mappings().values():
yield concourse.client.from_cfg(
concourse_cfg=concourse_cfg,
concourse_uam_cfg=concourse_uam_cfg,
team_name=job_mapping.team_name(),
)
def dispatch_create_event(self, create_event):
ref_type = create_event.ref_type()
if not ref_type == RefType.BRANCH:
logger.info(f'ignored create event with type {ref_type}')
return
# todo: rename parameter
self._update_pipeline_definition(push_event=create_event)
def dispatch_push_event(self, push_event):
if self._pipeline_definition_changed(push_event):
self._update_pipeline_definition(push_event)
logger.debug('before push-event dispatching')
self.abort_running_jobs_if_configured(push_event)
def _check_resources():
for concourse_api in self.concourse_clients():
logger.debug(f'using concourse-api: {concourse_api}')
resources = self._matching_resources(
concourse_api=concourse_api,
event=push_event,
)
logger.debug('triggering resource-check')
self._trigger_resource_check(concourse_api=concourse_api, resources=resources)
thread = threading.Thread(target=_check_resources)
thread.start()
def _update_pipeline_definition(self, push_event):
try:
try:
update_repository_pipelines(
repo_url=push_event.repository().repository_url(),
cfg_set=self.cfg_set,
whd_cfg=self.whd_cfg,
)
except (JobMappingNotFoundError, ConfigElementNotFoundError) as e:
                # A config element was missing or no JobMapping for the given repository was present.
# Print warning, reload and try again
logger.warning(
f'failed to update pipeline definition: {e}. Will reload config and try again.'
)
# Attempt to fetch latest cfg from SS and replace it
raw_dict = ccc.secrets_server.SecretsServerClient.default().retrieve_secrets()
factory = ConfigFactory.from_dict(raw_dict)
self.cfg_set = factory.cfg_set(self.cfg_set.name())
# retry
update_repository_pipelines(
repo_url=push_event.repository().repository_url(),
cfg_set=self.cfg_set,
whd_cfg=self.whd_cfg,
)
except BaseException as be:
logger.warning(f'failed to update pipeline definition - ignored {be}')
import traceback
try:
traceback.print_exc()
except BaseException:
pass # ignore
def _pipeline_definition_changed(self, push_event):
if '.ci/pipeline_definitions' in push_event.modified_paths():
return True
return False
def determine_affected_pipelines(self, push_event) -> typing.Generator[Pipeline, None, None]:
'''yield each concourse pipeline that may be affected by the given push-event.
'''
repo = push_event.repository()
repo_url = repo.repository_url()
repo_enumerator = concourse.enumerator.GithubRepositoryDefinitionEnumerator(
repository_url=repo_url,
cfg_set=self.cfg_set,
)
try:
definition_descriptors = [d for d in repo_enumerator.enumerate_definition_descriptors()]
except NotFoundError:
logger.warning(
f"Unable to access repository '{repo_url}' on github '{repo.github_host()}'. "
"Please make sure the repository exists and the technical user has the necessary "
"permissions to access it."
)
definition_descriptors = []
for descriptor in definition_descriptors:
# need to merge and consider the effective definition
effective_definition = descriptor.pipeline_definition
for override in descriptor.override_definitions:
effective_definition = ci.util.merge_dicts(effective_definition, override)
yield Pipeline(
pipeline_name=descriptor.effective_pipeline_name(),
target_team=descriptor.concourse_target_team,
effective_definition=effective_definition,
)
def matching_client(self, team):
for c in self.concourse_clients():
if c.routes.team == team:
return c
def abort_running_jobs_if_configured(self, push_event):
builds_to_consider = 5
for pipeline in self.determine_affected_pipelines(
push_event
):
if not (client := self.matching_client(pipeline.target_team)):
raise RuntimeError(
f"No matching Concourse client found for team '{pipeline.target_team}'"
)
pipeline_config = client.pipeline_cfg(pipeline.pipeline_name)
resources = [
r for r in pipeline_config.resources
if ResourceType(r.type) in (ResourceType.GIT, ResourceType.PULL_REQUEST)
]
for job in determine_jobs_to_be_triggered(*resources):
if (
not pipeline.effective_definition['jobs'].get(job.name)
or not 'abort_outdated_jobs' in pipeline.effective_definition['jobs'][job.name]
):
continue
abort_cfg = AbortConfig.from_dict(
pipeline.effective_definition['jobs'][job.name]
)
if abort_cfg.abort_obsolete_jobs is AbortObsoleteJobs.NEVER:
continue
elif (
abort_cfg.abort_obsolete_jobs is AbortObsoleteJobs.ON_FORCE_PUSH_ONLY
and not push_event.is_forced_push()
):
continue
elif abort_cfg.abort_obsolete_jobs is AbortObsoleteJobs.ALWAYS:
pass
else:
raise NotImplementedError(abort_cfg.abort_obsolete_jobs)
running_builds = [
b for b in client.job_builds(pipeline.pipeline_name, job.name)
if b.status() is concourse.client.model.BuildStatus.RUNNING
][:builds_to_consider]
for build in running_builds:
if build.plan().contains_version_ref(push_event.previous_ref()):
logger.info(
f"Aborting obsolete build '{build.build_number()}' for job '{job.name}'"
)
client.abort_build(build.id())
def dispatch_pullrequest_event(self, pr_event):
if not pr_event.action() in (
PullRequestAction.OPENED,
PullRequestAction.REOPENED,
PullRequestAction.LABELED,
PullRequestAction.SYNCHRONIZE,
):
return logger.info(f'ignoring pull-request action {pr_event.action()}')
def _process_pr_event():
for concourse_api in self.concourse_clients():
resources = list(self._matching_resources(
concourse_api=concourse_api,
event=pr_event,
))
if len(resources) == 0:
continue
if (
pr_event.action() in [PullRequestAction.OPENED, PullRequestAction.SYNCHRONIZE]
and not self._set_pr_labels(pr_event, resources)
):
logger.warning(
f'Unable to set required labels for PR #{pr_event.number()} for '
f'repository {pr_event.repository().repository_path()}. Will not trigger '
'resource check.'
)
continue
logger.info(f'triggering resource check for PR number: {pr_event.number()}')
self._trigger_resource_check(concourse_api=concourse_api, resources=resources)
self._ensure_pr_resource_updates(
concourse_api=concourse_api,
pr_event=pr_event,
resources=resources,
)
# Give concourse a chance to react
time.sleep(random.randint(5,10))
self.handle_untriggered_jobs(pr_event=pr_event, concourse_api=concourse_api)
thread = threading.Thread(target=_process_pr_event)
thread.start()
def _trigger_resource_check(self, concourse_api, resources):
logger.debug('_trigger_resource_check')
for resource in resources:
logger.info('triggering resource check for: ' + resource.name)
try:
concourse_api.trigger_resource_check(
pipeline_name=resource.pipeline_name(),
resource_name=resource.name,
)
except Exception:
traceback.print_exc()
def _set_pr_labels(self, pr_event, resources) -> bool:
        '''
        Return True if the required labels were set (or none were required).
        '''
required_labels = {
resource.source.get('label')
for resource in resources if resource.source.get('label') is not None
}
if not required_labels:
return True
repo = pr_event.repository()
github_host = repo.github_host()
repository_path = repo.repository_path()
pr_number = pr_event.number()
github_cfg = ccc.github.github_cfg_for_hostname(
cfg_factory=self.cfg_set,
host_name=github_host,
)
owner, name = repository_path.split('/')
try:
github_helper = GitHubRepositoryHelper(
owner=owner,
name=name,
github_cfg=github_cfg,
)
except NotFoundError:
logger.warning(
f"Unable to access repository '{repository_path}' on github '{github_host}'. "
"Please make sure the repository exists and the technical user has the necessary "
"permissions to access it."
)
return False
sender_login = pr_event.sender()['login']
if pr_event.action() is PullRequestAction.OPENED:
if github_helper.is_pr_created_by_org_member(pr_number):
logger.info(
f"New pull request by member of '{owner}' in '{repository_path}' found. "
f"Setting required labels '{required_labels}'."
)
github_helper.add_labels_to_pull_request(pr_number, *required_labels)
return True
else:
logger.debug(
f"New pull request by member in '{repository_path}' found, but creator is not "
f"member of '{owner}' - will not set required labels."
)
github_helper.add_comment_to_pr(
pull_request_number=pr_number,
comment=(
f"Thank you @{sender_login} for your contribution. Before I can start "
"building your PR, a member of the organization must set the required "
f"label(s) {required_labels}. Once started, you can check the build "
"status in the PR checks section below."
)
)
return False
elif pr_event.action() is PullRequestAction.SYNCHRONIZE:
if github_helper.is_org_member(organization_name=owner, user_login=sender_login):
logger.info(
f"Update to pull request #{pr_event.number()} by org member '{sender_login}' "
f" in '{repository_path}' found. Setting required labels '{required_labels}'."
)
github_helper.add_labels_to_pull_request(pr_number, *required_labels)
return True
else:
logger.debug(
f"Update to pull request #{pr_event.number()} by '{sender_login}' "
f" in '{repository_path}' found. Ignoring, since they are not an org member'."
)
return False
return False
def _matching_resources(self, concourse_api, event):
if isinstance(event, PushEvent):
resource_type = ResourceType.GIT
elif isinstance(event, PullRequestEvent):
resource_type = ResourceType.PULL_REQUEST
else:
raise NotImplementedError
resources = concourse_api.pipeline_resources(
concourse_api.pipelines(),
resource_type=resource_type,
)
for resource in resources:
if not resource.has_webhook_token():
continue
ghs = resource.github_source()
repository = event.repository()
if not ghs.hostname() == repository.github_host():
continue
if not ghs.repo_path().lstrip('/') == repository.repository_path():
continue
if isinstance(event, PushEvent):
if not event.ref().endswith(ghs.branch_name()):
continue
if msg := event.commit_message():
if (
not ghs.disable_ci_skip()
and any(skip in msg for skip in ('[skip ci]', '[ci skip]'))
):
logger.info(
f"Do not trigger resource {resource.name}. Found [skip ci] or [ci skip]"
)
continue
yield resource
def _ensure_pr_resource_updates(
self,
concourse_api,
pr_event,
resources,
retries=10,
sleep_seconds=3,
):
time.sleep(sleep_seconds)
retries -= 1
if retries < 0:
try:
self.log_outdated_resources(resources)
# ignore logging errors
except BaseException:
pass
outdated_resources_names = [r.name for r in resources]
logger.info(f'could not update resources {outdated_resources_names} - giving up')
return
def resource_versions(resource):
return concourse_api.resource_versions(
pipeline_name=resource.pipeline_name(),
resource_name=resource.name,
)
def is_up_to_date(resource, resource_versions):
# check if pr requires a label to be present
require_label = resource.source.get('label')
if require_label:
if require_label not in pr_event.label_names():
logger.info('skipping PR resource update (required label not present)')
# regardless of whether or not the resource is up-to-date, it would not
# be discovered by concourse's PR resource due to policy
return True
# assumption: PR resource is up-to-date if our PR-number is listed
# XXX hard-code structure of concourse-PR-resource's version dict
pr_numbers = map(lambda r: r.version()['pr'], resource_versions)
return str(pr_event.number()) in pr_numbers
# filter out all resources that are _not_ up-to-date (we only care about those).
# Also keep resources that currently fail to check so that we keep retrying those
outdated_resources = [
resource for resource in resources
if resource.failing_to_check()
or not is_up_to_date(resource, resource_versions(resource))
]
if not outdated_resources:
logger.info('no outdated PR resources found')
return # nothing to do
logger.info(f'found {len(outdated_resources)} PR resource(s) that require being updated')
self._trigger_resource_check(concourse_api=concourse_api, resources=outdated_resources)
logger.info(f'retriggered resource check will try again {retries} more times')
self._ensure_pr_resource_updates(
concourse_api=concourse_api,
pr_event=pr_event,
resources=outdated_resources,
retries=retries,
sleep_seconds=sleep_seconds*1.2,
)
def handle_untriggered_jobs(self, pr_event: PullRequestEvent, concourse_api):
for job, resource, resource_version in jobs_not_triggered(pr_event, concourse_api):
logger.info(
f'processing untriggered job {job.name=} of {resource.pipeline_name()=} '
f'{resource.name=} {resource_version.version()=}. Triggered by {pr_event.action()=}'
)
try:
pin_resource_and_trigger_build(
job=job,
resource=resource,
resource_version=resource_version,
concourse_api=concourse_api,
retries=3,
)
except PinningUnnecessary as e:
logger.info(e)
except PinningFailedError as e:
logger.warning(e)
@functools.lru_cache()
def els_client(self):
elastic_cfg = self.cfg_set.elasticsearch()
elastic_client = ccc.elasticsearch.from_cfg(elasticsearch_cfg=elastic_cfg)
return elastic_client
def log_outdated_resources(self, outdated_resources):
els_index = self.cfg_set.webhook_dispatcher_deployment().logging_els_index()
elastic_client = self.els_client()
date = datetime.datetime.utcnow().isoformat()
elastic_client.store_documents(
index=els_index,
body=[
{
'date': date,
'resource_name': resource.name,
'pipeline_name': resource.pipeline_name(),
}
for resource in outdated_resources
],
)
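# Illustrative sketch (not part of the dispatcher): the retry cadence used by
# _ensure_pr_resource_updates above, where every recursion stretches the sleep
# interval by a factor of 1.2. The helper name is an assumption for demonstration.
def _demo_retry_backoff(retries=10, sleep_seconds=3):
    # Returns the successive sleep intervals: 3.0, 3.6, 4.32, ... seconds.
    intervals = []
    while retries >= 0:
        intervals.append(round(sleep_seconds, 2))
        retries -= 1
        sleep_seconds *= 1.2
    return intervals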
|
client.py
|
import socket
from threading import Thread
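# Minimal chat client: one thread forwards stdin lines to the server while the
# other prints whatever the server sends back over the same connection.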
def send():
while True:
message = input()
conn.send(message.encode('utf-8'))
def get():
while True:
data = conn.recv(16384)
data = data.decode('utf-8')
print(data)
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.connect(("127.0.0.1", 5000))
Thread(target=send).start()
Thread(target=get).start()
|
rocket.py
|
# -*- coding: utf-8 -*-
# This file is part of the Rocket Web Server
# Copyright (c) 2011 Timothy Farrell
# Modified by Massimo Di Pierro
# Import System Modules
from __future__ import print_function
import sys
import errno
import socket
import logging
import platform
from gluon._compat import iteritems, to_bytes, StringIO
from gluon._compat import urllib_unquote, to_native
# Define Constants
VERSION = '1.2.6'
SERVER_NAME = socket.gethostname()
SERVER_SOFTWARE = 'Rocket %s' % VERSION
HTTP_SERVER_SOFTWARE = '%s Python/%s' % (
SERVER_SOFTWARE, sys.version.split(' ')[0])
BUF_SIZE = 16384
SOCKET_TIMEOUT = 10 # in secs
THREAD_STOP_CHECK_INTERVAL = 1 # in secs, How often should threads check for a server stop message?
IS_JYTHON = platform.system() == 'Java' # Handle special cases for Jython
IGNORE_ERRORS_ON_CLOSE = set([errno.ECONNABORTED, errno.ECONNRESET])
DEFAULT_LISTEN_QUEUE_SIZE = 5
DEFAULT_MIN_THREADS = 10
DEFAULT_MAX_THREADS = 0
DEFAULTS = dict(LISTEN_QUEUE_SIZE=DEFAULT_LISTEN_QUEUE_SIZE,
MIN_THREADS=DEFAULT_MIN_THREADS,
MAX_THREADS=DEFAULT_MAX_THREADS)
PY3K = sys.version_info[0] > 2
class NullHandler(logging.Handler):
"""A Logging handler to prevent library errors."""
def emit(self, record):
pass
if PY3K:
def b(val):
""" Convert string/unicode/bytes literals into bytes. This allows for
the same code to run on Python 2.x and 3.x. """
if isinstance(val, str):
return val.encode()
else:
return val
def u(val, encoding="us-ascii"):
""" Convert bytes into string/unicode. This allows for the
same code to run on Python 2.x and 3.x. """
if isinstance(val, bytes):
return val.decode(encoding)
else:
return val
else:
def b(val):
""" Convert string/unicode/bytes literals into bytes. This allows for
the same code to run on Python 2.x and 3.x. """
if isinstance(val, unicode):
return val.encode()
else:
return val
def u(val, encoding="us-ascii"):
""" Convert bytes into string/unicode. This allows for the
same code to run on Python 2.x and 3.x. """
if isinstance(val, str):
return val.decode(encoding)
else:
return val
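# Illustrative sketch (not part of Rocket): the b()/u() helpers above let the same
# call sites round-trip between bytes and text on both Python 2 and Python 3.
# The helper name is an assumption for demonstration only.
def _demo_b_u_roundtrip():
    raw = b("GET / HTTP/1.1")   # always a byte string
    text = u(raw)               # always a text/unicode string
    return raw, text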
# Import Package Modules
# package imports removed in monolithic build
__all__ = ['VERSION', 'SERVER_SOFTWARE', 'HTTP_SERVER_SOFTWARE', 'BUF_SIZE',
'IS_JYTHON', 'IGNORE_ERRORS_ON_CLOSE', 'DEFAULTS', 'PY3K', 'b', 'u',
'Rocket', 'CherryPyWSGIServer', 'SERVER_NAME', 'NullHandler']
# Monolithic build...end of module: rocket/__init__.py
# Monolithic build...start of module: rocket/connection.py
# Import System Modules
import sys
import time
import socket
try:
import ssl
has_ssl = True
except ImportError:
has_ssl = False
# Import Package Modules
# package imports removed in monolithic build
# TODO - This part is still very experimental.
# from .filelike import FileLikeSocket
class Connection(object):
__slots__ = [
'setblocking',
'sendall',
'shutdown',
'makefile',
'fileno',
'client_addr',
'client_port',
'server_port',
'socket',
'start_time',
'ssl',
'secure',
'recv',
'send',
'read',
'write'
]
def __init__(self, sock_tuple, port, secure=False):
self.client_addr, self.client_port = sock_tuple[1][:2]
self.server_port = port
self.socket = sock_tuple[0]
self.start_time = time.time()
self.ssl = has_ssl and isinstance(self.socket, ssl.SSLSocket)
self.secure = secure
if IS_JYTHON:
# In Jython we must set TCP_NODELAY here since it does not
# inherit from the listening socket.
# See: http://bugs.jython.org/issue1309
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.socket.settimeout(SOCKET_TIMEOUT)
self.shutdown = self.socket.shutdown
self.fileno = self.socket.fileno
self.setblocking = self.socket.setblocking
self.recv = self.socket.recv
self.send = self.socket.send
self.makefile = self.socket.makefile
if sys.platform == 'darwin':
self.sendall = self._sendall_darwin
else:
self.sendall = self.socket.sendall
def _sendall_darwin(self, buf):
pending = len(buf)
offset = 0
while pending:
try:
sent = self.socket.send(buf[offset:])
pending -= sent
offset += sent
except socket.error:
import errno
info = sys.exc_info()
if info[1].args[0] != errno.EAGAIN:
raise
return offset
# FIXME - this is not ready for prime-time yet.
# def makefile(self, buf_size=BUF_SIZE):
# return FileLikeSocket(self, buf_size)
def close(self):
if hasattr(self.socket, '_sock'):
try:
self.socket._sock.close()
except socket.error:
info = sys.exc_info()
                if info[1].args[0] != errno.EBADF:
raise info[1]
else:
pass
self.socket.close()
# Monolithic build...end of module: rocket/connection.py
# Monolithic build...start of module: rocket/filelike.py
# Import System Modules
import socket
# Import Package Modules
# package imports removed in monolithic build
class FileLikeSocket(object):
def __init__(self, conn, buf_size=BUF_SIZE):
self.conn = conn
self.buf_size = buf_size
self.buffer = StringIO()
self.content_length = None
if self.conn.socket.gettimeout() == 0.0:
self.read = self.non_blocking_read
else:
self.read = self.blocking_read
def __iter__(self):
return self
def recv(self, size):
while True:
try:
return self.conn.recv(size)
except socket.error:
exc = sys.exc_info()
e = exc[1]
# FIXME - Don't raise socket_errors_nonblocking or socket_error_eintr
if (e.args[0] not in set()):
raise
def next(self):
data = self.readline()
if data == '':
raise StopIteration
return data
def non_blocking_read(self, size=None):
# Shamelessly adapted from Cherrypy!
bufr = self.buffer
bufr.seek(0, 2)
if size is None:
while True:
data = self.recv(self.buf_size)
if not data:
break
bufr.write(data)
self.buffer = StringIO()
return bufr.getvalue()
else:
buf_len = self.buffer.tell()
if buf_len >= size:
bufr.seek(0)
data = bufr.read(size)
self.buffer = StringIO(bufr.read())
return data
self.buffer = StringIO()
while True:
remaining = size - buf_len
data = self.recv(remaining)
if not data:
break
n = len(data)
if n == size and not buf_len:
return data
if n == remaining:
bufr.write(data)
del data
break
bufr.write(data)
buf_len += n
del data
return bufr.getvalue()
def blocking_read(self, length=None):
if length is None:
if self.content_length is not None:
length = self.content_length
else:
length = 1
try:
data = self.conn.recv(length)
except:
data = b('')
return data
    def readline(self):
        data = b("")
        char = self.read(1)
        while char != b('\n') and char != b(''):
            data += char
            char = self.read(1)
        data += char
        return data
def readlines(self, hint="ignored"):
return list(self)
def close(self):
self.conn = None
self.content_length = None
# Monolithic build...end of module: rocket/filelike.py
# Monolithic build...start of module: rocket/futures.py
# Import System Modules
import time
try:
from concurrent.futures import Future, ThreadPoolExecutor
from concurrent.futures.thread import _WorkItem
has_futures = True
except ImportError:
has_futures = False
class Future(object):
pass
class ThreadPoolExecutor(object):
pass
class _WorkItem(object):
pass
class WSGIFuture(Future):
def __init__(self, f_dict, *args, **kwargs):
Future.__init__(self, *args, **kwargs)
self.timeout = None
self._mem_dict = f_dict
self._lifespan = 30
self._name = None
self._start_time = time.time()
def set_running_or_notify_cancel(self):
if time.time() - self._start_time >= self._lifespan:
self.cancel()
else:
return super(WSGIFuture, self).set_running_or_notify_cancel()
def remember(self, name, lifespan=None):
self._lifespan = lifespan or self._lifespan
if name in self._mem_dict:
raise NameError('Cannot remember future by name "%s". ' % name +
'A future already exists with that name.')
self._name = name
self._mem_dict[name] = self
return self
def forget(self):
if self._name in self._mem_dict and self._mem_dict[self._name] is self:
del self._mem_dict[self._name]
self._name = None
class _WorkItem(object):
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
def run(self):
if not self.future.set_running_or_notify_cancel():
return
try:
result = self.fn(*self.args, **self.kwargs)
except BaseException:
e = sys.exc_info()[1]
self.future.set_exception(e)
else:
self.future.set_result(result)
class WSGIExecutor(ThreadPoolExecutor):
multithread = True
multiprocess = False
def __init__(self, *args, **kwargs):
ThreadPoolExecutor.__init__(self, *args, **kwargs)
self.futures = dict()
def submit(self, fn, *args, **kwargs):
if self._shutdown_lock.acquire():
if self._shutdown:
self._shutdown_lock.release()
raise RuntimeError(
'Cannot schedule new futures after shutdown')
f = WSGIFuture(self.futures)
w = _WorkItem(f, fn, args, kwargs)
self._work_queue.put(w)
self._adjust_thread_count()
self._shutdown_lock.release()
return f
else:
return False
class FuturesMiddleware(object):
"""Futures middleware that adds a Futures Executor to the environment"""
def __init__(self, app, threads=5):
self.app = app
self.executor = WSGIExecutor(threads)
def __call__(self, environ, start_response):
environ["wsgiorg.executor"] = self.executor
environ["wsgiorg.futures"] = self.executor.futures
return self.app(environ, start_response)
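# A minimal usage sketch (not part of the original module; my_app and
# build_report are hypothetical): FuturesMiddleware exposes the executor and
# the shared futures dict through the WSGI environ, and WSGIFuture.remember()
# lets one request park a job under a name that a later request can look up.
#
#   def my_app(environ, start_response):
#       executor = environ['wsgiorg.executor']
#       futures = environ['wsgiorg.futures']
#       if 'report' not in futures:
#           executor.submit(build_report).remember('report', lifespan=60)
#       start_response('200 OK', [('Content-Type', 'text/plain')])
#       return [b'report scheduled']
#
#   app = FuturesMiddleware(my_app, threads=5)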
# Monolithic build...end of module: rocket/futures.py
# Monolithic build...start of module: rocket/listener.py
# Import System Modules
import os
import socket
import logging
import traceback
from threading import Thread
try:
import ssl
from ssl import SSLError
has_ssl = True
except ImportError:
has_ssl = False
class SSLError(socket.error):
pass
# Import Package Modules
# package imports removed in monolithic build
class Listener(Thread):
"""The Listener class is a class responsible for accepting connections
and queuing them to be processed by a worker thread."""
def __init__(self, interface, queue_size, active_queue, *args, **kwargs):
Thread.__init__(self, *args, **kwargs)
# Instance variables
self.active_queue = active_queue
self.interface = interface
self.addr = interface[0]
self.port = interface[1]
self.secure = len(interface) >= 4
self.clientcert_req = (len(interface) == 5 and interface[4])
self.thread = None
self.ready = False
# Error Log
self.err_log = logging.getLogger('Rocket.Errors.Port%i' % self.port)
self.err_log.addHandler(NullHandler())
# Build the socket
if ':' in self.addr:
listener = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if not listener:
self.err_log.error("Failed to get socket.")
return
if self.secure:
if not has_ssl:
self.err_log.error("ssl module required to serve HTTPS.")
return
elif not os.path.exists(interface[2]):
data = (interface[2], interface[0], interface[1])
self.err_log.error("Cannot find key file "
"'%s'. Cannot bind to %s:%s" % data)
return
elif not os.path.exists(interface[3]):
data = (interface[3], interface[0], interface[1])
self.err_log.error("Cannot find certificate file "
"'%s'. Cannot bind to %s:%s" % data)
return
if self.clientcert_req and not os.path.exists(interface[4]):
data = (interface[4], interface[0], interface[1])
self.err_log.error("Cannot find root ca certificate file "
"'%s'. Cannot bind to %s:%s" % data)
return
# Set socket options
try:
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
except:
msg = "Cannot share socket. Using %s:%i exclusively."
self.err_log.warning(msg % (self.addr, self.port))
try:
if not IS_JYTHON:
listener.setsockopt(socket.IPPROTO_TCP,
socket.TCP_NODELAY,
1)
except:
msg = "Cannot set TCP_NODELAY, things might run a little slower"
self.err_log.warning(msg)
try:
listener.bind((self.addr, self.port))
except:
msg = "Socket %s:%i in use by other process and it won't share."
self.err_log.error(msg % (self.addr, self.port))
else:
# We want socket operations to timeout periodically so we can
# check if the server is shutting down
listener.settimeout(THREAD_STOP_CHECK_INTERVAL)
# Listen for new connections allowing queue_size number of
# connections to wait before rejecting a connection.
listener.listen(queue_size)
self.listener = listener
self.ready = True
def wrap_socket(self, sock):
try:
if self.clientcert_req:
ca_certs = self.interface[4]
cert_reqs = ssl.CERT_OPTIONAL
sock = ssl.wrap_socket(sock,
keyfile=self.interface[2],
certfile=self.interface[3],
server_side=True,
cert_reqs=cert_reqs,
ca_certs=ca_certs,
ssl_version=ssl.PROTOCOL_SSLv23)
else:
sock = ssl.wrap_socket(sock,
keyfile=self.interface[2],
certfile=self.interface[3],
server_side=True,
ssl_version=ssl.PROTOCOL_SSLv23)
except SSLError:
# Generally this happens when an HTTP request is received on a
# secure socket. We don't do anything because it will be detected
# by Worker and dealt with appropriately.
pass
return sock
def start(self):
if not self.ready:
self.err_log.warning('Listener started when not ready.')
return
if self.thread is not None and self.thread.isAlive():
self.err_log.warning('Listener already running.')
return
self.thread = Thread(target=self.listen, name="Port" + str(self.port))
self.thread.start()
def isAlive(self):
if self.thread is None:
return False
return self.thread.isAlive()
def join(self):
if self.thread is None:
return
self.ready = False
self.thread.join()
del self.thread
self.thread = None
self.ready = True
def listen(self):
if __debug__:
self.err_log.debug('Entering main loop.')
while True:
try:
sock, addr = self.listener.accept()
if self.secure:
sock = self.wrap_socket(sock)
self.active_queue.put(((sock, addr),
self.interface[1],
self.secure))
except socket.timeout:
# socket.timeout will be raised every
# THREAD_STOP_CHECK_INTERVAL seconds. When that happens,
# we check if it's time to die.
if not self.ready:
if __debug__:
self.err_log.debug('Listener exiting.')
return
else:
continue
except:
self.err_log.error(traceback.format_exc())
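# Interface tuples accepted by Listener, as unpacked in __init__ above
# (file paths are hypothetical):
#
#   ('127.0.0.1', 8000)                                 # plain HTTP
#   ('0.0.0.0', 8443, 'server.key', 'server.crt')       # HTTPS (len >= 4 -> secure)
#   ('::', 8443, 'server.key', 'server.crt', 'ca.crt')  # HTTPS + optional client certs
#
# An address containing ':' makes the listener bind an AF_INET6 socket.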
# Monolithic build...end of module: rocket/listener.py
# Monolithic build...start of module: rocket/main.py
# Import System Modules
import sys
import time
import socket
import logging
import traceback
from threading import Lock
try:
from queue import Queue
except ImportError:
from Queue import Queue
# Import Package Modules
# package imports removed in monolithic build
# Setup Logging
log = logging.getLogger('Rocket')
log.addHandler(NullHandler())
class Rocket(object):
"""The Rocket class is responsible for handling threads and accepting and
dispatching connections."""
def __init__(self,
interfaces=('127.0.0.1', 8000),
method='wsgi',
app_info=None,
min_threads=None,
max_threads=None,
queue_size=None,
timeout=600,
handle_signals=True):
self.handle_signals = handle_signals
self.startstop_lock = Lock()
self.timeout = timeout
if not isinstance(interfaces, list):
self.interfaces = [interfaces]
else:
self.interfaces = interfaces
if min_threads is None:
min_threads = DEFAULTS['MIN_THREADS']
if max_threads is None:
max_threads = DEFAULTS['MAX_THREADS']
if not queue_size:
if hasattr(socket, 'SOMAXCONN'):
queue_size = socket.SOMAXCONN
else:
queue_size = DEFAULTS['LISTEN_QUEUE_SIZE']
if max_threads and queue_size > max_threads:
queue_size = max_threads
if isinstance(app_info, dict):
app_info['server_software'] = SERVER_SOFTWARE
self.monitor_queue = Queue()
self.active_queue = Queue()
self._threadpool = ThreadPool(get_method(method),
app_info=app_info,
active_queue=self.active_queue,
monitor_queue=self.monitor_queue,
min_threads=min_threads,
max_threads=max_threads)
# Build our socket listeners
self.listeners = [Listener(
i, queue_size, self.active_queue) for i in self.interfaces]
for ndx in range(len(self.listeners) - 1, 0, -1):
if not self.listeners[ndx].ready:
del self.listeners[ndx]
if not self.listeners:
log.critical("No interfaces to listen on...closing.")
sys.exit(1)
def _sigterm(self, signum, frame):
log.info('Received SIGTERM')
self.stop()
def _sighup(self, signum, frame):
log.info('Received SIGHUP')
self.restart()
def start(self, background=False):
log.info('Starting %s' % SERVER_SOFTWARE)
self.startstop_lock.acquire()
try:
# Set up our shutdown signals
if self.handle_signals:
try:
import signal
signal.signal(signal.SIGTERM, self._sigterm)
signal.signal(signal.SIGUSR1, self._sighup)
except:
log.debug('This platform does not support signals.')
# Start our worker threads
self._threadpool.start()
# Start our monitor thread
self._monitor = Monitor(self.monitor_queue,
self.active_queue,
self.timeout,
self._threadpool)
self._monitor.setDaemon(True)
self._monitor.start()
# I know that EXPR and A or B is bad but I'm keeping it for Py2.4
# compatibility.
str_extract = lambda l: (l.addr, l.port, l.secure and '*' or '')
msg = 'Listening on sockets: '
msg += ', '.join(
['%s:%i%s' % str_extract(l) for l in self.listeners])
log.info(msg)
for l in self.listeners:
l.start()
finally:
self.startstop_lock.release()
if background:
return
while self._monitor.isAlive():
try:
time.sleep(THREAD_STOP_CHECK_INTERVAL)
except KeyboardInterrupt:
# Capture a keyboard interrupt when running from a console
break
except:
if self._monitor.isAlive():
log.error(traceback.format_exc())
continue
return self.stop()
def stop(self, stoplogging=False):
log.info('Stopping %s' % SERVER_SOFTWARE)
self.startstop_lock.acquire()
try:
# Stop listeners
for l in self.listeners:
l.ready = False
# Encourage a context switch
time.sleep(0.01)
for l in self.listeners:
if l.isAlive():
l.join()
# Stop Monitor
self._monitor.stop()
if self._monitor.isAlive():
self._monitor.join()
# Stop Worker threads
self._threadpool.stop()
if stoplogging:
logging.shutdown()
msg = "Calling logging.shutdown() is now the responsibility of \
the application developer. Please update your \
applications to no longer call rocket.stop(True)"
try:
raise DeprecationWarning(msg)
except ImportError:
raise RuntimeError(msg)
finally:
self.startstop_lock.release()
def restart(self):
self.stop()
self.start()
def CherryPyWSGIServer(bind_addr,
wsgi_app,
numthreads=10,
server_name=None,
max=-1,
request_queue_size=5,
timeout=10,
shutdown_timeout=5):
""" A Cherrypy wsgiserver-compatible wrapper. """
max_threads = max
if max_threads < 0:
max_threads = 0
return Rocket(bind_addr, 'wsgi', {'wsgi_app': wsgi_app},
min_threads=numthreads,
max_threads=max_threads,
queue_size=request_queue_size,
timeout=timeout)
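# A minimal launch sketch (hello is a hypothetical WSGI callable; Rocket and
# CherryPyWSGIServer are used exactly as defined above):
#
#   def hello(environ, start_response):
#       start_response('200 OK', [('Content-Type', 'text/plain')])
#       return [b'hello']
#
#   Rocket(('127.0.0.1', 8000), 'wsgi', {'wsgi_app': hello}).start()
#   # start() blocks unless called with background=True.
#
#   # Or through the CherryPy-compatible wrapper:
#   CherryPyWSGIServer(('127.0.0.1', 8000), hello, numthreads=10).start()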
# Monolithic build...end of module: rocket/main.py
# Monolithic build...start of module: rocket/monitor.py
# Import System Modules
import time
import logging
import select
from threading import Thread
# Import Package Modules
# package imports removed in monolithic build
class Monitor(Thread):
    # The Monitor thread watches idle keep-alive connections: it hands them
    # back to the active queue when they become readable and closes them once
    # they exceed the timeout.
def __init__(self,
monitor_queue,
active_queue,
timeout,
threadpool,
*args,
**kwargs):
Thread.__init__(self, *args, **kwargs)
self._threadpool = threadpool
# Instance Variables
self.monitor_queue = monitor_queue
self.active_queue = active_queue
self.timeout = timeout
self.log = logging.getLogger('Rocket.Monitor')
self.log.addHandler(NullHandler())
self.connections = set()
self.active = False
def run(self):
self.active = True
conn_list = list()
list_changed = False
# We need to make sure the queue is empty before we start
while not self.monitor_queue.empty():
self.monitor_queue.get()
if __debug__:
self.log.debug('Entering monitor loop.')
# Enter thread main loop
while self.active:
# Move the queued connections to the selection pool
while not self.monitor_queue.empty():
if __debug__:
self.log.debug('In "receive timed-out connections" loop.')
c = self.monitor_queue.get()
if c is None:
# A non-client is a signal to die
if __debug__:
self.log.debug('Received a death threat.')
self.stop()
break
self.log.debug('Received a timed out connection.')
if __debug__:
assert(c not in self.connections)
if IS_JYTHON:
# Jython requires a socket to be in Non-blocking mode in
# order to select on it.
c.setblocking(False)
if __debug__:
self.log.debug('Adding connection to monitor list.')
self.connections.add(c)
list_changed = True
# Wait on those connections
if list_changed:
conn_list = list(self.connections)
list_changed = False
try:
if len(conn_list):
readable = select.select(conn_list,
[],
[],
THREAD_STOP_CHECK_INTERVAL)[0]
else:
time.sleep(THREAD_STOP_CHECK_INTERVAL)
readable = []
if not self.active:
break
# If we have any readable connections, put them back
for r in readable:
if __debug__:
self.log.debug('Restoring readable connection')
if IS_JYTHON:
# Jython requires a socket to be in Non-blocking mode in
# order to select on it, but the rest of the code requires
# that it be in blocking mode.
r.setblocking(True)
r.start_time = time.time()
self.active_queue.put(r)
self.connections.remove(r)
list_changed = True
except:
if self.active:
raise
else:
break
# If we have any stale connections, kill them off.
if self.timeout:
now = time.time()
stale = set()
for c in self.connections:
if (now - c.start_time) >= self.timeout:
stale.add(c)
for c in stale:
if __debug__:
# "EXPR and A or B" kept for Py2.4 compatibility
data = (
c.client_addr, c.server_port, c.ssl and '*' or '')
self.log.debug(
'Flushing stale connection: %s:%i%s' % data)
self.connections.remove(c)
list_changed = True
try:
c.close()
finally:
del c
# Dynamically resize the threadpool to adapt to our changing needs.
self._threadpool.dynamic_resize()
def stop(self):
self.active = False
if __debug__:
self.log.debug('Flushing waiting connections')
while self.connections:
c = self.connections.pop()
try:
c.close()
finally:
del c
if __debug__:
self.log.debug('Flushing queued connections')
while not self.monitor_queue.empty():
c = self.monitor_queue.get()
if c is None:
continue
try:
c.close()
finally:
del c
# Place a None sentry value to cause the monitor to die.
self.monitor_queue.put(None)
# Monolithic build...end of module: rocket/monitor.py
# Monolithic build...start of module: rocket/threadpool.py
# Import System Modules
import logging
# Import Package Modules
# package imports removed in monolithic build
# Setup Logging
log = logging.getLogger('Rocket.Errors.ThreadPool')
log.addHandler(NullHandler())
class ThreadPool:
"""The ThreadPool class is a container class for all the worker threads. It
manages the number of actively running threads."""
def __init__(self,
method,
app_info,
active_queue,
monitor_queue,
min_threads=DEFAULTS['MIN_THREADS'],
max_threads=DEFAULTS['MAX_THREADS'],
):
if __debug__:
log.debug("Initializing ThreadPool.")
self.check_for_dead_threads = 0
self.active_queue = active_queue
self.worker_class = method
self.min_threads = min_threads
self.max_threads = max_threads
self.monitor_queue = monitor_queue
self.stop_server = False
self.alive = False
# TODO - Optimize this based on some real-world usage data
self.grow_threshold = int(max_threads / 10) + 2
if not isinstance(app_info, dict):
app_info = dict()
if has_futures and app_info.get('futures'):
app_info['executor'] = WSGIExecutor(max([DEFAULTS['MIN_THREADS'],
2]))
app_info.update(max_threads=max_threads,
min_threads=min_threads)
self.min_threads = min_threads
self.app_info = app_info
self.threads = set()
def start(self):
self.stop_server = False
if __debug__:
log.debug("Starting threads.")
self.grow(self.min_threads)
self.alive = True
def stop(self):
self.alive = False
if __debug__:
log.debug("Stopping threads.")
self.stop_server = True
# Prompt the threads to die
self.shrink(len(self.threads))
# Stop futures initially
if has_futures and self.app_info.get('futures'):
if __debug__:
log.debug("Future executor is present. Python will not "
"exit until all jobs have finished.")
self.app_info['executor'].shutdown(wait=False)
# Give them the gun
# active_threads = [t for t in self.threads if t.isAlive()]
# while active_threads:
# t = active_threads.pop()
# t.kill()
# Wait until they pull the trigger
for t in self.threads:
if t.isAlive():
t.join()
# Clean up the mess
self.bring_out_your_dead()
def bring_out_your_dead(self):
# Remove dead threads from the pool
dead_threads = [t for t in self.threads if not t.isAlive()]
for t in dead_threads:
if __debug__:
log.debug("Removing dead thread: %s." % t.getName())
try:
# Py2.4 complains here so we put it in a try block
self.threads.remove(t)
except:
pass
self.check_for_dead_threads -= len(dead_threads)
def grow(self, amount=None):
if self.stop_server:
return
if not amount:
amount = self.max_threads
if self.alive:
amount = min([amount, self.max_threads - len(self.threads)])
if __debug__:
log.debug("Growing by %i." % amount)
for x in range(amount):
worker = self.worker_class(self.app_info,
self.active_queue,
self.monitor_queue)
worker.setDaemon(True)
self.threads.add(worker)
worker.start()
def shrink(self, amount=1):
if __debug__:
log.debug("Shrinking by %i." % amount)
self.check_for_dead_threads += amount
for x in range(amount):
self.active_queue.put(None)
def dynamic_resize(self):
if (self.max_threads > self.min_threads or self.max_threads == 0):
if self.check_for_dead_threads > 0:
self.bring_out_your_dead()
queueSize = self.active_queue.qsize()
threadCount = len(self.threads)
if __debug__:
log.debug("Examining ThreadPool. %i threads and %i Q'd conxions"
% (threadCount, queueSize))
if queueSize == 0 and threadCount > self.min_threads:
self.shrink()
elif queueSize > self.grow_threshold:
self.grow(queueSize)
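# Worked example of the thresholds above (numbers are illustrative): with
# max_threads=10, grow_threshold = int(10 / 10) + 2 = 3, so dynamic_resize()
# grows the pool once more than 3 connections are queued, and shrinks it by one
# thread whenever the queue is empty and the pool is above min_threads.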
# Monolithic build...end of module: rocket/threadpool.py
# Monolithic build...start of module: rocket/worker.py
# Import System Modules
import re
import sys
import socket
import logging
import traceback
from wsgiref.headers import Headers
from threading import Thread
from datetime import datetime
try:
from ssl import SSLError
except ImportError:
class SSLError(socket.error):
pass
# Import Package Modules
# package imports removed in monolithic build
# Define Constants
re_SLASH = re.compile('%2F', re.IGNORECASE)
re_REQUEST_LINE = re.compile(r"""^
(?P<method>OPTIONS|GET|HEAD|POST|PUT|DELETE|PATCH|TRACE|CONNECT) # Req Method
\ # single space
(
(?P<scheme>[^:/]+) # Scheme
(://) #
(?P<host>[^/]+) # Host
)? #
(?P<path>(\*|/[^ \?]*)) # Path
(\? (?P<query_string>[^ ]*))? # Query String
\ # single space
(?P<protocol>HTTPS?/1\.[01]) # Protocol
$
""", re.X)
LOG_LINE = '%(client_ip)s - "%(request_line)s" - %(status)s %(size)s'
RESPONSE = '''\
%s %s
Content-Length: %i
Content-Type: %s

%s
'''
if IS_JYTHON:
HTTP_METHODS = set(['OPTIONS', 'GET', 'HEAD', 'POST', 'PUT',
'DELETE', 'TRACE', 'CONNECT'])
class Worker(Thread):
"""The Worker class is a base class responsible for receiving connections
and (a subclass) will run an application to process the the connection """
def __init__(self,
app_info,
active_queue,
monitor_queue,
*args,
**kwargs):
Thread.__init__(self, *args, **kwargs)
# Instance Variables
self.app_info = app_info
self.active_queue = active_queue
self.monitor_queue = monitor_queue
self.size = 0
self.status = "200 OK"
self.closeConnection = True
self.request_line = ""
self.protocol = 'HTTP/1.1'
# Request Log
self.req_log = logging.getLogger('Rocket.Requests')
self.req_log.addHandler(NullHandler())
# Error Log
self.err_log = logging.getLogger('Rocket.Errors.' + self.getName())
self.err_log.addHandler(NullHandler())
def _handleError(self, typ, val, tb):
if typ == SSLError:
if 'timed out' in str(val.args[0]):
typ = SocketTimeout
if typ == SocketTimeout:
if __debug__:
self.err_log.debug('Socket timed out')
self.monitor_queue.put(self.conn)
return True
if typ == SocketClosed:
self.closeConnection = True
if __debug__:
self.err_log.debug('Client closed socket')
return False
if typ == BadRequest:
self.closeConnection = True
if __debug__:
self.err_log.debug('Client sent a bad request')
return True
if typ == socket.error:
self.closeConnection = True
if val.args[0] in IGNORE_ERRORS_ON_CLOSE:
if __debug__:
self.err_log.debug('Ignorable socket Error received...'
'closing connection.')
return False
else:
self.status = "999 Utter Server Failure"
tb_fmt = traceback.format_exception(typ, val, tb)
self.err_log.error('Unhandled Error when serving '
'connection:\n' + '\n'.join(tb_fmt))
return False
self.closeConnection = True
tb_fmt = traceback.format_exception(typ, val, tb)
self.err_log.error('\n'.join(tb_fmt))
self.send_response('500 Server Error')
return False
def run(self):
if __debug__:
self.err_log.debug('Entering main loop.')
# Enter thread main loop
while True:
conn = self.active_queue.get()
if not conn:
# A non-client is a signal to die
if __debug__:
self.err_log.debug('Received a death threat.')
return conn
if isinstance(conn, tuple):
conn = Connection(*conn)
self.conn = conn
if conn.ssl != conn.secure:
self.err_log.info('Received HTTP connection on HTTPS port.')
self.send_response('400 Bad Request')
self.closeConnection = True
conn.close()
continue
else:
if __debug__:
self.err_log.debug('Received a connection.')
self.closeConnection = False
# Enter connection serve loop
while True:
if __debug__:
self.err_log.debug('Serving a request')
try:
self.run_app(conn)
except:
exc = sys.exc_info()
handled = self._handleError(*exc)
if handled:
break
finally:
if self.request_line:
log_info = dict(client_ip=conn.client_addr,
time=datetime.now().strftime('%c'),
status=self.status.split(' ')[0],
size=self.size,
request_line=self.request_line)
self.req_log.info(LOG_LINE % log_info)
if self.closeConnection:
try:
conn.close()
except:
self.err_log.error(str(traceback.format_exc()))
break
def run_app(self, conn):
        # Must be overridden with a method that reads the request from the
        # socket and sends a response.
self.closeConnection = True
raise NotImplementedError('Overload this method!')
def send_response(self, status):
stat_msg = status.split(' ', 1)[1]
msg = RESPONSE % (self.protocol,
status,
len(stat_msg),
'text/plain',
stat_msg)
try:
self.conn.sendall(b(msg))
except socket.timeout:
self.closeConnection = True
msg = 'Tried to send "%s" to client but received timeout error'
self.err_log.error(msg % status)
except socket.error:
self.closeConnection = True
msg = 'Tried to send "%s" to client but received socket error'
self.err_log.error(msg % status)
def read_request_line(self, sock_file):
self.request_line = ''
try:
# Grab the request line
d = sock_file.readline()
if PY3K:
d = d.decode('ISO-8859-1')
if d == '\r\n':
# Allow an extra NEWLINE at the beginning per HTTP 1.1 spec
if __debug__:
self.err_log.debug('Client sent newline')
d = sock_file.readline()
if PY3K:
d = d.decode('ISO-8859-1')
except socket.timeout:
raise SocketTimeout('Socket timed out before request.')
except TypeError:
raise SocketClosed(
'SSL bug caused closure of socket. See '
'"https://groups.google.com/d/topic/web2py/P_Gw0JxWzCs".')
d = d.strip()
if not d:
if __debug__:
self.err_log.debug(
'Client did not send a recognizable request.')
raise SocketClosed('Client closed socket.')
self.request_line = d
# NOTE: I've replaced the traditional method of procedurally breaking
# apart the request line with a (rather unsightly) regular expression.
# However, Java's regexp support sucks so bad that it actually takes
# longer in Jython to process the regexp than procedurally. So I've
# left the old code here for Jython's sake...for now.
if IS_JYTHON:
return self._read_request_line_jython(d)
match = re_REQUEST_LINE.match(d)
if not match:
self.send_response('400 Bad Request')
raise BadRequest
req = match.groupdict()
for k, v in iteritems(req):
if not v:
req[k] = ""
if k == 'path':
req['path'] = r'%2F'.join(
[urllib_unquote(x) for x in re_SLASH.split(v)])
self.protocol = req['protocol']
return req
def _read_request_line_jython(self, d):
d = d.strip()
try:
method, uri, proto = d.split(' ')
if not proto.startswith('HTTP') or \
proto[-3:] not in ('1.0', '1.1') or \
method not in HTTP_METHODS:
self.send_response('400 Bad Request')
raise BadRequest
except ValueError:
self.send_response('400 Bad Request')
raise BadRequest
req = dict(method=method, protocol=proto)
scheme = ''
host = ''
if uri == '*' or uri.startswith('/'):
path = uri
elif '://' in uri:
scheme, rest = uri.split('://')
host, path = rest.split('/', 1)
path = '/' + path
else:
self.send_response('400 Bad Request')
raise BadRequest
query_string = ''
if '?' in path:
path, query_string = path.split('?', 1)
path = r'%2F'.join([urllib_unquote(x) for x in re_SLASH.split(path)])
req.update(path=path,
query_string=query_string,
scheme=scheme.lower(),
host=host)
return req
def read_headers(self, sock_file):
try:
headers = dict()
lname = None
lval = None
while True:
l = sock_file.readline()
if PY3K:
try:
l = str(l, 'ISO-8859-1')
except UnicodeDecodeError:
self.err_log.warning(
'Client sent invalid header: ' + repr(l))
if l.strip().replace('\0', '') == '':
break
if l[0] in ' \t' and lname:
# Some headers take more than one line
lval += ' ' + l.strip()
else:
# HTTP header values are latin-1 encoded
l = l.split(':', 1)
# HTTP header names are us-ascii encoded
lname = l[0].strip().upper().replace('-', '_')
lval = l[-1].strip()
headers[str(lname)] = str(lval)
except socket.timeout:
raise SocketTimeout("Socket timed out before request.")
return headers
class SocketTimeout(Exception):
"""Exception for when a socket times out between requests."""
pass
class BadRequest(Exception):
"""Exception for when a client sends an incomprehensible request."""
pass
class SocketClosed(Exception):
"""Exception for when a socket is closed by the client."""
pass
class ChunkedReader(object):
def __init__(self, sock_file):
self.stream = sock_file
self.chunk_size = 0
def _read_header(self):
chunk_len = ""
try:
while "" == chunk_len:
chunk_len = self.stream.readline().strip()
return int(chunk_len, 16)
except ValueError:
return 0
def read(self, size):
data = b('')
chunk_size = self.chunk_size
while size:
if not chunk_size:
chunk_size = self._read_header()
if size < chunk_size:
data += self.stream.read(size)
chunk_size -= size
break
else:
if not chunk_size:
break
data += self.stream.read(chunk_size)
size -= chunk_size
chunk_size = 0
self.chunk_size = chunk_size
return data
def readline(self):
data = b('')
c = self.read(1)
while c and c != b('\n'):
data += c
c = self.read(1)
data += c
return data
def readlines(self):
yield self.readline()
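# Illustrative only (not in the original source): ChunkedReader decodes the
# HTTP/1.1 chunked transfer coding of a request body. Given a sock_file whose
# remaining payload is the (hypothetical) byte string
#
#   b'5\r\nhello\r\n6\r\n world\r\n0\r\n\r\n'
#
# ChunkedReader(sock_file).read(11) returns b'hello world': _read_header()
# parses each hexadecimal chunk-size line and read() concatenates the chunk
# payloads until the requested number of bytes has been collected.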
def get_method(method):
methods = dict(wsgi=WSGIWorker)
return methods[method.lower()]
# Monolithic build...end of module: rocket/worker.py
# Monolithic build...start of module: rocket/methods/__init__.py
# Monolithic build...end of module: rocket/methods/__init__.py
# Monolithic build...start of module: rocket/methods/wsgi.py
# Import System Modules
import sys
import socket
from wsgiref.headers import Headers
from wsgiref.util import FileWrapper
# Import Package Modules
# package imports removed in monolithic build
if PY3K:
from email.utils import formatdate
else:
# Caps Utils for Py2.4 compatibility
from email.Utils import formatdate
# Define Constants
NEWLINE = b('\r\n')
HEADER_RESPONSE = '''HTTP/1.1 %s\r\n%s'''
BASE_ENV = {'SERVER_NAME': SERVER_NAME,
'SCRIPT_NAME': '', # Direct call WSGI does not need a name
'wsgi.errors': sys.stderr,
'wsgi.version': (1, 0),
'wsgi.multiprocess': False,
'wsgi.run_once': False,
'wsgi.file_wrapper': FileWrapper
}
class WSGIWorker(Worker):
def __init__(self, *args, **kwargs):
"""Builds some instance variables that will last the life of the
thread."""
Worker.__init__(self, *args, **kwargs)
if isinstance(self.app_info, dict):
multithreaded = self.app_info.get('max_threads') != 1
else:
multithreaded = False
self.base_environ = dict(
{'SERVER_SOFTWARE': self.app_info['server_software'],
'wsgi.multithread': multithreaded,
})
self.base_environ.update(BASE_ENV)
# Grab our application
self.app = self.app_info.get('wsgi_app')
if not hasattr(self.app, "__call__"):
raise TypeError("The wsgi_app specified (%s) is not a valid WSGI application." % repr(self.app))
# Enable futures
if has_futures and self.app_info.get('futures'):
executor = self.app_info['executor']
self.base_environ.update({"wsgiorg.executor": executor,
"wsgiorg.futures": executor.futures})
def build_environ(self, sock_file, conn):
""" Build the execution environment. """
# Grab the request line
request = self.read_request_line(sock_file)
# Copy the Base Environment
environ = self.base_environ.copy()
# Grab the headers
for k, v in iteritems(self.read_headers(sock_file)):
environ[str('HTTP_' + k)] = v
# Add CGI Variables
environ['REQUEST_METHOD'] = request['method']
environ['PATH_INFO'] = request['path']
environ['SERVER_PROTOCOL'] = request['protocol']
environ['SERVER_PORT'] = str(conn.server_port)
environ['REMOTE_PORT'] = str(conn.client_port)
environ['REMOTE_ADDR'] = str(conn.client_addr)
environ['QUERY_STRING'] = request['query_string']
if 'HTTP_CONTENT_LENGTH' in environ:
environ['CONTENT_LENGTH'] = environ['HTTP_CONTENT_LENGTH']
if 'HTTP_CONTENT_TYPE' in environ:
environ['CONTENT_TYPE'] = environ['HTTP_CONTENT_TYPE']
# Save the request method for later
self.request_method = environ['REQUEST_METHOD']
# Add Dynamic WSGI Variables
if conn.ssl:
environ['wsgi.url_scheme'] = 'https'
environ['HTTPS'] = 'on'
try:
peercert = conn.socket.getpeercert(binary_form=True)
environ['SSL_CLIENT_RAW_CERT'] = \
peercert and to_native(ssl.DER_cert_to_PEM_cert(peercert))
except Exception:
print(sys.exc_info()[1])
else:
environ['wsgi.url_scheme'] = 'http'
if environ.get('HTTP_TRANSFER_ENCODING', '') == 'chunked':
environ['wsgi.input'] = ChunkedReader(sock_file)
else:
environ['wsgi.input'] = sock_file
return environ
def send_headers(self, data, sections):
h_set = self.header_set
# Does the app want us to send output chunked?
self.chunked = h_set.get('Transfer-Encoding', '').lower() == 'chunked'
# Add a Date header if it's not there already
if not 'Date' in h_set:
h_set['Date'] = formatdate(usegmt=True)
# Add a Server header if it's not there already
if not 'Server' in h_set:
h_set['Server'] = HTTP_SERVER_SOFTWARE
if 'Content-Length' in h_set:
self.size = int(h_set['Content-Length'])
else:
s = int(self.status.split(' ')[0])
if (s < 200 or s not in (204, 205, 304)) and not self.chunked:
if sections == 1 or self.protocol != 'HTTP/1.1':
# Add a Content-Length header because it's not there
self.size = len(data)
h_set['Content-Length'] = str(self.size)
else:
# If they sent us more than one section, we blow chunks
h_set['Transfer-Encoding'] = 'Chunked'
self.chunked = True
if __debug__:
self.err_log.debug('Adding header...'
'Transfer-Encoding: Chunked')
if 'Connection' not in h_set:
# If the application did not provide a connection header,
# fill it in
client_conn = self.environ.get('HTTP_CONNECTION', '').lower()
if self.environ['SERVER_PROTOCOL'] == 'HTTP/1.1':
                # HTTP 1.1 defaults to keep-alive connections
if client_conn:
h_set['Connection'] = client_conn
else:
h_set['Connection'] = 'keep-alive'
else:
# HTTP < 1.1 supports keep-alive but it's quirky
# so we don't support it
h_set['Connection'] = 'close'
# Close our connection if we need to.
self.closeConnection = h_set.get('Connection', '').lower() == 'close'
# Build our output headers
header_data = HEADER_RESPONSE % (self.status, str(h_set))
# Send the headers
if __debug__:
self.err_log.debug('Sending Headers: %s' % repr(header_data))
self.conn.sendall(b(header_data))
self.headers_sent = True
def write_warning(self, data, sections=None):
self.err_log.warning('WSGI app called write method directly. This is '
'deprecated behavior. Please update your app.')
return self.write(data, sections)
def write(self, data, sections=None):
""" Write the data to the output socket. """
if self.error[0]:
self.status = self.error[0]
data = b(self.error[1])
if not self.headers_sent:
self.send_headers(data, sections)
if self.request_method != 'HEAD':
try:
if self.chunked:
self.conn.sendall(b'%x\r\n%s\r\n' % (len(data), to_bytes(data, 'ISO-8859-1')))
else:
self.conn.sendall(to_bytes(data))
except socket.timeout:
self.closeConnection = True
except socket.error:
# But some clients will close the connection before that
# resulting in a socket error.
self.closeConnection = True
def start_response(self, status, response_headers, exc_info=None):
""" Store the HTTP status and headers to be sent when self.write is
called. """
if exc_info:
try:
if self.headers_sent:
# Re-raise original exception if headers sent
# because this violates WSGI specification.
raise
finally:
exc_info = None
elif self.header_set:
raise AssertionError("Headers already set!")
if PY3K and not isinstance(status, str):
self.status = str(status, 'ISO-8859-1')
else:
self.status = status
# Make sure headers are bytes objects
try:
self.header_set = Headers(response_headers)
except UnicodeDecodeError:
self.error = ('500 Internal Server Error',
'HTTP Headers should be bytes')
self.err_log.error('Received HTTP Headers from client that contain'
' invalid characters for Latin-1 encoding.')
return self.write_warning
def run_app(self, conn):
self.size = 0
self.header_set = Headers([])
self.headers_sent = False
self.error = (None, None)
self.chunked = False
sections = None
output = None
if __debug__:
self.err_log.debug('Getting sock_file')
# Build our file-like object
if PY3K:
sock_file = conn.makefile(mode='rb', buffering=BUF_SIZE)
else:
sock_file = conn.makefile(BUF_SIZE)
try:
# Read the headers and build our WSGI environment
self.environ = environ = self.build_environ(sock_file, conn)
# Handle 100 Continue
if environ.get('HTTP_EXPECT', '') == '100-continue':
res = environ['SERVER_PROTOCOL'] + ' 100 Continue\r\n\r\n'
conn.sendall(b(res))
# Send it to our WSGI application
output = self.app(environ, self.start_response)
if not hasattr(output, '__len__') and not hasattr(output, '__iter__'):
self.error = ('500 Internal Server Error',
'WSGI applications must return a list or '
'generator type.')
if hasattr(output, '__len__'):
sections = len(output)
for data in output:
# Don't send headers until body appears
if data:
self.write(data, sections)
if not self.headers_sent:
# Send headers if the body was empty
self.send_headers('', sections)
if self.chunked and self.request_method != 'HEAD':
# If chunked, send our final chunk length
self.conn.sendall(b('0\r\n\r\n'))
# Don't capture exceptions here. The Worker class handles
# them appropriately.
finally:
if __debug__:
self.err_log.debug('Finally closing output and sock_file')
if hasattr(output, 'close'):
output.close()
sock_file.close()
# Monolithic build...end of module: rocket/methods/wsgi.py
def demo_app(environ, start_response):
global static_folder
import os
    types = {'htm': 'text/html','html': 'text/html','gif': 'image/gif',
             'jpg': 'image/jpeg','png': 'image/png','pdf': 'application/pdf'}
if static_folder:
if not static_folder.startswith('/'):
static_folder = os.path.join(os.getcwd(),static_folder)
path = os.path.join(static_folder, environ['PATH_INFO'][1:] or 'index.html')
type = types.get(path.split('.')[-1],'text')
        if os.path.exists(path):
            try:
                data = open(path,'rb').read()
                start_response('200 OK', [('Content-Type', type)])
            except IOError:
                start_response('500 INTERNAL SERVER ERROR', [])
                data = '500 INTERNAL SERVER ERROR'
        else:
            start_response('404 NOT FOUND', [])
            data = '404 NOT FOUND'
else:
start_response('200 OK', [('Content-Type', 'text/html')])
data = '<html><body><h1>Hello from Rocket Web Server</h1></body></html>'
return [data]
def demo():
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-i", "--ip", dest="ip",default="127.0.0.1",
help="ip address of the network interface")
parser.add_option("-p", "--port", dest="port",default="8000",
help="post where to run web server")
parser.add_option("-s", "--static", dest="static",default=None,
help="folder containing static files")
(options, args) = parser.parse_args()
global static_folder
static_folder = options.static
print('Rocket running on %s:%s' % (options.ip, options.port))
r=Rocket((options.ip,int(options.port)),'wsgi', {'wsgi_app':demo_app})
r.start()
if __name__=='__main__':
demo()
|
tee_popen.py
|
# -*- coding: utf-8 -*-
# TeePopen wraps subprocess.Popen: it reads the child's stdout and stderr and
# writes each stream both into pipes handed back to Ansible and into a unix
# socket consumed by the werf logger (logboek).
#
# stdout from subprocess.Popen (original_popen)
# |
# | looped read by TeeSplitter and write into 2 streams:
# ↓--------------------------↓
# stdout_live_sock fd_out_write
# | ↓
# | fd_out_read
# ↓ ↓
# LiveStdoutListener exec_command in AnsiballZ's basic.py
# in live.py
#
# The same happens with stderr.
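#
# A minimal usage sketch (the command and keyword arguments are hypothetical;
# the unix socket served by live.py must already exist for _open_live_sock to
# connect):
#
#   import subprocess
#   cmd = TeePopen(['ls', '-la'], original_popen=subprocess.Popen,
#                  stdin=subprocess.PIPE, stdout=subprocess.PIPE,
#                  stderr=subprocess.PIPE)
#   out, err = cmd.communicate(None)  # output is also streamed to the werf socket
#   rc = cmd.returncode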
import os
import select
import socket
import time
import threading
from werf import STDOUT_UNIX_SOCK_NAME
class TeePopen(object):
def __init__(self, args, original_popen=None, bufsize=0, **kwargs):
self.returncode = None
if original_popen is not None:
# pipe for stdout back to ansible
self.fd_out_read, self.fd_out_write = self.pipe_cloexec()
self.stdout = os.fdopen(self.fd_out_read, 'rb', bufsize)
self.stdout_back_to_ansible = os.fdopen(self.fd_out_write, 'wb', 0)
# pipe for stderr back to ansible
self.fd_err_read, self.fd_err_write = self.pipe_cloexec()
self.stderr = os.fdopen(self.fd_err_read, 'rb', bufsize)
self.stderr_back_to_ansible = os.fdopen(self.fd_err_write, 'wb', 0)
# unix socket for stdout redirect
self.stdout_live_sock=None
self.stdout_live_sock = self._open_live_sock(STDOUT_UNIX_SOCK_NAME)
# unix socket for stderr redirect
#self.stderr_live_sock = self._open_live_sock(STDERR_UNIX_SOCK_NAME)
self.cmd = original_popen(args, bufsize=bufsize, **kwargs)
self.stdin = self.cmd.stdin
self._splitter = TeeSplitter(
in_a=self.cmd.stdout,
in_b=self.cmd.stderr,
out_a=self.stdout_back_to_ansible,
out_b=self.stderr_back_to_ansible,
out_ab=self.stdout_live_sock,
)
self._splitter.start()
# Periodically call poll to check if cmd is done and close fd_out_write
# and fd_err_write for nonblocking communicate method.
self.poll_thread = threading.Thread(target=self._poll_checker_thread)
self.poll_thread.setDaemon(True)
self.poll_thread.start()
    # from subprocess.Popen
def pipe_cloexec(self):
"""Create a pipe with FDs set CLOEXEC."""
# Pipes' FDs are set CLOEXEC by default because we don't want them
# to be inherited by other subprocesses: the CLOEXEC flag is removed
# from the child's FDs by _dup2(), between fork() and exec().
# This is not atomic: we would need the pipe2() syscall for that.
r, w = os.pipe()
self._set_cloexec_flag(r)
self._set_cloexec_flag(w)
return r, w
    # from subprocess.Popen
def _set_cloexec_flag(self, fd, cloexec=True):
import fcntl
try:
cloexec_flag = fcntl.FD_CLOEXEC
except AttributeError:
cloexec_flag = 1
old = fcntl.fcntl(fd, fcntl.F_GETFD)
if cloexec:
fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
else:
fcntl.fcntl(fd, fcntl.F_SETFD, old & ~cloexec_flag)
def _open_live_sock(self, filename):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
sock.connect(filename)
except socket.error:
raise
return sock
    # Poll cmd until it finishes; poll() then closes the write ends so the
    # communicate() method does not block.
def _poll_checker_thread(self):
while True:
res = self.poll()
time.sleep(0.1)
if res is not None:
break
def poll(self):
self.returncode = self.cmd.poll()
if self.returncode is not None:
# cmd is finished, so stop splitter thread
self._splitter.stop()
self._splitter.join(1)
# close sockets
self.stdout_live_sock.close()
self.stdout_back_to_ansible.close()
self.stderr_back_to_ansible.close()
return self.returncode
def wait(self):
self.returncode = self.cmd.wait()
return self.returncode
def communicate(self, args):
self.cmd.stdout = self.stdout
self.cmd.stderr = self.stderr
stdout, stderr = self.cmd.communicate(args)
return stdout, stderr
class TeeSplitter(threading.Thread):
def __init__(self, in_a=None, in_b=None, out_a=None, out_b=None, out_ab=None):
self.in_a = in_a
self.in_b = in_b
self.out_a = out_a
self.out_b = out_b
self.out_ab = out_ab
self._stop = False
threading.Thread.__init__(self)
self.setDaemon(True)
def stop(self):
self._stop = True
def run(self):
rpipes = [self.in_a, self.in_b]
while True:
rfds, wfds, efds = select.select(rpipes, [], [], 0.1)
for s in rfds:
data = self._read_from_pipes(rpipes, rfds, s)
if s is self.in_a:
self.write(self.out_a, data)
if s is self.in_b:
self.write(self.out_b, data)
self.write(self.out_ab, data)
if self._stop:
break
if not rpipes:
break
def _read_from_pipes(self, rpipes, rfds, file_descriptor):
data = ''
if file_descriptor in rfds:
data = os.read(file_descriptor.fileno(), 9000)
if data == '':
rpipes.remove(file_descriptor)
return data
def write(self, s, data):
if isinstance(s, socket.socket):
s.sendall(data)
elif isinstance(s, int):
os.write(s, data)
elif isinstance(s, file):
s.write(data)
else:
raise TypeError(type(s))
|
wxagent.py
|
# encoding=utf-8
from multiprocessing.connection import Listener
from threading import Thread
import itchat
from itchat.content import TEXT
import pdb
import json
import re
import urllib
import urllib2
from conf import WECHATGROUP, SDPURL, SDPAPIKEY, OPMURL, OPMAPIKEY, LISTENERHOST, LISTENERPORT, TULINGKEY
address = (LISTENERHOST, LISTENERPORT)
listener = Listener(address)
running = True
@itchat.msg_register(TEXT, isGroupChat=True)
def simple_reply(msg):
text = msg['Content']
requester = msg['ActualNickName']
replyText = u'I received: {}\n'.format(msg['Content'])
print replyText
if msg['User']['NickName'] in WECHATGROUP:
sdpaddrequest = re.match(r'(?i)(sdp) (add request) (.*)', text)
opmgetdevice = re.match(r'(?i)(opm) (get) (\S*)', text)
if sdpaddrequest:
description = sdpaddrequest.group(3)
postResp = addSdpRequest(requester, description)
resp = json.loads(postResp)
if resp['operation']['result']['status'] == u'Success':
newRequestID = resp['operation']['Details']['WORKORDERID']
replyText = u'{}的SDP工单已添加,工单号是:{}'.format(requester, newRequestID)
else:
replyText = u'{}的SDP工单添加失败,返回信息:\n{}'.format(requester, postResp)
elif opmgetdevice:
deviceName = opmgetdevice.group(3)
postResp = getOpmDevice(deviceName)
resp = json.loads(postResp)
error = resp.get('error', '')
if error:
replyText = error.get('message', '')
else:
displayName = resp.get('displayName', '')
displayCategory = resp.get('displayCategory', '')
respTime = resp.get('responseTime', '')
status = resp.get('statusStr', '')
sysName = resp.get('sysName', '')
typeStr = resp.get('type', '')
managed = resp.get('managed', '')
deviceProtocol = resp.get('deviceProtocol', '')
availdata = resp['availdata']['Up']
defaultDials = u'性能监视器:\n'
for data in resp['defaultDials']:
defaultDials += u'\t\t - {}:{}\n'.format(data.get('displayName', ''), data.get('value', ''))
replyText = u'名称:{}\n类别:{}\n响应时间:{}\n状态:{}\n型号:{}\n类型:{}\n是否被管:{}\n协议:{}\n可用性:{}\n{}\n'.format(displayName, displayCategory, respTime, status, sysName, typeStr, managed, deviceProtocol, availdata, defaultDials)
else:
replyText += json.loads(get_tuling(text)).get('text',u'没啥可说,大哥常来啊!')
return replyText
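# Illustrative only: group-chat commands matched by the regexes above
# (device name and request text are made up):
#
#   'sdp add request printer on 3F is out of toner'
#       -> creates a ServiceDesk Plus request via addSdpRequest()
#   'opm get 192.168.0.18'
#       -> fetches a device summary from OpManager via getOpmDevice()
#
# Any other text in a watched group falls through to the Tuling chatbot reply.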
'''
@itchat.msg_register(TEXT)
def other_reply(msg):
print 'Not group?????'
return u'I received:{}'.format(msg['Content'])
'''
def getOpmDevice(deviceName):
print 'Get in getOpmDevice()'
# url = 'http://192.168.0.96:8060/api/json/device/getDeviceSummary?isFluidic=true&name=192.168.0.18&apiKey=67b287274fb1be2b449f1653508b7669'
url = '{}/api/json/device/getDeviceSummary?isFluidic=true&name={}&apiKey={}'.format(OPMURL, deviceName, OPMAPIKEY)
response = urllib2.urlopen(url)
print ' - Got response'
return response.read().decode('UTF-8')
def addSdpRequest(requester, description):
print 'Get in addSdpRequest()'
# url = 'http://192.168.0.111:8050/sdpapi/request?TECHNICIAN_KEY=5C45F603-DF68-4CC1-BB66-E923670EC2BD'
url = '{}/sdpapi/request?TECHNICIAN_KEY={}'.format(SDPURL, SDPAPIKEY)
subject = u'来自微信的工单:组名:{} 用户名:{}'.format(u'运维告警', requester)
inputdata = u'{"operation": {"details": {"requester": "%s", "subject": "%s", "description": "%s", "requesttemplate": "Unable to browse", "priority": "High", "site": "New York", "group": "Network", "technician": "Howard Stern", "level": "Tier 3", "status": "open", "service": "Email"}}}' % (requester, subject, description)
postdata = {
"format": 'json',
"OPERATION_NAME": 'ADD_REQUEST',
"INPUT_DATA": inputdata.encode('UTF-8')
}
print 'ADD_REQUEST -- %s' % postdata
response = urllib2.urlopen(url=url, data=urllib.urlencode(postdata))
print ' - Got response'
return response.read().decode('UTF-8')
def get_tuling(msg):
apiUrl = 'http://www.tuling123.com/openapi/api'
data = {
'key': TULINGKEY,
'info': msg.encode('UTF-8'),
'userid': 'wechat-robot',
}
response = urllib2.urlopen(url=apiUrl, data=urllib.urlencode(data))
return response.read().decode('UTF-8')
def openConnection():
    while running:
conn = listener.accept()
chatrooms = itchat.get_chatrooms()
groupList = []
print 'connection accepted from', listener.last_accepted
data = conn.recv()
print data
try:
for room in chatrooms:
print room['NickName']
if room['NickName'] in WECHATGROUP:
groupList.append(room)
for group in groupList:
wxresp = itchat.send(data, group['UserName'])
conn.send_bytes('Wechat Response: {}'.format(wxresp['BaseResponse']['ErrMsg'].encode('utf-8')))
except Exception, e:
print e
raise e
finally:
conn.close()
def closeConnection():
    global running
    running = False
listener.close()
listener_thread = Thread(target=openConnection)
listener_thread.daemon = True
listener_thread.start()
itchat.auto_login(hotReload=True, exitCallback=closeConnection)
itchat.run()
|
test_search.py
|
import time
import pdb
import copy
import logging
from multiprocessing import Pool, Process
import pytest
import numpy as np
from pymilvus import DataType
from utils.utils import *
from common.constants import *
uid = "test_search"
nq = 1
epsilon = 0.001
field_name = default_float_vec_field_name
binary_field_name = default_binary_vec_field_name
search_param = {"nprobe": 1}
entity = gen_entities(1, is_normal=True)
entities = gen_entities(default_nb, is_normal=True)
raw_vectors, binary_entities = gen_binary_entities(default_nb)
default_query, default_query_vecs = gen_query_vectors(field_name, entities, default_top_k, nq)
default_binary_query, default_binary_query_vecs = gen_query_vectors(binary_field_name, binary_entities, default_top_k,
nq)
def init_data(connect, collection, nb=3000, partition_names=None, auto_id=True):
'''
    Generate entities and insert them into the collection
'''
global entities
if nb == 3000:
insert_entities = entities
else:
insert_entities = gen_entities(nb, is_normal=True)
if partition_names is None:
res = connect.insert(collection, insert_entities)
else:
res = connect.insert(collection, insert_entities, partition_name=partition_names)
connect.flush([collection])
ids = res.primary_keys
return insert_entities, ids
def init_binary_data(connect, collection, nb=3000, insert=True, partition_names=None):
'''
    Generate entities and insert them into the collection
'''
ids = []
global binary_entities
global raw_vectors
if nb == 3000:
insert_entities = binary_entities
insert_raw_vectors = raw_vectors
else:
insert_raw_vectors, insert_entities = gen_binary_entities(nb)
if insert is True:
if partition_names is None:
res = connect.insert(collection, insert_entities)
else:
res = connect.insert(collection, insert_entities, partition_name=partition_names)
connect.flush([collection])
ids = res.primary_keys
return insert_raw_vectors, insert_entities, ids
class TestSearchBase:
"""
generate valid create_index params
"""
@pytest.fixture(
scope="function",
params=gen_index()
)
def get_index(self, request, connect):
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in CPU mode")
return request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in CPU mode")
return copy.deepcopy(request.param)
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_jaccard_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] in binary_support():
return request.param
# else:
# pytest.skip("Skip index Temporary")
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_hamming_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] in binary_support():
return request.param
# else:
# pytest.skip("Skip index Temporary")
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_structure_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] == "FLAT":
return request.param
# else:
# pytest.skip("Skip index Temporary")
"""
generate top-k params
"""
@pytest.fixture(
scope="function",
params=[1, 10]
)
def get_top_k(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=[1, 10, 1100]
)
def get_nq(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_search_flat(self, connect, collection, get_top_k, get_nq):
'''
        target: test basic search function, all the search params are correct, change top-k value
method: search with the given vectors, check the result
expected: the length of the result is top_k
'''
top_k = get_top_k
nq = get_nq
entities, ids = init_data(connect, collection)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq)
if top_k <= max_top_k:
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res[0]) == top_k
assert res[0]._distances[0] <= epsilon
assert check_id_result(res[0], ids[0])
else:
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.tags(CaseLabel.L2)
def test_search_flat_top_k(self, connect, collection, get_nq):
'''
        target: test basic search function, all the search params are correct, change top-k value
method: search with the given vectors, check the result
expected: the length of the result is top_k
'''
top_k = 16385
nq = get_nq
entities, ids = init_data(connect, collection)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq)
if top_k <= max_top_k:
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res[0]) == top_k
assert res[0]._distances[0] <= epsilon
assert check_id_result(res[0], ids[0])
else:
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.skip("r0.3-test")
def _test_search_field(self, connect, collection, get_top_k, get_nq):
'''
        target: test basic search function, all the search params are correct, change top-k value
method: search with the given vectors, check the result
expected: the length of the result is top_k
'''
top_k = get_top_k
nq = get_nq
entities, ids = init_data(connect, collection)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq)
if top_k <= max_top_k:
connect.load_collection(collection)
res = connect.search(collection, query, fields=["float_vector"])
assert len(res[0]) == top_k
assert res[0]._distances[0] <= epsilon
assert check_id_result(res[0], ids[0])
res = connect.search(collection, query, fields=["float"])
for i in range(nq):
assert entities[1]["values"][:nq][i] in [r.entity.get('float') for r in res[i]]
else:
with pytest.raises(Exception):
connect.search(collection, query)
def _test_search_after_delete(self, connect, collection, get_top_k, get_nq):
'''
        target: test basic search function before and after deletion, all the search params are
        correct, change top-k value.
        check issue #4200: https://github.com/milvus-io/milvus/issues/4200
method: search with the given vectors, check the result
expected: the deleted entities do not exist in the result.
'''
top_k = get_top_k
nq = get_nq
entities, ids = init_data(connect, collection, nb=10000)
first_int64_value = entities[0]["values"][0]
first_vector = entities[2]["values"][0]
search_param = get_search_param("FLAT")
query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
vecs[:] = []
vecs.append(first_vector)
res = None
if top_k > max_top_k:
with pytest.raises(Exception):
connect.search(collection, query, fields=['int64'])
# pytest.skip("top_k value is larger than max_topp_k")
pass
else:
res = connect.search(collection, query, fields=['int64'])
assert len(res) == 1
assert len(res[0]) >= top_k
assert res[0][0].id == ids[0]
assert res[0][0].entity.get("int64") == first_int64_value
assert res[0]._distances[0] < epsilon
assert check_id_result(res[0], ids[0])
connect.delete_entity_by_id(collection, ids[:1])
connect.flush([collection])
res2 = connect.search(collection, query, fields=['int64'])
assert len(res2) == 1
assert len(res2[0]) >= top_k
assert res2[0][0].id != ids[0]
if top_k > 1:
assert res2[0][0].id == res[0][1].id
assert res2[0][0].entity.get("int64") == res[0][1].entity.get("int64")
@pytest.mark.tags(CaseLabel.L2)
def test_search_after_index(self, connect, collection, get_simple_index, get_top_k, get_nq):
'''
        target: test basic search function, all the search params are correct, test all index params, and build
method: search with the given vectors, check the result
expected: the length of the result is top_k
'''
top_k = get_top_k
nq = get_nq
index_type = get_simple_index["index_type"]
if index_type in skip_pq():
pytest.skip("Skip PQ")
entities, ids = init_data(connect, collection)
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
if top_k > max_top_k:
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
else:
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) >= top_k
assert res[0]._distances[0] < epsilon
assert check_id_result(res[0], ids[0])
@pytest.mark.tags(CaseLabel.L2)
def test_search_after_index_different_metric_type(self, connect, collection, get_simple_index):
'''
target: test search with different metric_type
method: build index with L2, and search using IP
expected: search ok
'''
search_metric_type = "IP"
index_type = get_simple_index["index_type"]
entities, ids = init_data(connect, collection)
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, entities, default_top_k, nq, metric_type=search_metric_type,
search_params=search_param)
connect.load_collection(collection)
if index_type == "FLAT":
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == default_top_k
assert res[0]._distances[0] > res[0]._distances[default_top_k - 1]
else:
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.tags(CaseLabel.L2)
def test_search_index_empty_partition(self, connect, collection, get_simple_index, get_top_k, get_nq):
'''
        target: test basic search function, all the search params are correct, test all index params, and build
method: add vectors into collection, search with the given vectors, check the result
expected: the length of the result is top_k, search collection with partition tag return empty
'''
top_k = get_top_k
nq = get_nq
index_type = get_simple_index["index_type"]
if index_type in skip_pq():
pytest.skip("Skip PQ")
connect.create_partition(collection, default_tag)
entities, ids = init_data(connect, collection)
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
if top_k > max_top_k:
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
else:
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) >= top_k
assert res[0]._distances[0] < epsilon
assert check_id_result(res[0], ids[0])
connect.release_collection(collection)
connect.load_partitions(collection, [default_tag])
res = connect.search(collection, query, partition_names=[default_tag])
assert len(res[0]) == 0
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(600)
def test_search_index_partition(self, connect, collection, get_simple_index, get_top_k, get_nq):
'''
        target: test basic search function, all the search params are correct, test all index params, and build index
method: search with the given vectors, check the result
expected: the length of the result is top_k
'''
top_k = get_top_k
nq = get_nq
index_type = get_simple_index["index_type"]
if index_type in skip_pq():
pytest.skip("Skip PQ")
connect.create_partition(collection, default_tag)
entities, ids = init_data(connect, collection, partition_names=default_tag)
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
if top_k > max_top_k:
with pytest.raises(Exception) as e:
res = connect.search(collection, query, partition_names=[default_tag])
else:
connect.load_partitions(collection, [default_tag])
res = connect.search(collection, query, partition_names=[default_tag])
assert len(res) == nq
assert len(res[0]) == top_k
assert res[0]._distances[0] < epsilon
assert check_id_result(res[0], ids[0])
@pytest.mark.tags(CaseLabel.L2)
def test_search_index_partition_not_existed(self, connect, collection, get_top_k, get_nq, get_simple_index):
'''
        target: test basic search function, all the search params are correct, test all index params, and build index
method: search with the given vectors and tag (tag name not existed in collection), check the result
expected: error raised
'''
top_k = get_top_k
nq = get_nq
entities, ids = init_data(connect, collection)
connect.create_index(collection, field_name, get_simple_index)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq)
if top_k > max_top_k:
with pytest.raises(Exception) as e:
res = connect.search(collection, query, partition_names=["new_tag"])
else:
connect.load_collection(collection)
with pytest.raises(Exception) as e:
connect.search(collection, query, partition_names=["new_tag"])
@pytest.mark.tags(CaseLabel.L2)
def test_search_index_partitions(self, connect, collection, get_simple_index, get_top_k):
'''
        target: test basic search function, all the search params are correct, test all index params, and build index
method: search collection with the given vectors and tags, check the result
expected: the length of the result is top_k
'''
top_k = get_top_k
nq = 2
new_tag = "new_tag"
index_type = get_simple_index["index_type"]
if index_type in skip_pq():
pytest.skip("Skip PQ")
connect.create_partition(collection, default_tag)
connect.create_partition(collection, new_tag)
entities, ids = init_data(connect, collection, partition_names=default_tag)
new_entities, new_ids = init_data(connect, collection, nb=6001, partition_names=new_tag)
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
if top_k > max_top_k:
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
else:
connect.load_collection(collection)
res = connect.search(collection, query)
assert check_id_result(res[0], ids[0])
assert not check_id_result(res[1], new_ids[0])
assert res[0]._distances[0] < epsilon
assert res[1]._distances[0] < epsilon
res = connect.search(collection, query, partition_names=[new_tag])
assert res[0]._distances[0] > epsilon
assert res[1]._distances[0] > epsilon
connect.release_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_search_index_partitions_B(self, connect, collection, get_simple_index, get_top_k):
'''
        target: test basic search function, all the search params are correct, test all index params, and build index
method: search collection with the given vectors and tags, check the result
expected: the length of the result is top_k
'''
top_k = get_top_k
nq = 2
tag = "tag"
new_tag = "new_tag"
index_type = get_simple_index["index_type"]
if index_type in skip_pq():
pytest.skip("Skip PQ")
connect.create_partition(collection, tag)
connect.create_partition(collection, new_tag)
entities, ids = init_data(connect, collection, partition_names=tag)
new_entities, new_ids = init_data(connect, collection, nb=6001, partition_names=new_tag)
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
print(f'entities[-1]["values"][:1]: {entities[-1]["values"][:1]}')
print(f'new_entities[-1]["values"][:1]: {new_entities[-1]["values"][:1]}')
query, vecs = gen_query_vectors(field_name, new_entities, top_k, nq, search_params=search_param,
replace_vecs=[entities[-1]["values"][:1][0], new_entities[-1]["values"][:1][0]])
if top_k > max_top_k:
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
else:
connect.load_collection(collection)
res = connect.search(collection, query, partition_names=["(.*)tag"])
assert check_id_result(res[0], ids[0])
assert check_id_result(res[0], new_ids[0])
assert res[0]._distances[0] < epsilon
assert res[1]._distances[0] < epsilon
res = connect.search(collection, query, partition_names=["new(.*)"])
assert not check_id_result(res[0], ids[0])
assert check_id_result(res[1], new_ids[0])
assert res[0]._distances[0] > epsilon
assert res[1]._distances[0] < epsilon
connect.release_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_search_ip_flat(self, connect, collection, get_simple_index, get_top_k, get_nq):
'''
        target: test basic search function, all the search params are correct, change top-k value
method: search with the given vectors, check the result
expected: the length of the result is top_k
'''
top_k = get_top_k
nq = get_nq
entities, ids = init_data(connect, collection)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq, metric_type="IP")
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res[0]) == top_k
assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
assert check_id_result(res[0], ids[0])
@pytest.mark.tags(CaseLabel.L2)
def test_search_ip_after_index(self, connect, collection, get_simple_index, get_top_k, get_nq):
'''
        target: test basic search function, all the search params are correct, test all index params, and build index
method: search with the given vectors, check the result
expected: the length of the result is top_k
'''
top_k = get_top_k
nq = get_nq
index_type = get_simple_index["index_type"]
if index_type in skip_pq():
pytest.skip("Skip PQ")
entities, ids = init_data(connect, collection)
get_simple_index["metric_type"] = "IP"
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq, metric_type="IP", search_params=search_param)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) >= top_k
assert check_id_result(res[0], ids[0])
assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
@pytest.mark.tags(CaseLabel.L2)
def test_search_ip_index_empty_partition(self, connect, collection, get_simple_index, get_top_k, get_nq):
'''
        target: test basic search function, all the search params are correct, test all index params, and build index
        method: add vectors into collection, search with the given vectors, check the result
        expected: the length of the result is top_k; searching the collection with the partition tag returns an empty result
'''
top_k = get_top_k
nq = get_nq
metric_type = "IP"
index_type = get_simple_index["index_type"]
if index_type in skip_pq():
pytest.skip("Skip PQ")
connect.create_partition(collection, default_tag)
entities, ids = init_data(connect, collection)
get_simple_index["metric_type"] = metric_type
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq, metric_type=metric_type,
search_params=search_param)
if top_k > max_top_k:
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
else:
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) >= top_k
assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
assert check_id_result(res[0], ids[0])
res = connect.search(collection, query, partition_names=[default_tag])
assert len(res[0]) == 0
@pytest.mark.tags(CaseLabel.L2)
def test_search_ip_index_partitions(self, connect, collection, get_simple_index, get_top_k):
'''
        target: test basic search function, all the search params are correct, test all index params, and build index
method: search collection with the given vectors and tags, check the result
expected: the length of the result is top_k
'''
top_k = get_top_k
nq = 2
metric_type = "IP"
new_tag = "new_tag"
index_type = get_simple_index["index_type"]
if index_type in skip_pq():
pytest.skip("Skip PQ")
connect.create_partition(collection, default_tag)
connect.create_partition(collection, new_tag)
entities, ids = init_data(connect, collection, partition_names=default_tag)
new_entities, new_ids = init_data(connect, collection, nb=6001, partition_names=new_tag)
get_simple_index["metric_type"] = metric_type
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq, metric_type="IP", search_params=search_param)
connect.load_collection(collection)
res = connect.search(collection, query)
assert check_id_result(res[0], ids[0])
assert not check_id_result(res[1], new_ids[0])
assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
assert res[1]._distances[0] >= 1 - gen_inaccuracy(res[1]._distances[0])
res = connect.search(collection, query, partition_names=["new_tag"])
assert res[0]._distances[0] < 1 - gen_inaccuracy(res[0]._distances[0])
# TODO:
# assert res[1]._distances[0] >= 1 - gen_inaccuracy(res[1]._distances[0])
@pytest.mark.tags(CaseLabel.L2)
def test_search_without_connect(self, dis_connect, collection):
'''
target: test search vectors without connection
        method: use a disconnected instance, call the search method and check that it fails
expected: raise exception
'''
with pytest.raises(Exception) as e:
res = dis_connect.search(collection, default_query)
@pytest.mark.tags(CaseLabel.L2)
def test_search_collection_not_existed(self, connect):
'''
        target: search a collection that does not exist
        method: search with a random collection_name which is not in the db
        expected: exception raised
'''
collection_name = gen_unique_str(uid)
with pytest.raises(Exception) as e:
res = connect.search(collection_name, default_query)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_search_distance_l2(self, connect, collection):
'''
target: search collection, and check the result: distance
        method: compare the returned distance value with the value computed with the Euclidean (L2) distance
        expected: the returned distance equals the computed value
'''
nq = 2
search_param = {"nprobe": 1}
entities, ids = init_data(connect, collection, nb=nq)
query, vecs = gen_query_vectors(field_name, entities, default_top_k, nq, rand_vector=True,
search_params=search_param)
inside_query, inside_vecs = gen_query_vectors(field_name, entities, default_top_k, nq,
search_params=search_param)
distance_0 = l2(vecs[0], inside_vecs[0])
distance_1 = l2(vecs[0], inside_vecs[1])
connect.load_collection(collection)
res = connect.search(collection, query)
assert abs(np.sqrt(res[0]._distances[0]) - min(distance_0, distance_1)) <= gen_inaccuracy(res[0]._distances[0])
@pytest.mark.tags(CaseLabel.L2)
def test_search_distance_l2_after_index(self, connect, id_collection, get_simple_index):
'''
target: search collection, and check the result: distance
        method: compare the returned distance value with the value computed with the L2 (Euclidean) distance
        expected: the returned distance equals the computed value
'''
index_type = get_simple_index["index_type"]
nq = 2
entities, ids = init_data(connect, id_collection, auto_id=False)
connect.create_index(id_collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, entities, default_top_k, nq, rand_vector=True,
search_params=search_param)
inside_vecs = entities[-1]["values"]
min_distance = 1.0
min_id = None
for i in range(default_nb):
tmp_dis = l2(vecs[0], inside_vecs[i])
if min_distance > tmp_dis:
min_distance = tmp_dis
min_id = ids[i]
connect.load_collection(id_collection)
res = connect.search(id_collection, query)
tmp_epsilon = epsilon
check_id_result(res[0], min_id)
# if index_type in ["ANNOY", "IVF_PQ"]:
# tmp_epsilon = 0.1
# TODO:
# assert abs(np.sqrt(res[0]._distances[0]) - min_distance) <= tmp_epsilon
@pytest.mark.tags(CaseLabel.L2)
def test_search_distance_ip(self, connect, collection):
'''
target: search collection, and check the result: distance
        method: compare the returned distance value with the value computed with inner product
        expected: the returned distance equals the computed value
'''
nq = 2
        metric_type = "IP"
search_param = {"nprobe": 1}
entities, ids = init_data(connect, collection, nb=nq)
query, vecs = gen_query_vectors(field_name, entities, default_top_k, nq, rand_vector=True,
                                        metric_type=metric_type,
search_params=search_param)
inside_query, inside_vecs = gen_query_vectors(field_name, entities, default_top_k, nq,
search_params=search_param)
distance_0 = ip(vecs[0], inside_vecs[0])
distance_1 = ip(vecs[0], inside_vecs[1])
connect.load_collection(collection)
res = connect.search(collection, query)
assert abs(res[0]._distances[0] - max(distance_0, distance_1)) <= epsilon
@pytest.mark.tags(CaseLabel.L2)
def test_search_distance_ip_after_index(self, connect, id_collection, get_simple_index):
'''
target: search collection, and check the result: distance
        method: compare the returned distance value with the value computed with inner product
        expected: the returned distance equals the computed value
'''
index_type = get_simple_index["index_type"]
nq = 2
        metric_type = "IP"
entities, ids = init_data(connect, id_collection, auto_id=False)
get_simple_index["metric_type"] = metirc_type
connect.create_index(id_collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, entities, default_top_k, nq, rand_vector=True,
                                        metric_type=metric_type,
search_params=search_param)
inside_vecs = entities[-1]["values"]
max_distance = 0
max_id = None
for i in range(default_nb):
tmp_dis = ip(vecs[0], inside_vecs[i])
if max_distance < tmp_dis:
max_distance = tmp_dis
max_id = ids[i]
connect.load_collection(id_collection)
res = connect.search(id_collection, query)
tmp_epsilon = epsilon
check_id_result(res[0], max_id)
# if index_type in ["ANNOY", "IVF_PQ"]:
# tmp_epsilon = 0.1
# TODO:
# assert abs(res[0]._distances[0] - max_distance) <= tmp_epsilon
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_search_distance_jaccard_flat_index(self, connect, binary_collection):
'''
target: search binary_collection, and check the result: distance
        method: compare the returned distance value with the value computed with the Jaccard distance
        expected: the returned distance equals the computed value
'''
nq = 1
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
distance_0 = jaccard(query_int_vectors[0], int_vectors[0])
distance_1 = jaccard(query_int_vectors[0], int_vectors[1])
query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq, metric_type="JACCARD")
connect.load_collection(binary_collection)
res = connect.search(binary_collection, query)
assert abs(res[0]._distances[0] - min(distance_0, distance_1)) <= epsilon
@pytest.mark.tags(CaseLabel.L2)
def test_search_binary_flat_with_L2(self, connect, binary_collection):
'''
        target: search binary_collection with the L2 metric type
        method: search the binary collection with a query whose metric type is L2
        expected: exception raised
'''
nq = 1
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq, metric_type="L2")
with pytest.raises(Exception) as e:
connect.search(binary_collection, query)
@pytest.mark.tags(CaseLabel.L2)
def test_search_distance_hamming_flat_index(self, connect, binary_collection):
'''
target: search binary_collection, and check the result: distance
        method: compare the returned distance value with the value computed with the Hamming distance
        expected: the returned distance equals the computed value
'''
nq = 1
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
distance_0 = hamming(query_int_vectors[0], int_vectors[0])
distance_1 = hamming(query_int_vectors[0], int_vectors[1])
query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq, metric_type="HAMMING")
connect.load_collection(binary_collection)
res = connect.search(binary_collection, query)
assert abs(res[0][0].distance - min(distance_0, distance_1).astype(float)) <= epsilon
@pytest.mark.tags(CaseLabel.L2)
def test_search_distance_substructure_flat_index(self, connect, binary_collection):
'''
target: search binary_collection, and check the result: distance
method: search with new random binary entities and SUBSTRUCTURE metric type
        expected: empty result returned
'''
nq = 1
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
distance_0 = substructure(query_int_vectors[0], int_vectors[0])
distance_1 = substructure(query_int_vectors[0], int_vectors[1])
query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq,
metric_type="SUBSTRUCTURE")
connect.load_collection(binary_collection)
res = connect.search(binary_collection, query)
assert len(res[0]) == 0
@pytest.mark.tags(CaseLabel.L2)
def test_search_distance_substructure_flat_index_B(self, connect, binary_collection):
'''
target: search binary_collection, and check the result: distance
        method: search with entities generated as sub-vectors of the inserted entities
        expected: the matched ids are the inserted ids and the distances are close to 0
'''
top_k = 3
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_vecs = gen_binary_sub_vectors(int_vectors, 2)
query, vecs = gen_query_vectors(binary_field_name, entities, top_k, nq, metric_type="SUBSTRUCTURE",
replace_vecs=query_vecs)
connect.load_collection(binary_collection)
res = connect.search(binary_collection, query)
assert res[0][0].distance <= epsilon
assert res[0][0].id == ids[0]
assert res[1][0].distance <= epsilon
assert res[1][0].id == ids[1]
@pytest.mark.tags(CaseLabel.L2)
def test_search_distance_superstructure_flat_index(self, connect, binary_collection):
'''
target: search binary_collection, and check the result: distance
        method: search with new random binary entities and SUPERSTRUCTURE metric type
        expected: empty result returned
'''
nq = 1
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
distance_0 = superstructure(query_int_vectors[0], int_vectors[0])
distance_1 = superstructure(query_int_vectors[0], int_vectors[1])
query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq,
metric_type="SUPERSTRUCTURE")
connect.load_collection(binary_collection)
res = connect.search(binary_collection, query)
assert len(res[0]) == 0
@pytest.mark.tags(CaseLabel.L2)
def test_search_distance_superstructure_flat_index_B(self, connect, binary_collection):
'''
target: search binary_collection, and check the result: distance
        method: search with entities generated as super-vectors of the inserted entities
        expected: the matched ids are among the inserted ids and the distances are close to 0
'''
top_k = 3
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_vecs = gen_binary_super_vectors(int_vectors, 2)
query, vecs = gen_query_vectors(binary_field_name, entities, top_k, nq, metric_type="SUPERSTRUCTURE",
replace_vecs=query_vecs)
connect.load_collection(binary_collection)
res = connect.search(binary_collection, query)
assert len(res[0]) == 2
assert len(res[1]) == 2
assert res[0][0].id in ids
assert res[0][0].distance <= epsilon
assert res[1][0].id in ids
assert res[1][0].distance <= epsilon
@pytest.mark.tags(CaseLabel.L2)
def test_search_distance_tanimoto_flat_index(self, connect, binary_collection):
'''
target: search binary_collection, and check the result: distance
        method: compare the returned distance value with the value computed with the Tanimoto distance
        expected: the returned distance equals the computed value
'''
nq = 1
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
distance_0 = tanimoto(query_int_vectors[0], int_vectors[0])
distance_1 = tanimoto(query_int_vectors[0], int_vectors[1])
query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq, metric_type="TANIMOTO")
connect.load_collection(binary_collection)
res = connect.search(binary_collection, query)
assert abs(res[0][0].distance - min(distance_0, distance_1)) <= epsilon
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(300)
def test_search_concurrent_multithreads(self, connect, args):
'''
        target: test concurrent search with multiple threads
        method: search with 4 threads, each using its own connection
        expected: each search succeeds and the returned ids are among the inserted ids
'''
nb = 100
top_k = 10
threads_num = 4
threads = []
collection = gen_unique_str(uid)
uri = "tcp://%s:%s" % (args["ip"], args["port"])
# create collection
milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
milvus.create_collection(collection, default_fields)
entities, ids = init_data(milvus, collection)
connect.load_collection(collection)
def search(milvus):
res = milvus.search(collection, default_query)
assert len(res) == 1
assert res[0]._entities[0].id in ids
assert res[0]._distances[0] < epsilon
for i in range(threads_num):
milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
t = MyThread(target=search, args=(milvus,))
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(300)
def test_search_concurrent_multithreads_single_connection(self, connect, args):
'''
        target: test concurrent search with multiple threads sharing one connection
        method: search with 4 threads, all using the same connection
        expected: each search succeeds and the returned ids are among the inserted ids
'''
nb = 100
top_k = 10
threads_num = 4
threads = []
collection = gen_unique_str(uid)
uri = "tcp://%s:%s" % (args["ip"], args["port"])
# create collection
milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
milvus.create_collection(collection, default_fields)
entities, ids = init_data(milvus, collection)
connect.load_collection(collection)
def search(milvus):
res = milvus.search(collection, default_query)
assert len(res) == 1
assert res[0]._entities[0].id in ids
assert res[0]._distances[0] < epsilon
for i in range(threads_num):
t = MyThread(target=search, args=(milvus,))
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
@pytest.mark.tags(CaseLabel.L2)
def test_search_multi_collections(self, connect, args):
'''
        target: test search over multiple collections with the L2 metric
        method: add vectors into 10 collections, and search each of them
        expected: search status ok, the length of the result is nq
'''
num = 10
top_k = 10
nq = 20
collection_names = []
for i in range(num):
collection = gen_unique_str(uid + str(i))
connect.create_collection(collection, default_fields)
collection_names.append(collection)
entities, ids = init_data(connect, collection)
assert len(ids) == default_nb
query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
for i in range(nq):
assert check_id_result(res[i], ids[i])
assert res[i]._distances[0] < epsilon
assert res[i]._distances[1] > epsilon
for i in range(num):
connect.drop_collection(collection_names[i])
@pytest.mark.skip("r0.3-test")
def _test_query_entities_with_field_less_than_top_k(self, connect, id_collection):
"""
        target: test search with an output field, where the number of returned entities is less than top_k
        method: insert entities, build an IVF_FLAT index, and search with the field, nprobe=1
        expected: the requested field is returned and matches the entity id
"""
entities, ids = init_data(connect, id_collection, auto_id=False)
simple_index = {"index_type": "IVF_FLAT", "params": {"nlist": 200}, "metric_type": "L2"}
connect.create_index(id_collection, field_name, simple_index)
# logging.getLogger().info(connect.get_collection_info(id_collection))
top_k = 300
default_query, default_query_vecs = gen_query_vectors(field_name, entities, top_k, nq,
search_params={"nprobe": 1})
expr = {"must": [gen_default_vector_expr(default_query)]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(id_collection)
res = connect.search(id_collection, query, fields=["int64"])
assert len(res) == nq
for r in res[0]:
assert getattr(r.entity, "int64") == getattr(r.entity, "id")
class TestSearchDSL(object):
"""
******************************************************************
# The following cases are used to build invalid query expr
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L2)
def test_query_no_must(self, connect, collection):
'''
method: build query without must expr
expected: error raised
'''
# entities, ids = init_data(connect, collection)
query = update_query_expr(default_query, keep_old=False)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_query_no_vector_term_only(self, connect, collection):
'''
        method: build query with only a term expr and no vector expr
expected: error raised
'''
# entities, ids = init_data(connect, collection)
expr = {
"must": [gen_default_term_expr]
}
query = update_query_expr(default_query, keep_old=False, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_query_no_vector_range_only(self, connect, collection):
'''
        method: build query with only a range expr and no vector expr
expected: error raised
'''
# entities, ids = init_data(connect, collection)
expr = {
"must": [gen_default_range_expr]
}
query = update_query_expr(default_query, keep_old=False, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_query_vector_only(self, connect, collection):
entities, ids = init_data(connect, collection)
connect.load_collection(collection)
res = connect.search(collection, default_query)
assert len(res) == nq
assert len(res[0]) == default_top_k
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_query_wrong_format(self, connect, collection):
'''
method: build query without must expr, with wrong expr name
expected: error raised
'''
# entities, ids = init_data(connect, collection)
expr = {
"must1": [gen_default_term_expr]
}
query = update_query_expr(default_query, keep_old=False, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_query_empty(self, connect, collection):
'''
method: search with empty query
expected: error raised
'''
query = {}
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
"""
******************************************************************
# The following cases are used to build valid query expr
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L2)
def test_query_term_value_not_in(self, connect, collection):
'''
        method: build query with vector and term expr, where no term value matches any entity
        expected: empty result returned
'''
entities, ids = init_data(connect, collection)
expr = {
"must": [gen_default_vector_expr(default_query), gen_default_term_expr(values=[100000])]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == 0
# TODO:
@pytest.mark.tags(CaseLabel.L2)
def test_query_term_value_all_in(self, connect, collection):
'''
        method: build query with vector and term expr, where the single term value matches an inserted entity
expected: filter pass
'''
entities, ids = init_data(connect, collection)
expr = {"must": [gen_default_vector_expr(default_query), gen_default_term_expr(values=[1])]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == 1
# TODO:
@pytest.mark.tags(CaseLabel.L2)
def test_query_term_values_not_in(self, connect, collection):
'''
        method: build query with vector and term expr, where none of the term values match any entity
        expected: empty result returned
'''
entities, ids = init_data(connect, collection)
expr = {"must": [gen_default_vector_expr(default_query),
gen_default_term_expr(values=[i for i in range(100000, 100010)])]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == 0
# TODO:
@pytest.mark.tags(CaseLabel.L2)
def test_query_term_values_all_in(self, connect, collection):
'''
        method: build query with vector and term expr, where all term values match inserted entities
expected: filter pass
'''
entities, ids = init_data(connect, collection)
expr = {"must": [gen_default_vector_expr(default_query), gen_default_term_expr()]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == default_top_k
limit = default_nb // 2
for i in range(nq):
for result in res[i]:
logging.getLogger().info(result.id)
assert result.id in ids[:limit]
# TODO:
@pytest.mark.tags(CaseLabel.L2)
def test_query_term_values_parts_in(self, connect, collection):
'''
        method: build query with vector and term expr, where only part of the term values match inserted entities
expected: filter pass
'''
entities, ids = init_data(connect, collection)
expr = {"must": [gen_default_vector_expr(default_query),
gen_default_term_expr(
values=[i for i in range(default_nb // 2, default_nb + default_nb // 2)])]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == default_top_k
# TODO:
@pytest.mark.tags(CaseLabel.L2)
def test_query_term_values_repeat(self, connect, collection):
'''
        method: build query with vector and term expr, with repeated term values
expected: filter pass
'''
entities, ids = init_data(connect, collection)
expr = {
"must": [gen_default_vector_expr(default_query),
gen_default_term_expr(values=[1 for i in range(1, default_nb)])]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == 1
# TODO:
@pytest.mark.tags(CaseLabel.L2)
def test_query_term_value_empty(self, connect, collection):
'''
        method: build query with an empty term values list
        expected: empty result returned
'''
expr = {"must": [gen_default_vector_expr(default_query), gen_default_term_expr(values=[])]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == 0
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_query_complex_dsl(self, connect, collection):
'''
method: query with complicated dsl
expected: no error raised
'''
expr = {"must": [
{"must": [{"should": [gen_default_term_expr(values=[1]), gen_default_range_expr()]}]},
{"must": [gen_default_vector_expr(default_query)]}
]}
logging.getLogger().info(expr)
query = update_query_expr(default_query, expr=expr)
logging.getLogger().info(query)
connect.load_collection(collection)
res = connect.search(collection, query)
logging.getLogger().info(res)
"""
******************************************************************
# The following cases are used to build invalid term query expr
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L2)
def test_query_term_key_error(self, connect, collection):
'''
method: build query with term key error
expected: Exception raised
'''
expr = {"must": [gen_default_vector_expr(default_query),
gen_default_term_expr(keyword="terrm", values=[i for i in range(default_nb // 2)])]}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.fixture(
scope="function",
params=gen_invalid_term()
)
def get_invalid_term(self, request):
return request.param
@pytest.mark.tags(CaseLabel.L2)
def test_query_term_wrong_format(self, connect, collection, get_invalid_term):
'''
method: build query with wrong format term
expected: Exception raised
'''
entities, ids = init_data(connect, collection)
term = get_invalid_term
expr = {"must": [gen_default_vector_expr(default_query), term]}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.tags(CaseLabel.L2)
def test_query_term_field_named_term(self, connect, collection):
'''
method: build query with field named "term"
        expected: search ok
'''
term_fields = add_field_default(default_fields, field_name="term")
collection_term = gen_unique_str("term")
connect.create_collection(collection_term, term_fields)
term_entities = add_field(entities, field_name="term")
ids = connect.insert(collection_term, term_entities).primary_keys
assert len(ids) == default_nb
connect.flush([collection_term])
# count = connect.count_entities(collection_term)
# assert count == default_nb
stats = connect.get_collection_stats(collection_term)
assert stats["row_count"] == default_nb
term_param = {"term": {"term": {"values": [i for i in range(default_nb // 2)]}}}
expr = {"must": [gen_default_vector_expr(default_query),
term_param]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection_term)
res = connect.search(collection_term, query)
assert len(res) == nq
assert len(res[0]) == default_top_k
connect.drop_collection(collection_term)
@pytest.mark.tags(CaseLabel.L2)
def test_query_term_one_field_not_existed(self, connect, collection):
'''
        method: build query with a term expr containing two fields, one of which does not exist
expected: exception raised
'''
entities, ids = init_data(connect, collection)
term = gen_default_term_expr()
term["term"].update({"a": [0]})
expr = {"must": [gen_default_vector_expr(default_query), term]}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
"""
******************************************************************
# The following cases are used to build valid range query expr
******************************************************************
"""
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_query_range_key_error(self, connect, collection):
'''
method: build query with range key error
expected: Exception raised
'''
range = gen_default_range_expr(keyword="ranges")
expr = {"must": [gen_default_vector_expr(default_query), range]}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.fixture(
scope="function",
params=gen_invalid_range()
)
def get_invalid_range(self, request):
return request.param
@pytest.mark.tags(CaseLabel.L2)
def test_query_range_wrong_format(self, connect, collection, get_invalid_range):
'''
method: build query with wrong format range
expected: Exception raised
'''
entities, ids = init_data(connect, collection)
range = get_invalid_range
expr = {"must": [gen_default_vector_expr(default_query), range]}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.tags(CaseLabel.L2)
def test_query_range_string_ranges(self, connect, collection):
'''
method: build query with invalid ranges
expected: raise Exception
'''
entities, ids = init_data(connect, collection)
ranges = {"GT": "0", "LT": "1000"}
range = gen_default_range_expr(ranges=ranges)
expr = {"must": [gen_default_vector_expr(default_query), range]}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.tags(CaseLabel.L2)
def test_query_range_invalid_ranges(self, connect, collection):
'''
        method: build query with an invalid range whose lower bound is greater than its upper bound
        expected: empty result returned
'''
entities, ids = init_data(connect, collection)
ranges = {"GT": default_nb, "LT": 0}
range = gen_default_range_expr(ranges=ranges)
expr = {"must": [gen_default_vector_expr(default_query), range]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res[0]) == 0
@pytest.fixture(
scope="function",
params=gen_valid_ranges()
)
def get_valid_ranges(self, request):
return request.param
@pytest.mark.tags(CaseLabel.L2)
def test_query_range_valid_ranges(self, connect, collection, get_valid_ranges):
'''
method: build query with valid ranges
expected: pass
'''
entities, ids = init_data(connect, collection)
ranges = get_valid_ranges
range = gen_default_range_expr(ranges=ranges)
expr = {"must": [gen_default_vector_expr(default_query), range]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == default_top_k
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_query_range_one_field_not_existed(self, connect, collection):
'''
        method: build query with a range expr containing two fields, one of which does not exist
expected: exception raised
'''
entities, ids = init_data(connect, collection)
range = gen_default_range_expr()
range["range"].update({"a": {"GT": 1, "LT": default_nb // 2}})
expr = {"must": [gen_default_vector_expr(default_query), range]}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
"""
************************************************************************
# The following cases are used to build query expr multi range and term
************************************************************************
"""
@pytest.mark.tags(CaseLabel.L2)
def test_query_multi_term_has_common(self, connect, collection):
'''
        method: build query with multiple term exprs on the same field, whose values have common elements
expected: pass
'''
entities, ids = init_data(connect, collection)
term_first = gen_default_term_expr()
term_second = gen_default_term_expr(values=[i for i in range(default_nb // 3)])
expr = {"must": [gen_default_vector_expr(default_query), term_first, term_second]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == default_top_k
@pytest.mark.tags(CaseLabel.L2)
def test_query_multi_term_no_common(self, connect, collection):
'''
        method: build query with multiple term exprs on the same field, whose values have no common elements
expected: pass
'''
entities, ids = init_data(connect, collection)
term_first = gen_default_term_expr()
term_second = gen_default_term_expr(values=[i for i in range(default_nb // 2, default_nb + default_nb // 2)])
expr = {"must": [gen_default_vector_expr(default_query), term_first, term_second]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == 0
@pytest.mark.tags(CaseLabel.L2)
def test_query_multi_term_different_fields(self, connect, collection):
'''
        method: build query with multiple term exprs on different fields
expected: pass
'''
entities, ids = init_data(connect, collection)
term_first = gen_default_term_expr()
term_second = gen_default_term_expr(field="float",
values=[float(i) for i in range(default_nb // 2, default_nb)])
expr = {"must": [gen_default_vector_expr(default_query), term_first, term_second]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == 0
@pytest.mark.tags(CaseLabel.L2)
def test_query_single_term_multi_fields(self, connect, collection):
'''
        method: build a single term expr containing multiple fields
        expected: exception raised
'''
entities, ids = init_data(connect, collection)
term_first = {"int64": {"values": [i for i in range(default_nb // 2)]}}
term_second = {"float": {"values": [float(i) for i in range(default_nb // 2, default_nb)]}}
term = update_term_expr({"term": {}}, [term_first, term_second])
expr = {"must": [gen_default_vector_expr(default_query), term]}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.tags(CaseLabel.L2)
def test_query_multi_range_has_common(self, connect, collection):
'''
        method: build query with multiple range exprs on the same field, whose ranges overlap
expected: pass
'''
entities, ids = init_data(connect, collection)
range_one = gen_default_range_expr()
range_two = gen_default_range_expr(ranges={"GT": 1, "LT": default_nb // 3})
expr = {"must": [gen_default_vector_expr(default_query), range_one, range_two]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == default_top_k
@pytest.mark.tags(CaseLabel.L2)
def test_query_multi_range_no_common(self, connect, collection):
'''
        method: build query with multiple range exprs on the same field, whose ranges do not overlap
expected: pass
'''
entities, ids = init_data(connect, collection)
range_one = gen_default_range_expr()
range_two = gen_default_range_expr(ranges={"GT": default_nb // 2, "LT": default_nb})
expr = {"must": [gen_default_vector_expr(default_query), range_one, range_two]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == 0
@pytest.mark.tags(CaseLabel.L2)
def test_query_multi_range_different_fields(self, connect, collection):
'''
        method: build query with multiple range exprs, each on a different field
expected: pass
'''
entities, ids = init_data(connect, collection)
range_first = gen_default_range_expr()
range_second = gen_default_range_expr(field="float", ranges={"GT": default_nb // 2, "LT": default_nb})
expr = {"must": [gen_default_vector_expr(default_query), range_first, range_second]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == 0
@pytest.mark.tags(CaseLabel.L2)
def test_query_single_range_multi_fields(self, connect, collection):
'''
        method: build a single range expr containing multiple fields
        expected: exception raised
'''
entities, ids = init_data(connect, collection)
range_first = {"int64": {"GT": 0, "LT": default_nb // 2}}
range_second = {"float": {"GT": default_nb / 2, "LT": float(default_nb)}}
range = update_range_expr({"range": {}}, [range_first, range_second])
expr = {"must": [gen_default_vector_expr(default_query), range]}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
"""
******************************************************************
# The following cases are used to build query expr both term and range
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L2)
def test_query_single_term_range_has_common(self, connect, collection):
'''
method: build query with single term single range
expected: pass
'''
entities, ids = init_data(connect, collection)
term = gen_default_term_expr()
range = gen_default_range_expr(ranges={"GT": -1, "LT": default_nb // 2})
expr = {"must": [gen_default_vector_expr(default_query), term, range]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == default_top_k
@pytest.mark.tags(CaseLabel.L2)
def test_query_single_term_range_no_common(self, connect, collection):
'''
method: build query with single term single range
expected: pass
'''
entities, ids = init_data(connect, collection)
term = gen_default_term_expr()
range = gen_default_range_expr(ranges={"GT": default_nb // 2, "LT": default_nb})
expr = {"must": [gen_default_vector_expr(default_query), term, range]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == 0
"""
******************************************************************
# The following cases are used to build multi vectors query expr
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L2)
def test_query_multi_vectors_same_field(self, connect, collection):
'''
method: build query with two vectors same field
expected: error raised
'''
entities, ids = init_data(connect, collection)
vector1 = default_query
vector2 = gen_query_vectors(field_name, entities, default_top_k, nq=2)
expr = {
"must": [vector1, vector2]
}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
class TestSearchDSLBools(object):
"""
******************************************************************
# The following cases are used to build invalid query expr
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L2)
def test_query_no_bool(self, connect, collection):
'''
method: build query without bool expr
expected: error raised
'''
entities, ids = init_data(connect, collection)
expr = {"bool1": {}}
query = expr
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_query_should_only_term(self, connect, collection):
'''
method: build query without must, with should.term instead
expected: error raised
'''
expr = {"should": gen_default_term_expr}
query = update_query_expr(default_query, keep_old=False, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_query_should_only_vector(self, connect, collection):
'''
method: build query without must, with should.vector instead
expected: error raised
'''
expr = {"should": default_query["bool"]["must"]}
query = update_query_expr(default_query, keep_old=False, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.tags(CaseLabel.L2)
def test_query_must_not_only_term(self, connect, collection):
'''
method: build query without must, with must_not.term instead
expected: error raised
'''
expr = {"must_not": gen_default_term_expr}
query = update_query_expr(default_query, keep_old=False, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.tags(CaseLabel.L2)
def test_query_must_not_vector(self, connect, collection):
'''
method: build query without must, with must_not.vector instead
expected: error raised
'''
expr = {"must_not": default_query["bool"]["must"]}
query = update_query_expr(default_query, keep_old=False, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.tags(CaseLabel.L2)
def test_query_must_should(self, connect, collection):
'''
        method: build query with must, and with an additional should.term expr
expected: error raised
'''
expr = {"should": gen_default_term_expr}
query = update_query_expr(default_query, keep_old=True, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
"""
******************************************************************
# The following cases are used to test `search` function
# with invalid collection_name, or invalid query expr
******************************************************************
"""
class TestSearchInvalid(object):
"""
Test search collection with invalid collection names
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_invalid_partition(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_invalid_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in CPU mode")
return request.param
@pytest.mark.tags(CaseLabel.L2)
def test_search_with_invalid_collection(self, connect, get_collection_name):
collection_name = get_collection_name
with pytest.raises(Exception) as e:
res = connect.search(collection_name, default_query)
@pytest.mark.tags(CaseLabel.L2)
def test_search_with_invalid_partition(self, connect, collection, get_invalid_partition):
# tag = " "
tag = get_invalid_partition
with pytest.raises(Exception) as e:
res = connect.search(collection, default_query, partition_names=tag)
@pytest.mark.tags(CaseLabel.L2)
def test_search_with_invalid_field_name(self, connect, collection, get_invalid_field):
fields = [get_invalid_field]
with pytest.raises(Exception) as e:
res = connect.search(collection, default_query, fields=fields)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_search_with_not_existed_field(self, connect, collection):
fields = [gen_unique_str("field_name")]
with pytest.raises(Exception) as e:
res = connect.search(collection, default_query, fields=fields)
"""
Test search collection with invalid query
"""
@pytest.fixture(
scope="function",
params=gen_invalid_ints()
)
def get_top_k(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_search_with_invalid_top_k(self, connect, collection, get_top_k):
'''
target: test search function, with the wrong top_k
method: search with top_k
expected: raise an error, and the connection is normal
'''
top_k = get_top_k
default_query["bool"]["must"][0]["vector"][field_name]["topk"] = top_k
with pytest.raises(Exception) as e:
res = connect.search(collection, default_query)
"""
Test search collection with invalid search params
"""
@pytest.fixture(
scope="function",
params=gen_invaild_search_params()
)
def get_search_params(self, request):
yield request.param
# 1463
@pytest.mark.tags(CaseLabel.L2)
def test_search_with_invalid_params(self, connect, collection, get_simple_index, get_search_params):
'''
        target: test search function with invalid search params
        method: search with invalid search params
expected: raise an error, and the connection is normal
'''
search_params = get_search_params
index_type = get_simple_index["index_type"]
if index_type in ["FLAT"]:
# pytest.skip("skip in FLAT index")
pass
if index_type != search_params["index_type"]:
# pytest.skip("skip if index_type not matched")
pass
entities, ids = init_data(connect, collection, nb=1200)
connect.create_index(collection, field_name, get_simple_index)
connect.load_collection(collection)
query, vecs = gen_query_vectors(field_name, entities, default_top_k, 1,
search_params=search_params["search_params"])
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.tags(CaseLabel.L2)
def test_search_with_invalid_params_binary(self, connect, binary_collection):
'''
target: test search function, with the wrong nprobe
method: search with nprobe
expected: raise an error, and the connection is normal
'''
nq = 1
index_type = "BIN_IVF_FLAT"
int_vectors, entities, ids = init_binary_data(connect, binary_collection)
query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
connect.create_index(binary_collection, binary_field_name,
{"index_type": index_type, "metric_type": "JACCARD", "params": {"nlist": 128}})
connect.load_collection(binary_collection)
query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq,
search_params={"nprobe": 0}, metric_type="JACCARD")
with pytest.raises(Exception) as e:
res = connect.search(binary_collection, query)
# #1464
@pytest.mark.tags(CaseLabel.L2)
def test_search_with_empty_params(self, connect, collection, args, get_simple_index):
'''
target: test search function, with empty search params
        method: search with empty search params
expected: raise an error, and the connection is normal
'''
index_type = get_simple_index["index_type"]
if args["handler"] == "HTTP":
pytest.skip("skip in http mode")
if index_type == "FLAT":
# pytest.skip("skip in FLAT index")
pass
entities, ids = init_data(connect, collection)
connect.create_index(collection, field_name, get_simple_index)
connect.load_collection(collection)
query, vecs = gen_query_vectors(field_name, entities, default_top_k, 1, search_params={})
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_search_with_empty_vectors(self, connect, collection):
"""
target: test search function, with empty search vectors
method: search
expected: raise an exception
"""
entities, ids = init_data(connect, collection)
assert len(ids) == default_nb
connect.load_collection(collection)
query, vecs = gen_query_vectors(field_name, entities, default_top_k, nq=0)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
class TestSearchWithExpression(object):
@pytest.fixture(
scope="function",
params=[1, 10, 20],
)
def limit(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_normal_expressions(),
)
def expression(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=[
{"index_type": "IVF_FLAT", "metric_type": "L2", "params": {"nlist": 100}},
]
)
def index_param(self, request):
return request.param
@pytest.fixture(
scope="function",
)
def search_params(self):
return {"metric_type": "L2", "params": {"nprobe": 10}}
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_search_with_expression(self, connect, collection, index_param, search_params, limit, expression):
entities, ids = init_data(connect, collection)
assert len(ids) == default_nb
connect.create_index(collection, default_float_vec_field_name, index_param)
connect.load_collection(collection)
nq = 10
query_data = entities[2]["values"][:nq]
res = connect.search_with_expression(collection, query_data, default_float_vec_field_name, search_params,
limit, expression)
assert len(res) == nq
for topk_results in res:
assert len(topk_results) <= limit
def check_id_result(result, id):
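    # Treat `id` as found if it is among the first `limit_in` hits, or anywhere
    # in the result when fewer than `limit_in` hits were returned.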
limit_in = 5
ids = [entity.id for entity in result]
if len(result) >= limit_in:
return id in ids[:limit_in]
else:
return id in ids
|
tasks.py
|
import contextlib
import shutil
import threading
import time
from .colors import CYAN, GREEN, RED, YELLOW
from ..utils.threading import ExceptionalThread
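# ANSI escape sequences used to redraw output in place: "\033[A" moves the cursor
# up one line, "\033[1000D" moves it left by up to 1000 columns (back to the start
# of the line), and "\033[2K" clears the entire current line.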
UP_ONE = "\033[A\033[1000D"
CLEAR_LINE = "\033[2K"
console_lock = threading.Lock()
class Task:
"""
Something that can be started (by being created), have progress reported, and then finished.
    It can also have a number of sub-tasks, and arbitrary lines of extra information
that should be shown to the user alongside progress messages or a progress bar.
"""
INDENT_AMOUNT = 2
FLAVOR_NEUTRAL = "neutral"
FLAVOR_GOOD = "good"
FLAVOR_BAD = "bad"
FLAVOR_WARNING = "warning"
def __init__(self, name, parent=None, hide_if_empty=False, collapse_if_finished=False, progress_formatter=None):
self.name = name
        # Whether this task is only displayed if it has children
self.hide_if_empty = hide_if_empty
        # Whether this task collapses to just its first line once it's finished
self.collapse_if_finished = collapse_if_finished
# Any parent tasks to trigger updates in
self.parent = parent
with console_lock:
if self.parent is not None:
self.parent.subtasks.append(self)
# Sub tasks to show under this one
self.subtasks = []
# The current status message
self.status = None
# The current progress from 0 - 1
self.progress = None
# The way to format the progress numbers
self.progress_formatter = progress_formatter or str
# The current status flavor (turns into a colour)
self.status_flavor = self.FLAVOR_NEUTRAL
# Extra lines of information to show underneath the task
self.extra_info = []
# If the task is complete
self.finished = False
# Number of lines we had previously cleared
self.cleared_lines = 0
# If the output is currently "paused" for other things to write to the console
self.output_paused = False
# Run update
self.update()
def update(self, status=None, status_flavor=None, progress=None, force=False):
"""
Update either the status message, the progress bar, or both.
If this is the topmost task, this will trigger a reprint on the console.
"""
if self.finished and not force:
raise ValueError("You cannot update() a finished task!")
with console_lock:
if status is not None:
self.status = status
if progress is not None:
if len(progress) != 2:
raise ValueError("Progress must be a 2-tuple of (count, total)")
self.progress = progress
if status_flavor is not None:
self.status_flavor = status_flavor
# Look for a parent to potentially trigger update on, or print ourselves
# if there isn't one
if self.parent is not None:
self.parent.update()
else:
self.clear_and_output()
def add_extra_info(self, message):
"""
Adds a line of extra info and triggers updates
"""
with console_lock:
self.extra_info.append(message)
if self.parent is not None:
self.parent.update()
def set_extra_info(self, messages):
"""
Sets all extra info and triggers updates
"""
with console_lock:
self.extra_info = messages
if self.parent is not None:
self.parent.update()
def finish(self, **kwargs):
"""
Marks the task as finished, meaning it can no longer be mutated.
Used to optimise terminal output only.
"""
self.finished = True
self.update(force=True, **kwargs)
def wrapped_extra_info(self, text_width):
"""
Returns extra_info wrapped to fit the terminal width.
"""
actual_output = []
for line in self.extra_info:
line = line.strip()
while line:
actual_output.append(line[:text_width])
line = line[text_width:]
return actual_output
def make_progress_bar(self, count, total, width=30):
"""
Helper for making progress bar text.
"""
progress = min(max(count / total, 0), 1)
bar_width = width - 2
bar_size = int(bar_width * progress)
return "[{}{}] {}/{}".format(
"=" * bar_size,
" " * (bar_width - bar_size),
self.progress_formatter(count),
self.progress_formatter(total),
)
def output(self, terminal_width, indent=0):
"""
Returns the lines to output for this task to the screen (as a generator)
"""
if self.hide_if_empty and not self.subtasks:
return
# Work out progress text
progress_string = ""
if self.progress is not None:
progress_string = self.make_progress_bar(*self.progress) + " "
# Work out status text
status_string = self.status or ""
if self.status_flavor == self.FLAVOR_BAD:
status_string = RED(status_string)
elif self.status_flavor == self.FLAVOR_GOOD:
status_string = GREEN(status_string)
elif self.status_flavor == self.FLAVOR_WARNING:
status_string = YELLOW(status_string)
# Print out our line
indent_string = " " * (self.INDENT_AMOUNT * indent)
main_line = "{}{}: {}{}".format(
indent_string,
CYAN(self.name),
progress_string,
status_string,
)
if indent > 0:
yield main_line
if not (self.finished and self.collapse_if_finished):
# Print out extra info
indent_string = (indent + 1) * (" " * self.INDENT_AMOUNT)
for info in self.wrapped_extra_info(terminal_width - len(indent_string)):
yield indent_string + info[:terminal_width - len(indent_string)].replace("\n", "")
# Print out subtasks
for subtask in self.subtasks:
yield from subtask.output(terminal_width, indent=indent + 1)
if indent == 0:
yield main_line
def clear_and_output(self):
"""
Clears the terminal up to the right line then outputs the information
of the task.
"""
# See if output is paused
if self.output_paused:
return
# OK, print
with console_lock:
# Get terminal width
terminal_width = shutil.get_terminal_size((80, 20)).columns
# Get the output we need to print
output = list(self.output(terminal_width))
# Scroll the terminal down/up enough for any new lines
needed_lines = len(output)
new_lines = needed_lines - self.cleared_lines
if new_lines > 0:
print("\n" * new_lines, flush=True, end="")
elif new_lines < 0:
print(
(UP_ONE + CLEAR_LINE) * abs(new_lines),
flush=True,
end="",
)
self.cleared_lines = needed_lines
# Move cursor to top of cleared section
print(
(UP_ONE + CLEAR_LINE) * needed_lines,
flush=True,
end="",
)
for line in output:
print(line)
def _pause_output(self, pause=True):
"""
Allows the output to be paused and unpaused by finding the parent and doing it there.
"""
if self.parent is None:
self.output_paused = pause
if not pause:
# Make the output rewrite from where it is
self.cleared_lines = 0
self.clear_and_output()
else:
self.parent._pause_output(pause)
@contextlib.contextmanager
def paused_output(self):
"""
Context manager that pauses printing of output until it's exited.
"""
self._pause_output(True)
yield
self._pause_output(False)
@contextlib.contextmanager
def rate_limit(self, interval=0.1):
"""
Context manager that rate-limits updates on tasks
"""
buffered_changes = {"running": True}
# Thread loop that flushes every interval
def flusher():
while buffered_changes['running']:
# Do any extra_info calls
if "set_extra_info" in buffered_changes:
self.set_extra_info(buffered_changes['set_extra_info'])
del buffered_changes['set_extra_info']
# Do any update calls
if "update" in buffered_changes:
self.update(**buffered_changes['update'])
del buffered_changes['update']
# Sleep
time.sleep(interval)
# Fake task object to hand out; it records only the latest requested change
# in the shared buffer (via the closure), which the flusher thread applies.
class BufferedTask(object):
def set_extra_info(self, extra_info):
buffered_changes['set_extra_info'] = extra_info
def update(self, **kwargs):
buffered_changes['update'] = kwargs
# Start thread that flushes every interval
flush_thread = ExceptionalThread(target=flusher, daemon=True)
flush_thread.start()
# Run inner code
yield BufferedTask()
# Stop the flusher, then apply any change that arrived after its final pass
buffered_changes['running'] = False
flush_thread.join()
if 'set_extra_info' in buffered_changes:
self.set_extra_info(buffered_changes['set_extra_info'])
if 'update' in buffered_changes:
self.update(**buffered_changes['update'])
class RootTask(Task):
"""
Special task subclass that represents the "root" task, the instance that
has no output of its own but encapsulates all other tasks in the app in order.
"""
def __init__(self):
super(RootTask, self).__init__("__root__")
def output(self, terminal_width):
for subtask in self.subtasks:
yield from subtask.output(terminal_width, indent=0)
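# Minimal usage sketch (illustrative only, not part of the library): build a
# RootTask, attach a child Task, drive a progress bar, and use rate_limit()
# for buffered updates. It only exercises the public API defined above and
# assumes the module is used as in its upstream project.
if __name__ == "__main__":
    root = RootTask()
    work = Task("demo", parent=root)
    with work.rate_limit() as buffered:
        for i in range(5):
            # Buffered updates are flushed at most once per interval
            buffered.update(status="working", progress=(i + 1, 5))
            time.sleep(0.2)
    work.add_extra_info("all 5 items processed")
    work.finish(status="done", status_flavor=Task.FLAVOR_GOOD)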
|
train_pg.py
|
import numpy as np
import tensorflow as tf
import gym
import logz
import scipy.signal
import os
import time
import inspect
from multiprocessing import Process
#============================================================================================#
# Utilities
#============================================================================================#
def build_mlp(
input_placeholder,
output_size,
scope,
n_layers=2,
size=64,
activation=tf.tanh,
output_activation=None
):
#========================================================================================#
# ----------SECTION 3----------
# Network building
#
# Your code should make a feedforward neural network (also called a multilayer perceptron)
# with 'n_layers' hidden layers of size 'size' units.
#
# The output layer should have size 'output_size' and activation 'output_activation'.
#
# Hint: use tf.layers.dense
#========================================================================================#
with tf.variable_scope(scope):
x = input_placeholder
while n_layers > 0:
x = tf.layers.dense(x, size)
x = activation(x)
n_layers-=1
x = tf.layers.dense(x, output_size, activation = output_activation)
return x
def pathlength(path):
return len(path["reward"])
def norm(values, mean, std):
"""Rescale `values` so the result has the given target mean and standard deviation."""
std_away = (values - np.mean(values)) / (np.std(values) + 1e-8)
return mean + std * std_away
#============================================================================================#
# Policy Gradient
#============================================================================================#
def train_PG(exp_name='',
env_name='CartPole-v0',
n_iter=100,
gamma=1.0,
_lambda=1.0,
min_timesteps_per_batch=1000,
max_path_length=None,
learning_rate=5e-3,
reward_to_go=True,
animate=True,
logdir=None,
normalize_advantages=True,
nn_baseline=False,
seed=0,
# network arguments
n_layers=1,
size=32
):
start = time.time()
# Configure output directory for logging
logz.configure_output_dir(logdir)
# Log experimental parameters
args = inspect.getargspec(train_PG)[0]
locals_ = locals()
params = {k: locals_[k] if k in locals_ else None for k in args}
logz.save_params(params)
# Set random seeds
tf.set_random_seed(seed)
np.random.seed(seed)
# Make the gym environment
env = gym.make(env_name)
# Is this env continuous, or discrete?
discrete = isinstance(env.action_space, gym.spaces.Discrete)
# Maximum length for episodes
max_path_length = max_path_length or env.spec.max_episode_steps
#========================================================================================#
# Notes on notation:
#
# Symbolic variables have the prefix sy_, to distinguish them from the numerical values
# that are computed later in the function
#
# Prefixes and suffixes:
# ob - observation
# ac - action
# _no - this tensor should have shape (batch size /n/, observation dim)
# _na - this tensor should have shape (batch size /n/, action dim)
# _n - this tensor should have shape (batch size /n/)
#
# Note: batch size /n/ is defined at runtime, and until then, the shape for that axis
# is None
#========================================================================================#
# Observation and action sizes
ob_dim = env.observation_space.shape[0]
ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
#========================================================================================#
# ----------SECTION 4----------
# Placeholders
#
# Need these for batch observations / actions / advantages in policy gradient loss function.
#========================================================================================#
sy_ob_no = tf.placeholder(shape=[None, ob_dim], name="ob", dtype=tf.float32)
if discrete:
sy_ac_na = tf.placeholder(shape=[None], name="ac", dtype=tf.int32)
else:
sy_ac_na = tf.placeholder(shape=[None, ac_dim], name="ac", dtype=tf.float32)
# Define a placeholder for advantages
sy_adv_n = tf.placeholder(shape = [None], name = "adv", dtype = tf.float32)
#========================================================================================#
# ----------SECTION 4----------
# Networks
#
# Make symbolic operations for
# 1. Policy network outputs which describe the policy distribution.
# a. For the discrete case, just logits for each action.
#
# b. For the continuous case, the mean / log std of a Gaussian distribution over
# actions.
#
# Hint: use the 'build_mlp' function you defined in utilities.
#
# Note: these ops should be functions of the placeholder 'sy_ob_no'
#
# 2. Producing samples stochastically from the policy distribution.
# a. For the discrete case, an op that takes in logits and produces actions.
#
# Should have shape [None]
#
# b. For the continuous case, use the reparameterization trick:
# The output from a Gaussian distribution with mean 'mu' and std 'sigma' is
#
# mu + sigma * z, z ~ N(0, I)
#
# This reduces the problem to just sampling z. (Hint: use tf.random_normal!)
#
# Should have shape [None, ac_dim]
#
# Note: these ops should be functions of the policy network output ops.
#
# 3. Computing the log probability of a set of actions that were actually taken,
# according to the policy.
#
# Note: these ops should be functions of the placeholder 'sy_ac_na', and the
# policy network output ops.
#
#========================================================================================#
if discrete:
# YOUR_CODE_HERE
sy_logits_na = build_mlp(sy_ob_no, ac_dim, "policy", n_layers = n_layers, size = size)
sy_sampled_ac = tf.squeeze(tf.multinomial(sy_logits_na, 1), axis = [1])
sy_logprob_n = - tf.nn.sparse_softmax_cross_entropy_with_logits(labels = sy_ac_na, logits = sy_logits_na)
else:
# YOUR_CODE_HERE
sy_mean = build_mlp(sy_ob_no, ac_dim, "policy", n_layers=n_layers, size=size)
sy_logstd = tf.Variable(tf.zeros([1, ac_dim]), name='logstd')
sy_std = tf.exp(sy_logstd)
sy_z_sampled = tf.random_normal(tf.shape(sy_mean))
sy_sampled_ac = sy_mean + sy_std * sy_z_sampled
sy_z = (sy_ac_na - sy_mean) / sy_std
# Gaussian log-likelihood (constant term dropped). The log-std term is kept so
# that gradients with respect to sy_logstd are correct.
sy_logprob_n = -0.5 * tf.reduce_sum(tf.square(sy_z), axis=1) - tf.reduce_sum(sy_logstd, axis=1)
#========================================================================================#
# ----------SECTION 4----------
# Loss Function and Training Operation
#========================================================================================#
loss = -tf.reduce_mean(tf.multiply(sy_logprob_n, sy_adv_n)) # Loss function that we'll differentiate to get the policy gradient.
update_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)
#========================================================================================#
# ----------SECTION 5----------
# Optional Baseline
#========================================================================================#
if nn_baseline:
baseline_prediction = tf.squeeze(build_mlp(
sy_ob_no,
1,
"nn_baseline",
n_layers=n_layers,
size=size))
# Define placeholders for targets, a loss function and an update op for fitting a
# neural network baseline. These will be used to fit the neural network baseline.
# YOUR_CODE_HERE
bl_n = tf.placeholder(shape = [None], name = 'bl_n', dtype = tf.float32)
bl_loss = tf.nn.l2_loss(baseline_prediction - bl_n)
baseline_update_op = tf.train.AdamOptimizer(learning_rate).minimize(bl_loss)
#========================================================================================#
# Tensorflow Engineering: Config, Session, Variable initialization
#========================================================================================#
tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
sess = tf.Session(config=tf_config)
sess.__enter__() # equivalent to `with sess:`
tf.global_variables_initializer().run() #pylint: disable=E1101
#========================================================================================#
# Training Loop
#========================================================================================#
total_timesteps = 0
for itr in range(n_iter):
print("********** Iteration %i ************"%itr)
# Collect paths until we have enough timesteps
timesteps_this_batch = 0
paths = []
while True:
ob = env.reset()
obs, acs, rewards = [], [], []
animate_this_episode=(len(paths)==0 and (itr % 10 == 0) and animate)
steps = 0
while True:
if animate_this_episode:
env.render()
time.sleep(0.05)
obs.append(ob)
ac = sess.run(sy_sampled_ac, feed_dict={sy_ob_no : ob[None]})
ac = ac[0]
acs.append(ac)
ob, rew, done, _ = env.step(ac)
rewards.append(rew)
steps += 1
if done or steps > max_path_length:
break
path = {"observation" : np.array(obs),
"reward" : np.array(rewards),
"action" : np.array(acs)}
paths.append(path)
timesteps_this_batch += pathlength(path)
if timesteps_this_batch > min_timesteps_per_batch:
break
total_timesteps += timesteps_this_batch
# Build arrays for observation, action for the policy gradient update by concatenating
# across paths
ob_no = np.concatenate([path["observation"] for path in paths])
ac_na = np.concatenate([path["action"] for path in paths])
#====================================================================================#
# ----------SECTION 4----------
# Computing Q-values
#
# Your code should construct numpy arrays for Q-values which will be used to compute
# advantages (which will in turn be fed to the placeholder you defined above).
#
# Recall that the expression for the policy gradient PG is
#
# PG = E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * (Q_t - b_t )]
#
# where
#
# tau=(s_0, a_0, ...) is a trajectory,
# Q_t is the Q-value at time t, Q^{pi}(s_t, a_t),
# and b_t is a baseline which may depend on s_t.
#
# You will write code for two cases, controlled by the flag 'reward_to_go':
#
# Case 1: trajectory-based PG
#
# (reward_to_go = False)
#
# Instead of Q^{pi}(s_t, a_t), we use the total discounted reward summed over
# entire trajectory (regardless of which time step the Q-value should be for).
#
# For this case, the policy gradient estimator is
#
# E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * Ret(tau)]
#
# where
#
# Ret(tau) = sum_{t'=0}^T gamma^t' r_{t'}.
#
# Thus, you should compute
#
# Q_t = Ret(tau)
#
# Case 2: reward-to-go PG
#
# (reward_to_go = True)
#
# Here, you estimate Q^{pi}(s_t, a_t) by the discounted sum of rewards starting
# from time step t. Thus, you should compute
#
# Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'}
#
#
# Store the Q-values for all timesteps and all trajectories in a variable 'q_n',
# like the 'ob_no' and 'ac_na' above.
#
#====================================================================================#
# YOUR_CODE_HERE
if reward_to_go:
q_n = []
for path in paths:
q = np.zeros(pathlength(path))
q[-1] = path['reward'][-1]
for i in reversed(range(pathlength(path) - 1)):
q[i] = path['reward'][i] + gamma * q[i+1]
q_n.extend(q)
else:
q_n = []
for path in paths:
ret_tau = 0
for i in range(pathlength(path)):
ret_tau += (gamma ** i) * path['reward'][i]
q = np.ones(shape = [pathlength(path)]) * ret_tau
q_n.extend(q)
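# Note: the discounted sums in the reward_to_go branch above can also be
# computed in one call with the discounted-cumsum filter from scipy.signal
# (already imported). This is only an equivalent sketch, not a replacement
# for the loop above:
#
#   q = scipy.signal.lfilter([1], [1, -gamma], path['reward'][::-1])[::-1]
#
# lfilter([1], [1, -gamma], x) computes y[t] = x[t] + gamma * y[t-1], so
# reversing before and after yields Q_t = sum_{t'>=t} gamma^(t'-t) * r_{t'}.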
#====================================================================================#
# ----------SECTION 5----------
# Computing Baselines
#====================================================================================#
if nn_baseline:
# If nn_baseline is True, use your neural network to predict reward-to-go
# at each timestep for each trajectory, and save the result in a variable 'b_n'
# like 'ob_no', 'ac_na', and 'q_n'.
#
# Hint #bl1: rescale the output from the nn_baseline to match the statistics
# (mean and std) of the current or previous batch of Q-values. (Goes with Hint
# #bl2 below.)
b_n = norm(sess.run(baseline_prediction, feed_dict = {sy_ob_no: ob_no}), np.mean(q_n), np.std(q_n))
# Implementation of GAE. b_n is concatenated across all paths, so slice out
# each path's baseline values before computing that path's advantages.
adv_n = []
path_start = 0
for path in paths:
T = pathlength(path)
b_path = b_n[path_start:path_start + T]
path_start += T
adv = np.zeros(T)
adv[-1] = path['reward'][-1] - b_path[-1]
for i in reversed(range(T - 1)):
delta = path['reward'][i] + gamma * b_path[i + 1] - b_path[i]
adv[i] = delta + gamma * _lambda * adv[i + 1]
if not reward_to_go:
adv = np.ones(T) * adv[0]
adv_n.extend(adv)
q_n = adv_n + b_n
else:
adv_n = q_n.copy()
#====================================================================================#
# ----------SECTION 4----------
# Advantage Normalization
#====================================================================================#
if normalize_advantages:
# On the next line, implement a trick which is known empirically to reduce variance
# in policy gradient methods: normalize adv_n to have mean zero and std=1.
# YOUR_CODE_HERE
adv_n = norm(adv_n, 0, 1)
#====================================================================================#
# ----------SECTION 5----------
# Optimizing Neural Network Baseline
#====================================================================================#
if nn_baseline:
# ----------SECTION 5----------
# If a neural network baseline is used, set up the targets and the inputs for the
# baseline.
#
# Fit it to the current batch in order to use for the next iteration. Use the
# baseline_update_op you defined earlier.
#
# Hint #bl2: Instead of trying to target raw Q-values directly, rescale the
# targets to have mean zero and std=1. (Goes with Hint #bl1 above.)
# YOUR_CODE_HERE
bl_true = norm(q_n, 0, 1)
_ = sess.run(baseline_update_op, feed_dict = {bl_n : bl_true, sy_ob_no : ob_no})
#====================================================================================#
# ----------SECTION 4----------
# Performing the Policy Update
#====================================================================================#
# Call the update operation necessary to perform the policy gradient update based on
# the current batch of rollouts.
#
# For debug purposes, you may wish to save the value of the loss function before
# and after an update, and then log them below.
# YOUR_CODE_HERE
_, after_loss = sess.run([update_op, loss],feed_dict = {sy_ob_no : ob_no, sy_ac_na : ac_na, sy_adv_n : adv_n})
print(after_loss.shape)
# Log diagnostics
returns = [path["reward"].sum() for path in paths]
ep_lengths = [pathlength(path) for path in paths]
logz.log_tabular("Time", time.time() - start)
logz.log_tabular("Iteration", itr)
logz.log_tabular("AverageReturn", np.mean(returns))
logz.log_tabular("StdReturn", np.std(returns))
logz.log_tabular("MaxReturn", np.max(returns))
logz.log_tabular("MinReturn", np.min(returns))
logz.log_tabular("EpLenMean", np.mean(ep_lengths))
logz.log_tabular("EpLenStd", np.std(ep_lengths))
logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
logz.log_tabular("TimestepsSoFar", total_timesteps)
logz.log_tabular("After-Loss", after_loss)
logz.dump_tabular()
logz.pickle_tf_vars()
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('env_name', type=str)
parser.add_argument('--exp_name', type=str, default='vpg')
parser.add_argument('--render', action='store_true')
parser.add_argument('--discount', type=float, default=1.0)
parser.add_argument('--lambda_', type = float, default = 1.0)
parser.add_argument('--n_iter', '-n', type=int, default=100)
parser.add_argument('--batch_size', '-b', type=int, default=1000)
parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
parser.add_argument('--reward_to_go', '-rtg', action='store_true')
parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
parser.add_argument('--nn_baseline', '-bl', action='store_true')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--n_experiments', '-e', type=int, default=1)
parser.add_argument('--n_layers', '-l', type=int, default=1)
parser.add_argument('--size', '-s', type=int, default=32)
args = parser.parse_args()
if not(os.path.exists('data')):
os.makedirs('data')
logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
logdir = os.path.join('data', logdir)
if not(os.path.exists(logdir)):
os.makedirs(logdir)
max_path_length = args.ep_len if args.ep_len > 0 else None
for e in range(args.n_experiments):
seed = args.seed + 10*e
print('Running experiment with seed %d'%seed)
def train_func():
train_PG(
exp_name=args.exp_name,
env_name=args.env_name,
n_iter=args.n_iter,
gamma=args.discount,
_lambda = args.lambda_,
min_timesteps_per_batch=args.batch_size,
max_path_length=max_path_length,
learning_rate=args.learning_rate,
reward_to_go=args.reward_to_go,
animate=args.render,
logdir=os.path.join(logdir,'%d'%seed),
normalize_advantages=not(args.dont_normalize_advantages),
nn_baseline=args.nn_baseline,
seed=seed,
n_layers=args.n_layers,
size=args.size
)
# Awkward hacky process runs, because Tensorflow does not like
# repeatedly calling train_PG in the same thread.
p = Process(target=train_func, args=tuple())
p.start()
p.join()
if __name__ == "__main__":
main()
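# Example invocations (illustrative; the flags correspond to the argparse
# options defined above):
#
#   python train_pg.py CartPole-v0 -n 100 -b 1000 -e 3 -dna --exp_name sb_no_rtg_dna
#   python train_pg.py CartPole-v0 -n 100 -b 1000 -e 3 -rtg --exp_name sb_rtg_na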
|
test_capi.py
|
# Run the _testcapi module tests (tests for the Python/C API): by defn,
# these are all functions _testcapi exports whose name begins with 'test_'.
from collections import OrderedDict
import os
import pickle
import random
import re
import subprocess
import sys
import textwrap
import threading
import time
import unittest
import weakref
import importlib.machinery
import importlib.util
from test import support
from test.support import MISSING_C_DOCSTRINGS
from test.support.script_helper import assert_python_failure, assert_python_ok
try:
import _posixsubprocess
except ImportError:
_posixsubprocess = None
# Skip this test if the _testcapi module isn't available.
_testcapi = support.import_module('_testcapi')
import _testinternalcapi
# Were we compiled --with-pydebug or with #define Py_DEBUG?
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
def testfunction(self):
"""some doc"""
return self
class InstanceMethod:
id = _testcapi.instancemethod(id)
testfunction = _testcapi.instancemethod(testfunction)
class CAPITest(unittest.TestCase):
def test_instancemethod(self):
inst = InstanceMethod()
self.assertEqual(id(inst), inst.id())
self.assertTrue(inst.testfunction() is inst)
self.assertEqual(inst.testfunction.__doc__, testfunction.__doc__)
self.assertEqual(InstanceMethod.testfunction.__doc__, testfunction.__doc__)
InstanceMethod.testfunction.attribute = "test"
self.assertEqual(testfunction.attribute, "test")
self.assertRaises(AttributeError, setattr, inst.testfunction, "attribute", "test")
def test_no_FatalError_infinite_loop(self):
with support.SuppressCrashReport():
p = subprocess.Popen([sys.executable, "-c",
'import _testcapi;'
'_testcapi.crash_no_current_thread()'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = p.communicate()
self.assertEqual(out, b'')
# This used to cause an infinite loop.
self.assertTrue(err.rstrip().startswith(
b'Fatal Python error: '
b'PyThreadState_Get: '
b'the function must be called with the GIL held, '
b'but the GIL is released '
b'(the current Python thread state is NULL)'),
err)
def test_memoryview_from_NULL_pointer(self):
self.assertRaises(ValueError, _testcapi.make_memoryview_from_NULL_pointer)
def test_exc_info(self):
raised_exception = ValueError("5")
new_exc = TypeError("TEST")
try:
raise raised_exception
except ValueError as e:
tb = e.__traceback__
orig_sys_exc_info = sys.exc_info()
orig_exc_info = _testcapi.set_exc_info(new_exc.__class__, new_exc, None)
new_sys_exc_info = sys.exc_info()
new_exc_info = _testcapi.set_exc_info(*orig_exc_info)
reset_sys_exc_info = sys.exc_info()
self.assertEqual(orig_exc_info[1], e)
self.assertSequenceEqual(orig_exc_info, (raised_exception.__class__, raised_exception, tb))
self.assertSequenceEqual(orig_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(reset_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(new_exc_info, (new_exc.__class__, new_exc, None))
self.assertSequenceEqual(new_sys_exc_info, new_exc_info)
else:
self.assertTrue(False)
@unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
def test_seq_bytes_to_charp_array(self):
# Issue #15732: crash in _PySequence_BytesToCharpArray()
class Z(object):
def __len__(self):
return 1
self.assertRaises(TypeError, _posixsubprocess.fork_exec,
1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21)
# Issue #15736: overflow in _PySequence_BytesToCharpArray()
class Z(object):
def __len__(self):
return sys.maxsize
def __getitem__(self, i):
return b'x'
self.assertRaises(MemoryError, _posixsubprocess.fork_exec,
1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21)
@unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
def test_subprocess_fork_exec(self):
class Z(object):
def __len__(self):
return 1
# Issue #15738: crash in subprocess_fork_exec()
self.assertRaises(TypeError, _posixsubprocess.fork_exec,
Z(),[b'1'],3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21)
@unittest.skipIf(MISSING_C_DOCSTRINGS,
"Signature information for builtins requires docstrings")
def test_docstring_signature_parsing(self):
self.assertEqual(_testcapi.no_docstring.__doc__, None)
self.assertEqual(_testcapi.no_docstring.__text_signature__, None)
self.assertEqual(_testcapi.docstring_empty.__doc__, None)
self.assertEqual(_testcapi.docstring_empty.__text_signature__, None)
self.assertEqual(_testcapi.docstring_no_signature.__doc__,
"This docstring has no signature.")
self.assertEqual(_testcapi.docstring_no_signature.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_invalid_signature.__doc__,
"docstring_with_invalid_signature($module, /, boo)\n"
"\n"
"This docstring has an invalid signature."
)
self.assertEqual(_testcapi.docstring_with_invalid_signature.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_invalid_signature2.__doc__,
"docstring_with_invalid_signature2($module, /, boo)\n"
"\n"
"--\n"
"\n"
"This docstring also has an invalid signature."
)
self.assertEqual(_testcapi.docstring_with_invalid_signature2.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_signature.__doc__,
"This docstring has a valid signature.")
self.assertEqual(_testcapi.docstring_with_signature.__text_signature__, "($module, /, sig)")
self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__doc__, None)
self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__text_signature__,
"($module, /, sig)")
self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__doc__,
"\nThis docstring has a valid signature and some extra newlines.")
self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__text_signature__,
"($module, /, parameter)")
def test_c_type_with_matrix_multiplication(self):
M = _testcapi.matmulType
m1 = M()
m2 = M()
self.assertEqual(m1 @ m2, ("matmul", m1, m2))
self.assertEqual(m1 @ 42, ("matmul", m1, 42))
self.assertEqual(42 @ m1, ("matmul", 42, m1))
o = m1
o @= m2
self.assertEqual(o, ("imatmul", m1, m2))
o = m1
o @= 42
self.assertEqual(o, ("imatmul", m1, 42))
o = 42
o @= m1
self.assertEqual(o, ("matmul", 42, m1))
def test_c_type_with_ipow(self):
# When the __ipow__ method of a type was implemented in C, using the
# modulo param would cause segfaults.
o = _testcapi.ipowType()
self.assertEqual(o.__ipow__(1), (1, None))
self.assertEqual(o.__ipow__(2, 2), (2, 2))
def test_return_null_without_error(self):
# Issue #23571: A function must not return NULL without setting an
# error
if Py_DEBUG:
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.return_null_without_error()
""")
rc, out, err = assert_python_failure('-c', code)
self.assertRegex(err.replace(b'\r', b''),
br'Fatal Python error: _Py_CheckFunctionResult: '
br'a function returned NULL '
br'without setting an error\n'
br'Python runtime state: initialized\n'
br'SystemError: <built-in function '
br'return_null_without_error> returned NULL '
br'without setting an error\n'
br'\n'
br'Current thread.*:\n'
br' File .*", line 6 in <module>')
else:
with self.assertRaises(SystemError) as cm:
_testcapi.return_null_without_error()
self.assertRegex(str(cm.exception),
'return_null_without_error.* '
'returned NULL without setting an error')
def test_return_result_with_error(self):
# Issue #23571: A function must not return a result with an error set
if Py_DEBUG:
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.return_result_with_error()
""")
rc, out, err = assert_python_failure('-c', code)
self.assertRegex(err.replace(b'\r', b''),
br'Fatal Python error: _Py_CheckFunctionResult: '
br'a function returned a result '
br'with an error set\n'
br'Python runtime state: initialized\n'
br'ValueError\n'
br'\n'
br'The above exception was the direct cause '
br'of the following exception:\n'
br'\n'
br'SystemError: <built-in '
br'function return_result_with_error> '
br'returned a result with an error set\n'
br'\n'
br'Current thread.*:\n'
br' File .*, line 6 in <module>')
else:
with self.assertRaises(SystemError) as cm:
_testcapi.return_result_with_error()
self.assertRegex(str(cm.exception),
'return_result_with_error.* '
'returned a result with an error set')
def test_buildvalue_N(self):
_testcapi.test_buildvalue_N()
def test_set_nomemory(self):
code = """if 1:
import _testcapi
class C(): pass
# The first loop tests both functions and that remove_mem_hooks()
# can be called twice in a row. The second loop checks a call to
# set_nomemory() after a call to remove_mem_hooks(). The third
# loop checks the start and stop arguments of set_nomemory().
for outer_cnt in range(1, 4):
start = 10 * outer_cnt
for j in range(100):
if j == 0:
if outer_cnt != 3:
_testcapi.set_nomemory(start)
else:
_testcapi.set_nomemory(start, start + 1)
try:
C()
except MemoryError as e:
if outer_cnt != 3:
_testcapi.remove_mem_hooks()
print('MemoryError', outer_cnt, j)
_testcapi.remove_mem_hooks()
break
"""
rc, out, err = assert_python_ok('-c', code)
self.assertIn(b'MemoryError 1 10', out)
self.assertIn(b'MemoryError 2 20', out)
self.assertIn(b'MemoryError 3 30', out)
def test_mapping_keys_values_items(self):
class Mapping1(dict):
def keys(self):
return list(super().keys())
def values(self):
return list(super().values())
def items(self):
return list(super().items())
class Mapping2(dict):
def keys(self):
return tuple(super().keys())
def values(self):
return tuple(super().values())
def items(self):
return tuple(super().items())
dict_obj = {'foo': 1, 'bar': 2, 'spam': 3}
for mapping in [{}, OrderedDict(), Mapping1(), Mapping2(),
dict_obj, OrderedDict(dict_obj),
Mapping1(dict_obj), Mapping2(dict_obj)]:
self.assertListEqual(_testcapi.get_mapping_keys(mapping),
list(mapping.keys()))
self.assertListEqual(_testcapi.get_mapping_values(mapping),
list(mapping.values()))
self.assertListEqual(_testcapi.get_mapping_items(mapping),
list(mapping.items()))
def test_mapping_keys_values_items_bad_arg(self):
self.assertRaises(AttributeError, _testcapi.get_mapping_keys, None)
self.assertRaises(AttributeError, _testcapi.get_mapping_values, None)
self.assertRaises(AttributeError, _testcapi.get_mapping_items, None)
class BadMapping:
def keys(self):
return None
def values(self):
return None
def items(self):
return None
bad_mapping = BadMapping()
self.assertRaises(TypeError, _testcapi.get_mapping_keys, bad_mapping)
self.assertRaises(TypeError, _testcapi.get_mapping_values, bad_mapping)
self.assertRaises(TypeError, _testcapi.get_mapping_items, bad_mapping)
@unittest.skipUnless(hasattr(_testcapi, 'negative_refcount'),
'need _testcapi.negative_refcount')
def test_negative_refcount(self):
# bpo-35059: Check that Py_DECREF() reports the correct filename
# when calling _Py_NegativeRefcount() to abort Python.
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.negative_refcount()
""")
rc, out, err = assert_python_failure('-c', code)
self.assertRegex(err,
br'_testcapimodule\.c:[0-9]+: '
br'_Py_NegativeRefcount: Assertion failed: '
br'object has negative ref count')
def test_trashcan_subclass(self):
# bpo-35983: Check that the trashcan mechanism for "list" is NOT
# activated when its tp_dealloc is being called by a subclass
from _testcapi import MyList
L = None
for i in range(1000):
L = MyList((L,))
@support.requires_resource('cpu')
def test_trashcan_python_class1(self):
self.do_test_trashcan_python_class(list)
@support.requires_resource('cpu')
def test_trashcan_python_class2(self):
from _testcapi import MyList
self.do_test_trashcan_python_class(MyList)
def do_test_trashcan_python_class(self, base):
# Check that the trashcan mechanism works properly for a Python
# subclass of a class using the trashcan (this specific test assumes
# that the base class "base" behaves like list)
class PyList(base):
# Count the number of PyList instances to verify that there is
# no memory leak
num = 0
def __init__(self, *args):
__class__.num += 1
super().__init__(*args)
def __del__(self):
__class__.num -= 1
for parity in (0, 1):
L = None
# We need in the order of 2**20 iterations here such that a
# typical 8MB stack would overflow without the trashcan.
for i in range(2**20):
L = PyList((L,))
L.attr = i
if parity:
# Add one additional nesting layer
L = (L,)
self.assertGreater(PyList.num, 0)
del L
self.assertEqual(PyList.num, 0)
def test_subclass_of_heap_gc_ctype_with_tpdealloc_decrefs_once(self):
class HeapGcCTypeSubclass(_testcapi.HeapGcCType):
def __init__(self):
self.value2 = 20
super().__init__()
subclass_instance = HeapGcCTypeSubclass()
type_refcnt = sys.getrefcount(HeapGcCTypeSubclass)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
# Test that the type reference count is only decremented once
del subclass_instance
self.assertEqual(type_refcnt - 1, sys.getrefcount(HeapGcCTypeSubclass))
def test_subclass_of_heap_gc_ctype_with_del_modifying_dunder_class_only_decrefs_once(self):
class A(_testcapi.HeapGcCType):
def __init__(self):
self.value2 = 20
super().__init__()
class B(A):
def __init__(self):
super().__init__()
def __del__(self):
self.__class__ = A
A.refcnt_in_del = sys.getrefcount(A)
B.refcnt_in_del = sys.getrefcount(B)
subclass_instance = B()
type_refcnt = sys.getrefcount(B)
new_type_refcnt = sys.getrefcount(A)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
del subclass_instance
# Test that setting __class__ modified the reference counts of the types
self.assertEqual(type_refcnt - 1, B.refcnt_in_del)
self.assertEqual(new_type_refcnt + 1, A.refcnt_in_del)
# Test that the original type already has decreased its refcnt
self.assertEqual(type_refcnt - 1, sys.getrefcount(B))
# Test that subtype_dealloc decref the newly assigned __class__ only once
self.assertEqual(new_type_refcnt, sys.getrefcount(A))
def test_heaptype_with_dict(self):
inst = _testcapi.HeapCTypeWithDict()
inst.foo = 42
self.assertEqual(inst.foo, 42)
self.assertEqual(inst.dictobj, inst.__dict__)
self.assertEqual(inst.dictobj, {"foo": 42})
inst = _testcapi.HeapCTypeWithDict()
self.assertEqual({}, inst.__dict__)
def test_heaptype_with_negative_dict(self):
inst = _testcapi.HeapCTypeWithNegativeDict()
inst.foo = 42
self.assertEqual(inst.foo, 42)
self.assertEqual(inst.dictobj, inst.__dict__)
self.assertEqual(inst.dictobj, {"foo": 42})
inst = _testcapi.HeapCTypeWithNegativeDict()
self.assertEqual({}, inst.__dict__)
def test_heaptype_with_weakref(self):
inst = _testcapi.HeapCTypeWithWeakref()
ref = weakref.ref(inst)
self.assertEqual(ref(), inst)
self.assertEqual(inst.weakreflist, ref)
def test_heaptype_with_buffer(self):
inst = _testcapi.HeapCTypeWithBuffer()
b = bytes(inst)
self.assertEqual(b, b"1234")
def test_c_subclass_of_heap_ctype_with_tpdealloc_decrefs_once(self):
subclass_instance = _testcapi.HeapCTypeSubclass()
type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclass)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
# Test that the type reference count is only decremented once
del subclass_instance
self.assertEqual(type_refcnt - 1, sys.getrefcount(_testcapi.HeapCTypeSubclass))
def test_c_subclass_of_heap_ctype_with_del_modifying_dunder_class_only_decrefs_once(self):
subclass_instance = _testcapi.HeapCTypeSubclassWithFinalizer()
type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclassWithFinalizer)
new_type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclass)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
# The tp_finalize slot will set __class__ to HeapCTypeSubclass
del subclass_instance
# Test that setting __class__ modified the reference counts of the types
self.assertEqual(type_refcnt - 1, _testcapi.HeapCTypeSubclassWithFinalizer.refcnt_in_del)
self.assertEqual(new_type_refcnt + 1, _testcapi.HeapCTypeSubclass.refcnt_in_del)
# Test that the original type already has decreased its refcnt
self.assertEqual(type_refcnt - 1, sys.getrefcount(_testcapi.HeapCTypeSubclassWithFinalizer))
# Test that subtype_dealloc decref the newly assigned __class__ only once
self.assertEqual(new_type_refcnt, sys.getrefcount(_testcapi.HeapCTypeSubclass))
def test_heaptype_with_setattro(self):
obj = _testcapi.HeapCTypeSetattr()
self.assertEqual(obj.pvalue, 10)
obj.value = 12
self.assertEqual(obj.pvalue, 12)
del obj.value
self.assertEqual(obj.pvalue, 0)
def test_pynumber_tobase(self):
from _testcapi import pynumber_tobase
self.assertEqual(pynumber_tobase(123, 2), '0b1111011')
self.assertEqual(pynumber_tobase(123, 8), '0o173')
self.assertEqual(pynumber_tobase(123, 10), '123')
self.assertEqual(pynumber_tobase(123, 16), '0x7b')
self.assertEqual(pynumber_tobase(-123, 2), '-0b1111011')
self.assertEqual(pynumber_tobase(-123, 8), '-0o173')
self.assertEqual(pynumber_tobase(-123, 10), '-123')
self.assertEqual(pynumber_tobase(-123, 16), '-0x7b')
self.assertRaises(TypeError, pynumber_tobase, 123.0, 10)
self.assertRaises(TypeError, pynumber_tobase, '123', 10)
self.assertRaises(SystemError, pynumber_tobase, 123, 0)
class TestPendingCalls(unittest.TestCase):
def pendingcalls_submit(self, l, n):
def callback():
#this function can be interrupted by thread switching so let's
#use an atomic operation
l.append(None)
for i in range(n):
time.sleep(random.random()*0.02) #0.01 secs on average
#try submitting callback until successful.
#rely on regular interrupt to flush queue if we are
#unsuccessful.
while True:
if _testcapi._pending_threadfunc(callback):
break
def pendingcalls_wait(self, l, n, context = None):
#now, stick around until l[0] has grown to 10
count = 0
while len(l) != n:
#this busy loop is where we expect to be interrupted to
#run our callbacks. Note that callbacks are only run on the
#main thread
if False and support.verbose:
print("(%i)"%(len(l),),)
for i in range(1000):
a = i*i
if context and not context.event.is_set():
continue
count += 1
self.assertTrue(count < 10000,
"timeout waiting for %i callbacks, got %i"%(n, len(l)))
if False and support.verbose:
print("(%i)"%(len(l),))
def test_pendingcalls_threaded(self):
#do every callback on a separate thread
n = 32 #total callbacks
threads = []
class foo(object):pass
context = foo()
context.l = []
context.n = 2 #submits per thread
context.nThreads = n // context.n
context.nFinished = 0
context.lock = threading.Lock()
context.event = threading.Event()
threads = [threading.Thread(target=self.pendingcalls_thread,
args=(context,))
for i in range(context.nThreads)]
with support.start_threads(threads):
self.pendingcalls_wait(context.l, n, context)
def pendingcalls_thread(self, context):
try:
self.pendingcalls_submit(context.l, context.n)
finally:
with context.lock:
context.nFinished += 1
nFinished = context.nFinished
if False and support.verbose:
print("finished threads: ", nFinished)
if nFinished == context.nThreads:
context.event.set()
def test_pendingcalls_non_threaded(self):
#again, just using the main thread, likely they will all be dispatched at
#once. It is ok to ask for too many, because we loop until we find a slot.
#the loop can be interrupted to dispatch.
#there are only 32 dispatch slots, so we go for twice that!
l = []
n = 64
self.pendingcalls_submit(l, n)
self.pendingcalls_wait(l, n)
class SubinterpreterTest(unittest.TestCase):
def test_subinterps(self):
import builtins
r, w = os.pipe()
code = """if 1:
import sys, builtins, pickle
with open({:d}, "wb") as f:
pickle.dump(id(sys.modules), f)
pickle.dump(id(builtins), f)
""".format(w)
with open(r, "rb") as f:
ret = support.run_in_subinterp(code)
self.assertEqual(ret, 0)
self.assertNotEqual(pickle.load(f), id(sys.modules))
self.assertNotEqual(pickle.load(f), id(builtins))
def test_subinterps_recent_language_features(self):
r, w = os.pipe()
code = """if 1:
import pickle
with open({:d}, "wb") as f:
@(lambda x:x) # Py 3.9
def noop(x): return x
a = (b := f'1{{2}}3') + noop('x') # Py 3.8 (:=) / 3.6 (f'')
async def foo(arg): return await arg # Py 3.5
pickle.dump(dict(a=a, b=b), f)
""".format(w)
with open(r, "rb") as f:
ret = support.run_in_subinterp(code)
self.assertEqual(ret, 0)
self.assertEqual(pickle.load(f), {'a': '123x', 'b': '123'})
def test_mutate_exception(self):
"""
Exceptions saved in global module state get shared between
individual module instances. This test checks whether or not
a change in one interpreter's module gets reflected into the
other ones.
"""
import binascii
support.run_in_subinterp("import binascii; binascii.Error.foobar = 'foobar'")
self.assertFalse(hasattr(binascii.Error, "foobar"))
class TestThreadState(unittest.TestCase):
@support.reap_threads
def test_thread_state(self):
# some extra thread-state tests driven via _testcapi
def target():
idents = []
def callback():
idents.append(threading.get_ident())
_testcapi._test_thread_state(callback)
a = b = callback
time.sleep(1)
# Check our main thread is in the list exactly 3 times.
self.assertEqual(idents.count(threading.get_ident()), 3,
"Couldn't find main thread correctly in the list")
target()
t = threading.Thread(target=target)
t.start()
t.join()
class Test_testcapi(unittest.TestCase):
locals().update((name, getattr(_testcapi, name))
for name in dir(_testcapi)
if name.startswith('test_') and not name.endswith('_code'))
class Test_testinternalcapi(unittest.TestCase):
locals().update((name, getattr(_testinternalcapi, name))
for name in dir(_testinternalcapi)
if name.startswith('test_'))
class PyMemDebugTests(unittest.TestCase):
PYTHONMALLOC = 'debug'
# '0x04c06e0' or '04C06E0'
PTR_REGEX = r'(?:0x)?[0-9a-fA-F]+'
def check(self, code):
with support.SuppressCrashReport():
out = assert_python_failure('-c', code,
PYTHONMALLOC=self.PYTHONMALLOC)
stderr = out.err
return stderr.decode('ascii', 'replace')
def test_buffer_overflow(self):
out = self.check('import _testcapi; _testcapi.pymem_buffer_overflow()')
regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
r" 16 bytes originally requested\n"
r" The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
r" The [0-9] pad bytes at tail={ptr} are not all FORBIDDENBYTE \(0x[0-9a-f]{{2}}\):\n"
r" at tail\+0: 0x78 \*\*\* OUCH\n"
r" at tail\+1: 0xfd\n"
r" at tail\+2: 0xfd\n"
r" .*\n"
r"( The block was made by call #[0-9]+ to debug malloc/realloc.\n)?"
r" Data at p: cd cd cd .*\n"
r"\n"
r"Enable tracemalloc to get the memory block allocation traceback\n"
r"\n"
r"Fatal Python error: _PyMem_DebugRawFree: bad trailing pad byte")
regex = regex.format(ptr=self.PTR_REGEX)
regex = re.compile(regex, flags=re.DOTALL)
self.assertRegex(out, regex)
def test_api_misuse(self):
out = self.check('import _testcapi; _testcapi.pymem_api_misuse()')
regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
r" 16 bytes originally requested\n"
r" The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
r" The [0-9] pad bytes at tail={ptr} are FORBIDDENBYTE, as expected.\n"
r"( The block was made by call #[0-9]+ to debug malloc/realloc.\n)?"
r" Data at p: cd cd cd .*\n"
r"\n"
r"Enable tracemalloc to get the memory block allocation traceback\n"
r"\n"
r"Fatal Python error: _PyMem_DebugRawFree: bad ID: Allocated using API 'm', verified using API 'r'\n")
regex = regex.format(ptr=self.PTR_REGEX)
self.assertRegex(out, regex)
def check_malloc_without_gil(self, code):
out = self.check(code)
expected = ('Fatal Python error: _PyMem_DebugMalloc: '
'Python memory allocator called without holding the GIL')
self.assertIn(expected, out)
def test_pymem_malloc_without_gil(self):
# Debug hooks must raise an error if PyMem_Malloc() is called
# without holding the GIL
code = 'import _testcapi; _testcapi.pymem_malloc_without_gil()'
self.check_malloc_without_gil(code)
def test_pyobject_malloc_without_gil(self):
# Debug hooks must raise an error if PyObject_Malloc() is called
# without holding the GIL
code = 'import _testcapi; _testcapi.pyobject_malloc_without_gil()'
self.check_malloc_without_gil(code)
def check_pyobject_is_freed(self, func_name):
code = textwrap.dedent(f'''
import gc, os, sys, _testcapi
# Disable the GC to avoid crash on GC collection
gc.disable()
try:
_testcapi.{func_name}()
# Exit immediately to avoid a crash while deallocating
# the invalid object
os._exit(0)
except _testcapi.error:
os._exit(1)
''')
assert_python_ok('-c', code, PYTHONMALLOC=self.PYTHONMALLOC)
def test_pyobject_null_is_freed(self):
self.check_pyobject_is_freed('check_pyobject_null_is_freed')
def test_pyobject_uninitialized_is_freed(self):
self.check_pyobject_is_freed('check_pyobject_uninitialized_is_freed')
def test_pyobject_forbidden_bytes_is_freed(self):
self.check_pyobject_is_freed('check_pyobject_forbidden_bytes_is_freed')
def test_pyobject_freed_is_freed(self):
self.check_pyobject_is_freed('check_pyobject_freed_is_freed')
class PyMemMallocDebugTests(PyMemDebugTests):
PYTHONMALLOC = 'malloc_debug'
@unittest.skipUnless(support.with_pymalloc(), 'need pymalloc')
class PyMemPymallocDebugTests(PyMemDebugTests):
PYTHONMALLOC = 'pymalloc_debug'
@unittest.skipUnless(Py_DEBUG, 'need Py_DEBUG')
class PyMemDefaultTests(PyMemDebugTests):
# test default allocator of Python compiled in debug mode
PYTHONMALLOC = ''
class Test_ModuleStateAccess(unittest.TestCase):
"""Test db to module start (PEP 573)"""
# The C part of the tests lives in _testmultiphase, in a module called
# _testmultiphase_meth_state_access.
# This module has multi-phase initialization, unlike _testcapi.
def setUp(self):
fullname = '_testmultiphase_meth_state_access' # XXX
origin = importlib.util.find_spec('_testmultiphase').origin
loader = importlib.machinery.ExtensionFileLoader(fullname, origin)
spec = importlib.util.spec_from_loader(fullname, loader)
module = importlib.util.module_from_spec(spec)
loader.exec_module(module)
self.module = module
def test_subclass_get_module(self):
"""PyType_GetModule for defining_class"""
class StateAccessType_Subclass(self.module.StateAccessType):
pass
instance = StateAccessType_Subclass()
self.assertIs(instance.get_defining_module(), self.module)
def test_subclass_get_module_with_super(self):
class StateAccessType_Subclass(self.module.StateAccessType):
def get_defining_module(self):
return super().get_defining_module()
instance = StateAccessType_Subclass()
self.assertIs(instance.get_defining_module(), self.module)
def test_state_access(self):
"""Checks methods defined with and without argument clinic
This tests a no-arg method (get_count) and a method with
both a positional and keyword argument.
"""
a = self.module.StateAccessType()
b = self.module.StateAccessType()
methods = {
'clinic': a.increment_count_clinic,
'noclinic': a.increment_count_noclinic,
}
for name, increment_count in methods.items():
with self.subTest(name):
self.assertEqual(a.get_count(), b.get_count())
self.assertEqual(a.get_count(), 0)
increment_count()
self.assertEqual(a.get_count(), b.get_count())
self.assertEqual(a.get_count(), 1)
increment_count(3)
self.assertEqual(a.get_count(), b.get_count())
self.assertEqual(a.get_count(), 4)
increment_count(-2, twice=True)
self.assertEqual(a.get_count(), b.get_count())
self.assertEqual(a.get_count(), 0)
with self.assertRaises(TypeError):
increment_count(thrice=3)
with self.assertRaises(TypeError):
increment_count(1, 2, 3)
if __name__ == "__main__":
unittest.main()
|
server.py
|
from flask import Flask, render_template
from flask_socketio import SocketIO
import dronekit
import sys
import socket
import threading
import time
import signal
# Allow us to reuse sockets after they are bound.
# http://stackoverflow.com/questions/25535975/release-python-flask-port-when-script-is-terminated
socket.socket._bind = socket.socket.bind
def my_socket_bind(self, *args, **kwargs):
self.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return socket.socket._bind(self, *args, **kwargs)
socket.socket.bind = my_socket_bind
# okay, now that that's done...
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app)
@app.route('/')
def index():
return render_template('index.html')
def latlog(vehicle):
while True:
time.sleep(.5)
loc = vehicle.location.global_frame
if loc:
socketio.emit('location', {
"altitude": loc.alt,
"longitude": loc.lon,
"latitude": loc.lat,
})
else:
socketio.emit('location', None)
if __name__ == '__main__':
target = sys.argv[1] if len(sys.argv) >= 2 else '127.0.0.1:14550'
print('Connecting to ' + target + '...')
vehicle = dronekit.connect(target)
vehiclethread = threading.Thread(target=latlog, args=(vehicle,))
vehiclethread.start()
socketio.run(app, host="0.0.0.0", port=8080)
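# Hypothetical client sketch (not part of this server): consuming the
# 'location' events emitted above with the python-socketio client package.
# Assumes the server is reachable at http://localhost:8080 and that the client
# and Flask-SocketIO versions speak a compatible protocol version.
#
#   import socketio
#
#   sio = socketio.Client()
#
#   @sio.on('location')
#   def on_location(data):
#       print('vehicle at', data)
#
#   sio.connect('http://localhost:8080')
#   sio.wait()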
|
test_local_task_job.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import multiprocessing
import os
import signal
import time
import uuid
from datetime import timedelta
from multiprocessing import Lock, Value
from unittest import mock
from unittest.mock import patch
import pytest
from parameterized import parameterized
from airflow import settings
from airflow.exceptions import AirflowException, AirflowFailException
from airflow.executors.sequential_executor import SequentialExecutor
from airflow.jobs.local_task_job import LocalTaskJob
from airflow.jobs.scheduler_job import SchedulerJob
from airflow.models.dag import DAG, DagModel
from airflow.models.dagbag import DagBag
from airflow.models.taskinstance import TaskInstance
from airflow.operators.dummy import DummyOperator
from airflow.operators.python import PythonOperator
from airflow.task.task_runner.standard_task_runner import StandardTaskRunner
from airflow.utils import timezone
from airflow.utils.net import get_hostname
from airflow.utils.session import create_session
from airflow.utils.state import State
from airflow.utils.timeout import timeout
from airflow.utils.types import DagRunType
from tests.test_utils import db
from tests.test_utils.asserts import assert_queries_count
from tests.test_utils.config import conf_vars
from tests.test_utils.mock_executor import MockExecutor
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
TEST_DAG_FOLDER = os.environ['AIRFLOW__CORE__DAGS_FOLDER']
@pytest.fixture
def clear_db():
db.clear_db_dags()
db.clear_db_jobs()
db.clear_db_runs()
db.clear_db_task_fail()
yield
@pytest.fixture(scope='class')
def clear_db_class():
yield
db.clear_db_dags()
db.clear_db_jobs()
db.clear_db_runs()
db.clear_db_task_fail()
@pytest.mark.usefixtures('clear_db_class', 'clear_db')
class TestLocalTaskJob:
@pytest.fixture(autouse=True)
def set_instance_attrs(self):
with patch('airflow.jobs.base_job.sleep') as self.mock_base_job_sleep:
yield
def validate_ti_states(self, dag_run, ti_state_mapping, error_message):
for task_id, expected_state in ti_state_mapping.items():
task_instance = dag_run.get_task_instance(task_id=task_id)
task_instance.refresh_from_db()
assert task_instance.state == expected_state, error_message
def test_localtaskjob_essential_attr(self, dag_maker):
"""
Check whether essential attributes
of LocalTaskJob can be assigned with
proper values without intervention
"""
with dag_maker(
'test_localtaskjob_essential_attr', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'}
):
op1 = DummyOperator(task_id='op1')
dr = dag_maker.dag_run
ti = dr.get_task_instance(task_id=op1.task_id)
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
essential_attr = ["dag_id", "job_type", "start_date", "hostname"]
check_result_1 = [hasattr(job1, attr) for attr in essential_attr]
assert all(check_result_1)
check_result_2 = [getattr(job1, attr) is not None for attr in essential_attr]
assert all(check_result_2)
def test_localtaskjob_heartbeat(self, dag_maker):
session = settings.Session()
with dag_maker('test_localtaskjob_heartbeat'):
op1 = DummyOperator(task_id='op1')
dr = dag_maker.dag_run
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = "blablabla"
session.commit()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
ti.task = op1
ti.refresh_from_task(op1)
job1.task_runner = StandardTaskRunner(job1)
job1.task_runner.process = mock.Mock()
with pytest.raises(AirflowException):
job1.heartbeat_callback()
job1.task_runner.process.pid = 1
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.merge(ti)
session.commit()
assert ti.pid != os.getpid()
job1.heartbeat_callback(session=None)
job1.task_runner.process.pid = 2
with pytest.raises(AirflowException):
job1.heartbeat_callback()
@mock.patch('airflow.jobs.local_task_job.psutil')
def test_localtaskjob_heartbeat_with_run_as_user(self, psutil_mock, dag_maker):
session = settings.Session()
with dag_maker('test_localtaskjob_heartbeat'):
op1 = DummyOperator(task_id='op1', run_as_user='myuser')
dr = dag_maker.dag_run
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.RUNNING
ti.pid = 2
ti.hostname = get_hostname()
session.commit()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
ti.task = op1
ti.refresh_from_task(op1)
job1.task_runner = StandardTaskRunner(job1)
job1.task_runner.process = mock.Mock()
job1.task_runner.process.pid = 2
# Here ti.pid is 2 and the task_runner process pid is also 2, but the parent
# of ti.pid is a mock (i.e. something different), so the PID check should fail.
with pytest.raises(AirflowException, match='PID of job runner does not match'):
job1.heartbeat_callback()
job1.task_runner.process.pid = 1
# Make the parent process of ti.pid equal the task_runner process id
psutil_mock.Process.return_value.ppid.return_value = 1
ti.state = State.RUNNING
ti.pid = 2
# The task_runner process id is 1, same as the parent process of ti.pid
# as seen above
assert ti.run_as_user
session.merge(ti)
session.commit()
job1.heartbeat_callback(session=None)
# Here the task_runner process id is changed to 2
# while parent process of ti.pid is kept at 1, which is different
job1.task_runner.process.pid = 2
with pytest.raises(AirflowException, match='PID of job runner does not match'):
job1.heartbeat_callback()
@conf_vars({('core', 'default_impersonation'): 'testuser'})
@mock.patch('airflow.jobs.local_task_job.psutil')
def test_localtaskjob_heartbeat_with_default_impersonation(self, psutil_mock, dag_maker):
session = settings.Session()
with dag_maker('test_localtaskjob_heartbeat'):
op1 = DummyOperator(task_id='op1')
dr = dag_maker.dag_run
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.RUNNING
ti.pid = 2
ti.hostname = get_hostname()
session.commit()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
ti.task = op1
ti.refresh_from_task(op1)
job1.task_runner = StandardTaskRunner(job1)
job1.task_runner.process = mock.Mock()
job1.task_runner.process.pid = 2
# Here ti.pid is 2 and the task_runner process pid is also 2, but the parent
# of ti.pid is a mock (i.e. something different), so the PID check should fail.
with pytest.raises(AirflowException, match='PID of job runner does not match'):
job1.heartbeat_callback()
job1.task_runner.process.pid = 1
# Make the parent process of ti.pid equal the task_runner process id
psutil_mock.Process.return_value.ppid.return_value = 1
ti.state = State.RUNNING
ti.pid = 2
# The task_runner process id is 1, same as the parent process of ti.pid
# as seen above
assert job1.task_runner.run_as_user == 'testuser'
session.merge(ti)
session.commit()
job1.heartbeat_callback(session=None)
# Here the task_runner process id is changed to 2
# while parent process of ti.pid is kept at 1, which is different
job1.task_runner.process.pid = 2
with pytest.raises(AirflowException, match='PID of job runner does not match'):
job1.heartbeat_callback()
def test_heartbeat_failed_fast(self):
"""
Test that task heartbeat will sleep when it fails fast
"""
self.mock_base_job_sleep.side_effect = time.sleep
dag_id = 'test_heartbeat_failed_fast'
task_id = 'test_heartbeat_failed_fast_op'
with create_session() as session:
dagbag = DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
dag.create_dagrun(
run_id="test_heartbeat_failed_fast_run",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session,
)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.commit()
job = LocalTaskJob(task_instance=ti, executor=MockExecutor(do_update=False))
job.heartrate = 2
heartbeat_records = []
job.heartbeat_callback = lambda session: heartbeat_records.append(job.latest_heartbeat)
job._execute()
assert len(heartbeat_records) > 2
for i in range(1, len(heartbeat_records)):
time1 = heartbeat_records[i - 1]
time2 = heartbeat_records[i]
# Assert that the interval between heartbeats stays close to the configured heartrate
delta = (time2 - time1).total_seconds()
assert abs(delta - job.heartrate) < 0.5
def test_mark_success_no_kill(self):
"""
Test that ensures that mark_success in the UI doesn't cause
the task to fail, and that the task exits
"""
dagbag = DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_mark_success')
task = dag.get_task('task1')
session = settings.Session()
dag.clear()
dag.create_dagrun(
run_id="test",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session,
)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True)
settings.engine.dispose()
process = multiprocessing.Process(target=job1.run)
process.start()
for _ in range(0, 50):
if ti.state == State.RUNNING:
break
time.sleep(0.1)
ti.refresh_from_db()
assert State.RUNNING == ti.state
ti.state = State.SUCCESS
session.merge(ti)
session.commit()
process.join()
assert not process.is_alive()
ti.refresh_from_db()
assert State.SUCCESS == ti.state
def test_localtaskjob_double_trigger(self):
dagbag = DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_localtaskjob_double_trigger')
task = dag.get_task('test_localtaskjob_double_trigger_task')
session = settings.Session()
dag.clear()
dr = dag.create_dagrun(
run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session,
)
ti = dr.get_task_instance(task_id=task.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.merge(ti)
session.commit()
ti_run = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti_run.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti_run, executor=SequentialExecutor())
with patch.object(StandardTaskRunner, 'start', return_value=None) as mock_method:
job1.run()
mock_method.assert_not_called()
ti = dr.get_task_instance(task_id=task.task_id, session=session)
assert ti.pid == 1
assert ti.state == State.RUNNING
session.close()
@pytest.mark.quarantined
def test_localtaskjob_maintain_heart_rate(self):
dagbag = DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_localtaskjob_double_trigger')
task = dag.get_task('test_localtaskjob_double_trigger_task')
session = settings.Session()
dag.clear()
dag.create_dagrun(
run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session,
)
ti_run = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti_run.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti_run, executor=SequentialExecutor())
# this should make sure we only heartbeat once and exit at the second
# loop in _execute()
return_codes = [None, 0]
def multi_return_code():
return return_codes.pop(0)
time_start = time.time()
with patch.object(StandardTaskRunner, 'start', return_value=None) as mock_start:
with patch.object(StandardTaskRunner, 'return_code') as mock_ret_code:
mock_ret_code.side_effect = multi_return_code
job1.run()
assert mock_start.call_count == 1
assert mock_ret_code.call_count == 2
time_end = time.time()
assert self.mock_base_job_sleep.call_count == 1
assert job1.state == State.SUCCESS
# Since the sleep call is patched, the job should not be sleeping anywhere
# else (unpatched code) to keep up with the heart rate.
#
# We already verified above that the patched sleep call is made only once.
assert time_end - time_start < job1.heartrate
session.close()
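# Several tests below share flags with the forked task process through
# multiprocessing.Value: a callback increments a counter under a lock, while the
# task callable flips task_terminated_externally to 0 only if it is *not* killed
# in time. A minimal sketch of the pattern (names here are illustrative only):
#
#     flag = Value('i', 0)
#     def on_event(context):
#         with flag.get_lock():
#             flag.value += 1
#
# The final asserts then verify both that the callback ran exactly once and that
# the task process was terminated before it could flip its flag.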
def test_mark_failure_on_failure_callback(self, dag_maker):
"""
Test that ensures that mark_failure in the UI fails
the task, and executes on_failure_callback
"""
# use shared memory value so we can properly track value change even if
# it's been updated across processes.
failure_callback_called = Value('i', 0)
task_terminated_externally = Value('i', 1)
def check_failure(context):
with failure_callback_called.get_lock():
failure_callback_called.value += 1
assert context['dag_run'].dag_id == 'test_mark_failure'
assert context['exception'] == "task marked as failed externally"
def task_function(ti):
with create_session() as session:
assert State.RUNNING == ti.state
ti.log.info("Marking TI as failed 'externally'")
ti.state = State.FAILED
session.merge(ti)
session.commit()
time.sleep(10)
# This should not happen -- the state change should be noticed and the task should get killed
with task_terminated_externally.get_lock():
task_terminated_externally.value = 0
with dag_maker("test_mark_failure", start_date=DEFAULT_DATE):
task = PythonOperator(
task_id='test_state_succeeded1',
python_callable=task_function,
on_failure_callback=check_failure,
)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
with timeout(30):
# This should be _much_ shorter to run.
# If you change this limit, make the timeout in the callable above bigger
job1.run()
ti.refresh_from_db()
assert ti.state == State.FAILED
assert failure_callback_called.value == 1
assert task_terminated_externally.value == 1
@patch('airflow.utils.process_utils.subprocess.check_call')
@patch.object(StandardTaskRunner, 'return_code')
def test_failure_callback_only_called_once(self, mock_return_code, _check_call, dag_maker):
"""
Test that ensures that when a task exits with failure by itself,
failure callback is only called once
"""
# use shared memory value so we can properly track value change even if
# it's been updated across processes.
failure_callback_called = Value('i', 0)
callback_count_lock = Lock()
def failure_callback(context):
with callback_count_lock:
failure_callback_called.value += 1
assert context['dag_run'].dag_id == 'test_failure_callback_race'
assert isinstance(context['exception'], AirflowFailException)
def task_function(ti):
raise AirflowFailException()
with dag_maker("test_failure_callback_race"):
task = PythonOperator(
task_id='test_exit_on_failure',
python_callable=task_function,
on_failure_callback=failure_callback,
)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
# Simulate race condition where job1 heartbeat ran right after task
# state got set to failed by ti.handle_failure but before task process
# fully exits. See _execute loop in airflow/jobs/local_task_job.py.
# In this case, we have:
# * task_runner.return_code() is None
# * ti.state == State.FAILED
#
# We also need to set return_code to a valid int after job1.terminating
# is set to True so _execute loop won't loop forever.
def dummy_return_code(*args, **kwargs):
return None if not job1.terminating else -9
mock_return_code.side_effect = dummy_return_code
with timeout(10):
# This should be _much_ shorter to run.
# If you change this limit, make the timeout in the callable above bigger
job1.run()
ti.refresh_from_db()
assert ti.state == State.FAILED # task exits with failure state
assert failure_callback_called.value == 1
def test_mark_success_on_success_callback(self, dag_maker):
"""
Test that ensures that when a task is marked success in the UI,
on_success_callback gets executed
"""
# use shared memory value so we can properly track value change even if
# it's been updated across processes.
success_callback_called = Value('i', 0)
task_terminated_externally = Value('i', 1)
shared_mem_lock = Lock()
def success_callback(context):
with shared_mem_lock:
success_callback_called.value += 1
assert context['dag_run'].dag_id == 'test_mark_success'
def task_function(ti):
time.sleep(60)
# This should not happen -- the state change should be noticed and the task should get killed
with shared_mem_lock:
task_terminated_externally.value = 0
with dag_maker(dag_id='test_mark_success', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'}):
task = PythonOperator(
task_id='test_state_succeeded1',
python_callable=task_function,
on_success_callback=success_callback,
)
session = settings.Session()
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
job1.task_runner = StandardTaskRunner(job1)
settings.engine.dispose()
process = multiprocessing.Process(target=job1.run)
process.start()
for _ in range(0, 25):
ti.refresh_from_db()
if ti.state == State.RUNNING:
break
time.sleep(0.2)
assert ti.state == State.RUNNING
ti.state = State.SUCCESS
session.merge(ti)
session.commit()
ti.refresh_from_db()
process.join()
assert success_callback_called.value == 1
assert task_terminated_externally.value == 1
assert not process.is_alive()
def test_task_sigkill_calls_on_failure_callback(self, dag_maker):
"""
Test that ensures that when a task is killed with sigkill
on_failure_callback gets executed
"""
# use shared memory value so we can properly track value change even if
# it's been updated across processes.
failure_callback_called = Value('i', 0)
task_terminated_externally = Value('i', 1)
shared_mem_lock = Lock()
def failure_callback(context):
with shared_mem_lock:
failure_callback_called.value += 1
assert context['dag_run'].dag_id == 'test_send_sigkill'
def task_function(ti):
os.kill(os.getpid(), signal.SIGKILL)
# This should never be reached -- the process has already killed itself with SIGKILL above
with shared_mem_lock:
task_terminated_externally.value = 0
with dag_maker(dag_id='test_send_sigkill'):
task = PythonOperator(
task_id='test_on_failure',
python_callable=task_function,
on_failure_callback=failure_callback,
)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
settings.engine.dispose()
process = multiprocessing.Process(target=job1.run)
process.start()
time.sleep(0.3)
process.join()
assert failure_callback_called.value == 1
assert task_terminated_externally.value == 1
assert not process.is_alive()
def test_process_sigterm_calls_on_failure_callback(self, dag_maker):
"""
Test that ensures that when a task runner is killed with sigterm
on_failure_callback gets executed
"""
# use shared memory value so we can properly track value change even if
# it's been updated across processes.
failure_callback_called = Value('i', 0)
task_terminated_externally = Value('i', 1)
shared_mem_lock = Lock()
def failure_callback(context):
with shared_mem_lock:
failure_callback_called.value += 1
assert context['dag_run'].dag_id == 'test_mark_failure'
def task_function(ti):
time.sleep(60)
# This should not happen -- the state change should be noticed and the task should get killed
with shared_mem_lock:
task_terminated_externally.value = 0
with dag_maker(dag_id='test_mark_failure', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'}):
task = PythonOperator(
task_id='test_on_failure',
python_callable=task_function,
on_failure_callback=failure_callback,
)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
settings.engine.dispose()
process = multiprocessing.Process(target=job1.run)
process.start()
for _ in range(0, 25):
ti.refresh_from_db()
if ti.state == State.RUNNING:
break
time.sleep(0.2)
os.kill(process.pid, signal.SIGTERM)
ti.refresh_from_db()
process.join()
assert failure_callback_called.value == 1
assert task_terminated_externally.value == 1
assert not process.is_alive()
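# Each tuple below maps onto test_fast_follow's signature:
# (conf, dependencies, init_state, first_run_state, second_run_state,
# error_message). `dependencies` is {upstream: downstream}, the *_state dicts
# are the expected TaskInstance states after each LocalTaskJob run, and a
# second_run_state of None skips the second run entirely.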
@parameterized.expand(
[
(
{('scheduler', 'schedule_after_task_execution'): 'True'},
{'A': 'B', 'B': 'C'},
{'A': State.QUEUED, 'B': State.NONE, 'C': State.NONE},
{'A': State.SUCCESS, 'B': State.SCHEDULED, 'C': State.NONE},
{'A': State.SUCCESS, 'B': State.SUCCESS, 'C': State.SCHEDULED},
"A -> B -> C, with fast-follow ON when A runs, B should be QUEUED. Same for B and C.",
),
(
{('scheduler', 'schedule_after_task_execution'): 'False'},
{'A': 'B', 'B': 'C'},
{'A': State.QUEUED, 'B': State.NONE, 'C': State.NONE},
{'A': State.SUCCESS, 'B': State.NONE, 'C': State.NONE},
None,
"A -> B -> C, with fast-follow OFF, when A runs, B shouldn't be QUEUED.",
),
(
{('scheduler', 'schedule_after_task_execution'): 'True'},
{'A': 'B', 'C': 'B', 'D': 'C'},
{'A': State.QUEUED, 'B': State.NONE, 'C': State.NONE, 'D': State.NONE},
{'A': State.SUCCESS, 'B': State.NONE, 'C': State.NONE, 'D': State.NONE},
None,
"D -> C -> B & A -> B, when A runs but C isn't QUEUED yet, B shouldn't be QUEUED.",
),
(
{('scheduler', 'schedule_after_task_execution'): 'True'},
{'A': 'C', 'B': 'C'},
{'A': State.QUEUED, 'B': State.FAILED, 'C': State.NONE},
{'A': State.SUCCESS, 'B': State.FAILED, 'C': State.UPSTREAM_FAILED},
None,
"A -> C & B -> C, when A is QUEUED but B has FAILED, C is marked UPSTREAM_FAILED.",
),
]
)
def test_fast_follow(
self, conf, dependencies, init_state, first_run_state, second_run_state, error_message
):
with conf_vars(conf):
session = settings.Session()
dag = DAG('test_dagrun_fast_follow', start_date=DEFAULT_DATE)
dag_model = DagModel(
dag_id=dag.dag_id,
next_dagrun=dag.start_date,
is_active=True,
)
session.add(dag_model)
session.flush()
python_callable = lambda: True
with dag:
task_a = PythonOperator(task_id='A', python_callable=python_callable)
task_b = PythonOperator(task_id='B', python_callable=python_callable)
task_c = PythonOperator(task_id='C', python_callable=python_callable)
if 'D' in init_state:
task_d = PythonOperator(task_id='D', python_callable=python_callable)
for upstream, downstream in dependencies.items():
dag.set_dependency(upstream, downstream)
scheduler_job = SchedulerJob(subdir=os.devnull)
scheduler_job.dagbag.bag_dag(dag, root_dag=dag)
dag_run = dag.create_dagrun(run_id='test_dagrun_fast_follow', state=State.RUNNING)
task_instance_a = TaskInstance(task_a, dag_run.execution_date, init_state['A'])
task_instance_b = TaskInstance(task_b, dag_run.execution_date, init_state['B'])
task_instance_c = TaskInstance(task_c, dag_run.execution_date, init_state['C'])
if 'D' in init_state:
task_instance_d = TaskInstance(task_d, dag_run.execution_date, init_state['D'])
session.merge(task_instance_d)
session.merge(task_instance_a)
session.merge(task_instance_b)
session.merge(task_instance_c)
session.flush()
job1 = LocalTaskJob(
task_instance=task_instance_a, ignore_ti_state=True, executor=SequentialExecutor()
)
job1.task_runner = StandardTaskRunner(job1)
job2 = LocalTaskJob(
task_instance=task_instance_b, ignore_ti_state=True, executor=SequentialExecutor()
)
job2.task_runner = StandardTaskRunner(job2)
settings.engine.dispose()
job1.run()
self.validate_ti_states(dag_run, first_run_state, error_message)
if second_run_state:
job2.run()
self.validate_ti_states(dag_run, second_run_state, error_message)
if scheduler_job.processor_agent:
scheduler_job.processor_agent.end()
def test_task_sigkill_works_with_retries(self, dag_maker):
"""
Test that ensures that tasks are retried when they receive sigkill
"""
# use shared memory value so we can properly track value change even if
# it's been updated across processes.
retry_callback_called = Value('i', 0)
task_terminated_externally = Value('i', 1)
shared_mem_lock = Lock()
def retry_callback(context):
with shared_mem_lock:
retry_callback_called.value += 1
assert context['dag_run'].dag_id == 'test_mark_failure_2'
def task_function(ti):
os.kill(os.getpid(), signal.SIGKILL)
# This should never be reached -- the process has already killed itself with SIGKILL above
with shared_mem_lock:
task_terminated_externally.value = 0
with dag_maker(
dag_id='test_mark_failure_2', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'}
):
task = PythonOperator(
task_id='test_on_failure',
python_callable=task_function,
retries=1,
retry_delay=timedelta(seconds=2),
on_retry_callback=retry_callback,
)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
job1.task_runner = StandardTaskRunner(job1)
job1.task_runner.start()
settings.engine.dispose()
process = multiprocessing.Process(target=job1.run)
process.start()
time.sleep(0.4)
process.join()
ti.refresh_from_db()
assert ti.state == State.UP_FOR_RETRY
assert retry_callback_called.value == 1
assert task_terminated_externally.value == 1
def test_process_sigterm_works_with_retries(self, dag_maker):
"""
Test that ensures the task is set up for retry when the task runner
receives SIGTERM
"""
# use shared memory value so we can properly track value change even if
# it's been updated across processes.
retry_callback_called = Value('i', 0)
task_terminated_externally = Value('i', 1)
shared_mem_lock = Lock()
def retry_callback(context):
with shared_mem_lock:
retry_callback_called.value += 1
assert context['dag_run'].dag_id == 'test_mark_failure_2'
def task_function(ti):
time.sleep(60)
# This should not happen -- the state change should be noticed and the task should get killed
with shared_mem_lock:
task_terminated_externally.value = 0
with dag_maker(dag_id='test_mark_failure_2'):
task = PythonOperator(
task_id='test_on_failure',
python_callable=task_function,
retries=1,
retry_delay=timedelta(seconds=2),
on_retry_callback=retry_callback,
)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
job1.task_runner = StandardTaskRunner(job1)
job1.task_runner.start()
settings.engine.dispose()
process = multiprocessing.Process(target=job1.run)
process.start()
for _ in range(0, 25):
ti.refresh_from_db()
if ti.state == State.RUNNING and ti.pid is not None:
break
time.sleep(0.2)
os.kill(process.pid, signal.SIGTERM)
process.join()
ti.refresh_from_db()
assert ti.state == State.UP_FOR_RETRY
assert retry_callback_called.value == 1
assert task_terminated_externally.value == 1
def test_task_exit_should_update_state_of_finished_dagruns_with_dag_paused(self):
"""Test that with DAG paused, DagRun state will update when the tasks finishes the run"""
dag = DAG(dag_id='test_dags', start_date=DEFAULT_DATE)
op1 = PythonOperator(task_id='dummy', dag=dag, owner='airflow', python_callable=lambda: True)
session = settings.Session()
orm_dag = DagModel(
dag_id=dag.dag_id,
has_task_concurrency_limits=False,
next_dagrun=dag.start_date,
next_dagrun_create_after=dag.following_schedule(DEFAULT_DATE),
is_active=True,
is_paused=True,
)
session.add(orm_dag)
session.flush()
# Write Dag to DB
dagbag = DagBag(dag_folder="/dev/null", include_examples=False, read_dags_from_db=False)
dagbag.bag_dag(dag, root_dag=dag)
dagbag.sync_to_db()
dr = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session,
)
assert dr.state == State.RUNNING
ti = TaskInstance(op1, dr.execution_date)
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
job1.task_runner = StandardTaskRunner(job1)
job1.run()
session.add(dr)
session.refresh(dr)
assert dr.state == State.SUCCESS
@pytest.fixture()
def clean_db_helper():
yield
db.clear_db_jobs()
db.clear_db_runs()
@pytest.mark.usefixtures("clean_db_helper")
class TestLocalTaskJobPerformance:
@pytest.mark.parametrize("return_codes", [[0], 9 * [None] + [0]]) # type: ignore
@mock.patch("airflow.jobs.local_task_job.get_task_runner")
def test_number_of_queries_single_loop(self, mock_get_task_runner, return_codes):
unique_prefix = str(uuid.uuid4())
dag = DAG(dag_id=f'{unique_prefix}_test_number_of_queries', start_date=DEFAULT_DATE)
task = DummyOperator(task_id='test_state_succeeded1', dag=dag)
dag.clear()
dag.create_dagrun(run_id=unique_prefix, execution_date=DEFAULT_DATE, state=State.NONE)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
mock_get_task_runner.return_value.return_code.side_effect = return_codes
job = LocalTaskJob(task_instance=ti, executor=MockExecutor())
with assert_queries_count(18):
job.run()
|
echo_02_threads.py
|
# echo_02_threads.py
import socket
import threading
def run_server(host='127.0.0.1', port=55555):
sock = socket.socket()
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port))
sock.listen()
while True:
client_sock, addr = sock.accept()
print('Connection from', addr)
thread = threading.Thread(target=handle_client, args=[client_sock])
thread.start()
def handle_client(sock):
while True:
received_data = sock.recv(4096)
if not received_data:
break
sock.sendall(received_data)
print('Client disconnected:', sock.getpeername())
sock.close()
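# A minimal client sketch for exercising the echo server by hand; the helper
# name is illustrative and the defaults mirror run_server above.
def echo_once(message=b'hello', host='127.0.0.1', port=55555):
    # Open a connection, send one payload, and return whatever is echoed back.
    with socket.create_connection((host, port)) as conn:
        conn.sendall(message)
        return conn.recv(4096)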
if __name__ == '__main__':
run_server()
|