coro.py
|
'''
Async/Coroutine related utilities.
'''
import os
import queue
import atexit
import asyncio
import inspect
import logging
import functools
import multiprocessing
import concurrent.futures
import synapse.exc as s_exc
import synapse.glob as s_glob
import synapse.common as s_common
logger = logging.getLogger(__name__)
def iscoro(item):
return inspect.iscoroutine(item)
async def agen(item):
'''
Wrap an async_generator *or* generator in an async_generator.
Notes:
        Do not use this for a synchronous generator that performs blocking IO;
        that IO will block the ioloop.
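    Examples:
        Iterate either flavor of generator uniformly (an illustrative sketch;
        ``genr`` and ``dostuff`` are placeholders)::
            async for item in agen(genr):
                await dostuff(item)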
'''
if getattr(item, '__aiter__', None) is not None:
async for x in item:
yield x
return
for x in item:
yield x
def executor(func, *args, **kwargs):
'''
Execute a non-coroutine function in the ioloop executor pool.
Args:
func: Function to execute.
*args: Args for the function.
**kwargs: Kwargs for the function.
Examples:
Execute a blocking API call in the executor pool::
import requests
def block(url, params=None):
return requests.get(url, params=params).json()
            fut = s_coro.executor(block, 'http://some.tld/thing')
resp = await fut
Returns:
asyncio.Future: An asyncio future.
'''
def real():
return func(*args, **kwargs)
return asyncio.get_running_loop().run_in_executor(None, real)
class Event(asyncio.Event):
async def timewait(self, timeout=None):
if timeout is None:
await self.wait()
return True
try:
await asyncio.wait_for(self.wait(), timeout)
except asyncio.TimeoutError:
return False
return True
async def event_wait(event: asyncio.Event, timeout=None):
'''
    Wait on an asyncio event with an optional timeout.
    Returns:
        True if the event got set, False if the wait timed out.
'''
if timeout is None:
await event.wait()
return True
try:
await asyncio.wait_for(event.wait(), timeout)
except asyncio.TimeoutError:
return False
return True
async def waittask(task, timeout=None):
'''
Await a task without cancelling it when you time out.
Returns:
boolean: True if the task completed before the timeout.
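    Examples:
        Wait up to 5 seconds for a task without cancelling it on timeout
        (an illustrative sketch; ``task`` is an existing asyncio.Task)::
            done = await s_coro.waittask(task, timeout=5)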
'''
futu = asyncio.get_running_loop().create_future()
task.add_done_callback(futu.set_result)
try:
await asyncio.wait_for(futu, timeout=timeout)
return True
except asyncio.TimeoutError:
return False
finally:
task.remove_done_callback(futu.set_result)
async def ornot(func, *args, **kwargs):
'''
    Calls func and awaits the result if it returns a coroutine.
Note:
This is useful for implementing a function that might take a telepath proxy object or a local object, and you
must call a non-async method on that object.
This is also useful when calling a callback that might either be a coroutine function or a regular function.
Usage:
ok = await s_coro.ornot(maybeproxy.allowed, 'path')
'''
retn = func(*args, **kwargs)
if iscoro(retn):
return await retn
return retn
class GenrHelp:
def __init__(self, genr):
assert genr is not None
self.genr = genr
def __aiter__(self):
return self.genr
def __iter__(self):
try:
while True:
item = s_glob.sync(self.genr.__anext__())
yield item
except StopAsyncIteration:
return
except GeneratorExit:
# Raised if a synchronous consumer exited an iterator early.
# Signal the generator to close down.
s_glob.sync(self.genr.aclose())
raise
async def spin(self):
async for x in self.genr:
pass
async def list(self):
return [x async for x in self.genr]
def genrhelp(f):
@functools.wraps(f)
def func(*args, **kwargs):
return GenrHelp(f(*args, **kwargs))
return func
def _exectodo(que, todo):
func, args, kwargs = todo
try:
que.put(func(*args, **kwargs))
except Exception as e:
que.put(e)
async def spawn(todo, timeout=None, ctx=None):
'''
Run a todo (func, args, kwargs) tuple in a multiprocessing subprocess.
Args:
todo (tuple): A tuple of function, ``*args``, and ``**kwargs``.
timeout (int): The timeout to wait for the todo function to finish.
        ctx (multiprocessing.Context): An optional multiprocessing context object.
Notes:
The contents of the todo tuple must be able to be pickled for execution.
This means that locally bound functions are not eligible targets for spawn.
Returns:
The return value of executing the todo function.
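    Examples:
        Run a picklable function in a subprocess (an illustrative sketch;
        ``myfunc`` and its arguments are placeholders)::
            todo = (myfunc, (10,), {'debug': True})
            retn = await s_coro.spawn(todo, timeout=30)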
'''
if ctx is None:
ctx = multiprocessing.get_context('spawn')
que = ctx.Queue()
proc = ctx.Process(target=_exectodo, args=(que, todo))
def execspawn():
proc.start()
while True:
try:
# we have to block/wait on the queue because the sender
# could need to stream the return value in multiple chunks
retn = que.get(timeout=1)
# now that we've retrieved the response, it should have exited.
proc.join()
return retn
except queue.Empty:
if not proc.is_alive():
proc.join()
raise s_exc.SpawnExit(code=proc.exitcode)
try:
coro = executor(execspawn)
retn = await asyncio.wait_for(coro, timeout=timeout)
if isinstance(retn, Exception):
raise retn
return retn
except (asyncio.CancelledError, asyncio.TimeoutError):
proc.terminate()
raise
# shared process pool
forkpool = None
if multiprocessing.current_process().name == 'MainProcess':
# only create the forkpool in the MainProcess...
try:
mpctx = multiprocessing.get_context('forkserver')
max_workers = int(os.getenv('SYN_FORKED_WORKERS', 1))
forkpool = concurrent.futures.ProcessPoolExecutor(mp_context=mpctx, max_workers=max_workers)
atexit.register(forkpool.shutdown)
except OSError as e: # pragma: no cover
logger.warning(f'Failed to init forkserver pool, fallback enabled: {e}', exc_info=True)
def set_pool_logging(logger_, logconf):
# This must be called before any calls to forked()
todo = s_common.todo(s_common.setlogging, logger_, **logconf)
if forkpool is not None:
forkpool._initializer = _runtodo
forkpool._initargs = (todo,)
def _runtodo(todo):
return todo[0](*todo[1], **todo[2])
async def forked(func, *args, **kwargs):
'''
Execute a target function in the forked process pool.
Args:
func: The target function.
*args: Function positional arguments.
**kwargs: Function keyword arguments.
Returns:
The target function return.
Raises:
        This may re-raise an exception from the target function, or raise an s_exc.FatalErr in the event of a broken
        forked process pool. The FatalErr represents an unrecoverable application state.
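    Examples:
        Offload a CPU bound, picklable function to the forked pool
        (an illustrative sketch; ``crunch`` and its arguments are placeholders)::
            retn = await s_coro.forked(crunch, data, fast=True)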
'''
todo = (func, args, kwargs)
try:
return await asyncio.get_running_loop().run_in_executor(forkpool, _runtodo, todo)
except concurrent.futures.process.BrokenProcessPool as e:
logger.exception(f'Fatal error executing forked task: {func} {args} {kwargs}')
raise s_exc.FatalErr(mesg=f'Fatal error encountered: {e}') from None
|
packeter_single_large.py
|
#!/usr/bin/env python
"""
Send a single large packet over a single connection.
@author: David Siroky (siroky@dasir.cz)
@license: MIT License (see LICENSE.txt or
U{http://www.opensource.org/licenses/mit-license.php})
"""
import time
import logging
import sys
from multiprocessing import Process
sys.path.insert(0, "../..")
import snakemq
import snakemq.link
import snakemq.packeter
###########################################################################
DATA_SIZE = 300 * 1024 * 1024
PORT = 4000
###########################################################################
def srv():
s = snakemq.link.Link()
container = {"start_time": None}
def on_connect(conn_id):
container["start_time"] = time.time()
def on_packet_recv(conn_id, packet):
assert len(packet) == DATA_SIZE
diff = time.time() - container["start_time"]
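        # Throughput in MiB/s: bytes received divided by the elapsed seconds since connect.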
print "flow: %.02f MBps" % (DATA_SIZE / diff / 1024**2)
def on_disconnect(conn_id):
s.stop()
s.add_listener(("", PORT))
tr = snakemq.packeter.Packeter(s)
tr.on_connect = on_connect
tr.on_packet_recv = on_packet_recv
tr.on_disconnect = on_disconnect
s.loop()
s.cleanup()
###########################################################################
def cli():
s = snakemq.link.Link()
def on_connect(conn_id):
tr.send_packet(conn_id, "x" * DATA_SIZE)
def on_packet_sent(conn_id, packet_id):
s.stop()
s.add_connector(("localhost", PORT))
tr = snakemq.packeter.Packeter(s)
tr.on_connect = on_connect
tr.on_packet_sent = on_packet_sent
s.loop()
s.cleanup()
###########################################################################
# avoid logging overhead
logger = logging.getLogger("snakemq")
logger.setLevel(logging.ERROR)
thr_srv = Process(target=srv)
thr_srv.start()
thr_cli = Process(target=cli)
thr_cli.start()
thr_srv.join()
thr_cli.join()
|
aniplot_TkinterControl.py
|
'''
datecreated: 190930
objective: want to use opencv to make some kind of animated plotting tool.
KJG190930: using cv2 is MUCH MUCH faster, will use this instead of matplotlib
KJG190930: at this point, will use tkinter to try and control the rectangle
KJG191001: tkinter now functional, with multi-key input. now capable of having
high-quality graphing window along with manual input control
applications:
* animated plot
* live updating
* user / computer controlled animation
* computer controlled demo
THINGS TO IMPLEMENT
status | description
done | plot fast-updating (60Hz+) plot area
done | have a rotating rectangle
done | use polygons instead of "rect", in custom function
done | be able to control item with keyboard (one key)
'''
import threading # handling two different sequences at once (getting frames, displaying them)
import tkinter as tk # keyboard control
import cv2
import time
import numpy as np
RED = (0,0,255) # for use with opencv (BGR)
BLU = (255,0,0)
GRN = (0,255,0)
WHT = (255,255,255)
BLK = (0,0,0)
CVFONT = cv2.FONT_HERSHEY_SIMPLEX
IMW=400
IMH=300
def qs(img,title='CLOSE WITH KEYBOARD'):
cv2.imshow(title,img)
cv2.waitKey(0)
cv2.destroyAllWindows()
def rect2(img, center,dims,angle, color,*kargs):
''' general steps:
1. take in parameters
2. rotate rectangle centered at origin
3. translate to given spot.
'''
xc,yc=center
w,h=dims
theta = np.radians(angle)
c,s=np.cos(theta),np.sin(theta)
R=np.array([
[c,-s,0],
[s,c,0],
[0,0,1]]) # 3x3
pts=np.array([
[-w,h,1],
[w,h,1],
[w,-h,1],
[-w,-h,1],
[-w,h,1] ])/2
# rotate points
pts2=pts@R
pts2[:,0]+=xc
pts2[:,1]+=yc
pts2=pts2[:,:2].reshape((-1,1,2)).astype(int)
    cv2.polylines(img, [pts2], True, color)
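# Illustrative usage sketch for rect2() (assumption: `frame` is an existing BGR image array):
#   rect2(frame, center=(200, 150), dims=(50, 100), angle=30, color=RED)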
class Timer:
def __init__(self):
self.t0=time.time() # start time
self._lap = time.time()
def now(self):
''' return time since start of program '''
return time.time()-self.t0
def lap(self):
''' get lap time and reset timer '''
elapsed = time.time() - self._lap
self._lap = time.time()
return elapsed
class Global:
def __init__(self):
self.var=0
gvar = Global()
class KBControl_r0:
def __init__(self):
''' user note: tkinter should only be used in main thread and has issues
working with threading module. do not put this class in separate
thread
src: https://stackoverflow.com/questions/45799121/runtimeerror-calling-tcl-from-different-appartment-tkinter-and-threading
'''
self.R = tk.Tk()
self.F = tk.Frame(self.R, width=100, height=100)
self.F.bind('a',self.leftKey)
self.F.bind('d',self.rightKey)
self.F.bind('q',self.quit)
self.F.focus_set()
self.F.pack()
self.var_dir=tk.IntVar()
def getstatus(self):
print('value:',self.var_dir.get()) # may simplify later
def leftKey(self,event):
self.var_dir.set(0)
gvar.var = 0
def rightKey(self,event):
self.var_dir.set(1)
gvar.var = 1
def quit(self,event):
self.R.quit()
def run(self):
self.R.mainloop()
class KBControl_r1:
def __init__(self):
self.R = tk.Tk()
self.V = tk.StringVar()
self.V.set('0') # initial value
self.a_label = tk.Label(self.R,textvariable = self.V ).pack() # create label object
self.history = [] # create empty list
self.v_dir = ''
self.F = tk.Frame(self.R, width=200, height=200) #create self.F in main window
self.F.bind("<KeyPress>", self.keydown) # bind "keydown" fn to keyPRESS
self.F.bind("<KeyRelease>", self.keyup) # bind "keyup" fn to keyRELEASE
self.F.bind('q',self.quit)
self.F.pack() # activate self.F
self.F.focus_set() # set self.F in focus
def keyup(self,e):
# print e.char # when a key is un-pressed, print to screen
if e.char in self.history :
self.history.pop(self.history.index(e.char)) #remove it from the list
# NOTE: LIST IS NOW UPDATED
self.v_dir = self.direction(self.history)
gvar.var = self.v_dir
self.V.set(self.v_dir) # convert current state of history into string
# here, would send the updated command to the serial port.
def keydown(self,e):
if not e.char in self.history : # if key isn't alrdy in list...
self.history.append(e.char) # add key to END(!) of list
# NOTE: LIST IS NOW UPDATED
self.v_dir = self.direction(self.history)
gvar.var = self.v_dir
self.V.set(self.v_dir) # convert current state of list into string
# here, would send updated command to the serial port
def direction(self,e):
''' Take in list of currently pressed keys, return direction. General
steps:
1. receive list
2. check if list has more than two elements
3. check which two elements active
4. return direction
NOTE: keypad:
1 2 3
4 5 6
7 8 9
0 '''
if(len(e)==1):
# only one button pressed
if('w' in e):
return '2' # NORTH
elif('a' in e):
return '4' # WEST
elif('s' in e):
return '8' # SOUTH
elif('d' in e):
return '6' # EAST
else:
return '0'
elif(len(e)==2):
if('w' in e and 'a' in e):
return '1' # NWEST
elif('w' in e and 'd' in e):
return '3' # NEAST
elif('s' in e and 'a' in e):
return '7' # SWEST
elif('s' in e and 'd' in e):
return '9' # SEAST
else:
return '0'
else:
return '0'
def quit(self,e):
self.R.quit()
def run(self):
self.R.mainloop() # activate whole program
class DisplayWindow:
''' should be capable of putting everything into a thread '''
def __init__(self):
self.xc=IMW/2
self.yc=IMH/2
self.w=50
self.h=100
def run(self):
while(True):
lap = timer.lap()
bkgd = np.ones((IMH,IMW,3))*255 # follows image format
cv2.putText(bkgd,str(round(lap,3)),(50,30),CVFONT,1,BLU)
cv2.putText(bkgd,str(round(timer.now(),3)),(50,60),CVFONT,1,BLU)
cv2.putText(bkgd,str(gvar.var),(50,90),CVFONT,1,BLU)
cv2.circle(bkgd,(int(IMW/2),int(IMH/2)),10,GRN)
rect2(bkgd,(self.xc,self.yc),(self.w,self.h),timer.now()*180,RED)
pts = np.array([[10,5],[20,30],[70,20],[50,10]], np.int32)
pts = pts.reshape((-1,1,2)) # critical for drawing a polygon
cv2.imshow("press 'q' to exit",bkgd)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
timer = Timer()
dw = DisplayWindow()
kbc = KBControl_r1()
#
thread_dw=threading.Thread(target=dw.run,daemon=True) # kill this window if tkinter closes
thread_dw.start()
#
#
# # print('ready to exit')
kbc.run() # tkinter thing, should be final thing to run
|
replay_actions.py
|
#!/usr/bin/python
# NOTE: This code is to a large degree based on DeepMind work for
# AI in StarCraft2, just ported towards the Dota 2 game.
# DeepMind's License is posted below.
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dump out stats about all the actions that are in use in a set of replays."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import multiprocessing
import os
import signal
import sys
import threading
import time
import platform
import glob
import json
import math
from future.builtins import range # pylint: disable=redefined-builtin
import six
from six.moves import queue
replay_dir = 'replays'
from absl import app
from absl import flags
from pydota2.lib import world_data
FLAGS = flags.FLAGS
flags.DEFINE_integer("parallel", 1, "How many instances to run in parallel.")
flags.DEFINE_integer("step_mul", 1, "How many game steps per observation.")
flags.DEFINE_string("replays", replay_dir, "Path to a directory of replays.")
import pydota2.protobuf.CMsgBotWorldState_pb2 as _pb
from pydota2.lib.gfile import *
def load_json_file(fname):
fname = JoinPath('pydota2', 'gen_data', fname)
with open(fname, 'r') as infile:
return json.load(infile)
ability_data = load_json_file('abilities.json')
def get_available_replays(path):
d = os.path.join('.', path)
return [os.path.join(d, o) for o in os.listdir(d) if os.path.isdir(os.path.join(d,o))]
def sorted_dict_str(d):
return "{%s}" % ", ".join("%s: %s" % (k, d[k]) for k in sorted(d, key=d.get, reverse=True))
class ReplayStats(object):
"""Summary stats of the replays seen so far."""
def __init__(self):
self.replays = 0
self.steps = 0
#TODO - add other stats we want to track
self.heroes = collections.defaultdict(int)
self.unit_ids = collections.defaultdict(int)
self.valid_abilities = collections.defaultdict(int)
self.made_abilities = collections.defaultdict(int)
self.valid_actions = collections.defaultdict(int)
self.made_actions = collections.defaultdict(int)
self.crashing_replays = set()
self.invalid_replays = set()
def merge(self, other):
"""Merge another ReplayStats into this one."""
def merge_dict(a, b):
for k, v in six.iteritems(b):
a[k] += v
self.replays += other.replays
self.steps += other.steps
#TODO - as above, add merging of other stats we track
merge_dict(self.heroes, other.heroes)
merge_dict(self.unit_ids, other.unit_ids)
merge_dict(self.valid_abilities, other.valid_abilities)
merge_dict(self.made_abilities, other.made_abilities)
merge_dict(self.valid_actions, other.valid_actions)
merge_dict(self.made_actions, other.made_actions)
self.crashing_replays |= other.crashing_replays
self.invalid_replays |= other.invalid_replays
def __str__(self):
len_sorted_dict = lambda s: (len(s), sorted_dict_str(s))
len_sorted_list = lambda s: (len(s), sorted(s))
return "\n\n".join((
"Replays: %s, Steps total: %s" % (self.replays, self.steps),
#TODO - print other stats we track
"Heroes: %s\n%s" % len_sorted_dict(self.heroes),
"Unit ids: %s\n%s" % len_sorted_dict(self.unit_ids),
"Valid abilities: %s\n%s" % len_sorted_dict(self.valid_abilities),
"Made abilities: %s\n%s" % len_sorted_dict(self.made_abilities),
"Valid actions: %s\n%s" % len_sorted_dict(self.valid_actions),
"Made actions: %s\n%s" % len_sorted_dict(self.made_actions),
"Crashing replays: %s\n%s" % len_sorted_list(self.crashing_replays),
"Invalid replays: %s\n%s" % len_sorted_list(self.invalid_replays),
))
class ProcessStats(object):
"""Stats for a worker process."""
def __init__(self, proc_id):
self.proc_id = proc_id
self.time = time.time()
self.stage = ""
self.replay = ""
self.replay_stats = ReplayStats()
def update(self, stage):
self.time = time.time()
self.stage = stage
def __str__(self):
return ("[%2d] replay: %10s, replays: %5d, steps: %7d, game loops: %7s, "
"last: %12s, %3d s ago" % (
self.proc_id, self.replay, self.replay_stats.replays,
self.replay_stats.steps,
self.replay_stats.steps * FLAGS.step_mul, self.stage,
time.time() - self.time))
def valid_replay(info):
"""Make sure the replay isn't corrupt, and is worth looking at."""
#TODO - figure out what metrics to use to determine if either
# an error occurred or it's just a low-level MMR replay not
# worth learning from
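    # Illustrative sketch (an assumption, not enabled): one possible filter using
    # the fields produced by summarize_replay() could be:
    #   if info.get('game_length', 0) <= 0:
    #       return False
    #   if 'ancient_hp_2' not in info or 'ancient_hp_3' not in info:
    #       return False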
return True
class ReplayProcessor(multiprocessing.Process):
"""A Process that pulls replays and processes them."""
def __init__(self, proc_id, replay_queue, stats_queue):
super(ReplayProcessor, self).__init__()
self.stats = ProcessStats(proc_id)
self.replay_queue = replay_queue
self.stats_queue = stats_queue
def run(self):
signal.signal(signal.SIGTERM, lambda a, b: sys.exit()) # Exit quietly.
self._update_stage("spawn")
replay_name = "none"
while True:
self._print("Starting up a new Dota2 replay instance.")
self._update_stage("launch")
try:
for _ in range(300):
try:
replay_path = self.replay_queue.get()
except queue.Empty:
self._update_stage("done")
self._print("Empty queue, returning")
return
try:
replay_name = os.path.basename(replay_path)
self.stats.replay = replay_name
self._print("Got replay: '%s'" % replay_path)
# TODO - below is test code to see what Valve has fixed
#self._ingest_frame(os.path.join(replay_path, '000058.bin'))
self._update_stage("open replay directory")
#TODO - process the replay info (total game time, winner, timestep interval)
replay_info = self.summarize_replay(replay_path)
#self.replay_data = queue.Queue(maxsize=100)
#self.load_replay(replay_path)
self._print((" Replay Info %s " % replay_name).center(60, "-"))
#self._print(replay_info)
self.print_summary(replay_info)
self._print("-" * 60)
if valid_replay(replay_info):
self._update_stage("process replay")
self.process_replay(replay_path, replay_info['team_id'])
finally:
self.replay_queue.task_done()
self._update_stage("shutdown")
except KeyboardInterrupt:
return
#except:
# print("[Run Replay] Unexpected error:", sys.exc_info()[0])
# self.stats.replay_stats.crashing_replays.add(replay_name)
# raise
def _ingest_frame(self, frame_name):
"""Load a specific frame into an object."""
try:
proto_frame = open(frame_name, 'rb')
data_frame = _pb.CMsgBotWorldState()
data_frame.ParseFromString(proto_frame.read())
#TODO - uncomment to see protobuf values
#print(data_frame)
proto_frame.close()
return data_frame
except Exception as e:
print('Protobuf Frame Loading Error: %s for frame %s' % (str(e), frame_name))
pass
def load_replay(self, replay_path):
"""Load the replay data into memory through time-ordered JSON objects."""
self._update_stage("loading replay into memory")
files = sorted(glob.glob(os.path.join(replay_path, '*.bin')))
for fname in files[0:100]:
try:
self.replay_data.put(self._ingest_frame(fname))
except Exception as e:
print('Protobuf loading error: %s for file %s' % (str(e), fname))
pass
def print_summary(self, summary):
if summary["ancient_hp_2"] > summary["ancient_hp_3"]:
self._print("Radiant Victory")
else:
self._print("Dire Victory")
minutes = int(summary['game_length']/60)
seconds = int(summary['game_length'] - minutes*60)
self._print("Game Length: %d Min, %d Sec" % (minutes, seconds))
self._print("Heroes:")
for i in range(0,10):
try:
self._print("\t%s" % (summary['player_'+str(i)]))
except KeyError:
pass
def summarize_replay(self, replay_path):
"""Summarize the replay (length of time, winner, heroes, roles)."""
self._update_stage("summarizing replay")
files = sorted(glob.glob(os.path.join(replay_path, '*.bin')))
data = {}
indx = 0
for fname in [files[0], files[-1]]:
try:
# load the first and last frame in our replay
data[indx] = self._ingest_frame(fname)
except Exception as e:
print('Protobuf loading error: %s for file %s' % (str(e), fname))
break
indx += 1
info = {}
try:
frame_indx = int(os.path.basename(files[-1])[:-4])+1
print('Replay Length: %d' % (frame_indx))
info['game_length'] = data[1].game_time - data[0].game_time
info['frame_delta'] = info['game_length']/frame_indx
info['team_id'] = data[0].team_id
for unit in data[1].units:
if unit.unit_type == 9:
info['ancient_hp_' + str(unit.team_id)] = unit.health
for player in data[1].players:
name = "<UNKNOWN>"
net_worth = 0
for unit in data[1].units:
if unit.player_id == player.player_id and unit.unit_type == 1:
name = unit.name[14:]
net_worth = unit.net_worth
info['player_'+str(player.player_id)] = "[%d] Hero: %15s, NetWorth: %5d, K: %2d, D: %2d, A: %2d" % (player.player_id, name, net_worth, player.kills, player.deaths, player.assists)
except:
print("[Summarize Replay] Unexpected error:", sys.exc_info()[0])
print(data[0])
print(data[1])
raise
print(info)
return info
def process_replay(self, replay_path, team_id):
"""Process a single replay, updating the stats."""
self._update_stage("start_replay")
self.stats.replay_stats.replays += 1
files = sorted(glob.glob(os.path.join(replay_path, '*.bin')))
max_frames = int(os.path.basename(files[-1])[:-4])+1
ws = None
anim_activities = {}
ab_activities = {}
tree_activities = {}
roshan_kills = []
courier_kills = []
for fname in files:
self.stats.replay_stats.steps += 1
step = int(os.path.basename(fname)[:-4])+1
data = self._ingest_frame(fname)
self._update_stage('Step %d of %d - Observe' % (step, max_frames))
# TODO - complete the actual Reinforcement Learning
if data:
#print('Game State: %d -- Step %d' % (data.game_state, step))
if data.game_state in [4,5]:
#if step == 400:
# print(data)
# break
if not ws:
ws = world_data.WorldData(data)
else:
ws.update_world_data(data)
pids = ws.get_player_ids()
for pid in pids:
player = ws.get_player_by_id(pid)
activity = player.get_anim_activity()
#mv_delta = player.get_movement_vector()
#mv_dist = mv_delta.len()
#if math.fabs(mv_delta.x) >= 10.0 or math.fabs(mv_delta.y) >= 10.0:
# print("[%f] %s {%d} moved %.2f units @ %.2f <%.2f> degrees <%.2f, %.2f>" % (data.dota_time, player.get_name(), activity, mv_dist, player.udata.facing, mv_delta.heading(), mv_delta.x, mv_delta.y))
if not activity in anim_activities.keys():
anim_activities[activity] = 1
else:
anim_activities[activity] += 1
for ab_event in data.ability_events:
if not ab_event.ability_id in ab_activities.keys():
ab_activities[ab_event.ability_id] = [(data.dota_time, ab_event.player_id, ab_event.unit_handle, ab_event.location, ab_event.is_channel_start)]
else:
ab_activities[ab_event.ability_id].append((data.dota_time, ab_event.player_id, ab_event.unit_handle, ab_event.location, ab_event.is_channel_start))
"""
try:
p_name = ws.get_player_by_id(ab_event.player_id).get_name()
except:
p_name = str(ab_event.player_id)
pass
try:
u_name = ws.get_unit_by_handle(data.units, ab_event.unit_handle).name
except:
u_name = str(ab_event.unit_handle)
pass
print("[%d] used by %s against %s" % (ab_event.ability_id, p_name, u_name))
"""
for tree_event in data.tree_events:
if not tree_event.tree_id in tree_activities.keys():
tree_activities[tree_event.tree_id] = [(tree_event.location, tree_event.destroyed, tree_event.respawned, tree_event.delayed)]
else:
tree_activities[tree_event.tree_id].append((data.dota_time, tree_event.location, tree_event.destroyed, tree_event.respawned, tree_event.delayed))
for rk in data.roshan_killed_events:
roshan_kills.append((data.dota_time, rk))
for ck in data.courier_killed_events:
courier_kills.append((data.dota_time, ck))
if len(ws.good_hero_units) > 0:
print("Allied Hero Units:")
for uh in ws.good_hero_units:
print(uh)
if len(ws.bad_hero_units) > 0:
print("Enemy Hero Units:")
for uh in ws.bad_hero_units:
print(uh)
anim_count = 0
for k in anim_activities.keys():
anim_count += anim_activities[k]
print('Animation Activities')
for k in sorted(anim_activities.keys()):
print("[%4d] Count: %5d, Perct: %5.2f%%" % (k, anim_activities[k], 100.0*float(anim_activities[k])/float(anim_count)))
ab_count = 0
for k in ab_activities.keys():
ab_count += len(ab_activities[k])
        print('Item/Ability Use Activities')
for k in sorted(ab_activities.keys()):
if k < 5000:
print("[%4d] Count: %5d, Perct: %5.2f%%" % (k, len(ab_activities[k]), 100.0*float(len(ab_activities[k]))/float(ab_count)))
else:
a_name = "<UNKNOWN>"
try:
a_name = ability_data[str(k)]['Name']
except:
pass
print("[%4d] Count: %5d, Perct: %5.2f%% -- %s" % (k, len(ab_activities[k]), 100.0*float(len(ab_activities[k]))/float(ab_count), a_name))
print('\n%d Tree Events' % (len(tree_activities.keys())))
print('\n%d Courier Kills' % (len(courier_kills)))
for ck in courier_kills:
print(ck)
print(str(ws.units[ck[1].killer_unit_handle]))
print('\n%d Roshan Kills' % (len(roshan_kills)))
for rk in roshan_kills:
print(rk)
print(str(ws.units[rk[1].killer_unit_handle]))
def _print(self, s):
for line in str(s).strip().splitlines():
print("[%s] %s" % (self.stats.proc_id, line))
sys.stdout.flush()
def _update_stage(self, stage):
self.stats.update(stage)
self.stats_queue.put(self.stats)
def stats_printer(stats_queue):
"""A thread that consumes stats_queue and prints them every 10 seconds."""
proc_stats = [ProcessStats(i) for i in range(FLAGS.parallel)]
print_time = start_time = time.time()
width = 107
running = True
while running:
print_time += 10
while time.time() < print_time:
try:
s = stats_queue.get(True, print_time - time.time())
if s is None: # Signal to print and exit NOW!
running = False
break
proc_stats[s.proc_id] = s
except queue.Empty:
pass
replay_stats = ReplayStats()
for s in proc_stats:
replay_stats.merge(s.replay_stats)
print((" Summary %0d secs " % (print_time - start_time)).center(width, "="))
print(replay_stats)
print(" Process stats ".center(width, "-"))
print("\n".join(str(s) for s in proc_stats))
print("=" * width)
def replay_queue_filler(replay_queue, replay_list):
"""A thread that fills the replay_queue with replay filenames."""
for replay_path in replay_list:
replay_queue.put(replay_path)
def main(unused_argv):
stats_queue = multiprocessing.Queue()
stats_thread = threading.Thread(target=stats_printer, args=(stats_queue,))
stats_thread.start()
# TODO - get rid of the [1:] at end, currently testing looking only at RADIANT replay
FLAGS.replays = get_available_replays(replay_dir)
#print(FLAGS.replays)
try:
# For some reason buffering everything into a JoinableQueue makes the
# program not exit, so save it into a list then slowly fill it into the
# queue in a separate thread. Grab the list synchronously so we know there
# is work in the queue before the Dota2 processes actually run, otherwise
    # the replay_queue.join below succeeds without doing any work, and exits.
print("Getting replay list:", FLAGS.replays)
replay_list = sorted(FLAGS.replays)
print(len(replay_list), "replays found.\n")
replay_queue = multiprocessing.JoinableQueue(FLAGS.parallel * 10)
replay_queue_thread = threading.Thread(target=replay_queue_filler,
args=(replay_queue, replay_list))
replay_queue_thread.daemon = True
replay_queue_thread.start()
for i in range(FLAGS.parallel):
p = ReplayProcessor(i, replay_queue, stats_queue)
p.daemon = True
p.start()
time.sleep(1) # Stagger startups, otherwise they seem to conflict somehow
replay_queue.join() # Wait for the queue to empty.
except KeyboardInterrupt:
print("Caught KeyboardInterrupt, exiting.")
finally:
stats_queue.put(None) # Tell the stats_thread to print and exit.
stats_thread.join()
if __name__ == "__main__":
app.run(main)
|
TimeLapse.py
|
import picamera
import os
import time
import threading
import subprocess
import Movie
def runAsync(fn):
def run(*k, **kw):
t = threading.Thread(target=fn, args=k, kwargs=kw)
t.start()
return t
return run
class Timer(object):
def __init__(self):
self.oldTime = -1
def startTimer(self):
self.oldTime = round(time.time(), 3)
def stopTimer(self):
self.newTime = round(time.time(), 3)
def getDifference(self):
return self.newTime - self.oldTime
class TimeLapse(object):
def __init__(self, path):
if (path[-1] == '/'):
self.path = path
else:
self.path = path + '/'
        # Change into the working directory
os.chdir(path)
        # Raspberry Pi camera
self.camera = picamera.PiCamera()
self.camera.resolution = (1920, 1080)
self.camera.exposure_mode = 'auto'
self.FPS = None
self.tpsRendu = None
self.tpsFonctionnement = None
#@runAsync
def capturePhoto(self,number):
#self.TIMESTAMP = time.strftime("%d-%m-%Y_%H-%M-%S")
#self.camera.capture('photo_{}.jpg'.format(self.TIMESTAMP))
self.camera.capture('photo_{}.jpg'.format(number))
def startTimeLapse(self, FPS, tpsRendu, tpsFonctionnement, isAsync=True):
self.FPS = FPS
self.tpsRendu = tpsRendu
self.tpsFonctionnement = tpsFonctionnement
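        # Capture interval in seconds: run time (tpsFonctionnement, presumably in
        # hours, converted to seconds) divided by the total number of frames to
        # capture (FPS * tpsRendu, the frame count of the rendered clip).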
self.interval = (tpsFonctionnement*3600) / (FPS* tpsRendu)
for i in range(0, self.FPS * self.tpsRendu):
print("{} / {}".format(i+1, self.FPS * self.tpsRendu))
            # TODO: check if interval < 8s
            self.oldTime = int(round(time.time() * 1000))
            if isAsync:
                t = threading.Thread(target=self.capturePhoto, args=(i,))
                t.start()
            else:
                self.capturePhoto(i)
            self.delay = self.interval - (int(round(time.time() * 1000)) - self.oldTime) * 0.001
if self.delay <= 0:
continue
else:
time.sleep(self.delay)
def setSharpness(self, sharpness):
self.camera.sharpness = sharpness
def setISO(self, ISO):
self.camera.iso = ISO
def makeVideo(self):
pass
def sendVideo(self):
#logging.info("Serveur video demare")
p = subprocess.Popen('nc -l -p 12345 < TimeLapse.mp4', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return p.communicate()
path = "/home/pi/photo2"
timelapse = TimeLapse(path)
timelapse.startTimeLapse(24, 30, 0.25, False)
movie = Movie.MovieHandler(path)
movie.makeVideo()
timelapse.sendVideo()
|
perf.py
|
#!/usr/bin/env python3.9
import multiprocessing as mp
import threading as th
import os
def do():
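    # Note: on POSIX, os.system() returns the raw wait status; 0x6500 is 101 << 8,
    # i.e. the checker process exiting with status code 101.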
if os.system('./checker.py check localhost:4040 1>/dev/null 2>/dev/null') != 0x6500:
        print('perf test failed')
def fef(x):
    print(f'\b\b\b\b\b\b\b\b\b{x * 100 // 1000}%')
threads = []
for i in range(3):
t = th.Thread(target=do, args=())
t.start()
threads.append(t)
for t in threads:
t.join()
with mp.Pool() as pool:
    pool.map(fef, [i for i in range(1000)])
|
supervisor.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training helper that checkpoints models and computes summaries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import time
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import coordinator
from tensorflow.python.training import saver as saver_mod
from tensorflow.python.training import session_manager as session_manager_mod
from tensorflow.python.training import summary_io
from tensorflow.python.training import training_util
class Supervisor(object):
"""A training helper that checkpoints models and computes summaries.
The Supervisor is a small wrapper around a `Coordinator`, a `Saver`,
  and a `SessionManager` that takes care of common needs of TensorFlow
training programs.
#### Use for a single program
```python
with tf.Graph().as_default():
...add operations to the graph...
# Create a Supervisor that will checkpoint the model in '/tmp/mydir'.
sv = Supervisor(logdir='/tmp/mydir')
  # Get a TensorFlow session managed by the supervisor.
with sv.managed_session(FLAGS.master) as sess:
# Use the session to train the graph.
while not sv.should_stop():
sess.run(<my_train_op>)
```
Within the `with sv.managed_session()` block all variables in the graph have
been initialized. In addition, a few services have been started to
checkpoint the model and add summaries to the event log.
  If the program crashes and is restarted, the managed session automatically
  reinitializes variables from the most recent checkpoint.
The supervisor is notified of any exception raised by one of the services.
After an exception is raised, `should_stop()` returns `True`. In that case
the training loop should also stop. This is why the training loop has to
check for `sv.should_stop()`.
Exceptions that indicate that the training inputs have been exhausted,
`tf.errors.OutOfRangeError`, also cause `sv.should_stop()` to return `True`
but are not re-raised from the `with` block: they indicate a normal
termination.
#### Use for multiple replicas
To train with replicas you deploy the same program in a `Cluster`.
One of the tasks must be identified as the *chief*: the task that handles
initialization, checkpoints, summaries, and recovery. The other tasks
depend on the *chief* for these services.
The only change you have to do to the single program code is to indicate
if the program is running as the *chief*.
```python
# Choose a task as the chief. This could be based on server_def.task_index,
# or job_def.name, or job_def.tasks. It's entirely up to the end user.
# But there can be only one *chief*.
is_chief = (server_def.task_index == 0)
server = tf.train.Server(server_def)
with tf.Graph().as_default():
...add operations to the graph...
# Create a Supervisor that uses log directory on a shared file system.
# Indicate if you are the 'chief'
sv = Supervisor(logdir='/shared_directory/...', is_chief=is_chief)
# Get a Session in a TensorFlow server on the cluster.
with sv.managed_session(server.target) as sess:
# Use the session to train the graph.
while not sv.should_stop():
sess.run(<my_train_op>)
```
In the *chief* task, the `Supervisor` works exactly as in the first example
  above. In the other tasks `sv.managed_session()` waits for the Model to have
  been initialized before returning a session to the training code. The
  non-chief tasks depend on the chief task for initializing the model.
If one of the tasks crashes and restarts, `managed_session()`
checks if the Model is initialized. If yes, it just creates a session and
returns it to the training code that proceeds normally. If the model needs
to be initialized, the chief task takes care of reinitializing it; the other
tasks just wait for the model to have been initialized.
NOTE: This modified program still works fine as a single program.
The single program marks itself as the chief.
#### What `master` string to use
Whether you are running on your machine or in the cluster you can use the
following values for the --master flag:
* Specifying `''` requests an in-process session that does not use RPC.
* Specifying `'local'` requests a session that uses the RPC-based
"Master interface" to run TensorFlow programs. See
[`tf.train.Server.create_local_server()`](#Server.create_local_server) for
details.
  * Specifying `'grpc://hostname:port'` requests a session that uses
    the RPC interface to a specific host, and also allows the in-process
    master to access remote TensorFlow workers. Often, it is
    appropriate to pass `server.target` (for some `tf.train.Server`
    named `server`).
#### Advanced use
##### Launching additional services
`managed_session()` launches the Checkpoint and Summary services (threads).
If you need more services to run you can simply launch them in the block
controlled by `managed_session()`.
Example: Start a thread to print losses. We want this thread to run
every 60 seconds, so we launch it with `sv.loop()`.
```python
...
sv = Supervisor(logdir='/tmp/mydir')
with sv.managed_session(FLAGS.master) as sess:
    sv.loop(60, print_loss, (sess,))
while not sv.should_stop():
sess.run(my_train_op)
```
##### Launching fewer services
`managed_session()` launches the "summary" and "checkpoint" threads which use
  either the optional `summary_op` and `saver` passed to the constructor, or
default ones created automatically by the supervisor. If you want to run
your own summary and checkpointing logic, disable these services by passing
`None` to the `summary_op` and `saver` parameters.
Example: Create summaries manually every 100 steps in the chief.
```python
# Create a Supervisor with no automatic summaries.
sv = Supervisor(logdir='/tmp/mydir', is_chief=is_chief, summary_op=None)
# As summary_op was None, managed_session() does not start the
# summary thread.
with sv.managed_session(FLAGS.master) as sess:
for step in xrange(1000000):
if sv.should_stop():
break
if is_chief and step % 100 == 0:
# Create the summary every 100 chief steps.
sv.summary_computed(sess, sess.run(my_summary_op))
else:
# Train normally
sess.run(my_train_op)
```
##### Custom model initialization
`managed_session()` only supports initializing the model by running an
`init_op` or restoring from the latest checkpoint. If you have special
initialization needs, see how to specify a `local_init_op` when creating the
supervisor. You can also use the `SessionManager` directly to create a
session and check if it could be initialized automatically.
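  Example: Pass a custom `local_init_op` at construction time (an illustrative
  sketch; `my_local_init_op` is a placeholder op you build yourself).
  ```python
  sv = Supervisor(logdir='/tmp/mydir', local_init_op=my_local_init_op)
  ```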
@@__init__
@@managed_session
@@prepare_or_wait_for_session
@@start_standard_services
@@start_queue_runners
@@summary_computed
@@stop
@@request_stop
@@should_stop
@@stop_on_exception
@@wait_for_stop
"""
# Value to pass for the 'ready_op', 'init_op', 'summary_op', 'saver',
# and 'global_step' parameters of Supervisor.__init__() to indicate that
# the default behavior should be used.
USE_DEFAULT = 0
def __init__(self, graph=None, ready_op=USE_DEFAULT, is_chief=True,
init_op=USE_DEFAULT, init_feed_dict=None,
local_init_op=USE_DEFAULT, logdir=None,
summary_op=USE_DEFAULT, saver=USE_DEFAULT,
global_step=USE_DEFAULT, save_summaries_secs=120,
save_model_secs=600, recovery_wait_secs=30, stop_grace_secs=120,
checkpoint_basename="model.ckpt", session_manager=None,
summary_writer=USE_DEFAULT, init_fn=None):
"""Create a `Supervisor`.
Args:
graph: A `Graph`. The graph that the model will use. Defaults to the
default `Graph`. The supervisor may add operations to the graph before
creating a session, but the graph should not be modified by the caller
after passing it to the supervisor.
ready_op: 1-D string `Tensor`. This tensor is evaluated by supervisors in
`prepare_or_wait_for_session()` to check if the model is ready to use.
The model is considered ready if it returns an empty array. Defaults to
        the tensor returned from `tf.report_uninitialized_variables()`. If
        `None`, the model is not checked for readiness.
is_chief: If True, create a chief supervisor in charge of initializing
and restoring the model. If False, create a supervisor that relies
on a chief supervisor for inits and restore.
init_op: `Operation`. Used by chief supervisors to initialize the model
when it can not be recovered. Defaults to an `Operation` that
initializes all variables. If `None`, no initialization is done
automatically unless you pass a value for `init_fn`, see below.
init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
This feed dictionary will be used when `init_op` is evaluated.
local_init_op: `Operation`. Used by all supervisors to run initializations
that should run for every new supervisor instance. By default these
are table initializers and initializers for local variables.
If `None`, no further per supervisor-instance initialization is
done automatically.
logdir: A string. Optional path to a directory where to checkpoint the
model and log events for the visualizer. Used by chief supervisors.
The directory will be created if it does not exist.
summary_op: An `Operation` that returns a Summary for the event logs.
Used by chief supervisors if a `logdir` was specified. Defaults to the
operation returned from merge_all_summaries(). If `None`, summaries are
not computed automatically.
saver: A Saver object. Used by chief supervisors if a `logdir` was
        specified. Defaults to the saver returned by Saver().
If `None`, the model is not saved automatically.
global_step: An integer Tensor of size 1 that counts steps. The value
from 'global_step' is used in summaries and checkpoint filenames.
        Defaults to the op named 'global_step' in the graph if it exists, is of
        rank 1, size 1, and of type tf.int32 or tf.int64. If `None` the global
step is not recorded in summaries and checkpoint files. Used by chief
supervisors if a `logdir` was specified.
save_summaries_secs: Number of seconds between the computation of
summaries for the event log. Defaults to 120 seconds. Pass 0 to
disable summaries.
save_model_secs: Number of seconds between the creation of model
checkpoints. Defaults to 600 seconds. Pass 0 to disable checkpoints.
recovery_wait_secs: Number of seconds between checks that the model
is ready. Used by supervisors when waiting for a chief supervisor
to initialize or restore the model. Defaults to 30 seconds.
stop_grace_secs: Grace period, in seconds, given to running threads to
stop when `stop()` is called. Defaults to 120 seconds.
checkpoint_basename: The basename for checkpoint saving.
session_manager: `SessionManager`, which manages Session creation and
recovery. If it is `None`, a default `SessionManager` will be created
with the set of arguments passed in for backwards compatibility.
summary_writer: `SummaryWriter` to use or `USE_DEFAULT`. Can be `None`
to indicate that no summaries should be written.
init_fn: Optional callable used to initialize the model. Called
after the optional `init_op` is called. The callable must accept one
argument, the session being initialized.
Returns:
A `Supervisor`.
"""
# Set default values of arguments.
if graph is None:
graph = ops.get_default_graph()
with graph.as_default():
self._init_ready_op(ready_op=ready_op)
self._init_init_op(init_op=init_op, init_feed_dict=init_feed_dict)
self._init_local_init_op(local_init_op=local_init_op)
self._init_saver(saver=saver)
self._init_summary_op(summary_op=summary_op)
self._init_global_step(global_step=global_step)
self._graph = graph
self._is_chief = is_chief
self._coord = coordinator.Coordinator()
self._started_threads = []
self._recovery_wait_secs = recovery_wait_secs
self._stop_grace_secs = stop_grace_secs
self._init_fn = init_fn
# Set all attributes related to checkpointing and writing events to None.
# Afterwards, set them appropriately for chief supervisors, as these are
# the only supervisors that can write checkpoints and events.
self._logdir = None
self._save_summaries_secs = None
self._save_model_secs = None
self._save_path = None
self._summary_writer = None
if self._is_chief:
self._logdir = logdir
self._save_summaries_secs = save_summaries_secs
self._save_model_secs = save_model_secs
if self._logdir:
self._save_path = os.path.join(self._logdir, checkpoint_basename)
if summary_writer is Supervisor.USE_DEFAULT:
if self._logdir:
self._summary_writer = summary_io.SummaryWriter(self._logdir)
else:
self._summary_writer = summary_writer
self._graph_added_to_summary = False
self._init_session_manager(session_manager=session_manager)
self._verify_setup()
# The graph is not allowed to change anymore.
graph.finalize()
def _init_session_manager(self, session_manager=None):
if session_manager is None:
self._session_manager = session_manager_mod.SessionManager(
local_init_op=self._local_init_op,
ready_op=self._ready_op, graph=self._graph,
recovery_wait_secs=self._recovery_wait_secs)
else:
self._session_manager = session_manager
def _get_first_op_from_collection(self, key):
"""Returns the first `Operation` from a collection.
Args:
key: A string collection key.
Returns:
The first Op found in a collection, or `None` if the collection is empty.
"""
try:
op_list = ops.get_collection(key)
if len(op_list) > 1:
logging.info("Found %d %s operations. Returning the first one.",
len(op_list), key)
if op_list:
return op_list[0]
except LookupError:
pass
return None
def _init_ready_op(self, ready_op=USE_DEFAULT):
"""Initializes ready_op.
Args:
ready_op: `Tensor` to check if the model is initialized.
If it's set to USE_DEFAULT, creates an op that checks all
the variables are initialized.
"""
if ready_op is Supervisor.USE_DEFAULT:
ready_op = self._get_first_op_from_collection(ops.GraphKeys.READY_OP)
if ready_op is None:
ready_op = variables.report_uninitialized_variables()
ops.add_to_collection(ops.GraphKeys.READY_OP, ready_op)
self._ready_op = ready_op
def _init_init_op(self, init_op=USE_DEFAULT, init_feed_dict=None):
"""Initializes init_op.
Args:
init_op: `Operation` to initialize the variables. If set to USE_DEFAULT,
create an op that initializes all variables and tables.
init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
This feed dictionary will be used when `init_op` is evaluated.
"""
if init_op is Supervisor.USE_DEFAULT:
init_op = self._get_first_op_from_collection(ops.GraphKeys.INIT_OP)
if init_op is None:
init_op = variables.initialize_all_variables()
ops.add_to_collection(ops.GraphKeys.INIT_OP, init_op)
self._init_op = init_op
self._init_feed_dict = init_feed_dict
def _init_local_init_op(self, local_init_op=USE_DEFAULT):
"""Initializes local_init_op.
Args:
local_init_op: `Operation` run for every new supervisor instance. If set
to USE_DEFAULT, use the first op from the GraphKeys.LOCAL_INIT_OP
collection. If the collection is empty, create an op that initializes
all local variables and all tables.
"""
if local_init_op is Supervisor.USE_DEFAULT:
local_init_op = self._get_first_op_from_collection(
ops.GraphKeys.LOCAL_INIT_OP)
if local_init_op is None:
op_list = [variables.initialize_local_variables(),
data_flow_ops.initialize_all_tables()]
if op_list:
local_init_op = control_flow_ops.group(*op_list)
ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, local_init_op)
self._local_init_op = local_init_op
def _init_saver(self, saver=USE_DEFAULT):
"""Initializes saver.
Args:
saver: A `Saver` object. If set to USE_DEFAULT, create one that
saves all the variables.
"""
if saver is Supervisor.USE_DEFAULT:
saver = self._get_first_op_from_collection(ops.GraphKeys.SAVERS)
if saver is None and variables.all_variables():
saver = saver_mod.Saver()
ops.add_to_collection(ops.GraphKeys.SAVERS, saver)
self._saver = saver
def _init_summary_op(self, summary_op=USE_DEFAULT):
"""Initilizes summary_op.
Args:
summary_op: An Operation that returns a Summary for the event logs.
If set to USE_DEFAULT, create an op that merges all the summaries.
"""
if summary_op is Supervisor.USE_DEFAULT:
summary_op = self._get_first_op_from_collection(ops.GraphKeys.SUMMARY_OP)
if summary_op is None:
summary_op = logging_ops.merge_all_summaries()
if summary_op is not None:
ops.add_to_collection(ops.GraphKeys.SUMMARY_OP, summary_op)
self._summary_op = summary_op
def _init_global_step(self, global_step=USE_DEFAULT):
"""Initializes global_step.
Args:
global_step: An integer Tensor of size 1 that counts steps. If
set to USE_DEFAULT, creates global_step tensor.
"""
if global_step is Supervisor.USE_DEFAULT:
global_step = self._get_first_op_from_collection(
ops.GraphKeys.GLOBAL_STEP)
if global_step is None:
global_step = self._default_global_step_tensor()
if global_step is not None:
ops.add_to_collection(ops.GraphKeys.GLOBAL_STEP, global_step)
self._global_step = global_step
@property
def is_chief(self):
"""Return True if this is a chief supervisor.
Returns:
A bool.
"""
return self._is_chief
@property
def session_manager(self):
"""Return the SessionManager used by the Supervisor.
Returns:
A SessionManager object.
"""
return self._session_manager
@property
def coord(self):
"""Return the Coordinator used by the Supervisor.
The Coordinator can be useful if you want to run multiple threads
during your training.
Returns:
A Coordinator object.
"""
return self._coord
@property
def init_op(self):
"""Return the Init Op used by the supervisor.
Returns:
An Op or `None`.
"""
return self._init_op
@property
def init_feed_dict(self):
"""Return the feed dictionary used when evaluating the `init_op`.
Returns:
A feed dictionary or `None`.
"""
return self._init_feed_dict
@property
def ready_op(self):
"""Return the Ready Op used by the supervisor.
Returns:
An Op or `None`.
"""
return self._ready_op
@property
def summary_writer(self):
"""Return the SummaryWriter used by the chief supervisor.
Returns:
A SummaryWriter.
"""
return self._summary_writer
@property
def summary_op(self):
"""Return the Summary Tensor used by the chief supervisor.
Returns:
A string Tensor for the summary or `None`.
"""
return self._summary_op
@property
def save_summaries_secs(self):
"""Return the delay between summary computations.
Returns:
A timestamp.
"""
return self._save_summaries_secs
@property
def global_step(self):
"""Return the global_step Tensor used by the supervisor.
Returns:
An integer Tensor for the global_step.
"""
return self._global_step
@property
def saver(self):
"""Return the Saver used by the supervisor.
Returns:
A Saver object.
"""
return self._saver
@property
def save_model_secs(self):
"""Return the delay between checkpoints.
Returns:
A timestamp.
"""
return self._save_model_secs
@property
def save_path(self):
"""Return the save path used by the supervisor.
Returns:
A string.
"""
return self._save_path
def _write_graph(self):
"""Writes graph_def to `logdir` and adds it to summary if applicable."""
assert self._is_chief
if self._logdir:
training_util.write_graph(self._graph.as_graph_def(add_shapes=True),
self._logdir, "graph.pbtxt")
if self._summary_writer and not self._graph_added_to_summary:
self._summary_writer.add_graph(self._graph)
self._graph_added_to_summary = True
def start_standard_services(self, sess):
"""Start the standard services for 'sess'.
This starts services in the background. The services started depend
on the parameters to the constructor and may include:
- A Summary thread computing summaries every save_summaries_secs.
- A Checkpoint thread saving the model every save_model_secs.
      - A StepCounter thread that measures step time.
Args:
sess: A Session.
Returns:
A list of threads that are running the standard services. You can use
the Supervisor's Coordinator to join these threads with:
sv.coord.Join(<list of threads>)
Raises:
RuntimeError: If called with a non-chief Supervisor.
      ValueError: If no `logdir` was passed to the constructor, as the
        services need a log directory.
"""
if not self._is_chief:
raise RuntimeError("Only chief supervisor can start standard services. "
"Because only chief supervisors can write events.")
if not self._logdir:
logging.warning("Standard services need a 'logdir' "
"passed to the SessionManager")
return
if self._global_step is not None and self._summary_writer:
# Only add the session log if we keep track of global step.
# TensorBoard cannot use START message for purging expired events
# if there is no step value.
current_step = training_util.global_step(sess, self._global_step)
self._summary_writer.add_session_log(
SessionLog(status=SessionLog.START),
current_step)
threads = []
if self._save_summaries_secs and self._summary_writer:
if self._summary_op is not None:
threads.append(SVSummaryThread(self, sess))
if self._global_step is not None:
threads.append(SVStepCounterThread(self, sess))
if self.saver and self._save_model_secs:
threads.append(SVTimerCheckpointThread(self, sess))
for t in threads:
t.start()
self._started_threads.extend(threads)
return threads
def prepare_or_wait_for_session(self, master="", config=None,
wait_for_checkpoint=False,
max_wait_secs=7200,
start_standard_services=True):
"""Make sure the model is ready to be used.
Create a session on 'master', recovering or initializing the model as
needed, or wait for a session to be ready. If running as the chief
and `start_standard_service` is set to True, also call the session
manager to start the standard services.
Args:
master: name of the TensorFlow master to use. See the `tf.Session`
constructor for how this is interpreted.
config: Optional ConfigProto proto used to configure the session,
which is passed as-is to create the session.
wait_for_checkpoint: Whether we should wait for the availability of a
checkpoint before creating Session. Defaults to False.
max_wait_secs: Maximum time to wait for the session to become available.
start_standard_services: Whether to start the standard services and the
queue runners.
Returns:
A Session object that can be used to drive the model.
"""
# For users who recreate the session with prepare_or_wait_for_session(), we
# need to clear the coordinator's stop_event so that threads managed by the
# coordinator can run.
self._coord.clear_stop()
if self._summary_writer:
self._summary_writer.reopen()
if self._is_chief:
sess = self._session_manager.prepare_session(
master, init_op=self.init_op, saver=self.saver,
checkpoint_dir=self._logdir, wait_for_checkpoint=wait_for_checkpoint,
max_wait_secs=max_wait_secs, config=config,
init_feed_dict=self._init_feed_dict, init_fn=self._init_fn)
self._write_graph()
if start_standard_services:
self.start_standard_services(sess)
else:
sess = self._session_manager.wait_for_session(master,
config=config,
max_wait_secs=max_wait_secs)
if start_standard_services:
self.start_queue_runners(sess)
return sess
def start_queue_runners(self, sess, queue_runners=None):
"""Start threads for `QueueRunners`.
Note that the queue runners collected in the graph key `QUEUE_RUNNERS`
are already started automatically when you create a session with the
supervisor, so unless you have non-collected queue runners to start
    you do not need to call this explicitly.
Args:
sess: A `Session`.
queue_runners: A list of `QueueRunners`. If not specified, we'll use the
list of queue runners gathered in the graph under the key
`GraphKeys.QUEUE_RUNNERS`.
Returns:
The list of threads started for the `QueueRunners`.
"""
if queue_runners is None:
queue_runners = self._graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS)
threads = []
for qr in queue_runners:
threads.extend(qr.create_threads(sess, coord=self._coord, daemon=True,
start=True))
self._started_threads.extend(threads)
return threads
def loop(self, timer_interval_secs, target, args=None, kwargs=None):
"""Start a LooperThread that calls a function periodically.
If `timer_interval_secs` is None the thread calls `target(*args, **kwargs)`
repeatedly. Otherwise it calls it every `timer_interval_secs`
seconds. The thread terminates when a stop is requested.
The started thread is added to the list of threads managed by the supervisor
so it does not need to be passed to the `stop()` method.
Args:
timer_interval_secs: Number. Time boundaries at which to call `target`.
target: A callable object.
args: Optional arguments to pass to `target` when calling it.
kwargs: Optional keyword arguments to pass to `target` when calling it.
Returns:
The started thread.
"""
looper = coordinator.LooperThread(self._coord, timer_interval_secs,
target=target, args=args, kwargs=kwargs)
looper.start()
self._started_threads.append(looper)
return looper
def stop(self, threads=None, close_summary_writer=True):
"""Stop the services and the coordinator.
This does not close the session.
Args:
threads: Optional list of threads to join with the coordinator. If
`None`, defaults to the threads running the standard services, the
threads started for `QueueRunners`, and the threads started by the
`loop()` method. To wait on additional threads, pass the
list in this parameter.
close_summary_writer: Whether to close the `summary_writer`. Defaults to
`True` if the summary writer was created by the supervisor, `False`
otherwise.
"""
join_threads = []
join_threads.extend(self._started_threads)
if threads is not None:
join_threads.extend(threads)
self._coord.request_stop()
try:
# coord.join() re-raises the first reported exception; the "finally"
# block ensures that we clean up whether or not an exception was
# reported.
self._coord.join(join_threads,
stop_grace_period_secs=self._stop_grace_secs)
finally:
# Close the writer last, in case one of the running threads was using it.
if close_summary_writer and self._summary_writer:
# Stop messages are not logged with event.step,
# since the session may have already terminated.
self._summary_writer.add_session_log(SessionLog(status=SessionLog.STOP))
self._summary_writer.close()
self._graph_added_to_summary = False
self._started_threads = []
def request_stop(self, ex=None):
"""Request that the coordinator stop the threads.
See `Coordinator.request_stop()`.
Args:
ex: Optional `Exception`, or Python `exc_info` tuple as returned by
`sys.exc_info()`. If this is the first call to `request_stop()` the
corresponding exception is recorded and re-raised from `join()`.
"""
self._coord.request_stop(ex=ex)
def should_stop(self):
"""Check if the coordinator was told to stop.
See `Coordinator.should_stop()`.
Returns:
True if the coordinator was told to stop, False otherwise.
"""
return self._coord.should_stop()
def stop_on_exception(self):
"""Context handler to stop the supervisor when an exception is raised.
See `Coordinator.stop_on_exception()`.
Returns:
A context handler.
"""
return self._coord.stop_on_exception()
def wait_for_stop(self):
"""Block waiting for the coordinator to stop."""
self._coord.wait_for_stop()
def summary_computed(self, sess, summary, global_step=None):
"""Indicate that a summary was computed.
Args:
sess: A `Session` object.
summary: A Summary proto, or a string holding a serialized summary proto.
global_step: Int. global step this summary is associated with. If `None`,
it will try to fetch the current step.
Raises:
TypeError: if 'summary' is not a Summary proto or a string.
RuntimeError: if the Supervisor was created without a `logdir`.
"""
if not self._summary_writer:
raise RuntimeError("Writing a summary requires a summary writer.")
if global_step is None and self.global_step is not None:
global_step = training_util.global_step(sess, self.global_step)
self._summary_writer.add_summary(summary, global_step)
def _default_global_step_tensor(self):
"""Returns the global_step from the default graph.
Returns:
The global step `Tensor` or `None`.
"""
try:
gs = ops.get_default_graph().get_tensor_by_name("global_step:0")
if gs.dtype.base_dtype in [dtypes.int32, dtypes.int64]:
return gs
else:
logging.warning("Found 'global_step' is not an int type: %s", gs.dtype)
return None
except KeyError:
return None
def _verify_setup(self):
"""Check that all is good.
Raises:
ValueError: If something is not good.
"""
# Not running as chief means that replicas are used.
# In that case all Variables must have their device set.
if not self._is_chief:
for op in self._graph.get_operations():
if op.type == "Variable" and not op.device:
raise ValueError("When using replicas, all Variables must have "
"their device set: %s" % op)
# pylint: disable=g-doc-return-or-yield,broad-except
@contextlib.contextmanager
def managed_session(self, master="", config=None,
start_standard_services=True,
close_summary_writer=True):
"""Returns a context manager for a managed session.
This context manager creates and automatically recovers a session. It
optionally starts the standard services that handle checkpoints and
summaries. It monitors exceptions raised from the `with` block or from the
services and stops the supervisor as needed.
The context manager is typically used as follows:
```python
def train():
sv = tf.train.Supervisor(...)
with sv.managed_session(<master>) as sess:
for step in xrange(..):
if sv.should_stop():
break
sess.run(<my training op>)
...do other things needed at each training step...
```
An exception raised from the `with` block or one of the service threads is
raised again when the block exits. This is done after stopping all threads
and closing the session. For example, an `AbortedError` exception, raised
in case of preemption of one of the workers in a distributed model, is
raised again when the block exits.
If you want to retry the training loop in case of preemption you can do it
as follows:
```python
def main(...):
      while True:
try:
train()
        except tf.errors.AbortedError:
pass
```
As a special case, exceptions used for control flow, such as
`OutOfRangeError` which reports that input queues are exhausted, are not
raised again from the `with` block: they indicate a clean termination of
the training loop and are considered normal termination.
Args:
master: name of the TensorFlow master to use. See the `tf.Session`
constructor for how this is interpreted.
config: Optional `ConfigProto` proto used to configure the session.
Passed as-is to create the session.
start_standard_services: Whether to start the standard services,
such as checkpoint, summary and step counter.
close_summary_writer: Whether to close the summary writer when
closing the session. Defaults to True.
Returns:
A context manager that yields a `Session` restored from the latest
    checkpoint or initialized from scratch if no checkpoint exists. The
session is closed when the `with` block exits.
"""
try:
sess = self.prepare_or_wait_for_session(
master=master, config=config,
start_standard_services=start_standard_services)
yield sess
except Exception as e:
self.request_stop(e)
finally:
try:
# Request all the threads to stop and wait for them to do so. Any
# exception raised by the threads is raised again from stop().
# Passing stop_grace_period_secs is for blocked enqueue/dequeue
# threads which are not checking for `should_stop()`. They
# will be stopped when we close the session further down.
self.stop(close_summary_writer=close_summary_writer)
finally:
# Close the session to finish up all pending calls. We do not care
# about exceptions raised when closing. This takes care of
# blocked enqueue/dequeue calls.
try:
sess.close()
except Exception:
# Silently ignore exceptions raised by close().
pass
# pylint: enable=g-doc-return-or-yield,broad-except
class SVSummaryThread(coordinator.LooperThread):
"""A thread to save summaries on a timer."""
def __init__(self, sv, sess):
"""Create a SVSummaryThread.
Args:
sv: A `Supervisor`.
sess: A `Session`.
"""
super(SVSummaryThread, self).__init__(sv.coord, sv.save_summaries_secs)
self._sv = sv
self._sess = sess
def run_loop(self):
if self._sv.global_step is not None:
summary_strs, global_step = self._sess.run([self._sv.summary_op,
self._sv.global_step])
else:
summary_strs = self._sess.run(self._sv.summary_op)
global_step = None
if self._sv.summary_writer:
self._sv.summary_writer.add_summary(summary_strs, global_step)
class SVStepCounterThread(coordinator.LooperThread):
"""Threads to count steps and measure their duration."""
def __init__(self, sv, sess):
"""Create a `SVStepCounterThread`.
Args:
sv: A `Supervisor`.
sess: A `Session`.
"""
super(SVStepCounterThread, self).__init__(sv.coord, sv.save_summaries_secs)
self._sv = sv
self._sess = sess
self._last_time = 0.0
self._last_step = 0
self._summary_tag = "%s/sec" % self._sv.global_step.op.name
def start_loop(self):
self._last_time = time.time()
self._last_step = training_util.global_step(
self._sess, self._sv.global_step)
def run_loop(self):
# Count the steps.
current_step = training_util.global_step(self._sess, self._sv.global_step)
added_steps = current_step - self._last_step
self._last_step = current_step
# Measure the elapsed time.
current_time = time.time()
elapsed_time = current_time - self._last_time
self._last_time = current_time
# Reports the number of steps done per second
steps_per_sec = added_steps / elapsed_time
summary = Summary(value=[Summary.Value(tag=self._summary_tag,
simple_value=steps_per_sec)])
if self._sv.summary_writer:
self._sv.summary_writer.add_summary(summary, current_step)
logging.log_first_n(logging.INFO, "%s: %g", 10,
self._summary_tag, steps_per_sec)
class SVTimerCheckpointThread(coordinator.LooperThread):
"""A thread to checkpoint on a timer."""
def __init__(self, sv, sess):
"""Create a `SVTimerCheckpointThread`.
Args:
sv: A `Supervisor`.
sess: A `Session`.
"""
super(SVTimerCheckpointThread, self).__init__(sv.coord, sv.save_model_secs)
self._sv = sv
self._sess = sess
def run_loop(self):
self._sv.saver.save(self._sess, self._sv.save_path,
global_step=self._sv.global_step)
if self._sv.summary_writer and self._sv.global_step is not None:
current_step = training_util.global_step(self._sess, self._sv.global_step)
self._sv.summary_writer.add_session_log(
SessionLog(status=SessionLog.CHECKPOINT,
checkpoint_path=self._sv.save_path),
current_step)
# TODO(sherrym): All non-PEP8 compliant names will be deprecated shortly.
setattr(Supervisor, "PrepareSession", Supervisor.prepare_or_wait_for_session)
setattr(Supervisor, "StartQueueRunners", Supervisor.start_queue_runners)
setattr(Supervisor, "StartStandardServices", Supervisor.start_standard_services)
setattr(Supervisor, "Stop", Supervisor.stop)
setattr(Supervisor, "RequestStop", Supervisor.request_stop)
setattr(Supervisor, "Loop", Supervisor.loop)
setattr(Supervisor, "ShouldStop", Supervisor.should_stop)
setattr(Supervisor, "StopOnException", Supervisor.stop_on_exception)
setattr(Supervisor, "WaitForStop", Supervisor.wait_for_stop)
setattr(Supervisor, "SummaryComputed", Supervisor.summary_computed)
|
timing.py
|
import datetime
import time
import threading
import state
def update_time():
while True:
now = datetime.datetime.now()
state.set_time(now.hour, now.minute, now.second)
time.sleep(1)
t = threading.Thread(target=update_time)
t.start()
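# Hedged sketch of the `state` module this script assumes: it only needs to expose
# a set_time(hour, minute, second) function. The real module is not shown here.
#
# # state.py
# import threading
#
# _lock = threading.Lock()
# _time = (0, 0, 0)
#
# def set_time(hour, minute, second):
#     global _time
#     with _lock:
#         _time = (hour, minute, second)
#
# def get_time():
#     with _lock:
#         return _time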
|
tetris.py
|
#!/usr/bin/env python3
from zencad import *
import threading
import time
import random
import types
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
import threading
w, h = 10, 20
sz = 10
FIELDS = []
body = box(sz*w+2*sz, sz, sz*h+2*sz, center=True) - \
box(sz*w, sz, sz*h, center=True)
class FalledBody:
def __init__(self, indexes, color):
self.indexes = indexes
self.color = color
self.curcoord = (w//2, h - self.get_height())
def get_height(self):
maxh = 0
for p in self.indexes:
if p[1] > maxh:
maxh = p[1]
return maxh + 1
def draw(self):
for p in self.indexes:
coords = (p[0] + self.curcoord[0], p[1] + self.curcoord[1])
FIELDS[coords[1]][coords[0]].cube.set_color(self.color)
FIELDS[coords[1]][coords[0]].cube.hide(False)
FIELDS[coords[1]][coords[0]].type = 1
def hide(self):
for p in self.indexes:
coords = (p[0] + self.curcoord[0], p[1] + self.curcoord[1])
FIELDS[coords[1]][coords[0]].cube.hide(True)
FIELDS[coords[1]][coords[0]].type = 0
def fall(self):
self.curcoord = (self.curcoord[0], self.curcoord[1]-1)
def check_valid(self, newcoord, indexes):
for p in indexes:
coords = (p[0] + newcoord[0], p[1] + newcoord[1])
if coords[0] < 0 or coords[0] >= w:
return False
if coords[1] < 0 or coords[1] >= h:
return False
if FIELDS[coords[1]][coords[0]].type == 2:
return False
return True
def can_fall(self):
for p in self.indexes:
coords = (p[0] + self.curcoord[0], p[1] + self.curcoord[1] - 1)
if coords[1] < 0 or FIELDS[coords[1]][coords[0]].type == 2:
return False
return True
def keep(self):
for p in self.indexes:
coords = (p[0] + self.curcoord[0], p[1] + self.curcoord[1])
FIELDS[coords[1]][coords[0]].type = 2
def up_handle(self):
newindexes = [(-ind[1], ind[0]) for ind in self.indexes]
valid = self.check_valid(self.curcoord, newindexes)
if valid:
self.hide()
self.indexes = newindexes
self.draw()
def down_handle(self):
validcoord = self.curcoord
itcoord = self.curcoord
while self.check_valid(itcoord, self.indexes):
validcoord = itcoord
itcoord = (itcoord[0], itcoord[1]-1)
self.hide()
self.curcoord = validcoord
self.draw()
def xmove_handle(self, add):
newcoord = (self.curcoord[0] + add, self.curcoord[1])
valid = self.check_valid(newcoord, self.indexes)
if valid:
self.hide()
self.curcoord = newcoord
self.draw()
def right_handle(self):
self.xmove_handle(+1)
def left_handle(self):
self.xmove_handle(-1)
class Field:
def __init__(self, i, j):
self.shp = box(sz, center=True)
self.shp = self.shp.translate(-w*sz/2+sz/2,
0, -h*sz/2+sz/2).translate(sz*j, 0, sz*i)
self.coords = i, j
self.cube = disp(self.shp)
self.cube.hide(True)
self.type = 0
def copy(self, oth):
if oth.type == 0:
self.cube.hide(True)
else:
self.cube.hide(False)
self.cube.set_color(oth.cube.color())
self.type = oth.type
def clean():
for i in range(h):
istype2 = 0
for j in range(w):
if FIELDS[i][j].type == 2:
istype2 += 1
if istype2 == w:
for ii in range(i, h-1):
for j in range(w):
FIELDS[ii][j].copy(FIELDS[ii+1][j])
for j in range(w):
FIELDS[h-1][j].type = 0
FIELDS[h-1][j].cube.hide(True)
for i in range(h):
FIELDS.append([])
for j in range(w):
FIELDS[-1].append(Field(i, j))
def make_falled_body():
choices = [
([(0, 1), (0, 0), (0, -1), (-1, -1)], Color.blue),
([(0, 2), (0, 1), (0, 0), (0, -1)], Color.cian),
([(1, 1), (0, 1), (1, 0), (0, 0)], Color.yellow),
([(0, 2), (0, 1), (0, 0), (1, 0)], Color.orange),
([(-1, 1), (0, 1), (0, 0), (1, 0)], Color.red),
([(-1, 0), (0, 0), (1, 0), (0, 1)], Color.magenta),
([(-1, 0), (0, 0), (0, 1), (1, 1)], Color.green)
]
tpl = random.choice(choices)
return FalledBody(*tpl)
def redraw():
zencad.gui.application.DISPLAY_WIDGET.view.redraw()
lock = QMutex()
falled_body = None
def timer_loop(wdg):
global falled_body
lock.lock()
if falled_body is None:
falled_body = make_falled_body()
falled_body.draw()
else:
if falled_body.can_fall():
falled_body.hide()
falled_body.fall()
falled_body.draw()
else:
falled_body.keep()
falled_body = None
clean()
lock.unlock()
# redraw()
def animate_settings(wdg, animate_thread):
def keyPressEvent(self, ev):
if falled_body is None:
return
lock.lock()
if ev.key() == Qt.Key_Up:
falled_body.up_handle()
elif ev.key() == Qt.Key_Down:
falled_body.down_handle()
elif ev.key() == Qt.Key_Right:
falled_body.right_handle()
elif ev.key() == Qt.Key_Left:
falled_body.left_handle()
clean()
wdg.redraw()
lock.unlock()
animate_thread.set_animate_step(0.75)
raw_keyPressEvent = wdg.keyPressEvent
wdg.keyPressEvent = types.MethodType(keyPressEvent, wdg)
#thr = threading.Thread(target=timer_loop)
# thr.start()
disp(body)
show(animate=timer_loop, preanimate=animate_settings)
|
shareingvar.py
|
# -*- coding:utf-8 -*-
import threading
import time
"""
多个线程方法中可以共用全局变量.
查看work1线程对全局变量的修改,
在work2中能否查看修改后的结果.
"""
"""
# 定义全局变量
num = 0
# work1
def work1():
# 声明num是一个全局变量
global num
for i in range(10):
num += 1
print("work1--------",num)
# work2
def work2():
# num可以在多个线程中共享.
print("work2=======",num)
if __name__=="__main__":
# 创建2个子线程
t1 = threading.Thread(target=work1)
t2 = threading.Thread(target=work2)
# 启动线程
t1.start()
t2.start()
# 判断线程数量不等于1,一直循环睡眠,保证print时,在t1和t2执行结束后,在print主线程.
while len(threading.enumerate()) != 1:
time.sleep(1)
# 在t1和t2,线程执行完毕后再打印num
print("main-------------",num)
"""
"""
多线程--共享全局变量问题
1.问题:
假设两个线程t1和t2都要对全局变量num(默认是0)进行加1运算,t1和t2都各对num加10次,num的最终结果为20.
但是由于是多线程同时操作,有可能出现下列情况:
1) 在num=0时,t1取得num=0,此时系统把t1调度为"sleeping"状态,把t2转换为"running"状态,t2也获得num=0
2) 然后t2对得到的值进行加1并赋给num,获得num=1.
3) 然后系统又把t2调度为"sleeping",把t2转为"running",线程t1又把它之前得到的0加1后赋值给num.
4) 这样导致虽然t1和t2都对num加1,但结果仍然是num=1
"""
# Define a global variable
num = 0
# work1
def work1():
    # Declare num as a global variable
    global num
    for i in range(1000000):
        num += 1
    print("work1--------",num)
# work2
def work2():
    # Declare num as a global variable
    global num
    for i in range(1000000):
        num += 1
    # num is shared across threads.
    print("work2=======",num)
if __name__=="__main__":
# 创建2个子线程
t1 = threading.Thread(target=work1)
t2 = threading.Thread(target=work2)
# 启动线程
t1.start()
# 优先让t1线程优先执行,t1执行完毕后,t2才能执行.
t1.join()
t2.start()
# 判断线程数量不等于1,一直循环睡眠,保证print时,在t1和t2执行结束后,在print主线程.
while len(threading.enumerate()) != 1:
time.sleep(1)
# 在t1和t2,线程执行完毕后再打印num
print("main-------------",num)
# 结论:当多个线程修改同一个资源时,会出现资源竞争,导致计算结果有误.
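# A hedged sketch of the usual fix: guard the shared counter with a mutex so the
# read-modify-write on num becomes atomic. Illustration only, not part of the script above.
#
# import threading
#
# counter_lock = threading.Lock()
#
# def work_safe():
#     global num
#     for i in range(1000000):
#         with counter_lock:
#             num += 1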
|
cloud.py
|
"""
Object Store plugin for Cloud storage.
"""
import logging
import multiprocessing
import os
import shutil
import subprocess
import threading
import time
from datetime import datetime
from galaxy.exceptions import ObjectInvalid, ObjectNotFound
from galaxy.util import (
directory_hash_id,
safe_relpath,
string_as_bool,
umask_fix_perms,
)
from galaxy.util.sleeper import Sleeper
from ..objectstore import convert_bytes, ObjectStore
try:
from cloudbridge.cloud.factory import CloudProviderFactory, ProviderList
from cloudbridge.cloud.interfaces.exceptions import InvalidNameException
except ImportError:
CloudProviderFactory = None
ProviderList = None
log = logging.getLogger(__name__)
NO_CLOUDBRIDGE_ERROR_MESSAGE = (
"Cloud ObjectStore is configured, but no CloudBridge dependency available."
"Please install CloudBridge or modify ObjectStore configuration."
)
class Cloud(ObjectStore):
"""
    Object store that stores objects as items in cloud storage. A local
cache exists that is used as an intermediate location for files between
Galaxy and the cloud storage.
"""
def __init__(self, config, config_xml):
super(Cloud, self).__init__(config)
if CloudProviderFactory is None:
raise Exception(NO_CLOUDBRIDGE_ERROR_MESSAGE)
self.staging_path = self.config.file_path
self.transfer_progress = 0
self._parse_config_xml(config_xml)
self._configure_connection()
self.bucket = self._get_bucket(self.bucket)
# Clean cache only if value is set in galaxy.ini
if self.cache_size != -1:
# Convert GBs to bytes for comparison
self.cache_size = self.cache_size * 1073741824
# Helper for interruptable sleep
self.sleeper = Sleeper()
self.cache_monitor_thread = threading.Thread(target=self.__cache_monitor)
self.cache_monitor_thread.start()
log.info("Cache cleaner manager started")
# Test if 'axel' is available for parallel download and pull the key into cache
try:
subprocess.call('axel')
self.use_axel = True
except OSError:
self.use_axel = False
def _configure_connection(self):
log.debug("Configuring AWS-S3 Connection")
aws_config = {'aws_access_key': self.access_key,
'aws_secret_key': self.secret_key}
self.conn = CloudProviderFactory().create_provider(ProviderList.AWS, aws_config)
def _parse_config_xml(self, config_xml):
try:
a_xml = config_xml.findall('auth')[0]
self.access_key = a_xml.get('access_key')
self.secret_key = a_xml.get('secret_key')
b_xml = config_xml.findall('bucket')[0]
self.bucket = b_xml.get('name')
self.max_chunk_size = int(b_xml.get('max_chunk_size', 250))
cn_xml = config_xml.findall('connection')
if not cn_xml:
cn_xml = {}
else:
cn_xml = cn_xml[0]
self.host = cn_xml.get('host', None)
self.port = int(cn_xml.get('port', 6000))
self.multipart = string_as_bool(cn_xml.get('multipart', 'True'))
self.is_secure = string_as_bool(cn_xml.get('is_secure', 'True'))
self.conn_path = cn_xml.get('conn_path', '/')
c_xml = config_xml.findall('cache')[0]
self.cache_size = float(c_xml.get('size', -1))
self.staging_path = c_xml.get('path', self.config.object_store_cache_path)
for d_xml in config_xml.findall('extra_dir'):
self.extra_dirs[d_xml.get('type')] = d_xml.get('path')
log.debug("Object cache dir: %s", self.staging_path)
log.debug(" job work dir: %s", self.extra_dirs['job_work'])
except Exception:
# Toss it back up after logging, we can't continue loading at this point.
log.exception("Malformed ObjectStore Configuration XML -- unable to continue")
raise
def __cache_monitor(self):
time.sleep(2) # Wait for things to load before starting the monitor
while self.running:
total_size = 0
# Is this going to be too expensive of an operation to be done frequently?
file_list = []
for dirpath, _, filenames in os.walk(self.staging_path):
for filename in filenames:
filepath = os.path.join(dirpath, filename)
file_size = os.path.getsize(filepath)
total_size += file_size
# Get the time given file was last accessed
last_access_time = time.localtime(os.stat(filepath)[7])
# Compose a tuple of the access time and the file path
file_tuple = last_access_time, filepath, file_size
file_list.append(file_tuple)
# Sort the file list (based on access time)
file_list.sort()
# Initiate cleaning once within 10% of the defined cache size?
cache_limit = self.cache_size * 0.9
if total_size > cache_limit:
log.info("Initiating cache cleaning: current cache size: %s; clean until smaller than: %s",
convert_bytes(total_size), convert_bytes(cache_limit))
# How much to delete? If simply deleting up to the cache-10% limit,
# is likely to be deleting frequently and may run the risk of hitting
# the limit - maybe delete additional #%?
# For now, delete enough to leave at least 10% of the total cache free
delete_this_much = total_size - cache_limit
self.__clean_cache(file_list, delete_this_much)
self.sleeper.sleep(30) # Test cache size every 30 seconds?
def __clean_cache(self, file_list, delete_this_much):
""" Keep deleting files from the file_list until the size of the deleted
files is greater than the value in delete_this_much parameter.
:type file_list: list
:param file_list: List of candidate files that can be deleted. This method
will start deleting files from the beginning of the list so the list
            should be sorted accordingly. The list must contain 3-element tuples,
            positioned as follows: position 0 holds the file's last-accessed timestamp
            (as time.struct_time), position 1 holds the file path, and position 2 holds
            the file size (e.g., (<access time>, '/mnt/data/dataset_1.dat', 472394))
:type delete_this_much: int
:param delete_this_much: Total size of files, in bytes, that should be deleted.
"""
# Keep deleting datasets from file_list until deleted_amount does not
# exceed delete_this_much; start deleting from the front of the file list,
# which assumes the oldest files come first on the list.
deleted_amount = 0
        # Each entry is an (access_time, filepath, file_size) tuple built above.
        for entry in file_list:
            if deleted_amount < delete_this_much:
                deleted_amount += entry[2]
                os.remove(entry[1])
# Debugging code for printing deleted files' stats
# folder, file_name = os.path.split(f[1])
# file_date = time.strftime("%m/%d/%y %H:%M:%S", f[0])
# log.debug("%s. %-25s %s, size %s (deleted %s/%s)" \
# % (i, file_name, convert_bytes(f[2]), file_date, \
# convert_bytes(deleted_amount), convert_bytes(delete_this_much)))
else:
log.debug("Cache cleaning done. Total space freed: %s", convert_bytes(deleted_amount))
return
def _get_bucket(self, bucket_name):
try:
bucket = self.conn.storage.buckets.get(bucket_name)
if bucket is None:
log.debug("Bucket not found, creating a bucket with handle '%s'", bucket_name)
bucket = self.conn.storage.buckets.create(bucket_name)
log.debug("Using cloud ObjectStore with bucket '%s'", bucket.name)
return bucket
except InvalidNameException:
log.exception("Invalid bucket name -- unable to continue")
raise
except Exception:
# These two generic exceptions will be replaced by specific exceptions
# once proper exceptions are exposed by CloudBridge.
log.exception("Could not get bucket '{}'".format(bucket_name))
raise Exception
def _fix_permissions(self, rel_path):
""" Set permissions on rel_path"""
for basedir, _, files in os.walk(rel_path):
umask_fix_perms(basedir, self.config.umask, 0o777, self.config.gid)
for filename in files:
path = os.path.join(basedir, filename)
# Ignore symlinks
if os.path.islink(path):
continue
umask_fix_perms(path, self.config.umask, 0o666, self.config.gid)
def _construct_path(self, obj, base_dir=None, dir_only=None, extra_dir=None, extra_dir_at_root=False, alt_name=None,
obj_dir=False, **kwargs):
# extra_dir should never be constructed from provided data but just
        # make sure there are no shenanigans afoot
if extra_dir and extra_dir != os.path.normpath(extra_dir):
log.warning('extra_dir is not normalized: %s', extra_dir)
raise ObjectInvalid("The requested object is invalid")
# ensure that any parent directory references in alt_name would not
# result in a path not contained in the directory path constructed here
if alt_name:
if not safe_relpath(alt_name):
log.warning('alt_name would locate path outside dir: %s', alt_name)
raise ObjectInvalid("The requested object is invalid")
# alt_name can contain parent directory references, but S3 will not
# follow them, so if they are valid we normalize them out
alt_name = os.path.normpath(alt_name)
rel_path = os.path.join(*directory_hash_id(obj.id))
if extra_dir is not None:
if extra_dir_at_root:
rel_path = os.path.join(extra_dir, rel_path)
else:
rel_path = os.path.join(rel_path, extra_dir)
# for JOB_WORK directory
if obj_dir:
rel_path = os.path.join(rel_path, str(obj.id))
if base_dir:
base = self.extra_dirs.get(base_dir)
return os.path.join(base, rel_path)
# S3 folders are marked by having trailing '/' so add it now
rel_path = '%s/' % rel_path
if not dir_only:
rel_path = os.path.join(rel_path, alt_name if alt_name else "dataset_%s.dat" % obj.id)
return rel_path
def _get_cache_path(self, rel_path):
return os.path.abspath(os.path.join(self.staging_path, rel_path))
def _get_transfer_progress(self):
return self.transfer_progress
def _get_size_in_cloud(self, rel_path):
try:
obj = self.bucket.objects.get(rel_path)
if obj:
return obj.size
except Exception:
log.exception("Could not get size of key '%s' from S3", rel_path)
return -1
def _key_exists(self, rel_path):
exists = False
try:
# A hackish way of testing if the rel_path is a folder vs a file
is_dir = rel_path[-1] == '/'
if is_dir:
keyresult = self.bucket.objects.list(prefix=rel_path)
if len(keyresult) > 0:
exists = True
else:
exists = False
else:
exists = True if self.bucket.objects.get(rel_path) is not None else False
except Exception:
log.exception("Trouble checking existence of S3 key '%s'", rel_path)
return False
if rel_path[0] == '/':
raise
return exists
def _in_cache(self, rel_path):
""" Check if the given dataset is in the local cache and return True if so. """
# log.debug("------ Checking cache for rel_path %s" % rel_path)
cache_path = self._get_cache_path(rel_path)
return os.path.exists(cache_path)
def _pull_into_cache(self, rel_path):
# Ensure the cache directory structure exists (e.g., dataset_#_files/)
rel_path_dir = os.path.dirname(rel_path)
if not os.path.exists(self._get_cache_path(rel_path_dir)):
os.makedirs(self._get_cache_path(rel_path_dir))
# Now pull in the file
file_ok = self._download(rel_path)
self._fix_permissions(self._get_cache_path(rel_path_dir))
return file_ok
def _transfer_cb(self, complete, total):
self.transfer_progress += 10
def _download(self, rel_path):
try:
log.debug("Pulling key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path))
key = self.bucket.objects.get(rel_path)
# Test if cache is large enough to hold the new file
if self.cache_size > 0 and key.size > self.cache_size:
log.critical("File %s is larger (%s) than the cache size (%s). Cannot download.",
rel_path, key.size, self.cache_size)
return False
if self.use_axel:
log.debug("Parallel pulled key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path))
ncores = multiprocessing.cpu_count()
url = key.generate_url(7200)
ret_code = subprocess.call("axel -a -n %s '%s'" % (ncores, url))
if ret_code == 0:
return True
else:
log.debug("Pulled key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path))
self.transfer_progress = 0 # Reset transfer progress counter
with open(self._get_cache_path(rel_path), "w+") as downloaded_file_handle:
key.save_content(downloaded_file_handle)
return True
except Exception:
log.exception("Problem downloading key '%s' from S3 bucket '%s'", rel_path, self.bucket.name)
return False
def _push_to_os(self, rel_path, source_file=None, from_string=None):
"""
Push the file pointed to by ``rel_path`` to the object store naming the key
``rel_path``. If ``source_file`` is provided, push that file instead while
still using ``rel_path`` as the key name.
If ``from_string`` is provided, set contents of the file to the value of
the string.
"""
try:
source_file = source_file if source_file else self._get_cache_path(rel_path)
if os.path.exists(source_file):
if os.path.getsize(source_file) == 0 and (self.bucket.objects.get(rel_path) is not None):
log.debug("Wanted to push file '%s' to S3 key '%s' but its size is 0; skipping.", source_file,
rel_path)
return True
if from_string:
if not self.bucket.objects.get(rel_path):
created_obj = self.bucket.objects.create(rel_path)
created_obj.upload(source_file)
else:
self.bucket.objects.get(rel_path).upload(source_file)
log.debug("Pushed data from string '%s' to key '%s'", from_string, rel_path)
else:
start_time = datetime.now()
log.debug("Pushing cache file '%s' of size %s bytes to key '%s'", source_file,
os.path.getsize(source_file), rel_path)
self.transfer_progress = 0 # Reset transfer progress counter
if not self.bucket.objects.get(rel_path):
created_obj = self.bucket.objects.create(rel_path)
created_obj.upload_from_file(source_file)
else:
self.bucket.objects.get(rel_path).upload_from_file(source_file)
end_time = datetime.now()
log.debug("Pushed cache file '%s' to key '%s' (%s bytes transfered in %s sec)",
source_file, rel_path, os.path.getsize(source_file), end_time - start_time)
return True
else:
log.error("Tried updating key '%s' from source file '%s', but source file does not exist.",
rel_path, source_file)
except Exception:
log.exception("Trouble pushing S3 key '%s' from file '%s'", rel_path, source_file)
return False
def file_ready(self, obj, **kwargs):
"""
A helper method that checks if a file corresponding to a dataset is
ready and available to be used. Return ``True`` if so, ``False`` otherwise.
"""
rel_path = self._construct_path(obj, **kwargs)
# Make sure the size in cache is available in its entirety
if self._in_cache(rel_path):
if os.path.getsize(self._get_cache_path(rel_path)) == self._get_size_in_cloud(rel_path):
return True
log.debug("Waiting for dataset %s to transfer from OS: %s/%s", rel_path,
os.path.getsize(self._get_cache_path(rel_path)), self._get_size_in_cloud(rel_path))
return False
def exists(self, obj, **kwargs):
in_cache = False
rel_path = self._construct_path(obj, **kwargs)
# Check cache
if self._in_cache(rel_path):
in_cache = True
# Check cloud
in_cloud = self._key_exists(rel_path)
# log.debug("~~~~~~ File '%s' exists in cache: %s; in s3: %s" % (rel_path, in_cache, in_s3))
# dir_only does not get synced so shortcut the decision
dir_only = kwargs.get('dir_only', False)
base_dir = kwargs.get('base_dir', None)
if dir_only:
if in_cache or in_cloud:
return True
# for JOB_WORK directory
elif base_dir:
if not os.path.exists(rel_path):
os.makedirs(rel_path)
return True
else:
return False
# TODO: Sync should probably not be done here. Add this to an async upload stack?
if in_cache and not in_cloud:
self._push_to_os(rel_path, source_file=self._get_cache_path(rel_path))
return True
elif in_cloud:
return True
else:
return False
def create(self, obj, **kwargs):
if not self.exists(obj, **kwargs):
# Pull out locally used fields
extra_dir = kwargs.get('extra_dir', None)
extra_dir_at_root = kwargs.get('extra_dir_at_root', False)
dir_only = kwargs.get('dir_only', False)
alt_name = kwargs.get('alt_name', None)
# Construct hashed path
rel_path = os.path.join(*directory_hash_id(obj.id))
# Optionally append extra_dir
if extra_dir is not None:
if extra_dir_at_root:
rel_path = os.path.join(extra_dir, rel_path)
else:
rel_path = os.path.join(rel_path, extra_dir)
# Create given directory in cache
cache_dir = os.path.join(self.staging_path, rel_path)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
if not dir_only:
rel_path = os.path.join(rel_path, alt_name if alt_name else "dataset_%s.dat" % obj.id)
open(os.path.join(self.staging_path, rel_path), 'w').close()
self._push_to_os(rel_path, from_string='')
def empty(self, obj, **kwargs):
if self.exists(obj, **kwargs):
return bool(self.size(obj, **kwargs) > 0)
else:
raise ObjectNotFound('objectstore.empty, object does not exist: %s, kwargs: %s'
% (str(obj), str(kwargs)))
def size(self, obj, **kwargs):
rel_path = self._construct_path(obj, **kwargs)
if self._in_cache(rel_path):
try:
return os.path.getsize(self._get_cache_path(rel_path))
except OSError as ex:
log.info("Could not get size of file '%s' in local cache, will try cloud. Error: %s", rel_path, ex)
elif self.exists(obj, **kwargs):
return self._get_size_in_cloud(rel_path)
log.warning("Did not find dataset '%s', returning 0 for size", rel_path)
return 0
def delete(self, obj, entire_dir=False, **kwargs):
rel_path = self._construct_path(obj, **kwargs)
extra_dir = kwargs.get('extra_dir', None)
base_dir = kwargs.get('base_dir', None)
dir_only = kwargs.get('dir_only', False)
obj_dir = kwargs.get('obj_dir', False)
try:
            # Remove temporary data in JOB_WORK directory
if base_dir and dir_only and obj_dir:
shutil.rmtree(os.path.abspath(rel_path))
return True
# For the case of extra_files, because we don't have a reference to
# individual files/keys we need to remove the entire directory structure
# with all the files in it. This is easy for the local file system,
            # but requires iterating through each individual key in S3 and deleting it.
if entire_dir and extra_dir:
shutil.rmtree(self._get_cache_path(rel_path))
results = self.bucket.objects.list(prefix=rel_path)
for key in results:
log.debug("Deleting key %s", key.name)
key.delete()
return True
else:
# Delete from cache first
os.unlink(self._get_cache_path(rel_path))
# Delete from S3 as well
if self._key_exists(rel_path):
key = self.bucket.objects.get(rel_path)
log.debug("Deleting key %s", key.name)
key.delete()
return True
except Exception:
log.exception("Could not delete key '%s' from cloud", rel_path)
except OSError:
log.exception('%s delete error', self.get_filename(obj, **kwargs))
return False
def get_data(self, obj, start=0, count=-1, **kwargs):
rel_path = self._construct_path(obj, **kwargs)
# Check cache first and get file if not there
if not self._in_cache(rel_path):
self._pull_into_cache(rel_path)
# Read the file content from cache
data_file = open(self._get_cache_path(rel_path), 'r')
data_file.seek(start)
content = data_file.read(count)
data_file.close()
return content
def get_filename(self, obj, **kwargs):
base_dir = kwargs.get('base_dir', None)
dir_only = kwargs.get('dir_only', False)
obj_dir = kwargs.get('obj_dir', False)
rel_path = self._construct_path(obj, **kwargs)
# for JOB_WORK directory
if base_dir and dir_only and obj_dir:
return os.path.abspath(rel_path)
cache_path = self._get_cache_path(rel_path)
# S3 does not recognize directories as files so cannot check if those exist.
# So, if checking dir only, ensure given dir exists in cache and return
# the expected cache path.
# dir_only = kwargs.get('dir_only', False)
# if dir_only:
# if not os.path.exists(cache_path):
# os.makedirs(cache_path)
# return cache_path
# Check if the file exists in the cache first
if self._in_cache(rel_path):
return cache_path
# Check if the file exists in persistent storage and, if it does, pull it into cache
elif self.exists(obj, **kwargs):
if dir_only: # Directories do not get pulled into cache
return cache_path
else:
if self._pull_into_cache(rel_path):
return cache_path
# For the case of retrieving a directory only, return the expected path
# even if it does not exist.
# if dir_only:
# return cache_path
raise ObjectNotFound('objectstore.get_filename, no cache_path: %s, kwargs: %s'
% (str(obj), str(kwargs)))
# return cache_path # Until the upload tool does not explicitly create the dataset, return expected path
def update_from_file(self, obj, file_name=None, create=False, **kwargs):
if create:
self.create(obj, **kwargs)
if self.exists(obj, **kwargs):
rel_path = self._construct_path(obj, **kwargs)
# Chose whether to use the dataset file itself or an alternate file
if file_name:
source_file = os.path.abspath(file_name)
# Copy into cache
cache_file = self._get_cache_path(rel_path)
try:
if source_file != cache_file:
# FIXME? Should this be a `move`?
shutil.copy2(source_file, cache_file)
self._fix_permissions(cache_file)
except OSError:
log.exception("Trouble copying source file '%s' to cache '%s'", source_file, cache_file)
else:
source_file = self._get_cache_path(rel_path)
# Update the file on cloud
self._push_to_os(rel_path, source_file)
else:
raise ObjectNotFound('objectstore.update_from_file, object does not exist: %s, kwargs: %s'
% (str(obj), str(kwargs)))
def get_object_url(self, obj, **kwargs):
if self.exists(obj, **kwargs):
rel_path = self._construct_path(obj, **kwargs)
try:
key = self.bucket.objects.get(rel_path)
return key.generate_url(expires_in=86400) # 24hrs
except Exception:
log.exception("Trouble generating URL for dataset '%s'", rel_path)
return None
def get_store_usage_percent(self):
return 0.0
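# Hedged sketch of the object store XML that _parse_config_xml() above expects,
# inferred from its attribute lookups; tag layout and values are illustrative only.
#
# <object_store type="cloud">
#     <auth access_key="..." secret_key="..." />
#     <bucket name="my-galaxy-bucket" max_chunk_size="250" />
#     <connection host="" port="6000" multipart="True" is_secure="True" conn_path="/" />
#     <cache path="database/object_store_cache" size="100" />
#     <extra_dir type="job_work" path="database/job_working_directory_cloud" />
#     <extra_dir type="temp" path="database/tmp_cloud" />
# </object_store>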
|
bert_service_simple.py
|
# BERT model inference code for deployment on Huawei ModelArts, v1
# by clz 2021-12-16
# Reference: https://support.huaweicloud.com/engineers-modelarts/modelarts_23_0301.html
import logging
import threading
import numpy as np
import tensorflow as tf
import os
from PIL import Image
from model_service.tfserving_model_service import TfServingBaseService
from .bert_tokenization import FullTokenizer
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class bert_service(TfServingBaseService):
def __init__(self, model_name, model_path):
self.model_name = model_name
self.model_path = model_path
self.model = None
self.predict = None
        self.max_length = 256  # truncate input text to this length
        # A label file could be loaded here and used in the post-processing function.
        # label.txt would sit in OBS and in the model package directory.
        # with open(os.path.join(self.model_path, 'label.txt')) as f:
        #     self.label = json.load(f)
        # Load the saved_model in a non-blocking thread to avoid a startup timeout.
        thread = threading.Thread(target=self.load_model)
        thread.start()
        # tokenizer
        self.tokenizer = FullTokenizer(vocab_file=os.path.join("vocab.txt"))
def load_model(self):
"""
        Load the model.
"""
self.model = tf.keras.models.load_model(self.model_path)
self.predict = self.model.predict
def _preprocess(self, data):
        # Truncate overly long inputs.
        max_seq_len_fix = self.max_length - 2  # reserve room for the [CLS] and [SEP] tokens
processed_sentences = []
for k, v in data.items():
for file_name, text_content in v.items():
if len(text_content) > max_seq_len_fix:
processed_sentences.append(text_content[:max_seq_len_fix])
else:
processed_sentences.append(text_content)
        # Tokenize each text and convert tokens to vocabulary ids.
pred_tokens = map(self.tokenizer.tokenize, processed_sentences)
pred_tokens = map(lambda tok: ["[CLS]"] + tok + ["[SEP]"], pred_tokens)
pred_token_ids = list(map(self.tokenizer.convert_tokens_to_ids, pred_tokens))
pred_token_ids = map(lambda tids: tids + [0] * (max_seq_len_fix - len(tids)), pred_token_ids)
pred_token_ids = np.array(list(pred_token_ids))
return pred_token_ids
def _inference(self, data):
return self.predict(data)
def _postprocess(self, data):
return {
"result": data[:,1] # predict的第二列是1的概率
}
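# Hedged illustration of the payload shape _preprocess() above expects: a dict of
# input names to {file_name: text_content} dicts. The key names are placeholders;
# only the nesting matters.
#
# example_data = {
#     "input_txt": {
#         "sample_0.txt": "This is the first document to classify.",
#         "sample_1.txt": "And this is the second one.",
#     }
# }
# token_ids = service._preprocess(example_data)          # -> np.ndarray of token ids
# probs = service._postprocess(service._inference(token_ids))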
|
78_customer_order_details_new.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import csv
import xmlrpc.client as xmlrpclib
import multiprocessing as mp
from scriptconfig import URL, DB, UID, PSW, WORKERS
# ==================================== SALE ORDER LINE ====================================
def update_sale_order_line(pid, data_pool, error_ids, product_ids, uom_ids, tax_code_ids, tax_ids, order_tax_code_ids):
sock = xmlrpclib.ServerProxy(URL, allow_none=True)
while data_pool:
try:
data = data_pool.pop()
order_id = data.get('order_id')
order_lines = sock.execute(DB, UID, PSW, 'sale.order.line', 'search_read', [('order_id','=',order_id)], ['product_id', 'product_uom'])
order_line_ids = {rec['product_id'][0]: rec['product_uom'][0] for rec in order_lines}
for line in data.get('lines', []):
product_id = product_ids.get(line.get('ITEM-CODE', '').strip())
code = str(line.get('ORDERING-UOM')).strip() + '_' + str(line.get('QTY-IN-ORDERING-UM')).strip()
code = uom_ids.get(code)
if not product_id and not code:
                    error_ids.append(line)  # record the unmatched line for later review
continue
if product_id in order_line_ids and code == order_line_ids[product_id]:
continue
vals = {
'order_id': order_id,
'product_id': product_id,
'name': line.get('ITEM-DESC').strip(),
'price_unit': line.get('PRICE-DISCOUNTED').strip(),
'product_uom_qty': line.get('QTY-ORDERED').strip(),
'is_last': False,
'working_cost':line.get('TRUE-FIXED-COST').strip(),
'lst_price':line.get('PRICE-DISCOUNTED').strip(),
'product_uom': code,
}
tax = ''
if line.get('TAX-CODE') == '1':
tax = tax_ids.get(float(tax_code_ids.get(order_tax_code_ids.get(line.get('ORDER-NO')))))
vals['tax_id'] = [(6, 0, [tax])]
res = sock.execute(DB, UID, PSW, 'sale.order.line', 'create', vals)
print(pid, 'Create - SALE ORDER LINE', order_id , res)
except Exception as e:
print(e)
def sync_sale_order_lines():
manager = mp.Manager()
data_pool = manager.list()
error_ids = manager.list()
process_Q = []
sock = xmlrpclib.ServerProxy(URL, allow_none=True)
res = sock.execute(DB, UID, PSW, 'sale.order', 'search_read', [], ['name'])
order_ids = {rec['name'] : rec['id'] for rec in res}
fp = open('files/omlordr2.csv', 'r')
csv_reader = csv.DictReader(fp)
order_lines = {}
for vals in csv_reader:
ord_no = vals.get('ORDER-NO', '').strip()
order_id = order_ids.get(ord_no)
if order_id:
lines = order_lines.setdefault(order_id, [])
lines.append(vals)
fp.close()
data_pool = manager.list([{'order_id': order, 'lines': order_lines[order]} for order in order_lines])
res = sock.execute(DB, UID, PSW, 'product.product', 'search_read', ['|', ('active', '=', False), ('active', '=', True)], ['default_code'])
products = {rec['default_code']: rec['id'] for rec in res}
product_ids = manager.dict(products)
uoms = sock.execute(DB, UID, PSW, 'uom.uom', 'search_read', [], ['id','name'])
uom_ids = {uom['name']:uom['id'] for uom in uoms}
taxes = sock.execute(DB, UID, PSW, 'account.tax', 'search_read', [('name', '!=', 'Sale Tax' )], ['id','amount'])
tax1 = {float(tax['amount']): tax['id'] for tax in taxes}
tax_ids = manager.dict(tax1)
fp1 = open('files/fiscal.csv', 'r')
csv_reader1 = csv.DictReader(fp1)
tax_codes={}
for line in csv_reader1:
tax_codes[line.get('TAX-AUTH-CODE').strip()] = line.get('TAX-AUTH-PCT')
tax_code_ids = manager.dict(tax_codes)
fp2 = open('files/omlordr1.csv', 'r')
csv_reader2 = csv.DictReader(fp2)
    order_tax_codes = {}
    for line in csv_reader2:
        order_tax_codes[line.get('ORDER-NO').strip()] = line.get('TAX-AUTH-CODE')
    order_tax_code_ids = manager.dict(order_tax_codes)
res = None
order_ids = None
taxes = None
tax1 = None
order_lines = None
products = None
for i in range(WORKERS):
pid = "Worker-%d" % (i + 1)
worker = mp.Process(name=pid, target=update_sale_order_line, args=(pid, data_pool, error_ids, product_ids, uom_ids, tax_code_ids, tax_ids, order_tax_code_ids))
process_Q.append(worker)
worker.start()
for worker in process_Q:
worker.join()
if __name__ == "__main__":
# SALE ORDER LINE
sync_sale_order_lines()
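# Hedged sketch of the `scriptconfig` module imported at the top of this script;
# every value below is a placeholder for a local Odoo instance, not part of the original.
#
# # scriptconfig.py
# URL = "http://localhost:8069/xmlrpc/2/object"   # XML-RPC object endpoint (placeholder)
# DB = "odoo_db"
# UID = 2            # user id returned by authenticate() on the common endpoint
# PSW = "password"
# WORKERS = 4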
|
MTSv_prune.py
|
import subprocess, shutil
from io import BytesIO
import argparse
import fnmatch
import os, datetime
from ftplib import FTP, all_errors
from time import sleep
import gzip
import tarfile
from multiprocessing import Pool, Queue, Process, Manager, RLock
import pickle, json
lock = RLock()
def serialization(gi2tx,fasta_path, txdump_path):
tx2gi = {}
gi2ind = {}
print("Parsing taxid to Unique ID")
for i in gi2tx:
try:
with gzip.open(i) as file:
for line in file:
line = line.strip().split(b'\t')
try:
tx2gi[line[1].strip()].append(line[0].strip())
except:
tx2gi[line[1].strip()] = [line[0].strip()]
except:
with open(i, "rb") as file:
for line in file:
line = line.strip().split(b'\t')
try:
tx2gi[line[1].strip()].append(line[0].strip())
except:
tx2gi[line[1].strip()] = [line[0].strip()]
print("Indexing Fasta")
with open(fasta_path, "rb") as file:
for line in file:
if chr(line[0]) == ">":
gi = line.split(b' ',2)[1].split(b':')[1].strip()
gi2ind[gi] = file.tell()-len(line)
print("Mapping taxid to index")
for key in tx2gi.keys():
temp = []
for gi in tx2gi[key]:
try:
temp.append(gi2ind[gi])
except KeyError:
continue
tx2gi[key] = temp
tx_ids, child2parent = taxids2name(txdump_path)
print("Serializing")
out = os.path.abspath(fasta_path).rsplit(".",1)[0]+".p"
with open(out, "wb") as file:
pickle.dump([tx_ids, child2parent,tx2gi], file)
def acc_serialization(acc2tx,fasta_path, txdump_path, overwrite=True):
tx2gi = {}
acc2ind = {}
# print("Indexing Fasta")
with open(fasta_path, "rb") as file:
for line in file:
if chr(line[0]) == ">":
acc = line.split(b' ', 1)[0][1:].split(b'.')[0].strip()
acc2ind[acc] = file.tell() - len(line)
# print("Parsing taxid to Unique ID")
for i in acc2tx:
try:
with gzip.open(i) as file:
for line in file:
line = line.strip().split(b'\t')
try:
acc2ind[line[0].strip()]
except KeyError:
continue
try:
tx2gi[line[2].strip()].append(line[0].strip())
except:
tx2gi[line[2].strip()] = [line[0].strip()]
except:
with open(i, "rb") as file:
for line in file:
line = line.strip().split(b'\t')
try:
acc2ind[line[0].strip()]
except KeyError:
continue
try:
tx2gi[line[2].strip()].append(line[0].strip())
except:
tx2gi[line[2].strip()] = [line[0].strip()]
# print("Mapping taxid to index")
for key in tx2gi.keys():
temp = []
for gi in tx2gi[key]:
try:
temp.append(acc2ind[gi])
except KeyError:
continue
tx2gi[key] = temp
tx_ids, child2parent = taxids2name(txdump_path)
# print("Serializing")
out = os.path.abspath(fasta_path).rsplit(".",1)[0]+".p"
with open(out, "wb") as file:
pickle.dump([tx_ids, child2parent,tx2gi], file)
return out
def deserialization(pickle_path):
with open(pickle_path, "rb") as file:
return pickle.load(file)
# Perform a binary search of a sorted list, returning the found index or -1 if not found
def binary_search(a, x, lo=0, hi=None): # can't use a to specify default for hi
hi = len(a)-1
lo = 0
mid = (hi-lo)//2
while True:
try:
if lo >= hi:
if a[mid] == x: return mid
else: return -1
if a[mid] == x:
return mid
elif a[mid] < x:
lo = mid +1
else:
hi = mid -1
mid = ((hi - lo) // 2)+lo
except:
print(x,lo,hi,mid)
break
def taxids2name(dump_path):
dump = tarfile.open(dump_path, "r:gz")
tax_ids = {}
names = dump.extractfile(dump.getmember('nodes.dmp')).read().split(b'\t|\n')
roller = {}
for l in names:
if len(l) == 0:
continue
tokens = l.split(b'\t|\t')
c_taxid = tokens[0].decode().strip()
p_taxid = tokens[1].decode().strip()
roller[tokens[0]] = (tokens[1],tokens[2])
try:
tax_ids[c_taxid]
except KeyError:
tax_ids[c_taxid] = []
try:
if p_taxid != c_taxid:
tax_ids[p_taxid].append(c_taxid)
except:
tax_ids[p_taxid] = [c_taxid]
return tax_ids, roller
# Performs breadth first search of NCBI tree returning a set of leaf tax nodes with offsets in the fasta DB
def get_tree(tx_ids, tx, terminals):
# try:
# terminals[tx.encode()]
nodes = {tx}
# except KeyError:
# nodes = set()
try:
cur_level = tx_ids[tx]
except KeyError:
return nodes
temp = []
while True:
for tx in cur_level:
try:
terminals[tx.encode()]
nodes.add(tx)
except KeyError:
pass
finally:
temp += tx_ids[tx]
if not temp:
break
cur_level = temp
temp = []
return nodes
# This is used to roll an NCBI taxonomic ID up to a desired rank
def roll_up(tx_id, rank, c2p, prev_roll=None):
if tx_id:
try:
if prev_roll:
return prev_roll[tx_id]
else:
raise KeyError
except KeyError:
try:
cur = tx_id
# nxt = c2p[cur][0]
while cur != b'1':
# print(cur)
if c2p[cur][1] == rank:
if prev_roll:
prev_roll[tx_id] = cur
# print(tx_id)
return cur
else:
cur = c2p[cur][0]
if tx_id and cur == b'1':
prev_roll[tx_id] = tx_id
except TypeError:
pass
except KeyError:
return None
return tx_id
# else:
# return
# This is the function that extracts the taxonomic sequences of interest from the parsed flat-file fasta.
# The process is:
# Parse inclusive and exclusive taxids
# Deserialize the previously built NCBI tree structure and byte-offset information
# Call get_tree (a breadth-first search) to collect all children of each taxid; repeat for the taxids to exclude
# Use a set difference to get the desired leaf taxids
# Open the fasta DB and the output file, reading each sequence from the start of its header; rank roll-up occurs at runtime
def clip(in_tx,ru_rank, ex_tx, name, min,maximum,fasta_path, pickle_path, chunk_size = 2,debug=False):
if debug:
return os.path.abspath(name)
# try:
chunk_size *= 1000000000
if len(in_tx) ==1:
temp = []
try:
with open(in_tx[0], "r") as file:
for line in file:
temp.append(line.strip())
in_tx = temp
except FileNotFoundError:
pass
if ex_tx and len(ex_tx) ==1:
temp = []
try:
with open(ex_tx[0],"r") as file:
for line in file:
temp.append(line.strip())
ex_tx = temp
except FileNotFoundError:
pass
tx_ids, child2parent, positions = deserialization(pickle_path)
taxons = set()
# print("Getting TaxIds")
for i in in_tx:
taxons = taxons.union(get_tree(tx_ids, i, positions))
if ex_tx:
# print("\tPruning")
for i in ex_tx:
taxons = taxons.difference(get_tree(tx_ids, i, positions))
if not name:
name = "_".join(in_tx)
if ex_tx:
name += "_not_{0}".format("_".join(ex_tx))
name += "_seqs.fasta"
if ru_rank:
ru_rank = ru_rank.encode()
seq = bytearray()
line_count = 0
bins = first_fit(list(taxons), positions, os.path.getsize(fasta_path), chunk_size)
chunk = 0
ret_list = []
with open(fasta_path, "rb") as fasta:
for bin in bins:
srt_taxons = list(sorted(bin))
while srt_taxons:
name = "_{}.".format(chunk).join(name.rsplit(".",1))
# byte_count = 0
out = open(name,"wb")
# with open(name, "wb") as out:
while srt_taxons:
tx = srt_taxons.pop()
if tx:
tx = tx.encode().strip()
try:
positions[tx].sort()
except KeyError:
continue
if ru_rank:
rr_tx = roll_up(tx, ru_rank, child2parent)
else:
rr_tx = tx
if not rr_tx:
continue
positions[tx].sort()
for off in positions[tx]:
fasta.seek(off)
header = fasta.readline()
line = fasta.readline()
while line and chr(line[0]) != ">":
seq += line
line = fasta.readline()
line_count += len(line.strip())
if line_count >= min and float(line_count)<= maximum:
gi = header.split(b' ',2)[1].split(b':')[1]
out.write(b'>'+gi+b'-'+rr_tx+b'\n')
out.write(seq)
line_count = 0
seq = bytearray()
if out.tell() > chunk_size:
chunk += 1
name = "{}.fasta".format(name.rsplit("_", 1)[0])
name = "_{}.".format(chunk).join(name.rsplit(".", 1))
out.close()
out = open(name, "wb")
chunk += 1
out.close()
ret_list.append(os.path.abspath(name))
name = "{}.fasta".format(name.rsplit("_", 1)[0])
return ret_list
def first_fit(taxa_list, positions, file_size, threshold):
pos = []
for key in positions.keys():
pos += positions[key]
pos.sort()
tax2bytes = {x:0 for x in taxa_list}
# print(positions)
for tax in taxa_list:
ind = -1
try:
positions[tax.encode()]
except KeyError:
continue
for offset in positions[tax.encode()]:
# print(tax)
ind = binary_search(pos, offset)
if ind != -1:
try:
tax2bytes[tax] += pos[ind+1] - pos[ind]
except IndexError:
tax2bytes[tax] += file_size - pos[ind]
sorted_taxon = sorted(tax2bytes.items(), key=lambda kv: kv[1], reverse=True)
bins = [set()]
bins_size = [0]
found = False
for item in sorted_taxon:
for ind, bin in enumerate(bins):
if bins_size[ind]+item[1] <= threshold or bins_size[ind] == 0:
bin.add(item[0])
bins_size[ind] += item[1]
found = True
break
if not found:
bins.append({item[0]})
bins_size.append(item[1])
found = False
return bins
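# Hedged worked example of the first-fit packing above (not part of the pipeline):
# three taxa whose sequences span roughly 6, 3 and 2 bytes, packed into bins of at
# most 8 bytes. Sorted largest-first, '9606' fills the first bin to 6 bytes, '562'
# does not fit (6 + 3 > 8) and opens a second bin, and '10090' then fits back into
# the first bin, giving [{'9606', '10090'}, {'562'}].
#
# _example_bins = first_fit(
#     ['9606', '562', '10090'],
#     {b'9606': [0], b'562': [6], b'10090': [9]},
#     file_size=11,
#     threshold=8,
# )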
# Writes a new JSON config file or updates an existing one
def gen_json(configuration, args):
if args.update and args.configuration_path:
with open(args.configuration_path, "w") as file:
json.dump(configuration, file, sort_keys=True)
else:
with open(args.output + ".json", "w") as file:
json.dump(configuration, file, sort_keys=True, indent=4)
# Returns a dictionary of the config file
def parse_json(args):
try:
with open(args.configuration_path, "r") as file:
return json.load(file)
except:
with open(args, "r") as file:
return json.load(file)
# Parses command-line arguments into a dictionary of arguments, or adds to an existing one. Can be serialized
def arg_unwrappers(args, arguments=None):
if not arguments:
arguments = {}
if args.serialization_path:
arguments['serialization-path'] = os.path.abspath( args.serialization_path)
try:
arguments['serialization-path']
except KeyError:
arguments['serialization-path'] = ""
if args.fasta_path:
arguments['fasta-path'] = os.path.abspath(args.fasta_path)
try:
arguments['fasta-path']
except KeyError:
arguments['fasta-path'] = ""
if args.minimum_length:
arguments['minimum-length'] = args.minimum_length
try:
arguments['minimum-length']
except KeyError:
arguments['minimum-length'] = 0
if args.maximum_length:
arguments['maximum-length'] = args.maximum_length
try:
arguments['maximum-length']
except KeyError:
arguments['maximum-length'] = float('inf')
if args.taxdump_path:
arguments['taxdump-path'] = os.path.abspath(args.taxdump_path)
try:
arguments['taxdump-path']
except:
arguments['taxdump-path'] = ""
# if args.gi_to_taxid_paths:
# temp = []
# for i in args.gi_to_taxid_path:
# temp.append(os.path.abspath(i))
#
# arguments['gi-to-taxid-paths'] = temp
# try:
# arguments['gi-to-taxid-paths']
# except:
# arguments['gi-to-taxid-paths'] = []
if args.acc_to_taxid_paths:
temp = []
for i in args.acc_to_taxid_paths:
temp.append(os.path.abspath(i))
arguments['acc-to-taxid-paths'] = temp
try:
arguments['acc-to-taxid-paths']
except:
arguments['acc-to-taxid-paths'] = []
if args.rollup_rank:
arguments['rollup-rank'] = args.rollup_rank.lower()
try:
arguments['rollup-rank']
except:
arguments['rollup-rank'] = ""
return arguments
def oneclickjson(path):
arguments = []
for fh in os.listdir(os.path.join(path, "artifacts")):
if fnmatch.fnmatch(fh, "*.fas"):
arguments.append({})
fh = fh.split(".")[0]
arguments[-1]['serialization-path'] = os.path.abspath( os.path.join(path, "artifacts","{0}.p".format(fh)))
arguments[-1]['fasta-path'] = os.path.abspath(os.path.join(path, "artifacts", "{0}.fas".format(fh)))
arguments[-1]['minimum-length'] = 0
arguments[-1]['maximum-length'] = float('inf')
arguments[-1]['taxdump-path'] = os.path.abspath(os.path.join(path, "artifacts","taxdump.tar.gz"))
arguments[-1]['acc-to-taxid-paths'] = []
for fp in os.listdir(os.path.join(path,"artifacts")):
if fnmatch.fnmatch(fp, "*accession2taxid*"):
arguments[-1]['acc-to-taxid-paths'].append(os.path.abspath(os.path.join(path,"artifacts",fp)))
arguments[-1]['rollup-rank'] = "species"
with open(os.path.abspath(os.path.join(path,"artifacts", "{0}.json".format(fh))), "w") as file:
json.dump(arguments[-1], file, sort_keys=True, indent=4)
return arguments
def build_db( flat_list_in_fp, fasta_out_fp, keyword_out_fp, source_out_fp, thread_count, gi_to_word):
start_dir = os.getcwd()
script_dir = os.path.dirname(os.path.realpath(__file__))
flat_list_in_fp = os.path.abspath(flat_list_in_fp)
os.chdir(script_dir)
command_one = "g++ -std=c++11 -pthread -static-libstdc++ taxidtool.cpp -o db_builder"
command_two = "./db_builder {0} {1}.tmp {2}.tsv {3}".format(flat_list_in_fp, fasta_out_fp, fasta_out_fp.rsplit(".",1)[0],
thread_count)
command_three = "mtsv-db-build {0} {1}.tmp {2}.tsv {3}".format(flat_list_in_fp, fasta_out_fp, fasta_out_fp.rsplit(".",1)[0],
thread_count)
if not os.path.isfile(fasta_out_fp+".tmp") and not os.path.isfile(fasta_out_fp):
try:
subprocess.run(command_three.split())
except:
try:
subprocess.run(command_two.split())
except:
subprocess.run(command_one.split())
subprocess.run(command_two.split())
count = 0
if os.path.isfile(fasta_out_fp+".tmp") and not os.path.isfile(fasta_out_fp):
with open("{0}.tmp".format(fasta_out_fp), "rb") as start_file:
with open(fasta_out_fp+".temp", "wb") as end_file:
for line in start_file:
if chr(line[0]) == ">":
line = line.split(b'GI:')
if len(line) == 2:
line[1] = line[1].split(b' ',1)[1].strip()+b'\n'
line[0] = line[0].strip()
header = line
else:
header = line[0].split(b' ', 1)
end_file.write(" GI:{0} ".format(count).encode().join(header))
count += 1
else:
end_file.write(line)
os.rename(fasta_out_fp+".temp", fasta_out_fp)
os.remove("{0}.tmp".format(fasta_out_fp))
os.chdir(start_dir)
def ftp_dl(x):
# raw_path = path
ftp_path = "ftp.ncbi.nlm.nih.gov"
connection = FTP(ftp_path, timeout=10000)
connection.login()
while x:
try:
fp_path = x.pop()
except IndexError:
break
raw_path = fp_path[0]
fp_path = fp_path[1]
try:
outpath = os.path.join(raw_path, os.path.basename(fp_path))
if os.path.isfile(outpath.strip(".gz")):
continue
file_size = connection.size(fp_path)
if not os.path.isfile(outpath) or file_size != os.path.getsize(outpath):
with open(outpath, "wb") as out_file:
connection.retrbinary("RETR {0}".format(fp_path), out_file.write)
else:
connection.sendcmd('NOOP')
except all_errors as e:
connection.close()
# print(e)
connection = FTP(ftp_path, timeout=10000)
connection.login()
except:
with lock:
                with open(os.path.join(os.path.dirname(raw_path.rstrip("/")),
                                       "artifacts", "ftp_dl.log"), "a") as out:
out.write("{0}\n".format(fp_path))
try:
connection.quit()
except:
pass
def pull(path="", thread_count=1, databases={"genbank"}, excluded=frozenset()):
if not path:
string_date = datetime.datetime.now().strftime("%b-%d-%Y")
else:
string_date = path
raw_path = "{}/".format(string_date)
config_path = "artifacts/exclude.json"
ftp_path = "ftp.ncbi.nlm.nih.gov"
genbank_dir ="/genbank/"
assembly_gb = "/genomes/genbank/"
assembly_rs = "/genomes/refseq/"
assembly_gb_summary = "assembly_summary_genbank.txt"
assembly_rs_summary = "assembly_summary_refseq.txt"
exclude = "suffix_exclude"
try:
os.makedirs(os.path.join(raw_path,"artifacts/"))
except:
pass
try:
os.makedirs(os.path.join(raw_path,"flat_files/"))
except:
pass
try:
configurations = parse_json(os.path.join(raw_path, config_path))
except:
configurations = {}
configurations[exclude] = ["gbenv", "gbsyn",
"gbchg" , "gbcon" , "gbnew",
"gbrel", "gbtsa"]
with open(os.path.join(raw_path,config_path), "w") as file:
json.dump(configurations, file, sort_keys=True)
configurations = parse_json(os.path.join(raw_path, config_path))
    exclude = set(configurations[exclude]) | set(excluded)
connection = FTP(ftp_path)
connection.login()
gb_download = []
to_download = []
level2path = {}
if "genbank" in databases:
for fp in connection.nlst(genbank_dir):
base_fp = os.path.basename(fp)
for ind,char in enumerate(base_fp):
try:
# print(ind)
int(char)
if base_fp[:ind] in exclude:
break
else:
gb_download.append(fp)
to_download.append((os.path.join(raw_path,"flat_files/"),fp))
break
except:
continue
level2path[b'genbank'] = gb_download
reader = BytesIO()
connection.retrbinary("RETR {0}{1}".format(assembly_rs,assembly_rs_summary) ,reader.write)
reader.seek(0)
for line in reader:
if chr(line[0]) == "#":
continue
line = line.strip().split(b'\t')
# try:
if line[13] == b"Partial":
continue
try:
if line[20].strip():
continue
except:
pass
db = line[11].strip().decode().lower().replace(" ", "_")
if db in databases:
try:
temp = line[19].split(ftp_path.encode(),1)[1].decode()
temp_path = "{0}/{1}_genomic.gbff.gz".format(temp, os.path.basename(temp))
except:
continue
try:
level2path[db.encode()].append(temp_path)
except:
level2path[db.encode()] = [temp_path]
to_download.append((os.path.join(raw_path,"flat_files"),temp_path))
reader = BytesIO()
connection.retrbinary("RETR {0}{1}".format(assembly_gb,assembly_gb_summary) ,reader.write)
reader.seek(0)
for line in reader:
if chr(line[0]) == "#":
continue
line = line.strip().split(b'\t')
if line[13] == b"Partial":
continue
try:
if line[20].strip():
continue
except:
pass
db = line[11].strip().decode().lower().replace(" ", "_")
if db in databases:
try:
temp = line[19].split(ftp_path.encode(),1)[1].decode()
temp_path = "{0}/{1}_genomic.gbff.gz".format(temp, os.path.basename(temp))
except:
continue
try:
level2path[db.encode()].append(temp_path)
except:
level2path[db.encode()] = [temp_path]
to_download.append((os.path.join(raw_path,"flat_files/"), temp_path) )
artifacts = [(os.path.join(raw_path,"artifacts/"),"/pub/taxonomy/taxdump.tar.gz")]
tax_path = "/pub/taxonomy/accession2taxid/"
for file in connection.nlst(tax_path):
if not fnmatch.fnmatch(os.path.basename(file), 'dead*') and not fnmatch.fnmatch(file, '*md5'):
artifacts.append((os.path.join(raw_path, "artifacts"), file))
man = Manager()
connection.quit()
to_download += artifacts
to_download = man.list(to_download)
proc = [Process(target = ftp_dl, args=(to_download,)) for i in range(thread_count)]
with open(os.path.join(raw_path,"artifacts", "ftp_dl.log"), "w"):
pass
for p in proc:
p.start()
for p in proc:
p.join()
for i in level2path.keys():
if i.decode() in exclude:
continue
fp = "{0}_ff.txt".format(i.decode().replace(" ","_"))
with open(os.path.join(raw_path,"artifacts/",fp), "w") as out_file:
for line in level2path[i]:
out_file.write("{0}\n".format(os.path.join(os.path.abspath(raw_path),"flat_files/", os.path.basename(line))))
return string_date
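# Resulting on-disk layout after pull() (illustrative, for a run dated Oct-01-2020):
#
#     Oct-01-2020/
#         artifacts/    exclude.json, taxdump.tar.gz, *accession2taxid* files,
#                       ftp_dl.log and one <database>_ff.txt file list per database
#         flat_files/   the downloaded GenBank / assembly flat files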
if __name__ =="__main__":
parser = argparse.ArgumentParser(description="TaxClipper is intended to be used to parse sequences based on NCBI taxid")
group = parser.add_mutually_exclusive_group(required=True)
# group.add_argument("-oc", "--oneclick", "-oneclick",action='store_true')
# group.add_argument("-ocdl", "--oneclickdl", "-oneclickdl",action='store_true')
# group.add_argument("-ocdc", "--oneclickdc", "-oneclickdc",action='store_true')
group.add_argument("-pl", "--pull", "-pull",action='store_true')
group.add_argument("-gc", "--generate-config","-generate-config", action='store_true',
help="generates a configuration file in current directory or as specified by output")
group.add_argument("-bdb", "--build-database", "-build-database", action='store_true',
help="Builds a sequence database from a file list of NCBI flatfiles")
# group.add_argument("-bigi", "--build-index-gi","-build-index-gi",action='store_true',
# help="Builds a serialization of the fasta file using gi2taxid information")
group.add_argument("-biacc", "--build-index-acc","-build-index-acc",action='store_true',
help="Builds a serialization of the fasta file using acc2taxid information")
group.add_argument("-c", "--clip", "-clip",action='store_true')
# group.add_argument("-ce", "--clip-exclusive","-clip-exclusive",action='store_true')
# group.add_argument("-ci", "--clip-inclusive","-clip-inclusive",action='store_true')
group = parser.add_argument_group()
group.add_argument("-u", "--update", "-update", action='store_true')
group.add_argument("-cp", "--configuration-path", "-configuration-path",
help="Path to configuration JSON built using generate-config flag")
group.add_argument("-tp","--taxdump-path", "-taxdump-path", help="Path to taxdump.tar.gz file"
)
group.add_argument("-sp","--serialization-path", "-serialization-path",
help="Path to *.p file built using build-index_* flag")
# group.add_argument("-kp", "--keyword-path","-keyword-path",
# help="Path to keyword serialization")
group.add_argument("-fp","--fasta-path", "-fasta-path" ,
help="Path to fasta DB built using build-database flag")
# group.add_argument("-g2t", "--gi-to-taxid-paths", "-gi-to-taxid-paths", nargs='*',
# help="Path to gi to nucl files")
group.add_argument("-fl","--file-list", "-file-list" ,
help="Path to file list of paths to GenBank Flat Files")
group.add_argument("-a2t", "--acc-to-taxid-paths", "-acc-to-taxid-paths", nargs='*',
help="Path to accesion to nucl file")
group.add_argument("-txi","--tax-id-include","-tax-id-include", nargs='*',
help="NCBI TaxIDs or Path to file list of TaxIDs to include")
group.add_argument("-txe","--tax-id-exclude","-tax-id-exclude", nargs='*',
help="NCBI TaxIDs or Path to file list of TaxIDs to include")
group.add_argument("-min","--minimum-length","-minimum-length", type=int,
help="Integer for minimum length of sequences to include")
group.add_argument("-max","--maximum-length","-maximum-length", type=int,
help="Integer for maximum length of sequences to include")
group.add_argument("-rur","--rollup-rank","-rollup-rank",
help="NCBI rank to set sequence headers i.e. species, genus, family et cetera")
group.add_argument("-t", "--threads", "-threads", type=int,
help="Specify total threads to spawn in DB creation")
# group.add_argument("-ow", "--overwrite", "-overwrite", help="Specify total threads to spawn in FM-index creation",
# action='store_true')
group.add_argument("-o", "--output","-output",
help="path for output file without extension relevant extension will be appended")
group.add_argument("-p","--path","-path", help="Path to dated folder containing artifacts")
args = parser.parse_args()
    # NOTE: the --oneclick flag is currently commented out above, so use getattr
    # to avoid an AttributeError when it is absent from the namespace
    if getattr(args, "oneclick", False):
        databases = {"genbank", "complete_genome", "scaffold", "contig", "chromosome"}
        exclude = {"complete_genome", "contig"}
if args.threads:
threads = args.threads
else:
threads = 1
if args.path:
dl_folder = args.path
else:
dl_folder = ""
        dl_folder = pull(thread_count=threads, databases=databases, excluded=exclude, path=dl_folder)
for fp in os.listdir(os.path.join(dl_folder,"artifacts/")):
if fnmatch.fnmatch(fp, "*_ff.txt"):
build_db(os.path.join(dl_folder,"artifacts/",fp), os.path.join(dl_folder,"artifacts/","{0}.fas".format(fp.split("_ff")[0])),os.devnull, os.devnull, threads, os.devnull)
arguments = oneclickjson(dl_folder)
with Pool(threads) as p:
p.starmap(acc_serialization, [(argument['acc-to-taxid-paths'], argument['fasta-path'], argument['taxdump-path']) for argument in arguments ] )
# (arguments['acc-to-taxid-paths'], arguments['fasta-path'], arguments['taxdump-path'])
# shutil.rmtree(os.path.join(dl_folder,"flat_files"))
elif args.pull:
if args.threads and args.tax_id_exclude:
pull(thread_count=args.threads, excluded=set(args.tax_id_exclude))
elif args.threads:
pull(thread_count=args.threads)
elif args.tax_id_exclude:
pull(excluded=set(args.tax_id_exclude))
else:
pull()
else:
if args.configuration_path:
            arguments = arg_unwrappers(args, parse_json(args.configuration_path))
else:
arguments = arg_unwrappers(args)
if args.build_database:
if args.threads:
threads = args.threads
else:
threads = 1
if args.file_list:
# build_db("chromosomes.list","chromosome.fasta", "test.kw", "test.src", 16, "test.g2w")
# build_db("complete_genome.list","complete.fasta", "test.kw", "test.src", 16, "test.g2w")
                build_db(args.file_list, args.output, "{0}.kw".format(args.output),
                         "{0}.src".format(args.output), threads, "{0}.g2w".format(args.output))
else:
print("FASTA Database creation requires a path to a file list of GenBank Flat Files")
elif args.build_index_acc:
if arguments['taxdump-path'] and arguments['fasta-path'] and arguments['acc-to-taxid-paths']:
acc_serialization(arguments['acc-to-taxid-paths'],arguments['fasta-path'],arguments['taxdump-path'])
else:
parser.error("Serialization requires paths to taxdump, fasta database and accession2taxid files")
if args.update:
arguments['serialization-path'] = os.path.abspath(args.output+".p")
gen_json(arguments, args)
elif args.clip:
clip(args.tax_id_include,arguments['rollup-rank'], args.tax_id_exclude,
args.output,arguments['minimum-length'], arguments['maximum-length'], arguments['fasta-path'], arguments['serialization-path'])
elif args.update and args.configuration_path:
        gen_json(arg_unwrappers(args, parse_json(args.configuration_path)), args)
elif args.generate_config:
gen_json(arg_unwrappers(args),args)
|
backend.py
|
import time
import hashlib
import json
import requests
import base64
from flask import Flask, request
from multiprocessing import Process, Pipe
import ecdsa
from config import MINER_ADDRESS, MINER_NODE_URL, PEER_NODES
node = Flask(__name__)
class Block:
def __init__(self, index, timestamp, data, previous_hash):
"""Returns a new Block object. Each block is "chained" to its previous
by calling its unique hash.
Args:
index (int): Block number.
timestamp (int): Block creation timestamp.
data (str): Data to be sent.
previous_hash(str): String representing previous block unique hash.
Attrib:
index (int): Block number.
timestamp (int): Block creation timestamp.
data (str): Data to be sent.
previous_hash(str): String representing previous block unique hash.
hash(str): Current block unique hash.
"""
self.index = index
self.timestamp = timestamp
self.data = data
self.previous_hash = previous_hash
self.hash = self.hash_block()
def hash_block(self):
"""Creates the unique hash for the block. It uses sha256."""
sha = hashlib.sha256()
sha.update((str(self.index) + str(self.timestamp) + str(self.data) + str(self.previous_hash)).encode('utf-8'))
return sha.hexdigest()
def create_genesis_block():
"""To create each block, it needs the hash of the previous one. First
block has no previous, so it must be created manually (with index zero
and arbitrary previous hash)"""
return Block(0, time.time(), {
"proof-of-work": 9,
"coin": None,
"data": "{'from': 'network', 'data': 'RDPStudio BlockChain System Started(This is the genesis block)', 'signature': None, 'message': None}"},
"0")
# Node's blockchain copy
BLOCKCHAIN = [create_genesis_block()]
""" Stores the data that this node has in a list.
If the node you sent the data adds a block
it will get accepted, but there is a chance it gets
discarded and your data goes back as if it was never
processed"""
NODE_PENDING_data = []
NODE_PENDING_datas = []
def proof_of_work(last_proof, blockchain):
# Creates a variable that we will use to find our next proof of work
incrementer = last_proof + 1
    # Keep incrementing it until we reach a number that is divisible by 7919
    # and by the proof of work of the previous block in the chain
start_time = time.time()
while not (incrementer % 7919 == 0 and incrementer % last_proof == 0):
incrementer += 1
# Check if any node found the solution every 60 seconds
if int((time.time()-start_time) % 60) == 0:
# If any other node got the proof, stop searching
new_blockchain = consensus(blockchain)
if new_blockchain:
# (False: another node got proof first, new blockchain)
return False, new_blockchain
# Once that number is found, we can return it as a proof of our work
return incrementer, blockchain
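# Illustrative check (not executed by the miner): a proof returned above is a
# common multiple of 7919 and the previous proof. Starting from the genesis
# proof of 9, the first value the loop reaches is 9 * 7919 = 71271:
#
#     >>> 71271 % 7919 == 0 and 71271 % 9 == 0
#     True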
def mine(a, blockchain, node_pending_data):
BLOCKCHAIN = blockchain
NODE_PENDING_data = node_pending_data
while True:
"""Mining is the only way that new coins can be created.
In order to prevent too many coins to be created, the process
is slowed down by a proof of work algorithm.
"""
# Get the last proof of work
last_block = BLOCKCHAIN[-1]
last_proof = last_block.data['proof-of-work']
# Find the proof of work for the current block being mined
# Note: The program will hang here until a new proof of work is found
proof = proof_of_work(last_proof, BLOCKCHAIN)
# If we didn't guess the proof, start mining again
if not proof[0]:
# Update blockchain and save it to file
BLOCKCHAIN = proof[1]
a.send(BLOCKCHAIN)
continue
else:
# Once we find a valid proof of work, we know we can mine a block so
            # ...we reward the miner by appending a reward entry to the pending coin data
# First we load all pending data sent to the node server
NODE_PENDING_data = requests.get(url = MINER_NODE_URL + '/txion', params = {'update':MINER_ADDRESS}).content
NODE_PENDING_datas = requests.get(url = MINER_NODE_URL + '/data', params = {'update':MINER_ADDRESS}).content
NODE_PENDING_data = json.loads(NODE_PENDING_data)
NODE_PENDING_datas = json.loads(NODE_PENDING_datas)
# Then we add the mining reward
NODE_PENDING_data.append({
"from": "network",
"to": MINER_ADDRESS,
"amount": 1})
# Now we can gather the data needed to create the new block
new_block_data = {
"proof-of-work": proof[0],
"coin": list(NODE_PENDING_data),
"data": list(NODE_PENDING_datas)
}
new_block_index = last_block.index + 1
new_block_timestamp = time.time()
last_block_hash = last_block.hash
# Empty data list
NODE_PENDING_data = []
# Now create the new block
mined_block = Block(new_block_index, new_block_timestamp, new_block_data, last_block_hash)
BLOCKCHAIN.append(mined_block)
# Let the client know this node mined a block
print(json.dumps({
"index": new_block_index,
"timestamp": str(new_block_timestamp),
"content": new_block_data,
"hash": last_block_hash
}) + "\n")
a.send(BLOCKCHAIN)
requests.get(url = MINER_NODE_URL + '/blocks', params = {'update':MINER_ADDRESS})
def find_new_chains():
# Get the blockchains of every other node
other_chains = []
for node_url in PEER_NODES:
# Get their chains using a GET request
block = requests.get(url = node_url + "/blocks").content
# Convert the JSON object to a Python dictionary
block = json.loads(block)
# Verify other node block is correct
validated = validate_blockchain(block)
if validated:
# Add it to our list
other_chains.append(block)
return other_chains
def consensus(blockchain):
# Get the blocks from other nodes
other_chains = find_new_chains()
# If our chain isn't longest, then we store the longest chain
BLOCKCHAIN = blockchain
longest_chain = BLOCKCHAIN
for chain in other_chains:
if len(longest_chain) < len(chain):
longest_chain = chain
# If the longest chain wasn't ours, then we set our chain to the longest
if longest_chain == BLOCKCHAIN:
# Keep searching for proof
return False
else:
# Give up searching proof, update chain and start over again
BLOCKCHAIN = longest_chain
tempchain = []
for tempblock in BLOCKCHAIN:
tempchain.append(Block(tempblock["index"],tempblock["timestamp"],tempblock["data"],tempblock["previous_hash"]))
BLOCKCHAIN = tempchain
return BLOCKCHAIN
def validate_blockchain(block):
"""Validate the submitted chain. If hashes are not correct, return false
block(str): json
"""
return True
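def _example_validate_chain(chain):
    """Illustrative sketch only (an assumption, not wired into consensus):
    one way to actually validate a chain received as the list of block dicts
    produced by get_blocks() is to recompute every hash and check the
    previous_hash links."""
    previous_hash = None
    for raw in chain:
        rebuilt = Block(raw["index"], raw["timestamp"], raw["data"], raw["previous_hash"])
        if raw["hash"] != rebuilt.hash:
            return False
        if previous_hash is not None and raw["previous_hash"] != previous_hash:
            return False
        previous_hash = raw["hash"]
    return True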
@node.route('/blocks', methods=['GET'])
def get_blocks():
# Load current blockchain. Only you should update your blockchain
if request.args.get("update") == MINER_ADDRESS:
global BLOCKCHAIN
BLOCKCHAIN = b.recv()
chain_to_send = BLOCKCHAIN
# Converts our blocks into dictionaries so we can send them as json objects later
chain_to_send_json = []
for block in chain_to_send:
block = {
"index": str(block.index),
"timestamp": str(block.timestamp),
"data": str(block.data),
"hash": block.hash,
"previous_hash": block.previous_hash
}
chain_to_send_json.append(block)
# Send our chain to whomever requested it
chain_to_send = json.dumps(chain_to_send_json)
return chain_to_send
@node.route('/txion', methods=['GET', 'POST'])
def tx():
"""Each data sent to this node gets validated and submitted.
Then it waits to be added to the blockchain. data only move
coins, they don't create it.
"""
if request.method == 'POST':
# On each new POST request, we extract the data data
new_txion = request.get_json()
# Then we add the data to our list
if validate_signature(new_txion['from'], new_txion['signature'], new_txion['message']):
NODE_PENDING_data.append(new_txion)
# Because the data was successfully
# submitted, we log it to our console
print("New coin trans")
print("FROM: {0}".format(new_txion['from']))
print("TO: {0}".format(new_txion['to']))
print("AMOUNT: {0}\n".format(new_txion['amount']))
# Then we let the client know it worked out
return "coin trans submission successful\n"
else:
return "coin trans submission failed. Wrong signature\n"
# Send pending data to the mining process
elif request.method == 'GET' and request.args.get("update") == MINER_ADDRESS:
pending = json.dumps(NODE_PENDING_data)
# Empty data list
NODE_PENDING_data[:] = []
return pending
@node.route('/data', methods=['GET', 'POST'])
def data():
"""Each data sent to this node gets validated and submitted.
Then it waits to be added to the blockchain. data only move
coins, they don't create it.
"""
if request.method == 'POST':
# On each new POST request, we extract the data data
new_txion = request.get_json()
# Then we add the data to our list
if validate_signature(new_txion['from'], new_txion['signature'], new_txion['message']):
NODE_PENDING_datas.append(new_txion)
# Because the data was successfully
# submitted, we log it to our console
print("New data")
print("Message: {0}".format(new_txion['data']))
# Then we let the client know it worked out
return "data submission successful\n"
else:
return "data submission failed. Wrong signature\n"
# Send pending data to the mining process
elif request.method == 'GET' and request.args.get("update") == MINER_ADDRESS:
pending = json.dumps(NODE_PENDING_datas)
# Empty data list
NODE_PENDING_datas[:] = []
return pending
def validate_signature(public_key, signature, message):
"""Verifies if the signature is correct. This is used to prove
it's you (and not someone else) trying to do a data with your
address. Called when a user tries to submit a new data.
"""
public_key = (base64.b64decode(public_key)).hex()
signature = base64.b64decode(signature)
vk = ecdsa.VerifyingKey.from_string(bytes.fromhex(public_key), curve=ecdsa.SECP256k1)
    # A wrong signature raises ecdsa.BadSignatureError; a missing message
    # raises AttributeError. Catch only those instead of a bare except.
    try:
        return vk.verify(signature, message.encode())
    except (ecdsa.BadSignatureError, AttributeError):
        return False
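def _example_signed_payload(message):
    """Illustrative wallet-side sketch (an assumption, not part of this node):
    produce a (public_key, signature) pair that validate_signature() accepts,
    using the same curve and base64 encoding."""
    sk = ecdsa.SigningKey.generate(curve=ecdsa.SECP256k1)
    public_key = base64.b64encode(sk.get_verifying_key().to_string()).decode()
    signature = base64.b64encode(sk.sign(message.encode())).decode()
    return public_key, signature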
def welcome_msg():
print(""" =========================================\n
SIMPLE DATA v1.0.0 - BLOCKCHAIN SYSTEM\n
=========================================\n\n
    Make sure you are using the latest version or you may end up in
a parallel chain.\n\n\n""")
if __name__ == '__main__':
welcome_msg()
# Start mining
a, b = Pipe()
p1 = Process(target=mine, args=(a, BLOCKCHAIN, NODE_PENDING_data))
p1.start()
    # Start server to receive data. Flask's run() blocks, so it stays in this
    # process, where the pipe end `b` used by get_blocks() lives.
    node.run()
|
13_customer_terms.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import csv
from xmlrpc import client as xmlrpclib
import multiprocessing as mp
from scriptconfig import URL, DB, UID, PSW, WORKERS
# =================================== C U S T O M E R ========================================
def update_customer_terms(pid, data_pool, write_ids, error_ids):
sock = xmlrpclib.ServerProxy(URL, allow_none=True)
while data_pool:
try:
data = data_pool.pop()
code = data.get('TERM-CODE')
vals = {'name': data.get('TERM-DESC'),
'note': data.get('TERM-DESC'),
'active': True,
'order_type': 'sale',
'code': code,
'discount_per': data.get('TERM-DISC-PCT', 0),
'due_days': data.get('TERM-DISC-DAYS', 0),
}
res = write_ids.get(code, [])
if res:
sock.execute(DB, UID, PSW, 'account.payment.term', 'write', res, vals)
print(pid, 'UPDATE - CUSTOMER TERM', res)
else:
vals['line_ids'] = [(0, 0, {'type': 'balance', 'days': int(data.get('TERM-NET-DUE', 0) or 0)})]
res = sock.execute(DB, UID, PSW, 'account.payment.term', 'create', vals)
print(pid, 'CREATE - CUSTOMER TERM', res)
        except IndexError:
            # data_pool was drained by another worker between the check and the pop
            break
        except Exception as e:
            # record the failure instead of silently stopping this worker
            error_ids.append(str(e))
            print(pid, 'ERROR - CUSTOMER TERM', e)
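# Note (illustrative): the (0, 0, {...}) tuple used for 'line_ids' above is the
# Odoo ORM command for "create a new record and link it" on a one2many field.
# A hypothetical write that appends a 30-day balance line would look like:
#
#     sock.execute(DB, UID, PSW, 'account.payment.term', 'write', [term_id],
#                  {'line_ids': [(0, 0, {'type': 'balance', 'days': 30})]})
#
# where term_id is a placeholder id.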
def sync_terms():
manager = mp.Manager()
data_pool = manager.list()
error_ids = manager.list()
write_ids = manager.dict()
process_Q = []
fp = open('files/rclterm1.csv', 'r')
csv_reader = csv.DictReader(fp)
for vals in csv_reader:
data_pool.append(vals)
fp.close()
domain = [('order_type', '=', 'sale')]
sock = xmlrpclib.ServerProxy(URL, allow_none=True)
res = sock.execute(DB, UID, PSW, 'account.payment.term', 'search_read', domain, ['id', 'code'])
write_ids = {term['code']: term['id'] for term in res}
res = None
term_codes = None
for i in range(WORKERS):
pid = "Worker-%d" % (i + 1)
worker = mp.Process(name=pid, target=update_customer_terms, args=(pid, data_pool, write_ids, error_ids))
process_Q.append(worker)
worker.start()
for worker in process_Q:
worker.join()
if __name__ == "__main__":
# PARTNER
sync_terms()
|
segment.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import json
import logging
import math
import os
from os.path import exists, join, split
import threading
import glob
import time
import numpy as np
import shutil
import sys
from PIL import Image
import torch
from torch import nn
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
import drn
import data_transforms as transforms
try:
from modules import batchnormsync
except ImportError:
pass
FORMAT = "[%(asctime)-15s %(filename)s:%(lineno)d %(funcName)s] %(message)s"
logging.basicConfig(format=FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
CITYSCAPE_PALETTE = np.asarray([
[128, 64, 128],
[244, 35, 232],
[70, 70, 70],
[102, 102, 156],
[190, 153, 153],
[153, 153, 153],
[250, 170, 30],
[220, 220, 0],
[107, 142, 35],
[152, 251, 152],
[70, 130, 180],
[220, 20, 60],
[255, 0, 0],
[0, 0, 142],
[0, 0, 70],
[0, 60, 100],
[0, 80, 100],
[0, 0, 230],
[119, 11, 32],
[0, 0, 0]], dtype=np.uint8)
TRIPLET_PALETTE = np.asarray([
[0, 0, 0, 255],
[217, 83, 79, 255],
[91, 192, 222, 255]], dtype=np.uint8)
def fill_up_weights(up):
w = up.weight.data
f = math.ceil(w.size(2) / 2)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(w.size(2)):
for j in range(w.size(3)):
w[0, 0, i, j] = \
(1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))
for c in range(1, w.size(0)):
w[c, 0, :, :] = w[0, 0, :, :]
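# Note (illustrative): the loop above fills a standard bilinear upsampling
# kernel and copies it to every channel. For a small 4x4 kernel, f = 2 and
# c = 0.75, so the 1-D profile is [0.25, 0.75, 0.75, 0.25] and the 2-D weights
# are its outer product (0.0625 in the corners, 0.5625 in the centre).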
class DRNSeg(nn.Module):
def __init__(self, model_name, classes, pretrained_model=None,
pretrained=False, use_torch_up=False):
super(DRNSeg, self).__init__()
model = drn.__dict__.get(model_name)(
pretrained=pretrained, num_classes=18)
pmodel = nn.DataParallel(model)
if pretrained_model is not None:
pmodel.load_state_dict(pretrained_model)
self.base = nn.Sequential(*list(model.children())[:-2])
self.seg = nn.Conv2d(model.out_dim, classes,
kernel_size=1, bias=True)
        self.softmax = nn.LogSoftmax(dim=1)
m = self.seg
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.bias.data.zero_()
if use_torch_up:
self.up = nn.UpsamplingBilinear2d(scale_factor=8)
else:
up = nn.ConvTranspose2d(classes, classes, 16, stride=8, padding=4,
output_padding=0, groups=classes,
bias=False)
fill_up_weights(up)
up.weight.requires_grad = False
self.up = up
def forward(self, x):
x = self.base(x)
x = self.seg(x)
y = self.up(x)
return self.softmax(y), x
def optim_parameters(self, memo=None):
for param in self.base.parameters():
yield param
for param in self.seg.parameters():
yield param
class SegList(torch.utils.data.Dataset):
def __init__(self, data_dir, phase, transforms, list_dir=None,
out_name=False):
self.list_dir = data_dir if list_dir is None else list_dir
self.data_dir = data_dir
self.out_name = out_name
self.phase = phase
self.transforms = transforms
self.image_list = None
self.label_list = None
self.bbox_list = None
self.read_lists()
def __getitem__(self, index):
data = [Image.open(join(self.data_dir, self.image_list[index]))]
if self.label_list is not None:
data.append(Image.open(
join(self.data_dir, self.label_list[index])))
data = list(self.transforms(*data))
if self.out_name:
if self.label_list is None:
data.append(data[0][0, :, :])
data.append(self.image_list[index])
return tuple(data)
def __len__(self):
return len(self.image_list)
def read_lists(self):
image_path = self.data_dir
label_path = self.data_dir.replace("images", "annotations")
self.image_list = [
filename for filename in glob.glob(image_path + '*.jpg')]
self.label_list = [
filename for filename in glob.glob(label_path + '*.png')]
assert len(self.image_list) == len(self.label_list)
class SegListMS(torch.utils.data.Dataset):
def __init__(self, data_dir, phase, transforms, scales, list_dir=None):
self.list_dir = data_dir if list_dir is None else list_dir
self.data_dir = data_dir
self.phase = phase
self.transforms = transforms
self.image_list = []
self.label_list = []
self.bbox_list = None
self.read_lists()
self.scales = scales
def __getitem__(self, index):
data = [Image.open(join(self.data_dir, self.image_list[index]))]
w, h = data[0].size
if self.label_list is not None:
data.append(Image.open(
join(self.data_dir, self.label_list[index])))
# data = list(self.transforms(*data))
out_data = list(self.transforms(*data))
ms_images = [self.transforms(data[0].resize((int(w * s), int(h * s)),
Image.BICUBIC))[0]
for s in self.scales]
out_data.append(self.image_list[index])
out_data.extend(ms_images)
return tuple(out_data)
def __len__(self):
return len(self.image_list)
def read_lists(self):
image_path = self.data_dir
label_path = self.data_dir.replace("images", "annotations")
self.image_list = [
filename for filename in glob.glob(image_path + '*.jpg')]
self.label_list = [
filename for filename in glob.glob(label_path + '*.png')]
assert len(self.image_list) == len(self.label_list)
def validate(val_loader, model, criterion, eval_score=None, print_freq=10):
batch_time = AverageMeter()
losses = AverageMeter()
score = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
try:
if type(criterion) in [torch.nn.modules.loss.L1Loss,
torch.nn.modules.loss.MSELoss]:
target = target.float()
input = input.cuda()
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = model(input_var)[0]
loss = criterion(output, target_var)
# measure accuracy and record loss
# prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
if eval_score is not None:
score.update(eval_score(output, target_var), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % print_freq == 0:
logger.info('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Score {score.val:.3f} ({score.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
score=score))
except Exception as err:
print(err)
logger.info(' * Score {top1.avg:.3f}'.format(top1=score))
# Writing to log file
try:
with open('val_results.txt', 'w') as file:
file.write('Loss {loss.avg:.4f} * Score {top1.avg:.3f} Time {batch_time.avg:.3f}'.format(
loss=losses, top1=score, batch_time=batch_time))
except Exception as err:
print(err)
return score.avg
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target):
"""Computes the precision@k for the specified values of k"""
# batch_size = target.size(0) * target.size(1) * target.size(2)
_, pred = output.max(1)
pred = pred.view(1, -1)
target = target.view(1, -1)
correct = pred.eq(target)
correct = correct[target != 255]
correct = correct.view(-1)
score = correct.float().sum(0).mul(100.0 / correct.size(0))
return score.item()
def train(train_loader, model, criterion, optimizer, epoch,
eval_score=None, print_freq=10):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
scores = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
try:
# measure data loading time
data_time.update(time.time() - end)
if type(criterion) in [torch.nn.modules.loss.L1Loss,
torch.nn.modules.loss.MSELoss]:
target = target.float()
input = input.cuda()
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input)
target_var = torch.autograd.Variable(target)
# compute output
output = model(input_var)[0]
loss = criterion(output, target_var)
# measure accuracy and record loss
# prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
if eval_score is not None:
scores.update(eval_score(output, target_var), input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % print_freq == 0:
logger.info('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Score {top1.val:.3f} ({top1.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=scores))
except Exception as err:
print(err)
# Writing to log file
try:
with open('train_results.txt', 'w') as file:
file.write('Epoch: [{0}]\t'
'Time ({batch_time.avg:.3f})\t'
'Data ({data_time.avg:.3f})\t'
'Loss ({loss.avg:.4f})\t'
'Score ({top1.avg:.3f})'.format(
epoch, batch_time=batch_time,
data_time=data_time, loss=losses, top1=scores))
except Exception as err:
print(err)
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
def train_seg(args):
batch_size = args.batch_size
num_workers = args.workers
crop_size = args.crop_size
print(' '.join(sys.argv))
for k, v in args.__dict__.items():
print(k, ':', v)
single_model = DRNSeg(args.arch, args.classes, None,
pretrained=False)
if args.pretrained:
single_model.load_state_dict(torch.load(args.pretrained))
model = torch.nn.DataParallel(single_model).cuda()
criterion = nn.NLLLoss2d(ignore_index=255)
criterion.cuda()
# Data loading code
train_dir = 'E:/Dataset/Dataset10k/images/training/'
val_dir = 'E:/Dataset/Dataset10k/images/validation/'
args.data_dir = train_dir
info = json.load(open('info.json', 'r'))
normalize = transforms.Normalize(mean=info['mean'],
std=info['std'])
t = []
if args.random_rotate > 0:
t.append(transforms.RandomRotate(args.random_rotate))
if args.random_scale > 0:
t.append(transforms.RandomScale(args.random_scale))
t.extend([transforms.RandomCrop(crop_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize])
train_loader = torch.utils.data.DataLoader(
SegList(train_dir, 'train', transforms.Compose(t),
list_dir=args.list_dir),
batch_size=batch_size, shuffle=True, num_workers=num_workers,
pin_memory=True, drop_last=True
)
val_loader = torch.utils.data.DataLoader(
SegList(val_dir, 'val', transforms.Compose([
transforms.RandomCrop(crop_size),
transforms.ToTensor(),
normalize,
]), list_dir=args.list_dir),
batch_size=batch_size, shuffle=False, num_workers=num_workers,
pin_memory=True, drop_last=True
)
    # define the optimizer (the loss criterion is defined above)
optimizer = torch.optim.SGD(single_model.optim_parameters(),
args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
cudnn.benchmark = True
best_prec1 = 0
start_epoch = 0
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
if args.evaluate:
validate(val_loader, model, criterion, eval_score=accuracy)
return
for epoch in range(start_epoch, args.epochs):
lr = adjust_learning_rate(args, optimizer, epoch)
logger.info('Epoch: [{0}]\tlr {1:.06f}'.format(epoch, lr))
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch,
eval_score=accuracy)
# evaluate on validation set
prec1 = validate(val_loader, model, criterion, eval_score=accuracy)
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
checkpoint_path = 'checkpoint_latest.pth.tar'
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
}, is_best, filename=checkpoint_path)
if (epoch + 1) % 1 == 0:
history_path = 'checkpoint_{:03d}.pth.tar'.format(epoch + 1)
shutil.copyfile(checkpoint_path, history_path)
def adjust_learning_rate(args, optimizer, epoch):
"""
Sets the learning rate to the initial LR decayed by 10 every 30 epochs
"""
if args.lr_mode == 'step':
lr = args.lr * (0.1 ** (epoch // args.step))
elif args.lr_mode == 'poly':
lr = args.lr * (1 - epoch / args.epochs) ** 0.9
else:
raise ValueError('Unknown lr mode {}'.format(args.lr_mode))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
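# Example values (illustrative, using the parser defaults further below:
# lr=0.001, epochs=10): 'poly' mode gives lr * (1 - epoch / 10) ** 0.9,
# i.e. ~9.1e-4 at epoch 1 and ~1.3e-4 at epoch 9, while 'step' mode divides
# the learning rate by 10 every args.step epochs.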
def fast_hist(pred, label, n):
k = (label >= 0) & (label < n)
return np.bincount(
n * label[k].astype(int) + pred[k], minlength=n ** 2).reshape(n, n)
def per_class_iu(hist):
return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
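# Worked example (illustrative): for the 2-class confusion matrix
#     hist = np.array([[3., 1.],
#                      [2., 4.]])
# per_class_iu(hist) returns [3 / (4 + 5 - 3), 4 / (6 + 5 - 4)] = [0.5, 4/7],
# i.e. diagonal / (row sum + column sum - diagonal) for each class.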
def save_output_images(predictions, filenames, output_dir):
"""
Saves a given (B x C x H x W) into an image file.
If given a mini-batch tensor, will save the tensor as a grid of images.
"""
# pdb.set_trace()
for ind in range(len(filenames)):
im = Image.fromarray(predictions[ind].astype(np.uint8))
fn = os.path.join(output_dir, filenames[ind][:-4] + '.png')
out_dir = split(fn)[0]
if not exists(out_dir):
os.makedirs(out_dir)
im.save(fn)
def save_colorful_images(predictions, filenames, output_dir, palettes):
"""
Saves a given (B x C x H x W) into an image file.
If given a mini-batch tensor, will save the tensor as a grid of images.
"""
for ind in range(len(filenames)):
im = Image.fromarray(palettes[predictions[ind].squeeze()])
fn = os.path.join(output_dir, filenames[ind][:-4] + '.png')
out_dir = split(fn)[0]
if not exists(out_dir):
os.makedirs(out_dir)
im.save(fn)
def test(eval_data_loader, model, num_classes,
output_dir='pred', has_gt=True, save_vis=False):
model.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
end = time.time()
hist = np.zeros((num_classes, num_classes))
for iter, (image, label, name) in enumerate(eval_data_loader):
try:
data_time.update(time.time() - end)
image_var = Variable(image, requires_grad=False, volatile=True)
final = model(image_var)[0]
_, pred = torch.max(final, 1)
pred = pred.cpu().data.numpy()
batch_time.update(time.time() - end)
if save_vis:
save_output_images(pred, name, output_dir)
save_colorful_images(
pred, name, output_dir + '_color',
TRIPLET_PALETTE if num_classes == 3 else CITYSCAPE_PALETTE)
if has_gt:
label = label.numpy()
hist += fast_hist(pred.flatten(), label.flatten(), num_classes)
logger.info('===> mAP {mAP:.3f}'.format(
mAP=round(np.nanmean(per_class_iu(hist)) * 100, 2)))
end = time.time()
logger.info('Eval: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
.format(iter, len(eval_data_loader), batch_time=batch_time,
data_time=data_time))
except Exception as err:
print(err)
if has_gt: # val
ious = per_class_iu(hist) * 100
logger.info(' '.join('{:.03f}'.format(i) for i in ious))
return round(np.nanmean(ious), 2)
def resize_4d_tensor(tensor, width, height):
tensor_cpu = tensor.cpu().numpy()
if tensor.size(2) == height and tensor.size(3) == width:
return tensor_cpu
out_size = (tensor.size(0), tensor.size(1), height, width)
out = np.empty(out_size, dtype=np.float32)
def resize_one(i, j):
out[i, j] = np.array(
Image.fromarray(tensor_cpu[i, j]).resize(
(width, height), Image.BILINEAR))
def resize_channel(j):
for i in range(tensor.size(0)):
out[i, j] = np.array(
Image.fromarray(tensor_cpu[i, j]).resize(
(width, height), Image.BILINEAR))
# workers = [threading.Thread(target=resize_one, args=(i, j))
# for i in range(tensor.size(0)) for j in range(tensor.size(1))]
workers = [threading.Thread(target=resize_channel, args=(j,))
for j in range(tensor.size(1))]
for w in workers:
w.start()
for w in workers:
w.join()
# for i in range(tensor.size(0)):
# for j in range(tensor.size(1)):
# out[i, j] = np.array(
# Image.fromarray(tensor_cpu[i, j]).resize(
# (w, h), Image.BILINEAR))
# out = tensor.new().resize_(*out.shape).copy_(torch.from_numpy(out))
return out
def test_ms(eval_data_loader, model, num_classes, scales,
output_dir='pred', has_gt=True, save_vis=False):
model.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
end = time.time()
hist = np.zeros((num_classes, num_classes))
num_scales = len(scales)
for iter, input_data in enumerate(eval_data_loader):
data_time.update(time.time() - end)
if has_gt:
name = input_data[2]
label = input_data[1]
else:
name = input_data[1]
h, w = input_data[0].size()[2:4]
images = [input_data[0]]
images.extend(input_data[-num_scales:])
# pdb.set_trace()
outputs = []
for image in images:
image_var = Variable(image, requires_grad=False, volatile=True)
final = model(image_var)[0]
outputs.append(final.data)
final = sum([resize_4d_tensor(out, w, h) for out in outputs])
# _, pred = torch.max(torch.from_numpy(final), 1)
# pred = pred.cpu().numpy()
pred = final.argmax(axis=1)
batch_time.update(time.time() - end)
if save_vis:
save_output_images(pred, name, output_dir)
save_colorful_images(pred, name, output_dir + '_color',
CITYSCAPE_PALETTE)
if has_gt:
label = label.numpy()
hist += fast_hist(pred.flatten(), label.flatten(), num_classes)
logger.info('===> mAP {mAP:.3f}'.format(
mAP=round(np.nanmean(per_class_iu(hist)) * 100, 2)))
end = time.time()
logger.info('Eval: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
.format(iter, len(eval_data_loader), batch_time=batch_time,
data_time=data_time))
if has_gt: # val
ious = per_class_iu(hist) * 100
logger.info(' '.join('{:.03f}'.format(i) for i in ious))
return round(np.nanmean(ious), 2)
def test_seg(args):
val_dir = 'E:/Dataset/Dataset10k/images/validation/'
args.data_dir = val_dir
batch_size = args.batch_size
num_workers = args.workers
phase = args.phase
for k, v in args.__dict__.items():
print(k, ':', v)
single_model = DRNSeg(args.arch, args.classes, pretrained_model=None,
pretrained=False)
if args.pretrained:
single_model.load_state_dict(torch.load(args.pretrained))
model = torch.nn.DataParallel(single_model).cuda()
data_dir = args.data_dir
info = json.load(open('info.json', 'r'))
normalize = transforms.Normalize(mean=info['mean'], std=info['std'])
scales = [0.5, 0.75, 1.25, 1.5, 1.75]
if args.ms:
dataset = SegListMS(data_dir, phase, transforms.Compose([
transforms.ToTensor(),
normalize,
]), scales, list_dir=args.list_dir)
else:
dataset = SegList(data_dir, phase, transforms.Compose([
transforms.ToTensor(),
normalize,
]), list_dir=args.list_dir, out_name=True)
test_loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size, shuffle=False, num_workers=num_workers,
pin_memory=False
)
cudnn.benchmark = True
# optionally resume from a checkpoint
start_epoch = 0
if args.resume:
if os.path.isfile(args.resume):
logger.info("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
logger.info("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
logger.info("=> no checkpoint found at '{}'".format(args.resume))
out_dir = '{}_{:03d}_{}'.format(args.arch, start_epoch, phase)
if len(args.test_suffix) > 0:
out_dir += '_' + args.test_suffix
if args.ms:
out_dir += '_ms'
if args.ms:
mAP = test_ms(test_loader, model, args.classes, save_vis=True,
has_gt=phase != 'test' or args.with_gt,
output_dir=out_dir,
scales=scales)
else:
mAP = test(test_loader, model, args.classes, save_vis=True,
has_gt=phase != 'test' or args.with_gt, output_dir=out_dir)
logger.info('mAP: %f', mAP)
def parse_args():
# Training settings
parser = argparse.ArgumentParser(description='')
parser.add_argument('cmd', choices=['train', 'test'])
parser.add_argument('-d', '--data-dir',
default='E:/Dataset/Dataset10k/images/training/')
parser.add_argument('-l', '--list-dir', default='E:/Dataset/Dataset10k/list/',
help='List dir to look for train_images.txt etc. '
                             'Defaults to --data-dir if not set.')
parser.add_argument('-c', '--classes', default=18, type=int)
parser.add_argument('-s', '--crop-size', default=840, type=int)
parser.add_argument('--step', type=int, default=100)
parser.add_argument('--arch', default='drn_d_105')
parser.add_argument('--batch-size', type=int, default=1, metavar='N',
                        help='input batch size for training (default: 1)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                        help='learning rate (default: 0.001)')
#parser.add_argument('--lr-mode', type=str, default='step')
parser.add_argument('--lr-mode', type=str, default='poly')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('-e', '--evaluate', dest='evaluate',
action='store_true',
help='evaluate model on validation set')
parser.add_argument('--resume', default=False, type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--pretrained', dest='pretrained',
default=False, type=str, metavar='PATH',
help='use pre-trained model')
parser.add_argument('-j', '--workers', type=int, default=8)
parser.add_argument('--load-release', dest='load_rel', default=None)
parser.add_argument('--phase', default='val')
parser.add_argument('--random-scale', default=2, type=float)
#parser.add_argument('--random-scale', default=0, type=float)
parser.add_argument('--random-rotate', default=10, type=int)
#parser.add_argument('--random-rotate', default=0, type=int)
parser.add_argument('--bn-sync', default=False, action='store_true')
parser.add_argument('--ms', action='store_true',
help='Turn on multi-scale testing')
parser.add_argument('--with-gt', action='store_true')
parser.add_argument('--test-suffix', default='', type=str)
args = parser.parse_args()
assert args.classes > 0
print(' '.join(sys.argv))
print(args)
if args.bn_sync:
drn.BatchNorm = batchnormsync.BatchNormSync
return args
def main():
args = parse_args()
if args.cmd == 'train':
train_seg(args)
elif args.cmd == 'test':
test_seg(args)
if __name__ == '__main__':
main()
|
conftest.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
import json
import time
import random
import os.path
import tempfile
import threading
import subprocess
import pytest
import requests
# Variables ===================================================================
PORT = random.randint(20000, 60000)
URL = "http://127.0.0.1:%d" % PORT
API_URL = URL + "/api_v1/"
_SERVER_HANDLER = None
# Functions ===================================================================
def circuit_breaker_http_retry(max_retry=10):
for i in range(max_retry):
try:
print "Connecting to server .. %d/%d" % (i + 1, max_retry)
return requests.get(URL).raise_for_status()
except Exception:
time.sleep(1)
raise IOError("Couldn't connect to thread with HTTP server. Aborting.")
def _create_alt_settings():
alt_settings = {
"WEB_ADDR": "127.0.0.1",
"WEB_PORT": web_port(),
"WEB_SERVER": "paste",
"WEB_DEBUG": True,
"WEB_RELOADER": True,
"WEB_BE_QUIET": True,
}
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(json.dumps(alt_settings))
return f.name
# Setup =======================================================================
@pytest.fixture(scope="session", autouse=True)
def web_port():
return PORT
@pytest.fixture(scope="session", autouse=True)
def web_url():
return URL
@pytest.fixture(scope="session", autouse=True)
def web_api_url():
return API_URL
@pytest.fixture(scope="session", autouse=True)
def bottle_server(request):
alt_conf_path = _create_alt_settings()
# run the bottle REST server
def run_bottle():
# prepare path to the command
command_path = os.path.join(
os.path.dirname(__file__),
"../bin/wa_kat_server.py"
)
assert os.path.exists(command_path)
# replace settings with mocked file
my_env = os.environ.copy()
my_env["SETTINGS_PATH"] = alt_conf_path
# run the server
global _SERVER_HANDLER
_SERVER_HANDLER = subprocess.Popen(command_path, env=my_env)
serv = threading.Thread(target=run_bottle)
serv.setDaemon(True)
serv.start()
# add finalizer which shutdowns the server and remove temporary file
def shutdown_server():
if _SERVER_HANDLER:
_SERVER_HANDLER.terminate()
os.unlink(alt_conf_path)
request.addfinalizer(shutdown_server)
# wait until the connection with server is created
circuit_breaker_http_retry()
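# Illustrative usage (an assumed example test, not part of this suite): the
# session-scoped fixtures above are consumed as plain test arguments, e.g.
#
#     def test_server_is_reachable(web_url):
#         assert requests.get(web_url).ok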
|
test_linsolve.py
|
from __future__ import division, print_function, absolute_import
import sys
import threading
import numpy as np
from numpy import array, finfo, arange, eye, all, unique, ones, dot
import numpy.random as random
from numpy.testing import (
assert_array_almost_equal, assert_almost_equal,
assert_equal, assert_array_equal, assert_, assert_allclose,
assert_warns)
import pytest
from pytest import raises as assert_raises
import scipy.linalg
from scipy.linalg import norm, inv
from scipy.sparse import (spdiags, SparseEfficiencyWarning, csc_matrix,
csr_matrix, identity, isspmatrix, dok_matrix, lil_matrix, bsr_matrix)
from scipy.sparse.linalg import SuperLU
from scipy.sparse.linalg.dsolve import (spsolve, use_solver, splu, spilu,
MatrixRankWarning, _superlu, spsolve_triangular, factorized)
from scipy._lib._numpy_compat import suppress_warnings
sup_sparse_efficiency = suppress_warnings()
sup_sparse_efficiency.filter(SparseEfficiencyWarning)
# scikits.umfpack is not a SciPy dependency but it is optionally used in
# dsolve, so check whether it's available
try:
import scikits.umfpack as umfpack
has_umfpack = True
except ImportError:
has_umfpack = False
def toarray(a):
if isspmatrix(a):
return a.toarray()
else:
return a
class TestFactorized(object):
def setup_method(self):
n = 5
d = arange(n) + 1
self.n = n
self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n).tocsc()
random.seed(1234)
def _check_singular(self):
A = csc_matrix((5,5), dtype='d')
b = ones(5)
assert_array_almost_equal(0. * b, factorized(A)(b))
def _check_non_singular(self):
# Make a diagonal dominant, to make sure it is not singular
n = 5
a = csc_matrix(random.rand(n, n))
b = ones(n)
expected = splu(a).solve(b)
assert_array_almost_equal(factorized(a)(b), expected)
def test_singular_without_umfpack(self):
use_solver(useUmfpack=False)
with assert_raises(RuntimeError, match="Factor is exactly singular"):
self._check_singular()
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_singular_with_umfpack(self):
use_solver(useUmfpack=True)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "divide by zero encountered in double_scalars")
assert_warns(umfpack.UmfpackWarning, self._check_singular)
def test_non_singular_without_umfpack(self):
use_solver(useUmfpack=False)
self._check_non_singular()
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_non_singular_with_umfpack(self):
use_solver(useUmfpack=True)
self._check_non_singular()
def test_cannot_factorize_nonsquare_matrix_without_umfpack(self):
use_solver(useUmfpack=False)
msg = "can only factor square matrices"
with assert_raises(ValueError, match=msg):
factorized(self.A[:, :4])
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_factorizes_nonsquare_matrix_with_umfpack(self):
use_solver(useUmfpack=True)
# does not raise
factorized(self.A[:,:4])
def test_call_with_incorrectly_sized_matrix_without_umfpack(self):
use_solver(useUmfpack=False)
solve = factorized(self.A)
b = random.rand(4)
B = random.rand(4, 3)
BB = random.rand(self.n, 3, 9)
with assert_raises(ValueError, match="is of incompatible size"):
solve(b)
with assert_raises(ValueError, match="is of incompatible size"):
solve(B)
with assert_raises(ValueError,
match="object too deep for desired array"):
solve(BB)
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_call_with_incorrectly_sized_matrix_with_umfpack(self):
use_solver(useUmfpack=True)
solve = factorized(self.A)
b = random.rand(4)
B = random.rand(4, 3)
BB = random.rand(self.n, 3, 9)
# does not raise
solve(b)
msg = "object too deep for desired array"
with assert_raises(ValueError, match=msg):
solve(B)
with assert_raises(ValueError, match=msg):
solve(BB)
def test_call_with_cast_to_complex_without_umfpack(self):
use_solver(useUmfpack=False)
solve = factorized(self.A)
b = random.rand(4)
for t in [np.complex64, np.complex128]:
with assert_raises(TypeError, match="Cannot cast array data"):
solve(b.astype(t))
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_call_with_cast_to_complex_with_umfpack(self):
use_solver(useUmfpack=True)
solve = factorized(self.A)
b = random.rand(4)
for t in [np.complex64, np.complex128]:
assert_warns(np.ComplexWarning, solve, b.astype(t))
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_assume_sorted_indices_flag(self):
# a sparse matrix with unsorted indices
unsorted_inds = np.array([2, 0, 1, 0])
data = np.array([10, 16, 5, 0.4])
indptr = np.array([0, 1, 2, 4])
A = csc_matrix((data, unsorted_inds, indptr), (3, 3))
b = ones(3)
# should raise when incorrectly assuming indices are sorted
use_solver(useUmfpack=True, assumeSortedIndices=True)
with assert_raises(RuntimeError,
match="UMFPACK_ERROR_invalid_matrix"):
factorized(A)
# should sort indices and succeed when not assuming indices are sorted
use_solver(useUmfpack=True, assumeSortedIndices=False)
expected = splu(A.copy()).solve(b)
assert_equal(A.has_sorted_indices, 0)
assert_array_almost_equal(factorized(A)(b), expected)
assert_equal(A.has_sorted_indices, 1)
class TestLinsolve(object):
def setup_method(self):
use_solver(useUmfpack=False)
def test_singular(self):
A = csc_matrix((5,5), dtype='d')
b = array([1, 2, 3, 4, 5],dtype='d')
with suppress_warnings() as sup:
sup.filter(MatrixRankWarning, "Matrix is exactly singular")
x = spsolve(A, b)
assert_(not np.isfinite(x).any())
def test_singular_gh_3312(self):
# "Bad" test case that leads SuperLU to call LAPACK with invalid
# arguments. Check that it fails moderately gracefully.
ij = np.array([(17, 0), (17, 6), (17, 12), (10, 13)], dtype=np.int32)
v = np.array([0.284213, 0.94933781, 0.15767017, 0.38797296])
A = csc_matrix((v, ij.T), shape=(20, 20))
b = np.arange(20)
try:
                # should either raise a RuntimeError or return a value
                # appropriate for singular input
x = spsolve(A, b)
assert_(not np.isfinite(x).any())
except RuntimeError:
pass
def test_twodiags(self):
A = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5)
b = array([1, 2, 3, 4, 5])
# condition number of A
cond_A = norm(A.todense(),2) * norm(inv(A.todense()),2)
for t in ['f','d','F','D']:
eps = finfo(t).eps # floating point epsilon
b = b.astype(t)
for format in ['csc','csr']:
Asp = A.astype(t).asformat(format)
x = spsolve(Asp,b)
assert_(norm(b - Asp*x) < 10 * cond_A * eps)
def test_bvector_smoketest(self):
Adense = array([[0., 1., 1.],
[1., 0., 1.],
[0., 0., 1.]])
As = csc_matrix(Adense)
random.seed(1234)
x = random.randn(3)
b = As*x
x2 = spsolve(As, b)
assert_array_almost_equal(x, x2)
def test_bmatrix_smoketest(self):
Adense = array([[0., 1., 1.],
[1., 0., 1.],
[0., 0., 1.]])
As = csc_matrix(Adense)
random.seed(1234)
x = random.randn(3, 4)
Bdense = As.dot(x)
Bs = csc_matrix(Bdense)
x2 = spsolve(As, Bs)
assert_array_almost_equal(x, x2.todense())
@sup_sparse_efficiency
def test_non_square(self):
# A is not square.
A = ones((3, 4))
b = ones((4, 1))
assert_raises(ValueError, spsolve, A, b)
# A2 and b2 have incompatible shapes.
A2 = csc_matrix(eye(3))
b2 = array([1.0, 2.0])
assert_raises(ValueError, spsolve, A2, b2)
@sup_sparse_efficiency
def test_example_comparison(self):
row = array([0,0,1,2,2,2])
col = array([0,2,2,0,1,2])
data = array([1,2,3,-4,5,6])
sM = csr_matrix((data,(row,col)), shape=(3,3), dtype=float)
M = sM.todense()
row = array([0,0,1,1,0,0])
col = array([0,2,1,1,0,0])
data = array([1,1,1,1,1,1])
sN = csr_matrix((data, (row,col)), shape=(3,3), dtype=float)
N = sN.todense()
sX = spsolve(sM, sN)
X = scipy.linalg.solve(M, N)
assert_array_almost_equal(X, sX.todense())
@sup_sparse_efficiency
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_shape_compatibility(self):
use_solver(useUmfpack=True)
A = csc_matrix([[1., 0], [0, 2]])
bs = [
[1, 6],
array([1, 6]),
[[1], [6]],
array([[1], [6]]),
csc_matrix([[1], [6]]),
csr_matrix([[1], [6]]),
dok_matrix([[1], [6]]),
bsr_matrix([[1], [6]]),
array([[1., 2., 3.], [6., 8., 10.]]),
csc_matrix([[1., 2., 3.], [6., 8., 10.]]),
csr_matrix([[1., 2., 3.], [6., 8., 10.]]),
dok_matrix([[1., 2., 3.], [6., 8., 10.]]),
bsr_matrix([[1., 2., 3.], [6., 8., 10.]]),
]
for b in bs:
x = np.linalg.solve(A.toarray(), toarray(b))
for spmattype in [csc_matrix, csr_matrix, dok_matrix, lil_matrix]:
x1 = spsolve(spmattype(A), b, use_umfpack=True)
x2 = spsolve(spmattype(A), b, use_umfpack=False)
# check solution
if x.ndim == 2 and x.shape[1] == 1:
# interprets also these as "vectors"
x = x.ravel()
assert_array_almost_equal(toarray(x1), x, err_msg=repr((b, spmattype, 1)))
assert_array_almost_equal(toarray(x2), x, err_msg=repr((b, spmattype, 2)))
# dense vs. sparse output ("vectors" are always dense)
if isspmatrix(b) and x.ndim > 1:
assert_(isspmatrix(x1), repr((b, spmattype, 1)))
assert_(isspmatrix(x2), repr((b, spmattype, 2)))
else:
assert_(isinstance(x1, np.ndarray), repr((b, spmattype, 1)))
assert_(isinstance(x2, np.ndarray), repr((b, spmattype, 2)))
# check output shape
if x.ndim == 1:
# "vector"
assert_equal(x1.shape, (A.shape[1],))
assert_equal(x2.shape, (A.shape[1],))
else:
# "matrix"
assert_equal(x1.shape, x.shape)
assert_equal(x2.shape, x.shape)
A = csc_matrix((3, 3))
b = csc_matrix((1, 3))
assert_raises(ValueError, spsolve, A, b)
@sup_sparse_efficiency
def test_ndarray_support(self):
A = array([[1., 2.], [2., 0.]])
x = array([[1., 1.], [0.5, -0.5]])
b = array([[2., 0.], [2., 2.]])
assert_array_almost_equal(x, spsolve(A, b))
def test_gssv_badinput(self):
N = 10
d = arange(N) + 1.0
A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), N, N)
for spmatrix in (csc_matrix, csr_matrix):
A = spmatrix(A)
b = np.arange(N)
def not_c_contig(x):
return x.repeat(2)[::2]
def not_1dim(x):
return x[:,None]
def bad_type(x):
return x.astype(bool)
def too_short(x):
return x[:-1]
badops = [not_c_contig, not_1dim, bad_type, too_short]
for badop in badops:
msg = "%r %r" % (spmatrix, badop)
# Not C-contiguous
assert_raises((ValueError, TypeError), _superlu.gssv,
N, A.nnz, badop(A.data), A.indices, A.indptr,
b, int(spmatrix == csc_matrix), err_msg=msg)
assert_raises((ValueError, TypeError), _superlu.gssv,
N, A.nnz, A.data, badop(A.indices), A.indptr,
b, int(spmatrix == csc_matrix), err_msg=msg)
assert_raises((ValueError, TypeError), _superlu.gssv,
N, A.nnz, A.data, A.indices, badop(A.indptr),
b, int(spmatrix == csc_matrix), err_msg=msg)
def test_sparsity_preservation(self):
ident = csc_matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
b = csc_matrix([
[0, 1],
[1, 0],
[0, 0]])
x = spsolve(ident, b)
assert_equal(ident.nnz, 3)
assert_equal(b.nnz, 2)
assert_equal(x.nnz, 2)
assert_allclose(x.A, b.A, atol=1e-12, rtol=1e-12)
def test_dtype_cast(self):
A_real = scipy.sparse.csr_matrix([[1, 2, 0],
[0, 0, 3],
[4, 0, 5]])
A_complex = scipy.sparse.csr_matrix([[1, 2, 0],
[0, 0, 3],
[4, 0, 5 + 1j]])
b_real = np.array([1,1,1])
b_complex = np.array([1,1,1]) + 1j*np.array([1,1,1])
x = spsolve(A_real, b_real)
assert_(np.issubdtype(x.dtype, np.floating))
x = spsolve(A_real, b_complex)
assert_(np.issubdtype(x.dtype, np.complexfloating))
x = spsolve(A_complex, b_real)
assert_(np.issubdtype(x.dtype, np.complexfloating))
x = spsolve(A_complex, b_complex)
assert_(np.issubdtype(x.dtype, np.complexfloating))
class TestSplu(object):
def setup_method(self):
use_solver(useUmfpack=False)
n = 40
d = arange(n) + 1
self.n = n
self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n)
random.seed(1234)
def _smoketest(self, spxlu, check, dtype):
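# Factor A with the supplied solver and check solve() against several right-hand-side
# shapes, including the 'T' (transpose) and 'H' (conjugate transpose) solve modes.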
if np.issubdtype(dtype, np.complexfloating):
A = self.A + 1j*self.A.T
else:
A = self.A
A = A.astype(dtype)
lu = spxlu(A)
rng = random.RandomState(1234)
# Input shapes
for k in [None, 1, 2, self.n, self.n+2]:
msg = "k=%r" % (k,)
if k is None:
b = rng.rand(self.n)
else:
b = rng.rand(self.n, k)
if np.issubdtype(dtype, np.complexfloating):
b = b + 1j*rng.rand(*b.shape)
b = b.astype(dtype)
x = lu.solve(b)
check(A, b, x, msg)
x = lu.solve(b, 'T')
check(A.T, b, x, msg)
x = lu.solve(b, 'H')
check(A.T.conj(), b, x, msg)
@sup_sparse_efficiency
def test_splu_smoketest(self):
self._internal_test_splu_smoketest()
def _internal_test_splu_smoketest(self):
# Check that splu works at all
def check(A, b, x, msg=""):
eps = np.finfo(A.dtype).eps
r = A * x
assert_(abs(r - b).max() < 1e3*eps, msg)
self._smoketest(splu, check, np.float32)
self._smoketest(splu, check, np.float64)
self._smoketest(splu, check, np.complex64)
self._smoketest(splu, check, np.complex128)
@sup_sparse_efficiency
def test_spilu_smoketest(self):
self._internal_test_spilu_smoketest()
def _internal_test_spilu_smoketest(self):
errors = []
def check(A, b, x, msg=""):
r = A * x
err = abs(r - b).max()
assert_(err < 1e-2, msg)
if b.dtype in (np.float64, np.complex128):
errors.append(err)
self._smoketest(spilu, check, np.float32)
self._smoketest(spilu, check, np.float64)
self._smoketest(spilu, check, np.complex64)
self._smoketest(spilu, check, np.complex128)
assert_(max(errors) > 1e-5)
@sup_sparse_efficiency
def test_spilu_drop_rule(self):
# Test passing in the drop_rule argument to spilu.
A = identity(2)
rules = [
b'basic,area'.decode('ascii'), # unicode
b'basic,area', # ascii
[b'basic', b'area'.decode('ascii')]
]
for rule in rules:
# Argument should be accepted
assert_(isinstance(spilu(A, drop_rule=rule), SuperLU))
def test_splu_nnz0(self):
A = csc_matrix((5,5), dtype='d')
assert_raises(RuntimeError, splu, A)
def test_spilu_nnz0(self):
A = csc_matrix((5,5), dtype='d')
assert_raises(RuntimeError, spilu, A)
def test_splu_basic(self):
# Test basic splu functionality.
n = 30
rng = random.RandomState(12)
a = rng.rand(n, n)
a[a < 0.95] = 0
# First test with a singular matrix
a[:, 0] = 0
a_ = csc_matrix(a)
# Matrix is exactly singular
assert_raises(RuntimeError, splu, a_)
# Make a diagonally dominant, to make sure it is not singular
a += 4*eye(n)
a_ = csc_matrix(a)
lu = splu(a_)
b = ones(n)
x = lu.solve(b)
assert_almost_equal(dot(a, x), b)
def test_splu_perm(self):
# Test the permutation vectors exposed by splu.
n = 30
a = random.random((n, n))
a[a < 0.95] = 0
# Make a diagonally dominant, to make sure it is not singular
a += 4*eye(n)
a_ = csc_matrix(a)
lu = splu(a_)
# Check that the permutation indices do belong to [0, n-1].
for perm in (lu.perm_r, lu.perm_c):
assert_(all(perm > -1))
assert_(all(perm < n))
assert_equal(len(unique(perm)), len(perm))
# Now make a symmetric, and test that the two permutation vectors are
# the same
# Note: a += a.T relies on undefined behavior.
a = a + a.T
a_ = csc_matrix(a)
lu = splu(a_)
assert_array_equal(lu.perm_r, lu.perm_c)
@pytest.mark.skipif(not hasattr(sys, 'getrefcount'), reason="no sys.getrefcount")
def test_lu_refcount(self):
# Test that we are keeping track of the reference count with splu.
n = 30
a = random.random((n, n))
a[a < 0.95] = 0
# Make a diagonally dominant, to make sure it is not singular
a += 4*eye(n)
a_ = csc_matrix(a)
lu = splu(a_)
# And now test that we don't have a refcount bug
rc = sys.getrefcount(lu)
for attr in ('perm_r', 'perm_c'):
perm = getattr(lu, attr)
assert_equal(sys.getrefcount(lu), rc + 1)
del perm
assert_equal(sys.getrefcount(lu), rc)
def test_bad_inputs(self):
A = self.A.tocsc()
assert_raises(ValueError, splu, A[:,:4])
assert_raises(ValueError, spilu, A[:,:4])
for lu in [splu(A), spilu(A)]:
b = random.rand(42)
B = random.rand(42, 3)
BB = random.rand(self.n, 3, 9)
assert_raises(ValueError, lu.solve, b)
assert_raises(ValueError, lu.solve, B)
assert_raises(ValueError, lu.solve, BB)
assert_raises(TypeError, lu.solve,
b.astype(np.complex64))
assert_raises(TypeError, lu.solve,
b.astype(np.complex128))
@sup_sparse_efficiency
def test_superlu_dlamch_i386_nan(self):
# SuperLU 4.3 calls some functions returning floats without
# declaring them. Under the i386 Linux calling convention, this fails to
# clear the floating point registers after the call. As a result, NaN
# can appear in the next floating point operation performed.
#
# Here's a test case that triggered the issue.
n = 8
d = np.arange(n) + 1
A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n)
A = A.astype(np.float32)
spilu(A)
A = A + 1j*A
B = A.A
assert_(not np.isnan(B).any())
@sup_sparse_efficiency
def test_lu_attr(self):
def check(dtype, complex_2=False):
A = self.A.astype(dtype)
if complex_2:
A = A + 1j*A.T
n = A.shape[0]
lu = splu(A)
# Check that the decomposition is as advertised
Pc = np.zeros((n, n))
Pc[np.arange(n), lu.perm_c] = 1
Pr = np.zeros((n, n))
Pr[lu.perm_r, np.arange(n)] = 1
Ad = A.toarray()
lhs = Pr.dot(Ad).dot(Pc)
rhs = (lu.L * lu.U).toarray()
eps = np.finfo(dtype).eps
assert_allclose(lhs, rhs, atol=100*eps)
check(np.float32)
check(np.float64)
check(np.complex64)
check(np.complex128)
check(np.complex64, True)
check(np.complex128, True)
@pytest.mark.slow
@sup_sparse_efficiency
def test_threads_parallel(self):
oks = []
def worker():
try:
self.test_splu_basic()
self._internal_test_splu_smoketest()
self._internal_test_spilu_smoketest()
oks.append(True)
except Exception:
pass
threads = [threading.Thread(target=worker)
for k in range(20)]
for t in threads:
t.start()
for t in threads:
t.join()
assert_equal(len(oks), 20)
class TestSpsolveTriangular(object):
def setup_method(self):
use_solver(useUmfpack=False)
def test_singular(self):
n = 5
A = csr_matrix((n, n))
b = np.arange(n)
for lower in (True, False):
assert_raises(scipy.linalg.LinAlgError, spsolve_triangular, A, b, lower=lower)
@sup_sparse_efficiency
def test_bad_shape(self):
# A is not square.
A = np.zeros((3, 4))
b = ones((4, 1))
assert_raises(ValueError, spsolve_triangular, A, b)
# A2 and b2 have incompatible shapes.
A2 = csr_matrix(eye(3))
b2 = array([1.0, 2.0])
assert_raises(ValueError, spsolve_triangular, A2, b2)
@sup_sparse_efficiency
def test_input_types(self):
A = array([[1., 0.], [1., 2.]])
b = array([[2., 0.], [2., 2.]])
for matrix_type in (array, csc_matrix, csr_matrix):
x = spsolve_triangular(matrix_type(A), b, lower=True)
assert_array_almost_equal(A.dot(x), b)
@pytest.mark.slow
@sup_sparse_efficiency
def test_random(self):
def random_triangle_matrix(n, lower=True):
A = scipy.sparse.random(n, n, density=0.1, format='coo')
if lower:
A = scipy.sparse.tril(A)
else:
A = scipy.sparse.triu(A)
A = A.tocsr(copy=False)
for i in range(n):
A[i, i] = np.random.rand() + 1
return A
np.random.seed(1234)
for lower in (True, False):
for n in (10, 10**2, 10**3):
A = random_triangle_matrix(n, lower=lower)
for m in (1, 10):
for b in (np.random.rand(n, m),
np.random.randint(-9, 9, (n, m)),
np.random.randint(-9, 9, (n, m)) +
np.random.randint(-9, 9, (n, m)) * 1j):
x = spsolve_triangular(A, b, lower=lower)
assert_array_almost_equal(A.dot(x), b)
x = spsolve_triangular(A, b, lower=lower,
unit_diagonal=True)
A.setdiag(1)
assert_array_almost_equal(A.dot(x), b)
|
testing_server.py
|
import traceback
import uuid
import socket
import logging
import os
import base64
import zlib
import gzip
import time
import datetime
from http import cookies
from http.server import BaseHTTPRequestHandler
from http.server import HTTPServer
from threading import Thread
import WebRequest
def capture_expected_headers(expected_headers, test_context, is_chromium=False, is_selenium_garbage_chromium=False, is_annoying_pjs=False, skip_header_checks=False):
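# Factory: builds and returns a BaseHTTPRequestHandler subclass bound to expected_headers;
# the handler validates incoming request headers and serves the fixture URLs exercised by the WebRequest tests.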
# print("Capturing expected headers:")
# print(expected_headers)
assert isinstance(expected_headers, dict), "expected_headers must be a dict. Passed a %s" % type(expected_headers)
for key, val in expected_headers.items():
assert isinstance(key, str)
assert isinstance(val, str)
cookie_key = uuid.uuid4().hex
log = logging.getLogger("Main.TestServer")
sucuri_reqs_1 = 0
sucuri_reqs_2 = 0
sucuri_reqs_3 = 0
class MockServerRequestHandler(BaseHTTPRequestHandler):
def log_message(self, format, *args):
return
def validate_headers(self):
for key, value in expected_headers.items():
if (is_annoying_pjs or is_selenium_garbage_chromium or skip_header_checks) and key == 'Accept-Encoding':
# So PhantomJS monkeys with accept-encoding headers
# Just ignore that particular header, I guess.
pass
# Selenium does not let us override the user-agent
# and other assorted parameters via its API at all.
elif (is_selenium_garbage_chromium or skip_header_checks) and key == 'Accept-Language':
pass
# Chromium is just completely broken for the Accept header
elif (is_annoying_pjs or is_selenium_garbage_chromium or is_chromium or skip_header_checks) and key == 'Accept':
pass
elif not skip_header_checks:
v1 = value.replace(" ", "")
v2 = self.headers[key]
if v2 is None:
v2 = ""
v2 = v2.replace(" ", "")
test_context.assertEqual(v1, v2, msg="Mismatch in header parameter '{}' : expect: '{}' -> received:'{}' ({})".format(
key,
value,
self.headers[key],
{
'is_annoying_pjs' : is_annoying_pjs,
'is_chromium' : is_chromium,
'is_selenium_garbage_chromium' : is_selenium_garbage_chromium,
'skip_header_checks' : skip_header_checks,
},
)
)
def _get_handler(self):
# Process an HTTP GET request and return a response with an HTTP 200 status.
# print("Path: ", self.path)
# print("Headers: ", self.headers)
# print("Cookie(s): ", self.headers.get_all('Cookie', failobj=[]))
try:
self.validate_headers()
except Exception:
self.send_response(500)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"Headers failed validation!")
raise
if self.path == "/":
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"Root OK?")
elif self.path == "/favicon.ico":
self.send_response(404)
self.end_headers()
elif self.path == "/raw-txt":
self.send_response(200)
self.send_header('Content-type', "text/plain")
self.end_headers()
self.wfile.write(b"Root OK?")
elif self.path == "/html-decode":
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"Root OK?")
elif self.path == "/html/real":
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><body>Root OK?</body></html>")
elif self.path == "/compressed/deflate":
self.send_response(200)
self.send_header('Content-Encoding', 'deflate')
self.send_header('Content-type', "text/html")
self.end_headers()
inb = b"Root OK?"
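# A negative wbits value makes zlib emit a raw deflate stream with no header,
# which is what some servers send for Content-Encoding: deflate.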
cobj = zlib.compressobj(wbits=-zlib.MAX_WBITS)
t1 = cobj.compress(inb) + cobj.flush()
self.wfile.write(t1)
elif self.path == "/compressed/gzip":
self.send_response(200)
self.send_header('Content-Encoding', 'gzip')
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(gzip.compress(b"Root OK?"))
elif self.path == "/json/invalid":
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"LOLWAT")
elif self.path == "/json/valid":
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b'{"oh" : "hai"}')
elif self.path == "/json/no-coding":
self.send_response(200)
self.end_headers()
self.wfile.write(b'{"oh" : "hai"}')
elif self.path == "/filename/path-only.txt":
self.send_response(200)
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename/path-only-trailing-slash/":
self.send_response(200)
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename/content-disposition":
self.send_response(200)
self.send_header('Content-Disposition', "filename=lolercoaster.txt")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/path-only.txt":
self.send_response(200)
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/content-disposition":
self.send_response(200)
self.send_header('Content-Disposition', "filename=lolercoaster.txt")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/content-disposition-html-suffix":
self.send_response(200)
self.send_header('Content-Disposition', "filename=lolercoaster.html")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/content-disposition-quotes-1":
self.send_response(200)
self.send_header('Content-Disposition', "filename='lolercoaster.html'")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/content-disposition-quotes-2":
self.send_response(200)
self.send_header('Content-Disposition', "filename=\"lolercoaster.html\"")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/content-disposition-quotes-spaces-1":
self.send_response(200)
self.send_header('Content-Disposition', "filename='loler coaster.html'")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/content-disposition-quotes-spaces-2":
self.send_response(200)
self.send_header('Content-Disposition', "filename=\"loler coaster.html\"")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/explicit-html-mime":
self.send_response(200)
self.send_header('Content-Disposition', "filename=lolercoaster.html")
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/redirect/bad-1":
self.send_response(302)
self.end_headers()
elif self.path == "/redirect/bad-2":
self.send_response(302)
self.send_header('location', "bad-2")
self.end_headers()
elif self.path == "/redirect/bad-3":
self.send_response(302)
self.send_header('location', "gopher://www.google.com")
self.end_headers()
elif self.path == "/redirect/from-1":
self.send_response(302)
self.send_header('location', "to-1")
self.end_headers()
elif self.path == "/redirect/to-1":
self.send_response(200)
self.end_headers()
self.wfile.write(b"Redirect-To-1")
elif self.path == "/redirect/from-2":
self.send_response(302)
self.send_header('uri', "to-2")
self.end_headers()
elif self.path == "/redirect/to-2":
self.send_response(200)
self.end_headers()
self.wfile.write(b"Redirect-To-2")
elif self.path == "/redirect/from-3":
self.send_response(302)
newurl = "http://{}:{}".format(self.server.server_address[0], self.server.server_address[1])
self.send_header('uri', newurl)
self.end_headers()
elif self.path == "/password/expect":
# print("Password")
# print(self.headers)
self.send_response(200)
self.end_headers()
if 'Authorization' not in self.headers:
self.wfile.write(b"Password not sent!!")
return
val = self.headers['Authorization']
passval = val.split(" ")[-1]
passstr = base64.b64decode(passval)
if passstr == b'lol:wat':
self.wfile.write(b"Password Ok?")
else:
self.wfile.write(b"Password Bad!")
elif self.path == "/content/have-title":
self.send_response(200)
self.end_headers()
self.wfile.write(b"<html><head><title>I can haz title?</title></head><body>This page has a title!</body></html>")
elif self.path == "/content/no-title":
self.send_response(200)
self.end_headers()
self.wfile.write(b"<html><head></head><body>This page has no title. Sadface.jpg</body></html>")
elif self.path == "/binary_ctnt":
self.send_response(200)
self.send_header('Content-type', "image/jpeg")
self.end_headers()
self.wfile.write(b"Binary!\x00\x01\x02\x03")
##################################################################################################################################
# Cookie stuff
##################################################################################################################################
elif self.path == '/cookie_test':
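# Set a cookie carrying the per-server random cookie_key; /cookie_require later
# checks that the client sends it back.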
cook = cookies.SimpleCookie()
cook['cookie_test_key'] = cookie_key
cook['cookie_test_key']['path'] = "/"
cook['cookie_test_key']['domain'] = ""
expiration = datetime.datetime.now() + datetime.timedelta(days=30)
cook['cookie_test_key']["expires"] = expiration.strftime("%a, %d-%b-%Y %H:%M:%S PST")
self.send_response(200)
self.send_header('Content-type', "text/html")
self.send_header('Set-Cookie', cook['cookie_test_key'].OutputString())
self.end_headers()
self.wfile.write(b"<html><body>CF Cookie Test</body></html>")
elif self.path == '/cookie_require':
if self.headers.get_all('Cookie', failobj=[]):
cook = self.headers.get_all('Cookie', failobj=[])[0]
cook_key, cook_value = cook.split("=", 1)
if cook_key == 'cookie_test_key' and cook_value == cookie_key:
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><body>Cookie forwarded properly!</body></html>")
return
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><body>Cookie is missing</body></html>")
##################################################################################################################################
# Sucuri validation
##################################################################################################################################
elif self.path == '/sucuri_shit_3':
# I'd like to get this down to just 2 requests (cookie bounce, and fetch).
# Doing that requires pulling html content out of chromium, though.
# Annoying.
nonlocal sucuri_reqs_3
sucuri_reqs_3 += 1
if sucuri_reqs_3 > 3:
raise RuntimeError("Too many requests to sucuri_shit_3 (%s)!" % sucuri_reqs_3)
if self.headers.get_all('Cookie', failobj=[]):
cook = self.headers.get_all('Cookie', failobj=[])[0]
cook_key, cook_value = cook.split("=", 1)
if cook_key == 'sucuri_cloudproxy_uuid_6293e0004' and cook_value == '04cbb56494ebedbcd19a61b2d728c478':
# if cook['']
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><head><title>At target preemptive Sucuri page!</title></head><body>Preemptive waf circumvented OK (p3)?</body></html>")
return
container_dir = os.path.dirname(__file__)
fpath = os.path.join(container_dir, "waf_garbage", 'sucuri_garbage.html')
with open(fpath, "rb") as fp:
plain_contents = fp.read()
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(plain_contents)
elif self.path == '/sucuri_shit_2':
# This particular path is the one we should already have a cookie for.
# As such, we expect one request only
nonlocal sucuri_reqs_2
sucuri_reqs_2 += 1
if sucuri_reqs_2 > 1:
raise RuntimeError("Too many requests to sucuri_shit_2 (%s)!" % sucuri_reqs_2)
if self.headers.get_all('Cookie', failobj=[]):
cook = self.headers.get_all('Cookie', failobj=[])[0]
cook_key, cook_value = cook.split("=", 1)
if cook_key == 'sucuri_cloudproxy_uuid_6293e0004' and cook_value == '04cbb56494ebedbcd19a61b2d728c478':
# if cook['']
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><head><title>At target preemptive Sucuri page!</title></head><body>Preemptive waf circumvented OK (p2)?</body></html>")
return
container_dir = os.path.dirname(__file__)
fpath = os.path.join(container_dir, "waf_garbage", 'sucuri_garbage.html')
with open(fpath, "rb") as fp:
plain_contents = fp.read()
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(plain_contents)
elif self.path == '/sucuri_shit':
nonlocal sucuri_reqs_1
sucuri_reqs_1 += 1
if sucuri_reqs_1 > 4:
raise RuntimeError("Too many requests to sucuri_shit (%s)!" % sucuri_reqs_1)
# print("Fetch for ", self.path)
# print("Cookies:", self.headers.get_all('Cookie', failobj=[]))
if self.headers.get_all('Cookie', failobj=[]):
cook = self.headers.get_all('Cookie', failobj=[])[0]
cook_key, cook_value = cook.split("=", 1)
if cook_key == 'sucuri_cloudproxy_uuid_6293e0004' and cook_value == '04cbb56494ebedbcd19a61b2d728c478':
# if cook['']
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><head><title>At target Sucuri page!</title></head><body>Sucuri Redirected OK?</body></html>")
return
container_dir = os.path.dirname(__file__)
fpath = os.path.join(container_dir, "waf_garbage", 'sucuri_garbage.html')
with open(fpath, "rb") as fp:
plain_contents = fp.read()
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(plain_contents)
##################################################################################################################################
# Cloudflare validation
##################################################################################################################################
elif self.path == '/cloudflare_under_attack_shit_2':
if self.headers.get_all('Cookie', failobj=[]):
cook = self.headers.get_all('Cookie', failobj=[])[0]
cook_key, cook_value = cook.split("=", 1)
if cook_key == 'cloudflare_validate_key' and cook_value == cookie_key:
# if cook['']
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><head><title>At target CF page!</title></head><body>CF Redirected OK?</body></html>")
return
container_dir = os.path.dirname(__file__)
fpath = os.path.join(container_dir, "waf_garbage", 'cf_js_challenge_03_12_2018.html')
with open(fpath, "rb") as fp:
plain_contents = fp.read()
self.server_version = "cloudflare is garbage"
self.send_response(503)
self.send_header('Server', "cloudflare is garbage")
self.send_header('Content-type','text/html')
self.end_headers()
self.wfile.write(plain_contents)
elif self.path == '/cloudflare_under_attack_shit':
if self.headers.get_all('Cookie', failobj=[]):
cook = self.headers.get_all('Cookie', failobj=[])[0]
cook_key, cook_value = cook.split("=", 1)
if cook_key == 'cloudflare_validate_key' and cook_value == cookie_key:
# if cook['']
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><head><title>At target CF page!</title></head><body>CF Redirected OK?</body></html>")
return
container_dir = os.path.dirname(__file__)
fpath = os.path.join(container_dir, "waf_garbage", 'cf_js_challenge_03_12_2018.html')
with open(fpath, "rb") as fp:
plain_contents = fp.read()
self.server_version = "cloudflare is garbage"
self.send_response(503)
self.send_header('Server', "cloudflare is garbage")
self.send_header('Content-type','text/html')
self.end_headers()
self.wfile.write(plain_contents)
elif self.path == '/cdn-cgi/l/chk_jschl?jschl_vc=427c2b1cd4fba29608ee81b200e94bfa&pass=1543827239.915-44n9IE20mS&jschl_answer=9.66734594':
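# Mimics Cloudflare's JS-challenge answer endpoint: set the validation cookie,
# then bounce the client back to the protected page.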
cook = cookies.SimpleCookie()
cook['cloudflare_validate_key'] = cookie_key
cook['cloudflare_validate_key']['path'] = "/"
cook['cloudflare_validate_key']['domain'] = ""
expiration = datetime.datetime.now() + datetime.timedelta(days=30)
cook['cloudflare_validate_key']["expires"] = expiration.strftime("%a, %d-%b-%Y %H:%M:%S PST")
self.send_response(200)
self.send_header('Content-type', "text/html")
self.send_header('Set-Cookie', cook['cloudflare_validate_key'].OutputString())
self.end_headers()
body = "<html><body>Setting cookies.<script>window.location.href='/cloudflare_under_attack_shit'</script></body></html>"
self.wfile.write(body.encode("utf-8"))
##################################################################################################################################
# Handle requests for an unknown path
##################################################################################################################################
else:
test_context.assertEqual(self.path, "This shouldn't happen!")
def do_GET(self):
# Process an HTTP GET request and return a response with an HTTP 200 status.
log.info("Request for URL path: '%s'", self.path)
# print("Headers: ", self.headers)
# print("Cookie(s): ", self.headers.get_all('Cookie', failobj=[]))
try:
return self._get_handler()
except Exception as e:
log.error("Exception in handler!")
for line in traceback.format_exc().split("\n"):
log.error(line)
raise e
return MockServerRequestHandler
def get_free_port():
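# Bind to port 0 so the OS assigns a free ephemeral port, then release it; there is a
# small window in which another process could grab the port before the mock server binds it.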
s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)
s.bind(('localhost', 0))
address, port = s.getsockname()
s.close()
return port
def start_server(assertion_class,
from_wg,
port_override = None,
is_chromium = None,
is_selenium_garbage_chromium = False,
is_annoying_pjs = False,
skip_header_checks = False
):
# Configure mock server.
if port_override:
mock_server_port = port_override
else:
mock_server_port = get_free_port()
expected_headers = dict(from_wg.browserHeaders)
print(from_wg)
print(expected_headers)
assert isinstance(expected_headers, dict)
captured_server = capture_expected_headers(
expected_headers = expected_headers,
test_context = assertion_class,
is_chromium = is_chromium,
is_selenium_garbage_chromium = is_selenium_garbage_chromium,
is_annoying_pjs = is_annoying_pjs,
skip_header_checks = skip_header_checks
)
retries = 4
for x in range(retries + 1):
try:
mock_server = HTTPServer(('0.0.0.0', mock_server_port), captured_server)
break
except OSError:
time.sleep(0.2)
if x >= retries:
raise
# Start running mock server in a separate thread.
# Daemon threads automatically shut down when the main process exits.
mock_server_thread = Thread(target=mock_server.serve_forever)
mock_server_thread.daemon = True
mock_server_thread.start()
return mock_server_port, mock_server, mock_server_thread
if __name__ == '__main__':
wg = WebRequest.WebGetRobust()
srv = start_server(
assertion_class = None,
from_wg = wg,
skip_header_checks = True)
print("running server on port: ", srv)
while 1:
time.sleep(1)
|
core.py
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import json
import unittest
import bleach
import doctest
import mock
import multiprocessing
import os
import re
import signal
import sqlalchemy
import tempfile
import warnings
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from freezegun import freeze_time
from numpy.testing import assert_array_almost_equal
from six.moves.urllib.parse import urlencode
from time import sleep
from airflow import configuration
from airflow.executors import SequentialExecutor
from airflow.models import Variable
configuration.load_test_config()
from airflow import jobs, models, DAG, utils, macros, settings, exceptions
from airflow.models import BaseOperator
from airflow.operators.bash_operator import BashOperator
from airflow.operators.check_operator import CheckOperator, ValueCheckOperator
from airflow.operators.dagrun_operator import TriggerDagRunOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.hooks.base_hook import BaseHook
from airflow.hooks.sqlite_hook import SqliteHook
from airflow.bin import cli
from airflow.www import app as application
from airflow.settings import Session
from airflow.utils import timezone
from airflow.utils.timezone import datetime
from airflow.utils.state import State
from airflow.utils.dates import infer_time_unit, round_time, scale_time_units
from lxml import html
from airflow.exceptions import AirflowException
from airflow.configuration import AirflowConfigException, run_command
from jinja2.sandbox import SecurityError
from jinja2 import UndefinedError
import six
NUM_EXAMPLE_DAGS = 19
DEV_NULL = '/dev/null'
TEST_DAG_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
DEFAULT_DATE = datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
TEST_DAG_ID = 'unit_tests'
try:
import cPickle as pickle
except ImportError:
# Python 3
import pickle
def reset(dag_id=TEST_DAG_ID):
session = Session()
tis = session.query(models.TaskInstance).filter_by(dag_id=dag_id)
tis.delete()
session.commit()
session.close()
reset()
class OperatorSubclass(BaseOperator):
"""
An operator to test template substitution
"""
template_fields = ['some_templated_field']
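# Fields listed in template_fields are rendered through Jinja before execute() runs,
# so tests can inspect the rendered value of some_templated_field.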
def __init__(self, some_templated_field, *args, **kwargs):
super(OperatorSubclass, self).__init__(*args, **kwargs)
self.some_templated_field = some_templated_field
def execute(self, *args, **kwargs):
pass
class CoreTest(unittest.TestCase):
default_scheduler_args = {"num_runs": 1}
def setUp(self):
configuration.load_test_config()
self.dagbag = models.DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
self.dag = DAG(TEST_DAG_ID, default_args=self.args)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.run_after_loop = self.dag_bash.get_task('run_after_loop')
self.run_this_last = self.dag_bash.get_task('run_this_last')
def test_schedule_dag_no_previous_runs(self):
"""
Tests scheduling a dag with no previous runs
"""
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_previous_runs')
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0),
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
dag.clear()
def test_schedule_dag_fake_scheduled_previous(self):
"""
Test scheduling a dag where there is a prior DagRun
which has the same run_id as the next run should have
"""
delta = timedelta(hours=1)
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_fake_scheduled_previous',
schedule_interval=delta,
start_date=DEFAULT_DATE)
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=DEFAULT_DATE))
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
dag.create_dagrun(run_id=models.DagRun.id_for_date(DEFAULT_DATE),
execution_date=DEFAULT_DATE,
state=State.SUCCESS,
external_trigger=True)
dag_run = scheduler.create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
DEFAULT_DATE + delta,
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
def test_schedule_dag_once(self):
"""
Tests scheduling a dag scheduled for @once - should be scheduled the first time
it is called, and not scheduled the second.
"""
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_once')
dag.schedule_interval = '@once'
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertIsNone(dag_run2)
dag.clear()
def test_fractional_seconds(self):
"""
Tests if fractional seconds are stored in the database
"""
dag = DAG(TEST_DAG_ID + 'test_fractional_seconds')
dag.schedule_interval = '@once'
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
start_date = timezone.utcnow()
run = dag.create_dagrun(
run_id='test_' + start_date.isoformat(),
execution_date=start_date,
start_date=start_date,
state=State.RUNNING,
external_trigger=False
)
run.refresh_from_db()
self.assertEqual(start_date, run.execution_date,
"dag run execution_date loses precision")
self.assertEqual(start_date, run.start_date,
"dag run start_date loses precision ")
def test_schedule_dag_start_end_dates(self):
"""
Tests that an attempt to schedule a task after the Dag's end_date
does not succeed.
"""
delta = timedelta(hours=1)
runs = 3
start_date = DEFAULT_DATE
end_date = start_date + (runs - 1) * delta
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_start_end_dates',
start_date=start_date,
end_date=end_date,
schedule_interval=delta)
dag.add_task(models.BaseOperator(task_id='faketastic',
owner='Also fake'))
# Create and schedule the dag runs
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for i in range(runs):
dag_runs.append(scheduler.create_dag_run(dag))
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
@freeze_time('2016-01-01')
def test_schedule_dag_no_end_date_up_to_today_only(self):
"""
Tests that a Dag created without an end_date can only be scheduled up
to and including the current datetime.
For example, if today is 2016-01-01 and we are scheduling from a
start_date of 2015-01-01, only jobs up to, but not including
2016-01-01 should be scheduled.
"""
session = settings.Session()
delta = timedelta(days=1)
start_date = DEFAULT_DATE
runs = 365
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_end_date_up_to_today_only',
start_date=start_date,
schedule_interval=delta)
dag.add_task(models.BaseOperator(task_id='faketastic',
owner='Also fake'))
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for i in range(runs):
dag_run = scheduler.create_dag_run(dag)
dag_runs.append(dag_run)
# Mark the DagRun as complete
dag_run.state = State.SUCCESS
session.merge(dag_run)
session.commit()
# Attempt to schedule an additional dag run (for 2016-01-01)
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
def test_confirm_unittest_mod(self):
self.assertTrue(configuration.get('core', 'unit_test_mode'))
def test_pickling(self):
dp = self.dag.pickle()
self.assertEqual(dp.pickle.dag_id, self.dag.dag_id)
def test_rich_comparison_ops(self):
class DAGsubclass(DAG):
pass
dag_eq = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_load_time = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_name = DAG(TEST_DAG_ID + '_neq', default_args=self.args)
dag_subclass = DAGsubclass(TEST_DAG_ID, default_args=self.args)
dag_subclass_diff_name = DAGsubclass(
TEST_DAG_ID + '2', default_args=self.args)
for d in [dag_eq, dag_diff_name, dag_subclass, dag_subclass_diff_name]:
d.last_loaded = self.dag.last_loaded
# test identity equality
self.assertEqual(self.dag, self.dag)
# test dag (in)equality based on _comps
self.assertEqual(dag_eq, self.dag)
self.assertNotEqual(dag_diff_name, self.dag)
self.assertNotEqual(dag_diff_load_time, self.dag)
# test dag inequality based on type even if _comps happen to match
self.assertNotEqual(dag_subclass, self.dag)
# a dag should equal an unpickled version of itself
d = pickle.dumps(self.dag)
self.assertEqual(pickle.loads(d), self.dag)
# dags are ordered based on dag_id no matter what the type is
self.assertLess(self.dag, dag_diff_name)
self.assertGreater(self.dag, dag_diff_load_time)
self.assertLess(self.dag, dag_subclass_diff_name)
# greater than should have been created automatically by functools
self.assertGreater(dag_diff_name, self.dag)
# hashes are non-random and match equality
self.assertEqual(hash(self.dag), hash(self.dag))
self.assertEqual(hash(dag_eq), hash(self.dag))
self.assertNotEqual(hash(dag_diff_name), hash(self.dag))
self.assertNotEqual(hash(dag_subclass), hash(self.dag))
def test_check_operators(self):
conn_id = "sqlite_default"
captainHook = BaseHook.get_hook(conn_id=conn_id)
captainHook.run("CREATE TABLE operator_test_table (a, b)")
captainHook.run("insert into operator_test_table values (1,2)")
t = CheckOperator(
task_id='check',
sql="select count(*) from operator_test_table",
conn_id=conn_id,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
t = ValueCheckOperator(
task_id='value_check',
pass_value=95,
tolerance=0.1,
conn_id=conn_id,
sql="SELECT 100",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
captainHook.run("drop table operator_test_table")
def test_clear_api(self):
task = self.dag_bash.tasks[0]
task.clear(
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
upstream=True, downstream=True)
ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.are_dependents_done()
def test_illegal_args(self):
"""
Tests that Operators reject illegal arguments
"""
with warnings.catch_warnings(record=True) as w:
t = BashOperator(
task_id='test_illegal_args',
bash_command='echo success',
dag=self.dag,
illegal_argument_1234='hello?')
self.assertTrue(
issubclass(w[0].category, PendingDeprecationWarning))
self.assertIn(
'Invalid arguments were passed to BashOperator.',
w[0].message.args[0])
def test_bash_operator(self):
t = BashOperator(
task_id='test_bash_operator',
bash_command="echo success",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_multi_byte_output(self):
t = BashOperator(
task_id='test_multi_byte_bash_operator',
bash_command=u"echo \u2600",
dag=self.dag,
output_encoding='utf-8')
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_kill(self):
import psutil
sleep_time = "100%d" % os.getpid()
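# Embed the test process PID in the sleep duration so the spawned 'sleep' process
# can be identified by its cmdline below.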
t = BashOperator(
task_id='test_bash_operator_kill',
execution_timeout=timedelta(seconds=1),
bash_command="/bin/bash -c 'sleep %s'" % sleep_time,
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
sleep(2)
pid = -1
for proc in psutil.process_iter():
if proc.cmdline() == ['sleep', sleep_time]:
pid = proc.pid
if pid != -1:
os.kill(pid, signal.SIGTERM)
self.fail("BashOperator's subprocess still running after stopping on timeout!")
def test_trigger_dagrun(self):
def trigga(context, obj):
return obj
t = TriggerDagRunOperator(
task_id='test_trigger_dagrun',
trigger_dag_id='example_bash_operator',
python_callable=trigga,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_dryrun(self):
t = BashOperator(
task_id='test_dryrun',
bash_command="echo success",
dag=self.dag)
t.dry_run()
def test_sqlite(self):
import airflow.operators.sqlite_operator
t = airflow.operators.sqlite_operator.SqliteOperator(
task_id='time_sqlite',
sql="CREATE TABLE IF NOT EXISTS unitest (dummy VARCHAR(20))",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_timeout(self):
t = PythonOperator(
task_id='test_timeout',
execution_timeout=timedelta(seconds=1),
python_callable=lambda: sleep(5),
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_python_op(self):
def test_py_op(templates_dict, ds, **kwargs):
if not templates_dict['ds'] == ds:
raise Exception("failure")
t = PythonOperator(
task_id='test_py_op',
provide_context=True,
python_callable=test_py_op,
templates_dict={'ds': "{{ ds }}"},
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_complex_template(self):
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field['bar'][1],
context['ds'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field={
'foo': '123',
'bar': ['baz', '{{ ds }}']
},
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_variable(self):
"""
Test the availability of variables in templates
"""
val = {
'test_value': 'a test value'
}
Variable.set("a_variable", val['test_value'])
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable(self):
"""
Test the availability of variables (serialized as JSON) in templates
"""
val = {
'test_value': {'foo': 'bar', 'obj': {'v1': 'yes', 'v2': 'no'}}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value']['obj']['v2'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.json.a_variable.obj.v2 }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable_as_value(self):
"""
Test the availability of variables (serialized as JSON) in templates, but
accessed as a value
"""
val = {
'test_value': {'foo': 'bar'}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
u'{"foo": "bar"}')
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_non_bool(self):
"""
Test templates can handle objects with no sense of truthiness
"""
class NonBoolObject(object):
def __len__(self):
return NotImplemented
def __bool__(self):
return NotImplemented
t = OperatorSubclass(
task_id='test_bad_template_obj',
some_templated_field=NonBoolObject(),
dag=self.dag)
t.resolve_template_files()
def test_import_examples(self):
self.assertEqual(len(self.dagbag.dags), NUM_EXAMPLE_DAGS)
def test_local_task_job(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(task_instance=ti, ignore_ti_state=True)
job.run()
def test_raw_job(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
ti.dag = self.dag_bash
ti.run(ignore_ti_state=True)
def test_doctests(self):
modules = [utils, macros]
for mod in modules:
failed, tests = doctest.testmod(mod)
if failed:
raise Exception("Failed a doctest")
def test_variable_set_get_round_trip(self):
Variable.set("tested_var_set_id", "Monday morning breakfast")
self.assertEqual("Monday morning breakfast", Variable.get("tested_var_set_id"))
def test_variable_set_get_round_trip_json(self):
value = {"a": 17, "b": 47}
Variable.set("tested_var_set_id", value, serialize_json=True)
self.assertEqual(value, Variable.get("tested_var_set_id", deserialize_json=True))
def test_get_non_existing_var_should_return_default(self):
default_value = "some default val"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value))
def test_get_non_existing_var_should_not_deserialize_json_default(self):
default_value = "}{ this is a non JSON default }{"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value,
deserialize_json=True))
def test_variable_setdefault_round_trip(self):
key = "tested_var_setdefault_1_id"
value = "Monday morning breakfast in Paris"
Variable.setdefault(key, value)
self.assertEqual(value, Variable.get(key))
def test_variable_setdefault_round_trip_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Happiness": True}
Variable.setdefault(key, value, deserialize_json=True)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_variable_setdefault_existing_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Happiness": True}
Variable.set(key, value, serialize_json=True)
val = Variable.setdefault(key, value, deserialize_json=True)
# Check that both the returned value and the stored value are handled correctly.
self.assertEqual(value, val)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_parameterized_config_gen(self):
cfg = configuration.parameterized_config(configuration.DEFAULT_CONFIG)
# making sure some basic building blocks are present:
self.assertIn("[core]", cfg)
self.assertIn("dags_folder", cfg)
self.assertIn("sql_alchemy_conn", cfg)
self.assertIn("fernet_key", cfg)
# making sure replacement actually happened
self.assertNotIn("{AIRFLOW_HOME}", cfg)
self.assertNotIn("{FERNET_KEY}", cfg)
def test_config_use_original_when_original_and_fallback_are_present(self):
self.assertTrue(configuration.has_option("core", "FERNET_KEY"))
self.assertFalse(configuration.has_option("core", "FERNET_KEY_CMD"))
FERNET_KEY = configuration.get('core', 'FERNET_KEY')
configuration.set("core", "FERNET_KEY_CMD", "printf HELLO")
FALLBACK_FERNET_KEY = configuration.get(
"core",
"FERNET_KEY"
)
self.assertEqual(FERNET_KEY, FALLBACK_FERNET_KEY)
# restore the conf back to the original state
configuration.remove_option("core", "FERNET_KEY_CMD")
def test_config_throw_error_when_original_and_fallback_is_absent(self):
self.assertTrue(configuration.has_option("core", "FERNET_KEY"))
self.assertFalse(configuration.has_option("core", "FERNET_KEY_CMD"))
FERNET_KEY = configuration.get("core", "FERNET_KEY")
configuration.remove_option("core", "FERNET_KEY")
with self.assertRaises(AirflowConfigException) as cm:
configuration.get("core", "FERNET_KEY")
exception = str(cm.exception)
message = "section/key [core/fernet_key] not found in config"
self.assertEqual(message, exception)
# restore the conf back to the original state
configuration.set("core", "FERNET_KEY", FERNET_KEY)
self.assertTrue(configuration.has_option("core", "FERNET_KEY"))
def test_config_override_original_when_non_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = "some value"
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = configuration.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_config_override_original_when_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = ""
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = configuration.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_round_time(self):
rt1 = round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt1)
rt2 = round_time(datetime(2015, 1, 2), relativedelta(months=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt2)
rt3 = round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 16, 0, 0), rt3)
rt4 = round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 15, 0, 0), rt4)
rt5 = round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt5)
rt6 = round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt6)
def test_infer_time_unit(self):
self.assertEqual('minutes', infer_time_unit([130, 5400, 10]))
self.assertEqual('seconds', infer_time_unit([110, 50, 10, 100]))
self.assertEqual('hours', infer_time_unit([100000, 50000, 10000, 20000]))
self.assertEqual('days', infer_time_unit([200000, 100000]))
def test_scale_time_units(self):
# use assert_almost_equal from numpy.testing since we are comparing
# floating point arrays
arr1 = scale_time_units([130, 5400, 10], 'minutes')
assert_array_almost_equal(arr1, [2.167, 90.0, 0.167], decimal=3)
arr2 = scale_time_units([110, 50, 10, 100], 'seconds')
assert_array_almost_equal(arr2, [110.0, 50.0, 10.0, 100.0], decimal=3)
arr3 = scale_time_units([100000, 50000, 10000, 20000], 'hours')
assert_array_almost_equal(arr3, [27.778, 13.889, 2.778, 5.556],
decimal=3)
arr4 = scale_time_units([200000, 100000], 'days')
assert_array_almost_equal(arr4, [2.315, 1.157], decimal=3)
def test_duplicate_dependencies(self):
regexp = "Dependency (.*)runme_0(.*)run_after_loop(.*) " \
"already registered"
with self.assertRaisesRegexp(AirflowException, regexp):
self.runme_0.set_downstream(self.run_after_loop)
with self.assertRaisesRegexp(AirflowException, regexp):
self.run_after_loop.set_upstream(self.runme_0)
def test_bad_trigger_rule(self):
with self.assertRaises(AirflowException):
DummyOperator(
task_id='test_bad_trigger',
trigger_rule="non_existant",
dag=self.dag)
def test_terminate_task(self):
"""If a task instance's db state gets deleted, it should fail"""
TI = models.TaskInstance
dag = self.dagbag.dags.get('test_utils')
task = dag.task_dict.get('sleeps_forever')
ti = TI(task=task, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(
task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
# Running task instance asynchronously
p = multiprocessing.Process(target=job.run)
p.start()
sleep(5)
settings.engine.dispose()
session = settings.Session()
ti.refresh_from_db(session=session)
# making sure it's actually running
self.assertEqual(State.RUNNING, ti.state)
ti = session.query(TI).filter_by(
dag_id=task.dag_id,
task_id=task.task_id,
execution_date=DEFAULT_DATE
).one()
# deleting the instance should result in a failure
session.delete(ti)
session.commit()
# waiting for the async task to finish
p.join()
# making sure that the task ended up as failed
ti.refresh_from_db(session=session)
self.assertEqual(State.FAILED, ti.state)
session.close()
def test_task_fail_duration(self):
"""If a task fails, the duration should be recorded in TaskFail"""
p = BashOperator(
task_id='pass_sleepy',
bash_command='sleep 3',
dag=self.dag)
f = BashOperator(
task_id='fail_sleepy',
bash_command='sleep 5',
execution_timeout=timedelta(seconds=3),
retry_delay=timedelta(seconds=0),
dag=self.dag)
session = settings.Session()
try:
p.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except Exception:
pass
try:
f.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except Exception:
pass
p_fails = session.query(models.TaskFail).filter_by(
task_id='pass_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
f_fails = session.query(models.TaskFail).filter_by(
task_id='fail_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
print(f_fails)
self.assertEqual(0, len(p_fails))
self.assertEqual(1, len(f_fails))
# Check that the failed task's recorded duration reflects at least the 3-second execution timeout.
self.assertGreaterEqual(sum([f.duration for f in f_fails]), 3)
def test_dag_stats(self):
"""Correctly sets/dirties/cleans rows of DagStat table"""
session = settings.Session()
session.query(models.DagRun).delete()
session.query(models.DagStat).delete()
session.commit()
models.DagStat.update([], session=session)
run1 = self.dag_bash.create_dagrun(
run_id="run1",
execution_date=DEFAULT_DATE,
state=State.RUNNING)
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).all()
self.assertEqual(3, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
for stats in qry:
if stats.state == State.RUNNING:
self.assertEqual(stats.count, 1)
else:
self.assertEqual(stats.count, 0)
self.assertFalse(stats.dirty)
run2 = self.dag_bash.create_dagrun(
run_id="run2",
execution_date=DEFAULT_DATE + timedelta(days=1),
state=State.RUNNING)
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).all()
self.assertEqual(3, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
for stats in qry:
if stats.state == State.RUNNING:
self.assertEqual(stats.count, 2)
else:
self.assertEqual(stats.count, 0)
self.assertFalse(stats.dirty)
session.query(models.DagRun).first().state = State.SUCCESS
session.commit()
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).filter(models.DagStat.state == State.SUCCESS).all()
self.assertEqual(1, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
self.assertEqual(State.SUCCESS, qry[0].state)
self.assertEqual(1, qry[0].count)
self.assertFalse(qry[0].dirty)
qry = session.query(models.DagStat).filter(models.DagStat.state == State.RUNNING).all()
self.assertEqual(1, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
self.assertEqual(State.RUNNING, qry[0].state)
self.assertEqual(1, qry[0].count)
self.assertFalse(qry[0].dirty)
session.query(models.DagRun).delete()
session.query(models.DagStat).delete()
session.commit()
session.close()
def test_run_command(self):
if six.PY3:
write = r'sys.stdout.buffer.write("\u1000foo".encode("utf8"))'
else:
write = r'sys.stdout.write(u"\u1000foo".encode("utf8"))'
cmd = 'import sys; {0}; sys.stdout.flush()'.format(write)
self.assertEqual(run_command("python -c '{0}'".format(cmd)),
u'\u1000foo' if six.PY3 else 'foo')
self.assertEqual(run_command('echo "foo bar"'), u'foo bar\n')
self.assertRaises(AirflowConfigException, run_command, 'bash -c "exit 1"')
class CliTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(CliTests, cls).setUpClass()
cls._cleanup()
def setUp(self):
super(CliTests, self).setUp()
configuration.load_test_config()
app = application.create_app()
app.config['TESTING'] = True
self.parser = cli.CLIFactory.get_parser()
self.dagbag = models.DagBag(dag_folder=DEV_NULL, include_examples=True)
self.session = Session()
def tearDown(self):
self._cleanup(session=self.session)
super(CliTests, self).tearDown()
@staticmethod
def _cleanup(session=None):
if session is None:
session = Session()
session.query(models.Pool).delete()
session.query(models.Variable).delete()
session.commit()
session.close()
def test_cli_list_dags(self):
args = self.parser.parse_args(['list_dags', '--report'])
cli.list_dags(args)
def test_cli_list_tasks(self):
for dag_id in self.dagbag.dags.keys():
args = self.parser.parse_args(['list_tasks', dag_id])
cli.list_tasks(args)
args = self.parser.parse_args([
'list_tasks', 'example_bash_operator', '--tree'])
cli.list_tasks(args)
@mock.patch("airflow.bin.cli.db_utils.initdb")
def test_cli_initdb(self, initdb_mock):
cli.initdb(self.parser.parse_args(['initdb']))
initdb_mock.assert_called_once_with(False)
@mock.patch("airflow.bin.cli.db_utils.resetdb")
def test_cli_resetdb(self, resetdb_mock):
cli.resetdb(self.parser.parse_args(['resetdb', '--yes']))
resetdb_mock.assert_called_once_with(False)
def test_cli_connections_list(self):
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(['connections', '--list']))
stdout = mock_stdout.getvalue()
conns = [[x.strip("'") for x in re.findall(r"'\w+'", line)[:2]]
for ii, line in enumerate(stdout.split('\n'))
if ii % 2 == 1]
conns = [conn for conn in conns if len(conn) > 0]
# Assert that some of the connections are present in the output as
# expected:
self.assertIn(['aws_default', 'aws'], conns)
self.assertIn(['beeline_default', 'beeline'], conns)
self.assertIn(['emr_default', 'emr'], conns)
self.assertIn(['mssql_default', 'mssql'], conns)
self.assertIn(['mysql_default', 'mysql'], conns)
self.assertIn(['postgres_default', 'postgres'], conns)
self.assertIn(['wasb_default', 'wasb'], conns)
# Attempt to list connections with invalid cli args
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--list', '--conn_id=fake', '--conn_uri=fake-uri',
'--conn_type=fake-type', '--conn_host=fake_host',
'--conn_login=fake_login', '--conn_password=fake_password',
'--conn_schema=fake_schema', '--conn_port=fake_port', '--conn_extra=fake_extra']))
stdout = mock_stdout.getvalue()
# Check list attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are not compatible with the " +
"--list flag: ['conn_id', 'conn_uri', 'conn_extra', 'conn_type', 'conn_host', 'conn_login', 'conn_password', 'conn_schema', 'conn_port']"),
])
def test_cli_connections_add_delete(self):
# Add connections:
uri = 'postgresql://airflow:airflow@host:5432/airflow'
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new1',
'--conn_uri=%s' % uri]))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new2',
'--conn_uri=%s' % uri]))
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new3',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new4',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new5',
'--conn_type=hive_metastore', '--conn_login=airflow',
'--conn_password=airflow', '--conn_host=host',
'--conn_port=9083', '--conn_schema=airflow']))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new6',
'--conn_uri', "", '--conn_type=google_cloud_platform', '--conn_extra', "{'extra': 'yes'}"]))
stdout = mock_stdout.getvalue()
# Check addition stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tSuccessfully added `conn_id`=new1 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new2 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new3 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new4 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new5 : " +
"hive_metastore://airflow:airflow@host:9083/airflow"),
("\tSuccessfully added `conn_id`=new6 : " +
"google_cloud_platform://:@:")
])
# Attempt to add duplicate
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new1',
'--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tA connection with `conn_id`=new1 already exists",
])
# Attempt to add without providing conn_id
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are required to add a connection:" +
" ['conn_id']"),
])
# Attempt to add without providing conn_uri
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new']))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are required to add a connection:" +
" ['conn_uri or conn_type']"),
])
# Prepare to add connections
session = settings.Session()
extra = {'new1': None,
'new2': None,
'new3': "{'extra': 'yes'}",
'new4': "{'extra': 'yes'}"}
# Add connections
for index in range(1, 6):
conn_id = 'new%s' % index
result = (session
.query(models.Connection)
.filter(models.Connection.conn_id == conn_id)
.first())
result = (result.conn_id, result.conn_type, result.host,
result.port, result.get_extra())
if conn_id in ['new1', 'new2', 'new3', 'new4']:
self.assertEqual(result, (conn_id, 'postgres', 'host', 5432,
extra[conn_id]))
elif conn_id == 'new5':
self.assertEqual(result, (conn_id, 'hive_metastore', 'host',
9083, None))
elif conn_id == 'new6':
self.assertEqual(result, (conn_id, 'google_cloud_platform',
None, None, "{'extra': 'yes'}"))
# Delete connections
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new1']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new2']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new3']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new4']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new5']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new6']))
stdout = mock_stdout.getvalue()
# Check deletion stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tSuccessfully deleted `conn_id`=new1",
"\tSuccessfully deleted `conn_id`=new2",
"\tSuccessfully deleted `conn_id`=new3",
"\tSuccessfully deleted `conn_id`=new4",
"\tSuccessfully deleted `conn_id`=new5",
"\tSuccessfully deleted `conn_id`=new6"
])
# Check deletions
for index in range(1, 7):
conn_id = 'new%s' % index
result = (session.query(models.Connection)
.filter(models.Connection.conn_id == conn_id)
.first())
self.assertTrue(result is None)
# Attempt to delete a non-existent connection
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=fake']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tDid not find a connection with `conn_id`=fake",
])
# Attempt to delete with invalid cli args
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=fake',
'--conn_uri=%s' % uri, '--conn_type=fake-type']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are not compatible with the " +
"--delete flag: ['conn_uri', 'conn_type']"),
])
session.close()
def test_cli_test(self):
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0', '--dry_run',
DEFAULT_DATE.isoformat()]))
def test_cli_test_with_params(self):
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'also_run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
def test_cli_run(self):
cli.run(self.parser.parse_args([
'run', 'example_bash_operator', 'runme_0', '-l',
DEFAULT_DATE.isoformat()]))
def test_task_state(self):
cli.task_state(self.parser.parse_args([
'task_state', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
def test_dag_state(self):
self.assertEqual(None, cli.dag_state(self.parser.parse_args([
'dag_state', 'example_bash_operator', DEFAULT_DATE.isoformat()])))
def test_pause(self):
args = self.parser.parse_args([
'pause', 'example_bash_operator'])
cli.pause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [True, 1])
args = self.parser.parse_args([
'unpause', 'example_bash_operator'])
cli.unpause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [False, 0])
def test_subdag_clear(self):
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm'])
cli.clear(args)
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm', '--exclude_subdags'])
cli.clear(args)
def test_get_dags(self):
dags = cli.get_dags(self.parser.parse_args(['clear', 'example_subdag_operator', '-c']))
self.assertEqual(len(dags), 1)
dags = cli.get_dags(self.parser.parse_args(['clear', 'subdag', '-dx', '-c']))
self.assertGreater(len(dags), 1)
with self.assertRaises(AirflowException):
cli.get_dags(self.parser.parse_args(['clear', 'foobar', '-dx', '-c']))
def test_backfill(self):
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '-t', 'runme_0', '--dry_run',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '--dry_run',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '-l',
'-s', DEFAULT_DATE.isoformat()]))
def test_process_subdir_path_with_placeholder(self):
self.assertEqual(os.path.join(settings.DAGS_FOLDER, 'abc'), cli.process_subdir('DAGS_FOLDER/abc'))
def test_trigger_dag(self):
cli.trigger_dag(self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'-c', '{"foo": "bar"}']))
self.assertRaises(
ValueError,
cli.trigger_dag,
self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'--run_id', 'trigger_dag_xxx',
'-c', 'NOT JSON'])
)
def test_delete_dag(self):
DM = models.DagModel
key = "my_dag_id"
session = settings.Session()
session.add(DM(dag_id=key))
session.commit()
cli.delete_dag(self.parser.parse_args([
'delete_dag', key, '--yes']))
self.assertEqual(session.query(DM).filter_by(dag_id=key).count(), 0)
self.assertRaises(
AirflowException,
cli.delete_dag,
self.parser.parse_args([
'delete_dag',
'does_not_exist_dag',
'--yes'])
)
def test_pool_create(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
self.assertEqual(self.session.query(models.Pool).count(), 1)
def test_pool_get(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
try:
cli.pool(self.parser.parse_args(['pool', '-g', 'foo']))
except Exception as e:
self.fail("The 'pool -g foo' command raised unexpectedly: %s" % e)
def test_pool_delete(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
cli.pool(self.parser.parse_args(['pool', '-x', 'foo']))
self.assertEqual(self.session.query(models.Pool).count(), 0)
def test_pool_no_args(self):
try:
cli.pool(self.parser.parse_args(['pool']))
except Exception as e:
self.fail("The 'pool' command raised unexpectedly: %s" % e)
def test_variables(self):
# Checks if all subcommands are properly received
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"bar"}']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'foo']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'baz', '-d', 'bar']))
cli.variables(self.parser.parse_args([
'variables']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'bar']))
cli.variables(self.parser.parse_args([
'variables', '-i', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-e', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'original']))
# First export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables1.json']))
first_exp = open('variables1.json', 'r')
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'updated']))
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"oops"}']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'foo']))
# First import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables1.json']))
self.assertEqual('original', models.Variable.get('bar'))
self.assertEqual('{"foo": "bar"}', models.Variable.get('foo'))
# Second export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables2.json']))
second_exp = open('variables2.json', 'r')
self.assertEqual(first_exp.read(), second_exp.read())
second_exp.close()
first_exp.close()
# Second import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables2.json']))
self.assertEqual('original', models.Variable.get('bar'))
self.assertEqual('{"foo": "bar"}', models.Variable.get('foo'))
os.remove('variables1.json')
os.remove('variables2.json')
def _wait_pidfile(self, pidfile):
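# Poll until the webserver has written its pid file, then return the pid it contains.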
while True:
try:
with open(pidfile) as f:
return int(f.read())
except:
sleep(1)
def test_cli_webserver_foreground(self):
import subprocess
# Confirm that webserver hasn't been launched.
# pgrep returns exit status 1 if no process matched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in foreground and terminate it.
p = subprocess.Popen(["airflow", "webserver"])
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_foreground_with_pid(self):
import subprocess
# Run webserver in foreground with --pid option
pidfile = tempfile.mkstemp()[1]
p = subprocess.Popen(["airflow", "webserver", "--pid", pidfile])
# Check the file specified by --pid option exists
self._wait_pidfile(pidfile)
# Terminate webserver
p.terminate()
p.wait()
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_background(self):
import subprocess
import psutil
# Confirm that webserver hasn't been launched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in background.
subprocess.Popen(["airflow", "webserver", "-D"])
pidfile = cli.setup_locations("webserver")[0]
self._wait_pidfile(pidfile)
# Assert that gunicorn and its monitor are launched.
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Terminate monitor process.
pidfile = cli.setup_locations("webserver-monitor")[0]
pid = self._wait_pidfile(pidfile)
p = psutil.Process(pid)
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Patch for causing webserver timeout
@mock.patch("airflow.bin.cli.get_num_workers_running", return_value=0)
def test_cli_webserver_shutdown_when_gunicorn_master_is_killed(self, _):
# Shorten the timeout so that this test doesn't take too long
configuration.conf.set("webserver", "web_server_master_timeout", "10")
args = self.parser.parse_args(['webserver'])
with self.assertRaises(SystemExit) as e:
cli.webserver(args)
self.assertEqual(e.exception.code, 1)
class SecurityTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("webserver", "expose_config", "True")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
self.dagbag = models.DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def test_csrf_rejection(self):
endpoints = ([
"/admin/queryview/",
"/admin/airflow/paused?dag_id=example_python_operator&is_paused=false",
])
for endpoint in endpoints:
response = self.app.post(endpoint)
self.assertIn('CSRF token is missing', response.data.decode('utf-8'))
def test_csrf_acceptance(self):
response = self.app.get("/admin/queryview/")
csrf = self.get_csrf(response)
response = self.app.post("/admin/queryview/", data=dict(csrf_token=csrf))
self.assertEqual(200, response.status_code)
def test_xss(self):
try:
self.app.get("/admin/airflow/tree?dag_id=<script>alert(123456)</script>")
except:
# an exception is expected here since the dag doesn't exist
pass
response = self.app.get("/admin/log", follow_redirects=True)
self.assertIn(bleach.clean("<script>alert(123456)</script>"), response.data.decode('UTF-8'))
def test_chart_data_template(self):
"""Protect chart_data from being able to do RCE."""
session = settings.Session()
Chart = models.Chart
chart1 = Chart(
label='insecure_chart',
conn_id='airflow_db',
chart_type='bar',
sql="SELECT {{ ''.__class__.__mro__[1].__subclasses__() }}"
)
chart2 = Chart(
label="{{ ''.__class__.__mro__[1].__subclasses__() }}",
conn_id='airflow_db',
chart_type='bar',
sql="SELECT 1"
)
chart3 = Chart(
label="{{ subprocess.check_output('ls') }}",
conn_id='airflow_db',
chart_type='bar',
sql="SELECT 1"
)
session.add(chart1)
session.add(chart2)
session.add(chart3)
session.commit()
chart1 = session.query(Chart).filter(Chart.label == 'insecure_chart').first()
with self.assertRaises(SecurityError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart1.id))
chart2 = session.query(Chart).filter(
Chart.label == "{{ ''.__class__.__mro__[1].__subclasses__() }}"
).first()
with self.assertRaises(SecurityError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart2.id))
chart3 = session.query(Chart).filter(
Chart.label == "{{ subprocess.check_output('ls') }}"
).first()
with self.assertRaises(UndefinedError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart3.id))
def tearDown(self):
configuration.conf.set("webserver", "expose_config", "False")
self.dag_bash.clear(start_date=DEFAULT_DATE, end_date=timezone.utcnow())
class WebUiTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("webserver", "expose_config", "True")
app = application.create_app()
app.config['TESTING'] = True
app.config['WTF_CSRF_METHODS'] = []
self.app = app.test_client()
self.dagbag = models.DagBag(include_examples=True)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.dag_bash2 = self.dagbag.dags['test_example_bash_operator']
self.sub_dag = self.dagbag.dags['example_subdag_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.example_xcom = self.dagbag.dags['example_xcom']
self.dagrun_bash2 = self.dag_bash2.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
self.sub_dag.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
self.example_xcom.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
def test_index(self):
response = self.app.get('/', follow_redirects=True)
resp_html = response.data.decode('utf-8')
self.assertIn("DAGs", resp_html)
self.assertIn("example_bash_operator", resp_html)
# The HTML should contain data for the last run: a link to the specific run and the text of the date.
url = "/admin/airflow/graph?" + urlencode({
"dag_id": self.dag_bash2.dag_id,
"execution_date": self.dagrun_bash2.execution_date,
}).replace("&", "&")
self.assertIn(url, resp_html)
self.assertIn(self.dagrun_bash2.execution_date.strftime("%Y-%m-%d %H:%M"), resp_html)
def test_query(self):
response = self.app.get('/admin/queryview/')
self.assertIn("Ad Hoc Query", response.data.decode('utf-8'))
response = self.app.post(
"/admin/queryview/", data=dict(
conn_id="airflow_db",
sql="SELECT+COUNT%281%29+as+TEST+FROM+task_instance"))
self.assertIn("TEST", response.data.decode('utf-8'))
def test_health(self):
response = self.app.get('/health')
self.assertIn('The server is healthy!', response.data.decode('utf-8'))
def test_noaccess(self):
response = self.app.get('/admin/airflow/noaccess')
self.assertIn("You don't seem to have access.", response.data.decode('utf-8'))
def test_pickle_info(self):
response = self.app.get('/admin/airflow/pickle_info')
self.assertIn('{', response.data.decode('utf-8'))
def test_dag_views(self):
response = self.app.get(
'/admin/airflow/graph?dag_id=example_bash_operator')
self.assertIn("runme_0", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/tree?num_runs=25&dag_id=example_bash_operator')
self.assertIn("runme_0", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/duration?days=30&dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/tries?days=30&dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/landing_times?'
'days=30&dag_id=test_example_bash_operator')
self.assertIn("test_example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/landing_times?'
'days=30&dag_id=example_xcom')
self.assertIn("example_xcom", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/gantt?dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/code?dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/blocked')
response = self.app.get(
'/admin/configurationview/')
self.assertIn("Airflow Configuration", response.data.decode('utf-8'))
self.assertIn("Running Configuration", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/rendered?'
'task_id=runme_1&dag_id=example_bash_operator&'
'execution_date={}'.format(DEFAULT_DATE_ISO))
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/log?task_id=run_this_last&'
'dag_id=example_bash_operator&execution_date={}'
''.format(DEFAULT_DATE_ISO))
self.assertIn("run_this_last", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/task?'
'task_id=runme_0&dag_id=example_bash_operator&'
'execution_date={}'.format(DEFAULT_DATE_DS))
self.assertIn("Attributes", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/dag_stats')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/task_stats')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
url = (
"/admin/airflow/success?task_id=run_this_last&"
"dag_id=test_example_bash_operator&upstream=false&downstream=false&"
"future=false&past=false&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
response = self.app.get(
'/admin/airflow/clear?task_id=run_this_last&'
'dag_id=test_example_bash_operator&future=true&past=false&'
'upstream=true&downstream=false&'
'execution_date={}&'
'origin=/admin'.format(DEFAULT_DATE_DS))
self.assertIn("Wait a minute", response.data.decode('utf-8'))
url = (
"/admin/airflow/success?task_id=section-1&"
"dag_id=example_subdag_operator&upstream=true&downstream=true&"
"future=false&past=false&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
self.assertIn("section-1-task-1", response.data.decode('utf-8'))
self.assertIn("section-1-task-2", response.data.decode('utf-8'))
self.assertIn("section-1-task-3", response.data.decode('utf-8'))
self.assertIn("section-1-task-4", response.data.decode('utf-8'))
self.assertIn("section-1-task-5", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
url = (
"/admin/airflow/clear?task_id=runme_1&"
"dag_id=test_example_bash_operator&future=false&past=false&"
"upstream=false&downstream=true&"
"execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
url = (
"/admin/airflow/run?task_id=runme_0&"
"dag_id=example_bash_operator&ignore_all_deps=false&ignore_ti_state=true&"
"ignore_task_deps=true&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
response = self.app.get(
"/admin/airflow/refresh?dag_id=example_bash_operator")
response = self.app.get("/admin/airflow/refresh_all")
response = self.app.post(
"/admin/airflow/paused?"
"dag_id=example_python_operator&is_paused=false")
self.assertIn("OK", response.data.decode('utf-8'))
response = self.app.get("/admin/xcom", follow_redirects=True)
self.assertIn("Xcoms", response.data.decode('utf-8'))
def test_charts(self):
session = Session()
chart_label = "Airflow task instance by type"
chart = session.query(
models.Chart).filter(models.Chart.label == chart_label).first()
chart_id = chart.id
session.close()
response = self.app.get(
'/admin/airflow/chart'
'?chart_id={}&iteration_no=1'.format(chart_id))
self.assertIn("Airflow task instance by type", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/chart_data'
'?chart_id={}&iteration_no=1'.format(chart_id))
self.assertIn("example", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/dag_details?dag_id=example_branch_operator')
self.assertIn("run_this_first", response.data.decode('utf-8'))
def test_fetch_task_instance(self):
url = (
"/admin/airflow/object/task_instances?"
"dag_id=test_example_bash_operator&"
"execution_date={}".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("run_this_last", response.data.decode('utf-8'))
def tearDown(self):
configuration.conf.set("webserver", "expose_config", "False")
self.dag_bash.clear(start_date=DEFAULT_DATE, end_date=timezone.utcnow())
session = Session()
session.query(models.DagRun).delete()
session.query(models.TaskInstance).delete()
session.commit()
session.close()
class SecureModeWebUiTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("core", "secure_mode", "True")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
def test_query(self):
response = self.app.get('/admin/queryview/')
self.assertEqual(response.status_code, 404)
def test_charts(self):
response = self.app.get('/admin/chart/')
self.assertEqual(response.status_code, 404)
def tearDown(self):
configuration.remove_option("core", "SECURE_MODE")
class WebPasswordAuthTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.password_auth")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
from airflow.contrib.auth.backends.password_auth import PasswordUser
session = Session()
user = models.User()
password_user = PasswordUser(user)
password_user.username = 'airflow_passwordauth'
password_user.password = 'password'
print(password_user._password)
session.add(password_user)
session.commit()
session.close()
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def login(self, username, password):
response = self.app.get('/admin/airflow/login')
csrf_token = self.get_csrf(response)
return self.app.post('/admin/airflow/login', data=dict(
username=username,
password=password,
csrf_token=csrf_token
), follow_redirects=True)
def logout(self):
return self.app.get('/admin/airflow/logout', follow_redirects=True)
def test_login_logout_password_auth(self):
self.assertTrue(configuration.getboolean('webserver', 'authenticate'))
response = self.login('user1', 'whatever')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('airflow_passwordauth', 'wrongpassword')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('airflow_passwordauth', 'password')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
def test_unauthorized_password_auth(self):
response = self.app.get("/admin/airflow/landing_times")
self.assertEqual(response.status_code, 302)
def tearDown(self):
configuration.load_test_config()
session = Session()
session.query(models.User).delete()
session.commit()
session.close()
configuration.conf.set("webserver", "authenticate", "False")
class WebLdapAuthTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.ldap_auth")
try:
configuration.conf.add_section("ldap")
except:
pass
configuration.conf.set("ldap", "uri", "ldap://localhost:3890")
configuration.conf.set("ldap", "user_filter", "objectClass=*")
configuration.conf.set("ldap", "user_name_attr", "uid")
configuration.conf.set("ldap", "bind_user", "cn=Manager,dc=example,dc=com")
configuration.conf.set("ldap", "bind_password", "insecure")
configuration.conf.set("ldap", "basedn", "dc=example,dc=com")
configuration.conf.set("ldap", "cacert", "")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def login(self, username, password):
response = self.app.get('/admin/airflow/login')
csrf_token = self.get_csrf(response)
return self.app.post('/admin/airflow/login', data=dict(
username=username,
password=password,
csrf_token=csrf_token
), follow_redirects=True)
def logout(self):
return self.app.get('/admin/airflow/logout', follow_redirects=True)
def test_login_logout_ldap(self):
self.assertTrue(configuration.getboolean('webserver', 'authenticate'))
response = self.login('user1', 'userx')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('userz', 'user1')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('user1', 'user1')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
def test_unauthorized(self):
response = self.app.get("/admin/airflow/landing_times")
self.assertEqual(response.status_code, 302)
def test_no_filter(self):
response = self.login('user1', 'user1')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
self.assertIn('Connections', response.data.decode('utf-8'))
def test_with_filters(self):
configuration.conf.set('ldap', 'superuser_filter',
'description=superuser')
configuration.conf.set('ldap', 'data_profiler_filter',
'description=dataprofiler')
response = self.login('dataprofiler', 'dataprofiler')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.login('superuser', 'superuser')
self.assertIn('Connections', response.data.decode('utf-8'))
def tearDown(self):
configuration.load_test_config()
session = Session()
session.query(models.User).delete()
session.commit()
session.close()
configuration.conf.set("webserver", "authenticate", "False")
class LdapGroupTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.ldap_auth")
try:
configuration.conf.add_section("ldap")
except:
pass
configuration.conf.set("ldap", "uri", "ldap://localhost:3890")
configuration.conf.set("ldap", "user_filter", "objectClass=*")
configuration.conf.set("ldap", "user_name_attr", "uid")
configuration.conf.set("ldap", "bind_user", "cn=Manager,dc=example,dc=com")
configuration.conf.set("ldap", "bind_password", "insecure")
configuration.conf.set("ldap", "basedn", "dc=example,dc=com")
configuration.conf.set("ldap", "cacert", "")
def test_group_belonging(self):
from airflow.contrib.auth.backends.ldap_auth import LdapUser
users = {"user1": ["group1", "group3"],
"user2": ["group2"]
}
for user in users:
mu = models.User(username=user,
is_superuser=False)
auth = LdapUser(mu)
self.assertEqual(set(users[user]), set(auth.ldap_groups))
def tearDown(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
class FakeWebHDFSHook(object):
def __init__(self, conn_id):
self.conn_id = conn_id
def get_conn(self):
return self.conn_id
def check_for_path(self, hdfs_path):
return hdfs_path
class FakeSnakeBiteClientException(Exception):
pass
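# Fake snakebite client returning canned ls() results keyed on the requested path,
# so HDFS sensor tests run without a live HDFS cluster.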
class FakeSnakeBiteClient(object):
def __init__(self):
self.started = True
def ls(self, path, include_toplevel=False):
"""
the fake snakebite client
:param path: the array of path to test
:param include_toplevel: to return the toplevel directory info
:return: a list for path for the matching queries
"""
if path[0] == '/datadirectory/empty_directory' and not include_toplevel:
return []
elif path[0] == '/datadirectory/datafile':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/datafile'
}]
elif path[0] == '/datadirectory/empty_directory' and include_toplevel:
return [{
'group': u'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': u'hdfs',
'path': '/datadirectory/empty_directory'
}]
elif path[0] == '/datadirectory/not_empty_directory' and include_toplevel:
return [{
'group': u'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': u'hdfs',
'path': '/datadirectory/empty_directory'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_empty_directory':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_existing_file_or_directory':
raise FakeSnakeBiteClientException
elif path[0] == '/datadirectory/regex_dir':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862, 'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test1file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test2file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test3file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/copying_file_1.txt._COPYING_'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/copying_file_3.txt.sftp'
}]
else:
raise FakeSnakeBiteClientException
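# HDFSHook replacement whose get_conn() hands back the fake snakebite client above.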
class FakeHDFSHook(object):
def __init__(self, conn_id=None):
self.conn_id = conn_id
def get_conn(self):
client = FakeSnakeBiteClient()
return client
class ConnectionTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
utils.db.initdb()
os.environ['AIRFLOW_CONN_TEST_URI'] = (
'postgres://username:password@ec2.compute.com:5432/the_database')
os.environ['AIRFLOW_CONN_TEST_URI_NO_CREDS'] = (
'postgres://ec2.compute.com/the_database')
def tearDown(self):
env_vars = ['AIRFLOW_CONN_TEST_URI', 'AIRFLOW_CONN_AIRFLOW_DB']
for ev in env_vars:
if ev in os.environ:
del os.environ[ev]
def test_using_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
def test_using_unix_socket_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri_no_creds')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertIsNone(c.login)
self.assertIsNone(c.password)
self.assertIsNone(c.port)
def test_param_setup(self):
c = models.Connection(conn_id='local_mysql', conn_type='mysql',
host='localhost', login='airflow',
password='airflow', schema='airflow')
self.assertEqual('localhost', c.host)
self.assertEqual('airflow', c.schema)
self.assertEqual('airflow', c.login)
self.assertEqual('airflow', c.password)
self.assertIsNone(c.port)
def test_env_var_priority(self):
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertNotEqual('ec2.compute.com', c.host)
os.environ['AIRFLOW_CONN_AIRFLOW_DB'] = \
'postgres://username:password@ec2.compute.com:5432/the_database'
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
del os.environ['AIRFLOW_CONN_AIRFLOW_DB']
def test_dbapi_get_uri(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', hook.get_uri())
conn2 = BaseHook.get_connection(conn_id='test_uri_no_creds')
hook2 = conn2.get_hook()
self.assertEqual('postgres://ec2.compute.com/the_database', hook2.get_uri())
def test_dbapi_get_sqlalchemy_engine(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
engine = hook.get_sqlalchemy_engine()
self.assertIsInstance(engine, sqlalchemy.engine.Engine)
self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', str(engine.url))
def test_get_connections_env_var(self):
conns = SqliteHook.get_connections(conn_id='test_uri')
assert len(conns) == 1
assert conns[0].host == 'ec2.compute.com'
assert conns[0].schema == 'the_database'
assert conns[0].login == 'username'
assert conns[0].password == 'password'
assert conns[0].port == 5432
def test_get_connections_db(self):
conns = BaseHook.get_connections(conn_id='airflow_db')
assert len(conns) == 1
assert conns[0].host == 'localhost'
assert conns[0].schema == 'airflow'
assert conns[0].login == 'root'
class WebHDFSHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
def test_simple_init(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook()
self.assertIsNone(c.proxy_user)
def test_init_proxy_user(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook(proxy_user='someone')
self.assertEqual('someone', c.proxy_user)
try:
from airflow.hooks.hdfs_hook import HDFSHook
import snakebite
except ImportError:
HDFSHook = None
@unittest.skipIf(HDFSHook is None,
"Skipping test because HDFSHook is not installed")
class HDFSHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
os.environ['AIRFLOW_CONN_HDFS_DEFAULT'] = ('hdfs://localhost:8020')
def test_get_client(self):
client = HDFSHook(proxy_user='foo').get_conn()
self.assertIsInstance(client, snakebite.client.Client)
self.assertEqual('localhost', client.host)
self.assertEqual(8020, client.port)
self.assertEqual('foo', client.service.channel.effective_user)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_autoconfig_client(self, mock_get_connections,
MockAutoConfigClient):
c = models.Connection(conn_id='hdfs', conn_type='hdfs',
host='localhost', port=8020, login='foo',
extra=json.dumps({'autoconfig': True}))
mock_get_connections.return_value = [c]
HDFSHook(hdfs_conn_id='hdfs').get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user='foo',
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
def test_get_autoconfig_client_no_conn(self, MockAutoConfigClient):
HDFSHook(hdfs_conn_id='hdfs_missing', autoconfig=True).get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user=None,
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_ha_client(self, mock_get_connections):
c1 = models.Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost', port=8020)
c2 = models.Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost2', port=8020)
mock_get_connections.return_value = [c1, c2]
client = HDFSHook().get_conn()
self.assertIsInstance(client, snakebite.client.HAClient)
try:
from airflow.hooks.http_hook import HttpHook
except ImportError:
HttpHook = None
@unittest.skipIf(HttpHook is None,
"Skipping test because HttpHook is not installed")
class HttpHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
@mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')
def test_http_connection(self, mock_get_connection):
c = models.Connection(conn_id='http_default', conn_type='http',
host='localhost', schema='http')
mock_get_connection.return_value = c
hook = HttpHook()
hook.get_conn({})
self.assertEqual(hook.base_url, 'http://localhost')
@mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')
def test_https_connection(self, mock_get_connection):
c = models.Connection(conn_id='http_default', conn_type='http',
host='localhost', schema='https')
mock_get_connection.return_value = c
hook = HttpHook()
hook.get_conn({})
self.assertEqual(hook.base_url, 'https://localhost')
@mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')
def test_host_encoded_http_connection(self, mock_get_connection):
c = models.Connection(conn_id='http_default', conn_type='http',
host='http://localhost')
mock_get_connection.return_value = c
hook = HttpHook()
hook.get_conn({})
self.assertEqual(hook.base_url, 'http://localhost')
@mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')
def test_host_encoded_https_connection(self, mock_get_connection):
c = models.Connection(conn_id='http_default', conn_type='http',
host='https://localhost')
mock_get_connection.return_value = c
hook = HttpHook()
hook.get_conn({})
self.assertEqual(hook.base_url, 'https://localhost')
send_email_test = mock.Mock()
class EmailTest(unittest.TestCase):
def setUp(self):
configuration.remove_option('email', 'EMAIL_BACKEND')
@mock.patch('airflow.utils.email.send_email')
def test_default_backend(self, mock_send_email):
res = utils.email.send_email('to', 'subject', 'content')
mock_send_email.assert_called_with('to', 'subject', 'content')
self.assertEqual(mock_send_email.return_value, res)
@mock.patch('airflow.utils.email.send_email_smtp')
def test_custom_backend(self, mock_send_email):
configuration.set('email', 'EMAIL_BACKEND', 'tests.core.send_email_test')
utils.email.send_email('to', 'subject', 'content')
send_email_test.assert_called_with(
'to', 'subject', 'content', files=None, dryrun=False,
cc=None, bcc=None, mime_subtype='mixed'
)
self.assertFalse(mock_send_email.called)
class EmailSmtpTest(unittest.TestCase):
def setUp(self):
configuration.set('smtp', 'SMTP_SSL', 'False')
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name])
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(configuration.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(configuration.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
self.assertEqual(u'attachment; filename="' + os.path.basename(attachment.name) + '"',
msg.get_payload()[-1].get(u'Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_bcc_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name], cc='cc', bcc='bcc')
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(configuration.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to', 'cc', 'bcc'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(configuration.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
self.assertEqual(u'attachment; filename="' + os.path.basename(attachment.name) + '"',
msg.get_payload()[-1].get(u'Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime(self, mock_smtp, mock_smtp_ssl):
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
msg = MIMEMultipart()
utils.email.send_MIME_email('from', 'to', msg, dryrun=False)
mock_smtp.assert_called_with(
configuration.get('smtp', 'SMTP_HOST'),
configuration.getint('smtp', 'SMTP_PORT'),
)
self.assertTrue(mock_smtp.return_value.starttls.called)
mock_smtp.return_value.login.assert_called_with(
configuration.get('smtp', 'SMTP_USER'),
configuration.get('smtp', 'SMTP_PASSWORD'),
)
mock_smtp.return_value.sendmail.assert_called_with('from', 'to', msg.as_string())
self.assertTrue(mock_smtp.return_value.quit.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_ssl(self, mock_smtp, mock_smtp_ssl):
configuration.set('smtp', 'SMTP_SSL', 'True')
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp.called)
mock_smtp_ssl.assert_called_with(
configuration.get('smtp', 'SMTP_HOST'),
configuration.getint('smtp', 'SMTP_PORT'),
)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_noauth(self, mock_smtp, mock_smtp_ssl):
configuration.conf.remove_option('smtp', 'SMTP_USER')
configuration.conf.remove_option('smtp', 'SMTP_PASSWORD')
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp_ssl.called)
mock_smtp.assert_called_with(
configuration.get('smtp', 'SMTP_HOST'),
configuration.getint('smtp', 'SMTP_PORT'),
)
self.assertFalse(mock_smtp.login.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_dryrun(self, mock_smtp, mock_smtp_ssl):
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=True)
self.assertFalse(mock_smtp.called)
self.assertFalse(mock_smtp_ssl.called)
if __name__ == '__main__':
unittest.main()
|
reporting_send.py
|
#!/usr/bin/python
import SocketServer
import socket
import threading
import sys
import os
import shlex
import subprocess
import Queue
import platform
import time
import datetime
import argparse
import traceback
uploadPort=4445
#syscallList=list()
Timer=0
TimerInterval = 15 * 60 * 1000 #15 mins
serverUrl = 'https://agent-data.insightfinder.com'
def get_args():
parser = argparse.ArgumentParser(description='Script retrieves arguments for insightfinder system call tracing.')
parser.add_argument('-d', '--HOME_DIR', type=str, help='The HOME directory of Insight syscall trace', required=True)
parser.add_argument('-D', '--ROOT_DIR', type=str, help='The Root directory of Insight agent', required=True)
parser.add_argument('-w', '--SERVER_URL', type=str, help='URL of the insightfinder Server', required=False)
args = parser.parse_args()
homepath = args.HOME_DIR
rootpath = args.ROOT_DIR
global serverUrl
if args.SERVER_URL is not None:
serverUrl = args.SERVER_URL
return homepath, rootpath
def checkPrivilege():
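# Re-exec this script through sudo if it is not already running as root.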
euid = os.geteuid()
if euid != 0:
args = ['sudo', sys.executable] + sys.argv + [os.environ]
os.execlpe('sudo', *args)
class prepareThreads(threading.Thread):
def __init__(self,command,path):
super(prepareThreads, self).__init__()
self.command=command
self.path=path
def run(self):
global homepath
global rootpath
proc = subprocess.Popen(self.command, cwd=self.path, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
(out,err) = proc.communicate()
if "failed" in str(err) or "ERROR" in str(err):
print "Preparing System call tracing logs failed."
sys.exit()
print out
def sendFile(sock):
global homepath
global rootpath
global Timer
global TimerInterval
request = sock.recv(1024)
print str(datetime.datetime.now())
print request
sysTrace=shlex.split(request)
msgType = sysTrace[0]
sysTrace_timestamp = sysTrace[1]
procname = sysTrace[2]
if msgType != "GET":
return
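# Rate-limit uploads: only honor a request if at least TimerInterval ms have passed since the previous upload.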
current = int(round(time.time() * 1000))
if Timer == 0:
Timer = current
else:
if current - Timer < TimerInterval:
print "Two continious msg interval is " + str(current - Timer) + "ms < " + str(TimerInterval) + "ms"
return
os.chdir(homepath+"/buffer")
buffers = list()
for files in os.listdir("."):
if files.startswith("buffer_"):
buffers.append(files.split('buffer_')[1])
buffers.sort()
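# Buffer files are named buffer_<timestamp>; after sorting, buffers[0] is the oldest capture.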
if int(sysTrace_timestamp) <= int(buffers[0]):
response="SYSCALL-TRACE/1.0 404 Not Found\n"
print response
#sock.send(response)
else:
Timer = current
command = "sudo " + homepath + "/fetchSysCall.sh" + " -i " + buffers[0]
print command
thread = prepareThreads(command,homepath)
thread.start()
thread.join()
filename="syscall_" + buffers[0] + ".log"
# The only thing that matters here is that we don't know the procname
#command = "sudo python " + homepath + "/preProcessing.py" + " -d " + homepath + " -p " + procname + " -f " + filename
#print command
#thread = prepareThreads(command,homepath)
#thread.start()
#thread.join()
fnameSeg = filename
#fnameSeg = filename + "_segmented_withContext.log"
fname = homepath + "/data/" + fnameSeg
command = "tar zcvf " + homepath + "/data/" + fnameSeg + ".tar.gz " + homepath + "/data/" + fnameSeg
print command
thread = prepareThreads(command,homepath)
thread.start()
thread.join()
command = "cd " + rootpath +" && unset http_proxy https_proxy && python common/reportBinary.py -f syscall/data/" + fnameSeg + ".tar.gz -m binaryFileReplay -T 1 -S " + sysTrace_timestamp + " -w "+serverUrl
print command
thread = prepareThreads(command,rootpath)
thread.start()
thread.join()
#command = "sudo rm -rf " + homepath + "/data/" + filename + "*"
#print command
#thread = prepareThreads(command,homepath)
#thread.start()
#thread.join()
def acceptThread():
acceptor = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
acceptor.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
acceptor.bind(('', int(uploadPort)))
acceptor.listen(5)
cur_thread=threading.current_thread()
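# Accept connections forever and hand each client socket to sendFile().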
while True:
(clientSock,clientAddr)=acceptor.accept()
print "====Output Request:"
msg = "Connected to " + str(clientAddr[0]) + ":" + str(clientAddr[1])
print msg
thread3=threading.Thread(target=sendFile, args=(clientSock,))
thread3.daemon=True
thread3.start()
#thread3.join()
acceptor.close()
return
def main():
thread1=threading.Thread(target=acceptThread)
thread1.daemon=True
thread1.start()
#thread1.join()
try:
while 1:
time.sleep(.1)
except KeyboardInterrupt:
sys.exit(0)
if __name__=="__main__":
global homepath
global rootpath
checkPrivilege()
homepath, rootpath = get_args()
main()
|
blockchain_processor.py
|
#!/usr/bin/env python
# Copyright(C) 2011-2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import hashlib
from json import dumps, load
import os
from Queue import Queue
import random
import sys
import time
import threading
import urllib
import deserialize
from processor import Processor, print_log
from storage import Storage
from utils import logger, hash_decode, hash_encode, Hash, header_from_string, header_to_string, ProfiledThread, \
rev_hex, int_to_hex4
class BlockchainProcessor(Processor):
def __init__(self, config, shared):
Processor.__init__(self)
# monitoring
self.avg_time = 0,0,0
self.time_ref = time.time()
self.shared = shared
self.config = config
self.up_to_date = False
self.watch_lock = threading.Lock()
self.watch_blocks = []
self.watch_headers = []
self.watched_addresses = {}
self.history_cache = {}
self.merkle_cache = {}
self.max_cache_size = 100000
self.chunk_cache = {}
self.cache_lock = threading.Lock()
self.headers_data = ''
self.headers_path = config.get('leveldb', 'path')
self.mempool_fees = {}
self.mempool_values = {}
self.mempool_addresses = {}
self.mempool_hist = {} # addr -> (txid, delta)
self.mempool_unconfirmed = {} # txid -> set of unconfirmed inputs
self.mempool_hashes = set()
self.mempool_lock = threading.Lock()
self.address_queue = Queue()
try:
self.test_reorgs = config.getboolean('leveldb', 'test_reorgs') # simulate random blockchain reorgs
except:
self.test_reorgs = False
self.storage = Storage(config, shared, self.test_reorgs)
self.bitcoind_url = 'http://%s:%s@%s:%s/' % (
config.get('bitcoind', 'bitcoind_user'),
config.get('bitcoind', 'bitcoind_password'),
config.get('bitcoind', 'bitcoind_host'),
config.get('bitcoind', 'bitcoind_port'))
self.sent_height = 0
self.sent_header = None
# catch_up headers
self.init_headers(self.storage.height)
# start catch_up thread
if config.getboolean('leveldb', 'profiler'):
filename = os.path.join(config.get('leveldb', 'path'), 'profile')
print_log('profiled thread', filename)
self.blockchain_thread = ProfiledThread(filename, target = self.do_catch_up)
else:
self.blockchain_thread = threading.Thread(target = self.do_catch_up)
self.blockchain_thread.start()
def do_catch_up(self):
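# Catch up with the blockchain, then keep processing new blocks and the mempool until shutdown.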
self.header = self.block2header(self.bitcoind('getblock', (self.storage.last_hash,)))
self.header['utxo_root'] = self.storage.get_root_hash().encode('hex')
self.catch_up(sync=False)
if not self.shared.stopped():
print_log("Blockchain is up to date.")
self.memorypool_update()
print_log("Memory pool initialized.")
while not self.shared.stopped():
self.main_iteration()
if self.shared.paused():
print_log("bitcoind is responding")
self.shared.unpause()
time.sleep(10)
def set_time(self):
self.time_ref = time.time()
def print_time(self, num_tx):
delta = time.time() - self.time_ref
# leaky averages
seconds_per_block, tx_per_second, n = self.avg_time
alpha = (1. + 0.01 * n)/(n+1)
seconds_per_block = (1-alpha) * seconds_per_block + alpha * delta
alpha2 = alpha * delta / seconds_per_block
tx_per_second = (1-alpha2) * tx_per_second + alpha2 * num_tx / delta
self.avg_time = seconds_per_block, tx_per_second, n+1
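# Worked example of the leaky average above (illustrative numbers, not real data):
# with n = 9 prior blocks, alpha = (1 + 0.01*9) / 10 = 0.109, so a block that took
# delta = 600s moves a running seconds_per_block of 540s to
# 0.891*540 + 0.109*600 ~= 546.5s. As n grows, alpha shrinks from 1 toward 0.01,
# so the averages start out volatile and settle into roughly a 1% leak per block.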
if self.storage.height%100 == 0 \
or (self.storage.height%10 == 0 and self.storage.height >= 100000)\
or self.storage.height >= 200000:
msg = "block %d (%d %.2fs) %s" %(self.storage.height, num_tx, delta, self.storage.get_root_hash().encode('hex'))
msg += " (%.2ftx/s, %.2fs/block)" % (tx_per_second, seconds_per_block)
run_blocks = self.storage.height - self.start_catchup_height
remaining_blocks = self.bitcoind_height - self.storage.height
if run_blocks>0 and remaining_blocks>0:
remaining_minutes = remaining_blocks * seconds_per_block / 60
new_blocks = int(remaining_minutes / 10) # number of new blocks expected during catchup
blocks_to_process = remaining_blocks + new_blocks
minutes = blocks_to_process * seconds_per_block / 60
rt = "%.0fmin"%minutes if minutes < 300 else "%.1f hours"%(minutes/60)
msg += " (eta %s, %d blocks)" % (rt, remaining_blocks)
print_log(msg)
def wait_on_bitcoind(self):
self.shared.pause()
time.sleep(10)
if self.shared.stopped():
# this will end the thread
raise BaseException()
def bitcoind(self, method, params=()):
postdata = dumps({"method": method, 'params': params, 'id': 'jsonrpc'})
while True:
try:
response = urllib.urlopen(self.bitcoind_url, postdata)
r = load(response)
response.close()
except:
print_log("cannot reach bitcoind...")
self.wait_on_bitcoind()
else:
if r['error'] is not None:
if r['error'].get('code') == -28:
print_log("bitcoind still warming up...")
self.wait_on_bitcoind()
continue
raise BaseException(r['error'])
break
return r.get('result')
@staticmethod
def block2header(b):
return {
"block_height": b.get('height'),
"version": b.get('version'),
"prev_block_hash": b.get('previousblockhash'),
"merkle_root": b.get('merkleroot'),
"timestamp": b.get('time'),
"bits": int(b.get('bits'), 16),
"nonce": b.get('nonce'),
}
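# Illustrative mapping performed by block2header (hypothetical getblock output):
#   {'height': 1000, 'version': 7, 'previousblockhash': '00..ab', 'merkleroot': 'cd..ef',
#    'time': 1455000000, 'bits': '1d00ffff', 'nonce': 12345}
# becomes
#   {'block_height': 1000, 'version': 7, 'prev_block_hash': '00..ab', 'merkle_root': 'cd..ef',
#    'timestamp': 1455000000, 'bits': 486604799, 'nonce': 12345}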
def get_header(self, height):
block_hash = self.bitcoind('getblockhash', (height,))
b = self.bitcoind('getblock', (block_hash,))
return self.block2header(b)
def init_headers(self, db_height):
self.headers_filename = os.path.join(self.headers_path, 'blockchain_headers')
if os.path.exists(self.headers_filename):
height = os.path.getsize(self.headers_filename)/80 - 1 # the current height
if height > 0:
prev_hash = self.hash_header(self.read_header(height))
else:
prev_hash = None
else:
open(self.headers_filename, 'wb').close()
prev_hash = None
height = -1
if height < db_height:
print_log("catching up missing headers:", height, db_height)
try:
while height < db_height:
height += 1
header = self.get_header(height)
if height > 1:
if prev_hash != header.get('prev_block_hash'):
# The prev_hash block is orphaned, go back
print_log("reorganizing, a block in file is orphaned:", prev_hash)
# Go to the parent of the orphaned block
height -= 2
prev_hash = self.hash_header(self.read_header(height))
continue
self.write_header(header, sync=False)
prev_hash = self.hash_header(header)
if (height % 1000) == 0:
print_log("headers file:", height)
except KeyboardInterrupt:
self.flush_headers()
sys.exit()
self.flush_headers()
@staticmethod
def hash_header(header):
header_bytes = header_to_string(header).decode('hex')
if (header["version"] >= 7):
hash_bytes = Hash(header_bytes) # new rules, sha256
else:
hash_bytes = HashScrypt(header_bytes) # old scrypt header hashing (note: HashScrypt is not in the utils import above and must be available at runtime)
return rev_hex(hash_bytes.encode('hex'))
def read_header(self, block_height):
if os.path.exists(self.headers_filename):
with open(self.headers_filename, 'rb') as f:
f.seek(block_height * 80)
h = f.read(80)
if len(h) == 80:
h = header_from_string(h)
return h
def read_chunk(self, index):
with open(self.headers_filename, 'rb') as f:
f.seek(index*2016*80)
chunk = f.read(2016*80)
return chunk.encode('hex')
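# Header file layout assumed above: fixed 80-byte records, served to clients in
# chunks of 2016 headers (one retarget period). For example, chunk index 3 covers
# heights 6048-8063 and starts at byte offset 3*2016*80 = 483840.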
def write_header(self, header, sync=True):
if not self.headers_data:
self.headers_offset = header.get('block_height')
self.headers_data += header_to_string(header).decode('hex')
if sync or len(self.headers_data) > 40*100:
self.flush_headers()
with self.cache_lock:
chunk_index = header.get('block_height')/2016
if chunk_index in self.chunk_cache:
del self.chunk_cache[chunk_index]
def pop_header(self):
# we need to do this only if we have not flushed
if self.headers_data:
self.headers_data = self.headers_data[:-40]
def flush_headers(self):
if not self.headers_data:
return
with open(self.headers_filename, 'rb+') as f:
f.seek(self.headers_offset*80)
f.write(self.headers_data)
self.headers_data = ''
def get_chunk(self, i):
# store them on disk; store the current chunk in memory
with self.cache_lock:
chunk = self.chunk_cache.get(i)
if not chunk:
chunk = self.read_chunk(i)
if chunk:
self.chunk_cache[i] = chunk
return chunk
def get_mempool_transaction(self, txid):
try:
raw_tx = self.bitcoind('getrawtransaction', (txid, 0))
except:
return None
vds = deserialize.BCDataStream()
vds.write(raw_tx.decode('hex'))
try:
return deserialize.parse_Transaction(vds, is_coinbase=False)
except:
print_log("ERROR: cannot parse", txid)
return None
def get_unconfirmed_history(self, addr):
hist = []
with self.mempool_lock:
for tx_hash, delta in self.mempool_hist.get(addr, ()):
height = -1 if self.mempool_unconfirmed.get(tx_hash) else 0
fee = self.mempool_fees.get(tx_hash)
hist.append({'tx_hash':tx_hash, 'height':height, 'fee':fee})
return hist
def get_history(self, addr, cache_only=False):
with self.cache_lock:
hist = self.history_cache.get(addr)
if hist is not None:
return hist
if cache_only:
return -1
hist = self.storage.get_history(addr)
hist.extend(self.get_unconfirmed_history(addr))
with self.cache_lock:
if len(self.history_cache) > self.max_cache_size:
logger.info("clearing cache")
self.history_cache.clear()
self.history_cache[addr] = hist
return hist
def get_unconfirmed_value(self, addr):
# sum the mempool value deltas for this address; the history dicts returned by
# get_unconfirmed_history do not carry the delta, so read mempool_hist directly
with self.mempool_lock:
return sum(delta for _tx_hash, delta in self.mempool_hist.get(addr, ()))
def get_status(self, addr, cache_only=False):
tx_points = self.get_history(addr, cache_only)
if cache_only and tx_points == -1:
return -1
if not tx_points:
return None
if tx_points == ['*']:
return '*'
status = ''.join(tx.get('tx_hash') + ':%d:' % tx.get('height') for tx in tx_points)
return hashlib.sha256(status).digest().encode('hex')
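# Example of the status preimage hashed above (hypothetical history): for
# [{'tx_hash': 'aa..01', 'height': 5000}, {'tx_hash': 'bb..02', 'height': 5010}]
# the string is 'aa..01:5000:bb..02:5010:' and the status is its hex sha256 digest.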
def get_merkle(self, tx_hash, height, cache_only):
with self.cache_lock:
out = self.merkle_cache.get(tx_hash)
if out is not None:
return out
if cache_only:
return -1
block_hash = self.bitcoind('getblockhash', (height,))
b = self.bitcoind('getblock', (block_hash,))
tx_list = b.get('tx')
tx_pos = tx_list.index(tx_hash)
merkle = map(hash_decode, tx_list)
target_hash = hash_decode(tx_hash)
s = []
while len(merkle) != 1:
if len(merkle) % 2:
merkle.append(merkle[-1])
n = []
while merkle:
new_hash = Hash(merkle[0] + merkle[1])
if merkle[0] == target_hash:
s.append(hash_encode(merkle[1]))
target_hash = new_hash
elif merkle[1] == target_hash:
s.append(hash_encode(merkle[0]))
target_hash = new_hash
n.append(new_hash)
merkle = merkle[2:]
merkle = n
out = {"block_height": height, "merkle": s, "pos": tx_pos}
with self.cache_lock:
if len(self.merkle_cache) > self.max_cache_size:
logger.info("clearing merkle cache")
self.merkle_cache.clear()
self.merkle_cache[tx_hash] = out
return out
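# Worked example of the merkle branch built above (made-up 4-tx block [A, B, C, D],
# target tx B): level 0 pairs (A,B) and (C,D) append A to the branch and set the
# target to H(A+B); level 1 pairs (H(A+B), H(C+D)) append H(C+D) and reach the root.
# The result is {'merkle': [A, H(C+D)] hex-encoded, 'pos': 1, 'block_height': height}.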
@staticmethod
def deserialize_block(block):
txlist = block.get('tx')
tx_hashes = [] # ordered txids
txdict = {} # deserialized tx
is_coinbase = True
for raw_tx in txlist:
tx_hash = hash_encode(Hash(raw_tx.decode('hex')))
vds = deserialize.BCDataStream()
vds.write(raw_tx.decode('hex'))
try:
tx = deserialize.parse_Transaction(vds, is_coinbase)
except:
print_log("ERROR: cannot parse", tx_hash)
continue
tx_hashes.append(tx_hash)
txdict[tx_hash] = tx
is_coinbase = False
return tx_hashes, txdict
def import_block(self, block, block_hash, block_height, revert=False):
touched_addr = set()
# deserialize transactions
tx_hashes, txdict = self.deserialize_block(block)
# undo info
if revert:
undo_info = self.storage.get_undo_info(block_height)
tx_hashes.reverse()
else:
undo_info = {}
for txid in tx_hashes: # must be ordered
tx = txdict[txid]
if not revert:
undo = self.storage.import_transaction(txid, tx, block_height, touched_addr)
undo_info[txid] = undo
else:
undo = undo_info.pop(txid)
self.storage.revert_transaction(txid, tx, block_height, touched_addr, undo)
if revert:
assert undo_info == {}
# add undo info
if not revert:
self.storage.write_undo_info(block_height, self.bitcoind_height, undo_info)
# record the new chain tip (hash and height)
self.storage.save_height(block_hash, block_height)
for addr in touched_addr:
self.invalidate_cache(addr)
self.storage.update_hashes()
# batch write modified nodes
self.storage.batch_write()
# return length for monitoring
return len(tx_hashes)
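# Undo-info round trip illustrated: a forward import stores one undo record per
# txid in undo_info and persists it for the block; a revert loads that dict, pops
# the records in reverse tx order, and the assert in this method checks that every
# record was consumed, i.e. the revert exactly mirrored the original import.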
def add_request(self, session, request):
# see if we can get it from cache; if not, add the request to the queue
message_id = request.get('id')
try:
result = self.process(request, cache_only=True)
except BaseException as e:
self.push_response(session, {'id': message_id, 'error': str(e)})
return
if result == -1:
self.queue.put((session, request))
else:
self.push_response(session, {'id': message_id, 'result': result})
def do_subscribe(self, method, params, session):
with self.watch_lock:
if method == 'blockchain.numblocks.subscribe':
if session not in self.watch_blocks:
self.watch_blocks.append(session)
elif method == 'blockchain.headers.subscribe':
if session not in self.watch_headers:
self.watch_headers.append(session)
elif method == 'blockchain.address.subscribe':
address = params[0]
l = self.watched_addresses.get(address)
if l is None:
self.watched_addresses[address] = [session]
elif session not in l:
l.append(session)
def do_unsubscribe(self, method, params, session):
with self.watch_lock:
if method == 'blockchain.numblocks.subscribe':
if session in self.watch_blocks:
self.watch_blocks.remove(session)
elif method == 'blockchain.headers.subscribe':
if session in self.watch_headers:
self.watch_headers.remove(session)
elif method == "blockchain.address.subscribe":
addr = params[0]
l = self.watched_addresses.get(addr)
if not l:
return
if session in l:
l.remove(session)
if session in l:
print_log("error rc!!")
self.shared.stop()
if l == []:
del self.watched_addresses[addr]
def process(self, request, cache_only=False):
message_id = request['id']
method = request['method']
params = request.get('params', ())
result = None
error = None
if method == 'blockchain.numblocks.subscribe':
result = self.storage.height
elif method == 'blockchain.headers.subscribe':
result = self.header
elif method == 'blockchain.address.subscribe':
address = str(params[0])
result = self.get_status(address, cache_only)
elif method == 'blockchain.address.get_history':
address = str(params[0])
result = self.get_history(address, cache_only)
elif method == 'blockchain.address.get_mempool':
address = str(params[0])
result = self.get_unconfirmed_history(address)
elif method == 'blockchain.address.get_balance':
address = str(params[0])
confirmed = self.storage.get_balance(address)
unconfirmed = self.get_unconfirmed_value(address)
result = { 'confirmed':confirmed, 'unconfirmed':unconfirmed }
elif method == 'blockchain.address.get_proof':
address = str(params[0])
result = self.storage.get_proof(address)
elif method == 'blockchain.address.listunspent':
address = str(params[0])
result = self.storage.listunspent(address)
elif method == 'blockchain.utxo.get_address':
txid = str(params[0])
pos = int(params[1])
txi = (txid + int_to_hex4(pos)).decode('hex')
result = self.storage.get_address(txi)
elif method == 'blockchain.block.get_header':
if cache_only:
result = -1
else:
height = int(params[0])
result = self.get_header(height)
elif method == 'blockchain.block.get_chunk':
if cache_only:
result = -1
else:
index = int(params[0])
result = self.get_chunk(index)
elif method == 'blockchain.transaction.broadcast':
try:
txo = self.bitcoind('sendrawtransaction', params)
print_log("sent tx:", txo)
result = txo
except BaseException, e:
error = e.args[0]
if error["code"] == -26:
# If we return anything that's not the transaction hash,
# it's considered an error message
message = error["message"]
if "non-mandatory-script-verify-flag" in message:
result = "Your client produced a transaction that is not accepted by the Bitcoin network any more. Please upgrade to Electrum 2.5.1 or newer\n"
else:
result = "The transaction was rejected by network rules.(" + message + ")\n" \
"[" + params[0] + "]"
else:
result = error["message"] # do send an error
print_log("error:", result)
elif method == 'blockchain.transaction.get_merkle':
tx_hash = params[0]
tx_height = params[1]
result = self.get_merkle(tx_hash, tx_height, cache_only)
elif method == 'blockchain.transaction.get':
tx_hash = params[0]
result = self.bitcoind('getrawtransaction', (tx_hash, 0))
elif method == 'blockchain.estimatefee':
num = int(params[0])
result = self.bitcoind('estimatefee', (num,))
elif method == 'blockchain.relayfee':
result = self.relayfee
else:
raise BaseException("unknown method:%s" % method)
return result
def get_block(self, block_hash):
block = self.bitcoind('getblock', (block_hash,))
rawtxreq = []
i = 0
for txid in block['tx']:
rawtxreq.append({
"method": "getrawtransaction",
"params": (txid,),
"id": i,
})
i += 1
postdata = dumps(rawtxreq)
while True:
try:
response = urllib.urlopen(self.bitcoind_url, postdata)
r = load(response)
response.close()
except:
logger.error("bitcoind error (getfullblock)")
self.wait_on_bitcoind()
continue
try:
rawtxdata = []
for ir in r:
assert ir['error'] is None, "Error: make sure you run bitcoind with txindex=1; use -reindex if needed."
rawtxdata.append(ir['result'])
except BaseException as e:
logger.error(str(e))
self.wait_on_bitcoind()
continue
block['tx'] = rawtxdata
return block
def catch_up(self, sync=True):
self.start_catchup_height = self.storage.height
prev_root_hash = None
n = 0
while not self.shared.stopped():
# are we done yet?
info = self.bitcoind('getinfo')
self.relayfee = info.get('relayfee')
self.bitcoind_height = info.get('blocks')
bitcoind_block_hash = self.bitcoind('getblockhash', (self.bitcoind_height,))
if self.storage.last_hash == bitcoind_block_hash:
self.up_to_date = True
break
self.set_time()
revert = (random.randint(1, 100) == 1) if self.test_reorgs and self.storage.height>100 else False
# not done..
self.up_to_date = False
try:
next_block_hash = self.bitcoind('getblockhash', (self.storage.height + 1,))
except BaseException, e:
revert = True
next_block = self.get_block(next_block_hash if not revert else self.storage.last_hash)
if (next_block.get('previousblockhash') == self.storage.last_hash) and not revert:
prev_root_hash = self.storage.get_root_hash()
n = self.import_block(next_block, next_block_hash, self.storage.height+1)
self.storage.height = self.storage.height + 1
self.write_header(self.block2header(next_block), sync)
self.storage.last_hash = next_block_hash
else:
# revert current block
block = self.get_block(self.storage.last_hash)
print_log("blockchain reorg", self.storage.height, block.get('previousblockhash'), self.storage.last_hash)
n = self.import_block(block, self.storage.last_hash, self.storage.height, revert=True)
self.pop_header()
self.flush_headers()
self.storage.height -= 1
# read previous header from disk
self.header = self.read_header(self.storage.height)
self.storage.last_hash = self.hash_header(self.header)
if prev_root_hash:
assert prev_root_hash == self.storage.get_root_hash()
prev_root_hash = None
# print time
self.print_time(n)
self.header = self.block2header(self.bitcoind('getblock', (self.storage.last_hash,)))
self.header['utxo_root'] = self.storage.get_root_hash().encode('hex')
if self.shared.stopped():
print_log( "closing database" )
self.storage.close()
def memorypool_update(self):
t0 = time.time()
mempool_hashes = set(self.bitcoind('getrawmempool'))
touched_addresses = set()
# get new transactions
new_tx = {}
for tx_hash in mempool_hashes:
if tx_hash in self.mempool_hashes:
continue
tx = self.get_mempool_transaction(tx_hash)
if not tx:
continue
new_tx[tx_hash] = tx
# remove older entries from mempool_hashes
self.mempool_hashes = mempool_hashes
# check all tx outputs
for tx_hash, tx in new_tx.iteritems():
mpa = self.mempool_addresses.get(tx_hash, {})
out_values = []
out_sum = 0
for x in tx.get('outputs'):
addr = x.get('address', '')
value = x['value']
out_values.append((addr, value))
if not addr:
continue
v = mpa.get(addr, 0)
v += value
mpa[addr] = v
touched_addresses.add(addr)
out_sum += value
self.mempool_fees[tx_hash] = -out_sum
self.mempool_addresses[tx_hash] = mpa
self.mempool_values[tx_hash] = out_values
# check all inputs
for tx_hash, tx in new_tx.iteritems():
mpa = self.mempool_addresses.get(tx_hash, {})
# are we spending unconfirmed inputs?
unconfirmed = set()
input_sum = 0
for x in tx.get('inputs'):
prev_hash = x.get('prevout_hash')
prev_n = x.get('prevout_n')
mpv = self.mempool_values.get(prev_hash)
if mpv:
addr, value = mpv[prev_n]
unconfirmed.add(prev_hash)
else:
txi = (prev_hash + int_to_hex4(prev_n)).decode('hex')
try:
addr = self.storage.get_address(txi)
value = self.storage.get_utxo_value(addr,txi)
except:
print_log("utxo not in database; postponing mempool update")
return
# we can proceed
input_sum += value
if not addr:
continue
v = mpa.get(addr, 0)
v -= value
mpa[addr] = v
touched_addresses.add(addr)
self.mempool_unconfirmed[tx_hash] = unconfirmed
self.mempool_addresses[tx_hash] = mpa
self.mempool_fees[tx_hash] += input_sum
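# Fee bookkeeping sketch (hypothetical amounts): a mempool tx spending 10000 in
# inputs and creating 9800 in outputs first gets mempool_fees[txid] = -9800 in the
# output pass, then +10000 here in the input pass, leaving a fee of 200.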
# remove deprecated entries from mempool_addresses
for tx_hash, addresses in self.mempool_addresses.items():
if tx_hash not in self.mempool_hashes:
del self.mempool_addresses[tx_hash]
del self.mempool_values[tx_hash]
del self.mempool_unconfirmed[tx_hash]
del self.mempool_fees[tx_hash]
touched_addresses.update(addresses)
# remove deprecated entries from mempool_hist
new_mempool_hist = {}
for addr in self.mempool_hist.iterkeys():
h = self.mempool_hist[addr]
hh = []
for tx_hash, delta in h:
if tx_hash in self.mempool_addresses:
hh.append((tx_hash, delta))
if hh:
new_mempool_hist[addr] = hh
# add new transactions to mempool_hist
for tx_hash in new_tx.iterkeys():
addresses = self.mempool_addresses[tx_hash]
for addr, delta in addresses.iteritems():
h = new_mempool_hist.get(addr, [])
if (tx_hash, delta) not in h:
h.append((tx_hash, delta))
new_mempool_hist[addr] = h
with self.mempool_lock:
self.mempool_hist = new_mempool_hist
# invalidate cache for touched addresses
for addr in touched_addresses:
self.invalidate_cache(addr)
t1 = time.time()
if t1-t0>1:
print_log('mempool_update', t1-t0, len(self.mempool_hashes), len(self.mempool_hist))
def invalidate_cache(self, address):
with self.cache_lock:
if address in self.history_cache:
# print_log("cache: invalidating", address)
del self.history_cache[address]
with self.watch_lock:
sessions = self.watched_addresses.get(address)
if sessions:
# TODO: update cache here. if new value equals cached value, do not send notification
self.address_queue.put((address,sessions))
def close(self):
self.blockchain_thread.join()
print_log("Closing database...")
self.storage.close()
print_log("Database is closed")
def main_iteration(self):
if self.shared.stopped():
print_log("Stopping timer")
return
self.catch_up()
self.memorypool_update()
if self.sent_height != self.storage.height:
self.sent_height = self.storage.height
for session in self.watch_blocks:
self.push_response(session, {
'id': None,
'method': 'blockchain.numblocks.subscribe',
'params': (self.storage.height,),
})
if self.sent_header != self.header:
self.sent_header = self.header
for session in self.watch_headers:
self.push_response(session, {
'id': None,
'method': 'blockchain.headers.subscribe',
'params': (self.header,),
})
while True:
try:
addr, sessions = self.address_queue.get(False)
except:
break
status = self.get_status(addr)
for session in sessions:
self.push_response(session, {
'id': None,
'method': 'blockchain.address.subscribe',
'params': (addr, status),
})
|
SampleService_test.py
|
# These tests cover the integration of the entire system and do not go into details - that's
# what unit tests are for. As such, typically each method will get a single happy path test and
# a single unhappy path test unless otherwise warranted.
# Tests of the auth user lookup and workspace wrapper code are at the bottom of the file.
import datetime
import json
import os
import tempfile
import requests
import time
import uuid
import yaml
import copy
from configparser import ConfigParser
from pytest import fixture, raises
from threading import Thread
from kafka import KafkaConsumer
from kafka.errors import NoBrokersAvailable
from SampleService.SampleServiceImpl import SampleService
from SampleService.core.errors import (
MissingParameterError, NoSuchWorkspaceDataError, IllegalParameterError)
from SampleService.core.notification import KafkaNotifier
from SampleService.core.user_lookup import KBaseUserLookup, AdminPermission
from SampleService.core.user_lookup import InvalidTokenError, InvalidUserError
from SampleService.core.workspace import WS, WorkspaceAccessType, UPA
from SampleService.core.errors import UnauthorizedError, NoSuchUserError
from SampleService.core.user import UserID
from installed_clients.WorkspaceClient import Workspace as Workspace
from core import test_utils
from core.test_utils import (
assert_ms_epoch_close_to_now,
assert_exception_correct,
find_free_port
)
from arango_controller import ArangoController
from mongo_controller import MongoController
from workspace_controller import WorkspaceController
from auth_controller import AuthController
from kafka_controller import KafkaController
# TODO should really test a start up for the case where the metadata validation config is not
# supplied, but that's almost never going to be the case and the code is trivial, so YAGNI
VER = '0.2.5'
_AUTH_DB = 'test_auth_db'
_WS_DB = 'test_ws_db'
_WS_TYPE_DB = 'test_ws_type_db'
TEST_DB_NAME = 'test_sample_service'
TEST_COL_SAMPLE = 'samples'
TEST_COL_VERSION = 'versions'
TEST_COL_VER_EDGE = 'ver_to_sample'
TEST_COL_NODES = 'nodes'
TEST_COL_NODE_EDGE = 'node_edges'
TEST_COL_DATA_LINK = 'data_link'
TEST_COL_WS_OBJ_VER = 'ws_obj_ver_shadow'
TEST_COL_SCHEMA = 'schema'
TEST_USER = 'user1'
TEST_PWD = 'password1'
USER_WS_READ_ADMIN = 'wsreadadmin'
TOKEN_WS_READ_ADMIN = None
USER_WS_FULL_ADMIN = 'wsfulladmin'
TOKEN_WS_FULL_ADMIN = None
WS_READ_ADMIN = 'WS_READ_ADMIN'
WS_FULL_ADMIN = 'WS_FULL_ADMIN'
USER_SERVICE = 'serviceuser'
TOKEN_SERVICE = None
USER1 = 'user1'
TOKEN1 = None
USER2 = 'user2'
TOKEN2 = None
USER3 = 'user3'
TOKEN3 = None
USER4 = 'user4'
TOKEN4 = None
USER5 = 'user5'
TOKEN5 = None
USER_NO_TOKEN1 = 'usernt1'
USER_NO_TOKEN2 = 'usernt2'
USER_NO_TOKEN3 = 'usernt3'
KAFKA_TOPIC = 'sampleservice'
def create_deploy_cfg(auth_port, arango_port, workspace_port, kafka_port):
cfg = ConfigParser()
ss = 'SampleService'
cfg.add_section(ss)
cfg[ss]['auth-service-url'] = (f'http://localhost:{auth_port}/testmode/' +
'api/legacy/KBase/Sessions/Login')
cfg[ss]['auth-service-url-allow-insecure'] = 'true'
cfg[ss]['auth-root-url'] = f'http://localhost:{auth_port}/testmode'
cfg[ss]['auth-token'] = TOKEN_SERVICE
cfg[ss]['auth-read-admin-roles'] = 'readadmin1'
cfg[ss]['auth-full-admin-roles'] = 'fulladmin2'
cfg[ss]['arango-url'] = f'http://localhost:{arango_port}'
cfg[ss]['arango-db'] = TEST_DB_NAME
cfg[ss]['arango-user'] = TEST_USER
cfg[ss]['arango-pwd'] = TEST_PWD
cfg[ss]['workspace-url'] = f'http://localhost:{workspace_port}'
cfg[ss]['workspace-read-admin-token'] = TOKEN_WS_READ_ADMIN
cfg[ss]['kafka-bootstrap-servers'] = f'localhost:{kafka_port}'
cfg[ss]['kafka-topic'] = KAFKA_TOPIC
cfg[ss]['sample-collection'] = TEST_COL_SAMPLE
cfg[ss]['version-collection'] = TEST_COL_VERSION
cfg[ss]['version-edge-collection'] = TEST_COL_VER_EDGE
cfg[ss]['node-collection'] = TEST_COL_NODES
cfg[ss]['node-edge-collection'] = TEST_COL_NODE_EDGE
cfg[ss]['data-link-collection'] = TEST_COL_DATA_LINK
cfg[ss]['workspace-object-version-shadow-collection'] = TEST_COL_WS_OBJ_VER
cfg[ss]['schema-collection'] = TEST_COL_SCHEMA
metacfg = {
'validators': {
'foo': {'validators': [{'module': 'SampleService.core.validator.builtin',
'callable_builder': 'noop'
}],
'key_metadata': {'a': 'b', 'c': 'd'}
},
'stringlentest': {'validators': [{'module': 'SampleService.core.validator.builtin',
'callable_builder': 'string',
'parameters': {'max-len': 5}
},
{'module': 'SampleService.core.validator.builtin',
'callable_builder': 'string',
'parameters': {'keys': 'spcky', 'max-len': 2}
}],
'key_metadata': {'h': 'i', 'j': 'k'}
}
},
'prefix_validators': {
'pre': {'validators': [{'module': 'core.config_test_vals',
'callable_builder': 'prefix_validator_test_builder',
'parameters': {'fail_on_arg': 'fail_plz'}
}],
'key_metadata': {'1': '2'}
}
}
}
metaval = tempfile.mkstemp('.cfg', 'metaval-', dir=test_utils.get_temp_dir(), text=True)
os.close(metaval[0])
with open(metaval[1], 'w') as handle:
yaml.dump(metacfg, handle)
cfg[ss]['metadata-validator-config-url'] = f'file://{metaval[1]}'
deploy = tempfile.mkstemp('.cfg', 'deploy-', dir=test_utils.get_temp_dir(), text=True)
os.close(deploy[0])
with open(deploy[1], 'w') as handle:
cfg.write(handle)
return deploy[1]
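# The generated deploy file is a plain INI document, roughly (ports and temp
# paths vary per run; the values below are only examples):
#
# [SampleService]
# auth-service-url = http://localhost:<auth_port>/testmode/api/legacy/KBase/Sessions/Login
# arango-url = http://localhost:<arango_port>
# workspace-url = http://localhost:<workspace_port>
# kafka-bootstrap-servers = localhost:<kafka_port>
# metadata-validator-config-url = file://<temp dir>/metaval-<random>.cfg
# ...plus the auth, collection and kafka-topic keys set above.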
@fixture(scope='module')
def mongo():
mongoexe = test_utils.get_mongo_exe()
tempdir = test_utils.get_temp_dir()
wt = test_utils.get_use_wired_tiger()
mongo = MongoController(mongoexe, tempdir, wt)
wttext = ' with WiredTiger' if wt else ''
print(f'running mongo {mongo.db_version}{wttext} on port {mongo.port} in dir {mongo.temp_dir}')
yield mongo
del_temp = test_utils.get_delete_temp_files()
print(f'shutting down mongo, delete_temp_files={del_temp}')
mongo.destroy(del_temp)
@fixture(scope='module')
def auth(mongo):
global TOKEN_SERVICE
global TOKEN_WS_FULL_ADMIN
global TOKEN_WS_READ_ADMIN
global TOKEN1
global TOKEN2
global TOKEN3
global TOKEN4
global TOKEN5
jd = test_utils.get_jars_dir()
tempdir = test_utils.get_temp_dir()
auth = AuthController(jd, f'localhost:{mongo.port}', _AUTH_DB, tempdir)
print(f'Started KBase Auth2 {auth.version} on port {auth.port} ' +
f'in dir {auth.temp_dir} in {auth.startup_count}s')
url = f'http://localhost:{auth.port}'
test_utils.create_auth_role(url, 'fulladmin1', 'fa1')
test_utils.create_auth_role(url, 'fulladmin2', 'fa2')
test_utils.create_auth_role(url, 'readadmin1', 'ra1')
test_utils.create_auth_role(url, 'readadmin2', 'ra2')
test_utils.create_auth_role(url, WS_READ_ADMIN, 'wsr')
test_utils.create_auth_role(url, WS_FULL_ADMIN, 'wsf')
test_utils.create_auth_user(url, USER_SERVICE, 'serv')
TOKEN_SERVICE = test_utils.create_auth_login_token(url, USER_SERVICE)
test_utils.create_auth_user(url, USER_WS_READ_ADMIN, 'wsra')
TOKEN_WS_READ_ADMIN = test_utils.create_auth_login_token(url, USER_WS_READ_ADMIN)
test_utils.set_custom_roles(url, USER_WS_READ_ADMIN, [WS_READ_ADMIN])
test_utils.create_auth_user(url, USER_WS_FULL_ADMIN, 'wsrf')
TOKEN_WS_FULL_ADMIN = test_utils.create_auth_login_token(url, USER_WS_FULL_ADMIN)
test_utils.set_custom_roles(url, USER_WS_FULL_ADMIN, [WS_FULL_ADMIN])
test_utils.create_auth_user(url, USER1, 'display1')
TOKEN1 = test_utils.create_auth_login_token(url, USER1)
test_utils.set_custom_roles(url, USER1, ['fulladmin1'])
test_utils.create_auth_user(url, USER2, 'display2')
TOKEN2 = test_utils.create_auth_login_token(url, USER2)
test_utils.set_custom_roles(url, USER2, ['fulladmin1', 'fulladmin2', 'readadmin2'])
test_utils.create_auth_user(url, USER3, 'display3')
TOKEN3 = test_utils.create_auth_login_token(url, USER3)
test_utils.set_custom_roles(url, USER3, ['readadmin1'])
test_utils.create_auth_user(url, USER4, 'display4')
TOKEN4 = test_utils.create_auth_login_token(url, USER4)
test_utils.create_auth_user(url, USER5, 'display5')
TOKEN5 = test_utils.create_auth_login_token(url, USER5)
test_utils.set_custom_roles(url, USER5, ['fulladmin2'])
test_utils.create_auth_user(url, USER_NO_TOKEN1, 'displaynt1')
test_utils.create_auth_user(url, USER_NO_TOKEN2, 'displaynt2')
test_utils.create_auth_user(url, USER_NO_TOKEN3, 'displaynt3')
yield auth
del_temp = test_utils.get_delete_temp_files()
print(f'shutting down auth, delete_temp_files={del_temp}')
auth.destroy(del_temp)
@fixture(scope='module')
def workspace(auth, mongo):
jd = test_utils.get_jars_dir()
tempdir = test_utils.get_temp_dir()
ws = WorkspaceController(
jd,
mongo,
_WS_DB,
_WS_TYPE_DB,
f'http://localhost:{auth.port}/testmode',
tempdir)
print(f'Started KBase Workspace {ws.version} on port {ws.port} ' +
f'in dir {ws.temp_dir} in {ws.startup_count}s')
wsc = Workspace(f'http://localhost:{ws.port}', token=TOKEN_WS_FULL_ADMIN)
wsc.request_module_ownership('Trivial')
wsc.administer({'command': 'approveModRequest', 'module': 'Trivial'})
wsc.register_typespec({
'spec': '''
module Trivial {
/* @optional dontusethisfieldorifyoudomakesureitsastring */
typedef structure {
string dontusethisfieldorifyoudomakesureitsastring;
} Object;
/* @optional dontusethisfieldorifyoudomakesureitsastring */
typedef structure {
string dontusethisfieldorifyoudomakesureitsastring;
} Object2;
};
''',
'dryrun': 0,
'new_types': ['Object', 'Object2']
})
wsc.release_module('Trivial')
yield ws
del_temp = test_utils.get_delete_temp_files()
print(f'shutting down workspace, delete_temp_files={del_temp}')
ws.destroy(del_temp, False)
@fixture(scope='module')
def arango():
arangoexe = test_utils.get_arango_exe()
arangojs = test_utils.get_arango_js()
tempdir = test_utils.get_temp_dir()
arango = ArangoController(arangoexe, arangojs, tempdir)
create_test_db(arango)
print('running arango on port {} in dir {}'.format(arango.port, arango.temp_dir))
yield arango
del_temp = test_utils.get_delete_temp_files()
print('shutting down arango, delete_temp_files={}'.format(del_temp))
arango.destroy(del_temp)
def create_test_db(arango):
systemdb = arango.client.db(verify=True) # default access to _system db
systemdb.create_database(TEST_DB_NAME, [{'username': TEST_USER, 'password': TEST_PWD}])
return arango.client.db(TEST_DB_NAME, TEST_USER, TEST_PWD)
def clear_db_and_recreate(arango):
arango.clear_database(TEST_DB_NAME, drop_indexes=True)
db = create_test_db(arango)
db.create_collection(TEST_COL_SAMPLE)
db.create_collection(TEST_COL_VERSION)
db.create_collection(TEST_COL_VER_EDGE, edge=True)
db.create_collection(TEST_COL_NODES)
db.create_collection(TEST_COL_NODE_EDGE, edge=True)
db.create_collection(TEST_COL_DATA_LINK, edge=True)
db.create_collection(TEST_COL_WS_OBJ_VER)
db.create_collection(TEST_COL_SCHEMA)
return db
@fixture(scope='module')
def kafka():
kafka_bin_dir = test_utils.get_kafka_bin_dir()
tempdir = test_utils.get_temp_dir()
kc = KafkaController(kafka_bin_dir, tempdir)
print('running kafka on port {} in dir {}'.format(kc.port, kc.temp_dir))
yield kc
del_temp = test_utils.get_delete_temp_files()
print('shutting down kafka, delete_temp_files={}'.format(del_temp))
kc.destroy(del_temp, dump_logs_to_stdout=False)
@fixture(scope='module')
def service(auth, arango, workspace, kafka):
portint = test_utils.find_free_port()
clear_db_and_recreate(arango)
# this is completely stupid. The state is calculated on import so there's no way to
# test the state creation normally.
cfgpath = create_deploy_cfg(auth.port, arango.port, workspace.port, kafka.port)
os.environ['KB_DEPLOYMENT_CONFIG'] = cfgpath
from SampleService import SampleServiceServer
Thread(target=SampleServiceServer.start_server, kwargs={'port': portint}, daemon=True).start()
time.sleep(0.05)
port = str(portint)
print('running sample service at localhost:' + port)
yield port
# shutdown the server
# SampleServiceServer.stop_server() <-- this causes an error. the start & stop methods are
# bugged. _proc is only set if newprocess=True
@fixture
def sample_port(service, arango, workspace, kafka):
clear_db_and_recreate(arango)
workspace.clear_db()
# _clear_kafka_messages(kafka) # too expensive to run after every test
# kafka.clear_all_topics() # too expensive to run after every test
yield service
def test_init_fail():
# init success is tested via starting the server
init_fail(None, ValueError('config is empty, cannot start service'))
cfg = {}
init_fail(cfg, ValueError('config is empty, cannot start service'))
cfg['arango-url'] = None
init_fail(cfg, MissingParameterError('config param arango-url'))
cfg['arango-url'] = 'crap'
init_fail(cfg, MissingParameterError('config param arango-db'))
cfg['arango-db'] = 'crap'
init_fail(cfg, MissingParameterError('config param arango-user'))
cfg['arango-user'] = 'crap'
init_fail(cfg, MissingParameterError('config param arango-pwd'))
cfg['arango-pwd'] = 'crap'
init_fail(cfg, MissingParameterError('config param sample-collection'))
cfg['sample-collection'] = 'crap'
init_fail(cfg, MissingParameterError('config param version-collection'))
cfg['version-collection'] = 'crap'
init_fail(cfg, MissingParameterError('config param version-edge-collection'))
cfg['version-edge-collection'] = 'crap'
init_fail(cfg, MissingParameterError('config param node-collection'))
cfg['node-collection'] = 'crap'
init_fail(cfg, MissingParameterError('config param node-edge-collection'))
cfg['node-edge-collection'] = 'crap'
init_fail(cfg, MissingParameterError('config param data-link-collection'))
cfg['data-link-collection'] = 'crap'
init_fail(cfg, MissingParameterError(
'config param workspace-object-version-shadow-collection'))
cfg['workspace-object-version-shadow-collection'] = 'crap'
init_fail(cfg, MissingParameterError('config param schema-collection'))
cfg['schema-collection'] = 'crap'
init_fail(cfg, MissingParameterError('config param auth-root-url'))
cfg['auth-root-url'] = 'crap'
init_fail(cfg, MissingParameterError('config param auth-token'))
cfg['auth-token'] = 'crap'
init_fail(cfg, MissingParameterError('config param workspace-url'))
cfg['workspace-url'] = 'crap'
init_fail(cfg, MissingParameterError('config param workspace-read-admin-token'))
cfg['workspace-read-admin-token'] = 'crap'
cfg['kafka-bootstrap-servers'] = 'crap'
init_fail(cfg, MissingParameterError('config param kafka-topic'))
cfg['kafka-topic'] = 'crap'
# get_validators is tested elsewhere, just make sure it'll error out
cfg['metadata-validator-config-url'] = 'https://kbase.us/services'
init_fail(cfg, ValueError(
'Failed to open validator configuration file at https://kbase.us/services: Not Found'))
def init_fail(config, expected):
with raises(Exception) as got:
SampleService(config)
assert_exception_correct(got.value, expected)
def test_status(sample_port):
res = requests.post('http://localhost:' + sample_port, json={
'method': 'SampleService.status',
'params': [],
'version': 1.1,
'id': 1 # don't do this. This is bad practice
})
assert res.status_code == 200
s = res.json()
# print(s)
assert len(s['result']) == 1 # results are always in a list
assert_ms_epoch_close_to_now(s['result'][0]['servertime'])
assert s['result'][0]['state'] == 'OK'
assert s['result'][0]['message'] == ""
assert s['result'][0]['version'] == VER
# ignore git url and hash, can change
def get_authorized_headers(token):
headers = {'accept': 'application/json'}
if token is not None:
headers['authorization'] = token
return headers
def _check_kafka_messages(kafka, expected_msgs, topic=KAFKA_TOPIC, print_res=False):
kc = KafkaConsumer(
topic,
bootstrap_servers=f'localhost:{kafka.port}',
auto_offset_reset='earliest',
group_id='foo') # quiets warnings
try:
res = kc.poll(timeout_ms=2000) # 1s not enough? Seems like a lot
if print_res:
print(res)
assert len(res) == 1
assert next(iter(res.keys())).topic == topic
records = next(iter(res.values()))
assert len(records) == len(expected_msgs)
for i, r in enumerate(records):
assert json.loads(r.value) == expected_msgs[i]
# Need to commit here? doesn't seem like it
finally:
kc.close()
def _clear_kafka_messages(kafka, topic=KAFKA_TOPIC):
kc = KafkaConsumer(
topic,
bootstrap_servers=f'localhost:{kafka.port}',
auto_offset_reset='earliest',
group_id='foo') # quiets warnings
try:
kc.poll(timeout_ms=2000) # 1s not enough? Seems like a lot
# Need to commit here? doesn't seem like it
finally:
kc.close()
def test_create_and_get_sample_with_version(sample_port, kafka):
_clear_kafka_messages(kafka)
url = f'http://localhost:{sample_port}'
# version 1
ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
'method': 'SampleService.create_sample',
'version': '1.1',
'id': '67',
'params': [{
'sample': {'name': 'mysample',
'node_tree': [{'id': 'root',
'type': 'BioReplicate',
'meta_controlled': {'foo': {'bar': 'baz'},
'stringlentest': {'foooo': 'barrr',
'spcky': 'fa'},
'prefixed': {'safe': 'args'}
},
'meta_user': {'a': {'b': 'c'}},
'source_meta': [
{'key': 'foo', 'skey': 'bar', 'svalue': {'whee': 'whoo'}},
{'key': 'stringlentest',
'skey': 'ya fer sure',
'svalue': {'just': 'some', 'data': 42}}
]
}
]
}
}]
})
# print(ret.text)
assert ret.ok is True
assert ret.json()['result'][0]['version'] == 1
id_ = ret.json()['result'][0]['id']
# version 2
ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
'method': 'SampleService.create_sample',
'version': '1.1',
'id': '68',
'params': [{
'sample': {'name': 'mysample2',
'id': id_,
'node_tree': [{'id': 'root2',
'type': 'BioReplicate',
'meta_controlled': {'foo': {'bar': 'bat'}},
'meta_user': {'a': {'b': 'd'}}
}
]
},
'prior_version': 1
}]
})
# print(ret.text)
assert ret.ok is True
assert ret.json()['result'][0]['version'] == 2
# get version 1
ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
'method': 'SampleService.get_sample',
'version': '1.1',
'id': '42',
'params': [{'id': id_, 'version': 1}]
})
# print(ret.text)
assert ret.ok is True
j = ret.json()['result'][0]
assert_ms_epoch_close_to_now(j['save_date'])
del j['save_date']
assert j == {
'id': id_,
'version': 1,
'user': USER1,
'name': 'mysample',
'node_tree': [{'id': 'root',
'parent': None,
'type': 'BioReplicate',
'meta_controlled': {'foo': {'bar': 'baz'},
'stringlentest': {'foooo': 'barrr',
'spcky': 'fa'},
'prefixed': {'safe': 'args'}
},
'meta_user': {'a': {'b': 'c'}},
'source_meta': [
{'key': 'foo', 'skey': 'bar', 'svalue': {'whee': 'whoo'}},
{'key': 'stringlentest',
'skey': 'ya fer sure',
'svalue': {'just': 'some', 'data': 42}}
],
}]
}
# get version 2
ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
'method': 'SampleService.get_sample',
'version': '1.1',
'id': '43',
'params': [{'id': id_}]
})
# print(ret.text)
assert ret.ok is True
j = ret.json()['result'][0]
assert_ms_epoch_close_to_now(j['save_date'])
del j['save_date']
assert j == {
'id': id_,
'version': 2,
'user': USER1,
'name': 'mysample2',
'node_tree': [{'id': 'root2',
'parent': None,
'type': 'BioReplicate',
'meta_controlled': {'foo': {'bar': 'bat'}},
'meta_user': {'a': {'b': 'd'}},
'source_meta': [],
}]
}
_check_kafka_messages(
kafka,
[
{'event_type': 'NEW_SAMPLE', 'sample_id': id_, 'sample_ver': 1},
{'event_type': 'NEW_SAMPLE', 'sample_id': id_, 'sample_ver': 2}
])
def test_create_and_get_samples(sample_port, kafka):
_clear_kafka_messages(kafka)
url = f'http://localhost:{sample_port}'
# first sample
ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
'method': 'SampleService.create_sample',
'version': '1.1',
'id': '67',
'params': [{
'sample': {'name': 'mysample',
'node_tree': [{'id': 'root',
'type': 'BioReplicate',
'meta_controlled': {'foo': {'bar': 'baz'},
'stringlentest': {'foooo': 'barrr',
'spcky': 'fa'},
'prefixed': {'safe': 'args'}
},
'meta_user': {'a': {'b': 'c'}},
'source_meta': [
{'key': 'foo', 'skey': 'bar', 'svalue': {'whee': 'whoo'}},
{'key': 'stringlentest',
'skey': 'ya fer sure',
'svalue': {'just': 'some', 'data': 42}}
]
}
]
}
}]
})
# print(ret.text)
assert ret.ok is True
assert ret.json()['result'][0]['version'] == 1
id1_ = ret.json()['result'][0]['id']
# second sample
ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
'method': 'SampleService.create_sample',
'version': '1.1',
'id': '68',
'params': [{
'sample': {'name': 'mysample2',
'node_tree': [{'id': 'root2',
'type': 'BioReplicate',
'meta_controlled': {'foo': {'bar': 'bat'}},
'meta_user': {'a': {'b': 'd'}}
}
]
}
}]
})
# print(ret.text)
assert ret.ok is True
assert ret.json()['result'][0]['version'] == 1
id2_ = ret.json()['result'][0]['id']
# get both samples
ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
'method': 'SampleService.get_samples',
'version': '1.1',
'id': '42',
'params': [{'samples': [{'id': id1_, 'version': 1}, {'id': id2_, 'version': 1}]}]
})
# print(ret.text)
assert ret.ok is True
j = ret.json()['result'][0]
for s in j:
assert_ms_epoch_close_to_now(s['save_date'])
del s['save_date']
print('-'*80)
import json
print(json.dumps(j))
print('-'*80)
assert j == [{
'id': id1_,
'version': 1,
'user': USER1,
'name': 'mysample',
'node_tree': [{
'id': 'root',
'parent': None,
'type': 'BioReplicate',
'meta_controlled': {'foo': {'bar': 'baz'},
'stringlentest': {'foooo': 'barrr',
'spcky': 'fa'},
'prefixed': {'safe': 'args'}
},
'meta_user': {'a': {'b': 'c'}},
'source_meta': [
{'key': 'foo', 'skey': 'bar', 'svalue': {'whee': 'whoo'}},
{'key': 'stringlentest',
'skey': 'ya fer sure',
'svalue': {'just': 'some', 'data': 42}}
],
}]
}, {
'id': id2_,
'version': 1,
'user': USER1,
'name': 'mysample2',
'node_tree': [{'id': 'root2',
'parent': None,
'type': 'BioReplicate',
'meta_controlled': {'foo': {'bar': 'bat'}},
'meta_user': {'a': {'b': 'd'}},
'source_meta': []
}]
}]
_check_kafka_messages(
kafka,
[
{'event_type': 'NEW_SAMPLE', 'sample_id': id1_, 'sample_ver': 1},
{'event_type': 'NEW_SAMPLE', 'sample_id': id2_, 'sample_ver': 1}
])
def test_create_sample_as_admin(sample_port):
_create_sample_as_admin(sample_port, None, TOKEN2, USER2)
def test_create_sample_as_admin_impersonate_user(sample_port):
_create_sample_as_admin(sample_port, ' ' + USER4 + ' ', TOKEN4, USER4)
def _create_sample_as_admin(sample_port, as_user, get_token, expected_user):
url = f'http://localhost:{sample_port}'
# version 1
ret = requests.post(url, headers=get_authorized_headers(TOKEN2), json={
'method': 'SampleService.create_sample',
'version': '1.1',
'id': '67',
'params': [{
'sample': {'name': 'mysample',
'node_tree': [{'id': 'root',
'type': 'BioReplicate',
'meta_controlled': {'foo': {'bar': 'baz'}
},
'meta_user': {'a': {'b': 'c'}}
}
]
},
'as_admin': 1,
'as_user': as_user
}]
})
# print(ret.text)
assert ret.ok is True
assert ret.json()['result'][0]['version'] == 1
id_ = ret.json()['result'][0]['id']
# get
ret = requests.post(url, headers=get_authorized_headers(get_token), json={
'method': 'SampleService.get_sample',
'version': '1.1',
'id': '42',
'params': [{'id': id_, 'version': 1}]
})
# print(ret.text)
assert ret.ok is True
j = ret.json()['result'][0]
assert_ms_epoch_close_to_now(j['save_date'])
del j['save_date']
assert j == {
'id': id_,
'version': 1,
'user': expected_user,
'name': 'mysample',
'node_tree': [{'id': 'root',
'parent': None,
'type': 'BioReplicate',
'meta_controlled': {'foo': {'bar': 'baz'}
},
'meta_user': {'a': {'b': 'c'}},
'source_meta': [],
}]
}
def test_create_sample_version_as_admin(sample_port):
_create_sample_version_as_admin(sample_port, None, USER2)
def test_create_sample_version_as_admin_impersonate_user(sample_port):
_create_sample_version_as_admin(sample_port, USER3, USER3)
def _create_sample_version_as_admin(sample_port, as_user, expected_user):
url = f'http://localhost:{sample_port}'
# version 1
ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
'method': 'SampleService.create_sample',
'version': '1.1',
'id': '67',
'params': [{
'sample': {'name': 'mysample',
'node_tree': [{'id': 'root',
'type': 'BioReplicate',
'meta_controlled': {'foo': {'bar': 'baz'},
'stringlentest': {'foooo': 'barrr',
'spcky': 'fa'},
'prefixed': {'safe': 'args'}
},
'meta_user': {'a': {'b': 'c'}}
}
]
}
}]
})
# print(ret.text)
assert ret.ok is True
assert ret.json()['result'][0]['version'] == 1
id_ = ret.json()['result'][0]['id']
# version 2
ret = requests.post(url, headers=get_authorized_headers(TOKEN2), json={
'method': 'SampleService.create_sample',
'version': '1.1',
'id': '68',
'params': [{
'sample': {'name': 'mysample2',
'id': id_,
'node_tree': [{'id': 'root2',
'type': 'BioReplicate',
'meta_controlled': {'foo': {'bar': 'bat'}},
'meta_user': {'a': {'b': 'd'}}
}
]
},
'as_admin': 1,
'as_user': as_user
}]
})
# print(ret.text)
assert ret.ok is True
assert ret.json()['result'][0]['version'] == 2
# get version 2
ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
'method': 'SampleService.get_sample',
'version': '1.1',
'id': '43',
'params': [{'id': id_}]
})
# print(ret.text)
assert ret.ok is True
j = ret.json()['result'][0]
assert_ms_epoch_close_to_now(j['save_date'])
del j['save_date']
assert j == {
'id': id_,
'version': 2,
'user': expected_user,
'name': 'mysample2',
'node_tree': [{'id': 'root2',
'parent': None,
'type': 'BioReplicate',
'meta_controlled': {'foo': {'bar': 'bat'}},
'meta_user': {'a': {'b': 'd'}},
'source_meta': [],
}]
}
def test_get_samples_fail_no_samples(sample_port):
_test_get_samples_fail(sample_port, None,
'Missing or incorrect "samples" field - must provide a list of samples to retrieve.')
_test_get_samples_fail(sample_port, "im a random sample id string!",
'Missing or incorrect "samples" field - must provide a list of samples to retrieve.')
_test_get_samples_fail(sample_port, [],
'Cannot provide empty list of samples - must provide at least one sample to retrieve.')
def _test_get_samples_fail(sample_port, samples, message):
params = {'samples': samples}
_request_fail(sample_port, 'get_samples', TOKEN1, params, message)
def test_get_sample_public_read(sample_port):
url = f'http://localhost:{sample_port}'
id_ = _create_generic_sample(url, TOKEN1)
_replace_acls(url, id_, TOKEN1, {'public_read': 1})
for token in [TOKEN4, None]: # unauthed user and anonymous user
s = _get_sample(url, token, id_)
assert_ms_epoch_close_to_now(s['save_date'])
del s['save_date']
assert s == {
'id': id_,
'version': 1,
'user': 'user1',
'name': 'mysample',
'node_tree': [{'id': 'root',
'parent': None,
'type': 'BioReplicate',
'meta_controlled': {},
'meta_user': {},
'source_meta': [],
},
{'id': 'foo',
'parent': 'root',
'type': 'TechReplicate',
'meta_controlled': {},
'meta_user': {},
'source_meta': [],
}
]
}
def _get_sample(url, token, id_):
ret = requests.post(url, headers=get_authorized_headers(token), json={
'method': 'SampleService.get_sample',
'version': '1.1',
'id': '43',
'params': [{'id': str(id_)}]
})
# print(ret.text)
assert ret.ok is True
return ret.json()['result'][0]
def test_get_sample_as_admin(sample_port):
url = f'http://localhost:{sample_port}'
# version 1
ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
'method': 'SampleService.create_sample',
'version': '1.1',
'id': '67',
'params': [{
'sample': {'name': 'mysample',
'node_tree': [{'id': 'root',
'type': 'BioReplicate',
'meta_controlled': {'foo': {'bar': 'baz'}
},
'meta_user': {'a': {'b': 'c'}}
}
]
}
}]
})
# print(ret.text)
assert ret.ok is True
assert ret.json()['result'][0]['version'] == 1
id_ = ret.json()['result'][0]['id']
# token3 has read admin but not full admin
ret = requests.post(url, headers=get_authorized_headers(TOKEN3), json={
'method': 'SampleService.get_sample',
'version': '1.1',
'id': '42',
'params': [{'id': id_, 'version': 1, 'as_admin': 1}]
})
print(ret.text)
assert ret.ok is True
j = ret.json()['result'][0]
assert_ms_epoch_close_to_now(j['save_date'])
del j['save_date']
assert j == {
'id': id_,
'version': 1,
'user': USER1,
'name': 'mysample',
'node_tree': [{'id': 'root',
'parent': None,
'type': 'BioReplicate',
'meta_controlled': {'foo': {'bar': 'baz'},
},
'meta_user': {'a': {'b': 'c'}},
'source_meta': [],
}]
}
def test_create_sample_fail_no_nodes(sample_port):
url = f'http://localhost:{sample_port}'
ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
'method': 'SampleService.create_sample',
'version': '1.1',
'id': '67',
'params': [{
'sample': {'name': 'mysample',
'node_tree': None
}
}]
})
# print(ret.text)
assert ret.status_code == 500
assert ret.json()['error']['message'] == (
'Sample service error code 30001 Illegal input parameter: sample node tree ' +
'must be present and a list')
def test_create_sample_fail_bad_metadata(sample_port):
_create_sample_fail_bad_metadata(
sample_port, {'stringlentest': {}},
'Sample service error code 30001 Illegal input parameter: Error for node at index 0: ' +
'Controlled metadata value associated with metadata key stringlentest is null or empty')
_create_sample_fail_bad_metadata(
sample_port, {'stringlentest': {'foooo': 'barrrr'}},
'Sample service error code 30010 Metadata validation failed: Node at index 0: ' +
'Key stringlentest: Metadata value at key foooo is longer than max length of 5')
_create_sample_fail_bad_metadata(
sample_port, {'stringlentest': {'foooo': 'barrr', 'spcky': 'baz'}},
'Sample service error code 30010 Metadata validation failed: Node at index 0: Key ' +
'stringlentest: Metadata value at key spcky is longer than max length of 2')
_create_sample_fail_bad_metadata(
sample_port, {'prefix': {'fail_plz': 'yes, or principal sayof'}},
"Sample service error code 30010 Metadata validation failed: Node at index 0: " +
"Prefix validator pre, key prefix: pre, prefix, {'fail_plz': 'yes, or principal sayof'}")
_create_sample_fail_bad_metadata(
sample_port, {'prefix': {'foo': 'bar'}},
'Sample service error code 30001 Illegal input parameter: Error for node at ' +
'index 0: Duplicate source metadata key: prefix',
sourcemeta=[
{'key': 'prefix', 'skey': 'a', 'svalue': {'a': 'b'}},
{'key': 'prefix', 'skey': 'b', 'svalue': {'c': 'd'}}
])
def _create_sample_fail_bad_metadata(sample_port, meta, expected, sourcemeta=None):
url = f'http://localhost:{sample_port}'
ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
'method': 'SampleService.create_sample',
'version': '1.1',
'id': '67',
'params': [{
'sample': {'name': 'mysample',
'node_tree': [{'id': 'root',
'type': 'BioReplicate',
'meta_controlled': meta,
'source_meta': sourcemeta
}
]
}
}]
})
# print(ret.text)
assert ret.status_code == 500
assert ret.json()['error']['message'] == expected
def test_create_sample_fail_permissions(sample_port):
url = f'http://localhost:{sample_port}'
ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
'method': 'SampleService.create_sample',
'version': '1.1',
'id': '67',
'params': [{
'sample': {'name': 'mysample',
'node_tree': [{'id': 'root',
'type': 'BioReplicate',
}
]
}
}]
})
# print(ret.text)
assert ret.ok is True
assert ret.json()['result'][0]['version'] == 1
id_ = ret.json()['result'][0]['id']
_replace_acls(url, id_, TOKEN1, {'read': [USER2]})
ret = requests.post(url, headers=get_authorized_headers(TOKEN2), json={
'method': 'SampleService.create_sample',
'version': '1.1',
'id': '67',
'params': [{
'sample': {'name': 'mysample',
'id': id_,
'node_tree': [{'id': 'root',
'type': 'BioReplicate',
}
]
}
}]
})
# print(ret.text)
assert ret.status_code == 500
assert ret.json()['error']['message'] == (
f'Sample service error code 20000 Unauthorized: User user2 cannot write to sample {id_}')
def test_create_sample_fail_admin_bad_user_name(sample_port):
_create_sample_fail_admin_as_user(
sample_port, 'bad\tuser',
'Sample service error code 30001 Illegal input parameter: userid contains ' +
'control characters')
def test_create_sample_fail_admin_no_such_user(sample_port):
_create_sample_fail_admin_as_user(
sample_port, USER4 + 'impostor',
'Sample service error code 50000 No such user: user4impostor')
def _create_sample_fail_admin_as_user(sample_port, user, expected):
url = f'http://localhost:{sample_port}'
ret = requests.post(url, headers=get_authorized_headers(TOKEN2), json={
'method': 'SampleService.create_sample',
'version': '1.1',
'id': '67',
'params': [{
'sample': {'name': 'mysample',
'node_tree': [{'id': 'root',
'type': 'BioReplicate',
}
]
},
'as_admin': 'true',
'as_user': user
}]
})
# print(ret.text)
assert ret.status_code == 500
assert ret.json()['error']['message'] == expected
def test_create_sample_fail_admin_permissions(sample_port):
url = f'http://localhost:{sample_port}'
# token 3 only has read permissions
ret = requests.post(url, headers=get_authorized_headers(TOKEN3), json={
'method': 'SampleService.create_sample',
'version': '1.1',
'id': '67',
'params': [{
'sample': {'name': 'mysample',
'node_tree': [{'id': 'root',
'type': 'BioReplicate',
}
]
},
'as_admin': 1,
'as_user': USER4
}]
})
# print(ret.text)
assert ret.status_code == 500
assert ret.json()['error']['message'] == (
'Sample service error code 20000 Unauthorized: User user3 does not have the ' +
'necessary administration privileges to run method create_sample')
def test_get_sample_fail_bad_id(sample_port):
url = f'http://localhost:{sample_port}'
ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
'method': 'SampleService.create_sample',
'version': '1.1',
'id': '67',
'params': [{
'sample': {'name': 'mysample',
'node_tree': [{'id': 'root',
'type': 'BioReplicate',
}
]
}
}]
})
# print(ret.text)
assert ret.ok is True
assert ret.json()['result'][0]['version'] == 1
id_ = ret.json()['result'][0]['id']
ret = requests.post(url, headers=get_authorized_headers(TOKEN2), json={
'method': 'SampleService.get_sample',
'version': '1.1',
'id': '42',
'params': [{'id': id_[:-1]}]
})
# print(ret.text)
assert ret.status_code == 500
assert ret.json()['error']['message'] == (
'Sample service error code 30001 Illegal input parameter: ' +
f'id {id_[:-1]} must be a UUID string')
def test_get_sample_fail_permissions(sample_port):
url = f'http://localhost:{sample_port}'
ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
'method': 'SampleService.create_sample',
'version': '1.1',
'id': '67',
'params': [{
'sample': {'name': 'mysample',
'node_tree': [{'id': 'root',
'type': 'BioReplicate',
}
]
}
}]
})
# print(ret.text)
assert ret.ok is True
assert ret.json()['result'][0]['version'] == 1
id_ = ret.json()['result'][0]['id']
_get_sample_fail(
url, TOKEN2, {'id': id_},
f'Sample service error code 20000 Unauthorized: User user2 cannot read sample {id_}')
_get_sample_fail(
url, None, {'id': id_},
f'Sample service error code 20000 Unauthorized: Anonymous users cannot read sample {id_}')
_get_sample_fail(
url, None, {'id': id_, 'as_admin': 1},
'Sample service error code 20000 Unauthorized: Anonymous users ' +
'may not act as service administrators.')
def test_get_sample_fail_admin_permissions(sample_port):
url = f'http://localhost:{sample_port}'
ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
'method': 'SampleService.create_sample',
'version': '1.1',
'id': '67',
'params': [{
'sample': {'name': 'mysample',
'node_tree': [{'id': 'root',
'type': 'BioReplicate',
}
]
}
}]
})
# print(ret.text)
assert ret.ok is True
assert ret.json()['result'][0]['version'] == 1
id_ = ret.json()['result'][0]['id']
_get_sample_fail(
url, TOKEN4, {'id': id_, 'as_admin': 1},
'Sample service error code 20000 Unauthorized: User user4 does not have the ' +
'necessary administration privileges to run method get_sample')
def _get_sample_fail(url, token, params, expected):
# user 4 has no admin permissions
ret = requests.post(url, headers=get_authorized_headers(token), json={
'method': 'SampleService.get_sample',
'version': '1.1',
'id': '42',
'params': [params]
})
# print(ret.text)
assert ret.status_code == 500
assert ret.json()['error']['message'] == expected
def test_get_and_replace_acls(sample_port, kafka):
_clear_kafka_messages(kafka)
url = f'http://localhost:{sample_port}'
ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
'method': 'SampleService.create_sample',
'version': '1.1',
'id': '67',
'params': [{
'sample': {'name': 'mysample',
'node_tree': [{'id': 'root',
'type': 'BioReplicate',
}
]
}
}]
})
# print(ret.text)
assert ret.ok is True
assert ret.json()['result'][0]['version'] == 1
id_ = ret.json()['result'][0]['id']
_assert_acl_contents(url, id_, TOKEN1, {
'owner': USER1,
'admin': [],
'write': [],
'read': [],
'public_read': 0
})
_replace_acls(url, id_, TOKEN1, {
'admin': [USER2],
'write': [USER_NO_TOKEN1, USER_NO_TOKEN2, USER3],
'read': [USER_NO_TOKEN3, USER4]
})
# test that people in the acls can read
for token in [TOKEN2, TOKEN3, TOKEN4]:
_assert_acl_contents(url, id_, token, {
'owner': USER1,
'admin': [USER2],
'write': [USER3, USER_NO_TOKEN1, USER_NO_TOKEN2],
'read': [USER4, USER_NO_TOKEN3],
'public_read': 0
})
ret = requests.post(url, headers=get_authorized_headers(token), json={
'method': 'SampleService.get_sample',
'version': '1.1',
'id': '42',
'params': [{'id': id_}]
})
# print(ret.text)
assert ret.ok is True
j = ret.json()['result'][0]
del j['save_date']
assert j == {
'id': id_,
'version': 1,
'user': USER1,
'name': 'mysample',
'node_tree': [{
'id': 'root',
'type': 'BioReplicate',
'parent': None,
'meta_controlled': {},
'meta_user': {},
'source_meta': [],
}]
}
# test admins and writers can write
for token, version in ((TOKEN2, 2), (TOKEN3, 3)):
ret = requests.post(url, headers=get_authorized_headers(token), json={
'method': 'SampleService.create_sample',
'version': '1.1',
'id': '68',
'params': [{
'sample': {'name': f'mysample{version}',
'id': id_,
'node_tree': [{'id': f'root{version}',
'type': 'BioReplicate',
}
]
}
}]
})
# print(ret.text)
assert ret.ok is True
assert ret.json()['result'][0]['version'] == version
# check one of the writes
ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
'method': 'SampleService.get_sample',
'version': '1.1',
'id': '42',
'params': [{'id': id_, 'version': 2}]
})
# print(ret.text)
assert ret.ok is True
j = ret.json()['result'][0]
assert_ms_epoch_close_to_now(j['save_date'])
del j['save_date']
assert j == {
'id': id_,
'version': 2,
'user': USER2,
'name': 'mysample2',
'node_tree': [{'id': 'root2',
'parent': None,
'type': 'BioReplicate',
'meta_controlled': {},
'meta_user': {},
'source_meta': [],
}]
}
# test that an admin can replace ACLs
_replace_acls(url, id_, TOKEN2, {
'admin': [USER_NO_TOKEN2],
'write': [],
'read': [USER2],
'public_read': 1
})
_assert_acl_contents(url, id_, TOKEN1, {
'owner': USER1,
'admin': [USER_NO_TOKEN2],
'write': [],
'read': [USER2],
'public_read': 1
})
_check_kafka_messages(
kafka,
[
{'event_type': 'NEW_SAMPLE', 'sample_id': id_, 'sample_ver': 1},
{'event_type': 'ACL_CHANGE', 'sample_id': id_},
{'event_type': 'NEW_SAMPLE', 'sample_id': id_, 'sample_ver': 2},
{'event_type': 'NEW_SAMPLE', 'sample_id': id_, 'sample_ver': 3},
{'event_type': 'ACL_CHANGE', 'sample_id': id_},
])
def test_get_acls_public_read(sample_port):
url = f'http://localhost:{sample_port}'
id_ = _create_generic_sample(url, TOKEN1)
_replace_acls(url, id_, TOKEN1, {'public_read': 1})
for token in [TOKEN4, None]: # user with no explicit perms and anon user
_assert_acl_contents(url, id_, token, {
'owner': USER1,
'admin': [],
'write': [],
'read': [],
'public_read': 1
})
def test_get_acls_as_admin(sample_port):
url = f'http://localhost:{sample_port}'
ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
'method': 'SampleService.create_sample',
'version': '1.1',
'id': '67',
'params': [{
'sample': {'name': 'mysample',
'node_tree': [{'id': 'root',
'type': 'BioReplicate',
}
]
}
}]
})
# print(ret.text)
assert ret.ok is True
assert ret.json()['result'][0]['version'] == 1
id_ = ret.json()['result'][0]['id']
# user 3 has admin read rights only
_assert_acl_contents(url, id_, TOKEN3, {
'owner': USER1,
'admin': [],
'write': [],
'read': [],
'public_read': 0
},
as_admin=1)
def test_replace_acls_as_admin(sample_port):
url = f'http://localhost:{sample_port}'
id_ = _create_generic_sample(url, TOKEN1)
_assert_acl_contents(url, id_, TOKEN1, {
'owner': USER1,
'admin': [],
'write': [],
'read': [],
'public_read': 0
})
_replace_acls(url, id_, TOKEN2, {
'admin': [USER2],
'write': [USER_NO_TOKEN1, USER_NO_TOKEN2, USER3],
'read': [USER_NO_TOKEN3, USER4],
'public_read': 1
},
as_admin=1)
_assert_acl_contents(url, id_, TOKEN1, {
'owner': USER1,
'admin': [USER2],
'write': [USER3, USER_NO_TOKEN1, USER_NO_TOKEN2],
'read': [USER4, USER_NO_TOKEN3],
'public_read': 1
})
def _replace_acls(url, id_, token, acls, as_admin=0, print_resp=False):
ret = requests.post(url, headers=get_authorized_headers(token), json={
'method': 'SampleService.replace_sample_acls',
'version': '1.1',
'id': '67',
'params': [{'id': id_, 'acls': acls, 'as_admin': as_admin}]
})
if print_resp:
print(ret.text)
assert ret.ok is True
assert ret.json() == {'version': '1.1', 'id': '67', 'result': None}
def _assert_acl_contents(url, id_, token, expected, as_admin=0, print_resp=False):
ret = requests.post(url, headers=get_authorized_headers(token), json={
'method': 'SampleService.get_sample_acls',
'version': '1.1',
'id': '47',
'params': [{'id': id_, 'as_admin': as_admin}]
})
if print_resp:
print(ret.text)
assert ret.ok is True
assert ret.json()['result'][0] == expected
def test_get_acls_fail_no_id(sample_port):
url = f'http://localhost:{sample_port}'
ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
'method': 'SampleService.create_sample',
'version': '1.1',
'id': '67',
'params': [{
'sample': {'name': 'mysample',
'node_tree': [{'id': 'root',
'type': 'BioReplicate',
}
]
}
}]
})
# print(ret.text)
assert ret.ok is True
assert ret.json()['result'][0]['version'] == 1
id_ = ret.json()['result'][0]['id']
ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
'method': 'SampleService.get_sample_acls',
'version': '1.1',
'id': '42',
'params': [{'ids': id_}]
})
assert ret.status_code == 500
assert ret.json()['error']['message'] == (
'Sample service error code 30000 Missing input parameter: id')
def test_get_acls_fail_permissions(sample_port):
url = f'http://localhost:{sample_port}'
id_ = _create_generic_sample(url, TOKEN1)
_get_acls_fail_permissions(
url, TOKEN2, {'id': id_},
f'Sample service error code 20000 Unauthorized: User user2 cannot read sample {id_}')
_get_acls_fail_permissions(
url, None, {'id': id_},
f'Sample service error code 20000 Unauthorized: Anonymous users cannot read sample {id_}')
_get_acls_fail_permissions(
url, None, {'id': id_, 'as_admin': 1},
'Sample service error code 20000 Unauthorized: Anonymous users ' +
'may not act as service administrators.')
def _get_acls_fail_permissions(url, token, params, expected):
ret = requests.post(url, headers=get_authorized_headers(token), json={
'method': 'SampleService.get_sample_acls',
'version': '1.1',
'id': '42',
'params': [params]
})
assert ret.status_code == 500
assert ret.json()['error']['message'] == expected
def test_get_acls_fail_admin_permissions(sample_port):
url = f'http://localhost:{sample_port}'
id_ = _create_generic_sample(url, TOKEN1)
# user 4 has no admin perms
ret = requests.post(url, headers=get_authorized_headers(TOKEN4), json={
'method': 'SampleService.get_sample_acls',
'version': '1.1',
'id': '42',
'params': [{'id': id_, 'as_admin': 1}]
})
assert ret.status_code == 500
assert ret.json()['error']['message'] == (
'Sample service error code 20000 Unauthorized: User user4 does not have the ' +
'necessary administration privileges to run method get_sample_acls')
def _create_generic_sample(url, token):
ret = requests.post(url, headers=get_authorized_headers(token), json={
'method': 'SampleService.create_sample',
'version': '1.1',
'id': '67',
'params': [{
'sample': {'name': 'mysample',
'node_tree': [{'id': 'root',
'type': 'BioReplicate',
},
{'id': 'foo',
'parent': 'root',
'type': 'TechReplicate',
}
]
}
}]
})
# print(ret.text)
assert ret.ok is True
assert ret.json()['result'][0]['version'] == 1
return ret.json()['result'][0]['id']
def test_replace_acls_fail_no_id(sample_port):
url = f'http://localhost:{sample_port}'
id_ = _create_generic_sample(url, TOKEN1)
ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
'method': 'SampleService.replace_sample_acls',
'version': '1.1',
'id': '42',
'params': [{'ids': id_}]
})
assert ret.status_code == 500
assert ret.json()['error']['message'] == (
'Sample service error code 30000 Missing input parameter: id')
def test_replace_acls_fail_bad_acls(sample_port):
url = f'http://localhost:{sample_port}'
id_ = _create_generic_sample(url, TOKEN1)
ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
'method': 'SampleService.replace_sample_acls',
'version': '1.1',
'id': '42',
'params': [{'id': id_, 'acls': ['foo']}]
})
assert ret.status_code == 500
assert ret.json()['error']['message'] == (
'Sample service error code 30001 Illegal input parameter: ' +
'ACLs must be supplied in the acls key and must be a mapping')
def test_replace_acls_fail_permissions(sample_port):
url = f'http://localhost:{sample_port}'
id_ = _create_generic_sample(url, TOKEN1)
_replace_acls(url, id_, TOKEN1, {
'admin': [USER2],
'write': [USER3],
'read': [USER4]
})
for user, token in ((USER3, TOKEN3), (USER4, TOKEN4)):
ret = requests.post(url, headers=get_authorized_headers(token), json={
'method': 'SampleService.replace_sample_acls',
'version': '1.1',
'id': '42',
'params': [{'id': id_, 'acls': {}}]
})
assert ret.status_code == 500
assert ret.json()['error']['message'] == (
f'Sample service error code 20000 Unauthorized: User {user} cannot ' +
f'administrate sample {id_}')
def test_replace_acls_fail_admin_permissions(sample_port):
url = f'http://localhost:{sample_port}'
id_ = _create_generic_sample(url, TOKEN1)
for user, token in ((USER1, TOKEN1), (USER3, TOKEN3), (USER4, TOKEN4)):
ret = requests.post(url, headers=get_authorized_headers(token), json={
'method': 'SampleService.replace_sample_acls',
'version': '1.1',
'id': '42',
'params': [{'id': id_, 'acls': {}, 'as_admin': 1}]
})
assert ret.status_code == 500
assert ret.json()['error']['message'] == (
f'Sample service error code 20000 Unauthorized: User {user} does not have the ' +
'necessary administration privileges to run method replace_sample_acls')
def test_replace_acls_fail_bad_user(sample_port):
url = f'http://localhost:{sample_port}'
id_ = _create_generic_sample(url, TOKEN1)
ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
'method': 'SampleService.replace_sample_acls',
'version': '1.1',
'id': '42',
'params': [{'id': id_,
'acls': {
'admin': [USER2, 'a'],
'write': [USER3],
'read': [USER4, 'philbin_j_montgomery_iii']
}
}]
})
assert ret.status_code == 500
assert ret.json()['error']['message'] == (
'Sample service error code 50000 No such user: a, philbin_j_montgomery_iii')
def test_replace_acls_fail_user_in_2_acls(sample_port):
url = f'http://localhost:{sample_port}'
id_ = _create_generic_sample(url, TOKEN1)
ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
'method': 'SampleService.replace_sample_acls',
'version': '1.1',
'id': '42',
'params': [{'id': id_, 'acls': {'write': [USER2, USER3], 'read': [USER2]}}]
})
assert ret.status_code == 500
assert ret.json()['error']['message'] == (
'Sample service error code 30001 Illegal input parameter: ' +
f'User {USER2} appears in two ACLs')
def test_replace_acls_fail_owner_in_another_acl(sample_port):
url = f'http://localhost:{sample_port}'
id_ = _create_generic_sample(url, TOKEN1)
ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
'method': 'SampleService.replace_sample_acls',
'version': '1.1',
'id': '42',
'params': [{'id': id_, 'acls': {'write': [USER1]}}]
})
assert ret.status_code == 500
assert ret.json()['error']['message'] == (
'Sample service error code 30001 Illegal input parameter: ' +
'The owner cannot be in any other ACL')
def test_update_acls(sample_port, kafka):
_update_acls_tst(sample_port, kafka, TOKEN1, False) # owner
_update_acls_tst(sample_port, kafka, TOKEN2, False) # admin
_update_acls_tst(sample_port, kafka, TOKEN5, True) # as_admin = True
def _update_acls_tst(sample_port, kafka, token, as_admin):
_clear_kafka_messages(kafka)
url = f'http://localhost:{sample_port}'
id_ = _create_generic_sample(url, TOKEN1)
_replace_acls(url, id_, TOKEN1, {
'admin': [USER2],
'write': [USER_NO_TOKEN1, USER_NO_TOKEN2, USER3],
'read': [USER_NO_TOKEN3, USER4],
'public_read': 0
})
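# delta-update the ACLs; public_read is an integer flag, so the 390 below is expected to be
# treated as "true" (the assertions that follow expect public_read == 1)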
_update_acls(url, token, {
'id': str(id_),
'admin': [USER4],
'write': [USER2],
'read': [USER_NO_TOKEN2],
'remove': [USER3],
'public_read': 390,
'as_admin': 1 if as_admin else 0,
})
_assert_acl_contents(url, id_, TOKEN1, {
'owner': USER1,
'admin': [USER4],
'write': [USER2, USER_NO_TOKEN1],
'read': [USER_NO_TOKEN2, USER_NO_TOKEN3],
'public_read': 1
})
_check_kafka_messages(
kafka,
[
{'event_type': 'NEW_SAMPLE', 'sample_id': id_, 'sample_ver': 1},
{'event_type': 'ACL_CHANGE', 'sample_id': id_},
{'event_type': 'ACL_CHANGE', 'sample_id': id_},
])
def test_update_acls_with_at_least(sample_port, kafka):
_update_acls_tst_with_at_least(sample_port, kafka, TOKEN1, False) # owner
_update_acls_tst_with_at_least(sample_port, kafka, TOKEN2, False) # admin
_update_acls_tst_with_at_least(sample_port, kafka, TOKEN5, True) # as_admin = True
def _update_acls_tst_with_at_least(sample_port, kafka, token, as_admin):
_clear_kafka_messages(kafka)
url = f'http://localhost:{sample_port}'
id_ = _create_generic_sample(url, TOKEN1)
_replace_acls(url, id_, TOKEN1, {
'admin': [USER2],
'write': [USER_NO_TOKEN1, USER_NO_TOKEN2, USER3],
'read': [USER_NO_TOKEN3, USER4],
'public_read': 0
})
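# with 'at_least' set, the update only raises permissions: users already at or above the
# requested level keep their existing ACL (e.g. USER2 stays an admin even though listed
# under 'write'), as the assertions below expect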
_update_acls(url, token, {
'id': str(id_),
'admin': [USER4],
'write': [USER2, USER_NO_TOKEN3],
'read': [USER_NO_TOKEN2, USER5],
'remove': [USER3],
'public_read': 390,
'as_admin': 1 if as_admin else 0,
'at_least': 1,
})
_assert_acl_contents(url, id_, TOKEN1, {
'owner': USER1,
'admin': [USER2, USER4],
'write': [USER_NO_TOKEN1, USER_NO_TOKEN2, USER_NO_TOKEN3],
'read': [USER5],
'public_read': 1
}, print_resp=True)
_check_kafka_messages(
kafka,
[
{'event_type': 'NEW_SAMPLE', 'sample_id': id_, 'sample_ver': 1},
{'event_type': 'ACL_CHANGE', 'sample_id': id_},
{'event_type': 'ACL_CHANGE', 'sample_id': id_},
])
def test_update_acls_fail_no_id(sample_port):
url = f'http://localhost:{sample_port}'
id_ = _create_generic_sample(url, TOKEN1)
_update_acls_fail(
url, TOKEN1, {'ids': id_},
'Sample service error code 30000 Missing input parameter: id')
def test_update_acls_fail_bad_pub(sample_port):
url = f'http://localhost:{sample_port}'
id_ = _create_generic_sample(url, TOKEN1)
_update_acls_fail(
url, TOKEN1, {'id': id_, 'public_read': 'thingy'},
'Sample service error code 30001 Illegal input parameter: ' +
'public_read must be an integer if present')
def test_update_acls_fail_permissions(sample_port):
url = f'http://localhost:{sample_port}'
id_ = _create_generic_sample(url, TOKEN1)
_replace_acls(url, id_, TOKEN1, {
'admin': [USER2],
'write': [USER3],
'read': [USER4]
})
for user, token in ((USER3, TOKEN3), (USER4, TOKEN4)):
_update_acls_fail(url, token, {'id': id_}, 'Sample service error code 20000 ' +
f'Unauthorized: User {user} cannot administrate sample {id_}')
def test_update_acls_fail_admin_permissions(sample_port):
url = f'http://localhost:{sample_port}'
id_ = _create_generic_sample(url, TOKEN1)
for user, token in ((USER1, TOKEN1), (USER3, TOKEN3), (USER4, TOKEN4)):
_update_acls_fail(
url, token, {'id': id_, 'as_admin': 1},
f'Sample service error code 20000 Unauthorized: User {user} does not have the ' +
'necessary administration privileges to run method update_sample_acls')
def test_update_acls_fail_bad_user(sample_port):
url = f'http://localhost:{sample_port}'
id_ = _create_generic_sample(url, TOKEN1)
_update_acls_fail(
url,
TOKEN1,
{'id': id_,
'admin': [USER2, 'a'],
'write': [USER3],
'read': [USER4, 'philbin_j_montgomery_iii'],
'remove': ['someguy']
},
'Sample service error code 50000 No such user: a, philbin_j_montgomery_iii, someguy')
def test_update_acls_fail_user_2_acls(sample_port):
url = f'http://localhost:{sample_port}'
id_ = _create_generic_sample(url, TOKEN1)
_update_acls_fail(
url,
TOKEN1,
{'id': id_,
'admin': [USER2],
'write': [USER3],
'read': [USER4, USER2],
},
'Sample service error code 30001 Illegal input parameter: User user2 appears in two ACLs')
def test_update_acls_fail_user_in_acl_and_remove(sample_port):
url = f'http://localhost:{sample_port}'
id_ = _create_generic_sample(url, TOKEN1)
_update_acls_fail(
url,
TOKEN1,
{'id': id_,
'admin': [USER2],
'write': [USER3],
'read': [USER4],
'remove': [USER2]
},
'Sample service error code 30001 Illegal input parameter: Users in the remove list ' +
'cannot be in any other ACL')
def test_update_acls_fail_owner_in_another_acl(sample_port):
url = f'http://localhost:{sample_port}'
id_ = _create_generic_sample(url, TOKEN1)
_update_acls_fail(
url, TOKEN1, {'id': id_, 'write': [USER1]},
'Sample service error code 20000 Unauthorized: ' +
'ACLs for the sample owner user1 may not be modified by a delta update.')
def test_update_acls_fail_owner_in_remove_acl(sample_port):
url = f'http://localhost:{sample_port}'
id_ = _create_generic_sample(url, TOKEN1)
_update_acls_fail(
url, TOKEN1, {'id': id_, 'remove': [USER1]},
'Sample service error code 20000 Unauthorized: ' +
'ACLs for the sample owner user1 may not be modified by a delta update.')
def _update_acls_fail(url, token, params, expected):
ret = requests.post(url, headers=get_authorized_headers(token), json={
'method': 'SampleService.update_sample_acls',
'version': '1.1',
'id': '42',
'params': [params]
})
assert ret.status_code == 500
assert ret.json()['error']['message'] == expected
def _update_acls(url, token, params, print_resp=False):
ret = requests.post(url, headers=get_authorized_headers(token), json={
'method': 'SampleService.update_sample_acls',
'version': '1.1',
'id': '67',
'params': [params]
})
if print_resp:
print(ret.text)
assert ret.ok is True
assert ret.json() == {'version': '1.1', 'id': '67', 'result': None}
def _update_samples_acls(url, token, params, print_resp=False):
resp = requests.post(url, headers=get_authorized_headers(token), json={
'method': 'SampleService.update_samples_acls',
'version': '1.1',
'id': '1729',
'params': [params]
})
if print_resp:
print(resp.text)
return resp
def test_update_acls_many(sample_port):
url = f'http://localhost:{sample_port}'
# create samples
n_samples = 2  # can be raised (e.g. to 1000) for a larger-scale run
ids = _create_samples(url, TOKEN1, n_samples, 1)
for id_ in ids:
_update_acls(
url,
TOKEN1,
{
'id': str(id_),
'admin': [],
'write': [],
'read': [USER2],
'remove': [],
'public_read': 1,
'as_admin': 0,
},
print_resp=True,
)
def test_update_acls_many_bulk(sample_port):
url = f'http://localhost:{sample_port}'
# create samples
n_samples = 2  # can be raised (e.g. to 1000) for a larger-scale run
ids = _create_samples(url, TOKEN1, n_samples, 1)
resp = _update_samples_acls(
url,
TOKEN1,
{
'ids': ids,
'admin': [],
'write': [],
'read': [USER2],
'remove': [],
'public_read': 1,
'as_admin': 0,
},
print_resp=True,
)
assert resp.ok
assert resp.json()['result'] is None
def test_update_acls_many_bulk_fail(sample_port):
url = f'http://localhost:{sample_port}'
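# the nil UUID (all zeros) is syntactically valid but should not match any stored sample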
sample_bad_id = str(uuid.UUID('0'*32))
resp = _update_samples_acls(
url,
TOKEN1,
{
'ids': [sample_bad_id],
'admin': [],
'write': [],
'read': [USER2],
'remove': [],
'public_read': 1,
'as_admin': 0,
},
print_resp=True,
)
assert resp.status_code == 500
msg = f"Sample service error code 50010 No such sample: {sample_bad_id}"
assert resp.json()['error']['message'] == msg
def test_get_metadata_key_static_metadata(sample_port):
_get_metadata_key_static_metadata(
sample_port, {'keys': ['foo']}, {'foo': {'a': 'b', 'c': 'd'}})
_get_metadata_key_static_metadata(
sample_port,
{'keys': ['foo', 'stringlentest'], 'prefix': 0},
{'foo': {'a': 'b', 'c': 'd'}, 'stringlentest': {'h': 'i', 'j': 'k'}})
_get_metadata_key_static_metadata(
sample_port, {'keys': ['pre'], 'prefix': 1}, {'pre': {'1': '2'}})
_get_metadata_key_static_metadata(
sample_port, {'keys': ['premature'], 'prefix': 2}, {'pre': {'1': '2'}})
def _get_metadata_key_static_metadata(sample_port, params, expected):
url = f'http://localhost:{sample_port}'
ret = requests.post(url, json={
'method': 'SampleService.get_metadata_key_static_metadata',
'version': '1.1',
'id': '67',
'params': [params]
})
# print(ret.text)
assert ret.ok is True
assert ret.json()['result'][0] == {'static_metadata': expected}
def test_get_metadata_key_static_metadata_fail_bad_args(sample_port):
_get_metadata_key_static_metadata_fail(
sample_port,
{},
'Sample service error code 30001 Illegal input parameter: keys must be a list')
_get_metadata_key_static_metadata_fail(
sample_port,
{'keys': ['foo', 'stringlentestage'], 'prefix': 0},
'Sample service error code 30001 Illegal input parameter: No such metadata key: ' +
'stringlentestage')
_get_metadata_key_static_metadata_fail(
sample_port,
{'keys': ['premature'], 'prefix': 1},
'Sample service error code 30001 Illegal input parameter: No such prefix metadata key: ' +
'premature')
_get_metadata_key_static_metadata_fail(
sample_port,
{'keys': ['somekey'], 'prefix': 2},
'Sample service error code 30001 Illegal input parameter: No prefix metadata keys ' +
'matching key somekey')
def _get_metadata_key_static_metadata_fail(sample_port, params, error):
url = f'http://localhost:{sample_port}'
ret = requests.post(url, json={
'method': 'SampleService.get_metadata_key_static_metadata',
'version': '1.1',
'id': '67',
'params': [params]
})
# print(ret.text)
assert ret.status_code == 500
assert ret.json()['error']['message'] == error
def _create_sample(url, token, sample, expected_version):
ret = requests.post(url, headers=get_authorized_headers(token), json={
'method': 'SampleService.create_sample',
'version': '1.1',
'id': '67',
'params': [{'sample': sample}]
})
# print(ret.text)
assert ret.ok is True
assert ret.json()['result'][0]['version'] == expected_version
return ret.json()['result'][0]['id']
def _sample_factory(name):
return {
"sample": {
"name": name,
"node_tree": [{
"id": "root",
"type": "BioReplicate",
},
{
"id": "foo",
"parent": "root",
"type": "TechReplicate",
}
]
}
}
def _create_samples(url, token, n, expected_version, sample_factory=None):
if sample_factory is None:
sample_factory = _sample_factory
ids = []
for i in range(n):
sample = sample_factory(f"sample-{i}")
resp = requests.post(url, headers=get_authorized_headers(token), json={
'method': 'SampleService.create_sample',
'version': '1.1',
'id': '67',
'params': [sample]
})
assert resp.ok
data = resp.json()["result"][0]
assert data["version"] == expected_version
ids.append(data["id"])
return ids
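# Example usage (illustrative only, not exercised by the tests in this file): a custom factory
# can be supplied to vary the generated samples, e.g.
#   _create_samples(url, TOKEN1, 3, 1, sample_factory=lambda name: {
#       'sample': {'name': name, 'node_tree': [{'id': 'root', 'type': 'BioReplicate'}]}})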
def _create_link(url, token, expected_user, params, print_resp=False):
ret = requests.post(url, headers=get_authorized_headers(token), json={
'method': 'SampleService.create_data_link',
'version': '1.1',
'id': '42',
'params': [params]
})
if print_resp:
print(ret.text)
assert ret.ok is True
link = ret.json()['result'][0]['new_link']
id_ = link['linkid']
uuid.UUID(id_) # check the ID is a valid UUID
del link['linkid']
created = link['created']
assert_ms_epoch_close_to_now(created)
del link['created']
assert link == {
'id': params['id'],
'version': params['version'],
'node': params['node'],
'upa': params['upa'],
'dataid': params.get('dataid'),
'createdby': expected_user,
'expiredby': None,
'expired': None
}
return id_
def _create_sample_and_links_for_propagate_links(url, token, user):
# create samples
sid = _create_sample(
url,
token,
{'name': 'mysample',
'node_tree': [{'id': 'root', 'type': 'BioReplicate'},
{'id': 'foo', 'type': 'TechReplicate', 'parent': 'root'}
]
},
1
)
# ver 2
_create_sample(
url,
token,
{'id': sid,
'name': 'mysample2',
'node_tree': [{'id': 'root', 'type': 'BioReplicate'},
{'id': 'foo', 'type': 'TechReplicate', 'parent': 'root'}
]
},
2
)
# create links
lid1 = _create_link(
url, token, user,
{'id': sid, 'version': 1, 'node': 'root', 'upa': '1/1/1', 'dataid': 'column1'})
lid2 = _create_link(
url, token, user,
{'id': sid, 'version': 1, 'node': 'root', 'upa': '1/2/1', 'dataid': 'column2'})
return sid, lid1, lid2
def _check_data_links(links, expected_links):
assert len(links) == len(expected_links)
for link in links:
assert_ms_epoch_close_to_now(link['created'])
del link['created']
for link in expected_links:
assert link in links
def _check_sample_data_links(url, sample_id, version, expected_links, token):
ret = requests.post(url, headers=get_authorized_headers(token), json={
'method': 'SampleService.get_data_links_from_sample',
'version': '1.1',
'id': '42',
'params': [{'id': sample_id, 'version': version}]
})
# print(ret.text)
assert ret.ok is True
assert len(ret.json()['result']) == 1
assert len(ret.json()['result'][0]) == 2
assert_ms_epoch_close_to_now(ret.json()['result'][0]['effective_time'])
links = ret.json()['result'][0]['links']
_check_data_links(links, expected_links)
def test_create_and_propagate_data_links(sample_port, workspace, kafka):
_clear_kafka_messages(kafka)
url = f'http://localhost:{sample_port}'
wsurl = f'http://localhost:{workspace.port}'
wscli = Workspace(wsurl, token=TOKEN3)
# create workspace & objects
wscli.create_workspace({'workspace': 'foo'})
wscli.save_objects({'id': 1, 'objects': [
{'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
{'name': 'baz', 'data': {}, 'type': 'Trivial.Object-1.0'},
]})
sid, lid1, lid2 = _create_sample_and_links_for_propagate_links(url, TOKEN3, USER3)
# check initial links for both versions
expected_links = [
{
'linkid': lid1,
'id': sid,
'version': 1,
'node': 'root',
'upa': '1/1/1',
'dataid': 'column1',
'createdby': USER3,
'expiredby': None,
'expired': None
},
{
'linkid': lid2,
'id': sid,
'version': 1,
'node': 'root',
'upa': '1/2/1',
'dataid': 'column2',
'createdby': USER3,
'expiredby': None,
'expired': None
}
]
_check_sample_data_links(url, sid, 1, expected_links, TOKEN3)
_check_sample_data_links(url, sid, 2, [], TOKEN3)
# propagate data links from sample version 1 to version 2
ret = requests.post(url, headers=get_authorized_headers(TOKEN3), json={
'method': 'SampleService.propagate_data_links',
'version': '1.1',
'id': '38',
'params': [{'id': sid, 'version': 2, 'previous_version': 1}]
})
# print(ret.text)
assert ret.ok is True
assert len(ret.json()['result']) == 1
assert len(ret.json()['result'][0]) == 1
links = ret.json()['result'][0]['links']
new_link_ids = [i['linkid'] for i in links]
expected_new_links = copy.deepcopy(expected_links)
# propagated links should have new link id, dataid and version
for idx, expected_link in enumerate(expected_new_links):
expected_link['linkid'] = new_link_ids[idx]
expected_link['dataid'] = expected_link['dataid'] + '_2'
expected_link['version'] = 2
_check_data_links(links, expected_new_links)
# check links again for sample version 1 and 2
_check_sample_data_links(url, sid, 1, expected_links, TOKEN3)
_check_sample_data_links(url, sid, 2, expected_new_links, TOKEN3)
def test_create_and_propagate_data_links_type_specific(sample_port, workspace, kafka):
_clear_kafka_messages(kafka)
url = f'http://localhost:{sample_port}'
wsurl = f'http://localhost:{workspace.port}'
wscli = Workspace(wsurl, token=TOKEN3)
# create workspace & objects
wscli.create_workspace({'workspace': 'foo'})
wscli.save_objects({'id': 1, 'objects': [
{'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
{'name': 'baz', 'data': {}, 'type': 'Trivial.Object2-1.0'},
]})
sid, lid1, lid2 = _create_sample_and_links_for_propagate_links(url, TOKEN3, USER3)
# check initial links for both versions
expected_links = [
{
'linkid': lid1,
'id': sid,
'version': 1,
'node': 'root',
'upa': '1/1/1',
'dataid': 'column1',
'createdby': USER3,
'expiredby': None,
'expired': None
},
{
'linkid': lid2,
'id': sid,
'version': 1,
'node': 'root',
'upa': '1/2/1',
'dataid': 'column2',
'createdby': USER3,
'expiredby': None,
'expired': None
}
]
_check_sample_data_links(url, sid, 1, expected_links, TOKEN3)
_check_sample_data_links(url, sid, 2, [], TOKEN3)
# propagate data links from sample version 1 to version 2
ret = requests.post(url, headers=get_authorized_headers(TOKEN3), json={
'method': 'SampleService.propagate_data_links',
'version': '1.1',
'id': '38',
'params': [{'id': sid, 'version': 2, 'previous_version': 1,
'ignore_types': ['Trivial.Object2']}]
})
# print(ret.text)
assert ret.ok is True
assert len(ret.json()['result']) == 1
assert len(ret.json()['result'][0]) == 1
links = ret.json()['result'][0]['links']
new_link_ids = [i['linkid'] for i in links]
expected_new_links = copy.deepcopy(expected_links)
expected_new_links.pop()
assert len(expected_new_links) == 1
# propagated links should have new link id, dataid and version
for idx, expected_link in enumerate(expected_new_links):
expected_link['linkid'] = new_link_ids[idx]
expected_link['dataid'] = expected_link['dataid'] + '_2'
expected_link['version'] = 2
_check_data_links(links, expected_new_links)
# check links again for sample version 1 and 2
_check_sample_data_links(url, sid, 1, expected_links, TOKEN3)
_check_sample_data_links(url, sid, 2, expected_new_links, TOKEN3)
def test_create_links_and_get_links_from_sample_basic(sample_port, workspace, kafka):
'''
Also tests that the 'as_user' key is ignored if 'as_admin' is falsy.
'''
_clear_kafka_messages(kafka)
url = f'http://localhost:{sample_port}'
wsurl = f'http://localhost:{workspace.port}'
wscli = Workspace(wsurl, token=TOKEN3)
# create workspace & objects
wscli.create_workspace({'workspace': 'foo'})
wscli.save_objects({'id': 1, 'objects': [
{'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
{'name': 'baz', 'data': {}, 'type': 'Trivial.Object-1.0'},
{'name': 'baz', 'data': {}, 'type': 'Trivial.Object-1.0'}
]})
wscli.set_permissions({'id': 1, 'new_permission': 'w', 'users': [USER4]})
# create samples
id1 = _create_sample(
url,
TOKEN3,
{'name': 'mysample',
'node_tree': [{'id': 'root', 'type': 'BioReplicate'},
{'id': 'foo', 'type': 'TechReplicate', 'parent': 'root'}
]
},
1
)
id2 = _create_sample(
url,
TOKEN4,
{'name': 'myothersample',
'node_tree': [{'id': 'root2', 'type': 'BioReplicate'},
{'id': 'foo2', 'type': 'TechReplicate', 'parent': 'root2'}
]
},
1
)
# ver 2
_create_sample(
url,
TOKEN4,
{'id': id2,
'name': 'myothersample3',
'node_tree': [{'id': 'root3', 'type': 'BioReplicate'},
{'id': 'foo3', 'type': 'TechReplicate', 'parent': 'root3'}
]
},
2
)
# create links
# as_user should be ignored unless as_admin is true
lid1 = _create_link(url, TOKEN3, USER3,
{'id': id1, 'version': 1, 'node': 'foo', 'upa': '1/2/2', 'as_user': USER1})
lid2 = _create_link(
url, TOKEN3, USER3,
{'id': id1, 'version': 1, 'node': 'root', 'upa': '1/1/1', 'dataid': 'column1'})
lid3 = _create_link(
url, TOKEN4, USER4,
{'id': id2, 'version': 1, 'node': 'foo2', 'upa': '1/2/1', 'dataid': 'column2'})
# get links from sample 1
ret = requests.post(url, headers=get_authorized_headers(TOKEN3), json={
'method': 'SampleService.get_data_links_from_sample',
'version': '1.1',
'id': '42',
'params': [{'id': id1, 'version': 1}]
})
# print(ret.text)
assert ret.ok is True
assert len(ret.json()['result']) == 1
assert len(ret.json()['result'][0]) == 2
assert_ms_epoch_close_to_now(ret.json()['result'][0]['effective_time'])
res = ret.json()['result'][0]['links']
expected_links = [
{
'linkid': lid1,
'id': id1,
'version': 1,
'node': 'foo',
'upa': '1/2/2',
'dataid': None,
'createdby': USER3,
'expiredby': None,
'expired': None
},
{
'linkid': lid2,
'id': id1,
'version': 1,
'node': 'root',
'upa': '1/1/1',
'dataid': 'column1',
'createdby': USER3,
'expiredby': None,
'expired': None
}
]
assert len(res) == len(expected_links)
for link in res:
assert_ms_epoch_close_to_now(link['created'])
del link['created']
for link in expected_links:
assert link in res
# get links from sample 2
ret = requests.post(url, headers=get_authorized_headers(TOKEN4), json={
'method': 'SampleService.get_data_links_from_sample',
'version': '1.1',
'id': '42',
'params': [{'id': id2, 'version': 1}]
})
# print(ret.text)
assert ret.ok is True
assert len(ret.json()['result']) == 1
assert len(ret.json()['result'][0]) == 2
assert_ms_epoch_close_to_now(ret.json()['result'][0]['effective_time'])
res = ret.json()['result'][0]['links']
assert_ms_epoch_close_to_now(res[0]['created'])
del res[0]['created']
assert res == [
{
'linkid': lid3,
'id': id2,
'version': 1,
'node': 'foo2',
'upa': '1/2/1',
'dataid': 'column2',
'createdby': USER4,
'expiredby': None,
'expired': None
}
]
# get links from ver 2 of sample 2
ret = requests.post(url, headers=get_authorized_headers(TOKEN4), json={
'method': 'SampleService.get_data_links_from_sample',
'version': '1.1',
'id': '42',
'params': [{'id': id2, 'version': 2}]
})
# print(ret.text)
assert ret.ok is True
assert len(ret.json()['result']) == 1
assert len(ret.json()['result'][0]) == 2
assert_ms_epoch_close_to_now(ret.json()['result'][0]['effective_time'])
assert ret.json()['result'][0]['links'] == []
_check_kafka_messages(
kafka,
[
{'event_type': 'NEW_SAMPLE', 'sample_id': id1, 'sample_ver': 1},
{'event_type': 'NEW_SAMPLE', 'sample_id': id2, 'sample_ver': 1},
{'event_type': 'NEW_SAMPLE', 'sample_id': id2, 'sample_ver': 2},
{'event_type': 'NEW_LINK', 'link_id': lid1},
{'event_type': 'NEW_LINK', 'link_id': lid2},
{'event_type': 'NEW_LINK', 'link_id': lid3},
])
def test_update_and_get_links_from_sample(sample_port, workspace, kafka):
'''
Also tests getting links from a sample using an effective time
'''
_clear_kafka_messages(kafka)
url = f'http://localhost:{sample_port}'
wsurl = f'http://localhost:{workspace.port}'
wscli = Workspace(wsurl, token=TOKEN3)
# create workspace & objects
wscli.create_workspace({'workspace': 'foo'})
wscli.save_objects({'id': 1, 'objects': [
{'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
]})
wscli.set_permissions({'id': 1, 'new_permission': 'w', 'users': [USER4]})
# create samples
id1 = _create_sample(
url,
TOKEN3,
{'name': 'mysample',
'node_tree': [{'id': 'root', 'type': 'BioReplicate'},
{'id': 'foo', 'type': 'TechReplicate', 'parent': 'root'}
]
},
1
)
_replace_acls(url, id1, TOKEN3, {'admin': [USER4]})
# create links
lid1 = _create_link(url, TOKEN3, USER3,
{'id': id1, 'version': 1, 'node': 'foo', 'upa': '1/1/1', 'dataid': 'yay'})
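# note a time while only the original link is active, then wait so the replacement link
# created below gets a strictly later creation time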
oldlinkactive = datetime.datetime.now()
time.sleep(1)
# update link node
lid2 = _create_link(
url,
TOKEN4,
USER4,
{'id': id1,
'version': 1,
'node': 'root',
'upa': '1/1/1',
'dataid': 'yay',
'update': 1})
# get current link
ret = requests.post(url, headers=get_authorized_headers(TOKEN3), json={
'method': 'SampleService.get_data_links_from_sample',
'version': '1.1',
'id': '42',
'params': [{'id': id1, 'version': 1}]
})
# print(ret.text)
assert ret.ok is True
assert len(ret.json()['result']) == 1
res = ret.json()['result'][0]
assert len(res) == 2
assert_ms_epoch_close_to_now(res['effective_time'])
del res['effective_time']
created = res['links'][0]['created']
assert_ms_epoch_close_to_now(created)
del res['links'][0]['created']
assert res == {'links': [
{
'linkid': lid2,
'id': id1,
'version': 1,
'node': 'root',
'upa': '1/1/1',
'dataid': 'yay',
'createdby': USER4,
'expiredby': None,
'expired': None
}
]}
# get expired link
ret = requests.post(url, headers=get_authorized_headers(TOKEN3), json={
'method': 'SampleService.get_data_links_from_sample',
'version': '1.1',
'id': '42',
'params': [{
'id': id1,
'version': 1,
'effective_time': round(oldlinkactive.timestamp() * 1000)}]
})
# print(ret.text)
assert ret.ok is True
assert len(ret.json()['result']) == 1
res = ret.json()['result'][0]
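# the superseded link should expire 1 ms before the replacement was created, and its own
# creation happened roughly a second ago (hence the + 1000 adjustment below)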
assert res['links'][0]['expired'] == created - 1
assert_ms_epoch_close_to_now(res['links'][0]['created'] + 1000)
del res['links'][0]['created']
del res['links'][0]['expired']
assert res == {
'effective_time': round(oldlinkactive.timestamp() * 1000),
'links': [
{
'linkid': lid1,
'id': id1,
'version': 1,
'node': 'foo',
'upa': '1/1/1',
'dataid': 'yay',
'createdby': USER3,
'expiredby': USER4,
}
]}
_check_kafka_messages(
kafka,
[
{'event_type': 'NEW_SAMPLE', 'sample_id': id1, 'sample_ver': 1},
{'event_type': 'ACL_CHANGE', 'sample_id': id1},
{'event_type': 'NEW_LINK', 'link_id': lid1},
{'event_type': 'NEW_LINK', 'link_id': lid2},
{'event_type': 'EXPIRED_LINK', 'link_id': lid1},
])
def test_create_data_link_as_admin(sample_port, workspace):
url = f'http://localhost:{sample_port}'
wsurl = f'http://localhost:{workspace.port}'
wscli = Workspace(wsurl, token=TOKEN3)
# create workspace & objects
wscli.create_workspace({'workspace': 'foo'})
wscli.save_objects({'id': 1, 'objects': [
{'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
]})
# create samples
id1 = _create_sample(
url,
TOKEN3,
{'name': 'mysample',
'node_tree': [{'id': 'root', 'type': 'BioReplicate'},
{'id': 'foo', 'type': 'TechReplicate', 'parent': 'root'}
]
},
1
)
# create links
lid1 = _create_link(
url,
TOKEN2,
USER2,
{'id': id1,
'version': 1,
'node': 'root',
'upa': '1/1/1',
'dataid': 'yeet',
'as_admin': 1})
lid2 = _create_link(
url,
TOKEN2,
USER4,
{'id': id1,
'version': 1,
'node': 'foo',
'upa': '1/1/1',
'as_admin': 1,
'as_user': f' {USER4} '})
# get link
ret = requests.post(url, headers=get_authorized_headers(TOKEN3), json={
'method': 'SampleService.get_data_links_from_sample',
'version': '1.1',
'id': '42',
'params': [{'id': id1, 'version': 1}]
})
# print(ret.text)
assert ret.ok is True
assert len(ret.json()['result']) == 1
assert len(ret.json()['result'][0]) == 2
assert_ms_epoch_close_to_now(ret.json()['result'][0]['effective_time'])
res = ret.json()['result'][0]['links']
expected_links = [
{
'linkid': lid1,
'id': id1,
'version': 1,
'node': 'root',
'upa': '1/1/1',
'dataid': 'yeet',
'createdby': USER2,
'expiredby': None,
'expired': None
},
{
'linkid': lid2,
'id': id1,
'version': 1,
'node': 'foo',
'upa': '1/1/1',
'dataid': None,
'createdby': USER4,
'expiredby': None,
'expired': None
}
]
assert len(res) == len(expected_links)
for link in res:
assert_ms_epoch_close_to_now(link['created'])
del link['created']
for link in expected_links:
assert link in res
def test_get_links_from_sample_exclude_workspaces(sample_port, workspace):
'''
Tests that unreadable workspaces are excluded from link results
'''
url = f'http://localhost:{sample_port}'
wsurl = f'http://localhost:{workspace.port}'
wscli3 = Workspace(wsurl, token=TOKEN3)
wscli4 = Workspace(wsurl, token=TOKEN4)
# create workspace & objects
wscli3.create_workspace({'workspace': 'foo'})
wscli3.save_objects({'id': 1, 'objects': [
{'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
]})
wscli4.create_workspace({'workspace': 'bar'})
wscli4.save_objects({'id': 2, 'objects': [
{'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
]})
wscli4.set_permissions({'id': 2, 'new_permission': 'r', 'users': [USER3]})
wscli4.create_workspace({'workspace': 'baz'})
wscli4.save_objects({'id': 3, 'objects': [
{'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
]})
wscli4.set_global_permission({'id': 3, 'new_permission': 'r'})
wscli4.create_workspace({'workspace': 'bat'}) # unreadable
wscli4.save_objects({'id': 4, 'objects': [
{'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
]})
# create sample
id_ = _create_generic_sample(url, TOKEN3)
_replace_acls(url, id_, TOKEN3, {'admin': [USER4]})
# create links
lid1 = _create_link(
url, TOKEN3, USER3, {'id': id_, 'version': 1, 'node': 'foo', 'upa': '1/1/1'})
lid2 = _create_link(
url, TOKEN4, USER4, {'id': id_, 'version': 1, 'node': 'foo', 'upa': '2/1/1'})
lid3 = _create_link(url, TOKEN4, USER4,
{'id': id_, 'version': 1, 'node': 'foo', 'upa': '3/1/1', 'dataid': 'whee'})
_create_link(
url, TOKEN4, USER4, {'id': id_, 'version': 1, 'node': 'foo', 'upa': '4/1/1'})
# check correct links are returned
ret = _get_links_from_sample(url, TOKEN3, {'id': id_, 'version': 1})
assert_ms_epoch_close_to_now(ret['effective_time'])
res = ret['links']
expected_links = [
{
'linkid': lid1,
'id': id_,
'version': 1,
'node': 'foo',
'upa': '1/1/1',
'dataid': None,
'createdby': USER3,
'expiredby': None,
'expired': None
},
{
'linkid': lid2,
'id': id_,
'version': 1,
'node': 'foo',
'upa': '2/1/1',
'dataid': None,
'createdby': USER4,
'expiredby': None,
'expired': None
},
{
'linkid': lid3,
'id': id_,
'version': 1,
'node': 'foo',
'upa': '3/1/1',
'dataid': 'whee',
'createdby': USER4,
'expiredby': None,
'expired': None
}
]
assert len(res) == len(expected_links)
for link in res:
assert_ms_epoch_close_to_now(link['created'])
del link['created']
for link in expected_links:
assert link in res
# test with anon user
_replace_acls(url, id_, TOKEN3, {'public_read': 1})
ret = _get_links_from_sample(url, None, {'id': id_, 'version': 1})
assert_ms_epoch_close_to_now(ret['effective_time'])
res = ret['links']
expected_links = [
{
'linkid': lid3,
'id': id_,
'version': 1,
'node': 'foo',
'upa': '3/1/1',
'dataid': 'whee',
'createdby': USER4,
'expiredby': None,
'expired': None
}
]
assert len(res) == len(expected_links)
for link in res:
assert_ms_epoch_close_to_now(link['created'])
del link['created']
for link in expected_links:
assert link in res
def _get_links_from_sample(url, token, params, print_resp=False):
ret = requests.post(url, headers=get_authorized_headers(token), json={
'method': 'SampleService.get_data_links_from_sample',
'version': '1.1',
'id': '42',
'params': [params]
})
if print_resp:
print(ret.text)
assert ret.ok is True
assert len(ret.json()['result']) == 1
assert len(ret.json()['result'][0]) == 2
return ret.json()['result'][0]
def test_get_links_from_sample_as_admin(sample_port, workspace):
url = f'http://localhost:{sample_port}'
wsurl = f'http://localhost:{workspace.port}'
wscli = Workspace(wsurl, token=TOKEN4)
# create workspace & objects
wscli.create_workspace({'workspace': 'foo'})
wscli.save_objects({'id': 1, 'objects': [
{'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
]})
# create sample
id_ = _create_generic_sample(url, TOKEN4)
# create links
lid = _create_link(url, TOKEN4, USER4, {'id': id_, 'version': 1, 'node': 'foo', 'upa': '1/1/1'})
# check correct links are returned, user 3 has read admin perms, but not full
ret = requests.post(url, headers=get_authorized_headers(TOKEN3), json={
'method': 'SampleService.get_data_links_from_sample',
'version': '1.1',
'id': '42',
'params': [{'id': id_, 'version': 1, 'as_admin': 1}]
})
# print(ret.text)
assert ret.ok is True
assert len(ret.json()['result']) == 1
assert len(ret.json()['result'][0]) == 2
assert_ms_epoch_close_to_now(ret.json()['result'][0]['effective_time'])
assert len(ret.json()['result'][0]['links']) == 1
link = ret.json()['result'][0]['links'][0]
assert_ms_epoch_close_to_now(link['created'])
del link['created']
assert link == {
'linkid': lid,
'id': id_,
'version': 1,
'node': 'foo',
'upa': '1/1/1',
'dataid': None,
'createdby': USER4,
'expiredby': None,
'expired': None
}
def test_get_links_from_sample_public_read(sample_port, workspace):
url = f'http://localhost:{sample_port}'
wsurl = f'http://localhost:{workspace.port}'
wscli = Workspace(wsurl, token=TOKEN1)
# create workspace & objects
wscli.create_workspace({'workspace': 'foo'})
wscli.save_objects({'id': 1, 'objects': [
{'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
]})
wscli.set_global_permission({'id': 1, 'new_permission': 'r'})
# create sample
id_ = _create_generic_sample(url, TOKEN1)
# create links
lid = _create_link(url, TOKEN1, USER1, {'id': id_, 'version': 1, 'node': 'foo', 'upa': '1/1/1'})
_replace_acls(url, id_, TOKEN1, {'public_read': 1})
for token in [None, TOKEN4]: # anon user & user without explicit permission
# check correct links are returned
ret = requests.post(url, headers=get_authorized_headers(token), json={
'method': 'SampleService.get_data_links_from_sample',
'version': '1.1',
'id': '42',
'params': [{'id': id_, 'version': 1}]
})
# print(ret.text)
assert ret.ok is True
assert len(ret.json()['result']) == 1
assert len(ret.json()['result'][0]) == 2
assert_ms_epoch_close_to_now(ret.json()['result'][0]['effective_time'])
assert len(ret.json()['result'][0]['links']) == 1
link = ret.json()['result'][0]['links'][0]
assert_ms_epoch_close_to_now(link['created'])
del link['created']
assert link == {
'linkid': lid,
'id': id_,
'version': 1,
'node': 'foo',
'upa': '1/1/1',
'dataid': None,
'createdby': USER1,
'expiredby': None,
'expired': None
}
def test_get_links_from_sample_set(sample_port, workspace):
"""
Test timing for fetching a batch of links from a list of samples.
"""
url = f'http://localhost:{sample_port}'
wsurl = f'http://localhost:{workspace.port}'
wscli = Workspace(wsurl, token=TOKEN1)
N_SAMPLES = 100
# create workspace & objects
wscli.create_workspace({'workspace': 'foo'})
wscli.save_objects({'id': 1, 'objects': [
{'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'} for _ in range(N_SAMPLES)
]})
wscli.set_global_permission({'id': 1, 'new_permission': 'r'})
ids_ = [_create_generic_sample(url, TOKEN1) for _ in range(N_SAMPLES)]
lids = [_create_link(url, TOKEN1, USER1, {
'id': id_,
'version': 1,
'node': 'foo',
'upa': f'1/1/{i+1}'}) for i, id_ in enumerate(ids_)]
start = time.time()
ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
'method': 'SampleService.get_data_links_from_sample_set',
'version': '1.1',
'id': '42',
'params': [{
'sample_ids': [{'id': id_, 'version': 1} for id_ in ids_],
'as_admin': False,
'effective_time': _get_current_epochmillis()
}]
})
end = time.time()
elapsed = end - start
# rough budget: about 1 second per 100 samples (so e.g. 500 samples should take ~5 seconds)
print(f"retrieved data links from {N_SAMPLES} samples in {elapsed} seconds.")
assert ret.ok
# taking more than twice the expected time should raise concern
assert elapsed < 10
assert len(ret.json()['result'][0]['links']) == N_SAMPLES
def test_create_link_fail(sample_port, workspace):
url = f'http://localhost:{sample_port}'
wsurl = f'http://localhost:{workspace.port}'
wscli = Workspace(wsurl, token=TOKEN3)
id_ = _create_generic_sample(url, TOKEN3)
_create_link_fail(
sample_port, TOKEN3, {'version': 1, 'node': 'foo', 'upa': '1/1/1', 'dataid': 'yay'},
'Sample service error code 30000 Missing input parameter: id')
_create_link_fail(
sample_port, TOKEN3, {'id': id_, 'node': 'foo', 'upa': '1/1/1', 'dataid': 'yay'},
'Sample service error code 30000 Missing input parameter: version')
_create_link_fail(
sample_port, TOKEN3,
{'id': id_, 'version': 1, 'node': 'foo', 'upa': 'upalupa', 'dataid': 'yay'},
'Sample service error code 30001 Illegal input parameter: upalupa is not a valid UPA')
_create_link_fail(
sample_port, TOKEN3, {'id': id_, 'version': 1, 'node': 'foo', 'upa': '1/1/1'},
'Sample service error code 50040 No such workspace data: No workspace with id 1 exists')
wscli.create_workspace({'workspace': 'foo'})
_create_link_fail(
sample_port, TOKEN3, {'id': id_, 'version': 1, 'node': 'foo', 'upa': '1/1/1'},
'Sample service error code 50040 No such workspace data: Object 1/1/1 does not exist')
_replace_acls(url, id_, TOKEN3, {'write': [USER4]})
_create_link_fail(  # fails when the user has only write access to the sample (admin is required)
sample_port, TOKEN4, {'id': id_, 'version': 1, 'node': 'foo', 'upa': '1/1/1'},
'Sample service error code 20000 Unauthorized: User user4 cannot ' +
f'administrate sample {id_}')
_replace_acls(url, id_, TOKEN3, {'admin': [USER4]})
wscli.set_permissions({'id': 1, 'new_permission': 'r', 'users': [USER4]})
_create_link_fail(  # fails when the user has only read access to the workspace (write is required)
sample_port, TOKEN4, {'id': id_, 'version': 1, 'node': 'foo', 'upa': '1/1/1'},
'Sample service error code 20000 Unauthorized: User user4 cannot write to upa 1/1/1')
wscli.save_objects({'id': 1, 'objects': [
{'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
]})
_create_link_fail(
sample_port, TOKEN3, {'id': id_, 'version': 1, 'node': 'fake', 'upa': '1/1/1'},
f'Sample service error code 50030 No such sample node: {id_} ver 1 fake')
# admin tests
_create_link_fail(
sample_port, TOKEN2,
{'id': id_,
'version': 1,
'node': 'foo',
'upa': '1/1/1',
'as_admin': 1,
'as_user': 'foo\bbar'},
'Sample service error code 30001 Illegal input parameter: ' +
'userid contains control characters')
_create_link_fail(
sample_port, TOKEN3,
{'id': id_, 'version': 1, 'node': 'foo', 'upa': '1/1/1', 'as_user': USER4, 'as_admin': 'f'},
'Sample service error code 20000 Unauthorized: User user3 does not have ' +
'the necessary administration privileges to run method create_data_link')
_create_link_fail(
sample_port,
TOKEN2,
{'id': id_,
'version': 1,
'node': 'foo',
'upa': '1/1/1',
'as_user': 'fake',
'as_admin': 'f'},
'Sample service error code 50000 No such user: fake')
def test_create_link_fail_link_exists(sample_port, workspace):
url = f'http://localhost:{sample_port}'
wsurl = f'http://localhost:{workspace.port}'
wscli = Workspace(wsurl, token=TOKEN3)
wscli.create_workspace({'workspace': 'foo'})
wscli.save_objects({'id': 1, 'objects': [
{'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
]})
id_ = _create_generic_sample(url, TOKEN3)
_create_link(url, TOKEN3, USER3,
{'id': id_, 'version': 1, 'node': 'foo', 'upa': '1/1/1', 'dataid': 'yay'})
_create_link_fail(
sample_port, TOKEN3,
{'id': id_, 'version': 1, 'node': 'root', 'upa': '1/1/1', 'dataid': 'yay'},
'Sample service error code 60000 Data link exists for data ID: 1/1/1:yay')
def _create_link_fail(sample_port, token, params, expected):
url = f'http://localhost:{sample_port}'
ret = requests.post(url, headers=get_authorized_headers(token), json={
'method': 'SampleService.create_data_link',
'version': '1.1',
'id': '42',
'params': [params]
})
assert ret.status_code == 500
assert ret.json()['error']['message'] == expected
def test_get_links_from_sample_fail(sample_port):
url = f'http://localhost:{sample_port}'
id_ = _create_generic_sample(url, TOKEN3)
_get_link_from_sample_fail(
sample_port, TOKEN3, {},
'Sample service error code 30000 Missing input parameter: id')
_get_link_from_sample_fail(
sample_port, TOKEN3, {'id': id_},
'Sample service error code 30000 Missing input parameter: version')
_get_link_from_sample_fail(
sample_port, TOKEN3, {'id': id_, 'version': 1, 'effective_time': 'foo'},
"Sample service error code 30001 Illegal input parameter: key 'effective_time' " +
"value of 'foo' is not a valid epoch millisecond timestamp")
_get_link_from_sample_fail(
sample_port, TOKEN4, {'id': id_, 'version': 1},
f'Sample service error code 20000 Unauthorized: User user4 cannot read sample {id_}')
_get_link_from_sample_fail(
sample_port, None, {'id': id_, 'version': 1},
f'Sample service error code 20000 Unauthorized: Anonymous users cannot read sample {id_}')
badid = uuid.uuid4()
_get_link_from_sample_fail(
sample_port, TOKEN3, {'id': str(badid), 'version': 1},
f'Sample service error code 50010 No such sample: {badid}')
# admin tests
_get_link_from_sample_fail(
sample_port, TOKEN4, {'id': id_, 'version': 1, 'as_admin': 1},
'Sample service error code 20000 Unauthorized: User user4 does not have the ' +
'necessary administration privileges to run method get_data_links_from_sample')
_get_link_from_sample_fail(
sample_port, None, {'id': id_, 'version': 1, 'as_admin': 1},
'Sample service error code 20000 Unauthorized: Anonymous users ' +
'may not act as service administrators.')
def _get_link_from_sample_fail(sample_port, token, params, expected):
url = f'http://localhost:{sample_port}'
ret = requests.post(url, headers=get_authorized_headers(token), json={
'method': 'SampleService.get_data_links_from_sample',
'version': '1.1',
'id': '42',
'params': [params]
})
assert ret.status_code == 500
assert ret.json()['error']['message'] == expected
def test_get_links_from_sample_set_fail(sample_port):
url = f'http://localhost:{sample_port}'
id_ = _create_generic_sample(url, TOKEN3)
_get_links_from_sample_set_fail(
sample_port, TOKEN3, {},
'Missing "sample_ids" field - Must provide a list of valid sample ids.')
_get_links_from_sample_set_fail(
sample_port, TOKEN3, {
'sample_ids': [{'id': id_}]
},
"Malformed sample accessor - each sample must provide both an id and a version.")
_get_links_from_sample_set_fail(
sample_port, TOKEN3, {
'sample_ids': [{'id': id_, 'version': 1}]
},
'Missing "effective_time" parameter.')
_get_links_from_sample_set_fail(
sample_port, TOKEN3, {
'sample_ids': [{'id': id_, 'version': 1}],
'effective_time': 'foo'
},
"Sample service error code 30001 Illegal input parameter: key 'effective_time' " +
"value of 'foo' is not a valid epoch millisecond timestamp")
_get_links_from_sample_set_fail(
sample_port, TOKEN4, {
'sample_ids': [{'id': id_, 'version': 1}],
'effective_time': _get_current_epochmillis() - 500
},
f'Sample service error code 20000 Unauthorized: User user4 cannot read sample {id_}')
_get_links_from_sample_set_fail(
sample_port, None, {
'sample_ids': [{'id': id_, 'version': 1}],
'effective_time': _get_current_epochmillis() - 500
},
f'Sample service error code 20000 Unauthorized: Anonymous users cannot read sample {id_}')
badid = uuid.uuid4()
_get_links_from_sample_set_fail(
sample_port, TOKEN3, {
'sample_ids': [{'id': str(badid), 'version': 1}],
'effective_time': _get_current_epochmillis() - 500
},
'Sample service error code 50010 No such sample:'
f" Could not complete search for samples: ['{badid}']")
# admin tests
_get_links_from_sample_set_fail(
sample_port, TOKEN4, {
'sample_ids': [{'id': id_, 'version': 1}],
'effective_time': _get_current_epochmillis() - 500,
'as_admin': 1,
},
'Sample service error code 20000 Unauthorized: User user4 does not have the ' +
'necessary administration privileges to run method get_data_links_from_sample')
_get_links_from_sample_set_fail(
sample_port, None, {
'sample_ids': [{'id': id_, 'version': 1}],
'effective_time': _get_current_epochmillis() - 500,
'as_admin': 1
},
'Sample service error code 20000 Unauthorized: Anonymous users ' +
'may not act as service administrators.')
def _get_links_from_sample_set_fail(sample_port, token, params, expected):
url = f'http://localhost:{sample_port}'
ret = requests.post(url, headers=get_authorized_headers(token), json={
'method': 'SampleService.get_data_links_from_sample_set',
'version': '1.1',
'id': '42',
'params': [params]
})
assert ret.status_code == 500
assert ret.json()['error']['message'] == expected
def _get_current_epochmillis():
return round(datetime.datetime.now(tz=datetime.timezone.utc).timestamp() * 1000)
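# A minimal sketch (not used by the tests in this file) of the JSON-RPC 1.1 call shape that the
# tests above build inline with requests.post; shown only for reference.
def _post_rpc_sketch(url, token, method, params, id_='42'):
    # POST a single-param JSON-RPC 1.1 call to the sample service and return the raw response
    return requests.post(url, headers=get_authorized_headers(token), json={
        'method': f'SampleService.{method}',
        'version': '1.1',
        'id': id_,
        'params': [params]
    })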
def test_expire_data_link(sample_port, workspace, kafka):
_expire_data_link(sample_port, workspace, None, kafka)
def test_expire_data_link_with_data_id(sample_port, workspace, kafka):
_expire_data_link(sample_port, workspace, 'whee', kafka)
def _expire_data_link(sample_port, workspace, dataid, kafka):
''' also tests that 'as_user' is ignored if 'as_admin' is false '''
_clear_kafka_messages(kafka)
url = f'http://localhost:{sample_port}'
wsurl = f'http://localhost:{workspace.port}'
wscli = Workspace(wsurl, token=TOKEN3)
# create workspace & objects
wscli.create_workspace({'workspace': 'foo'})
wscli.save_objects({'id': 1, 'objects': [
{'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
]})
wscli.set_permissions({'id': 1, 'new_permission': 'w', 'users': [USER4]})
# create samples
id1 = _create_sample(
url,
TOKEN3,
{'name': 'mysample',
'node_tree': [{'id': 'root', 'type': 'BioReplicate'},
{'id': 'foo', 'type': 'TechReplicate', 'parent': 'root'},
{'id': 'bar', 'type': 'TechReplicate', 'parent': 'root'}
]
},
1
)
_replace_acls(url, id1, TOKEN3, {'admin': [USER4]})
# create links
lid1 = _create_link(url, TOKEN3, USER3,
{'id': id1, 'version': 1, 'node': 'foo', 'upa': '1/1/1', 'dataid': dataid})
lid2 = _create_link(url, TOKEN3, USER3,
{'id': id1, 'version': 1, 'node': 'bar', 'upa': '1/1/1', 'dataid': 'fake'})
time.sleep(1)  # need to be able to set a reasonable effective time to fetch links
# expire link
ret = requests.post(url, headers=get_authorized_headers(TOKEN4), json={
'method': 'SampleService.expire_data_link',
'version': '1.1',
'id': '42',
'params': [{'upa': '1/1/1', 'dataid': dataid, 'as_user': USER1}]
})
# print(ret.text)
assert ret.ok is True
# check links
ret = requests.post(url, headers=get_authorized_headers(TOKEN4), json={
'method': 'SampleService.get_data_links_from_data',
'version': '1.1',
'id': '42',
'params': [{'upa': '1/1/1', 'effective_time': _get_current_epochmillis() - 500}]
})
# print(ret.text)
assert ret.ok is True
assert len(ret.json()['result']) == 1
assert len(ret.json()['result'][0]) == 2
assert_ms_epoch_close_to_now(ret.json()['result'][0]['effective_time'])
links = ret.json()['result'][0]['links']
assert len(links) == 2
for link in links:
if link['dataid'] == 'fake':
current_link = link
else:
expired_link = link
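# the expired link was created before the 1 second sleep above, so shifting its creation
# time forward by 1000 ms should land close to now; its expiry itself should be close to now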
assert_ms_epoch_close_to_now(expired_link['expired'])
assert_ms_epoch_close_to_now(expired_link['created'] + 1000)
del expired_link['created']
del expired_link['expired']
assert expired_link == {
'linkid': lid1,
'id': id1,
'version': 1,
'node': 'foo',
'upa': '1/1/1',
'dataid': dataid,
'createdby': USER3,
'expiredby': USER4,
}
assert_ms_epoch_close_to_now(current_link['created'] + 1000)
del current_link['created']
assert current_link == {
'linkid': lid2,
'id': id1,
'version': 1,
'node': 'bar',
'upa': '1/1/1',
'dataid': 'fake',
'createdby': USER3,
'expiredby': None,
'expired': None
}
_check_kafka_messages(
kafka,
[
{'event_type': 'NEW_SAMPLE', 'sample_id': id1, 'sample_ver': 1},
{'event_type': 'ACL_CHANGE', 'sample_id': id1},
{'event_type': 'NEW_LINK', 'link_id': lid1},
{'event_type': 'NEW_LINK', 'link_id': lid2},
{'event_type': 'EXPIRED_LINK', 'link_id': lid1},
])
def test_expire_data_link_as_admin(sample_port, workspace, kafka):
_expire_data_link_as_admin(sample_port, workspace, None, USER2, kafka)
def test_expire_data_link_as_admin_impersonate_user(sample_port, workspace, kafka):
_expire_data_link_as_admin(sample_port, workspace, USER4, USER4, kafka)
def _expire_data_link_as_admin(sample_port, workspace, user, expected_user, kafka):
_clear_kafka_messages(kafka)
url = f'http://localhost:{sample_port}'
wsurl = f'http://localhost:{workspace.port}'
wscli = Workspace(wsurl, token=TOKEN3)
# create workspace & objects
wscli.create_workspace({'workspace': 'foo'})
wscli.save_objects({'id': 1, 'objects': [
{'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
]})
wscli.set_permissions({'id': 1, 'new_permission': 'w', 'users': [USER4]})
# create samples
id1 = _create_sample(
url,
TOKEN3,
{'name': 'mysample',
'node_tree': [{'id': 'root', 'type': 'BioReplicate'},
{'id': 'foo', 'type': 'TechReplicate', 'parent': 'root'},
{'id': 'bar', 'type': 'TechReplicate', 'parent': 'root'}
]
},
1
)
# create links
lid = _create_link(url, TOKEN3, USER3,
{'id': id1, 'version': 1, 'node': 'foo', 'upa': '1/1/1', 'dataid': 'duidy'})
    time.sleep(1) # need to be able to set a reasonable effective time to fetch links
# expire link
ret = requests.post(url, headers=get_authorized_headers(TOKEN2), json={
'method': 'SampleService.expire_data_link',
'version': '1.1',
'id': '42',
'params': [{'upa': '1/1/1', 'dataid': 'duidy', 'as_admin': 1, 'as_user': user}]
})
# print(ret.text)
assert ret.ok is True
# check links
ret = requests.post(url, headers=get_authorized_headers(TOKEN4), json={
'method': 'SampleService.get_data_links_from_data',
'version': '1.1',
'id': '42',
'params': [{'upa': '1/1/1', 'effective_time': _get_current_epochmillis() - 500}]
})
# print(ret.text)
assert ret.ok is True
assert len(ret.json()['result']) == 1
assert len(ret.json()['result'][0]) == 2
assert_ms_epoch_close_to_now(ret.json()['result'][0]['effective_time'])
links = ret.json()['result'][0]['links']
assert len(links) == 1
link = links[0]
assert_ms_epoch_close_to_now(link['expired'])
assert_ms_epoch_close_to_now(link['created'] + 1000)
del link['created']
del link['expired']
assert link == {
'linkid': lid,
'id': id1,
'version': 1,
'node': 'foo',
'upa': '1/1/1',
'dataid': 'duidy',
'createdby': USER3,
'expiredby': expected_user,
}
_check_kafka_messages(
kafka,
[
{'event_type': 'NEW_SAMPLE', 'sample_id': id1, 'sample_ver': 1},
{'event_type': 'NEW_LINK', 'link_id': lid},
{'event_type': 'EXPIRED_LINK', 'link_id': lid},
])
def test_expire_data_link_fail(sample_port, workspace):
url = f'http://localhost:{sample_port}'
wsurl = f'http://localhost:{workspace.port}'
wscli = Workspace(wsurl, token=TOKEN3)
# create workspace & objects
wscli.create_workspace({'workspace': 'foo'})
wscli.save_objects({'id': 1, 'objects': [
{'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
]})
# create samples
id1 = _create_sample(
url,
TOKEN3,
{'name': 'mysample',
'node_tree': [{'id': 'root', 'type': 'BioReplicate'},
{'id': 'foo', 'type': 'TechReplicate', 'parent': 'root'}
]
},
1
)
# create links
_create_link(url, TOKEN3, USER3,
{'id': id1, 'version': 1, 'node': 'foo', 'upa': '1/1/1', 'dataid': 'yay'})
_expire_data_link_fail(
sample_port, TOKEN3, {}, 'Sample service error code 30000 Missing input parameter: upa')
_expire_data_link_fail(
sample_port, TOKEN3, {'upa': '1/0/1'},
'Sample service error code 30001 Illegal input parameter: 1/0/1 is not a valid UPA')
_expire_data_link_fail(
sample_port, TOKEN3, {'upa': '1/1/1', 'dataid': 'foo\nbar'},
'Sample service error code 30001 Illegal input parameter: ' +
'dataid contains control characters')
_expire_data_link_fail(
sample_port, TOKEN4, {'upa': '1/1/1', 'dataid': 'yay'},
'Sample service error code 20000 Unauthorized: User user4 cannot write to workspace 1')
wscli.delete_workspace({'id': 1})
_expire_data_link_fail(
sample_port, TOKEN3, {'upa': '1/1/1', 'dataid': 'yay'},
'Sample service error code 50040 No such workspace data: Workspace 1 is deleted')
wsadmin = Workspace(wsurl, token=TOKEN_WS_FULL_ADMIN)
wsadmin.administer({'command': 'undeleteWorkspace', 'params': {'id': 1}})
_expire_data_link_fail(
sample_port, TOKEN3, {'upa': '1/1/2', 'dataid': 'yay'},
'Sample service error code 50050 No such data link: 1/1/2:yay')
_expire_data_link_fail(
sample_port, TOKEN3, {'upa': '1/1/1', 'dataid': 'yee'},
'Sample service error code 50050 No such data link: 1/1/1:yee')
wscli.set_permissions({'id': 1, 'new_permission': 'w', 'users': [USER4]})
_expire_data_link_fail(
sample_port, TOKEN4, {'upa': '1/1/1', 'dataid': 'yay'},
'Sample service error code 20000 Unauthorized: User user4 cannot ' +
f'administrate sample {id1}')
# admin tests
_expire_data_link_fail(
sample_port, TOKEN2,
{'upa': '1/1/1', 'dataid': 'yay', 'as_admin': ['t'], 'as_user': 'foo\tbar'},
'Sample service error code 30001 Illegal input parameter: ' +
'userid contains control characters')
_expire_data_link_fail(
sample_port, TOKEN3,
{'upa': '1/1/1', 'dataid': 'yay', 'as_admin': ['t'], 'as_user': USER4},
'Sample service error code 20000 Unauthorized: User user3 does not have ' +
'the necessary administration privileges to run method expire_data_link')
_expire_data_link_fail(
sample_port, TOKEN2,
{'upa': '1/1/1', 'dataid': 'yay', 'as_admin': ['t'], 'as_user': 'fake'},
'Sample service error code 50000 No such user: fake')
def _expire_data_link_fail(sample_port, token, params, expected):
_request_fail(sample_port, 'expire_data_link', token, params, expected)
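# Generic failure helper: POSTs the given SampleService method and asserts an
# HTTP 500 response carrying the expected error message.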
def _request_fail(sample_port, method, token, params, expected):
url = f'http://localhost:{sample_port}'
ret = requests.post(url, headers=get_authorized_headers(token), json={
'method': 'SampleService.' + method,
'version': '1.1',
'id': '42',
'params': [params]
})
assert ret.status_code == 500
assert ret.json()['error']['message'] == expected
def test_get_links_from_data(sample_port, workspace):
url = f'http://localhost:{sample_port}'
wsurl = f'http://localhost:{workspace.port}'
wscli = Workspace(wsurl, token=TOKEN3)
# create workspace & objects
wscli.create_workspace({'workspace': 'foo'})
wscli.save_objects({'id': 1, 'objects': [
{'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
{'name': 'baz', 'data': {}, 'type': 'Trivial.Object-1.0'},
{'name': 'baz', 'data': {}, 'type': 'Trivial.Object-1.0'},
]})
wscli.set_permissions({'id': 1, 'new_permission': 'w', 'users': [USER4]})
# create samples
id1 = _create_sample(
url,
TOKEN3,
{'name': 'mysample',
'node_tree': [{'id': 'root', 'type': 'BioReplicate'},
{'id': 'foo', 'type': 'TechReplicate', 'parent': 'root'}
]
},
1
)
id2 = _create_sample(
url,
TOKEN4,
{'name': 'myothersample',
'node_tree': [{'id': 'root2', 'type': 'BioReplicate'},
{'id': 'foo2', 'type': 'TechReplicate', 'parent': 'root2'}
]
},
1
)
# ver 2
_create_sample(
url,
TOKEN4,
{'id': id2,
'name': 'myothersample3',
'node_tree': [{'id': 'root3', 'type': 'BioReplicate'},
{'id': 'foo3', 'type': 'TechReplicate', 'parent': 'root3'}
]
},
2
)
# create links
lid1 = _create_link(
url, TOKEN3, USER3, {'id': id1, 'version': 1, 'node': 'foo', 'upa': '1/2/2'})
lid2 = _create_link(
url, TOKEN4, USER4,
{'id': id2, 'version': 1, 'node': 'root2', 'upa': '1/1/1', 'dataid': 'column1'})
lid3 = _create_link(
url, TOKEN4, USER4,
{'id': id2, 'version': 2, 'node': 'foo3', 'upa': '1/2/2', 'dataid': 'column2'})
# get links from object 1/2/2
ret = _get_links_from_data(url, TOKEN3, {'upa': '1/2/2'})
assert_ms_epoch_close_to_now(ret['effective_time'])
res = ret['links']
expected_links = [
{
'linkid': lid1,
'id': id1,
'version': 1,
'node': 'foo',
'upa': '1/2/2',
'dataid': None,
'createdby': USER3,
'expiredby': None,
'expired': None
},
{
'linkid': lid3,
'id': id2,
'version': 2,
'node': 'foo3',
'upa': '1/2/2',
'dataid': 'column2',
'createdby': USER4,
'expiredby': None,
'expired': None
}
]
assert len(res) == len(expected_links)
for link in res:
assert_ms_epoch_close_to_now(link['created'])
del link['created']
for link in expected_links:
assert link in res
# get links from object 1/1/1
ret = _get_links_from_data(url, TOKEN3, {'upa': '1/1/1'})
assert_ms_epoch_close_to_now(ret['effective_time'])
res = ret['links']
assert_ms_epoch_close_to_now(res[0]['created'])
del res[0]['created']
assert res == [
{
'linkid': lid2,
'id': id2,
'version': 1,
'node': 'root2',
'upa': '1/1/1',
'dataid': 'column1',
'createdby': USER4,
'expiredby': None,
'expired': None
}
]
# get links from object 1/2/1
ret = _get_links_from_data(url, TOKEN3, {'upa': '1/2/1'})
assert_ms_epoch_close_to_now(ret['effective_time'])
assert ret['links'] == []
def _get_links_from_data(url, token, params, print_resp=False):
ret = requests.post(url, headers=get_authorized_headers(token), json={
'method': 'SampleService.get_data_links_from_data',
'version': '1.1',
'id': '42',
'params': [params]
})
if print_resp:
print(ret.text)
assert ret.ok is True
assert len(ret.json()['result']) == 1
assert len(ret.json()['result'][0]) == 2
return ret.json()['result'][0]
def test_get_links_from_data_expired(sample_port, workspace):
url = f'http://localhost:{sample_port}'
wsurl = f'http://localhost:{workspace.port}'
wscli = Workspace(wsurl, token=TOKEN3)
# create workspace & objects
wscli.create_workspace({'workspace': 'foo'})
wscli.save_objects({'id': 1, 'objects': [
{'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
]})
wscli.set_permissions({'id': 1, 'new_permission': 'w', 'users': [USER4]})
# create samples
id1 = _create_sample(
url,
TOKEN3,
{'name': 'mysample',
'node_tree': [{'id': 'root', 'type': 'BioReplicate'},
{'id': 'foo', 'type': 'TechReplicate', 'parent': 'root'}
]
},
1
)
_replace_acls(url, id1, TOKEN3, {'admin': [USER4]})
# create links
lid1 = _create_link(url, TOKEN3, USER3,
{'id': id1, 'version': 1, 'node': 'foo', 'upa': '1/1/1', 'dataid': 'yay'})
oldlinkactive = datetime.datetime.now()
time.sleep(1)
# update link node
lid2 = _create_link(url, TOKEN4, USER4, {
'id': id1,
'version': 1,
'node': 'root',
'upa': '1/1/1',
'dataid': 'yay',
'update': 1
})
# get current link
ret = requests.post(url, headers=get_authorized_headers(TOKEN3), json={
'method': 'SampleService.get_data_links_from_data',
'version': '1.1',
'id': '42',
'params': [{'upa': '1/1/1'}]
})
# print(ret.text)
assert ret.ok is True
assert len(ret.json()['result']) == 1
res = ret.json()['result'][0]
assert len(res) == 2
assert_ms_epoch_close_to_now(res['effective_time'])
del res['effective_time']
created = res['links'][0]['created']
assert_ms_epoch_close_to_now(created)
del res['links'][0]['created']
assert res == {'links': [
{
'linkid': lid2,
'id': id1,
'version': 1,
'node': 'root',
'upa': '1/1/1',
'dataid': 'yay',
'createdby': USER4,
'expiredby': None,
'expired': None
}
]}
# get expired link
ret = requests.post(url, headers=get_authorized_headers(TOKEN3), json={
'method': 'SampleService.get_data_links_from_data',
'version': '1.1',
'id': '42',
'params': [{
'upa': '1/1/1',
'effective_time': round(oldlinkactive.timestamp() * 1000)}]
})
# print(ret.text)
assert ret.ok is True
assert len(ret.json()['result']) == 1
res = ret.json()['result'][0]
assert res['links'][0]['expired'] == created - 1
assert_ms_epoch_close_to_now(res['links'][0]['created'] + 1000)
del res['links'][0]['created']
del res['links'][0]['expired']
assert res == {
'effective_time': round(oldlinkactive.timestamp() * 1000),
'links': [
{
'linkid': lid1,
'id': id1,
'version': 1,
'node': 'foo',
'upa': '1/1/1',
'dataid': 'yay',
'createdby': USER3,
'expiredby': USER4,
}
]}
def test_get_links_from_data_public_read(sample_port, workspace):
url = f'http://localhost:{sample_port}'
wsurl = f'http://localhost:{workspace.port}'
wscli = Workspace(wsurl, token=TOKEN1)
# create workspace & objects
wscli.create_workspace({'workspace': 'foo'})
wscli.save_objects({'id': 1, 'objects': [
{'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
]})
wscli.set_global_permission({'id': 1, 'new_permission': 'r'})
# create samples
id_ = _create_generic_sample(url, TOKEN1)
# create links
lid = _create_link(url, TOKEN1, USER1, {'id': id_, 'version': 1, 'node': 'foo', 'upa': '1/1/1'})
for token in [None, TOKEN4]: # anon user, user 4 has no explicit perms
ret = _get_links_from_data(url, token, {'upa': '1/1/1'})
assert_ms_epoch_close_to_now(ret['effective_time'])
assert len(ret['links']) == 1
link = ret['links'][0]
assert_ms_epoch_close_to_now(link['created'])
del link['created']
assert link == {
'linkid': lid,
'id': id_,
'version': 1,
'node': 'foo',
'upa': '1/1/1',
'dataid': None,
'createdby': USER1,
'expiredby': None,
'expired': None
}
def test_get_links_from_data_as_admin(sample_port, workspace):
url = f'http://localhost:{sample_port}'
wsurl = f'http://localhost:{workspace.port}'
wscli = Workspace(wsurl, token=TOKEN4)
# create workspace & objects
wscli.create_workspace({'workspace': 'foo'})
wscli.save_objects({'id': 1, 'objects': [
{'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
]})
# create samples
id1 = _create_sample(
url,
TOKEN4,
{'name': 'mysample',
'node_tree': [{'id': 'root', 'type': 'BioReplicate'},
{'id': 'foo', 'type': 'TechReplicate', 'parent': 'root'}
]
},
1
)
# create links
lid = _create_link(url, TOKEN4, USER4, {'id': id1, 'version': 1, 'node': 'foo', 'upa': '1/1/1'})
# get links from object, user 3 has admin read perms
ret = requests.post(url, headers=get_authorized_headers(TOKEN3), json={
'method': 'SampleService.get_data_links_from_data',
'version': '1.1',
'id': '42',
'params': [{'upa': '1/1/1', 'as_admin': 1}]
})
# print(ret.text)
assert ret.ok is True
assert len(ret.json()['result']) == 1
assert len(ret.json()['result'][0]) == 2
assert_ms_epoch_close_to_now(ret.json()['result'][0]['effective_time'])
assert len(ret.json()['result'][0]['links']) == 1
link = ret.json()['result'][0]['links'][0]
assert_ms_epoch_close_to_now(link['created'])
del link['created']
assert link == {
'linkid': lid,
'id': id1,
'version': 1,
'node': 'foo',
'upa': '1/1/1',
'dataid': None,
'createdby': USER4,
'expiredby': None,
'expired': None
}
def test_get_links_from_data_fail(sample_port, workspace):
wsurl = f'http://localhost:{workspace.port}'
wscli = Workspace(wsurl, token=TOKEN3)
# create workspace & objects
wscli.create_workspace({'workspace': 'foo'})
wscli.save_objects({'id': 1, 'objects': [
{'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
]})
_get_link_from_data_fail(
sample_port, TOKEN3, {},
'Sample service error code 30000 Missing input parameter: upa')
_get_link_from_data_fail(
sample_port, TOKEN3, {'upa': '1/1/1', 'effective_time': 'foo'},
"Sample service error code 30001 Illegal input parameter: key 'effective_time' " +
"value of 'foo' is not a valid epoch millisecond timestamp")
_get_link_from_data_fail(
sample_port, TOKEN4, {'upa': '1/1/1'},
'Sample service error code 20000 Unauthorized: User user4 cannot read upa 1/1/1')
_get_link_from_data_fail(
sample_port, None, {'upa': '1/1/1'},
'Sample service error code 20000 Unauthorized: Anonymous users cannot read upa 1/1/1')
_get_link_from_data_fail(
sample_port, TOKEN3, {'upa': '1/2/1'},
'Sample service error code 50040 No such workspace data: Object 1/2/1 does not exist')
# admin tests (also tests missing / deleted objects)
_get_link_from_data_fail(
sample_port, TOKEN4, {'upa': '1/1/1', 'as_admin': 1},
'Sample service error code 20000 Unauthorized: User user4 does not have the necessary ' +
'administration privileges to run method get_data_links_from_data')
_get_link_from_data_fail(
sample_port, None, {'upa': '1/1/1', 'as_admin': 1},
'Sample service error code 20000 Unauthorized: Anonymous users may not act ' +
'as service administrators.')
_get_link_from_data_fail(
sample_port, TOKEN3, {'upa': '1/1/2', 'as_admin': 1},
'Sample service error code 50040 No such workspace data: Object 1/1/2 does not exist')
_get_link_from_data_fail(
sample_port, TOKEN3, {'upa': '2/1/1', 'as_admin': 1},
'Sample service error code 50040 No such workspace data: No workspace with id 2 exists')
wscli.delete_objects([{'ref': '1/1'}])
_get_link_from_data_fail(
sample_port, TOKEN3, {'upa': '1/1/1', 'as_admin': 1},
'Sample service error code 50040 No such workspace data: Object 1/1/1 does not exist')
wscli.delete_workspace({'id': 1})
_get_link_from_data_fail(
sample_port, TOKEN3, {'upa': '1/1/1', 'as_admin': 1},
'Sample service error code 50040 No such workspace data: Workspace 1 is deleted')
def _get_link_from_data_fail(sample_port, token, params, expected):
url = f'http://localhost:{sample_port}'
ret = requests.post(url, headers=get_authorized_headers(token), json={
'method': 'SampleService.get_data_links_from_data',
'version': '1.1',
'id': '42',
'params': [params]
})
assert ret.status_code == 500
assert ret.json()['error']['message'] == expected
def test_get_sample_via_data(sample_port, workspace):
url = f'http://localhost:{sample_port}'
wsurl = f'http://localhost:{workspace.port}'
wscli = Workspace(wsurl, token=TOKEN3)
# create workspace & objects
wscli.create_workspace({'workspace': 'foo'})
wscli.save_objects({'id': 1, 'objects': [
{'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
]})
wscli.set_permissions({'id': 1, 'new_permission': 'r', 'users': [USER4]})
# create samples
id1 = _create_sample(
url,
TOKEN3,
{'name': 'mysample',
'node_tree': [{'id': 'root',
'type': 'BioReplicate',
'meta_user': {'a': {'b': 'f', 'e': 'g'}, 'c': {'d': 'h'}},
'meta_controlled': {'foo': {'bar': 'baz'}, 'premature': {'e': 'fakeout'}},
'source_meta': [{'key': 'foo', 'skey': 'b', 'svalue': {'x': 'y'}}]
},
{'id': 'foo', 'type': 'TechReplicate', 'parent': 'root'}
]
},
1
)
id2 = _create_sample(
url,
TOKEN3,
{'name': 'unused', 'node_tree': [{'id': 'unused', 'type': 'BioReplicate'}]},
1
)
# ver 2
_create_sample(
url,
TOKEN3,
{'id': id2,
'name': 'myothersample3',
'node_tree': [{'id': 'root3', 'type': 'BioReplicate'},
{'id': 'foo3', 'type': 'TechReplicate', 'parent': 'root3'}
]
},
2
)
# create links
_create_link(url, TOKEN3, USER3, {'id': id1, 'version': 1, 'node': 'foo', 'upa': '1/1/1'})
_create_link(
url, TOKEN3, USER3,
{'id': id2, 'version': 2, 'node': 'root3', 'upa': '1/1/1', 'dataid': 'column1'})
# get first sample via link from object 1/1/1 using a token that has no access
ret = requests.post(url, headers=get_authorized_headers(TOKEN4), json={
'method': 'SampleService.get_sample_via_data',
'version': '1.1',
'id': '42',
'params': [{'upa': '1/1/1', 'id': str(id1), 'version': 1}]
})
# print(ret.text)
assert ret.ok is True
res = ret.json()['result'][0]
assert_ms_epoch_close_to_now(res['save_date'])
del res['save_date']
expected = {
'id': id1,
'version': 1,
'name': 'mysample',
'user': USER3,
'node_tree': [{'id': 'root',
'type': 'BioReplicate',
'parent': None,
'meta_user': {'a': {'b': 'f', 'e': 'g'}, 'c': {'d': 'h'}},
'meta_controlled': {'foo': {'bar': 'baz'}, 'premature': {'e': 'fakeout'}},
'source_meta': [{'key': 'foo', 'skey': 'b', 'svalue': {'x': 'y'}}],
},
{'id': 'foo',
'type': 'TechReplicate',
'parent': 'root',
'meta_controlled': {},
'meta_user': {},
'source_meta': [],
},
]
}
assert res == expected
# get second sample via link from object 1/1/1 using a token that has no access
ret = requests.post(url, headers=get_authorized_headers(TOKEN4), json={
'method': 'SampleService.get_sample_via_data',
'version': '1.1',
'id': '42',
'params': [{'upa': '1/1/1', 'id': str(id2), 'version': 2}]
})
# print(ret.text)
assert ret.ok is True
res = ret.json()['result'][0]
assert_ms_epoch_close_to_now(res['save_date'])
del res['save_date']
expected = {
'id': id2,
'version': 2,
'name': 'myothersample3',
'user': USER3,
'node_tree': [{'id': 'root3',
'type': 'BioReplicate',
'parent': None,
'meta_controlled': {},
'meta_user': {},
'source_meta': [],
},
{'id': 'foo3',
'type': 'TechReplicate',
'parent': 'root3',
'meta_controlled': {},
'meta_user': {},
'source_meta': [],
},
]
}
assert res == expected
def test_get_sample_via_data_expired_with_anon_user(sample_port, workspace):
url = f'http://localhost:{sample_port}'
wsurl = f'http://localhost:{workspace.port}'
wscli = Workspace(wsurl, token=TOKEN3)
# create workspace & objects
wscli.create_workspace({'workspace': 'foo'})
wscli.save_objects({'id': 1, 'objects': [
{'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
]})
wscli.set_global_permission({'id': 1, 'new_permission': 'r'})
# create samples
id1 = _create_sample(
url,
TOKEN3,
{'name': 'mysample',
'node_tree': [{'id': 'root', 'type': 'BioReplicate'},
{'id': 'foo', 'type': 'TechReplicate', 'parent': 'root'}
]
},
1
)
id2 = _create_sample(
url,
TOKEN3,
{'name': 'myothersample',
'node_tree': [{'id': 'root2', 'type': 'BioReplicate'},
{'id': 'foo2', 'type': 'TechReplicate', 'parent': 'root2'}
]
},
1
)
# create links
_create_link(url, TOKEN3, USER3,
{'id': id1, 'version': 1, 'node': 'foo', 'upa': '1/1/1', 'dataid': 'yay'})
# update link node
_create_link(url, TOKEN3, USER3, {
'id': id2,
'version': 1,
'node': 'root2',
'upa': '1/1/1',
'dataid': 'yay',
'update': 1,
})
# pulled link from server to check the old link was expired
# get sample via current link
ret = requests.post(url, headers=get_authorized_headers(None), json={
'method': 'SampleService.get_sample_via_data',
'version': '1.1',
'id': '42',
'params': [{'upa': '1/1/1', 'id': str(id2), 'version': 1}]
})
# print(ret.text)
assert ret.ok is True
res = ret.json()['result'][0]
assert_ms_epoch_close_to_now(res['save_date'])
del res['save_date']
expected = {
'id': id2,
'version': 1,
'name': 'myothersample',
'user': USER3,
'node_tree': [{'id': 'root2',
'type': 'BioReplicate',
'parent': None,
'meta_user': {},
'meta_controlled': {},
'source_meta': [],
},
{'id': 'foo2',
'type': 'TechReplicate',
'parent': 'root2',
'meta_controlled': {},
'meta_user': {},
'source_meta': [],
},
]
}
assert res == expected
# get sample via expired link
ret = requests.post(url, headers=get_authorized_headers(None), json={
'method': 'SampleService.get_sample_via_data',
'version': '1.1',
'id': '42',
'params': [{'upa': '1/1/1', 'id': str(id1), 'version': 1}]
})
# print(ret.text)
assert ret.ok is True
res = ret.json()['result'][0]
assert_ms_epoch_close_to_now(res['save_date'])
del res['save_date']
expected = {
'id': id1,
'version': 1,
'name': 'mysample',
'user': USER3,
'node_tree': [{'id': 'root',
'type': 'BioReplicate',
'parent': None,
'meta_user': {},
'meta_controlled': {},
'source_meta': [],
},
{'id': 'foo',
'type': 'TechReplicate',
'parent': 'root',
'meta_controlled': {},
'meta_user': {},
'source_meta': [],
},
]
}
assert res == expected
def test_get_sample_via_data_public_read(sample_port, workspace):
url = f'http://localhost:{sample_port}'
wsurl = f'http://localhost:{workspace.port}'
wscli = Workspace(wsurl, token=TOKEN1)
# create workspace & objects
wscli.create_workspace({'workspace': 'foo'})
wscli.save_objects({'id': 1, 'objects': [
{'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
]})
wscli.set_global_permission({'id': 1, 'new_permission': 'r'})
# create samples
id_ = _create_generic_sample(url, TOKEN1)
# create links
_create_link(url, TOKEN1, USER1, {'id': id_, 'version': 1, 'node': 'foo', 'upa': '1/1/1'})
# get sample via link from object 1/1/1 using a token that has no explicit access
ret = requests.post(url, headers=get_authorized_headers(TOKEN4), json={
'method': 'SampleService.get_sample_via_data',
'version': '1.1',
'id': '42',
'params': [{'upa': '1/1/1', 'id': str(id_), 'version': 1}]
})
# print(ret.text)
assert ret.ok is True
res = ret.json()['result'][0]
assert_ms_epoch_close_to_now(res['save_date'])
del res['save_date']
expected = {
'id': id_,
'version': 1,
'name': 'mysample',
'user': USER1,
'node_tree': [{'id': 'root',
'type': 'BioReplicate',
'parent': None,
'meta_user': {},
'meta_controlled': {},
'source_meta': [],
},
{'id': 'foo',
'type': 'TechReplicate',
'parent': 'root',
'meta_controlled': {},
'meta_user': {},
'source_meta': [],
},
]
}
assert res == expected
def test_get_sample_via_data_fail(sample_port, workspace):
url = f'http://localhost:{sample_port}'
wsurl = f'http://localhost:{workspace.port}'
wscli = Workspace(wsurl, token=TOKEN3)
# create workspace & objects
wscli.create_workspace({'workspace': 'foo'})
wscli.save_objects({'id': 1, 'objects': [
{'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
]})
# create samples
id1 = _create_sample(
url,
TOKEN3,
{'name': 'mysample',
'node_tree': [{'id': 'root', 'type': 'BioReplicate'},
{'id': 'foo', 'type': 'TechReplicate', 'parent': 'root'}
]
},
1
)
# create links
_create_link(url, TOKEN3, USER3,
{'id': id1, 'version': 1, 'node': 'foo', 'upa': '1/1/1', 'dataid': 'yay'})
_get_sample_via_data_fail(
sample_port, TOKEN3, {},
'Sample service error code 30000 Missing input parameter: upa')
_get_sample_via_data_fail(
sample_port, TOKEN3, {'upa': '1/1/1'},
'Sample service error code 30000 Missing input parameter: id')
_get_sample_via_data_fail(
sample_port, TOKEN3, {'upa': '1/1/1', 'id': id1},
'Sample service error code 30000 Missing input parameter: version')
_get_sample_via_data_fail(
sample_port, TOKEN4, {'upa': '1/1/1', 'id': id1, 'version': 1},
'Sample service error code 20000 Unauthorized: User user4 cannot read upa 1/1/1')
_get_sample_via_data_fail(
sample_port, None, {'upa': '1/1/1', 'id': id1, 'version': 1},
'Sample service error code 20000 Unauthorized: Anonymous users cannot read upa 1/1/1')
_get_sample_via_data_fail(
sample_port, TOKEN3, {'upa': '1/2/1', 'id': id1, 'version': 1},
'Sample service error code 50040 No such workspace data: Object 1/2/1 does not exist')
badid = uuid.uuid4()
_get_sample_via_data_fail(
sample_port, TOKEN3, {'upa': '1/1/1', 'id': str(badid), 'version': 1},
'Sample service error code 50050 No such data link: There is no link from UPA 1/1/1 ' +
f'to sample {badid}')
_get_sample_via_data_fail(
sample_port, TOKEN3, {'upa': '1/1/1', 'id': str(id1), 'version': 2},
f'Sample service error code 50020 No such sample version: {id1} ver 2')
def _get_sample_via_data_fail(sample_port, token, params, expected):
    # could delegate to the generic _request_fail helper above, which already takes the
    # service method name, to DRY things up a bit
url = f'http://localhost:{sample_port}'
ret = requests.post(url, headers=get_authorized_headers(token), json={
'method': 'SampleService.get_sample_via_data',
'version': '1.1',
'id': '42',
'params': [params]
})
assert ret.status_code == 500
assert ret.json()['error']['message'] == expected
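# (illustration only) the per-method *_fail helpers could be reduced to one-line
# wrappers around the generic _request_fail helper defined earlier, e.g.
#     _request_fail(sample_port, 'get_sample_via_data', token, params, expected)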
def test_get_data_link(sample_port, workspace):
url = f'http://localhost:{sample_port}'
wsurl = f'http://localhost:{workspace.port}'
wscli = Workspace(wsurl, token=TOKEN4)
# create workspace & objects
wscli.create_workspace({'workspace': 'foo'})
wscli.save_objects({'id': 1, 'objects': [
{'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
]})
# create samples
id1 = _create_sample(
url,
TOKEN4,
{'name': 'mysample',
'node_tree': [{'id': 'root', 'type': 'BioReplicate'},
{'id': 'foo', 'type': 'TechReplicate', 'parent': 'root'}
]
},
1
)
# create link
lid = _create_link(url, TOKEN4, USER4,
{'id': id1, 'version': 1, 'node': 'foo', 'upa': '1/1/1', 'dataid': 'yay'})
# get link, user 3 has admin read perms
ret = requests.post(url, headers=get_authorized_headers(TOKEN3), json={
'method': 'SampleService.get_data_link',
'version': '1.1',
'id': '42',
'params': [{'linkid': lid}]
})
# print(ret.text)
assert ret.ok is True
assert len(ret.json()['result']) == 1
link = ret.json()['result'][0]
created = link.pop('created')
assert_ms_epoch_close_to_now(created)
assert link == {
'linkid': lid,
'id': id1,
'version': 1,
'node': 'foo',
'upa': '1/1/1',
'dataid': 'yay',
'createdby': USER4,
'expiredby': None,
'expired': None
}
# expire link
ret = requests.post(url, headers=get_authorized_headers(TOKEN4), json={
'method': 'SampleService.expire_data_link',
'version': '1.1',
'id': '42',
'params': [{'upa': '1/1/1', 'dataid': 'yay'}]
})
# print(ret.text)
assert ret.ok is True
# get link, user 5 has full perms
ret = requests.post(url, headers=get_authorized_headers(TOKEN5), json={
'method': 'SampleService.get_data_link',
'version': '1.1',
'id': '42',
'params': [{'linkid': lid}]
})
# print(ret.text)
assert ret.ok is True
assert len(ret.json()['result']) == 1
link = ret.json()['result'][0]
assert_ms_epoch_close_to_now(link['expired'])
del link['expired']
assert link == {
'linkid': lid,
'id': id1,
'version': 1,
'node': 'foo',
'upa': '1/1/1',
'dataid': 'yay',
'created': created,
'createdby': USER4,
'expiredby': USER4,
}
def test_get_data_link_fail(sample_port, workspace):
url = f'http://localhost:{sample_port}'
wsurl = f'http://localhost:{workspace.port}'
wscli = Workspace(wsurl, token=TOKEN4)
# create workspace & objects
wscli.create_workspace({'workspace': 'foo'})
wscli.save_objects({'id': 1, 'objects': [
{'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
]})
# create samples
id1 = _create_sample(
url,
TOKEN4,
{'name': 'mysample',
'node_tree': [{'id': 'root', 'type': 'BioReplicate'},
{'id': 'foo', 'type': 'TechReplicate', 'parent': 'root'}
]
},
1
)
# create link
lid = _create_link(url, TOKEN4, USER4,
{'id': id1, 'version': 1, 'node': 'foo', 'upa': '1/1/1', 'dataid': 'yay'})
_get_data_link_fail(
sample_port, TOKEN3, {}, 'Sample service error code 30000 Missing input parameter: linkid')
_get_data_link_fail(
sample_port, TOKEN4, {'linkid': lid},
'Sample service error code 20000 Unauthorized: User user4 does not have the necessary ' +
'administration privileges to run method get_data_link')
oid = uuid.uuid4()
_get_data_link_fail(
sample_port, TOKEN3, {'linkid': str(oid)},
f'Sample service error code 50050 No such data link: {oid}')
def _get_data_link_fail(sample_port, token, params, expected):
    # could delegate to the generic _request_fail helper above, which already takes the
    # service method name, to DRY things up a bit
url = f'http://localhost:{sample_port}'
ret = requests.post(url, headers=get_authorized_headers(token), json={
'method': 'SampleService.get_data_link',
'version': '1.1',
'id': '42',
'params': [params]
})
assert ret.status_code == 500
assert ret.json()['error']['message'] == expected
# ###########################
# Auth user lookup tests
# ###########################
# for some reason, including sample_port along with auth in the test function args prevents a
# teardown error; the cause is unclear
def test_user_lookup_build_fail_bad_args():
_user_lookup_build_fail(
'', 'foo', ValueError('auth_url cannot be a value that evaluates to false'))
_user_lookup_build_fail(
'http://foo.com', '', ValueError('auth_token cannot be a value that evaluates to false'))
def test_user_lookup_build_fail_bad_token(sample_port, auth):
_user_lookup_build_fail(
f'http://localhost:{auth.port}/testmode',
'tokentokentoken!',
InvalidTokenError('KBase auth server reported token is invalid.'))
def test_user_lookup_build_fail_bad_auth_url(sample_port, auth):
_user_lookup_build_fail(
f'http://localhost:{auth.port}/testmode/foo',
TOKEN1,
IOError('Error from KBase auth server: HTTP 404 Not Found'))
def test_user_lookup_build_fail_not_auth_url(auth):
_user_lookup_build_fail(
'https://httpbin.org/status/404',
TOKEN1,
IOError('Non-JSON response from KBase auth server, status code: 404'))
def _user_lookup_build_fail(url, token, expected):
with raises(Exception) as got:
KBaseUserLookup(url, token)
assert_exception_correct(got.value, expected)
def test_user_lookup(sample_port, auth):
ul = KBaseUserLookup(f'http://localhost:{auth.port}/testmode', TOKEN1)
assert ul.invalid_users([]) == []
assert ul.invalid_users([UserID(USER1), UserID(USER2), UserID(USER3)]) == []
def test_user_lookup_cache(sample_port, auth):
ul = KBaseUserLookup(f'http://localhost:{auth.port}/testmode', TOKEN1)
assert ul._valid_cache.get(USER1, default=False) is False
assert ul._valid_cache.get(USER2, default=False) is False
ul.invalid_users([UserID(USER1)])
assert ul._valid_cache.get(USER1, default=False) is True
assert ul._valid_cache.get(USER2, default=False) is False
def test_user_lookup_bad_users(sample_port, auth):
ul = KBaseUserLookup(f'http://localhost:{auth.port}/testmode/', TOKEN1)
assert ul.invalid_users(
[UserID('nouserhere'), UserID(USER1), UserID(USER2), UserID('whooptydoo'),
UserID(USER3)]) == [UserID('nouserhere'), UserID('whooptydoo')]
def test_user_lookup_fail_bad_args(sample_port, auth):
ul = KBaseUserLookup(f'http://localhost:{auth.port}/testmode/', TOKEN1)
_user_lookup_fail(ul, None, ValueError('usernames cannot be None'))
_user_lookup_fail(ul, [UserID('foo'), UserID('bar'), None], ValueError(
'Index 2 of iterable usernames cannot be a value that evaluates to false'))
def test_user_lookup_fail_bad_username(sample_port, auth):
ul = KBaseUserLookup(f'http://localhost:{auth.port}/testmode/', TOKEN1)
    # maybe this error message should be shortened
    # it does at least make it unambiguous that the user name is illegal
_user_lookup_fail(ul, [UserID('1')], InvalidUserError(
'The KBase auth server is being very assertive about one of the usernames being ' +
'illegal: 30010 Illegal user name: Illegal user name [1]: 30010 Illegal user name: ' +
'Username must start with a letter'))
def _user_lookup_fail(userlookup, users, expected):
with raises(Exception) as got:
userlookup.invalid_users(users)
assert_exception_correct(got.value, expected)
def test_is_admin(sample_port, auth):
n = AdminPermission.NONE
r = AdminPermission.READ
f = AdminPermission.FULL
_check_is_admin(auth.port, [n, n, n, n])
_check_is_admin(auth.port, [f, f, n, n], ['fulladmin1'])
_check_is_admin(auth.port, [n, f, n, n], ['fulladmin2'])
_check_is_admin(auth.port, [n, n, r, n], None, ['readadmin1'])
_check_is_admin(auth.port, [n, r, n, n], None, ['readadmin2'])
_check_is_admin(auth.port, [n, f, n, n], ['fulladmin2'], ['readadmin2'])
_check_is_admin(auth.port, [n, f, r, n], ['fulladmin2'], ['readadmin1'])
def _check_is_admin(port, results, full_roles=None, read_roles=None):
ul = KBaseUserLookup(
f'http://localhost:{port}/testmode/',
TOKEN_SERVICE,
full_roles,
read_roles)
for t, u, r in zip([TOKEN1, TOKEN2, TOKEN3, TOKEN4], [USER1, USER2, USER3, USER4], results):
assert ul.is_admin(t) == (r, u)
def test_is_admin_cache(sample_port, auth):
ul = KBaseUserLookup(f'http://localhost:{auth.port}/testmode/', TOKEN_SERVICE)
assert ul._admin_cache.get(TOKEN1, default=False) is False
assert ul._admin_cache.get(TOKEN2, default=False) is False
ul.is_admin(TOKEN1)
assert ul._admin_cache.get(TOKEN1, default=False) is not False
assert ul._admin_cache.get(TOKEN2, default=False) is False
def test_is_admin_fail_bad_input(sample_port, auth):
ul = KBaseUserLookup(f'http://localhost:{auth.port}/testmode/', TOKEN_SERVICE)
_is_admin_fail(ul, None, ValueError('token cannot be a value that evaluates to false'))
_is_admin_fail(ul, '', ValueError('token cannot be a value that evaluates to false'))
def test_is_admin_fail_bad_token(sample_port, auth):
ul = KBaseUserLookup(f'http://localhost:{auth.port}/testmode/', TOKEN_SERVICE)
_is_admin_fail(ul, 'bad token here', InvalidTokenError(
'KBase auth server reported token is invalid.'))
def _is_admin_fail(userlookup, user, expected):
with raises(Exception) as got:
userlookup.is_admin(user)
assert_exception_correct(got.value, expected)
# ###########################
# Workspace wrapper tests
# ###########################
def test_workspace_wrapper_has_permission(sample_port, workspace):
url = f'http://localhost:{workspace.port}'
wscli = Workspace(url, token=TOKEN_WS_READ_ADMIN)
ws = WS(wscli)
wscli2 = Workspace(url, token=TOKEN2)
wscli2.create_workspace({'workspace': 'foo'})
wscli2.save_objects({'id': 1,
'objects': [{'name': 'bar', 'type': 'Trivial.Object-1.0', 'data': {}}]})
wscli2.save_objects({'id': 1,
'objects': [{'name': 'foo', 'type': 'Trivial.Object-1.0', 'data': {}}]})
wscli2.save_objects({'id': 1,
'objects': [{'name': 'foo', 'type': 'Trivial.Object-1.0', 'data': {}}]})
ws.has_permission(UserID(USER2), WorkspaceAccessType.ADMIN, 1) # Shouldn't fail
ws.has_permission(UserID(USER2), WorkspaceAccessType.ADMIN, upa=UPA('1/2/2')) # Shouldn't fail
def test_workspace_wrapper_has_permission_fail_bad_args(sample_port, workspace):
url = f'http://localhost:{workspace.port}'
wscli2 = Workspace(url, token=TOKEN2)
wscli2.create_workspace({'workspace': 'foo'})
wscli2.save_objects({'id': 1,
'objects': [{'name': 'bar', 'type': 'Trivial.Object-1.0', 'data': {}}]})
wscli2.save_objects({'id': 1,
'objects': [{'name': 'foo', 'type': 'Trivial.Object-1.0', 'data': {}}]})
_workspace_wrapper_has_permission_fail(
workspace.port, UserID(USER1), 1, None, UnauthorizedError(
'User user1 cannot read workspace 1'))
_workspace_wrapper_has_permission_fail(
workspace.port, UserID(USER1), None, UPA('1/2/1'),
UnauthorizedError('User user1 cannot read upa 1/2/1'))
_workspace_wrapper_has_permission_fail(
workspace.port, UserID('fakeuser'), 1, None, UnauthorizedError(
'User fakeuser cannot read workspace 1'))
_workspace_wrapper_has_permission_fail(
workspace.port, UserID('fakeuser'), None, UPA('1/2/1'),
UnauthorizedError('User fakeuser cannot read upa 1/2/1'))
_workspace_wrapper_has_permission_fail(
workspace.port, UserID(USER2), 2, None,
NoSuchWorkspaceDataError('No workspace with id 2 exists'))
_workspace_wrapper_has_permission_fail(
workspace.port, UserID(USER2), None, UPA('2/1/1'),
NoSuchWorkspaceDataError('No workspace with id 2 exists'))
_workspace_wrapper_has_permission_fail(
workspace.port, UserID(USER2), None, UPA('1/2/2'),
NoSuchWorkspaceDataError('Object 1/2/2 does not exist'))
_workspace_wrapper_has_permission_fail(
workspace.port, UserID(USER2), None, UPA('1/3/1'),
NoSuchWorkspaceDataError('Object 1/3/1 does not exist'))
wscli2.delete_objects([{'ref': '1/2'}])
_workspace_wrapper_has_permission_fail(
workspace.port, UserID(USER2), None, UPA('1/2/1'),
NoSuchWorkspaceDataError('Object 1/2/1 does not exist'))
wscli2.delete_workspace({'id': 1})
_workspace_wrapper_has_permission_fail(
workspace.port, UserID(USER2), None, UPA('1/1/1'),
NoSuchWorkspaceDataError('Workspace 1 is deleted'))
_workspace_wrapper_has_permission_fail(
workspace.port, UserID(USER2), 1, None, NoSuchWorkspaceDataError('Workspace 1 is deleted'))
def _workspace_wrapper_has_permission_fail(ws_port, user, wsid, upa, expected):
url = f'http://localhost:{ws_port}'
wscli = Workspace(url, token=TOKEN_WS_READ_ADMIN)
ws = WS(wscli)
with raises(Exception) as got:
ws.has_permission(user, WorkspaceAccessType.READ, wsid, upa)
assert_exception_correct(got.value, expected)
def test_workspace_wrapper_get_workspaces(sample_port, workspace):
url = f'http://localhost:{workspace.port}'
wscli = Workspace(url, token=TOKEN_WS_READ_ADMIN)
ws = WS(wscli)
wscli1 = Workspace(url, token=TOKEN1)
wscli1.create_workspace({'workspace': 'baz'})
wscli2 = Workspace(url, token=TOKEN2)
wscli2.create_workspace({'workspace': 'foo'})
wscli2.set_global_permission({'id': 2, 'new_permission': 'r'})
wscli3 = Workspace(url, token=TOKEN3)
wscli3.create_workspace({'workspace': 'bar'})
wscli3.set_permissions({'id': 3, 'users': [USER1], 'new_permission': 'r'})
wscli3.create_workspace({'workspace': 'invisible'})
assert ws.get_user_workspaces(UserID(USER1)) == [1, 2, 3] # not 4
def test_workspace_wrapper_get_workspaces_fail_no_user(sample_port, workspace):
url = f'http://localhost:{workspace.port}'
wscli = Workspace(url, token=TOKEN_WS_READ_ADMIN)
ws = WS(wscli)
with raises(Exception) as got:
ws.get_user_workspaces(UserID('fakeuser'))
assert_exception_correct(got.value, NoSuchUserError('User fakeuser is not a valid user'))
# ###########################
# Kafka notifier tests
# ###########################
def test_kafka_notifier_init_fail():
_kafka_notifier_init_fail(None, 't', MissingParameterError('bootstrap_servers'))
_kafka_notifier_init_fail(' \t ', 't', MissingParameterError('bootstrap_servers'))
_kafka_notifier_init_fail('localhost:10000', None, MissingParameterError('topic'))
_kafka_notifier_init_fail('localhost:10000', ' \t ', MissingParameterError('topic'))
_kafka_notifier_init_fail(
'localhost:10000', 'mytopic' + 243 * 'a',
IllegalParameterError('topic exceeds maximum length of 249'))
_kafka_notifier_init_fail(f'localhost:{find_free_port()}', 'mytopic', NoBrokersAvailable())
for c in ['Ѽ', '_', '.', '*']:
_kafka_notifier_init_fail('localhost:10000', f'topic{c}topic', ValueError(
f'Illegal character in Kafka topic topic{c}topic: {c}'))
def _kafka_notifier_init_fail(servers, topic, expected):
with raises(Exception) as got:
KafkaNotifier(servers, topic)
assert_exception_correct(got.value, expected)
def test_kafka_notifier_new_sample(sample_port, kafka):
topic = 'abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ-' + 186 * 'a'
kn = KafkaNotifier(f'localhost:{kafka.port}', topic)
try:
id_ = uuid.uuid4()
kn.notify_new_sample_version(id_, 6)
_check_kafka_messages(
kafka,
[{'event_type': 'NEW_SAMPLE', 'sample_id': str(id_), 'sample_ver': 6}],
topic)
finally:
kn.close()
def test_kafka_notifier_notify_new_sample_version_fail(sample_port, kafka):
kn = KafkaNotifier(f'localhost:{kafka.port}', 'mytopic')
_kafka_notifier_notify_new_sample_version_fail(kn, None, 1, ValueError(
'sample_id cannot be a value that evaluates to false'))
_kafka_notifier_notify_new_sample_version_fail(kn, uuid.uuid4(), 0, ValueError(
'sample_ver must be > 0'))
_kafka_notifier_notify_new_sample_version_fail(kn, uuid.uuid4(), -3, ValueError(
'sample_ver must be > 0'))
kn.close()
_kafka_notifier_notify_new_sample_version_fail(kn, uuid.uuid4(), 1, ValueError(
'client is closed'))
def _kafka_notifier_notify_new_sample_version_fail(notifier, sample, version, expected):
with raises(Exception) as got:
notifier.notify_new_sample_version(sample, version)
assert_exception_correct(got.value, expected)
def test_kafka_notifier_acl_change(sample_port, kafka):
kn = KafkaNotifier(f'localhost:{kafka.port}', 'topictopic')
try:
id_ = uuid.uuid4()
kn.notify_sample_acl_change(id_)
_check_kafka_messages(
kafka,
[{'event_type': 'ACL_CHANGE', 'sample_id': str(id_)}],
'topictopic')
finally:
kn.close()
def test_kafka_notifier_notify_acl_change_fail(sample_port, kafka):
kn = KafkaNotifier(f'localhost:{kafka.port}', 'mytopic')
_kafka_notifier_notify_acl_change_fail(kn, None, ValueError(
'sample_id cannot be a value that evaluates to false'))
kn.close()
_kafka_notifier_notify_acl_change_fail(kn, uuid.uuid4(), ValueError(
'client is closed'))
def _kafka_notifier_notify_acl_change_fail(notifier, sample, expected):
with raises(Exception) as got:
notifier.notify_sample_acl_change(sample)
assert_exception_correct(got.value, expected)
def test_kafka_notifier_new_link(sample_port, kafka):
kn = KafkaNotifier(f'localhost:{kafka.port}', 'topictopic')
try:
id_ = uuid.uuid4()
kn.notify_new_link(id_)
_check_kafka_messages(
kafka,
[{'event_type': 'NEW_LINK', 'link_id': str(id_)}],
'topictopic')
finally:
kn.close()
def test_kafka_notifier_new_link_fail(sample_port, kafka):
kn = KafkaNotifier(f'localhost:{kafka.port}', 'mytopic')
_kafka_notifier_new_link_fail(kn, None, ValueError(
'link_id cannot be a value that evaluates to false'))
kn.close()
_kafka_notifier_new_link_fail(kn, uuid.uuid4(), ValueError(
'client is closed'))
def _kafka_notifier_new_link_fail(notifier, sample, expected):
with raises(Exception) as got:
notifier.notify_new_link(sample)
assert_exception_correct(got.value, expected)
def test_kafka_notifier_expired_link(sample_port, kafka):
kn = KafkaNotifier(f'localhost:{kafka.port}', 'topictopic')
try:
id_ = uuid.uuid4()
kn.notify_expired_link(id_)
_check_kafka_messages(
kafka,
[{'event_type': 'EXPIRED_LINK', 'link_id': str(id_)}],
'topictopic')
finally:
kn.close()
def test_kafka_notifier_expired_link_fail(sample_port, kafka):
kn = KafkaNotifier(f'localhost:{kafka.port}', 'mytopic')
_kafka_notifier_expired_link_fail(kn, None, ValueError(
'link_id cannot be a value that evaluates to false'))
kn.close()
_kafka_notifier_expired_link_fail(kn, uuid.uuid4(), ValueError(
'client is closed'))
def _kafka_notifier_expired_link_fail(notifier, sample, expected):
with raises(Exception) as got:
notifier.notify_expired_link(sample)
assert_exception_correct(got.value, expected)
def test_validate_sample(sample_port):
_validate_sample_as_admin(sample_port, None, TOKEN2, USER2)
def _validate_sample_as_admin(sample_port, as_user, get_token, expected_user):
url = f'http://localhost:{sample_port}'
ret = requests.post(url, headers=get_authorized_headers(TOKEN2), json={
'method': 'SampleService.validate_samples',
'version': '1.1',
'id': '67',
'params': [{
'samples': [{
'name': 'mysample',
'node_tree': [{
'id': 'root',
'type': 'BioReplicate',
'meta_controlled': {'foo': {'bar': 'baz'}},
'meta_user': {'a': {'b': 'c'}}
}]
}]
}]
})
# print(ret.text)
assert ret.ok is True
ret_json = ret.json()['result'][0]
assert 'mysample' not in ret_json['errors']
|
cleanup.py
|
import os
import time
import requests
from glob import glob
import os.path as path
from datetime import datetime
from multiprocessing import Process, Queue
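# Cleanup job: ask the tracker which gpujobs are completed and delete their
# .tar.gz archives. The file list is split into batches on a Queue that a pool
# of worker processes drains in parallel.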
def worker(i: int, q: Queue):
    # each worker drains batches from the queue until it is empty
    while q.qsize() > 0:
        try:
            files_batch = q.get_nowait()
            start2 = time.time()
            # print(f"[{i}] 50 batch started")
            r = requests.post("http://cah.io.community/api/isCompleted", json={"addresses": files_batch})
            eligible = r.json()
            to_delete = ["/home/archiveteam/CAH/gpujobs/" + x.split(" ")[1] + ".tar.gz" for x in eligible]
            # print(f"[{i}] starting delete after {round(time.time()-start2, 2)}")
            for file in to_delete:
                # only remove archives that still exist and are at least an hour old,
                # so freshly (re)written files are left alone
                if os.path.isfile(file) and os.path.getmtime(file) < time.time() - 60 * 60:
                    os.remove(file)
            # print(f"[{i}] batch done in {round(time.time()-start2, 2)}")
        except Exception as e:
            # print(f"[{i}] worker raised error {e}")
            pass
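# Main: collect the current tarballs, skip very recently modified ones, queue
# their job ids in batches of 50, then start 10 workers and wait for them.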
now = datetime.now().strftime("%Y/%m/%d_%H:%M")
list_of_files = glob('/home/archiveteam/CAH/gpujobs/*.tar.gz')
frm = len(list_of_files)
start = time.time()
i = 0
files_batch = []
q = Queue()
procs = []
for n in range(10):  # separate loop variable so the batch counter `i` below starts at 0
    procs.append(Process(target=worker, args=[n, q]))
#print (f"starting cleanup of {frm}")
for file in list_of_files:
if time.time() - path.getmtime(file) < 100:
continue
uuid = file.split("/")[5].split(".")[0]
files_batch.append(f"rsync {uuid}")
i += 1
    if i % 50 == 0:
q.put(files_batch)
files_batch = []
q.put(files_batch)
time.sleep(20)
for proc in procs:
proc.start()
for proc in procs:
proc.join()
list_of_files = glob('/home/archiveteam/CAH/gpujobs/*.tar.gz')
end = len(list_of_files)
with open("jobs.txt","wt") as f:
for file in list_of_files:
f.write(file + "\n")
print(f"[{now}] from {frm} to {end} \"task executed in\" {round(time.time()-start,2)} sec")
|
async_script.py
|
# -*- coding: utf-8 -*-
"""
Display output of a given script asynchronously.
Always displays the last line of output from a given script, set by
`script_path`. If a line contains only a color (/^#[0-9a-fA-F]{6}$/), it is
used as such (set force_nocolor to disable). The script may have parameters.
Configuration parameters:
force_nocolor: if true, won't check if a line contains color
(default False)
format: see placeholders below (default '{output}')
script_path: script you want to show output of (compulsory)
(default None)
strip_output: shall we strip leading and trailing spaces from output
(default False)
Format placeholders:
{output} output of script given by "script_path"
Examples:
```
async_script {
format = "{output}"
script_path = "ping 127.0.0.1"
}
```
@author frimdo ztracenastopa@centrum.cz, girst
SAMPLE OUTPUT
{'full_text': 'script output'}
example
{'full_text': '[193957.380605] wlp3s0: authenticated'}
"""
import re
import shlex
from subprocess import Popen, PIPE
from threading import Thread
class Py3status:
"""
"""
# available configuration parameters
force_nocolor = False
format = "{output}"
script_path = None
strip_output = False
def post_config_hook(self):
# class variables:
self.command_thread = Thread()
self.command_output = None
self.command_color = None
self.command_error = None # cannot throw self.py3.error from thread
if not self.script_path:
self.py3.error("script_path is mandatory")
def async_script(self):
response = {}
response["cached_until"] = self.py3.CACHE_FOREVER
if self.command_error is not None:
self.py3.log(self.command_error, level=self.py3.LOG_ERROR)
self.py3.error(self.command_error, timeout=self.py3.CACHE_FOREVER)
if not self.command_thread.is_alive():
self.command_thread = Thread(target=self._command_start)
self.command_thread.daemon = True
self.command_thread.start()
if self.command_color is not None:
response["color"] = self.command_color
response["full_text"] = self.py3.safe_format(
self.format, {"output": self.command_output}
)
return response
def _command_start(self):
try:
command = Popen(shlex.split(self.script_path), stdout=PIPE)
while True:
if command.poll() is not None: # script has exited/died; restart it
command = Popen(shlex.split(self.script_path), stdout=PIPE)
output = command.stdout.readline().decode().strip()
if re.search(r"^#[0-9a-fA-F]{6}$", output) and not self.force_nocolor:
self.command_color = output
else:
if output != self.command_output:
self.command_output = output
self.py3.update()
except Exception as e:
self.command_error = str(e)
self.py3.update()
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status, config={"script_path": "ping 127.0.0.1"})
|
Context_test.py
|
# coding: utf-8
#########################################################################
# Website: <a href="http://www.crazyit.org">疯狂Java联盟 (Crazy Java League)</a> #
# author yeeku.H.lee kongyeeku@163.com #
# #
# version 1.0 #
# #
# Copyright (C), 2001-2018, yeeku.H.Lee #
# #
# This program is protected by copyright laws. #
# #
# Program Name: #
# #
# <br>Date: #
#########################################################################
import multiprocessing
import os
def foo(q):
    print('New process started: (%s)' % os.getpid())
    q.put('Python')
if __name__ == '__main__':
    # use the fork start method and get the matching Context object
    # (the 'fork' start method is only available on Unix-like systems)
    ctx = multiprocessing.get_context('fork')
    # from here on, the Context object can be used in place of the multiprocessing module
    q = ctx.Queue()
    # create the process
    mp = ctx.Process(target=foo, args=(q, ))
    # start the process
    mp.start()
    # get the message from the queue
    print(q.get())
mp.join()
|
gui_server.py
|
# import all the required modules
import socket
import threading
from tkinter import *
from tkinter import font
from tkinter import ttk
# import all functions /
# everything from chat.py file
# from chat import *
PORT = 5000
# SERVER = "192.168.0.103"
SERVER = '127.0.1.1'
ADDRESS = (SERVER, PORT)
FORMAT = "utf-8"
# Create a new client socket
# and connect to the server
# client = socket.socket(socket.AF_INET,
# socket.SOCK_STREAM)
client = socket.socket()
print(f'[+] Connecting to {SERVER}:{PORT}')
client.connect(ADDRESS)
print('[+] Connected')
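# The client shows a login window first; after login, layout() builds the chat
# window, a background thread receives messages, and each send spawns a thread.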
# GUI class for the chat
class GUI:
# constructor method
def __init__(self):
# chat window which is currently hidden
self.Window = Tk()
self.Window.withdraw()
# login window
self.login = Toplevel()
# set the title
self.login.title("Login")
self.login.resizable(width = False,
height = False)
self.login.configure(width = 400,
height = 300)
# create a Label
self.pls = Label(self.login,
text = "Please login to continue",
justify = CENTER,
font = "Helvetica 14 bold")
self.pls.place(relheight = 0.15,
relx = 0.2,
rely = 0.07)
# create a Label
self.labelName = Label(self.login,
text = "Name: ",
font = "Helvetica 12")
self.labelName.place(relheight = 0.2,
relx = 0.1,
rely = 0.2)
        # create an entry box for
        # typing the message
self.entryName = Entry(self.login,
font = "Helvetica 14")
self.entryName.place(relwidth = 0.4,
relheight = 0.12,
relx = 0.35,
rely = 0.2)
        # set the focus of the cursor
self.entryName.focus()
# create a Continue Button
# along with action
self.go = Button(self.login,
text = "CONTINUE",
font = "Helvetica 14 bold",
command = lambda: self.goAhead(self.entryName.get()))
self.go.place(relx = 0.4,
rely = 0.55)
self.Window.mainloop()
def goAhead(self, name):
self.login.destroy()
self.layout(name)
# the thread to receive messages
rcv = threading.Thread(target=self.receive)
rcv.start()
# The main layout of the chat
def layout(self,name):
self.name = name
# to show chat window
self.Window.deiconify()
self.Window.title("CHATROOM")
self.Window.resizable(width = False,
height = False)
self.Window.configure(width = 470,
height = 550,
bg = "#17202A")
self.labelHead = Label(self.Window,
bg = "#17202A",
fg = "#EAECEE",
text = self.name ,
font = "Helvetica 13 bold",
pady = 5)
self.labelHead.place(relwidth = 1)
self.line = Label(self.Window,
width = 450,
bg = "#ABB2B9")
self.line.place(relwidth = 1,
rely = 0.07,
relheight = 0.012)
self.textCons = Text(self.Window,
width = 20,
height = 2,
bg = "#17202A",
fg = "#EAECEE",
font = "Helvetica 14",
padx = 5,
pady = 5)
self.textCons.place(relheight = 0.745,
relwidth = 1,
rely = 0.08)
self.labelBottom = Label(self.Window,
bg = "#ABB2B9",
height = 80)
self.labelBottom.place(relwidth = 1,
rely = 0.825)
self.entryMsg = Entry(self.labelBottom,
bg = "#2C3E50",
fg = "#EAECEE",
font = "Helvetica 13")
# place the given widget
# into the gui window
self.entryMsg.place(relwidth = 0.74,
relheight = 0.06,
rely = 0.008,
relx = 0.011)
self.entryMsg.focus()
# create a Send Button
self.buttonMsg = Button(self.labelBottom,
text = "Send",
font = "Helvetica 10 bold",
width = 20,
bg = "#ABB2B9",
command = lambda : self.sendButton(self.entryMsg.get()))
self.buttonMsg.place(relx = 0.77,
rely = 0.008,
relheight = 0.06,
relwidth = 0.22)
self.textCons.config(cursor = "arrow")
# create a scroll bar
scrollbar = Scrollbar(self.textCons)
# place the scroll bar
# into the gui window
scrollbar.place(relheight = 1,
relx = 0.974)
scrollbar.config(command = self.textCons.yview)
self.textCons.config(state = DISABLED)
    # function to start a thread that sends the message
def sendButton(self, msg):
self.textCons.config(state = DISABLED)
self.msg=msg
self.entryMsg.delete(0, END)
snd= threading.Thread(target = self.sendMessage)
snd.start()
# function to receive messages
def receive(self):
while True:
try:
message = client.recv(1024).decode(FORMAT)
                # if the message from the server is NAME, send the client's name
if message == 'NAME':
client.send(self.name.encode(FORMAT))
else:
# insert messages to text box
self.textCons.config(state = NORMAL)
self.textCons.insert(END,
message+"\n\n")
self.textCons.config(state = DISABLED)
self.textCons.see(END)
except:
                # if receiving fails, log to the console, close the socket and stop the loop
                print("An error occurred!")
client.close()
break
# function to send messages
def sendMessage(self):
self.textCons.config(state=DISABLED)
while True:
message = (f"{self.name}: {self.msg}")
client.send(message.encode(FORMAT))
break
# create a GUI class object
g = GUI()
|
core_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for core."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import pickle
import threading
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import def_function
from tensorflow.python.eager import execute as execute_lib
from tensorflow.python.eager import executor
from tensorflow.python.eager import test
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import variables
def execute(op_name, num_outputs, inputs, attrs=None):
return execute_lib.execute(
op_name, num_outputs, inputs, attrs, context.context())
def truncated_normal(shape):
return execute(
b'TruncatedNormal',
1,
inputs=[shape],
attrs=('dtype', dtypes.float32.as_datatype_enum, 'T',
shape.dtype.as_datatype_enum, 'seed', 0, 'seed2', 0))[0]
def current_device():
return constant_op.constant(1.).device
def configure_virtual_cpus():
cpus = config.list_physical_devices('CPU')
# Set 2 virtual CPUs
config.set_virtual_device_configuration(cpus[0], [
context.VirtualDeviceConfiguration(),
context.VirtualDeviceConfiguration()
])
class TFETest(test_util.TensorFlowTestCase):
def setUp(self):
super(TFETest, self).setUp()
configure_virtual_cpus()
def _test_hashable(self, a, b, hashable):
if hashable:
self.assertIsInstance(b, collections.Hashable)
self.assertLen(set([a, b]), 2)
else:
# TODO(gjn): Figure out how to make this work for tf.Tensor
# self.assertNotIsInstance(b, collections.Hashable)
with self.assertRaisesRegexp(TypeError, 'unhashable'):
set([a, b])
def testEquality(self):
default = ops.Tensor._USE_EQUALITY
try:
def _v1_check(a, b):
self.assertEqual(a, a)
self.assertIs(a, a)
self.assertNotEqual(a, 1.0)
self.assertIsNot(a, 1.0)
self.assertNotEqual(a, b)
self.assertIsNot(a, b)
def _v2_check(a, b):
self.assertEqual(a, a)
self.assertIs(a, a)
self.assertEqual(a, 1.0)
self.assertIsNot(a, 1.0)
self.assertEqual(a, b)
self.assertIsNot(a, b)
constant_a = constant_op.constant(1.0)
constant_b = constant_op.constant(1.0)
ops.disable_tensor_equality()
self._test_hashable(constant_a, constant_b, True)
_v1_check(constant_a, constant_b)
ops.enable_tensor_equality()
_v2_check(constant_a, constant_b)
self._test_hashable(constant_a, constant_b, False)
variable_a = variables.Variable(1.0)
variable_b = variables.Variable(1.0)
ops.disable_tensor_equality()
_v1_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, True)
ops.enable_tensor_equality()
_v2_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, True)
# We only test numpy behaviour in v2 mode since we'd like to match that.
numpy_a = np.array(1.0)
numpy_b = np.array(1.0)
_v2_check(numpy_a, numpy_b)
self._test_hashable(numpy_a, numpy_b, False)
finally:
if default:
ops.enable_tensor_equality()
else:
ops.disable_tensor_equality()
def testEqualityNan(self):
default = ops.Tensor._USE_EQUALITY
try:
def _v1_check(a, b):
self.assertEqual(a, a)
self.assertIs(a, a)
self.assertNotEqual(a, float('nan'))
self.assertIsNot(a, float('nan'))
self.assertNotEqual(a, b)
self.assertIsNot(a, b)
def _v2_check(a, b):
self.assertNotEqual(a, a)
self.assertIs(a, a)
self.assertNotEqual(a, float('nan'))
self.assertIsNot(a, float('nan'))
self.assertNotEqual(a, b)
self.assertIsNot(a, b)
constant_a = constant_op.constant(float('nan'))
constant_b = constant_op.constant(float('nan'))
ops.disable_tensor_equality()
self._test_hashable(constant_a, constant_b, True)
_v1_check(constant_a, constant_b)
ops.enable_tensor_equality()
_v2_check(constant_a, constant_b)
self._test_hashable(constant_a, constant_b, False)
variable_a = variables.Variable(float('nan'))
variable_b = variables.Variable(float('nan'))
ops.disable_tensor_equality()
_v1_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, True)
ops.enable_tensor_equality()
_v2_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, True)
numpy_a = np.array(float('nan'))
numpy_b = np.array(float('nan'))
_v2_check(numpy_a, numpy_b)
self._test_hashable(numpy_a, numpy_b, False)
finally:
if default:
ops.enable_tensor_equality()
else:
ops.disable_tensor_equality()
def testEqualityCompare(self):
default = ops.Tensor._USE_EQUALITY
try:
tf_a = constant_op.constant([1, 2])
tf_b = constant_op.constant([1, 2])
tf_c = constant_op.constant([1, 1])
np_a = np.array([1, 2])
np_b = np.array([1, 2])
np_c = np.array([1, 1])
ops.disable_tensor_equality()
# We don't do element-wise comparison
self.assertNotEqual(tf_a, tf_b)
self.assertNotEqual(tf_a, tf_c)
# We can compare list of tensors
self.assertEqual([tf_a, tf_b], [tf_a, tf_b])
self.assertNotEqual([tf_a, tf_b], [tf_b, tf_b])
# We can compare existence in a list
self.assertIn(tf_a, [tf_a, tf_b])
self.assertIn(tf_a, [tf_b, tf_a])
self.assertNotIn(tf_a, [tf_b, tf_c])
ops.enable_tensor_equality()
# We do element-wise comparison but can't convert results array to bool
with self.assertRaises(ValueError):
bool(tf_a == tf_b)
self.assertAllEqual(tf_a == tf_b, [True, True])
with self.assertRaises(ValueError):
bool(tf_a == tf_c)
self.assertAllEqual(tf_a == tf_c, [True, False])
with self.assertRaises(ValueError):
bool(np_a == np_b)
self.assertAllEqual(np_a == np_b, [True, True])
with self.assertRaises(ValueError):
bool(np_a == np_c)
self.assertAllEqual(np_a == np_c, [True, False])
      # Warning: even though we technically shouldn't be able to compare lists
      # here, both TF & numpy short-circuit on identical object ids, so lists
      # containing the same objects compare equal without raising an error.
self.assertEqual([tf_a, tf_b], [tf_a, tf_b])
with self.assertRaises(ValueError):
bool([tf_a, tf_b] == [tf_b, tf_b])
self.assertEqual([np_a, np_b], [np_a, np_b])
with self.assertRaises(ValueError):
bool([np_a, np_b] == [np_b, np_b])
      # Similar to lists, we shouldn't be able to do an `in` check such as
      # `if a in [a, b]`. However, if `a` is the first element it works due to
      # short-circuiting.
self.assertIn(tf_a, [tf_a, tf_b])
with self.assertRaises(ValueError):
bool(tf_a in [tf_b, tf_a])
with self.assertRaises(ValueError):
bool(tf_a in [tf_b, tf_c])
self.assertIn(np_a, [np_a, np_b])
with self.assertRaises(ValueError):
bool(np_a in [np_b, np_a])
with self.assertRaises(ValueError):
bool(np_a in [np_b, np_c])
# rank 0
self.assertAllEqual(
constant_op.constant(1) == constant_op.constant(1), True)
self.assertAllEqual(
constant_op.constant(1) == constant_op.constant(2), False)
self.assertAllEqual(np.array(1) == np.array(1), True)
self.assertAllEqual(np.array(1) == np.array(2), False)
finally:
if default:
ops.enable_tensor_equality()
else:
ops.disable_tensor_equality()
def testContext(self):
ctx = context.Context()
self.assertTrue(ctx.executing_eagerly())
self.assertEqual('', ctx.scope_name)
ctx.scope_name = 'foo'
self.assertEqual('foo', ctx.scope_name)
self.assertEqual(context.SYNC, ctx.execution_mode)
ctx.execution_mode = context.ASYNC
self.assertEqual(context.ASYNC, ctx.execution_mode)
ctx.execution_mode = context.SYNC
self.assertEqual(context.SYNC, ctx.execution_mode)
self.assertIsNone(ctx.summary_writer)
ctx.summary_writer = 'mock'
self.assertEqual('mock', ctx.summary_writer)
self.assertIsNone(ctx.summary_recording)
ctx.summary_recording = 'mock'
self.assertEqual('mock', ctx.summary_recording)
self.assertIsNone(ctx.summary_step)
ctx.summary_step = 'mock'
self.assertEqual('mock', ctx.summary_step)
self.assertEqual('', ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device('GPU:0'):
self.assertEqual('/job:localhost/replica:0/task:0/device:GPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device(None):
self.assertEqual('', ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device('CPU:0'):
self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
has_cpu_device = False
for x in ctx.devices():
has_cpu_device = has_cpu_device or 'CPU' in x
self.assertTrue(has_cpu_device)
del ctx
def testAsyncBasic(self):
ctx = context.Context(execution_mode=context.ASYNC)
ctx.ensure_initialized()
has_cpu_device = False
for x in ctx.devices():
has_cpu_device = has_cpu_device or 'CPU' in x
self.assertTrue(has_cpu_device)
del ctx
def testRunMetadata(self):
context.enable_run_metadata()
t = constant_op.constant(1.0)
_ = t + t # Runs an operation which will be in the RunMetadata
run_metadata = context.export_run_metadata()
context.disable_run_metadata()
step_stats = run_metadata.step_stats
self.assertGreater(len(step_stats.dev_stats), 0)
cpu_stats = step_stats.dev_stats[0]
self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',
cpu_stats.device)
self.assertGreaterEqual(len(cpu_stats.node_stats), 1)
def testMultiCpuPlacement(self):
with ops.device('cpu:1'):
x = constant_op.constant(1.0)
y = array_ops.identity(x)
self.assertEqual(x.device, '/job:localhost/replica:0/task:0/device:CPU:1')
self.assertEqual(y.device, '/job:localhost/replica:0/task:0/device:CPU:0')
@test_util.run_gpu_only
def testShouldCopy(self):
with ops.device('gpu:0'):
x = constant_op.constant(1.0)
y = array_ops.identity(x)
# The value we're testing y.device against will depend on what the behavior
# of not explicitly specifying a device in the context is. This behavior is
# subject to change (for example, in the future we may want to use GPUs, if
# available, when no device is explicitly provided)
self.assertEqual(y.device, '/job:localhost/replica:0/task:0/device:CPU:0')
def testContextSwitchStackContainsEagerMode(self):
# Eager execution has been enabled, and no other context switch has
# occurred, so `context_switches` should contain exactly one entry.
self.assertEqual(len(context.context().context_switches.stack), 1)
switch = context.context().context_switches.stack[0]
# The entry should log that eager mode was entered.
self.assertIs(switch.enter_context_fn, context.eager_mode)
# It is not possible to build a graph function when eager execution
# is enabled; the stack entry should reflect this fact.
self.assertFalse(switch.is_building_function)
@test_util.run_gpu_only
def testInt32GPU(self):
with ops.device('gpu:0'):
xent = nn_ops.sparse_softmax_cross_entropy_with_logits(
logits=[[0.0, 0.0]], labels=[0])
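      # With uniform logits over two classes the cross entropy is ln(2) ~ 0.693.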
self.assertAllClose(xent, [0.69314718])
def _runInThread(self, target, args):
t = threading.Thread(target=target, args=args)
try:
t.start()
t.join()
except Exception as e:
raise e
  # Test that the thread-local context members are initialized to the same
  # values in different threads.
def testContextThreadLocalMembers(self):
def get_context_values(ctx):
return [
ctx.executing_eagerly(),
ctx.scope_name,
ctx.summary_writer,
ctx.summary_recording,
ctx.summary_step,
ctx.device_name,
ctx.num_gpus()
]
def get_values(ctx, values):
values.extend(get_context_values(ctx))
context_values = []
ctx = context.Context()
self._runInThread(get_values, (ctx, context_values))
self.assertAllEqual(context_values, get_context_values(ctx))
@test_util.run_gpu_only
def testContextConfig(self):
ctx = context.Context(config=config_pb2.ConfigProto(
device_count={'GPU': 0}))
    self.assertEqual(0, ctx.num_gpus())
def testPickle(self):
tmp_dir = self.get_temp_dir()
fname = os.path.join(tmp_dir, 't.pickle')
with open(fname, 'wb') as f:
t = constant_op.constant(10.0)
pickle.dump(t, f)
with open(fname, 'rb') as f:
t = pickle.load(f)
self.assertAllEqual(t.numpy(), 10.0)
@test_util.run_gpu_only
def testDevicePlacementEnforcesConsistency(self):
cpu = context.device('cpu:0')
gpu = context.device('gpu:0')
cpu.__enter__()
self.assertEndsWith(current_device(), 'CPU:0')
gpu.__enter__()
self.assertEndsWith(current_device(), 'GPU:0')
with self.assertRaisesRegexp(
RuntimeError, 'Exiting device scope without proper scope nesting'):
cpu.__exit__()
self.assertEndsWith(current_device(), 'GPU:0')
gpu.__exit__()
self.assertEndsWith(current_device(), 'CPU:0')
@test_util.run_gpu_only
def testReEntrant(self):
cpu = context.device('cpu:0')
gpu = context.device('gpu:0')
with cpu:
with gpu:
with gpu:
self.assertEndsWith(current_device(), 'GPU:0')
self.assertEndsWith(current_device(), 'GPU:0')
self.assertEndsWith(current_device(), 'CPU:0')
with gpu:
self.assertEndsWith(current_device(), 'GPU:0')
@test_util.run_gpu_only
def testTensorPlacement(self):
x = constant_op.constant(1.).gpu()
with context.device('gpu:0'):
y = constant_op.constant(2.)
    # Add would fail if y were not on GPU
result = execute(
b'Add', 1, inputs=[x, y],
attrs=('T', x.dtype.as_datatype_enum))[0].cpu().numpy()
self.assertEqual(3, result)
@test_util.run_gpu_only
def testResourceTensorPlacement(self):
with context.device('gpu:0'):
v = resource_variable_ops.ResourceVariable(1.0)
with context.device('cpu:0'):
      # Check that even though we specified the cpu device, we'll run the read
      # op on the device where the handle is.
self.assertAllEqual(
gen_resource_variable_ops.read_variable_op(v.handle, v.dtype), 1.0)
@test_util.run_gpu_only
def testCopyBetweenDevices(self):
x = constant_op.constant([[1., 2.], [3., 4.]])
x = x.cpu()
x = x.gpu()
x = x.gpu()
x = x.cpu()
# Invalid device
with self.assertRaises(RuntimeError):
x.gpu(context.context().num_gpus() + 1)
@test_util.run_gpu_only
def testCopyBetweenDevicesAsync(self):
with context.execution_mode(context.ASYNC):
x = constant_op.constant([[1., 2.], [3., 4.]])
x = x.cpu()
x = x.gpu()
x = x.gpu()
x = x.cpu()
context.async_wait()
# Invalid device
with self.assertRaises(RuntimeError):
x.gpu(context.context().num_gpus() + 1)
context.async_wait()
context.async_clear_error()
@test_util.run_gpu_only
def testCopyScope(self):
constant = constant_op.constant(1.0)
with ops.device('gpu:0'):
with context.device_policy(context.DEVICE_PLACEMENT_SILENT):
c = constant + 1.0
self.assertAllEqual(c, 2.0)
def testPyFunctionNullContext(self):
def simple_fn(unused_handle):
return 1.
@def_function.function
def test_fn(v):
script_ops.eager_py_func(simple_fn, [v.handle], dtypes.float32)
return 1.
test_var = variables.Variable([2., 3.])
self.assertAllEqual(test_fn(test_var), 1.0)
def testPyFunctionAsync(self):
def simple_fn(v):
one = constant_op.constant(1.)
return v + one
@def_function.function
def test_fn(v):
return script_ops.eager_py_func(simple_fn, [v], dtypes.float32)
async_executor = executor.Executor(enable_async=True)
with context.executor_scope(async_executor):
test_var = variables.Variable(2.)
self.assertAllEqual(test_fn(test_var), 3.0)
async_executor.wait()
@test_util.run_gpu_only
def testNumpyForceCPU(self):
cpu = constant_op.constant([[1., 2.], [3., 4.]])
c2g = cpu.gpu()
self.assertAllEqual(c2g, cpu.numpy())
def testCopyFromCPUToCPU(self):
ta = constant_op.constant([[1, 2], [3, 4]])
tb = ta.cpu()
self.assertNotEqual(id(ta), id(tb))
self.assertAllEqual(ta, tb.numpy())
def testRegisterExceptionClass(self):
with self.assertRaises(TypeError):
pywrap_tensorflow.TFE_Py_RegisterExceptionClass(str)
pywrap_tensorflow.TFE_Py_RegisterExceptionClass(core._NotOkStatusException) # pylint: disable=protected-access
# TODO(agarwal): add tests passing incorrect typed values to attrs.
def testExecuteBasic(self):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = execute(
b'Mul',
num_outputs=1,
inputs=[three, five],
attrs=('T', three.dtype.as_datatype_enum))[0]
self.assertAllEqual(15, product)
def testExecuteBasicAsync(self):
with context.execution_mode(context.ASYNC):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = execute(
b'Mul',
num_outputs=1,
inputs=[three, five],
attrs=('T', three.dtype.as_datatype_enum))[0]
self.assertAllEqual(15, product)
# Error: Invalid arguments
context.set_execution_mode(context.ASYNC)
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'MatMul',
num_outputs=1,
inputs=[three, five],
attrs=('transpose_a', False, 'transpose_b', False, 'T',
three.dtype.as_datatype_enum))
context.async_wait()
context.async_clear_error()
context.context().execution_mode = context.SYNC
def testExecuteTooManyNumOutputs(self):
# num_outputs provided is 50, but only one output is produced.
product = execute(
b'Mul',
num_outputs=50,
inputs=[constant_op.constant(3),
constant_op.constant(5)],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
self.assertAllEqual(15, product)
def testExecuteTooFewNumOutputs(self):
# num_outputs provided is 0, but one output is produced.
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'Mul',
num_outputs=0,
inputs=[constant_op.constant(3),
constant_op.constant(5)],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
@test_util.run_gpu_only
def testMatMulGPU(self):
three = constant_op.constant([[3.]]).gpu()
five = constant_op.constant([[5.]]).gpu()
product = execute(
b'MatMul',
num_outputs=1,
inputs=[three, five],
attrs=('transpose_a', False, 'transpose_b', False, 'T',
three.dtype.as_datatype_enum))[0]
self.assertAllEqual([[15.0]], product)
def testExecuteStringAttr(self):
checked_three = execute(
b'CheckNumerics',
num_outputs=1,
inputs=[constant_op.constant(3.)],
attrs=('message', 'just checking', 'T',
dtypes.float32.as_datatype_enum))[0]
self.assertEqual([[3]], checked_three.numpy())
def testExecuteStringAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'CheckNumerics',
num_outputs=1,
inputs=[constant_op.constant(3.)],
attrs=('message', 1, 'T', dtypes.float32.as_datatype_enum))
def testExecuteFloatAttr(self):
almost_equal = execute(
b'ApproximateEqual',
num_outputs=1,
inputs=[constant_op.constant(3.0), constant_op.constant(2.9)],
attrs=('tolerance', 0.3, 'T', dtypes.float32.as_datatype_enum))[0]
self.assertTrue(almost_equal)
def testExecuteFloatAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'ApproximateEqual',
num_outputs=1,
inputs=[constant_op.constant(3.0), constant_op.constant(2.9)],
attrs=('tolerance', '0.3', 'T', dtypes.float32.as_datatype_enum))
def testExecuteIntAttr(self):
total = execute(
b'AddN',
num_outputs=1,
inputs=[constant_op.constant(3), constant_op.constant(4)],
attrs=('T', dtypes.int32.as_datatype_enum, 'N', 2))[0]
self.assertAllEqual(7, total)
def testExecuteIntAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'AddN',
num_outputs=1,
inputs=[constant_op.constant(3), constant_op.constant(4)],
attrs=('T', dtypes.int32.as_datatype_enum, 'N', '2'))
# Looks like we don't have an existing op with list(bool) attrs.
def testExecuteBoolAttr(self):
product = execute(
b'MatMul',
num_outputs=1,
inputs=[constant_op.constant([[3]]),
constant_op.constant([[5]])],
attrs=('transpose_a', True, 'transpose_b', False, 'T',
dtypes.int32.as_datatype_enum))[0]
self.assertAllEqual([[15]], product)
def testExecuteShapeAttr(self):
execute(
b'VarHandleOp',
num_outputs=1,
inputs=[],
attrs=('shape', [1, 2], 'dtype', dtypes.int32.as_datatype_enum,
'container', '', 'shared_name', ''))
def testExecuteShapeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'VarHandleOp',
num_outputs=1,
inputs=[],
attrs=('shape', 1, 'dtype', dtypes.int32.as_datatype_enum,
'container', '', 'shared_name', ''))
def testExecuteListStringAttr(self):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description',
'tensor_summary', 'labels', ['3',
'summary'], 'display_name', 'test'))
def testExecuteListStringAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description', '',
'labels', 3, 'display_name', 'test'))
def testExecuteListStringAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description', '',
'labels', [3], 'display_name', 'test'))
def testExecuteListFloatAttr(self):
b = execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries', [4.0,
6.0]))[0]
self.assertAllEqual([0, 1, 2], b)
def testExecuteListFloatAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries', 4.0))
def testExecuteListFloatAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries',
['4.0', '6.0']))
def testExecuteListIntAttr(self):
b = execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims', [0, 2]))[0]
self.assertAllEqual([3], b)
def testExecuteListIntAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims', 0))
def testExecuteListIntAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims',
['0', '2']))
def testExecuteListTypeListShapeAttr(self):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[[1, 2]], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListTypeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', dtypes.float64.as_datatype_enum, 'shapes',
[[1, 2]], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListTypeAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', '1', 'shapes', [[1, 2]], 'capacity', -1,
'container', '', 'shared_name', ''))
def testExecuteListShapeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[1, 2], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListShapeAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[1], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteMultipleOutputs(self):
split_dim = 1
value = [[0, 1, 2], [3, 4, 5]]
x1, x2, x3 = execute(
b'Split',
num_outputs=3,
inputs=[constant_op.constant(split_dim),
constant_op.constant(value)],
attrs=('num_split', 3, 'T', dtypes.int32.as_datatype_enum))
self.assertAllEqual([[0], [3]], x1)
self.assertAllEqual([[1], [4]], x2)
self.assertAllEqual([[2], [5]], x3)
def testExecuteBadNumOutputsArgument(self):
with self.assertRaises(TypeError):
execute(
b'Relu', [],
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum))
def testExecuteUnknownOp(self):
with self.assertRaises(errors.NotFoundError):
execute(b'BlahBlahBlah', num_outputs=1, inputs=[], attrs=None)
def testExecuteUnknownAttr(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Identity',
num_outputs=1,
inputs=[constant_op.constant(3)],
attrs=('T', dtypes.int32.as_datatype_enum, 'unknown_attr', 'blah'))
def testComposition(self):
def add(x, y):
return execute(
b'Add',
num_outputs=1,
inputs=[x, y],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
x = constant_op.constant(1)
three_x = add(add(x, x), x)
    self.assertEqual(dtypes.int32, three_x.dtype)
self.assertAllEqual(3, three_x)
@test_util.run_gpu_only
def testOperationWithNoInputsRunsOnDevice(self):
shape = constant_op.constant([], dtype=dtypes.int32)
    # x: Run the "TruncatedNormal" op on the CPU and copy the result to GPU.
x = truncated_normal(shape).gpu()
# y: Explicitly run the "TruncatedNormal" op on GPU.
with context.device('gpu:0'):
y = truncated_normal(shape)
# Add would fail if x and y were not on the same device.
execute(
b'Add', 1, inputs=[x, y], attrs=('T', x.dtype.as_datatype_enum))
def testInvalidDevice(self):
with self.assertRaises(ValueError):
with context.device('pu:0'):
_ = constant_op.constant(1)
def testConvertMixedEagerTensors(self):
array = np.zeros((), dtype=np.float32)
tensor = constant_op.constant(0., dtype=dtypes.float32)
types, tensors = execute_lib.convert_to_mixed_eager_tensors(
[array, tensor], context.context())
for typ, t in zip(types, tensors):
      self.assertEqual(typ, dtypes.float32)
self.assertIsInstance(t, ops.EagerTensor)
def testConvertMixedEagerTensorsWithVariables(self):
var = resource_variable_ops.ResourceVariable(1.0)
types, tensors = execute_lib.convert_to_mixed_eager_tensors(
['foo', var], context.context())
self.assertAllEqual([dtypes.string, dtypes.float32], types)
for t in tensors:
self.assertIsInstance(t, ops.EagerTensor)
# TODO(b/123637108): re-enable
@test_util.run_gpu_only
def disabled_testSmallIntegerOpsForcedToCPU(self):
a = constant_op.constant((1, 2, 3, 4, 5), dtype=dtypes.int64)
b = constant_op.constant((2, 3, 4, 5, 6), dtype=dtypes.int64)
with context.device('gpu:0'):
c = a + b
# Op forced to CPU since all constants are integers and small.
self.assertEqual(c.device, '/job:localhost/replica:0/task:0/device:CPU:0')
a = array_ops.zeros((8, 10), dtype=dtypes.int64)
b = array_ops.ones((8, 10), dtype=dtypes.int64)
with context.device('gpu:0'):
c = a + b
# Op not forced to CPU since the tensors are larger than 64 elements.
self.assertEqual(c.device, '/job:localhost/replica:0/task:0/device:GPU:0')
a = constant_op.constant((1, 2, 3, 4, 5), dtype=dtypes.float32)
b = constant_op.constant((2, 3, 4, 5, 6), dtype=dtypes.float32)
with context.device('gpu:0'):
c = a + b
# Op not forced to CPU since the constants are not integers.
self.assertEqual(c.device, '/job:localhost/replica:0/task:0/device:GPU:0')
def testExecutionModeIsStoredThreadLocal(self):
cv = threading.Condition()
count = [0]
num_threads = 10
def execution_mode_test(cond, count, num_threads, ctx, mode):
cond.acquire()
# Ensure that all threads set their mode simultaneously
# Note that this is not a simple assignment, as the execution_mode is an
# @property with a custom setter.
ctx.execution_mode = mode
count[0] = count[0] + 1
if count[0] < num_threads:
cond.wait()
else:
cond.notify_all()
cond.release()
self.assertEqual(ctx.execution_mode, mode)
ctx = context.Context()
threads = []
for i in range(num_threads):
t = threading.Thread(
target=execution_mode_test,
args=(cv, count, num_threads, ctx,
context.SYNC if i % 2 == 0 else context.ASYNC))
t.start()
threads.append(t)
for t in threads:
t.join()
class SendRecvTest(test_util.TensorFlowTestCase):
cpu_device = '/job:localhost/replica:0/task:0/device:CPU:0'
def _send(self, tensor, tensor_name, to_device):
return execute(
b'_Send', num_outputs=0, inputs=[tensor],
attrs=('T', tensor.dtype.as_datatype_enum,
'tensor_name', tensor_name,
'send_device', tensor.device,
'send_device_incarnation', 0,
'recv_device', to_device,
'client_terminated', True))
def _recv(self, dtype, tensor_name, from_device):
device_name = context.context().device_name
if not device_name:
device_name = self.cpu_device
return execute(
b'_Recv', num_outputs=1, inputs=[],
attrs=('tensor_type', dtype.as_datatype_enum,
'tensor_name', tensor_name,
'send_device', from_device,
'send_device_incarnation', 0,
'recv_device', device_name,
'client_terminated', False))[0]
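  # The _Send/_Recv op pair rendezvous on the matching tensor_name between
  # send_device and recv_device.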
def setUp(self):
super(SendRecvTest, self).setUp()
configure_virtual_cpus()
def testBasic(self):
t0 = constant_op.constant(1.0)
t1 = constant_op.constant(2.0)
self._send(t0, 't0', self.cpu_device)
self._send(t1, 't1', self.cpu_device)
self.assertAllEqual(
self._recv(dtypes.float32, 't0', self.cpu_device),
1.0)
self.assertAllEqual(
self._recv(dtypes.float32, 't1', self.cpu_device),
2.0)
@test_util.run_gpu_only
def testLocalCrossDevice(self):
gpu_device_name = '/job:localhost/replica:0/task:0/device:GPU:0'
with ops.device('GPU:0'):
t0 = constant_op.constant(1.0)
self._send(t0, 't0', self.cpu_device)
with ops.device('cpu:0'):
self.assertAllEqual(
self._recv(dtypes.float32, 't0', gpu_device_name),
1.0)
self._send(constant_op.constant(2.0), 't1', gpu_device_name)
with ops.device('GPU:0'):
self.assertAllEqual(
self._recv(dtypes.float32, 't1', self.cpu_device),
2.0)
class EagerTensorCacheTest(test_util.TensorFlowTestCase):
def setUp(self):
super(EagerTensorCacheTest, self).setUp()
configure_virtual_cpus()
def testCacheSkipsTensorsTooLarge(self):
cache = context._EagerTensorCache(max_items=100, max_tensor_size=3)
cache.put('1', array_ops.zeros((2, 2)))
self.assertEqual(cache.get('1'), None)
cache.put('2', array_ops.zeros((2)))
self.assertNotEqual(cache.get('2'), None)
if __name__ == '__main__':
test.main()
test_fsm.py
"""Unit tests for fsm.py"""
import datetime
import logging
import select
import socket
from struct import pack
import sys
import threading
import time
import pytest
from pynetdicom import AE, build_context, evt, debug_logger
from pynetdicom.association import Association
from pynetdicom import fsm as FINITE_STATE
from pynetdicom.fsm import *
from pynetdicom.dimse_primitives import C_ECHO
from pynetdicom.pdu_primitives import (
A_ASSOCIATE, A_ABORT, A_P_ABORT, P_DATA, A_RELEASE,
MaximumLengthNotification, ImplementationClassUIDNotification
)
from pynetdicom.pdu import A_RELEASE_RQ
from pynetdicom.sop_class import VerificationSOPClass
from pynetdicom.transport import AssociationSocket
from pynetdicom.utils import validate_ae_title
from .dummy_c_scp import DummyVerificationSCP, DummyBaseSCP
from .encoded_pdu_items import (
a_associate_ac, a_associate_rq, a_associate_rj, p_data_tf, a_abort,
a_release_rq, a_release_rp,
)
from .parrot import ThreadedParrot
#debug_logger()
REFERENCE_BAD_EVENTS = [
# Event, bad states
("Evt1", [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # A-ASSOCIATE (rq) p
("Evt2", [1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # Connection available
("Evt3", [1, 4]), # A-ASSOCIATE-AC PDU recv
("Evt4", [1, 4]), # A-ASSOCIATE-RJ PDU recv
("Evt5", [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # Connection open
("Evt6", [1, 4]), # A-ASSOCIATE-RQ PDU recv
("Evt7", [1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # A-ASSOCIATE (ac) p
("Evt8", [1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # A-ASSOCIATE (rj) p
("Evt9", [1, 2, 3, 4, 5, 7, 9, 10, 11, 12, 13]), # P-DATA primitive
("Evt10", [1, 4]), # P-DATA-TF PDU
("Evt11", [1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 12, 13]), # A-RELEASE (rq) p
("Evt12", [1, 4]), # A-RELEASE-RQ PDU recv
("Evt13", [1, 4]), # A-RELEASE-RP PDU recv
("Evt14", [1, 2, 3, 4, 5, 6, 7, 10, 11, 13]), # A-RELEASE (rsp) primitive
("Evt15", [1, 2, 13]), # A-ABORT (rq) primitive
("Evt16", [1, 4]), # A-ABORT PDU recv
("Evt17", [1]), # Connection closed
("Evt18", [1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]), # ARTIM expired
("Evt19", [1, 4]), # Unrecognised PDU rev
]
REFERENCE_GOOD_EVENTS = [
# Event, good states
("Evt1", [1]), # A-ASSOCIATE (rq) p
("Evt2", [4]), # Connection available
("Evt3", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # A-ASSOCIATE-AC PDU recv
("Evt4", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # A-ASSOCIATE-RJ PDU recv
("Evt5", [1]), # Connection open
("Evt6", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # A-ASSOCIATE-RQ PDU recv
("Evt7", [3]), # A-ASSOCIATE (ac) p
("Evt8", [3]), # A-ASSOCIATE (rj) p
("Evt9", [6, 8]), # P-DATA primitive
("Evt10", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # P-DATA-TF PDU
("Evt11", [6]), # A-RELEASE (rq) p
("Evt12", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # A-RELEASE-RQ PDU recv
("Evt13", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # A-RELEASE-RP PDU recv
("Evt14", [8, 9, 12]), # A-RELEASE (rsp) primitive
("Evt15", [3, 4, 5, 6, 7, 8, 9, 10, 11, 12]), # A-ABORT (rq) primitive
("Evt16", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # A-ABORT PDU recv
("Evt17", [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # Connection closed
("Evt18", [2, 13]), # ARTIM expired
("Evt19", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # Unrecognised PDU rev
]
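# Together the two tables above cover every (event, state) pair for the 13 DUL
# states: in the 'bad' states do_action() should raise InvalidEventError, while
# in the 'good' states a handler action is looked up and executed.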
class BadDUL(object):
"""A DUL that always raises an exception during actions."""
def __init__(self):
self.is_killed = False
def kill_dul(self):
"""Hook for testing whether DUL got killed."""
self.is_killed = True
@property
def primitive(self):
"""Prevent StateMachine from setting primitive."""
return None
class TestStateMachine(object):
"""Non-functional unit tests for fsm.StateMachine."""
def test_init(self):
"""Test creation of new StateMachine."""
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='requestor')
fsm = assoc.dul.state_machine
assert fsm.current_state == 'Sta1'
assert fsm.dul == assoc.dul
def test_invalid_transition_raises(self):
"""Test StateMachine.transition using invalid states raises."""
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='requestor')
fsm = assoc.dul.state_machine
msg = r"Invalid state 'Sta0' for State Machine"
with pytest.raises(ValueError, match=msg):
fsm.transition('Sta0')
def test_valid_transition(self):
"""Test StateMachine.transition using valid states."""
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='requestor')
fsm = assoc.dul.state_machine
for ii in range(1, 14):
assert 1 <= ii <= 13
fsm.transition("Sta{}".format(ii))
assert fsm.current_state == "Sta{}".format(ii)
@pytest.mark.parametrize("event, states", REFERENCE_BAD_EVENTS)
def test_invalid_action_raises(self, event, states):
"""Test StateMachine.do_action raises exception if action invalid."""
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='requestor')
fsm = assoc.dul.state_machine
for state in states:
state = "Sta{}".format(state)
fsm.current_state = state
            msg = (
r"Invalid event '{}' for the current state '{}'"
.format(event, state)
)
with pytest.raises(InvalidEventError, match=msg):
fsm.do_action(event)
@pytest.mark.parametrize("event, states", REFERENCE_GOOD_EVENTS)
def test_exception_during_action(self, event, states):
"""Test an exception raised during an action kill the DUL."""
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='requestor')
fsm = assoc.dul.state_machine
fsm.dul = BadDUL()
for state in states:
fsm.dul.is_killed = False
state = "Sta{}".format(state)
fsm.current_state = state
with pytest.raises(AttributeError):
fsm.do_action(event)
assert fsm.dul.is_killed is True
assert fsm.current_state == state
class TestStateBase(object):
"""Base class for State tests."""
def setup(self):
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='requestor')
assoc.set_socket(AssociationSocket(assoc))
# Association Acceptor object -> remote AE
assoc.acceptor.ae_title = validate_ae_title(b'ANY_SCU')
assoc.acceptor.address = 'localhost'
assoc.acceptor.port = 11112
# Association Requestor object -> local AE
assoc.requestor.address = ''
assoc.requestor.port = 11113
assoc.requestor.ae_title = ae.ae_title
assoc.requestor.maximum_length = 16382
assoc.requestor.implementation_class_uid = (
ae.implementation_class_uid
)
assoc.requestor.implementation_version_name = (
ae.implementation_version_name
)
cx = build_context(VerificationSOPClass)
cx.context_id = 1
assoc.requestor.requested_contexts = [cx]
self.assoc = assoc
self.fsm = self.monkey_patch(assoc.dul.state_machine)
def teardown(self):
for thread in threading.enumerate():
if isinstance(thread, ThreadedParrot):
thread.shutdown()
def get_associate(self, assoc_type):
primitive = A_ASSOCIATE()
if assoc_type == 'request':
primitive.application_context_name = '1.2.3.4.5.6'
# Calling AE Title is the source DICOM AE title
primitive.calling_ae_title = b'LOCAL_AE_TITLE '
# Called AE Title is the destination DICOM AE title
primitive.called_ae_title = b'REMOTE_AE_TITLE '
# The TCP/IP address of the source, pynetdicom includes port too
primitive.calling_presentation_address = ('', 0)
# The TCP/IP address of the destination, pynetdicom includes port too
primitive.called_presentation_address = ('localhost', 11112)
# Proposed presentation contexts
cx = build_context(VerificationSOPClass)
cx.context_id = 1
primitive.presentation_context_definition_list = [cx]
user_info = []
item = MaximumLengthNotification()
item.maximum_length_received = 16382
user_info.append(item)
item = ImplementationClassUIDNotification()
item.implementation_class_uid = '1.2.3.4'
user_info.append(item)
primitive.user_information = user_info
elif assoc_type == 'accept':
primitive.application_context_name = '1.2.3.4.5.6'
# Calling AE Title is the source DICOM AE title
primitive.calling_ae_title = b'LOCAL_AE_TITLE '
# Called AE Title is the destination DICOM AE title
primitive.called_ae_title = b'REMOTE_AE_TITLE '
            # Result of the association negotiation (0x00 = accepted)
primitive.result = 0x00
primitive.result_source = 0x01
# Proposed presentation contexts
cx = build_context(VerificationSOPClass)
cx.context_id = 1
primitive.presentation_context_definition_results_list = [cx]
user_info = []
item = MaximumLengthNotification()
item.maximum_length_received = 16383
user_info.append(item)
item = ImplementationClassUIDNotification()
item.implementation_class_uid = '1.2.3.4.5'
user_info.append(item)
primitive.user_information = user_info
elif assoc_type == 'reject':
primitive.result = 0x01
primitive.result_source = 0x01
primitive.diagnostic = 0x01
return primitive
def get_release(self, is_response=False):
primitive = A_RELEASE()
if is_response:
primitive.result = 'affirmative'
return primitive
def get_abort(self, is_ap=False):
if is_ap:
primitive = A_P_ABORT()
primitive.provider_reason = 0x00
else:
primitive = A_ABORT()
primitive.abort_source = 0x00
return primitive
def get_pdata(self):
item = [1, p_data_tf[10:]]
primitive = P_DATA()
primitive.presentation_data_value_list.append(item)
return primitive
def monkey_patch(self, fsm):
"""Monkey patch the StateMachine to add testing hooks."""
# Record all state transitions
fsm._transitions = []
fsm.original_transition = fsm.transition
def transition(state):
fsm._transitions.append(state)
fsm.original_transition(state)
fsm.transition = transition
# Record all event/state/actions
fsm._changes = []
fsm._events = []
fsm.original_action = fsm.do_action
def do_action(event):
fsm._events.append(event)
if (event, fsm.current_state) in TRANSITION_TABLE:
action_name = TRANSITION_TABLE[(event, fsm.current_state)]
fsm._changes.append((fsm.current_state, event, action_name))
fsm.original_action(event)
fsm.do_action = do_action
return fsm
def start_server(self, commands):
"""Start the receiving server."""
server = ThreadedParrot(('localhost', 11112), commands)
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
return server
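    # Note: ThreadedParrot (from .parrot) plays back the scripted
    # (command, value) pairs, e.g. ('send', pdu_bytes), ('recv', None) or
    # ('wait', seconds), so each test can drive the remote peer deterministically.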
def print_fsm_scp(self, fsm, scp=None):
"""Print out some of the quantities we're interested in."""
print('Transitions', fsm._transitions)
print('Changes')
for change in fsm._changes:
print('\t{}'.format(change))
print('Events', fsm._events)
if scp and scp.handlers:
print('Received', scp.handlers[0].received)
print('Sent', scp.handlers[0].sent)
def get_acceptor_assoc(self):
# AF_INET: IPv4, SOCK_STREAM: TCP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(
socket.SOL_SOCKET,
socket.SO_RCVTIMEO,
pack('ll', 1, 0)
)
sock.connect(('localhost', 11112))
ae = AE()
ae.add_supported_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='acceptor')
assoc.set_socket(AssociationSocket(assoc, client_socket=sock))
# Association Acceptor object -> remote AE
assoc.acceptor.ae_title = validate_ae_title(b'ANY_SCU')
assoc.acceptor.address = 'localhost'
assoc.acceptor.port = 11112
# Association Requestor object -> local AE
assoc.requestor.address = ''
assoc.requestor.port = 11113
assoc.requestor.ae_title = ae.ae_title
assoc.requestor.maximum_length = 16382
assoc.requestor.implementation_class_uid = (
ae.implementation_class_uid
)
assoc.requestor.implementation_version_name = (
ae.implementation_version_name
)
cx = build_context(VerificationSOPClass)
cx.context_id = 1
assoc.acceptor.supported_contexts = [cx]
fsm = self.monkey_patch(assoc.dul.state_machine)
return assoc, fsm
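# Each TestStateXX class below pins the state machine in one state and feeds it
# every event, asserting that the recorded transitions, (state, event, action)
# changes and event order match the DUL state machine table in the DICOM
# Standard (PS3.8).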
class TestState01(TestStateBase):
"""Tests for State 01: Idle."""
def test_evt01(self):
"""Test Sta1 + Evt1."""
# Sta1 + Evt1 -> AE-1 -> Sta4
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
# AE-1: Issue TRANSPORT_CONNECT primitive to <transport service>
commands = [
('recv', None),
('send', a_abort)
]
scp = self.start_server(commands)
self.assoc.start()
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:1] == ['Evt1']
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta1 + Evt2."""
# Sta1 + Evt2 -> <ignore> -> Sta1
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta1 + Evt3."""
# Sta1 + Evt3 -> <ignore> -> Sta1
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
commands = [
('send', a_associate_ac),
]
scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.socket.socket.connect(('localhost', 11112))
self.assoc.dul.socket._is_connected = True
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == []
assert self.fsm._changes[:1] == []
assert self.fsm._events[:1] == ['Evt3']
def test_evt04(self):
"""Test Sta1 + Evt4."""
# Sta1 + Evt4 -> <ignore> -> Sta1
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
commands = [
('send', a_associate_rj),
]
scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.socket.socket.connect(('localhost', 11112))
self.assoc.dul.socket._is_connected = True
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == []
assert self.fsm._changes[:1] == []
assert self.fsm._events[:1] == ['Evt4']
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta1 + Evt5."""
# Sta1 + Evt5 -> AE-5 -> Sta2
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
# AE-5: Issue TRANSPORT_RESPONSE to <transport service>
# Start ARTIM timer
pass
def test_evt06(self):
"""Test Sta1 + Evt6."""
# Sta1 + Evt6 -> <ignore> -> Sta1
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
commands = [
('send', a_associate_rq),
]
scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.socket.socket.connect(('localhost', 11112))
self.assoc.dul.socket._is_connected = True
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == []
assert self.fsm._changes[:1] == []
assert self.fsm._events[:1] == ['Evt6']
def test_evt07(self):
"""Test Sta1 + Evt7."""
# Sta1 + Evt7 -> <ignore> -> Sta1
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.send_pdu(self.get_associate('accept'))
time.sleep(0.1)
self.assoc.kill()
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[0] == 'Evt7'
def test_evt08(self):
"""Test Sta1 + Evt8."""
# Sta1 + Evt8 -> <ignore> -> Sta1
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.send_pdu(self.get_associate('reject'))
time.sleep(0.1)
self.assoc.kill()
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[0] == 'Evt8'
assert self.fsm.current_state == 'Sta1'
def test_evt09(self):
"""Test Sta1 + Evt9."""
# Sta1 + Evt9 -> <ignore> -> Sta1
# Evt9: Receive P-DATA primitive from <local user>
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.send_pdu(self.get_pdata())
time.sleep(0.1)
self.assoc.kill()
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[0] == 'Evt9'
assert self.fsm.current_state == 'Sta1'
def test_evt10(self):
"""Test Sta1 + Evt10."""
# Sta1 + Evt10 -> <ignore> -> Sta1
# Evt10: Receive P-DATA-TF PDU from <remote>
commands = [
('send', p_data_tf),
]
scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.socket.socket.connect(('localhost', 11112))
self.assoc.dul.socket._is_connected = True
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == []
assert self.fsm._changes[:1] == []
assert self.fsm._events[:1] == ['Evt10']
def test_evt11(self):
"""Test Sta1 + Evt11."""
# Sta1 + Evt11 -> <ignore> -> Sta1
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.kill()
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[0] == 'Evt11'
assert self.fsm.current_state == 'Sta1'
def test_evt12(self):
"""Test Sta1 + Evt12."""
# Sta1 + Evt12 -> <ignore> -> Sta1
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
commands = [
('send', a_release_rq),
]
scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.socket.socket.connect(('localhost', 11112))
self.assoc.dul.socket._is_connected = True
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == []
assert self.fsm._changes[:1] == []
assert self.fsm._events[:1] == ['Evt12']
def test_evt13(self):
"""Test Sta1 + Evt13."""
# Sta1 + Evt13 -> <ignore> -> Sta1
# Evt13: Receive A-RELEASE-RP PDU from <remote>
commands = [
('send', a_release_rp),
]
scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.socket.socket.connect(('localhost', 11112))
self.assoc.dul.socket._is_connected = True
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == []
assert self.fsm._changes[:1] == []
assert self.fsm._events[:1] == ['Evt13']
def test_evt14(self):
"""Test Sta1 + Evt14."""
# Sta1 + Evt14 -> <ignore> -> Sta1
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
self.assoc.kill()
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[0] == 'Evt14'
assert self.fsm.current_state == 'Sta1'
def test_evt15(self):
"""Test Sta1 + Evt15."""
# Sta1 + Evt15 -> <ignore> -> Sta1
# Evt15: Receive A-ABORT (rq) primitive from <local user>
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.send_pdu(self.get_abort(False))
time.sleep(0.1)
self.assoc.kill()
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[0] == 'Evt15'
assert self.fsm.current_state == 'Sta1'
def test_evt16(self):
"""Test Sta1 + Evt16."""
# Sta1 + Evt16 -> <ignore> -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
commands = [
('send', a_abort),
]
scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.socket.socket.connect(('localhost', 11112))
self.assoc.dul.socket._is_connected = True
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == []
assert self.fsm._changes[:1] == []
assert self.fsm._events[:1] == ['Evt16']
def test_evt17(self):
"""Test Sta1 + Evt17."""
# Sta1 + Evt17 -> <ignore> -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
commands = []
scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.socket.socket.connect(('localhost', 11112))
self.assoc.dul.socket._is_connected = True
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == []
assert self.fsm._changes[:1] == []
assert self.fsm._events[:1] == ['Evt17']
def test_evt18(self):
"""Test Sta1 + Evt18."""
# Sta1 + Evt18 -> <ignore> -> Sta1
# Evt18: ARTIM timer expired from <local service>
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.artim_timer.timeout = 0.05
self.assoc.dul.artim_timer.start()
time.sleep(0.1)
self.assoc.kill()
assert self.assoc.dul.artim_timer.expired
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[0] == 'Evt18'
assert self.fsm.current_state == 'Sta1'
def test_evt19(self):
"""Test Sta1 + Evt19."""
# Sta1 + Evt19 -> <ignore> -> Sta1
# Evt19: Received unrecognised or invalid PDU from <remote>
commands = [
('send', b'\x08\x00\x00\x00\x00\x00\x00'),
]
scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.socket.socket.connect(('localhost', 11112))
self.assoc.dul.socket._is_connected = True
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == []
assert self.fsm._changes[:1] == []
assert self.fsm._events[:1] == ['Evt19']
class TestState02(TestStateBase):
"""Tests for State 02: Connection open, waiting for A-ASSOCIATE-RQ."""
def test_evt01(self):
"""Test Sta2 + Evt1."""
# Sta2 + Evt1 -> <ignore> -> Sta2
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('wait', 0.3),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
assoc.dul.send_pdu(self.get_associate('request'))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:1] == ['Sta2']
assert fsm._changes[:1] == [
('Sta1', 'Evt5', 'AE-5'),
]
assert fsm._events[:2] == ['Evt5', 'Evt1']
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta2 + Evt2."""
# Sta2 + Evt2 -> <ignore> -> Sta2
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta2 + Evt3."""
# Sta2 + Evt3 -> AA-1 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-1: Send A-ABORT PDU, start ARTIM
commands = [
('send', a_associate_ac),
('recv', None),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta13']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt3', 'AA-1')
]
assert fsm._events[:2] == ['Evt5', 'Evt3']
def test_evt04(self):
"""Test Sta2 + Evt4."""
# Sta2 + Evt4 -> AA-1 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-1: Send A-ABORT PDU, start ARTIM
commands = [
('send', a_associate_rj),
('recv', None),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta13']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt4', 'AA-1')
]
assert fsm._events[:2] == ['Evt5', 'Evt4']
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta2 + Evt5."""
# Sta2 + Evt5 -> <ignore> -> Sta2
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06a(self):
"""Test Sta2 + Evt6."""
# Sta2 + Evt6 -> AE-6 -> **Sta3** or Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AE-6: Stop ARTIM, issue A-ASSOCIATE or A-ASSOCIATE-RJ PDU
commands = [
('send', a_associate_rq),
('recv', None),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6')
]
assert fsm._events[:2] == ['Evt5', 'Evt6']
def test_evt06b(self):
"""Test Sta2 + Evt6."""
# Sta2 + Evt6 -> AE-6 -> Sta3 or **Sta13**
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AE-6: Stop ARTIM, issue A-ASSOCIATE or A-ASSOCIATE-RJ PDU
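        # Corrupt the two protocol-version bytes (offsets 6-7 of the PDU) so
        # the acceptor rejects the association instead of accepting it.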
bad_request = a_associate_rq[:6] + b'\x00\x02' + a_associate_rq[8:]
assert len(bad_request) == len(a_associate_rq)
commands = [
('send', bad_request),
('recv', None),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta13']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6')
]
assert fsm._events[:2] == ['Evt5', 'Evt6']
def test_evt07(self):
"""Test Sta2 + Evt7."""
# Sta2 + Evt7 -> <ignore> -> Sta2
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('wait', 0.3),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
assoc.dul.send_pdu(self.get_associate('accept'))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:1] == ['Sta2']
assert fsm._changes[:1] == [
('Sta1', 'Evt5', 'AE-5'),
]
assert fsm._events[:2] == ['Evt5', 'Evt7']
def test_evt08(self):
"""Test Sta2 + Evt8."""
# Sta2 + Evt8 -> <ignore> -> Sta2
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('wait', 0.3),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
assoc.dul.send_pdu(self.get_associate('reject'))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:1] == ['Sta2']
assert fsm._changes[:1] == [
('Sta1', 'Evt5', 'AE-5'),
]
assert fsm._events[:2] == ['Evt5', 'Evt8']
def test_evt09(self):
"""Test Sta2 + Evt9."""
# Sta2 + Evt9 -> <ignore> -> Sta2
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('wait', 0.3),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
assoc.dul.send_pdu(self.get_pdata())
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:1] == ['Sta2']
assert fsm._changes[:1] == [
('Sta1', 'Evt5', 'AE-5'),
]
assert fsm._events[:2] == ['Evt5', 'Evt9']
def test_evt10(self):
"""Test Sta2 + Evt10."""
# Sta2 + Evt10 -> AA-1 -> Sta13
# Evt10: Receive P-DATA-TF PDU from <remote>
# AA-1: Send A-ABORT PDU, start ARTIM
commands = [
('send', p_data_tf),
('recv', None),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta13']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt10', 'AA-1')
]
assert fsm._events[:2] == ['Evt5', 'Evt10']
def test_evt11(self):
"""Test Sta2 + Evt11."""
# Sta2 + Evt11 -> <ignore> -> Sta2
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('wait', 0.3),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:1] == ['Sta2']
assert fsm._changes[:1] == [
('Sta1', 'Evt5', 'AE-5'),
]
assert fsm._events[:2] == ['Evt5', 'Evt11']
def test_evt12(self):
"""Test Sta2 + Evt12."""
# Sta2 + Evt12 -> AA-1 -> Sta13
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AA-1: Send A-ABORT PDU, start ARTIM
commands = [
('send', a_release_rq),
('recv', None),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta13']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt12', 'AA-1')
]
assert fsm._events[:2] == ['Evt5', 'Evt12']
def test_evt13(self):
"""Test Sta2 + Evt13."""
# Sta2 + Evt13 -> AA-1 -> Sta13
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AA-1: Send A-ABORT PDU, start ARTIM
commands = [
('send', a_release_rp),
('recv', None),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta13']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt13', 'AA-1')
]
assert fsm._events[:2] == ['Evt5', 'Evt13']
def test_evt14(self):
"""Test Sta2 + Evt14."""
# Sta2 + Evt14 -> <ignore> -> Sta2
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
commands = [
('wait', 0.3),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:1] == ['Sta2']
assert fsm._changes[:1] == [
('Sta1', 'Evt5', 'AE-5'),
]
assert fsm._events[:2] == ['Evt5', 'Evt14']
def test_evt15(self):
"""Test Sta2 + Evt15."""
# Sta2 + Evt15 -> <ignore> -> Sta2
# Evt15: Receive A-ABORT (rq) primitive from <local user>
commands = [
('wait', 0.3),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
assoc.dul.send_pdu(self.get_abort())
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:1] == ['Sta2']
assert fsm._changes[:1] == [
('Sta1', 'Evt5', 'AE-5'),
]
assert fsm._events[:2] == ['Evt5', 'Evt15']
def test_evt16(self):
"""Test Sta2 + Evt16."""
# Sta2 + Evt16 -> AA-2 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-2: Stop ARTIM, close connection
commands = [
('send', a_abort),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta1']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt16', 'AA-2')
]
assert fsm._events[:2] == ['Evt5', 'Evt16']
def test_evt17(self):
"""Test Sta2 + Evt17."""
# Sta2 + Evt17 -> AA-5 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-5: Stop ARTIM timer
commands = []
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta1']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt17', 'AA-5')
]
assert fsm._events[:2] == ['Evt5', 'Evt17']
def test_evt18(self):
"""Test Sta2 + Evt18."""
# Sta2 + Evt18 -> AA-2 -> Sta1
# Evt18: ARTIM timer expired from <local service>
commands = [
('wait', 0.3),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
assoc.dul.artim_timer.timeout = 0.05
assoc.dul.artim_timer.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:1] == ['Sta2']
assert fsm._changes[:1] == [
('Sta1', 'Evt5', 'AE-5'),
]
assert fsm._events[:2] == ['Evt5', 'Evt18']
def test_evt19(self):
"""Test Sta2 + Evt19."""
# Sta2 + Evt19 -> AA-1 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-1: Send A-ABORT PDU, start ARTIM
commands = [
('send', b'\x08\x00\x00\x00\x00\x00\x00\x00'),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta13']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt19', 'AA-1')
]
assert fsm._events[:2] == ['Evt5', 'Evt19']
class TestState03(TestStateBase):
"""Tests for State 03: Awaiting A-ASSOCIATE (rsp) primitive."""
def test_evt01(self):
"""Test Sta3 + Evt1."""
# Sta3 + Evt1 -> <ignore> -> Sta3
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('wait', 0.1),
('send', a_associate_rq),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
pass
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
assoc.dul.send_pdu(self.get_associate('request'))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt1']
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta3 + Evt2."""
# Sta3 + Evt2 -> <ignore> -> Sta3
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta3 + Evt3."""
# Sta3 + Evt3 -> AA-8 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('wait', 0.1),
('send', a_associate_rq),
('send', a_associate_ac),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
# Keep the state machine in Sta3 for 0.5 s
time.sleep(0.5)
orig()
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt3', 'AA-8')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt3']
def test_evt04(self):
"""Test Sta3 + Evt4."""
# Sta3 + Evt4 -> AA-8 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('wait', 0.1),
('send', a_associate_rq),
('send', a_associate_rj),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
# Keep the state machine in Sta3 for 0.5 s
time.sleep(0.5)
orig()
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt4', 'AA-8')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt4']
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta3 + Evt5."""
# Sta3 + Evt5 -> <ignore> -> Sta3
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06(self):
"""Test Sta3 + Evt6."""
# Sta3 + Evt6 -> AA-8 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('wait', 0.1),
('send', a_associate_rq),
('send', a_associate_rq),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
# Keep the state machine in Sta3 for 0.5 s
time.sleep(0.5)
orig()
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt6', 'AA-8')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt6']
def test_evt07(self):
"""Test Sta3 + Evt7."""
# Sta3 + Evt7 -> AE-7 -> Sta6
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
# AE-7: Send A-ASSOCIATE-AC PDU
commands = [
('wait', 0.1),
('send', a_associate_rq),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:3] == ['Sta2', 'Sta3', 'Sta6']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt7']
def test_evt08(self):
"""Test Sta3 + Evt8."""
# Sta3 + Evt8 -> AE-8 -> Sta13
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
# AE-8: Send A-ASSOCIATE-RJ PDU and start ARTIM
commands = [
('wait', 0.1),
('send', a_associate_rq),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
assoc.dul.send_pdu(self.get_associate('reject'))
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt8', 'AE-8')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt8']
def test_evt09(self):
"""Test Sta3 + Evt9."""
# Sta3 + Evt9 -> <ignore> -> Sta3
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('wait', 0.1),
('send', a_associate_rq),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
assoc.dul.send_pdu(self.get_pdata())
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt9']
def test_evt10(self):
"""Test Sta3 + Evt10."""
# Sta3 + Evt10 -> AA-8 -> Sta13
# Evt10: Receive P-DATA-TF PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('wait', 0.1),
('send', a_associate_rq),
('send', p_data_tf),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
# Keep the state machine in Sta3 for 0.5 s
time.sleep(0.5)
orig()
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt10', 'AA-8')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt10']
def test_evt11(self):
"""Test Sta3 + Evt11."""
# Sta3 + Evt11 -> <ignore> -> Sta3
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('wait', 0.1),
('send', a_associate_rq),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
assoc.dul.send_pdu(self.get_release(False))
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt11']
def test_evt12(self):
"""Test Sta3 + Evt12."""
# Sta3 + Evt12 -> AA-8 -> Sta13
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('wait', 0.1),
('send', a_associate_rq),
('send', a_release_rq),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
# Keep the state machine in Sta3 for 0.5 s
time.sleep(0.5)
orig()
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt12', 'AA-8')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt12']
def test_evt13(self):
"""Test Sta3 + Evt13."""
# Sta3 + Evt13 -> AA-8 -> Sta13
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('wait', 0.1),
('send', a_associate_rq),
('send', a_release_rp),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
# Keep the state machine in Sta3 for 0.5 s
time.sleep(0.5)
orig()
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt13', 'AA-8')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt13']
def test_evt14(self):
"""Test Sta3 + Evt14."""
# Sta3 + Evt14 -> <ignore> -> Sta3
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
commands = [
('wait', 0.1),
('send', a_associate_rq),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
assoc.dul.send_pdu(self.get_release(True))
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt14']
def test_evt15(self):
"""Test Sta3 + Evt15."""
# Sta3 + Evt15 -> AA-1 -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
# AA-1: Send A-ABORT PDU, start ARTIM
commands = [
('wait', 0.1),
('send', a_associate_rq),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
assoc.dul.send_pdu(self.get_abort())
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt15', 'AA-1'),
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt15']
def test_evt16(self):
"""Test Sta3 + Evt16."""
# Sta3 + Evt16 -> AA-3 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-3: Issue A-ABORT or A-P-ABORT primitive, close connection
commands = [
('wait', 0.1),
('send', a_associate_rq),
('send', a_abort),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
# Keep the state machine in Sta3 for 0.5 s
time.sleep(0.5)
orig()
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt16', 'AA-3')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt16']
def test_evt17(self):
"""Test Sta3 + Evt17."""
# Sta3 + Evt17 -> AA-4 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-4: Issue A-P-ABORT primitive
commands = [
('wait', 0.1),
('send', a_associate_rq),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
# Keep the state machine in Sta3 for 0.5 s
time.sleep(0.5)
orig()
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt17', 'AA-4')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt17']
def test_evt18(self):
"""Test Sta3 + Evt18."""
# Sta3 + Evt18 -> <ignore> -> Sta3
# Evt18: ARTIM timer expired from <local service>
commands = [
('wait', 0.1),
('send', a_associate_rq),
('wait', 0.5)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
# Keep the state machine in Sta3 for 0.5 s
assoc.dul.artim_timer.timeout = 0.05
assoc.dul.artim_timer.start()
time.sleep(0.2)
orig()
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt18']
def test_evt19(self):
"""Test Sta3 + Evt19."""
# Sta3 + Evt19 -> AA-8 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('wait', 0.1),
('send', a_associate_rq),
('send', b'\x08\x00\x00\x00\x00\x00\x00\x00'),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
# Keep the state machine in Sta3 for 0.5 s
time.sleep(0.5)
orig()
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt19', 'AA-8')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt19']
class TestState04(TestStateBase):
"""Tests for State 04: Awaiting TRANSPORT_OPEN from <transport service>."""
def test_evt01(self):
"""Test Sta4 + Evt1."""
# Sta4 + Evt1 -> <ignore> -> Sta4
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('wait', 0.3)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_associate('request'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt1']
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta4 + Evt2."""
# Sta4 + Evt2 -> <ignore> -> Sta4
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta4 + Evt3."""
# Sta4 + Evt3 -> <ignore> -> Sta4
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
commands = [
('send', a_associate_ac)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt3']
def test_evt04(self):
"""Test Sta4 + Evt4."""
# Sta4 + Evt4 -> <ignore> -> Sta4
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
commands = [
('send', a_associate_rj)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt4']
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta4 + Evt5."""
# Sta4 + Evt5 -> AE-5 -> Sta2
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
# AE-5: Issue TRANSPORT_RESPONSE to <transport service>
# Start ARTIM timer
pass
def test_evt06(self):
"""Test Sta4 + Evt6."""
# Sta4 + Evt6 -> <ignore> -> Sta4
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
commands = [
('send', a_associate_rq)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt6']
def test_evt07(self):
"""Test Sta4 + Evt7."""
# Sta4 + Evt7 -> <ignore> -> Sta4
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('wait', 0.3)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_associate('accept'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt7']
def test_evt08(self):
"""Test Sta4 + Evt8."""
# Sta4 + Evt8 -> <ignore> -> Sta4
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('wait', 0.3)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_associate('reject'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt8']
def test_evt09(self):
"""Test Sta4 + Evt9."""
# Sta4 + Evt9 -> <ignore> -> Sta4
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('wait', 0.3)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_pdata())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt9']
def test_evt10(self):
"""Test Sta4 + Evt10."""
# Sta4 + Evt10 -> <ignore> -> Sta4
# Evt10: Receive P-DATA-TF PDU from <remote>
commands = [
('send', p_data_tf)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt10']
def test_evt11(self):
"""Test Sta4 + Evt11."""
# Sta4 + Evt11 -> <ignore> -> Sta4
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('wait', 0.3)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt11']
def test_evt12(self):
"""Test Sta4 + Evt12."""
# Sta4 + Evt12 -> <ignore> -> Sta4
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
commands = [
('send', a_release_rq)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt12']
def test_evt13(self):
"""Test Sta4 + Evt13."""
# Sta4 + Evt13 -> <ignore> -> Sta4
# Evt13: Receive A-RELEASE-RP PDU from <remote>
commands = [
('send', a_release_rp)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
while self.fsm.current_state != 'Sta4':
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt13']
def test_evt14(self):
"""Test Sta4 + Evt14."""
# Sta4 + Evt14 -> <ignore> -> Sta4
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
commands = [
('wait', 0.3)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt14']
def test_evt15(self):
"""Test Sta4 + Evt15."""
# Sta4 + Evt15 -> <ignore> -> Sta4
# Evt15: Receive A-ABORT (rq) primitive from <local user>
commands = [
('wait', 0.3)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_abort())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt15']
def test_evt16(self):
"""Test Sta4 + Evt16."""
# Sta4 + Evt16 -> <ignore> -> Sta4
# Evt16: Receive A-ABORT PDU from <remote>
commands = [
('send', a_abort)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt16']
def test_evt17(self):
"""Test Sta4 + Evt17."""
# Sta4 + Evt17 -> <ignore> -> Sta4
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
commands = []
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt17']
def test_evt18(self):
"""Test Sta4 + Evt18."""
# Sta4 + Evt18 -> <ignore> -> Sta4
# Evt18: ARTIM timer expired from <local service>
commands = [
('wait', 0.3)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
self.assoc.dul.artim_timer.timeout = 0.05
self.assoc.dul.artim_timer.start()
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt18']
def test_evt19(self):
"""Test Sta4 + Evt19."""
# Sta4 + Evt19 -> <ignore> -> Sta4
# Evt19: Received unrecognised or invalid PDU from <remote>
commands = [
('send', b'\x08\x00\x00\x00\x00\x00\x00\x00\x00')
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt19']
class TestState05(TestStateBase):
"""Tests for State 05: Awaiting A-ASSOCIATE-AC or A-ASSOCIATE-RJ PDU."""
def test_evt01(self):
"""Test Sta5 + Evt1."""
# Sta5 + Evt1 -> <ignore> -> Sta5
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('recv', None), # recv a-associate-rq
('wait', 0.2)
]
scp = self.start_server(commands)
self.assoc.start()
while self.fsm.current_state != 'Sta5':
time.sleep(0.05)
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_associate('request'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:2] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
]
assert self.fsm._transitions[:2] == ['Sta4', 'Sta5']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt1']
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta5 + Evt2."""
# Sta5 + Evt2 -> <ignore> -> Sta5
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta5 + Evt3."""
# Sta5 + Evt3 -> AE-3 -> Sta6
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AE-3: Issue A-ASSOCIATE (ac) primitive
commands = [
('recv', None),
('send', a_associate_ac),
]
scp = self.start_server(commands)
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt3']
def test_evt04(self):
"""Test Sta5 + Evt4."""
# Sta5 + Evt4 -> AE-4 -> Sta1
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AE-4: Issue A-ASSOCIATE (rj) primitive
commands = [
('recv', None),
('send', a_associate_rj),
]
scp = self.start_server(commands)
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt4', 'AE-4'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta1']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt4']
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta1 + Evt5."""
# Sta5 + Evt5 -> <ignore> -> Sta5
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
# AE-5: Issue TRANSPORT_RESPONSE to <transport service>
# Start ARTIM timer
pass
def test_evt06(self):
"""Test Sta5 + Evt6."""
# Sta5 + Evt6 -> AA-8 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_rq),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt6']
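# Issue A-ABORT PDU; byte 8 is the abort source (0x02: DUL service-provider)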
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt07(self):
"""Test Sta5 + Evt7."""
# Sta5 + Evt7 -> <ignore> -> Sta5
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('recv', None),
('wait', 0.2),
]
scp = self.start_server(commands)
self.assoc.start()
while self.fsm.current_state != 'Sta5':
time.sleep(0.05)
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_associate('accept'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:2] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
]
assert self.fsm._transitions[:2] == ['Sta4', 'Sta5']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt7']
def test_evt08(self):
"""Test Sta5 + Evt8."""
# Sta5 + Evt8 -> <ignore> -> Sta5
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('recv', None),
('wait', 0.2),
]
scp = self.start_server(commands)
self.assoc.start()
while self.fsm.current_state != 'Sta5':
time.sleep(0.05)
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_associate('reject'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:2] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
]
assert self.fsm._transitions[:2] == ['Sta4', 'Sta5']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt8']
def test_evt09(self):
"""Test Sta5 + Evt9."""
# Sta5 + Evt9 -> <ignore> -> Sta5
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('recv', None), # recv a-associate-rq
('wait', 0.2),
]
scp = self.start_server(commands)
self.assoc.start()
while self.fsm.current_state != 'Sta5':
time.sleep(0.05)
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_pdata())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:2] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
]
assert self.fsm._transitions[:2] == ['Sta4', 'Sta5']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt9']
def test_evt10(self):
"""Test Sta5 + Evt10."""
# Sta5 + Evt10 -> AA-8 -> Sta13
# Evt10: Receive P-DATA-TF PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', p_data_tf),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt10', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt10']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt11(self):
"""Test Sta5 + Evt11."""
# Sta5 + Evt11 -> <ignore> -> Sta5
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('recv', None),
('wait', 0.2),
]
scp = self.start_server(commands)
self.assoc.start()
while self.fsm.current_state != 'Sta5':
time.sleep(0.05)
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:2] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
]
assert self.fsm._transitions[:2] == ['Sta4', 'Sta5']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt11']
def test_evt12(self):
"""Test Sta5 + Evt12."""
# Sta5 + Evt12 -> AA-8 -> Sta13
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_release_rq),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt12', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt12']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt13(self):
"""Test Sta5 + Evt13."""
# Sta5 + Evt13 -> AA-8 -> Sta13
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_release_rp),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt13', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt13']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt14(self):
"""Test Sta5 + Evt14."""
# Sta5 + Evt14 -> <ignore> -> Sta5
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
commands = [
('recv', None),
('wait', 0.2),
]
scp = self.start_server(commands)
self.assoc.start()
while self.fsm.current_state != 'Sta5':
time.sleep(0.05)
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:2] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
]
assert self.fsm._transitions[:2] == ['Sta4', 'Sta5']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt14']
def test_evt15(self):
"""Test Sta5 + Evt15."""
# Sta5 + Evt15 -> AA-1 -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
# AA-1: Send A-ABORT PDU and restart ARTIM
commands = [
('recv', None),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
while self.fsm.current_state != 'Sta5':
time.sleep(0.05)
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_abort())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt15', 'AA-1'),
('Sta13', 'Evt17', 'AR-5'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta13', 'Sta1']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt15', 'Evt17']
# Issue A-ABORT PDU
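# Byte 8 is the abort source (0x00: DUL service-user, as the local user requested the abort)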
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x00\x00'
)
def test_evt16(self):
"""Test Sta5 + Evt16."""
# Sta5 + Evt16 -> AA-3 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-3: If service user initiated:
# Issue A-ABORT primitive and close transport
# Otherwise
# Issue A-P-ABORT primitive and close transport
commands = [
('recv', None),
('send', a_abort),
]
scp = self.start_server(commands)
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt16', 'AA-3'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta1']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt16']
def test_evt17(self):
"""Test Sta5 + Evt17."""
# Sta5 + Evt17 -> AA-4 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-4: Issue A-P-ABORT primitive
commands = [
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt17', 'AA-4'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta1']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt17']
def test_evt18(self):
"""Test Sta5 + Evt18."""
# Sta5 + Evt18 -> <ignore> -> Sta5
# Evt18: ARTIM timer expired from <local service>
commands = [
('recv', None),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while self.fsm.current_state != 'Sta5':
time.sleep(0.05)
time.sleep(0.1)
self.assoc.dul.artim_timer.timeout = 0.05
self.assoc.dul.artim_timer.start()
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt18']
def test_evt19(self):
"""Test Sta5 + Evt19."""
# Sta5 + Evt19 -> AA-8 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', b'\x08\x00\x00\x00\x00\x00'),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt19', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt19']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
class TestState06(TestStateBase):
"""Tests for State 06: Association established and ready for data."""
def test_evt01(self):
"""Test Sta6 + Evt1."""
# Sta6 + Evt1 -> <ignore> -> Sta6
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.3)
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_associate('request'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt1']
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta6 + Evt2."""
# Sta6 + Evt2 -> <ignore> -> Sta6
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta6 + Evt3."""
# Sta6 + Evt3 -> AA-8 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_associate_ac),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.01)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt3', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt3']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt04(self):
"""Test Sta6 + Evt4."""
# Sta6 + Evt4 -> AA-8 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_associate_rj),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt4', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt4']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta6 + Evt5."""
# Sta6 + Evt5 -> <ignore> -> Sta6
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06(self):
"""Test Sta6 + Evt6."""
# Sta6 + Evt6 -> AA-8 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_associate_rq),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt6']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt07(self):
"""Test Sta6 + Evt7."""
# Sta6 + Evt7 -> <ignore> -> Sta6
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_associate('accept'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt7']
def test_evt08(self):
"""Test Sta6 + Evt8."""
# Sta6 + Evt8 -> <ignore> -> Sta6
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_associate('reject'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt8']
def test_evt09(self):
"""Test Sta6 + Evt9."""
# Sta6 + Evt9 -> DT-1 -> Sta6
# Evt9: Receive P-DATA primitive from <local user>
# DT-1: Send P-DATA-TF PDU
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_pdata())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt9', 'DT-1'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta6']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt9']
def test_evt10(self):
"""Test Sta6 + Evt10."""
# Sta6 + Evt10 -> DT-2 -> Sta6
# Evt10: Receive P-DATA-TF PDU from <remote>
# DT-2: Send P-DATA primitive
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', p_data_tf),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt10', 'DT-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta6']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt10']
def test_evt11(self):
"""Test Sta6 + Evt11."""
# Sta6 + Evt11 -> AR-1 -> Sta7
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt11']
def test_evt12(self):
"""Test Sta6 + Evt12."""
# Sta6 + Evt12 -> AR-2 -> Sta8
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AR-2: Issue A-RELEASE (rq) primitive
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt12']
def test_evt13(self):
"""Test Sta6 + Evt13."""
# Sta6 + Evt13 -> AA-8 -> Sta13
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rp),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt13', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt13']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt14(self):
"""Test Sta6 + Evt14."""
# Sta6 + Evt14 -> <ignore> -> Sta6
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt14']
def test_evt15(self):
"""Test Sta6 + Evt15."""
# Sta6 + Evt15 -> AA-1 -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
# AA-1: Send A-ABORT PDU and start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.abort()
time.sleep(0.1)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt15']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x00\x00'
)
def test_evt16(self):
"""Test Sta6 + Evt16."""
# Sta6 + Evt16 -> AA-3 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-3: Issue A-ABORT or A-P-ABORT, and close connection
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_abort),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt16', 'AA-3'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta1']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt16']
def test_evt17(self):
"""Test Sta6 + Evt17."""
# Sta6 + Evt17 -> AA-4 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-4: Issue A-P-ABORT primitive
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt17', 'AA-4'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta1']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt17']
def test_evt18(self):
"""Test Sta6 + Evt18."""
# Sta6 + Evt18 -> <ignore> -> Sta6
# Evt18: ARTIM timer expired from <local service>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.4),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.artim_timer.timeout = 0.05
self.assoc.dul.artim_timer.start()
time.sleep(0.1)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt18']
def test_evt19(self):
"""Test Sta6 + Evt19."""
# Sta6 + Evt19 -> AA-8 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', b'\x08\x00\x00\x00\x00\x00'),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt19', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt19']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
class TestState07(TestStateBase):
"""Tests for State 07: Awaiting A-RELEASE-RP PDU."""
def test_evt01(self):
"""Test Sta7 + Evt1."""
# Sta7 + Evt1 -> <ignore> -> Sta7
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
self.assoc.dul.send_pdu(self.get_associate('request'))
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt1']
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta7 + Evt2."""
# Sta7 + Evt2 -> <ignore> -> Sta7
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta7 + Evt3."""
# Sta7 + Evt3 -> AA-8 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_associate_ac),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt3', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt3']
# Issue A-ASSOCIATE, A-RELEASE, A-ABORT PDU
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt04(self):
"""Test Sta7 + Evt4."""
# Sta7 + Evt4 -> AA-8 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_associate_rj),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt4', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt4']
# Issue A-ASSOCIATE, A-RELEASE, A-ABORT PDU
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta7 + Evt5."""
# Sta7 + Evt5 -> <ignore> -> Sta7
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06(self):
"""Test Sta7 + Evt6."""
# Sta7 + Evt6 -> AA-8 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_associate_rq),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt6']
# Issue A-ASSOCIATE, A-RELEASE, A-ABORT PDU
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt07(self):
"""Test Sta7 + Evt7."""
# Sta7 + Evt7 -> <ignore> -> Sta7
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
self.assoc.dul.send_pdu(self.get_associate('accept'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt7']
def test_evt08(self):
"""Test Sta7 + Evt8."""
# Sta7 + Evt8 -> <ignore> -> Sta7
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
self.assoc.dul.send_pdu(self.get_associate('reject'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt8']
def test_evt09(self):
"""Test Sta7 + Evt9."""
# Sta7 + Evt9 -> <ignore> -> Sta7
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
self.assoc.dul.send_pdu(self.get_pdata())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt9']
def test_evt10(self):
"""Test Sta7 + Evt10."""
# Sta7 + Evt10 -> AR-6 -> Sta7
# Evt10: Receive P-DATA-TF PDU from <remote>
# AR-6: Send P-DATA primitive
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', p_data_tf),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
#primitive = self.assoc.dul.receive_pdu(wait=False)
#assert isinstance(primitive, P_DATA)
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt10', 'AR-6'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt10']
def test_evt11(self):
"""Test Sta7 + Evt11."""
# Sta7 + Evt11 -> <ignore> -> Sta7
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt11']
def test_evt12(self):
"""Test Sta7 + Evt12."""
# Sta7 + Evt12 -> AR-8 -> Sta9 or Sta10
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AR-8: Issue A-RELEASE (rq) - release collision
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12']
def test_evt13(self):
"""Test Sta7 + Evt13."""
# Sta7 + Evt13 -> AR-3 -> Sta1
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AR-3: Issue A-RELEASE (rp) primitive and close connection
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rp),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
primitive = self.assoc.dul.receive_pdu(wait=False)
assert isinstance(primitive, A_RELEASE)
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt13', 'AR-3'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt13']
def test_evt14(self):
"""Test Sta7 + Evt14."""
# Sta7 + Evt14 -> <ignore> -> Sta7
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt14']
def test_evt15(self):
"""Test Sta7 + Evt15."""
# Sta7 + Evt15 -> AA-1 -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
# AA-1: Send A-ABORT PDU and start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
self.assoc.dul.send_pdu(self.get_abort())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt15']
def test_evt16(self):
"""Test Sta7 + Evt16."""
# Sta7 + Evt16 -> AA-3 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-3: Issue A-ABORT or A-P-ABORT and close connection
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_abort),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt16', 'AA-3'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt16']
def test_evt17(self):
"""Test Sta7 + Evt17."""
# Sta7 + Evt17 -> AA-4 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-4: Issue A-P-ABORT primitive
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt17', 'AA-4'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt17']
def test_evt18(self):
"""Test Sta7 + Evt18."""
# Sta7 + Evt18 -> <ignore> -> Sta7
# Evt18: ARTIM timer expired from <local service>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
self.assoc.dul.artim_timer.timeout = 0.05
self.assoc.dul.artim_timer.start()
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt18']
def test_evt19(self):
"""Test Sta7 + Evt19."""
# Sta7 + Evt19 -> AA-8 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', b'\x08\x00\x00\x00\x00\x00'),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt19', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt19']
# Issue A-ASSOCIATE, A-RELEASE, A-ABORT PDU
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
class TestState08(TestStateBase):
"""Tests for State 08: Awaiting A-RELEASE (rp) primitive."""
def test_evt01(self):
"""Test Sta8 + Evt1."""
# Sta8 + Evt1 -> <ignore> -> Sta8
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_associate('request'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt1']
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta8 + Evt2."""
# Sta8 + Evt2 -> <ignore> -> Sta8
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta8 + Evt3."""
# Sta8 + Evt3 -> AA-8 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('send', a_associate_ac),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt3', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt3']
def test_evt04(self):
"""Test Sta8 + Evt4."""
# Sta8 + Evt4 -> AA-8 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('send', a_associate_rj),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt4', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt4']
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta8 + Evt5."""
# Sta8 + Evt5 -> <ignore> -> Sta8
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06(self):
"""Test Sta8 + Evt6."""
# Sta8 + Evt6 -> AA-8 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('send', a_associate_rq),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt6']
def test_evt07(self):
"""Test Sta8 + Evt7."""
# Sta8 + Evt7 -> <ignore> -> Sta8
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_associate('accept'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt7']
def test_evt08(self):
"""Test Sta8 + Evt8."""
# Sta8 + Evt8 -> <ignore> -> Sta8
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_associate('reject'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt8']
def test_evt09(self):
"""Test Sta8 + Evt9."""
# Sta8 + Evt9 -> AR-7 -> Sta8
# Evt9: Receive P-DATA primitive from <local user>
# AR-7: Send P-DATA-TF PDU to <remote>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_pdata())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt9']
def test_evt10(self):
"""Test Sta8 + Evt10."""
# Sta8 + Evt10 -> AA-8 -> Sta13
# Evt10: Receive P-DATA-TF PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('send', p_data_tf),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt10', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt10']
def test_evt11(self):
"""Test Sta8 + Evt11."""
# Sta8 + Evt11 -> <ignore> -> Sta8
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt11']
def test_evt12(self):
"""Test Sta8 + Evt12."""
# Sta8 + Evt12 -> AA-8 -> Sta13
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None), # get a_assoc_rq
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('send', a_release_rq),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt12', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt12']
def test_evt13(self):
"""Test Sta8 + Evt13."""
# Sta8 + Evt13 -> AA-8 -> Sta13
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('send', a_release_rp),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt13', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt13']
def test_evt14(self):
"""Test Sta8 + Evt14."""
# Sta8 + Evt14 -> AR-4 -> Sta13
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
# AR-4: Send A-RELEASE-RP PDU and start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt14']
def test_evt15(self):
"""Test Sta8 + Evt15."""
# Sta8 + Evt15 -> AA-1 -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
# AA-1: Send A-ABORT PDU and start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_abort())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt15']
def test_evt16(self):
"""Test Sta8 + Evt16."""
# Sta8 + Evt16 -> AA-3 -> Sta13
# Evt16: Receive A-ABORT PDU from <remote>
# AA-3: Issue A-ABORT or A-P-ABORT and close connection
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('send', a_abort),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt16', 'AA-3'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt16']
def test_evt17(self):
"""Test Sta8 + Evt17."""
# Sta8 + Evt17 -> AA-4 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-4: Issue A-P-ABORT
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt17', 'AA-4'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt17']
def test_evt18(self):
"""Test Sta8 + Evt18."""
        # Sta8 + Evt18 -> <ignore> -> Sta8
# Evt18: ARTIM timer expired from <local service>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.artim_timer.timeout = 0.05
self.assoc.dul.artim_timer.start()
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt18']
def test_evt19(self):
"""Test Sta8 + Evt19."""
# Sta8 + Evt19 -> AA-8 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('send', b'\x08\x00\x00\x00\x00\x00'),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt19', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt19']
class TestState09(TestStateBase):
"""Tests for State 09: Release collision req - awaiting A-RELEASE (rp)."""
def test_evt01(self):
"""Test Sta9 + Evt1."""
# Sta9 + Evt1 -> <ignore> -> Sta9
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('wait', 0.1), # no response
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_associate('request'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt1'
]
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta9 + Evt2."""
# Sta9 + Evt2 -> <ignore> -> Sta9
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta9 + Evt3."""
# Sta9 + Evt3 -> AA-8 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_associate_ac), # trigger event
('recv', None) # recv a-abort
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt3', 'AA-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt3'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt04(self):
"""Test Sta9 + Evt4."""
# Sta9 + Evt4 -> AA-8 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq),
('send', a_associate_rj),
('recv', None), # recv a-abort
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt4', 'AA-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt4'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta9 + Evt5."""
# Sta9 + Evt5 -> <ignore> -> Sta9
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06(self):
"""Test Sta9 + Evt6."""
# Sta9 + Evt6 -> AA-8 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_associate_rq), # trigger event
('recv', None), # recv a-abort
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt6'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt07(self):
"""Test Sta9 + Evt7."""
# Sta9 + Evt7 -> <ignore> -> Sta9
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('wait', 0.1),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_associate('accept'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt7'
]
def test_evt08(self):
"""Test Sta9 + Evt8."""
# Sta9 + Evt8 -> <ignore> -> Sta9
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('wait', 0.1),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_associate('reject'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt8'
]
def test_evt09(self):
"""Test Sta9 + Evt9."""
# Sta9 + Evt9 -> <ignore> -> Sta9
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('wait', 0.1),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_pdata())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt9'
]
def test_evt10(self):
"""Test Sta9 + Evt10."""
# Sta9 + Evt10 -> AA-8 -> Sta13
# Evt10: Receive P-DATA-TF PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', p_data_tf), # trigger event
('recv', None), # recv a-abort
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt10', 'AA-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt10'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt11(self):
"""Test Sta9 + Evt11."""
# Sta9 + Evt11 -> <ignore> -> Sta9
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('wait', 0.1),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt11'
]
def test_evt12(self):
"""Test Sta9 + Evt12."""
# Sta9 + Evt12 -> AA-8 -> Sta13
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rq), # trigger event
('recv', None), # recv a-abort
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt12', 'AA-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt12'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt13(self):
"""Test Sta9 + Evt13."""
# Sta9 + Evt13 -> AA-8 -> Sta13
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp), # trigger event
('recv', None), # recv a-abort
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt13', 'AA-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt13'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt14(self):
"""Test Sta9 + Evt14."""
# Sta9 + Evt14 -> AR-9 -> Sta11
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
# AR-9: Send A-RELEASE-RP PDU to <remote>
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq),
('recv', None), # recv a-release-rp
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14'
]
assert scp.handlers[0].received[2] == (
b'\x06\x00\x00\x00\x00\x04\x00\x00\x00\x00'
)
def test_evt15(self):
"""Test Sta9 + Evt15."""
# Sta9 + Evt15 -> AA-1 -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
# AA-1: Send A-ABORT PDU to <remote>, start ARTIM
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('recv', None), # recv a-abort
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_abort())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt15'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x00\x00'
)
def test_evt16(self):
"""Test Sta9 + Evt16."""
# Sta9 + Evt16 -> AA-3 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-3: Issue A-ABORT or A-P-ABORT primitive, close connection
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_abort), # trigger event
('wait', 0.1),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt16', 'AA-3'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt16'
]
def test_evt17(self):
"""Test Sta9 + Evt17."""
# Sta9 + Evt17 -> AA-4 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-4: Issue A-P-ABORT primitive
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt17', 'AA-4'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt17'
]
def test_evt18(self):
"""Test Sta9 + Evt18."""
# Sta9 + Evt18 -> <ignore> -> Sta9
# Evt18: ARTIM timer expired from <local service>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('wait', 0.2),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.artim_timer.timeout = 0.05
self.assoc.dul.artim_timer.start()
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert self.fsm._transitions[:4] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt18'
]
def test_evt19(self):
"""Test Sta9 + Evt19."""
# Sta9 + Evt19 -> AA-8 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', b'\x08\x00\x00\x00\x00\x00'), # trigger event
('recv', None), # recv a-abort
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt19', 'AA-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt19'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
class TestState10(TestStateBase):
"""Tests for State 10: Release collision acc - awaiting A-RELEASE-RP ."""
def test_evt01(self):
"""Test Sta10 + Evt1."""
# Sta10 + Evt1 -> <ignore> -> Sta10
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_associate('request'))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:5] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt1'
]
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta10 + Evt2."""
# Sta10 + Evt2 -> <ignore> -> Sta10
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta10 + Evt3."""
# Sta10 + Evt3 -> AA-8 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_associate_ac), # trigger event
('recv', None), # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt3', 'AA-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt3'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt04(self):
"""Test Sta10 + Evt4."""
# Sta10 + Evt4 -> AA-8 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_associate_rj), # trigger event
('recv', None), # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt4', 'AA-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt4'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta10 + Evt5."""
# Sta10 + Evt5 -> <ignore> -> Sta10
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06(self):
"""Test Sta10 + Evt6."""
# Sta10 + Evt6 -> AA-8 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_associate_rq), # trigger event
('recv', None), # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt6', 'AA-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt6'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt07(self):
"""Test Sta10 + Evt7."""
# Sta10 + Evt7 -> <ignore> -> Sta10
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('wait', 0.1)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_associate('accept'))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:5] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt7'
]
def test_evt08(self):
"""Test Sta10 + Evt8."""
# Sta10 + Evt8 -> <ignore> -> Sta10
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('wait', 0.1)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_associate('reject'))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:5] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt8'
]
def test_evt09(self):
"""Test Sta10 + Evt9."""
# Sta10 + Evt9 -> <ignore> -> Sta10
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('wait', 0.1)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_pdata())
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:5] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt9'
]
def test_evt10(self):
"""Test Sta10 + Evt10."""
# Sta10 + Evt10 -> AA-8 -> Sta13
# Evt10: Receive P-DATA-TF PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', p_data_tf), # trigger event
('recv', a_abort), # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt10', 'AA-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt10'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt11(self):
"""Test Sta10 + Evt11."""
# Sta10 + Evt11 -> <ignore> -> Sta10
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('wait', 0.1)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:5] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt11'
]
def test_evt12(self):
"""Test Sta10 + Evt12."""
# Sta10 + Evt12 -> AA-8 -> Sta13
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rq), # trigger event
('recv', a_abort), # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt12', 'AA-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt12'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt13(self):
"""Test Sta10 + Evt13."""
# Sta10 + Evt13 -> AR-10 -> Sta12
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AR-10: Issue A-RELEASE (rp) primitive
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp), # trigger event
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13'
]
def test_evt14(self):
"""Test Sta10 + Evt14."""
# Sta10 + Evt14 -> <ignore> -> Sta10
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('wait', 0.1)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:5] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt14'
]
def test_evt15(self):
"""Test Sta10 + Evt15."""
# Sta10 + Evt15 -> AA-1 -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
# AA-1: Send A-ABORT PDU to <remote>, start ARTIM
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('recv', None) # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_abort())
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt15', 'AA-1'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt15'
]
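# Unlike the AA-8 cases above, AA-1 sends a user-initiated abort, so the
# source byte here is expected to be 0x00 (DUL service-user) rather than 0x02.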
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x00\x00'
)
def test_evt16(self):
"""Test Sta10 + Evt16."""
# Sta10 + Evt16 -> AA-3 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-3: Issue A-ABORT or A-P-ABORT primitive, close connection
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_abort), # trigger event
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt16', 'AA-3'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt16'
]
def test_evt17(self):
"""Test Sta10 + Evt17."""
# Sta10 + Evt17 -> AA-4 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-4: Issue A-P-ABORT primitive
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt17', 'AA-4'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt17'
]
def test_evt18(self):
"""Test Sta10 + Evt18."""
# Sta10 + Evt18 -> <ignore> -> Sta10
# Evt18: ARTIM timer expired from <local service>
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('wait', 0.2),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.artim_timer.timeout = 0.05
assoc.dul.artim_timer.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:5] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt18'
]
def test_evt19(self):
"""Test Sta10 + Evt19."""
# Sta10 + Evt19 -> AA-8 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', b'\x08\x00\x00\x00\x00\x00\x00\x00'), # trigger event
('recv', a_abort), # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt19', 'AA-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt19'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
class TestState11(TestStateBase):
"""Tests for State 11: Release collision req - awaiting A-RELEASE-RP PDU"""
def test_evt01(self):
"""Test Sta11 + Evt1."""
# Sta11 + Evt1 -> <ignore> -> Sta11
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('wait', 0.2),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_associate('request'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', 'Sta11'
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt1'
]
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta11 + Evt2."""
# Sta11 + Evt2 -> <ignore> -> Sta11
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta11 + Evt3."""
# Sta11 + Evt3 -> AA-8 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('send', a_associate_ac),
('recv', None),
('wait', 0.1),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:7] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
('Sta11', 'Evt3', 'AA-8'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', 'Sta11'
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt3',
]
def test_evt04(self):
"""Test Sta11 + Evt4."""
# Sta11 + Evt4 -> AA-8 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('send', a_associate_rj),
('recv', None),
('wait', 0.1),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:7] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
('Sta11', 'Evt4', 'AA-8'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', 'Sta11'
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt4',
]
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta11 + Evt5."""
# Sta11 + Evt5 -> <ignore> -> Sta11
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06(self):
"""Test Sta11 + Evt6."""
# Sta11 + Evt6 -> AA-8 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('send', a_associate_rq),
('recv', None),
('wait', 0.1),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:7] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
('Sta11', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', 'Sta11'
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt6',
]
def test_evt07(self):
"""Test Sta11 + Evt7."""
# Sta11 + Evt7 -> <ignore> -> Sta11
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('wait', 0.2),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_associate('accept'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', 'Sta11'
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt7'
]
def test_evt08(self):
"""Test Sta11 + Evt8."""
# Sta11 + Evt8 -> <ignore> -> Sta11
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('wait', 0.2),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_associate('reject'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', 'Sta11'
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt8'
]
def test_evt09(self):
"""Test Sta11 + Evt9."""
# Sta11 + Evt9 -> <ignore> -> Sta11
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('wait', 0.2),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_pdata())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', 'Sta11'
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt9'
]
def test_evt10(self):
"""Test Sta11 + Evt10."""
# Sta11 + Evt10 -> AA-8 -> Sta13
# Evt10: Receive P-DATA-TF PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('send', p_data_tf),
('recv', None),
('wait', 0.1),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:7] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
('Sta11', 'Evt10', 'AA-8'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', 'Sta11'
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt10',
]
def test_evt11(self):
"""Test Sta11 + Evt11."""
# Sta11 + Evt11 -> <ignore> -> Sta11
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('wait', 0.2),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', 'Sta11'
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt11'
]
def test_evt12(self):
"""Test Sta11 + Evt12."""
# Sta11 + Evt12 -> AA-8 -> Sta13
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('send', a_release_rq),
('recv', None),
('wait', 0.1),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:7] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
('Sta11', 'Evt12', 'AA-8'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', 'Sta11'
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt12',
]
def test_evt13(self):
"""Test Sta11 + Evt13."""
# Sta11 + Evt13 -> AR-3 -> Sta1
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AR-3: Issue A-RELEASE (rp) primitive and close connection
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('send', a_release_rp),
('wait', 0.2),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:7] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
('Sta11', 'Evt13', 'AR-3'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', 'Sta11'
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt13',
]
def test_evt14(self):
"""Test Sta11 + Evt14."""
# Sta11 + Evt14 -> <ignore> -> Sta11
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('wait', 0.2),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', 'Sta11'
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt14'
]
def test_evt15(self):
"""Test Sta11 + Evt15."""
# Sta11 + Evt15 -> AA-1 -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
# AA-1: Send A-ABORT PDU to <remote>, start ARTIM
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('recv', None), # recv a-release-rp
('recv', None), # recv a-abort
('wait', 0.2),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_abort())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', 'Sta11'
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt15'
]
def test_evt16(self):
"""Test Sta11 + Evt16."""
# Sta11 + Evt16 -> AA-3 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-3: Issue A-ABORT or A-P-ABORT primitive, close connection
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('send', a_abort),
('wait', 0.2),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:7] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
('Sta11', 'Evt16', 'AA-3'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', 'Sta11'
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt16',
]
def test_evt17(self):
"""Test Sta11 + Evt17."""
# Sta11 + Evt17 -> AA-4 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-4: Issue A-P-ABORT primitive
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:7] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
('Sta11', 'Evt17', 'AA-4'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', 'Sta11'
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt17',
]
def test_evt18(self):
"""Test Sta11 + Evt18."""
# Sta11 + Evt18 -> <ignore> -> Sta11
# Evt18: ARTIM timer expired from <local service>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('wait', 0.2),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
self.assoc.dul.artim_timer.timeout = 0.05
self.assoc.dul.artim_timer.start()
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt18',
]
def test_evt19(self):
"""Test Sta11 + Evt19."""
# Sta11 + Evt19 -> AA-8 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('send', b'\x08\x00\x00\x00\x00\x00'),
('recv', None),
('wait', 0.1),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:7] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
('Sta11', 'Evt19', 'AA-8'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', 'Sta11'
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt19',
]
class TestState12(TestStateBase):
"""Tests for State 12: Release collision acc - awaiting A-RELEASE (rp)"""
def test_evt01(self):
"""Test Sta12 + Evt1."""
# Sta12 + Evt1 -> <ignore> -> Sta12
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('wait', 0.1),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_associate('request'))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt1'
]
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta12 + Evt2."""
# Sta12 + Evt2 -> <ignore> -> Sta12
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta12 + Evt3."""
# Sta12 + Evt3 -> AA-8 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('send', a_associate_ac), # trigger event
('recv', None) # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt3', 'AA-8'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt3'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt04(self):
"""Test Sta12 + Evt4."""
# Sta12 + Evt4 -> AA-8 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('send', a_associate_rj), # trigger event
('recv', None) # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt4', 'AA-8'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt4'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta12 + Evt5."""
# Sta12 + Evt5 -> <ignore> -> Sta12
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06(self):
"""Test Sta12 + Evt6."""
# Sta12 + Evt6 -> AA-8 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('send', a_associate_rq), # trigger event
('recv', None) # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt6', 'AA-8'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt6'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt07(self):
"""Test Sta12 + Evt7."""
# Sta12 + Evt7 -> <ignore> -> Sta12
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('wait', 0.1),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_associate('accept'))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt7'
]
def test_evt08(self):
"""Test Sta12 + Evt8."""
# Sta12 + Evt8 -> <ignore> -> Sta12
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('wait', 0.1),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_associate('reject'))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt8'
]
def test_evt09(self):
"""Test Sta12 + Evt9."""
# Sta12 + Evt9 -> <ignore> -> Sta12
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('wait', 0.1),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_pdata())
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt9'
]
def test_evt10(self):
"""Test Sta12 + Evt10."""
# Sta12 + Evt10 -> AA-8 -> Sta13
# Evt10: Receive P-DATA-TF PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('send', p_data_tf), # trigger event
('recv', None) # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt10', 'AA-8'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt10'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt11(self):
"""Test Sta12 + Evt11."""
# Sta12 + Evt11 -> <ignore> -> Sta12
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('wait', 0.1),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt11'
]
def test_evt12(self):
"""Test Sta12 + Evt12."""
# Sta12 + Evt12 -> AA-8 -> Sta13
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('send', a_release_rq), # trigger event
('recv', None) # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt12', 'AA-8'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt12'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt13(self):
"""Test Sta12 + Evt13."""
# Sta12 + Evt13 -> AA-8 -> Sta13
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('send', a_release_rp), # trigger event
('recv', None) # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt13', 'AA-8'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt13'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt14(self):
"""Test Sta12 + Evt14."""
# Sta12 + Evt14 -> AR-4 -> Sta13
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
# AR-4: Issue A-RELEASE-RP PDU and start ARTIM
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('recv', None), # recv a-release-rp
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt14', 'AR-4'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt14'
]
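# AR-4 issues an A-RELEASE-RP PDU, so (assuming the standard UL PDU layout)
# the third PDU received by the peer should be PDU type 0x06 with a 4-byte
# length of 4 followed by four reserved bytes.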
assert scp.handlers[0].received[2] == (
b'\x06\x00\x00\x00\x00\x04\x00\x00\x00\x00'
)
def test_evt15(self):
"""Test Sta12 + Evt15."""
# Sta12 + Evt15 -> AA-1 -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
# AA-1: Send A-ABORT PDU to <remote>, start ARTIM
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('recv', None) # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_abort())
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt15', 'AA-1'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt15'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x00\x00'
)
def test_evt16(self):
"""Test Sta12 + Evt16."""
# Sta12 + Evt16 -> AA-3 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-3: Issue A-ABORT or A-P-ABORT primitive, close connection
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('send', a_abort), # trigger event
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt16', 'AA-3'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt16'
]
def test_evt17(self):
"""Test Sta12 + Evt17."""
# Sta12 + Evt17 -> AA-4 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-4: Issue A-P-ABORT primitive
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt17', 'AA-4'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt17'
]
def test_evt18(self):
"""Test Sta12 + Evt18."""
# Sta12 + Evt18 -> <ignore> -> Sta12
# Evt18: ARTIM timer expired from <local service>
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.artim_timer.timeout = 0.05
assoc.dul.artim_timer.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt18'
]
def test_evt19(self):
"""Test Sta12 + Evt19."""
# Sta12 + Evt19 -> AA-8 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('send', b'\x08\x00\x00\x00\x00\x00\x00\x00'), # trigger event
('recv', None) # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt19', 'AA-8'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt19'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
class TestState13(TestStateBase):
"""Tests for State 13: Waiting for connection closed."""
def test_evt01(self):
"""Test Sta13 + Evt1."""
# Sta13 + Evt1 -> <ignore> -> Sta13
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_rq),
('wait', 0.1),
]
scp = self.start_server(commands)
def patch_neg_rq():
"""Override ACSE._negotiate_as_requestor"""
self.assoc.acse.send_request()
self.assoc.acse._negotiate_as_requestor = patch_neg_rq
orig_method = self.assoc.dul._is_transport_event
def patch_xport_event():
"""Override DUL._is_transport_event to not close in Sta13."""
if self.fsm.current_state == 'Sta13':
return False
return orig_method()
self.assoc.dul._is_transport_event = patch_xport_event
self.assoc.start()
while self.fsm.current_state != 'Sta13':
time.sleep(0.05)
self.assoc.dul.send_pdu(self.get_associate('request'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt1']
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta13 + Evt2."""
# Sta13 + Evt2 -> <ignore> -> Sta13
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta13 + Evt3."""
# Sta13 + Evt3 -> AA-6 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-6: Ignore PDU
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_associate_ac),
('send', a_associate_ac),
('wait', 0.1),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt3', 'AA-8'),
('Sta13', 'Evt3', 'AA-6'),
]
assert self.fsm._transitions[:4] == [
'Sta4', 'Sta5', 'Sta6', 'Sta13'
]
assert self.fsm._events[:5] == [
'Evt1', 'Evt2', 'Evt3', 'Evt3', 'Evt3'
]
def test_evt04(self):
"""Test Sta13 + Evt4."""
# Sta13 + Evt4 -> AA-6 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-6: Ignore PDU
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_associate_ac),
('send', a_associate_rj),
('wait', 0.1),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt3', 'AA-8'),
('Sta13', 'Evt4', 'AA-6'),
]
assert self.fsm._transitions[:4] == [
'Sta4', 'Sta5', 'Sta6', 'Sta13'
]
assert self.fsm._events[:5] == [
'Evt1', 'Evt2', 'Evt3', 'Evt3', 'Evt4'
]
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta13 + Evt5."""
# Sta13 + Evt5 -> <ignore> -> Sta13
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06(self):
"""Test Sta13 + Evt6."""
# Sta13 + Evt6 -> AA-7 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-7: Send A-ABORT PDU to <remote>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_associate_ac),
('send', a_associate_rq),
('wait', 0.1),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt3', 'AA-8'),
('Sta13', 'Evt6', 'AA-7'),
]
assert self.fsm._transitions[:4] == [
'Sta4', 'Sta5', 'Sta6', 'Sta13'
]
assert self.fsm._events[:5] == [
'Evt1', 'Evt2', 'Evt3', 'Evt3', 'Evt6'
]
def test_evt07(self):
"""Test Sta13 + Evt7."""
# Sta13 + Evt7 -> <ignore> -> Sta13
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_rq),
('wait', 0.1),
]
scp = self.start_server(commands)
def patch_neg_rq():
"""Override ACSE._negotiate_as_requestor"""
self.assoc.acse.send_request()
self.assoc.acse._negotiate_as_requestor = patch_neg_rq
orig_method = self.assoc.dul._is_transport_event
def patch_xport_event():
"""Override DUL._is_transport_event to not close in Sta13."""
if self.fsm.current_state == 'Sta13':
return False
return orig_method()
self.assoc.dul._is_transport_event = patch_xport_event
self.assoc.start()
while self.fsm.current_state != 'Sta13':
time.sleep(0.05)
self.assoc.dul.send_pdu(self.get_associate('accept'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt7']
def test_evt08(self):
"""Test Sta13 + Evt8."""
# Sta13 + Evt8 -> <ignore> -> Sta13
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_rq),
('wait', 0.1),
]
scp = self.start_server(commands)
def patch_neg_rq():
"""Override ACSE._negotiate_as_requestor"""
self.assoc.acse.send_request()
self.assoc.acse._negotiate_as_requestor = patch_neg_rq
orig_method = self.assoc.dul._is_transport_event
def patch_xport_event():
"""Override DUL._is_transport_event to not close in Sta13."""
if self.fsm.current_state == 'Sta13':
return False
return orig_method()
self.assoc.dul._is_transport_event = patch_xport_event
self.assoc.start()
while self.fsm.current_state != 'Sta13':
time.sleep(0.05)
self.assoc.dul.send_pdu(self.get_associate('reject'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt8']
def test_evt09(self):
"""Test Sta13 + Evt9."""
# Sta13 + Evt9 -> <ignore> -> Sta13
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('recv', None),
('send', a_associate_rq),
('wait', 0.2),
]
scp = self.start_server(commands)
def patch_neg_rq():
"""Override ACSE._negotiate_as_requestor"""
self.assoc.acse.send_request()
self.assoc.acse._negotiate_as_requestor = patch_neg_rq
orig_method = self.assoc.dul._is_transport_event
def patch_xport_event():
"""Override DUL._is_transport_event to not close in Sta13."""
if self.fsm.current_state == 'Sta13':
return False
return orig_method()
self.assoc.dul._is_transport_event = patch_xport_event
self.assoc.start()
start = time.time()
while self.fsm.current_state != 'Sta13':
time.sleep(0.05)
if time.time() - start > 5:
self.print_fsm_scp(self.fsm, scp)
break
self.assoc.dul.send_pdu(self.get_pdata())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt9']
def test_evt10(self):
"""Test Sta13 + Evt10."""
# Sta13 + Evt10 -> AA-6 -> Sta13
# Evt10: Receive P-DATA-TF PDU from <remote>
# AA-6: Ignore PDU
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_associate_ac),
('send', p_data_tf),
('wait', 0.1),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt3', 'AA-8'),
('Sta13', 'Evt10', 'AA-6'),
]
assert self.fsm._transitions[:4] == [
'Sta4', 'Sta5', 'Sta6', 'Sta13'
]
assert self.fsm._events[:5] == [
'Evt1', 'Evt2', 'Evt3', 'Evt3', 'Evt10'
]
def test_evt11(self):
"""Test Sta13 + Evt11."""
# Sta13 + Evt11 -> <ignore> -> Sta13
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_rq),
('wait', 0.1),
]
scp = self.start_server(commands)
def patch_neg_rq():
"""Override ACSE._negotiate_as_requestor"""
self.assoc.acse.send_request()
self.assoc.acse._negotiate_as_requestor = patch_neg_rq
orig_method = self.assoc.dul._is_transport_event
def patch_xport_event():
"""Override DUL._is_transport_event to not close in Sta13."""
if self.fsm.current_state == 'Sta13':
return False
return orig_method()
self.assoc.dul._is_transport_event = patch_xport_event
self.assoc.start()
while self.fsm.current_state != 'Sta13':
time.sleep(0.05)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt11']
def test_evt12(self):
"""Test Sta13 + Evt12."""
# Sta13 + Evt12 -> AA-6 -> Sta13
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AA-6: Ignore PDU
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_associate_ac),
('send', a_release_rq),
('wait', 0.1),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt3', 'AA-8'),
('Sta13', 'Evt12', 'AA-6'),
]
assert self.fsm._transitions[:4] == [
'Sta4', 'Sta5', 'Sta6', 'Sta13'
]
assert self.fsm._events[:5] == [
'Evt1', 'Evt2', 'Evt3', 'Evt3', 'Evt12'
]
def test_evt13(self):
"""Test Sta13 + Evt13."""
        # Sta13 + Evt13 -> AA-6 -> Sta13
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AA-6: Ignore PDU
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('wait', 0.1),
('send', a_associate_ac),
('send', a_release_rp),
('wait', 0.1),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt3', 'AA-8'),
('Sta13', 'Evt13', 'AA-6'),
]
assert self.fsm._transitions[:4] == [
'Sta4', 'Sta5', 'Sta6', 'Sta13'
]
assert self.fsm._events[:5] == [
'Evt1', 'Evt2', 'Evt3', 'Evt3', 'Evt13'
]
def test_evt14(self):
"""Test Sta13 + Evt14."""
# Sta13 + Evt14 -> <ignore> -> Sta13
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_rq),
('wait', 0.1),
]
scp = self.start_server(commands)
def patch_neg_rq():
"""Override ACSE._negotiate_as_requestor"""
self.assoc.acse.send_request()
self.assoc.acse._negotiate_as_requestor = patch_neg_rq
orig_method = self.assoc.dul._is_transport_event
def patch_xport_event():
"""Override DUL._is_transport_event to not close in Sta13."""
if self.fsm.current_state == 'Sta13':
return False
return orig_method()
self.assoc.dul._is_transport_event = patch_xport_event
self.assoc.start()
while self.fsm.current_state != 'Sta13':
time.sleep(0.05)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt14']
def test_evt15(self):
"""Test Sta13 + Evt15."""
# Sta13 + Evt15 -> <ignore> -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_rq),
('wait', 0.1),
]
scp = self.start_server(commands)
def patch_neg_rq():
"""Override ACSE._negotiate_as_requestor"""
self.assoc.acse.send_request()
self.assoc.acse._negotiate_as_requestor = patch_neg_rq
orig_method = self.assoc.dul._is_transport_event
def patch_xport_event():
"""Override DUL._is_transport_event to not close in Sta13."""
if self.fsm.current_state == 'Sta13':
return False
return orig_method()
self.assoc.dul._is_transport_event = patch_xport_event
self.assoc.start()
while self.fsm.current_state != 'Sta13':
time.sleep(0.05)
self.assoc.dul.send_pdu(self.get_abort())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt15']
def test_evt16(self):
"""Test Sta13 + Evt16."""
# Sta13 + Evt16 -> AA-2 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-2: Stop ARTIM, close connection
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_associate_ac),
('send', a_abort),
('wait', 0.1),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt3', 'AA-8'),
('Sta13', 'Evt16', 'AA-2'),
]
assert self.fsm._transitions[:4] == [
'Sta4', 'Sta5', 'Sta6', 'Sta13'
]
assert self.fsm._events[:5] == [
'Evt1', 'Evt2', 'Evt3', 'Evt3', 'Evt16'
]
def test_evt17(self):
"""Test Sta13 + Evt17."""
# Sta13 + Evt17 -> AR-5 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AR-5: Stop ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_associate_ac),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt3', 'AA-8'),
('Sta13', 'Evt17', 'AR-5'),
]
assert self.fsm._transitions[:4] == [
'Sta4', 'Sta5', 'Sta6', 'Sta13'
]
assert self.fsm._events[:5] == [
'Evt1', 'Evt2', 'Evt3', 'Evt3', 'Evt17'
]
def test_evt18(self):
"""Test Sta13 + Evt18."""
# Sta13 + Evt18 -> AA-2 -> Sta1
# Evt18: ARTIM timer expired from <local service>
# AA-2: Stop ARTIM, close connection
commands = [
('recv', None),
('send', a_associate_rq),
('wait', 0.1),
]
scp = self.start_server(commands)
def patch_neg_rq():
"""Override ACSE._negotiate_as_requestor"""
self.assoc.acse.send_request()
self.assoc.acse._negotiate_as_requestor = patch_neg_rq
orig_method = self.assoc.dul._is_transport_event
def patch_xport_event():
"""Override DUL._is_transport_event to not close in Sta13."""
if self.fsm.current_state == 'Sta13':
return False
return orig_method()
self.assoc.dul._is_transport_event = patch_xport_event
self.assoc.start()
while self.fsm.current_state != 'Sta13':
time.sleep(0.05)
self.assoc.dul.artim_timer.timeout = 0.05
self.assoc.dul.artim_timer.start()
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
('Sta13', 'Evt18', 'AA-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta13', 'Sta1']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt18']
def test_evt19(self):
"""Test Sta13 + Evt19."""
# Sta13 + Evt19 -> AA-7 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-7: Send A-ABORT PDU to <remote>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_associate_ac),
('send', b'\x08\x00\x00\x00\x00\x00\x00\x00'),
('wait', 0.1),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt3', 'AA-8'),
('Sta13', 'Evt19', 'AA-7'),
]
assert self.fsm._transitions[:4] == [
'Sta4', 'Sta5', 'Sta6', 'Sta13'
]
assert self.fsm._events[:5] == [
'Evt1', 'Evt2', 'Evt3', 'Evt3', 'Evt19'
]
class TestParrotAttack(TestStateBase):
"""Test a parrot attack on the association."""
def test_requestor(self):
commands = [
('recv', None),
('send', a_associate_ac),
('send', p_data_tf),
('send', p_data_tf),
('send', p_data_tf),
('send', p_data_tf),
('send', p_data_tf),
('send', p_data_tf),
('send', p_data_tf),
('send', p_data_tf),
('send', a_release_rq),
('wait', 0.1)
]
scp = self.start_server(commands)
self.assoc.start()
time.sleep(0.5)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:14] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt14', 'AR-4'),
('Sta13', 'Evt17', 'AR-5'),
]
def test_acceptor(self):
"""Test hitting the acceptor with PDUs."""
# Also a regression test for #120
# C-ECHO-RQ
# 80 total length
echo_rq = (
b"\x04\x00\x00\x00\x00\x4a" # P-DATA-TF 74
b"\x00\x00\x00\x46\x01" # PDV Item 70
b"\x03" # PDV: 2 -> 69
b"\x00\x00\x00\x00\x04\x00\x00\x00\x42\x00\x00\x00" # 12 Command Group Length
b"\x00\x00\x02\x00\x12\x00\x00\x00\x31\x2e\x32\x2e\x38"
b"\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x31\x2e\x31\x00" # 26
b"\x00\x00\x00\x01\x02\x00\x00\x00\x30\x00" # 10 Command Field
b"\x00\x00\x10\x01\x02\x00\x00\x00\x01\x00" # 10 Message ID
b"\x00\x00\x00\x08\x02\x00\x00\x00\x01\x01" # 10 Command Data Set Type
)
# Send associate request then c-echo requests then release request
commands = [
('send', a_associate_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', a_release_rq),
('wait', 0.1)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.5)
#self.print_fsm_scp(fsm, scp=None)
scp.shutdown()
assert [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt14', 'AR-4'),
('Sta13', 'Evt17', 'AR-5'),
] == fsm._changes[:30]
class TestStateMachineFunctionalRequestor(object):
"""Functional tests for StateMachine as association requestor."""
def setup(self):
"""Run prior to each test"""
self.scp = None
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='requestor')
assoc.set_socket(AssociationSocket(assoc))
# Association Acceptor object -> remote AE
assoc.acceptor.ae_title = validate_ae_title(b'ANY_SCU')
assoc.acceptor.address = 'localhost'
assoc.acceptor.port = 11112
# Association Requestor object -> local AE
assoc.requestor.address = 'localhost'
assoc.requestor.port = 11113
assoc.requestor.ae_title = ae.ae_title
assoc.requestor.maximum_length = 16382
assoc.requestor.implementation_class_uid = (
ae.implementation_class_uid
)
assoc.requestor.implementation_version_name = (
ae.implementation_version_name
)
cx = build_context(VerificationSOPClass)
cx.context_id = 1
assoc.requestor.requested_contexts = [cx]
self.assoc = assoc
self.fsm = self.monkey_patch(assoc.dul.state_machine)
def teardown(self):
"""Clear any active threads"""
if self.scp:
self.scp.abort()
for thread in threading.enumerate():
if isinstance(thread, DummyBaseSCP):
thread.abort()
thread.stop()
time.sleep(0.1)
def monkey_patch(self, fsm):
"""Monkey patch the StateMachine to add testing hooks."""
# Record all state transitions
fsm._transitions = []
fsm.original_transition = fsm.transition
def transition(state):
fsm._transitions.append(state)
fsm.original_transition(state)
fsm.transition = transition
# Record all event/state/actions
fsm._changes = []
fsm.original_action = fsm.do_action
def do_action(event):
if (event, fsm.current_state) in TRANSITION_TABLE:
action_name = TRANSITION_TABLE[(event, fsm.current_state)]
fsm._changes.append((fsm.current_state, event, action_name))
fsm.original_action(event)
fsm.do_action = do_action
return fsm
def test_monkey_patch(self):
"""Test monkey patching of StateMachine works as intended."""
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='requestor')
fsm = self.monkey_patch(assoc.dul.state_machine)
assert fsm.current_state == 'Sta1'
fsm.current_state = 'Sta13'
fsm.do_action('Evt3')
assert fsm._changes == [('Sta13', 'Evt3', 'AA-6')]
assert fsm._transitions == ['Sta13']
def test_associate_accept_release(self):
"""Test normal association/release."""
self.scp = DummyVerificationSCP()
self.scp.start()
assert self.fsm.current_state == 'Sta1'
self.assoc.start()
while (not self.assoc.is_established and not self.assoc.is_rejected and
not self.assoc.is_aborted and not self.assoc.dul._kill_thread):
time.sleep(0.05)
if self.assoc.is_established:
self.assoc.release()
assert self.fsm._transitions == [
'Sta4', # Waiting for connection to complete
'Sta5', # Waiting for A-ASSOC-AC or -RJ PDU
'Sta6', # Assoc established
'Sta7', # Waiting for A-RELEASE-RP PDU
'Sta1' # Idle
]
assert self.fsm._changes == [
('Sta1', 'Evt1', 'AE-1'), # recv A-ASSOC rq primitive
('Sta4', 'Evt2', 'AE-2'), # connection confirmed
('Sta5', 'Evt3', 'AE-3'), # A-ASSOC-AC PDU recv
('Sta6', 'Evt11', 'AR-1'), # A-RELEASE rq primitive
('Sta7', 'Evt13', 'AR-3'), # A-RELEASE-RP PDU recv
]
assert self.fsm.current_state == 'Sta1'
self.scp.stop()
def test_associate_reject(self):
"""Test normal association rejection."""
self.scp = DummyVerificationSCP()
self.scp.ae.require_called_aet = True
self.scp.start()
assert self.fsm.current_state == 'Sta1'
self.assoc.start()
while (not self.assoc.is_established and not self.assoc.is_rejected and
not self.assoc.is_aborted and not self.assoc.dul._kill_thread):
time.sleep(0.05)
time.sleep(0.05)
assert self.assoc.is_rejected
assert self.fsm._transitions == [
'Sta4', # Waiting for connection to complete
'Sta5', # Waiting for A-ASSOC-AC or -RJ PDU
'Sta1' # Idle
]
assert self.fsm._changes == [
('Sta1', 'Evt1', 'AE-1'), # recv A-ASSOC rq primitive
('Sta4', 'Evt2', 'AE-2'), # connection confirmed
('Sta5', 'Evt4', 'AE-4'), # A-ASSOC-RJ PDU recv
]
assert self.fsm.current_state == 'Sta1'
self.scp.stop()
def test_associate_accept_abort(self):
"""Test association acceptance then local abort."""
self.scp = DummyVerificationSCP()
self.scp.ae.acse_timeout = 5
self.scp.start()
assert self.fsm.current_state == 'Sta1'
self.assoc.start()
while (not self.assoc.is_established and not self.assoc.is_rejected and
not self.assoc.is_aborted and not self.assoc.dul._kill_thread):
time.sleep(0.05)
if self.assoc.is_established:
self.assoc.abort()
assert self.fsm._transitions == [
'Sta4', # Waiting for connection to complete
'Sta5', # Waiting for A-ASSOC-AC or -RJ PDU
'Sta6', # Assoc established
'Sta13', # Waiting for connection closed
'Sta1' # Idle
]
assert self.fsm._changes == [
('Sta1', 'Evt1', 'AE-1'), # recv A-ASSOC rq primitive
('Sta4', 'Evt2', 'AE-2'), # connection confirmed
('Sta5', 'Evt3', 'AE-3'), # A-ASSOC-AC PDU recv
('Sta6', 'Evt15', 'AA-1'), # A-ABORT rq primitive
('Sta13', 'Evt17', 'AR-5'), # connection closed
]
assert self.fsm.current_state == 'Sta1'
self.scp.stop()
def test_associate_accept_local_abort(self):
"""Test association acceptance then local abort if no cx."""
self.scp = DummyVerificationSCP()
self.scp.ae.acse_timeout = 5
self.scp.start()
assert self.fsm.current_state == 'Sta1'
self.assoc.requestor.requested_contexts[0].abstract_syntax = '1.2.3'
self.assoc.start()
while (not self.assoc.is_established and not self.assoc.is_rejected and
not self.assoc.is_aborted and not self.assoc.dul._kill_thread):
time.sleep(0.05)
time.sleep(0.1)
assert self.fsm._transitions == [
'Sta4', # Waiting for connection to complete
'Sta5', # Waiting for A-ASSOC-AC or -RJ PDU
'Sta6', # Assoc established
'Sta13', # Waiting for connection close
'Sta1' # Idle
]
assert self.fsm._changes == [
('Sta1', 'Evt1', 'AE-1'), # A-ASSOC rq primitive
('Sta4', 'Evt2', 'AE-2'), # connection confirmed
('Sta5', 'Evt3', 'AE-3'), # A-ASSOC-AC PDU recv
('Sta6', 'Evt15', 'AA-1'), # A-ABORT rq primitive
('Sta13', 'Evt17', 'AR-5'), # Connection closed
]
assert self.fsm.current_state == 'Sta1'
self.scp.stop()
def test_associate_accept_peer_abort(self):
"""Test association acceptance then peer abort."""
self.scp = DummyVerificationSCP()
self.scp.ae.network_timeout = 0.5
self.scp.ae.acse_timeout = 5
self.scp.start()
assert self.fsm.current_state == 'Sta1'
self.assoc.start()
while (not self.assoc.is_established and not self.assoc.is_rejected and
not self.assoc.is_aborted and not self.assoc.dul._kill_thread):
time.sleep(0.05)
while not self.assoc.is_established:
time.sleep(0.05)
while not self.assoc.is_aborted:
time.sleep(0.05)
assert self.fsm._transitions == [
'Sta4', # Waiting for connection to complete
'Sta5', # Waiting for A-ASSOC-AC or -RJ PDU
'Sta6', # Assoc established
'Sta1' # Idle
]
assert self.fsm._changes == [
('Sta1', 'Evt1', 'AE-1'), # A-ASSOC rq primitive
('Sta4', 'Evt2', 'AE-2'), # connection confirmed
('Sta5', 'Evt3', 'AE-3'), # A-ASSOC-AC PDU recv
            ('Sta6', 'Evt16', 'AA-3'),  # A-ABORT PDU recv
]
assert self.fsm.current_state == 'Sta1'
self.scp.stop()
def test_associate_send_data(self):
"""Test association acceptance then send DIMSE message."""
self.scp = DummyVerificationSCP()
self.scp.start()
assert self.fsm.current_state == 'Sta1'
self.assoc.start()
while (not self.assoc.is_established and not self.assoc.is_rejected and
not self.assoc.is_aborted and not self.assoc.dul._kill_thread):
time.sleep(0.05)
self.assoc.send_c_echo()
self.assoc.release()
assert self.fsm._transitions == [
'Sta4', # Waiting for connection to complete
'Sta5', # Waiting for A-ASSOC-AC or -RJ PDU
'Sta6', # Assoc established
'Sta6',
'Sta6',
            'Sta7', # Waiting for A-RELEASE-RP PDU
'Sta1' # Idle
]
assert self.fsm._changes == [
('Sta1', 'Evt1', 'AE-1'), # A-ASSOC rq primitive
('Sta4', 'Evt2', 'AE-2'), # connection confirmed
('Sta5', 'Evt3', 'AE-3'), # A-ASSOC-AC PDU recv
('Sta6', 'Evt9', 'DT-1'), # P-DATA rq primitive
('Sta6', 'Evt10', 'DT-2'), # P-DATA-TF PDU recv
('Sta6', 'Evt11', 'AR-1'), # A-RELEASE rq primitive
('Sta7', 'Evt13', 'AR-3'), # A-RELEASE-RP PDU recv
]
assert self.fsm.current_state == 'Sta1'
self.scp.stop()
def test_release_AR6(self):
"""Test receive P-DATA-TF while waiting for A-RELEASE-RP."""
# Requestor sends A-RELEASE-RQ, acceptor sends P-DATA-TF then
# A-RELEASE-RP
# Patch AR-4 to also send a P-DATA-TF
orig_entry = FINITE_STATE.ACTIONS['AR-4']
def AR_4(dul):
# Send C-ECHO-RQ
dul.socket.send(p_data_tf)
# Normal release response
dul.pdu = A_RELEASE_RP()
dul.pdu.from_primitive(dul.primitive)
# Callback
dul.socket.send(dul.pdu.encode())
dul.artim_timer.start()
return 'Sta13'
# In this case the association acceptor will hit AR_4
FINITE_STATE.ACTIONS['AR-4'] = ('Bluh', AR_4, 'Sta13')
self.scp = DummyVerificationSCP()
self.scp.start()
assert self.fsm.current_state == 'Sta1'
self.assoc.start()
while (not self.assoc.is_established and not self.assoc.is_rejected and
not self.assoc.is_aborted and not self.assoc.dul._kill_thread):
time.sleep(0.05)
self.assoc.release()
assert self.fsm._transitions == [
'Sta4', # Waiting for connection to complete
'Sta5', # Waiting for A-ASSOC-AC or -RJ PDU
'Sta6', # Assoc established
'Sta7',
'Sta7', # Waiting for A-RELEASE-RP PDU
'Sta1' # Idle
]
assert self.fsm._changes == [
('Sta1', 'Evt1', 'AE-1'), # A-ASSOC rq primitive
('Sta4', 'Evt2', 'AE-2'), # connection confirmed
('Sta5', 'Evt3', 'AE-3'), # A-ASSOC-AC PDU recv
('Sta6', 'Evt11', 'AR-1'), # A-RELEASE rq primitive
('Sta7', 'Evt10', 'AR-6'), # P-DATA-TF PDU recv
('Sta7', 'Evt13', 'AR-3'), # A-RELEASE-RP PDU recv
]
assert self.fsm.current_state == 'Sta1'
self.scp.stop()
        FINITE_STATE.ACTIONS['AR-4'] = orig_entry
def test_release_AR7(self):
"""Test receive P-DATA primitive after A-RELEASE-RQ PDU."""
orig_entry = FINITE_STATE.ACTIONS['AR-2']
def AR_2(dul):
"""AR-2 occurs when an A-RELEASE-RQ PDU is received."""
# Add P-DATA primitive request
primitive = C_ECHO()
primitive.MessageID = 1
primitive.AffectedSOPClassUID = VerificationSOPClass
# Send C-ECHO request to the peer via DIMSE and wait for the response
dul.assoc.dimse.send_msg(primitive, 1)
# Normal AR2 response
dul.to_user_queue.put(dul.primitive)
return 'Sta8'
# In this case the association acceptor will hit AR_2
FINITE_STATE.ACTIONS['AR-2'] = ('Bluh', AR_2, 'Sta8')
self.scp = DummyVerificationSCP()
self.scp.start()
assert self.fsm.current_state == 'Sta1'
self.assoc.start()
while (not self.assoc.is_established and not self.assoc.is_rejected and
not self.assoc.is_aborted and not self.assoc.dul._kill_thread):
time.sleep(0.05)
self.assoc.release()
assert self.fsm._transitions == [
'Sta4', # Waiting for connection to complete
'Sta5', # Waiting for A-ASSOC-AC or -RJ PDU
'Sta6', # Assoc established
'Sta7',
'Sta7', # Waiting for A-RELEASE-RP PDU
'Sta1' # Idle
]
assert self.fsm._changes == [
('Sta1', 'Evt1', 'AE-1'), # A-ASSOC rq primitive
('Sta4', 'Evt2', 'AE-2'), # connection confirmed
('Sta5', 'Evt3', 'AE-3'), # A-ASSOC-AC PDU recv
('Sta6', 'Evt11', 'AR-1'), # A-RELEASE rq primitive
('Sta7', 'Evt10', 'AR-6'), # P-DATA-TF PDU recv
('Sta7', 'Evt13', 'AR-3'), # A-RELEASE-RP PDU recv
]
assert self.fsm.current_state == 'Sta1'
self.scp.stop()
        FINITE_STATE.ACTIONS['AR-2'] = orig_entry
class TestStateMachineFunctionalAcceptor(object):
"""Functional tests for StateMachine as association acceptor."""
def setup(self):
"""Run prior to each test"""
self.scp = None
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='requestor')
assoc.set_socket(AssociationSocket(assoc))
# Association Acceptor object -> remote AE
assoc.acceptor.ae_title = validate_ae_title(b'ANY_SCU')
assoc.acceptor.address = 'localhost'
assoc.acceptor.port = 11112
# Association Requestor object -> local AE
assoc.requestor.address = 'localhost'
assoc.requestor.port = 11113
assoc.requestor.ae_title = ae.ae_title
assoc.requestor.maximum_length = 16382
assoc.requestor.implementation_class_uid = (
ae.implementation_class_uid
)
assoc.requestor.implementation_version_name = (
ae.implementation_version_name
)
cx = build_context(VerificationSOPClass)
cx.context_id = 1
assoc.requestor.requested_contexts = [cx]
self.assoc = assoc
self.fsm = self.monkey_patch(assoc.dul.state_machine)
self.orig_entry = FINITE_STATE.ACTIONS['AE-2']
def teardown(self):
"""Clear any active threads"""
if self.scp:
self.scp.abort()
time.sleep(0.1)
for thread in threading.enumerate():
if isinstance(thread, DummyBaseSCP):
thread.abort()
thread.stop()
        FINITE_STATE.ACTIONS['AE-2'] = self.orig_entry
def monkey_patch(self, fsm):
"""Monkey patch the StateMachine to add testing hooks."""
# Record all state transitions
fsm._transitions = []
fsm.original_transition = fsm.transition
def transition(state):
fsm._transitions.append(state)
fsm.original_transition(state)
fsm.transition = transition
# Record all event/state/actions
fsm._changes = []
fsm.original_action = fsm.do_action
def do_action(event):
if (event, fsm.current_state) in TRANSITION_TABLE:
action_name = TRANSITION_TABLE[(event, fsm.current_state)]
fsm._changes.append((fsm.current_state, event, action_name))
fsm.original_action(event)
fsm.do_action = do_action
return fsm
def test_invalid_protocol_version(self):
"""Test receiving an A-ASSOC-RQ with invalid protocol version."""
self.scp = DummyVerificationSCP()
self.scp.start()
assert self.fsm.current_state == 'Sta1'
def AE_2(dul):
dul.pdu = A_ASSOCIATE_RQ()
dul.pdu.from_primitive(dul.primitive)
dul.pdu.protocol_version = 0x0002
bytestream = dul.pdu.encode()
dul.socket.send(bytestream)
return 'Sta5'
FINITE_STATE.ACTIONS['AE-2'] = ('Bluh', AE_2, 'Sta5')
self.assoc.start()
while (not self.assoc.is_established and not self.assoc.is_rejected and
not self.assoc.is_aborted and not self.assoc.dul._kill_thread):
time.sleep(0.05)
assert self.assoc.is_rejected
assert self.assoc.acceptor.primitive.result == 0x01
assert self.assoc.acceptor.primitive.result_source == 0x02
assert self.assoc.acceptor.primitive.diagnostic == 0x02
assert self.fsm.current_state == 'Sta1'
self.scp.stop()
class TestEventHandling(object):
"""Test the FSM event handlers."""
def setup(self):
self.ae = None
def teardown(self):
if self.ae:
self.ae.shutdown()
def test_no_handlers(self):
"""Test with no handlers bound."""
self.ae = ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
ae.add_requested_context('1.2.840.10008.1.1')
scp = ae.start_server(('', 11112), block=False)
assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == []
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_FSM_TRANSITION) == []
assoc.release()
scp.shutdown()
def test_transition_acceptor(self):
"""Test EVT_FSM_TRANSITION as acceptor."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
ae.add_requested_context('1.2.840.10008.1.1')
handlers = [(evt.EVT_FSM_TRANSITION, handle)]
scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == [(handle, None)]
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_FSM_TRANSITION) == [(handle, None)]
assoc.release()
while scp.active_associations:
time.sleep(0.05)
for event in triggered:
assert hasattr(event, 'current_state')
assert hasattr(event, 'fsm_event')
assert hasattr(event, 'action')
assert hasattr(event, 'next_state')
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime.datetime)
assert event.event.name == 'EVT_FSM_TRANSITION'
assert event.event.description == "State machine about to transition"
states = [ee.current_state for ee in triggered]
assert states[:6] == ['Sta1', 'Sta2', 'Sta3', 'Sta6', 'Sta8', 'Sta13']
scp.shutdown()
def test_transition_acceptor_bind(self):
"""Test EVT_FSM_TRANSITION as acceptor."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
ae.add_requested_context('1.2.840.10008.1.1')
scp = ae.start_server(('', 11112), block=False)
assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == []
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_FSM_TRANSITION) == []
scp.bind(evt.EVT_FSM_TRANSITION, handle)
assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == [(handle, None)]
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_FSM_TRANSITION) == [(handle, None)]
assoc.release()
while scp.active_associations:
time.sleep(0.05)
for event in triggered:
assert hasattr(event, 'current_state')
assert hasattr(event, 'fsm_event')
assert hasattr(event, 'action')
assert hasattr(event, 'next_state')
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime.datetime)
states = [ee.current_state for ee in triggered]
assert states[:3] == ['Sta6', 'Sta8', 'Sta13']
def test_transition_acceptor_unbind(self):
"""Test EVT_FSM_TRANSITION as acceptor."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
ae.add_requested_context('1.2.840.10008.1.1')
handlers = [(evt.EVT_FSM_TRANSITION, handle)]
scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == [(handle, None)]
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_FSM_TRANSITION) == [(handle, None)]
scp.unbind(evt.EVT_FSM_TRANSITION, handle)
assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_FSM_TRANSITION) == []
assoc.release()
while scp.active_associations:
time.sleep(0.05)
time.sleep(0.5)
for event in triggered:
assert hasattr(event, 'current_state')
assert hasattr(event, 'fsm_event')
assert hasattr(event, 'action')
assert hasattr(event, 'next_state')
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime.datetime)
states = [ee.current_state for ee in triggered]
assert states[:3] == ['Sta1', 'Sta2', 'Sta3']
scp.shutdown()
def test_transition_requestor(self):
"""Test EVT_FSM_TRANSITION as requestor."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
ae.add_requested_context('1.2.840.10008.1.1')
handlers = [(evt.EVT_FSM_TRANSITION, handle)]
scp = ae.start_server(('', 11112), block=False)
assoc = ae.associate('localhost', 11112, evt_handlers=handlers)
assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == [(handle, None)]
assert assoc.is_established
assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_FSM_TRANSITION) == []
assoc.release()
while not assoc.is_released:
time.sleep(0.05)
for event in triggered:
assert hasattr(event, 'current_state')
assert hasattr(event, 'fsm_event')
assert hasattr(event, 'action')
assert hasattr(event, 'next_state')
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime.datetime)
states = [ee.current_state for ee in triggered]
assert states[:5] == ['Sta1', 'Sta4', 'Sta5', 'Sta6', 'Sta7']
scp.shutdown()
def test_transition_requestor_bind(self):
"""Test EVT_FSM_TRANSITION as requestor."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
ae.add_requested_context('1.2.840.10008.1.1')
scp = ae.start_server(('', 11112), block=False)
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == []
assoc.bind(evt.EVT_FSM_TRANSITION, handle)
assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == [(handle, None)]
assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_FSM_TRANSITION) == []
assoc.release()
while not assoc.is_released:
time.sleep(0.05)
for event in triggered:
assert hasattr(event, 'current_state')
assert hasattr(event, 'fsm_event')
assert hasattr(event, 'action')
assert hasattr(event, 'next_state')
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime.datetime)
states = [ee.current_state for ee in triggered]
assert states[:2] == ['Sta6', 'Sta7']
scp.shutdown()
def test_transition_requestor_unbind(self):
"""Test EVT_FSM_TRANSITION as requestor."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
ae.add_requested_context('1.2.840.10008.1.1')
handlers = [(evt.EVT_FSM_TRANSITION, handle)]
scp = ae.start_server(('', 11112), block=False)
assoc = ae.associate('localhost', 11112, evt_handlers=handlers)
assert assoc.is_established
assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == [(handle, None)]
assoc.unbind(evt.EVT_FSM_TRANSITION, handle)
assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == []
assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_FSM_TRANSITION) == []
assoc.release()
while not assoc.is_released:
time.sleep(0.05)
for event in triggered:
assert hasattr(event, 'current_state')
assert hasattr(event, 'fsm_event')
assert hasattr(event, 'action')
assert hasattr(event, 'next_state')
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime.datetime)
states = [ee.current_state for ee in triggered]
assert states[:3] == ['Sta1', 'Sta4', 'Sta5']
scp.shutdown()
def test_transition_raises(self, caplog):
"""Test the handler for EVT_FSM_TRANSITION raising exception."""
def handle(event):
raise NotImplementedError("Exception description")
self.ae = ae = AE()
ae.add_supported_context(VerificationSOPClass)
ae.add_requested_context(VerificationSOPClass)
handlers = [(evt.EVT_FSM_TRANSITION, handle)]
scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
with caplog.at_level(logging.ERROR, logger='pynetdicom'):
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assoc.release()
while scp.active_associations:
time.sleep(0.05)
scp.shutdown()
msg = (
"Exception raised in user's 'evt.EVT_FSM_TRANSITION' event "
"handler 'handle'"
)
assert msg in caplog.text
assert "Exception description" in caplog.text
|
athenad.py
|
#!/usr/bin/env python3
import base64
import hashlib
import io
import json
import os
import sys
import queue
import random
import select
import socket
import threading
import time
from collections import namedtuple
from functools import partial
from typing import Any
import requests
from jsonrpc import JSONRPCResponseManager, dispatcher
from websocket import ABNF, WebSocketTimeoutException, WebSocketException, create_connection
import cereal.messaging as messaging
from cereal.services import service_list
from common.api import Api
from common.basedir import PERSIST
from common.params import Params
from common.realtime import sec_since_boot
from selfdrive.hardware import HARDWARE, PC
from selfdrive.loggerd.config import ROOT
from selfdrive.loggerd.xattr_cache import getxattr, setxattr
from selfdrive.swaglog import cloudlog, SWAGLOG_DIR
import selfdrive.crash as crash
from selfdrive.version import dirty, origin, branch, commit
ATHENA_HOST = os.getenv('ATHENA_HOST', 'wss://athena.comma.ai')
HANDLER_THREADS = int(os.getenv('HANDLER_THREADS', "4"))
LOCAL_PORT_WHITELIST = set([8022])
LOG_ATTR_NAME = 'user.upload'
LOG_ATTR_VALUE_MAX_UNIX_TIME = int.to_bytes(2147483647, 4, sys.byteorder)
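# The 'user.upload' xattr on each log file records the unix time it was last
# sent. LOG_ATTR_VALUE_MAX_UNIX_TIME (the 32-bit maximum, year 2038) is written
# once a send is acknowledged so the file is never selected again; see
# get_logs_to_send_sorted() and log_handler() below.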
dispatcher["echo"] = lambda s: s
recv_queue: Any = queue.Queue()
send_queue: Any = queue.Queue()
upload_queue: Any = queue.Queue()
log_send_queue: Any = queue.Queue()
log_recv_queue: Any = queue.Queue()
cancelled_uploads: Any = set()
UploadItem = namedtuple('UploadItem', ['path', 'url', 'headers', 'created_at', 'id'])
def handle_long_poll(ws):
end_event = threading.Event()
threads = [
threading.Thread(target=ws_recv, args=(ws, end_event)),
threading.Thread(target=ws_send, args=(ws, end_event)),
threading.Thread(target=upload_handler, args=(end_event,)),
threading.Thread(target=log_handler, args=(end_event,)),
] + [
threading.Thread(target=jsonrpc_handler, args=(end_event,))
for x in range(HANDLER_THREADS)
]
for thread in threads:
thread.start()
try:
while not end_event.is_set():
time.sleep(0.1)
except (KeyboardInterrupt, SystemExit):
end_event.set()
raise
finally:
for thread in threads:
thread.join()
def jsonrpc_handler(end_event):
dispatcher["startLocalProxy"] = partial(startLocalProxy, end_event)
while not end_event.is_set():
try:
data = recv_queue.get(timeout=1)
if "method" in data:
response = JSONRPCResponseManager.handle(data, dispatcher)
send_queue.put_nowait(response.json)
elif "result" in data and "id" in data:
log_recv_queue.put_nowait(data)
else:
raise Exception("not a valid request or response")
except queue.Empty:
pass
except Exception as e:
cloudlog.exception("athena jsonrpc handler failed")
send_queue.put_nowait(json.dumps({"error": str(e)}))
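# Illustrative example of the flow above, using the built-in "echo" method
# registered on the dispatcher (values here are made up):
#   incoming request : {"method": "echo", "params": ["hello"], "jsonrpc": "2.0", "id": 0}
#   outgoing response: {"jsonrpc": "2.0", "id": 0, "result": "hello"}
# Messages carrying "result" and "id" instead of "method" are treated as
# responses to previously forwarded logs and are routed to log_recv_queue.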
def upload_handler(end_event):
while not end_event.is_set():
try:
item = upload_queue.get(timeout=1)
if item.id in cancelled_uploads:
cancelled_uploads.remove(item.id)
continue
_do_upload(item)
except queue.Empty:
pass
except Exception:
cloudlog.exception("athena.upload_handler.exception")
def _do_upload(upload_item):
with open(upload_item.path, "rb") as f:
size = os.fstat(f.fileno()).st_size
return requests.put(upload_item.url,
data=f,
headers={**upload_item.headers, 'Content-Length': str(size)},
timeout=10)
# security: user should be able to request any message from their car
@dispatcher.add_method
def getMessage(service=None, timeout=1000):
if service is None or service not in service_list:
raise Exception("invalid service")
socket = messaging.sub_sock(service, timeout=timeout)
ret = messaging.recv_one(socket)
if ret is None:
raise TimeoutError
return ret.to_dict()
@dispatcher.add_method
def setNavDestination(latitude=0, longitude=0):
destination = {
"latitude": latitude,
"longitude": longitude,
}
Params().put("NavDestination", json.dumps(destination))
return {"success": 1}
@dispatcher.add_method
def listDataDirectory():
files = [os.path.relpath(os.path.join(dp, f), ROOT) for dp, dn, fn in os.walk(ROOT) for f in fn]
return files
@dispatcher.add_method
def reboot():
sock = messaging.sub_sock("deviceState", timeout=1000)
ret = messaging.recv_one(sock)
if ret is None or ret.deviceState.started:
raise Exception("Reboot unavailable")
def do_reboot():
time.sleep(2)
HARDWARE.reboot()
threading.Thread(target=do_reboot).start()
return {"success": 1}
@dispatcher.add_method
def uploadFileToUrl(fn, url, headers):
if len(fn) == 0 or fn[0] == '/' or '..' in fn:
return 500
path = os.path.join(ROOT, fn)
if not os.path.exists(path):
return 404
item = UploadItem(path=path, url=url, headers=headers, created_at=int(time.time() * 1000), id=None)
upload_id = hashlib.sha1(str(item).encode()).hexdigest()
item = item._replace(id=upload_id)
upload_queue.put_nowait(item)
return {"enqueued": 1, "item": item._asdict()}
@dispatcher.add_method
def listUploadQueue():
return [item._asdict() for item in list(upload_queue.queue)]
@dispatcher.add_method
def cancelUpload(upload_id):
upload_ids = set(item.id for item in list(upload_queue.queue))
if upload_id not in upload_ids:
return 404
cancelled_uploads.add(upload_id)
return {"success": 1}
def startLocalProxy(global_end_event, remote_ws_uri, local_port):
try:
if local_port not in LOCAL_PORT_WHITELIST:
raise Exception("Requested local port not whitelisted")
params = Params()
dongle_id = params.get("DongleId").decode('utf8')
identity_token = Api(dongle_id).get_token()
ws = create_connection(remote_ws_uri,
cookie="jwt=" + identity_token,
enable_multithread=True)
ssock, csock = socket.socketpair()
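    # The socketpair is only a shutdown signal: ws_proxy_recv closes ssock when
    # it exits, which makes csock readable in ws_proxy_send's select() so that
    # thread can stop as well.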
local_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
local_sock.connect(('127.0.0.1', local_port))
local_sock.setblocking(0)
proxy_end_event = threading.Event()
threads = [
threading.Thread(target=ws_proxy_recv, args=(ws, local_sock, ssock, proxy_end_event, global_end_event)),
threading.Thread(target=ws_proxy_send, args=(ws, local_sock, csock, proxy_end_event))
]
for thread in threads:
thread.start()
return {"success": 1}
except Exception as e:
cloudlog.exception("athenad.startLocalProxy.exception")
raise e
@dispatcher.add_method
def getPublicKey():
if not os.path.isfile(PERSIST + '/comma/id_rsa.pub'):
return None
with open(PERSIST + '/comma/id_rsa.pub', 'r') as f:
return f.read()
@dispatcher.add_method
def getSshAuthorizedKeys():
return Params().get("GithubSshKeys", encoding='utf8') or ''
@dispatcher.add_method
def getSimInfo():
return HARDWARE.get_sim_info()
@dispatcher.add_method
def getNetworkType():
return HARDWARE.get_network_type()
@dispatcher.add_method
def takeSnapshot():
from selfdrive.camerad.snapshot.snapshot import snapshot, jpeg_write
ret = snapshot()
if ret is not None:
def b64jpeg(x):
if x is not None:
f = io.BytesIO()
jpeg_write(f, x)
return base64.b64encode(f.getvalue()).decode("utf-8")
else:
return None
return {'jpegBack': b64jpeg(ret[0]),
'jpegFront': b64jpeg(ret[1])}
else:
raise Exception("not available while camerad is started")
def get_logs_to_send_sorted():
# TODO: scan once then use inotify to detect file creation/deletion
curr_time = int(time.time())
logs = []
for log_entry in os.listdir(SWAGLOG_DIR):
log_path = os.path.join(SWAGLOG_DIR, log_entry)
try:
time_sent = int.from_bytes(getxattr(log_path, LOG_ATTR_NAME), sys.byteorder)
except (ValueError, TypeError):
time_sent = 0
# assume send failed and we lost the response if sent more than one hour ago
if not time_sent or curr_time - time_sent > 3600:
logs.append(log_entry)
# return logs in order they should be sent
# excluding most recent (active) log file
  return sorted(logs)[:-1]
def log_handler(end_event):
if PC:
return
log_files = []
last_scan = 0
log_retries = 0
while not end_event.is_set():
try:
try:
result = json.loads(log_recv_queue.get(timeout=1))
log_success = result.get("success")
log_entry = result.get("id")
log_path = os.path.join(SWAGLOG_DIR, log_entry)
if log_entry and log_success:
try:
setxattr(log_path, LOG_ATTR_NAME, LOG_ATTR_VALUE_MAX_UNIX_TIME)
except OSError:
pass # file could be deleted by log rotation
except queue.Empty:
pass
curr_scan = sec_since_boot()
if curr_scan - last_scan > 10:
log_files = get_logs_to_send_sorted()
last_scan = curr_scan
# never send last log file because it is the active log
# and only send one log file at a time (most recent first)
if not len(log_files) or not log_send_queue.empty():
continue
log_entry = log_files.pop()
try:
curr_time = int(time.time())
log_path = os.path.join(SWAGLOG_DIR, log_entry)
setxattr(log_path, LOG_ATTR_NAME, int.to_bytes(curr_time, 4, sys.byteorder))
with open(log_path, "r") as f:
jsonrpc = {
"method": "forwardLogs",
"params": {
"logs": f.read()
},
"jsonrpc": "2.0",
"id": log_entry
}
log_send_queue.put_nowait(json.dumps(jsonrpc))
except OSError:
pass # file could be deleted by log rotation
log_retries = 0
except Exception:
cloudlog.exception("athena.log_handler.exception")
log_retries += 1
if log_retries != 0:
time.sleep(backoff(log_retries))
def ws_proxy_recv(ws, local_sock, ssock, end_event, global_end_event):
while not (end_event.is_set() or global_end_event.is_set()):
try:
data = ws.recv()
local_sock.sendall(data)
except WebSocketTimeoutException:
pass
except Exception:
cloudlog.exception("athenad.ws_proxy_recv.exception")
break
ssock.close()
local_sock.close()
end_event.set()
def ws_proxy_send(ws, local_sock, signal_sock, end_event):
while not end_event.is_set():
try:
r, _, _ = select.select((local_sock, signal_sock), (), ())
if r:
if r[0].fileno() == signal_sock.fileno():
# got end signal from ws_proxy_recv
end_event.set()
break
data = local_sock.recv(4096)
if not data:
# local_sock is dead
end_event.set()
break
ws.send(data, ABNF.OPCODE_BINARY)
except Exception:
cloudlog.exception("athenad.ws_proxy_send.exception")
end_event.set()
signal_sock.close()
def ws_recv(ws, end_event):
while not end_event.is_set():
try:
opcode, data = ws.recv_data(control_frame=True)
if opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY):
if opcode == ABNF.OPCODE_TEXT:
data = data.decode("utf-8")
recv_queue.put_nowait(data)
elif opcode == ABNF.OPCODE_PING:
Params().put("LastAthenaPingTime", str(int(sec_since_boot() * 1e9)))
except WebSocketTimeoutException:
pass
except Exception:
cloudlog.exception("athenad.ws_recv.exception")
end_event.set()
def ws_send(ws, end_event):
while not end_event.is_set():
try:
try:
data = send_queue.get_nowait()
except queue.Empty:
data = log_send_queue.get(timeout=1)
ws.send(data)
except queue.Empty:
pass
except Exception:
cloudlog.exception("athenad.ws_send.exception")
end_event.set()
def backoff(retries):
return random.randrange(0, min(128, int(2 ** retries)))
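# backoff() gives randomised exponential backoff in whole seconds, capped at
# 128: e.g. retries=1 -> [0, 2), retries=3 -> [0, 8), retries >= 7 -> [0, 128).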
def main():
params = Params()
dongle_id = params.get("DongleId", encoding='utf-8')
crash.init()
crash.bind_user(id=dongle_id)
crash.bind_extra(dirty=dirty, origin=origin, branch=branch, commit=commit,
device=HARDWARE.get_device_type())
ws_uri = ATHENA_HOST + "/ws/v2/" + dongle_id
api = Api(dongle_id)
conn_retries = 0
while 1:
try:
ws = create_connection(ws_uri,
cookie="jwt=" + api.get_token(),
enable_multithread=True)
cloudlog.event("athenad.main.connected_ws", ws_uri=ws_uri)
ws.settimeout(1)
conn_retries = 0
handle_long_poll(ws)
except (KeyboardInterrupt, SystemExit):
break
except (ConnectionError, TimeoutError, WebSocketException):
conn_retries += 1
params.delete("LastAthenaPingTime")
except Exception:
crash.capture_exception()
cloudlog.exception("athenad.main.exception")
conn_retries += 1
params.delete("LastAthenaPingTime")
time.sleep(backoff(conn_retries))
if __name__ == "__main__":
main()
|
multi_processing2.py
|
from multiprocessing import Process
import os, time
# Code to be executed by the child process
def run_proc(name):
for i in range(10):
time.sleep(1)
print(f'Run child process {name}: {os.getpid()}-', i)
if __name__=='__main__':
print(f'Parent process: {os.getpid()}')
p = Process(target=run_proc, args=('test',))
print('Child process will start.')
p.start()
for i in range(10):
time.sleep(1)
print(f'Run Parent process: {os.getpid()}-', i)
p.join()
print('Child process end.')
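# Expected behaviour: the parent and child counters print roughly once per
# second each, interleaved, and 'Child process end.' is printed only after
# p.join() returns.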
|
services.py
|
# coding: utf-8
#
# This module implements background service management.
#
# Background service management features:
# - Proper starting and killing of background service.
# - proctitle definition.
# - SIGTERM and SIGHUP propagation.
# - SIGCHLD management. No zombie.
# - Child suicide on parent death.
#
# Two classes help manage multiple services: Service and ServicesManager.
#
# Each long-running process must have its own Service instance. The Service
# class is used both to manage the current process's service and as a handle
# to manipulate a long-running child process.
#
# The ServicesManager class acts as a small init system for the main process.
# It is responsible for starting, killing and propagating signals to child
# processes. The Service instance of the main process must hold a reference to
# the ServicesManager.
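#
# A rough usage sketch (illustrative only; the real wiring lives in the
# application code):
#
#   services = ServicesManager()
#   services.add(Service(app, name=u'worker', setproctitle=setproctitle))
#   main = Service(app, name=u'main', services=services)
#   with services:      # forks and starts the child services
#       main.run()      # setup() then the serve() loop; propagates signals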
import logging
import os
import signal
import sys
from multiprocessing import Process
from time import sleep
from .errors import UserError
logger = logging.getLogger(__name__)
class Service(object):
    # Manage a long-running process. This includes setup, signal management and
    # the main loop.
#
    # There are two kinds of services: the main service and child services. The
    # main service is responsible for propagating signals to children through
    # the ServicesManager.
def __init__(self, app, name=None, services=None, setproctitle=None):
self.app = app
self.name = name
self.logname = name or u'service'
        # Must be None for children, or a ServicesManager instance for the main
        # service. Used to propagate signals. See the reload() method.
self.services = services
self.setproctitle = setproctitle
self.parentpid = None
        # Once the process is forked to run the service loop, we still use this
        # object in the parent process to manage the child process. So we flag
        # here whether the service has forked into its own process. Must be
        # updated in the parent process once the service is forked. See
        # ServicesManager.start().
self.is_my_process = True
def __unicode__(self):
return self.logname
def __enter__(self):
self.sigchld = False
if self.services:
signal.signal(signal.SIGCHLD, self.sigchld_handler)
signal.signal(signal.SIGHUP, self.sighup_handler)
signal.signal(signal.SIGTERM, self.sigterm_handler)
self.sighup = False
def __exit__(self, *a):
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
signal.signal(signal.SIGHUP, signal.SIG_DFL)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
def check_parent_running(self):
if self.parentpid is None:
# If no parentpid, we are the main service. We are running.
return True
try:
os.kill(self.parentpid, 0)
return True
except OSError:
return False
def sigchld_handler(self, *a):
self.sigchld = True
def sighup_handler(self, *a):
self.sighup = True
def sigterm_handler(self, *a):
logger.info(u"%s terminated.", self)
sys.exit(1)
def apply_config(self):
pass
def run(self):
if self.name and self.setproctitle:
self.setproctitle(self.name)
logger.info(u"Starting %s.", self)
self.setup()
try:
self.serve()
except KeyboardInterrupt:
logger.info(u"%s interrupted.", self)
sys.exit(1)
def serve(self):
with self:
logger.debug(u"Entering %s loop.", self)
while True:
if not self.check_parent_running():
logger.warn(
u"Parent process %d is dead. Committing suicide.",
self.parentpid)
sys.exit(1)
if self.sigchld:
self.sigchld = False
if self.services:
self.services.check()
if self.sighup:
self.sighup = False
self.reload()
self.serve1()
def reload(self):
self.app.reload()
if self.services:
self.services.reload()
def setup(self):
# This method is called once before looping to prepare the service:
# bind address, setup SSL, etc.
pass
def serve1(self):
# This method is called by the loop and must serve one request/task.
# This method should not block for too long waiting for work to be
# done. Reload is applied between two calls of this method.
raise NotImplementedError
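# A minimal child service might look like the following sketch (illustrative
# only; accept_one_request() is a hypothetical helper, not part of this
# module):
#
#   class WorkerService(Service):
#       def setup(self):
#           self.queue = []                 # prepare resources before looping
#       def serve1(self):
#           accept_one_request(self.queue)  # handle one task, return quickly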
class ServicesManager(object):
    # Manage child services: starting them in the background, tracking PIDs,
    # replicating signals, checking status, stopping and killing them.
#
# Add a service with services_manager.add(Service(…)).
#
    # As a context manager, services are started on enter and stopped, then
    # killed, on exit.
def __init__(self):
self.processes = []
self.pid = os.getpid()
def __enter__(self):
self.start()
def __exit__(self, *a):
self.stop()
logger.debug(u"Waiting background services.")
sleep(0.125)
self.kill()
def add(self, service):
service.parentpid = self.pid
self.processes.append(
(service, Process(target=service.run, name=service.name)))
def start(self):
for service, process in self.processes:
process.start()
service.is_my_process = False
def reload(self):
for _, process in self.processes:
os.kill(process.pid, signal.SIGHUP)
def check(self):
for i in self.processes[:]:
_, p = i
logger.debug(u"Checking child %s (%s).", p.name, p.pid)
if not p.is_alive():
logger.debug("%s (%s) is dead.", p.name, p.pid)
self.processes.remove(i)
msg = u"Child %s (%s) died." % (p.name, p.pid)
raise UserError(msg)
def stop(self):
for _, process in self.processes:
process.terminate()
def kill(self, timeout=5, step=0.5):
while timeout > 0:
processes = [p for _, p in self.processes if p.is_alive()]
if not processes:
break
sleep(step)
timeout -= step
for process in processes:
logger.warning(u"Killing %s.", process)
os.kill(process.pid, signal.SIGKILL)
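# ---------------------------------------------------------------------------
# Illustrative wiring sketch, not part of the original module. It shows how a
# main service can own a ServicesManager so that SIGCHLD and SIGHUP get
# propagated to background workers. _ExampleTickService is the hypothetical
# subclass sketched above; ``app`` is assumed to expose a reload() method.
def _example_main(app):
    workers = ServicesManager()
    workers.add(_ExampleTickService(app, name=u'worker'))
    with workers:
        # Entering the context starts the worker processes; exiting stops
        # them, waits briefly, then kills any survivors.
        main = _ExampleTickService(app, name=u'main', services=workers)
        main.run()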
|
test_bz2.py
|
from test import test_support as support
from test.test_support import TESTFN, _4G, bigmemtest, import_module, findfile
import unittest
from cStringIO import StringIO
import os
import subprocess
import sys
try:
import threading
except ImportError:
threading = None
bz2 = import_module('bz2')
from bz2 import BZ2File, BZ2Compressor, BZ2Decompressor
has_cmdline_bunzip2 = sys.platform not in ("win32", "os2emx", "riscos")
class BaseTest(unittest.TestCase):
"Base for other testcases."
TEXT = 'root:x:0:0:root:/root:/bin/bash\nbin:x:1:1:bin:/bin:\ndaemon:x:2:2:daemon:/sbin:\nadm:x:3:4:adm:/var/adm:\nlp:x:4:7:lp:/var/spool/lpd:\nsync:x:5:0:sync:/sbin:/bin/sync\nshutdown:x:6:0:shutdown:/sbin:/sbin/shutdown\nhalt:x:7:0:halt:/sbin:/sbin/halt\nmail:x:8:12:mail:/var/spool/mail:\nnews:x:9:13:news:/var/spool/news:\nuucp:x:10:14:uucp:/var/spool/uucp:\noperator:x:11:0:operator:/root:\ngames:x:12:100:games:/usr/games:\ngopher:x:13:30:gopher:/usr/lib/gopher-data:\nftp:x:14:50:FTP User:/var/ftp:/bin/bash\nnobody:x:65534:65534:Nobody:/home:\npostfix:x:100:101:postfix:/var/spool/postfix:\nniemeyer:x:500:500::/home/niemeyer:/bin/bash\npostgres:x:101:102:PostgreSQL Server:/var/lib/pgsql:/bin/bash\nmysql:x:102:103:MySQL server:/var/lib/mysql:/bin/bash\nwww:x:103:104::/var/www:/bin/false\n'
DATA = 'BZh91AY&SY.\xc8N\x18\x00\x01>_\x80\x00\x10@\x02\xff\xf0\x01\x07n\x00?\xe7\xff\xe00\x01\x99\xaa\x00\xc0\x03F\x86\x8c#&\x83F\x9a\x03\x06\xa6\xd0\xa6\x93M\x0fQ\xa7\xa8\x06\x804hh\x12$\x11\xa4i4\xf14S\xd2<Q\xb5\x0fH\xd3\xd4\xdd\xd5\x87\xbb\xf8\x94\r\x8f\xafI\x12\xe1\xc9\xf8/E\x00pu\x89\x12]\xc9\xbbDL\nQ\x0e\t1\x12\xdf\xa0\xc0\x97\xac2O9\x89\x13\x94\x0e\x1c7\x0ed\x95I\x0c\xaaJ\xa4\x18L\x10\x05#\x9c\xaf\xba\xbc/\x97\x8a#C\xc8\xe1\x8cW\xf9\xe2\xd0\xd6M\xa7\x8bXa<e\x84t\xcbL\xb3\xa7\xd9\xcd\xd1\xcb\x84.\xaf\xb3\xab\xab\xad`n}\xa0lh\tE,\x8eZ\x15\x17VH>\x88\xe5\xcd9gd6\x0b\n\xe9\x9b\xd5\x8a\x99\xf7\x08.K\x8ev\xfb\xf7xw\xbb\xdf\xa1\x92\xf1\xdd|/";\xa2\xba\x9f\xd5\xb1#A\xb6\xf6\xb3o\xc9\xc5y\\\xebO\xe7\x85\x9a\xbc\xb6f8\x952\xd5\xd7"%\x89>V,\xf7\xa6z\xe2\x9f\xa3\xdf\x11\x11"\xd6E)I\xa9\x13^\xca\xf3r\xd0\x03U\x922\xf26\xec\xb6\xed\x8b\xc3U\x13\x9d\xc5\x170\xa4\xfa^\x92\xacDF\x8a\x97\xd6\x19\xfe\xdd\xb8\xbd\x1a\x9a\x19\xa3\x80ankR\x8b\xe5\xd83]\xa9\xc6\x08\x82f\xf6\xb9"6l$\xb8j@\xc0\x8a\xb0l1..\xbak\x83ls\x15\xbc\xf4\xc1\x13\xbe\xf8E\xb8\x9d\r\xa8\x9dk\x84\xd3n\xfa\xacQ\x07\xb1%y\xaav\xb4\x08\xe0z\x1b\x16\xf5\x04\xe9\xcc\xb9\x08z\x1en7.G\xfc]\xc9\x14\xe1B@\xbb!8`'
DATA_CRLF = 'BZh91AY&SY\xaez\xbbN\x00\x01H\xdf\x80\x00\x12@\x02\xff\xf0\x01\x07n\x00?\xe7\xff\xe0@\x01\xbc\xc6`\x86*\x8d=M\xa9\x9a\x86\xd0L@\x0fI\xa6!\xa1\x13\xc8\x88jdi\x8d@\x03@\x1a\x1a\x0c\x0c\x83 \x00\xc4h2\x19\x01\x82D\x84e\t\xe8\x99\x89\x19\x1ah\x00\r\x1a\x11\xaf\x9b\x0fG\xf5(\x1b\x1f?\t\x12\xcf\xb5\xfc\x95E\x00ps\x89\x12^\xa4\xdd\xa2&\x05(\x87\x04\x98\x89u\xe40%\xb6\x19\'\x8c\xc4\x89\xca\x07\x0e\x1b!\x91UIFU%C\x994!DI\xd2\xfa\xf0\xf1N8W\xde\x13A\xf5\x9cr%?\x9f3;I45A\xd1\x8bT\xb1<l\xba\xcb_\xc00xY\x17r\x17\x88\x08\x08@\xa0\ry@\x10\x04$)`\xf2\xce\x89z\xb0s\xec\x9b.iW\x9d\x81\xb5-+t\x9f\x1a\'\x97dB\xf5x\xb5\xbe.[.\xd7\x0e\x81\xe7\x08\x1cN`\x88\x10\xca\x87\xc3!"\x80\x92R\xa1/\xd1\xc0\xe6mf\xac\xbd\x99\xcca\xb3\x8780>\xa4\xc7\x8d\x1a\\"\xad\xa1\xabyBg\x15\xb9l\x88\x88\x91k"\x94\xa4\xd4\x89\xae*\xa6\x0b\x10\x0c\xd6\xd4m\xe86\xec\xb5j\x8a\x86j\';\xca.\x01I\xf2\xaaJ\xe8\x88\x8cU+t3\xfb\x0c\n\xa33\x13r2\r\x16\xe0\xb3(\xbf\x1d\x83r\xe7M\xf0D\x1365\xd8\x88\xd3\xa4\x92\xcb2\x06\x04\\\xc1\xb0\xea//\xbek&\xd8\xe6+t\xe5\xa1\x13\xada\x16\xder5"w]\xa2i\xb7[\x97R \xe2IT\xcd;Z\x04dk4\xad\x8a\t\xd3\x81z\x10\xf1:^`\xab\x1f\xc5\xdc\x91N\x14$+\x9e\xae\xd3\x80'
EMPTY_DATA = 'BZh9\x17rE8P\x90\x00\x00\x00\x00'
if has_cmdline_bunzip2:
def decompress(self, data):
pop = subprocess.Popen("bunzip2", shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
pop.stdin.write(data)
pop.stdin.close()
ret = pop.stdout.read()
pop.stdout.close()
if pop.wait() != 0:
ret = bz2.decompress(data)
return ret
else:
# bunzip2 isn't available to run on Windows.
def decompress(self, data):
return bz2.decompress(data)
class BZ2FileTest(BaseTest):
"Test BZ2File type miscellaneous methods."
def setUp(self):
self.filename = TESTFN
def tearDown(self):
if os.path.isfile(self.filename):
os.unlink(self.filename)
def createTempFile(self, crlf=0):
with open(self.filename, "wb") as f:
if crlf:
data = self.DATA_CRLF
else:
data = self.DATA
f.write(data)
def testRead(self):
# "Test BZ2File.read()"
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, None)
self.assertEqual(bz2f.read(), self.TEXT)
def testRead0(self):
# Test BBZ2File.read(0)"
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, None)
self.assertEqual(bz2f.read(0), "")
def testReadChunk10(self):
# "Test BZ2File.read() in chunks of 10 bytes"
self.createTempFile()
with BZ2File(self.filename) as bz2f:
text = ''
while 1:
str = bz2f.read(10)
if not str:
break
text += str
self.assertEqual(text, self.TEXT)
def testRead100(self):
# "Test BZ2File.read(100)"
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(100), self.TEXT[:100])
def testReadLine(self):
# "Test BZ2File.readline()"
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readline, None)
sio = StringIO(self.TEXT)
for line in sio.readlines():
self.assertEqual(bz2f.readline(), line)
def testReadLines(self):
# "Test BZ2File.readlines()"
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readlines, None)
sio = StringIO(self.TEXT)
self.assertEqual(bz2f.readlines(), sio.readlines())
def testIterator(self):
# "Test iter(BZ2File)"
self.createTempFile()
with BZ2File(self.filename) as bz2f:
sio = StringIO(self.TEXT)
self.assertEqual(list(iter(bz2f)), sio.readlines())
def testClosedIteratorDeadlock(self):
# "Test that iteration on a closed bz2file releases the lock."
# http://bugs.python.org/issue3309
self.createTempFile()
bz2f = BZ2File(self.filename)
bz2f.close()
self.assertRaises(ValueError, bz2f.next)
# This call will deadlock if the above .next call failed to
# release the lock.
self.assertRaises(ValueError, bz2f.readlines)
def testXReadLines(self):
# "Test BZ2File.xreadlines()"
self.createTempFile()
bz2f = BZ2File(self.filename)
sio = StringIO(self.TEXT)
self.assertEqual(list(bz2f.xreadlines()), sio.readlines())
bz2f.close()
def testUniversalNewlinesLF(self):
# "Test BZ2File.read() with universal newlines (\\n)"
self.createTempFile()
bz2f = BZ2File(self.filename, "rU")
self.assertEqual(bz2f.read(), self.TEXT)
self.assertEqual(bz2f.newlines, "\n")
bz2f.close()
def testUniversalNewlinesCRLF(self):
# "Test BZ2File.read() with universal newlines (\\r\\n)"
self.createTempFile(crlf=1)
bz2f = BZ2File(self.filename, "rU")
self.assertEqual(bz2f.read(), self.TEXT)
self.assertEqual(bz2f.newlines, "\r\n")
bz2f.close()
def testWrite(self):
# "Test BZ2File.write()"
with BZ2File(self.filename, "w") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
with open(self.filename, 'rb') as f:
self.assertEqual(self.decompress(f.read()), self.TEXT)
def testWriteChunks10(self):
# "Test BZ2File.write() with chunks of 10 bytes"
with BZ2File(self.filename, "w") as bz2f:
n = 0
while 1:
str = self.TEXT[n*10:(n+1)*10]
if not str:
break
bz2f.write(str)
n += 1
with open(self.filename, 'rb') as f:
self.assertEqual(self.decompress(f.read()), self.TEXT)
def testWriteLines(self):
# "Test BZ2File.writelines()"
with BZ2File(self.filename, "w") as bz2f:
self.assertRaises(TypeError, bz2f.writelines)
sio = StringIO(self.TEXT)
bz2f.writelines(sio.readlines())
# patch #1535500
self.assertRaises(ValueError, bz2f.writelines, ["a"])
with open(self.filename, 'rb') as f:
self.assertEqual(self.decompress(f.read()), self.TEXT)
def testWriteMethodsOnReadOnlyFile(self):
with BZ2File(self.filename, "w") as bz2f:
bz2f.write("abc")
with BZ2File(self.filename, "r") as bz2f:
self.assertRaises(IOError, bz2f.write, "a")
self.assertRaises(IOError, bz2f.writelines, ["a"])
def testSeekForward(self):
# "Test BZ2File.seek(150, 0)"
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekBackwards(self):
# "Test BZ2File.seek(-150, 1)"
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.read(500)
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[500-150:])
def testSeekBackwardsFromEnd(self):
# "Test BZ2File.seek(-150, 2)"
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150, 2)
self.assertEqual(bz2f.read(), self.TEXT[len(self.TEXT)-150:])
def testSeekPostEnd(self):
# "Test BZ2File.seek(150000)"
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT))
self.assertEqual(bz2f.read(), "")
def testSeekPostEndTwice(self):
# "Test BZ2File.seek(150000) twice"
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT))
self.assertEqual(bz2f.read(), "")
def testSeekPreStart(self):
# "Test BZ2File.seek(-150, 0)"
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150)
self.assertEqual(bz2f.tell(), 0)
self.assertEqual(bz2f.read(), self.TEXT)
def testOpenDel(self):
# "Test opening and deleting a file many times"
self.createTempFile()
for i in xrange(10000):
o = BZ2File(self.filename)
del o
def testOpenNonexistent(self):
# "Test opening a nonexistent file"
self.assertRaises(IOError, BZ2File, "/non/existent")
def testModeU(self):
# Bug #1194181: bz2.BZ2File opened for write with mode "U"
self.createTempFile()
bz2f = BZ2File(self.filename, "U")
bz2f.close()
f = file(self.filename)
f.seek(0, 2)
self.assertEqual(f.tell(), len(self.DATA))
f.close()
def testBug1191043(self):
# readlines() for files containing no newline
data = 'BZh91AY&SY\xd9b\x89]\x00\x00\x00\x03\x80\x04\x00\x02\x00\x0c\x00 \x00!\x9ah3M\x13<]\xc9\x14\xe1BCe\x8a%t'
with open(self.filename, "wb") as f:
f.write(data)
with BZ2File(self.filename) as bz2f:
lines = bz2f.readlines()
self.assertEqual(lines, ['Test'])
with BZ2File(self.filename) as bz2f:
xlines = list(bz2f.readlines())
self.assertEqual(xlines, ['Test'])
def testContextProtocol(self):
# BZ2File supports the context management protocol
f = None
with BZ2File(self.filename, "wb") as f:
f.write(b"xxx")
f = BZ2File(self.filename, "rb")
f.close()
try:
with f:
pass
except ValueError:
pass
else:
self.fail("__enter__ on a closed file didn't raise an exception")
try:
with BZ2File(self.filename, "wb") as f:
1 // 0
except ZeroDivisionError:
pass
else:
self.fail("1 // 0 didn't raise an exception")
@unittest.skipUnless(threading, 'Threading required for this test.')
def testThreading(self):
# Using a BZ2File from several threads doesn't deadlock (issue #7205).
data = "1" * 2**20
nthreads = 10
with bz2.BZ2File(self.filename, 'wb') as f:
def comp():
for i in range(5):
f.write(data)
threads = [threading.Thread(target=comp) for i in range(nthreads)]
with support.start_threads(threads):
pass
def testMixedIterationReads(self):
# Issue #8397: mixed iteration and reads should be forbidden.
with bz2.BZ2File(self.filename, 'wb') as f:
# The internal buffer size is hard-wired to 8192 bytes; we must
# write out more than that for the test to stop halfway through
# the buffer.
f.write(self.TEXT * 100)
with bz2.BZ2File(self.filename, 'rb') as f:
next(f)
self.assertRaises(ValueError, f.read)
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
@unittest.skipIf(sys.platform == 'win32',
'test depends on being able to delete a still-open file,'
' which is not possible on Windows')
def testInitNonExistentFile(self):
# Issue #19878: Should not segfault when __init__ with non-existent
# file for the second time.
self.createTempFile()
# Test close():
with BZ2File(self.filename, "wb") as f:
self.assertRaises(IOError, f.__init__, "non-existent-file")
# Test object deallocation without call to close():
f = bz2.BZ2File(self.filename)
self.assertRaises(IOError, f.__init__, "non-existent-file")
del f
class BZ2CompressorTest(BaseTest):
def testCompress(self):
# "Test BZ2Compressor.compress()/flush()"
bz2c = BZ2Compressor()
self.assertRaises(TypeError, bz2c.compress)
data = bz2c.compress(self.TEXT)
data += bz2c.flush()
self.assertEqual(self.decompress(data), self.TEXT)
def testCompressEmptyString(self):
# "Test BZ2Compressor.compress()/flush() of empty string"
bz2c = BZ2Compressor()
data = bz2c.compress('')
data += bz2c.flush()
self.assertEqual(data, self.EMPTY_DATA)
def testCompressChunks10(self):
# "Test BZ2Compressor.compress()/flush() with chunks of 10 bytes"
bz2c = BZ2Compressor()
n = 0
data = ''
while 1:
str = self.TEXT[n*10:(n+1)*10]
if not str:
break
data += bz2c.compress(str)
n += 1
data += bz2c.flush()
self.assertEqual(self.decompress(data), self.TEXT)
@bigmemtest(_4G, memuse=1.25)
def testBigmem(self, size):
text = "a" * size
bz2c = bz2.BZ2Compressor()
data = bz2c.compress(text) + bz2c.flush()
del text
text = self.decompress(data)
self.assertEqual(len(text), size)
self.assertEqual(text.strip("a"), "")
class BZ2DecompressorTest(BaseTest):
def test_Constructor(self):
self.assertRaises(TypeError, BZ2Decompressor, 42)
def testDecompress(self):
# "Test BZ2Decompressor.decompress()"
bz2d = BZ2Decompressor()
self.assertRaises(TypeError, bz2d.decompress)
text = bz2d.decompress(self.DATA)
self.assertEqual(text, self.TEXT)
def testDecompressChunks10(self):
# "Test BZ2Decompressor.decompress() with chunks of 10 bytes"
bz2d = BZ2Decompressor()
text = ''
n = 0
while 1:
str = self.DATA[n*10:(n+1)*10]
if not str:
break
text += bz2d.decompress(str)
n += 1
self.assertEqual(text, self.TEXT)
def testDecompressUnusedData(self):
# "Test BZ2Decompressor.decompress() with unused data"
bz2d = BZ2Decompressor()
unused_data = "this is unused data"
text = bz2d.decompress(self.DATA+unused_data)
self.assertEqual(text, self.TEXT)
self.assertEqual(bz2d.unused_data, unused_data)
def testEOFError(self):
# "Calling BZ2Decompressor.decompress() after EOS must raise EOFError"
bz2d = BZ2Decompressor()
text = bz2d.decompress(self.DATA)
self.assertRaises(EOFError, bz2d.decompress, "anything")
self.assertRaises(EOFError, bz2d.decompress, "")
@bigmemtest(_4G, memuse=1.25)
def testBigmem(self, size):
# Issue #14398: decompression fails when output data is >=2GB.
if size < _4G:
self.skipTest("Test needs 5GB of memory to run.")
compressed = bz2.compress("a" * _4G)
text = bz2.BZ2Decompressor().decompress(compressed)
self.assertEqual(len(text), _4G)
self.assertEqual(text.strip("a"), "")
class FuncTest(BaseTest):
"Test module functions"
def testCompress(self):
# "Test compress() function"
data = bz2.compress(self.TEXT)
self.assertEqual(self.decompress(data), self.TEXT)
def testCompressEmptyString(self):
# "Test compress() of empty string"
text = bz2.compress('')
self.assertEqual(text, self.EMPTY_DATA)
def testDecompress(self):
# "Test decompress() function"
text = bz2.decompress(self.DATA)
self.assertEqual(text, self.TEXT)
def testDecompressEmpty(self):
# "Test decompress() function with empty string"
text = bz2.decompress("")
self.assertEqual(text, "")
def testDecompressToEmptyString(self):
# "Test decompress() of minimal bz2 data to empty string"
text = bz2.decompress(self.EMPTY_DATA)
self.assertEqual(text, '')
def testDecompressIncomplete(self):
# "Test decompress() function with incomplete data"
self.assertRaises(ValueError, bz2.decompress, self.DATA[:-10])
@bigmemtest(_4G, memuse=1.25)
def testCompressBigmem(self, size):
text = "a" * size
data = bz2.compress(text)
del text
text = self.decompress(data)
self.assertEqual(len(text), size)
self.assertEqual(text.strip("a"), "")
@bigmemtest(_4G, memuse=1.25)
def testDecompressBigmem(self, size):
# Issue #14398: decompression fails when output data is >=2GB.
if size < _4G:
self.skipTest("Test needs 5GB of memory to run.")
compressed = bz2.compress("a" * _4G)
text = bz2.decompress(compressed)
self.assertEqual(len(text), _4G)
self.assertEqual(text.strip("a"), "")
def test_main():
support.run_unittest(
BZ2FileTest,
BZ2CompressorTest,
BZ2DecompressorTest,
FuncTest
)
support.reap_children()
if __name__ == '__main__':
test_main()
# vim:ts=4:sw=4
|
test_crud.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
System tests for Create, Update, and Delete (CRUD).
"""
import datetime
import os
import pickle
import pytz
import random
import threading
import zlib
try:
from unittest import mock
except ImportError:
import mock
import pytest
import test_utils.system
from google.cloud import ndb
from google.cloud.ndb import _cache
from google.cloud.ndb import global_cache as global_cache_module
from . import KIND, eventually, equals
USE_REDIS_CACHE = bool(os.environ.get("REDIS_CACHE_URL"))
USE_MEMCACHE = bool(os.environ.get("MEMCACHED_HOSTS"))
def _assert_contemporaneous(timestamp1, timestamp2, delta_margin=2):
delta_margin = datetime.timedelta(seconds=delta_margin)
assert delta_margin > abs(timestamp1 - timestamp2)
@pytest.mark.usefixtures("client_context")
def test_retrieve_entity(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none", baz=b"night")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.StringProperty()
key = ndb.Key(KIND, entity_id)
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
def test_retrieve_entity_with_caching(ds_entity, client_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none", baz=b"night")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.StringProperty()
client_context.set_cache_policy(None) # Use default
key = ndb.Key(KIND, entity_id)
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
assert key.get() is entity
def test_retrieve_entity_with_global_cache(ds_entity, client_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none", baz=b"night")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.StringProperty()
global_cache = global_cache_module._InProcessGlobalCache()
cache_dict = global_cache_module._InProcessGlobalCache.cache
with client_context.new(global_cache=global_cache).use() as context:
context.set_global_cache_policy(None) # Use default
key = ndb.Key(KIND, entity_id)
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
cache_key = _cache.global_cache_key(key._key)
assert cache_key in cache_dict
patch = mock.patch("google.cloud.ndb._datastore_api._LookupBatch.add")
patch.side_effect = Exception("Shouldn't call this")
with patch:
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
@pytest.mark.skipif(not USE_REDIS_CACHE, reason="Redis is not configured")
def test_retrieve_entity_with_redis_cache(ds_entity, redis_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none", baz=b"night")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.StringProperty()
key = ndb.Key(KIND, entity_id)
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
cache_key = _cache.global_cache_key(key._key)
assert redis_context.global_cache.redis.get(cache_key) is not None
patch = mock.patch("google.cloud.ndb._datastore_api._LookupBatch.add")
patch.side_effect = Exception("Shouldn't call this")
with patch:
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
@pytest.mark.skipif(not USE_MEMCACHE, reason="Memcache is not configured")
def test_retrieve_entity_with_memcache(ds_entity, memcache_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none", baz=b"night")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.StringProperty()
key = ndb.Key(KIND, entity_id)
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
cache_key = _cache.global_cache_key(key._key)
cache_key = global_cache_module.MemcacheCache._key(cache_key)
assert memcache_context.global_cache.client.get(cache_key) is not None
patch = mock.patch("google.cloud.ndb._datastore_api._LookupBatch.add")
patch.side_effect = Exception("Shouldn't call this")
with patch:
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
@pytest.mark.usefixtures("client_context")
def test_retrieve_entity_not_found(ds_entity):
entity_id = test_utils.system.unique_resource_id()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
key = ndb.Key(KIND, entity_id)
assert key.get() is None
@pytest.mark.usefixtures("client_context")
def test_nested_tasklet(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
@ndb.tasklet
def get_foo(key):
entity = yield key.get_async()
raise ndb.Return(entity.foo)
key = ndb.Key(KIND, entity_id)
assert get_foo(key).result() == 42
@pytest.mark.usefixtures("client_context")
def test_retrieve_two_entities_in_parallel(ds_entity):
entity1_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity1_id, foo=42, bar="none")
entity2_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity2_id, foo=65, bar="naan")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
key1 = ndb.Key(KIND, entity1_id)
key2 = ndb.Key(KIND, entity2_id)
@ndb.tasklet
def get_two_entities():
entity1, entity2 = yield key1.get_async(), key2.get_async()
raise ndb.Return(entity1, entity2)
entity1, entity2 = get_two_entities().result()
assert isinstance(entity1, SomeKind)
assert entity1.foo == 42
assert entity1.bar == "none"
assert isinstance(entity2, SomeKind)
assert entity2.foo == 65
assert entity2.bar == "naan"
@pytest.mark.usefixtures("client_context")
def test_retrieve_entities_in_parallel_nested(ds_entity):
"""Regression test for #357.
https://github.com/googleapis/python-ndb/issues/357
"""
entity1_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity1_id, foo=42, bar="none")
entity2_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity2_id, foo=65, bar="naan")
entity3_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity3_id, foo=66, bar="route")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
key1 = ndb.Key(KIND, entity1_id)
key2 = ndb.Key(KIND, entity2_id)
key3 = ndb.Key(KIND, entity3_id)
@ndb.tasklet
def get_two_entities():
entity1, (entity2, entity3) = yield (
key1.get_async(),
[key2.get_async(), key3.get_async()],
)
raise ndb.Return(entity1, entity2, entity3)
entity1, entity2, entity3 = get_two_entities().result()
assert isinstance(entity1, SomeKind)
assert entity1.foo == 42
assert entity1.bar == "none"
assert isinstance(entity2, SomeKind)
assert entity2.foo == 65
assert entity2.bar == "naan"
assert isinstance(entity3, SomeKind)
assert entity3.foo == 66
assert entity3.bar == "route"
@pytest.mark.usefixtures("client_context")
def test_insert_entity(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
entity = SomeKind(foo=42, bar="none")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar == "none"
# Make sure strings are stored as strings in datastore
ds_entity = ds_client.get(key._key)
assert ds_entity["bar"] == "none"
@pytest.mark.usefixtures("client_context")
def test_insert_entity_with_stored_name_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.StringProperty()
bar = ndb.StringProperty(name="notbar")
entity = SomeKind(foo="something", bar="or other")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == "something"
assert retrieved.bar == "or other"
ds_entity = ds_client.get(key._key)
assert ds_entity["notbar"] == "or other"
@pytest.mark.usefixtures("client_context")
def test_insert_roundtrip_naive_datetime(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.DateTimeProperty()
entity = SomeKind(foo=datetime.datetime(2010, 5, 12, 2, 42))
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == datetime.datetime(2010, 5, 12, 2, 42)
@pytest.mark.usefixtures("client_context")
def test_datetime_w_tzinfo(dispose_of, ds_client):
class timezone(datetime.tzinfo):
def __init__(self, offset):
self.offset = datetime.timedelta(hours=offset)
def utcoffset(self, dt):
return self.offset
def dst(self, dt):
return datetime.timedelta(0)
mytz = timezone(-4)
class SomeKind(ndb.Model):
foo = ndb.DateTimeProperty(tzinfo=mytz)
bar = ndb.DateTimeProperty(tzinfo=mytz)
entity = SomeKind(
foo=datetime.datetime(2010, 5, 12, 2, 42, tzinfo=timezone(-5)),
bar=datetime.datetime(2010, 5, 12, 2, 42),
)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == datetime.datetime(2010, 5, 12, 3, 42, tzinfo=mytz)
assert retrieved.bar == datetime.datetime(2010, 5, 11, 22, 42, tzinfo=mytz)
def test_parallel_threads(dispose_of, namespace):
client = ndb.Client(namespace=namespace)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
def insert(foo):
with client.context(cache_policy=False):
entity = SomeKind(foo=foo, bar="none")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
assert retrieved.bar == "none"
thread1 = threading.Thread(target=insert, args=[42], name="one")
thread2 = threading.Thread(target=insert, args=[144], name="two")
thread1.start()
thread2.start()
thread1.join()
thread2.join()
@pytest.mark.usefixtures("client_context")
def test_large_json_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.JsonProperty()
foo = {str(i): i for i in range(500)}
entity = SomeKind(foo=foo)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
@pytest.mark.usefixtures("client_context")
def test_compressed_json_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.JsonProperty(compressed=True)
foo = {str(i): i for i in range(500)}
entity = SomeKind(foo=foo)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
@pytest.mark.usefixtures("client_context")
def test_compressed_blob_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.BlobProperty(compressed=True)
foo = b"abc" * 100
entity = SomeKind(foo=foo)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
@pytest.mark.usefixtures("client_context")
def test_compressed_repeated_local_structured_property(dispose_of, ds_client):
class Dog(ndb.Model):
name = ndb.StringProperty()
class House(ndb.Model):
dogs = ndb.LocalStructuredProperty(Dog, repeated=True, compressed=True)
entity = House()
dogs = [Dog(name="Mika"), Dog(name="Mocha")]
entity.dogs = dogs
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.dogs == dogs
def test_get_by_id_with_compressed_repeated_local_structured_property(
client_context, dispose_of, ds_client
):
class Dog(ndb.Model):
name = ndb.TextProperty()
class House(ndb.Model):
dogs = ndb.LocalStructuredProperty(Dog, repeated=True, compressed=True)
with client_context.new(legacy_data=True).use():
entity = House()
dogs = [Dog(name="Mika"), Dog(name="Mocha")]
entity.dogs = dogs
key = entity.put()
house_id = key.id()
dispose_of(key._key)
retrieved = House.get_by_id(house_id)
assert retrieved.dogs == dogs
@pytest.mark.usefixtures("client_context")
def test_retrieve_entity_with_legacy_compressed_property(
ds_entity_with_meanings,
):
class SomeKind(ndb.Model):
blob = ndb.BlobProperty()
value = b"abc" * 1000
compressed_value = zlib.compress(value)
entity_id = test_utils.system.unique_resource_id()
ds_entity_with_meanings(
{"blob": (22, compressed_value)}, KIND, entity_id, **{"blob": compressed_value}
)
key = ndb.Key(KIND, entity_id)
retrieved = key.get()
assert retrieved.blob == value
@pytest.mark.usefixtures("client_context")
def test_large_pickle_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.PickleProperty()
foo = {str(i): i for i in range(500)}
entity = SomeKind(foo=foo)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
@pytest.mark.usefixtures("client_context")
def test_key_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.KeyProperty()
key_value = ndb.Key("Whatevs", 123)
entity = SomeKind(foo=key_value)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == key_value
@pytest.mark.usefixtures("client_context")
def test_multiple_key_properties(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.KeyProperty(kind="Whatevs")
bar = ndb.KeyProperty(kind="Whatevs")
foo = ndb.Key("Whatevs", 123)
bar = ndb.Key("Whatevs", 321)
entity = SomeKind(foo=foo, bar=bar)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
assert retrieved.bar == bar
assert retrieved.foo != retrieved.bar
def test_insert_entity_with_caching(client_context):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
client_context.set_cache_policy(None) # Use default
entity = SomeKind(foo=42, bar="none")
key = entity.put()
with client_context.new(cache_policy=False).use():
# Sneaky. Delete the entity out from under the cache so we know we're
# getting the cached copy.
key.delete()
eventually(key.get, equals(None))
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar == "none"
def test_insert_entity_with_global_cache(dispose_of, client_context):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
global_cache = global_cache_module._InProcessGlobalCache()
cache_dict = global_cache_module._InProcessGlobalCache.cache
with client_context.new(global_cache=global_cache).use() as context:
context.set_global_cache_policy(None) # Use default
entity = SomeKind(foo=42, bar="none")
key = entity.put()
dispose_of(key._key)
cache_key = _cache.global_cache_key(key._key)
assert not cache_dict
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar == "none"
assert cache_key in cache_dict
entity.foo = 43
entity.put()
# This is py27 behavior. I can see a case being made for caching the
# entity on write rather than waiting for a subsequent lookup.
assert cache_key not in cache_dict
@pytest.mark.skipif(not USE_REDIS_CACHE, reason="Redis is not configured")
def test_insert_entity_with_redis_cache(dispose_of, redis_context):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
entity = SomeKind(foo=42, bar="none")
key = entity.put()
dispose_of(key._key)
cache_key = _cache.global_cache_key(key._key)
assert redis_context.global_cache.redis.get(cache_key) is None
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar == "none"
assert redis_context.global_cache.redis.get(cache_key) is not None
entity.foo = 43
entity.put()
# This is py27 behavior. I can see a case being made for caching the
# entity on write rather than waiting for a subsequent lookup.
assert redis_context.global_cache.redis.get(cache_key) is None
@pytest.mark.skipif(not USE_MEMCACHE, reason="Memcache is not configured")
def test_insert_entity_with_memcache(dispose_of, memcache_context):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
entity = SomeKind(foo=42, bar="none")
key = entity.put()
dispose_of(key._key)
cache_key = _cache.global_cache_key(key._key)
cache_key = global_cache_module.MemcacheCache._key(cache_key)
assert memcache_context.global_cache.client.get(cache_key) is None
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar == "none"
assert memcache_context.global_cache.client.get(cache_key) is not None
entity.foo = 43
entity.put()
# This is py27 behavior. I can see a case being made for caching the
# entity on write rather than waiting for a subsequent lookup.
assert memcache_context.global_cache.client.get(cache_key) is None
@pytest.mark.usefixtures("client_context")
def test_update_entity(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
key = ndb.Key(KIND, entity_id)
entity = key.get()
entity.foo = 56
entity.bar = "high"
assert entity.put() == key
retrieved = key.get()
assert retrieved.foo == 56
assert retrieved.bar == "high"
@pytest.mark.usefixtures("client_context")
def test_insert_entity_in_transaction(dispose_of):
commit_callback = mock.Mock()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
def save_entity():
ndb.get_context().call_on_commit(commit_callback)
entity = SomeKind(foo=42, bar="none")
key = entity.put()
dispose_of(key._key)
return key
key = ndb.transaction(save_entity)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar == "none"
commit_callback.assert_called_once_with()
@pytest.mark.usefixtures("client_context")
def test_update_entity_in_transaction(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
def update_entity():
key = ndb.Key(KIND, entity_id)
entity = key.get()
entity.foo = 56
entity.bar = "high"
assert entity.put() == key
return key
key = ndb.transaction(update_entity)
retrieved = key.get()
assert retrieved.foo == 56
assert retrieved.bar == "high"
@pytest.mark.usefixtures("client_context")
def test_parallel_transactions():
def task(delay):
@ndb.tasklet
def callback():
transaction = ndb.get_context().transaction
yield ndb.sleep(delay)
assert ndb.get_context().transaction == transaction
raise ndb.Return(transaction)
return callback
future1 = ndb.transaction_async(task(0.1))
future2 = ndb.transaction_async(task(0.06))
ndb.wait_all((future1, future2))
assert future1.get_result() != future2.get_result()
@pytest.mark.usefixtures("client_context")
def test_delete_entity(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
key = ndb.Key(KIND, entity_id)
assert key.get().foo == 42
assert key.delete() is None
assert key.get() is None
assert key.delete() is None
def test_delete_entity_with_caching(ds_entity, client_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
client_context.set_cache_policy(None) # Use default
key = ndb.Key(KIND, entity_id)
assert key.get().foo == 42
assert key.delete() is None
assert key.get() is None
assert key.delete() is None
def test_delete_entity_with_global_cache(ds_entity, client_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
key = ndb.Key(KIND, entity_id)
cache_key = _cache.global_cache_key(key._key)
global_cache = global_cache_module._InProcessGlobalCache()
cache_dict = global_cache_module._InProcessGlobalCache.cache
with client_context.new(global_cache=global_cache).use():
assert key.get().foo == 42
assert cache_key in cache_dict
assert key.delete() is None
assert cache_key not in cache_dict
# This is py27 behavior. Not entirely sold on leaving _LOCKED value for
# Datastore misses.
assert key.get() is None
assert cache_dict[cache_key][0] == b"0"
@pytest.mark.skipif(not USE_REDIS_CACHE, reason="Redis is not configured")
def test_delete_entity_with_redis_cache(ds_entity, redis_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
key = ndb.Key(KIND, entity_id)
cache_key = _cache.global_cache_key(key._key)
assert key.get().foo == 42
assert redis_context.global_cache.redis.get(cache_key) is not None
assert key.delete() is None
assert redis_context.global_cache.redis.get(cache_key) is None
# This is py27 behavior. Not entirely sold on leaving _LOCKED value for
# Datastore misses.
assert key.get() is None
assert redis_context.global_cache.redis.get(cache_key) == b"0"
@pytest.mark.skipif(not USE_MEMCACHE, reason="Memcache is not configured")
def test_delete_entity_with_memcache(ds_entity, memcache_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
key = ndb.Key(KIND, entity_id)
cache_key = _cache.global_cache_key(key._key)
cache_key = global_cache_module.MemcacheCache._key(cache_key)
assert key.get().foo == 42
assert memcache_context.global_cache.client.get(cache_key) is not None
assert key.delete() is None
assert memcache_context.global_cache.client.get(cache_key) is None
# This is py27 behavior. Not entirely sold on leaving _LOCKED value for
# Datastore misses.
assert key.get() is None
assert memcache_context.global_cache.client.get(cache_key) == b"0"
@pytest.mark.usefixtures("client_context")
def test_delete_entity_in_transaction(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
key = ndb.Key(KIND, entity_id)
assert key.get().foo == 42
def delete_entity():
assert key.delete() is None
assert key.get().foo == 42 # not deleted until commit
ndb.transaction(delete_entity)
assert key.get() is None
def test_delete_entity_in_transaction_with_global_cache(client_context, ds_entity):
"""Regression test for #426
https://github.com/googleapis/python-ndb/issues/426
"""
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
global_cache = global_cache_module._InProcessGlobalCache()
with client_context.new(global_cache=global_cache).use():
key = ndb.Key(KIND, entity_id)
assert key.get().foo == 42
ndb.transaction(key.delete)
assert key.get() is None
@pytest.mark.usefixtures("client_context")
def test_delete_entity_in_transaction_then_rollback(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
key = ndb.Key(KIND, entity_id)
assert key.get().foo == 42
def delete_entity():
assert key.delete() is None
raise Exception("Spurious error")
with pytest.raises(Exception):
ndb.transaction(delete_entity)
assert key.get().foo == 42
@pytest.mark.usefixtures("client_context")
def test_allocate_ids():
class SomeKind(ndb.Model):
pass
keys = SomeKind.allocate_ids(5)
assert len(keys) == 5
for key in keys:
assert key.id()
assert key.get() is None
@pytest.mark.usefixtures("client_context")
def test_get_by_id(ds_entity):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
key = ndb.Key(KIND, entity_id)
assert key.get().foo == 42
entity = SomeKind.get_by_id(entity_id)
assert entity.foo == 42
@pytest.mark.usefixtures("client_context")
def test_get_or_insert_get(ds_entity):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
name = "Inigo Montoya"
assert SomeKind.get_by_id(name) is None
ds_entity(KIND, name, foo=42)
entity = SomeKind.get_or_insert(name, foo=21)
assert entity.foo == 42
@pytest.mark.usefixtures("client_context")
def test_get_or_insert_insert(dispose_of):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
name = "Inigo Montoya"
assert SomeKind.get_by_id(name) is None
entity = SomeKind.get_or_insert(name, foo=21)
dispose_of(entity._key._key)
assert entity.foo == 21
@pytest.mark.usefixtures("client_context")
def test_get_or_insert_in_transaction(dispose_of):
"""Regression test for #433
https://github.com/googleapis/python-ndb/issues/433
"""
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
name = "Inigo Montoya"
assert SomeKind.get_by_id(name) is None
@ndb.transactional()
def do_the_thing(foo):
entity = SomeKind.get_or_insert(name, foo=foo)
return entity
entity = do_the_thing(42)
dispose_of(entity._key._key)
assert entity.foo == 42
entity = do_the_thing(21)
assert entity.foo == 42
def test_get_by_id_default_namespace_when_context_namespace_is_other(
client_context, dispose_of, other_namespace
):
"""Regression test for #535.
https://github.com/googleapis/python-ndb/issues/535
"""
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
entity1 = SomeKind(foo=1, id="x", namespace="")
entity1.put()
dispose_of(entity1.key._key)
with client_context.new(namespace=other_namespace).use():
result = SomeKind.get_by_id("x", namespace="")
assert result is not None
assert result.foo == 1
def test_get_or_insert_default_namespace_when_context_namespace_is_other(
client_context, dispose_of, other_namespace
):
"""Regression test for #535.
https://github.com/googleapis/python-ndb/issues/535
"""
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
with client_context.new(namespace=other_namespace).use():
SomeKind.get_or_insert("x", namespace="", foo=1)
result = SomeKind.get_by_id("x", namespace="")
assert result is not None
assert result.foo == 1
@pytest.mark.usefixtures("client_context")
def test_insert_entity_with_structured_property(dispose_of):
class OtherKind(ndb.Model):
one = ndb.StringProperty()
two = ndb.StringProperty()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StructuredProperty(OtherKind)
entity = SomeKind(foo=42, bar=OtherKind(one="hi", two="mom"))
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar.one == "hi"
assert retrieved.bar.two == "mom"
assert isinstance(retrieved.bar, OtherKind)
def test_insert_entity_with_structured_property_legacy_data(
client_context, dispose_of, ds_client
):
class OtherKind(ndb.Model):
one = ndb.StringProperty()
two = ndb.StringProperty()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StructuredProperty(OtherKind)
with client_context.new(legacy_data=True).use():
entity = SomeKind(foo=42, bar=OtherKind(one="hi", two="mom"))
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar.one == "hi"
assert retrieved.bar.two == "mom"
assert isinstance(retrieved.bar, OtherKind)
ds_entity = ds_client.get(key._key)
assert ds_entity["foo"] == 42
assert ds_entity["bar.one"] == "hi"
assert ds_entity["bar.two"] == "mom"
@pytest.mark.usefixtures("client_context")
def test_retrieve_entity_with_legacy_structured_property(ds_entity):
class OtherKind(ndb.Model):
one = ndb.StringProperty()
two = ndb.StringProperty()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StructuredProperty(OtherKind)
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, **{"foo": 42, "bar.one": "hi", "bar.two": "mom"})
key = ndb.Key(KIND, entity_id)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar.one == "hi"
assert retrieved.bar.two == "mom"
assert isinstance(retrieved.bar, OtherKind)
@pytest.mark.usefixtures("client_context")
def test_retrieve_entity_with_legacy_repeated_structured_property(ds_entity):
class OtherKind(ndb.Model):
one = ndb.StringProperty()
two = ndb.StringProperty()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StructuredProperty(OtherKind, repeated=True)
entity_id = test_utils.system.unique_resource_id()
ds_entity(
KIND,
entity_id,
**{"foo": 42, "bar.one": ["hi", "hello"], "bar.two": ["mom", "dad"]}
)
key = ndb.Key(KIND, entity_id)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar[0].one == "hi"
assert retrieved.bar[0].two == "mom"
assert retrieved.bar[1].one == "hello"
assert retrieved.bar[1].two == "dad"
assert isinstance(retrieved.bar[0], OtherKind)
assert isinstance(retrieved.bar[1], OtherKind)
@pytest.mark.usefixtures("client_context")
def test_insert_expando(dispose_of):
class SomeKind(ndb.Expando):
foo = ndb.IntegerProperty()
entity = SomeKind(foo=42)
entity.expando_prop = "exp-value"
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.expando_prop == "exp-value"
@pytest.mark.usefixtures("client_context")
def test_insert_polymodel(dispose_of):
class Animal(ndb.PolyModel):
one = ndb.StringProperty()
class Feline(Animal):
two = ndb.StringProperty()
class Cat(Feline):
three = ndb.StringProperty()
entity = Cat(one="hello", two="dad", three="i'm in jail")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert isinstance(retrieved, Animal)
assert isinstance(retrieved, Cat)
assert retrieved.one == "hello"
assert retrieved.two == "dad"
assert retrieved.three == "i'm in jail"
@pytest.mark.usefixtures("client_context")
def test_insert_autonow_property(dispose_of):
class SomeKind(ndb.Model):
foo = ndb.StringProperty()
created_at = ndb.DateTimeProperty(indexed=True, auto_now_add=True)
updated_at = ndb.DateTimeProperty(indexed=True, auto_now=True)
entity = SomeKind(foo="bar")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert isinstance(retrieved.created_at, datetime.datetime)
assert isinstance(retrieved.updated_at, datetime.datetime)
@pytest.mark.usefixtures("client_context")
def test_insert_autonow_property_with_tz(dispose_of):
"""Regression test for #517
https://github.com/googleapis/python-ndb/issues/517
"""
class SomeKind(ndb.Model):
created_at = ndb.DateTimeProperty(auto_now_add=True, tzinfo=pytz.utc)
updated_at = ndb.DateTimeProperty(auto_now=True, tzinfo=pytz.utc)
now = datetime.datetime.now(pytz.utc)
entity = SomeKind()
key = entity.put()
dispose_of(key._key)
_assert_contemporaneous(entity.created_at, now)
_assert_contemporaneous(entity.updated_at, now)
retrieved = key.get()
_assert_contemporaneous(retrieved.created_at, now)
_assert_contemporaneous(retrieved.updated_at, now)
@pytest.mark.usefixtures("client_context")
def test_insert_datetime_property_with_tz(dispose_of):
"""Regression test for #517
https://github.com/googleapis/python-ndb/issues/517
"""
class SomeKind(ndb.Model):
alarm1 = ndb.DateTimeProperty(tzinfo=pytz.utc)
alarm2 = ndb.DateTimeProperty(tzinfo=pytz.utc)
now = datetime.datetime.now(pytz.utc)
entity = SomeKind(
alarm1=now,
alarm2=datetime.datetime.utcnow(), # naive
)
key = entity.put()
dispose_of(key._key)
_assert_contemporaneous(entity.alarm1, now)
_assert_contemporaneous(entity.alarm2, now)
retrieved = key.get()
_assert_contemporaneous(retrieved.alarm1, now)
_assert_contemporaneous(retrieved.alarm2, now)
@pytest.mark.usefixtures("client_context")
def test_insert_nested_autonow_property(dispose_of):
class OtherKind(ndb.Model):
created_at = ndb.DateTimeProperty(indexed=True, auto_now_add=True)
updated_at = ndb.DateTimeProperty(indexed=True, auto_now=True)
class SomeKind(ndb.Model):
other = ndb.StructuredProperty(OtherKind)
entity = SomeKind(other=OtherKind())
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert isinstance(retrieved.other.created_at, datetime.datetime)
assert isinstance(retrieved.other.updated_at, datetime.datetime)
@pytest.mark.usefixtures("client_context")
def test_uninitialized_property(dispose_of):
class SomeKind(ndb.Model):
foo = ndb.StringProperty(required=True)
entity = SomeKind()
with pytest.raises(ndb.exceptions.BadValueError):
entity.put()
@mock.patch(
"google.cloud.ndb._datastore_api.make_call",
mock.Mock(side_effect=Exception("Datastore shouldn't get called.")),
)
def test_crud_without_datastore(ds_entity, client_context):
entity_id = test_utils.system.unique_resource_id()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.StringProperty()
global_cache = global_cache_module._InProcessGlobalCache()
with client_context.new(global_cache=global_cache).use() as context:
context.set_global_cache_policy(None) # Use default
context.set_datastore_policy(False) # Don't use Datastore
key = ndb.Key(KIND, entity_id)
SomeKind(foo=42, bar="none", baz="night", _key=key).put()
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
key.delete()
assert key.get() is None
@pytest.mark.usefixtures("client_context")
def test_computed_key_property(dispose_of):
"""Regression test for #284.
https://github.com/googleapis/python-ndb/issues/284
"""
class AModel(ndb.Model):
s_foo = ndb.StringProperty()
class BModel(ndb.Model):
s_bar = ndb.StringProperty()
key_a = ndb.KeyProperty(kind="AModel", indexed=True)
class CModel(ndb.Model):
s_foobar = ndb.StringProperty()
key_b = ndb.KeyProperty(kind="BModel", indexed=True)
key_a = ndb.ComputedProperty( # Issue here
lambda self: self.key_b.get().key_a if self.key_b else None,
)
key_a = AModel(s_foo="test").put()
dispose_of(key_a._key)
key_b = BModel(s_bar="test", key_a=key_a).put()
dispose_of(key_b._key)
key_c = CModel(s_foobar="test", key_b=key_b).put()
dispose_of(key_c._key)
entity = key_c.get()
assert entity.key_a == key_a
assert entity.key_b == key_b
@pytest.mark.usefixtures("client_context")
def test_user_property(dispose_of):
class SomeKind(ndb.Model):
user = ndb.UserProperty()
user = ndb.User("somebody@example.com", "gmail.com")
entity = SomeKind(user=user)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.user.email() == "somebody@example.com"
assert retrieved.user.auth_domain() == "gmail.com"
@pytest.mark.usefixtures("client_context")
def test_user_property_different_user_class(dispose_of):
class SomeKind(ndb.Model):
user = ndb.UserProperty()
class User(object):
def email(self):
return "somebody@example.com"
def auth_domain(self):
return "gmail.com"
def user_id(self):
return None
entity = SomeKind(user=User())
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.user.email() == "somebody@example.com"
assert retrieved.user.auth_domain() == "gmail.com"
@pytest.mark.usefixtures("client_context")
def test_repeated_empty_strings(dispose_of):
"""Regression test for issue # 300.
https://github.com/googleapis/python-ndb/issues/300
"""
class SomeKind(ndb.Model):
foo = ndb.StringProperty(repeated=True)
entity = SomeKind(foo=["", ""])
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == ["", ""]
@pytest.mark.skipif(not USE_REDIS_CACHE, reason="Redis is not configured")
@pytest.mark.usefixtures("redis_context")
def test_multi_get_weirdness_with_redis(dispose_of):
"""Regression test for issue #294.
https://github.com/googleapis/python-ndb/issues/294
"""
class SomeKind(ndb.Model):
foo = ndb.StringProperty()
objects = [SomeKind(foo=str(i)) for i in range(10)]
keys = ndb.put_multi(objects)
for key in keys:
dispose_of(key._key)
ndb.get_multi(keys)
one_object = random.choice(keys).get()
one_object.foo = "CHANGED"
one_object.put()
objects_upd = ndb.get_multi(keys)
keys_upd = [obj.key for obj in objects_upd]
assert len(keys_upd) == len(keys)
assert len(set(keys_upd)) == len(set(keys))
assert set(keys_upd) == set(keys)
@pytest.mark.usefixtures("client_context")
def test_multi_with_lots_of_keys(dispose_of):
"""Regression test for issue #318.
https://github.com/googleapis/python-ndb/issues/318
"""
N = 1001
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
foos = list(range(N))
entities = [SomeKind(foo=foo) for foo in foos]
keys = ndb.put_multi(entities)
dispose_of(*(key._key for key in keys))
assert len(keys) == N
entities = ndb.get_multi(keys)
assert [entity.foo for entity in entities] == foos
ndb.delete_multi(keys)
entities = ndb.get_multi(keys)
assert entities == [None] * N
@pytest.mark.usefixtures("client_context")
def test_allocate_a_lot_of_keys():
N = 1001
class SomeKind(ndb.Model):
pass
keys = SomeKind.allocate_ids(N)
assert len(keys) == N
@pytest.mark.usefixtures("client_context")
def test_delete_multi_with_transactional(dispose_of):
"""Regression test for issue #271
https://github.com/googleapis/python-ndb/issues/271
"""
N = 10
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
@ndb.transactional()
def delete_them(entities):
ndb.delete_multi([entity.key for entity in entities])
foos = list(range(N))
entities = [SomeKind(foo=foo) for foo in foos]
keys = ndb.put_multi(entities)
dispose_of(*(key._key for key in keys))
entities = ndb.get_multi(keys)
assert [entity.foo for entity in entities] == foos
assert delete_them(entities) is None
entities = ndb.get_multi(keys)
assert entities == [None] * N
@pytest.mark.usefixtures("client_context")
def test_compressed_text_property(dispose_of, ds_client):
"""Regression test for #277
https://github.com/googleapis/python-ndb/issues/277
"""
class SomeKind(ndb.Model):
foo = ndb.TextProperty(compressed=True)
entity = SomeKind(foo="Compress this!")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == "Compress this!"
ds_entity = ds_client.get(key._key)
assert zlib.decompress(ds_entity["foo"]) == b"Compress this!"
def test_insert_entity_with_repeated_local_structured_property_legacy_data(
client_context, dispose_of, ds_client
):
"""Regression test for #326
https://github.com/googleapis/python-ndb/issues/326
"""
class OtherKind(ndb.Model):
one = ndb.StringProperty()
two = ndb.StringProperty()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.LocalStructuredProperty(OtherKind, repeated=True)
with client_context.new(legacy_data=True).use():
entity = SomeKind(
foo=42,
bar=[
OtherKind(one="hi", two="mom"),
OtherKind(one="and", two="dad"),
],
)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar[0].one == "hi"
assert retrieved.bar[0].two == "mom"
assert retrieved.bar[1].one == "and"
assert retrieved.bar[1].two == "dad"
assert isinstance(retrieved.bar[0], OtherKind)
assert isinstance(retrieved.bar[1], OtherKind)
def test_insert_structured_property_with_unindexed_subproperty_legacy_data(
client_context, dispose_of, ds_client
):
"""Regression test for #341
https://github.com/googleapis/python-ndb/issues/341
"""
class OtherKind(ndb.Model):
data = ndb.BlobProperty(indexed=False)
class SomeKind(ndb.Model):
entry = ndb.StructuredProperty(OtherKind)
with client_context.new(legacy_data=True).use():
entity = SomeKind(entry=OtherKind(data=b"01234567890" * 1000))
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert isinstance(retrieved.entry, OtherKind)
@pytest.mark.usefixtures("client_context")
def test_serialization(dispose_of):
"""Regression test for #384
https://github.com/googleapis/python-ndb/issues/384
"""
# This is needed because pickle can't serialize local objects
global SomeKind, OtherKind
class OtherKind(ndb.Model):
foo = ndb.IntegerProperty()
@classmethod
def _get_kind(cls):
return "OtherKind"
class SomeKind(ndb.Model):
other = ndb.StructuredProperty(OtherKind)
@classmethod
def _get_kind(cls):
return "SomeKind"
entity = SomeKind(other=OtherKind(foo=1, namespace="Test"), namespace="Test")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.other.key is None or retrieved.other.key.id() is None
entity = pickle.loads(pickle.dumps(retrieved))
assert entity.other.foo == 1
@pytest.mark.usefixtures("client_context")
def test_custom_validator(dispose_of, ds_client):
"""New feature test for #252
https://github.com/googleapis/python-ndb/issues/252
"""
def date_validator(prop, value):
return datetime.datetime.strptime(value, "%Y-%m-%d %H:%M:%S")
class SomeKind(ndb.Model):
foo = ndb.DateTimeProperty(validator=date_validator)
entity = SomeKind(foo="2020-08-08 1:02:03")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == datetime.datetime(2020, 8, 8, 1, 2, 3)
def test_cache_returns_entity_if_available(dispose_of, client_context):
"""Regression test for #441
https://github.com/googleapis/python-ndb/issues/441
"""
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
client_context.set_cache_policy(None) # Use default
somekind = SomeKind(foo=1)
key = somekind.put()
dispose_of(key._key)
query = ndb.Query(kind="SomeKind")
ourkind = query.get()
ourkind.bar = "confusing"
assert somekind.bar == "confusing"
def test_cache_off_new_entity_created(dispose_of, client_context):
"""Regression test for #441
https://github.com/googleapis/python-ndb/issues/441
"""
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
somekind = SomeKind(foo=1)
key = somekind.put()
dispose_of(key._key)
query = ndb.Query(kind="SomeKind")
ourkind = query.get()
ourkind.bar = "confusing"
assert somekind.bar is None
@pytest.mark.usefixtures("client_context")
def test_local_structured_property_with_polymodel(dispose_of):
"""Regression test for #481
https://github.com/googleapis/python-ndb/issues/481
"""
class Base(ndb.PolyModel):
pass
class SubKind(Base):
foo = ndb.StringProperty()
class Container(ndb.Model):
child = ndb.LocalStructuredProperty(Base)
entity = Container(child=SubKind(foo="bar"))
key = entity.put()
dispose_of(key._key)
entity = entity.key.get()
assert entity.child.foo == "bar"
@pytest.mark.usefixtures("client_context")
def test_local_structured_property_with_inheritance(dispose_of):
"""Regression test for #523
https://github.com/googleapis/python-ndb/issues/523
"""
class Base(ndb.Model):
pass
class SubKind(Base):
foo = ndb.StringProperty()
class Container(ndb.Model):
children = ndb.LocalStructuredProperty(Base, repeated=True)
entity = Container()
subkind = SubKind(foo="bar")
entity.children.append(subkind)
key = entity.put()
dispose_of(key._key)
entity = entity.key.get()
assert isinstance(entity.children[0], Base)
def test_structured_property_with_nested_compressed_json_property_using_legacy_format(
client_context, dispose_of
):
"""Regression test for #602
https://github.com/googleapis/python-ndb/issues/602
"""
class OtherKind(ndb.Model):
data = ndb.JsonProperty(compressed=True)
class SomeKind(ndb.Model):
sub_model = ndb.StructuredProperty(OtherKind)
with client_context.new(legacy_data=True).use():
model = SomeKind(sub_model=OtherKind(data={"test": 1}))
key = model.put()
dispose_of(key._key)
assert key.get().sub_model.data["test"] == 1
object_storage_service_benchmark.py
# Copyright 2016 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Object (blob) Storage benchmark tests.
There are two categories of tests here: 1) tests based on CLI tools, and 2)
tests that use APIs to access the storage provider.
For 1), we aim to simulate a typical use case for a common user of a storage
provider: uploading and downloading a set of files of different sizes to/from
a local directory.
For 2), we aim to measure the performance of a storage provider more directly
by accessing it via APIs. Here are the main scenarios covered in this
category:
a: Single byte object upload and download, measures latency.
b: List-after-write and list-after-update consistency measurement.
c: Single stream large object upload and download, measures throughput.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import enum
import glob
import json
import logging
import os
import posixpath
import re
import threading
import time
import uuid
from absl import flags
import numpy as np
from perfkitbenchmarker import configs
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import flag_util
from perfkitbenchmarker import object_storage_service
from perfkitbenchmarker import providers
from perfkitbenchmarker import sample
from perfkitbenchmarker import units
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.gcp import gcs
from perfkitbenchmarker.sample import PercentileCalculator # noqa
import six
from six.moves import range
from six.moves import zip
flags.DEFINE_enum('storage', providers.GCP,
[providers.GCP, providers.AWS,
providers.AZURE, providers.OPENSTACK],
'storage provider (GCP/AZURE/AWS/OPENSTACK) to use.')
flags.DEFINE_string('object_storage_region', None,
'Storage region for object storage benchmark.')
flags.DEFINE_string('object_storage_gcs_multiregion', None,
'Storage multiregion for GCS in object storage benchmark.')
flags.DEFINE_string('object_storage_storage_class', None,
'Storage class to use in object storage benchmark.')
flags.DEFINE_enum('object_storage_scenario', 'all',
['all', 'cli', 'api_data', 'api_namespace',
'api_multistream', 'api_multistream_writes',
'api_multistream_reads'],
'select all, or one particular scenario to run: \n'
'ALL: runs all scenarios. This is the default. \n'
'cli: runs the command line only scenario. \n'
'api_data: runs API based benchmarking for data paths. \n'
'api_namespace: runs API based benchmarking for namespace '
'operations. \n'
'api_multistream: runs API-based benchmarking with multiple '
'upload/download streams.\n'
'api_multistream_writes: runs API-based benchmarking with '
'multiple upload streams.\n'
'api_multistream_reads: runs API-based benchmarking with '
'multiple download streams.')
flags.DEFINE_string('object_storage_bucket_name', None,
'If set, the bucket will be created with this name')
flags.DEFINE_boolean('object_storage_apply_region_suffix_to_bucket_name', False,
'If set, the region will be appended to the bucket name.')
flags.DEFINE_enum('cli_test_size', 'normal',
['normal', 'large'],
'size of the cli tests. Normal means a mixture of various \n'
'object sizes up to 32MiB (see '
'data/cloud-storage-workload.sh). \n'
'Large means all objects are of at least 1GiB.')
flags.DEFINE_integer('object_storage_multistream_objects_per_stream', 1000,
'Number of objects to send and/or receive per stream. '
'Only applies to the api_multistream scenario.',
lower_bound=1)
flag_util.DEFINE_yaml('object_storage_object_sizes', '1KB',
'Size of objects to send and/or receive. Only applies to '
'the api_multistream scenario. Examples: 1KB, '
'{1KB: 50%, 10KB: 50%}')
flags.DEFINE_integer('object_storage_streams_per_vm', 10,
'Number of independent streams per VM. Only applies to '
'the api_multistream scenario.',
lower_bound=1)
flags.DEFINE_integer('object_storage_list_consistency_iterations', 200,
'Number of iterations to perform for the api_namespace '
'list consistency benchmark. This flag is mainly for '
'regression testing in the benchmarks. Reduce the number '
'to shorten the execution time of the api_namespace '
'scenario. However, to get useful metrics from the '
'api_namespace scenario, a high number of iterations '
'should be used (>=200).')
flags.DEFINE_enum('object_storage_object_naming_scheme', 'sequential_by_stream',
['sequential_by_stream',
'approximately_sequential'],
'How objects will be named. Only applies to the '
'api_multistream benchmark. '
'sequential_by_stream: object names from each stream '
'will be sequential, but different streams will have '
'different name prefixes. '
'approximately_sequential: object names from all '
'streams will roughly increase together.')
flags.DEFINE_string('object_storage_objects_written_file_prefix', None,
'If specified, the bucket and all of the objects will not '
'be deleted, and the list of object names will be written '
'to a file with the specified prefix in the following '
'format: <bucket>/<object>. This prefix can be passed to '
'this benchmark in a later run via via the '
'object_storage_read_objects_prefix flag. Only valid for '
'the api_multistream and api_multistream_writes scenarios. '
'The filename is appended with the date and time so that '
'later runs can be given a prefix and a minimum age of '
'objects. The later run will then use the oldest objects '
'available or fail if there is no file with an old enough '
'date. The prefix is also appended with the region so that '
'later runs will read objects from the same region.')
flags.DEFINE_string('object_storage_read_objects_prefix', None,
'If specified, no new bucket or objects will be created. '
'Instead, the benchmark will read the objects listed in '
'a file with the specified prefix that was written some '
'number of hours before (as specified by '
'object_storage_read_objects_min_hours). Only valid for '
'the api_multistream_reads scenario.')
flags.DEFINE_integer('object_storage_read_objects_min_hours', 72, 'The minimum '
'number of hours from which to read objects that were '
'written on a previous run. Used in combination with '
'object_storage_read_objects_prefix.')
flags.DEFINE_boolean('object_storage_dont_delete_bucket', False,
'If True, the storage bucket won\'t be deleted. Useful '
'for running the api_multistream_reads scenario multiple '
'times against the same objects.')
flags.DEFINE_string('object_storage_worker_output', None,
'If set, the worker threads\' output will be written to '
'the path provided.')
flags.DEFINE_float('object_storage_latency_histogram_interval', None,
'If set, a latency histogram sample will be created with '
'buckets of the specified interval in seconds. Individual '
'histogram samples are created for each different object '
'size in the distribution, because it is easy to aggregate '
'the histograms during post-processing, but impossible to '
'go in the opposite direction.')
flags.DEFINE_boolean(
'record_individual_latency_samples', False,
'If set, record the latency of each download and upload '
'in its own sample.')
flags.DEFINE_boolean(
'object_storage_bulk_delete', False,
'If true, deletes objects with bulk delete client request and records '
'average latency per object. Otherwise, deletes one object per request '
'and records individual delete latency'
)
FLAGS = flags.FLAGS
BENCHMARK_INFO = {'name': 'object_storage_service',
'description':
'Object/blob storage service benchmarks. Specify '
'--object_storage_scenario '
'to select a set of sub-benchmarks to run. default is all.',
'scratch_disk': False,
'num_machines': 1}
BENCHMARK_NAME = 'object_storage_service'
BENCHMARK_CONFIG = """
object_storage_service:
description: >
Object/blob storage service benchmarks. Specify
--object_storage_scenario
to select a set of sub-benchmarks to run. default is all.
vm_groups:
default:
vm_spec: *default_single_core
vm_count: null
flags:
gcloud_scopes: https://www.googleapis.com/auth/devstorage.read_write
"""
DATA_FILE = 'cloud-storage-workload.sh'
# size of all data used in the CLI tests.
DATA_SIZE_IN_BYTES = 256.1 * 1024 * 1024
DATA_SIZE_IN_MBITS = 8 * DATA_SIZE_IN_BYTES / 1000 / 1000
LARGE_DATA_SIZE_IN_BYTES = 3 * 1024 * 1024 * 1024
LARGE_DATA_SIZE_IN_MBITS = 8 * LARGE_DATA_SIZE_IN_BYTES / 1000 / 1000
API_TEST_SCRIPT = 'object_storage_api_tests.py'
API_TEST_SCRIPTS_DIR = 'object_storage_api_test_scripts'
# Files that will be sent to the remote VM as a package for API test script.
API_TEST_SCRIPT_PACKAGE_FILES = [
'__init__.py', 'object_storage_interface.py', 'azure_flags.py',
'gcs_flags.py', 's3_flags.py'
]
SCRIPT_DIR = '/tmp/run'
REMOTE_PACKAGE_DIR = posixpath.join(SCRIPT_DIR, 'providers')
DOWNLOAD_DIRECTORY = posixpath.join(SCRIPT_DIR, 'temp')
# Various constants to name the result metrics.
THROUGHPUT_UNIT = 'Mbps'
LATENCY_UNIT = 'seconds'
NA_UNIT = 'na'
PERCENTILES_LIST = ['p0.1', 'p1', 'p5', 'p10', 'p50', 'p90', 'p95', 'p99',
'p99.9', 'average', 'stddev']
UPLOAD_THROUGHPUT_VIA_CLI = 'upload throughput via cli Mbps'
DOWNLOAD_THROUGHPUT_VIA_CLI = 'download throughput via cli Mbps'
CLI_TEST_ITERATION_COUNT = 100
LARGE_CLI_TEST_ITERATION_COUNT = 20
CLI_TEST_FAILURE_TOLERANCE = 0.05
# Azure does not parallelize operations in its CLI tools. Because we have to
# upload or download the 100 test files sequentially, each iteration takes a
# very long time, so we run only 3 iterations.
CLI_TEST_ITERATION_COUNT_AZURE = 3
SINGLE_STREAM_THROUGHPUT = 'single stream %s throughput Mbps'
ONE_BYTE_LATENCY = 'one byte %s latency'
LIST_CONSISTENCY_SCENARIOS = ['list-after-write', 'list-after-update']
LIST_CONSISTENCY_PERCENTAGE = 'consistency percentage'
LIST_INCONSISTENCY_WINDOW = 'inconsistency window'
LIST_LATENCY = 'latency'
CONTENT_REMOVAL_RETRY_LIMIT = 5
# Sometimes, even when a bucket is completely empty, the service provider will
# refuse to remove the bucket with a "BucketNotEmpty" error for up to 1 hour.
# We keep retrying until we reach that one-hour limit, since this wait is
# necessary for some providers.
BUCKET_REMOVAL_RETRY_LIMIT = 120
RETRY_WAIT_INTERVAL_SECONDS = 30
# GCS has special region handling until we can remove it :(
DEFAULT_GCS_MULTIREGION = 'us'
# Keys for flag names and metadata values
OBJECT_STORAGE_REGION = 'object_storage_region'
REGIONAL_BUCKET_LOCATION = 'regional_bucket_location'
OBJECT_STORAGE_GCS_MULTIREGION = 'object_storage_gcs_multiregion'
GCS_MULTIREGION_LOCATION = 'gcs_multiregion_location'
DEFAULT = 'default'
# This accounts for the overhead of running RemoteCommand() on a VM.
MULTISTREAM_DELAY_PER_VM = 5.0 * units.second
# We wait this long for each stream. Note that this is multiplied by
# the number of streams per VM, not the total number of streams.
MULTISTREAM_DELAY_PER_STREAM = 0.1 * units.second
# And add a constant factor for PKB-side processing
MULTISTREAM_DELAY_CONSTANT = 10.0 * units.second
# Max number of delete operations per second
MULTISTREAM_DELETE_OPS_PER_SEC = 3500
# The multistream write benchmark writes a file in the VM's /tmp with
# the objects it has written, which is used by the multistream read
# benchmark. This is the filename.
OBJECTS_WRITTEN_FILE = 'pkb-objects-written'
# If the gap between different stream starts and ends is above a
# certain proportion of the total time, we log a warning because we
# are throwing out a lot of information. We also put the warning in
# the sample metadata.
MULTISTREAM_STREAM_GAP_THRESHOLD = 0.2
# The API test script uses different names for providers than this
# script :(
STORAGE_TO_API_SCRIPT_DICT = {
providers.GCP: 'GCS',
providers.AWS: 'S3',
providers.AZURE: 'AZURE'}
_SECONDS_PER_HOUR = 60 * 60
class MultistreamOperationType(enum.Enum):
"""MultiStream Operations supported by object_storage_api_tests script."""
download = 1
upload = 2
delete = 3
bulk_delete = 4
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
# Raised when we fail to remove a bucket or its content after many retries.
# TODO: add a new class of error "ObjectStorageError" to errors.py and remove
# this one.
class BucketRemovalError(Exception):
pass
class NotEnoughResultsError(Exception):
pass
class ColdDataError(Exception):
"""Exception indicating that the cold object data does not exist."""
def _JsonStringToPercentileResults(results, json_input, metric_name,
metric_unit, metadata):
"""This function parses a percentile result string in Json format.
Args:
results: The final result set to put result in.
json_input: The input in Json format about percentiles.
metric_name: Name of the metric.
metric_unit: Unit of the metric.
metadata: The metadata to be included.
"""
result = json.loads(json_input)
for percentile in PERCENTILES_LIST:
results.append(sample.Sample(
('%s %s') % (metric_name, percentile),
float(result[percentile]),
metric_unit,
metadata))
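# Illustrative sketch (not part of the original benchmark): the JSON string
# parsed above must contain one entry per key in PERCENTILES_LIST. With a
# hypothetical metric name and made-up latencies:
#   json_input = ('{"p0.1": 0.010, "p1": 0.011, "p5": 0.012, "p10": 0.013, '
#                 '"p50": 0.020, "p90": 0.035, "p95": 0.040, "p99": 0.055, '
#                 '"p99.9": 0.080, "average": 0.022, "stddev": 0.009}')
#   _JsonStringToPercentileResults(results, json_input,
#                                  'one byte upload latency', LATENCY_UNIT, {})
#   # appends samples named 'one byte upload latency p0.1', ...,
#   # 'one byte upload latency stddev' to results.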
def _GetClientLibVersion(vm, library_name):
"""This function returns the version of client lib installed on a vm.
Args:
vm: the VM to get the client lib version from.
library_name: the name of the client lib.
Returns:
The version string of the client.
"""
version, _ = vm.RemoteCommand('pip3 show %s |grep Version' % library_name)
logging.info('%s client lib version is: %s', library_name, version)
return version
def MultiThreadStartDelay(num_vms, threads_per_vm):
"""Find how long in the future we can simultaneously start threads on VMs.
Args:
num_vms: number of VMs to start threads on.
threads_per_vm: number of threads to start on each VM.
Returns:
A units.Quantity of time such that if we want to start
threads_per_vm threads on num_vms VMs, we can start the threads
sequentially, tell each of them to sleep for this number of
seconds, and we expect that we will be able to start the last
thread before the delay has finished.
"""
return (
MULTISTREAM_DELAY_CONSTANT +
MULTISTREAM_DELAY_PER_VM * num_vms +
MULTISTREAM_DELAY_PER_STREAM * threads_per_vm)
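# Worked example (illustrative only, using the constants defined above): with
# 2 VMs and 10 streams per VM the start delay is
#   MULTISTREAM_DELAY_CONSTANT + MULTISTREAM_DELAY_PER_VM * 2
#   + MULTISTREAM_DELAY_PER_STREAM * 10
#   = 10.0 s + 10.0 s + 1.0 s = 21.0 seconds,
# i.e. every stream is told to start 21 seconds in the future so that the
# slowest-to-launch stream can still start on time.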
def MultiThreadDeleteDelay(num_vms, threads_per_vm):
"""Calculates delay time between delete operation.
Args:
num_vms: number of VMs to start threads on.
threads_per_vm: number of threads to start on each VM.
Returns:
float. Delay time in seconds based on number of vms and threads and the
maximum number of delete operations per second.
"""
return (num_vms * threads_per_vm) / (MULTISTREAM_DELETE_OPS_PER_SEC)
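# Worked example (illustrative only): with 2 VMs and 10 streams per VM the
# delete delay is (2 * 10) / MULTISTREAM_DELETE_OPS_PER_SEC = 20 / 3500
# ~= 0.0057 seconds between delete operations in each stream, which keeps the
# aggregate delete rate at or below 3500 operations per second.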
def _ProcessMultiStreamResults(start_times, latencies, sizes, operation,
all_sizes, results, metadata=None):
"""Read and process results from the api_multistream worker process.
Results will be reported per-object size and combined for all
objects.
Args:
start_times: a list of numpy arrays. Operation start times, as
POSIX timestamps.
latencies: a list of numpy arrays. Operation durations, in seconds.
sizes: a list of numpy arrays. Object sizes used in each
operation, in bytes.
operation: the name of the operation ('upload', 'download', 'delete' or
'bulk_delete') that the results are from.
all_sizes: a sequence of integers. all object sizes in the
distribution used, in bytes.
results: a list to append Sample objects to.
metadata: dict. Base sample metadata
"""
num_streams = FLAGS.object_storage_streams_per_vm * FLAGS.num_vms
assert len(start_times) == num_streams
assert len(latencies) == num_streams
assert len(sizes) == num_streams
if metadata is None:
metadata = {}
metadata['num_streams'] = num_streams
metadata['objects_per_stream'] = (
FLAGS.object_storage_multistream_objects_per_stream)
metadata['object_naming'] = FLAGS.object_storage_object_naming_scheme
min_num_records = min((len(start_time) for start_time in start_times))
num_records = sum((len(start_time) for start_time in start_times))
logging.info('Processing %s total operation records', num_records)
stop_times = [start_time + latency
for start_time, latency in zip(start_times, latencies)]
last_start_time = max((start_time[0] for start_time in start_times))
first_stop_time = min((stop_time[-1] for stop_time in stop_times))
# Compute how well our synchronization worked
first_start_time = min((start_time[0] for start_time in start_times))
last_stop_time = max((stop_time[-1] for stop_time in stop_times))
start_gap = last_start_time - first_start_time
stop_gap = last_stop_time - first_stop_time
if ((start_gap + stop_gap) / (last_stop_time - first_start_time) <
MULTISTREAM_STREAM_GAP_THRESHOLD):
logging.info(
'First stream started %s seconds before last stream started', start_gap)
logging.info(
'Last stream ended %s seconds after first stream ended', stop_gap)
else:
logging.warning(
'Difference between first and last stream start/end times was %s and '
'%s, which is more than %s of the benchmark time %s.',
start_gap, stop_gap, MULTISTREAM_STREAM_GAP_THRESHOLD,
(last_stop_time - first_start_time))
metadata['stream_gap_above_threshold'] = True
# Find the indexes in each stream where all streams are active,
# following Python's [inclusive, exclusive) index convention.
active_start_indexes = np.full(num_streams, 0)
for index, start_time in enumerate(start_times):
for i in range(len(start_time)):
if start_time[i] >= last_start_time:
active_start_indexes[index] = i
break
active_stop_indexes = np.full(num_streams, min_num_records)
for index, stop_time in enumerate(stop_times):
for i in range(len(stop_time) - 1, -1, -1):
if stop_time[i] <= first_stop_time:
active_stop_indexes[index] = i + 1
break
active_latencies = [
latencies[i][active_start_indexes[i]:active_stop_indexes[i]]
for i in range(num_streams)]
active_sizes = [
sizes[i][active_start_indexes[i]:active_stop_indexes[i]]
for i in range(num_streams)]
all_active_latencies = np.concatenate(active_latencies)
all_active_sizes = np.concatenate(active_sizes)
# Don't publish the full distribution in the metadata because doing
# so might break regexp-based parsers that assume that all metadata
# values are simple Python objects. However, do add an
# 'object_size_B' metadata field even for the full results because
# searching metadata is easier when all records with the same metric
# name have the same set of metadata fields.
distribution_metadata = metadata.copy()
if len(all_sizes) == 1:
distribution_metadata['object_size_B'] = all_sizes[0]
else:
distribution_metadata['object_size_B'] = 'distribution'
latency_prefix = 'Multi-stream %s latency' % operation
logging.info('Processing %s multi-stream %s results for the full '
'distribution.', len(all_active_latencies), operation)
_AppendPercentilesToResults(
results,
all_active_latencies,
latency_prefix,
LATENCY_UNIT,
distribution_metadata)
# Publish by-size and full-distribution stats even if there's only
# one size in the distribution, because it simplifies postprocessing
# of results.
for size in all_sizes:
this_size_metadata = metadata.copy()
this_size_metadata['object_size_B'] = size
logging.info('Processing multi-stream %s results for object size %s',
operation, size)
_AppendPercentilesToResults(
results,
all_active_latencies[all_active_sizes == size],
latency_prefix,
LATENCY_UNIT,
this_size_metadata)
# Record samples for individual downloads and uploads if requested.
if FLAGS.record_individual_latency_samples:
for latency in all_active_latencies[all_active_sizes == size]:
results.append(
sample.Sample('%s individual' % latency_prefix, latency,
LATENCY_UNIT, this_size_metadata))
# Build the object latency histogram if user requested it
if FLAGS.object_storage_latency_histogram_interval and any(
size in x for x in sizes):
histogram_interval = FLAGS.object_storage_latency_histogram_interval
hist_latencies = [[l for l, s in zip(*w_l_s) if s == size]
for w_l_s in zip(latencies, sizes)]
max_latency = max([max(l) for l in hist_latencies])
# Note that int() floors for us
num_histogram_buckets = int(max_latency / histogram_interval) + 1
histogram_buckets = [0 for _ in range(num_histogram_buckets)]
for worker_latencies in hist_latencies:
for latency in worker_latencies:
# Note that int() floors for us
histogram_buckets[int(latency / histogram_interval)] += 1
histogram_str = ','.join([str(c) for c in histogram_buckets])
histogram_metadata = this_size_metadata.copy()
histogram_metadata['interval'] = histogram_interval
histogram_metadata['histogram'] = histogram_str
results.append(sample.Sample(
'Multi-stream %s latency histogram' % operation,
0.0, 'histogram', metadata=histogram_metadata))
# Throughput metrics
total_active_times = [np.sum(latency) for latency in active_latencies]
active_durations = [stop_times[i][active_stop_indexes[i] - 1] -
start_times[i][active_start_indexes[i]]
for i in range(num_streams)]
total_active_sizes = [np.sum(size) for size in active_sizes]
# 'net throughput (with gap)' is computed by taking the throughput
# for each stream (total # of bytes transmitted / (stop_time -
# start_time)) and then adding the per-stream throughputs. 'net
# throughput' is the same, but replacing (stop_time - start_time)
# with the sum of all of the operation latencies for that thread, so
# we only divide by the time that stream was actually transmitting.
results.append(sample.Sample(
'Multi-stream ' + operation + ' net throughput',
np.sum((size / active_time * 8
for size, active_time
in zip(total_active_sizes, total_active_times))),
'bit / second', metadata=distribution_metadata))
results.append(sample.Sample(
'Multi-stream ' + operation + ' net throughput (with gap)',
np.sum((size / duration * 8
for size, duration in zip(total_active_sizes, active_durations))),
'bit / second', metadata=distribution_metadata))
results.append(sample.Sample(
'Multi-stream ' + operation + ' net throughput (simplified)',
sum([np.sum(size) for size in sizes]) /
(last_stop_time - first_start_time) * 8,
'bit / second', metadata=distribution_metadata))
# QPS metrics
results.append(sample.Sample(
'Multi-stream ' + operation + ' QPS (any stream active)',
num_records / (last_stop_time - first_start_time), 'operation / second',
metadata=distribution_metadata))
results.append(sample.Sample(
'Multi-stream ' + operation + ' QPS (all streams active)',
len(all_active_latencies) / (first_stop_time - last_start_time),
'operation / second', metadata=distribution_metadata))
# Statistics about benchmarking overhead
gap_time = sum((active_duration - active_time
for active_duration, active_time
in zip(active_durations, total_active_times)))
results.append(sample.Sample(
'Multi-stream ' + operation + ' total gap time',
gap_time, 'second', metadata=distribution_metadata))
results.append(sample.Sample(
'Multi-stream ' + operation + ' gap time proportion',
gap_time / (first_stop_time - last_start_time) * 100.0,
'percent', metadata=distribution_metadata))
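# Illustrative sketch of the throughput metrics above (hypothetical numbers):
# suppose a single stream transfers 100 MB in operations whose latencies sum to
# 8 seconds, while the wall-clock span from its first active start to its last
# active stop is 10 seconds (2 seconds of gap between operations). Then:
#   net throughput            = 100e6 bytes * 8 bits / 8 s  = 100 Mbit/s
#   net throughput (with gap) = 100e6 bytes * 8 bits / 10 s =  80 Mbit/s
# and the 'total gap time' contribution from that stream is 2 seconds.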
def _DistributionToBackendFormat(dist):
"""Convert an object size distribution to the format needed by the backend.
Args:
dist: a distribution, given as a dictionary mapping size to
frequency. Size will be a string with a quantity and a
unit. Frequency will be a percentage, including a '%'
character. dist may also be a string, in which case it represents
a single object size which applies to 100% of objects.
Returns:
A dictionary giving an object size distribution. Sizes will be
integers representing bytes. Frequencies will be floating-point
numbers in [0,100], representing percentages.
Raises:
ValueError if dist is not a valid distribution.
"""
if isinstance(dist, dict):
val = {flag_util.StringToBytes(size):
flag_util.StringToRawPercent(frequency)
for size, frequency in six.iteritems(dist)}
else:
# We allow compact notation for point distributions. For instance,
# '1KB' is an abbreviation for '{1KB: 100%}'.
val = {flag_util.StringToBytes(dist): 100.0}
# I'm requiring exact addition to 100, which can always be satisfied
# with integer percentages. If we want to allow general decimal
# percentages, all we have to do is replace this equality check with
# approximate equality.
if sum(six.itervalues(val)) != 100.0:
raise ValueError("Frequencies in %s don't add to 100%%!" % dist)
return val
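# Illustrative sketch (assumes flag_util.StringToBytes('1KB') == 1000 and
# flag_util.StringToRawPercent('50%') == 50.0, i.e. decimal units and plain
# percentages):
#   _DistributionToBackendFormat('1KB')
#     -> {1000: 100.0}
#   _DistributionToBackendFormat({'1KB': '50%', '10KB': '50%'})
#     -> {1000: 50.0, 10000: 50.0}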
class APIScriptCommandBuilder(object):
"""Builds command lines for the API test script.
Attributes:
test_script_path: the path to the API test script on the remote machine.
storage: the storage provider to use, in the format expected by
the test script.
service: the ObjectStorageService object corresponding to the
storage provider.
"""
def __init__(self, test_script_path, storage, service):
self.test_script_path = test_script_path
self.storage = storage
self.service = service
def BuildCommand(self, args):
"""Build a command string for the API test script.
Args:
args: a list of strings. These will become space-separated
arguments to the test script.
Returns:
A string that can be passed to vm.RemoteCommand.
"""
cmd_parts = [
self.test_script_path,
'--storage_provider=%s' % self.storage
] + args + self.service.APIScriptArgs()
if FLAGS.object_storage_storage_class is not None:
cmd_parts += ['--object_storage_class',
FLAGS.object_storage_storage_class]
return ' '.join(cmd_parts)
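# Illustrative sketch (hypothetical path and bucket name; the trailing
# provider-specific arguments come from service.APIScriptArgs()):
#   builder = APIScriptCommandBuilder(
#       '/tmp/run/object_storage_api_tests.py', 'GCS', service)
#   builder.BuildCommand(['--bucket=my-bucket', '--scenario=OneByteRW'])
#   -> '/tmp/run/object_storage_api_tests.py --storage_provider=GCS '
#      '--bucket=my-bucket --scenario=OneByteRW <service-specific args>'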
class UnsupportedProviderCommandBuilder(APIScriptCommandBuilder):
"""A dummy command builder for unsupported providers.
When a provider isn't supported by the API test script yet, we
create this command builder for them. It will let us run the CLI
benchmark on that provider, but if the user tries to run an API
benchmark, it will throw an error.
Attributes:
provider: the name of the unsupported provider.
"""
def __init__(self, provider):
self.provider = provider
def BuildCommand(self, args):
raise NotImplementedError('API tests are not supported on provider %s.' %
self.provider)
def OneByteRWBenchmark(results, metadata, vm, command_builder,
service, bucket_name):
"""A benchmark for small object latency.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vm: the VM to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: an ObjectStorageService.
bucket_name: the primary bucket to benchmark.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
one_byte_rw_cmd = command_builder.BuildCommand([
'--bucket=%s' % bucket_name,
'--scenario=OneByteRW'])
_, raw_result = vm.RemoteCommand(one_byte_rw_cmd)
logging.info('OneByteRW raw result is %s', raw_result)
for up_and_down in ([
MultistreamOperationType.upload, MultistreamOperationType.download
]):
search_string = 'One byte %s - (.*)' % up_and_down
result_string = re.findall(search_string, raw_result)
sample_name = ONE_BYTE_LATENCY % up_and_down
if len(result_string) > 0:
_JsonStringToPercentileResults(results,
result_string[0],
sample_name,
LATENCY_UNIT,
metadata)
else:
raise ValueError('Unexpected test outcome from OneByteRW api test: '
'%s.' % raw_result)
def SingleStreamThroughputBenchmark(results, metadata, vm, command_builder,
service, bucket_name):
"""A benchmark for large object throughput.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vm: the VM to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: an ObjectStorageService.
bucket_name: the primary bucket to benchmark.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
single_stream_throughput_cmd = command_builder.BuildCommand([
'--bucket=%s' % bucket_name,
'--scenario=SingleStreamThroughput'])
_, raw_result = vm.RemoteCommand(single_stream_throughput_cmd)
logging.info('SingleStreamThroughput raw result is %s', raw_result)
for up_and_down in [
MultistreamOperationType.upload, MultistreamOperationType.download
]:
search_string = 'Single stream %s throughput in Bps: (.*)' % up_and_down
result_string = re.findall(search_string, raw_result)
sample_name = SINGLE_STREAM_THROUGHPUT % up_and_down
if not result_string:
raise ValueError('Unexpected test outcome from '
'SingleStreamThroughput api test: %s.' % raw_result)
# Convert Bytes per second to Mega bits per second
# We use MB (10^6) to be consistent with network
# bandwidth convention.
result = json.loads(result_string[0])
for percentile in PERCENTILES_LIST:
results.append(sample.Sample(
('%s %s') % (sample_name, percentile),
8 * float(result[percentile]) / 1000 / 1000,
THROUGHPUT_UNIT,
metadata))
def ListConsistencyBenchmark(results, metadata, vm, command_builder,
service, bucket_name):
"""A benchmark for bucket list consistency.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vm: the VM to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: an ObjectStorageService.
bucket_name: the primary bucket to benchmark.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
list_consistency_cmd = command_builder.BuildCommand([
'--bucket=%s' % bucket_name,
'--iterations=%d' % FLAGS.object_storage_list_consistency_iterations,
'--scenario=ListConsistency'])
_, raw_result = vm.RemoteCommand(list_consistency_cmd)
logging.info('ListConsistency raw result is %s', raw_result)
for scenario in LIST_CONSISTENCY_SCENARIOS:
metric_name = '%s %s' % (scenario, LIST_CONSISTENCY_PERCENTAGE)
search_string = '%s: (.*)' % metric_name
result_string = re.findall(search_string, raw_result)
if not result_string:
raise ValueError(
'Cannot get percentage from ListConsistency test.')
results.append(sample.Sample(
metric_name,
float(result_string[0]),
NA_UNIT,
metadata))
# Parse the list inconsistency window if there is any.
metric_name = '%s %s' % (scenario, LIST_INCONSISTENCY_WINDOW)
search_string = '%s: (.*)' % metric_name
result_string = re.findall(search_string, raw_result)
_JsonStringToPercentileResults(results,
result_string[0],
metric_name,
LATENCY_UNIT,
metadata)
# Also report the list latency. These latencies are from the lists
# that were consistent.
metric_name = '%s %s' % (scenario, LIST_LATENCY)
search_string = '%s: (.*)' % metric_name
result_string = re.findall(search_string, raw_result)
_JsonStringToPercentileResults(results,
result_string[0],
metric_name,
LATENCY_UNIT,
metadata)
def LoadWorkerOutput(output):
"""Load output from worker processes to our internal format.
Args:
output: list of strings. The stdouts of all worker processes.
Returns:
A tuple of start_time, latency, size. Each of these is a list of
numpy arrays, one array per worker process. start_time[i],
latency[i], and size[i] together form a table giving the start
time, latency, and size (bytes transmitted or received) of all
send/receive operations for worker i.
start_time holds POSIX timestamps, stored as np.float64. latency
holds times in seconds, stored as np.float64. size holds sizes in
bytes, stored as np.int64.
Example:
start_time[i] latency[i] size[i]
------------- ---------- -------
0.0 0.5 100
1.0 0.7 200
2.3 0.3 100
Raises:
AssertionError, if an individual worker's input includes
overlapping operations, or operations that don't move forward in
time, or if the input list isn't in stream number order.
"""
start_times = []
latencies = []
sizes = []
for worker_out in output:
json_out = json.loads(worker_out)
for stream in json_out:
assert len(stream['start_times']) == len(stream['latencies'])
assert len(stream['latencies']) == len(stream['sizes'])
start_times.append(np.asarray(stream['start_times'], dtype=np.float64))
latencies.append(np.asarray(stream['latencies'], dtype=np.float64))
sizes.append(np.asarray(stream['sizes'], dtype=np.int64))
return start_times, latencies, sizes
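# Illustrative sketch (hypothetical worker stdout): each worker prints a JSON
# list with one record per stream, so an input like
#   output = ['[{"start_times": [0.0, 1.0, 2.3],'
#             ' "latencies": [0.5, 0.7, 0.3],'
#             ' "sizes": [100, 200, 100]}]']
#   start_times, latencies, sizes = LoadWorkerOutput(output)
# yields one numpy array per stream, matching the table in the docstring.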
def _RunMultiStreamProcesses(vms, command_builder, cmd_args, streams_per_vm):
"""Runs all of the multistream read or write processes and doesn't return
until they complete.
Args:
vms: the VMs to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
cmd_args: arguments for the command_builder.
streams_per_vm: number of threads per vm.
Returns:
A list of stdout strings, one per VM, from the worker processes.
"""
output = [None] * len(vms)
def RunOneProcess(vm_idx):
logging.info('Running on VM %s.', vm_idx)
cmd = command_builder.BuildCommand(cmd_args + [
'--stream_num_start=%s' % (vm_idx * streams_per_vm),
'--vm_id=%s' % vm_idx
])
out, _ = vms[vm_idx].RobustRemoteCommand(cmd, should_log=False)
output[vm_idx] = out
# Each vm/process has a thread managing it.
threads = [
threading.Thread(target=RunOneProcess, args=(vm_idx,))
for vm_idx in range(len(vms))]
for thread in threads:
thread.start()
logging.info('Started %s processes.', len(vms))
# Wait for the threads to finish
for thread in threads:
thread.join()
logging.info('All processes complete.')
return output
def _DatetimeNow():
"""Returns datetime.datetime.now()."""
return datetime.datetime.now()
def _ColdObjectsWrittenFilename():
"""Generates a name for the objects_written_file.
Returns:
The name of the objects_written_file if it should be created, or None.
"""
if FLAGS.object_storage_objects_written_file_prefix:
# Note this format is required by _ColdObjectsWrittenFileAgeHours.
datetime_suffix = _DatetimeNow().strftime('%Y%m%d-%H%M')
return '%s-%s-%s-%s' % (
FLAGS.object_storage_objects_written_file_prefix,
FLAGS.object_storage_region,
uuid.uuid4(), # Add a UUID to support parallel runs that upload data.
datetime_suffix)
return None
def _ColdObjectsWrittenFileAgeHours(filename):
"""Determines the age in hours of an objects_written_file.
Args:
filename: The name of the file.
Returns:
The age of the file in hours (based on the name), or None.
"""
# Parse the year, month, day, hour, and minute from the filename based on the
# way it is written in _ColdObjectsWrittenFilename.
match = re.search(r'(\d\d\d\d)(\d\d)(\d\d)-(\d\d)(\d\d)$', filename)
if not match:
return None
year, month, day, hour, minute = (int(item) for item in match.groups())
write_datetime = datetime.datetime(year, month, day, hour, minute)
write_timedelta = _DatetimeNow() - write_datetime
return write_timedelta.total_seconds() / _SECONDS_PER_HOUR
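# Illustrative sketch (hypothetical prefix, region and UUID): with
# --object_storage_objects_written_file_prefix=/tmp/cold and
# --object_storage_region=us-central1, _ColdObjectsWrittenFilename() produces
# a name such as
#   /tmp/cold-us-central1-<uuid>-20240101-1200
# and _ColdObjectsWrittenFileAgeHours() called on that name at
# 2024-01-05 12:00 would report 96.0 hours, comfortably above the default
# --object_storage_read_objects_min_hours of 72.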
def _MultiStreamOneWay(results, metadata, vms, command_builder,
service, bucket_name, operation):
"""Measures multi-stream latency and throughput in one direction.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vms: the VMs to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: The provider's ObjectStorageService
bucket_name: the primary bucket to benchmark.
operation: a MultistreamOperationType value (upload, download, delete or
bulk_delete).
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
objects_written_file = posixpath.join(vm_util.VM_TMP_DIR,
OBJECTS_WRITTEN_FILE)
size_distribution = _DistributionToBackendFormat(
FLAGS.object_storage_object_sizes)
logging.info('Distribution %s, backend format %s.',
FLAGS.object_storage_object_sizes, size_distribution)
streams_per_vm = FLAGS.object_storage_streams_per_vm
num_vms = FLAGS.num_vms
start_time = (
time.time() +
MultiThreadStartDelay(num_vms, streams_per_vm).m_as('second'))
delete_delay = MultiThreadDeleteDelay(num_vms, streams_per_vm)
logging.info('Start time is %s', start_time)
logging.info('Delete delay is %s', delete_delay)
cmd_args = [
'--bucket=%s' % bucket_name,
'--objects_per_stream=%s' % (
FLAGS.object_storage_multistream_objects_per_stream),
'--num_streams=%s' % streams_per_vm,
'--start_time=%s' % start_time,
'--objects_written_file=%s' % objects_written_file]
if operation == MultistreamOperationType.upload:
cmd_args += [
'--object_sizes="%s"' % size_distribution,
'--object_naming_scheme=%s' % FLAGS.object_storage_object_naming_scheme,
'--scenario=MultiStreamWrite']
elif operation == MultistreamOperationType.download:
cmd_args += ['--scenario=MultiStreamRead']
elif operation == MultistreamOperationType.delete:
cmd_args += [
'--scenario=MultiStreamDelete',
'--delete_delay=%s' % delete_delay
]
elif operation == MultistreamOperationType.bulk_delete:
cmd_args += [
'--scenario=MultiStreamDelete', '--bulk_delete=true',
'--delete_delay=%s' % delete_delay
]
else:
raise Exception('Value of operation must be one of upload, download, delete '
'or bulk_delete. Value is: \'' + operation.name + '\'')
output = _RunMultiStreamProcesses(vms, command_builder, cmd_args,
streams_per_vm)
start_times, latencies, sizes = LoadWorkerOutput(output)
if FLAGS.object_storage_worker_output:
with open(FLAGS.object_storage_worker_output, 'w') as out_file:
out_file.write(json.dumps(output))
_ProcessMultiStreamResults(
start_times,
latencies,
sizes,
operation.name,
list(six.iterkeys(size_distribution)),
results,
metadata=metadata)
# Write the objects written file if the flag is set and this is an upload
objects_written_path_local = _ColdObjectsWrittenFilename()
if operation == MultistreamOperationType.upload and objects_written_path_local is not None:
# Get the objects written from all the VMs
# Note these are JSON lists with the following format:
# [[object1_name, object1_size],[object2_name, object2_size],...]
outs = vm_util.RunThreaded(
lambda vm: vm.RemoteCommand('cat ' + objects_written_file), vms)
maybe_storage_account = ''
maybe_resource_group = ''
if FLAGS.storage == 'Azure':
maybe_storage_account = '"azure_storage_account": "%s", ' % \
service.storage_account.name
maybe_resource_group = '"azure_resource_group": "%s", ' % \
service.resource_group.name
# Merge the objects written from all the VMs into a single string
objects_written_json = \
'{%s%s"bucket_name": "%s", "objects_written": %s}' % \
(maybe_storage_account, maybe_resource_group, bucket_name,
'[' + ','.join([out for out, _ in outs]) + ']')
# Write the file
with open(objects_written_path_local, 'w') as objects_written_file_local:
objects_written_file_local.write(objects_written_json)
def MultiStreamRWBenchmark(results, metadata, vms, command_builder,
service, bucket_name):
"""A benchmark for multi-stream read/write latency and throughput.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vms: the VMs to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: The provider's ObjectStorageService
bucket_name: the primary bucket to benchmark.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
logging.info('Starting multi-stream write test on %s VMs.', len(vms))
_MultiStreamOneWay(results, metadata, vms, command_builder, service,
bucket_name, MultistreamOperationType.upload)
logging.info('Finished multi-stream write test. Starting '
'multi-stream read test.')
_MultiStreamOneWay(results, metadata, vms, command_builder, service,
bucket_name, MultistreamOperationType.download)
logging.info('Finished multi-stream read test.')
def MultiStreamWriteBenchmark(results, metadata, vms, command_builder,
service, bucket_name):
"""A benchmark for multi-stream write latency and throughput.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vms: the VMs to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: The provider's ObjectStorageService
bucket_name: the primary bucket to benchmark.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
logging.info('Starting multi-stream write test on %s VMs.', len(vms))
_MultiStreamOneWay(results, metadata, vms, command_builder, service,
bucket_name, MultistreamOperationType.upload)
logging.info('Finished multi-stream write test.')
def MultiStreamReadBenchmark(results, metadata, vms, command_builder,
service, bucket_name, read_objects):
"""A benchmark for multi-stream read latency and throughput.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vms: the VMs to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: The provider's ObjectStorageService
bucket_name: the primary bucket to benchmark.
read_objects: List of lists of [object_name, object_size]. In the outermost
list, each element corresponds to a VM's worker process.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
logging.info('Starting multi-stream read test on %s VMs.', len(vms))
assert read_objects is not None, (
'api_multistream_reads scenario requires the '
'object_storage_read_objects_prefix flag to be set.')
# Send over the objects written file
try:
# Write the per-VM objects-written-files
assert len(read_objects) == len(vms), (
'object_storage_read_objects_prefix file specified requires exactly '
'%d VMs, but %d were provisioned.' % (len(read_objects), len(vms)))
for vm, vm_objects_written in zip(vms, read_objects):
# Note that each file is written with a unique name so that parallel runs
# don't overwrite the same local file. They are pushed to the VM to a file
# named OBJECTS_WRITTEN_FILE.
tmp_objects_written_path = os.path.join(vm_util.GetTempDir(),
'%s-%s' % (OBJECTS_WRITTEN_FILE,
vm.name))
with open(tmp_objects_written_path, 'w') as objects_written_file:
objects_written_file.write(json.dumps(vm_objects_written))
vm.PushFile(tmp_objects_written_path,
posixpath.join(vm_util.VM_TMP_DIR, OBJECTS_WRITTEN_FILE))
except Exception as e:
raise Exception('Failed to upload the objects written files to the VMs: '
'%s' % e)
_MultiStreamOneWay(results, metadata, vms, command_builder, service,
bucket_name, MultistreamOperationType.download)
logging.info('Finished multi-stream read test.')
def MultiStreamDelete(results, metadata, vms, command_builder, service,
bucket_name):
"""A benchmark for multi-stream delete.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vms: the VMs to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: The provider's ObjectStorageService
bucket_name: the primary bucket to benchmark.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
logging.info('Starting multi-stream delete test on %s VMs.', len(vms))
if FLAGS.object_storage_bulk_delete:
_MultiStreamOneWay(results, metadata, vms, command_builder, service,
bucket_name, MultistreamOperationType.bulk_delete)
else:
_MultiStreamOneWay(results, metadata, vms, command_builder, service,
bucket_name, MultistreamOperationType.delete)
logging.info('Finished multi-stream delete test.')
def CheckPrerequisites(benchmark_config):
"""Verifies that the required resources are present.
Args:
benchmark_config: Benchmark config to verify.
Raises:
perfkitbenchmarker.data.ResourceNotFound: On missing resource.
perfkitbenchmarker.errors.Setup.InvalidFlagConfigurationError: On invalid
flags.
"""
del benchmark_config
data.ResourcePath(DATA_FILE)
if FLAGS.object_storage_apply_region_suffix_to_bucket_name:
if not FLAGS.object_storage_region:
raise errors.Setup.InvalidFlagConfigurationError(
'Please specify --object_storage_region if using '
'--object_storage_apply_region_suffix_to_bucket_name.')
def _AppendPercentilesToResults(output_results, input_results, metric_name,
metric_unit, metadata):
# PercentileCalculator will (correctly) raise an exception on empty
# input, but an empty input list makes semantic sense here.
if len(input_results) == 0:
return
percentiles = PercentileCalculator(input_results)
for percentile in PERCENTILES_LIST:
output_results.append(sample.Sample(('%s %s') % (metric_name, percentile),
percentiles[percentile],
metric_unit,
metadata))
def CLIThroughputBenchmark(output_results, metadata, vm, command_builder,
service, bucket):
"""A benchmark for CLI tool throughput.
We will upload and download a set of files from/to a local directory
via cli tools and observe the throughput.
Args:
output_results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vm: the VM to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: an ObjectStorageService.
bucket: the primary bucket to benchmark.
Raises:
NotEnoughResultsError: if we failed too many times to upload or download.
"""
data_directory = '/tmp/run/data'
# The real solution to the iteration count issue is dynamically
# choosing the number of iterations based on how long they
# take. This will work for now, though.
if FLAGS.storage == providers.AZURE:
iteration_count = CLI_TEST_ITERATION_COUNT_AZURE
elif FLAGS.cli_test_size == 'normal':
iteration_count = CLI_TEST_ITERATION_COUNT
else:
iteration_count = LARGE_CLI_TEST_ITERATION_COUNT
# The CLI-based tests require some provisioning on the VM first.
vm.RemoteCommand(
'cd /tmp/run/; bash cloud-storage-workload.sh %s' % FLAGS.cli_test_size)
# CLI tool based tests.
cli_upload_results = []
cli_download_results = []
if FLAGS.cli_test_size == 'normal':
data_size_in_mbits = DATA_SIZE_IN_MBITS
file_names = ['file-%s.dat' % i for i in range(100)]
else:
data_size_in_mbits = LARGE_DATA_SIZE_IN_MBITS
file_names = ['file_large_3gib.dat']
for _ in range(iteration_count):
try:
service.EmptyBucket(bucket)
except Exception:
pass
try:
_, res = service.CLIUploadDirectory(vm, data_directory,
file_names, bucket)
except errors.VirtualMachine.RemoteCommandError:
logging.info('failed to upload, skip this iteration.')
continue
throughput = data_size_in_mbits / vm_util.ParseTimeCommandResult(res)
logging.info('cli upload throughput %f', throughput)
cli_upload_results.append(throughput)
try:
vm.RemoveFile(posixpath.join(DOWNLOAD_DIRECTORY, '*'))
except Exception:
pass
try:
_, res = service.CLIDownloadBucket(vm, bucket,
file_names, DOWNLOAD_DIRECTORY)
except errors.VirtualMachine.RemoteCommandError:
logging.info('failed to download, skip this iteration.')
continue
throughput = data_size_in_mbits / vm_util.ParseTimeCommandResult(res)
logging.info('cli download throughput %f', throughput)
cli_download_results.append(throughput)
expected_successes = iteration_count * (1 - CLI_TEST_FAILURE_TOLERANCE)
if (len(cli_download_results) < expected_successes or
len(cli_upload_results) < expected_successes):
raise NotEnoughResultsError('Failed to complete the required number of '
'iterations.')
# Report various percentiles.
metrics_prefix = ''
if FLAGS.cli_test_size != 'normal':
metrics_prefix = '%s ' % FLAGS.cli_test_size
_AppendPercentilesToResults(output_results,
cli_upload_results,
'%s%s' % (metrics_prefix,
UPLOAD_THROUGHPUT_VIA_CLI),
THROUGHPUT_UNIT,
metadata)
_AppendPercentilesToResults(output_results,
cli_download_results,
'%s%s' % (metrics_prefix,
DOWNLOAD_THROUGHPUT_VIA_CLI),
THROUGHPUT_UNIT,
metadata)
def PrepareVM(vm, service):
vm.InstallPackages('python3-pip')
# dependencies of API_TEST_SCRIPT
vm.RemoteCommand('sudo pip3 install absl-py')
vm.RemoteCommand('sudo pip3 install pyyaml')
vm.Install('openssl')
# Prepare data on vm, create a run directory in temporary directory, and add
# permission.
vm.RemoteCommand('sudo mkdir -p ' + SCRIPT_DIR)
vm.RemoteCommand('sudo chmod 777 ' + SCRIPT_DIR)
vm.RemoteCommand('sudo mkdir -p ' + DOWNLOAD_DIRECTORY)
vm.RemoteCommand('sudo chmod 777 ' + DOWNLOAD_DIRECTORY)
vm.RemoteCommand('sudo mkdir -p ' + REMOTE_PACKAGE_DIR)
vm.RemoteCommand('sudo chmod 777 ' + REMOTE_PACKAGE_DIR)
file_path = data.ResourcePath(DATA_FILE)
vm.PushFile(file_path, SCRIPT_DIR)
# push the test script
script_path = data.ResourcePath(
os.path.join(API_TEST_SCRIPTS_DIR, API_TEST_SCRIPT))
vm.PushFile(script_path, '/tmp/run/')
# push the package dependencies of the test script
for file_name in API_TEST_SCRIPT_PACKAGE_FILES + service.APIScriptFiles():
path = data.ResourcePath(
os.path.join(API_TEST_SCRIPTS_DIR, file_name))
logging.info('Uploading %s to %s', path, vm)
vm.PushFile(path, REMOTE_PACKAGE_DIR)
service.PrepareVM(vm)
def CleanupVM(vm, service):
service.CleanupVM(vm)
vm.RemoteCommand('/usr/bin/yes | sudo pip3 uninstall absl-py')
vm.RemoteCommand('sudo rm -rf /tmp/run/')
objects_written_file = posixpath.join(vm_util.VM_TMP_DIR,
OBJECTS_WRITTEN_FILE)
vm.RemoteCommand('rm -f %s' % objects_written_file)
def Prepare(benchmark_spec):
"""Prepare vm with cloud provider tool and prepare vm with data file.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Raises:
ColdDataError: If this benchmark is reading cold data, but the data isn't
cold enough (as configured by object_storage_read_objects_min_hours).
"""
# We would like to always cleanup server side states when exception happens.
benchmark_spec.always_call_cleanup = True
# Load the objects to read file if specified
benchmark_spec.read_objects = None
if FLAGS.object_storage_read_objects_prefix is not None:
# By taking a glob, we choose an arbitrary matching file that is old enough,
# should there ever be more than one.
search_prefix = '%s-%s*' % (
FLAGS.object_storage_read_objects_prefix,
FLAGS.object_storage_region)
read_objects_filenames = glob.glob(search_prefix)
logging.info('Considering object files %s*: %s', search_prefix,
read_objects_filenames)
for filename in read_objects_filenames:
age_hours = _ColdObjectsWrittenFileAgeHours(filename)
if age_hours and age_hours > FLAGS.object_storage_read_objects_min_hours:
read_objects_filename = filename
break
else:
raise ColdDataError(
'Object data older than %d hours does not exist. Current cold data '
'files include the following: %s' % (
FLAGS.object_storage_read_objects_min_hours,
read_objects_filenames))
with open(read_objects_filename) as read_objects_file:
# Format of json structure is:
# {"bucket_name": <bucket_name>,
# ... any other provider-specific context needed
# "objects_written": <objects_written_array>}
benchmark_spec.read_objects = json.loads(read_objects_file.read())
benchmark_spec.read_objects_filename = read_objects_filename
benchmark_spec.read_objects_age_hours = age_hours
# When this benchmark reads these files, the data will be deleted. Delete
# the file that specifies the data too.
if not FLAGS.object_storage_dont_delete_bucket:
os.remove(read_objects_filename)
assert benchmark_spec.read_objects is not None, (
'Failed to read the file specified by '
'--object_storage_read_objects_prefix')
# Load the provider and its object storage service
providers.LoadProvider(FLAGS.storage)
# Determine the bucket name.
if benchmark_spec.read_objects is not None:
# Using an existing bucket
bucket_name = benchmark_spec.read_objects['bucket_name']
if FLAGS.object_storage_bucket_name is not None:
logging.warning('--object_storage_bucket_name ignored because '
'--object_storage_read_objects_prefix was specified')
else:
# Use a new bucket (or the name of a specified bucket).
bucket_name = FLAGS.object_storage_bucket_name or 'pkb%s' % FLAGS.run_uri
if FLAGS.object_storage_apply_region_suffix_to_bucket_name:
# Avoid non-alphanumeric characters in the region as bucket names on some
# clouds cannot contain non-alphanumeric characters.
bucket_name = '%s%s' % (bucket_name,
re.sub(r'[\W_]', '', FLAGS.object_storage_region))
service = object_storage_service.GetObjectStorageClass(FLAGS.storage)()
if (FLAGS.storage == 'Azure' and
FLAGS.object_storage_read_objects_prefix is not None):
# Storage provider is azure and we are reading existing objects.
# Need to prepare the ObjectStorageService with the existing storage
# account and resource group associated with the bucket containing our
# objects
service.PrepareService(
FLAGS.object_storage_region,
# On Azure, use an existing storage account if we
# are reading existing objects
(benchmark_spec.read_objects['azure_storage_account'],
benchmark_spec.read_objects['azure_resource_group']))
elif FLAGS.storage == 'Azure' and FLAGS.object_storage_bucket_name:
# We are using a bucket that may exist from a previous run. We should use
# a storage account and resource group for this bucket based on the same
# name (for consistency).
service.PrepareService(
FLAGS.object_storage_region,
# The storage account must not exceed 24 characters.
(bucket_name[:24], bucket_name + '-resource-group'),
try_to_create_storage_account_and_resource_group=True)
else:
service.PrepareService(FLAGS.object_storage_region)
vms = benchmark_spec.vms
vm_util.RunThreaded(lambda vm: PrepareVM(vm, service), vms)
# Make the bucket.
if benchmark_spec.read_objects is None:
# Fail if we cannot create the bucket as long as the bucket name was not
# set via a flag. If it was set by a flag, then we will still try to create
# the bucket, but won't fail if it already exists. This supports running the
# benchmark on the same bucket multiple times.
raise_on_bucket_creation_failure = not FLAGS.object_storage_bucket_name
if FLAGS.storage == 'GCP' and FLAGS.object_storage_gcs_multiregion:
# Use a GCS multiregional bucket
multiregional_service = gcs.GoogleCloudStorageService()
multiregional_service.PrepareService(FLAGS.object_storage_gcs_multiregion
or DEFAULT_GCS_MULTIREGION)
multiregional_service.MakeBucket(
bucket_name, raise_on_failure=raise_on_bucket_creation_failure)
else:
# Use a regular bucket
service.MakeBucket(
bucket_name, raise_on_failure=raise_on_bucket_creation_failure)
# Save the service and the bucket name for later
benchmark_spec.service = service
benchmark_spec.bucket_name = bucket_name
def Run(benchmark_spec):
"""Run storage benchmark and publish results.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
Total throughput in the form of tuple. The tuple contains
the sample metric (string), value (float), unit (string).
"""
logging.info('Start benchmarking object storage service, '
'scenario is %s, storage provider is %s.',
FLAGS.object_storage_scenario, FLAGS.storage)
service = benchmark_spec.service
bucket_name = benchmark_spec.bucket_name
metadata = {'storage_provider': FLAGS.storage}
vms = benchmark_spec.vms
if FLAGS[OBJECT_STORAGE_REGION].present:
metadata[REGIONAL_BUCKET_LOCATION] = FLAGS.object_storage_region
else:
metadata[REGIONAL_BUCKET_LOCATION] = DEFAULT
if FLAGS[OBJECT_STORAGE_GCS_MULTIREGION].present:
metadata[GCS_MULTIREGION_LOCATION] = FLAGS.object_storage_gcs_multiregion
else:
metadata[GCS_MULTIREGION_LOCATION] = DEFAULT
metadata.update(service.Metadata(vms[0]))
results = []
test_script_path = '/tmp/run/%s' % API_TEST_SCRIPT
try:
command_builder = APIScriptCommandBuilder(
test_script_path, STORAGE_TO_API_SCRIPT_DICT[FLAGS.storage], service)
except KeyError:
command_builder = UnsupportedProviderCommandBuilder(FLAGS.storage)
for name, benchmark in [('cli', CLIThroughputBenchmark),
('api_data', OneByteRWBenchmark),
('api_data', SingleStreamThroughputBenchmark),
('api_namespace', ListConsistencyBenchmark)]:
if FLAGS.object_storage_scenario in {name, 'all'}:
benchmark(results, metadata, vms[0], command_builder,
service, bucket_name)
# MultiStreamRW and MultiStreamWrite support multiple VMs, so they have a
# slightly different calling convention than the others.
for name, benchmark in [('api_multistream', MultiStreamRWBenchmark),
('api_multistream_writes',
MultiStreamWriteBenchmark)]:
if FLAGS.object_storage_scenario in {name, 'all'}:
benchmark(results, metadata, vms, command_builder, service, bucket_name)
# MultiStreamRead has the additional 'read_objects' parameter
if FLAGS.object_storage_scenario in {'api_multistream_reads', 'all'}:
metadata['cold_objects_filename'] = benchmark_spec.read_objects_filename
metadata['cold_objects_age_hours'] = benchmark_spec.read_objects_age_hours
MultiStreamReadBenchmark(results, metadata, vms, command_builder, service,
bucket_name,
benchmark_spec.read_objects['objects_written'])
  # Clear the bucket if we're not saving the objects for later.
  # This is needed for long-running tests; otherwise the objects would just
  # pile up after each run.
keep_bucket = (FLAGS.object_storage_objects_written_file_prefix is not None or
FLAGS.object_storage_dont_delete_bucket)
if not keep_bucket:
MultiStreamDelete(results, metadata, vms, command_builder, service,
bucket_name)
service.UpdateSampleMetadata(results)
return results
def Cleanup(benchmark_spec):
"""Clean up storage bucket/container and clean up vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
if not hasattr(benchmark_spec, 'service'):
logging.info('Skipping cleanup as prepare method failed')
return
service = benchmark_spec.service
bucket_name = benchmark_spec.bucket_name
vms = benchmark_spec.vms
vm_util.RunThreaded(lambda vm: CleanupVM(vm, service), vms)
# Only clean up bucket if we're not saving the objects for a later run
keep_bucket = (FLAGS.object_storage_objects_written_file_prefix is not None or
FLAGS.object_storage_dont_delete_bucket)
if not keep_bucket:
service.DeleteBucket(bucket_name)
service.CleanupService()
|
manager.py
|
#!/usr/bin/env python3
import datetime
import os
import signal
import subprocess
import sys
import traceback
from multiprocessing import Process
import cereal.messaging as messaging
import selfdrive.crash as crash
from common.basedir import BASEDIR
from common.params import Params, ParamKeyType
from common.text_window import TextWindow
from selfdrive.boardd.set_time import set_time
from selfdrive.hardware import HARDWARE, PC, EON
from selfdrive.manager.helpers import unblock_stdout
from selfdrive.manager.process import ensure_running, launcher
from selfdrive.manager.process_config import managed_processes
from selfdrive.athena.registration import register, UNREGISTERED_DONGLE_ID
from selfdrive.swaglog import cloudlog, add_file_handler
from selfdrive.version import dirty, get_git_commit, version, origin, branch, commit, \
terms_version, training_version, comma_remote, \
get_git_branch, get_git_remote
from selfdrive.hardware.eon.apk import system
sys.path.append(os.path.join(BASEDIR, "pyextra"))
def manager_init():
# update system time from panda
set_time(cloudlog)
params = Params()
params.clear_all(ParamKeyType.CLEAR_ON_MANAGER_START)
default_params = [
("CompletedTrainingVersion", "0"),
("HasAcceptedTerms", "0"),
("OpenpilotEnabledToggle", "1"),
("CommunityFeaturesToggle", "1"),
("IsMetric", "1"),
("CustomLeadMark", "1")
]
if not PC:
default_params.append(("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')))
if params.get_bool("RecordFrontLock"):
params.put_bool("RecordFront", True)
# set unset params
for k, v in default_params:
if params.get(k) is None:
params.put(k, v)
  # is this a dashcam?
if os.getenv("PASSIVE") is not None:
params.put_bool("Passive", bool(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
# Create folders needed for msgq
try:
os.mkdir("/dev/shm")
except FileExistsError:
pass
except PermissionError:
print("WARNING: failed to make /dev/shm")
# set version params
params.put("Version", version)
params.put("TermsVersion", terms_version)
params.put("TrainingVersion", training_version)
params.put("GitCommit", get_git_commit(default=""))
params.put("GitBranch", get_git_branch(default=""))
params.put("GitRemote", get_git_remote(default=""))
# set dongle id
reg_res = register(show_spinner=True)
if reg_res:
dongle_id = reg_res
else:
serial = params.get("HardwareSerial")
raise Exception(f"Registration failed for device {serial}")
os.environ['DONGLE_ID'] = dongle_id # Needed for swaglog
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty,
device=HARDWARE.get_device_type())
if comma_remote and not (os.getenv("NOLOG") or os.getenv("NOCRASH") or PC):
crash.init()
crash.bind_user(id=dongle_id)
crash.bind_extra(dirty=dirty, origin=origin, branch=branch, commit=commit,
device=HARDWARE.get_device_type())
def manager_prepare():
for p in managed_processes.values():
p.prepare()
def manager_cleanup():
for p in managed_processes.values():
p.stop()
cloudlog.info("everything is dead")
def manager_thread():
Process(name="road_speed_limiter", target=launcher, args=("selfdrive.road_speed_limiter",)).start()
if EON:
system("am startservice com.neokii.optool/.MainService")
Process(name="shutdownd", target=launcher, args=("selfdrive.shutdownd",)).start()
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
# save boot log
#subprocess.call("./bootlog", cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
params = Params()
ignore = []
if params.get("DongleId", encoding='utf8') == UNREGISTERED_DONGLE_ID:
ignore += ["manage_athenad", "uploader"]
if os.getenv("NOBOARD") is not None:
ignore.append("pandad")
if os.getenv("BLOCK") is not None:
ignore += os.getenv("BLOCK").split(",")
ensure_running(managed_processes.values(), started=False, not_run=ignore)
started_prev = False
sm = messaging.SubMaster(['deviceState'])
pm = messaging.PubMaster(['managerState'])
while True:
sm.update()
not_run = ignore[:]
if sm['deviceState'].freeSpacePercent < 5:
not_run.append("loggerd")
started = sm['deviceState'].started
driverview = params.get_bool("IsDriverViewEnabled")
ensure_running(managed_processes.values(), started, driverview, not_run)
# trigger an update after going offroad
if started_prev and not started and 'updated' in managed_processes:
os.sync()
managed_processes['updated'].signal(signal.SIGHUP)
started_prev = started
running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if p.proc.is_alive() else "\u001b[31m", p.name)
for p in managed_processes.values() if p.proc]
cloudlog.debug(' '.join(running_list))
# send managerState
msg = messaging.new_message('managerState')
msg.managerState.processes = [p.get_process_state_msg() for p in managed_processes.values()]
pm.send('managerState', msg)
# TODO: let UI handle this
# Exit main loop when uninstall is needed
if params.get_bool("DoUninstall"):
break
def main():
prepare_only = os.getenv("PREPAREONLY") is not None
manager_init()
# Start UI early so prepare can happen in the background
if not prepare_only:
managed_processes['ui'].start()
manager_prepare()
if prepare_only:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
manager_cleanup()
if Params().get_bool("DoUninstall"):
cloudlog.warning("uninstalling")
HARDWARE.uninstall()
if __name__ == "__main__":
unblock_stdout()
try:
main()
except Exception:
add_file_handler(cloudlog)
cloudlog.exception("Manager failed to start")
# Show last 3 lines of traceback
error = traceback.format_exc(-3)
error = "Manager failed to start\n\n" + error
with TextWindow(error) as t:
t.wait_for_exit()
raise
# manual exit because we are forked
sys.exit(0)
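# Example (illustrative) environment variables recognized above:
#   PREPAREONLY=1       run manager_init()/manager_prepare() and exit
#   NOBOARD=1           do not start pandad
#   BLOCK=proc1,proc2   skip the listed managed processes
#   PASSIVE=1           run in dashcam (passive) mode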
|
stress.py
|
"""
Run all of the unit tests for this package multiple times in a highly
multithreaded way to stress the system. This makes it possible to look
for memory leaks and threading issues and provides a good target for a
profiler to accumulate better data.
"""
from __future__ import print_function
import sys, os, gc, time, threading
class StressTest:
def __init__(self):
self.dirname = os.path.split(__file__)[0]
sys.path.append(self.dirname)
gc.set_debug(gc.DEBUG_LEAK)
import runtests
self.module = runtests
self.done = []
def dprint(self, msg):
# Debugging helper to trace thread-related tests.
if 1: print(msg)
    def markStart(self):
        self._start = time.perf_counter()
    def markFinish(self):
        self._finish = time.perf_counter()
    def elapsed(self):
        return self._finish - self._start
def printGCReport(self):
for item in gc.get_objects():
print(item, sys.getrefcount(item))
def runThread(self, iterations):
        thread_id = threading.get_ident()
self.dprint("thread %s starting..." % thread_id)
time.sleep(0.1)
for i in range(iterations):
self.dprint("thread %s iter %d start" % (thread_id, i))
self.module.main()
self.dprint("thread %s iter %d end" % (thread_id, i))
self.done.append(None)
self.dprint("thread %s done" % thread_id)
def stressTest(self, iterations=1, threads=1):
args = (iterations,)
self.markStart()
for i in range(threads):
            worker = threading.Thread(target=self.runThread, args=args)
            worker.start()
while len(self.done) < (iterations * threads):
self.dprint(len(self.done))
time.sleep(0.1)
self.markFinish()
        took = self.elapsed()
        self.dprint("stress run took %.2f seconds" % took)
        self.printGCReport()
def main():
test = StressTest()
test.stressTest(2, 10)
if __name__ == '__main__':
main()
sys.exit(0)
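# Illustrative usage note: stressTest(2, 10) above runs the full test suite 2
# times on each of 10 concurrent threads; a heavier pass could be requested
# with, e.g., StressTest().stressTest(iterations=5, threads=20).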
|
client.py
|
import os
import sys
from ftplib import FTP, all_errors
from threading import Thread, Event
import requests
from tcp_latency import measure_latency
NAMENODE_ADDR = 'namenode'
def print_help():
print("""\nList of available commands:
init
create <file name/path in FS>
read <file name/path in FS> <file name/path on Client>
write <file name/path on Client> <file name/path in FS>
rm <file name/path in FS>
info <file name/path in FS>
cp <from file name/path in FS> <to file name/path in FS>
mv <from file name/path in FS> <to file name/path in FS>
cd <folder name/path in FS>
ls <folder name/path in FS>
mkdir <folder name/path in FS>
rmdir <folder name/path in FS>\n""")
def send_req(cmd, args='', show=True):
try:
r = requests.get(f'http://{NAMENODE_ADDR}:80/' + cmd, json=args)
if show:
print(r.json()['msg'])
return r.json()['msg']
except Exception as e:
print(e)
def ping_datanodes(datanodes):
latency = []
for datanode in datanodes:
latency.append(measure_latency(host=datanode, port=21)[0])
return latency
def update_lock(event, file_from):
while not event.wait(300):
send_req('update_lock', {'file_path': file_from}, show=False)
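    # Illustrative timing: event.wait(300) returns False every 300 seconds
    # until event.set() is called, so the lock on file_from is refreshed
    # roughly every five minutes while a transfer is in progress.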
def read_file(file_from, file_to=None):
if file_to is None:
file_to = file_from
result = send_req('read', {'file_path': file_from}, show=False)
if isinstance(result, str):
print(result)
return
datanodes = result['ips']
file_from = result['path']
event = Event()
send_clock_update = Thread(target=update_lock, args=(event, file_from))
send_clock_update.start()
    latencies = ping_datanodes(datanodes)
    data_stored = False
    for latency, datanode in sorted(zip(latencies, datanodes)):
try:
with FTP(datanode) as ftp, open(file_to, 'wb') as localfile:
ftp.login()
ftp.retrbinary('RETR ' + file_from, localfile.write, 1024)
data_stored = True
break
except PermissionError:
print("Cannot open file. Try with sudo")
data_stored = True
break
except all_errors:
continue
if not data_stored:
print('Cannot connect to datanode')
event.set()
send_clock_update.join()
send_req('release_lock', {'file_path': file_from}, show=False)
def write_file(file_from, file_to=None):
if file_to is None:
file_to = file_from
result = send_req('write',
{'file_path': file_to, 'file_size': os.path.getsize(file_from)},
show=False)
if isinstance(result, str):
print(result)
return
datanodes = result['ips']
file_to = result['path']
event = Event()
send_clock_update = Thread(target=update_lock, args=(event, file_to))
send_clock_update.start()
latencies = ping_datanodes(datanodes)
connected_node = None
for latency, datanode in sorted(zip(latencies, datanodes)):
try:
with FTP(datanode) as ftp, open(file_from, 'rb') as localfile:
ftp.login()
ftp.storbinary('STOR ' + file_to, localfile)
connected_node = datanode
break
except all_errors:
continue
if connected_node is None:
print('Cannot connect to datanode')
send_req('release_lock', {'file_path': file_to}, show=False)
else:
t = Thread(target=send_req,
args=('replicate_file',
{'file_path': file_to, 'node_ip': connected_node},
False))
t.start()
event.set()
send_clock_update.join()
def delete_directory(dir_path):
response, flag = send_req('rmdir', {'dir_path': dir_path}, show=False)
if flag:
answer = input(response)
if answer.lower() in ['y', 'yes']:
send_req('rmdir', {'dir_path': dir_path, 'force_delete': True})
else:
print(response)
def main():
args = sys.argv[1:] # get command with arguments
if len(args) == 0:
print("Empty command!\nFor help write command: help")
elif len(args) == 1: # commands without any argument
if args[0] == 'help':
print_help()
elif args[0] == 'init':
send_req('init')
elif args[0] == 'ls':
send_req('ls', {})
else:
print("Incorrect command!\nFor help write command: help")
elif len(args) == 2: # commands with 1 argument
if args[0] == 'create':
send_req('create', {'file_path': args[1]})
elif args[0] == 'rm':
send_req('rm', {'file_path': args[1]})
elif args[0] == 'info':
send_req('info', {'file_path': args[1]})
elif args[0] == 'cd':
send_req('cd', {'dir_path': args[1]})
elif args[0] == 'ls':
send_req('ls', {'dir_path': args[1]})
elif args[0] == 'mkdir':
send_req('mkdir', {'dir_path': args[1]})
elif args[0] == 'rmdir':
delete_directory(args[1])
elif args[0] == 'read':
read_file(args[1])
elif args[0] == 'write':
write_file(args[1])
else:
print("Incorrect command!\nFor help write command: help")
elif len(args) == 3: # commands with 2 arguments
if args[0] == 'read':
read_file(args[1], args[2])
elif args[0] == 'write':
write_file(args[1], args[2])
elif args[0] == 'cp':
send_req('copy', {'file_path_from': args[1], 'dir_path_to': args[2]})
elif args[0] == 'mv':
send_req('move', {'file_path_from': args[1], 'dir_path_to': args[2]})
else:
print("Incorrect command!\nFor help write command: help")
else:
print("Wrong amount of arguments!\nFor help write command: help")
if __name__ == "__main__":
main()
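# Example invocations (illustrative; assumes a reachable namenode at
# NAMENODE_ADDR and datanodes exposing anonymous FTP on port 21):
#   python client.py init
#   python client.py mkdir /data
#   python client.py write local.csv /data/remote.csv
#   python client.py read /data/remote.csv copy.csv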
|
test_random.py
|
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
from numpy.testing import (
assert_, assert_raises, assert_equal, assert_warns,
assert_no_warnings, assert_array_equal, assert_array_almost_equal,
suppress_warnings
)
from numpy import random
import sys
class TestSeed(object):
def test_scalar(self):
s = np.random.RandomState(0)
assert_equal(s.randint(1000), 684)
s = np.random.RandomState(4294967295)
assert_equal(s.randint(1000), 419)
def test_array(self):
s = np.random.RandomState(range(10))
assert_equal(s.randint(1000), 468)
s = np.random.RandomState(np.arange(10))
assert_equal(s.randint(1000), 468)
s = np.random.RandomState([0])
assert_equal(s.randint(1000), 973)
s = np.random.RandomState([4294967295])
assert_equal(s.randint(1000), 265)
def test_invalid_scalar(self):
# seed must be an unsigned 32 bit integer
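        # (i.e. an integer in the closed range [0, 2**32 - 1])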
assert_raises(TypeError, np.random.RandomState, -0.5)
assert_raises(ValueError, np.random.RandomState, -1)
def test_invalid_array(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, np.random.RandomState, [-0.5])
assert_raises(ValueError, np.random.RandomState, [-1])
assert_raises(ValueError, np.random.RandomState, [4294967296])
assert_raises(ValueError, np.random.RandomState, [1, 2, 4294967296])
assert_raises(ValueError, np.random.RandomState, [1, -2, 4294967296])
def test_invalid_array_shape(self):
# gh-9832
assert_raises(ValueError, np.random.RandomState, np.array([], dtype=np.int64))
assert_raises(ValueError, np.random.RandomState, [[1, 2, 3]])
assert_raises(ValueError, np.random.RandomState, [[1, 2, 3],
[4, 5, 6]])
class TestBinomial(object):
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
# This test addresses issue #3480.
zeros = np.zeros(2, dtype='int')
for p in [0, .5, 1]:
assert_(random.binomial(0, p) == 0)
assert_array_equal(random.binomial(zeros, p), zeros)
def test_p_is_nan(self):
# Issue #4571.
assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial(object):
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert_(-5 <= random.randint(-5, -1) < -1)
x = random.randint(-5, -1, 5)
assert_(np.all(-5 <= x))
assert_(np.all(x < -1))
def test_size(self):
# gh-3173
p = [0.5, 0.5]
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1, p, np.array((2, 2))).shape,
(2, 2, 2))
assert_raises(TypeError, np.random.multinomial, 1, p,
float(1))
class TestSetState(object):
def setup(self):
self.seed = 1234567890
self.prng = random.RandomState(self.seed)
self.state = self.prng.get_state()
def test_basic(self):
old = self.prng.tomaxint(16)
self.prng.set_state(self.state)
new = self.prng.tomaxint(16)
assert_(np.all(old == new))
def test_gaussian_reset(self):
# Make sure the cached every-other-Gaussian is reset.
old = self.prng.standard_normal(size=3)
self.prng.set_state(self.state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_gaussian_reset_in_media_res(self):
# When the state is saved with a cached Gaussian, make sure the
# cached Gaussian is restored.
self.prng.standard_normal()
state = self.prng.get_state()
old = self.prng.standard_normal(size=3)
self.prng.set_state(state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_backwards_compatibility(self):
# Make sure we can accept old state tuples that do not have the
# cached Gaussian value.
old_state = self.state[:-2]
x1 = self.prng.standard_normal(size=16)
self.prng.set_state(old_state)
x2 = self.prng.standard_normal(size=16)
self.prng.set_state(self.state)
x3 = self.prng.standard_normal(size=16)
assert_(np.all(x1 == x2))
assert_(np.all(x1 == x3))
def test_negative_binomial(self):
# Ensure that the negative binomial results take floating point
# arguments without truncation.
self.prng.negative_binomial(0.5, 0.5)
class TestRandint(object):
rfunc = np.random.randint
# valid integer/boolean types
itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
def test_unsupported_type(self):
assert_raises(TypeError, self.rfunc, 1, dtype=float)
def test_bounds_checking(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt)
assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt)
assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt)
assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt)
def test_rng_zero_and_extremes(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
tgt = ubnd - 1
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = (lbnd + ubnd)//2
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
def test_full_range(self):
# Test for ticket #1690
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
try:
self.rfunc(lbnd, ubnd, dtype=dt)
except Exception as e:
raise AssertionError("No error should have been raised, "
"but one was with the following "
"message:\n\n%s" % str(e))
def test_in_bounds_fuzz(self):
# Don't use fixed seed
np.random.seed()
for dt in self.itype[1:]:
for ubnd in [4, 8, 16]:
vals = self.rfunc(2, ubnd, size=2**16, dtype=dt)
assert_(vals.max() < ubnd)
assert_(vals.min() >= 2)
vals = self.rfunc(0, 2, size=2**16, dtype=np.bool_)
assert_(vals.max() < 2)
assert_(vals.min() >= 0)
def test_repeatability(self):
import hashlib
# We use a md5 hash of generated sequences of 1000 samples
# in the range [0, 6) for all but bool, where the range
# is [0, 2). Hashes are for little endian numbers.
tgt = {'bool': '7dd3170d7aa461d201a65f8bcf3944b0',
'int16': '1b7741b80964bb190c50d541dca1cac1',
'int32': '4dc9fcc2b395577ebb51793e58ed1a05',
'int64': '17db902806f448331b5a758d7d2ee672',
'int8': '27dd30c4e08a797063dffac2490b0be6',
'uint16': '1b7741b80964bb190c50d541dca1cac1',
'uint32': '4dc9fcc2b395577ebb51793e58ed1a05',
'uint64': '17db902806f448331b5a758d7d2ee672',
'uint8': '27dd30c4e08a797063dffac2490b0be6'}
for dt in self.itype[1:]:
np.random.seed(1234)
# view as little endian for hash
if sys.byteorder == 'little':
val = self.rfunc(0, 6, size=1000, dtype=dt)
else:
val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()
res = hashlib.md5(val.view(np.int8)).hexdigest()
assert_(tgt[np.dtype(dt).name] == res)
# bools do not depend on endianness
np.random.seed(1234)
val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8)
res = hashlib.md5(val).hexdigest()
assert_(tgt[np.dtype(bool).name] == res)
def test_int64_uint64_corner_case(self):
        # When stored in Numpy arrays, `lbnd` is cast
        # as np.int64 and `ubnd` is cast as np.uint64.
        # Checking whether `lbnd` >= `ubnd` used to be
        # done solely via direct comparison, which is incorrect
        # because when Numpy tries to compare both numbers,
        # it casts both to np.float64 (there is
        # no integer superset of np.int64 and np.uint64).
        # `lbnd` (2**63 - 1) is not exactly representable as
        # np.float64 and rounds up to 2**63, so it compared
        # equal to `ubnd`, leading to a spurious ValueError.
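        # Concretely: float(2**63 - 1) == float(2**63) == 9223372036854775808.0.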
dt = np.int64
tgt = np.iinfo(np.int64).max
lbnd = np.int64(np.iinfo(np.int64).max)
ubnd = np.uint64(np.iinfo(np.int64).max + 1)
# None of these function calls should
# generate a ValueError now.
actual = np.random.randint(lbnd, ubnd, dtype=dt)
assert_equal(actual, tgt)
def test_respect_dtype_singleton(self):
# See gh-7203
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
sample = self.rfunc(lbnd, ubnd, dtype=dt)
assert_equal(sample.dtype, np.dtype(dt))
for dt in (bool, int, np.long):
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
# gh-7284: Ensure that we get Python data types
sample = self.rfunc(lbnd, ubnd, dtype=dt)
assert_(not hasattr(sample, 'dtype'))
assert_equal(type(sample), dt)
class TestRandomDist(object):
# Make sure the random distribution returns the correct value for a
# given seed
def setup(self):
self.seed = 1234567890
def test_rand(self):
np.random.seed(self.seed)
actual = np.random.rand(3, 2)
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_randn(self):
np.random.seed(self.seed)
actual = np.random.randn(3, 2)
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_randint(self):
np.random.seed(self.seed)
actual = np.random.randint(-99, 99, size=(3, 2))
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
assert_array_equal(actual, desired)
def test_random_integers(self):
np.random.seed(self.seed)
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = np.random.random_integers(-99, 99, size=(3, 2))
assert_(len(w) == 1)
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
assert_array_equal(actual, desired)
def test_random_integers_max_int(self):
# Tests whether random_integers can generate the
# maximum allowed Python int that can be converted
# into a C long. Previous implementations of this
# method have thrown an OverflowError when attempting
# to generate this integer.
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = np.random.random_integers(np.iinfo('l').max,
np.iinfo('l').max)
assert_(len(w) == 1)
desired = np.iinfo('l').max
assert_equal(actual, desired)
def test_random_integers_deprecated(self):
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
# DeprecationWarning raised with high == None
assert_raises(DeprecationWarning,
np.random.random_integers,
np.iinfo('l').max)
# DeprecationWarning raised with high != None
assert_raises(DeprecationWarning,
np.random.random_integers,
np.iinfo('l').max, np.iinfo('l').max)
def test_random_sample(self):
np.random.seed(self.seed)
actual = np.random.random_sample((3, 2))
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_choice_uniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4)
desired = np.array([2, 3, 2, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([1, 1, 2, 2])
assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False)
desired = np.array([0, 1, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False,
p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([2, 3, 1])
assert_array_equal(actual, desired)
def test_choice_noninteger(self):
np.random.seed(self.seed)
actual = np.random.choice(['a', 'b', 'c', 'd'], 4)
desired = np.array(['c', 'd', 'c', 'd'])
assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = np.random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2,
replace=False, p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
assert_(np.isscalar(np.random.choice(2, replace=True)))
assert_(np.isscalar(np.random.choice(2, replace=False)))
assert_(np.isscalar(np.random.choice(2, replace=True, p=p)))
assert_(np.isscalar(np.random.choice(2, replace=False, p=p)))
assert_(np.isscalar(np.random.choice([1, 2], replace=True)))
assert_(np.random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, replace=True) is a)
# Check 0-d array
s = tuple()
assert_(not np.isscalar(np.random.choice(2, s, replace=True)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False)))
assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(np.random.choice([1, 2], s, replace=True)))
assert_(np.random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
assert_equal(np.random.choice(6, s, replace=True).shape, s)
assert_equal(np.random.choice(6, s, replace=False).shape, s)
assert_equal(np.random.choice(6, s, replace=True, p=p).shape, s)
assert_equal(np.random.choice(6, s, replace=False, p=p).shape, s)
assert_equal(np.random.choice(np.arange(6), s, replace=True).shape, s)
# Check zero-size
assert_equal(np.random.randint(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
assert_equal(np.random.randint(0, -10, size=0).shape, (0,))
assert_equal(np.random.randint(10, 10, size=0).shape, (0,))
assert_equal(np.random.choice(0, size=0).shape, (0,))
assert_equal(np.random.choice([], size=(0,)).shape, (0,))
assert_equal(np.random.choice(['a', 'b'], size=(3, 0, 4)).shape, (3, 0, 4))
assert_raises(ValueError, np.random.choice, [], 10)
def test_choice_nan_probabilities(self):
a = np.array([42, 1, 2])
p = [None, None, None]
assert_raises(ValueError, np.random.choice, a, p=p)
def test_bytes(self):
np.random.seed(self.seed)
actual = np.random.bytes(10)
desired = b'\x82Ui\x9e\xff\x97+Wf\xa5'
assert_equal(actual, desired)
def test_shuffle(self):
# Test lists, arrays (of various dtypes), and multidimensional versions
# of both, c-contiguous or not:
for conv in [lambda x: np.array([]),
lambda x: x,
lambda x: np.asarray(x).astype(np.int8),
lambda x: np.asarray(x).astype(np.float32),
lambda x: np.asarray(x).astype(np.complex64),
lambda x: np.asarray(x).astype(object),
lambda x: [(i, i) for i in x],
lambda x: np.asarray([[i, i] for i in x]),
lambda x: np.vstack([x, x]).T,
# gh-11442
lambda x: (np.asarray([(i, i) for i in x],
[("a", int), ("b", int)])
.view(np.recarray)),
# gh-4270
lambda x: np.asarray([(i, i) for i in x],
[("a", object, 1),
("b", np.int32, 1)])]:
np.random.seed(self.seed)
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
np.random.shuffle(alist)
actual = alist
desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
assert_array_equal(actual, desired)
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
a_orig = a.copy()
b_orig = b.copy()
for i in range(50):
np.random.shuffle(a)
assert_equal(
sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
np.random.shuffle(b)
assert_equal(
sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
def test_beta(self):
np.random.seed(self.seed)
actual = np.random.beta(.1, .9, size=(3, 2))
desired = np.array(
[[1.45341850513746058e-02, 5.31297615662868145e-04],
[1.85366619058432324e-06, 4.19214516800110563e-03],
[1.58405155108498093e-04, 1.26252891949397652e-04]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
np.random.seed(self.seed)
actual = np.random.binomial(100.123, .456, size=(3, 2))
desired = np.array([[37, 43],
[42, 48],
[46, 45]])
assert_array_equal(actual, desired)
def test_chisquare(self):
np.random.seed(self.seed)
actual = np.random.chisquare(50, size=(3, 2))
desired = np.array([[63.87858175501090585, 68.68407748911370447],
[65.77116116901505904, 47.09686762438974483],
[72.3828403199695174, 74.18408615260374006]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
np.random.seed(self.seed)
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = np.random.mtrand.dirichlet(alpha, size=(3, 2))
desired = np.array([[[0.54539444573611562, 0.45460555426388438],
[0.62345816822039413, 0.37654183177960598]],
[[0.55206000085785778, 0.44793999914214233],
[0.58964023305154301, 0.41035976694845688]],
[[0.59266909280647828, 0.40733090719352177],
[0.56974431743975207, 0.43025568256024799]]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, np.random.dirichlet, p, float(1))
def test_dirichlet_bad_alpha(self):
# gh-2089
alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, np.random.mtrand.dirichlet, alpha)
def test_exponential(self):
np.random.seed(self.seed)
actual = np.random.exponential(1.1234, size=(3, 2))
desired = np.array([[1.08342649775011624, 1.00607889924557314],
[2.46628830085216721, 2.49668106809923884],
[0.68717433461363442, 1.69175666993575979]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_exponential_0(self):
assert_equal(np.random.exponential(scale=0), 0)
assert_raises(ValueError, np.random.exponential, scale=-0.)
def test_f(self):
np.random.seed(self.seed)
actual = np.random.f(12, 77, size=(3, 2))
desired = np.array([[1.21975394418575878, 1.75135759791559775],
[1.44803115017146489, 1.22108959480396262],
[1.02176975757740629, 1.34431827623300415]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
np.random.seed(self.seed)
actual = np.random.gamma(5, 3, size=(3, 2))
desired = np.array([[24.60509188649287182, 28.54993563207210627],
[26.13476110204064184, 12.56988482927716078],
[31.71863275789960568, 33.30143302795922011]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_gamma_0(self):
assert_equal(np.random.gamma(shape=0, scale=0), 0)
assert_raises(ValueError, np.random.gamma, shape=-0., scale=-0.)
def test_geometric(self):
np.random.seed(self.seed)
actual = np.random.geometric(.123456789, size=(3, 2))
desired = np.array([[8, 7],
[17, 17],
[5, 12]])
assert_array_equal(actual, desired)
def test_gumbel(self):
np.random.seed(self.seed)
actual = np.random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.19591898743416816, 0.34405539668096674],
[-1.4492522252274278, -1.47374816298446865],
[1.10651090478803416, -0.69535848626236174]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gumbel_0(self):
assert_equal(np.random.gumbel(scale=0), 0)
assert_raises(ValueError, np.random.gumbel, scale=-0.)
def test_hypergeometric(self):
np.random.seed(self.seed)
actual = np.random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
desired = np.array([[10, 10],
[10, 10],
[9, 9]])
assert_array_equal(actual, desired)
# Test nbad = 0
actual = np.random.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
assert_array_equal(actual, desired)
actual = np.random.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
assert_array_equal(actual, desired)
# Test ngood = 0
actual = np.random.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
actual = np.random.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
def test_laplace(self):
np.random.seed(self.seed)
actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.66599721112760157, 0.52829452552221945],
[3.12791959514407125, 3.18202813572992005],
[-0.05391065675859356, 1.74901336242837324]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_laplace_0(self):
assert_equal(np.random.laplace(scale=0), 0)
assert_raises(ValueError, np.random.laplace, scale=-0.)
def test_logistic(self):
np.random.seed(self.seed)
actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[1.09232835305011444, 0.8648196662399954],
[4.27818590694950185, 4.33897006346929714],
[-0.21682183359214885, 2.63373365386060332]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_lognormal(self):
np.random.seed(self.seed)
actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
desired = np.array([[16.50698631688883822, 36.54846706092654784],
[22.67886599981281748, 0.71617561058995771],
[65.72798501792723869, 86.84341601437161273]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_lognormal_0(self):
assert_equal(np.random.lognormal(sigma=0), 1)
assert_raises(ValueError, np.random.lognormal, sigma=-0.)
def test_logseries(self):
np.random.seed(self.seed)
actual = np.random.logseries(p=.923456789, size=(3, 2))
desired = np.array([[2, 2],
[6, 17],
[3, 6]])
assert_array_equal(actual, desired)
def test_multinomial(self):
np.random.seed(self.seed)
actual = np.random.multinomial(20, [1/6.]*6, size=(3, 2))
desired = np.array([[[4, 3, 5, 4, 2, 2],
[5, 2, 8, 2, 2, 1]],
[[3, 4, 3, 6, 0, 4],
[2, 1, 4, 3, 6, 4]],
[[4, 4, 2, 5, 2, 3],
[4, 3, 4, 2, 3, 4]]])
assert_array_equal(actual, desired)
def test_multivariate_normal(self):
np.random.seed(self.seed)
mean = (.123456789, 10)
cov = [[1, 0], [0, 1]]
size = (3, 2)
actual = np.random.multivariate_normal(mean, cov, size)
desired = np.array([[[1.463620246718631, 11.73759122771936 ],
[1.622445133300628, 9.771356667546383]],
[[2.154490787682787, 12.170324946056553],
[1.719909438201865, 9.230548443648306]],
[[0.689515026297799, 9.880729819607714],
[-0.023054015651998, 9.201096623542879]]])
assert_array_almost_equal(actual, desired, decimal=15)
# Check for default size, was raising deprecation warning
actual = np.random.multivariate_normal(mean, cov)
desired = np.array([0.895289569463708, 9.17180864067987])
assert_array_almost_equal(actual, desired, decimal=15)
        # Check that a non-positive-semidefinite covariance matrix warns with
        # RuntimeWarning
mean = [0, 0]
cov = [[1, 2], [2, 1]]
assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov)
# and that it doesn't warn with RuntimeWarning check_valid='ignore'
assert_no_warnings(np.random.multivariate_normal, mean, cov,
check_valid='ignore')
        # and that it raises a ValueError when check_valid='raise'
assert_raises(ValueError, np.random.multivariate_normal, mean, cov,
check_valid='raise')
def test_negative_binomial(self):
np.random.seed(self.seed)
actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2))
desired = np.array([[848, 841],
[892, 611],
[779, 647]])
assert_array_equal(actual, desired)
def test_noncentral_chisquare(self):
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
desired = np.array([[23.91905354498517511, 13.35324692733826346],
[31.22452661329736401, 16.60047399466177254],
[5.03461598262724586, 17.94973089023519464]])
assert_array_almost_equal(actual, desired, decimal=14)
actual = np.random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
desired = np.array([[1.47145377828516666, 0.15052899268012659],
[0.00943803056963588, 1.02647251615666169],
[0.332334982684171, 0.15451287602753125]])
assert_array_almost_equal(actual, desired, decimal=14)
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
desired = np.array([[9.597154162763948, 11.725484450296079],
[10.413711048138335, 3.694475922923986],
[13.484222138963087, 14.377255424602957]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
np.random.seed(self.seed)
actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1,
size=(3, 2))
desired = np.array([[1.40598099674926669, 0.34207973179285761],
[3.57715069265772545, 7.92632662577829805],
[0.43741599463544162, 1.1774208752428319]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
np.random.seed(self.seed)
actual = np.random.normal(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[2.80378370443726244, 3.59863924443872163],
[3.121433477601256, -0.33382987590723379],
[4.18552478636557357, 4.46410668111310471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_normal_0(self):
assert_equal(np.random.normal(scale=0), 0)
assert_raises(ValueError, np.random.normal, scale=-0.)
def test_pareto(self):
np.random.seed(self.seed)
actual = np.random.pareto(a=.123456789, size=(3, 2))
desired = np.array(
[[2.46852460439034849e+03, 1.41286880810518346e+03],
[5.28287797029485181e+07, 6.57720981047328785e+07],
[1.40840323350391515e+02, 1.98390255135251704e+05]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
def test_poisson(self):
np.random.seed(self.seed)
actual = np.random.poisson(lam=.123456789, size=(3, 2))
desired = np.array([[0, 0],
[1, 0],
[0, 0]])
assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo('l').max
lamneg = -1
assert_raises(ValueError, np.random.poisson, lamneg)
assert_raises(ValueError, np.random.poisson, [lamneg]*10)
assert_raises(ValueError, np.random.poisson, lambig)
assert_raises(ValueError, np.random.poisson, [lambig]*10)
def test_power(self):
np.random.seed(self.seed)
actual = np.random.power(a=.123456789, size=(3, 2))
desired = np.array([[0.02048932883240791, 0.01424192241128213],
[0.38446073748535298, 0.39499689943484395],
[0.00177699707563439, 0.13115505880863756]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
np.random.seed(self.seed)
actual = np.random.rayleigh(scale=10, size=(3, 2))
desired = np.array([[13.8882496494248393, 13.383318339044731],
[20.95413364294492098, 21.08285015800712614],
[11.06066537006854311, 17.35468505778271009]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_rayleigh_0(self):
assert_equal(np.random.rayleigh(scale=0), 0)
assert_raises(ValueError, np.random.rayleigh, scale=-0.)
def test_standard_cauchy(self):
np.random.seed(self.seed)
actual = np.random.standard_cauchy(size=(3, 2))
desired = np.array([[0.77127660196445336, -6.55601161955910605],
[0.93582023391158309, -2.07479293013759447],
[-4.74601644297011926, 0.18338989290760804]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
np.random.seed(self.seed)
actual = np.random.standard_exponential(size=(3, 2))
desired = np.array([[0.96441739162374596, 0.89556604882105506],
[2.1953785836319808, 2.22243285392490542],
[0.6116915921431676, 1.50592546727413201]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_gamma(self):
np.random.seed(self.seed)
actual = np.random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[5.50841531318455058, 6.62953470301903103],
[5.93988484943779227, 2.31044849402133989],
[7.54838614231317084, 8.012756093271868]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_gamma_0(self):
assert_equal(np.random.standard_gamma(shape=0), 0)
assert_raises(ValueError, np.random.standard_gamma, shape=-0.)
def test_standard_normal(self):
np.random.seed(self.seed)
actual = np.random.standard_normal(size=(3, 2))
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_t(self):
np.random.seed(self.seed)
actual = np.random.standard_t(df=10, size=(3, 2))
desired = np.array([[0.97140611862659965, -0.08830486548450577],
[1.36311143689505321, -0.55317463909867071],
[-0.18473749069684214, 0.61181537341755321]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
np.random.seed(self.seed)
actual = np.random.triangular(left=5.12, mode=10.23, right=20.34,
size=(3, 2))
desired = np.array([[12.68117178949215784, 12.4129206149193152],
[16.20131377335158263, 16.25692138747600524],
[11.20400690911820263, 14.4978144835829923]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
np.random.seed(self.seed)
actual = np.random.uniform(low=1.23, high=10.54, size=(3, 2))
desired = np.array([[6.99097932346268003, 6.73801597444323974],
[9.50364421400426274, 9.53130618907631089],
[5.48995325769805476, 8.47493103280052118]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_uniform_range_bounds(self):
fmin = np.finfo('float').min
fmax = np.finfo('float').max
func = np.random.uniform
assert_raises(OverflowError, func, -np.inf, 0)
assert_raises(OverflowError, func, 0, np.inf)
assert_raises(OverflowError, func, fmin, fmax)
assert_raises(OverflowError, func, [-np.inf], [0])
assert_raises(OverflowError, func, [0], [np.inf])
# (fmax / 1e17) - fmin is within range, so this should not throw
# account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >
# DBL_MAX by increasing fmin a bit
np.random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
def test_scalar_exception_propagation(self):
# Tests that exceptions are correctly propagated in distributions
# when called with objects that throw exceptions when converted to
# scalars.
#
# Regression test for gh: 8865
class ThrowingFloat(np.ndarray):
def __float__(self):
raise TypeError
throwing_float = np.array(1.0).view(ThrowingFloat)
assert_raises(TypeError, np.random.uniform, throwing_float, throwing_float)
class ThrowingInteger(np.ndarray):
def __int__(self):
raise TypeError
throwing_int = np.array(1).view(ThrowingInteger)
assert_raises(TypeError, np.random.hypergeometric, throwing_int, 1, 1)
def test_vonmises(self):
np.random.seed(self.seed)
actual = np.random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
desired = np.array([[2.28567572673902042, 2.89163838442285037],
[0.38198375564286025, 2.57638023113890746],
[1.19153771588353052, 1.83509849681825354]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises_small(self):
# check infinite loop, gh-4720
np.random.seed(self.seed)
r = np.random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
np.testing.assert_(np.isfinite(r).all())
def test_wald(self):
np.random.seed(self.seed)
actual = np.random.wald(mean=1.23, scale=1.54, size=(3, 2))
desired = np.array([[3.82935265715889983, 5.13125249184285526],
[0.35045403618358717, 1.50832396872003538],
[0.24124319895843183, 0.22031101461955038]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
np.random.seed(self.seed)
actual = np.random.weibull(a=1.23, size=(3, 2))
desired = np.array([[0.97097342648766727, 0.91422896443565516],
[1.89517770034962929, 1.91414357960479564],
[0.67057783752390987, 1.39494046635066793]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_weibull_0(self):
np.random.seed(self.seed)
assert_equal(np.random.weibull(a=0, size=12), np.zeros(12))
assert_raises(ValueError, np.random.weibull, a=-0.)
def test_zipf(self):
np.random.seed(self.seed)
actual = np.random.zipf(a=1.23, size=(3, 2))
desired = np.array([[66, 29],
[1, 1],
[3, 13]])
assert_array_equal(actual, desired)
class TestBroadcast(object):
# tests that functions that broadcast behave
# correctly when presented with non-scalar arguments
def setup(self):
self.seed = 123456789
def setSeed(self):
np.random.seed(self.seed)
# TODO: Include test for randint once it can broadcast
# Can steal the test written in PR #6938
def test_uniform(self):
low = [0]
high = [1]
uniform = np.random.uniform
desired = np.array([0.53283302478975902,
0.53413660089041659,
0.50955303552646702])
self.setSeed()
actual = uniform(low * 3, high)
assert_array_almost_equal(actual, desired, decimal=14)
self.setSeed()
actual = uniform(low, high * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
loc = [0]
scale = [1]
bad_scale = [-1]
normal = np.random.normal
desired = np.array([2.2129019979039612,
2.1283977976520019,
1.8417114045748335])
self.setSeed()
actual = normal(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc * 3, bad_scale)
self.setSeed()
actual = normal(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc, bad_scale * 3)
def test_beta(self):
a = [1]
b = [2]
bad_a = [-1]
bad_b = [-2]
beta = np.random.beta
desired = np.array([0.19843558305989056,
0.075230336409423643,
0.24976865978980844])
self.setSeed()
actual = beta(a * 3, b)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a * 3, b)
assert_raises(ValueError, beta, a * 3, bad_b)
self.setSeed()
actual = beta(a, b * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a, b * 3)
assert_raises(ValueError, beta, a, bad_b * 3)
def test_exponential(self):
scale = [1]
bad_scale = [-1]
exponential = np.random.exponential
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.setSeed()
actual = exponential(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, exponential, bad_scale * 3)
def test_standard_gamma(self):
shape = [1]
bad_shape = [-1]
std_gamma = np.random.standard_gamma
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.setSeed()
actual = std_gamma(shape * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, std_gamma, bad_shape * 3)
def test_gamma(self):
shape = [1]
scale = [2]
bad_shape = [-1]
bad_scale = [-2]
gamma = np.random.gamma
desired = np.array([1.5221370731769048,
1.5277256455738331,
1.4248762625178359])
self.setSeed()
actual = gamma(shape * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape * 3, scale)
assert_raises(ValueError, gamma, shape * 3, bad_scale)
self.setSeed()
actual = gamma(shape, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape, scale * 3)
assert_raises(ValueError, gamma, shape, bad_scale * 3)
def test_f(self):
dfnum = [1]
dfden = [2]
bad_dfnum = [-1]
bad_dfden = [-2]
f = np.random.f
desired = np.array([0.80038951638264799,
0.86768719635363512,
2.7251095168386801])
self.setSeed()
actual = f(dfnum * 3, dfden)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum * 3, dfden)
assert_raises(ValueError, f, dfnum * 3, bad_dfden)
self.setSeed()
actual = f(dfnum, dfden * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum, dfden * 3)
assert_raises(ValueError, f, dfnum, bad_dfden * 3)
def test_noncentral_f(self):
dfnum = [2]
dfden = [3]
nonc = [4]
bad_dfnum = [0]
bad_dfden = [-1]
bad_nonc = [-2]
nonc_f = np.random.noncentral_f
desired = np.array([9.1393943263705211,
13.025456344595602,
8.8018098359100545])
self.setSeed()
actual = nonc_f(dfnum * 3, dfden, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
self.setSeed()
actual = nonc_f(dfnum, dfden * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
self.setSeed()
actual = nonc_f(dfnum, dfden, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
def test_noncentral_f_small_df(self):
self.setSeed()
desired = np.array([6.869638627492048, 0.785880199263955])
actual = np.random.noncentral_f(0.9, 0.9, 2, size=2)
assert_array_almost_equal(actual, desired, decimal=14)
def test_chisquare(self):
df = [1]
bad_df = [-1]
chisquare = np.random.chisquare
desired = np.array([0.57022801133088286,
0.51947702108840776,
0.1320969254923558])
self.setSeed()
actual = chisquare(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, chisquare, bad_df * 3)
def test_noncentral_chisquare(self):
df = [1]
nonc = [2]
bad_df = [-1]
bad_nonc = [-2]
nonc_chi = np.random.noncentral_chisquare
desired = np.array([9.0015599467913763,
4.5804135049718742,
6.0872302432834564])
self.setSeed()
actual = nonc_chi(df * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
self.setSeed()
actual = nonc_chi(df, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
def test_standard_t(self):
df = [1]
bad_df = [-1]
t = np.random.standard_t
desired = np.array([3.0702872575217643,
5.8560725167361607,
1.0274791436474273])
self.setSeed()
actual = t(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, t, bad_df * 3)
def test_vonmises(self):
mu = [2]
kappa = [1]
bad_kappa = [-1]
vonmises = np.random.vonmises
desired = np.array([2.9883443664201312,
-2.7064099483995943,
-1.8672476700665914])
self.setSeed()
actual = vonmises(mu * 3, kappa)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, vonmises, mu * 3, bad_kappa)
self.setSeed()
actual = vonmises(mu, kappa * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, vonmises, mu, bad_kappa * 3)
def test_pareto(self):
a = [1]
bad_a = [-1]
pareto = np.random.pareto
desired = np.array([1.1405622680198362,
1.1465519762044529,
1.0389564467453547])
self.setSeed()
actual = pareto(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, pareto, bad_a * 3)
def test_weibull(self):
a = [1]
bad_a = [-1]
weibull = np.random.weibull
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.setSeed()
actual = weibull(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, weibull, bad_a * 3)
def test_power(self):
a = [1]
bad_a = [-1]
power = np.random.power
desired = np.array([0.53283302478975902,
0.53413660089041659,
0.50955303552646702])
self.setSeed()
actual = power(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, power, bad_a * 3)
def test_laplace(self):
loc = [0]
scale = [1]
bad_scale = [-1]
laplace = np.random.laplace
desired = np.array([0.067921356028507157,
0.070715642226971326,
0.019290950698972624])
self.setSeed()
actual = laplace(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc * 3, bad_scale)
self.setSeed()
actual = laplace(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc, bad_scale * 3)
def test_gumbel(self):
loc = [0]
scale = [1]
bad_scale = [-1]
gumbel = np.random.gumbel
desired = np.array([0.2730318639556768,
0.26936705726291116,
0.33906220393037939])
self.setSeed()
actual = gumbel(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc * 3, bad_scale)
self.setSeed()
actual = gumbel(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc, bad_scale * 3)
def test_logistic(self):
loc = [0]
scale = [1]
bad_scale = [-1]
logistic = np.random.logistic
desired = np.array([0.13152135837586171,
0.13675915696285773,
0.038216792802833396])
self.setSeed()
actual = logistic(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, logistic, loc * 3, bad_scale)
self.setSeed()
actual = logistic(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, logistic, loc, bad_scale * 3)
def test_lognormal(self):
mean = [0]
sigma = [1]
bad_sigma = [-1]
lognormal = np.random.lognormal
desired = np.array([9.1422086044848427,
8.4013952870126261,
6.3073234116578671])
self.setSeed()
actual = lognormal(mean * 3, sigma)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
self.setSeed()
actual = lognormal(mean, sigma * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean, bad_sigma * 3)
def test_rayleigh(self):
scale = [1]
bad_scale = [-1]
rayleigh = np.random.rayleigh
desired = np.array([1.2337491937897689,
1.2360119924878694,
1.1936818095781789])
self.setSeed()
actual = rayleigh(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, rayleigh, bad_scale * 3)
def test_wald(self):
mean = [0.5]
scale = [1]
bad_mean = [0]
bad_scale = [-2]
wald = np.random.wald
desired = np.array([0.11873681120271318,
0.12450084820795027,
0.9096122728408238])
self.setSeed()
actual = wald(mean * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, wald, bad_mean * 3, scale)
assert_raises(ValueError, wald, mean * 3, bad_scale)
self.setSeed()
actual = wald(mean, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, wald, bad_mean, scale * 3)
assert_raises(ValueError, wald, mean, bad_scale * 3)
def test_triangular(self):
left = [1]
right = [3]
mode = [2]
bad_left_one = [3]
bad_mode_one = [4]
bad_left_two, bad_mode_two = right * 2
triangular = np.random.triangular
desired = np.array([2.03339048710429,
2.0347400359389356,
2.0095991069536208])
self.setSeed()
actual = triangular(left * 3, mode, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, right)
self.setSeed()
actual = triangular(left, mode * 3, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, right)
self.setSeed()
actual = triangular(left, mode, right * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, right * 3)
def test_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
binom = np.random.binomial
desired = np.array([1, 1, 1])
self.setSeed()
actual = binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n * 3, p)
assert_raises(ValueError, binom, n * 3, bad_p_one)
assert_raises(ValueError, binom, n * 3, bad_p_two)
self.setSeed()
actual = binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n, p * 3)
assert_raises(ValueError, binom, n, bad_p_one * 3)
assert_raises(ValueError, binom, n, bad_p_two * 3)
def test_negative_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
neg_binom = np.random.negative_binomial
desired = np.array([1, 0, 1])
self.setSeed()
actual = neg_binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n * 3, p)
assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
self.setSeed()
actual = neg_binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n, p * 3)
assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
def test_poisson(self):
max_lam = np.random.RandomState().poisson_lam_max
lam = [1]
bad_lam_one = [-1]
bad_lam_two = [max_lam * 2]
poisson = np.random.poisson
desired = np.array([1, 1, 0])
self.setSeed()
actual = poisson(lam * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, poisson, bad_lam_one * 3)
assert_raises(ValueError, poisson, bad_lam_two * 3)
def test_zipf(self):
a = [2]
bad_a = [0]
zipf = np.random.zipf
desired = np.array([2, 2, 1])
self.setSeed()
actual = zipf(a * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, zipf, bad_a * 3)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, zipf, np.nan)
assert_raises(ValueError, zipf, [0, 0, np.nan])
def test_geometric(self):
p = [0.5]
bad_p_one = [-1]
bad_p_two = [1.5]
geom = np.random.geometric
desired = np.array([2, 2, 2])
self.setSeed()
actual = geom(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, geom, bad_p_one * 3)
assert_raises(ValueError, geom, bad_p_two * 3)
def test_hypergeometric(self):
ngood = [1]
nbad = [2]
nsample = [2]
bad_ngood = [-1]
bad_nbad = [-2]
bad_nsample_one = [0]
bad_nsample_two = [4]
hypergeom = np.random.hypergeometric
desired = np.array([1, 1, 1])
self.setSeed()
actual = hypergeom(ngood * 3, nbad, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample)
assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample)
assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one)
assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two)
self.setSeed()
actual = hypergeom(ngood, nbad * 3, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample)
assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample)
assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one)
assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two)
self.setSeed()
actual = hypergeom(ngood, nbad, nsample * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
def test_logseries(self):
p = [0.5]
bad_p_one = [2]
bad_p_two = [-1]
logseries = np.random.logseries
desired = np.array([1, 1, 1])
self.setSeed()
actual = logseries(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, logseries, bad_p_one * 3)
assert_raises(ValueError, logseries, bad_p_two * 3)
class TestThread(object):
# make sure each state produces the same sequence even in threads
def setup(self):
self.seeds = range(4)
def check_function(self, function, sz):
from threading import Thread
out1 = np.empty((len(self.seeds),) + sz)
out2 = np.empty((len(self.seeds),) + sz)
# threaded generation
t = [Thread(target=function, args=(np.random.RandomState(s), o))
for s, o in zip(self.seeds, out1)]
[x.start() for x in t]
[x.join() for x in t]
# the same serial
for s, o in zip(self.seeds, out2):
function(np.random.RandomState(s), o)
# these platforms change x87 fpu precision mode in threads
if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
assert_array_almost_equal(out1, out2)
else:
assert_array_equal(out1, out2)
def test_normal(self):
def gen_random(state, out):
out[...] = state.normal(size=10000)
self.check_function(gen_random, sz=(10000,))
def test_exp(self):
def gen_random(state, out):
out[...] = state.exponential(scale=np.ones((100, 1000)))
self.check_function(gen_random, sz=(100, 1000))
def test_multinomial(self):
def gen_random(state, out):
out[...] = state.multinomial(10, [1/6.]*6, size=10000)
self.check_function(gen_random, sz=(10000, 6))
# See Issue #4263
class TestSingleEltArrayInput(object):
def setup(self):
self.argOne = np.array([2])
self.argTwo = np.array([3])
self.argThree = np.array([4])
self.tgtShape = (1,)
def test_one_arg_funcs(self):
funcs = (np.random.exponential, np.random.standard_gamma,
np.random.chisquare, np.random.standard_t,
np.random.pareto, np.random.weibull,
np.random.power, np.random.rayleigh,
np.random.poisson, np.random.zipf,
np.random.geometric, np.random.logseries)
probfuncs = (np.random.geometric, np.random.logseries)
for func in funcs:
if func in probfuncs: # p < 1.0
out = func(np.array([0.5]))
else:
out = func(self.argOne)
assert_equal(out.shape, self.tgtShape)
def test_two_arg_funcs(self):
funcs = (np.random.uniform, np.random.normal,
np.random.beta, np.random.gamma,
np.random.f, np.random.noncentral_chisquare,
np.random.vonmises, np.random.laplace,
np.random.gumbel, np.random.logistic,
np.random.lognormal, np.random.wald,
np.random.binomial, np.random.negative_binomial)
probfuncs = (np.random.binomial, np.random.negative_binomial)
for func in funcs:
if func in probfuncs: # p <= 1
argTwo = np.array([0.5])
else:
argTwo = self.argTwo
out = func(self.argOne, argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, argTwo[0])
assert_equal(out.shape, self.tgtShape)
# TODO: Uncomment once randint can broadcast arguments
# def test_randint(self):
# itype = [bool, np.int8, np.uint8, np.int16, np.uint16,
# np.int32, np.uint32, np.int64, np.uint64]
# func = np.random.randint
# high = np.array([1])
# low = np.array([0])
#
# for dt in itype:
# out = func(low, high, dtype=dt)
# self.assert_equal(out.shape, self.tgtShape)
#
# out = func(low[0], high, dtype=dt)
# self.assert_equal(out.shape, self.tgtShape)
#
# out = func(low, high[0], dtype=dt)
# self.assert_equal(out.shape, self.tgtShape)
def test_three_arg_funcs(self):
funcs = [np.random.noncentral_f, np.random.triangular,
np.random.hypergeometric]
for func in funcs:
out = func(self.argOne, self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, self.argTwo[0], self.argThree)
assert_equal(out.shape, self.tgtShape)
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
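# Note on the helpers below (summary inferred from how Miner.work() uses
# them): getwork returns the block header as a hex string whose 32-bit words
# are byte-swapped relative to the order the SHA-256 code expects, so
# bytereverse()/bufreverse() flip the byte order inside each 32-bit word and
# wordreverse() reverses the word order of a whole buffer.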
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
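# Miner.work() below implements the classic getwork inner loop: feed the
# constant first 76 bytes of the 80-byte header into a sha256 object once,
# then for every nonce copy that object, append the 4-byte little-endian
# nonce, double-SHA256 the result, use the cheap "last four digest bytes are
# zero" test as a pre-filter, and only then reverse the digest and compare
# the full 256-bit value against the target.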
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
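# The CONFIG-FILE parsed below is a plain key=value file ('#' lines are
# skipped).  rpcuser and rpcpass are required; host, port, threads,
# hashmeter and scantime fall back to the defaults set further down.
# Illustrative example (these values are placeholders, not the defaults):
#
#   host=127.0.0.1
#   port=8332
#   rpcuser=bitcoinrpc
#   rpcpass=change-me
#   threads=4
#   hashmeter=1
#   scantime=30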
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 1295
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
test_manager.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import multiprocessing
import os
import pathlib
import random
import socket
import sys
import threading
import unittest
from datetime import datetime, timedelta
from logging.config import dictConfig
from tempfile import TemporaryDirectory
from textwrap import dedent
from unittest import mock
from unittest.mock import MagicMock, PropertyMock
import pytest
from freezegun import freeze_time
from airflow.config_templates.airflow_local_settings import DEFAULT_LOGGING_CONFIG
from airflow.configuration import conf
from airflow.dag_processing.manager import (
DagFileProcessorAgent,
DagFileProcessorManager,
DagFileStat,
DagParsingSignal,
DagParsingStat,
)
from airflow.dag_processing.processor import DagFileProcessorProcess
from airflow.jobs.local_task_job import LocalTaskJob as LJ
from airflow.models import DagBag, DagModel, TaskInstance as TI, errors
from airflow.models.dagcode import DagCode
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.taskinstance import SimpleTaskInstance
from airflow.utils import timezone
from airflow.utils.callback_requests import CallbackRequest, TaskCallbackRequest
from airflow.utils.net import get_hostname
from airflow.utils.session import create_session
from airflow.utils.state import DagRunState, State
from airflow.utils.types import DagRunType
from tests.core.test_logging_config import SETTINGS_FILE_VALID, settings_context
from tests.models import TEST_DAGS_FOLDER
from tests.test_utils.config import conf_vars
from tests.test_utils.db import clear_db_dags, clear_db_runs, clear_db_serialized_dags
TEST_DAG_FOLDER = pathlib.Path(__file__).parent.parent / 'dags'
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
class FakeDagFileProcessorRunner(DagFileProcessorProcess):
    # This fake processor will return the zombies it received in its constructor
# as its processing result w/o actually parsing anything.
def __init__(self, file_path, pickle_dags, dag_ids, callbacks):
super().__init__(file_path, pickle_dags, dag_ids, callbacks)
# We need a "real" selectable handle for waitable_handle to work
readable, writable = multiprocessing.Pipe(duplex=False)
writable.send('abc')
writable.close()
self._waitable_handle = readable
self._result = 0, 0
def start(self):
pass
@property
def start_time(self):
return DEFAULT_DATE
@property
def pid(self):
return 1234
@property
def done(self):
return True
@property
def result(self):
return self._result
@staticmethod
def _create_process(file_path, callback_requests, dag_ids, pickle_dags):
return FakeDagFileProcessorRunner(
file_path,
pickle_dags,
dag_ids,
callback_requests,
)
@property
def waitable_handle(self):
return self._waitable_handle
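# The tests below construct DagFileProcessorManager directly, wiring
# signal_conn to either a MagicMock or one end of a multiprocessing.Pipe,
# and exercise queue ordering, import-error cleanup, zombie detection,
# processor timeouts and the pipe-full deadlock scenario.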
class TestDagFileProcessorManager:
def setup_method(self):
dictConfig(DEFAULT_LOGGING_CONFIG)
clear_db_runs()
clear_db_serialized_dags()
clear_db_dags()
def teardown_class(self):
clear_db_runs()
clear_db_serialized_dags()
clear_db_dags()
def run_processor_manager_one_loop(self, manager, parent_pipe):
if not manager._async_mode:
parent_pipe.send(DagParsingSignal.AGENT_RUN_ONCE)
results = []
while True:
manager._run_parsing_loop()
while parent_pipe.poll(timeout=0.01):
obj = parent_pipe.recv()
if not isinstance(obj, DagParsingStat):
results.append(obj)
elif obj.done:
return results
raise RuntimeError("Shouldn't get here - nothing to read, but manager not finished!")
@conf_vars({('core', 'load_examples'): 'False'})
def test_remove_file_clears_import_error(self, tmpdir):
filename_to_parse = tmpdir / 'temp_dag.py'
# Generate original import error
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines('an invalid airflow DAG')
child_pipe, parent_pipe = multiprocessing.Pipe()
async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
manager = DagFileProcessorManager(
dag_directory=tmpdir,
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=child_pipe,
dag_ids=[],
pickle_dags=False,
async_mode=async_mode,
)
with create_session() as session:
self.run_processor_manager_one_loop(manager, parent_pipe)
import_errors = session.query(errors.ImportError).all()
assert len(import_errors) == 1
filename_to_parse.remove()
# Rerun the scheduler once the dag file has been removed
self.run_processor_manager_one_loop(manager, parent_pipe)
import_errors = session.query(errors.ImportError).all()
assert len(import_errors) == 0
session.rollback()
child_pipe.close()
parent_pipe.close()
@conf_vars({('core', 'load_examples'): 'False'})
def test_max_runs_when_no_files(self):
child_pipe, parent_pipe = multiprocessing.Pipe()
with TemporaryDirectory(prefix="empty-airflow-dags-") as dags_folder:
async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
manager = DagFileProcessorManager(
dag_directory=dags_folder,
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=child_pipe,
dag_ids=[],
pickle_dags=False,
async_mode=async_mode,
)
self.run_processor_manager_one_loop(manager, parent_pipe)
child_pipe.close()
parent_pipe.close()
@pytest.mark.backend("mysql", "postgres")
def test_start_new_processes_with_same_filepath(self):
"""
        Test that when a processor already exists with a filepath, a new processor won't be created
with that filepath. The filepath will just be removed from the list.
"""
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
file_1 = 'file_1.py'
file_2 = 'file_2.py'
file_3 = 'file_3.py'
manager._file_path_queue = [file_1, file_2, file_3]
# Mock that only one processor exists. This processor runs with 'file_1'
manager._processors[file_1] = MagicMock()
# Start New Processes
manager.start_new_processes()
# Because of the config: '[scheduler] parsing_processes = 2'
# verify that only one extra process is created
# and since a processor with 'file_1' already exists,
# even though it is first in '_file_path_queue'
# a new processor is created with 'file_2' and not 'file_1'.
assert file_1 in manager._processors.keys()
assert file_2 in manager._processors.keys()
assert [file_3] == manager._file_path_queue
def test_set_file_paths_when_processor_file_path_not_in_new_file_paths(self):
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
mock_processor = MagicMock()
mock_processor.stop.side_effect = AttributeError('DagFileProcessor object has no attribute stop')
mock_processor.terminate.side_effect = None
manager._processors['missing_file.txt'] = mock_processor
manager._file_stats['missing_file.txt'] = DagFileStat(0, 0, None, None, 0)
manager.set_file_paths(['abc.txt'])
assert manager._processors == {}
def test_set_file_paths_when_processor_file_path_is_in_new_file_paths(self):
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
mock_processor = MagicMock()
mock_processor.stop.side_effect = AttributeError('DagFileProcessor object has no attribute stop')
mock_processor.terminate.side_effect = None
manager._processors['abc.txt'] = mock_processor
manager.set_file_paths(['abc.txt'])
assert manager._processors == {'abc.txt': mock_processor}
@conf_vars({("scheduler", "file_parsing_sort_mode"): "alphabetical"})
@mock.patch("zipfile.is_zipfile", return_value=True)
@mock.patch("airflow.utils.file.might_contain_dag", return_value=True)
@mock.patch("airflow.utils.file.find_path_from_directory", return_value=True)
@mock.patch("airflow.utils.file.os.path.isfile", return_value=True)
def test_file_paths_in_queue_sorted_alphabetically(
self, mock_isfile, mock_find_path, mock_might_contain_dag, mock_zipfile
):
"""Test dag files are sorted alphabetically"""
dag_files = ["file_3.py", "file_2.py", "file_4.py", "file_1.py"]
mock_find_path.return_value = dag_files
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
manager.set_file_paths(dag_files)
assert manager._file_path_queue == []
manager.prepare_file_path_queue()
assert manager._file_path_queue == ['file_1.py', 'file_2.py', 'file_3.py', 'file_4.py']
@conf_vars({("scheduler", "file_parsing_sort_mode"): "random_seeded_by_host"})
@mock.patch("zipfile.is_zipfile", return_value=True)
@mock.patch("airflow.utils.file.might_contain_dag", return_value=True)
@mock.patch("airflow.utils.file.find_path_from_directory", return_value=True)
@mock.patch("airflow.utils.file.os.path.isfile", return_value=True)
def test_file_paths_in_queue_sorted_random_seeded_by_host(
self, mock_isfile, mock_find_path, mock_might_contain_dag, mock_zipfile
):
"""Test files are randomly sorted and seeded by host name"""
dag_files = ["file_3.py", "file_2.py", "file_4.py", "file_1.py"]
mock_find_path.return_value = dag_files
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
manager.set_file_paths(dag_files)
assert manager._file_path_queue == []
manager.prepare_file_path_queue()
expected_order = dag_files
random.Random(get_hostname()).shuffle(expected_order)
assert manager._file_path_queue == expected_order
# Verify running it again produces same order
manager._file_paths = []
manager.prepare_file_path_queue()
assert manager._file_path_queue == expected_order
@conf_vars({("scheduler", "file_parsing_sort_mode"): "modified_time"})
@mock.patch("zipfile.is_zipfile", return_value=True)
@mock.patch("airflow.utils.file.might_contain_dag", return_value=True)
@mock.patch("airflow.utils.file.find_path_from_directory", return_value=True)
@mock.patch("airflow.utils.file.os.path.isfile", return_value=True)
@mock.patch("airflow.utils.file.os.path.getmtime")
def test_file_paths_in_queue_sorted_by_modified_time(
self, mock_getmtime, mock_isfile, mock_find_path, mock_might_contain_dag, mock_zipfile
):
"""Test files are sorted by modified time"""
paths_with_mtime = {"file_3.py": 3.0, "file_2.py": 2.0, "file_4.py": 5.0, "file_1.py": 4.0}
dag_files = list(paths_with_mtime.keys())
mock_getmtime.side_effect = list(paths_with_mtime.values())
mock_find_path.return_value = dag_files
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
manager.set_file_paths(dag_files)
assert manager._file_path_queue == []
manager.prepare_file_path_queue()
assert manager._file_path_queue == ['file_4.py', 'file_1.py', 'file_3.py', 'file_2.py']
@conf_vars({("scheduler", "file_parsing_sort_mode"): "modified_time"})
@mock.patch("zipfile.is_zipfile", return_value=True)
@mock.patch("airflow.utils.file.might_contain_dag", return_value=True)
@mock.patch("airflow.utils.file.find_path_from_directory", return_value=True)
@mock.patch("airflow.utils.file.os.path.isfile", return_value=True)
@mock.patch("airflow.utils.file.os.path.getmtime")
def test_file_paths_in_queue_excludes_missing_file(
self, mock_getmtime, mock_isfile, mock_find_path, mock_might_contain_dag, mock_zipfile
):
"""Check that a file is not enqueued for processing if it has been deleted"""
dag_files = ["file_3.py", "file_2.py", "file_4.py"]
mock_getmtime.side_effect = [1.0, 2.0, FileNotFoundError()]
mock_find_path.return_value = dag_files
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
manager.set_file_paths(dag_files)
manager.prepare_file_path_queue()
assert manager._file_path_queue == ['file_2.py', 'file_3.py']
@conf_vars({("scheduler", "file_parsing_sort_mode"): "modified_time"})
@mock.patch("zipfile.is_zipfile", return_value=True)
@mock.patch("airflow.utils.file.might_contain_dag", return_value=True)
@mock.patch("airflow.utils.file.find_path_from_directory", return_value=True)
@mock.patch("airflow.utils.file.os.path.isfile", return_value=True)
@mock.patch("airflow.utils.file.os.path.getmtime")
def test_recently_modified_file_is_parsed_with_mtime_mode(
self, mock_getmtime, mock_isfile, mock_find_path, mock_might_contain_dag, mock_zipfile
):
"""
Test recently updated files are processed even if min_file_process_interval is not reached
"""
freezed_base_time = timezone.datetime(2020, 1, 5, 0, 0, 0)
initial_file_1_mtime = (freezed_base_time - timedelta(minutes=5)).timestamp()
dag_files = ["file_1.py"]
mock_getmtime.side_effect = [initial_file_1_mtime]
mock_find_path.return_value = dag_files
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=3,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
        # let's say the DAG was just parsed 10 seconds before freezed_base_time
last_finish_time = freezed_base_time - timedelta(seconds=10)
manager._file_stats = {
"file_1.py": DagFileStat(1, 0, last_finish_time, 1.0, 1),
}
with freeze_time(freezed_base_time):
manager.set_file_paths(dag_files)
assert manager._file_path_queue == []
# File Path Queue will be empty as the "modified time" < "last finish time"
manager.prepare_file_path_queue()
assert manager._file_path_queue == []
# Simulate the DAG modification by using modified_time which is greater
# than the last_parse_time but still less than now - min_file_process_interval
file_1_new_mtime = freezed_base_time - timedelta(seconds=5)
file_1_new_mtime_ts = file_1_new_mtime.timestamp()
with freeze_time(freezed_base_time):
manager.set_file_paths(dag_files)
assert manager._file_path_queue == []
# File Path Queue will be empty as the "modified time" < "last finish time"
mock_getmtime.side_effect = [file_1_new_mtime_ts]
manager.prepare_file_path_queue()
            # Check that file is added to the queue even though file was just recently parsed
assert manager._file_path_queue == ["file_1.py"]
assert last_finish_time < file_1_new_mtime
assert (
manager._file_process_interval
> (freezed_base_time - manager.get_last_finish_time("file_1.py")).total_seconds()
)
def test_find_zombies(self):
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
dagbag = DagBag(TEST_DAG_FOLDER, read_dags_from_db=False)
with create_session() as session:
session.query(LJ).delete()
dag = dagbag.get_dag('example_branch_operator')
dag.sync_to_db()
task = dag.get_task(task_id='run_this_first')
dag_run = dag.create_dagrun(
state=DagRunState.RUNNING,
execution_date=DEFAULT_DATE,
run_type=DagRunType.SCHEDULED,
session=session,
)
ti = TI(task, run_id=dag_run.run_id, state=State.RUNNING)
local_job = LJ(ti)
local_job.state = State.SHUTDOWN
session.add(local_job)
session.flush()
ti.job_id = local_job.id
session.add(ti)
session.flush()
manager._last_zombie_query_time = timezone.utcnow() - timedelta(
seconds=manager._zombie_threshold_secs + 1
)
manager._find_zombies()
requests = manager._callback_to_execute[dag.fileloc]
assert 1 == len(requests)
assert requests[0].full_filepath == dag.fileloc
assert requests[0].msg == f"Detected {ti} as zombie"
assert requests[0].is_failure_callback is True
assert isinstance(requests[0].simple_task_instance, SimpleTaskInstance)
assert ti.dag_id == requests[0].simple_task_instance.dag_id
assert ti.task_id == requests[0].simple_task_instance.task_id
assert ti.run_id == requests[0].simple_task_instance.run_id
session.query(TI).delete()
session.query(LJ).delete()
@mock.patch('airflow.dag_processing.manager.DagFileProcessorProcess')
def test_handle_failure_callback_with_zombies_are_correctly_passed_to_dag_file_processor(
self, mock_processor
):
"""
        Check that the same set of failure callbacks (zombies) is passed to the dag
        file processors until the next zombie detection logic is invoked.
"""
test_dag_path = TEST_DAG_FOLDER / 'test_example_bash_operator.py'
with conf_vars({('scheduler', 'parsing_processes'): '1', ('core', 'load_examples'): 'False'}):
dagbag = DagBag(test_dag_path, read_dags_from_db=False)
with create_session() as session:
session.query(LJ).delete()
dag = dagbag.get_dag('test_example_bash_operator')
dag.sync_to_db()
dag_run = dag.create_dagrun(
state=DagRunState.RUNNING,
execution_date=DEFAULT_DATE,
run_type=DagRunType.SCHEDULED,
session=session,
)
task = dag.get_task(task_id='run_this_last')
ti = TI(task, run_id=dag_run.run_id, state=State.RUNNING)
local_job = LJ(ti)
local_job.state = State.SHUTDOWN
session.add(local_job)
session.flush()
# TODO: If there was an actual Relationship between TI and Job
# we wouldn't need this extra commit
session.add(ti)
ti.job_id = local_job.id
session.flush()
expected_failure_callback_requests = [
TaskCallbackRequest(
full_filepath=dag.fileloc,
simple_task_instance=SimpleTaskInstance(ti),
msg="Message",
)
]
test_dag_path = TEST_DAG_FOLDER / 'test_example_bash_operator.py'
child_pipe, parent_pipe = multiprocessing.Pipe()
async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
fake_processors = []
def fake_processor_(*args, **kwargs):
nonlocal fake_processors
processor = FakeDagFileProcessorRunner._create_process(*args, **kwargs)
fake_processors.append(processor)
return processor
mock_processor.side_effect = fake_processor_
manager = DagFileProcessorManager(
dag_directory=test_dag_path,
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=child_pipe,
dag_ids=[],
pickle_dags=False,
async_mode=async_mode,
)
self.run_processor_manager_one_loop(manager, parent_pipe)
if async_mode:
# Once for initial parse, and then again for the add_callback_to_queue
assert len(fake_processors) == 2
assert fake_processors[0]._file_path == str(test_dag_path)
assert fake_processors[0]._callback_requests == []
else:
assert len(fake_processors) == 1
assert fake_processors[-1]._file_path == str(test_dag_path)
callback_requests = fake_processors[-1]._callback_requests
assert {zombie.simple_task_instance.key for zombie in expected_failure_callback_requests} == {
result.simple_task_instance.key for result in callback_requests
}
child_pipe.close()
parent_pipe.close()
@mock.patch("airflow.dag_processing.processor.DagFileProcessorProcess.pid", new_callable=PropertyMock)
@mock.patch("airflow.dag_processing.processor.DagFileProcessorProcess.kill")
def test_kill_timed_out_processors_kill(self, mock_kill, mock_pid):
mock_pid.return_value = 1234
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta(seconds=5),
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
processor = DagFileProcessorProcess('abc.txt', False, [], [])
processor._start_time = timezone.make_aware(datetime.min)
manager._processors = {'abc.txt': processor}
manager._kill_timed_out_processors()
mock_kill.assert_called_once_with()
@mock.patch("airflow.dag_processing.processor.DagFileProcessorProcess.pid", new_callable=PropertyMock)
@mock.patch("airflow.dag_processing.processor.DagFileProcessorProcess")
def test_kill_timed_out_processors_no_kill(self, mock_dag_file_processor, mock_pid):
mock_pid.return_value = 1234
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta(seconds=5),
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
processor = DagFileProcessorProcess('abc.txt', False, [], [])
processor._start_time = timezone.make_aware(datetime.max)
manager._processors = {'abc.txt': processor}
manager._kill_timed_out_processors()
mock_dag_file_processor.kill.assert_not_called()
@conf_vars({('core', 'load_examples'): 'False'})
@pytest.mark.execution_timeout(10)
def test_dag_with_system_exit(self):
"""
        Test to check that a DAG calling sys.exit() doesn't break the scheduler.
"""
dag_id = 'exit_test_dag'
dag_directory = TEST_DAG_FOLDER.parent / 'dags_with_system_exit'
# Delete the one valid DAG/SerializedDAG, and check that it gets re-created
clear_db_dags()
clear_db_serialized_dags()
child_pipe, parent_pipe = multiprocessing.Pipe()
manager = DagFileProcessorManager(
dag_directory=dag_directory,
dag_ids=[],
max_runs=1,
processor_timeout=timedelta(seconds=5),
signal_conn=child_pipe,
pickle_dags=False,
async_mode=True,
)
manager._run_parsing_loop()
result = None
while parent_pipe.poll(timeout=None):
result = parent_pipe.recv()
if isinstance(result, DagParsingStat) and result.done:
break
# Three files in folder should be processed
assert sum(stat.run_count for stat in manager._file_stats.values()) == 3
with create_session() as session:
assert session.query(DagModel).get(dag_id) is not None
@conf_vars({('core', 'load_examples'): 'False'})
@pytest.mark.backend("mysql", "postgres")
@pytest.mark.execution_timeout(30)
@mock.patch('airflow.dag_processing.manager.DagFileProcessorProcess')
def test_pipe_full_deadlock(self, mock_processor):
dag_filepath = TEST_DAG_FOLDER / "test_scheduler_dags.py"
child_pipe, parent_pipe = multiprocessing.Pipe()
# Shrink the buffers to exacerbate the problem!
for fd in (parent_pipe.fileno(),):
sock = socket.socket(fileno=fd)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1024)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1024)
sock.detach()
exit_event = threading.Event()
# To test this behaviour we need something that continually fills the
# parent pipe's buffer (and keeps it full).
def keep_pipe_full(pipe, exit_event):
n = 0
while True:
if exit_event.is_set():
break
req = CallbackRequest(str(dag_filepath))
try:
logging.debug("Sending CallbackRequests %d", n + 1)
pipe.send(req)
except TypeError:
# This is actually the error you get when the parent pipe
# is closed! Nicely handled, eh?
break
except OSError:
break
n += 1
logging.debug(" Sent %d CallbackRequests", n)
thread = threading.Thread(target=keep_pipe_full, args=(parent_pipe, exit_event))
fake_processors = []
def fake_processor_(*args, **kwargs):
nonlocal fake_processors
processor = FakeDagFileProcessorRunner._create_process(*args, **kwargs)
fake_processors.append(processor)
return processor
mock_processor.side_effect = fake_processor_
manager = DagFileProcessorManager(
dag_directory=dag_filepath,
dag_ids=[],
            # A reasonably large number to ensure that we trigger the deadlock
max_runs=100,
processor_timeout=timedelta(seconds=5),
signal_conn=child_pipe,
pickle_dags=False,
async_mode=True,
)
try:
thread.start()
# If this completes without hanging, then the test is good!
manager._run_parsing_loop()
exit_event.set()
finally:
logging.info("Closing pipes")
parent_pipe.close()
child_pipe.close()
thread.join(timeout=1.0)
@conf_vars({('core', 'load_examples'): 'False'})
@mock.patch('airflow.dag_processing.manager.Stats.timing')
def test_send_file_processing_statsd_timing(self, statsd_timing_mock, tmpdir):
filename_to_parse = tmpdir / 'temp_dag.py'
dag_code = dedent(
"""
from airflow import DAG
dag = DAG(dag_id='temp_dag', schedule_interval='0 0 * * *')
"""
)
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(dag_code)
child_pipe, parent_pipe = multiprocessing.Pipe()
async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
manager = DagFileProcessorManager(
dag_directory=tmpdir,
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=child_pipe,
dag_ids=[],
pickle_dags=False,
async_mode=async_mode,
)
self.run_processor_manager_one_loop(manager, parent_pipe)
last_runtime = manager.get_last_runtime(manager.file_paths[0])
child_pipe.close()
parent_pipe.close()
statsd_timing_mock.assert_called_with('dag_processing.last_duration.temp_dag', last_runtime)
def test_refresh_dags_dir_doesnt_delete_zipped_dags(self, tmpdir):
"""Test DagFileProcessorManager._refresh_dag_dir method"""
manager = DagFileProcessorManager(
dag_directory=TEST_DAG_FOLDER,
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
dagbag = DagBag(dag_folder=tmpdir, include_examples=False)
zipped_dag_path = os.path.join(TEST_DAGS_FOLDER, "test_zip.zip")
dagbag.process_file(zipped_dag_path)
dag = dagbag.get_dag("test_zip_dag")
dag.sync_to_db()
SerializedDagModel.write_dag(dag)
manager.last_dag_dir_refresh_time = timezone.utcnow() - timedelta(minutes=10)
manager._refresh_dag_dir()
# Assert dag not deleted in SDM
assert SerializedDagModel.has_dag('test_zip_dag')
# assert code not deleted
assert DagCode.has_dag(dag.fileloc)
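# TestDagFileProcessorAgent covers the parent-side agent: launching the
# manager process, reloading the logging config, honouring max_runs /
# parse-once behaviour, and checking whether the processor-manager log file
# gets created.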
class TestDagFileProcessorAgent(unittest.TestCase):
def setUp(self):
# Make sure that the configure_logging is not cached
self.old_modules = dict(sys.modules)
def tearDown(self):
# Remove any new modules imported during the test run. This lets us
# import the same source files for more than one test.
remove_list = []
for mod in sys.modules:
if mod not in self.old_modules:
remove_list.append(mod)
for mod in remove_list:
del sys.modules[mod]
@staticmethod
def _processor_factory(file_path, zombies, dag_ids, pickle_dags):
return DagFileProcessorProcess(file_path, pickle_dags, dag_ids, zombies)
def test_reload_module(self):
"""
Configure the context to have logging.logging_config_class set to a fake logging
        class path, so that when the logging module is reloaded the airflow.processor_manager
logger should not be configured.
"""
with settings_context(SETTINGS_FILE_VALID):
# Launch a process through DagFileProcessorAgent, which will try
# reload the logging module.
test_dag_path = TEST_DAG_FOLDER / 'test_scheduler_dags.py'
async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
log_file_loc = conf.get('logging', 'DAG_PROCESSOR_MANAGER_LOG_LOCATION')
try:
os.remove(log_file_loc)
except OSError:
pass
# Starting dag processing with 0 max_runs to avoid redundant operations.
processor_agent = DagFileProcessorAgent(test_dag_path, 0, timedelta.max, [], False, async_mode)
processor_agent.start()
if not async_mode:
processor_agent.run_single_parsing_loop()
processor_agent._process.join()
            # Since we are reloading the logging config, not creating this file,
            # we should expect it to be nonexistent.
assert not os.path.isfile(log_file_loc)
@conf_vars({('core', 'load_examples'): 'False'})
def test_parse_once(self):
clear_db_serialized_dags()
clear_db_dags()
test_dag_path = TEST_DAG_FOLDER / 'test_scheduler_dags.py'
async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
processor_agent = DagFileProcessorAgent(test_dag_path, 1, timedelta.max, [], False, async_mode)
processor_agent.start()
if not async_mode:
processor_agent.run_single_parsing_loop()
while not processor_agent.done:
if not async_mode:
processor_agent.wait_until_finished()
processor_agent.heartbeat()
assert processor_agent.all_files_processed
assert processor_agent.done
with create_session() as session:
dag_ids = session.query(DagModel.dag_id).order_by("dag_id").all()
assert dag_ids == [('test_start_date_scheduling',), ('test_task_start_date_scheduling',)]
dag_ids = session.query(SerializedDagModel.dag_id).order_by("dag_id").all()
assert dag_ids == [('test_start_date_scheduling',), ('test_task_start_date_scheduling',)]
def test_launch_process(self):
test_dag_path = TEST_DAG_FOLDER / 'test_scheduler_dags.py'
async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
log_file_loc = conf.get('logging', 'DAG_PROCESSOR_MANAGER_LOG_LOCATION')
try:
os.remove(log_file_loc)
except OSError:
pass
# Starting dag processing with 0 max_runs to avoid redundant operations.
processor_agent = DagFileProcessorAgent(test_dag_path, 0, timedelta.max, [], False, async_mode)
processor_agent.start()
if not async_mode:
processor_agent.run_single_parsing_loop()
processor_agent._process.join()
assert os.path.isfile(log_file_loc)
|
airline-colors.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Johan Kanflo (github.com/kanflo)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Subscribes to the topic 'adsb/proximity/json' and publishes a color to the
# topic 'ghost/led' according to operator/distance of an aircraft.
#
import imagecolor
import os
import paho.mqtt.client as mosquitto
import Queue
import time
from threading import *
import remotelogger
import logging, sys
import socket
import calendar, datetime
import json
import traceback
import math
import signal
import random
import argparse
import bing
import bingconfig
gQuitting = False
gCurrentColor = ()
gConnectCount = 0
log = logging.getLogger(__name__)
def mqttOnConnect(mosq, obj, rc):
log.info("MQTT connected")
mosq.subscribe("adsb/proximity/json", 0)
log.debug("MQTT Connect: %s" % (str(rc)))
def mqttOnDisconnect(mosq, obj, rc):
if 1:
log.info("MQTT disconnected")
else:
global gQuitting
global gConnectCount
log.info("MQTT Disconnect: %s" % (str(rc)))
gConnectCount += 1
if gConnectCount == 10:
log.info("Giving up!")
gQuitting = True
sys.exit()
if not gQuitting:
while not mqttConnect():
time.sleep(10)
log.info("Attempting MQTT reconnect")
log.info("MQTT connected")
def mqttOnMessage(mosq, obj, msg):
global args
global gCurrentColor
try:
data = json.loads(msg.payload)
except Exception as e:
log.error("JSON load failed for '%s' : %s" % (msg.payload, e))
print traceback.format_exc()
return
if data["operator"] and data["distance"]:
airline = data["operator"]
distance = data["distance"]
lost = False
if "lost" in data:
lost = data["lost"]
if airline == "SAS":
airline = "SAS Airlines"
if lost:
log.debug("Lost sight of aircraft")
color = (0, 0, 0)
else:
try:
color = imagecolor.getColor(airline)
except Exception as e:
log.error("getColor failed %s" % (e))
print traceback.format_exc()
return
if distance > args.max_distance or not color:
color = (0, 0, 0)
else:
color_0 = int(color[0] * (1 - (distance / args.max_distance)))
color_1 = int(color[1] * (1 - (distance / args.max_distance)))
color_2 = int(color[2] * (1 - (distance / args.max_distance)))
color = (color_0, color_1, color_2)
if color != gCurrentColor:
log.debug("New color is %02x%02x%02x" % (color[0], color[1], color[2]))
# This is a bit lazy, I know...
cmd = "mosquitto_pub -h %s -t %s -m \"#%02x%02x%02x\"" % (args.mqtt_host, args.mqtt_topic, color[0], color[1], color[2])
os.system(cmd)
gCurrentColor = color
def mqttOnPublish(mosq, obj, mid):
# log.debug("mid: "+str(mid)))
pass
def mqttOnSubscribe(mosq, obj, mid, granted_qos):
log.debug("Subscribed")
def mqttOnLog(mosq, obj, level, string):
log.debug("log:"+string)
def mqttThread():
global gQuitting
log.info("MQTT thread started")
try:
mqttc.loop_forever()
gQuitting = True
log.info("MQTT thread exiting")
gQuitting = True
except Exception as e:
log.error("MQTT thread got exception: %s" % (e))
print traceback.format_exc()
# gQuitting = True
# log.info("MQTT disconnect")
# mqttc.disconnect();
log.info("MQTT thread exited")
def mqttConnect():
global mqttc
global args
try:
# If you want to use a specific client id, use
# mqttc = mosquitto.Mosquitto("client-id")
# but note that the client id must be unique on the broker. Leaving the client
# id parameter empty will generate a random id for you.
mqttc = mosquitto.Mosquitto("airlinecolor-%d" % (random.randint(0, 65535)))
mqttc.on_message = mqttOnMessage
mqttc.on_connect = mqttOnConnect
mqttc.on_disconnect = mqttOnDisconnect
mqttc.on_publish = mqttOnPublish
mqttc.on_subscribe = mqttOnSubscribe
#mqttc.on_log = mqttOnLog # Uncomment to enable debug messages
mqttc.connect(args.mqtt_host, args.mqtt_port, 60)
if 1:
log.info("MQTT thread started")
try:
mqttc.loop_start()
while True:
time.sleep(60)
log.info("MQTT thread exiting")
except Exception as e:
log.error("MQTT thread got exception: %s" % (e))
print traceback.format_exc()
# gQuitting = True
# log.info("MQTT disconnect")
# mqttc.disconnect();
log.info("MQTT thread exited")
else:
thread = Thread(target = mqttThread)
thread.daemon = True
thread.start()
return True
except socket.error, e:
log.error("Failed to connect MQTT broker at %s:%d" % (args.mqtt_host, args.mqtt_port))
return False
log.info("MQTT wierdness")
def loggingInit(level, log_host):
log = logging.getLogger(__name__)
# Initialize remote logging
logger = logging.getLogger()
logger.setLevel(level)
if log_host != None:
remotelogger.init(logger = logger, appName = "airlinecol", subSystem = None, host = log_host, level = logging.DEBUG)
# Log to stdout
ch = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
def signal_handler(signal, frame):
global gQuitting
global mqttc
print "ctrl-c"
gQuitting = True
mqttc.disconnect();
sys.exit(0)
def main():
global gQuitting
global mqttc
global args
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--mqtt-host', dest='mqtt_host', help="MQTT broker hostname", default='127.0.0.1')
parser.add_argument('-p', '--mqtt-port', dest='mqtt_port', type=int, help="MQTT broker port number", default=1883)
parser.add_argument('-t', '--mqtt-topic', dest='mqtt_topic', help="MQTT color topic", default="airlinecolor")
parser.add_argument('-d', '--max-distance', dest='max_distance', type=float, help="Max distance to light the LED (km)", default=10.0)
parser.add_argument('-v', '--verbose', dest='verbose', action="store_true", help="Verbose output")
parser.add_argument('-l', '--logger', dest='log_host', help="Remote log host")
args = parser.parse_args()
if bingconfig.key == None:
print "You really need to specify a Bing API key, see bingconfig.py"
sys.exit(1)
bing.setKey(bingconfig.key)
signal.signal(signal.SIGINT, signal_handler)
imagecolor.loadColorData()
try:
signal.signal(signal.SIGINT, signal_handler)
if args.verbose:
loggingInit(logging.DEBUG, args.log_host)
else:
loggingInit(logging.INFO, args.log_host)
log.info("Client started")
mqttConnect()
except Exception as e:
log.error("Mainloop got exception: %s" % (e))
print traceback.format_exc()
gQuitting = True
log.debug("MQTT disconnect")
mqttc.disconnect();
# Ye ol main
if __name__ == "__main__":
main()
|
gameState.py
|
from collections import deque
from os import system
import numpy as np
import win32ui
from grabber import Grabber
import time
import os
import sys
from directkeys import W,A,S,D,P,U,E,Q,T,L,I,R,F1,F2,F3,F11,NUM1,NUM2,NUM4,SPACE,G,E,PressKey,ReleaseKey,ReleaseKeys,PressAndRelease,PressAndFastRelease
from numpy import genfromtxt
from windowMgr import WindowMgr
import os
import subprocess
import threading
from gym import spaces
import math
import pickle
BOSSAREA="400100"
DARKSOULSDIR=r"C:\Program Files (x86)\Steam\steamapps\common\DARK SOULS III\Game\DarkSoulsIII.exe"
BONFIREAREA="400101"
FRAME_DIFF=0.2
SAVE_PROGRESS_SHADOWPLAY=False
SAVE_KILLS_SHADOWPLAY=True
NO_ACTION=[0,0]
HERO_BASE_HP=454
LUDEX_BASE_HP=1037
HEALTH_REWARD_MULTIPLIER=2.5
REWARD_DISTANCE=5
ESTUS_NEGATIVE_REWARD=0.3
PARRY_REWARD=0.1
TIMESTEPS_DEFENSIVE_BEHAVIOR=200
DEFENSIVE_BEHAVIOR_NEGATIVE_REWARD =0.002
start_time=-1
not_responding_lock=threading.Lock()
areaKey="locationArea"
charHpKey="heroHp"
charSpKey="heroSp"
bossHpKey="targetedEntityHp"
num_state_scalars=60
num_history_states=5
num_prev_animations=1
parryAnimationName='DamageParryEnemy1'
def parse_val(value):
try:
val=float(value)
return val
except ValueError:
if value=="??":
return 0
return value
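# dsgym is a Gym-style wrapper around a running Dark Souls III instance:
# observations are num_history_states stacked frames of num_state_scalars
# values parsed from gameInfo.txt (which appears to be written by the
# accompanying Cheat Engine table), the MultiDiscrete([5, 6]) action space
# maps to movement keys and combat actions sent via directkeys, and the
# reward is shaped from boss/hero HP deltas plus the estus, parry and
# "too defensive" adjustments defined by the constants above.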
class dsgym:
observation_space=spaces.Box(-100,1000,shape=(num_history_states*num_state_scalars,))
#1) WASD Keys: Discrete 5 - NOOP[0], W[1], A[2], S[3], D[4] - params: min: 0, max: 4
#2) Action: Discrete 6 - NOOP[0], Jump[1], Parry[2], Block[3], Attack[4], estus[5] - params: min: 0, max: 5
action_space=spaces.MultiDiscrete([5,6])
metadata=None
def __init__(self):
self.bossAnimationSet = []
self.charAnimationSet = []
self.best_so_far=-100
self.set_initial_state()
self.spawnCheckRespondingThread()
self.logfile = open("gameInfo.txt", "r", encoding="utf-8")
self.paused=False
def set_initial_state(self):
self.prev_input_actions = NO_ACTION
self.prev_char_animations = deque([],maxlen=num_prev_animations)
self.prev_boss_animations = deque([],maxlen=num_prev_animations)
self.prev_state = deque([], maxlen=num_history_states)
self.fill_frame_buffer=True
self.episode_rew=0
self.episode_len=0
self.bossHpLastFrame=LUDEX_BASE_HP
self.bossAnimationLastFrame='??'
self.bossAnimationFrameCount=0
self.charHpLastFrame=HERO_BASE_HP
self.charAnimationLastFrame='??'
self.charAnimationFrameCount=0
self.timesincecharacterattack=0
self.timesincebossattack=0
self.timesincebosslosthp=0
self.timesinceherolosthp=0
self.timesinceheroparry=0
self.numEstusLastFrame=0
self.info={}
for _ in range(num_prev_animations):
self.prev_boss_animations.append(0)
self.prev_char_animations.append(0)
def unpause_wrapper(self):
if(self.paused):
PressAndFastRelease(U)
self.paused=False
def pause_wrapper(self):
PressAndRelease(P)
self.paused=True
def speed_up_wrapper(self):
PressAndRelease(I)
def normal_speed_wrapper(self):
PressAndFastRelease(U)
def notresponding(self,name):
#os.system('tasklist /FI "IMAGENAME eq %s" /FI "STATUS eq not responding" > tmp.txt' % name)
#x = subprocess.check_output()
a = subprocess.Popen('tasklist /FI "IMAGENAME eq %s" /FI "STATUS eq running"' % name,stdout=subprocess.PIPE, stderr=subprocess.PIPE)
a=a.communicate()[0].decode("utf-8")
b = subprocess.Popen('tasklist /FI "IMAGENAME eq WerFault.exe" /FI "STATUS eq running"',stdout=subprocess.PIPE, stderr=subprocess.PIPE)
b=b.communicate()[0].decode("utf-8")
c = subprocess.Popen('tasklist /FI "IMAGENAME eq %s" /FI "STATUS ne running"' % name,stdout=subprocess.PIPE, stderr=subprocess.PIPE)
c=c.communicate()[0].decode("utf-8")
#tmp.close()
if c.split("\n")[-2].startswith(name) or "INFO:" not in b:
return True
elif a.split("\n")[-2].startswith(name):
return False
else:
return True
def setDsInFocus(self):
self.releaseAll()
w=WindowMgr()
w.find_window_wildcard(".*ARK SOULS.*")
try:
w.set_foreground()
except:
print("Had issues setting to foreground")
def spawnCheckRespondingThread(self):
thread = threading.Thread(target=self.CheckAndHandleNotResponding, args=())
thread.daemon = True # Daemonize thread
thread.start() # Start the execution
def window_exists(self,window_name):
try:
win32ui.FindWindow(None, window_name)
return True
except win32ui.error:
return False
def CheckAndHandleNotResponding(self):
while True:
#Cheat engine might not be responding if it fails to attach debugger
if(self.notresponding("DarkSoulsIII.exe") or self.window_exists("Error") or self.notresponding("cheatengine-x86_64.exe") or self.window_exists("Lua Engine")):
with not_responding_lock:
self.releaseAll()
print("Game not responding, waiting 5 seconds until restart")
PressAndRelease(U)
time.sleep(5)
if (self.notresponding("DarkSoulsIII.exe")or self.window_exists("Error") or self.notresponding("cheatengine-x86_64.exe") or self.window_exists("Lua Engine")):
self.kill_processes()
os.system('".\\DarkSoulsIII.CT"')
time.sleep(5)
os.system('"'+DARKSOULSDIR+'"')
w=WindowMgr()
time.sleep(40)
PressAndRelease(T)
PressAndRelease(I)
w.find_window_wildcard(".*ARK SOULS.*")
iter=0
print("Spamming E to get into game",iter)
while iter<1000:
try:
w.set_foreground()
except:
print("Had issues setting to foreground")
PressAndFastRelease(E)
iter+=1
stateDict=self.readState()
if(stateDict[areaKey]==BONFIREAREA):
break #we are in game
time.sleep(5)
print("Assuming in game now")
PressAndRelease(T)
ReleaseKey(E)
time.sleep(5)
def teleToBoss(self):
self.setDsInFocus()
time.sleep(5)
for i in range(50):
self.waitForUnpause()
self.check_responding_lock()
PressAndRelease(F1)
PressAndRelease(U)#Normal speed
PressAndRelease(E)
PressAndRelease(E)#Twice, bloodstain can be at entrance
time.sleep(2)
#Check whether we have entered boss area
stateDict=self.readState()
if(stateDict[areaKey]==BOSSAREA):
PressAndRelease(F2)
PressAndRelease(Q)
PressAndFastRelease(T)
break
elif i%20:
print("Tried 20 times, killing self and resetting boss")
PressAndRelease(F3)
time.sleep(20)
else: #For loop else, not if else
#didn't get to boss area in many tries, commit sudoku and kill both processes
PressAndRelease(F3)
print("Couldn't get to boss in 50 tries, something wrong, killing processes as well")
self.kill_processes()
def kill_or_wait(self,start_read):
elapsed = int(time.time() - start_read)
max_wait_time = 30
print("waiting for loading screen", elapsed, " of max", max_wait_time)
if elapsed >= max_wait_time:
self.kill_processes()
# wait for restart thread to pick it up, then wait for lock
time.sleep(10)
self.check_responding_lock()
else:
time.sleep(1)
def readState(self):
hasRead=False
start_read=time.time()
while not hasRead:
self.logfile.seek(0)
try:
loglines = self.logfile.readline()
except:
print("Couldn't read from file, will retry")
continue
if not loglines or len(loglines.split(";;"))<22:
continue
stateDict= {}
for line in loglines.split(";;"):
try:
(key,val) = line.split("::")
stateDict[key]=val
except:
print("Had issues reading state, will try again")
break
else:
hasRead = True
return stateDict
def reset(self):
self.setDsInFocus()
self.releaseAll()
self.waitForUnpause()
self.teleToBoss()
self.setDsInFocus()
self.set_initial_state()
return self.step(NO_ACTION)[0]
def waitForUnpause(self):
#Using Global Speed as a pause button (f7 and f8 in cheatengine)
stateDict=self.readState()
if stateDict["Global Speed"]=="0":
self.releaseAll()
print("Script paused as Global Speed is 0, waiting for unpause ....")
while stateDict["Global Speed"]=="0":
time.sleep(1)
stateDict=self.readState()
print("Global Speed is not 0 anymore, script is unpaused")
def can_reset(self):
self.releaseAll()
stateDict=self.readState()
#self.CheckAndHandleNotResponding()
return stateDict[charHpKey] != "0"
def kill_processes(self):
os.system("taskkill /f /im DarkSoulsIII.exe /T")
# also kill cheat engine
os.system("taskkill /f /im cheatengine-x86_64.exe /T")
def check_responding_lock(self):
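# Blocks until the not-responding handler thread releases the shared lock,
# i.e. waits out any in-progress game restart before continuing.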
not_responding_lock.acquire()
not_responding_lock.release()
def step(self,input_actions):
terminal=False
reward=0.0
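# Reward shaping overview: dying or leaving the arena ends the episode with -1,
# killing the boss ends it with +1; in between, boss damage, parries and staying
# close to the boss add small positive terms, while taking damage, idling and
# wasting estus subtract small penalties.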
self.unpause_wrapper()
#Check if able to take not responding lock
self.check_responding_lock()
self.ensure_framerate()
stateDict = self.readState()
#Check if we died
if(stateDict[charHpKey]=="0" or stateDict[areaKey]==BONFIREAREA or stateDict[areaKey]=="??"):
#Unpause game and wait for hp>0
self.releaseAll()
PressAndRelease(U)
terminal=True
reward=-1
#Check if we killed the boss or boss info is missing
elif stateDict[bossHpKey]=="0" or stateDict[bossHpKey]=="??":
self.releaseAll()
if stateDict[bossHpKey]=="0":
terminal=True
print("killed boss")
PressAndRelease(G)
PressAndRelease(E)
time.sleep(5)
reward=1
PressAndRelease(U)
PressAndRelease(F3)
#Check if lost target on boss
elif stateDict["targetLock"]=="0":
print("Lost target, retargeting until i die")
while stateDict["targetLock"]=="0" and stateDict[charHpKey]!="0":
self.releaseAll()
PressAndFastRelease(Q)
stateDict=self.readState()
#Input action
self.handleAction(input_actions)
if stateDict[bossHpKey]!="??" and self.bossHpLastFrame>int(stateDict[bossHpKey]):
hpdiff=self.bossHpLastFrame-int(stateDict[bossHpKey])
reward+=(hpdiff/LUDEX_BASE_HP)*HEALTH_REWARD_MULTIPLIER
self.timesincebosslosthp=0
else:
self.timesincebosslosthp = self.timesincebosslosthp+1
if self.timesincebosslosthp > TIMESTEPS_DEFENSIVE_BEHAVIOR:
print("Agent is playing too defensively, negative reward")
reward -=DEFENSIVE_BEHAVIOR_NEGATIVE_REWARD
#If our hp is different from last frame, can result in reward if char got healed
if stateDict[charHpKey]!="??" and int(stateDict[charHpKey])!=int(self.charHpLastFrame):
hpdiff=int(self.charHpLastFrame)-int(stateDict[charHpKey])
reward-=hpdiff/HERO_BASE_HP
self.timesinceherolosthp=0
else:
self.timesinceherolosthp+=1
if self.bossAnimationLastFrame!=parryAnimationName and stateDict['targetAnimationName']==parryAnimationName:
reward+=PARRY_REWARD
print("Got reward for parrying, last animation: ", self.bossAnimationLastFrame, " current animation: ", stateDict['targetAnimationName'])
self.timesinceheroparry=0
else:
self.timesinceheroparry+=1
#Keep hero close to boss and incentivise being alive
if self.calc_dist(stateDict) < REWARD_DISTANCE:
reward+=0.001
else:
reward-=0.001
#penalize using estus to prevent spam
numEstus=self.parseStateDictValue(stateDict,"numEstus")
if (self.numEstusLastFrame>numEstus):
#penalize using estus to prevent spam
#also prevents estus being used above ~80%life
reward-=ESTUS_NEGATIVE_REWARD
self.numEstusLastFrame=numEstus
if stateDict[bossHpKey]!="??":
self.bossHpLastFrame=int(stateDict[bossHpKey])
if stateDict[charHpKey]!="??":
self.charHpLastFrame=int(stateDict[charHpKey])
if self.bossAnimationLastFrame == stateDict['targetAnimationName']:
self.bossAnimationFrameCount+=1
else:
self.bossAnimationLastFrame=stateDict['targetAnimationName']
if self.bossAnimationLastFrame in self.bossAnimationSet:
self.prev_boss_animations.append(self.bossAnimationSet.index(self.bossAnimationLastFrame))
else:
self.bossAnimationSet.append(self.bossAnimationLastFrame)
self.prev_boss_animations.append(self.bossAnimationSet.index(self.bossAnimationLastFrame))
print(self.bossAnimationLastFrame,"did not exist in bossAnimationList, adding it")
self.bossAnimationFrameCount=0
if self.charAnimationLastFrame == stateDict['heroAnimationName']:
self.charAnimationFrameCount+=1
else:
self.charAnimationLastFrame=stateDict['heroAnimationName']
if self.charAnimationLastFrame in self.charAnimationSet:
self.prev_char_animations.append(self.charAnimationSet.index(self.charAnimationLastFrame))
else:
self.charAnimationSet.append(self.charAnimationLastFrame)
self.prev_char_animations.append(self.charAnimationSet.index(self.charAnimationLastFrame))
print(stateDict['heroAnimationName'],"did not exist in heroAnimationList, adding it")
self.charAnimationFrameCount=0
if "Attack" in stateDict['targetAnimationName']:
self.timesincebossattack=0
else:
self.timesincebossattack+=1
stateDict["reward"]=reward
self.add_state(input_actions,stateDict)
self.episode_len+=1
self.episode_rew+=reward
if terminal:
self.releaseAll()
self.info={'episode':{'r':self.episode_rew,'l':self.episode_len,'kill':stateDict[bossHpKey]=="0",'bosshp':self.bossHpLastFrame}}
#Save shadowplay recording
if(self.episode_rew>self.best_so_far and SAVE_PROGRESS_SHADOWPLAY):
print("Saving shadowplay because of best ep rew>best so far")
print("Episode rew:",self.episode_rew)
print("Best episode rew:",self.best_so_far)
PressAndFastRelease(F11)
self.best_so_far=self.episode_rew
if(stateDict[bossHpKey]=="0" and SAVE_KILLS_SHADOWPLAY):
print("Saving shadowplay as boss was killed")
PressAndFastRelease(F11)
self.episode_rew=0
self.episode_len=0
self.fill_frame_buffer=True #Fill buffer next time, if we died
PressAndRelease(I) #speed up when dead
return np.hstack(self.prev_state), reward, terminal, self.info
def releaseAll(self):
ReleaseKeys([P,W,A,S,D,E,R,SPACE,NUM1,NUM2,NUM4])
def handleAction(self,input_actions):
self.releasePreviousActions(self.prev_input_actions,input_actions)
if input_actions[0] == 1:
PressKey(W)
if input_actions[0] == 2:
PressKey(A)
if input_actions[0] == 3:
PressKey(S)
if input_actions[0] == 4:
PressKey(D)
if input_actions[1] == 1:
PressKey(SPACE)
if input_actions[1] == 2:
self.timesincecharacterattack=0
PressKey(NUM1)
else:
self.timesincecharacterattack+=1
if input_actions[1] == 3:
PressKey(NUM2)
if input_actions[1] == 4:
PressKey(NUM4)
if input_actions[1] == 5:
if self.numEstusLastFrame == 0:
pass
else:
PressKey(R)
self.prev_input_actions=input_actions
def releasePreviousActions(self, prevaction, curaction):
keys = []
if prevaction[0] != curaction[0]:
if prevaction[0] ==1:
keys.append(W)
if prevaction[0] ==2:
keys.append(A)
if prevaction[0] ==3:
keys.append(S)
if prevaction[0] ==4:
keys.append(D)
if prevaction[1] != curaction[1]:
if prevaction[1] ==1:
keys.append(SPACE)
if prevaction[1] ==2:
keys.append(NUM1)
if prevaction[1] ==3:
keys.append(NUM2)
if prevaction[1] ==4:
keys.append(NUM4)
if prevaction[1] ==5:
keys.append(R)
ReleaseKeys(keys)
#Function makes it possible to hold key pressed, valuable for blocking or moving
def releaseAllExcept(self, action):
#Always release attack key and parry key. Holding attack key does not make sense
keys=[P,E,NUM1,NUM4,R]
if action[0] !=1:
keys.append(W)
if action[0] !=2:
keys.append(A)
if action[0] !=3:
keys.append(S)
if action[0] !=4:
keys.append(D)
if action[1] !=1:
keys.append(SPACE)
if action[1] !=2:
keys.append(NUM1)
if action[1] !=3:
keys.append(NUM2)
if action[1] !=4:
keys.append(NUM4)
ReleaseKeys(keys)
def ensure_framerate(self):
global start_time
# Sleep to ensure consistency in frames
if start_time != -1:
elapsed = time.time() - start_time
timeToSleep = FRAME_DIFF - elapsed
if timeToSleep > 0:
time.sleep(timeToSleep)
#print("New elapsed ",time.time()-start_time)
else:
print("Didn't sleep")
start_time = time.time()
def parseStateDictValue(self,stateDict,key):
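# Values come from the Cheat Engine log as strings; "??" or empty map to 0 and
# comma decimal separators are normalised, e.g. "1,5" -> 1.5.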
if (stateDict[key]=="??" or stateDict[key]==""):
return 0
else:
try:
return float(stateDict[key].replace(",","."))
except:
print("Couldn't transform value to float for key: ",key, "using 0 instead")
return 0
def calc_dist(self,stateDict):
targetx=self.parseStateDictValue(stateDict,"targetedEntityX")
targety=self.parseStateDictValue(stateDict,"targetedEntityY")
herox=self.parseStateDictValue(stateDict,"heroX")
heroy=self.parseStateDictValue(stateDict,"heroY")
return math.sqrt((targetx-herox)**2+(targety-heroy)**2)
def print_state_dict(self,stateDict):
_ = system('cls')
for k in stateDict:
print (k,stateDict[k])
def save_state(self,save_path):
with open(save_path+"envstate.pkl", "wb") as file_handler:
objtosave={}
objtosave["bossAnimation"]= self.bossAnimationSet
objtosave["charAnimation"]= self.charAnimationSet
pickle.dump(objtosave, file_handler)
def load_state(self,load_path):
with open(load_path.replace(".zip","envstate.pkl"), "rb") as file_handler:
loadedobj = pickle.load(file_handler)
self.bossAnimationSet=loadedobj["bossAnimation"]
self.charAnimationSet=loadedobj["charAnimation"]
def add_state(self,action_to_add,stateDict):
targetX=self.parseStateDictValue(stateDict,"targetedEntityX")
targetY=self.parseStateDictValue(stateDict,"targetedEntityY")
heroX=self.parseStateDictValue(stateDict,"heroX")
heroY=self.parseStateDictValue(stateDict,"heroY")
stateToAdd=np.zeros(num_state_scalars)
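# State vector layout (as filled in below): [0-4] one-hot movement action,
# [5-10] one-hot attack/roll/estus action, [12-44] game scalars (HP, positions,
# angles, timers, reward), [45+] binary-encoded hero and boss animation IDs.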
stateToAdd[action_to_add[0]]=1
stateToAdd[action_to_add[1]+5]=1
stateToAdd[12]=self.parseStateDictValue(stateDict,"targetedEntityHp")
stateToAdd[13]=targetX
stateToAdd[14]=targetY
stateToAdd[15]=self.parseStateDictValue(stateDict,"targetedEntityZ")
targetAngle=self.parseStateDictValue(stateDict,"targetedEntityAngle")
stateToAdd[16]=targetAngle
stateToAdd[17]=self.parseStateDictValue(stateDict,"targetAttack1")
stateToAdd[18]=float(self.parseStateDictValue(stateDict,"targetAttack2"))
stateToAdd[19]=self.parseStateDictValue(stateDict,"targetMovement1")
stateToAdd[20]=self.parseStateDictValue(stateDict,"targetMovement2")
stateToAdd[21]=self.parseStateDictValue(stateDict,"targetComboAttack")
stateToAdd[22]=self.parseStateDictValue(stateDict,"heroHp")
stateToAdd[23]=heroX
stateToAdd[24]=heroY
dist=self.calc_dist(stateDict)
stateToAdd[25]=dist
heroAngle=self.parseStateDictValue(stateDict,"heroAngle")
stateToAdd[26]=heroAngle
stateToAdd[27]=self.parseStateDictValue(stateDict,"heroSp")
stateToAdd[28]=stateDict["reward"]
stateToAdd[29]=self.parseStateDictValue(stateDict,"HeroAnimationCounter")
stateToAdd[30]=self.timesincecharacterattack
stateToAdd[31]=self.timesincebossattack
stateToAdd[32]=self.parseStateDictValue(stateDict,"BossAnimationCounter")
estus=self.parseStateDictValue(stateDict,"numEstus")
stateToAdd[33]=estus
if estus>0:
stateToAdd[34]=1
stateToAdd[35]=math.sin(heroAngle)
stateToAdd[36]=math.cos(heroAngle)
stateToAdd[37]=math.sin(targetAngle)
stateToAdd[38]=math.cos(targetAngle)
stateToAdd[39]=heroX-targetX
stateToAdd[39]=heroY-targetY
stateToAdd[40]=self.timesincebosslosthp
stateToAdd[41]=self.parseStateDictValue(stateDict,"BossAnimationCounter")
stateToAdd[42]=self.parseStateDictValue(stateDict,"HeroAnimationCounter")
stateToAdd[43]=self.timesinceherolosthp
stateToAdd[44]=self.timesinceheroparry
charAnimationStartIndex=45
#Allow for 100 char animations and 100 boss animations
charAnimationLength=100
bossAnimationStartIndex= charAnimationStartIndex+charAnimationLength
#One hot encode prev and current animations
#charAnimationIndex=charAnimationStartIndex+self.prev_char_animations[0]
#stateToAdd[charAnimationIndex]=1
#bossAnimationIndex=bossAnimationStartIndex+self.prev_boss_animations[0]
#stateToAdd[bossAnimationIndex]=1
#Allow for 128 char animations and 128 boss animations
bossAnimationAsBinary = bin_array(self.prev_boss_animations[0],7)
charAnimationAsBinary = bin_array(self.prev_char_animations[0],7)
for i in range(7):
stateToAdd[charAnimationStartIndex+i]=bossAnimationAsBinary[i]
for i in range(7):
stateToAdd[charAnimationStartIndex+7+i]=charAnimationAsBinary[i]
if self.fill_frame_buffer:
for _ in range(num_history_states):
self.prev_state.append(stateToAdd)
self.fill_frame_buffer = False
else:
self.prev_state.append(stateToAdd)
def bin_array(num, m):
"""Convert a positive integer num into an m-bit bit vector"""
return np.array(list(np.binary_repr(num).zfill(m))).astype(np.int8)
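# Illustrative: bin_array(5, 7) -> array([0, 0, 0, 0, 1, 0, 1], dtype=int8)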
|
tunnel.py
|
"""Code for IP tunnel over a mesh
# Note python-pytuntap was too buggy
# using pip3 install pytap2
# make sure to "sudo setcap cap_net_admin+eip /usr/bin/python3.8" so python can access tun device without being root
# sudo ip tuntap del mode tun tun0
# sudo bin/run.sh --port /dev/ttyUSB0 --setch-shortfast
# sudo bin/run.sh --port /dev/ttyUSB0 --tunnel --debug
# ssh -Y root@192.168.10.151 (or dietpi), default password p
# ncat -e /bin/cat -k -u -l 1235
# ncat -u 10.115.64.152 1235
# ping -c 1 -W 20 10.115.64.152
# ping -i 30 -W 30 10.115.64.152
# FIXME: use a more optimal MTU
"""
import logging
import threading
import platform
from pubsub import pub
from pytap2 import TapDevice
from . import portnums_pb2
from .util import ipstr, readnet_u16
from .globals import Globals
def onTunnelReceive(packet, interface):
"""Callback for received tunneled messages from mesh."""
logging.debug("in onTunnelReceive()")
our_globals = Globals.getInstance()
tunnelInstance = our_globals.get_tunnelInstance()
tunnelInstance.onReceive(packet)
class Tunnel:
"""A TUN based IP tunnel over meshtastic"""
def __init__(self, iface, subnet='10.115', netmask="255.255.0.0"):
"""
Constructor
iface is the already open MeshInterface instance
subnet is used to construct our network number (normally 10.115.x.x)
"""
if not iface:
raise Exception("Tunnel() must have a interface")
self.iface = iface
self.subnetPrefix = subnet
if platform.system() != 'Linux':
raise Exception("Tunnel() can only be run instantiated on a Linux system")
our_globals = Globals.getInstance()
our_globals.set_tunnelInstance(self)
"""A list of chatty UDP services we should never accidentally
forward to our slow network"""
self.udpBlacklist = {
1900, # SSDP
5353, # multicast DNS
}
"""A list of TCP services to block"""
self.tcpBlacklist = {
5900, # VNC (Note: Only adding for testing purposes.)
}
"""A list of protocols we ignore"""
self.protocolBlacklist = {
0x02, # IGMP
0x80, # Service-Specific Connection-Oriented Protocol in a Multilink and Connectionless Environment
}
# A new non standard log level that is lower level than DEBUG
self.LOG_TRACE = 5
# TODO: check if root?
logging.info("Starting IP to mesh tunnel (you must be root for this *pre-alpha* "\
"feature to work). Mesh members:")
pub.subscribe(onTunnelReceive, "meshtastic.receive.data.IP_TUNNEL_APP")
myAddr = self._nodeNumToIp(self.iface.myInfo.my_node_num)
if self.iface.nodes:
for node in self.iface.nodes.values():
nodeId = node["user"]["id"]
ip = self._nodeNumToIp(node["num"])
logging.info(f"Node { nodeId } has IP address { ip }")
logging.debug("creating TUN device with MTU=200")
# FIXME - figure out real max MTU, it should be 240 - the overhead bytes for SubPacket and Data
self.tun = None
if self.iface.noProto:
logging.warning(f"Not creating a TapDevice() because it is disabled by noProto")
else:
self.tun = TapDevice(name="mesh")
self.tun.up()
self.tun.ifconfig(address=myAddr, netmask=netmask, mtu=200)
self._rxThread = None
if self.iface.noProto:
logging.warning(f"Not starting TUN reader because it is disabled by noProto")
else:
logging.debug(f"starting TUN reader, our IP address is {myAddr}")
self._rxThread = threading.Thread(target=self.__tunReader, args=(), daemon=True)
self._rxThread.start()
def onReceive(self, packet):
"""onReceive"""
p = packet["decoded"]["payload"]
if packet["from"] == self.iface.myInfo.my_node_num:
logging.debug("Ignoring message we sent")
else:
logging.debug(f"Received mesh tunnel message type={type(p)} len={len(p)}")
# we don't really need to check for filtering here (sender should have checked),
# but this provides useful debug printing on types of packets received
if not self.iface.noProto:
if not self._shouldFilterPacket(p):
self.tun.write(p)
def _shouldFilterPacket(self, p):
"""Given a packet, decode it and return true if it should be ignored"""
protocol = p[8 + 1]
srcaddr = p[12:16]
destAddr = p[16:20]
subheader = 20
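# These offsets assume a plain 20-byte IPv4 header with no options:
# protocol at byte 9, source address at bytes 12-15, destination at bytes 16-19.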
ignore = False # Assume we will be forwarding the packet
if protocol in self.protocolBlacklist:
ignore = True
logging.log(self.LOG_TRACE, f"Ignoring blacklisted protocol 0x{protocol:02x}")
elif protocol == 0x01: # ICMP
icmpType = p[20]
icmpCode = p[21]
checksum = p[22:24]
# pylint: disable=line-too-long
logging.debug(f"forwarding ICMP message src={ipstr(srcaddr)}, dest={ipstr(destAddr)}, type={icmpType}, code={icmpCode}, checksum={checksum}")
# reply to pings (swap src and dest but keep rest of packet unchanged)
#pingback = p[:12]+p[16:20]+p[12:16]+p[20:]
# tap.write(pingback)
elif protocol == 0x11: # UDP
srcport = readnet_u16(p, subheader)
destport = readnet_u16(p, subheader + 2)
if destport in self.udpBlacklist:
ignore = True
logging.log(self.LOG_TRACE, f"ignoring blacklisted UDP port {destport}")
else:
logging.debug(f"forwarding udp srcport={srcport}, destport={destport}")
elif protocol == 0x06: # TCP
srcport = readnet_u16(p, subheader)
destport = readnet_u16(p, subheader + 2)
if destport in self.tcpBlacklist:
ignore = True
logging.log(self.LOG_TRACE, f"ignoring blacklisted TCP port {destport}")
else:
logging.debug(f"forwarding tcp srcport={srcport}, destport={destport}")
else:
logging.warning(f"forwarding unexpected protocol 0x{protocol:02x}, "\
"src={ipstr(srcaddr)}, dest={ipstr(destAddr)}")
return ignore
def __tunReader(self):
tap = self.tun
logging.debug("TUN reader running")
while True:
p = tap.read()
#logging.debug(f"IP packet received on TUN interface, type={type(p)}")
destAddr = p[16:20]
if not self._shouldFilterPacket(p):
self.sendPacket(destAddr, p)
def _ipToNodeId(self, ipAddr):
# We only consider the last 16 bits of the nodenum for IP address matching
ipBits = ipAddr[2] * 256 + ipAddr[3]
if ipBits == 0xffff:
return "^all"
for node in self.iface.nodes.values():
nodeNum = node["num"] & 0xffff
# logging.debug(f"Considering nodenum 0x{nodeNum:x} for ipBits 0x{ipBits:x}")
if (nodeNum) == ipBits:
return node["user"]["id"]
return None
def _nodeNumToIp(self, nodeNum):
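# Illustrative mapping (with the default "10.115" prefix): node number 0x1234
# becomes "10.115.18.52", since (0x1234 >> 8) & 0xff == 18 and 0x1234 & 0xff == 52.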
return f"{self.subnetPrefix}.{(nodeNum >> 8) & 0xff}.{nodeNum & 0xff}"
def sendPacket(self, destAddr, p):
"""Forward the provided IP packet into the mesh"""
nodeId = self._ipToNodeId(destAddr)
if nodeId is not None:
logging.debug(f"Forwarding packet bytelen={len(p)} dest={ipstr(destAddr)}, destNode={nodeId}")
self.iface.sendData(
p, nodeId, portnums_pb2.IP_TUNNEL_APP, wantAck=False)
else:
logging.warning(f"Dropping packet because no node found for destIP={ipstr(destAddr)}")
def close(self):
"""Close"""
self.tun.close()
|
multi_process_runner_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `multi_process_runner`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import threading
import time
from absl import logging
from six.moves import queue as Queue
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.eager import test
def proc_func_that_adds_task_type_in_return_data(test_obj, val):
test_obj.assertEqual(val, 3)
return multi_worker_test_base.get_task_type()
def proc_func_that_errors():
raise ValueError('This is an error.')
def proc_func_that_does_nothing():
pass
def proc_func_that_adds_simple_return_data():
return 'dummy_data'
def proc_func_that_return_args_and_kwargs(*args, **kwargs):
return list(args) + list(kwargs.items())
class MultiProcessRunnerTest(test.TestCase):
def _worker_idx(self):
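# TF_CONFIG is a JSON document of the form
# {"cluster": {...}, "task": {"type": "worker", "index": 0}}; the task index
# identifies which subprocess this worker represents.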
config_task = json.loads(os.environ['TF_CONFIG'])['task']
return config_task['index']
def test_multi_process_runner(self):
mpr_result = multi_process_runner.run(
proc_func_that_adds_task_type_in_return_data,
multi_worker_test_base.create_cluster_spec(
num_workers=2, num_ps=3, has_eval=1),
args=(self, 3))
job_count_dict = {'worker': 2, 'ps': 3, 'evaluator': 1}
for data in mpr_result.return_value:
job_count_dict[data] -= 1
self.assertEqual(job_count_dict['worker'], 0)
self.assertEqual(job_count_dict['ps'], 0)
self.assertEqual(job_count_dict['evaluator'], 0)
def test_multi_process_runner_error_propagates_from_subprocesses(self):
runner = multi_process_runner.MultiProcessRunner(
proc_func_that_errors,
multi_worker_test_base.create_cluster_spec(num_workers=1, num_ps=1),
max_run_time=20)
runner.start()
with self.assertRaisesRegexp(ValueError, 'This is an error.'):
runner.join()
def test_multi_process_runner_queue_emptied_between_runs(self):
cluster_spec = multi_worker_test_base.create_cluster_spec(num_workers=2)
return_value = multi_process_runner.run(
proc_func_that_adds_simple_return_data, cluster_spec).return_value
self.assertTrue(return_value)
self.assertEqual(return_value[0], 'dummy_data')
self.assertEqual(return_value[1], 'dummy_data')
return_value = multi_process_runner.run(proc_func_that_does_nothing,
cluster_spec).return_value
self.assertFalse(return_value)
def test_multi_process_runner_args_passed_correctly(self):
return_value = multi_process_runner.run(
proc_func_that_return_args_and_kwargs,
multi_worker_test_base.create_cluster_spec(num_workers=1),
args=('a', 'b'),
kwargs={
'c_k': 'c_v'
}).return_value
self.assertEqual(return_value[0][0], 'a')
self.assertEqual(return_value[0][1], 'b')
self.assertEqual(return_value[0][2], ('c_k', 'c_v'))
def test_stdout_captured(self):
def simple_print_func():
print('This is something printed.', flush=True)
return 'This is returned data.'
mpr_result = multi_process_runner.run(
simple_print_func,
multi_worker_test_base.create_cluster_spec(num_workers=2),
list_stdout=True)
std_stream_results = mpr_result.stdout
return_value = mpr_result.return_value
self.assertIn('[worker-0]: This is something printed.\n',
std_stream_results)
self.assertIn('[worker-1]: This is something printed.\n',
std_stream_results)
self.assertIn('This is returned data.', return_value)
def test_process_that_exits(self):
def func_to_exit_in_15_sec():
time.sleep(5)
print('foo', flush=True)
time.sleep(20)
print('bar', flush=True)
mpr = multi_process_runner.MultiProcessRunner(
func_to_exit_in_15_sec,
multi_worker_test_base.create_cluster_spec(num_workers=1),
list_stdout=True,
max_run_time=15)
mpr.start()
stdout = mpr.join().stdout
self.assertLen([msg for msg in stdout if 'foo' in msg], 1)
self.assertLen([msg for msg in stdout if 'bar' in msg], 0)
def test_signal_doesnt_fire_after_process_exits(self):
mpr = multi_process_runner.MultiProcessRunner(
proc_func_that_does_nothing,
multi_worker_test_base.create_cluster_spec(num_workers=1),
max_run_time=10)
mpr.start()
mpr.join()
with self.assertRaisesRegexp(Queue.Empty, ''):
# If the signal was fired, another message would be added to internal
# queue, so verifying it's empty.
multi_process_runner._resource(
multi_process_runner.PROCESS_STATUS_QUEUE).get(block=False)
def test_termination(self):
def proc_func():
for i in range(0, 10):
print(
'index {}, iteration {}'.format(self._worker_idx(), i), flush=True)
time.sleep(5)
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(num_workers=2),
list_stdout=True)
mpr.start()
time.sleep(5)
mpr.terminate('worker', 0)
std_stream_results = mpr.join().stdout
# Worker 0 is terminated in the middle, so it should not have iteration 9
# printed.
self.assertIn('[worker-0]: index 0, iteration 0\n', std_stream_results)
self.assertNotIn('[worker-0]: index 0, iteration 9\n',
std_stream_results)
self.assertIn('[worker-1]: index 1, iteration 0\n', std_stream_results)
self.assertIn('[worker-1]: index 1, iteration 9\n', std_stream_results)
def test_termination_and_start_single_process(self):
def proc_func():
for i in range(0, 10):
print(
'index {}, iteration {}'.format(self._worker_idx(), i), flush=True)
time.sleep(1)
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(num_workers=2),
list_stdout=True)
mpr.start()
time.sleep(5)
mpr.terminate('worker', 0)
mpr.start_single_process('worker', 0)
std_stream_results = mpr.join().stdout
# Worker 0 is terminated in the middle, but a new worker 0 is added, so it
# should still have iteration 9 printed. Moreover, iteration 0 of worker 0
# should happen twice.
self.assertLen(
[s for s in std_stream_results if 'index 0, iteration 0' in s], 2)
self.assertIn('[worker-0]: index 0, iteration 9\n', std_stream_results)
self.assertIn('[worker-1]: index 1, iteration 0\n', std_stream_results)
self.assertIn('[worker-1]: index 1, iteration 9\n', std_stream_results)
def test_streaming(self):
def proc_func():
for i in range(5):
logging.info('(logging) %s-%d, i: %d',
multi_worker_test_base.get_task_type(), self._worker_idx(),
i)
print(
'(print) {}-{}, i: {}'.format(
multi_worker_test_base.get_task_type(), self._worker_idx(), i),
flush=True)
time.sleep(1)
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=2, num_ps=2, has_eval=True),
list_stdout=True)
mpr._dependence_on_chief = False
mpr.start()
mpr.start_single_process('worker', 2)
mpr.start_single_process('ps', 2)
mpr_result = mpr.join()
list_to_assert = mpr_result.stdout
for job in ['chief', 'evaluator']:
for iteration in range(5):
self.assertTrue(
any('(logging) {}-0, i: {}'.format(job, iteration) in line
for line in list_to_assert))
self.assertTrue(
any('(print) {}-0, i: {}'.format(job, iteration) in line
for line in list_to_assert))
for job in ['worker', 'ps']:
for iteration in range(5):
for task in range(3):
self.assertTrue(
any('(logging) {}-{}, i: {}'.format(job, task, iteration) in line
for line in list_to_assert))
self.assertTrue(
any('(print) {}-{}, i: {}'.format(job, task, iteration) in line
for line in list_to_assert))
task = 3
self.assertFalse(
any('(logging) {}-{}, i: {}'.format(job, task, iteration) in line
for line in list_to_assert))
self.assertFalse(
any('(print) {}-{}, i: {}'.format(job, task, iteration) in line
for line in list_to_assert))
def test_start_in_process_as(self):
def proc_func():
for i in range(5):
logging.info('%s-%d, i: %d', multi_worker_test_base.get_task_type(),
self._worker_idx(), i)
time.sleep(1)
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=1),
list_stdout=True)
def follow_ups():
mpr.start_single_process(task_type='evaluator', task_id=0)
threading.Thread(target=follow_ups).start()
mpr.start_in_process_as(as_task_type='chief', as_task_id=0)
list_to_assert = mpr.join().stdout
for job in ['worker', 'evaluator']:
for iteration in range(5):
self.assertTrue(
any('{}-0, i: {}'.format(job, iteration) in line
for line in list_to_assert))
def test_terminate_all_does_not_ignore_error(self):
mpr = multi_process_runner.MultiProcessRunner(
proc_func_that_errors,
multi_worker_test_base.create_cluster_spec(num_workers=2),
list_stdout=True)
mpr.start()
mpr.terminate_all()
with self.assertRaisesRegexp(ValueError, 'This is an error.'):
mpr.join()
if __name__ == '__main__':
multi_process_runner.test_main()
|
di_lan.py
|
import paramiko,time,datetime, re, os
import threading
from threading import Thread, Lock
_db_lock = Lock()
import sys
import time
ssh1 = paramiko.SSHClient()
ssh1.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh2 = paramiko.SSHClient()
ssh2.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh3 = paramiko.SSHClient()
ssh3.set_missing_host_key_policy(paramiko.AutoAddPolicy())
nodeList = ['10.205.62.4', '10.205.67.4', '10.205.57.4']
sshList = [ssh1,ssh2,ssh3]
threads = []
username = 'cisco.candresa'
password = 'password*1'
def login(node, ssh):
#print 'logged in'
logfile1 = open('logfile1-' + node + '.txt', 'a+')
logfile2 = open('logfile2-' + node + '.txt', 'a+')
count = 0
starttime = time.time()
while count < 24:
ssh.connect(node, username=username, password=password)
chan = ssh.invoke_shell()
resp = chan.recv(10000)
#print resp
while '>' not in str(resp):
#chan.send('\n')
resp = chan.recv(10000)
#print 'R1',resp
chan.send('cli test-commands password boxer\n')
resp = ''
while '>' not in str(resp):
#chan.send('\n')
resp = chan.recv(10000)
#print 'R2',resp
chan.send('show messenger usage table all\n')
resp = ''
while '>' not in str(resp):
#chan.send('\n')
resp = chan.recv(10000)
logfile1.write(resp.decode('utf-8'))
logfile1.write('\n')
chan.send('show cloud monitor di-network detail\n')
resp = ''
while '>' not in str(resp):
#chan.send('\n')
resp = chan.recv(10000)
logfile2.write(resp.decode('utf-8'))
logfile2.write('\n')
chan.send('show cloud performance dinet pps\n')
resp = ''
while '>' not in str(resp):
#chan.send('\n')
resp = chan.recv(10000)
logfile2.write(resp.decode('utf-8'))
logfile2.write('\n')
chan.send('show cloud performance port\n')
resp = ''
while '>' not in str(resp):
#chan.send('\n')
resp = chan.recv(10000)
logfile2.write(resp.decode('utf-8'))
logfile2.write('\n')
chan.send('show iftask stats summary\n')
resp = ''
while '>' not in str(resp):
#chan.send('\n')
resp = chan.recv(10000)
logfile2.write(resp.decode('utf-8'))
logfile2.write('\n')
chan.send('show cpu info verbose\n')
resp = ''
while '>' not in str(resp):
#chan.send('\n')
resp = chan.recv(10000)
logfile2.write(resp.decode('utf-8'))
logfile2.write('\n')
chan.send('exit\n')
count = count + 1
if count == 24 :
break
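# Sleep until the next full hour relative to starttime, so each pass of
# commands runs on an hourly cadence regardless of how long it took.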
time.sleep(3600.0 - ((time.time() - starttime) % 3600.0))
logfile1.close()
logfile2.close()
for node,ssh_ele in zip(nodeList,sshList):
t = threading.Thread(target = login, args=(node,ssh_ele,))
t.start()
threads.append(t)
for t in threads:
t.join()
|
SpotifyLyrics.pyw
|
#!/usr/bin/env python3
import sys
import webbrowser
import sentry_sdk
import configparser
import os
import re
import subprocess
import threading
import time
import pathvalidate
import pylrc
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QSystemTrayIcon, QAction, QMenu, qApp, QMessageBox
import backend
from services import SETTINGS_DIR, LYRICS_DIR
if os.name == "nt":
import ctypes
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID("spotifylyrics.version1")
class Communicate(QtCore.QObject):
signal = QtCore.pyqtSignal(str, str)
class LyricsTextBrowserWidget(QtWidgets.QTextBrowser):
wheelSignal = QtCore.pyqtSignal()
def wheelEvent(self, e):
try:
modifiers = e.modifiers()
if modifiers == QtCore.Qt.ControlModifier:
num_pixels = e.pixelDelta()
num_degrees = e.angleDelta()
factor = 1
if not num_pixels.isNull():
sign = 1 if num_pixels.y() > 0 else -1
UI.change_fontsize(sign * factor)
elif not num_degrees.isNull():
sign = 1 if num_degrees.y() > 0 else -1
UI.change_fontsize(sign * factor)
else:
super(QtWidgets.QTextBrowser, self).wheelEvent(e)
except:
pass
BRACKETS = re.compile(r'\[.+?\]')
HTML_TAGS = re.compile(r'<.+?>')
class UiForm:
sync = False
ontop = False
open_spotify = False
changed = False
dark_theme = False
info = False
minimize_to_tray = False
tray_icon = None
streaming_services = [backend.SpotifyStreamingService(), backend.VlcMediaPlayer(), backend.TidalStreamingService()]
def __init__(self):
self.lyrics = ""
self.timed = False
self.is_loading_settings = False
self.comm = Communicate()
self.comm.signal.connect(self.refresh_lyrics)
FORM.setObjectName("Form")
FORM.resize(550, 610)
FORM.setMinimumSize(QtCore.QSize(350, 310))
self.grid_layout_2 = QtWidgets.QGridLayout(FORM)
self.grid_layout_2.setObjectName("gridLayout_2")
self.vertical_layout_2 = QtWidgets.QVBoxLayout()
self.vertical_layout_2.setObjectName("verticalLayout_2")
self.horizontal_layout_2 = QtWidgets.QHBoxLayout()
self.horizontal_layout_2.setObjectName("horizontalLayout_2")
self.horizontal_layout_1 = QtWidgets.QHBoxLayout()
self.horizontal_layout_1.setObjectName("horizontalLayout_1")
self.label_song_name = QtWidgets.QLabel(FORM)
self.label_song_name.setObjectName("label_song_name")
self.label_song_name.setOpenExternalLinks(True)
self.horizontal_layout_2.addWidget(self.label_song_name, 0, QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
spacer_item = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontal_layout_2.addItem(spacer_item)
self.streaming_services_box = QtWidgets.QComboBox(FORM)
self.streaming_services_box.setGeometry(QtCore.QRect(160, 120, 69, 22))
self.streaming_services_box.addItems(str(n) for n in self.streaming_services)
self.streaming_services_box.setCurrentIndex(0)
self.streaming_services_box.currentIndexChanged.connect(self.options_changed)
self.horizontal_layout_2.addWidget(self.streaming_services_box, 0,
QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.change_lyrics_button = QtWidgets.QPushButton(FORM)
self.change_lyrics_button.setObjectName("pushButton")
self.change_lyrics_button.setText("Change Lyrics")
self.change_lyrics_button.clicked.connect(self.change_lyrics)
self.horizontal_layout_2.addWidget(self.change_lyrics_button, 0, QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.save_button = QtWidgets.QPushButton(FORM)
self.save_button.setObjectName("saveButton")
self.save_button.setText("Save Lyrics")
self.save_button.clicked.connect(self.save_lyrics)
self.horizontal_layout_2.addWidget(self.save_button, 0, QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
# Open Tab Button
self.chords_button = QtWidgets.QPushButton(FORM)
self.chords_button.setObjectName("chordsButton")
self.chords_button.setText("Chords")
self.chords_button.clicked.connect(self.get_chords)
self.horizontal_layout_2.addWidget(self.chords_button, 0, QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.options_combobox = QtWidgets.QComboBox(FORM)
self.options_combobox.setGeometry(QtCore.QRect(160, 120, 69, 22))
self.options_combobox.setObjectName("comboBox")
self.options_combobox.addItem("")
self.options_combobox.addItem("")
self.options_combobox.addItem("")
self.options_combobox.addItem("")
self.options_combobox.addItem("")
self.options_combobox.addItem("")
self.options_combobox.addItem("")
self.options_combobox.addItem("")
self.tray_icon = QSystemTrayIcon(FORM)
self.tray_icon.setIcon(QtGui.QIcon(self.get_resource_path('icon.png')))
show_action = QAction("Show", FORM)
quit_action = QAction("Exit", FORM)
show_action.triggered.connect(FORM.show)
quit_action.triggered.connect(qApp.quit)
tray_menu = QMenu()
tray_menu.addAction(show_action)
tray_menu.addAction(quit_action)
self.tray_icon.setContextMenu(tray_menu)
self.tray_icon.show()
self.tray_icon.activated.connect(FORM.icon_activated)
if os.name == "nt":
self.options_combobox.addItem("")
self.horizontal_layout_2.addWidget(self.options_combobox, 0, QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.font_size_box = QtWidgets.QSpinBox(FORM)
self.font_size_box.setMinimum(1)
self.font_size_box.setProperty("value", 10)
self.font_size_box.setObjectName("fontBox")
self.horizontal_layout_2.addWidget(self.font_size_box, 0, QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.vertical_layout_2.addLayout(self.horizontal_layout_2)
self.sync_adjustment_slider = QtWidgets.QSlider(FORM)
self.sync_adjustment_slider.setInvertedAppearance(True)
self.sync_adjustment_slider.setMinimum(-60)
self.sync_adjustment_slider.setMaximum(60)
self.sync_adjustment_slider.setSingleStep(1)
self.sync_adjustment_slider.setToolTipDuration(5000)
self.sync_adjustment_slider.setFixedWidth(25)
self.sync_adjustment_slider.valueChanged.connect(self.changed_slider)
self.sync_adjustment_slider.setValue(0)
self.horizontal_layout_1.addWidget(self.sync_adjustment_slider)
self.text_browser = LyricsTextBrowserWidget(FORM)
self.text_browser.setObjectName("textBrowser")
self.text_browser.setAcceptRichText(True)
self.text_browser.setStyleSheet("font-size: %spt;" % self.font_size_box.value() * 2)
self.text_browser.setFontPointSize(self.font_size_box.value())
self.horizontal_layout_1.addWidget(self.text_browser)
self.info_table = QtWidgets.QTableWidget(FORM)
self.info_table.setStyleSheet("font-size: %spt;" % (self.font_size_box.value() * 2))
self.info_table.setColumnCount(2)
self.info_table.setMaximumWidth(300)
self.info_table.verticalHeader().setVisible(False)
self.info_table.horizontalHeader().setVisible(False)
self.info_table.horizontalHeader().setStretchLastSection(True)
self.info_table.setVisible(False)
self.horizontal_layout_1.addWidget(self.info_table)
self.vertical_layout_2.addLayout(self.horizontal_layout_1)
self.grid_layout_2.addLayout(self.vertical_layout_2, 2, 0, 1, 1)
self.retranslate_ui(FORM)
self.font_size_box.valueChanged.connect(self.update_fontsize)
self.options_combobox.currentIndexChanged.connect(self.options_changed)
QtCore.QMetaObject.connectSlotsByName(FORM)
FORM.setTabOrder(self.text_browser, self.options_combobox)
FORM.setTabOrder(self.options_combobox, self.font_size_box)
self.set_style()
self.load_save_settings()
self.spotify()
self.start_thread()
self.song = None
def changed_slider(self, value) -> None:
self.sync_adjustment_slider.setToolTip("%d seconds shifted" % value)
def streaming_service_changed(self) -> None:
self.spotify()
self.load_save_settings(save=True)
def get_current_streaming_service(self) -> backend.StreamingService:
return self.streaming_services[self.streaming_services_box.currentIndex()]
def load_save_settings(self, save=False) -> None:
if self.is_loading_settings:
return
settings_file = SETTINGS_DIR + "settings.ini"
section = "settings"
if not os.path.exists(settings_file):
directory = os.path.dirname(settings_file)
if not os.path.exists(directory):
os.makedirs(directory)
config = configparser.ConfigParser(strict=False)
if not save:
self.is_loading_settings = True
config.read(settings_file)
self.sync = config.getboolean(section, "syncedlyrics", fallback=False)
self.ontop = config.getboolean(section, "alwaysontop", fallback=False)
self.open_spotify = config.getboolean(section, "openspotify", fallback=False)
self.dark_theme = config.getboolean(section, "darktheme", fallback=False)
self.info = config.getboolean(section, "info", fallback=False)
self.minimize_to_tray = config.getboolean(section, "minimizetotray", fallback=False)
self.font_size_box.setValue(config.getint(section, "fontsize", fallback=10))
streaming_service_name = config.get(section, "StreamingService", fallback=None)
if streaming_service_name:
for i in range(len(self.streaming_services)):
if str(self.streaming_services[i]) == streaming_service_name:
self.streaming_services_box.setCurrentIndex(i)
break
FORM.move(config.getint(section, "X", fallback=FORM.pos().x()),
config.getint(section, "Y", fallback=FORM.pos().y()))
if config.getboolean(section, "FullScreen", fallback=False):
FORM.showFullScreen()
elif config.getboolean(section, "Maximized", fallback=False):
FORM.showMaximized()
else:
FORM.resize(config.getint(section, "Width", fallback=FORM.width().real),
config.getint(section, "Height", fallback=FORM.height().real))
if config.getboolean(section, "disableErrorReporting", fallback=False):
self.disableErrorReporting = True
sentry_sdk.init()
self.options_combobox.setItemText(8, "Error reporting disabled")
else:
self.disableErrorReporting = False
if self.dark_theme:
self.set_dark_theme()
if self.sync:
self.options_combobox.setItemText(2, "Synced Lyrics (on)")
if self.ontop:
FORM.setWindowFlags(FORM.windowFlags() | QtCore.Qt.WindowStaysOnTopHint)
self.options_combobox.setItemText(3, "Always on Top (on)")
FORM.show()
if self.open_spotify:
self.options_combobox.setItemText(4, "Open Spotify (on)")
if self.info:
self.options_combobox.setItemText(5, "Info (on)")
self.info_table.setVisible(True)
if self.minimize_to_tray:
self.options_combobox.setItemText(7, "Minimize to Tray (on)")
else:
config.add_section(section)
config[section]["SyncedLyrics"] = str(self.sync)
config[section]["AlwaysOnTop"] = str(self.ontop)
config[section]["OpenSpotify"] = str(self.open_spotify)
config[section]["DarkTheme"] = str(self.dark_theme)
config[section]["Info"] = str(self.info)
config[section]["MinimizeToTray"] = str(self.minimize_to_tray)
config[section]["FontSize"] = str(self.font_size_box.value())
config[section]["StreamingService"] = str(self.get_current_streaming_service())
config[section]["FullScreen"] = str(FORM.isFullScreen())
config[section]["Maximized"] = str(FORM.isMaximized())
config[section]["X"] = str(FORM.pos().x())
config[section]["Y"] = str(FORM.pos().y())
config[section]["Width"] = str(FORM.width().real)
config[section]["Height"] = str(FORM.height().real)
config[section]["disableErrorReporting"] = str(self.disableErrorReporting)
with open(settings_file, 'w+') as settings:
config.write(settings)
self.is_loading_settings = False
def options_changed(self) -> None:
current_index = self.options_combobox.currentIndex()
if current_index == 1:
if self.dark_theme is False:
self.set_dark_theme()
else:
self.dark_theme = False
self.text_browser.setStyleSheet("")
self.label_song_name.setStyleSheet("")
self.options_combobox.setStyleSheet("")
self.font_size_box.setStyleSheet("")
self.sync_adjustment_slider.setStyleSheet("")
self.streaming_services_box.setStyleSheet("")
self.change_lyrics_button.setStyleSheet("")
self.save_button.setStyleSheet("")
self.chords_button.setStyleSheet("")
self.info_table.setStyleSheet("")
self.options_combobox.setItemText(1, "Dark Theme")
text = re.sub("color:.*?;", "color: black;", self.label_song_name.text())
self.label_song_name.setText(text)
FORM.setWindowOpacity(1.0)
FORM.setStyleSheet("")
self.set_style()
elif current_index == 2:
if self.sync:
self.options_combobox.setItemText(2, "Synced Lyrics")
else:
self.options_combobox.setItemText(2, "Synced Lyrics (on)")
self.sync = not self.sync
elif current_index == 3:
if self.ontop is False:
FORM.setWindowFlags(FORM.windowFlags() | QtCore.Qt.WindowStaysOnTopHint)
self.options_combobox.setItemText(3, "Always on Top (on)")
FORM.show()
else:
FORM.setWindowFlags(FORM.windowFlags() & ~QtCore.Qt.WindowStaysOnTopHint)
self.options_combobox.setItemText(3, "Always on Top")
FORM.show()
self.ontop = not self.ontop
elif current_index == 4:
if self.open_spotify:
self.options_combobox.setItemText(4, "Open Spotify")
else:
self.spotify()
self.options_combobox.setItemText(4, "Open Spotify (on)")
self.open_spotify = not self.open_spotify
elif current_index == 5:
if self.info:
self.options_combobox.setItemText(5, "Info")
self.info_table.setVisible(False)
else:
self.options_combobox.setItemText(5, "Info (on)")
self.info_table.setVisible(True)
self.info = not self.info
elif current_index == 6:
if os.name == "nt":
subprocess.Popen(r'explorer "' + LYRICS_DIR + '"')
elif current_index == 7:
if self.minimize_to_tray:
self.options_combobox.setItemText(7, "Minimize to System Tray")
else:
self.options_combobox.setItemText(7, "Minimize to System Tray (on)")
self.minimize_to_tray = not self.minimize_to_tray
elif current_index == 8:
if self.disableErrorReporting:
self.options_combobox.setItemText(8, "Disable Error reporting")
else:
self.options_combobox.setItemText(8, "Error Reporting disabled")
self.disableErrorReporting = not self.disableErrorReporting
self.options_combobox.setCurrentIndex(0)
self.load_save_settings(save=True)
def set_style(self):
self.lyrics_text_align = QtCore.Qt.AlignLeft
if os.path.exists(SETTINGS_DIR + "theme.ini"):
theme_file = SETTINGS_DIR + "theme.ini"
else:
theme_file = "theme.ini"
if not os.path.exists(theme_file):
self.label_song_name.setStyleSheet("color: black; text-decoration: underline;")
return
section = "theme"
config = configparser.ConfigParser()
with open(theme_file, 'r') as theme:
config.read_string("[%s]\n%s" % (section, theme.read()))
align = config.get(section, "lyricstextalign", fallback="")
if align:
if align == "center":
self.lyrics_text_align = QtCore.Qt.AlignCenter
elif align == "right":
self.lyrics_text_align = QtCore.Qt.AlignRight
FORM.setWindowOpacity(config.getfloat(section, "windowopacity", fallback=1))
background = config.get(section, "backgroundcolor", fallback="")
if background:
FORM.setStyleSheet("background-color: %s;" % background)
style = self.text_browser.styleSheet()
text_background = config.get(section, "lyricsbackgroundcolor", fallback="")
if text_background:
style = style + "background-color: %s;" % text_background
text_color = config.get(section, "lyricstextcolor", fallback="")
if text_color:
style = style + "color: %s;" % text_color
text_font = config.get(section, "lyricsfont", fallback="")
if text_font:
style = style + "font-family: %s;" % text_font
self.text_browser.setStyleSheet(style)
style = self.label_song_name.styleSheet()
label_color = config.get(section, "songnamecolor", fallback="")
if label_color:
style = style + "color: %s;" % label_color
text = re.sub("color:.*?;", "color: %s;" % label_color, self.label_song_name.text())
self.label_song_name.setText(text)
label_underline = config.getboolean(section, "songnameunderline", fallback=False)
if label_underline:
style = style + "text-decoration: underline;"
self.label_song_name.setStyleSheet(style)
style = self.font_size_box.styleSheet()
font_size_background = config.get(section, "fontboxbackgroundcolor", fallback="")
if font_size_background:
style = style + "background-color: %s;" % font_size_background
font_size_color = config.get(section, "fontboxtextcolor", fallback="")
if font_size_color:
style = style + "color: %s;" % font_size_color
self.streaming_services_box.setStyleSheet(style)
self.options_combobox.setStyleSheet(style)
self.font_size_box.setStyleSheet(style)
self.change_lyrics_button.setStyleSheet(style)
self.save_button.setStyleSheet(style)
self.chords_button.setStyleSheet(style)
def set_dark_theme(self):
self.dark_theme = True
self.text_browser.setStyleSheet("background-color: #181818; color: #ffffff;")
self.label_song_name.setStyleSheet("color: #9c9c9c; text-decoration: underline;")
text = re.sub("color:.*?;", "color: #9c9c9c;", self.label_song_name.text())
self.label_song_name.setText(text)
self.sync_adjustment_slider.setStyleSheet("background-color: #181818; color: #9c9c9c;")
self.streaming_services_box.setStyleSheet("background-color: #181818; color: #9c9c9c;")
self.options_combobox.setStyleSheet("background-color: #181818; color: #9c9c9c;")
self.font_size_box.setStyleSheet("background-color: #181818; color: #9c9c9c;")
self.change_lyrics_button.setStyleSheet("background-color: #181818; color: #9c9c9c;")
self.save_button.setStyleSheet("background-color: #181818; color: #9c9c9c;")
self.chords_button.setStyleSheet("background-color: #181818; color: #9c9c9c;")
self.info_table.setStyleSheet("background-color: #181818; color: #9c9c9c;")
self.options_combobox.setItemText(1, "Dark Theme (on)")
FORM.setWindowOpacity(1.0)
FORM.setStyleSheet("background-color: #282828;")
@staticmethod
def get_resource_path(relative_path):
try:
base_path = sys._MEIPASS
except Exception:
base_path = os.path.abspath(".")
return os.path.join(base_path, relative_path)
def set_lyrics_with_alignment(self, lyrics):
self.text_browser.clear()
for line in lyrics.splitlines():
self.text_browser.append(line)
self.text_browser.setAlignment(self.lyrics_text_align)
def change_fontsize(self, offset):
self.font_size_box.setValue(self.font_size_box.value() + offset)
def update_fontsize(self):
self.text_browser.setFontPointSize(self.font_size_box.value())
style = self.text_browser.styleSheet()
style = style.replace('%s' % style[style.find("font"):style.find("pt;") + 3], '')
style = style.replace('p ', '')
self.text_browser.setStyleSheet(style + "p font-size: %spt;" % (self.font_size_box.value() * 2))
lyrics = self.text_browser.toPlainText()
self.set_lyrics_with_alignment(lyrics)
self.load_save_settings(save=True)
def retranslate_ui(self, form):
_translate = QtCore.QCoreApplication.translate
form.setWindowTitle(_translate("Form", "Spotify Lyrics - {}".format(backend.get_version())))
form.setWindowIcon(QtGui.QIcon(self.get_resource_path('icon.png')))
if backend.check_version():
self.label_song_name.setText(_translate("Form", "Spotify Lyrics"))
else:
self.label_song_name.setText(_translate("Form",
"Spotify Lyrics <style type=\"text/css\">a {text-decoration: "
"none}</style><a "
"href=\"https://github.com/SimonIT/spotifylyrics/releases\"><sup>("
"update)</sup></a>"))
update_dialog = QMessageBox()
update_dialog.setWindowIcon(FORM.windowIcon())
update_dialog.setIcon(QMessageBox.Information)
update_dialog.setText("A newer version of SpotifyLyrics is available!")
update_dialog.setInformativeText("Do you want to download the newer version?")
update_dialog.setWindowTitle("Update available")
update_dialog.setStandardButtons(QMessageBox.Open | QMessageBox.Close)
update_result = update_dialog.exec()
if update_result == QMessageBox.Open:
webbrowser.open("https://github.com/SimonIT/spotifylyrics/releases")
self.text_browser.setText(_translate("Form", "Play a song in Spotify to fetch lyrics."))
self.font_size_box.setToolTip(_translate("Form", "Font Size"))
self.options_combobox.setItemText(0, _translate("Form", "Options"))
self.options_combobox.setItemText(1, _translate("Form", "Dark Theme"))
self.options_combobox.setItemText(2, _translate("Form", "Synced Lyrics"))
self.options_combobox.setItemText(3, _translate("Form", "Always on Top"))
self.options_combobox.setItemText(4, _translate("Form", "Open Spotify"))
self.options_combobox.setItemText(5, _translate("Form", "Info"))
if os.name == "nt":
self.options_combobox.setItemText(6, _translate("Form", "Open Lyrics Directory"))
self.options_combobox.setItemText(7, _translate("Form", "Minimize to Tray"))
self.options_combobox.setItemText(8, _translate("Form", "Disable error reporting"))
def add_service_name_to_lyrics(self, lyrics, service_name):
return '''<span style="font-size:%spx; font-style:italic;">Lyrics loaded from: %s</span>\n\n%s''' % (
(self.font_size_box.value() - 2) * 2, service_name, lyrics)
def display_lyrics(self, comm):
old_song_name = ""
while True:
song_name = backend.get_window_title(self.get_current_streaming_service())
if (old_song_name != song_name or self.changed) \
and song_name not in self.get_current_streaming_service().get_not_playing_windows_title():
self.sync_adjustment_slider.setValue(0)
comm.signal.emit(song_name, "Loading...")
if not self.changed:
old_song_name = song_name
start = time.time()
self.song = backend.Song.get_from_string(song_name)
self.lyrics = ""
if self.info:
backend.load_info(self, self.song)
lyrics_metadata = backend.get_lyrics(song=self.song, sync=self.sync)
else:
lyrics_metadata = backend.next_lyrics(song=self.song, sync=self.sync)
self.changed = False
self.lyrics = lyrics_metadata.lyrics
self.timed = lyrics_metadata.timed
if not lyrics_metadata.url:
header = song_name
else:
style = self.label_song_name.styleSheet()
if style == "":
color = "color: black"
else:
color = style
header = '''<style type="text/css">a {text-decoration: none; %s}</style><a href="%s">%s</a>''' \
% (color, lyrics_metadata.url, song_name)
lyrics_clean = lyrics_metadata.lyrics
if lyrics_metadata.timed:
self.sync_adjustment_slider.setVisible(self.sync)
lrc = pylrc.parse(lyrics_metadata.lyrics)
if lrc.album:
self.song.album = lrc.album
lyrics_clean = '\n'.join(e.text for e in lrc)
comm.signal.emit(header,
self.add_service_name_to_lyrics(lyrics_clean, lyrics_metadata.service_name))
count = 0
line_changed = True
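# Timed-lyrics loop: `start` approximates when playback began; each LRC line's
# timestamp (shifted by the adjustment slider) is compared against elapsed
# wall-clock time to decide when to advance and highlight the next line.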
while self.sync and not self.changed:
time_title_start = time.time()
window_title = backend.get_window_title(self.get_current_streaming_service())
time_title_end = time.time()
if window_title in self.get_current_streaming_service().get_not_playing_windows_title():
time.sleep(0.2)
start += 0.2 + time_title_end - time_title_start
elif song_name != window_title or not count + 1 < len(lrc):
self.sync_adjustment_slider.setValue(0)
break
else:
if lrc[count + 1].time - self.sync_adjustment_slider.value() <= time.time() - start:
count += 1
line_changed = True
if line_changed:
lrc[count - 1].text = HTML_TAGS.sub("", lrc[count - 1].text)
lrc[count].text = """<b style="font-size: %spt">%s</b>""" % \
(self.font_size_box.value() * 1.25, lrc[count].text)
if count - 2 > 0:
lrc[count - 3].text = HTML_TAGS.sub("", lrc[count - 3].text)
lrc[count - 2].text = "<a name=\"#scrollHere\">%s</a>" % lrc[count - 2].text
bold_lyrics = '<style type="text/css">p {font-size: %spt}</style><p>%s</p>' % \
(
self.font_size_box.value(),
'<br>'.join(e.text for e in lrc)
)
comm.signal.emit(
header,
self.add_service_name_to_lyrics(bold_lyrics, lyrics_metadata.service_name)
)
line_changed = False
time.sleep(0.5)
else:
time.sleep(0.2)
else:
self.sync_adjustment_slider.setVisible(False)
comm.signal.emit(
header,
self.add_service_name_to_lyrics(lyrics_clean, lyrics_metadata.service_name))
time.sleep(1)
def start_thread(self):
lyrics_thread = threading.Thread(target=self.display_lyrics, args=(self.comm,))
lyrics_thread.daemon = True
lyrics_thread.start()
def refresh_lyrics(self, song_name, lyrics):
_translate = QtCore.QCoreApplication.translate
if backend.get_window_title(self.get_current_streaming_service()):
self.label_song_name.setText(_translate("Form", song_name))
self.set_lyrics_with_alignment(_translate("Form", lyrics))
self.text_browser.scrollToAnchor("#scrollHere")
self.refresh_info()
def refresh_info(self):
self.info_table.clearContents()
if not self.song:
return
self.info_table.setRowCount(8)
index = 0
self.info_table.setItem(index, 0, QtWidgets.QTableWidgetItem("Title"))
self.info_table.setItem(index, 1, QtWidgets.QTableWidgetItem(self.song.name))
index += 1
self.info_table.setItem(index, 0, QtWidgets.QTableWidgetItem("Artist"))
self.info_table.setItem(index, 1, QtWidgets.QTableWidgetItem(self.song.artist))
index += 1
if self.song.album != "UNKNOWN":
self.info_table.setItem(index, 0, QtWidgets.QTableWidgetItem("Album"))
self.info_table.setItem(index, 1, QtWidgets.QTableWidgetItem(self.song.album))
index += 1
if self.song.genre != "UNKNOWN":
self.info_table.setItem(index, 0, QtWidgets.QTableWidgetItem("Genre"))
self.info_table.setItem(index, 1, QtWidgets.QTableWidgetItem(self.song.genre))
index += 1
if self.song.year != -1:
self.info_table.setItem(index, 0, QtWidgets.QTableWidgetItem("Year"))
self.info_table.setItem(index, 1, QtWidgets.QTableWidgetItem(str(self.song.year)))
index += 1
if self.song.cycles_per_minute != -1:
self.info_table.setItem(index, 0, QtWidgets.QTableWidgetItem("Cycles Per Minute"))
self.info_table.setItem(index, 1, QtWidgets.QTableWidgetItem(str(self.song.cycles_per_minute)))
index += 1
if self.song.beats_per_minute != -1:
self.info_table.setItem(index, 0, QtWidgets.QTableWidgetItem("Beats Per Minute"))
self.info_table.setItem(index, 1, QtWidgets.QTableWidgetItem(str(self.song.beats_per_minute)))
index += 1
if self.song.dances:
self.info_table.setItem(index, 0, QtWidgets.QTableWidgetItem("Dances"))
self.info_table.setItem(index, 1, QtWidgets.QTableWidgetItem("\n".join(self.song.dances)))
self.info_table.resizeRowsToContents()
self.info_table.resizeColumnsToContents()
def get_chords(self):
_translate = QtCore.QCoreApplication.translate
if self.song:
backend.load_chords(self.song)
else:
self.text_browser.append(_translate("Form", "I'm sorry, Dave. I'm afraid I can't do that."))
def change_lyrics(self):
_translate = QtCore.QCoreApplication.translate
if self.song:
self.changed = True
else:
self.text_browser.append(_translate("Form", "I'm sorry, Dave. I'm afraid I can't do that."))
def save_lyrics(self):
if not self.song or not self.lyrics:
return
if not os.path.exists(LYRICS_DIR):
os.makedirs(LYRICS_DIR)
artist = pathvalidate.sanitize_filename(self.song.artist)
name = pathvalidate.sanitize_filename(self.song.name)
new_lyrics_file = None
for lyrics_file in os.listdir(LYRICS_DIR):
lyrics_file = os.path.join(LYRICS_DIR, lyrics_file)
if os.path.isfile(lyrics_file):
file_parts = os.path.splitext(lyrics_file)
file_extension = file_parts[1].lower()
if file_extension in (".txt", ".lrc"):
file_name = file_parts[0].lower()
if name.lower() in file_name and artist.lower() in file_name:
save_dialog = QMessageBox()
save_dialog.setWindowIcon(FORM.windowIcon())
save_dialog.setIcon(QMessageBox.Information)
save_dialog.setText("You got already saved lyrics for the song %s by %s!" %
(self.song.name, self.song.artist))
save_dialog.setInformativeText("Do you want overwrite them?")
save_dialog.setWindowTitle("Lyrics already saved")
save_dialog.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
save_anyway = save_dialog.exec()
if save_anyway == QMessageBox.Yes:
new_lyrics_file = file_name
break
else:
return
if not new_lyrics_file:
new_lyrics_file = os.path.join(LYRICS_DIR, "%s - %s" % (artist, name))
text = self.lyrics
if self.timed:
lyrics_file = new_lyrics_file + ".lrc"
if self.sync_adjustment_slider.value() != 0:
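                # Descriptive note: fold the current sync adjustment into the LRC offset
                # (slider value scaled by 1000 to milliseconds) so the saved file keeps
                # the user's timing correction when it is reloaded.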
lrc = pylrc.parse(text)
lrc.offset -= self.sync_adjustment_slider.value() * 1000
text = lrc.toLRC()
else:
lyrics_file = new_lyrics_file + ".txt"
with open(lyrics_file, "w", encoding="utf-8") as lyrics_file:
lyrics_file.write(text)
def spotify(self) -> None:
if not self.open_spotify:
return
if not backend.open_spotify(self.get_current_streaming_service()):
save_dialog = QMessageBox()
save_dialog.setWindowIcon(FORM.windowIcon())
save_dialog.setIcon(QMessageBox.Warning)
save_dialog.setText("Couldn't open %s!" % str(self.get_current_streaming_service()))
save_dialog.setStandardButtons(QMessageBox.Ok)
save_dialog.exec()
class FormWidget(QtWidgets.QWidget):
def __init__(self):
super().__init__()
def closeEvent(self, event):
UI.load_save_settings(save=True)
if UI.minimize_to_tray:
event.ignore()
self.hide()
def icon_activated(self, reason):
if reason == QtWidgets.QSystemTrayIcon.DoubleClick:
self.show()
def moveEvent(self, a0: QtGui.QMoveEvent) -> None:
try:
UI.load_save_settings(save=True)
        except Exception:
pass
def resizeEvent(self, a0: QtGui.QResizeEvent) -> None:
try:
UI.load_save_settings(save=True)
        except Exception:
pass
if __name__ == "__main__":
sentry_sdk.init("https://71bf000cb7c5448c8c08660b29a12c09@o407859.ingest.sentry.io/5277612",
release="spotifylyrics@" + str(backend.get_version()))
with sentry_sdk.configure_scope() as scope:
if getattr(sys, 'frozen', False) and hasattr(sys, '_MEIPASS'):
running = "pyinstaller"
else:
running = "source"
scope.set_tag("running_from", running)
APP = QtWidgets.QApplication(sys.argv)
APP.setStyle("fusion")
FORM = FormWidget()
UI = UiForm()
FORM.show()
sys.exit(APP.exec())
|
rabbitmq.py
|
#! /usr/bin/env python
import time
import datetime
import functools
import logging
import pika
import threading
from contextlib import suppress
from pika.exceptions import StreamLostError, ChannelClosed, AMQPConnectionError, ConnectionWrongStateError
from sqapi.processing.exception import SqapiPluginExecutionError, PluginFailure
log = logging.getLogger(__name__)
class Listener:
def __init__(self, config: dict, process_message):
self.config = config if config else dict()
self.pm_callback = process_message
log.info('Loading RabbitMQ')
self.retry_interval = float(config.get('retry_interval', 3))
self.delay = config.get('process_delay', 0)
self.routing_keys = config.get('routing_keys', [])
routing_key = config.get('routing_key')
if routing_key:
self.routing_keys.append(routing_key)
self.queue_name = config.get('queue_name', 'q_sqapi')
self.exchange_name = config.get('exchange_name', 'x_sqapi')
self.exchange_type = config.get('exchange_type', 'fanout')
self.requeue_failures = config.get('requeue', True)
dlq_config = config.get('dlq', {})
self.dlq_exchange = dlq_config.get('exchange', 'DLQ')
self.dlq_exchange_type = dlq_config.get('exchange_type', 'topic')
self.dlq_routing_key = dlq_config.get('routing_key', 'message.sqapi')
self.host = config.get('host', 'localhost')
self.port = config.get('port', 5672)
self.test_connection()
def test_connection(self):
log.debug('Testing connection to RabbitMQ on {}:{}'.format(self.host, self.port))
while True:
try:
log.debug('Establishing connection through Pika module')
connection = pika.BlockingConnection(pika.ConnectionParameters(self.host, self.port))
if connection.is_open:
log.info('Connection tested: OK')
connection.close()
break
else:
err = 'Could not connect to RabbitMQ'
log.debug(err)
raise ConnectionError('Could not connect to RabbitMQ')
except Exception as e:
log.debug('Connection tested: {}'.format(str(e)))
time.sleep(self.retry_interval)
def start_listener(self):
while True:
try:
listener = self.listen_exchange if self.config.get('exchange_name') else self.listen_queue
listener()
except (StreamLostError, ChannelClosed, AMQPConnectionError) as e:
log.warning('Lost connection to broker: {}'.format(str(e)))
except (InterruptedError, KeyboardInterrupt) as e:
log.error('Interrupted, exiting consumer: {}'.format(str(e)))
break
except SystemExit:
log.error('System is shutting down - exiting RabbitMQ consumer')
break
except Exception as e:
log.error('Something unexpected happened ({}) while listening on broker: {}'.format(type(e), str(e)))
finally:
time.sleep(1)
log.info('Finished consuming from RabbitMQ')
def listen_queue(self):
        log.info('Starting Queue listener with routing keys: {}'.format(self.routing_keys))
connection, channel = self._create_connection()
# Create a queue
res = channel.queue_declare(
queue=self.queue_name,
arguments={
'x-max-priority': 3
})
queue_name = res.method.queue
callback = functools.partial(self.message_receiver, connection=connection)
channel.basic_consume(queue=queue_name, on_message_callback=callback)
log.debug('Starting to consume from queue: {}'.format(queue_name))
channel.start_consuming()
def listen_exchange(self):
log.debug('Starting Exchange listener {} as type {}'.format(self.exchange_name, self.exchange_type))
connection, channel = self._create_connection()
channel.exchange_declare(exchange=self.exchange_name, exchange_type=self.exchange_type, durable=True)
# Create a queue
res = channel.queue_declare(
self.queue_name,
arguments={
'x-max-priority': 3
})
queue_name = res.method.queue
for key in self.routing_keys:
channel.queue_bind(exchange=self.exchange_name, queue=queue_name, routing_key=key)
callback = functools.partial(self.message_receiver, connection=connection)
channel.basic_consume(queue=queue_name, on_message_callback=callback)
log.debug('Starting to consume from exchange: {}'.format(self.exchange_name))
channel.start_consuming()
def _create_connection(self):
connection = pika.BlockingConnection(pika.ConnectionParameters(self.host, self.port))
channel = connection.channel()
        return connection, channel
def publish_to_dlq(self, method, properties, body, e: SqapiPluginExecutionError):
try:
connection, channel = self._create_connection()
channel.exchange_declare(exchange=self.dlq_exchange, exchange_type=self.dlq_exchange_type, durable=True)
for error in e.failures:
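                # Descriptive note: record why the message failed in an x-death style header,
                # so DLQ consumers can inspect the exception details per failed plugin.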
properties.headers = {
'x-death': {
'x-exception-information': {
'x-exception-timestamp': str(datetime.datetime.utcnow().isoformat()),
'x-exception-reason': str(error.reason),
'x-exception-system': 'Sqapi',
'x-exception-type': str(error.exception_type),
},
'queue': '.'.join([x for x in [self.queue_name, error.plugin] if x]),
'exchange': method.exchange,
'routing-keys': [method.routing_key],
}
}
log.debug(f'Headers: {properties.headers}')
channel.basic_publish(exchange=self.dlq_exchange,
routing_key='.'.join([
self.dlq_routing_key, error.plugin
]),
properties=properties,
body=body)
except Exception as e:
log.error(f'Failed publishing message to DLQ: {str(e)}')
def message_receiver(self, ch, method, properties, body, connection):
log.info('Received message')
log.debug(f'Channel: {ch}, Method: {method}, Properties: {properties}, Message: {body}')
t = threading.Thread(target=self.handle_message, args=(connection, ch, method, properties, body))
t.start()
def handle_message(self, connection, ch, method, properties, body):
cb = None
try:
rk_parts = method.routing_key.split('.')
            specific_plugin = rk_parts[2] if len(rk_parts) == 3 else None
self.pm_callback(body, specific_plugin)
cb = functools.partial(self.send_ack, ch=ch, tag=method.delivery_tag)
except SqapiPluginExecutionError as e:
            log.warning(f'Registering {len(e.failures)} errors from plugin execution')
cb = functools.partial(self.send_nack, ch=ch, tag=method.delivery_tag)
self.publish_to_dlq(method, properties, body, e)
except Exception as e:
log.warning(f'Could not process received message: {str(e)}')
cb = functools.partial(self.send_nack, ch=ch, tag=method.delivery_tag, rq=self.requeue_failures)
self.publish_to_dlq(method, properties, body, SqapiPluginExecutionError([PluginFailure('', e)]))
except SystemExit as e:
log.warning('Could not process received message, due to shutdown')
with suppress(ConnectionWrongStateError, StreamLostError):
ch.stop_consuming()
ch.close()
connection.close()
raise e
finally:
if cb:
connection.add_callback_threadsafe(cb)
def send_ack(self, ch, tag):
try:
ch.basic_ack(delivery_tag=tag)
            log.info('Message acknowledgement sent')
except Exception as e:
err = f'Failed sending ack'
log.warning(err)
raise ConnectionError(err, e)
def send_nack(self, ch, tag, rq=False):
try:
ch.basic_nack(delivery_tag=tag, requeue=rq)
            log.info('Message negative acknowledgement (nack) sent')
except Exception as e:
err = f'Failed sending nack'
log.warning(err)
raise ConnectionError(err, e)
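if __name__ == '__main__':
    # Hedged usage sketch (illustrative only, not part of the original module).
    # The callback and config values below are assumptions; the config keys and the
    # (body, plugin_name) callback signature mirror what __init__ and handle_message expect above.
    logging.basicConfig(level=logging.DEBUG)
    def process_message(body, plugin_name):
        log.info('Received message for plugin %s: %s', plugin_name, body)
    listener = Listener({'host': 'localhost', 'port': 5672, 'routing_keys': ['message.sqapi']}, process_message)
    listener.start_listener()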
|
a3c_test.py
|
# -*- coding: utf-8 -*-
import pdb
import tensorflow as tf
import os
import threading
import numpy as np
import h5py
import glob
import random
from networks.qa_planner_network import QAPlannerNetwork
from networks.free_space_network import FreeSpaceNetwork
from networks.end_to_end_baseline_network import EndToEndBaselineNetwork
from reinforcement_learning.a3c_testing_thread import A3CTestingThread
from utils import tf_util
from utils import py_util
import constants
np.set_printoptions(threshold=np.inf)
def main():
if constants.OBJECT_DETECTION:
from darknet_object_detection import detector
detector.setup_detectors(constants.PARALLEL_SIZE)
with tf.device('/gpu:' + str(constants.GPU_ID)):
with tf.variable_scope('global_network'):
if constants.END_TO_END_BASELINE:
global_network = EndToEndBaselineNetwork()
else:
global_network = QAPlannerNetwork(constants.RL_GRU_SIZE, 1, 1)
global_network.create_net()
if constants.USE_NAVIGATION_AGENT:
with tf.variable_scope('nav_global_network') as net_scope:
free_space_network = FreeSpaceNetwork(constants.GRU_SIZE, 1, 1)
free_space_network.create_net()
else:
net_scope = None
# prepare session
sess = tf_util.Session()
if constants.PREDICT_DEPTH:
from depth_estimation_network import depth_estimator
with tf.variable_scope('') as depth_scope:
depth_estimator = depth_estimator.get_depth_estimator(sess)
else:
depth_scope = None
sess.run(tf.global_variables_initializer())
# Initialize pretrained weights after init.
if constants.PREDICT_DEPTH:
depth_estimator.load_weights()
testing_threads = []
for i in range(constants.PARALLEL_SIZE):
testing_thread = A3CTestingThread(i, sess, net_scope, depth_scope)
testing_threads.append(testing_thread)
tf_util.restore_from_dir(sess, constants.CHECKPOINT_DIR, True)
if constants.USE_NAVIGATION_AGENT:
print('now trying to restore nav model')
tf_util.restore_from_dir(sess, os.path.join(constants.CHECKPOINT_PREFIX, 'navigation'), True)
sess.graph.finalize()
question_types = constants.USED_QUESTION_TYPES
rows = []
for q_type in question_types:
curr_rows = list(range(len(testing_thread.agent.game_state.test_datasets[q_type])))
rows.extend(list(zip(curr_rows, [q_type] * len(curr_rows))))
#random.seed(999)
if constants.RANDOM_BY_SCENE:
rows = shuffle_by_scene(rows)
else:
random.shuffle(rows)
answers_correct = []
ep_lengths = []
ep_rewards = []
invalid_percents = []
time_lock = threading.Lock()
if not os.path.exists(constants.LOG_FILE):
os.makedirs(constants.LOG_FILE)
out_file = open(constants.LOG_FILE + '/results_' + constants.TEST_SET + '_' + py_util.get_time_str() + '.csv', 'w')
out_file.write(constants.LOG_FILE + '\n')
out_file.write('question_type, answer_correct, answer, gt_answer, episode_length, invalid_action_percent, scene number, seed, required_interaction, union, inter, max, early_stop\n')
#out_file.write('question_type, answer_correct, answer, gt_answer, episode_length, invalid_action_percent, scene number, seed, required_interaction\n')
def test_function(thread_ind):
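        # Descriptive note: each worker thread runs its sync op against the global network,
        # then pops (row, question_type) work items under time_lock until the shared list is empty.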
testing_thread = testing_threads[thread_ind]
sess.run(testing_thread.sync)
#from game_state import QuestionGameState
#if testing_thread.agent.game_state is None:
#testing_thread.agent.game_state = QuestionGameState(sess=sess)
while len(rows) > 0:
time_lock.acquire()
            if len(rows) == 0:
                time_lock.release()
                break
row = rows.pop()
time_lock.release()
answer_correct, answer, gt_answer, ep_length, ep_reward, invalid_percent, scene_num, seed, required_interaction, union, inter, maxc, early_stop = testing_thread.process(row)
#answer_correct, answer, gt_answer, ep_length, ep_reward, invalid_percent, scene_num, seed, required_interaction, early_stop = testing_thread.process(row)
question_type = row[1] + 1
time_lock.acquire()
output_str = ('%d, %d, %d, %d, %d, %f, %d, %d, %d, %d, %d, %d, %d\n' % (question_type, answer_correct, answer, gt_answer, ep_length, invalid_percent, scene_num, seed, required_interaction, union, inter, maxc, early_stop))
#output_str = ('%d, %d, %d, %d, %d, %f, %d, %d, %d, %d\n' % (question_type, answer_correct, answer, gt_answer, ep_length, invalid_percent, scene_num, seed, required_interaction, early_stop))
out_file.write(output_str)
out_file.flush()
answers_correct.append(int(answer_correct))
ep_lengths.append(ep_length)
ep_rewards.append(ep_reward)
invalid_percents.append(invalid_percent)
print('###############################')
print('ep ', row)
print('num episodes', len(answers_correct))
print('average correct', np.mean(answers_correct))
print('invalid percents', np.mean(invalid_percents), np.median(invalid_percents))
print('###############################')
time_lock.release()
test_threads = []
for i in range(constants.PARALLEL_SIZE):
test_threads.append(threading.Thread(target=test_function, args=(i,)))
for t in test_threads:
t.start()
for t in test_threads:
t.join()
out_file.close()
def shuffle_by_scene(rows):
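    # Descriptive note: re-reads the validation question datasets, keeps only questions whose
    # scene number is in constants.USED_SCENE, and sorts/shuffles the (row, question_type) pairs
    # per scene. The "slim" variant (commented out below) would additionally cap each scene at 11 questions.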
#test_data
question_types = ['existence', 'counting', 'contains']
test_datasets = []
for qq,question_type in enumerate(question_types):
prefix = 'questions/'
path = prefix + 'val/' + constants.TEST_SET + '/data' + '_' + question_type
#print('path', path)
data_file = sorted(glob.glob(path + '/*.h5'), key=os.path.getmtime)
if len(data_file) > 0 and qq in constants.USED_QUESTION_TYPES:
            dataset = h5py.File(data_file[-1], 'r')
dataset_np = dataset['questions/question'][...]
dataset.close()
test_dataset = dataset_np
sums = np.sum(np.abs(test_dataset), axis=1)
test_datasets.append(test_dataset[sums > 0])
print('Type', question_type, 'test num_questions', test_datasets[-1].shape)
else:
test_datasets.append([])
rows_np = np.empty((0, 3), int)
for question_row, question_type_ind in rows:
scene_num = test_datasets[question_type_ind][question_row, :][0]
#print ("data: ",question_row, question_type_ind,scene_num)
if scene_num in constants.USED_SCENE:
rows_np = np.concatenate((rows_np, [[question_row, question_type_ind, scene_num]]))
rows_np = rows_np[rows_np[:,2].argsort()]
#print (rows_np)
rows_np_slim = np.array([], int).reshape(0,3)
for i in np.unique(rows_np[:,2]):
mask = np.where(rows_np[:,2] == i)
rows_np[mask] = np.random.permutation(rows_np[mask])
#print ("rows_np mask: ",rows_np[mask].shape)
rows_np_slim = np.vstack([rows_np_slim, rows_np[mask][:11, :]])
#print ("rows: ",rows.shape)
#print ("rows_np: ",rows_np.shape)
#print ("rows_np: ",rows_np[:, :2])
#print ("rows_np_slim: ",rows_np_slim.shape)
#print ("rows_np_slim: ",rows_np_slim[:, :2])
#return rows
return list(rows_np[:, :2])
#return list(rows_np_slim[:, :2])
if __name__ == '__main__':
main()
|
obj.py
|
'''
from math import pi
class Circel(object):
def __init__(self,radius):
self.radius = radius
def getRadius(self):
return self.radius
def setRadius(self,value):
        if not isinstance(value,(int, float) ): # python3 has no long type
raise ValueError('wrong type')
self.radius = float(value)
def getArea(self):
return self.radius ** 2 * pi
R = property(getRadius,setRadius)
c = Circel(2)
print(c.getArea())
#c.radius = 'asf'
#c.setRadius('asf')
c.R = 2 # assigning through the property achieves the setter effect
print(c.getArea())
'''
'''
# Reduce the memory footprint of class instances
import sys
class stu1(object):
"""docstring for stu1"""
def __init__(self, name):
super(stu1, self).__init__()
self.name = name
class stu2(object):
    # After declaring the attributes with __slots__, new attributes can no longer be added dynamically
__slots__ = ['name']
"""docstring for stu2"""
def __init__(self, name):
super(stu2, self).__init__()
self.name = name
s1 = stu1('allen')
print(sys.getsizeof(s1))
s1.age = 18
print(s1.__dict__)
s2 = stu2('allen')
print(sys.getsizeof(s2))
#s2.age = 18 # this would raise an AttributeError
'''
# Comparing objects ("bigger"/"smaller") inside a class
'''
class Rectangle(object):
def __init__(self,w,h):
self.w = w
self.h = h
def area(self):
return self.w * self.h
def __lt__(self,obj):
return self.area() < obj.area()
def __gt__(self,obj):
return self.area() > obj.area()
r1 = Rectangle(2,3)
r2 = Rectangle(4,5)
print(r1 < r2)
# This is really a call to r1.__lt__(r2), with r2 passed in as the argument.
# To compare with ">" as well, implement the corresponding method (__gt__).
print(r1 > r2)
'''
#=============
'''
from functools import total_ordering
from math import pi
from abc import ABCMeta,abstractmethod
@total_ordering
class Shape(object,metaclass = ABCMeta):
@abstractmethod
def area(self,obj):
pass
def __lt__(self,obj):
return self.area() < obj.area()
def __eq__(self,obj):
return self.area() == obj.area()
def __gt__(self,obj):
return self.area() > obj.area()
class Rectangle(Shape):
def __init__(self,w,h):
self.w = w
self.h = h
def area(self):
#print('Rectangle area count')
return self.w * self.h
class Circle(Shape):
def __init__(self,r):
self.r = r
def area(self):
#print('Circle area count')
return self.r ** 2 * pi
r1 = Rectangle(2,3)
r2 = Rectangle(4,5)
#print(r1 < r2)
#print(r1 == r2)
c1 = Circle(3)
print(r1 < c1 )
print(c1 >= r2 )
'''
#metaclass test
#function
'''
def upper_attr(future_class_name,future_class_parents,future_class_attr):
uppercase_attr = {}
for name,val in future_class_attr.items():
if not name.startswith('__'):
uppercase_attr[name.upper()] = val
else:
uppercase_attr[name] = val
return type(future_class_name,future_class_parents,uppercase_attr)
class Foo(object,metaclass=upper_attr):
__name__ = 'GG'
bar = 'bip'
print(hasattr(Foo,'bar'))
print(hasattr(Foo,'BAR'))
print(hasattr(Foo,'__name__'))
'''
'''
class UpperAttrMetaclass(type):
def __new__(cls, clsname, bases, dct):
uppercase_attr = {}
for name, val in dct.items():
if not name.startswith('__'):
uppercase_attr[name.upper()] = val
else:
uppercase_attr[name] = val
return super(UpperAttrMetaclass, cls).__new__(cls, clsname, bases, uppercase_attr)
class Foo(object,metaclass=UpperAttrMetaclass):
__name__ = 'GG'
bar = 'bip'
print(hasattr(Foo,'bar'))
print(hasattr(Foo,'BAR'))
print(hasattr(Foo,'__name__'))
print(Foo.__class__)
print(Foo.__class__.__class__)
'''
# Multithreading
# Python threads are limited by the GIL (Global Interpreter Lock): they are a poor fit
# for CPU-bound work, but are fine for IO-bound work.
# Two ways to create threads:
# 1. Use Thread(target=handle, args=(x,)) directly
# 2. Subclass Thread, override run(), and put your own logic inside run()
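# Method 1 is shown in the commented-out block below; method 2 (subclassing Thread) is the active example at the end of this file.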
from threading import Thread
from time import sleep
# This is a CPU-intensive style operation, so the benefit from threading is somewhat limited
def handle(name):
sleep(1)
print('hello ',name,"\n")
'''
threads = []
for x in range(1,10):
t = Thread(target = handle,args=(x,))
threads.append(t)
t.start()
for t in threads:
t.join()
'''
class MyThread(Thread):
def __init__(self, arg):
super(MyThread,self).__init__()
#Thread.__init__(self)
self.arg = arg
def run(self):
handle(self.arg)
threads = []
for x in range(1,10):
t = MyThread(x)
threads.append(t)
t.start()
for t in threads:
t.join()
|
test_debug.py
|
import importlib
import inspect
import os
import re
import sys
import tempfile
import threading
from io import StringIO
from pathlib import Path
from unittest import mock
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db import DatabaseError, connection
from django.http import Http404
from django.shortcuts import render
from django.template import TemplateDoesNotExist
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import LoggingCaptureMixin
from django.urls import path, reverse
from django.urls.converters import IntConverter
from django.utils.functional import SimpleLazyObject
from django.utils.regex_helper import _lazy_re_compile
from django.utils.safestring import mark_safe
from django.views.debug import (
CallableSettingWrapper, ExceptionCycleWarning, ExceptionReporter,
Path as DebugPath, SafeExceptionReporterFilter, default_urlconf,
get_default_exception_reporter_filter, technical_404_response,
technical_500_response,
)
from django.views.decorators.debug import (
sensitive_post_parameters, sensitive_variables,
)
from ..views import (
custom_exception_reporter_filter_view, index_page,
multivalue_dict_key_error, non_sensitive_view, paranoid_view,
sensitive_args_function_caller, sensitive_kwargs_function_caller,
sensitive_method_view, sensitive_view,
)
class User:
def __str__(self):
return 'jacob'
class WithoutEmptyPathUrls:
urlpatterns = [path('url/', index_page, name='url')]
class CallableSettingWrapperTests(SimpleTestCase):
""" Unittests for CallableSettingWrapper
"""
def test_repr(self):
class WrappedCallable:
def __repr__(self):
return "repr from the wrapped callable"
def __call__(self):
pass
actual = repr(CallableSettingWrapper(WrappedCallable()))
self.assertEqual(actual, "repr from the wrapped callable")
@override_settings(DEBUG=True, ROOT_URLCONF='view_tests.urls')
class DebugViewTests(SimpleTestCase):
def test_files(self):
with self.assertLogs('django.request', 'ERROR'):
response = self.client.get('/raises/')
self.assertEqual(response.status_code, 500)
data = {
'file_data.txt': SimpleUploadedFile('file_data.txt', b'haha'),
}
with self.assertLogs('django.request', 'ERROR'):
response = self.client.post('/raises/', data)
self.assertContains(response, 'file_data.txt', status_code=500)
self.assertNotContains(response, 'haha', status_code=500)
def test_400(self):
# When DEBUG=True, technical_500_template() is called.
with self.assertLogs('django.security', 'WARNING'):
response = self.client.get('/raises400/')
self.assertContains(response, '<div class="context" id="', status_code=400)
def test_400_bad_request(self):
# When DEBUG=True, technical_500_template() is called.
with self.assertLogs('django.request', 'WARNING') as cm:
response = self.client.get('/raises400_bad_request/')
self.assertContains(response, '<div class="context" id="', status_code=400)
self.assertEqual(
cm.records[0].getMessage(),
'Malformed request syntax: /raises400_bad_request/',
)
# Ensure no 403.html template exists to test the default case.
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
}])
def test_403(self):
response = self.client.get('/raises403/')
self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)
# Set up a test 403.html template.
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {
'loaders': [
('django.template.loaders.locmem.Loader', {
'403.html': 'This is a test template for a 403 error ({{ exception }}).',
}),
],
},
}])
def test_403_template(self):
response = self.client.get('/raises403/')
self.assertContains(response, 'test template', status_code=403)
self.assertContains(response, '(Insufficient Permissions).', status_code=403)
def test_404(self):
response = self.client.get('/raises404/')
self.assertEqual(response.status_code, 404)
self.assertContains(
response,
'<p>The current path, <code>not-in-urls</code>, didn’t match any '
'of these.</p>',
status_code=404,
html=True,
)
def test_404_not_in_urls(self):
response = self.client.get('/not-in-urls')
self.assertNotContains(response, "Raised by:", status_code=404)
self.assertContains(response, "Django tried these URL patterns", status_code=404)
self.assertContains(
response,
'<p>The current path, <code>not-in-urls</code>, didn’t match any '
'of these.</p>',
status_code=404,
html=True,
)
# Pattern and view name of a RegexURLPattern appear.
self.assertContains(response, r"^regex-post/(?P<pk>[0-9]+)/$", status_code=404)
self.assertContains(response, "[name='regex-post']", status_code=404)
# Pattern and view name of a RoutePattern appear.
self.assertContains(response, r"path-post/<int:pk>/", status_code=404)
self.assertContains(response, "[name='path-post']", status_code=404)
@override_settings(ROOT_URLCONF=WithoutEmptyPathUrls)
def test_404_empty_path_not_in_urls(self):
response = self.client.get('/')
self.assertContains(
response,
'<p>The empty path didn’t match any of these.</p>',
status_code=404,
html=True,
)
def test_technical_404(self):
response = self.client.get('/technical404/')
self.assertContains(response, "Raised by:", status_code=404)
self.assertContains(response, "view_tests.views.technical404", status_code=404)
self.assertContains(
response,
'<p>The current path, <code>technical404/</code>, matched the '
'last one.</p>',
status_code=404,
html=True,
)
def test_classbased_technical_404(self):
response = self.client.get('/classbased404/')
self.assertContains(response, "Raised by:", status_code=404)
self.assertContains(response, "view_tests.views.Http404View", status_code=404)
def test_non_l10ned_numeric_ids(self):
"""
        Numeric IDs and line numbers in fancy traceback context blocks shouldn't be localized.
"""
with self.settings(DEBUG=True, USE_L10N=True):
with self.assertLogs('django.request', 'ERROR'):
response = self.client.get('/raises500/')
            # We look for an HTML fragment of the form
# '<div class="context" id="c38123208">', not '<div class="context" id="c38,123,208"'
self.assertContains(response, '<div class="context" id="', status_code=500)
match = re.search(b'<div class="context" id="(?P<id>[^"]+)">', response.content)
self.assertIsNotNone(match)
id_repr = match['id']
self.assertFalse(
re.search(b'[^c0-9]', id_repr),
"Numeric IDs in debug response HTML page shouldn't be localized (value: %s)." % id_repr.decode()
)
def test_template_exceptions(self):
with self.assertLogs('django.request', 'ERROR'):
try:
self.client.get(reverse('template_exception'))
except Exception:
raising_loc = inspect.trace()[-1][-2][0].strip()
self.assertNotEqual(
raising_loc.find("raise Exception('boom')"), -1,
"Failed to find 'raise Exception' in last frame of "
"traceback, instead found: %s" % raising_loc
)
def test_template_loader_postmortem(self):
"""Tests for not existing file"""
template_name = "notfound.html"
with tempfile.NamedTemporaryFile(prefix=template_name) as tmpfile:
tempdir = os.path.dirname(tmpfile.name)
template_path = os.path.join(tempdir, template_name)
with override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [tempdir],
}]), self.assertLogs('django.request', 'ERROR'):
response = self.client.get(reverse('raises_template_does_not_exist', kwargs={"path": template_name}))
self.assertContains(response, "%s (Source does not exist)" % template_path, status_code=500, count=2)
# Assert as HTML.
self.assertContains(
response,
'<li><code>django.template.loaders.filesystem.Loader</code>: '
'%s (Source does not exist)</li>' % os.path.join(tempdir, 'notfound.html'),
status_code=500,
html=True,
)
def test_no_template_source_loaders(self):
"""
Make sure if you don't specify a template, the debug view doesn't blow up.
"""
with self.assertLogs('django.request', 'ERROR'):
with self.assertRaises(TemplateDoesNotExist):
self.client.get('/render_no_template/')
@override_settings(ROOT_URLCONF='view_tests.default_urls')
def test_default_urlconf_template(self):
"""
Make sure that the default URLconf template is shown instead of the
technical 404 page, if the user has not altered their URLconf yet.
"""
response = self.client.get('/')
self.assertContains(
response,
"<h2>The install worked successfully! Congratulations!</h2>"
)
@override_settings(ROOT_URLCONF='view_tests.regression_21530_urls')
def test_regression_21530(self):
"""
Regression test for bug #21530.
If the admin app include is replaced with exactly one url
pattern, then the technical 404 template should be displayed.
The bug here was that an AttributeError caused a 500 response.
"""
response = self.client.get('/')
self.assertContains(
response,
"Page not found <span>(404)</span>",
status_code=404
)
def test_template_encoding(self):
"""
The templates are loaded directly, not via a template loader, and
should be opened as utf-8 charset as is the default specified on
template engines.
"""
with mock.patch.object(DebugPath, 'open') as m:
default_urlconf(None)
m.assert_called_once_with(encoding='utf-8')
m.reset_mock()
technical_404_response(mock.MagicMock(), mock.Mock())
m.assert_called_once_with(encoding='utf-8')
def test_technical_404_converter_raise_404(self):
with mock.patch.object(IntConverter, 'to_python', side_effect=Http404):
response = self.client.get('/path-post/1/')
self.assertContains(response, 'Page not found', status_code=404)
def test_exception_reporter_from_request(self):
with self.assertLogs('django.request', 'ERROR'):
response = self.client.get('/custom_reporter_class_view/')
self.assertContains(response, 'custom traceback text', status_code=500)
@override_settings(DEFAULT_EXCEPTION_REPORTER='view_tests.views.CustomExceptionReporter')
def test_exception_reporter_from_settings(self):
with self.assertLogs('django.request', 'ERROR'):
response = self.client.get('/raises500/')
self.assertContains(response, 'custom traceback text', status_code=500)
class DebugViewQueriesAllowedTests(SimpleTestCase):
# May need a query to initialize MySQL connection
databases = {'default'}
def test_handle_db_exception(self):
"""
Ensure the debug view works when a database exception is raised by
performing an invalid query and passing the exception to the debug view.
"""
with connection.cursor() as cursor:
try:
cursor.execute('INVALID SQL')
except DatabaseError:
exc_info = sys.exc_info()
rf = RequestFactory()
response = technical_500_response(rf.get('/'), *exc_info)
self.assertContains(response, 'OperationalError at /', status_code=500)
@override_settings(
DEBUG=True,
ROOT_URLCONF='view_tests.urls',
# No template directories are configured, so no templates will be found.
TEMPLATES=[{
'BACKEND': 'django.template.backends.dummy.TemplateStrings',
}],
)
class NonDjangoTemplatesDebugViewTests(SimpleTestCase):
def test_400(self):
# When DEBUG=True, technical_500_template() is called.
with self.assertLogs('django.security', 'WARNING'):
response = self.client.get('/raises400/')
self.assertContains(response, '<div class="context" id="', status_code=400)
def test_400_bad_request(self):
# When DEBUG=True, technical_500_template() is called.
with self.assertLogs('django.request', 'WARNING') as cm:
response = self.client.get('/raises400_bad_request/')
self.assertContains(response, '<div class="context" id="', status_code=400)
self.assertEqual(
cm.records[0].getMessage(),
'Malformed request syntax: /raises400_bad_request/',
)
def test_403(self):
response = self.client.get('/raises403/')
self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)
def test_404(self):
response = self.client.get('/raises404/')
self.assertEqual(response.status_code, 404)
def test_template_not_found_error(self):
# Raises a TemplateDoesNotExist exception and shows the debug view.
url = reverse('raises_template_does_not_exist', kwargs={"path": "notfound.html"})
with self.assertLogs('django.request', 'ERROR'):
response = self.client.get(url)
self.assertContains(response, '<div class="context" id="', status_code=500)
class ExceptionReporterTests(SimpleTestCase):
rf = RequestFactory()
def test_request_and_exception(self):
"A simple exception report can be generated"
try:
request = self.rf.get('/test_view/')
request.user = User()
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>ValueError at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">Can't find my keys</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertIn('<h3 id="user-info">USER</h3>', html)
self.assertIn('<p>jacob</p>', html)
self.assertIn('<th>Exception Type:</th>', html)
self.assertIn('<th>Exception Value:</th>', html)
self.assertIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
self.assertIn('<p>No POST data</p>', html)
def test_no_request(self):
"An exception report can be generated without request"
try:
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>ValueError</h1>', html)
self.assertIn('<pre class="exception_value">Can't find my keys</pre>', html)
self.assertNotIn('<th>Request Method:</th>', html)
self.assertNotIn('<th>Request URL:</th>', html)
self.assertNotIn('<h3 id="user-info">USER</h3>', html)
self.assertIn('<th>Exception Type:</th>', html)
self.assertIn('<th>Exception Value:</th>', html)
self.assertIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertIn('<p>Request data not supplied</p>', html)
def test_sharing_traceback(self):
try:
raise ValueError('Oops')
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn(
'<form action="https://dpaste.com/" name="pasteform" '
'id="pasteform" method="post">',
html,
)
def test_eol_support(self):
"""The ExceptionReporter supports Unix, Windows and Macintosh EOL markers"""
LINES = ['print %d' % i for i in range(1, 6)]
reporter = ExceptionReporter(None, None, None, None)
for newline in ['\n', '\r\n', '\r']:
fd, filename = tempfile.mkstemp(text=False)
os.write(fd, (newline.join(LINES) + newline).encode())
os.close(fd)
try:
self.assertEqual(
reporter._get_lines_from_file(filename, 3, 2),
(1, LINES[1:3], LINES[3], LINES[4:])
)
finally:
os.unlink(filename)
def test_no_exception(self):
"An exception report can be generated for just a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>Report at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">No exception message supplied</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_suppressed_context(self):
try:
try:
raise RuntimeError("Can't find my keys")
except RuntimeError:
raise ValueError("Can't find my keys") from None
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>ValueError</h1>', html)
self.assertIn('<pre class="exception_value">Can't find my keys</pre>', html)
self.assertIn('<th>Exception Type:</th>', html)
self.assertIn('<th>Exception Value:</th>', html)
self.assertIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertIn('<p>Request data not supplied</p>', html)
self.assertNotIn('During handling of the above exception', html)
def test_innermost_exception_without_traceback(self):
try:
try:
raise RuntimeError('Oops')
except Exception as exc:
new_exc = RuntimeError('My context')
exc.__context__ = new_exc
raise
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
frames = reporter.get_traceback_frames()
self.assertEqual(len(frames), 1)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>RuntimeError</h1>', html)
self.assertIn('<pre class="exception_value">Oops</pre>', html)
self.assertIn('<th>Exception Type:</th>', html)
self.assertIn('<th>Exception Value:</th>', html)
self.assertIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertIn('<p>Request data not supplied</p>', html)
self.assertIn(
'During handling of the above exception (My context), another '
'exception occurred',
html,
)
def test_reporting_of_nested_exceptions(self):
request = self.rf.get('/test_view/')
try:
try:
raise AttributeError(mark_safe('<p>Top level</p>'))
except AttributeError as explicit:
try:
raise ValueError(mark_safe('<p>Second exception</p>')) from explicit
except ValueError:
raise IndexError(mark_safe('<p>Final exception</p>'))
except Exception:
# Custom exception handler, just pass it into ExceptionReporter
exc_type, exc_value, tb = sys.exc_info()
explicit_exc = 'The above exception ({0}) was the direct cause of the following exception:'
implicit_exc = 'During handling of the above exception ({0}), another exception occurred:'
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
# Both messages are twice on page -- one rendered as html,
# one as plain text (for pastebin)
            self.assertEqual(2, html.count(explicit_exc.format('&lt;p&gt;Top level&lt;/p&gt;')))
            self.assertEqual(2, html.count(implicit_exc.format('&lt;p&gt;Second exception&lt;/p&gt;')))
            self.assertEqual(10, html.count('&lt;p&gt;Final exception&lt;/p&gt;'))
text = reporter.get_traceback_text()
self.assertIn(explicit_exc.format('<p>Top level</p>'), text)
self.assertIn(implicit_exc.format('<p>Second exception</p>'), text)
self.assertEqual(3, text.count('<p>Final exception</p>'))
def test_reporting_frames_without_source(self):
try:
source = "def funcName():\n raise Error('Whoops')\nfuncName()"
namespace = {}
code = compile(source, 'generated', 'exec')
exec(code, namespace)
except Exception:
exc_type, exc_value, tb = sys.exc_info()
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
frames = reporter.get_traceback_frames()
last_frame = frames[-1]
self.assertEqual(last_frame['context_line'], '<source code not available>')
self.assertEqual(last_frame['filename'], 'generated')
self.assertEqual(last_frame['function'], 'funcName')
self.assertEqual(last_frame['lineno'], 2)
html = reporter.get_traceback_html()
self.assertIn(
'<span class="fname">generated</span>, line 2, in funcName',
html,
)
self.assertIn(
'<code class="fname">generated</code>, line 2, in funcName',
html,
)
self.assertIn(
'"generated", line 2, in funcName\n'
' <source code not available>',
html,
)
text = reporter.get_traceback_text()
self.assertIn(
'"generated", line 2, in funcName\n'
' <source code not available>',
text,
)
def test_reporting_frames_source_not_match(self):
try:
source = "def funcName():\n raise Error('Whoops')\nfuncName()"
namespace = {}
code = compile(source, 'generated', 'exec')
exec(code, namespace)
except Exception:
exc_type, exc_value, tb = sys.exc_info()
with mock.patch(
'django.views.debug.ExceptionReporter._get_source',
return_value=['wrong source'],
):
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
frames = reporter.get_traceback_frames()
last_frame = frames[-1]
self.assertEqual(last_frame['context_line'], '<source code not available>')
self.assertEqual(last_frame['filename'], 'generated')
self.assertEqual(last_frame['function'], 'funcName')
self.assertEqual(last_frame['lineno'], 2)
html = reporter.get_traceback_html()
self.assertIn(
'<span class="fname">generated</span>, line 2, in funcName',
html,
)
self.assertIn(
'<code class="fname">generated</code>, line 2, in funcName',
html,
)
self.assertIn(
'"generated", line 2, in funcName\n'
' <source code not available>',
html,
)
text = reporter.get_traceback_text()
self.assertIn(
'"generated", line 2, in funcName\n'
' <source code not available>',
text,
)
def test_reporting_frames_for_cyclic_reference(self):
try:
def test_func():
try:
raise RuntimeError('outer') from RuntimeError('inner')
except RuntimeError as exc:
raise exc.__cause__
test_func()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
def generate_traceback_frames(*args, **kwargs):
nonlocal tb_frames
tb_frames = reporter.get_traceback_frames()
tb_frames = None
tb_generator = threading.Thread(target=generate_traceback_frames, daemon=True)
msg = (
"Cycle in the exception chain detected: exception 'inner' "
"encountered again."
)
with self.assertWarnsMessage(ExceptionCycleWarning, msg):
tb_generator.start()
tb_generator.join(timeout=5)
if tb_generator.is_alive():
# tb_generator is a daemon that runs until the main thread/process
# exits. This is resource heavy when running the full test suite.
# Setting the following values to None makes
# reporter.get_traceback_frames() exit early.
exc_value.__traceback__ = exc_value.__context__ = exc_value.__cause__ = None
tb_generator.join()
self.fail('Cyclic reference in Exception Reporter.get_traceback_frames()')
if tb_frames is None:
# can happen if the thread generating traceback got killed
# or exception while generating the traceback
self.fail('Traceback generation failed')
last_frame = tb_frames[-1]
self.assertIn('raise exc.__cause__', last_frame['context_line'])
self.assertEqual(last_frame['filename'], __file__)
self.assertEqual(last_frame['function'], 'test_func')
def test_request_and_message(self):
"A message can be provided in addition to a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>Report at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">I'm a little teapot</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_message_only(self):
reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>Report</h1>', html)
self.assertIn('<pre class="exception_value">I'm a little teapot</pre>', html)
self.assertNotIn('<th>Request Method:</th>', html)
self.assertNotIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertIn('<p>Request data not supplied</p>', html)
def test_non_utf8_values_handling(self):
"Non-UTF-8 exceptions/values should not make the output generation choke."
try:
class NonUtf8Output(Exception):
def __repr__(self):
return b'EXC\xe9EXC'
somevar = b'VAL\xe9VAL' # NOQA
raise NonUtf8Output()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('VAL\\xe9VAL', html)
self.assertIn('EXC\\xe9EXC', html)
def test_local_variable_escaping(self):
"""Safe strings in local variables are escaped."""
try:
local = mark_safe('<p>Local variable</p>')
raise ValueError(local)
except Exception:
exc_type, exc_value, tb = sys.exc_info()
html = ExceptionReporter(None, exc_type, exc_value, tb).get_traceback_html()
self.assertIn('<td class="code"><pre>'<p>Local variable</p>'</pre></td>', html)
def test_unprintable_values_handling(self):
"Unprintable values should not make the output generation choke."
try:
class OomOutput:
def __repr__(self):
raise MemoryError('OOM')
oomvalue = OomOutput() # NOQA
raise ValueError()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<td class="code"><pre>Error in formatting', html)
def test_too_large_values_handling(self):
"Large values should not create a large HTML."
large = 256 * 1024
repr_of_str_adds = len(repr(''))
try:
class LargeOutput:
def __repr__(self):
return repr('A' * large)
largevalue = LargeOutput() # NOQA
raise ValueError()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertEqual(len(html) // 1024 // 128, 0) # still fit in 128Kb
self.assertIn('<trimmed %d bytes string>' % (large + repr_of_str_adds,), html)
def test_encoding_error(self):
"""
A UnicodeError displays a portion of the problematic string. HTML in
safe strings is escaped.
"""
try:
mark_safe('abcdefghijkl<p>mnὀp</p>qrstuwxyz').encode('ascii')
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<h2>Unicode error hint</h2>', html)
self.assertIn('The string that could not be encoded/decoded was: ', html)
            self.assertIn('<strong>&lt;p&gt;mnὀp&lt;/p&gt;</strong>', html)
def test_unfrozen_importlib(self):
"""
importlib is not a frozen app, but its loader thinks it's frozen which
results in an ImportError. Refs #21443.
"""
try:
request = self.rf.get('/test_view/')
importlib.import_module('abc.def.invalid.name')
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>ModuleNotFoundError at /test_view/</h1>', html)
def test_ignore_traceback_evaluation_exceptions(self):
"""
Don't trip over exceptions generated by crafted objects when
evaluating them while cleansing (#24455).
"""
class BrokenEvaluation(Exception):
pass
def broken_setup():
raise BrokenEvaluation
request = self.rf.get('/test_view/')
broken_lazy = SimpleLazyObject(broken_setup)
try:
bool(broken_lazy)
except BrokenEvaluation:
exc_type, exc_value, tb = sys.exc_info()
self.assertIn(
"BrokenEvaluation",
ExceptionReporter(request, exc_type, exc_value, tb).get_traceback_html(),
"Evaluation exception reason not mentioned in traceback"
)
@override_settings(ALLOWED_HOSTS='example.com')
def test_disallowed_host(self):
"An exception report can be generated even for a disallowed host."
request = self.rf.get('/', HTTP_HOST='evil.com')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertIn("http://evil.com/", html)
def test_request_with_items_key(self):
"""
An exception report can be generated for requests with 'items' in
request GET, POST, FILES, or COOKIES QueryDicts.
"""
value = '<td>items</td><td class="code"><pre>'Oops'</pre></td>'
# GET
request = self.rf.get('/test_view/?items=Oops')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(value, html)
# POST
request = self.rf.post('/test_view/', data={'items': 'Oops'})
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(value, html)
# FILES
fp = StringIO('filecontent')
request = self.rf.post('/test_view/', data={'name': 'filename', 'items': fp})
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(
'<td>items</td><td class="code"><pre><InMemoryUploadedFile: '
'items (application/octet-stream)></pre></td>',
html
)
# COOKIES
rf = RequestFactory()
rf.cookies['items'] = 'Oops'
request = rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
        self.assertInHTML('<td>items</td><td class="code"><pre>&#x27;Oops&#x27;</pre></td>', html)
def test_exception_fetching_user(self):
"""
The error page can be rendered if the current user can't be retrieved
(such as when the database is unavailable).
"""
class ExceptionUser:
def __str__(self):
raise Exception()
request = self.rf.get('/test_view/')
request.user = ExceptionUser()
try:
raise ValueError('Oops')
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>ValueError at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">Oops</pre>', html)
self.assertIn('<h3 id="user-info">USER</h3>', html)
self.assertIn('<p>[unable to retrieve the current user]</p>', html)
text = reporter.get_traceback_text()
self.assertIn('USER: [unable to retrieve the current user]', text)
def test_template_encoding(self):
"""
The templates are loaded directly, not via a template loader, and
should be opened as utf-8 charset as is the default specified on
template engines.
"""
reporter = ExceptionReporter(None, None, None, None)
with mock.patch.object(DebugPath, 'open') as m:
reporter.get_traceback_html()
m.assert_called_once_with(encoding='utf-8')
m.reset_mock()
reporter.get_traceback_text()
m.assert_called_once_with(encoding='utf-8')
class PlainTextReportTests(SimpleTestCase):
rf = RequestFactory()
def test_request_and_exception(self):
"A simple exception report can be generated"
try:
request = self.rf.get('/test_view/')
request.user = User()
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
self.assertIn('ValueError at /test_view/', text)
self.assertIn("Can't find my keys", text)
self.assertIn('Request Method:', text)
self.assertIn('Request URL:', text)
self.assertIn('USER: jacob', text)
self.assertIn('Exception Type:', text)
self.assertIn('Exception Value:', text)
self.assertIn('Traceback (most recent call last):', text)
self.assertIn('Request information:', text)
self.assertNotIn('Request data not supplied', text)
def test_no_request(self):
"An exception report can be generated without request"
try:
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
self.assertIn('ValueError', text)
self.assertIn("Can't find my keys", text)
self.assertNotIn('Request Method:', text)
self.assertNotIn('Request URL:', text)
self.assertNotIn('USER:', text)
self.assertIn('Exception Type:', text)
self.assertIn('Exception Value:', text)
self.assertIn('Traceback (most recent call last):', text)
self.assertIn('Request data not supplied', text)
def test_no_exception(self):
"An exception report can be generated for just a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
reporter.get_traceback_text()
def test_request_and_message(self):
"A message can be provided in addition to a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
reporter.get_traceback_text()
@override_settings(DEBUG=True)
def test_template_exception(self):
request = self.rf.get('/test_view/')
try:
render(request, 'debug/template_error.html')
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
templ_path = Path(Path(__file__).parents[1], 'templates', 'debug', 'template_error.html')
self.assertIn(
'Template error:\n'
'In template %(path)s, error at line 2\n'
' \'cycle\' tag requires at least two arguments\n'
' 1 : Template with error:\n'
' 2 : {%% cycle %%} \n'
' 3 : ' % {'path': templ_path},
text
)
def test_request_with_items_key(self):
"""
An exception report can be generated for requests with 'items' in
request GET, POST, FILES, or COOKIES QueryDicts.
"""
# GET
request = self.rf.get('/test_view/?items=Oops')
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("items = 'Oops'", text)
# POST
request = self.rf.post('/test_view/', data={'items': 'Oops'})
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("items = 'Oops'", text)
# FILES
fp = StringIO('filecontent')
request = self.rf.post('/test_view/', data={'name': 'filename', 'items': fp})
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn('items = <InMemoryUploadedFile:', text)
# COOKIES
rf = RequestFactory()
rf.cookies['items'] = 'Oops'
request = rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("items = 'Oops'", text)
def test_message_only(self):
reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
reporter.get_traceback_text()
@override_settings(ALLOWED_HOSTS='example.com')
def test_disallowed_host(self):
"An exception report can be generated even for a disallowed host."
request = self.rf.get('/', HTTP_HOST='evil.com')
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("http://evil.com/", text)
class ExceptionReportTestMixin:
# Mixin used in the ExceptionReporterFilterTests and
# AjaxResponseExceptionReporterFilter tests below
breakfast_data = {
'sausage-key': 'sausage-value',
'baked-beans-key': 'baked-beans-value',
'hash-brown-key': 'hash-brown-value',
'bacon-key': 'bacon-value',
}
def verify_unsafe_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
        Asserts that potentially sensitive info is displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# All variables are shown.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertContains(response, 'scrambled', status_code=500)
self.assertContains(response, 'sauce', status_code=500)
self.assertContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters are shown.
self.assertContains(response, k, status_code=500)
self.assertContains(response, v, status_code=500)
def verify_safe_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
        Asserts that certain sensitive info is not displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# Non-sensitive variable's name and value are shown.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertContains(response, 'scrambled', status_code=500)
# Sensitive variable's name is shown but not its value.
self.assertContains(response, 'sauce', status_code=500)
self.assertNotContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k in self.breakfast_data:
# All POST parameters' names are shown.
self.assertContains(response, k, status_code=500)
# Non-sensitive POST parameters' values are shown.
self.assertContains(response, 'baked-beans-value', status_code=500)
self.assertContains(response, 'hash-brown-value', status_code=500)
# Sensitive POST parameters' values are not shown.
self.assertNotContains(response, 'sausage-value', status_code=500)
self.assertNotContains(response, 'bacon-value', status_code=500)
def verify_paranoid_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
Asserts that no variables or POST parameters are displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# Show variable names but not their values.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertNotContains(response, 'scrambled', status_code=500)
self.assertContains(response, 'sauce', status_code=500)
self.assertNotContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertContains(response, k, status_code=500)
# No POST parameters' values are shown.
self.assertNotContains(response, v, status_code=500)
def verify_unsafe_email(self, view, check_for_POST_params=True):
"""
        Asserts that potentially sensitive info is displayed in the email report.
"""
with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
# Frames vars are never shown in plain text email reports.
body_plain = str(email.body)
self.assertNotIn('cooked_eggs', body_plain)
self.assertNotIn('scrambled', body_plain)
self.assertNotIn('sauce', body_plain)
self.assertNotIn('worcestershire', body_plain)
# Frames vars are shown in html email reports.
body_html = str(email.alternatives[0][0])
self.assertIn('cooked_eggs', body_html)
self.assertIn('scrambled', body_html)
self.assertIn('sauce', body_html)
self.assertIn('worcestershire', body_html)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters are shown.
self.assertIn(k, body_plain)
self.assertIn(v, body_plain)
self.assertIn(k, body_html)
self.assertIn(v, body_html)
def verify_safe_email(self, view, check_for_POST_params=True):
"""
Asserts that certain sensitive info is not displayed in the email report.
"""
with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
# Frame vars are never shown in plain text email reports.
body_plain = str(email.body)
self.assertNotIn('cooked_eggs', body_plain)
self.assertNotIn('scrambled', body_plain)
self.assertNotIn('sauce', body_plain)
self.assertNotIn('worcestershire', body_plain)
# Frame vars are shown in HTML email reports.
body_html = str(email.alternatives[0][0])
self.assertIn('cooked_eggs', body_html)
self.assertIn('scrambled', body_html)
self.assertIn('sauce', body_html)
self.assertNotIn('worcestershire', body_html)
if check_for_POST_params:
for k in self.breakfast_data:
# All POST parameters' names are shown.
self.assertIn(k, body_plain)
# Non-sensitive POST parameters' values are shown.
self.assertIn('baked-beans-value', body_plain)
self.assertIn('hash-brown-value', body_plain)
self.assertIn('baked-beans-value', body_html)
self.assertIn('hash-brown-value', body_html)
# Sensitive POST parameters' values are not shown.
self.assertNotIn('sausage-value', body_plain)
self.assertNotIn('bacon-value', body_plain)
self.assertNotIn('sausage-value', body_html)
self.assertNotIn('bacon-value', body_html)
def verify_paranoid_email(self, view):
"""
Asserts that no variables or POST parameters are displayed in the email report.
"""
with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
# Frame vars are never shown in plain text email reports.
body = str(email.body)
self.assertNotIn('cooked_eggs', body)
self.assertNotIn('scrambled', body)
self.assertNotIn('sauce', body)
self.assertNotIn('worcestershire', body)
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertIn(k, body)
# No POST parameters' values are shown.
self.assertNotIn(v, body)
@override_settings(ROOT_URLCONF='view_tests.urls')
class ExceptionReporterFilterTests(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase):
"""
Sensitive information can be filtered out of error reports (#14614).
"""
rf = RequestFactory()
def test_non_sensitive_request(self):
"""
Everything (request info and frame variables) can be seen
in the default error reports for non-sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(non_sensitive_view)
self.verify_unsafe_email(non_sensitive_view)
with self.settings(DEBUG=False):
self.verify_unsafe_response(non_sensitive_view)
self.verify_unsafe_email(non_sensitive_view)
def test_sensitive_request(self):
"""
Sensitive POST parameters and frame variables cannot be
seen in the default error reports for sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_view)
self.verify_unsafe_email(sensitive_view)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_view)
self.verify_safe_email(sensitive_view)
def test_paranoid_request(self):
"""
No POST parameters and frame variables can be seen in the
default error reports for "paranoid" requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(paranoid_view)
self.verify_unsafe_email(paranoid_view)
with self.settings(DEBUG=False):
self.verify_paranoid_response(paranoid_view)
self.verify_paranoid_email(paranoid_view)
def test_multivalue_dict_key_error(self):
"""
#21098 -- Sensitive POST parameters cannot be seen in the
error reports if request.POST['nonexistent_key'] raises an error.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(multivalue_dict_key_error)
self.verify_unsafe_email(multivalue_dict_key_error)
with self.settings(DEBUG=False):
self.verify_safe_response(multivalue_dict_key_error)
self.verify_safe_email(multivalue_dict_key_error)
def test_custom_exception_reporter_filter(self):
"""
It's possible to assign an exception reporter filter to
the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(custom_exception_reporter_filter_view)
self.verify_unsafe_email(custom_exception_reporter_filter_view)
with self.settings(DEBUG=False):
self.verify_unsafe_response(custom_exception_reporter_filter_view)
self.verify_unsafe_email(custom_exception_reporter_filter_view)
def test_sensitive_method(self):
"""
The sensitive_variables decorator works with object methods.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_method_view, check_for_POST_params=False)
self.verify_unsafe_email(sensitive_method_view, check_for_POST_params=False)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_method_view, check_for_POST_params=False)
self.verify_safe_email(sensitive_method_view, check_for_POST_params=False)
def test_sensitive_function_arguments(self):
"""
Sensitive variables don't leak in the sensitive_variables decorator's
frame, when those variables are passed as arguments to the decorated
function.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_args_function_caller)
self.verify_unsafe_email(sensitive_args_function_caller)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_args_function_caller, check_for_POST_params=False)
self.verify_safe_email(sensitive_args_function_caller, check_for_POST_params=False)
def test_sensitive_function_keyword_arguments(self):
"""
Sensitive variables don't leak in the sensitive_variables decorator's
frame, when those variables are passed as keyword arguments to the
decorated function.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_kwargs_function_caller)
self.verify_unsafe_email(sensitive_kwargs_function_caller)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_kwargs_function_caller, check_for_POST_params=False)
self.verify_safe_email(sensitive_kwargs_function_caller, check_for_POST_params=False)
def test_callable_settings(self):
"""
Callable settings should not be evaluated in the debug page (#21345).
"""
def callable_setting():
return "This should not be displayed"
with self.settings(DEBUG=True, FOOBAR=callable_setting):
response = self.client.get('/raises500/')
self.assertNotContains(response, "This should not be displayed", status_code=500)
def test_callable_settings_forbidding_to_set_attributes(self):
"""
Callable settings which forbid to set attributes should not break
the debug page (#23070).
"""
class CallableSettingWithSlots:
__slots__ = []
def __call__(self):
return "This should not be displayed"
with self.settings(DEBUG=True, WITH_SLOTS=CallableSettingWithSlots()):
response = self.client.get('/raises500/')
self.assertNotContains(response, "This should not be displayed", status_code=500)
def test_dict_setting_with_non_str_key(self):
"""
A dict setting containing a non-string key should not break the
debug page (#12744).
"""
with self.settings(DEBUG=True, FOOBAR={42: None}):
response = self.client.get('/raises500/')
self.assertContains(response, 'FOOBAR', status_code=500)
def test_sensitive_settings(self):
"""
The debug page should not show some sensitive settings
(password, secret key, ...).
"""
sensitive_settings = [
'SECRET_KEY',
'PASSWORD',
'API_KEY',
'AUTH_TOKEN',
]
for setting in sensitive_settings:
with self.settings(DEBUG=True, **{setting: "should not be displayed"}):
response = self.client.get('/raises500/')
self.assertNotContains(response, 'should not be displayed', status_code=500)
def test_settings_with_sensitive_keys(self):
"""
The debug page should filter out some sensitive information found in
dict settings.
"""
sensitive_settings = [
'SECRET_KEY',
'PASSWORD',
'API_KEY',
'AUTH_TOKEN',
]
for setting in sensitive_settings:
FOOBAR = {
setting: "should not be displayed",
'recursive': {setting: "should not be displayed"},
}
with self.settings(DEBUG=True, FOOBAR=FOOBAR):
response = self.client.get('/raises500/')
self.assertNotContains(response, 'should not be displayed', status_code=500)
def test_cleanse_setting_basic(self):
reporter_filter = SafeExceptionReporterFilter()
self.assertEqual(reporter_filter.cleanse_setting('TEST', 'TEST'), 'TEST')
self.assertEqual(
reporter_filter.cleanse_setting('PASSWORD', 'super_secret'),
reporter_filter.cleansed_substitute,
)
def test_cleanse_setting_ignore_case(self):
reporter_filter = SafeExceptionReporterFilter()
self.assertEqual(
reporter_filter.cleanse_setting('password', 'super_secret'),
reporter_filter.cleansed_substitute,
)
def test_cleanse_setting_recurses_in_dictionary(self):
reporter_filter = SafeExceptionReporterFilter()
initial = {'login': 'cooper', 'password': 'secret'}
self.assertEqual(
reporter_filter.cleanse_setting('SETTING_NAME', initial),
{'login': 'cooper', 'password': reporter_filter.cleansed_substitute},
)
def test_cleanse_setting_recurses_in_dictionary_with_non_string_key(self):
reporter_filter = SafeExceptionReporterFilter()
initial = {('localhost', 8000): {'login': 'cooper', 'password': 'secret'}}
self.assertEqual(
reporter_filter.cleanse_setting('SETTING_NAME', initial),
{
('localhost', 8000): {
'login': 'cooper',
'password': reporter_filter.cleansed_substitute,
},
},
)
def test_cleanse_setting_recurses_in_list_tuples(self):
reporter_filter = SafeExceptionReporterFilter()
initial = [
{
'login': 'cooper',
'password': 'secret',
'apps': (
{'name': 'app1', 'api_key': 'a06b-c462cffae87a'},
{'name': 'app2', 'api_key': 'a9f4-f152e97ad808'},
),
'tokens': ['98b37c57-ec62-4e39', '8690ef7d-8004-4916'],
},
{'SECRET_KEY': 'c4d77c62-6196-4f17-a06b-c462cffae87a'},
]
cleansed = [
{
'login': 'cooper',
'password': reporter_filter.cleansed_substitute,
'apps': (
{'name': 'app1', 'api_key': reporter_filter.cleansed_substitute},
{'name': 'app2', 'api_key': reporter_filter.cleansed_substitute},
),
'tokens': reporter_filter.cleansed_substitute,
},
{'SECRET_KEY': reporter_filter.cleansed_substitute},
]
self.assertEqual(
reporter_filter.cleanse_setting('SETTING_NAME', initial),
cleansed,
)
self.assertEqual(
reporter_filter.cleanse_setting('SETTING_NAME', tuple(initial)),
tuple(cleansed),
)
def test_request_meta_filtering(self):
request = self.rf.get('/', HTTP_SECRET_HEADER='super_secret')
reporter_filter = SafeExceptionReporterFilter()
self.assertEqual(
reporter_filter.get_safe_request_meta(request)['HTTP_SECRET_HEADER'],
reporter_filter.cleansed_substitute,
)
def test_exception_report_uses_meta_filtering(self):
response = self.client.get('/raises500/', HTTP_SECRET_HEADER='super_secret')
self.assertNotIn(b'super_secret', response.content)
response = self.client.get(
'/raises500/',
HTTP_SECRET_HEADER='super_secret',
HTTP_ACCEPT='application/json',
)
self.assertNotIn(b'super_secret', response.content)
class CustomExceptionReporterFilter(SafeExceptionReporterFilter):
cleansed_substitute = 'XXXXXXXXXXXXXXXXXXXX'
hidden_settings = _lazy_re_compile('API|TOKEN|KEY|SECRET|PASS|SIGNATURE|DATABASE_URL', flags=re.I)
@override_settings(
ROOT_URLCONF='view_tests.urls',
DEFAULT_EXCEPTION_REPORTER_FILTER='%s.CustomExceptionReporterFilter' % __name__,
)
class CustomExceptionReporterFilterTests(SimpleTestCase):
def setUp(self):
get_default_exception_reporter_filter.cache_clear()
def tearDown(self):
get_default_exception_reporter_filter.cache_clear()
def test_setting_allows_custom_subclass(self):
self.assertIsInstance(
get_default_exception_reporter_filter(),
CustomExceptionReporterFilter,
)
def test_cleansed_substitute_override(self):
reporter_filter = get_default_exception_reporter_filter()
self.assertEqual(
reporter_filter.cleanse_setting('password', 'super_secret'),
reporter_filter.cleansed_substitute,
)
def test_hidden_settings_override(self):
reporter_filter = get_default_exception_reporter_filter()
self.assertEqual(
reporter_filter.cleanse_setting('database_url', 'super_secret'),
reporter_filter.cleansed_substitute,
)
class NonHTMLResponseExceptionReporterFilter(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase):
"""
Sensitive information can be filtered out of error reports.
The plain text 500 debug-only error page is served when it is detected
that the request doesn't accept HTML content. Don't check for the
(non)existence of frame vars in the traceback information section of the
response content because they're not included in these error pages.
Refs #14614.
"""
rf = RequestFactory(HTTP_ACCEPT='application/json')
def test_non_sensitive_request(self):
"""
Request info can be seen in the default error reports for
non-sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)
def test_sensitive_request(self):
"""
Sensitive POST parameters cannot be seen in the default
error reports for sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_view, check_for_vars=False)
def test_paranoid_request(self):
"""
No POST parameters can be seen in the default error reports
for "paranoid" requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(paranoid_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_paranoid_response(paranoid_view, check_for_vars=False)
def test_custom_exception_reporter_filter(self):
"""
It's possible to assign an exception reporter filter to
the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(custom_exception_reporter_filter_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_unsafe_response(custom_exception_reporter_filter_view, check_for_vars=False)
@override_settings(DEBUG=True, ROOT_URLCONF='view_tests.urls')
def test_non_html_response_encoding(self):
response = self.client.get('/raises500/', HTTP_ACCEPT='application/json')
self.assertEqual(response.headers['Content-Type'], 'text/plain; charset=utf-8')
class DecoratorsTests(SimpleTestCase):
def test_sensitive_variables_not_called(self):
msg = (
'sensitive_variables() must be called to use it as a decorator, '
'e.g., use @sensitive_variables(), not @sensitive_variables.'
)
with self.assertRaisesMessage(TypeError, msg):
@sensitive_variables
def test_func(password):
pass
def test_sensitive_post_parameters_not_called(self):
msg = (
'sensitive_post_parameters() must be called to use it as a '
'decorator, e.g., use @sensitive_post_parameters(), not '
'@sensitive_post_parameters.'
)
with self.assertRaisesMessage(TypeError, msg):
@sensitive_post_parameters
def test_func(request):
return index_page(request)
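# Illustrative sketch (not part of the test suite): a hypothetical view combining
# the decorators exercised above. With DEBUG=False, the error report would still
# show the names 'password' and 'credit_card' but cleanse their values.
@sensitive_variables('password', 'credit_card')
@sensitive_post_parameters('password', 'credit_card')
def example_payment_view(request):
    password = request.POST.get('password')
    credit_card = request.POST.get('credit_card')
    raise Exception('Intentional error to trigger the report')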
|
apps.py
|
# based on: https://github.com/bokeh/bokeh/blob/0.12.16/examples/howto/server_embed/flask_embed.py
from django.apps import AppConfig
from bokeh.server.server import Server
from tornado.ioloop import IOLoop
from . import bk_sliders
from . import bk_config
def bk_worker():
# Note: num_procs must be 1; see e.g. flask_gunicorn_embed.py for num_procs>1
server = Server({'/bk_sliders_app': bk_sliders.app},
io_loop=IOLoop(),
address=bk_config.server['address'],
port=bk_config.server['port'],
allow_websocket_origin=["localhost:8000", "localhost:5006", "127.0.0.1:8000", "127.0.0.1:5006"])
server.start()
server.io_loop.start()
class Sliders(AppConfig):
name = 'sliders'
def ready(self):
# For development, Django provides autoreload, which results
# in ready() being called twice on startup. We only want one
# bokeh server, though. Trying to start a second bokeh server
# just produces an error that's skipped over (port already in
# use). Alternatively, using "python manage.py runserver
# --noreload" avoids the problem. Otherwise, could add some
# kind of lock...
from threading import Thread
Thread(target=bk_worker).start()
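# Illustrative sketch (not part of this file): a hypothetical views.py that embeds
# the Bokeh app served by bk_worker(). server_document() is Bokeh's public embed
# helper; the URL and template name below are assumptions matching bk_config.
def sliders_page(request):
    from bokeh.embed import server_document
    from django.shortcuts import render
    script = server_document('http://localhost:5006/bk_sliders_app')
    return render(request, 'sliders/embed.html', {'script': script})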
|
parallel_render.py
|
"""
Small addon for blender to help with rendering in VSE.
It automates rendering with multiple instances of blender.
It should come up as "Parallel Render" in addons list.
Copyright (c) 2017 Krzysztof Trzcinski
"""
from bpy import props
from bpy import types
from collections import namedtuple
from enum import Enum
from multiprocessing import cpu_count
from multiprocessing.dummy import Pool
from queue import Queue
from threading import Lock
from threading import Thread
import bpy
import errno
import itertools
import json
import logging
import os
import shutil
import socket
import struct
import subprocess
import sys
import tempfile
import time
LOGGER = logging.getLogger(__name__)
bl_info = {
"name": "Parallel Render",
"author": "Krzysztof Trzciński",
"version": (1, 0),
"blender": (2, 91, 0),
"location": "Properties > Parallel Render Panel or Render menu",
"description": "Render the output from the Sequencer multithreaded",
"warning": "",
"wiki_url": "https://github.com/elmopl/ktba/wiki/Addons#parallel-render",
"tracker_url": "",
"category": "Sequencer",
}
def _can_concatenate(scene):
return scene.render.is_movie_format
class ParallelRenderPanel(bpy.types.Panel):
"""Render the Output from the Sequencer Multithreaded"""
bl_label = "Parallel Render"
bl_idname = "OBJECT_PT_parallel_render"
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "render"
bl_parent_id = "RENDER_PT_output"
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
layout.operator('render.parallel_render', icon='RENDER_ANIMATION')
addon_props = context.preferences.addons[__name__].preferences
props = context.scene.parallel_render_panel
layout.prop(props, "max_parallel")
layout.prop(props, "batch_type", expand=True)
sub_prop = str(props.batch_type)
if hasattr(props, sub_prop):
layout.prop(props, sub_prop)
layout.prop(props, "overwrite")
layout.prop(props, "mixdown")
col = layout.column()
col.prop(props, "concatenate")
if not _can_concatenate(context.scene):
col.enabled = False
col.label(text='Concatenation only available for video file format', icon='ERROR')
elif addon_props.ffmpeg_valid:
col = layout.column()
col.prop(props, "clean_up_parts")
col.enabled = props.concatenate
else:
col.enabled = False
col.use_property_split = False
col.label(text='Check add-on preferences', icon='ERROR')
class MessageChannel(object):
MSG_SIZE_FMT = '!i'
MSG_SIZE_SIZE = struct.calcsize(MSG_SIZE_FMT)
def __init__(self, conn):
self._conn = conn
def __enter__(self):
return self
def __exit__(self, exc_t, exc_v, tb):
self._conn.close()
def send(self, msg):
msg = json.dumps(msg).encode('utf8')
msg_size = len(msg)
packed_size = struct.pack(self.MSG_SIZE_FMT, msg_size)
self._conn.sendall(packed_size)
self._conn.sendall(msg)
def _recv(self, size):
buf = b''
while len(buf) < size:
read = self._conn.recv(size - len(buf))
if len(read) == 0:
raise Exception('Unexpected end of connection')
buf += read
return buf
def recv(self):
msg_size_packed = self._recv(self.MSG_SIZE_SIZE)
msg_size = struct.unpack(self.MSG_SIZE_FMT, msg_size_packed)[0]
if msg_size == 0:
return None
return json.loads(self._recv(msg_size).decode('utf8'))
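# Illustrative sketch (not used by the add-on): round-trip one message through
# MessageChannel over a local socket pair to show the length-prefixed JSON framing.
def _message_channel_roundtrip_example():
    parent_sock, child_sock = socket.socketpair()
    with MessageChannel(parent_sock) as sender, MessageChannel(child_sock) as receiver:
        sender.send({'current_frame': 42, 'output_file': 'part-0001.mkv'})
        assert receiver.recv() == {'current_frame': 42, 'output_file': 'part-0001.mkv'}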
class CurrentProjectFile(object):
def __init__(self):
self.path = None
def __enter__(self):
self.path = bpy.data.filepath
return self
def __exit__(self, exc_type, exc_value, tb):
self.path = None
class TemporaryProjectCopy(object):
def __init__(self):
self.path = None
def __enter__(self):
project_file = tempfile.NamedTemporaryFile(
delete=False,
# Temporary project files have to be in the
# same directory to ensure relative paths work.
dir=bpy.path.abspath("//"),
prefix='parallel_render_copy_{}_'.format(os.path.splitext(os.path.basename(bpy.data.filepath))[0]),
suffix='.blend',
)
project_file.close()
try:
self.path = project_file.name
bpy.ops.wm.save_as_mainfile(
filepath=self.path,
copy=True,
check_existing=False,
relative_remap=True,
)
assert os.path.exists(self.path)
return self
except:
self._cleanup()
raise
def __exit__(self, exc_type, exc_value, tb):
self._cleanup()
def _cleanup(self):
os.unlink(self.path)
self._cleanup_autosave_files()
def _cleanup_autosave_files(self):
# TODO: Work out proper way to clean up .blend{n} files
try:
n = 1
while True:
os.unlink(self.path + str(n))
n += 1
except OSError:
pass
class WorkerProcess(object):
CONNECT_TIMEOUT = 30
@staticmethod
def read_config():
config = json.load(sys.stdin)
sck = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sck.connect(tuple(config['controller']))
return MessageChannel(sck), config['args']
def __init__(
self,
worker_id,
args,
project_file,
subprocess_stdout,
subprocess_stderr
):
self._args = args
self._p = None
self._incoming = None
self._sck = None
self._project_file = project_file
self._logger = LOGGER.getChild('worker[{}]'.format(worker_id))
self._connection = None
self.return_code = None
self.subprocess_stdout = subprocess_stdout
self.subprocess_stderr = subprocess_stderr
def _create_socket(self):
self._sck = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sck.bind(('localhost', 0))
self._sck.listen(1)
def _destroy_socket(self):
self._sck.close()
def __enter__(self):
cmd = (
bpy.app.binary_path,
self._project_file,
'--background',
'--python',
__file__,
'--',
'render'
)
self._create_socket()
self._logger.info("Starting worker process: %s", cmd)
self._p = subprocess.Popen(
cmd,
stdin=subprocess.PIPE,
stdout=self.subprocess_stdout,
stderr=self.subprocess_stderr,
)
config = {
'controller': self._sck.getsockname(),
'args': self._args
}
self._p.stdin.write(json.dumps(config).encode('utf8'))
self._p.stdin.close()
# This is rather arbitrary.
# It is meant to protect accept() from hanging in case
# something goes very wrong with the launched process.
self._sck.settimeout(self.CONNECT_TIMEOUT)
self._connection, _addr = self._sck.accept()
self._logger.info("Started worker process")
return MessageChannel(self._connection)
def __exit__(self, exc_t, exc_v, tb):
self._logger.info('waiting')
self.return_code = self._p.wait()
self._logger.info('finished with rc: %s', self.return_code)
self._p = None
self._connection.close()
self._connection = None
self._destroy_socket()
def _add_multiline_label(layout, lines, icon='NONE'):
for line in lines:
row = layout.row()
row.alignment = 'CENTER'
row.label(text=line, icon=icon)
icon = 'NONE'
def _is_valid_ffmpeg_executable(path):
res = None
try:
subprocess.check_output((path, '-version'))
except (OSError, subprocess.CalledProcessError):
res = "Path `{}` cannot be executed".format(path)
LOGGER.info("_is_valid_ffmpeg_executable(%s): %s", path, res)
return res
class ParallelRenderPreferences(types.AddonPreferences):
bl_idname = __name__
ffmpeg_executable: props.StringProperty(
name="Path to ffmpeg executable",
default="",
update=lambda self, context: self.update(context),
subtype='FILE_PATH',
)
ffmpeg_status: props.StringProperty(default="")
ffmpeg_valid: props.BoolProperty(default=False)
def update(self, context):
error = _is_valid_ffmpeg_executable(self.ffmpeg_executable)
if error is None:
self.ffmpeg_valid = True
info = subprocess.check_output((self.ffmpeg_executable, '-version')).decode('utf-8')
info = info.split('\r', 1)[0]
self.ffmpeg_status = 'Version: {}'.format(info)
else:
self.ffmpeg_valid = False
self.ffmpeg_status = error
context.scene.parallel_render_panel.update(context)
def draw(self, context):
layout = self.layout
layout.prop(self, "ffmpeg_executable")
icon = 'INFO' if self.ffmpeg_valid else 'ERROR'
if icon == 'ERROR':
layout.label(text="The path to FFmpeg executable is invalid.", icon=icon)
else:
layout.label(text=self.ffmpeg_status, icon=icon)
def _need_temporary_file(data):
return data.is_dirty
def parallel_render_menu_draw(self, context):
layout = self.layout
layout.operator('render.parallel_render', icon='RENDER_ANIMATION')
layout.separator()
class ParallelRenderPropertyGroup(types.PropertyGroup):
def update(self, context):
addon_props = context.preferences.addons[__name__].preferences
if not addon_props.ffmpeg_valid and self.concatenate:
LOGGER.info("ParallelRenderPropertyGroup forcing concatenate to false")
self.concatenate = False
self.clean_up_parts = False
if not self.concatenate:
self.clean_up_parts = False
last_run_result: props.EnumProperty(
items=[
# (identifier, name, description, icon, number)
('done', '', ''),
('pending', '', ''),
('failed', '', ''),
],
name="Render Batch Size"
)
batch_type: props.EnumProperty(
items=[
# (identifier, name, description, icon, number)
('parts', 'Parts', 'Render in given number of batches (automatically splits it)'),
('fixed', 'Fixed', 'Render in fixed size batches'),
],
name="Render Batch Size"
)
max_parallel: props.IntProperty(
name="Blender Instances",
min=1,
default=max(1, cpu_count() - 1),
max=10000
)
overwrite: props.BoolProperty(
name="Overwrite Existing Files",
default=True,
)
mixdown: props.BoolProperty(
name="Mixdown Sound",
default=True,
)
concatenate: props.BoolProperty(
name="Concatenate Output Files",
update=lambda self, context: self.update(context),
)
clean_up_parts: props.BoolProperty(
name="Clean Up Partial Files",
)
fixed: props.IntProperty(
name="Number of Frames per Batch",
min=1,
default=300,
max=10000
)
parts: props.IntProperty(
name="Number of Parts",
min=1,
default=max(1, (cpu_count() - 1) * 2),
max=10000
)
class ParallelRenderState(Enum):
CLEANING = 1
RUNNING = 2
MIXDOWN = 3
CONCATENATE = 4
FAILED = 5
CANCELLING = 6
def describe(self):
return {
self.CLEANING: ('INFO', 'Cleaning Up'),
self.RUNNING: ('INFO', 'Rendering'),
self.MIXDOWN: ('INFO', 'Mixing Sound'),
self.CONCATENATE: ('INFO', 'Concatenating'),
self.FAILED: ('ERROR', 'Failed'),
self.CANCELLING: ('WARNING', 'Cancelling'),
}[self]
def get_ranges_parts(scn):
offset = scn.frame_start
current = 0
end = scn.frame_end - offset
length = end + 1
parts = int(scn.parallel_render_panel.parts)
if length <= parts:
yield (scn.frame_start, scn.frame_end)
return
for i in range(1, parts + 1):
end = i * length // parts
yield (offset + current, offset + end - 1)
current = end
def get_ranges_fixed(scn):
start = scn.frame_start
end = scn.frame_end
increment = int(scn.parallel_render_panel.fixed)
while start <= end:
yield (start, min(start + increment, end))
start += increment + 1
RANGE_CALCULATORS = {
'parts': get_ranges_parts,
'fixed': get_ranges_fixed,
}
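# Illustrative sketch (not used by the add-on): run the range calculators above
# against lightweight stand-ins for a Blender scene to show how a 100-frame
# animation is split into batches.
def _range_split_example():
    _Panel = namedtuple('_Panel', ('parts', 'fixed'))
    _Scene = namedtuple('_Scene', ('frame_start', 'frame_end', 'parallel_render_panel'))
    scn = _Scene(frame_start=1, frame_end=100, parallel_render_panel=_Panel(parts=4, fixed=30))
    assert list(get_ranges_parts(scn)) == [(1, 25), (26, 50), (51, 75), (76, 100)]
    assert list(get_ranges_fixed(scn)) == [(1, 31), (32, 62), (63, 93), (94, 100)]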
class ParallelRender(types.Operator):
"""Render the Output from the Sequencer Multithreaded"""
bl_idname = "render.parallel_render"
bl_label = "Parallel Render"
bl_options = {'REGISTER'}
still_running = False
thread = None
state = None
subprocess_stdout = sys.stdout
subprocess_stderr = sys.stderr
def draw(self, context):
layout = self.layout
if _need_temporary_file(bpy.data):
_add_multiline_label(
layout,
[
'Unsaved changes to project.',
'Will attempt to create temporary file.',
],
icon='ERROR',
)
layout.row().label(text='Will render frames from {} to {}'.format(context.scene.frame_start, context.scene.frame_end))
def __init__(self):
super(ParallelRender, self).__init__()
self.summary_mutex = None
def check(self, context):
return True
def _render_project_file(self, scn, project_file):
LOGGER.info("Going to render file %s", project_file)
self.summary_mutex = Lock()
props = scn.parallel_render_panel
range_type = str(props.batch_type)
ranges = tuple(RANGE_CALCULATORS[range_type](scn))
cmds = tuple(
(
(start, end),
{
'--scene': str(scn.name),
'--start-frame': start,
'--end-frame': end,
'--overwrite': bool(props.overwrite),
}
)
for start, end in ranges
)
self.summary = {
'batches': len(cmds),
'batches_done': 0,
'frames': max(s[1] for s in ranges) - min(s[0] for s in ranges) + 1,
'frames_done': 0,
}
RunResult = namedtuple('RunResult', ('range', 'command', 'rc', 'output_file'))
self.report({'INFO'}, 'Working on file {0}'.format(project_file))
def run(args):
rng, cmd = args
res = None
output_file = None
if self.state == ParallelRenderState.RUNNING:
try:
worker_id = '{}-{}'.format(rng[0], rng[1])
worker = WorkerProcess(
worker_id,
cmd,
project_file=project_file,
subprocess_stdout=self.subprocess_stdout,
subprocess_stderr=self.subprocess_stderr,
)
msg = None
with worker as channel:
msgs = iter(channel.recv, None)
last_done = rng[0]
for msg in msgs:
frame_done = msg['current_frame']
with self.summary_mutex:
self.summary['frames_done'] += (frame_done - last_done)
last_done = frame_done
with self.summary_mutex:
self.summary['frames_done'] += 1
res = worker.return_code
if msg is not None:
output_file = msg['output_file']
status_msg = 'Worker finished writing {}'.format(output_file)
LOGGER.info(status_msg)
except Exception as exc:
LOGGER.exception(exc)
res = -1
return RunResult(rng, cmd, res, output_file)
self.state = ParallelRenderState.RUNNING
self.report({'INFO'}, 'Starting 0/{0} [0.0%]'.format(
len(cmds)
))
with Pool(props.max_parallel) as pool:
pending = pool.imap_unordered(run, cmds)
results = {}
for num, res in enumerate(pending, 1):
with self.summary_mutex:
self.summary['batches_done'] = num
results[res.range] = res
self._report_progress()
for result in sorted(results.values(), key=lambda r: r.range[0]):
LOGGER.info('Result: %s', result)
if result.rc != 0:
self.state = ParallelRenderState.FAILED
if result.output_file is not None:
LOGGER.error('Cleaning up failed %s', result.output_file)
try:
os.unlink(result.output_file)
except OSError as exc:
assert exc.errno == errno.ENOENT
self._report_progress()
sound_path = os.path.abspath(os.path.splitext(scn.render.frame_path())[0] + '.mp3')
if self.state == self.state.RUNNING and props.mixdown:
self.state = ParallelRenderState.MIXDOWN
with self.summary_mutex:
self.report({'INFO'}, 'Mixing down sound')
LOGGER.info('Going to mixdown to %s', sound_path)
bpy.ops.sound.mixdown(filepath=sound_path)
self._report_progress()
self.state = ParallelRenderState.RUNNING
LOGGER.debug('Checkpoint %s %s %s', self.state, props.concatenate, _can_concatenate(scn))
if self.state == ParallelRenderState.RUNNING and props.concatenate and _can_concatenate(scn):
fd, concatenate_files_name = tempfile.mkstemp(
dir=bpy.path.abspath("//"),
)
os.close(fd)
LOGGER.info('Going to concatenate parts (list file: %s)', concatenate_files_name)
self.state = ParallelRenderState.CONCATENATE
self.report({'INFO'}, 'Concatenating')
with open(concatenate_files_name, 'w') as data:
for _rng, res in sorted(results.items()):
data.write("file '{}'\n".format(res.output_file))
outfile = bpy.context.scene.render.frame_path()
LOGGER.info('Final render name: %s', outfile)
sound = ()
if props.mixdown:
sound = ('-i', sound_path, '-codec:a', 'copy', '-q:a', '0')
overwrite = ('-y' if bool(props.overwrite) else '-n',)
base_cmd = (
self.ffmpeg_executable,
'-nostdin',
'-f', 'concat',
'-safe', '0',
'-i', concatenate_files_name,
'-codec:v', 'copy',
outfile,
)
cmd = base_cmd + sound + overwrite
LOGGER.info('Running: %s', cmd)
res = subprocess.call(
cmd,
stdout=self.subprocess_stdout,
stderr=self.subprocess_stderr,
)
LOGGER.info('Finished running [rc: %s]: %s', res, cmd)
if res == 0:
self.state = self.state.RUNNING
else:
self.state = self.state.FAILED
os.unlink(concatenate_files_name)
assert os.path.exists(outfile)
if self.state == ParallelRenderState.RUNNING and props.clean_up_parts:
to_clean = [res.output_file for res in results.values()]
LOGGER.info('Going to clean up parts (%s)', to_clean)
self.state = ParallelRenderState.CLEANING
os.unlink(sound_path)
for filename in to_clean:
os.unlink(filename)
self.state = ParallelRenderState.RUNNING
def _run(self, scn):
props = scn.parallel_render_panel
props.last_run_result = 'pending'
if _need_temporary_file(bpy.data):
work_project_file = TemporaryProjectCopy()
else:
work_project_file = CurrentProjectFile()
try:
with work_project_file:
self._render_project_file(scn, work_project_file.path)
props.last_run_result = 'done' if self.state == ParallelRenderState.RUNNING else 'failed'
except Exception as exc:
LOGGER.exception(exc)
props.last_run_result = 'failed'
def _report_progress(self):
rep_type, action = self.state.describe()
with self.summary_mutex:
self.report({rep_type}, '{0} Batches: {1}/{2} Frames: {3}/{4} [{5:.1f}%]'.format(
action.replace('ing', 'ed'),
self.summary['batches_done'],
self.summary['batches'],
self.summary['frames_done'],
self.summary['frames'],
100.0 * self.summary['frames_done'] / self.summary['frames']
))
def execute(self, context):
scn = context.scene
wm = context.window_manager
self.timer = wm.event_timer_add(0.5, window=context.window)
wm.modal_handler_add(self)
wm.progress_begin(0., 100.)
addon_props = context.preferences.addons[__name__].preferences
self.max_parallel = scn.parallel_render_panel.max_parallel
self.ffmpeg_executable = addon_props.ffmpeg_executable
self.thread = Thread(target=self._run, args=(scn,))
self.thread.start()
return {'RUNNING_MODAL'}
def modal(self, context, event):
if self.summary_mutex is None:
return {'PASS_THROUGH'}
wm = context.window_manager
# Stop the thread when ESCAPE is pressed.
if event.type == 'ESC':
self.state = ParallelRenderState.CANCELLING
self._report_progress()
if event.type == 'TIMER':
still_running = self.thread.is_alive()
with self.summary_mutex:
percent = 100.0 * self.summary['batches_done'] / self.summary['batches']
if still_running:
wm.progress_update(percent)
self._report_progress()
return {'PASS_THROUGH'}
self.thread.join()
wm.event_timer_remove(self.timer)
wm.progress_end()
return {'FINISHED'}
return {'PASS_THROUGH'}
def invoke(self, context, event):
wm = context.window_manager
return wm.invoke_props_dialog(self)
def render():
channel, args = WorkerProcess.read_config()
with channel:
try:
scn_name = args['--scene']
scn = bpy.data.scenes[scn_name]
scn.frame_start = args['--start-frame']
scn.frame_end = args['--end-frame']
outfile = bpy.context.scene.render.frame_path()
def _update_progress(_ignored):
send_stats(bpy.context.scene.frame_current)
def send_stats(frame):
channel.send({
'output_file': outfile,
'current_frame': frame,
})
LOGGER.info("Writing file {}".format(outfile))
if args['--overwrite'] or not os.path.exists(outfile):
bpy.app.handlers.render_stats.append(_update_progress)
bpy.ops.render.render(animation=True, scene=scn_name)
else:
LOGGER.warning('%s already exists.', outfile)
send_stats(scn.frame_end)
LOGGER.info("Done writing {}".format(outfile))
assert os.path.exists(outfile)
finally:
channel.send(None)
sys.exit(0)
def main():
logging.basicConfig(level=logging.INFO)
covstart = os.environ.get('COVERAGE_PROCESS_START')
if covstart is not None:
sys.path.extend(os.environ['PYTHONPATH'].split(':'))
logging.info('sys.path: %s', sys.path)
import coverage
coverage.process_startup()
# Get everything after '--' as those are arguments
# to our script
args = sys.argv[sys.argv.index('--') + 1:]
action = args[0]
if action == 'render':
render()
CLASSES = (
ParallelRenderPropertyGroup,
ParallelRenderPreferences,
ParallelRender,
ParallelRenderPanel,
)
def register():
for cls in CLASSES:
bpy.utils.register_class(cls)
bpy.types.Scene.parallel_render_panel = bpy.props.PointerProperty(type=ParallelRenderPropertyGroup)
# TODO: I am not quite sure how to put it after actual "Render Animation"
bpy.types.TOPBAR_MT_render.prepend(parallel_render_menu_draw)
def unregister():
bpy.types.TOPBAR_MT_render.remove(parallel_render_menu_draw)
del bpy.types.Scene.parallel_render_panel
for cls in reversed(CLASSES):
bpy.utils.unregister_class(cls)
if __name__ == "__main__":
main()
|
thread_stopper.py
|
import threading
import inspect
import ctypes
import time
def _async_raise(tid, exctype):
"""raises the exception, performs cleanup if needed"""
if not inspect.isclass(exctype):
raise TypeError("Only types can be raised (not instances)")
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), ctypes.py_object(exctype))
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, 0)
raise SystemError("PyThreadState_SetAsyncExc failed")
class Thread(threading.Thread):
def _get_my_tid(self):
"""determines this (self's) thread id"""
if not self.is_alive():
raise threading.ThreadError("the thread is not active")
# do we have it cached?
if hasattr(self, "_thread_id"):
return self._thread_id
# no, look for it in the _active dict
for tid, tobj in threading._active.items():
if tobj is self:
self._thread_id = tid
return tid
raise AssertionError("could not determine the thread's id")
def raise_exc(self, exctype):
"""raises the given exception type in the context of this thread"""
_async_raise(self._get_my_tid(), exctype)
def terminate(self):
"""raises SystemExit in the context of the given thread, which should
cause the thread to exit silently (unless caught)"""
self.raise_exc(SystemExit)
def f():
try:
while True:
time.sleep(0.1)
finally:
print "outta here"
if __name__ == '__main__':
t = Thread(target=f)
t.start()
print('is alive', t.is_alive())
time.sleep(1)
print('terminating')
t.terminate()
print('is alive', t.is_alive())
print('join')
t.join()
print('is alive', t.is_alive())
|
tcp.py
|
# -*- coding: utf-8 -*-
'''
TCP transport classes
Wire protocol: "len(payload) msgpack({'head': SOMEHEADER, 'body': SOMEBODY})"
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import errno
import logging
import msgpack
import socket
import os
import weakref
import time
import traceback
# Import Salt Libs
import salt.crypt
import salt.utils.asynchronous
import salt.utils.event
import salt.utils.files
import salt.utils.platform
import salt.utils.process
import salt.utils.verify
import salt.payload
import salt.exceptions
import salt.transport.frame
import salt.transport.ipc
import salt.transport.client
import salt.transport.server
import salt.transport.mixins.auth
from salt.ext import six
from salt.ext.six.moves import queue # pylint: disable=import-error
from salt.exceptions import SaltReqTimeoutError, SaltClientError
from salt.transport import iter_transport_opts
# Import Tornado Libs
import tornado
import tornado.tcpserver
import tornado.gen
import tornado.concurrent
import tornado.tcpclient
import tornado.netutil
# pylint: disable=import-error,no-name-in-module
if six.PY2:
import urlparse
else:
import urllib.parse as urlparse
# pylint: enable=import-error,no-name-in-module
# Import third party libs
try:
from M2Crypto import RSA
HAS_M2 = True
except ImportError:
HAS_M2 = False
try:
from Cryptodome.Cipher import PKCS1_OAEP
except ImportError:
from Crypto.Cipher import PKCS1_OAEP
if six.PY3 and salt.utils.platform.is_windows():
USE_LOAD_BALANCER = True
else:
USE_LOAD_BALANCER = False
if USE_LOAD_BALANCER:
import threading
import multiprocessing
import tornado.util
from salt.utils.process import SignalHandlingMultiprocessingProcess
log = logging.getLogger(__name__)
def _set_tcp_keepalive(sock, opts):
'''
Ensure that TCP keepalives are set for the socket.
'''
if hasattr(socket, 'SO_KEEPALIVE'):
if opts.get('tcp_keepalive', False):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
if hasattr(socket, 'SOL_TCP'):
if hasattr(socket, 'TCP_KEEPIDLE'):
tcp_keepalive_idle = opts.get('tcp_keepalive_idle', -1)
if tcp_keepalive_idle > 0:
sock.setsockopt(
socket.SOL_TCP, socket.TCP_KEEPIDLE,
int(tcp_keepalive_idle))
if hasattr(socket, 'TCP_KEEPCNT'):
tcp_keepalive_cnt = opts.get('tcp_keepalive_cnt', -1)
if tcp_keepalive_cnt > 0:
sock.setsockopt(
socket.SOL_TCP, socket.TCP_KEEPCNT,
int(tcp_keepalive_cnt))
if hasattr(socket, 'TCP_KEEPINTVL'):
tcp_keepalive_intvl = opts.get('tcp_keepalive_intvl', -1)
if tcp_keepalive_intvl > 0:
sock.setsockopt(
socket.SOL_TCP, socket.TCP_KEEPINTVL,
int(tcp_keepalive_intvl))
if hasattr(socket, 'SIO_KEEPALIVE_VALS'):
# Windows doesn't support TCP_KEEPIDLE, TCP_KEEPCNT, nor
# TCP_KEEPINTVL. Instead, it has its own proprietary
# SIO_KEEPALIVE_VALS.
tcp_keepalive_idle = opts.get('tcp_keepalive_idle', -1)
tcp_keepalive_intvl = opts.get('tcp_keepalive_intvl', -1)
# Windows doesn't support changing something equivalent to
# TCP_KEEPCNT.
if tcp_keepalive_idle > 0 or tcp_keepalive_intvl > 0:
# Windows defaults may be found by using the link below.
# Search for 'KeepAliveTime' and 'KeepAliveInterval'.
# https://technet.microsoft.com/en-us/library/bb726981.aspx#EDAA
# If one value is set and the other isn't, we still need
# to send both values to SIO_KEEPALIVE_VALS and they both
# need to be valid. So in that case, use the Windows
# default.
if tcp_keepalive_idle <= 0:
tcp_keepalive_idle = 7200
if tcp_keepalive_intvl <= 0:
tcp_keepalive_intvl = 1
# The values expected are in milliseconds, so multiply by
# 1000.
sock.ioctl(socket.SIO_KEEPALIVE_VALS, (
1, int(tcp_keepalive_idle * 1000),
int(tcp_keepalive_intvl * 1000)))
else:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 0)
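# Illustrative sketch (not part of Salt): the opts keys consumed by
# _set_tcp_keepalive() above, applied to a throwaway socket. The numeric
# values are arbitrary examples.
def _example_tcp_keepalive_opts():
    opts = {
        'tcp_keepalive': True,
        'tcp_keepalive_idle': 300,    # seconds of idle time before the first probe
        'tcp_keepalive_intvl': 30,    # seconds between probes
        'tcp_keepalive_cnt': 5,       # dropped probes before the connection is closed
    }
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    _set_tcp_keepalive(sock, opts)
    sock.close()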
if USE_LOAD_BALANCER:
class LoadBalancerServer(SignalHandlingMultiprocessingProcess):
'''
Raw TCP server which runs in its own process and will listen
for incoming connections. Each incoming connection will be
sent via multiprocessing queue to the workers.
Since the queue is shared amongst workers, only one worker will
handle a given connection.
'''
# TODO: opts!
# Based on default used in tornado.netutil.bind_sockets()
backlog = 128
def __init__(self, opts, socket_queue, **kwargs):
super(LoadBalancerServer, self).__init__(**kwargs)
self.opts = opts
self.socket_queue = socket_queue
self._socket = None
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on
# Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(
state['opts'],
state['socket_queue'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {
'opts': self.opts,
'socket_queue': self.socket_queue,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def close(self):
if self._socket is not None:
self._socket.shutdown(socket.SHUT_RDWR)
self._socket.close()
self._socket = None
def __del__(self):
self.close()
def run(self):
'''
Start the load balancer
'''
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(self._socket, self.opts)
self._socket.setblocking(1)
self._socket.bind((self.opts['interface'], int(self.opts['ret_port'])))
self._socket.listen(self.backlog)
while True:
try:
# Wait for a connection to occur since the socket is
# blocking.
connection, address = self._socket.accept()
# Wait for a free slot to be available to put
# the connection into.
# Sockets are picklable on Windows in Python 3.
self.socket_queue.put((connection, address), True, None)
except socket.error as e:
# ECONNABORTED indicates that there was a connection
# but it was closed while still in the accept queue.
# (observed on FreeBSD).
if tornado.util.errno_from_exception(e) == errno.ECONNABORTED:
continue
raise
# TODO: move serial down into message library
class AsyncTCPReqChannel(salt.transport.client.ReqChannel):
'''
Encapsulate sending routines to tcp.
Note: this class returns a singleton
'''
# This class is only a singleton per minion/master pair
# mapping of io_loop -> {key -> channel}
instance_map = weakref.WeakKeyDictionary()
def __new__(cls, opts, **kwargs):
'''
Only create one instance of channel per __key()
'''
# do we have any mapping for this io_loop
io_loop = kwargs.get('io_loop') or tornado.ioloop.IOLoop.current()
if io_loop not in cls.instance_map:
cls.instance_map[io_loop] = weakref.WeakValueDictionary()
loop_instance_map = cls.instance_map[io_loop]
key = cls.__key(opts, **kwargs)
obj = loop_instance_map.get(key)
if obj is None:
log.debug('Initializing new AsyncTCPReqChannel for %s', key)
# we need to make a local variable for this, as we are going to store
# it in a WeakValueDictionary, which will remove the item if no one
# references it; this forces a reference while we return to the caller
obj = object.__new__(cls)
obj.__singleton_init__(opts, **kwargs)
loop_instance_map[key] = obj
else:
log.debug('Re-using AsyncTCPReqChannel for %s', key)
return obj
@classmethod
def __key(cls, opts, **kwargs):
if 'master_uri' in kwargs:
opts['master_uri'] = kwargs['master_uri']
return (opts['pki_dir'], # where the keys are stored
opts['id'], # minion ID
opts['master_uri'],
kwargs.get('crypt', 'aes'), # TODO: use the same channel for crypt
)
# has to remain empty for singletons, since __init__ will *always* be called
def __init__(self, opts, **kwargs):
pass
# an init for the singleton instance to call
def __singleton_init__(self, opts, **kwargs):
self.opts = dict(opts)
self.serial = salt.payload.Serial(self.opts)
# crypt defaults to 'aes'
self.crypt = kwargs.get('crypt', 'aes')
self.io_loop = kwargs.get('io_loop') or tornado.ioloop.IOLoop.current()
if self.crypt != 'clear':
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)
resolver = kwargs.get('resolver')
parse = urlparse.urlparse(self.opts['master_uri'])
master_host, master_port = parse.netloc.rsplit(':', 1)
self.master_addr = (master_host, int(master_port))
self._closing = False
self.message_client = SaltMessageClientPool(self.opts,
args=(self.opts, master_host, int(master_port),),
kwargs={'io_loop': self.io_loop, 'resolver': resolver,
'source_ip': self.opts.get('source_ip'),
'source_port': self.opts.get('source_ret_port')})
def close(self):
if self._closing:
return
self._closing = True
self.message_client.close()
def __del__(self):
self.close()
def _package_load(self, load):
return {
'enc': self.crypt,
'load': load,
}
@tornado.gen.coroutine
def crypted_transfer_decode_dictentry(self, load, dictkey=None, tries=3, timeout=60):
if not self.auth.authenticated:
yield self.auth.authenticate()
ret = yield self.message_client.send(self._package_load(self.auth.crypticle.dumps(load)), timeout=timeout)
key = self.auth.get_keys()
if HAS_M2:
aes = key.private_decrypt(ret['key'], RSA.pkcs1_oaep_padding)
else:
cipher = PKCS1_OAEP.new(key)
aes = cipher.decrypt(ret['key'])
pcrypt = salt.crypt.Crypticle(self.opts, aes)
data = pcrypt.loads(ret[dictkey])
if six.PY3:
data = salt.transport.frame.decode_embedded_strs(data)
raise tornado.gen.Return(data)
@tornado.gen.coroutine
def _crypted_transfer(self, load, tries=3, timeout=60):
'''
In case of authentication errors, try to renegotiate authentication
and retry the method.
Otherwise we can fail too early, e.g. when the master restarts during a
minion state execution call.
'''
@tornado.gen.coroutine
def _do_transfer():
data = yield self.message_client.send(self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
)
# we may not always get data back; for example a salt-call ret
# submission is a blind communication: we do not subscribe to
# return events, we just upload the results to the master
if data:
data = self.auth.crypticle.loads(data)
if six.PY3:
data = salt.transport.frame.decode_embedded_strs(data)
raise tornado.gen.Return(data)
if not self.auth.authenticated:
yield self.auth.authenticate()
try:
ret = yield _do_transfer()
raise tornado.gen.Return(ret)
except salt.crypt.AuthenticationError:
yield self.auth.authenticate()
ret = yield _do_transfer()
raise tornado.gen.Return(ret)
@tornado.gen.coroutine
def _uncrypted_transfer(self, load, tries=3, timeout=60):
ret = yield self.message_client.send(self._package_load(load), timeout=timeout)
raise tornado.gen.Return(ret)
@tornado.gen.coroutine
def send(self, load, tries=3, timeout=60, raw=False):
'''
Send a request, return a future which will complete when we send the message
'''
try:
if self.crypt == 'clear':
ret = yield self._uncrypted_transfer(load, tries=tries, timeout=timeout)
else:
ret = yield self._crypted_transfer(load, tries=tries, timeout=timeout)
except tornado.iostream.StreamClosedError:
# Convert to 'SaltClientError' so that clients can handle this
# exception more appropriately.
raise SaltClientError('Connection to master lost')
raise tornado.gen.Return(ret)
class AsyncTCPPubChannel(salt.transport.mixins.auth.AESPubClientMixin, salt.transport.client.AsyncPubChannel):
def __init__(self,
opts,
**kwargs):
self.opts = opts
self.serial = salt.payload.Serial(self.opts)
self.crypt = kwargs.get('crypt', 'aes')
self.io_loop = kwargs.get('io_loop') or tornado.ioloop.IOLoop.current()
self.connected = False
self._closing = False
self._reconnected = False
self.event = salt.utils.event.get_event(
'minion',
opts=self.opts,
listen=False
)
def close(self):
if self._closing:
return
self._closing = True
if hasattr(self, 'message_client'):
self.message_client.close()
def __del__(self):
self.close()
def _package_load(self, load):
return {
'enc': self.crypt,
'load': load,
}
@tornado.gen.coroutine
def send_id(self, tok, force_auth):
'''
Send the minion id to the master so that the master may better
track the connection state of the minion.
In case of authentication errors, try to renegotiate authentication
and retry the method.
'''
load = {'id': self.opts['id'], 'tok': tok}
@tornado.gen.coroutine
def _do_transfer():
msg = self._package_load(self.auth.crypticle.dumps(load))
package = salt.transport.frame.frame_msg(msg, header=None)
yield self.message_client.write_to_stream(package)
raise tornado.gen.Return(True)
if force_auth or not self.auth.authenticated:
count = 0
while count <= self.opts['tcp_authentication_retries'] or self.opts['tcp_authentication_retries'] < 0:
try:
yield self.auth.authenticate()
break
except SaltClientError as exc:
log.debug(exc)
count += 1
try:
ret = yield _do_transfer()
raise tornado.gen.Return(ret)
except salt.crypt.AuthenticationError:
yield self.auth.authenticate()
ret = yield _do_transfer()
raise tornado.gen.Return(ret)
@tornado.gen.coroutine
def connect_callback(self, result):
if self._closing:
return
# Force re-auth on reconnect since the master
# may have been restarted
yield self.send_id(self.tok, self._reconnected)
self.connected = True
self.event.fire_event(
{'master': self.opts['master']},
'__master_connected'
)
if self._reconnected:
# On reconnects, fire a master event to notify that the minion is
# available.
if self.opts.get('__role') == 'syndic':
data = 'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
)
tag = salt.utils.event.tagify(
[self.opts['id'], 'start'],
'syndic'
)
else:
data = 'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
)
tag = salt.utils.event.tagify(
[self.opts['id'], 'start'],
'minion'
)
load = {'id': self.opts['id'],
'cmd': '_minion_event',
'pretag': None,
'tok': self.tok,
'data': data,
'tag': tag}
req_channel = salt.utils.asynchronous.SyncWrapper(
AsyncTCPReqChannel, (self.opts,)
)
try:
req_channel.send(load, timeout=60)
except salt.exceptions.SaltReqTimeoutError:
log.info('fire_master failed: master could not be contacted. Request timed out.')
except Exception:
log.info('fire_master failed: %s', traceback.format_exc())
else:
self._reconnected = True
def disconnect_callback(self):
if self._closing:
return
self.connected = False
self.event.fire_event(
{'master': self.opts['master']},
'__master_disconnected'
)
@tornado.gen.coroutine
def connect(self):
try:
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)
self.tok = self.auth.gen_token(b'salt')
if not self.auth.authenticated:
yield self.auth.authenticate()
if self.auth.authenticated:
# if this is changed from the default, we assume it was intentional
if int(self.opts.get('publish_port', 4505)) != 4505:
self.publish_port = self.opts.get('publish_port')
# else take the relayed publish_port master reports
else:
self.publish_port = self.auth.creds['publish_port']
self.message_client = SaltMessageClientPool(
self.opts,
args=(self.opts, self.opts['master_ip'], int(self.publish_port),),
kwargs={'io_loop': self.io_loop,
'connect_callback': self.connect_callback,
'disconnect_callback': self.disconnect_callback,
'source_ip': self.opts.get('source_ip'),
'source_port': self.opts.get('source_publish_port')})
yield self.message_client.connect() # wait for the client to be connected
self.connected = True
# TODO: better exception handling...
except KeyboardInterrupt:
raise
except Exception as exc:
if '-|RETRY|-' not in six.text_type(exc):
raise SaltClientError('Unable to sign_in to master: {0}'.format(exc)) # TODO: better error message
def on_recv(self, callback):
'''
Register an on_recv callback
'''
if callback is None:
return self.message_client.on_recv(callback)
@tornado.gen.coroutine
def wrap_callback(body):
if not isinstance(body, dict):
# TODO: For some reason we need to decode here for things
# to work. Fix this.
body = msgpack.loads(body)
if six.PY3:
body = salt.transport.frame.decode_embedded_strs(body)
ret = yield self._decode_payload(body)
callback(ret)
return self.message_client.on_recv(wrap_callback)
class TCPReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin, salt.transport.server.ReqServerChannel):
# TODO: opts!
backlog = 5
def __init__(self, opts):
salt.transport.server.ReqServerChannel.__init__(self, opts)
self._socket = None
@property
def socket(self):
return self._socket
def close(self):
if self._socket is not None:
try:
self._socket.shutdown(socket.SHUT_RDWR)
except socket.error as exc:
if exc.errno == errno.ENOTCONN:
# We may try to shutdown a socket which is already disconnected.
# Ignore this condition and continue.
pass
else:
raise exc
self._socket.close()
self._socket = None
if hasattr(self.req_server, 'stop'):
try:
self.req_server.stop()
except Exception as exc:
log.exception('TCPReqServerChannel close generated an exception: %s', str(exc))
def __del__(self):
self.close()
def pre_fork(self, process_manager):
'''
Pre-fork we need to create the zmq router device
'''
salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager)
if USE_LOAD_BALANCER:
self.socket_queue = multiprocessing.Queue()
process_manager.add_process(
LoadBalancerServer, args=(self.opts, self.socket_queue)
)
elif not salt.utils.platform.is_windows():
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(self._socket, self.opts)
self._socket.setblocking(0)
self._socket.bind((self.opts['interface'], int(self.opts['ret_port'])))
def post_fork(self, payload_handler, io_loop):
'''
After forking we need to create all of the local sockets to listen to the
router
payload_handler: function to call with your payloads
'''
self.payload_handler = payload_handler
self.io_loop = io_loop
self.serial = salt.payload.Serial(self.opts)
with salt.utils.asynchronous.current_ioloop(self.io_loop):
if USE_LOAD_BALANCER:
self.req_server = LoadBalancerWorker(self.socket_queue,
self.handle_message,
ssl_options=self.opts.get('ssl'))
else:
if salt.utils.platform.is_windows():
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(self._socket, self.opts)
self._socket.setblocking(0)
self._socket.bind((self.opts['interface'], int(self.opts['ret_port'])))
self.req_server = SaltMessageServer(self.handle_message,
ssl_options=self.opts.get('ssl'))
self.req_server.add_socket(self._socket)
self._socket.listen(self.backlog)
salt.transport.mixins.auth.AESReqServerMixin.post_fork(self, payload_handler, io_loop)
@tornado.gen.coroutine
def handle_message(self, stream, header, payload):
'''
Handle incoming messages from the underlying TCP streams
'''
try:
try:
payload = self._decode_payload(payload)
except Exception:
stream.write(salt.transport.frame.frame_msg('bad load', header=header))
raise tornado.gen.Return()
# TODO helper functions to normalize payload?
if not isinstance(payload, dict) or not isinstance(payload.get('load'), dict):
yield stream.write(salt.transport.frame.frame_msg(
'payload and load must be a dict', header=header))
raise tornado.gen.Return()
try:
id_ = payload['load'].get('id', '')
if str('\0') in id_:
log.error('Payload contains an id with a null byte: %s', payload)
stream.write(salt.transport.frame.frame_msg('bad load: id contains a null byte', header=header))
raise tornado.gen.Return()
except TypeError:
log.error('Payload contains non-string id: %s', payload)
stream.write(salt.transport.frame.frame_msg('bad load: id {0} is not a string'.format(id_), header=header))
raise tornado.gen.Return()
# intercept the "_auth" commands, since the main daemon shouldn't know
# anything about our key auth
if payload['enc'] == 'clear' and payload.get('load', {}).get('cmd') == '_auth':
yield stream.write(salt.transport.frame.frame_msg(
self._auth(payload['load']), header=header))
raise tornado.gen.Return()
# TODO: test
try:
ret, req_opts = yield self.payload_handler(payload)
except Exception as e:
# always attempt to return an error to the minion
stream.write('Some exception handling minion payload')
log.error('Some exception handling a payload from minion', exc_info=True)
stream.close()
raise tornado.gen.Return()
req_fun = req_opts.get('fun', 'send')
if req_fun == 'send_clear':
stream.write(salt.transport.frame.frame_msg(ret, header=header))
elif req_fun == 'send':
stream.write(salt.transport.frame.frame_msg(self.crypticle.dumps(ret), header=header))
elif req_fun == 'send_private':
stream.write(salt.transport.frame.frame_msg(self._encrypt_private(ret,
req_opts['key'],
req_opts['tgt'],
), header=header))
else:
log.error('Unknown req_fun %s', req_fun)
# always attempt to return an error to the minion
stream.write('Server-side exception handling payload')
stream.close()
except tornado.gen.Return:
raise
except tornado.iostream.StreamClosedError:
# Stream was closed. This could happen if the remote side
# closed the connection on its end (eg in a timeout or shutdown
# situation).
log.error('Connection was unexpectedly closed', exc_info=True)
except Exception as exc: # pylint: disable=broad-except
# Absorb any other exceptions
log.error('Unexpected exception occurred: %s', exc, exc_info=True)
raise tornado.gen.Return()
class SaltMessageServer(tornado.tcpserver.TCPServer, object):
'''
Raw TCP server which will receive all of the TCP streams and re-assemble
messages that are sent through to us
'''
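    # Each frame on the wire is a msgpack map of the form
    # {'head': {...}, 'body': ...}; handle_stream() feeds raw bytes into a
    # msgpack.Unpacker and dispatches every complete frame to message_handler.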
def __init__(self, message_handler, *args, **kwargs):
super(SaltMessageServer, self).__init__(*args, **kwargs)
self.io_loop = tornado.ioloop.IOLoop.current()
self.clients = []
self.message_handler = message_handler
@tornado.gen.coroutine
def handle_stream(self, stream, address):
'''
Handle incoming streams and add messages to the incoming queue
'''
log.trace('Req client %s connected', address)
self.clients.append((stream, address))
unpacker = msgpack.Unpacker()
try:
while True:
wire_bytes = yield stream.read_bytes(4096, partial=True)
unpacker.feed(wire_bytes)
for framed_msg in unpacker:
if six.PY3:
framed_msg = salt.transport.frame.decode_embedded_strs(
framed_msg
)
header = framed_msg['head']
self.io_loop.spawn_callback(self.message_handler, stream, header, framed_msg['body'])
except tornado.iostream.StreamClosedError:
log.trace('req client disconnected %s', address)
self.clients.remove((stream, address))
except Exception as e:
log.trace('other master-side exception: %s', e)
self.clients.remove((stream, address))
stream.close()
def shutdown(self):
'''
Shutdown the whole server
'''
        for item in list(self.clients):
client, address = item
client.close()
self.clients.remove(item)
if USE_LOAD_BALANCER:
class LoadBalancerWorker(SaltMessageServer):
'''
This will receive TCP connections from 'LoadBalancerServer' via
a multiprocessing queue.
Since the queue is shared amongst workers, only one worker will handle
a given connection.
'''
def __init__(self, socket_queue, message_handler, *args, **kwargs):
super(LoadBalancerWorker, self).__init__(
message_handler, *args, **kwargs)
self.socket_queue = socket_queue
self._stop = threading.Event()
self.thread = threading.Thread(target=self.socket_queue_thread)
self.thread.start()
def stop(self):
self._stop.set()
self.thread.join()
def socket_queue_thread(self):
try:
while True:
try:
client_socket, address = self.socket_queue.get(True, 1)
except queue.Empty:
if self._stop.is_set():
break
continue
# 'self.io_loop' initialized in super class
# 'tornado.tcpserver.TCPServer'.
# 'self._handle_connection' defined in same super class.
self.io_loop.spawn_callback(
self._handle_connection, client_socket, address)
except (KeyboardInterrupt, SystemExit):
pass
class TCPClientKeepAlive(tornado.tcpclient.TCPClient):
'''
Override _create_stream() in TCPClient to enable keep alive support.
'''
def __init__(self, opts, resolver=None):
self.opts = opts
super(TCPClientKeepAlive, self).__init__(resolver=resolver)
def _create_stream(self, max_buffer_size, af, addr, **kwargs): # pylint: disable=unused-argument
'''
Override _create_stream() in TCPClient.
Tornado 4.5 added the kwargs 'source_ip' and 'source_port'.
Due to this, use **kwargs to swallow these and any future
kwargs to maintain compatibility.
'''
# Always connect in plaintext; we'll convert to ssl if necessary
# after one connection has completed.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
_set_tcp_keepalive(sock, self.opts)
stream = tornado.iostream.IOStream(
sock,
max_buffer_size=max_buffer_size)
if tornado.version_info < (5,):
return stream.connect(addr)
return stream, stream.connect(addr)
class SaltMessageClientPool(salt.transport.MessageClientPool):
'''
Wrapper class of SaltMessageClient to avoid blocking waiting while writing data to socket.
'''
def __init__(self, opts, args=None, kwargs=None):
super(SaltMessageClientPool, self).__init__(SaltMessageClient, opts, args=args, kwargs=kwargs)
def __del__(self):
self.close()
def close(self):
for message_client in self.message_clients:
message_client.close()
self.message_clients = []
@tornado.gen.coroutine
def connect(self):
futures = []
for message_client in self.message_clients:
futures.append(message_client.connect())
for future in futures:
yield future
raise tornado.gen.Return(None)
def on_recv(self, *args, **kwargs):
for message_client in self.message_clients:
message_client.on_recv(*args, **kwargs)
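    # send() and write_to_stream() below pick the pool member with the
    # shortest send_queue, a simple least-loaded heuristic.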
def send(self, *args, **kwargs):
message_clients = sorted(self.message_clients, key=lambda x: len(x.send_queue))
return message_clients[0].send(*args, **kwargs)
def write_to_stream(self, *args, **kwargs):
message_clients = sorted(self.message_clients, key=lambda x: len(x.send_queue))
return message_clients[0]._stream.write(*args, **kwargs)
# TODO consolidate with IPCClient
# TODO: limit in-flight messages.
# TODO: singleton? Something to not re-create the tcp connection so much
class SaltMessageClient(object):
'''
Low-level message sending client
'''
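    # Illustrative usage (the host, port and payload here are hypothetical,
    # not taken from this module):
    #   client = SaltMessageClient(opts, 'master.example.com', 4506, io_loop=io_loop)
    #   future = client.send({'cmd': 'ping'}, timeout=5)
    # The future resolves with the body of the response frame whose header
    # 'mid' matches the message id assigned to this request.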
def __init__(self, opts, host, port, io_loop=None, resolver=None,
connect_callback=None, disconnect_callback=None,
source_ip=None, source_port=None):
self.opts = opts
self.host = host
self.port = port
self.source_ip = source_ip
self.source_port = source_port
self.connect_callback = connect_callback
self.disconnect_callback = disconnect_callback
self.io_loop = io_loop or tornado.ioloop.IOLoop.current()
with salt.utils.asynchronous.current_ioloop(self.io_loop):
self._tcp_client = TCPClientKeepAlive(opts, resolver=resolver)
self._mid = 1
self._max_messages = int((1 << 31) - 2) # number of IDs before we wrap
# TODO: max queue size
self.send_queue = [] # queue of messages to be sent
self.send_future_map = {} # mapping of request_id -> Future
self.send_timeout_map = {} # request_id -> timeout_callback
self._read_until_future = None
self._on_recv = None
self._closing = False
self._connecting_future = self.connect()
self._stream_return_future = tornado.concurrent.Future()
self.io_loop.spawn_callback(self._stream_return)
# TODO: timeout inflight sessions
def close(self):
if self._closing:
return
self._closing = True
if hasattr(self, '_stream') and not self._stream.closed():
# If _stream_return() hasn't completed, it means the IO
# Loop is stopped (such as when using
# 'salt.utils.asynchronous.SyncWrapper'). Ensure that
# _stream_return() completes by restarting the IO Loop.
# This will prevent potential errors on shutdown.
try:
orig_loop = tornado.ioloop.IOLoop.current()
self.io_loop.make_current()
self._stream.close()
if self._read_until_future is not None:
# This will prevent this message from showing up:
# '[ERROR ] Future exception was never retrieved:
# StreamClosedError'
# This happens because the logic is always waiting to read
# the next message and the associated read future is marked
# 'StreamClosedError' when the stream is closed.
if self._read_until_future.done():
self._read_until_future.exception()
elif self.io_loop != tornado.ioloop.IOLoop.current(instance=False):
self.io_loop.add_future(
self._stream_return_future,
lambda future: self.io_loop.stop()
)
self.io_loop.start()
finally:
orig_loop.make_current()
self._tcp_client.close()
# Clear callback references to allow the object that they belong to
# to be deleted.
self.connect_callback = None
self.disconnect_callback = None
def __del__(self):
self.close()
def connect(self):
'''
Ask for this client to reconnect to the origin
'''
if hasattr(self, '_connecting_future') and not self._connecting_future.done():
future = self._connecting_future
else:
future = tornado.concurrent.Future()
self._connecting_future = future
self.io_loop.add_callback(self._connect)
# Add the callback only when a new future is created
if self.connect_callback is not None:
def handle_future(future):
response = future.result()
self.io_loop.add_callback(self.connect_callback, response)
future.add_done_callback(handle_future)
return future
# TODO: tcp backoff opts
@tornado.gen.coroutine
def _connect(self):
'''
Try to connect for the rest of time!
'''
while True:
if self._closing:
break
try:
kwargs = {}
if self.source_ip or self.source_port:
if tornado.version_info >= (4, 5):
### source_ip and source_port are supported only in Tornado >= 4.5
# See http://www.tornadoweb.org/en/stable/releases/v4.5.0.html
# Otherwise will just ignore these args
kwargs = {'source_ip': self.source_ip,
'source_port': self.source_port}
else:
log.warning('If you need a certain source IP/port, consider upgrading Tornado >= 4.5')
with salt.utils.asynchronous.current_ioloop(self.io_loop):
self._stream = yield self._tcp_client.connect(self.host,
self.port,
ssl_options=self.opts.get('ssl'),
**kwargs)
self._connecting_future.set_result(True)
break
except Exception as e:
yield tornado.gen.sleep(1) # TODO: backoff
#self._connecting_future.set_exception(e)
@tornado.gen.coroutine
def _stream_return(self):
try:
while not self._closing and (
not self._connecting_future.done() or
self._connecting_future.result() is not True):
yield self._connecting_future
unpacker = msgpack.Unpacker()
while not self._closing:
try:
self._read_until_future = self._stream.read_bytes(4096, partial=True)
wire_bytes = yield self._read_until_future
unpacker.feed(wire_bytes)
for framed_msg in unpacker:
if six.PY3:
framed_msg = salt.transport.frame.decode_embedded_strs(
framed_msg
)
header = framed_msg['head']
body = framed_msg['body']
message_id = header.get('mid')
if message_id in self.send_future_map:
self.send_future_map.pop(message_id).set_result(body)
self.remove_message_timeout(message_id)
else:
if self._on_recv is not None:
self.io_loop.spawn_callback(self._on_recv, header, body)
else:
log.error('Got response for message_id %s that we are not tracking', message_id)
except tornado.iostream.StreamClosedError as e:
log.debug('tcp stream to %s:%s closed, unable to recv', self.host, self.port)
for future in six.itervalues(self.send_future_map):
future.set_exception(e)
self.send_future_map = {}
if self._closing:
return
if self.disconnect_callback:
self.disconnect_callback()
# if the last connect finished, then we need to make a new one
if self._connecting_future.done():
self._connecting_future = self.connect()
yield self._connecting_future
except TypeError:
# This is an invalid transport
if 'detect_mode' in self.opts:
log.info('There was an error trying to use TCP transport; '
'attempting to fallback to another transport')
else:
raise SaltClientError
except Exception as e:
log.error('Exception parsing response', exc_info=True)
for future in six.itervalues(self.send_future_map):
future.set_exception(e)
self.send_future_map = {}
if self._closing:
return
if self.disconnect_callback:
self.disconnect_callback()
# if the last connect finished, then we need to make a new one
if self._connecting_future.done():
self._connecting_future = self.connect()
yield self._connecting_future
finally:
self._stream_return_future.set_result(True)
@tornado.gen.coroutine
def _stream_send(self):
while not self._connecting_future.done() or self._connecting_future.result() is not True:
yield self._connecting_future
while len(self.send_queue) > 0:
message_id, item = self.send_queue[0]
try:
yield self._stream.write(item)
del self.send_queue[0]
# if the connection is dead, lets fail this send, and make sure we
# attempt to reconnect
except tornado.iostream.StreamClosedError as e:
if message_id in self.send_future_map:
self.send_future_map.pop(message_id).set_exception(e)
self.remove_message_timeout(message_id)
del self.send_queue[0]
if self._closing:
return
if self.disconnect_callback:
self.disconnect_callback()
# if the last connect finished, then we need to make a new one
if self._connecting_future.done():
self._connecting_future = self.connect()
yield self._connecting_future
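    # Message ids are handed out sequentially by _message_id() below and wrap
    # back to 1 once _max_messages is reached; ids that still have a pending
    # future in send_future_map are skipped so in-flight requests never collide.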
def _message_id(self):
wrap = False
while self._mid in self.send_future_map:
if self._mid >= self._max_messages:
if wrap:
# this shouldn't ever happen, but just in case
raise Exception('Unable to find available messageid')
self._mid = 1
wrap = True
else:
self._mid += 1
return self._mid
# TODO: return a message object which takes care of multiplexing?
def on_recv(self, callback):
'''
Register a callback for received messages (that we didn't initiate)
'''
if callback is None:
self._on_recv = callback
else:
def wrap_recv(header, body):
callback(body)
self._on_recv = wrap_recv
def remove_message_timeout(self, message_id):
if message_id not in self.send_timeout_map:
return
timeout = self.send_timeout_map.pop(message_id)
self.io_loop.remove_timeout(timeout)
def timeout_message(self, message_id):
if message_id in self.send_timeout_map:
del self.send_timeout_map[message_id]
if message_id in self.send_future_map:
self.send_future_map.pop(message_id).set_exception(
SaltReqTimeoutError('Message timed out')
)
def send(self, msg, timeout=None, callback=None, raw=False):
'''
Send given message, and return a future
'''
message_id = self._message_id()
header = {'mid': message_id}
future = tornado.concurrent.Future()
if callback is not None:
def handle_future(future):
response = future.result()
self.io_loop.add_callback(callback, response)
future.add_done_callback(handle_future)
# Add this future to the mapping
self.send_future_map[message_id] = future
if self.opts.get('detect_mode') is True:
timeout = 1
if timeout is not None:
send_timeout = self.io_loop.call_later(timeout, self.timeout_message, message_id)
self.send_timeout_map[message_id] = send_timeout
# if we don't have a send queue, we need to spawn the callback to do the sending
if len(self.send_queue) == 0:
self.io_loop.spawn_callback(self._stream_send)
self.send_queue.append((message_id, salt.transport.frame.frame_msg(msg, header=header)))
return future
class Subscriber(object):
'''
Client object for use with the TCP publisher server
'''
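    # A Subscriber wraps one accepted connection on the publisher side; id_
    # stays None until PubServer._stream_read() verifies the minion and
    # records the id it authenticated with.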
def __init__(self, stream, address):
self.stream = stream
self.address = address
self._closing = False
self._read_until_future = None
self.id_ = None
def close(self):
if self._closing:
return
self._closing = True
if not self.stream.closed():
self.stream.close()
if self._read_until_future is not None and self._read_until_future.done():
# This will prevent this message from showing up:
# '[ERROR ] Future exception was never retrieved:
# StreamClosedError'
# This happens because the logic is always waiting to read
# the next message and the associated read future is marked
# 'StreamClosedError' when the stream is closed.
self._read_until_future.exception()
def __del__(self):
self.close()
class PubServer(tornado.tcpserver.TCPServer, object):
'''
TCP publisher
'''
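    # self.present maps a minion id to the set of Subscriber objects currently
    # connected under that id; presence events are only fired from here when
    # TCP is the sole configured transport (see __init__ below).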
def __init__(self, opts, io_loop=None):
super(PubServer, self).__init__(ssl_options=opts.get('ssl'))
self.io_loop = io_loop
self.opts = opts
self._closing = False
self.clients = set()
self.aes_funcs = salt.master.AESFuncs(self.opts)
self.present = {}
self.presence_events = False
if self.opts.get('presence_events', False):
tcp_only = True
for transport, _ in iter_transport_opts(self.opts):
if transport != 'tcp':
tcp_only = False
if tcp_only:
# Only when the transport is TCP only, the presence events will
# be handled here. Otherwise, it will be handled in the
# 'Maintenance' process.
self.presence_events = True
if self.presence_events:
self.event = salt.utils.event.get_event(
'master',
opts=self.opts,
listen=False
)
def close(self):
if self._closing:
return
self._closing = True
def __del__(self):
self.close()
def _add_client_present(self, client):
id_ = client.id_
if id_ in self.present:
clients = self.present[id_]
clients.add(client)
else:
self.present[id_] = {client}
if self.presence_events:
data = {'new': [id_],
'lost': []}
self.event.fire_event(
data,
salt.utils.event.tagify('change', 'presence')
)
data = {'present': list(self.present.keys())}
self.event.fire_event(
data,
salt.utils.event.tagify('present', 'presence')
)
def _remove_client_present(self, client):
id_ = client.id_
if id_ is None or id_ not in self.present:
# This is possible if _remove_client_present() is invoked
# before the minion's id is validated.
return
clients = self.present[id_]
if client not in clients:
# Since _remove_client_present() is potentially called from
# _stream_read() and/or publish_payload(), it is possible for
# it to be called twice, in which case we will get here.
# This is not an abnormal case, so no logging is required.
return
clients.remove(client)
if len(clients) == 0:
del self.present[id_]
if self.presence_events:
data = {'new': [],
'lost': [id_]}
self.event.fire_event(
data,
salt.utils.event.tagify('change', 'presence')
)
data = {'present': list(self.present.keys())}
self.event.fire_event(
data,
salt.utils.event.tagify('present', 'presence')
)
@tornado.gen.coroutine
def _stream_read(self, client):
unpacker = msgpack.Unpacker()
while not self._closing:
try:
client._read_until_future = client.stream.read_bytes(4096, partial=True)
wire_bytes = yield client._read_until_future
unpacker.feed(wire_bytes)
for framed_msg in unpacker:
if six.PY3:
framed_msg = salt.transport.frame.decode_embedded_strs(
framed_msg
)
body = framed_msg['body']
if body['enc'] != 'aes':
# We only accept 'aes' encoded messages for 'id'
continue
crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value)
load = crypticle.loads(body['load'])
if six.PY3:
load = salt.transport.frame.decode_embedded_strs(load)
if not self.aes_funcs.verify_minion(load['id'], load['tok']):
continue
client.id_ = load['id']
self._add_client_present(client)
except tornado.iostream.StreamClosedError as e:
log.debug('tcp stream to %s closed, unable to recv', client.address)
client.close()
self._remove_client_present(client)
self.clients.discard(client)
break
except Exception as e:
log.error('Exception parsing response from %s', client.address, exc_info=True)
continue
def handle_stream(self, stream, address):
log.trace('Subscriber at %s connected', address)
client = Subscriber(stream, address)
self.clients.add(client)
self.io_loop.spawn_callback(self._stream_read, client)
# TODO: ACK the publish through IPC
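    # publish_payload() either targets specific minions (when the package
    # carries a 'topic_lst' of minion ids) or broadcasts the framed payload to
    # every connected subscriber.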
@tornado.gen.coroutine
def publish_payload(self, package, _):
log.debug('TCP PubServer sending payload: %s', package)
payload = salt.transport.frame.frame_msg(package['payload'])
to_remove = []
if 'topic_lst' in package:
topic_lst = package['topic_lst']
for topic in topic_lst:
if topic in self.present:
# This will rarely be a list of more than 1 item. It will
# be more than 1 item if the minion disconnects from the
# master in an unclean manner (eg cable yank), then
# restarts and the master is yet to detect the disconnect
# via TCP keep-alive.
for client in self.present[topic]:
try:
# Write the packed str
f = client.stream.write(payload)
self.io_loop.add_future(f, lambda f: True)
except tornado.iostream.StreamClosedError:
to_remove.append(client)
else:
log.debug('Publish target %s not connected', topic)
else:
for client in self.clients:
try:
# Write the packed str
f = client.stream.write(payload)
self.io_loop.add_future(f, lambda f: True)
except tornado.iostream.StreamClosedError:
to_remove.append(client)
for client in to_remove:
log.debug('Subscriber at %s has disconnected from publisher', client.address)
client.close()
self._remove_client_present(client)
self.clients.discard(client)
log.trace('TCP PubServer finished publishing payload')
class TCPPubServerChannel(salt.transport.server.PubServerChannel):
# TODO: opts!
# Based on default used in tornado.netutil.bind_sockets()
backlog = 128
def __init__(self, opts):
self.opts = opts
self.serial = salt.payload.Serial(self.opts) # TODO: in init?
self.ckminions = salt.utils.minions.CkMinions(opts)
self.io_loop = None
def __setstate__(self, state):
salt.master.SMaster.secrets = state['secrets']
self.__init__(state['opts'])
def __getstate__(self):
return {'opts': self.opts,
'secrets': salt.master.SMaster.secrets}
def _publish_daemon(self, **kwargs):
'''
Bind to the interface specified in the configuration file
'''
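        # The daemon binds the minion-facing TCP publish socket and an IPC
        # 'pull' socket; payloads pushed onto the pull socket by publish()
        # are handed to pub_server.publish_payload().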
salt.utils.process.appendproctitle(self.__class__.__name__)
log_queue = kwargs.get('log_queue')
if log_queue is not None:
salt.log.setup.set_multiprocessing_logging_queue(log_queue)
log_queue_level = kwargs.get('log_queue_level')
if log_queue_level is not None:
salt.log.setup.set_multiprocessing_logging_level(log_queue_level)
salt.log.setup.setup_multiprocessing_logging(log_queue)
# Check if io_loop was set outside
if self.io_loop is None:
self.io_loop = tornado.ioloop.IOLoop.current()
# Spin up the publisher
pub_server = PubServer(self.opts, io_loop=self.io_loop)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(sock, self.opts)
sock.setblocking(0)
sock.bind((self.opts['interface'], int(self.opts['publish_port'])))
sock.listen(self.backlog)
# pub_server will take ownership of the socket
pub_server.add_socket(sock)
# Set up Salt IPC server
if self.opts.get('ipc_mode', '') == 'tcp':
pull_uri = int(self.opts.get('tcp_master_publish_pull', 4514))
else:
pull_uri = os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
pull_sock = salt.transport.ipc.IPCMessageServer(
pull_uri,
io_loop=self.io_loop,
payload_handler=pub_server.publish_payload,
)
# Securely create socket
log.info('Starting the Salt Puller on %s', pull_uri)
with salt.utils.files.set_umask(0o177):
pull_sock.start()
# run forever
try:
self.io_loop.start()
except (KeyboardInterrupt, SystemExit):
salt.log.setup.shutdown_multiprocessing_logging()
def pre_fork(self, process_manager, kwargs=None):
'''
Do anything necessary pre-fork. Since this is on the master side this will
primarily be used to create IPC channels and create our daemon process to
do the actual publishing
'''
process_manager.add_process(self._publish_daemon, kwargs=kwargs)
def publish(self, load):
'''
Publish "load" to minions
'''
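        # The minion-facing payload is {'enc': 'aes', 'load': <encrypted load>}
        # (plus an optional 'sig'); it is wrapped again below as
        # {'payload': <serialized payload>} (optionally with 'topic_lst')
        # before being pushed to the publisher daemon over IPC.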
payload = {'enc': 'aes'}
crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value)
payload['load'] = crypticle.dumps(load)
if self.opts['sign_pub_messages']:
master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem')
log.debug("Signing data packet")
payload['sig'] = salt.crypt.sign_message(master_pem_path, payload['load'])
# Use the Salt IPC server
if self.opts.get('ipc_mode', '') == 'tcp':
pull_uri = int(self.opts.get('tcp_master_publish_pull', 4514))
else:
pull_uri = os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
# TODO: switch to the actual asynchronous interface
#pub_sock = salt.transport.ipc.IPCMessageClient(self.opts, io_loop=self.io_loop)
pub_sock = salt.utils.asynchronous.SyncWrapper(
salt.transport.ipc.IPCMessageClient,
(pull_uri,)
)
pub_sock.connect()
int_payload = {'payload': self.serial.dumps(payload)}
# add some targeting stuff for lists only (for now)
if load['tgt_type'] == 'list':
if isinstance(load['tgt'], six.string_types):
# Fetch a list of minions that match
_res = self.ckminions.check_minions(load['tgt'],
tgt_type=load['tgt_type'])
match_ids = _res['minions']
log.debug("Publish Side Match: %s", match_ids)
                # Send the list of minions through so the transport can target them
int_payload['topic_lst'] = match_ids
else:
int_payload['topic_lst'] = load['tgt']
# Send it over IPC!
pub_sock.send(int_payload)
|
gdb.py
|
import gdb
import re
import zmq
import msgpack
import threading
class GdbEvent():
def __init__(self, cmd, callback=None, callback_data=None):
        self.cmd = cmd
self.callback = callback
self.callback_data = callback_data
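    # When posted via gdb.post_event(), __call__ runs the command on gdb's main
    # thread; for the 'eval_word_callback' case the leading '$N = ' prefix that
    # gdb prints is stripped before the result is sent back through callback.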
def __call__(self):
        if self.callback is not None:
ret = gdb.execute(self.cmd, to_string=True)
# Post the event
if self.callback_data == 'eval_word_callback':
# Filter out dollar sign
a_ret = ret.split('\n')
first_line_filt = re.findall(r'= (.*)', a_ret[0])[0]
out_str = first_line_filt
for i in range(1, len(a_ret)):
if a_ret[i] != '':
out_str += '\n' + a_ret[i]
ret = out_str
cbk_data = {'type': self.callback_data, 'data': str(ret)}
self.callback(cbk_data)
else:
gdb.execute(self.cmd, to_string=True)
class NvGdb(object):
def __init__(self):
self.nvim_socket_connected = False
self.pwd = gdb.execute('pwd', to_string=True)
gdb.execute('set print pretty on', to_string=True)
gdb.execute('set pagination off', to_string=True)
self.pwd = self.pwd.split()[2][:-1]
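    # NvGdb answers msgpack-encoded requests from the editor over a ZeroMQ REP
    # socket bound to tcp://*:8765 (see serve(), run in a daemon thread), and
    # pushes asynchronous results such as 'eval_word' answers and breakpoint
    # hits back through a REQ socket to tcp://localhost:5678 (nvim_post()).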
def get_breakpoints(self):
bps = gdb.breakpoints()
bps_list = []
for b in bps:
if b.enabled:
bps_list.append(b.location)
return bps_list
def toggle_breakpoint(self, currentFile, currentLine):
bps = gdb.breakpoints()
currentFile = currentFile + ':' + str(currentLine)
currentBp = None
for b in bps:
if currentFile == b.location:
currentBp = b
        if currentBp is not None:
currentBp.enabled = not currentBp.enabled
else:
            # currentFile already carries the ':<line>' suffix built above
            gdb.execute('b ' + currentFile, to_string=True)
bps = self.get_breakpoints()
return {'breakpoints': bps}
def event_get_breakpoints(self):
bps = self.get_breakpoints()
return {'breakpoints': bps}
def handle_event(self, msg):
if msg['type'] == 'toggle_breakpoint':
return self.toggle_breakpoint(msg['file'], msg['line'])
elif msg['type'] == 'stop':
# FIXME How to implement this?
gdb.post_event(GdbEvent('c'))
return {'status': True}
elif msg['type'] == 'resume':
gdb.post_event(GdbEvent('c'))
return {'status': True}
elif msg['type'] == 'step':
gdb.post_event(GdbEvent('s'))
return {'status': True}
elif msg['type'] == 'over':
gdb.post_event(GdbEvent('n'))
return {'status': True}
elif msg['type'] == 'reset':
gdb.post_event(GdbEvent('r'))
return {'status': True}
elif msg['type'] == 'get_breakpoints':
return self.event_get_breakpoints()
elif msg['type'] == 'get_frames_string':
ret = gdb.execute('bt', to_string=True)
return {'frames_string': ret}
elif msg['type'] == 'select_frame':
gdb.execute('frame ' + str(msg['frame']), to_string=True)
sal = gdb.selected_frame().find_sal()
nvim_data = {}
nvim_data['file'] = sal.symtab.fullname()
nvim_data['line'] = sal.line
return nvim_data
elif msg['type'] == 'eval_word':
gdb.post_event(GdbEvent('p ' + msg['word'], callback=self.nvim_post, callback_data='eval_word_callback'))
return {'status': 'wait_for_callback'}
else:
return {'status': True}
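    # Requests are msgpack maps with at least a 'type' key, for example
    # {'type': 'toggle_breakpoint', 'file': 'main.c', 'line': 42}
    # ('main.c' and 42 are purely illustrative values).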
def serve(self):
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind("tcp://*:8765")
while True:
raw = socket.recv()
msg = msgpack.unpackb(raw, raw=False)
ret = self.handle_event(msg)
socket.send(msgpack.packb(ret, use_bin_type=True))
def start_server(self):
self.t = threading.Thread(target=self.serve, daemon=True)
self.t.start()
def nvim_post(self, msg):
if self.nvim_socket_connected is False:
context = zmq.Context()
self.socket = context.socket(zmq.REQ)
self.socket.connect("tcp://localhost:5678")
self.nvim_socket_connected = True
msg_data = msgpack.packb(msg, use_bin_type=True)
self.socket.send(msg_data)
raw_resp = self.socket.recv()
resp = msgpack.unpackb(raw_resp, raw=False)
return resp
    def stop_event(self, event):
sal = gdb.selected_frame().find_sal()
nvim_data = {}
nvim_data['type'] = 'bp_hit'
nvim_data['file'] = sal.symtab.fullname()
nvim_data['line'] = sal.line
self.nvim_post(nvim_data)
# Initialize main class
nvg = NvGdb()
nvg.start_server()
gdb.events.stop.connect(nvg.stop_event)
|
__init__.py
|
#
# pymldb
# Nicolas Kructhen, 2015-05-28
# Mich, 2016-01-26
# Copyright (c) 2013 Datacratic. All rights reserved.
#
from __future__ import absolute_import, division, print_function
from .version import __version__ # noqa
import pandas as pd
import requests
import json
from pymldb.util import add_repr_html_to_response
import threading
from .progress_monitor import ProgressMonitor
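# decorate_response() wraps the HTTP helpers below: it attaches an HTML repr
# to the requests.Response and raises ResourceError for any status code
# outside the 2xx/3xx range.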
def decorate_response(fn):
def inner(*args, **kwargs):
result = add_repr_html_to_response(fn(*args, **kwargs))
if result.status_code < 200 or result.status_code >= 400:
raise ResourceError(result)
return result
return inner
class ResourceError(Exception):
def __init__(self, r):
try:
message = json.dumps(r.json(), indent=2)
        except Exception:
message = r.content
super(ResourceError, self).__init__(
"'%d %s' response to '%s %s'\n\n%s" %
(r.status_code, r.reason, r.request.method, r.request.url, message)
)
self.result = r
class Connection(object):
def __init__(self, host="http://localhost", notebook=True):
if not host.startswith("http"):
raise Exception("URIs must start with 'http'")
if host[-1] == '/':
host = host[:-1]
self.uri = host
self.notebook = notebook
@decorate_response
def get(self, url, data=None, **kwargs):
params = {}
        for k, v in kwargs.items():
if type(v) in [dict, list]:
v = json.dumps(v)
params[str(k)] = v
return requests.get(self.uri + url, params=params, json=data)
@decorate_response
def put(self, url, payload=None):
if payload is None:
payload = {}
return requests.put(self.uri + url, json=payload)
@decorate_response
def post(self, url, payload=None):
if payload is None:
payload = {}
return requests.post(self.uri + url, json=payload)
@decorate_response
def delete(self, url):
return requests.delete(self.uri + url)
def query(self, sql, **kwargs):
"""
Shortcut for GET /v1/query, except with argument format='dataframe'
(the default), in which case it will simply wrap the result of the GET
query to /v1/query (with format='table') in a `pandas.DataFrame`.
"""
if 'format' not in kwargs or kwargs['format'] == 'dataframe':
resp = self.get('/v1/query', data={'q': sql, 'format': 'table'}).json()
if len(resp) == 0:
return pd.DataFrame()
else:
return pd.DataFrame.from_records(resp[1:], columns=resp[0],
index="_rowName")
kwargs['q'] = sql
return self.get('/v1/query', **kwargs).json()
def put_and_track(self, url, payload, refresh_rate_sec=1):
"""
Put and track progress, displaying progress bars.
May display the wrong progress if 2 things post/put on the same
procedure name at the same time.
"""
if not url.startswith('/v1/procedures'):
raise Exception("The only supported route is /v1/procedures")
parts = url.split('/')
len_parts = len(parts)
if len_parts not in [4, 6]:
raise Exception(
"You must either PUT a procedure or a procedure run")
proc_id = parts[3]
run_id = None
if len_parts == 4:
if 'params' not in payload:
payload['params'] = {}
payload['params']['runOnCreation'] = True
elif len_parts == 6:
run_id = parts[-1]
pm = ProgressMonitor(self, refresh_rate_sec, proc_id, run_id,
self.notebook)
t = threading.Thread(target=pm.monitor_progress)
t.start()
        try:
            return self.put(url, payload)
        except Exception as e:
            print(e)
        finally:
            pm.event.set()
            t.join()
def post_and_track(self, url, payload, refresh_rate_sec=1):
"""
Post and track progress, displaying progress bars.
May display the wrong progress if 2 things post/put on the same
procedure name at the same time.
"""
if not url.startswith('/v1/procedures'):
raise Exception("The only supported route is /v1/procedures")
if url.endswith('/runs'):
raise Exception(
"Posting and tracking run is unsupported at the moment")
if len(url.split('/')) != 3:
raise Exception("You must POST a procedure")
if 'params' not in payload:
payload['params'] = {}
payload['params']['runOnCreation'] = False
res = self.post('/v1/procedures', payload).json()
proc_id = res['id']
pm = ProgressMonitor(self, refresh_rate_sec, proc_id,
notebook=self.notebook)
t = threading.Thread(target=pm.monitor_progress)
t.start()
try:
return self.post('/v1/procedures/{}/runs'.format(proc_id), {})
except Exception as e:
print(e)
finally:
pm.event.set()
t.join()
|
main.py
|
import time
import multiprocessing
from renderer import base16_render_loop, base10_render_loop
from render_server import start_render_server
if __name__ == "__main__":
base16_render_process = multiprocessing.Process(target=base16_render_loop)
base16_render_process.start()
base10_render_process = multiprocessing.Process(target=base10_render_loop)
base10_render_process.start()
render_server_process = multiprocessing.Process(target=start_render_server)
render_server_process.start()
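    # Supervisor loop: every 10 seconds, restart any renderer or server
    # process that has died; terminate() is called first as cleanup before a
    # fresh Process object is created and started.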
while True:
time.sleep(10)
if not base10_render_process.is_alive():
print("Restarting Base10 renderer")
base10_render_process.terminate()
base10_render_process = multiprocessing.Process(target=base10_render_loop)
base10_render_process.start()
if not base16_render_process.is_alive():
print("Restarting Base16 renderer")
base16_render_process.terminate()
base16_render_process = multiprocessing.Process(target=base16_render_loop)
base16_render_process.start()
if not render_server_process.is_alive():
print("Restarting render server")
render_server_process.terminate()
render_server_process = multiprocessing.Process(target=start_render_server)
render_server_process.start()
|
perf2.py
|
# perf2.py
# request/sec of a fast request
from threading import Thread
from socket import *
import time
sock = socket(AF_INET, SOCK_STREAM) # create a socket connection to the server
sock.connect(('localhost', 25000))
n = 0 # start global counter
def monitor():
global n
while True:
time.sleep(1)
print(n, 'reqs/sec')
n = 0
Thread(target=monitor).start()
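# Each iteration of the loop below is one blocking request/response round
# trip; the monitor thread prints and resets the shared counter once per
# second, giving requests per second.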
while True: # start the infinite request loop
sock.send(b'1') # send a request for small fibonacci number
resp = sock.recv(100)
n += 1
|
test_celery.py
|
import threading
import pytest
pytest.importorskip("celery")
from sentry_sdk import Hub, configure_scope, start_transaction
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk._compat import text_type
from celery import Celery, VERSION
from celery.bin import worker
try:
from unittest import mock # python 3.3 and above
except ImportError:
import mock # python < 3.3
@pytest.fixture
def connect_signal(request):
def inner(signal, f):
signal.connect(f)
request.addfinalizer(lambda: signal.disconnect(f))
return inner
@pytest.fixture
def init_celery(sentry_init, request):
def inner(propagate_traces=True, backend="always_eager", **kwargs):
sentry_init(
integrations=[CeleryIntegration(propagate_traces=propagate_traces)],
**kwargs
)
celery = Celery(__name__)
if backend == "always_eager":
if VERSION < (4,):
celery.conf.CELERY_ALWAYS_EAGER = True
else:
celery.conf.task_always_eager = True
elif backend == "redis":
# broken on celery 3
if VERSION < (4,):
pytest.skip("Redis backend broken for some reason")
# this backend requires capture_events_forksafe
celery.conf.worker_max_tasks_per_child = 1
celery.conf.worker_concurrency = 1
celery.conf.broker_url = "redis://127.0.0.1:6379"
celery.conf.result_backend = "redis://127.0.0.1:6379"
celery.conf.task_always_eager = False
Hub.main.bind_client(Hub.current.client)
request.addfinalizer(lambda: Hub.main.bind_client(None))
# Once we drop celery 3 we can use the celery_worker fixture
if VERSION < (5,):
worker_fn = worker.worker(app=celery).run
else:
from celery.bin.base import CLIContext
worker_fn = lambda: worker.worker(
obj=CLIContext(app=celery, no_color=True, workdir=".", quiet=False),
args=[],
)
worker_thread = threading.Thread(target=worker_fn)
worker_thread.daemon = True
worker_thread.start()
else:
raise ValueError(backend)
return celery
return inner
@pytest.fixture
def celery(init_celery):
return init_celery()
@pytest.fixture(
params=[
lambda task, x, y: (task.delay(x, y), {"args": [x, y], "kwargs": {}}),
lambda task, x, y: (task.apply_async((x, y)), {"args": [x, y], "kwargs": {}}),
lambda task, x, y: (
task.apply_async(args=(x, y)),
{"args": [x, y], "kwargs": {}},
),
lambda task, x, y: (
task.apply_async(kwargs=dict(x=x, y=y)),
{"args": [], "kwargs": {"x": x, "y": y}},
),
]
)
def celery_invocation(request):
"""
Invokes a task in multiple ways Celery allows you to (testing our apply_async monkeypatch).
Currently limited to a task signature of the form foo(x, y)
"""
return request.param
def test_simple(capture_events, celery, celery_invocation):
events = capture_events()
@celery.task(name="dummy_task")
def dummy_task(x, y):
foo = 42 # noqa
return x / y
with start_transaction() as transaction:
celery_invocation(dummy_task, 1, 2)
_, expected_context = celery_invocation(dummy_task, 1, 0)
(event,) = events
assert event["contexts"]["trace"]["trace_id"] == transaction.trace_id
assert event["contexts"]["trace"]["span_id"] != transaction.span_id
assert event["transaction"] == "dummy_task"
assert "celery_task_id" in event["tags"]
assert event["extra"]["celery-job"] == dict(
task_name="dummy_task", **expected_context
)
(exception,) = event["exception"]["values"]
assert exception["type"] == "ZeroDivisionError"
assert exception["mechanism"]["type"] == "celery"
assert exception["stacktrace"]["frames"][0]["vars"]["foo"] == "42"
@pytest.mark.parametrize("task_fails", [True, False], ids=["error", "success"])
def test_transaction_events(capture_events, init_celery, celery_invocation, task_fails):
celery = init_celery(traces_sample_rate=1.0)
@celery.task(name="dummy_task")
def dummy_task(x, y):
return x / y
# XXX: For some reason the first call does not get instrumented properly.
celery_invocation(dummy_task, 1, 1)
events = capture_events()
with start_transaction(name="submission") as transaction:
celery_invocation(dummy_task, 1, 0 if task_fails else 1)
if task_fails:
error_event = events.pop(0)
assert error_event["contexts"]["trace"]["trace_id"] == transaction.trace_id
assert error_event["exception"]["values"][0]["type"] == "ZeroDivisionError"
execution_event, submission_event = events
assert execution_event["transaction"] == "dummy_task"
assert submission_event["transaction"] == "submission"
assert execution_event["type"] == submission_event["type"] == "transaction"
assert execution_event["contexts"]["trace"]["trace_id"] == transaction.trace_id
assert submission_event["contexts"]["trace"]["trace_id"] == transaction.trace_id
if task_fails:
assert execution_event["contexts"]["trace"]["status"] == "internal_error"
else:
assert execution_event["contexts"]["trace"]["status"] == "ok"
assert execution_event["spans"] == []
assert submission_event["spans"] == [
{
"description": "dummy_task",
"op": "celery.submit",
"parent_span_id": submission_event["contexts"]["trace"]["span_id"],
"same_process_as_parent": True,
"span_id": submission_event["spans"][0]["span_id"],
"start_timestamp": submission_event["spans"][0]["start_timestamp"],
"timestamp": submission_event["spans"][0]["timestamp"],
"trace_id": text_type(transaction.trace_id),
}
]
def test_no_stackoverflows(celery):
"""We used to have a bug in the Celery integration where its monkeypatching
was repeated for every task invocation, leading to stackoverflows.
See https://github.com/getsentry/sentry-python/issues/265
"""
results = []
@celery.task(name="dummy_task")
def dummy_task():
with configure_scope() as scope:
scope.set_tag("foo", "bar")
results.append(42)
for _ in range(10000):
dummy_task.delay()
assert results == [42] * 10000
with configure_scope() as scope:
assert not scope._tags
def test_simple_no_propagation(capture_events, init_celery):
celery = init_celery(propagate_traces=False)
events = capture_events()
@celery.task(name="dummy_task")
def dummy_task():
1 / 0
with start_transaction() as transaction:
dummy_task.delay()
(event,) = events
assert event["contexts"]["trace"]["trace_id"] != transaction.trace_id
assert event["transaction"] == "dummy_task"
(exception,) = event["exception"]["values"]
assert exception["type"] == "ZeroDivisionError"
def test_ignore_expected(capture_events, celery):
events = capture_events()
@celery.task(name="dummy_task", throws=(ZeroDivisionError,))
def dummy_task(x, y):
return x / y
dummy_task.delay(1, 2)
dummy_task.delay(1, 0)
assert not events
def test_broken_prerun(init_celery, connect_signal):
from celery.signals import task_prerun
stack_lengths = []
def crash(*args, **kwargs):
# scope should exist in prerun
stack_lengths.append(len(Hub.current._stack))
1 / 0
# Order here is important to reproduce the bug: In Celery 3, a crashing
# prerun would prevent other preruns from running.
connect_signal(task_prerun, crash)
celery = init_celery()
assert len(Hub.current._stack) == 1
@celery.task(name="dummy_task")
def dummy_task(x, y):
stack_lengths.append(len(Hub.current._stack))
return x / y
if VERSION >= (4,):
dummy_task.delay(2, 2)
else:
with pytest.raises(ZeroDivisionError):
dummy_task.delay(2, 2)
assert len(Hub.current._stack) == 1
if VERSION < (4,):
assert stack_lengths == [2]
else:
assert stack_lengths == [2, 2]
@pytest.mark.xfail(
(4, 2, 0) <= VERSION < (4, 4, 3),
strict=True,
reason="https://github.com/celery/celery/issues/4661",
)
def test_retry(celery, capture_events):
events = capture_events()
failures = [True, True, False]
runs = []
@celery.task(name="dummy_task", bind=True)
def dummy_task(self):
runs.append(1)
try:
if failures.pop(0):
1 / 0
except Exception as exc:
self.retry(max_retries=2, exc=exc)
dummy_task.delay()
assert len(runs) == 3
assert not events
failures = [True, True, True]
runs = []
dummy_task.delay()
assert len(runs) == 3
(event,) = events
exceptions = event["exception"]["values"]
for e in exceptions:
assert e["type"] == "ZeroDivisionError"
@pytest.mark.forked
def test_redis_backend_trace_propagation(init_celery, capture_events_forksafe, tmpdir):
celery = init_celery(traces_sample_rate=1.0, backend="redis", debug=True)
events = capture_events_forksafe()
runs = []
@celery.task(name="dummy_task", bind=True)
def dummy_task(self):
runs.append(1)
1 / 0
with start_transaction(name="submit_celery"):
# Curious: Cannot use delay() here or py2.7-celery-4.2 crashes
res = dummy_task.apply_async()
with pytest.raises(Exception):
# Celery 4.1 raises a gibberish exception
res.wait()
# if this is nonempty, the worker never really forked
assert not runs
submit_transaction = events.read_event()
assert submit_transaction["type"] == "transaction"
assert submit_transaction["transaction"] == "submit_celery"
assert len(
submit_transaction["spans"]
), 4 # Because redis integration was auto enabled
span = submit_transaction["spans"][0]
assert span["op"] == "celery.submit"
assert span["description"] == "dummy_task"
event = events.read_event()
(exception,) = event["exception"]["values"]
assert exception["type"] == "ZeroDivisionError"
transaction = events.read_event()
assert (
transaction["contexts"]["trace"]["trace_id"]
== event["contexts"]["trace"]["trace_id"]
== submit_transaction["contexts"]["trace"]["trace_id"]
)
events.read_flush()
# if this is nonempty, the worker never really forked
assert not runs
@pytest.mark.forked
@pytest.mark.parametrize("newrelic_order", ["sentry_first", "sentry_last"])
def test_newrelic_interference(init_celery, newrelic_order, celery_invocation):
def instrument_newrelic():
import celery.app.trace as celery_mod
from newrelic.hooks.application_celery import instrument_celery_execute_trace
assert hasattr(celery_mod, "build_tracer")
instrument_celery_execute_trace(celery_mod)
if newrelic_order == "sentry_first":
celery = init_celery()
instrument_newrelic()
elif newrelic_order == "sentry_last":
instrument_newrelic()
celery = init_celery()
else:
raise ValueError(newrelic_order)
@celery.task(name="dummy_task", bind=True)
def dummy_task(self, x, y):
return x / y
assert dummy_task.apply(kwargs={"x": 1, "y": 1}).wait() == 1
assert celery_invocation(dummy_task, 1, 1)[0].wait() == 1
def test_traces_sampler_gets_task_info_in_sampling_context(
init_celery, celery_invocation, DictionaryContaining # noqa:N803
):
traces_sampler = mock.Mock()
celery = init_celery(traces_sampler=traces_sampler)
@celery.task(name="dog_walk")
def walk_dogs(x, y):
dogs, route = x
num_loops = y
return dogs, route, num_loops
_, args_kwargs = celery_invocation(
walk_dogs, [["Maisey", "Charlie", "Bodhi", "Cory"], "Dog park round trip"], 1
)
traces_sampler.assert_any_call(
# depending on the iteration of celery_invocation, the data might be
# passed as args or as kwargs, so make this generic
DictionaryContaining({"celery_job": dict(task="dog_walk", **args_kwargs)})
)
def test_abstract_task(capture_events, celery, celery_invocation):
events = capture_events()
class AbstractTask(celery.Task):
abstract = True
def __call__(self, *args, **kwargs):
try:
return self.run(*args, **kwargs)
except ZeroDivisionError:
return None
@celery.task(name="dummy_task", base=AbstractTask)
def dummy_task(x, y):
return x / y
with start_transaction():
celery_invocation(dummy_task, 1, 0)
assert not events
|
test_etcdlock.py
|
import unittest
import time
import etcd
from threading import Thread
from etcdlock.etcdlock import EtcdLock
class TestEtcdLock(unittest.TestCase):
def setUp(self):
self._thread_value = None
self._etcdlock = EtcdLock(key="key", id="id")
try:
self._etcdlock._client.delete("key")
        except etcd.EtcdKeyNotFound:
pass
def test_same_id_reacquires_lock(self):
self._etcdlock._client.write("key", "id")
self._etcdlock.acquire()
        self.assertEqual("id", self._etcdlock._client.read("key").value)
def test_on_key_not_found_acquire_lock(self):
self._etcdlock.acquire()
        self.assertEqual("id", self._etcdlock._client.read("key").value)
def test_with_statement(self):
with self._etcdlock:
            self.assertEqual("id", self._etcdlock._client.read("key").value)
self.assertRaises(etcd.EtcdKeyNotFound, lambda: self._etcdlock._client.read("key"))
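    # The second client below blocks inside EtcdLock.__enter__ until the first
    # lock is released, so _thread_value only becomes "done" after release().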
def test_block_others_until_done(self):
self._etcdlock.acquire()
Thread(target=lambda: self.__second_client()).start()
time.sleep(1)
self.assertIsNone(self._thread_value)
self._etcdlock.release()
time.sleep(1)
self.assertEqual("done", self._thread_value)
def __second_client(self):
with EtcdLock(key="key", id="id2"):
self._thread_value = "done"
|
robocode_test.py
|
import json
import os
import pickle
import shlex
import socket
import tempfile
import time
from threading import Thread
import numpy as np
from rlai.environments.robocode import RobocodeFeatureExtractor
from rlai.runners.trainer import run
def test_learn():
# set the following to True to update the fixture. if you do this, then you'll also need to start the robocode game
# and uncomment some stuff in rlai.environments.network.TcpMdpEnvironment.read_from_client in order to update the
# test fixture. run a battle for 10 rounds to complete the fixture update.
update_fixture = False
robocode_port = 54321
robocode_mock_thread = None
if not update_fixture:
with open(f'{os.path.dirname(__file__)}/fixtures/test_robocode.pickle', 'rb') as file:
state_sequence, fixture_pi, fixture_q_S_A = pickle.load(file)
# set up a mock robocode game that sends state sequence
def robocode_mock():
# wait for environment to start up and listen for connections
time.sleep(5)
t = 0
while t < len(state_sequence):
# start episode by connecting
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect(('127.0.0.1', robocode_port))
try:
while t < len(state_sequence):
# send the current game state in the sequence
state_dict_json = state_sequence[t]
s.sendall(state_dict_json.encode('utf-8'))
t += 1
# receive next action
s.recv(99999999)
# if the next state starts a new episode, then break.
if t < len(state_sequence):
next_state_dict = json.loads(state_sequence[t])
if next_state_dict['state']['time'] == 0:
break
# if environment closes connection during receive, it ends the episode.
except Exception: # pragma no cover
pass
robocode_mock_thread = Thread(target=robocode_mock)
robocode_mock_thread.start()
# run training and load resulting agent
agent_path = tempfile.NamedTemporaryFile(delete=False).name
cmd = f'--random-seed 12345 --agent rlai.environments.robocode.RobocodeAgent --gamma 0.95 --environment rlai.environments.robocode.RobocodeEnvironment --port {robocode_port} --bullet-power-decay 0.75 --train-function rlai.gpi.temporal_difference.iteration.iterate_value_q_pi --mode SARSA --n-steps 50 --num-improvements 10 --num-episodes-per-improvement 1 --num-updates-per-improvement 1 --epsilon 0.25 --q-S-A rlai.q_S_A.function_approximation.estimators.ApproximateStateActionValueEstimator --function-approximation-model rlai.q_S_A.function_approximation.models.sklearn.SKLearnSGD --loss squared_loss --sgd-alpha 0.0 --learning-rate constant --eta0 0.0001 --feature-extractor rlai.environments.robocode.RobocodeFeatureExtractor --scanned-robot-decay 0.75 --make-final-policy-greedy True --num-improvements-per-plot 100 --save-agent-path {agent_path} --log DEBUG'
run(shlex.split(cmd))
if not update_fixture:
robocode_mock_thread.join()
with open(agent_path, 'rb') as f:
agent = pickle.load(f)
# if we're updating the test fixture, then save the state sequence and resulting policy to disk.
if update_fixture: # pragma no cover
with open(os.path.expanduser('~/Desktop/state_sequence.txt'), 'r') as f:
state_sequence = f.readlines()
with open(f'{os.path.dirname(__file__)}/fixtures/test_robocode.pickle', 'wb') as file:
pickle.dump((state_sequence, agent.pi, agent.pi.estimator), file)
else:
assert np.allclose(agent.pi.estimator.model.model.coef_, fixture_q_S_A.model.model.coef_)
assert np.allclose(agent.pi.estimator.model.model.intercept_, fixture_q_S_A.model.model.intercept_)
def test_feature_extractor():
assert RobocodeFeatureExtractor.is_clockwise_move(0, 1)
assert RobocodeFeatureExtractor.is_clockwise_move(-1, 1)
assert not RobocodeFeatureExtractor.is_clockwise_move(1, 1)
assert RobocodeFeatureExtractor.get_shortest_degree_change(1, -1) == -2
assert RobocodeFeatureExtractor.get_shortest_degree_change(-1, 1) == 2
assert RobocodeFeatureExtractor.get_shortest_degree_change(1, 1) == 0
assert RobocodeFeatureExtractor.normalize(366) == 6
assert RobocodeFeatureExtractor.normalize(-5) == 355
|
test_sys.py
|
import unittest, test.support
from test.support.script_helper import assert_python_ok, assert_python_failure
import sys, io, os
import struct
import subprocess
import textwrap
import warnings
import operator
import codecs
import gc
import sysconfig
import platform
import locale
numruns = 0
try:
import threading
except ImportError:
threading = None
class SysModuleTest(unittest.TestCase):
def setUp(self):
self.orig_stdout = sys.stdout
self.orig_stderr = sys.stderr
self.orig_displayhook = sys.displayhook
def tearDown(self):
sys.stdout = self.orig_stdout
sys.stderr = self.orig_stderr
sys.displayhook = self.orig_displayhook
test.support.reap_children()
def test_original_displayhook(self):
import builtins
out = io.StringIO()
sys.stdout = out
dh = sys.__displayhook__
self.assertRaises(TypeError, dh)
if hasattr(builtins, '_'):
del builtins._
dh(None)
self.assertEqual(out.getvalue(), '')
self.assertTrue(not hasattr(builtins, '_'))
dh(42)
self.assertEqual(out.getvalue(), '42\n')
self.assertEqual(builtins._, 42)
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
def test_lost_displayhook(self):
del sys.displayhook
code = compile('42', '<string>', 'single')
self.assertRaises(RuntimeError, eval, code)
def test_custom_displayhook(self):
def baddisplayhook(obj):
raise ValueError
sys.displayhook = baddisplayhook
code = compile('42', '<string>', 'single')
self.assertRaises(ValueError, eval, code)
def test_original_excepthook(self):
err = io.StringIO()
sys.stderr = err
eh = sys.__excepthook__
self.assertRaises(TypeError, eh)
try:
raise ValueError(42)
except ValueError as exc:
eh(*sys.exc_info())
self.assertTrue(err.getvalue().endswith('ValueError: 42\n'))
def test_excepthook(self):
with test.support.captured_output('stderr') as stderr:
sys.excepthook(1, '1', 1)
self.assertTrue(
'TypeError: print_exception(): Exception expected for value, str found'
in stderr.getvalue())
def test_exit(self):
self.assertRaises(TypeError, sys.exit, 42, 42)
with self.assertRaises(SystemExit) as cm:
sys.exit()
self.assertIsNone(cm.exception.code)
rc, out, err = assert_python_ok('-c', 'import sys; sys.exit()')
self.assertEqual(rc, 0)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
with self.assertRaises(SystemExit) as cm:
sys.exit(42)
self.assertEqual(cm.exception.code, 42)
with self.assertRaises(SystemExit) as cm:
sys.exit((42,))
self.assertEqual(cm.exception.code, 42)
with self.assertRaises(SystemExit) as cm:
sys.exit('exit')
self.assertEqual(cm.exception.code, 'exit')
with self.assertRaises(SystemExit) as cm:
sys.exit((17, 23))
self.assertEqual(cm.exception.code, (17, 23))
rc, out, err = assert_python_failure('-c', 'raise SystemExit(47)')
self.assertEqual(rc, 47)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
def check_exit_message(code, expected, **env_vars):
rc, out, err = assert_python_failure('-c', code, **env_vars)
self.assertEqual(rc, 1)
self.assertEqual(out, b'')
self.assertTrue(err.startswith(expected),
"%s doesn't start with %s" % (ascii(err), ascii(expected)))
check_exit_message(
'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
b'unflushed,message')
check_exit_message('import sys; sys.exit("surrogates:\\uDCFF")',
b'surrogates:\\udcff')
check_exit_message('import sys; sys.exit("h\\xe9")', b'h\xe9',
PYTHONIOENCODING='latin-1')
def test_getdefaultencoding(self):
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
self.assertIsInstance(sys.getdefaultencoding(), str)
def test_setcheckinterval(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
self.assertRaises(TypeError, sys.setcheckinterval)
orig = sys.getcheckinterval()
for n in (0, 100, 120, orig):
sys.setcheckinterval(n)
self.assertEqual(sys.getcheckinterval(), n)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_switchinterval(self):
self.assertRaises(TypeError, sys.setswitchinterval)
self.assertRaises(TypeError, sys.setswitchinterval, 'a')
self.assertRaises(ValueError, sys.setswitchinterval, -1.0)
self.assertRaises(ValueError, sys.setswitchinterval, 0.0)
orig = sys.getswitchinterval()
self.assertTrue(orig < 0.5, orig)
try:
for n in (1e-05, 0.05, 3.0, orig):
sys.setswitchinterval(n)
self.assertAlmostEqual(sys.getswitchinterval(), n)
finally:
sys.setswitchinterval(orig)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
def test_recursionlimit_recovery(self):
if hasattr(sys, 'gettrace') and sys.gettrace():
self.skipTest('fatal error if run with a trace function')
oldlimit = sys.getrecursionlimit()
def f():
f()
try:
for depth in (10, 25, 50, 75, 100, 250, 1000):
try:
sys.setrecursionlimit(depth)
except RecursionError:
continue
self.assertRaises(RecursionError, f)
self.assertRaises(RecursionError, f)
finally:
sys.setrecursionlimit(oldlimit)
@test.support.cpython_only
def test_setrecursionlimit_recursion_depth(self):
from _testcapi import get_recursion_depth
def set_recursion_limit_at_depth(depth, limit):
recursion_depth = get_recursion_depth()
if recursion_depth >= depth:
with self.assertRaises(RecursionError) as cm:
sys.setrecursionlimit(limit)
self.assertRegex(str(cm.exception),
'cannot set the recursion limit to [0-9]+ at the recursion depth [0-9]+: the limit is too low'
)
else:
set_recursion_limit_at_depth(depth, limit)
oldlimit = sys.getrecursionlimit()
try:
sys.setrecursionlimit(1000)
for limit in (10, 25, 50, 75, 100, 150, 200):
if limit > 200:
depth = limit - 50
else:
depth = limit * 3 // 4
set_recursion_limit_at_depth(depth, limit)
finally:
sys.setrecursionlimit(oldlimit)
def test_recursionlimit_fatalerror(self):
code = textwrap.dedent(
"""
import sys
def f():
try:
f()
except RecursionError:
f()
sys.setrecursionlimit(%d)
f()"""
)
with test.support.SuppressCrashReport():
for i in (50, 1000):
sub = subprocess.Popen([sys.executable, '-c', code % i],
stderr=subprocess.PIPE)
err = sub.communicate()[1]
self.assertTrue(sub.returncode, sub.returncode)
self.assertIn(
b'Fatal Python error: Cannot recover from stack overflow',
err)
def test_getwindowsversion(self):
test.support.get_attribute(sys, 'getwindowsversion')
v = sys.getwindowsversion()
self.assertEqual(len(v), 5)
self.assertIsInstance(v[0], int)
self.assertIsInstance(v[1], int)
self.assertIsInstance(v[2], int)
self.assertIsInstance(v[3], int)
self.assertIsInstance(v[4], str)
self.assertRaises(IndexError, operator.getitem, v, 5)
self.assertIsInstance(v.major, int)
self.assertIsInstance(v.minor, int)
self.assertIsInstance(v.build, int)
self.assertIsInstance(v.platform, int)
self.assertIsInstance(v.service_pack, str)
self.assertIsInstance(v.service_pack_minor, int)
self.assertIsInstance(v.service_pack_major, int)
self.assertIsInstance(v.suite_mask, int)
self.assertIsInstance(v.product_type, int)
self.assertEqual(v[0], v.major)
self.assertEqual(v[1], v.minor)
self.assertEqual(v[2], v.build)
self.assertEqual(v[3], v.platform)
self.assertEqual(v[4], v.service_pack)
maj, min, buildno, plat, csd = sys.getwindowsversion()
def test_call_tracing(self):
self.assertRaises(TypeError, sys.call_tracing, type, 2)
@unittest.skipUnless(hasattr(sys, 'setdlopenflags'),
'test needs sys.setdlopenflags()')
def test_dlopenflags(self):
self.assertTrue(hasattr(sys, 'getdlopenflags'))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags + 1)
self.assertEqual(sys.getdlopenflags(), oldflags + 1)
sys.setdlopenflags(oldflags)
@test.support.refcount_test
def test_refcount(self):
global n
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c + 1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, 'gettotalrefcount'):
self.assertIsInstance(sys.gettotalrefcount(), int)
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
        self.assertTrue(SysModuleTest.test_getframe.__code__ is sys._getframe().f_code)
def test_current_frames(self):
have_threads = True
try:
import _thread
except ImportError:
have_threads = False
if have_threads:
self.current_frames_with_threads()
else:
self.current_frames_without_threads()
@test.support.reap_threads
def current_frames_with_threads(self):
import threading
import traceback
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = []
def f123():
g456()
def g456():
thread_info.append(threading.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
main_id = threading.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
frame = d.pop(main_id)
self.assertTrue(frame is sys._getframe())
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == 'f123':
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, 'g456()')
filename, lineno, funcname, sourceline = stack[i + 1]
self.assertEqual(funcname, 'g456')
self.assertIn(sourceline, ['leave_g.wait()', 'entered_g.set()'])
leave_g.set()
t.join()
def current_frames_without_threads(self):
d = sys._current_frames()
self.assertEqual(len(d), 1)
self.assertIn(0, d)
self.assertTrue(d[0] is sys._getframe())
def test_attributes(self):
self.assertIsInstance(sys.api_version, int)
self.assertIsInstance(sys.argv, list)
self.assertIn(sys.byteorder, ('little', 'big'))
self.assertIsInstance(sys.builtin_module_names, tuple)
self.assertIsInstance(sys.copyright, str)
self.assertIsInstance(sys.exec_prefix, str)
self.assertIsInstance(sys.base_exec_prefix, str)
self.assertIsInstance(sys.executable, str)
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assertEqual(len(sys.int_info), 2)
self.assertTrue(sys.int_info.bits_per_digit % 5 == 0)
self.assertTrue(sys.int_info.sizeof_digit >= 1)
self.assertEqual(type(sys.int_info.bits_per_digit), int)
self.assertEqual(type(sys.int_info.sizeof_digit), int)
self.assertIsInstance(sys.hexversion, int)
self.assertEqual(len(sys.hash_info), 9)
self.assertLess(sys.hash_info.modulus, 2 ** sys.hash_info.width)
for x in range(1, 100):
            self.assertEqual(pow(x, sys.hash_info.modulus - 1, sys.hash_info.modulus), 1,
                             'sys.hash_info.modulus {} is a non-prime'.format(sys.hash_info.modulus))
self.assertIsInstance(sys.hash_info.inf, int)
self.assertIsInstance(sys.hash_info.nan, int)
self.assertIsInstance(sys.hash_info.imag, int)
algo = sysconfig.get_config_var('Py_HASH_ALGORITHM')
if sys.hash_info.algorithm in {'fnv', 'siphash24'}:
self.assertIn(sys.hash_info.hash_bits, {32, 64})
self.assertIn(sys.hash_info.seed_bits, {32, 64, 128})
if algo == 1:
self.assertEqual(sys.hash_info.algorithm, 'siphash24')
elif algo == 2:
self.assertEqual(sys.hash_info.algorithm, 'fnv')
else:
self.assertIn(sys.hash_info.algorithm, {'fnv', 'siphash24'})
else:
self.assertEqual(algo, 0)
self.assertGreaterEqual(sys.hash_info.cutoff, 0)
self.assertLess(sys.hash_info.cutoff, 8)
self.assertIsInstance(sys.maxsize, int)
self.assertIsInstance(sys.maxunicode, int)
self.assertEqual(sys.maxunicode, 1114111)
self.assertIsInstance(sys.platform, str)
self.assertIsInstance(sys.prefix, str)
self.assertIsInstance(sys.base_prefix, str)
self.assertIsInstance(sys.version, str)
vi = sys.version_info
self.assertIsInstance(vi[:], tuple)
self.assertEqual(len(vi), 5)
self.assertIsInstance(vi[0], int)
self.assertIsInstance(vi[1], int)
self.assertIsInstance(vi[2], int)
self.assertIn(vi[3], ('alpha', 'beta', 'candidate', 'final'))
self.assertIsInstance(vi[4], int)
self.assertIsInstance(vi.major, int)
self.assertIsInstance(vi.minor, int)
self.assertIsInstance(vi.micro, int)
self.assertIn(vi.releaselevel, ('alpha', 'beta', 'candidate', 'final'))
self.assertIsInstance(vi.serial, int)
self.assertEqual(vi[0], vi.major)
self.assertEqual(vi[1], vi.minor)
self.assertEqual(vi[2], vi.micro)
self.assertEqual(vi[3], vi.releaselevel)
self.assertEqual(vi[4], vi.serial)
self.assertTrue(vi > (1, 0, 0))
self.assertIsInstance(sys.float_repr_style, str)
self.assertIn(sys.float_repr_style, ('short', 'legacy'))
if not sys.platform.startswith('win'):
self.assertIsInstance(sys.abiflags, str)
@unittest.skipUnless(hasattr(sys, 'thread_info'),
'Threading required for this test.')
def test_thread_info(self):
info = sys.thread_info
self.assertEqual(len(info), 3)
self.assertIn(info.name, ('nt', 'pthread', 'solaris', None))
self.assertIn(info.lock, ('semaphore', 'mutex+cond', None))
def test_43581(self):
self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
def test_intern(self):
global numruns
numruns += 1
self.assertRaises(TypeError, sys.intern)
s = 'never interned before' + str(numruns)
self.assertTrue(sys.intern(s) is s)
s2 = s.swapcase().swapcase()
self.assertTrue(sys.intern(s2) is s)
class S(str):
def __hash__(self):
return 123
self.assertRaises(TypeError, sys.intern, S('abc'))
def test_sys_flags(self):
self.assertTrue(sys.flags)
attrs = ('debug', 'inspect', 'interactive', 'optimize',
'dont_write_bytecode', 'no_user_site', 'no_site',
'ignore_environment', 'verbose', 'bytes_warning', 'quiet',
'hash_randomization', 'isolated')
for attr in attrs:
self.assertTrue(hasattr(sys.flags, attr), attr)
self.assertEqual(type(getattr(sys.flags, attr)), int, attr)
self.assertTrue(repr(sys.flags))
self.assertEqual(len(sys.flags), len(attrs))
def assert_raise_on_new_sys_type(self, sys_attr):
attr_type = type(sys_attr)
with self.assertRaises(TypeError):
attr_type()
with self.assertRaises(TypeError):
attr_type.__new__(attr_type)
def test_sys_flags_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.flags)
def test_sys_version_info_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.version_info)
def test_sys_getwindowsversion_no_instantiation(self):
test.support.get_attribute(sys, 'getwindowsversion')
self.assert_raise_on_new_sys_type(sys.getwindowsversion())
@test.support.cpython_only
def test_clear_type_cache(self):
sys._clear_type_cache()
def test_ioencoding(self):
env = dict(os.environ)
env['PYTHONIOENCODING'] = 'cp424'
p = subprocess.Popen([sys.executable, '-c', 'print(chr(0xa2))'],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
expected = ('¢' + os.linesep).encode('cp424')
self.assertEqual(out, expected)
env['PYTHONIOENCODING'] = 'ascii:replace'
p = subprocess.Popen([sys.executable, '-c', 'print(chr(0xa2))'],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'?')
env['PYTHONIOENCODING'] = 'ascii'
p = subprocess.Popen([sys.executable, '-c', 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(b"'\\xa2'", err)
env['PYTHONIOENCODING'] = 'ascii:'
p = subprocess.Popen([sys.executable, '-c', 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(b"'\\xa2'", err)
env['PYTHONIOENCODING'] = ':surrogateescape'
p = subprocess.Popen([sys.executable, '-c', 'print(chr(0xdcbd))'],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'\xbd')
@unittest.skipUnless(test.support.FS_NONASCII,
'requires OS support of non-ASCII encodings')
    @unittest.skipUnless(sys.getfilesystemencoding() == locale.getpreferredencoding(False),
                         'requires FS encoding to match locale')
def test_ioencoding_nonascii(self):
env = dict(os.environ)
env['PYTHONIOENCODING'] = ''
        p = subprocess.Popen([sys.executable, '-c', 'print(%a)' % test.support.FS_NONASCII],
                             stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, os.fsencode(test.support.FS_NONASCII))
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable(self):
self.assertEqual(os.path.abspath(sys.executable), sys.executable)
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(['nonexistent', '-c',
'import sys; print(sys.executable.encode("ascii", "backslashreplace"))'
], executable=sys.executable, stdout=subprocess.PIPE, cwd=
python_dir)
stdout = p.communicate()[0]
executable = stdout.strip().decode('ASCII')
p.wait()
self.assertIn(executable, ["b''", repr(sys.executable.encode(
'ascii', 'backslashreplace'))])
def check_fsencoding(self, fs_encoding, expected=None):
self.assertIsNotNone(fs_encoding)
codecs.lookup(fs_encoding)
if expected:
self.assertEqual(fs_encoding, expected)
def test_getfilesystemencoding(self):
fs_encoding = sys.getfilesystemencoding()
if sys.platform == 'darwin':
expected = 'utf-8'
else:
expected = None
self.check_fsencoding(fs_encoding, expected)
def c_locale_get_error_handler(self, isolated=False, encoding=None):
env = os.environ.copy()
env['LC_ALL'] = 'C'
code = '\n'.join(('import sys', 'def dump(name):',
' std = getattr(sys, name)',
' print("%s: %s" % (name, std.errors))', 'dump("stdin")',
'dump("stdout")', 'dump("stderr")'))
args = [sys.executable, '-c', code]
if isolated:
args.append('-I')
if encoding is not None:
env['PYTHONIOENCODING'] = encoding
else:
env.pop('PYTHONIOENCODING', None)
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=
subprocess.STDOUT, env=env, universal_newlines=True)
stdout, stderr = p.communicate()
return stdout
def test_c_locale_surrogateescape(self):
out = self.c_locale_get_error_handler(isolated=True)
self.assertEqual(out,
"""stdin: surrogateescape
stdout: surrogateescape
stderr: backslashreplace
"""
)
out = self.c_locale_get_error_handler(encoding=':ignore')
self.assertEqual(out,
'stdin: ignore\nstdout: ignore\nstderr: backslashreplace\n')
out = self.c_locale_get_error_handler(encoding='iso8859-1')
self.assertEqual(out,
'stdin: strict\nstdout: strict\nstderr: backslashreplace\n')
out = self.c_locale_get_error_handler(encoding='iso8859-1:')
self.assertEqual(out,
'stdin: strict\nstdout: strict\nstderr: backslashreplace\n')
out = self.c_locale_get_error_handler(encoding=':')
self.assertEqual(out,
"""stdin: surrogateescape
stdout: surrogateescape
stderr: backslashreplace
"""
)
out = self.c_locale_get_error_handler(encoding='')
self.assertEqual(out,
"""stdin: surrogateescape
stdout: surrogateescape
stderr: backslashreplace
"""
)
def test_implementation(self):
levels = {'alpha': 10, 'beta': 11, 'candidate': 12, 'final': 15}
self.assertTrue(hasattr(sys.implementation, 'name'))
self.assertTrue(hasattr(sys.implementation, 'version'))
self.assertTrue(hasattr(sys.implementation, 'hexversion'))
self.assertTrue(hasattr(sys.implementation, 'cache_tag'))
version = sys.implementation.version
self.assertEqual(version[:2], (version.major, version.minor))
        hexversion = (version.major << 24 | version.minor << 16 |
                      version.micro << 8 | levels[version.releaselevel] << 4 |
                      version.serial << 0)
        self.assertEqual(sys.implementation.hexversion, hexversion)
        self.assertEqual(sys.implementation.name, sys.implementation.name.lower())
@test.support.cpython_only
def test_debugmallocstats(self):
from test.support.script_helper import assert_python_ok
args = ['-c', 'import sys; sys._debugmallocstats()']
ret, out, err = assert_python_ok(*args)
self.assertIn(b'free PyDictObjects', err)
self.assertRaises(TypeError, sys._debugmallocstats, True)
@unittest.skipUnless(hasattr(sys, 'getallocatedblocks'),
'sys.getallocatedblocks unavailable on this build')
def test_getallocatedblocks(self):
with_pymalloc = sysconfig.get_config_var('WITH_PYMALLOC')
a = sys.getallocatedblocks()
self.assertIs(type(a), int)
if with_pymalloc:
self.assertGreater(a, 0)
else:
self.assertGreaterEqual(a, 0)
try:
self.assertLess(a, sys.gettotalrefcount())
except AttributeError:
pass
gc.collect()
b = sys.getallocatedblocks()
self.assertLessEqual(b, a)
gc.collect()
c = sys.getallocatedblocks()
self.assertIn(c, range(b - 50, b + 50))
@test.support.requires_type_collecting
def test_is_finalizing(self):
self.assertIs(sys.is_finalizing(), False)
code = """if 1:
import sys
class AtExit:
is_finalizing = sys.is_finalizing
print = print
def __del__(self):
self.print(self.is_finalizing(), flush=True)
# Keep a reference in the __main__ module namespace, so the
# AtExit destructor will be called at Python exit
ref = AtExit()
"""
rc, stdout, stderr = assert_python_ok('-c', code)
self.assertEqual(stdout.rstrip(), b'True')
@test.support.cpython_only
class SizeofTest(unittest.TestCase):
def setUp(self):
self.P = struct.calcsize('P')
self.longdigit = sys.int_info.sizeof_digit
import _testcapi
self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD
check_sizeof = test.support.check_sizeof
def test_gc_head_size(self):
vsize = test.support.calcvobjsize
gc_header_size = self.gc_headsize
self.assertEqual(sys.getsizeof(True), vsize('') + self.longdigit)
self.assertEqual(sys.getsizeof([]), vsize('Pn') + gc_header_size)
def test_errors(self):
class BadSizeof:
def __sizeof__(self):
raise ValueError
self.assertRaises(ValueError, sys.getsizeof, BadSizeof())
class InvalidSizeof:
def __sizeof__(self):
return None
self.assertRaises(TypeError, sys.getsizeof, InvalidSizeof())
sentinel = ['sentinel']
self.assertIs(sys.getsizeof(InvalidSizeof(), sentinel), sentinel)
class FloatSizeof:
def __sizeof__(self):
return 4.5
self.assertRaises(TypeError, sys.getsizeof, FloatSizeof())
self.assertIs(sys.getsizeof(FloatSizeof(), sentinel), sentinel)
class OverflowSizeof(int):
def __sizeof__(self):
return int(self)
        self.assertEqual(sys.getsizeof(OverflowSizeof(sys.maxsize)),
                         sys.maxsize + self.gc_headsize)
with self.assertRaises(OverflowError):
sys.getsizeof(OverflowSizeof(sys.maxsize + 1))
with self.assertRaises(ValueError):
sys.getsizeof(OverflowSizeof(-1))
with self.assertRaises((ValueError, OverflowError)):
sys.getsizeof(OverflowSizeof(-sys.maxsize - 1))
def test_default(self):
size = test.support.calcvobjsize
self.assertEqual(sys.getsizeof(True), size('') + self.longdigit)
self.assertEqual(sys.getsizeof(True, -1), size('') + self.longdigit)
def test_objecttypes(self):
calcsize = struct.calcsize
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
check(True, vsize('') + self.longdigit)
check(len, size('4P'))
samples = [b'', b'u' * 100000]
for sample in samples:
x = bytearray(sample)
check(x, vsize('n2Pi') + x.__alloc__())
check(iter(bytearray()), size('nP'))
check(b'', vsize('n') + 1)
check(b'x' * 10, vsize('n') + 11)
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().__closure__[0], size('P'))
def check_code_size(a, expected_size):
self.assertGreaterEqual(sys.getsizeof(a), expected_size)
check_code_size(get_cell().__code__, size('6i13P'))
check_code_size(get_cell.__code__, size('6i13P'))
def get_cell2(x):
def inner():
return x
return inner
check_code_size(get_cell2.__code__, size('6i13P') + calcsize('n'))
check(complex(0, 1), size('2d'))
check(str.lower, size('3PP'))
import datetime
check(datetime.timedelta.days, size('3PP'))
import collections
check(collections.defaultdict.default_factory, size('3PP'))
check(int.__add__, size('3P2P'))
check({}.__iter__, size('2P'))
check({}, size('nQ2P') + calcsize('2nP2n') + 8 + 8 * 2 // 3 *
calcsize('n2P'))
longdict = {(1): 1, (2): 2, (3): 3, (4): 4, (5): 5, (6): 6, (7): 7,
(8): 8}
check(longdict, size('nQ2P') + calcsize('2nP2n') + 16 + 16 * 2 // 3 *
calcsize('n2P'))
check({}.keys(), size('P'))
check({}.values(), size('P'))
check({}.items(), size('P'))
check(iter({}), size('P2nPn'))
check(iter({}.keys()), size('P2nPn'))
check(iter({}.values()), size('P2nPn'))
check(iter({}.items()), size('P2nPn'))
class C(object):
pass
check(C.__dict__, size('P'))
check(BaseException(), size('5Pb'))
check(UnicodeEncodeError('', '', 0, 0, ''), size('5Pb 2P2nP'))
check(UnicodeDecodeError('', b'', 0, 0, ''), size('5Pb 2P2nP'))
check(UnicodeTranslateError('', 0, 1, ''), size('5Pb 2P2nP'))
check(Ellipsis, size(''))
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size('32B2iB'))
check(enumerate([]), size('n3P'))
check(reversed(''), size('nP'))
check(float(0), size('d'))
check(sys.float_info, vsize('') + self.P * len(sys.float_info))
import inspect
CO_MAXBLOCKS = 20
x = inspect.currentframe()
ncells = len(x.f_code.co_cellvars)
nfrees = len(x.f_code.co_freevars)
extras = (x.f_code.co_stacksize + x.f_code.co_nlocals + ncells +
nfrees - 1)
check(x, vsize('12P3ic' + CO_MAXBLOCKS * '3i' + 'P' + extras * 'P'))
def func():
pass
check(func, size('12P'))
class c:
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
check(foo, size('PP'))
check(bar, size('PP'))
def get_gen():
yield 1
check(get_gen(), size('Pb2PPP'))
check(iter('abc'), size('lP'))
import re
check(re.finditer('', ''), size('2P'))
samples = [[], [1, 2, 3], ['1', '2', '3']]
for sample in samples:
check(sample, vsize('Pn') + len(sample) * self.P)
check(iter([]), size('lP'))
check(reversed([]), size('nP'))
check(0, vsize(''))
check(1, vsize('') + self.longdigit)
check(-1, vsize('') + self.longdigit)
PyLong_BASE = 2 ** sys.int_info.bits_per_digit
check(int(PyLong_BASE), vsize('') + 2 * self.longdigit)
check(int(PyLong_BASE ** 2 - 1), vsize('') + 2 * self.longdigit)
check(int(PyLong_BASE ** 2), vsize('') + 3 * self.longdigit)
check(unittest, size('PnPPP'))
check(None, size(''))
check(NotImplemented, size(''))
check(object(), size(''))
class C(object):
def getx(self):
return self.__x
def setx(self, value):
self.__x = value
def delx(self):
del self.__x
x = property(getx, setx, delx, '')
check(x, size('4Pi'))
check(iter(range(1)), size('4l'))
check(reversed(''), size('nP'))
check(range(1), size('4P'))
check(range(66000), size('4P'))
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size('3nP' + PySet_MINSIZE * 'nP' + '2nP')
for sample in samples:
minused = len(sample)
if minused == 0:
tmp = 1
minused = minused * 2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize * calcsize('nP'))
check(frozenset(sample), s + newsize * calcsize('nP'))
check(iter(set()), size('P3n'))
check(slice(0), size('3P'))
check(super(int), size('3P'))
check((), vsize(''))
check((1, 2, 3), vsize('') + 3 * self.P)
fmt = 'P2n15Pl4Pn9Pn11PIP'
if hasattr(sys, 'getcounts'):
fmt += '3n2P'
s = vsize(fmt)
check(int, s)
s = vsize(fmt + '3P36P3P10P2P4P')
s += calcsize('2nP2n') + 8 + 5 * calcsize('n2P')
class newstyleclass(object):
pass
check(newstyleclass, s)
check(newstyleclass().__dict__, size('nQ2P' + '2nP2n'))
samples = ['1' * 100, 'ÿ' * 50, 'Ā' * 40, '\uffff' * 100, '𐀀' * 30,
'\U0010ffff' * 100]
asciifields = 'nnbP'
compactfields = asciifields + 'nPn'
unicodefields = compactfields + 'P'
for s in samples:
maxchar = ord(max(s))
if maxchar < 128:
L = size(asciifields) + len(s) + 1
elif maxchar < 256:
L = size(compactfields) + len(s) + 1
elif maxchar < 65536:
L = size(compactfields) + 2 * (len(s) + 1)
else:
L = size(compactfields) + 4 * (len(s) + 1)
check(s, L)
s = chr(16384)
check(s, size(compactfields) + 4)
compile(s, '<stdin>', 'eval')
check(s, size(compactfields) + 4 + 4)
import weakref
check(weakref.ref(int), size('2Pn2P'))
check(weakref.proxy(int), size('2Pn2P'))
def check_slots(self, obj, base, extra):
expected = sys.getsizeof(base) + struct.calcsize(extra)
if gc.is_tracked(obj) and not gc.is_tracked(base):
expected += self.gc_headsize
self.assertEqual(sys.getsizeof(obj), expected)
def test_slots(self):
check = self.check_slots
class BA(bytearray):
__slots__ = 'a', 'b', 'c'
check(BA(), bytearray(), '3P')
class D(dict):
__slots__ = 'a', 'b', 'c'
check(D(x=[]), {'x': []}, '3P')
class L(list):
__slots__ = 'a', 'b', 'c'
check(L(), [], '3P')
class S(set):
__slots__ = 'a', 'b', 'c'
check(S(), set(), '3P')
class FS(frozenset):
__slots__ = 'a', 'b', 'c'
check(FS(), frozenset(), '3P')
from collections import OrderedDict
class OD(OrderedDict):
__slots__ = 'a', 'b', 'c'
check(OD(x=[]), OrderedDict(x=[]), '3P')
def test_pythontypes(self):
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
import _ast
check(_ast.AST(), size('P'))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
if tb is not None:
check(tb, size('2P2i'))
check(sys.flags, vsize('') + self.P * len(sys.flags))
def test_asyncgen_hooks(self):
old = sys.get_asyncgen_hooks()
self.assertIsNone(old.firstiter)
self.assertIsNone(old.finalizer)
firstiter = lambda *a: None
sys.set_asyncgen_hooks(firstiter=firstiter)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, None)
self.assertIs(hooks[1], None)
finalizer = lambda *a: None
sys.set_asyncgen_hooks(finalizer=finalizer)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, finalizer)
self.assertIs(hooks[1], finalizer)
sys.set_asyncgen_hooks(*old)
cur = sys.get_asyncgen_hooks()
self.assertIsNone(cur.firstiter)
self.assertIsNone(cur.finalizer)
def test_main():
test.support.run_unittest(SysModuleTest, SizeofTest)
if __name__ == '__main__':
test_main()
|
main.py
|
#!/usr/bin/python3
# -*- coding:utf-8 -*-
from plumbum import cli
import time
import threading
import socket
from log import *
from influxdb import InfluxDBClient
import pandas as pd
import pyarrow.parquet as pq
import pyarrow as pa
from utils import set_last_time,get_last_time_config
import numpy as np
from parquetcolumns import all_name_dict2
socket.setdefaulttimeout(10)
# influx -username root -password root -port 58086 -precision rfc3339
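# Overview: on an hourly cycle this service looks up the last exported date and, for
# every missing day, every configured turbine and every configured measurement, queries
# InfluxDB in one-hour windows and writes the rows to a per-turbine daily Parquet file
# under the configured uploadpath (see data_complement() and exportInfluxdb_day() below).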
class Influxdb2parquet(cli.Application):
    # This service exports InfluxDB measurement data to daily Parquet files.
PROGNAME = 'data cycle Scheduler'
VERSION = '0.1'
config = None
client = None
farm = None
def initialize(self):
# noinspection PyBroadException
try:
self.client = InfluxDBClient(host=self.config.get('influxdb', 'host'),
port=self.config.get('influxdb', 'port'),
username=self.config.get('influxdb', 'username'),
password=self.config.get('influxdb', 'password'),
database=self.config.get('influxdb', 'database'))
self.farm = get_last_time_config("farm")
logger_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'logs', 'influxdb2parquet.log')
log_init(logger_path, site_name=self.config.get("global", "site"), service='influxdb to parquet')
        except Exception as e:
            print('Start service failed. ' + str(e))
            if self.client is not None:
                self.client.close()
            exit(-1)
log(level='info', msg='service init complete!') # info
def load_config(self):
self.config = configparser.RawConfigParser()
self.config.read(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'resources', 'config.cfg'))
def _heart_beat(self):
while True:
time.sleep(600)
log('info', 'Heartbeat')
def heart_beat(self):
thread = threading.Thread(target=self._heart_beat, args=())
thread.start()
    def exportInfluxdb_day(self, start_time, end_time):
try:
            # Format timestamps as 2018-07-16T10:00:00Z; the query window is shifted to UTC,
            # which is 8 hours behind the local time used here.
start = (start_time - datetime.timedelta(hours=8))
end = (end_time - datetime.timedelta(hours=8))
for turbineid in range(int(self.config.get("global", "turbines"))):
currentid = str(turbineid + 1).zfill(2) + "#"
for measurement in eval(self.config.get("global", "measurements")):
pqwriter = None
current = start
                    while current < end:
query = "SELECT * FROM "+measurement+" WHERE farm = '{0}' and turbine = '{1}' and time>= '{2}' and time < '{3}';".format(
self.farm, currentid, current.strftime("%Y-%m-%dT%H:%M:%SZ"), (current+datetime.timedelta(hours=1)).strftime("%Y-%m-%dT%H:%M:%SZ"))
# print(query)
df = pd.DataFrame(self.client.query(query).get_points())
                        if df.size > 0:
                            # Ensure every expected column exists; columns missing from
                            # this hour's result set are filled with NaN.
                            for c in all_name_dict2:
                                if c not in df.columns:
                                    df[c] = np.nan
                            df = df.reindex(columns=all_name_dict2)
                            df = df.fillna(np.nan)
                            # InfluxDB returns UTC timestamps; shift back to local time (+8h).
                            df['time'] = pd.to_datetime(df['time']).dt.tz_localize(None)
                            df['time'] = df['time'] + datetime.timedelta(hours=8)
dir_name = self.config.get("global", "uploadpath") + start_time.strftime("%Y-%m-%d") +"/"
filename = currentid+ "_" + start_time.strftime("%Y-%m-%d") + "_" + end_time.strftime("%Y-%m-%d") + '.parquet'
filepath = dir_name + filename
if not os.path.exists(dir_name):
os.makedirs(dir_name)
table = pa.Table.from_pandas(df)
if pqwriter is None:
pqwriter = pq.ParquetWriter(filepath, table.schema)
pqwriter.write_table(table=table)
print("write parquet ["+filepath+"]")
log("info", "export " + measurement + " to " + filepath + " success")
                        current = current + datetime.timedelta(hours=1)
if pqwriter:
pqwriter.close()
time_string = end_time.strftime("%Y-%m-%d")
set_last_time("influxdb_day_lasttime", time_string)
log("info", "end influx day data , date is: " + str(start_time) + " - " + str(end_time))
except Exception as e:
print(e)
log("error", str(e))
    # Step forward one day at a time from last_day up to now - 1.
def data_complement(self):
try:
            # Date up to which the daily InfluxDB export has already run.
influxdb_day_time_string = get_last_time_config("influxdb_day_lasttime")
print(influxdb_day_time_string+"start.")
last_infuxdb_day = datetime.datetime.strptime(influxdb_day_time_string, "%Y-%m-%d")
influx_different_days = (datetime.datetime.now() - last_infuxdb_day).days
end_influx_day_date = datetime.datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
            if influx_different_days > 0:  # export the backlog of past days
log("info", "start influx day data [" +influxdb_day_time_string+"]---["+ str(influx_different_days) + "] day")
for day in range(influx_different_days):
start = end_influx_day_date - datetime.timedelta(days=influx_different_days - day)
end = end_influx_day_date - datetime.timedelta(days=influx_different_days - day - 1)
                    # export this day's files
self.exportInfluxdb_day(start,end)
            elif influx_different_days == 0:  # export today's files
log("info", "start influx day data [" +influxdb_day_time_string+"]---["+ str(influx_different_days) + "] day")
start = end_influx_day_date
end = end_influx_day_date + datetime.timedelta(days=1)
                # export today's files
self.exportInfluxdb_day(start,end)
except Exception as e:
log("error", "influx day data complement failed, " + str(e))
def main(self):
print("-----------")
self.load_config()
self.initialize()
        # start the heartbeat thread (logs liveness every 10 minutes)
self.heart_beat()
while True:
            # export one file per day from lasttime up to now
self.data_complement()
print(datetime.datetime.now().strftime("%Y-%m-%d")+"finished.")
            # check again in one hour
time.sleep(3600)
if __name__ == '__main__':
Influxdb2parquet.run()
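# A minimal sketch of the resources/config.cfg layout this script expects, inferred from
# the keys read above; the values below are placeholders, not real deployment settings:
#
#   [global]
#   site = example-site
#   turbines = 2
#   measurements = ['scada_data']
#   uploadpath = /data/parquet/
#
#   [influxdb]
#   host = 127.0.0.1
#   port = 58086
#   username = root
#   password = root
#   database = example_db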
|
test_mongoexp.py
|
import six.moves.cPickle as pickle
import os
import signal
import subprocess
import sys
import threading
import time
import unittest
import numpy as np
import nose
import nose.plugins.skip
from hyperopt.base import JOB_STATE_DONE
from hyperopt.mongoexp import parse_url
from hyperopt.mongoexp import MongoTrials
from hyperopt.mongoexp import MongoWorker
from hyperopt.mongoexp import ReserveTimeout
from hyperopt.mongoexp import as_mongo_str
from hyperopt.mongoexp import main_worker_helper
from hyperopt.mongoexp import MongoJobs
from hyperopt.fmin import fmin
from hyperopt import rand
import hyperopt.tests.test_base
from .test_domains import gauss_wave2
def skiptest(f):
def wrapper(*args, **kwargs):
raise nose.plugins.skip.SkipTest()
wrapper.__name__ = f.__name__
return wrapper
class TempMongo:
"""
Context manager for tests requiring a live database.
with TempMongo() as foo:
mj = foo.mongo_jobs('test1')
"""
def __init__(self, workdir="/tmp/hyperopt_test"):
self.workdir = workdir
def __enter__(self):
try:
open(self.workdir)
assert 0
except OSError:
subprocess.call(["mkdir", "-p", "%s/db" % self.workdir])
proc_args = [
"mongod",
"--dbpath=%s/db" % self.workdir,
"--noprealloc",
"--port=22334",
]
print("starting mongod", proc_args)
self.mongo_proc = subprocess.Popen(
proc_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.workdir, # this prevented mongod assertion fail
)
try:
interval = 0.125
while interval <= 2:
if interval > 0.125:
print("Waiting for mongo to come up")
time.sleep(interval)
interval *= 2
if self.db_up():
break
if self.db_up():
return self
else:
try:
os.kill(self.mongo_proc.pid, signal.SIGTERM)
except OSError:
pass # if it crashed there is no such process
out, err = self.mongo_proc.communicate()
print(out, file=sys.stderr)
print(err, file=sys.stderr)
raise RuntimeError("No database connection", proc_args)
except Exception as e:
try:
os.kill(self.mongo_proc.pid, signal.SIGTERM)
except OSError:
pass # if it crashed there is no such process
raise e
def __exit__(self, *args):
os.kill(self.mongo_proc.pid, signal.SIGTERM)
self.mongo_proc.wait()
subprocess.call(["rm", "-Rf", self.workdir])
@staticmethod
def connection_string(dbname):
return as_mongo_str(f"localhost:22334/{dbname}/jobs")
@staticmethod
def mongo_jobs(dbname):
return MongoJobs.new_from_connection_str(TempMongo.connection_string(dbname))
def db_up(self):
try:
self.mongo_jobs("__test_db")
return True
except: # XXX: don't know what exceptions to put here
return False
def test_parse_url():
uris = [
"mongo://hyperopt:foobar@127.0.0.1:27017/hyperoptdb/jobs",
"mongo://hyperopt:foobar@127.0.0.1:27017/hyperoptdb/jobs?authSource=db1",
]
expected = [
("mongo", "hyperopt", "foobar", "127.0.0.1", 27017, "hyperoptdb", "jobs", None),
(
"mongo",
"hyperopt",
"foobar",
"127.0.0.1",
27017,
"hyperoptdb",
"jobs",
"db1",
),
]
for i, uri in enumerate(uris):
assert parse_url(uri) == expected[i]
# -- If we can't create a TempMongo instance, then
# simply print what happened and skip all mongo tests.
try:
with TempMongo() as temp_mongo:
pass
except OSError as e:
print(e, file=sys.stderr)
print(
("Failed to create a TempMongo context," " skipping all mongo tests."),
file=sys.stderr,
)
if "such file" in str(e):
print("Hint: is mongod executable on path?", file=sys.stderr)
raise nose.SkipTest()
class TestMongoTrials(hyperopt.tests.test_base.TestTrials):
def setUp(self):
self.temp_mongo = TempMongo()
self.temp_mongo.__enter__()
self.trials = MongoTrials(
self.temp_mongo.connection_string("foo"), exp_key=None
)
def tearDown(self, *args):
self.temp_mongo.__exit__(*args)
def with_mongo_trials(f, exp_key=None):
def wrapper():
with TempMongo() as temp_mongo:
trials = MongoTrials(temp_mongo.connection_string("foo"), exp_key=exp_key)
print("Length of trials: ", len(trials.results))
f(trials)
wrapper.__name__ = f.__name__
return wrapper
def _worker_thread_fn(host_id, n_jobs, timeout, dbname="foo", logfilename=None):
mw = MongoWorker(
mj=TempMongo.mongo_jobs(dbname),
logfilename=logfilename,
workdir="mongoexp_test_dir",
)
try:
while n_jobs:
mw.run_one(host_id, timeout, erase_created_workdir=True)
print("worker: %s ran job" % str(host_id))
n_jobs -= 1
except ReserveTimeout:
print("worker timed out:", host_id)
pass
def with_worker_threads(n_threads, dbname="foo", n_jobs=sys.maxsize, timeout=10.0):
"""
Decorator that will run a test with some MongoWorker threads in flight
"""
def newth(ii):
return threading.Thread(
target=_worker_thread_fn, args=(("hostname", ii), n_jobs, timeout, dbname)
)
def deco(f):
def wrapper(*args, **kwargs):
# --start some threads
threads = list(map(newth, list(range(n_threads))))
[th.start() for th in threads]
try:
return f(*args, **kwargs)
finally:
[th.join() for th in threads]
wrapper.__name__ = f.__name__ # -- nose requires test in name
return wrapper
return deco
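# A sketch of how the decorator above would typically be used (illustrative only, with a
# hypothetical function name): the wrapped body runs while n_threads MongoWorker threads
# keep polling the named database for jobs.
#
#   @with_worker_threads(2, dbname="foo")
#   def my_trials_scenario():
#       ...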
@with_mongo_trials
def test_with_temp_mongo(trials):
pass # -- just verify that the decorator can run
@with_mongo_trials
def test_new_trial_ids(trials):
a = trials.new_trial_ids(1)
b = trials.new_trial_ids(2)
c = trials.new_trial_ids(3)
assert len(a) == 1
assert len(b) == 2
assert len(c) == 3
s = set()
s.update(a)
s.update(b)
s.update(c)
assert len(s) == 6
@with_mongo_trials
def test_attachments(trials):
blob = b"abcde"
assert "aname" not in trials.attachments
trials.attachments["aname"] = blob
assert "aname" in trials.attachments
assert trials.attachments["aname"] == blob
assert trials.attachments["aname"] == blob
blob2 = b"zzz"
trials.attachments["aname"] = blob2
assert "aname" in trials.attachments
assert trials.attachments["aname"] == blob2
assert trials.attachments["aname"] == blob2
del trials.attachments["aname"]
assert "aname" not in trials.attachments
@with_mongo_trials
def test_delete_all_on_attachments(trials):
trials.attachments["aname"] = "a"
trials.attachments["aname2"] = "b"
assert "aname2" in trials.attachments
trials.delete_all()
assert "aname" not in trials.attachments
assert "aname2" not in trials.attachments
def test_handles_are_independent():
with TempMongo() as tm:
t1 = tm.mongo_jobs("t1")
t2 = tm.mongo_jobs("t2")
assert len(t1) == 0
assert len(t2) == 0
# test that inserting into t1 doesn't affect t2
t1.insert({"a": 7})
assert len(t1) == 1
assert len(t2) == 0
def passthrough(x):
assert os.path.split(os.getcwd()).count("mongoexp_test_dir") == 1, (
"cwd is %s" % os.getcwd()
)
return x
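# passthrough() above doubles as the objective function for the threaded fmin runs below;
# its assertion checks that MongoWorker created and switched into the 'mongoexp_test_dir'
# working directory before the job was evaluated.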
class TestExperimentWithThreads(unittest.TestCase):
@staticmethod
def worker_thread_fn(host_id, n_jobs, timeout):
mw = MongoWorker(
mj=TempMongo.mongo_jobs("foodb"),
logfilename=None,
workdir="mongoexp_test_dir",
)
while n_jobs:
mw.run_one(host_id, timeout, erase_created_workdir=True)
print("worker: %s ran job" % str(host_id))
n_jobs -= 1
@staticmethod
def fmin_thread_fn(space, trials, max_evals, seed):
fmin(
fn=passthrough,
space=space,
algo=rand.suggest,
trials=trials,
rstate=np.random.RandomState(seed),
max_evals=max_evals,
return_argmin=False,
)
def test_seeds_AAB(self):
# launch 3 simultaneous experiments with seeds A, A, B.
# Verify all experiments run to completion.
# Verify first two experiments run identically.
# Verify third experiment runs differently.
exp_keys = ["A0", "A1", "B"]
seeds = [1, 1, 2]
n_workers = 2
jobs_per_thread = 6
# -- total jobs = 2 * 6 = 12
# -- divided by 3 experiments: 4 jobs per fmin
max_evals = (n_workers * jobs_per_thread) // len(exp_keys)
# -- should not matter which domain is used here
domain = gauss_wave2()
pickle.dumps(domain.expr)
pickle.dumps(passthrough)
worker_threads = [
threading.Thread(
target=TestExperimentWithThreads.worker_thread_fn,
args=(("hostname", ii), jobs_per_thread, 30.0),
)
for ii in range(n_workers)
]
with TempMongo() as tm:
mj = tm.mongo_jobs("foodb")
print(mj)
trials_list = [
MongoTrials(tm.connection_string("foodb"), key) for key in exp_keys
]
fmin_threads = [
threading.Thread(
target=TestExperimentWithThreads.fmin_thread_fn,
args=(domain.expr, trials, max_evals, seed),
)
for seed, trials in zip(seeds, trials_list)
]
try:
[th.start() for th in worker_threads + fmin_threads]
finally:
print("joining worker threads...")
[th.join() for th in worker_threads + fmin_threads]
# -- not using an exp_key gives a handle to all the trials
# in foodb
all_trials = MongoTrials(tm.connection_string("foodb"))
self.assertEqual(len(all_trials), n_workers * jobs_per_thread)
# Verify that the fmin calls terminated correctly:
for trials in trials_list:
self.assertEqual(
trials.count_by_state_synced(JOB_STATE_DONE), max_evals
)
self.assertEqual(
trials.count_by_state_unsynced(JOB_STATE_DONE), max_evals
)
self.assertEqual(len(trials), max_evals)
# Verify that the first two experiments match.
# (Do these need sorting by trial id?)
trials_A0, trials_A1, trials_B0 = trials_list
self.assertEqual(
[t["misc"]["vals"] for t in trials_A0.trials],
[t["misc"]["vals"] for t in trials_A1.trials],
)
# Verify that the last experiment does not match.
# (Do these need sorting by trial id?)
self.assertNotEqual(
[t["misc"]["vals"] for t in trials_A0.trials],
[t["misc"]["vals"] for t in trials_B0.trials],
)
class FakeOptions:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
# -- assert that the test raises a ReserveTimeout within 10 seconds
@nose.tools.timed(10.0) # XXX: this needs a suspiciously long timeout
@nose.tools.raises(ReserveTimeout)
@with_mongo_trials
def test_main_worker(trials):
options = FakeOptions(
max_jobs=1,
# XXX: sync this with TempMongo
mongo=as_mongo_str("localhost:22334/foodb"),
reserve_timeout=1,
poll_interval=0.5,
workdir=None,
exp_key="foo",
last_job_timeout=None,
)
# -- check that it runs
# and that the reserve timeout is respected
main_worker_helper(options, ())
|
test_pdb.py
|
# A test suite for pdb; not very comprehensive at the moment.
import doctest
import os
import pdb
import sys
import types
import codecs
import unittest
import subprocess
import textwrap
from contextlib import ExitStack
from io import StringIO
from test import support
# This little helper class is essential for testing pdb under doctest.
from test.test_doctest import _FakeInput
from unittest.mock import patch
class PdbTestInput(object):
"""Context manager that makes testing Pdb in doctests easier."""
def __init__(self, input):
self.input = input
def __enter__(self):
self.real_stdin = sys.stdin
sys.stdin = _FakeInput(self.input)
self.orig_trace = sys.gettrace() if hasattr(sys, 'gettrace') else None
def __exit__(self, *exc):
sys.stdin = self.real_stdin
if self.orig_trace:
sys.settrace(self.orig_trace)
def test_pdb_displayhook():
"""This tests the custom displayhook for pdb.
>>> def test_function(foo, bar):
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... pass
>>> with PdbTestInput([
... 'foo',
... 'bar',
... 'for i in range(5): print(i)',
... 'continue',
... ]):
... test_function(1, None)
> <doctest test.test_pdb.test_pdb_displayhook[0]>(3)test_function()
-> pass
(Pdb) foo
1
(Pdb) bar
(Pdb) for i in range(5): print(i)
0
1
2
3
4
(Pdb) continue
"""
def test_pdb_basic_commands():
"""Test the basic commands of pdb.
>>> def test_function_2(foo, bar='default'):
... print(foo)
... for i in range(5):
... print(i)
... print(bar)
... for i in range(10):
... never_executed
... print('after for')
... print('...')
... return foo.upper()
>>> def test_function3(arg=None, *, kwonly=None):
... pass
>>> def test_function4(a, b, c, /):
... pass
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... ret = test_function_2('baz')
... test_function3(kwonly=True)
... test_function4(1, 2, 3)
... print(ret)
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'step', # entering the function call
... 'args', # display function args
... 'list', # list function source
... 'bt', # display backtrace
... 'up', # step up to test_function()
... 'down', # step down to test_function_2() again
... 'next', # stepping to print(foo)
... 'next', # stepping to the for loop
... 'step', # stepping into the for loop
... 'until', # continuing until out of the for loop
... 'next', # executing the print(bar)
... 'jump 8', # jump over second for loop
... 'return', # return out of function
... 'retval', # display return value
... 'next', # step to test_function3()
... 'step', # stepping into test_function3()
... 'args', # display function args
... 'return', # return out of function
... 'next', # step to test_function4()
... 'step', # stepping to test_function4()
... 'args', # display function args
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) args
foo = 'baz'
bar = 'default'
(Pdb) list
1 -> def test_function_2(foo, bar='default'):
2 print(foo)
3 for i in range(5):
4 print(i)
5 print(bar)
6 for i in range(10):
7 never_executed
8 print('after for')
9 print('...')
10 return foo.upper()
[EOF]
(Pdb) bt
...
<doctest test.test_pdb.test_pdb_basic_commands[4]>(25)<module>()
-> test_function()
<doctest test.test_pdb.test_pdb_basic_commands[3]>(3)test_function()
-> ret = test_function_2('baz')
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) up
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) down
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) next
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(2)test_function_2()
-> print(foo)
(Pdb) next
baz
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(3)test_function_2()
-> for i in range(5):
(Pdb) step
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(4)test_function_2()
-> print(i)
(Pdb) until
0
1
2
3
4
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(5)test_function_2()
-> print(bar)
(Pdb) next
default
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(6)test_function_2()
-> for i in range(10):
(Pdb) jump 8
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(8)test_function_2()
-> print('after for')
(Pdb) return
after for
...
--Return--
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(10)test_function_2()->'BAZ'
-> return foo.upper()
(Pdb) retval
'BAZ'
(Pdb) next
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(4)test_function()
-> test_function3(kwonly=True)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_basic_commands[1]>(1)test_function3()
-> def test_function3(arg=None, *, kwonly=None):
(Pdb) args
arg = None
kwonly = True
(Pdb) return
--Return--
> <doctest test.test_pdb.test_pdb_basic_commands[1]>(2)test_function3()->None
-> pass
(Pdb) next
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(5)test_function()
-> test_function4(1, 2, 3)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_basic_commands[2]>(1)test_function4()
-> def test_function4(a, b, c, /):
(Pdb) args
a = 1
b = 2
c = 3
(Pdb) continue
BAZ
"""
def test_pdb_breakpoint_commands():
"""Test basic commands related to breakpoints.
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... print(1)
... print(2)
... print(3)
... print(4)
First, need to clear bdb state that might be left over from previous tests.
Otherwise, the new breakpoints might get assigned different numbers.
>>> from bdb import Breakpoint
>>> Breakpoint.next = 1
>>> Breakpoint.bplist = {}
>>> Breakpoint.bpbynumber = [None]
Now test the breakpoint commands. NORMALIZE_WHITESPACE is needed because
the breakpoint list outputs a tab for the "stop only" and "ignore next"
lines, which we don't want to put in here.
>>> with PdbTestInput([ # doctest: +NORMALIZE_WHITESPACE
... 'break 3',
... 'disable 1',
... 'ignore 1 10',
... 'condition 1 1 < 2',
... 'break 4',
... 'break 4',
... 'break',
... 'clear 3',
... 'break',
... 'condition 1',
... 'enable 1',
... 'clear 1',
... 'commands 2',
... 'p "42"',
... 'print("42", 7*6)', # Issue 18764 (not about breakpoints)
... 'end',
... 'continue', # will stop at breakpoint 2 (line 4)
... 'clear', # clear all!
... 'y',
... 'tbreak 5',
... 'continue', # will stop at temporary breakpoint
... 'break', # make sure breakpoint is gone
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(3)test_function()
-> print(1)
(Pdb) break 3
Breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) disable 1
Disabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) ignore 1 10
Will ignore next 10 crossings of breakpoint 1.
(Pdb) condition 1 1 < 2
New condition set for breakpoint 1.
(Pdb) break 4
Breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break 4
Breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
stop only if 1 < 2
ignore next 10 hits
2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
3 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) clear 3
Deleted breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
stop only if 1 < 2
ignore next 10 hits
2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) condition 1
Breakpoint 1 is now unconditional.
(Pdb) enable 1
Enabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) clear 1
Deleted breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) commands 2
(com) p "42"
(com) print("42", 7*6)
(com) end
(Pdb) continue
1
'42'
42 42
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(4)test_function()
-> print(2)
(Pdb) clear
Clear all breaks? y
Deleted breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) tbreak 5
Breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
(Pdb) continue
2
Deleted breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(5)test_function()
-> print(3)
(Pdb) break
(Pdb) continue
3
4
"""
def do_nothing():
pass
def do_something():
print(42)
def test_list_commands():
"""Test the list and source commands of pdb.
>>> def test_function_2(foo):
... import test.test_pdb
... test.test_pdb.do_nothing()
... 'some...'
... 'more...'
... 'code...'
... 'to...'
... 'make...'
... 'a...'
... 'long...'
... 'listing...'
... 'useful...'
... '...'
... '...'
... return foo
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... ret = test_function_2('baz')
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'list', # list first function
... 'step', # step into second function
... 'list', # list second function
... 'list', # continue listing to EOF
... 'list 1,3', # list specific lines
... 'list x', # invalid argument
... 'next', # step to import
... 'next', # step over import
... 'step', # step into do_nothing
... 'longlist', # list all lines
... 'source do_something', # list all lines of function
    ... 'source fooxxx', # something that doesn't exist
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_list_commands[1]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) list
1 def test_function():
2 import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
3 -> ret = test_function_2('baz')
[EOF]
(Pdb) step
--Call--
> <doctest test.test_pdb.test_list_commands[0]>(1)test_function_2()
-> def test_function_2(foo):
(Pdb) list
1 -> def test_function_2(foo):
2 import test.test_pdb
3 test.test_pdb.do_nothing()
4 'some...'
5 'more...'
6 'code...'
7 'to...'
8 'make...'
9 'a...'
10 'long...'
11 'listing...'
(Pdb) list
12 'useful...'
13 '...'
14 '...'
15 return foo
[EOF]
(Pdb) list 1,3
1 -> def test_function_2(foo):
2 import test.test_pdb
3 test.test_pdb.do_nothing()
(Pdb) list x
*** ...
(Pdb) next
> <doctest test.test_pdb.test_list_commands[0]>(2)test_function_2()
-> import test.test_pdb
(Pdb) next
> <doctest test.test_pdb.test_list_commands[0]>(3)test_function_2()
-> test.test_pdb.do_nothing()
(Pdb) step
--Call--
> ...test_pdb.py(...)do_nothing()
-> def do_nothing():
(Pdb) longlist
... -> def do_nothing():
... pass
(Pdb) source do_something
... def do_something():
... print(42)
(Pdb) source fooxxx
*** ...
(Pdb) continue
"""
def test_post_mortem():
"""Test post mortem traceback debugging.
>>> def test_function_2():
... try:
... 1/0
... finally:
... print('Exception!')
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... test_function_2()
... print('Not reached.')
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'next', # step over exception-raising call
... 'bt', # get a backtrace
... 'list', # list code of test_function()
... 'down', # step into test_function_2()
... 'list', # list code of test_function_2()
... 'continue',
... ]):
... try:
... test_function()
... except ZeroDivisionError:
... print('Correctly reraised.')
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
(Pdb) next
Exception!
ZeroDivisionError: division by zero
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
(Pdb) bt
...
<doctest test.test_pdb.test_post_mortem[2]>(10)<module>()
-> test_function()
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
<doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2()
-> 1/0
(Pdb) list
1 def test_function():
2 import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
3 -> test_function_2()
4 print('Not reached.')
[EOF]
(Pdb) down
> <doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2()
-> 1/0
(Pdb) list
1 def test_function_2():
2 try:
3 >> 1/0
4 finally:
5 -> print('Exception!')
[EOF]
(Pdb) continue
Correctly reraised.
"""
def test_pdb_skip_modules():
"""This illustrates the simple case of module skipping.
>>> def skip_module():
... import string
... import pdb; pdb.Pdb(skip=['stri*'], nosigint=True, readrc=False).set_trace()
... string.capwords('FOO')
>>> with PdbTestInput([
... 'step',
... 'continue',
... ]):
... skip_module()
> <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()
-> string.capwords('FOO')
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()->None
-> string.capwords('FOO')
(Pdb) continue
"""
# Module for testing skipping of module that makes a callback
mod = types.ModuleType('module_to_skip')
exec('def foo_pony(callback): x = 1; callback(); return None', mod.__dict__)
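# The synthetic 'module_to_skip' module above exists only so that foo_pony() has a module
# name matching the skip pattern in the next test: pdb should skip foo_pony's own frame
# while still stopping inside the user-supplied callback.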
def test_pdb_skip_modules_with_callback():
"""This illustrates skipping of modules that call into other code.
>>> def skip_module():
... def callback():
... return None
... import pdb; pdb.Pdb(skip=['module_to_skip*'], nosigint=True, readrc=False).set_trace()
... mod.foo_pony(callback)
>>> with PdbTestInput([
... 'step',
... 'step',
... 'step',
... 'step',
... 'step',
... 'continue',
... ]):
... skip_module()
... pass # provides something to "step" to
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()
-> mod.foo_pony(callback)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(2)callback()
-> def callback():
(Pdb) step
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()
-> return None
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()->None
-> return None
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()->None
-> mod.foo_pony(callback)
(Pdb) step
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[1]>(10)<module>()
-> pass # provides something to "step" to
(Pdb) continue
"""
def test_pdb_continue_in_bottomframe():
"""Test that "continue" and "next" work properly in bottom frame (issue #5294).
>>> def test_function():
... import pdb, sys; inst = pdb.Pdb(nosigint=True, readrc=False)
... inst.set_trace()
... inst.botframe = sys._getframe() # hackery to get the right botframe
... print(1)
... print(2)
... print(3)
... print(4)
>>> with PdbTestInput([ # doctest: +ELLIPSIS
... 'next',
... 'break 7',
... 'continue',
... 'next',
... 'continue',
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(4)test_function()
-> inst.botframe = sys._getframe() # hackery to get the right botframe
(Pdb) next
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(5)test_function()
-> print(1)
(Pdb) break 7
Breakpoint ... at <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>:7
(Pdb) continue
1
2
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(7)test_function()
-> print(3)
(Pdb) next
3
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(8)test_function()
-> print(4)
(Pdb) continue
4
"""
def pdb_invoke(method, arg):
"""Run pdb.method(arg)."""
getattr(pdb.Pdb(nosigint=True, readrc=False), method)(arg)
def test_pdb_run_with_incorrect_argument():
"""Testing run and runeval with incorrect first argument.
>>> pti = PdbTestInput(['continue',])
>>> with pti:
... pdb_invoke('run', lambda x: x)
Traceback (most recent call last):
TypeError: exec() arg 1 must be a string, bytes or code object
>>> with pti:
... pdb_invoke('runeval', lambda x: x)
Traceback (most recent call last):
TypeError: eval() arg 1 must be a string, bytes or code object
"""
def test_pdb_run_with_code_object():
"""Testing run and runeval with code object as a first argument.
>>> with PdbTestInput(['step','x', 'continue']): # doctest: +ELLIPSIS
... pdb_invoke('run', compile('x=1', '<string>', 'exec'))
> <string>(1)<module>()...
(Pdb) step
--Return--
> <string>(1)<module>()->None
(Pdb) x
1
(Pdb) continue
>>> with PdbTestInput(['x', 'continue']):
... x=0
... pdb_invoke('runeval', compile('x+1', '<string>', 'eval'))
> <string>(1)<module>()->None
(Pdb) x
1
(Pdb) continue
"""
def test_next_until_return_at_return_event():
"""Test that pdb stops after a next/until/return issued at a return debug event.
>>> def test_function_2():
... x = 1
... x = 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... test_function_2()
... test_function_2()
... test_function_2()
... end = 1
>>> from bdb import Breakpoint
>>> Breakpoint.next = 1
>>> with PdbTestInput(['break test_function_2',
... 'continue',
... 'return',
... 'next',
... 'continue',
... 'return',
... 'until',
... 'continue',
... 'return',
... 'return',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(3)test_function()
-> test_function_2()
(Pdb) break test_function_2
Breakpoint 1 at <doctest test.test_pdb.test_next_until_return_at_return_event[0]>:1
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) next
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(4)test_function()
-> test_function_2()
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) until
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(5)test_function()
-> test_function_2()
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) return
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(6)test_function()
-> end = 1
(Pdb) continue
"""
def test_pdb_next_command_for_generator():
"""Testing skip unwindng stack on yield for generators for "next" command
>>> def test_gen():
... yield 0
... return 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... it = test_gen()
... try:
... if next(it) != 0:
... raise AssertionError
... next(it)
... except StopIteration as ex:
... if ex.value != 1:
... raise AssertionError
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'step',
... 'next',
... 'next',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(3)test_function()
-> it = test_gen()
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(4)test_function()
-> try:
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(5)test_function()
-> if next(it) != 0:
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(2)test_gen()
-> yield 0
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen()
-> return 1
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen()->1
-> return 1
(Pdb) step
StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(7)test_function()
-> next(it)
(Pdb) continue
finished
"""
def test_pdb_next_command_for_coroutine():
"""Testing skip unwindng stack on yield for coroutines for "next" command
>>> import asyncio
>>> async def test_coro():
... await asyncio.sleep(0)
... await asyncio.sleep(0)
... await asyncio.sleep(0)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'next',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(2)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(3)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(4)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
Internal StopIteration
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()->None
-> await test_coro()
(Pdb) continue
finished
"""
def test_pdb_next_command_for_asyncgen():
"""Testing skip unwindng stack on yield for coroutines for "next" command
>>> import asyncio
>>> async def agen():
... yield 1
... await asyncio.sleep(0)
... yield 2
>>> async def test_coro():
... async for x in agen():
... print(x)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'step',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[3]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(2)test_coro()
-> async for x in agen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(3)test_coro()
-> print(x)
(Pdb) next
1
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(2)test_coro()
-> async for x in agen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[1]>(2)agen()
-> yield 1
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[1]>(3)agen()
-> await asyncio.sleep(0)
(Pdb) continue
2
finished
"""
def test_pdb_return_command_for_generator():
"""Testing no unwindng stack on yield for generators
for "return" command
>>> def test_gen():
... yield 0
... return 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... it = test_gen()
... try:
... if next(it) != 0:
... raise AssertionError
... next(it)
... except StopIteration as ex:
... if ex.value != 1:
... raise AssertionError
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'step',
... 'return',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(3)test_function()
-> it = test_gen()
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(4)test_function()
-> try:
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(5)test_function()
-> if next(it) != 0:
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_return_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) return
StopIteration: 1
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(7)test_function()
-> next(it)
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(8)test_function()
-> except StopIteration as ex:
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(9)test_function()
-> if ex.value != 1:
(Pdb) continue
finished
"""
def test_pdb_return_command_for_coroutine():
"""Testing no unwindng stack on yield for coroutines for "return" command
>>> import asyncio
>>> async def test_coro():
... await asyncio.sleep(0)
... await asyncio.sleep(0)
... await asyncio.sleep(0)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(2)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(3)test_coro()
-> await asyncio.sleep(0)
(Pdb) continue
finished
"""
def test_pdb_until_command_for_generator():
"""Testing no unwindng stack on yield for generators
for "until" command if target breakpoint is not reached
>>> def test_gen():
... yield 0
... yield 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print(i)
... print("finished")
>>> with PdbTestInput(['step',
... 'until 4',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(3)test_function()
-> for i in test_gen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) until 4
0
1
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen()
-> yield 2
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen()->2
-> yield 2
(Pdb) step
> <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(4)test_function()
-> print(i)
(Pdb) continue
2
finished
"""
def test_pdb_until_command_for_coroutine():
"""Testing no unwindng stack for coroutines
for "until" command if target breakpoint is not reached
>>> import asyncio
>>> async def test_coro():
... print(0)
... await asyncio.sleep(0)
... print(1)
... await asyncio.sleep(0)
... print(2)
... await asyncio.sleep(0)
... print(3)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'until 8',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) until 8
0
1
2
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[1]>(8)test_coro()
-> print(3)
(Pdb) continue
3
finished
"""
def test_pdb_next_command_in_generator_for_loop():
"""The next command on returning from a generator controlled by a for loop.
>>> def test_gen():
... yield 0
... return 1
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print('value', i)
... x = 123
>>> with PdbTestInput(['break test_gen',
... 'continue',
... 'next',
... 'next',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function()
-> for i in test_gen():
(Pdb) break test_gen
Breakpoint 6 at <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>:1
(Pdb) continue
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(2)test_gen()
-> yield 0
(Pdb) next
value 0
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(3)test_gen()
-> return 1
(Pdb) next
Internal StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function()
-> for i in test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(5)test_function()
-> x = 123
(Pdb) continue
"""
def test_pdb_next_command_subiterator():
"""The next command in a generator with a subiterator.
>>> def test_subgenerator():
... yield 0
... return 1
>>> def test_gen():
... x = yield from test_subgenerator()
... return x
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print('value', i)
... x = 123
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function()
-> for i in test_gen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(1)test_gen()
-> def test_gen():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(2)test_gen()
-> x = yield from test_subgenerator()
(Pdb) next
value 0
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(3)test_gen()
-> return x
(Pdb) next
Internal StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function()
-> for i in test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(5)test_function()
-> x = 123
(Pdb) continue
"""
def test_pdb_issue_20766():
"""Test for reference leaks when the SIGINT handler is set.
>>> def test_function():
... i = 1
... while i <= 2:
... sess = pdb.Pdb()
... sess.set_trace(sys._getframe())
... print('pdb %d: %s' % (i, sess._previous_sigint_handler))
... i += 1
>>> with PdbTestInput(['continue',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_issue_20766[0]>(6)test_function()
-> print('pdb %d: %s' % (i, sess._previous_sigint_handler))
(Pdb) continue
pdb 1: <built-in function default_int_handler>
> <doctest test.test_pdb.test_pdb_issue_20766[0]>(5)test_function()
-> sess.set_trace(sys._getframe())
(Pdb) continue
pdb 2: <built-in function default_int_handler>
"""
class PdbTestCase(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def _run_pdb(self, pdb_args, commands):
self.addCleanup(support.rmtree, '__pycache__')
cmd = [sys.executable, '-m', 'pdb'] + pdb_args
with subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
) as proc:
stdout, stderr = proc.communicate(str.encode(commands))
stdout = stdout and bytes.decode(stdout)
stderr = stderr and bytes.decode(stderr)
return stdout, stderr
def run_pdb_script(self, script, commands):
"""Run 'script' lines with pdb and the pdb 'commands'."""
filename = 'main.py'
with open(filename, 'w') as f:
f.write(textwrap.dedent(script))
self.addCleanup(support.unlink, filename)
return self._run_pdb([filename], commands)
def run_pdb_module(self, script, commands):
"""Runs the script code as part of a module"""
self.module_name = 't_main'
support.rmtree(self.module_name)
main_file = self.module_name + '/__main__.py'
init_file = self.module_name + '/__init__.py'
os.mkdir(self.module_name)
with open(init_file, 'w') as f:
pass
with open(main_file, 'w') as f:
f.write(textwrap.dedent(script))
self.addCleanup(support.rmtree, self.module_name)
return self._run_pdb(['-m', self.module_name], commands)
def _assert_find_function(self, file_content, func_name, expected):
with open(support.TESTFN, 'wb') as f:
f.write(file_content)
expected = None if not expected else (
expected[0], support.TESTFN, expected[1])
self.assertEqual(
expected, pdb.find_function(func_name, support.TESTFN))
def test_find_function_empty_file(self):
self._assert_find_function(b'', 'foo', None)
def test_find_function_found(self):
self._assert_find_function(
"""\
def foo():
pass
def bœr():
pass
def quux():
pass
""".encode(),
'bœr',
('bœr', 4),
)
def test_find_function_found_with_encoding_cookie(self):
self._assert_find_function(
"""\
# coding: iso-8859-15
def foo():
pass
def bœr():
pass
def quux():
pass
""".encode('iso-8859-15'),
'bœr',
('bœr', 5),
)
def test_find_function_found_with_bom(self):
self._assert_find_function(
codecs.BOM_UTF8 + """\
def bœr():
pass
""".encode(),
'bœr',
('bœr', 1),
)
def test_issue7964(self):
# open the file as binary so we can force \r\n newline
with open(support.TESTFN, 'wb') as f:
f.write(b'print("testing my pdb")\r\n')
cmd = [sys.executable, '-m', 'pdb', support.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'quit\n')
self.assertNotIn(b'SyntaxError', stdout,
"Got a syntax error running test script under PDB")
def test_issue13183(self):
script = """
from bar import bar
def foo():
bar()
def nope():
pass
def foobar():
foo()
nope()
foobar()
"""
commands = """
from bar import bar
break bar
continue
step
step
quit
"""
bar = """
def bar():
pass
"""
with open('bar.py', 'w') as f:
f.write(textwrap.dedent(bar))
self.addCleanup(support.unlink, 'bar.py')
stdout, stderr = self.run_pdb_script(script, commands)
self.assertTrue(
any('main.py(5)foo()->None' in l for l in stdout.splitlines()),
'Fail to step into the caller after a return')
def test_issue13120(self):
# Invoking "continue" on a non-main thread triggered an exception
# inside signal.signal.
with open(support.TESTFN, 'wb') as f:
f.write(textwrap.dedent("""
import threading
import pdb
def start_pdb():
pdb.Pdb(readrc=False).set_trace()
x = 1
y = 1
t = threading.Thread(target=start_pdb)
t.start()""").encode('ascii'))
cmd = [sys.executable, '-u', support.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'cont\n')
self.assertNotIn('Error', stdout.decode(),
"Got an error running test script under PDB")
def test_issue36250(self):
with open(support.TESTFN, 'wb') as f:
f.write(textwrap.dedent("""
import threading
import pdb
evt = threading.Event()
def start_pdb():
evt.wait()
pdb.Pdb(readrc=False).set_trace()
t = threading.Thread(target=start_pdb)
t.start()
pdb.Pdb(readrc=False).set_trace()
evt.set()
t.join()""").encode('ascii'))
cmd = [sys.executable, '-u', support.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'cont\ncont\n')
self.assertNotIn('Error', stdout.decode(),
"Got an error running test script under PDB")
def test_issue16180(self):
# A syntax error in the debuggee.
script = "def f: pass\n"
commands = ''
expected = "SyntaxError:"
stdout, stderr = self.run_pdb_script(script, commands)
self.assertIn(expected, stdout,
'\n\nExpected:\n{}\nGot:\n{}\n'
'Fail to handle a syntax error in the debuggee.'
.format(expected, stdout))
def test_readrc_kwarg(self):
script = textwrap.dedent("""
import pdb; pdb.Pdb(readrc=False).set_trace()
print('hello')
""")
save_home = os.environ.pop('HOME', None)
try:
with support.temp_cwd():
with open('.pdbrc', 'w') as f:
f.write("invalid\n")
with open('main.py', 'w') as f:
f.write(script)
cmd = [sys.executable, 'main.py']
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
)
with proc:
stdout, stderr = proc.communicate(b'q\n')
self.assertNotIn("NameError: name 'invalid' is not defined",
stdout.decode())
finally:
if save_home is not None:
os.environ['HOME'] = save_home
def test_readrc_homedir(self):
save_home = os.environ.pop("HOME", None)
with support.temp_dir() as temp_dir, patch("os.path.expanduser"):
rc_path = os.path.join(temp_dir, ".pdbrc")
os.path.expanduser.return_value = rc_path
try:
with open(rc_path, "w") as f:
f.write("invalid")
self.assertEqual(pdb.Pdb().rcLines[0], "invalid")
finally:
if save_home is not None:
os.environ["HOME"] = save_home
def test_header(self):
stdout = StringIO()
header = 'Nobody expects... blah, blah, blah'
with ExitStack() as resources:
resources.enter_context(patch('sys.stdout', stdout))
resources.enter_context(patch.object(pdb.Pdb, 'set_trace'))
pdb.set_trace(header=header)
self.assertEqual(stdout.getvalue(), header + '\n')
def test_run_module(self):
script = """print("SUCCESS")"""
commands = """
continue
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("SUCCESS" in l for l in stdout.splitlines()), stdout)
def test_module_is_run_as_main(self):
script = """
if __name__ == '__main__':
print("SUCCESS")
"""
commands = """
continue
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("SUCCESS" in l for l in stdout.splitlines()), stdout)
def test_breakpoint(self):
script = """
if __name__ == '__main__':
pass
print("SUCCESS")
pass
"""
commands = """
b 3
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("Breakpoint 1 at" in l for l in stdout.splitlines()), stdout)
self.assertTrue(all("SUCCESS" not in l for l in stdout.splitlines()), stdout)
def test_run_pdb_with_pdb(self):
commands = """
c
quit
"""
stdout, stderr = self._run_pdb(["-m", "pdb"], commands)
self.assertIn(
pdb._usage,
stdout.replace('\r', '') # remove \r for windows
)
def test_module_without_a_main(self):
module_name = 't_main'
support.rmtree(module_name)
init_file = module_name + '/__init__.py'
os.mkdir(module_name)
with open(init_file, 'w') as f:
pass
self.addCleanup(support.rmtree, module_name)
stdout, stderr = self._run_pdb(['-m', module_name], "")
self.assertIn("ImportError: No module named t_main.__main__",
stdout.splitlines())
def test_blocks_at_first_code_line(self):
script = """
#This is a comment, on line 2
print("SUCCESS")
"""
commands = """
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("__main__.py(4)<module>()"
in l for l in stdout.splitlines()), stdout)
def test_relative_imports(self):
self.module_name = 't_main'
support.rmtree(self.module_name)
main_file = self.module_name + '/__main__.py'
init_file = self.module_name + '/__init__.py'
module_file = self.module_name + '/module.py'
self.addCleanup(support.rmtree, self.module_name)
os.mkdir(self.module_name)
with open(init_file, 'w') as f:
f.write(textwrap.dedent("""
top_var = "VAR from top"
"""))
with open(main_file, 'w') as f:
f.write(textwrap.dedent("""
from . import top_var
from .module import var
from . import module
pass # We'll stop here and print the vars
"""))
with open(module_file, 'w') as f:
f.write(textwrap.dedent("""
var = "VAR from module"
var2 = "second var"
"""))
commands = """
b 5
c
p top_var
p var
p module.var2
quit
"""
stdout, _ = self._run_pdb(['-m', self.module_name], commands)
self.assertTrue(any("VAR from module" in l for l in stdout.splitlines()), stdout)
self.assertTrue(any("VAR from top" in l for l in stdout.splitlines()))
self.assertTrue(any("second var" in l for l in stdout.splitlines()))
def test_relative_imports_on_plain_module(self):
# Validates running a plain module. See bpo32691
self.module_name = 't_main'
support.rmtree(self.module_name)
main_file = self.module_name + '/runme.py'
init_file = self.module_name + '/__init__.py'
module_file = self.module_name + '/module.py'
self.addCleanup(support.rmtree, self.module_name)
os.mkdir(self.module_name)
with open(init_file, 'w') as f:
f.write(textwrap.dedent("""
top_var = "VAR from top"
"""))
with open(main_file, 'w') as f:
f.write(textwrap.dedent("""
from . import module
pass # We'll stop here and print the vars
"""))
with open(module_file, 'w') as f:
f.write(textwrap.dedent("""
var = "VAR from module"
"""))
commands = """
b 3
c
p module.var
quit
"""
stdout, _ = self._run_pdb(['-m', self.module_name + '.runme'], commands)
self.assertTrue(any("VAR from module" in l for l in stdout.splitlines()), stdout)
def test_errors_in_command(self):
commands = "\n".join([
'print(',
'debug print(',
'debug doesnotexist',
'c',
])
stdout, _ = self.run_pdb_script('', commands + '\n')
self.assertEqual(stdout.splitlines()[1:], [
'(Pdb) *** SyntaxError: unexpected EOF while parsing',
'(Pdb) ENTERING RECURSIVE DEBUGGER',
'*** SyntaxError: unexpected EOF while parsing',
'LEAVING RECURSIVE DEBUGGER',
'(Pdb) ENTERING RECURSIVE DEBUGGER',
'> <string>(1)<module>()',
"((Pdb)) *** NameError: name 'doesnotexist' is not defined",
'LEAVING RECURSIVE DEBUGGER',
'(Pdb) ',
])
def load_tests(*args):
from test import test_pdb
suites = [
unittest.makeSuite(PdbTestCase),
doctest.DocTestSuite(test_pdb)
]
return unittest.TestSuite(suites)
if __name__ == '__main__':
unittest.main()
test_io.py
"""Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
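# A minimal, illustrative sketch of that pattern (the helper names below are
# hypothetical and are not used by the real suites in this file, which mix a
# shared base class into unittest.TestCase subclasses): the base refers to the
# implementation only through attributes, and thin subclasses bind the C and
# the Python module so the same checks run against both.
import io as _example_c_io      # C implementation of io
import _pyio as _example_py_io  # Python implementation of io

class _ExamplePatternBase:
    def roundtrip(self):
        # BytesIO is supplied by the concrete subclass.
        buf = self.BytesIO()
        buf.write(b"data")
        return buf.getvalue()

class _CExamplePattern(_ExamplePatternBase):
    BytesIO = _example_c_io.BytesIO

class _PyExamplePattern(_ExamplePatternBase):
    BytesIO = _example_py_io.BytesIO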
import abc
import array
import errno
import locale
import os
import pickle
import random
import signal
import sys
import sysconfig
import threading
import time
import unittest
import warnings
import weakref
from collections import deque, UserList
from itertools import cycle, count
from test import support
from test.support.script_helper import assert_python_ok, run_python_until_end
from test.support import FakePath
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
import ctypes
except ImportError:
def byteslike(*pos, **kw):
return array.array("b", bytes(*pos, **kw))
else:
def byteslike(*pos, **kw):
"""Create a bytes-like object having no string or sequence methods"""
data = bytes(*pos, **kw)
obj = EmptyStruct()
ctypes.resize(obj, len(data))
memoryview(obj).cast("B")[:] = data
return obj
class EmptyStruct(ctypes.Structure):
pass
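# Illustrative helper (hypothetical, not called by the suite): byteslike()
# yields an object that exposes only the buffer protocol, so tests can check
# that readinto()/write() accept arbitrary buffers rather than just bytes.
def _demo_byteslike():
    buf = byteslike(b"hello")
    assert bytes(memoryview(buf).cast("B")) == b"hello"
    assert not isinstance(buf, (bytes, bytearray))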
_cflags = sysconfig.get_config_var('CFLAGS') or ''
_config_args = sysconfig.get_config_var('CONFIG_ARGS') or ''
MEMORY_SANITIZER = (
'-fsanitize=memory' in _cflags or
'--with-memory-sanitizer' in _config_args
)
# Does the io.IOBase finalizer log the exception if the close() method fails?
# The exception is silently ignored by default in release builds.
IOBASE_EMITS_UNRAISABLE = (hasattr(sys, "gettotalrefcount") or sys.flags.dev_mode)
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with open(__file__, "r", encoding="latin-1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
"""A RawIO implementation without read(), so as to exercise the default
RawIO.read() which calls readinto()."""
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
self._extraneous_reads = 0
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
return 0 # wrong but we gotta return something
def tell(self):
return 0 # same comment as above
def readinto(self, buf):
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
self._extraneous_reads += 1
return 0
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
pass
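# Minimal sketch (hypothetical helper, not called by the suite) of the
# fallback path these mocks exercise: with no read() defined, the inherited
# RawIOBase.read() allocates a buffer and fills it through readinto().
def _demo_rawio_read_fallback():
    raw = CMockRawIOWithoutRead((b"abc", b"d"))
    assert raw.read(2) == b"ab"   # served via readinto()
    assert raw._reads == 1
    assert raw.read(2) == b"c"    # remainder of the first chunk
    assert raw.read(2) == b"d"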
class MockRawIO(MockRawIOWithoutRead):
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
self._extraneous_reads += 1
return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return super().write(b) * 2
def read(self, n=None):
return super().read(n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
super().readinto(buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class SlowFlushRawIO(MockRawIO):
def __init__(self):
super().__init__()
self.in_flush = threading.Event()
def flush(self):
self.in_flush.set()
time.sleep(0.25)
class CSlowFlushRawIO(SlowFlushRawIO, io.RawIOBase):
pass
class PySlowFlushRawIO(SlowFlushRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise OSError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super().__init__(data)
def read(self, n=None):
res = super().read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super().readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
class MockUnseekableIO:
def seekable(self):
return False
def seek(self, *args):
raise self.UnsupportedOperation("not seekable")
def tell(self, *args):
raise self.UnsupportedOperation("not seekable")
def truncate(self, *args):
raise self.UnsupportedOperation("not seekable")
class CMockUnseekableIO(MockUnseekableIO, io.BytesIO):
UnsupportedOperation = io.UnsupportedOperation
class PyMockUnseekableIO(MockUnseekableIO, pyio.BytesIO):
UnsupportedOperation = pyio.UnsupportedOperation
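# Hedged sketch (hypothetical helper, not part of the tests): wrapping an
# unseekable raw object makes the buffered layer report seekable() == False,
# which is what the seek()/tell() failure tests below rely on.
def _demo_unseekable():
    buf = io.BufferedReader(CMockUnseekableIO(b"data"))
    assert not buf.seekable()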
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b"".join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence=0):
# naive implementation, enough for tests
return 0
def writable(self):
return True
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
if n > 0:
# write data up to the first blocker
self._write_stack.append(b[:n])
return n
else:
# cancel blocker and indicate would block
self._blocker_char = None
return None
self._write_stack.append(b)
return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
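# Hedged sketch (hypothetical helper, not called by the suite) of the
# MockNonBlockWriterIO contract: writes succeed until the blocker byte is
# seen, a partial write stops just before it, and a write that starts with
# the blocker returns None to simulate EWOULDBLOCK.
def _demo_nonblock_writer():
    raw = CMockNonBlockWriterIO()
    raw.block_on(b"F"[0])
    assert raw.write(b"abc") == 3      # no blocker: full write
    assert raw.write(b"deFgh") == 2    # writes b"de", stops before the blocker
    assert raw.write(b"Fgh") is None   # blocker first: simulated EWOULDBLOCK
    assert raw.pop_written() == b"abcde"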
class IOTest(unittest.TestCase):
def setUp(self):
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
buffer = bytearray(b" world\n\n\n")
self.assertEqual(f.write(buffer), 9)
buffer[:] = b"*" * 9 # Overwrite our copy of the data
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = byteslike(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(bytes(data), b" worl")
data = bytearray(5)
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(byteslike(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(byteslike()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
f.seek(0)
data = byteslike(5)
self.assertEqual(f.readinto1(data), 5)
self.assertEqual(bytes(data), b"hello")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
try:
self.assertEqual(f.seek(self.LARGE), self.LARGE)
except (OverflowError, ValueError):
self.skipTest("no largefile support")
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
exc = self.UnsupportedOperation
for mode in ("w", "wb"):
with self.open(support.TESTFN, mode) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "wb", buffering=0) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "rb", buffering=0) as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "rb") as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "r") as fp:
self.assertRaises(exc, fp.write, "blah")
self.assertRaises(exc, fp.writelines, ["blah\n"])
# Non-zero seeking from current or end pos
self.assertRaises(exc, fp.seek, 1, self.SEEK_CUR)
self.assertRaises(exc, fp.seek, -1, self.SEEK_END)
def test_optional_abilities(self):
# Test for OSError when optional APIs are not supported
# The purpose of this test is to try fileno(), reading, writing and
# seeking operations with various objects that indicate they do not
# support these operations.
def pipe_reader():
[r, w] = os.pipe()
os.close(w) # So that read() is harmless
return self.FileIO(r, "r")
def pipe_writer():
[r, w] = os.pipe()
self.addCleanup(os.close, r)
# Guarantee that we can write into the pipe without blocking
thread = threading.Thread(target=os.read, args=(r, 100))
thread.start()
self.addCleanup(thread.join)
return self.FileIO(w, "w")
def buffered_reader():
return self.BufferedReader(self.MockUnseekableIO())
def buffered_writer():
return self.BufferedWriter(self.MockUnseekableIO())
def buffered_random():
return self.BufferedRandom(self.BytesIO())
def buffered_rw_pair():
return self.BufferedRWPair(self.MockUnseekableIO(),
self.MockUnseekableIO())
def text_reader():
class UnseekableReader(self.MockUnseekableIO):
writable = self.BufferedIOBase.writable
write = self.BufferedIOBase.write
return self.TextIOWrapper(UnseekableReader(), "ascii")
def text_writer():
class UnseekableWriter(self.MockUnseekableIO):
readable = self.BufferedIOBase.readable
read = self.BufferedIOBase.read
return self.TextIOWrapper(UnseekableWriter(), "ascii")
tests = (
(pipe_reader, "fr"), (pipe_writer, "fw"),
(buffered_reader, "r"), (buffered_writer, "w"),
(buffered_random, "rws"), (buffered_rw_pair, "rw"),
(text_reader, "r"), (text_writer, "w"),
(self.BytesIO, "rws"), (self.StringIO, "rws"),
)
for [test, abilities] in tests:
with self.subTest(test), test() as obj:
readable = "r" in abilities
self.assertEqual(obj.readable(), readable)
writable = "w" in abilities
self.assertEqual(obj.writable(), writable)
if isinstance(obj, self.TextIOBase):
data = "3"
elif isinstance(obj, (self.BufferedIOBase, self.RawIOBase)):
data = b"3"
else:
self.fail("Unknown base class")
if "f" in abilities:
obj.fileno()
else:
self.assertRaises(OSError, obj.fileno)
if readable:
obj.read(1)
obj.read()
else:
self.assertRaises(OSError, obj.read, 1)
self.assertRaises(OSError, obj.read)
if writable:
obj.write(data)
else:
self.assertRaises(OSError, obj.write, data)
if sys.platform.startswith("win") and test in (
pipe_reader, pipe_writer):
# Pipes seem to appear as seekable on Windows
continue
seekable = "s" in abilities
self.assertEqual(obj.seekable(), seekable)
if seekable:
obj.tell()
obj.seek(0)
else:
self.assertRaises(OSError, obj.tell)
self.assertRaises(OSError, obj.seek, 0)
if writable and seekable:
obj.truncate()
obj.truncate(0)
else:
self.assertRaises(OSError, obj.truncate)
self.assertRaises(OSError, obj.truncate, 0)
def test_open_handles_NUL_chars(self):
fn_with_NUL = 'foo\0bar'
self.assertRaises(ValueError, self.open, fn_with_NUL, 'w')
bytes_fn = bytes(fn_with_NUL, 'ascii')
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertRaises(ValueError, self.open, bytes_fn, 'w')
def test_raw_file_io(self):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo\x00bar\n")
self.assertEqual(f.readline(None), b"another line")
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(support.TESTFN, "r") as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_readline_nonsizeable(self):
# Issue #30061
# Crash when readline() returns an object without __len__
class R(self.IOBase):
def readline(self):
return None
self.assertRaises((TypeError, StopIteration), next, R())
def test_next_nonsizeable(self):
# Issue #30061
# Crash when __next__() returns an object without __len__
class R(self.IOBase):
def __next__(self):
return None
self.assertRaises(TypeError, R().readlines, 1)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
# On Windows and Mac OSX this test consumes large resources; it takes
# a long time to build the >2 GiB file and takes >2 GiB of disk space
# therefore the resource must be enabled to run this test.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
support.requires(
'largefile',
'test requires %s bytes and a long time to run' % self.LARGE)
with self.open(support.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(support.TESTFN, "w+b") as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 100):
f = None
with self.open(support.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with self.open(support.TESTFN, "wb", bufsize) as f:
1/0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1/0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "a") as f:
self.assertGreater(f.tell(), 0)
def test_destructor(self):
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
with support.check_warnings(('', ResourceWarning)):
f = MyFileIO(support.TESTFN, "wb")
f.write(b"xxx")
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
# This exercises the availability of attributes on object
# destruction.
# (in the C version, close() is called by the tp_dealloc
# function, not by __del__)
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super().close()
def flush(self):
record.append(self.on_flush)
super().flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def test_array_writes(self):
a = array.array('i', range(10))
n = len(a.tobytes())
def check(f):
with f:
self.assertEqual(f.write(a), n)
f.writelines((a,))
check(self.BytesIO())
check(self.FileIO(support.TESTFN, "w"))
check(self.BufferedWriter(self.MockRawIO()))
check(self.BufferedRandom(self.MockRawIO()))
check(self.BufferedRWPair(self.MockRawIO(), self.MockRawIO()))
def test_closefd(self):
self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
closefd=False)
def test_read_closed(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
with self.open(support.TESTFN, "r") as f:
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False)
def test_closefd_attr(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"egg\n")
with self.open(support.TESTFN, "r") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
def test_garbage_collection(self):
# FileIO objects are collected, and collecting them flushes
# all data to disk.
with support.check_warnings(('', ResourceWarning)):
f = self.FileIO(support.TESTFN, "wb")
f.write(b"abcxxx")
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
# Issue #1174606: reading from an unbounded stream such as /dev/zero.
zero = "/dev/zero"
if not os.path.exists(zero):
self.skipTest("{0} does not exist".format(zero))
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
self.skipTest("test requires at least 2 GiB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "r") as f:
self.assertRaises(OverflowError, f.read)
def check_flush_error_on_close(self, *args, **kwargs):
# Test that the file is closed despite failed flush
# and that flush() is called before file closed.
f = self.open(*args, **kwargs)
closed = []
def bad_flush():
closed[:] = [f.closed]
raise OSError()
f.flush = bad_flush
self.assertRaises(OSError, f.close) # exception not swallowed
self.assertTrue(f.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
f.flush = lambda: None # break reference loop
def test_flush_error_on_close(self):
# raw file
# Issue #5700: io.FileIO calls flush() after file closed
self.check_flush_error_on_close(support.TESTFN, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0, closefd=False)
os.close(fd)
# buffered io
self.check_flush_error_on_close(support.TESTFN, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', closefd=False)
os.close(fd)
# text io
self.check_flush_error_on_close(support.TESTFN, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w', closefd=False)
os.close(fd)
def test_multi_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
# Exercise the default limited RawIOBase.read(n) implementation (which
# calls readinto() internally).
rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
self.assertEqual(rawio.read(2), b"ab")
self.assertEqual(rawio.read(2), b"c")
self.assertEqual(rawio.read(2), b"d")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"ef")
self.assertEqual(rawio.read(2), b"g")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"")
def test_types_have_dict(self):
test = (
self.IOBase(),
self.RawIOBase(),
self.TextIOBase(),
self.StringIO(),
self.BytesIO()
)
for obj in test:
self.assertTrue(hasattr(obj, "__dict__"))
def test_opener(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
fd = os.open(support.TESTFN, os.O_RDONLY)
def opener(path, flags):
return fd
with self.open("non-existent", "r", opener=opener) as f:
self.assertEqual(f.read(), "egg\n")
def test_bad_opener_negative_1(self):
# Issue #27066.
def badopener(fname, flags):
return -1
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -1')
def test_bad_opener_other_negative(self):
# Issue #27066.
def badopener(fname, flags):
return -2
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -2')
def test_fileio_closefd(self):
# Issue #4841
with self.open(__file__, 'rb') as f1, \
self.open(__file__, 'rb') as f2:
fileio = self.FileIO(f1.fileno(), closefd=False)
# .__init__() must not close f1
fileio.__init__(f2.fileno(), closefd=False)
f1.readline()
# .close() must not close f2
fileio.close()
f2.readline()
def test_nonbuffered_textio(self):
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', buffering=0)
def test_invalid_newline(self):
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', newline='invalid')
def test_buffered_readinto_mixin(self):
# Test the implementation provided by BufferedIOBase
class Stream(self.BufferedIOBase):
def read(self, size):
return b"12345"
read1 = read
stream = Stream()
for method in ("readinto", "readinto1"):
with self.subTest(method):
buffer = byteslike(5)
self.assertEqual(getattr(stream, method)(buffer), 5)
self.assertEqual(bytes(buffer), b"12345")
def test_fspath_support(self):
def check_path_succeeds(path):
with self.open(path, "w") as f:
f.write("egg\n")
with self.open(path, "r") as f:
self.assertEqual(f.read(), "egg\n")
check_path_succeeds(FakePath(support.TESTFN))
check_path_succeeds(FakePath(support.TESTFN.encode('utf-8')))
with self.open(support.TESTFN, "w") as f:
bad_path = FakePath(f.fileno())
with self.assertRaises(TypeError):
self.open(bad_path, 'w')
bad_path = FakePath(None)
with self.assertRaises(TypeError):
self.open(bad_path, 'w')
bad_path = FakePath(FloatingPointError)
with self.assertRaises(FloatingPointError):
self.open(bad_path, 'w')
# ensure that refcounting is correct with some error conditions
with self.assertRaisesRegex(ValueError, 'read/write/append mode'):
self.open(FakePath(support.TESTFN), 'rwxa')
def test_RawIOBase_readall(self):
# Exercise the default unlimited RawIOBase.read() and readall()
# implementations.
rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
self.assertEqual(rawio.read(), b"abcdefg")
rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
self.assertEqual(rawio.readall(), b"abcdefg")
def test_BufferedIOBase_readinto(self):
# Exercise the default BufferedIOBase.readinto() and readinto1()
# implementations (which call read() or read1() internally).
class Reader(self.BufferedIOBase):
def __init__(self, avail):
self.avail = avail
def read(self, size):
result = self.avail[:size]
self.avail = self.avail[size:]
return result
def read1(self, size):
"""Returns no more than 5 bytes at once"""
return self.read(min(size, 5))
tests = (
# (test method, total data available, read buffer size, expected
# read size)
("readinto", 10, 5, 5),
("readinto", 10, 6, 6), # More than read1() can return
("readinto", 5, 6, 5), # Buffer larger than total available
("readinto", 6, 7, 6),
("readinto", 10, 0, 0), # Empty buffer
("readinto1", 10, 5, 5), # Result limited to single read1() call
("readinto1", 10, 6, 5), # Buffer larger than read1() can return
("readinto1", 5, 6, 5), # Buffer larger than total available
("readinto1", 6, 7, 5),
("readinto1", 10, 0, 0), # Empty buffer
)
UNUSED_BYTE = 0x81
for test in tests:
with self.subTest(test):
method, avail, request, result = test
reader = Reader(bytes(range(avail)))
buffer = bytearray((UNUSED_BYTE,) * request)
method = getattr(reader, method)
self.assertEqual(method(buffer), result)
self.assertEqual(len(buffer), request)
self.assertSequenceEqual(buffer[:result], range(result))
unused = (UNUSED_BYTE,) * (request - result)
self.assertSequenceEqual(buffer[result:], unused)
self.assertEqual(len(reader.avail), avail - result)
def test_close_assert(self):
class R(self.IOBase):
def __setattr__(self, name, value):
pass
def flush(self):
raise OSError()
f = R()
# This would cause an assertion failure.
self.assertRaises(OSError, f.close)
# Silence destructor error
R.flush = lambda self: None
class CIOTest(IOTest):
def test_IOBase_finalize(self):
# Issue #12149: segmentation fault on _PyIOBase_finalize when both a
# class which inherits IOBase and an object of this class are caught
# in a reference cycle and close() is already in the method cache.
class MyIO(self.IOBase):
def close(self):
pass
# create an instance to populate the method cache
MyIO()
obj = MyIO()
obj.obj = obj
wr = weakref.ref(obj)
del MyIO
del obj
support.gc_collect()
self.assertIsNone(wr(), wr)
class PyIOTest(IOTest):
pass
@support.cpython_only
class APIMismatchTest(unittest.TestCase):
def test_RawIOBase_io_in_pyio_match(self):
"""Test that pyio RawIOBase class has all c RawIOBase methods"""
mismatch = support.detect_api_mismatch(pyio.RawIOBase, io.RawIOBase,
ignore=('__weakref__',))
self.assertEqual(mismatch, set(), msg='Python RawIOBase does not have all C RawIOBase methods')
def test_RawIOBase_pyio_in_io_match(self):
"""Test that c RawIOBase class has all pyio RawIOBase methods"""
mismatch = support.detect_api_mismatch(io.RawIOBase, pyio.RawIOBase)
self.assertEqual(mismatch, set(), msg='C RawIOBase does not have all Python RawIOBase methods')
class CommonBufferedTests:
# Tests common to BufferedReader, BufferedWriter and BufferedRandom
def test_detach(self):
raw = self.MockRawIO()
buf = self.tp(raw)
self.assertIs(buf.detach(), raw)
self.assertRaises(ValueError, buf.detach)
repr(buf) # Should still work
def test_fileno(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertEqual(42, bufio.fileno())
def test_invalid_args(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
# Invalid whence
self.assertRaises(ValueError, bufio.seek, 0, -1)
self.assertRaises(ValueError, bufio.seek, 0, 9)
def test_override_destructor(self):
tp = self.tp
record = []
class MyBufferedIO(tp):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
rawio = self.MockRawIO()
bufio = MyBufferedIO(rawio)
del bufio
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_context_manager(self):
# Test usability as a context manager
rawio = self.MockRawIO()
bufio = self.tp(rawio)
def _with():
with bufio:
pass
_with()
# bufio should now be closed, and using it a second time should raise
# a ValueError.
self.assertRaises(ValueError, _with)
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
with support.catch_unraisable_exception() as cm:
with self.assertRaises(AttributeError):
self.tp(rawio).xyzzy
if not IOBASE_EMITS_UNRAISABLE:
self.assertIsNone(cm.unraisable)
elif cm.unraisable is not None:
self.assertEqual(cm.unraisable.exc_type, OSError)
def test_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
clsname = r"(%s\.)?%s" % (self.tp.__module__, self.tp.__qualname__)
self.assertRegex(repr(b), "<%s>" % clsname)
raw.name = "dummy"
self.assertRegex(repr(b), "<%s name='dummy'>" % clsname)
raw.name = b"dummy"
self.assertRegex(repr(b), "<%s name=b'dummy'>" % clsname)
def test_recursive_repr(self):
# Issue #25455
raw = self.MockRawIO()
b = self.tp(raw)
with support.swap_attr(raw, 'name', b):
try:
repr(b) # Should not crash
except RuntimeError:
pass
def test_flush_error_on_close(self):
# Test that buffered file is closed despite failed flush
# and that flush() is called before file closed.
raw = self.MockRawIO()
closed = []
def bad_flush():
closed[:] = [b.closed, raw.closed]
raise OSError()
raw.flush = bad_flush
b = self.tp(raw)
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
self.assertTrue(raw.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
raw.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
b.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(b.closed)
# Silence destructor error
raw.close = lambda: None
b.flush = lambda: None
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
raw = self.MockRawIO()
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
b.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(b.closed)
# Silence destructor error
b.flush = lambda: None
raw.close = lambda: None
def test_multi_close(self):
raw = self.MockRawIO()
b = self.tp(raw)
b.close()
b.close()
b.close()
self.assertRaises(ValueError, b.flush)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
def test_readonly_attributes(self):
raw = self.MockRawIO()
buf = self.tp(raw)
x = self.MockRawIO()
with self.assertRaises(AttributeError):
buf.raw = x
class SizeofTest:
@support.cpython_only
def test_sizeof(self):
bufsize1 = 4096
bufsize2 = 8192
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize1)
size = sys.getsizeof(bufio) - bufsize1
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize2)
self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
@support.cpython_only
def test_buffer_freeing(self):
bufsize = 4096
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize)
size = sys.getsizeof(bufio) - bufsize
bufio.close()
self.assertEqual(sys.getsizeof(bufio), size)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = "rb"
def test_constructor(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(b"abc", bufio.read())
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
rawio = self.MockRawIO([b"abc"])
bufio.__init__(rawio)
self.assertEqual(b"abc", bufio.read())
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.read, 0)
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.read(0), b'')
def test_read(self):
for arg in (None, 7):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(arg))
# Invalid args
self.assertRaises(ValueError, bufio.read, -2)
def test_read1(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"b", bufio.read1(1))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"", bufio.read1(0))
self.assertEqual(b"c", bufio.read1(100))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"d", bufio.read1(100))
self.assertEqual(rawio._reads, 2)
self.assertEqual(b"efg", bufio.read1(100))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1(100))
self.assertEqual(rawio._reads, 4)
def test_read1_arbitrary(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"bc", bufio.read1())
self.assertEqual(b"d", bufio.read1())
self.assertEqual(b"efg", bufio.read1(-1))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1())
self.assertEqual(rawio._reads, 4)
def test_readinto(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
b = bytearray(2)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"cd")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ef")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"gf")
self.assertEqual(bufio.readinto(b), 0)
self.assertEqual(b, b"gf")
rawio = self.MockRawIO((b"abc", None))
bufio = self.tp(rawio)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"cb")
def test_readinto1(self):
buffer_size = 10
rawio = self.MockRawIO((b"abc", b"de", b"fgh", b"jkl"))
bufio = self.tp(rawio, buffer_size=buffer_size)
b = bytearray(2)
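# Like read1(), readinto1() does at most one raw read, so it may fill
# fewer bytes than the target buffer has room for.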
self.assertEqual(bufio.peek(3), b'abc')
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 1)
self.assertEqual(b[:1], b"c")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"de")
self.assertEqual(rawio._reads, 2)
b = bytearray(2*buffer_size)
self.assertEqual(bufio.peek(3), b'fgh')
self.assertEqual(rawio._reads, 3)
self.assertEqual(bufio.readinto1(b), 6)
self.assertEqual(b[:6], b"fghjkl")
self.assertEqual(rawio._reads, 4)
def test_readinto_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readinto1_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto1(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readlines(self):
def bufio():
rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
return self.tp(rawio)
self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
def test_buffering(self):
data = b"abcdefghi"
dlen = len(data)
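# Each entry is [buffer_size, buffered read sizes to request, expected raw read sizes].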
tests = [
[ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
[ 100, [ 3, 3, 3], [ dlen ] ],
[ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
]
for bufsize, buf_read_sizes, raw_read_sizes in tests:
rawio = self.MockFileIO(data)
bufio = self.tp(rawio, buffer_size=bufsize)
pos = 0
for nbytes in buf_read_sizes:
self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
pos += nbytes
# this is mildly implementation-dependent
self.assertEqual(rawio.read_history, raw_read_sizes)
def test_read_non_blocking(self):
# Inject some None's in there to simulate EWOULDBLOCK
rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
bufio = self.tp(rawio)
self.assertEqual(b"abcd", bufio.read(6))
self.assertEqual(b"e", bufio.read(1))
self.assertEqual(b"fg", bufio.read())
self.assertEqual(b"", bufio.peek(1))
self.assertIsNone(bufio.read())
self.assertEqual(b"", bufio.read())
rawio = self.MockRawIO((b"a", None, None))
self.assertEqual(b"a", rawio.readall())
self.assertIsNone(rawio.readall())
def test_read_past_eof(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(9000))
def test_read_all(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read())
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes with exactly the same number of 0's,
# 1's... 255's. This will help us check that concurrent reading
# doesn't duplicate or forget contents.
N = 1000
l = list(range(256)) * N
random.shuffle(l)
s = bytes(bytearray(l))
with self.open(support.TESTFN, "wb") as f:
f.write(s)
with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
results = []
def f():
try:
# Intra-buffer read then buffer-flushing read
for n in cycle([1, 19]):
s = bufio.read(n)
if not s:
break
# list.append() is atomic
results.append(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
s = b''.join(results)
for i in range(256):
c = bytes(bytearray([i]))
self.assertEqual(s.count(c), N)
finally:
support.unlink(support.TESTFN)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
bufio.read(1)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
self.assertRaises(self.UnsupportedOperation, bufio.tell)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
# Silence destructor error
bufio.close = lambda: None
def test_no_extraneous_read(self):
# Issue #9550; when the raw IO object has satisfied the read request,
# we should not issue any additional reads, otherwise it may block
# (e.g. socket).
bufsize = 16
for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
rawio = self.MockRawIO([b"x" * n])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
# Simple case: one raw read is enough to satisfy the request.
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
# A more complex case where two raw reads are needed to satisfy
# the request.
rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
def test_read_on_closed(self):
# Issue #23796
b = io.BufferedReader(io.BytesIO(b"12"))
b.read(1)
b.close()
self.assertRaises(ValueError, b.peek)
self.assertRaises(ValueError, b.read1, 1)
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
tp = io.BufferedReader
@unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.read)
def test_misbehaved_io_read(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
# _pyio.BufferedReader seems to implement reading differently, so that
# checking this is not so easy.
self.assertRaises(OSError, bufio.read, 10)
def test_garbage_collection(self):
# C BufferedReader objects are collected.
# The Python version has __del__, so it ends up in gc.garbage instead
self.addCleanup(support.unlink, support.TESTFN)
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedReader"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedReaderTest(BufferedReaderTest):
tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(3, bufio.write(b"abc"))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEqual(3, bufio.write(b"ghi"))
bufio.flush()
self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.write, b'')
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.write(b''), 0)
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
buffer = bytearray(b"def")
bufio.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
bufio.flush()
self.assertEqual(b"".join(writer._write_stack), b"abcdef")
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes, test the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
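# The odd buffer size (13) presumably keeps write boundaries from lining up
# with the buffer boundary.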
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents, b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_writelines(self):
l = [b'ab', b'cd', b'ef']
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_userlist(self):
l = UserList([b'ab', b'cd', b'ef'])
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_error(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
self.assertRaises(TypeError, bufio.writelines, None)
self.assertRaises(TypeError, bufio.writelines, 'abc')
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
del bufio
support.gc_collect()
self.assertEqual(b"abc", writer._write_stack[0])
def test_truncate(self):
# Truncate implicitly flushes the buffer.
self.addCleanup(support.unlink, support.TESTFN)
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 6)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.read(), b"abc")
def test_truncate_after_write(self):
# Ensure that truncate preserves the file position after
# writes longer than the buffer size.
# Issue: https://bugs.python.org/issue32228
self.addCleanup(support.unlink, support.TESTFN)
with self.open(support.TESTFN, "wb") as f:
# Fill with some buffer
f.write(b'\x00' * 10000)
buffer_sizes = [8192, 4096, 200]
for buffer_size in buffer_sizes:
with self.open(support.TESTFN, "r+b", buffering=buffer_size) as f:
f.write(b'\x00' * (buffer_size + 1))
# After the write, write_pos and write_end are set to 0
f.read(1)
# The read operation makes sure that pos != raw_pos
f.truncate()
self.assertEqual(f.tell(), buffer_size + 2)
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes from many threads and test they were
# all flushed.
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n+size])
n += size
del contents
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
bufio.close()
with self.open(support.TESTFN, "rb") as f:
s = f.read()
for i in range(256):
self.assertEqual(s.count(bytes([i])), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
self.assertRaises(OSError, bufio.write, b"abcdef")
# Silence destructor error
bufio.close = lambda: None
def test_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), 8, 12)
def test_write_error_on_close(self):
raw = self.MockRawIO()
def bad_write(b):
raise OSError()
raw.write = bad_write
b = self.tp(raw)
b.write(b'spam')
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
def test_slow_close_from_thread(self):
# Issue #31976
rawio = self.SlowFlushRawIO()
bufio = self.tp(rawio, 8)
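# While another thread is blocked inside close() (in the slow flush), the
# object must already report itself closed and reject further writes.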
t = threading.Thread(target=bufio.close)
t.start()
rawio.in_flush.wait()
self.assertRaises(ValueError, bufio.write, b'spam')
self.assertTrue(bufio.closed)
t.join()
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
tp = io.BufferedWriter
@unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_constructor(self):
BufferedWriterTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.write, b"def")
def test_garbage_collection(self):
# C BufferedWriter objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends up in gc.garbage instead
self.addCleanup(support.unlink, support.TESTFN)
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.write(b"123xxx")
f.x = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"123xxx")
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedWriter"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedWriterTest(BufferedWriterTest):
tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
def test_constructor(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
def test_uninitialized(self):
pair = self.tp.__new__(self.tp)
del pair
pair = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.read, 0)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.write, b'')
pair.__init__(self.MockRawIO(), self.MockRawIO())
self.assertEqual(pair.read(0), b'')
self.assertEqual(pair.write(b''), 0)
def test_detach(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertRaises(self.UnsupportedOperation, pair.detach)
def test_constructor_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
def test_constructor_with_not_readable(self):
class NotReadable(MockRawIO):
def readable(self):
return False
self.assertRaises(OSError, self.tp, NotReadable(), self.MockRawIO())
def test_constructor_with_not_writeable(self):
class NotWriteable(MockRawIO):
def writable(self):
return False
self.assertRaises(OSError, self.tp, self.MockRawIO(), NotWriteable())
def test_read(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read(3), b"abc")
self.assertEqual(pair.read(1), b"d")
self.assertEqual(pair.read(), b"ef")
pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
self.assertEqual(pair.read(None), b"abc")
def test_readlines(self):
pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
def test_read1(self):
# .read1() is delegated to the underlying reader object, so this test
# can be shallow.
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read1(3), b"abc")
self.assertEqual(pair.read1(), b"def")
def test_readinto(self):
for method in ("readinto", "readinto1"):
with self.subTest(method):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
data = byteslike(b'\0' * 5)
self.assertEqual(getattr(pair, method)(data), 5)
self.assertEqual(bytes(data), b"abcde")
def test_write(self):
w = self.MockRawIO()
pair = self.tp(self.MockRawIO(), w)
pair.write(b"abc")
pair.flush()
buffer = bytearray(b"def")
pair.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
pair.flush()
self.assertEqual(w._write_stack, [b"abc", b"def"])
def test_peek(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertTrue(pair.peek(3).startswith(b"abc"))
self.assertEqual(pair.read(3), b"abc")
def test_readable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.readable())
def test_writeable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.writable())
def test_seekable(self):
# BufferedRWPairs are never seekable, even if their readers and writers
# are.
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.seekable())
# .flush() is delegated to the underlying writer object and has been
# tested in the test_write method.
def test_close_and_closed(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
pair.close()
self.assertTrue(pair.closed)
def test_reader_close_error_on_close(self):
def reader_close():
reader_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertTrue(pair.closed)
self.assertFalse(reader.closed)
self.assertTrue(writer.closed)
# Silence destructor error
reader.close = lambda: None
def test_writer_close_error_on_close(self):
def writer_close():
writer_non_existing
reader = self.MockRawIO()
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('writer_non_existing', str(err.exception))
self.assertFalse(pair.closed)
self.assertTrue(reader.closed)
self.assertFalse(writer.closed)
# Silence destructor error
writer.close = lambda: None
writer = None
# Ignore BufferedWriter (of the BufferedRWPair) unraisable exception
with support.catch_unraisable_exception():
# Ignore BufferedRWPair unraisable exception
with support.catch_unraisable_exception():
pair = None
support.gc_collect()
support.gc_collect()
def test_reader_writer_close_error_on_close(self):
def reader_close():
reader_non_existing
def writer_close():
writer_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('writer_non_existing', str(err.exception.__context__))
self.assertFalse(pair.closed)
self.assertFalse(reader.closed)
self.assertFalse(writer.closed)
# Silence destructor error
reader.close = lambda: None
writer.close = lambda: None
def test_isatty(self):
class SelectableIsAtty(MockRawIO):
def __init__(self, isatty):
MockRawIO.__init__(self)
self._isatty = isatty
def isatty(self):
return self._isatty
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
self.assertFalse(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
def test_weakref_clearing(self):
brw = self.tp(self.MockRawIO(), self.MockRawIO())
ref = weakref.ref(brw)
brw = None
ref = None # Shouldn't segfault.
class CBufferedRWPairTest(BufferedRWPairTest):
tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
read_mode = "rb+"
write_mode = "wb+"
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
def test_uninitialized(self):
BufferedReaderTest.test_uninitialized(self)
BufferedWriterTest.test_uninitialized(self)
def test_read_and_write(self):
raw = self.MockRawIO((b"asdf", b"ghjk"))
rw = self.tp(raw, 8)
self.assertEqual(b"as", rw.read(2))
rw.write(b"ddd")
rw.write(b"eee")
self.assertFalse(raw._write_stack) # Buffer writes
self.assertEqual(b"ghjk", rw.read())
self.assertEqual(b"dddeee", raw._write_stack[0])
def test_seek_and_tell(self):
raw = self.BytesIO(b"asdfghjkl")
rw = self.tp(raw)
self.assertEqual(b"as", rw.read(2))
self.assertEqual(2, rw.tell())
rw.seek(0, 0)
self.assertEqual(b"asdf", rw.read(4))
rw.write(b"123f")
rw.seek(0, 0)
self.assertEqual(b"asdf123fl", rw.read())
self.assertEqual(9, rw.tell())
rw.seek(-4, 2)
self.assertEqual(5, rw.tell())
rw.seek(2, 1)
self.assertEqual(7, rw.tell())
self.assertEqual(b"fl", rw.read(11))
rw.flush()
self.assertEqual(b"asdf123fl", raw.getvalue())
self.assertRaises(TypeError, rw.seek, 0.0)
def check_flush_and_read(self, read_func):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
self.assertEqual(b"ab", read_func(bufio, 2))
bufio.write(b"12")
self.assertEqual(b"ef", read_func(bufio, 2))
self.assertEqual(6, bufio.tell())
bufio.flush()
self.assertEqual(6, bufio.tell())
self.assertEqual(b"ghi", read_func(bufio))
raw.seek(0, 0)
raw.write(b"XYZ")
# flush() resets the read buffer
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"XYZ", read_func(bufio, 3))
def test_flush_and_read(self):
self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
def test_flush_and_readinto(self):
def _readinto(bufio, n=-1):
b = bytearray(n if n >= 0 else 9999)
n = bufio.readinto(b)
return bytes(b[:n])
self.check_flush_and_read(_readinto)
def test_flush_and_peek(self):
def _peek(bufio, n=-1):
# This relies on the fact that the buffer can contain the whole
# raw stream, otherwise peek() may return fewer bytes.
b = bufio.peek(n)
if n != -1:
b = b[:n]
bufio.seek(len(b), 1)
return b
self.check_flush_and_read(_peek)
def test_flush_and_write(self):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
bufio.write(b"123")
bufio.flush()
bufio.write(b"45")
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"12345fghi", raw.getvalue())
self.assertEqual(b"12345fghi", bufio.read())
def test_threads(self):
BufferedReaderTest.test_threads(self)
BufferedWriterTest.test_threads(self)
def test_writes_and_peek(self):
def _peek(bufio):
bufio.peek(1)
self.check_writes(_peek)
def _peek(bufio):
pos = bufio.tell()
bufio.seek(-1, 1)
bufio.peek(1)
bufio.seek(pos, 0)
self.check_writes(_peek)
def test_writes_and_reads(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.read(1)
self.check_writes(_read)
def test_writes_and_read1s(self):
def _read1(bufio):
bufio.seek(-1, 1)
bufio.read1(1)
self.check_writes(_read1)
def test_writes_and_readintos(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.readinto(bytearray(1))
self.check_writes(_read)
def test_write_after_readahead(self):
# Issue #6629: writing after the buffer was filled by readahead should
# first rewind the raw stream.
for overwrite_size in [1, 5]:
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 4)
# Trigger readahead
self.assertEqual(bufio.read(1), b"A")
self.assertEqual(bufio.tell(), 1)
# Overwriting should rewind the raw stream if needed
bufio.write(b"B" * overwrite_size)
self.assertEqual(bufio.tell(), overwrite_size + 1)
# If the write size was smaller than the buffer size, flush() and
# check that rewind happens.
bufio.flush()
self.assertEqual(bufio.tell(), overwrite_size + 1)
s = raw.getvalue()
self.assertEqual(s,
b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
def test_write_rewind_write(self):
# Various combinations of reading / writing / seeking backwards / writing again
def mutate(bufio, pos1, pos2):
assert pos2 >= pos1
# Fill the buffer
bufio.seek(pos1)
bufio.read(pos2 - pos1)
bufio.write(b'\x02')
# This writes earlier than the previous write, but still inside
# the buffer.
bufio.seek(pos1)
bufio.write(b'\x01')
b = b"\x80\x81\x82\x83\x84"
for i in range(0, len(b)):
for j in range(i, len(b)):
raw = self.BytesIO(b)
bufio = self.tp(raw, 100)
mutate(bufio, i, j)
bufio.flush()
expected = bytearray(b)
expected[j] = 2
expected[i] = 1
self.assertEqual(raw.getvalue(), expected,
"failed result for i=%d, j=%d" % (i, j))
def test_truncate_after_read_or_write(self):
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 100)
self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
self.assertEqual(bufio.truncate(), 2)
self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
self.assertEqual(bufio.truncate(), 4)
def test_misbehaved_io(self):
BufferedReaderTest.test_misbehaved_io(self)
BufferedWriterTest.test_misbehaved_io(self)
def test_interleaved_read_write(self):
# Test for issue #12213
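# Single-byte writes alternate with reads, so the raw stream should end up
# with written bytes interleaved with the original data (b'1b2d3f4h' below).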
with self.BytesIO(b'abcdefgh') as raw:
with self.tp(raw, 100) as f:
f.write(b"1")
self.assertEqual(f.read(1), b'b')
f.write(b'2')
self.assertEqual(f.read1(1), b'd')
f.write(b'3')
buf = bytearray(1)
f.readinto(buf)
self.assertEqual(buf, b'f')
f.write(b'4')
self.assertEqual(f.peek(1), b'h')
f.flush()
self.assertEqual(raw.getvalue(), b'1b2d3f4h')
with self.BytesIO(b'abc') as raw:
with self.tp(raw, 100) as f:
self.assertEqual(f.read(1), b'a')
f.write(b"2")
self.assertEqual(f.read(1), b'c')
f.flush()
self.assertEqual(raw.getvalue(), b'a2c')
def test_interleaved_readline_write(self):
with self.BytesIO(b'ab\ncdef\ng\n') as raw:
with self.tp(raw) as f:
f.write(b'1')
self.assertEqual(f.readline(), b'b\n')
f.write(b'2')
self.assertEqual(f.readline(), b'def\n')
f.write(b'3')
self.assertEqual(f.readline(), b'\n')
f.flush()
self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
# You can't construct a BufferedRandom over a non-seekable stream.
test_unseekable = None
class CBufferedRandomTest(BufferedRandomTest, SizeofTest):
tp = io.BufferedRandom
@unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_constructor(self):
BufferedRandomTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_garbage_collection(self):
CBufferedReaderTest.test_garbage_collection(self)
CBufferedWriterTest.test_garbage_collection(self)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedRandom"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedRandomTest(BufferedRandomTest):
tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
"""
For testing seek/tell behavior with a stateful, buffering decoder.
Input is a sequence of words. Words may be fixed-length (length set
by input) or variable-length (period-terminated). In variable-length
mode, extra periods are ignored. Possible words are:
- 'i' followed by a number sets the input length, I (maximum 99).
When I is set to 0, words are period-terminated (variable length).
- 'o' followed by a number sets the output length, O (maximum 99).
- Any other word is converted into a word followed by a period on
the output. The output word consists of the input word truncated
or padded out with hyphens to make its length equal to O. If O
is 0, the word is output verbatim without truncating or padding.
I and O are initially set to 1. When I changes, any buffered input is
re-scanned according to the new I. EOF also terminates the last word.
"""
def __init__(self, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors)
self.reset()
def __repr__(self):
return '<SID %x>' % id(self)
def reset(self):
self.i = 1
self.o = 1
self.buffer = bytearray()
def getstate(self):
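# State is (pending bytes, flags); flags packs i and o as i*100 + o, with
# both XOR'd against 1 so a freshly reset decoder reports flags == 0.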
i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset()
return bytes(self.buffer), i*100 + o
def setstate(self, state):
buffer, io = state
self.buffer = bytearray(buffer)
i, o = divmod(io, 100)
self.i, self.o = i ^ 1, o ^ 1
def decode(self, input, final=False):
output = ''
for b in input:
if self.i == 0: # variable-length, terminated with period
if b == ord('.'):
if self.buffer:
output += self.process_word()
else:
self.buffer.append(b)
else: # fixed-length, terminate after self.i bytes
self.buffer.append(b)
if len(self.buffer) == self.i:
output += self.process_word()
if final and self.buffer: # EOF terminates the last word
output += self.process_word()
return output
def process_word(self):
output = ''
if self.buffer[0] == ord('i'):
self.i = min(99, int(self.buffer[1:] or 0)) # set input length
elif self.buffer[0] == ord('o'):
self.o = min(99, int(self.buffer[1:] or 0)) # set output length
else:
output = self.buffer.decode('ascii')
if len(output) < self.o:
output += '-'*self.o # pad out with hyphens
if self.o:
output = output[:self.o] # truncate to output length
output += '.'
self.buffer = bytearray()
return output
codecEnabled = False
@classmethod
def lookupTestDecoder(cls, name):
if cls.codecEnabled and name == 'test_decoder':
latin1 = codecs.lookup('latin-1')
return codecs.CodecInfo(
name='test_decoder', encode=latin1.encode, decode=None,
incrementalencoder=None,
streamreader=None, streamwriter=None,
incrementaldecoder=cls)
# Register the previous decoder for testing.
# Disabled by default, tests will enable it.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
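# For illustration: with the initial I=1, O=1 every input byte becomes a
# one-character word followed by a period, e.g. b'abcd' decodes to 'a.b.c.d.'
# (the first case in StatefulIncrementalDecoderTest below); the 'i' and 'o'
# control words change those lengths.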
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
"""
test_cases = [
# I=1, O=1 (fixed-length input == fixed-length output)
(b'abcd', False, 'a.b.c.d.'),
# I=0, O=0 (variable-length input, variable-length output)
(b'oiabcd', True, 'abcd.'),
# I=0, O=0 (should ignore extra periods)
(b'oi...abcd...', True, 'abcd.'),
# I=0, O=6 (variable-length input, fixed-length output)
(b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
# I=2, O=6 (fixed-length input < fixed-length output)
(b'i.i2.o6xyz', True, 'xy----.z-----.'),
# I=6, O=3 (fixed-length input > fixed-length output)
(b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
# I=0, then 3; O=29, then 15 (with longer output)
(b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
'a----------------------------.' +
'b----------------------------.' +
'cde--------------------------.' +
'abcdefghijabcde.' +
'a.b------------.' +
'.c.------------.' +
'd.e------------.' +
'k--------------.' +
'l--------------.' +
'm--------------.')
]
def test_decoder(self):
# Try a few one-shot test cases.
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(input, eof), output)
# Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(b'oiabcd'), '')
self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def test_constructor(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
t.__init__(b, encoding="latin-1", newline="\r\n")
self.assertEqual(t.encoding, "latin-1")
self.assertEqual(t.line_buffering, False)
t.__init__(b, encoding="utf-8", line_buffering=True)
self.assertEqual(t.encoding, "utf-8")
self.assertEqual(t.line_buffering, True)
self.assertEqual("\xe9\n", t.readline())
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
def test_uninitialized(self):
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
del t
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
t.read, 0)
t.__init__(self.MockRawIO())
self.assertEqual(t.read(0), '')
def test_non_text_encoding_codecs_are_rejected(self):
# Ensure the constructor complains if passed a codec that isn't
# marked as a text encoding
# http://bugs.python.org/issue20404
r = self.BytesIO()
b = self.BufferedWriter(r)
with self.assertRaisesRegex(LookupError, "is not a text encoding"):
self.TextIOWrapper(b, encoding="hex")
def test_detach(self):
r = self.BytesIO()
b = self.BufferedWriter(r)
t = self.TextIOWrapper(b)
self.assertIs(t.detach(), b)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("howdy")
self.assertFalse(r.getvalue())
t.detach()
self.assertEqual(r.getvalue(), b"howdy")
self.assertRaises(ValueError, t.detach)
# Operations independent of the detached stream should still work
repr(t)
self.assertEqual(t.encoding, "ascii")
self.assertEqual(t.errors, "strict")
self.assertFalse(t.line_buffering)
self.assertFalse(t.write_through)
def test_repr(self):
raw = self.BytesIO("hello".encode("utf-8"))
b = self.BufferedReader(raw)
t = self.TextIOWrapper(b, encoding="utf-8")
modname = self.TextIOWrapper.__module__
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper encoding='utf-8'>" % modname)
raw.name = "dummy"
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
t.mode = "r"
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper name='dummy' mode='r' encoding='utf-8'>" % modname)
raw.name = b"dummy"
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper name=b'dummy' mode='r' encoding='utf-8'>" % modname)
t.buffer.detach()
repr(t) # Should not raise an exception
def test_recursive_repr(self):
# Issue #25455
raw = self.BytesIO()
t = self.TextIOWrapper(raw)
with support.swap_attr(raw, 'name', t):
try:
repr(t) # Should not crash
except RuntimeError:
pass
def test_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
t.write("X")
self.assertEqual(r.getvalue(), b"") # No flush happened
t.write("Y\nZ")
self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed
t.write("A\rB")
self.assertEqual(r.getvalue(), b"XY\nZA\rB")
def test_reconfigure_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=False)
t.write("AB\nC")
self.assertEqual(r.getvalue(), b"")
t.reconfigure(line_buffering=True) # implicit flush
self.assertEqual(r.getvalue(), b"AB\nC")
t.write("DEF\nG")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
t.write("H")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
t.reconfigure(line_buffering=False) # implicit flush
self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")
t.write("IJ")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")
# Passing no argument or None keeps the current value
t.reconfigure()
t.reconfigure(line_buffering=None)
self.assertEqual(t.line_buffering, False)
t.reconfigure(line_buffering=True)
t.reconfigure()
t.reconfigure(line_buffering=None)
self.assertEqual(t.line_buffering, True)
@unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled")
def test_default_encoding(self):
old_environ = dict(os.environ)
try:
# try to get a user preferred encoding different from the current
# locale encoding to check that TextIOWrapper() uses the current
# locale encoding and not the user preferred encoding
for key in ('LC_ALL', 'LANG', 'LC_CTYPE'):
if key in os.environ:
del os.environ[key]
current_locale_encoding = locale.getpreferredencoding(False)
b = self.BytesIO()
t = self.TextIOWrapper(b)
self.assertEqual(t.encoding, current_locale_encoding)
finally:
os.environ.clear()
os.environ.update(old_environ)
@support.cpython_only
@unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled")
def test_device_encoding(self):
# Issue 15989
import _testcapi
b = self.BytesIO()
b.fileno = lambda: _testcapi.INT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
b.fileno = lambda: _testcapi.UINT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
def test_encoding(self):
# Check the encoding attribute is always set, and valid
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="utf-8")
self.assertEqual(t.encoding, "utf-8")
t = self.TextIOWrapper(b)
self.assertIsNotNone(t.encoding)
codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
# (1) default
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.read)
# (2) explicit strict
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.read)
# (3) ignore
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
self.assertEqual(t.read(), "abc\n\n")
# (4) replace
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
self.assertEqual(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
# (1) default
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.write, "\xff")
# (2) explicit strict
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.write, "\xff")
# (3) ignore
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abcdef\n")
# (4) replace
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abc?def\n")
def test_newlines(self):
input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
tests = [
[ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
[ '', input_lines ],
[ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
[ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
[ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
]
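# Each entry pairs a newline argument with the lines expected when reading
# the sample data under that translation mode.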
encodings = (
'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
# Try a range of buffer sizes to test the case where \r is the last
# character in TextIOWrapper._pending_line.
for encoding in encodings:
# XXX: str.encode() should return bytes
data = bytes(''.join(input_lines).encode(encoding))
for do_reads in (False, True):
for bufsize in range(1, 10):
for newline, exp_lines in tests:
bufio = self.BufferedReader(self.BytesIO(data), bufsize)
textio = self.TextIOWrapper(bufio, newline=newline,
encoding=encoding)
if do_reads:
got_lines = []
while True:
c2 = textio.read(2)
if c2 == '':
break
self.assertEqual(len(c2), 2)
got_lines.append(c2 + textio.readline())
else:
got_lines = list(textio)
for got_line, exp_line in zip(got_lines, exp_lines):
self.assertEqual(got_line, exp_line)
self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
for newline, expected in [
(None, normalized.decode("ascii").splitlines(keepends=True)),
("", testdata.decode("ascii").splitlines(keepends=True)),
("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
]:
buf = self.BytesIO(testdata)
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
self.assertEqual(txt.readlines(), expected)
txt.seek(0)
self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
testdict = {
"": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
"\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
}
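# newline=None translates "\n" to os.linesep, so its expected output is the
# dict entry for os.linesep.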
tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
for newline, expected in tests:
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
txt.write("AAA\nB")
txt.write("BB\nCCC\n")
txt.write("X\rY\r\nZ")
txt.flush()
self.assertEqual(buf.closed, False)
self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
l = []
base = self.BytesIO
class MyBytesIO(base):
def close(self):
l.append(self.getvalue())
base.close(self)
b = MyBytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
t.write("abc")
del t
support.gc_collect()
self.assertEqual([b"abc"], l)
def test_override_destructor(self):
record = []
class MyTextIO(self.TextIOWrapper):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
b = self.BytesIO()
t = MyTextIO(b, encoding="ascii")
del t
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
with support.catch_unraisable_exception() as cm:
with self.assertRaises(AttributeError):
self.TextIOWrapper(rawio).xyzzy
if not IOBASE_EMITS_UNRAISABLE:
self.assertIsNone(cm.unraisable)
elif cm.unraisable is not None:
self.assertEqual(cm.unraisable.exc_type, OSError)
# Systematic tests of the text I/O API
def test_basic_io(self):
for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
for enc in "ascii", "latin-1", "utf-8" :# , "utf-16-be", "utf-16-le":
f = self.open(support.TESTFN, "w+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.write("abc"), 3)
f.close()
f = self.open(support.TESTFN, "r+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(), "abc")
cookie = f.tell()
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(None), "abc")
f.seek(0)
self.assertEqual(f.read(2), "ab")
self.assertEqual(f.read(1), "c")
self.assertEqual(f.read(1), "")
self.assertEqual(f.read(), "")
self.assertEqual(f.tell(), cookie)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.seek(0, 2), cookie)
self.assertEqual(f.write("def"), 3)
self.assertEqual(f.seek(cookie), cookie)
self.assertEqual(f.read(), "def")
if enc.startswith("utf"):
self.multi_line_test(f, enc)
f.close()
def multi_line_test(self, f, enc):
f.seek(0)
f.truncate()
sample = "s\xff\u0fff\uffff"
wlines = []
for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
chars = []
for i in range(size):
chars.append(sample[i % len(sample)])
line = "".join(chars) + "\n"
wlines.append((f.tell(), line))
f.write(line)
f.seek(0)
rlines = []
while True:
pos = f.tell()
line = f.readline()
if not line:
break
rlines.append((pos, line))
self.assertEqual(rlines, wlines)
def test_telling(self):
f = self.open(support.TESTFN, "w+", encoding="utf-8")
p0 = f.tell()
f.write("\xff\n")
p1 = f.tell()
f.write("\xff\n")
p2 = f.tell()
f.seek(0)
self.assertEqual(f.tell(), p0)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p1)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p2)
f.seek(0)
for line in f:
self.assertEqual(line, "\xff\n")
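# tell() is disabled while the file is being iterated (a next() call is
# pending), so it raises OSError inside the loop.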
self.assertRaises(OSError, f.tell)
self.assertEqual(f.tell(), p2)
f.close()
def test_seeking(self):
chunk_size = _default_chunk_size()
prefix_size = chunk_size - 2
u_prefix = "a" * prefix_size
prefix = bytes(u_prefix.encode("utf-8"))
self.assertEqual(len(u_prefix), len(prefix))
u_suffix = "\u8888\n"
suffix = bytes(u_suffix.encode("utf-8"))
line = prefix + suffix
with self.open(support.TESTFN, "wb") as f:
f.write(line*2)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
s = f.read(prefix_size)
self.assertEqual(s, str(prefix, "ascii"))
self.assertEqual(f.tell(), prefix_size)
self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
# Regression test for a specific bug
data = b'\xe0\xbf\xbf\n'
with self.open(support.TESTFN, "wb") as f:
f.write(data)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
f._CHUNK_SIZE # Just test that it exists
f._CHUNK_SIZE = 2
f.readline()
f.tell()
def test_seek_and_tell(self):
# Test seek/tell using the StatefulIncrementalDecoder.
# Make test faster by doing smaller seeks
CHUNK_SIZE = 128
def test_seek_and_tell_with_data(data, min_pos=0):
"""Tell/seek to various points within a data stream and ensure
that the decoded data returned by read() is consistent."""
f = self.open(support.TESTFN, 'wb')
f.write(data)
f.close()
f = self.open(support.TESTFN, encoding='test_decoder')
f._CHUNK_SIZE = CHUNK_SIZE
decoded = f.read()
f.close()
for i in range(min_pos, len(decoded) + 1): # seek positions
for j in [1, 5, len(decoded) - i]: # read lengths
f = self.open(support.TESTFN, encoding='test_decoder')
self.assertEqual(f.read(i), decoded[:i])
cookie = f.tell()
self.assertEqual(f.read(j), decoded[i:i + j])
f.seek(cookie)
self.assertEqual(f.read(), decoded[i:])
f.close()
# Enable the test decoder.
StatefulIncrementalDecoder.codecEnabled = 1
# Run the tests.
try:
# Try each test case.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
test_seek_and_tell_with_data(input)
# Position each test case so that it crosses a chunk boundary.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
offset = CHUNK_SIZE - len(input)//2
prefix = b'.'*offset
# Don't bother seeking into the prefix (takes too long).
min_pos = offset*2
test_seek_and_tell_with_data(prefix + input, min_pos)
# Ensure our test decoder won't interfere with subsequent tests.
finally:
StatefulIncrementalDecoder.codecEnabled = 0
def test_multibyte_seek_and_tell(self):
f = self.open(support.TESTFN, "w", encoding="euc_jp")
f.write("AB\n\u3046\u3048\n")
f.close()
f = self.open(support.TESTFN, "r", encoding="euc_jp")
self.assertEqual(f.readline(), "AB\n")
p0 = f.tell()
self.assertEqual(f.readline(), "\u3046\u3048\n")
p1 = f.tell()
f.seek(p0)
self.assertEqual(f.readline(), "\u3046\u3048\n")
self.assertEqual(f.tell(), p1)
f.close()
def test_seek_with_encoder_state(self):
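# euc_jis_2004 is a stateful codec: seeking back into already-written data
# has to restore the encoder state captured by tell().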
f = self.open(support.TESTFN, "w", encoding="euc_jis_2004")
f.write("\u00e6\u0300")
p0 = f.tell()
f.write("\u00e6")
f.seek(p0)
f.write("\u0300")
f.close()
f = self.open(support.TESTFN, "r", encoding="euc_jis_2004")
self.assertEqual(f.readline(), "\u00e6\u0300\u0300")
f.close()
def test_encoded_writes(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
for encoding in tests:
buf = self.BytesIO()
f = self.TextIOWrapper(buf, encoding=encoding)
# Check if the BOM is written only once (see issue1753).
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
class UnReadable(self.BytesIO):
def readable(self):
return False
txt = self.TextIOWrapper(UnReadable())
self.assertRaises(OSError, txt.read)
def test_read_one_by_one(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, "AA\nBB")
def test_readlines(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
# Read in amounts equal to TextIOWrapper._CHUNK_SIZE, which is 128.
def test_read_by_chunk(self):
# make sure "\r\n" straddles 128 char boundary.
txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
reads = ""
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEqual(reads, "A"*127+"\nB")
def test_writelines(self):
l = ['ab', 'cd', 'ef']
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_userlist(self):
l = UserList(['ab', 'cd', 'ef'])
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_error(self):
txt = self.TextIOWrapper(self.BytesIO())
self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
self.assertRaises(TypeError, txt.writelines, None)
self.assertRaises(TypeError, txt.writelines, b'abc')
def test_issue1395_1(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
# read one char at a time
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_2(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = ""
while True:
c = txt.read(4)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read(4)
reads += txt.readline()
reads += txt.readline()
reads += txt.readline()
self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read()
self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
buffer = self.BytesIO(self.testdata)
txt = self.TextIOWrapper(buffer, encoding="ascii")
self.assertEqual(buffer.seekable(), txt.seekable())
def test_append_bom(self):
# The BOM is not written again when appending to a non-empty file
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaa'.encode(charset))
with self.open(filename, 'a', encoding=charset) as f:
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_seek_bom(self):
# Same test, but when seeking manually
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'r+', encoding=charset) as f:
f.seek(pos)
f.write('zzz')
f.seek(0)
f.write('bbb')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
def test_seek_append_bom(self):
# Same test, but first seek to the start and then to the end
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
with self.open(filename, 'a', encoding=charset) as f:
f.seek(0)
f.seek(0, self.SEEK_END)
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_errors_property(self):
with self.open(support.TESTFN, "w") as f:
self.assertEqual(f.errors, "strict")
with self.open(support.TESTFN, "w", errors="replace") as f:
self.assertEqual(f.errors, "replace")
@support.no_tracing
def test_threads_write(self):
# Issue6750: concurrent writes could duplicate data
event = threading.Event()
with self.open(support.TESTFN, "w", buffering=1) as f:
def run(n):
text = "Thread%03d\n" % n
event.wait()
f.write(text)
threads = [threading.Thread(target=run, args=(x,))
for x in range(20)]
with support.start_threads(threads, event.set):
time.sleep(0.02)
with self.open(support.TESTFN) as f:
content = f.read()
for n in range(20):
self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
# Test that text file is closed despite failed flush
# and that flush() is called before the file is closed.
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
closed = []
def bad_flush():
closed[:] = [txt.closed, txt.buffer.closed]
raise OSError()
txt.flush = bad_flush
self.assertRaises(OSError, txt.close) # exception not swallowed
self.assertTrue(txt.closed)
self.assertTrue(txt.buffer.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
txt.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
txt.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(txt.closed)
# Silence destructor error
buffer.close = lambda: None
txt.flush = lambda: None
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
txt.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(txt.closed)
# Silence destructor error
buffer.close = lambda: None
txt.flush = lambda: None
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt.close()
txt.close()
txt.close()
self.assertRaises(ValueError, txt.flush)
def test_unseekable(self):
txt = self.TextIOWrapper(self.MockUnseekableIO(self.testdata))
self.assertRaises(self.UnsupportedOperation, txt.tell)
self.assertRaises(self.UnsupportedOperation, txt.seek, 0)
def test_readonly_attributes(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
buf = self.BytesIO(self.testdata)
with self.assertRaises(AttributeError):
txt.buffer = buf
def test_rawio(self):
# Issue #12591: TextIOWrapper must work with raw I/O objects, so
# that subprocess.Popen() can have the required unbuffered
# semantics with universal_newlines=True.
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
# Reads
self.assertEqual(txt.read(4), 'abcd')
self.assertEqual(txt.readline(), 'efghi\n')
self.assertEqual(list(txt), ['jkl\n', 'opq\n'])
def test_rawio_write_through(self):
# Issue #12591: with write_through=True, writes don't need a flush
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n',
write_through=True)
txt.write('1')
txt.write('23\n4')
txt.write('5')
self.assertEqual(b''.join(raw._write_stack), b'123\n45')
def test_bufio_write_through(self):
# Issue #21396: write_through=True doesn't force a flush()
# on the underlying binary buffered object.
flush_called, write_called = [], []
class BufferedWriter(self.BufferedWriter):
def flush(self, *args, **kwargs):
flush_called.append(True)
return super().flush(*args, **kwargs)
def write(self, *args, **kwargs):
write_called.append(True)
return super().write(*args, **kwargs)
rawio = self.BytesIO()
data = b"a"
bufio = BufferedWriter(rawio, len(data)*2)
textio = self.TextIOWrapper(bufio, encoding='ascii',
write_through=True)
# write to the buffered io but don't overflow the buffer
text = data.decode('ascii')
textio.write(text)
# buffer.flush is not called with write_through=True
self.assertFalse(flush_called)
# buffer.write *is* called with write_through=True
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), b"") # no flush
write_called = [] # reset
textio.write(text * 10) # total content is larger than bufio buffer
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), data * 11) # all flushed
def test_reconfigure_write_through(self):
raw = self.MockRawIO([])
t = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
t.write('1')
t.reconfigure(write_through=True) # implied flush
self.assertEqual(t.write_through, True)
self.assertEqual(b''.join(raw._write_stack), b'1')
t.write('23')
self.assertEqual(b''.join(raw._write_stack), b'123')
t.reconfigure(write_through=False)
self.assertEqual(t.write_through, False)
t.write('45')
t.flush()
self.assertEqual(b''.join(raw._write_stack), b'12345')
# Keeping default value
t.reconfigure()
t.reconfigure(write_through=None)
self.assertEqual(t.write_through, False)
t.reconfigure(write_through=True)
t.reconfigure()
t.reconfigure(write_through=None)
self.assertEqual(t.write_through, True)
def test_read_nonbytes(self):
# Issue #17106
# Crash when underlying read() returns non-bytes
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read, 1)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.readline)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read)
def test_illegal_encoder(self):
# Issue 31271: Calling write() while the return value of encoder's
# encode() is invalid shouldn't cause an assertion failure.
rot13 = codecs.lookup("rot13")
with support.swap_attr(rot13, '_is_text_encoding', True):
t = io.TextIOWrapper(io.BytesIO(b'foo'), encoding="rot13")
self.assertRaises(TypeError, t.write, 'bar')
def test_illegal_decoder(self):
# Issue #17106
# Bypass the early encoding check added in issue 20404
def _make_illegal_wrapper():
quopri = codecs.lookup("quopri")
quopri._is_text_encoding = True
try:
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'),
newline='\n', encoding="quopri")
finally:
quopri._is_text_encoding = False
return t
# Crash when decoder returns non-string
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read, 1)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.readline)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read)
# Issue 31243: calling read() while the return value of decoder's
# getstate() is invalid should neither crash the interpreter nor
# raise a SystemError.
def _make_very_illegal_wrapper(getstate_ret_val):
class BadDecoder:
def getstate(self):
return getstate_ret_val
def _get_bad_decoder(dummy):
return BadDecoder()
quopri = codecs.lookup("quopri")
with support.swap_attr(quopri, 'incrementaldecoder',
_get_bad_decoder):
return _make_illegal_wrapper()
t = _make_very_illegal_wrapper(42)
self.assertRaises(TypeError, t.read, 42)
t = _make_very_illegal_wrapper(())
self.assertRaises(TypeError, t.read, 42)
t = _make_very_illegal_wrapper((1, 2))
self.assertRaises(TypeError, t.read, 42)
def _check_create_at_shutdown(self, **kwargs):
# Issue #20037: creating a TextIOWrapper at shutdown
# shouldn't crash the interpreter.
iomod = self.io.__name__
code = """if 1:
import codecs
import {iomod} as io
# Avoid looking up codecs at shutdown
codecs.lookup('utf-8')
class C:
def __init__(self):
self.buf = io.BytesIO()
def __del__(self):
io.TextIOWrapper(self.buf, **{kwargs})
print("ok")
c = C()
""".format(iomod=iomod, kwargs=kwargs)
return assert_python_ok("-c", code)
@support.requires_type_collecting
def test_create_at_shutdown_without_encoding(self):
rc, out, err = self._check_create_at_shutdown()
if err:
# Can error out with a RuntimeError if the module state
# isn't found.
self.assertIn(self.shutdown_error, err.decode())
else:
self.assertEqual("ok", out.decode().strip())
@support.requires_type_collecting
def test_create_at_shutdown_with_encoding(self):
rc, out, err = self._check_create_at_shutdown(encoding='utf-8',
errors='strict')
self.assertFalse(err)
self.assertEqual("ok", out.decode().strip())
def test_read_byteslike(self):
r = MemviewBytesIO(b'Just some random string\n')
t = self.TextIOWrapper(r, 'utf-8')
        # TextIOWrapper will not read the full string, because
# we truncate it to a multiple of the native int size
# so that we can construct a more complex memoryview.
bytes_val = _to_memoryview(r.getvalue()).tobytes()
self.assertEqual(t.read(200), bytes_val.decode('utf-8'))
def test_issue22849(self):
class F(object):
def readable(self): return True
def writable(self): return True
def seekable(self): return True
for i in range(10):
try:
self.TextIOWrapper(F(), encoding='utf-8')
except Exception:
pass
F.tell = lambda x: 0
t = self.TextIOWrapper(F(), encoding='utf-8')
def test_reconfigure_encoding_read(self):
# latin1 -> utf8
# (latin1 can decode utf-8 encoded string)
data = 'abc\xe9\n'.encode('latin1') + 'd\xe9f\n'.encode('utf8')
raw = self.BytesIO(data)
txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n')
self.assertEqual(txt.readline(), 'abc\xe9\n')
with self.assertRaises(self.UnsupportedOperation):
txt.reconfigure(encoding='utf-8')
with self.assertRaises(self.UnsupportedOperation):
txt.reconfigure(newline=None)
def test_reconfigure_write_fromascii(self):
# ascii has a specific encodefunc in the C implementation,
# but utf-8-sig has not. Make sure that we get rid of the
# cached encodefunc when we switch encoders.
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('foo\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('\xe9\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'foo\n\xc3\xa9\n')
def test_reconfigure_write(self):
# latin -> utf8
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n')
txt.write('abc\xe9\n')
txt.reconfigure(encoding='utf-8')
self.assertEqual(raw.getvalue(), b'abc\xe9\n')
txt.write('d\xe9f\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'abc\xe9\nd\xc3\xa9f\n')
# ascii -> utf-8-sig: ensure that no BOM is written in the middle of
# the file
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('abc\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('d\xe9f\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'abc\nd\xc3\xa9f\n')
def test_reconfigure_write_non_seekable(self):
raw = self.BytesIO()
raw.seekable = lambda: False
raw.seek = None
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('abc\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('d\xe9f\n')
txt.flush()
# If the raw stream is not seekable, there'll be a BOM
self.assertEqual(raw.getvalue(), b'abc\n\xef\xbb\xbfd\xc3\xa9f\n')
def test_reconfigure_defaults(self):
txt = self.TextIOWrapper(self.BytesIO(), 'ascii', 'replace', '\n')
txt.reconfigure(encoding=None)
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'replace')
txt.write('LF\n')
txt.reconfigure(newline='\r\n')
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'replace')
txt.reconfigure(errors='ignore')
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'ignore')
txt.write('CRLF\n')
txt.reconfigure(encoding='utf-8', newline=None)
self.assertEqual(txt.errors, 'strict')
txt.seek(0)
self.assertEqual(txt.read(), 'LF\nCRLF\n')
self.assertEqual(txt.detach().getvalue(), b'LF\nCRLF\r\n')
def test_reconfigure_newline(self):
raw = self.BytesIO(b'CR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline=None)
self.assertEqual(txt.readline(), 'CR\n')
raw = self.BytesIO(b'CR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline='')
self.assertEqual(txt.readline(), 'CR\r')
raw = self.BytesIO(b'CR\rLF\nEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\r')
txt.reconfigure(newline='\n')
self.assertEqual(txt.readline(), 'CR\rLF\n')
raw = self.BytesIO(b'LF\nCR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline='\r')
self.assertEqual(txt.readline(), 'LF\nCR\r')
raw = self.BytesIO(b'CR\rCRLF\r\nEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\r')
txt.reconfigure(newline='\r\n')
self.assertEqual(txt.readline(), 'CR\rCRLF\r\n')
txt = self.TextIOWrapper(self.BytesIO(), 'ascii', newline='\r')
txt.reconfigure(newline=None)
txt.write('linesep\n')
txt.reconfigure(newline='')
txt.write('LF\n')
txt.reconfigure(newline='\n')
txt.write('LF\n')
txt.reconfigure(newline='\r')
txt.write('CR\n')
txt.reconfigure(newline='\r\n')
txt.write('CRLF\n')
expected = 'linesep' + os.linesep + 'LF\nLF\nCR\rCRLF\r\n'
self.assertEqual(txt.detach().getvalue().decode('ascii'), expected)
def test_issue25862(self):
# Assertion failures occurred in tell() after read() and write().
t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
t.read(1)
t.read()
t.tell()
t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
t.read(1)
t.write('x')
t.tell()
class MemviewBytesIO(io.BytesIO):
'''A BytesIO object whose read method returns memoryviews
rather than bytes'''
def read1(self, len_):
return _to_memoryview(super().read1(len_))
def read(self, len_):
return _to_memoryview(super().read(len_))
def _to_memoryview(buf):
'''Convert bytes-object *buf* to a non-trivial memoryview'''
arr = array.array('i')
idx = len(buf) - len(buf) % arr.itemsize
arr.frombytes(buf[:idx])
return memoryview(arr)
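# An illustrative example (not used elsewhere in this test file) of what
# _to_memoryview does: the input is truncated to a whole number of native
# ints, so only a multiple of the itemsize survives the round trip.
def _example_to_memoryview():
    data = b'Just some random string\n'
    view = _to_memoryview(data)
    assert view.tobytes() == data[:len(view) * view.itemsize]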
class CTextIOWrapperTest(TextIOWrapperTest):
io = io
shutdown_error = "RuntimeError: could not find io module state"
def test_initialization(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
self.assertRaises(ValueError, t.read)
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
def test_garbage_collection(self):
# C TextIOWrapper objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends in gc.garbage instead.
with support.check_warnings(('', ResourceWarning)):
rawio = io.FileIO(support.TESTFN, "wb")
b = self.BufferedWriter(rawio)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("456def")
t.x = t
wr = weakref.ref(t)
del t
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"456def")
def test_rwpair_cleared_before_textio(self):
# Issue 13070: TextIOWrapper's finalization would crash when called
# after the reference to the underlying BufferedRWPair's writer got
# cleared by the GC.
for i in range(1000):
b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t1 = self.TextIOWrapper(b1, encoding="ascii")
b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t2 = self.TextIOWrapper(b2, encoding="ascii")
# circular references
t1.buddy = t2
t2.buddy = t1
support.gc_collect()
def test_del__CHUNK_SIZE_SystemError(self):
t = self.TextIOWrapper(self.BytesIO(), encoding='ascii')
with self.assertRaises(AttributeError):
del t._CHUNK_SIZE
class PyTextIOWrapperTest(TextIOWrapperTest):
io = pyio
shutdown_error = "LookupError: unknown encoding: ascii"
class IncrementalNewlineDecoderTest(unittest.TestCase):
def check_newline_decoding_utf8(self, decoder):
# UTF-8 specific tests for a newline decoder
def _check_decode(b, s, **kwargs):
# We exercise getstate() / setstate() as well as decode()
state = decoder.getstate()
self.assertEqual(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEqual(decoder.decode(b, **kwargs), s)
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
_check_decode(b'\n', "\n")
_check_decode(b'\r', "")
_check_decode(b'', "\n", final=True)
_check_decode(b'\r', "\n", final=True)
_check_decode(b'\r', "")
_check_decode(b'a', "\na")
_check_decode(b'\r\r\n', "\n\n")
_check_decode(b'\r', "")
_check_decode(b'\r', "\n")
_check_decode(b'\na', "\na")
_check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\n', "\n")
_check_decode(b'\xe8\xa2\x88\r', "\u8888")
_check_decode(b'\n', "\n")
def check_newline_decoding(self, decoder, encoding):
result = []
if encoding is not None:
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
# Decode one byte at a time
for b in encoder.encode(s):
result.append(decoder.decode(bytes([b])))
else:
encoder = None
def _decode_bytewise(s):
# Decode one char at a time
for c in s:
result.append(decoder.decode(c))
self.assertEqual(decoder.newlines, None)
_decode_bytewise("abc\n\r")
self.assertEqual(decoder.newlines, '\n')
_decode_bytewise("\nabc")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc")
self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
decoder.reset()
input = "abc"
if encoder is not None:
encoder.reset()
input = encoder.encode(input)
self.assertEqual(decoder.decode(input), "abc")
self.assertEqual(decoder.newlines, None)
def test_newline_decoder(self):
encodings = (
# None meaning the IncrementalNewlineDecoder takes unicode input
# rather than bytes input
None, 'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
for enc in encodings:
decoder = enc and codecs.getincrementaldecoder(enc)()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding(decoder, enc)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding_utf8(decoder)
self.assertRaises(TypeError, decoder.setstate, 42)
def test_newline_bytes(self):
# Issue 5433: Excessive optimization in IncrementalNewlineDecoder
def _check(dec):
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0D00"), "\u0D00")
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0A00"), "\u0A00")
self.assertEqual(dec.newlines, None)
dec = self.IncrementalNewlineDecoder(None, translate=False)
_check(dec)
dec = self.IncrementalNewlineDecoder(None, translate=True)
_check(dec)
def test_translate(self):
# issue 35062
for translate in (-2, -1, 1, 2):
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate)
self.check_newline_decoding_utf8(decoder)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=0)
self.assertEqual(decoder.decode(b"\r\r\n"), "\r\r\n")
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def test___all__(self):
for name in self.io.__all__:
obj = getattr(self.io, name, None)
self.assertIsNotNone(obj, name)
if name in ("open", "open_code"):
continue
elif "error" in name.lower() or name == "UnsupportedOperation":
self.assertTrue(issubclass(obj, Exception), name)
elif not name.startswith("SEEK_"):
self.assertTrue(issubclass(obj, self.IOBase))
def test_attributes(self):
f = self.open(support.TESTFN, "wb", buffering=0)
self.assertEqual(f.mode, "wb")
f.close()
with support.check_warnings(('', DeprecationWarning)):
f = self.open(support.TESTFN, "U")
self.assertEqual(f.name, support.TESTFN)
self.assertEqual(f.buffer.name, support.TESTFN)
self.assertEqual(f.buffer.raw.name, support.TESTFN)
self.assertEqual(f.mode, "U")
self.assertEqual(f.buffer.mode, "rb")
self.assertEqual(f.buffer.raw.mode, "rb")
f.close()
f = self.open(support.TESTFN, "w+")
self.assertEqual(f.mode, "w+")
self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
self.assertEqual(f.buffer.raw.mode, "rb+")
g = self.open(f.fileno(), "wb", closefd=False)
self.assertEqual(g.mode, "wb")
self.assertEqual(g.raw.mode, "wb")
self.assertEqual(g.name, f.fileno())
self.assertEqual(g.raw.name, f.fileno())
f.close()
g.close()
def test_open_pipe_with_append(self):
# bpo-27805: Ignore ESPIPE from lseek() in open().
r, w = os.pipe()
self.addCleanup(os.close, r)
f = self.open(w, 'a')
self.addCleanup(f.close)
# Check that the file is marked non-seekable. On Windows, however, lseek
# somehow succeeds on pipes.
if sys.platform != 'win32':
self.assertFalse(f.seekable())
def test_io_after_close(self):
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "w", "buffering": 1},
{"mode": "w", "buffering": 2},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "r", "buffering": 1},
{"mode": "r", "buffering": 2},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+", "buffering": 1},
{"mode": "w+", "buffering": 2},
{"mode": "w+b", "buffering": 0},
]:
f = self.open(support.TESTFN, **kwargs)
f.close()
self.assertRaises(ValueError, f.flush)
self.assertRaises(ValueError, f.fileno)
self.assertRaises(ValueError, f.isatty)
self.assertRaises(ValueError, f.__iter__)
if hasattr(f, "peek"):
self.assertRaises(ValueError, f.peek, 1)
self.assertRaises(ValueError, f.read)
if hasattr(f, "read1"):
self.assertRaises(ValueError, f.read1, 1024)
self.assertRaises(ValueError, f.read1)
if hasattr(f, "readall"):
self.assertRaises(ValueError, f.readall)
if hasattr(f, "readinto"):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
if hasattr(f, "readinto1"):
self.assertRaises(ValueError, f.readinto1, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
self.assertRaises(ValueError, f.readlines, 1)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
self.assertRaises(ValueError, f.write,
b"" if "b" in kwargs['mode'] else "")
self.assertRaises(ValueError, f.writelines, [])
self.assertRaises(ValueError, next, f)
def test_blockingioerror(self):
# Various BlockingIOError issues
class C(str):
pass
c = C("")
b = self.BlockingIOError(1, c)
c.b = b
b.c = c
wr = weakref.ref(c)
del c, b
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_abcs(self):
# Test the visible base classes are ABCs.
self.assertIsInstance(self.IOBase, abc.ABCMeta)
self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
self.assertIsInstance(self.TextIOBase, abc.ABCMeta)
def _check_abc_inheritance(self, abcmodule):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "wb") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "w") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertIsInstance(f, abcmodule.TextIOBase)
def test_abc_inheritance(self):
# Test implementations inherit from their respective ABCs
self._check_abc_inheritance(self)
def test_abc_inheritance_official(self):
# Test implementations inherit from the official ABCs of the
# baseline "io" module.
self._check_abc_inheritance(io)
def _check_warn_on_dealloc(self, *args, **kwargs):
f = open(*args, **kwargs)
r = repr(f)
with self.assertWarns(ResourceWarning) as cm:
f = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_warn_on_dealloc(self):
self._check_warn_on_dealloc(support.TESTFN, "wb", buffering=0)
self._check_warn_on_dealloc(support.TESTFN, "wb")
self._check_warn_on_dealloc(support.TESTFN, "w")
def _check_warn_on_dealloc_fd(self, *args, **kwargs):
fds = []
def cleanup_fds():
for fd in fds:
try:
os.close(fd)
except OSError as e:
if e.errno != errno.EBADF:
raise
self.addCleanup(cleanup_fds)
r, w = os.pipe()
fds += r, w
self._check_warn_on_dealloc(r, *args, **kwargs)
# When using closefd=False, there's no warning
r, w = os.pipe()
fds += r, w
with support.check_no_resource_warning(self):
open(r, *args, closefd=False, **kwargs)
def test_warn_on_dealloc_fd(self):
self._check_warn_on_dealloc_fd("rb", buffering=0)
self._check_warn_on_dealloc_fd("rb")
self._check_warn_on_dealloc_fd("r")
def test_pickling(self):
# Pickling file objects is forbidden
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+b", "buffering": 0},
]:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
with self.open(support.TESTFN, **kwargs) as f:
self.assertRaises(TypeError, pickle.dumps, f, protocol)
def test_nonblock_pipe_write_bigbuf(self):
self._test_nonblock_pipe_write(16*1024)
def test_nonblock_pipe_write_smallbuf(self):
self._test_nonblock_pipe_write(1024)
@unittest.skipUnless(hasattr(os, 'set_blocking'),
'os.set_blocking() required for this test')
def _test_nonblock_pipe_write(self, bufsize):
sent = []
received = []
r, w = os.pipe()
os.set_blocking(r, False)
os.set_blocking(w, False)
# To exercise all code paths in the C implementation we need
# to play with buffer sizes. For instance, if we choose a
# buffer size less than or equal to _PIPE_BUF (4096 on Linux)
# then we will never get a partial write of the buffer.
rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)
with rf, wf:
for N in 9999, 73, 7574:
try:
i = 0
while True:
msg = bytes([i % 26 + 97]) * N
sent.append(msg)
wf.write(msg)
i += 1
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
sent[-1] = sent[-1][:e.characters_written]
received.append(rf.read())
msg = b'BLOCKED'
wf.write(msg)
sent.append(msg)
while True:
try:
wf.flush()
break
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
self.assertEqual(e.characters_written, 0)
received.append(rf.read())
received += iter(rf.read, None)
sent, received = b''.join(sent), b''.join(received)
self.assertEqual(sent, received)
self.assertTrue(wf.closed)
self.assertTrue(rf.closed)
def test_create_fail(self):
        # 'x' mode fails if the file already exists
with self.open(support.TESTFN, 'w'):
pass
self.assertRaises(FileExistsError, self.open, support.TESTFN, 'x')
def test_create_writes(self):
# 'x' mode opens for writing
with self.open(support.TESTFN, 'xb') as f:
f.write(b"spam")
with self.open(support.TESTFN, 'rb') as f:
self.assertEqual(b"spam", f.read())
def test_open_allargs(self):
# there used to be a buffer overflow in the parser for rawmode
self.assertRaises(ValueError, self.open, support.TESTFN, 'rwax+')
class CMiscIOTest(MiscIOTest):
io = io
def test_readinto_buffer_overflow(self):
# Issue #18025
class BadReader(self.io.BufferedIOBase):
def read(self, n=-1):
return b'x' * 10**6
bufio = BadReader()
b = bytearray(2)
self.assertRaises(ValueError, bufio.readinto, b)
def check_daemon_threads_shutdown_deadlock(self, stream_name):
# Issue #23309: deadlocks at shutdown should be avoided when a
# daemon thread and the main thread both write to a file.
code = """if 1:
import sys
import time
import threading
from test.support import SuppressCrashReport
file = sys.{stream_name}
def run():
while True:
file.write('.')
file.flush()
crash = SuppressCrashReport()
crash.__enter__()
# don't call __exit__(): the crash occurs at Python shutdown
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
time.sleep(0.5)
file.write('!')
file.flush()
""".format_map(locals())
res, _ = run_python_until_end("-c", code)
err = res.err.decode()
if res.rc != 0:
# Failure: should be a fatal error
pattern = (r"Fatal Python error: could not acquire lock "
r"for <(_io\.)?BufferedWriter name='<{stream_name}>'> "
r"at interpreter shutdown, possibly due to "
r"daemon threads".format_map(locals()))
self.assertRegex(err, pattern)
else:
self.assertFalse(err.strip('.!'))
def test_daemon_threads_shutdown_stdout_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stdout')
def test_daemon_threads_shutdown_stderr_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stderr')
class PyMiscIOTest(MiscIOTest):
io = pyio
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
def setUp(self):
self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
def tearDown(self):
signal.signal(signal.SIGALRM, self.oldalrm)
def alarm_interrupt(self, sig, frame):
1/0
def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
"""Check that a partial write, when it gets interrupted, properly
invokes the signal handler, and bubbles up the exception raised
in the latter."""
read_results = []
def _read():
s = os.read(r, 1)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
large_data = item * (support.PIPE_MAX_SIZE // len(item) + 1)
try:
wio = self.io.open(w, **fdopen_kwargs)
if hasattr(signal, 'pthread_sigmask'):
# create the thread with SIGALRM signal blocked
signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGALRM])
t.start()
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGALRM])
else:
t.start()
# Fill the pipe enough that the write will be blocking.
# It will be interrupted by the timer armed above. Since the
# other thread has read one byte, the low-level write will
# return with a successful (partial) result rather than an EINTR.
# The buffered IO layer must check for pending signal
# handlers, which in this case will invoke alarm_interrupt().
signal.alarm(1)
try:
self.assertRaises(ZeroDivisionError, wio.write, large_data)
finally:
signal.alarm(0)
t.join()
# We got one byte, get another one and check that it isn't a
# repeat of the first one.
read_results.append(os.read(r, 1))
self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
finally:
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and block again.
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_unbuffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)
def test_interrupted_write_buffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb")
def test_interrupted_write_text(self):
self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")
@support.no_tracing
def check_reentrant_write(self, data, **fdopen_kwargs):
def on_alarm(*args):
# Will be called reentrantly from the same thread
wio.write(data)
1/0
signal.signal(signal.SIGALRM, on_alarm)
r, w = os.pipe()
wio = self.io.open(w, **fdopen_kwargs)
try:
signal.alarm(1)
# Either the reentrant call to wio.write() fails with RuntimeError,
# or the signal handler raises ZeroDivisionError.
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
while 1:
for i in range(100):
wio.write(data)
wio.flush()
# Make sure the buffer doesn't fill up and block further writes
os.read(r, len(data) * 100)
exc = cm.exception
if isinstance(exc, RuntimeError):
self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
finally:
signal.alarm(0)
wio.close()
os.close(r)
def test_reentrant_write_buffered(self):
self.check_reentrant_write(b"xy", mode="wb")
def test_reentrant_write_text(self):
self.check_reentrant_write("xy", mode="w", encoding="ascii")
def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
"""Check that a buffered read, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
def alarm_handler(sig, frame):
os.write(w, b"bar")
signal.signal(signal.SIGALRM, alarm_handler)
try:
rio = self.io.open(r, **fdopen_kwargs)
os.write(w, b"foo")
signal.alarm(1)
# Expected behaviour:
# - first raw read() returns partial b"foo"
# - second raw read() returns EINTR
# - third raw read() returns b"bar"
self.assertEqual(decode(rio.read(6)), "foobar")
finally:
signal.alarm(0)
rio.close()
os.close(w)
os.close(r)
def test_interrupted_read_retry_buffered(self):
self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
mode="rb")
def test_interrupted_read_retry_text(self):
self.check_interrupted_read_retry(lambda x: x,
mode="r")
def check_interrupted_write_retry(self, item, **fdopen_kwargs):
"""Check that a buffered write, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
select = support.import_module("select")
# A quantity that exceeds the buffer size of an anonymous pipe's
# write end.
N = support.PIPE_MAX_SIZE
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
# We need a separate thread to read from the pipe and allow the
# write() to finish. This thread is started after the SIGALRM is
# received (forcing a first EINTR in write()).
read_results = []
write_finished = False
error = None
def _read():
try:
while not write_finished:
while r in select.select([r], [], [], 1.0)[0]:
s = os.read(r, 1024)
read_results.append(s)
except BaseException as exc:
nonlocal error
error = exc
t = threading.Thread(target=_read)
t.daemon = True
def alarm1(sig, frame):
signal.signal(signal.SIGALRM, alarm2)
signal.alarm(1)
def alarm2(sig, frame):
t.start()
large_data = item * N
signal.signal(signal.SIGALRM, alarm1)
try:
wio = self.io.open(w, **fdopen_kwargs)
signal.alarm(1)
# Expected behaviour:
# - first raw write() is partial (because of the limited pipe buffer
# and the first alarm)
# - second raw write() returns EINTR (because of the second alarm)
# - subsequent write()s are successful (either partial or complete)
written = wio.write(large_data)
self.assertEqual(N, written)
wio.flush()
write_finished = True
t.join()
self.assertIsNone(error)
self.assertEqual(N, sum(len(x) for x in read_results))
finally:
signal.alarm(0)
write_finished = True
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and could block (in case of failure).
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_retry_buffered(self):
self.check_interrupted_write_retry(b"x", mode="wb")
def test_interrupted_write_retry_text(self):
self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
io = io
class PySignalsTest(SignalsTest):
io = pyio
# Handling reentrancy issues would slow down _pyio even more, so the
# tests are disabled.
test_reentrant_write_buffered = None
test_reentrant_write_text = None
def load_tests(*args):
tests = (CIOTest, PyIOTest, APIMismatchTest,
CBufferedReaderTest, PyBufferedReaderTest,
CBufferedWriterTest, PyBufferedWriterTest,
CBufferedRWPairTest, PyBufferedRWPairTest,
CBufferedRandomTest, PyBufferedRandomTest,
StatefulIncrementalDecoderTest,
CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
CTextIOWrapperTest, PyTextIOWrapperTest,
CMiscIOTest, PyMiscIOTest,
CSignalsTest, PySignalsTest,
)
# Put the namespaces of the IO module we are testing and some useful mock
# classes in the __dict__ of each test.
mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
MockNonBlockWriterIO, MockUnseekableIO, MockRawIOWithoutRead,
SlowFlushRawIO)
all_members = io.__all__ + ["IncrementalNewlineDecoder"]
c_io_ns = {name : getattr(io, name) for name in all_members}
py_io_ns = {name : getattr(pyio, name) for name in all_members}
globs = globals()
c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
# Avoid turning open into a bound method.
py_io_ns["open"] = pyio.OpenWrapper
for test in tests:
if test.__name__.startswith("C"):
for name, obj in c_io_ns.items():
setattr(test, name, obj)
elif test.__name__.startswith("Py"):
for name, obj in py_io_ns.items():
setattr(test, name, obj)
suite = unittest.TestSuite([unittest.makeSuite(test) for test in tests])
return suite
if __name__ == "__main__":
unittest.main()
|
run.py
|
#!/usr/bin/env python3
# encoding: utf-8
"""
Usage:
python [options]
Options:
    -h,--help                   show this help message
    -a,--algorithm=<name>       specify the training algorithm [default: ppo]
    -c,--copys=<n>              number of environment copies that collect data in parallel [default: 1]
    -e,--env=<file>             specify the path of the built UNITY3D training environment [default: None]
    -g,--graphic                whether to show the graphic interface when using UNITY3D [default: False]
    -i,--inference              run inference with the trained model instead of training policies [default: False]
    -m,--models=<n>             specify the number of trials that use different random seeds [default: 1]
    -n,--name=<name>            specify the name of this training task [default: None]
    -p,--port=<n>               specify the port used to communicate with the UNITY3D training environment [default: 5005]
    -r,--rnn                    whether to use an RNN [GRU, LSTM, ...] model [default: False]
    -s,--save-frequency=<n>     specify the interval for saving model checkpoints [default: None]
    -t,--train-step=<n>         specify the number of training steps used to optimize the policy model [default: None]
    -u,--unity                  whether to train with the UNITY3D editor [default: False]
    --apex=<str>                i.e. "learner"/"worker"/"buffer"/"evaluator" [default: None]
    --unity-env=<name>          specify the name of the UNITY3D training environment [default: None]
    --config-file=<file>        specify the path of the training configuration file [default: None]
    --store-dir=<file>          specify the directory that stores models, logs and other data [default: None]
    --seed=<n>                  specify the global random seed for the random, numpy and tensorflow modules [default: 42]
    --unity-env-seed=<n>        specify the environment random seed of UNITY3D [default: 42]
    --max-step=<n>              specify the maximum number of steps per episode [default: None]
    --train-episode=<n>         specify the maximum number of training episodes [default: None]
    --train-frame=<n>           specify the maximum number of training steps interacting with the environment [default: None]
    --load=<name>               specify the name of the pre-trained model to load [default: None]
    --prefill-steps=<n>         specify the number of experiences to collect before training starts; used for off-policy algorithms [default: None]
    --prefill-choose            whether to choose actions using the model during the no_op prefill phase, rather than randomly [default: False]
    --gym                       whether to train with gym [default: False]
    --gym-env=<name>            specify the environment name of gym [default: CartPole-v0]
    --gym-env-seed=<n>          specify the environment random seed of gym [default: 42]
    --render-episode=<n>        specify the episode from which the gym environment starts rendering its graphic interface [default: None]
    --info=<str>                a description of this training task, wrapped in double quotes [default: None]
    --hostname                  whether to append the hostname to the training name [default: False]
    --no-save                   do not save models, logs or summaries while training [default: False]
Example:
gym:
python run.py --gym -a dqn --gym-env CartPole-v0 -c 12 -n dqn_cartpole --no-save
unity:
python run.py -u -a ppo -n run_with_unity
python run.py -e /root/env/3dball.app -a sac -n run_with_execution_file
"""
import os
import sys
import time
import logging
from typing import Dict
from copy import deepcopy
from docopt import docopt
from multiprocessing import Process
from rls.common.trainer import Trainer
from rls.common.config import Config
from rls.common.yaml_ops import load_yaml
from rls.parse.parse_op import parse_options
from rls.utils.display import show_dict
from rls.utils.logging_utils import set_log_level
os.environ["CUDA_VISIBLE_DEVICES"] = "0, 1"
set_log_level(logging.INFO)
if sys.platform.startswith('win'):
import pywintypes # necessary when using python 3.8+
import win32api
import win32con
import _thread
def _win_handler(event, hook_sigint=_thread.interrupt_main):
'''
handle the event of 'Ctrl+c' in windows operating system.
'''
if event == 0:
hook_sigint()
return 1
return 0
# Add the _win_handler function to the windows console's handler function list
win32api.SetConsoleCtrlHandler(_win_handler, 1)
def get_options(options: Dict) -> Config:
'''
Resolves command-line arguments
params:
options: dictionary of command-line arguments
return:
op: an instance of Config class that contains the parameters
'''
def f(k, t): return None if options[k] == 'None' else t(options[k])
op = Config()
op.add_dict(dict([
['inference', bool(options['--inference'])],
['algo', str(options['--algorithm'])],
['use_rnn', bool(options['--rnn'])],
['algo_config', f('--config-file', str)],
['env', f('--env', str)],
['port', int(options['--port'])],
['unity', bool(options['--unity'])],
['graphic', bool(options['--graphic'])],
['name', f('--name', str)],
['save_frequency', f('--save-frequency', int)],
['models', int(options['--models'])],
['store_dir', f('--store-dir', str)],
['seed', int(options['--seed'])],
['unity_env_seed', int(options['--unity-env-seed'])],
['max_step_per_episode', f('--max-step', int)],
['max_train_step', f('--train-step', int)],
['max_train_frame', f('--train-frame', int)],
['max_train_episode', f('--train-episode', int)],
['load', f('--load', str)],
['prefill_steps', f('--prefill-steps', int)],
['prefill_choose', bool(options['--prefill-choose'])],
['gym', bool(options['--gym'])],
['n_copys', int(options['--copys'])],
['gym_env', str(options['--gym-env'])],
['gym_env_seed', int(options['--gym-env-seed'])],
['render_episode', f('--render-episode', int)],
['info', f('--info', str)],
['unity_env', f('--unity-env', str)],
['apex', f('--apex', str)],
['hostname', bool(options['--hostname'])],
['no_save', bool(options['--no-save'])],
]))
return op
def agent_run(*args):
'''
Start a training task
'''
Trainer(*args)()
def main():
options = docopt(__doc__)
options = get_options(dict(options))
show_dict(options.to_dict)
trails = options.models
    assert trails > 0, '--models must be greater than 0.'
env_args, buffer_args, train_args = parse_options(options, default_config=load_yaml(f'./config.yaml'))
if options.inference:
Trainer(env_args, buffer_args, train_args).evaluate()
return
if options.apex is not None:
train_args.update(load_yaml(f'./rls/distribute/apex/config.yaml'))
Trainer(env_args, buffer_args, train_args).apex()
else:
if trails == 1:
agent_run(env_args, buffer_args, train_args)
elif trails > 1:
processes = []
for i in range(trails):
_env_args, _buffer_args, _train_args = map(deepcopy, [env_args, buffer_args, train_args])
_train_args.seed += i * 10
_train_args.name += f'/{i}'
                _train_args.allow_print = True  # NOTE: setting this could block other processes' print output
if _env_args.type == 'unity':
_env_args.worker_id = env_args.worker_id + i
p = Process(target=agent_run, args=(_env_args, _buffer_args, _train_args))
p.start()
time.sleep(10)
processes.append(p)
[p.join() for p in processes]
if __name__ == "__main__":
try:
import colored_traceback
colored_traceback.add_hook()
except ImportError:
pass
try:
main()
except Exception as e:
print(e)
sys.exit()
|
_streamer.py
|
'''Utilities for processing a continuous stream of data.
An input data stream goes through a series of operations.
The target use case is that one or more operations are I/O bound and
hence can benefit from multi-thread concurrency.
These operations are triggered via `transform`.
The other operations are typically lightweight and support
the main (concurrent) operation; they perform batching,
unbatching, buffering, filtering, logging, etc.
===========
Basic usage
===========
In a typical use case, one starts with a `Stream` object, and calls
its methods in a "chained" fashion:
data = range(100)
pipeline = (
Stream(data)
.batch(10)
.transform(my_op_that_takes_a_batch, workers=4)
.unbatch()
)
After this setup, there are several ways to use the object `pipeline`.
1. Since `pipeline` is an Iterable and an Iterator, we can use it as such.
Most naturally, iterate over it and process each element however
we like.
We can of course also provide `pipeline` as a parameter where an iterable
or iterator is expected. For example, the `mpservice.mpserver.Server`
class has a method `stream` that expects an iterable, hence
we can do things like
server = Server(...)
with server:
pipeline = ...
pipeline = server.stream(pipeline)
pipeline = pipeline.transform(yet_another_io_op)
2. If the stream is not too long (not "big data"), we can convert it to
a list by the method `collect`:
result = pipeline.collect()
3. If we don't need the elements coming out of `pipeline`, but rather
just need the original data (`data`) to flow through all the operations
of the pipeline (e.g. if the last "substantial" operation is inserting
the data into a database), we can "drain" the pipeline:
n = pipeline.drain()
where the returned `n` is the number of elements coming out of the
last operation in the pipeline.
4. We can continue to add more operations to the pipeline, for example,
pipeline = pipeline.transform(another_op, workers=3)
======================
Handling of exceptions
======================
There are two modes of exception handling.
In the first mode, the exception propagates and, as it should, halts the program
with a printout of the traceback. Any not-yet-processed data is discarded.
In the second mode, the exception object is passed on in the pipeline as if it
were a regular data item. Subsequent data items are processed as usual.
This mode is enabled by `return_exceptions=True` to the function `transform`.
However, to the next operation, the exception object that is coming along
with regular data elements (i.e. regular output of the previous operation)
is most likely a problem. One may want to call `drop_exceptions` to remove
exception objects from the data stream before they reach the next operation.
In order to learn about the exceptions before they are removed,
the function `log_exceptions` can be used. Therefore, this is a useful pattern:
(
data_stream
.transform(func1,..., return_exceptions=True)
.log_exceptions()
.drop_exceptions()
.transform(func2,..., return_exceptions=True)
)
Bear in mind that the first mode, with `return_exceptions=False` (the default),
is a totally legitimate and useful mode.
=====
Hooks
=====
There are several "hooks" that allow user to pass in custom functions to
perform operations tailored to their need. Check out the following functions:
`drop_if`
`keep_if`
`peek`
`transform`
Both `drop_if` and `keep_if` accept a function that evaluates a data element
and returns a boolean value. Depending on the return value, the element
is dropped from or kept in the data stream.
`peek` accepts a function that takes a data element and usually does
informational printing or logging (including persisting info to files).
This function may check conditions on the data element to decide whether
to do the printing or do nothing. (Usually we don't want to print for
every element; that would be overwhelming.)
This function is called for the side-effect;
it does not affect the flow of the data stream. The user-provided operator
should not modify the data element.
`transform` accepts a function that takes a data element, does something
about it, and returns a value. For example, modify the element and return
a new value, or call an external service with the data element as part of
the payload. Each input element will produce a new element, and these form the
resultant stream. This method cannot "drop" a data element (i.e. produce no
result corresponding to an input element), nor can it produce
multiple results for a single input element (if it produces a list, say,
that list would be the result for the single input).
If the operation is mainly for the side effect, e.g.
saving data in files or a database, hence there isn't much useful result,
then the result could be `None`, which is not a problem. Regardless,
the returned `None`s will still become the resultant stream.
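A minimal sketch combining these hooks (values are illustrative only):
    result = (
        Stream(range(10))
        .keep_if(lambda i, x: x % 2 == 0)
        .peek_every_nth(2)
        .transform(lambda x: x * 10)
        .collect()
    )
    # result == [0, 20, 40, 60, 80]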
'''
# Iterable vs iterator
#
# if we need to use
#
# for v in X:
# ...
#
# then `X.__iter__()` is called to get an "iterator".
# In this case, `X` is an "iterable", and it must implement `__iter__`.
#
# If we do not use it that way, but rather only directly call
#
# next(X)
#
# then `X` must implement `__next__` (but does not need to implement `__iter__`).
# This `X` is an "iterator".
#
# Often we let `__iter__` return `self`, and implement `__next__` in the same class.
# This way the class is both an iterable and an iterator.
# Async generator returns an async iterator.
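#
# A minimal sketch of a class that is both an iterable and an iterator
# (illustrative only; it is not used in this module):
#
#     class Counter:
#         def __init__(self, n): self.i, self.n = 0, n
#         def __iter__(self): return self
#         def __next__(self):
#             if self.i >= self.n:
#                 raise StopIteration
#             self.i += 1
#             return self.i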
from __future__ import annotations
# Enable using a class in type annotations in the code
# that defines that class itself.
# https://stackoverflow.com/a/49872353
# Will no longer be needed in Python 3.10.
import asyncio
import collections.abc
import concurrent.futures
import functools
import inspect
import logging
import multiprocessing
import queue
import random
import threading
from time import sleep
from typing import (
Callable, TypeVar, Union, Optional,
Iterable, Iterator,
Tuple, Type,
)
MAX_THREADS = min(32, multiprocessing.cpu_count() + 4)
# This default is suitable for I/O bound operations.
# For others, user may want to specify a smaller value.
logger = logging.getLogger(__name__)
T = TypeVar('T')
TT = TypeVar('TT')
def is_exception(e):
return isinstance(e, Exception) or (
inspect.isclass(e) and issubclass(e, Exception)
)
def _default_peek_func(i, x):
print('')
print('#', i)
print(x)
class StreamMixin:
def drop_exceptions(self):
return self.drop_if(lambda i, x: is_exception(x))
def drop_nones(self):
return self.drop_if(lambda i, x: x is None)
def drop_first_n(self, n: int):
assert n >= 0
return self.drop_if(lambda i, x: i < n)
def keep_if(self, func: Callable[[int, T], bool]):
return self.drop_if(lambda i, x: not func(i, x))
def keep_every_nth(self, nth: int):
assert nth > 0
return self.keep_if(lambda i, x: i % nth == 0)
def keep_random(self, frac: float):
assert 0 < frac <= 1
rand = random.random
return self.keep_if(lambda i, x: rand() < frac)
def peek_every_nth(self, nth: int, peek_func: Callable[[int, T], None] = None):
assert nth > 0
if peek_func is None:
peek_func = _default_peek_func
def foo(i, x):
if i % nth == 0:
peek_func(i, x)
return self.peek(foo)
def peek_random(self, frac: float, peek_func: Callable[[int, T], None] = None):
assert 0 < frac <= 1
rand = random.random
if peek_func is None:
peek_func = _default_peek_func
def foo(i, x):
if rand() < frac:
peek_func(i, x)
return self.peek(foo)
def log_every_nth(self, nth: int, level: str = 'info'):
assert nth > 0
flog = getattr(logger, level)
def foo(i, x):
flog('#%d: %r', i, x)
return self.peek_every_nth(nth, foo)
def log_exceptions(self, level: str = 'error') -> Peeker:
flog = getattr(logger, level)
def func(i, x):
if is_exception(x):
flog('#%d: %r', i, x)
return self.peek(func)
class Stream(collections.abc.Iterator, StreamMixin):
@classmethod
def register(cls, class_: Type[Stream], name: str):
def f(self, *args, **kwargs):
return class_(self, *args, **kwargs)
setattr(cls, name, f)
def __init__(self, instream: Union[Stream, Iterator, Iterable]):
if isinstance(instream, Stream):
self._to_shutdown = instream._to_shutdown
self._instream = instream
else:
self._to_shutdown = threading.Event()
if hasattr(instream, '__next__'):
self._instream = instream
else:
self._instream = iter(instream)
self.index = 0
# Index of the upcoming element; 0 based.
# This is also the count of finished elements.
def __iter__(self):
return self
def _get_next(self):
return next(self._instream)
def __next__(self):
try:
z = self._get_next()
self.index += 1
return z
except StopIteration:
raise
except:
self._to_shutdown.set()
raise
def collect(self) -> list:
return list(self)
def drain(self) -> Union[int, Tuple[int, int]]:
'''Drain off the stream.
Return the number of elements processed.
When there are exceptions, return the total number of elements
as well as the number of exceptions.
'''
n = 0
nexc = 0
for v in self:
n += 1
if is_exception(v):
nexc += 1
if nexc:
return n, nexc
return n
def batch(self, batch_size: int) -> Batcher:
'''Take elements from an input stream,
and bundle them up into batches up to a size limit,
and produce the batches in an iterable.
The output batches are all of the specified size, except possibly the final batch.
There is no 'timeout' logic to produce a smaller batch.
For efficiency, this requires the input stream to have a steady supply.
If that is a concern, having a `buffer` on the input stream may help.
'''
return Batcher(self, batch_size)
def unbatch(self) -> Unbatcher:
'''Reverse of "batch", turning a stream of batches into
a stream of individual elements.
'''
return Unbatcher(self)
def drop_if(self, func: Callable[[int, T], bool]) -> Dropper:
return Dropper(self, func)
def head(self, n: int) -> Head:
return Head(self, n)
def peek(self, func: Callable[[int, T], None] = None) -> Peeker:
'''Take a peek at the data element *before* it is sent
on for processing.
The function `func` takes the data index (0-based)
and the data element. Typical actions include print out
info or save the data for later inspection. Usually this
function should not modify the data element in the stream.
'''
if func is None:
func = _default_peek_func
return Peeker(self, func)
def buffer(self, maxsize: int = None) -> Buffer:
'''Buffer is used to stabilize and improve the speed of data flow.
        A buffer is useful after any operation that cannot guarantee
        (almost) instant availability of output. A buffer allows its
        output to "pile up" when the downstream consumer is slow to request it,
        so that data *is* available when the downstream does come to request
        data. The buffer evens out instabilities in the speeds of upstream
production and downstream consumption.
'''
if maxsize is None:
maxsize = 256
else:
assert 1 <= maxsize <= 10_000
return Buffer(self, maxsize)
def transform(self,
func: Callable[[T], TT],
*,
workers: Optional[Union[int, str]] = None,
return_exceptions: bool = False,
**func_args) -> Union[Transformer, ConcurrentTransformer]:
'''Apply a transformation on each element of the data stream,
producing a stream of corresponding results.
`func`: a sync function that takes a single input item
as the first positional argument and produces a result.
Additional keyword args can be passed in via `func_args`.
The outputs are in the order of the input elements in `self._instream`.
The main point of `func` does not have to be the output.
It could rather be some side effect. For example,
saving data in a database. In that case, the output may be
`None`. Regardless, the output is yielded to be consumed by the next
operator in the pipeline. A stream of `None`s could be used
in counting, for example. The output stream may also contain
Exception objects (if `return_exceptions` is `True`), which may be
counted, logged, or handled in other ways.
`workers`: max number of concurrent calls to `func`. By default
this is 0, i.e. there is no concurrency.
`workers=0` and `workers=1` are different. The latter runs the
transformer in a separate thread whereas the former runs "inline".
When `workers = N > 0`, the worker threads are named 'transformer-0',
'transformer-1',..., 'transformer-<N-1>'.
'''
if func_args:
func = functools.partial(func, **func_args)
if workers is None or workers == 0:
return Transformer(self, func, return_exceptions=return_exceptions)
if workers == 'max':
workers = MAX_THREADS
else:
            assert 1 <= workers <= 100
return ConcurrentTransformer(
self, func, workers=workers,
return_exceptions=return_exceptions)
class Batcher(Stream):
def __init__(self, instream: Stream, batch_size: int):
super().__init__(instream)
assert 1 < batch_size <= 10_000
self.batch_size = batch_size
self._done = False
def _get_next(self):
if self._done:
raise StopIteration
batch = []
for _ in range(self.batch_size):
try:
batch.append(next(self._instream))
except StopIteration:
self._done = True
break
if batch:
return batch
raise StopIteration
class Unbatcher(Stream):
def __init__(self, instream: Stream):
super().__init__(instream)
self._batch = None
def _get_next(self):
if self._batch:
return self._batch.pop(0)
self._batch = next(self._instream)
return self._get_next()
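# An illustrative example (not used elsewhere in this module) showing that
# `batch` and `unbatch` round-trip a stream; kept in a function so importing
# this module does not run it.
def _example_batch_roundtrip():
    batches = Stream(range(7)).batch(3).collect()  # [[0, 1, 2], [3, 4, 5], [6]]
    assert batches == [[0, 1, 2], [3, 4, 5], [6]]
    assert Stream(range(7)).batch(3).unbatch().collect() == list(range(7))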
class Dropper(Stream):
def __init__(self, instream: Stream, func: Callable[[int, T], bool]):
super().__init__(instream)
self.func = func
def _get_next(self):
while True:
z = next(self._instream)
if self.func(self.index, z):
self.index += 1
continue
return z
class Head(Stream):
def __init__(self, instream: Stream, n: int):
super().__init__(instream)
assert n >= 0
self.n = n
def _get_next(self):
if self.index >= self.n:
raise StopIteration
return self._instream.__next__()
class Peeker(Stream):
def __init__(self, instream: Stream, func: Callable[[int, T], None]):
super().__init__(instream)
self.func = func
def _get_next(self):
z = next(self._instream)
self.func(self.index, z)
return z
class IterQueue(queue.Queue, collections.abc.Iterator):
'''
A queue that supports iteration over its elements.
In order to support iteration, it adds a special value
to indicate end of data, which is inserted by calling
the method `put_end`.
'''
GET_SLEEP = 0.00056
PUT_SLEEP = 0.00045
NO_MORE_DATA = object()
def __init__(self, maxsize: int, to_shutdown: threading.Event):
'''
`maxsize`: max number of elements to hold before `put` blocks.
`to_shutdown`: a `threading.Event` shared with the upstream queue or stream
that feeds into the current queue. It indicates that either side
has stopped working, whether deliberately or because of an exception.
'''
super().__init__(maxsize + 1)
self._to_shutdown = to_shutdown
def put_end(self, block: bool = True):
self.put(self.NO_MORE_DATA, block=block)
def put(self, x, block: bool = True):
while True:
try:
super().put(x, block=False)
break
except queue.Full:
if self._to_shutdown.is_set():
return
if block:
sleep(self.PUT_SLEEP)
else:
raise
def __next__(self):
while True:
try:
z = self.get_nowait()
if z is self.NO_MORE_DATA:
raise StopIteration
return z
except queue.Empty:
sleep(self.GET_SLEEP)
async def __anext__(self):
# This is used by `async_streamer`.
while True:
try:
z = self.get_nowait()
if z is self.NO_MORE_DATA:
raise StopAsyncIteration
return z
except queue.Empty:
await asyncio.sleep(self.GET_SLEEP)
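# Minimal producer/consumer sketch (illustrative only):
#
#   to_shutdown = threading.Event()
#   q = IterQueue(maxsize=64, to_shutdown=to_shutdown)
#
#   def produce():
#       for i in range(10):
#           q.put(i)
#       q.put_end()          # sentinel so iteration terminates
#
#   threading.Thread(target=produce).start()
#   print(list(q))           # -> [0, 1, ..., 9]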
class Buffer(Stream):
def __init__(self, instream: Stream, maxsize: int):
super().__init__(instream)
assert 1 <= maxsize <= 10_000
self.maxsize = maxsize
self._q = IterQueue(maxsize, self._to_shutdown)
self._err = None
self._thread = None
self._start()
def _start(self):
def foo(instream, q):
try:
for v in instream:
q.put(v)
if self._to_shutdown.is_set():
break
q.put_end()
except Exception as e:
# This should be exception while
# getting data from `instream`,
# not exception in the current object.
self._err = e
self._to_shutdown.set()
self._thread = threading.Thread(
target=foo, args=(self._instream, self._q))
self._thread.start()
def _stop(self):
if self._thread is not None:
self._thread.join()
self._thread = None
def __del__(self):
self._stop()
def _get_next(self):
if self._err is not None:
self._stop()
raise self._err
z = next(self._q)
return z
class Transformer(Stream):
def __init__(self,
instream: Stream,
func: Callable[[T], TT],
*,
return_exceptions: bool = False,
):
super().__init__(instream)
self.func = func
self.return_exceptions = return_exceptions
def _get_next(self):
z = next(self._instream)
try:
return self.func(z)
except Exception as e:
if self.return_exceptions:
return e
raise
def transform(in_stream: Iterator, out_stream: IterQueue,
func, workers, return_exceptions, err):
def _process(in_stream, out_stream, func,
lock, finished, return_exceptions):
Future = concurrent.futures.Future
while not finished.is_set():
with lock:
# This locked block ensures that
# input is read in order and their corresponding
# result placeholders (Future objects) are
# put in the output stream in order.
if finished.is_set():
return
if err:
return
try:
x = next(in_stream)
fut = Future()
out_stream.put(fut)
except StopIteration:
finished.set()
out_stream.put_end()
return
except Exception as e:
finished.set()
err.append(e)
return
try:
y = func(x)
fut.set_result(y)
except Exception as e:
if return_exceptions:
fut.set_result(e)
else:
fut.set_exception(e)
finished.set()
return
lock = threading.Lock()
finished = threading.Event()
tasks = [
threading.Thread(
target=_process,
name=f'transformer-{i}',
args=(in_stream, out_stream, func, lock,
finished, return_exceptions),
)
for i in range(workers)
]
for t in tasks:
t.start()
return tasks
class ConcurrentTransformer(Stream):
def __init__(self,
instream: Stream,
func: Callable[[T], TT],
*,
workers: int,
return_exceptions: bool = False,
):
assert workers >= 1
super().__init__(instream)
self.func = func
self.workers = workers
self.return_exceptions = return_exceptions
self._outstream = IterQueue(workers * 8, self._to_shutdown)
self._err = []
self._tasks = []
self._start()
def _start(self):
self._tasks = transform(
self._instream, self._outstream, self.func,
self.workers, self.return_exceptions, self._err)
def _stop(self):
for t in self._tasks:
t.join()
def __del__(self):
self._stop()
def _get_next(self):
try:
if self._err:
raise self._err[0]
fut = next(self._outstream)
return fut.result()
except:
self._stop()
raise
|
predict.py
|
import ctypes
import logging
import multiprocessing as mp
import numpy as np
from functools import reduce
from gunpowder.array import ArrayKey, Array
from gunpowder.ext import tensorflow as tf
from gunpowder.nodes.generic_predict import GenericPredict
from gunpowder.tensorflow.local_server import LocalServer
from operator import mul
logger = logging.getLogger(__name__)
class Predict(GenericPredict):
'''Tensorflow implementation of :class:`gunpowder.nodes.Predict`.
Args:
checkpoint (``string``):
Basename of a tensorflow checkpoint storing the tensorflow graph
and associated tensor values and metadata, as created by
:class:`gunpowder.nodes.Train`, for example.
inputs (``dict``, ``string`` -> :class:`ArrayKey`):
Dictionary from the names of input tensors in the network to
array keys.
outputs (``dict``, ``string`` -> :class:`ArrayKey`):
Dictionary from the names of output tensors in the network to array
keys. New arrays will be generated by this node for each entry (if
requested downstream).
array_specs (``dict``, :class:`ArrayKey` -> :class:`ArraySpec`, optional):
Used to set the specs of generated arrays (``outputs``). This is
useful to set the ``voxel_size``, for example, if they differ from
the voxel size of the input arrays. Only fields that are not
``None`` in the given :class:`ArraySpec` will be used.
graph: (``string``, optional):
An optional path to a tensorflow computation graph that should be
used for prediction. The checkpoint is used to restore the values
of matching variable names in the graph. Note that the graph
specified here can differ from the one associated to the
checkpoint.
skip_empty (``bool``, optional):
Skip prediction, if all inputs are empty (contain only 0). In this
case, outputs are simply set to 0.
max_shared_memory (``int``, optional):
The maximal amount of shared memory in bytes to allocate to send
batches to the GPU processes. Defaults to 1GB.
'''
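# Usage sketch (illustrative only; the array keys, tensor names and checkpoint
# path below are hypothetical):
#
#   raw = ArrayKey('RAW')
#   pred = ArrayKey('PREDICTIONS')
#   predict = Predict(
#       checkpoint='train_net_checkpoint_50000',
#       inputs={'raw:0': raw},
#       outputs={'predictions:0': pred},
#       skip_empty=True)
#   # `predict` is then added to a gunpowder pipeline in the usual way.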
def __init__(
self,
checkpoint,
inputs,
outputs,
array_specs=None,
graph=None,
skip_empty=False,
max_shared_memory=1024*1024*1024):
super(Predict, self).__init__(
inputs,
outputs,
array_specs)
self.checkpoint = checkpoint
self.meta_graph = graph
self.session = None
self.graph = None
self.skip_empty = skip_empty
self.manager = mp.Manager()
self.max_shared_memory = max_shared_memory
self.shared_input_array_config = self.manager.dict()
self.shared_output_array_config = self.manager.dict()
self.shared_input_arrays = {}
self.shared_output_arrays = {}
self.shared_input_memory = mp.RawArray(
ctypes.c_float,
self.max_shared_memory)
self.shared_output_memory = mp.RawArray(
ctypes.c_float,
self.max_shared_memory)
self.send_lock = mp.Lock()
self.receive_lock = mp.Lock()
self.predict_process_initialized = mp.Event()
self.worker_sent_inputs = mp.Event()
self.predict_received_inputs = mp.Event()
self.predict_sent_outputs = mp.Event()
self.predict_process = mp.Process(target=self.__predict)
self.predict_process_crashed = mp.Value('i', False)
self.predict_process.start()
self.predict_process_initialized.wait()
def predict(self, batch, request):
if not self.shared_output_arrays:
self.__init_shared_output_arrays()
if self.skip_empty:
can_skip = True
for array_key in self.inputs.values():
if batch[array_key].data.any():
can_skip = False
break
if can_skip:
logger.info("Skipping batch %i (all inputs are 0)"%batch.id)
for name, array_key in self.outputs.items():
shape = self.shared_output_arrays[name].shape
dtype = self.shared_output_arrays[name].dtype
spec = self.spec[array_key].copy()
spec.roi = request[array_key].roi.copy()
batch.arrays[array_key] = Array(
np.zeros(shape, dtype=dtype),
spec)
return
logger.debug("predicting in batch %i", batch.id)
output_tensors = self.__collect_outputs(request)
input_data = self.__collect_provided_inputs(batch)
self.send_lock.acquire()
if not self.shared_input_arrays:
if not self.shared_input_array_config:
self.__create_shared_input_array_config(batch, request)
self.__init_shared_input_arrays()
self.__write_inputs_to_shared(input_data)
self.worker_sent_inputs.set()
self.receive_lock.acquire()
self.predict_received_inputs.wait()
self.__check_background_process([self.receive_lock, self.send_lock])
self.predict_received_inputs.clear()
self.send_lock.release()
self.predict_sent_outputs.wait()
self.predict_sent_outputs.clear()
output_data = self.__read_outputs_from_shared(output_tensors)
self.receive_lock.release()
for array_key in output_tensors:
spec = self.spec[array_key].copy()
spec.roi = request[array_key].roi
batch.arrays[array_key] = Array(
output_data[array_key],
spec)
logger.debug("predicted in batch %i", batch.id)
def __predict(self):
'''The background predict process.'''
try:
# TODO: is the server still needed?
target = LocalServer.get_target()
logger.info("Initializing tf session, connecting to %s...", target)
self.graph = tf.Graph()
self.session = tf.Session(
target=target,
graph=self.graph)
with self.graph.as_default():
self.__read_checkpoint()
if not self.shared_output_arrays:
if not self.shared_output_array_config:
self.__create_shared_output_array_config()
self.__init_shared_output_arrays()
# from now on it is safe to access the shared array configuration
self.predict_process_initialized.set()
# loop predict
while True:
# wait for inputs
self.worker_sent_inputs.wait()
self.worker_sent_inputs.clear()
if not self.shared_input_arrays:
self.__init_shared_input_arrays()
# read inputs
input_data = self.__read_inputs_from_shared()
self.predict_received_inputs.set()
# compute outputs
output_data = self.session.run(
{t: t for t in self.outputs.keys()},
feed_dict=input_data)
# write outputs
self.__write_outputs_to_shared(output_data)
self.predict_sent_outputs.set()
except Exception as e:
self.predict_process_crashed.value = True
# release locks and events
self.predict_process_initialized.set()
self.worker_sent_inputs.clear()
self.predict_received_inputs.set()
self.predict_sent_outputs.set()
raise e
def teardown(self):
self.predict_process.terminate()
self.predict_process.join()
def __check_background_process(self, locks=[]):
if self.predict_process_crashed.value:
# release all locks before raising exception
for l in locks:
l.release()
raise RuntimeError("Background process died.")
def __read_checkpoint(self):
# read the graph associated to the checkpoint
if self.meta_graph is None:
meta_graph_file = self.checkpoint + '.meta'
# read alternative, custom graph
else:
meta_graph_file = self.meta_graph
logger.info(
"Reading graph from %s and weights from %s...",
meta_graph_file, self.checkpoint)
saver = tf.train.import_meta_graph(
meta_graph_file,
clear_devices=True)
# restore variables from checkpoint
saver.restore(self.session, self.checkpoint)
def __collect_outputs(self, request=None):
'''Get a dict:
array key: tensor name
If request is not None, return only outputs that are in request.
'''
array_outputs = {}
for tensor_name, array_key in self.outputs.items():
if request is None or array_key in request:
array_outputs[array_key] = tensor_name
return array_outputs
def __collect_provided_inputs(self, batch):
'''Get a dict:
tensor name: ndarray
'''
inputs = {}
for input_name, input_key in self.inputs.items():
if isinstance(input_key, ArrayKey):
if input_key in batch.arrays:
inputs[input_name] = batch.arrays[input_key].data
else:
logger.warn("batch does not contain %s, input %s will not "
"be set", input_key, input_name)
elif isinstance(input_key, np.ndarray):
inputs[input_name] = input_key
elif isinstance(input_key, str):
inputs[input_name] = getattr(batch, input_key)
else:
raise Exception(
"Unknown network input key {}, can't be given to "
"network".format(input_key))
return inputs
def __create_shared_input_array_config(self, batch, request):
'''Store the shared array config in a shared dictionary. Should be run
once by the first worker to submit a batch.'''
begin = 0
for name, array_key in self.inputs.items():
shape = batch[array_key].data.shape
size = reduce(mul, shape, 1)
dtype = batch[array_key].data.dtype
self.shared_input_array_config[name] = (
begin,
size,
shape,
dtype)
begin += size*np.dtype(dtype).itemsize
assert begin <= self.max_shared_memory, (
"The input arrays exceed the max_shared_memory")
def __create_shared_output_array_config(self):
'''To be called by predict process.'''
begin = 0
for name, array_key in self.outputs.items():
tensor = self.graph.get_tensor_by_name(name)
shape = tensor.get_shape().as_list()
size = reduce(mul, shape, 1)
dtype = tensor.dtype.as_numpy_dtype
self.shared_output_array_config[name] = (
begin,
size,
tuple(shape),
dtype)
begin += size*np.dtype(dtype).itemsize
assert begin <= self.max_shared_memory, (
"The output arrays exceed the max_shared_memory")
def __init_shared_input_arrays(self):
'''Assign the shared memory to numpy arrays.'''
for name, (begin, size, shape, dtype) in self.shared_input_array_config.items():
self.shared_input_arrays[name] = np.frombuffer(
self.shared_input_memory,
dtype=dtype,
offset=begin,
count=size).reshape(shape)
def __init_shared_output_arrays(self):
'''Assign the shared memory to numpy arrays.'''
for name, (begin, size, shape, dtype) in self.shared_output_array_config.items():
self.shared_output_arrays[name] = np.frombuffer(
self.shared_output_memory,
dtype=dtype,
offset=begin,
count=size).reshape(shape)
def __write_inputs_to_shared(self, input_data):
for tensor_name, data in input_data.items():
self.shared_input_arrays[tensor_name][:] = data
def __read_inputs_from_shared(self):
return {
tensor_name: self.shared_input_arrays[tensor_name].copy()
for tensor_name in self.inputs.keys()
}
def __write_outputs_to_shared(self, output_data):
for tensor_name, data in output_data.items():
self.shared_output_arrays[tensor_name][:] = data
def __read_outputs_from_shared(self, output_tensors):
return {
array_key: self.shared_output_arrays[tensor_name].copy()
for array_key, tensor_name in output_tensors.items()
}
|
views.py
|
from django.shortcuts import render
from rest_framework import status
from rest_framework.generics import (
ListAPIView,
ListCreateAPIView,
RetrieveUpdateAPIView,)
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from rest_framework.decorators import permission_classes
from apps.configuration.models import Book
from apps.hardspot.models import HardSpot
from .models import Content,ContentContributors
from .serializers import (
ContentListSerializer,
BookNestedSerializer,
BookListSerializer,
ContentStatusListSerializer,
SectionKeywordSerializer,
SubSectionKeywordSerializer,
SectionKeywordsSerializer,
ChapterKeywordsSerializer,
SubSectionKeywordsSerializer,
KeywordSerializer,
ContentContributorSerializer,
ApprovedContentSerializer,
ContentStatusSerializer,
HardSpotCreateSerializer,
ContentContributorsSerializer,
SubSubSectionKeywordsSerializer,
ContentStatusSerializerFileFormat,
)
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import permission_required
from rest_framework.parsers import MultiPartParser
from apps.dataupload.models import (Chapter,
Section,
SubSection,
ChapterKeyword,
SectionKeyword,
SubSectionKeyword,
SubSubSectionKeyword,
)
import json
import pandas as pd
from evolve import settings
from azure.storage.blob import (
BlockBlobService,
ContainerPermissions
)
from datetime import datetime, timedelta
import os
import itertools
from django.db.models import Q
import threading
account_name = settings.AZURE_ACCOUNT_NAME
account_key = settings.AZURE_ACCOUNT_KEY
CONTAINER_NAME= settings.AZURE_CONTAINER
block_blob_service = BlockBlobService(account_name=account_name, account_key=account_key)
class ContentList(ListCreateAPIView):
queryset = Content.objects.all()
serializer_class = KeywordSerializer
parser_classes = (MultiPartParser,)
def get(self, request):
try:
queryset = self.get_queryset()
serializer = ContentStatusListSerializer(queryset, many=True)
context = {"success": True, "message": "Chapter List","data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get Chapter list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def post(self, request,format=None):
try:
serializer = ContentListSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
context = {"success": True, "message": "Created Successful", "data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
context = {"success": False, "message": "Invalid Input Data to create content"}
return Response(context, status=status.HTTP_400_BAD_REQUEST)
except Exception as error:
context = {'success': "false", 'message': 'Failed to create content.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@permission_classes((IsAuthenticated,))
class ContentRetrieveUpdate(RetrieveUpdateAPIView):
queryset = Content.objects.all()
serializer_class = ContentListSerializer
def get(self, request):
try:
queryset = self.get_object()
serializer = ContentListSerializer(queryset, many=True)
context = {"success": True, "message": "Chapter List","data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get content list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def put(self, request, pk, format=None):
try:
try:
content_list = self.get_object()
except Exception as error:
context = {'success': "false", 'message': 'content Id does not exist.'}
return Response(context, status=status.HTTP_404_NOT_FOUND)
serializer = ContentListSerializer(content_list, data=request.data, context={"user":request.user}, partial=True)
if serializer.is_valid():
serializer.save()
context = {"success": True, "message": "Updation Successful","data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
context = {"success": False, "message": "Updation Failed"}
return Response(context, status=status.HTTP_400_BAD_REQUEST)
except Exception as error:
context = {'success': "false", 'message': 'Failed To Update content Details.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class BookNestedList(ListAPIView):
queryset = Book.objects.all()
serializer_class = BookNestedSerializer
def get(self, request):
try:
subject = request.query_params.get('subject', None)
if subject is not None:
queryset=self.get_queryset().filter(subject__id=subject, content_only=True)
else:
queryset = self.get_queryset().filter(content_only=True)
serializer = BookNestedSerializer(queryset, many=True)
context = {"success": True, "message": "Conetent List","data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get Content list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class BookListView(ListAPIView):
queryset = Book.objects.all()
serializer_class = BookListSerializer
def get(self, request):
try:
subject = request.query_params.get('subject', None)
if subject is not None:
queryset=self.get_queryset().filter(subject__id=subject)
else:
queryset = self.get_queryset()
serializer = BookListSerializer(queryset, many=True)
context = {"success": True, "message": "Content List","data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get Conetent list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class ContentApprovedList(ListAPIView):
queryset = Content.objects.all()
serializer_class = KeywordSerializer
def get(self, request):
try:
chapter_id = request.query_params.get('chapter', None)
section_id = request.query_params.get('section', None)
sub_section_id = request.query_params.get('sub_section', None)
sub_sub_section_id = request.query_params.get('sub_sub_section',None)
if chapter_id is not None:
queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=True)
elif section_id is not None:
queryset = self.get_queryset().filter(section__id=section_id, approved=True)
elif sub_section_id is not None:
queryset = self.get_queryset().filter(sub_section__id=sub_section_id, approved=True)
elif sub_sub_section_id is not None:
queryset = self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id,approved=True)
else:
queryset = self.get_queryset().filter(approved=True)
serializer = KeywordSerializer(queryset, many=True)
context = {"success": True, "message": "Content Approved List", "data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get Content Approved list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
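# Example requests handled by this view (illustrative only; the actual URL
# prefixes are defined in the project's urls.py, which is not shown here):
#
#   GET /content/approved/?chapter=12      -> approved content for chapter 12
#   GET /content/approved/?sub_section=7   -> approved content for sub-section 7
#   GET /content/approved/                 -> all approved content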
class ContentPendingList(ListAPIView):
queryset = Content.objects.all()
serializer_class = KeywordSerializer
def get(self, request):
try:
chapter_id = request.query_params.get('chapter', None)
section_id = request.query_params.get('section', None)
sub_section_id = request.query_params.get('sub_section', None)
sub_sub_section_id = request.query_params.get('sub_sub_section',None)
if chapter_id is not None:
queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=False, approved_by=None)
elif section_id is not None:
queryset = self.get_queryset().filter(section__id=section_id, approved=False, approved_by=None)
elif sub_section_id is not None:
queryset = self.get_queryset().filter(sub_section__id=sub_section_id, approved=False, approved_by=None)
elif sub_sub_section_id is not None:
queryset = self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id,approved=False,approved_by=None)
else:
queryset = self.get_queryset().filter(approved=False, approved_by=None)
serializer = KeywordSerializer(queryset, many=True)
context = {"success": True, "message": "Content Pending List","data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get Content Pending list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class ContentStatusList(ListCreateAPIView):
queryset = Content.objects.all()
serializer_class = ContentListSerializer
def get(self, request):
try:
if request.query_params.get('chapter', None) is not None:
queryset=self.get_queryset().filter(chapter_id=request.query_params.get('chapter', None))
elif request.query_params.get('section', None) is not None:
queryset=self.get_queryset().filter(section_id=request.query_params.get('section', None))
elif request.query_params.get('sub_section', None) is not None:
queryset=self.get_queryset().filter(sub_section_id=request.query_params.get('sub_section', None))
else:
queryset = self.get_queryset()
serializer = ContentListSerializer(queryset, many=True)
context = {"success": True, "message": "Content Status List","data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get Content Status list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class ContentRejectedList(ListAPIView):
queryset = Content.objects.all()
serializer_class = ContentListSerializer
def get(self, request):
try:
chapter_id = request.query_params.get('chapter', None)
section_id = request.query_params.get('section', None)
sub_section_id = request.query_params.get('sub_section', None)
sub_sub_section_id = request.query_params.get('sub_sub_section',None)
if chapter_id is not None:
queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=False).exclude(approved_by=None)
elif section_id is not None:
queryset = self.get_queryset().filter(section__id=section_id, approved=False).exclude(approved_by=None)
elif sub_section_id is not None:
queryset = self.get_queryset().filter(sub_section__id=sub_section_id, approved=False).exclude(approved_by=None)
elif sub_sub_section_id is not None:
queryset =self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id , approved = False).exclude(approved_by=None)
else:
queryset = self.get_queryset().filter(approved=False).exclude(approved_by=None)
serializer = KeywordSerializer(queryset, many=True)
context = {"success": True, "message": "Content Rejected List","data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get Content Rejected list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class Keywords(ListAPIView):
queryset = Content.objects.all()
def get(self, request):
try:
chapter_id = request.query_params.get('chapter', None)
section_id = request.query_params.get('section', None)
sub_section_id = request.query_params.get('sub_section', None)
sub_sub_section_id = request.query_params.get('sub_sub_section', None)
if chapter_id is not None:
queryset=ChapterKeyword.objects.filter(chapter__id = chapter_id)
serializer = ChapterKeywordsSerializer(queryset, many=True)
elif section_id is not None:
queryset = SectionKeyword.objects.filter(section__id = section_id)
serializer = SectionKeywordsSerializer(queryset, many=True)
elif sub_section_id is not None:
queryset = SubSectionKeyword.objects.filter(sub_section__id = sub_section_id)
serializer = SubSectionKeywordsSerializer(queryset, many=True)
elif sub_sub_section_id is not None:
queryset = SubSubSectionKeyword.objects.filter(sub_sub_section__id = sub_sub_section_id)
serializer = SubSubSectionKeywordsSerializer(queryset, many=True)
else:
queryset = self.get_queryset()
serializer = KeywordSerializer(queryset, many=True)
context = {"success": True, "message": "Content List","data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get Content list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class ContentContributorCreateView(ListCreateAPIView):
queryset = ContentContributors.objects.all()
serializer_class = ContentContributorSerializer
def post(self, request):
try:
queryset = ContentContributors.objects.filter(first_name__iexact=request.data['first_name'].strip(),last_name__iexact=request.data['last_name'].strip(), mobile=request.data['mobile'].strip()).first()
if queryset is not None:
if str(queryset.email) == "" and request.data['email'] is not None:
ContentContributors.objects.filter(id=queryset.id).update(email=request.data['email'])
queryset.refresh_from_db()
serializer = ContentContributorSerializer(queryset)
context = {"success": True, "message": "Successful", "data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
else:
serializer = ContentContributorSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
context = {"success": True, "message": "Successful", "data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
context = {"success": False, "message": "Invalid Input Data to create Pesonal details"}
return Response(context, status=status.HTTP_400_BAD_REQUEST)
except Exception as error:
context = {'success': "false", 'message': 'Failed to Personal Details.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@permission_classes((IsAuthenticated,))
class ApprovedContentDownloadView(ListAPIView):
queryset = Book.objects.all()
def get(self, request):
try:
final_list = []
import os
from shutil import copyfile
book = request.query_params.get('book', None)
chapters=Chapter.objects.filter(book_id=book).order_by('id')
serializer = ApprovedContentSerializer(chapters, many=True)
for data in serializer.data:
for d in data['chapter']:
final_list.append(d)
repeat_list=['Content Name','Content Link/Video Link','Content Rating (By Reviewer)','Comment (By Reviewer)', 'linked_keywords']
data_frame = pd.DataFrame(final_list , columns=['Board', 'Medium', 'Grade', 'Subject', 'Textbook Name', 'Level 1 Textbook Unit', 'Level 2 Textbook Unit', 'Level 3 Textbook Unit','Level 4 Textbook Unit', 'Keywords',]+(list(itertools.chain.from_iterable(itertools.repeat(repeat_list, 5)))))
exists = os.path.isfile('ApprovedContent.csv')
path = settings.MEDIA_ROOT + '/files/'
if exists:
os.remove('ApprovedContent.csv')
data_frame.to_csv(path + 'ApprovedContent.csv', encoding="utf-8-sig", index=False)
context = {"success": True, "message": "Activity List", "data": 'media/files/ApprovedContent.csv'}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get Activity list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class ContentStatusDownloadView(RetrieveUpdateAPIView):
queryset = HardSpot.objects.all()
serializer_class = HardSpotCreateSerializer
def get(self, request):
try:
final_list = []
import os
from shutil import copyfile
book_id = request.query_params.get('book', None)
book_name=""
if book_id is not None:
book_name=Book.objects.get(id=book_id)
chapters=Chapter.objects.filter(book__id=book_id).order_by('id')
serializer = ContentStatusSerializer(chapters, many=True)
for data in serializer.data:
for d in data['chapter']:
final_list.append(d)
data_frame = pd.DataFrame(final_list , columns=['Board', 'Medium','Grade', 'Subject', 'Textbook Name', 'Level 1 Textbook Unit', 'Level 2 Textbook Unit', 'Level 3 Textbook Unit','Level 4 Textbook Unit', 'total', 'approved_contents', 'rejected_contents', 'pending_contents', 'hard_spots'])
exists = os.path.isfile('{}_contentstatus.csv'.format(book_name))
path = settings.MEDIA_ROOT + '/files/'
if exists:
os.remove('{}_contentstatus.csv'.format(book_name))
# data_frame.to_excel(path + 'contentstatus.xlsx')
data_frame.to_csv(path + str(book_name)+'_contentstatus.csv', encoding="utf-8-sig", index=False)
context = {"success": True, "message": "Activity List","data": 'media/files/{}_contentstatus.csv'.format(book_name)}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get Activity list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@permission_classes((IsAuthenticated,))
class ContentContributorsDownloadView(RetrieveUpdateAPIView):
queryset = Content.objects.all()
serializer_class = HardSpotCreateSerializer
def get(self, request):
try:
final_list = []
import os
from shutil import copyfile
state_id = request.query_params.get('state', None)
if state_id is not None:
queryset = Content.objects.filter(Q(sub_sub_section__subsection__section__chapter__book__subject__grade__medium__state__id=state_id) | Q(sub_section__section__chapter__book__subject__grade__medium__state__id = state_id) | Q(section__chapter__book__subject__grade__medium__state__id= state_id) | Q(chapter__book__subject__grade__medium__state__id = state_id) ).distinct()
else:
queryset = self.get_queryset()
serializer = ContentContributorsSerializer(queryset, many=True)
res_list = []
for i in range(len(serializer.data)):
if serializer.data[i] not in serializer.data[i + 1:]:
res_list.append(serializer.data[i])
for d in res_list:
final_list.append(d)
data_frame = pd.DataFrame(final_list , columns=['first_name', 'last_name','mobile', 'email','city_name','school_name','textbook_name']).drop_duplicates()
exists = os.path.isfile('content_contributers.csv')
path = settings.MEDIA_ROOT + '/files/'
if exists:
os.remove('content_contributers.csv')
# data_frame.to_excel(path + 'content_contributers.xlsx')
data_frame.to_csv(path + 'content_contributers.csv', encoding="utf-8-sig", index=False)
context = {"success": True, "message": "Activity List","data": 'media/files/content_contributers.csv'}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = { 'success': "false", 'message': 'Failed to get Activity list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class GetSASView(ListAPIView):
def get(self,request):
try:
sas_url = block_blob_service.generate_container_shared_access_signature(
CONTAINER_NAME,
ContainerPermissions.WRITE,
datetime.utcnow() + timedelta(hours=1),
)
base_url=account_name+".blob.core.windows.net/"+CONTAINER_NAME
context = {"success": True, "message": "url link", "token":sas_url,"base_url":base_url}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get Activity list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
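# Client-side sketch (illustrative only): the returned `token` is appended as a
# query string to "https://<base_url>/<blob_name>" to authorize an upload, e.g.
#
#   upload_url = "https://{}/{}?{}".format(base_url, "myfile.mp4", token)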
class GetSasDownloadView(ListAPIView):
def get(self,request):
from evolve import settings
accountName = settings.AZURE_ACCOUNT_NAME
accountKey = settings.AZURE_ACCOUNT_KEY
containerName= settings.AZURE_CONTAINER
try:
blobService = BlockBlobService(account_name=accountName, account_key=accountKey)
sas_token = blobService.generate_container_shared_access_signature(containerName,ContainerPermissions.READ, datetime.utcnow() + timedelta(hours=10))
context = {"success": True, "token":sas_token}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to generate SAS token.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class ContentListUrlUpdate(ListAPIView):
queryset = Content.objects.all()
serializer_class = ContentStatusSerializer
def get(self, request):
try:
queryset = self.get_queryset().filter(approved=True)
serializer = ContentStatusSerializerFileFormat(queryset, many=True)
context = {"success": True, "message": "OtherContent Approved List", "data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get OtherContent Approved list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class ContentListUrlPutRequest(RetrieveUpdateAPIView):
queryset = Content.objects.all()
serializer_class = ContentStatusSerializer
def post(self, request):
try:
datalist = request.data
print(datalist)
for data in datalist:
print(data)
Content.objects.filter(pk=data['content_id']).update(video=data['video'])
context = {"success": True, "message": "update successfull"}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get OtherContent Approved list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class ContentListUrlPutRequestRevert(RetrieveUpdateAPIView):
queryset = Content.objects.all()
serializer_class = ContentStatusSerializer
def post(self, request):
try:
datalist = request.data
print(datalist)
for data in datalist:
Content.objects.filter(pk=data['content_id']).update(video=data['file_path_from_database'])
context = {"success": True, "message": "update successfull"}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get OtherContent Approved list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class BackupContent(ListAPIView):
queryset = Book.objects.all()
def get(self,request):
try:
t = threading.Thread(target=self.index, args=(), kwargs={})
t.setDaemon(True)
t.start()
context = {"success": True, "message": "Activity List", "data": 'media/files/BackupContent.csv'}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get Activity list.' ,"error" :str(error)}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def index(self):
final_list,final = [],[]
queryset = Content.objects.filter(approved=True)
for i in queryset:
try:
if i.video is not None :
final=[i.id,i.video]
final_list.append(final)
except Exception as e:
pass
path = settings.MEDIA_ROOT + '/files/'
data_frame = pd.DataFrame(final_list , columns=['id','url'])
data_frame.to_csv(path+ 'BackupContent.csv', encoding="utf-8-sig", index=False)
|
CodigoAntigo.py
|
# -*- coding: utf-8 -*-
import serial # pip install pyserial
import threading
import time
import pyttsx3 # pip install pyttsx3
from vosk import Model, KaldiRecognizer
import pyaudio
import re
# chatbot
from chatterbot.trainers import ListTrainer
from chatterbot import ChatBot
AMGbot = ChatBot("Assistente")
# initial text; with training the bot gets smarter
conversa1 = ['oi', 'olá', 'olá bom dia', 'bom dia', 'como vai?', 'estou bem']
conversa2 = ['tente ', 'tente de novo', 'nao desista','fale novamente']
treinar = ListTrainer(AMGbot)
treinar.train(conversa1)
treinar.train(conversa2)
engine = pyttsx3.init()
voices = engine.getProperty('voices')
engine.setProperty('rate', 150) # speech rate; 120 = slow
contar = 0
for vozes in voices: # list available voices
print(contar, vozes.name)
contar += 1
voz = 53
engine.setProperty('voice', voices[voz].id)
conectado = False
porta = '/dev/ttyUSB0'
velocidadeBaud = 9600
mensagensRecebidas = 1
desligarArduinoThread = False
falarTexto = False
textoRecebido = ""
textoFalado = ""
arduinoFuncionando = True
try:
SerialArduino = serial.Serial(porta, velocidadeBaud, timeout=0.2)
except:
print("Verificar porta serial ou religar arduino")
arduinoFuncionando = False
def handle_data(data):
global mensagensRecebidas, engine, falarTexto, textoRecebido
print("Recebi " + str(mensagensRecebidas) + ": " + data)
mensagensRecebidas += 1
textoRecebido = data
falarTexto = True
def read_from_port(ser):
global conectado, desligarArduinoThread
while not conectado:
conectado = True
while True:
reading = ser.readline().decode()
if reading != "":
handle_data(reading)
if desligarArduinoThread:
print("Desligando Arduino")
break
if arduinoFuncionando:
try:
lerSerialThread = threading.Thread(target=read_from_port, args=(SerialArduino,))
lerSerialThread.start()
except:
print("Verificar porta serial")
arduinoFuncionando = False
print("Preparando Arduino")
time.sleep(2)
print("Arduino Pronto")
else:
time.sleep(2)
print("Arduino não conectou")
while True:
if falarTexto:
if textoRecebido != "":
engine.say(textoRecebido)
engine.runAndWait()
textoRecebido = ""
elif textoFalado != "":
resposta = AMGbot.get_response(textoFalado)
engine.say(resposta)
engine.runAndWait()
textoFalado = ""
falarTexto = False
def meu_comando(): # function that returns what was spoken as a string
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt16, channels=1, rate=16000, input=True, frames_per_buffer=8000)
stream.start_stream()
model = Model("vosk-model-small-pt-0.3") #localiza o arquivo de reconhecimento de voz
rec = KaldiRecognizer(model, 16000)
print("Fale algo")
while True:
data = stream.read(2000)
if len(data) == 0:
break
if rec.AcceptWaveform(data):
meuResultado = rec.Result()
minhaLista = meuResultado.split("text") # what was spoken (the "text" field) is returned as a list
comando = minhaLista[1]
stream.stop_stream()
stream.close()
p.terminate()
resultado = re.findall(r'\w+', comando) # regular expression to keep only word characters
resultadofinal = " ".join(resultado) # joins the list into a clean string
return resultadofinal
try:
try:
texto = meu_comando()
if arduinoFuncionando: # if the Arduino is connected, serial communication starts
SerialArduino.write(str.encode(texto+'\n')) # sends what was said as a
# string to the Arduino serial port
print(texto)
if texto != "":
textoFalado = texto
falarTexto = True
print("Dado enviado")
if (texto == "desativar"):
print("Saindo")
desativando = "Assistente desativando"
engine.say(desativando)
engine.runAndWait()
engine.stop()
desligarArduinoThread = True
if arduinoFuncionando:
SerialArduino.close()
lerSerialThread.join()
break
except:
print("Não entendi o que você disse\n")
engine.say("que você disse?")
engine.runAndWait()
time.sleep(0.5) # wait for the Arduino's reply
except (KeyboardInterrupt, SystemExit):
print("Apertou Ctrl+C")
engine.stop()
desligarArduinoThread = True
if arduinoFuncionando:
SerialArduino.close()
lerSerialThread.join()
break
|
qadapters.py
|
"""
Part of this code is based on a similar implementation present in FireWorks (https://pypi.python.org/pypi/FireWorks).
Work done by D. Waroquiers, A. Jain, and M. Kocher.
The main difference wrt the Fireworks implementation is that the QueueAdapter
objects provide a programmatic interface for setting important attributes
such as the number of MPI nodes, the number of OMP threads and the memory requirements.
This programmatic interface is used by the `TaskManager` for optimizing the parameters
of the run before submitting the job (Abinit provides the autoparal option that
allows one to get a list of parallel configuration and their expected efficiency).
"""
from __future__ import print_function, division
import os
import abc
import string
import copy
import getpass
from subprocess import Popen, PIPE
from pymatgen.io.abinitio.launcher import ScriptEditor
from pymatgen.util.string_utils import is_string
import logging
logger = logging.getLogger(__name__)
__all__ = [
"MpiRunner",
"qadapter_class",
]
class Command(object):
"""
From https://gist.github.com/kirpit/1306188
Enables to run subprocess commands in a different thread with TIMEOUT option.
Based on jcollado's solution:
http://stackoverflow.com/questions/1191374/subprocess-with-timeout/4825933#4825933
"""
command = None
process = None
status = None
output, error = '', ''
def __init__(self, command):
if is_string(command):
import shlex
command = shlex.split(command)
self.command = command
def run(self, timeout=None, **kwargs):
""" Run a command then return: (status, output, error). """
def target(**kwargs):
try:
self.process = Popen(self.command, **kwargs)
self.output, self.error = self.process.communicate()
self.status = self.process.returncode
except:
import traceback
self.error = traceback.format_exc()
self.status = -1
# default stdout and stderr
if 'stdout' not in kwargs:
kwargs['stdout'] = PIPE
if 'stderr' not in kwargs:
kwargs['stderr'] = PIPE
# thread
import threading
thread = threading.Thread(target=target, kwargs=kwargs)
thread.start()
thread.join(timeout)
if thread.is_alive():
self.process.terminate()
thread.join()
return self.status, self.output, self.error
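# Usage sketch (illustrative only):
#
#   status, out, err = Command("ls -l /tmp").run(timeout=5)
#   if status != 0:
#       print("command failed or timed out:", err)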
class MpiRunner(object):
"""
This object provides an abstraction for the mpirunner provided
by the different MPI libraries. Its main task is handling the
different syntax and options supported by the different mpirunners.
"""
def __init__(self, name, type=None, options=""):
self.name = name
self.type = type
self.options = options
def string_to_run(self, executable, mpi_ncpus, stdin=None, stdout=None, stderr=None):
stdin = "< " + stdin if stdin is not None else ""
stdout = "> " + stdout if stdout is not None else ""
stderr = "2> " + stderr if stderr is not None else ""
if self.has_mpirun:
if self.type is None:
# TODO: better treatment of mpirun syntax.
#se.add_line('$MPIRUN -n $MPI_NCPUS $EXECUTABLE < $STDIN > $STDOUT 2> $STDERR')
num_opt = "-n " + str(mpi_ncpus)
cmd = " ".join([self.name, num_opt, executable, stdin, stdout, stderr])
else:
raise NotImplementedError("type %s is not supported!" % self.type)
else:
#assert mpi_ncpus == 1
cmd = " ".join([executable, stdin, stdout, stderr])
return cmd
@property
def has_mpirun(self):
"""True if we are running via mpirun, mpiexec ..."""
return self.name is not None
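# Usage sketch (illustrative only; file names are hypothetical):
#
#   runner = MpiRunner("mpirun")
#   print(runner.string_to_run("abinit", mpi_ncpus=4,
#                              stdin="run.files", stdout="run.log"))
#   # -> roughly "mpirun -n 4 abinit < run.files > run.log"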
def qadapter_class(qtype):
"""Return the concrete `Adapter` class from a string."""
return {"shell": ShellAdapter,
"slurm": SlurmAdapter,
"pbs": PbsAdapter,
}[qtype.lower()]
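# Usage sketch (illustrative only; partition, account and file names below are
# hypothetical):
#
#   cls = qadapter_class("slurm")                 # -> SlurmAdapter
#   qad = cls(qparams={"partition": "defq", "time": "2:00:00"},
#             mpi_runner="mpirun")
#   qad.set_mpi_ncpus(16)
#   qad.set_mem_per_cpu(2000)                     # in Mb
#   script = qad.get_script_str("job", launch_dir=".", executable="abinit",
#                               qout_path="job.qout", qerr_path="job.qerr",
#                               stdin="run.files")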
class QueueAdapterError(Exception):
"""Error class for exceptions raise by QueueAdapter."""
class AbstractQueueAdapter(object):
"""
The QueueAdapter is responsible for all interactions with a specific
queue management system. This includes handling all details of queue
script format as well as queue submission and management.
This is the Abstract base class defining the methods that
must be implemented by the concrete classes.
A user should extend this class with implementations that work on
specific queue systems.
"""
__metaclass__ = abc.ABCMeta
Error = QueueAdapterError
def __init__(self, qparams=None, setup=None, modules=None, shell_env=None, omp_env=None,
pre_run=None, post_run=None, mpi_runner=None):
"""
Args:
setup:
String or list of commands to execute during the initial setup.
modules:
String or list of modules to load before running the application.
shell_env:
Dictionary with the environment variables to export
before running the application.
omp_env:
Dictionary with the OpenMP variables.
pre_run:
String or list of commands to execute before launching the calculation.
post_run:
String or list of commands to execute once the calculation is completed.
mpi_runner:
Path to the MPI runner or `MpiRunner` instance. None if not used
"""
# Make defensive copies so that we can change the values at runtime.
self.qparams = qparams.copy() if qparams is not None else {}
if is_string(setup):
setup = [setup]
self.setup = setup[:] if setup is not None else []
self.omp_env = omp_env.copy() if omp_env is not None else {}
if is_string(modules):
modules = [modules]
self.modules = modules[:] if modules is not None else []
self.shell_env = shell_env.copy() if shell_env is not None else {}
self.mpi_runner = mpi_runner
if not isinstance(mpi_runner, MpiRunner):
self.mpi_runner = MpiRunner(mpi_runner)
if is_string(pre_run):
pre_run = [pre_run]
self.pre_run = pre_run[:] if pre_run is not None else []
if is_string(post_run):
post_run = [post_run]
self.post_run = post_run[:] if post_run is not None else []
# Parse the template so that we know the list of supported options.
cls = self.__class__
if hasattr(cls, "QTEMPLATE"):
# Consistency check.
err_msg = ""
for param in self.qparams:
if param not in self.supported_qparams:
err_msg += "Unsupported QUEUE parameter name %s\n" % param
if err_msg:
raise ValueError(err_msg)
def copy(self):
return copy.copy(self)
def deepcopy(self):
return copy.deepcopy(self)
@property
def supported_qparams(self):
"""
Dictionary with the supported parameters that can be passed to the
queue manager (obtained by parsing QTEMPLATE).
"""
try:
return self._supported_qparams
except AttributeError:
import re
self._supported_qparams = re.findall(r"\$\$\{(\w+)\}", self.QTEMPLATE)
return self._supported_qparams
@property
def has_mpirun(self):
"""True if we are using a mpirunner"""
return bool(self.mpi_runner)
@property
def has_omp(self):
"""True if we are using OpenMP threads"""
return hasattr(self,"omp_env") and bool(getattr(self, "omp_env"))
@property
def tot_ncpus(self):
"""Total number of CPUs employed"""
return self.mpi_ncpus * self.omp_ncpus
@property
def omp_ncpus(self):
"""Number of OpenMP threads."""
if self.has_omp:
return self.omp_env["OMP_NUM_THREADS"]
else:
return 1
@abc.abstractproperty
def mpi_ncpus(self):
"""Number of CPUs used for MPI."""
@abc.abstractmethod
def set_mpi_ncpus(self, mpi_ncpus):
"""Set the number of CPUs used for MPI."""
#@abc.abstractproperty
#def queue_walltime(self):
# """Returns the walltime in seconds."""
#@abc.abstractmethod
#def set_queue_walltime(self):
# """Set the walltime in seconds."""
#@abc.abstractproperty
#def mem_per_cpu(self):
# """The memory per CPU in Megabytes."""
@abc.abstractmethod
def set_mem_per_cpu(self, mem_mb):
"""Set the memory per CPU in Megabytes"""
#@property
#def tot_mem(self):
# """Total memory required by the job n Megabytes."""
# return self.mem_per_cpu * self.mpi_ncpus
@abc.abstractmethod
def cancel(self, job_id):
"""
Cancel the job.
Args:
job_id:
(in) Job identifier.
Returns:
Exit status.
"""
def _make_qheader(self, job_name, qout_path, qerr_path):
"""Return a string with the options that are passed to the resource manager."""
qtemplate = QScriptTemplate(self.QTEMPLATE)
# set substitution dict for replacements into the template and clean null values
subs_dict = {k: v for k,v in self.qparams.items() if v is not None}
# Set job_name and the names for the stderr and stdout of the
# queue manager (note the use of the extensions .qout and .qerr
# so that we can easily locate these files).
subs_dict['job_name'] = job_name
subs_dict['_qout_path'] = qout_path
subs_dict['_qerr_path'] = qerr_path
# might contain unused parameters as leftover $$.
unclean_template = qtemplate.safe_substitute(subs_dict)
# Remove lines with leftover $$.
clean_template = []
for line in unclean_template.split('\n'):
if '$$' not in line:
clean_template.append(line)
return '\n'.join(clean_template)
def get_script_str(self, job_name, launch_dir, executable, qout_path, qerr_path, stdin=None, stdout=None, stderr=None):
"""
Returns a (multi-line) String representing the queue script, e.g. PBS script.
Uses the template_file along with internal parameters to create the script.
Args:
launch_dir:
(str) The directory the job will be launched in.
qout_path:
Path of the Queue manager output file.
qerr_path:
Path of the Queue manager error file.
"""
# Construct the header for the Queue Manager.
qheader = self._make_qheader(job_name, qout_path, qerr_path)
# Add the bash section.
se = ScriptEditor()
if self.setup:
se.add_comment("Setup section")
se.add_lines(self.setup)
if self.modules:
se.add_comment("Load Modules")
se.add_line("module purge")
se.load_modules(self.modules)
if self.has_omp:
se.add_comment("OpenMp Environment")
se.declare_vars(self.omp_env)
if self.shell_env:
se.add_comment("Shell Environment")
se.declare_vars(self.shell_env)
# Cd to launch_dir
se.add_line("cd " + os.path.abspath(launch_dir))
if self.pre_run:
se.add_comment("Commands before execution")
se.add_lines(self.pre_run)
# Construct the string to run the executable with MPI and mpi_ncpus.
mpi_ncpus = self.mpi_ncpus
line = self.mpi_runner.string_to_run(executable, mpi_ncpus, stdin=stdin, stdout=stdout, stderr=stderr)
se.add_line(line)
if self.post_run:
se.add_comment("Commands after execution")
se.add_lines(self.post_run)
shell_text = se.get_script_str()
return qheader + shell_text
@abc.abstractmethod
def submit_to_queue(self, script_file):
"""
Submits the job to the queue, probably using subprocess or shutil
Args:
script_file:
(str) name of the script file to use (String)
Returns:
process, queue_id
"""
@abc.abstractmethod
def get_njobs_in_queue(self, username=None):
"""
returns the number of jobs in the queue, probably using subprocess or shutil to
call a command like 'qstat'. returns None when the number of jobs cannot be determined.
Args:
username: (str) the username of the jobs to count (default is to autodetect)
"""
####################
# Concrete classes #
####################
class ShellAdapter(AbstractQueueAdapter):
QTYPE = "shell"
QTEMPLATE = """\
#!/bin/bash
export MPI_NCPUS=$${MPI_NCPUS}
"""
@property
def mpi_ncpus(self):
"""Number of CPUs used for MPI."""
return self.qparams.get("MPI_NCPUS", 1)
def set_mpi_ncpus(self, mpi_ncpus):
"""Set the number of CPUs used for MPI."""
self.qparams["MPI_NCPUS"] = mpi_ncpus
def set_mem_per_cpu(self, mem_mb):
"""mem_per_cpu is not available in ShellAdapter."""
def cancel(self, job_id):
return os.system("kill -9 %d" % job_id)
def submit_to_queue(self, script_file):
if not os.path.exists(script_file):
raise self.Error('Cannot find script file located at: {}'.format(script_file))
# submit the job
try:
process = Popen(("/bin/bash", script_file), stderr=PIPE)
queue_id = process.pid
return process, queue_id
except:
# random error
raise self.Error("Random Error ...!")
def get_njobs_in_queue(self, username=None):
return None
class SlurmAdapter(AbstractQueueAdapter):
QTYPE = "slurm"
QTEMPLATE = """\
#!/bin/bash
#SBATCH --ntasks=$${ntasks}
#SBATCH --ntasks-per-node=$${ntasks_per_node}
#SBATCH --cpus-per-task=$${cpus_per_task}
#SBATCH --time=$${time}
#SBATCH --partition=$${partition}
#SBATCH --account=$${account}
#SBATCH --job-name=$${job_name}
#SBATCH --nodes=$${nodes}
#SBATCH --mem=$${mem}
#SBATCH --mem-per-cpu=$${mem_per_cpu}
#SBATCH --mail-user=$${mail_user}
#SBATCH --mail-type=$${mail_type}
#SBATCH --constraint=$${constraint}
#SBATCH --gres=$${gres}
#SBATCH --requeue=$${requeue}
#SBATCH --nodelist=$${nodelist}
#SBATCH --propagate=$${propagate}
#SBATCH --output=$${_qout_path}
#SBATCH --error=$${_qerr_path}
"""
@property
def mpi_ncpus(self):
"""Number of CPUs used for MPI."""
return self.qparams.get("ntasks", 1)
def set_mpi_ncpus(self, mpi_ncpus):
"""Set the number of CPUs used for MPI."""
self.qparams["ntasks"] = mpi_ncpus
def set_mem_per_cpu(self, mem_mb):
"""Set the memory per CPU in Megabytes"""
self.qparams["mem_per_cpu"] = int(mem_mb)
# Remove mem if it's defined.
self.qparams.pop("mem", None)
def cancel(self, job_id):
return os.system("scancel %d" % job_id)
def submit_to_queue(self, script_file):
if not os.path.exists(script_file):
raise self.Error('Cannot find script file located at: {}'.format(script_file))
# submit the job
try:
cmd = ['sbatch', script_file]
process = Popen(cmd, stdout=PIPE, stderr=PIPE)
process.wait()
# grab the returncode. SLURM returns 0 if the job was successful
if process.returncode == 0:
try:
# output should be of the form 'Submitted batch job 2561553' - grab the 4th token for the job id
queue_id = int(process.stdout.read().split()[3])
logger.info('Job submission was successful and queue_id is {}'.format(queue_id))
except:
# probably error parsing job code
queue_id = None
logger.warning('Could not parse job id following slurm...')
finally:
return process, queue_id
else:
# some qsub error, e.g. maybe wrong queue specified, don't have permission to submit, etc...
err_msg = ("Error in job submission with SLURM file {f} and cmd {c}\n".format(f=script_file, c=cmd) +
"The error response reads: {}".format(process.stderr.read()))
raise self.Error(err_msg)
except:
# random error, e.g. no qsub on machine!
raise self.Error('Running sbatch caused an error...')
def get_njobs_in_queue(self, username=None):
if username is None:
username = getpass.getuser()
cmd = ['squeue', '-o "%u"', '-u', username]
process = Popen(cmd, shell=False, stdout=PIPE)
process.wait()
# parse the result
if process.returncode == 0:
# lines should have this form
# username
# count lines that include the username in it
outs = process.stdout.readlines()
njobs = len([line.split() for line in outs if username in line])
logger.info('The number of jobs currently in the queue is: {}'.format(njobs))
return njobs
# there's a problem talking to squeue server?
err_msg = ('Error trying to get the number of jobs in the queue using squeue service' +
'The error response reads: {}'.format(process.stderr.read()))
logger.critical(err_msg)
return None
class PbsAdapter(AbstractQueueAdapter):
QTYPE = "pbs"
QTEMPLATE = """\
#!/bin/bash
#PBS -A $${account}
#PBS -l walltime=$${walltime}
#PBS -q $${queue}
#PBS -l mppwidth=$${mppwidth}
#PBS -l nodes=$${nodes}:ppn=$${ppn}
#PBS -N $${job_name}
#PBS -o $${_qout_path}
#PBS -e $${_qerr_path}
"""
@property
def mpi_ncpus(self):
"""Number of CPUs used for MPI."""
return self.qparams.get("nodes", 1) * self.qparams.get("ppn", 1)
def set_mpi_ncpus(self, mpi_ncpus):
"""Set the number of CPUs used for MPI."""
if "ppn" not in self.qparams: self.qparams["ppn"] = 1
ppnode = self.qparams.get("ppn")
self.qparams["nodes"] = mpi_ncpus // ppnode
def set_mem_per_cpu(self, mem_mb):
"""Set the memory per CPU in Megabytes"""
raise NotImplementedError("")
#self.qparams["mem_per_cpu"] = mem_mb
## Remove mem if it's defined.
#self.qparams.pop("mem", None)
def cancel(self, job_id):
return os.system("qdel %d" % job_id)
def submit_to_queue(self, script_file):
if not os.path.exists(script_file):
raise self.Error('Cannot find script file located at: {}'.format(script_file))
# submit the job
try:
cmd = ['qsub', script_file]
process = Popen(cmd, stdout=PIPE, stderr=PIPE)
process.wait()
# grab the returncode. PBS returns 0 if the job was successful
if process.returncode == 0:
try:
# output should be of the form '2561553.sdb' or '352353.jessup' - just grab the first part for job id
queue_id = int(process.stdout.read().split('.')[0])
logger.info('Job submission was successful and queue_id is {}'.format(queue_id))
except:
# probably error parsing job code
logger.warning("Could not parse job id following qsub...")
queue_id = None
finally:
return process, queue_id
else:
# some qsub error, e.g. maybe wrong queue specified, don't have permission to submit, etc...
msg = ('Error in job submission with PBS file {f} and cmd {c}\n'.format(f=script_file, c=cmd) +
'The error response reads: {}'.format(process.stderr.read()))
raise self.Error(msg)
except:
# random error, e.g. no qsub on machine!
raise self.Error("Running qsub caused an error...")
def get_njobs_in_queue(self, username=None):
# Initialize username
if username is None:
username = getpass.getuser()
# run qstat
qstat = Command(['qstat', '-a', '-u', username])
process = qstat.run(timeout=5)
# parse the result
if process[0] == 0:
# lines should have this form
# '1339044.sdb username queuename 2012-02-29-16-43 20460 -- -- -- 00:20 C 00:09'
# count lines that include the username in it
# TODO: only count running or queued jobs. or rather, *don't* count jobs that are 'C'.
outs = process[1].split('\n')
njobs = len([line.split() for line in outs if username in line])
logger.info('The number of jobs currently in the queue is: {}'.format(njobs))
return njobs
# there's a problem talking to qstat server?
err_msg = ('Error trying to get the number of jobs in the queue using qstat service\n' +
'The error response reads: {}'.format(process[2]))
logger.critical(err_msg)
return None
class QScriptTemplate(string.Template):
delimiter = '$$'
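# Usage sketch (assumptions: constructor arguments are omitted here and "job.sh" is a
# hypothetical script path; see the adapter definitions above for the real parameters):
#
# adapter = PbsAdapter(...) # qparams / template setup as defined above
# adapter.set_mpi_ncpus(16)
# process, queue_id = adapter.submit_to_queue("job.sh")
# print(adapter.get_njobs_in_queue())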
|
tasking.py
|
import asyncio
import threading
class Tasker:
def __init__(self, name="tasker"):
self.loop = asyncio.new_event_loop()
self.thread = threading.Thread(name=name, target=self._run, daemon=True)
def _run(self):
asyncio.set_event_loop(self.loop)
try:
self.loop.run_forever()
finally:
self.loop.close()
def start(self):
self.thread.start()
def do(self, func, *args, **kwargs):
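# Note: call_soon() alone is not thread-safe; the _write_to_self() call below pokes
# the loop's self-pipe so the loop thread wakes up and runs the callback promptly.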
handle = self.loop.call_soon(lambda: func(*args, **kwargs))
self.loop._write_to_self()
return handle
def later(self, func, *args, after=None, **kwargs):
handle = self.loop.call_later(after, lambda: func(*args, **kwargs))
self.loop._write_to_self()
return handle
def periodic(self, func, *args, interval=None, **kwargs):
@asyncio.coroutine
def f():
while True:
yield from asyncio.sleep(interval)
func(*args, **kwargs)
handle = self.loop.create_task(f())
self.loop._write_to_self()
return handle
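# Usage sketch (assumes the loop thread is started before any work is scheduled):
#
# tasker = Tasker()
# tasker.start()
# tasker.do(print, "runs on the loop thread")
# tasker.later(print, "runs once after 2 seconds", after=2)
# tasker.periodic(print, "runs every 5 seconds", interval=5)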
|
__init__.py
|
#### PATTERN | WEB #################################################################################
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: Tom De Smedt <tom@organisms.be>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
####################################################################################################
# Python API interface for various web services (Google, Twitter, Wikipedia, ...)
# sgmllib.py is removed from Python 3, a warning is issued in Python 2.6+. Ignore for now.
import warnings; warnings.filterwarnings(action='ignore', category=DeprecationWarning, module="sgmllib")
import threading
import time
import os
import socket, urlparse, urllib, urllib2
import base64
import htmlentitydefs
import sgmllib
import re
import xml.dom.minidom
import StringIO
import bisect
import new
import api
import feed
import oauth
import json
import locale
from feed import feedparser
from soup import BeautifulSoup
try:
# Import persistent Cache.
# If this module is used separately, a dict is used (i.e. for this Python session only).
from cache import Cache, cache, TMP
except:
cache = {}
try:
from imap import Mail, MailFolder, Message, GMAIL
from imap import MailError, MailServiceError, MailLoginError, MailNotLoggedIn
from imap import FROM, SUBJECT, DATE, BODY, ATTACHMENTS
except:
pass
try:
MODULE = os.path.dirname(os.path.abspath(__file__))
except:
MODULE = ""
#### UNICODE #######################################################################################
def decode_utf8(string):
""" Returns the given string as a unicode string (if possible).
"""
if isinstance(string, str):
for encoding in (("utf-8",), ("windows-1252",), ("utf-8", "ignore")):
try:
return string.decode(*encoding)
except:
pass
return string
return unicode(string)
def encode_utf8(string):
""" Returns the given string as a Python byte string (if possible).
"""
if isinstance(string, unicode):
try:
return string.encode("utf-8")
except:
return string
return str(string)
u = decode_utf8
s = encode_utf8
# For clearer source code:
bytestring = s
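# Usage sketch: u() decodes byte strings to unicode (trying utf-8, then windows-1252),
# s() encodes unicode back to utf-8 byte strings.
#
# u("caf\xc3\xa9") # => u"café"
# s(u"café") # => "caf\xc3\xa9"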
#### ASYNCHRONOUS REQUEST ##########################################################################
class AsynchronousRequest:
def __init__(self, function, *args, **kwargs):
""" Executes the function in the background.
AsynchronousRequest.done is False as long as it is busy, but the program will not halt in the meantime.
AsynchronousRequest.value contains the function's return value once done.
AsynchronousRequest.error contains the Exception raised by an erroneous function.
For example, this is useful for running live web requests while keeping an animation running.
For good reasons, there is no way to interrupt a background process (i.e. Python thread).
You are responsible for ensuring that the given function doesn't hang.
"""
self._response = None # The return value of the given function.
self._error = None # The exception (if any) raised by the function.
self._time = time.time()
self._function = function
self._thread = threading.Thread(target=self._fetch, args=(function,)+args, kwargs=kwargs)
self._thread.start()
def _fetch(self, function, *args, **kwargs):
""" Executes the function and sets AsynchronousRequest.response.
"""
try:
self._response = function(*args, **kwargs)
except Exception, e:
self._error = e
def now(self):
""" Waits for the function to finish and yields its return value.
"""
self._thread.join(); return self._response
@property
def elapsed(self):
return time.time() - self._time
@property
def done(self):
return not self._thread.isAlive()
@property
def value(self):
return self._response
@property
def error(self):
return self._error
def __repr__(self):
return "AsynchronousRequest(function='%s')" % self._function.__name__
def asynchronous(function, *args, **kwargs):
""" Returns an AsynchronousRequest object for the given function.
"""
return AsynchronousRequest(function, *args, **kwargs)
send = asynchronous
#### URL ###########################################################################################
# User agent and referrer.
# Used to identify the application accessing the web.
USER_AGENT = "Pattern/2.5 +http://www.clips.ua.ac.be/pages/pattern"
REFERRER = "http://www.clips.ua.ac.be/pages/pattern"
# Mozilla user agent.
# Websites can include code to block out any application except browsers.
MOZILLA = "Mozilla/5.0"
# HTTP request method.
GET = "get" # Data is encoded in the URL.
POST = "post" # Data is encoded in the message body.
# URL parts.
# protocol://username:password@domain:port/path/page?query_string#anchor
PROTOCOL, USERNAME, PASSWORD, DOMAIN, PORT, PATH, PAGE, QUERY, ANCHOR = \
"protocol", "username", "password", "domain", "port", "path", "page", "query", "anchor"
# MIME type.
MIMETYPE_WEBPAGE = ["text/html"]
MIMETYPE_STYLESHEET = ["text/css"]
MIMETYPE_PLAINTEXT = ["text/plain"]
MIMETYPE_PDF = ["application/pdf"]
MIMETYPE_NEWSFEED = ["application/rss+xml", "application/atom+xml"]
MIMETYPE_IMAGE = ["image/gif", "image/jpeg", "image/png", "image/tiff"]
MIMETYPE_AUDIO = ["audio/mpeg", "audio/mp4", "audio/x-aiff", "audio/x-wav"]
MIMETYPE_VIDEO = ["video/mpeg", "video/mp4", "video/quicktime"]
MIMETYPE_ARCHIVE = ["application/x-stuffit", "application/x-tar", "application/zip"]
MIMETYPE_SCRIPT = ["application/javascript", "application/ecmascript"]
def extension(filename):
""" Returns the extension in the given filename: "cat.jpg" => ".jpg".
"""
return os.path.splitext(filename)[1]
def urldecode(query):
""" Inverse operation of urllib.urlencode.
Returns a dictionary of (name, value)-items from a URL query string.
"""
def _format(s):
if s == "None":
return None
if s.isdigit():
return int(s)
try: return float(s)
except:
return s
query = [(kv.split("=")+[None])[:2] for kv in query.lstrip("?").split("&")]
query = [(urllib.unquote_plus(bytestring(k)), urllib.unquote_plus(bytestring(v))) for k, v in query]
query = [(u(k), u(v)) for k, v in query]
query = [(k, _format(v) or None) for k, v in query]
query = dict([(k,v) for k, v in query if k != ""])
return query
url_decode = urldecode
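# Usage sketch: digit values are cast to numbers and bare parameters map to None.
#
# urldecode("?species=seagull&page=2&q")
# # => {u"species": u"seagull", u"page": 2, u"q": None}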
def proxy(host, protocol="https"):
""" Returns the value for the URL.open() proxy parameter.
- host: host address of the proxy server.
"""
return (host, protocol)
class URLError(Exception):
pass # URL contains errors (e.g. a missing t in htp://).
class URLTimeout(URLError):
pass # URL takes too long to load.
class HTTPError(URLError):
pass # URL causes an error on the contacted server.
class HTTP301Redirect(HTTPError):
pass # Too many redirects.
# The site may be trying to set a cookie and waiting for you to return it,
# or taking other measures to discern a browser from a script.
# For specific purposes you should build your own urllib2.HTTPRedirectHandler
# and pass it to urllib2.build_opener() in URL.open()
class HTTP400BadRequest(HTTPError):
pass # URL contains an invalid request.
class HTTP401Authentication(HTTPError):
pass # URL requires a login and password.
class HTTP403Forbidden(HTTPError):
pass # URL is not accessible (user-agent?)
class HTTP404NotFound(HTTPError):
pass # URL doesn't exist on the internet.
class HTTP420Error(HTTPError):
pass # Used by Twitter for rate limiting.
class HTTP500InternalServerError(HTTPError):
pass # Generic server error.
class URL:
def __init__(self, string=u"", method=GET, query={}):
""" URL object with the individual parts available as attributes:
For protocol://username:password@domain:port/path/page?query_string#anchor:
- URL.protocol: http, https, ftp, ...
- URL.username: username for restricted domains.
- URL.password: password for restricted domains.
- URL.domain : the domain name, e.g. nodebox.net.
- URL.port : the server port to connect to.
- URL.path : the server path of folders, as a list, e.g. ['news', '2010']
- URL.page : the page name, e.g. page.html.
- URL.query : the query string as a dictionary of (name, value)-items.
- URL.anchor : the page anchor.
If method is POST, the query string is sent with HTTP POST.
"""
self.__dict__["method"] = method # Use __dict__ directly since __setattr__ is overridden.
self.__dict__["_string"] = u(string)
self.__dict__["_parts"] = None
self.__dict__["_headers"] = None
self.__dict__["_redirect"] = None
if isinstance(string, URL):
self.__dict__["method"] = string.method
self.query.update(string.query)
if len(query) > 0:
# Requires that we parse the string first (see URL.__setattr__).
self.query.update(query)
def _parse(self):
""" Parses all the parts of the URL string to a dictionary.
URL format: protocol://username:password@domain:port/path/page?querystring#anchor
For example: http://user:pass@example.com:992/animal/bird?species=seagull&q#wings
This is a cached method that is only invoked when necessary, and only once.
"""
p = urlparse.urlsplit(self._string)
P = {PROTOCOL: p[0], # http
USERNAME: u"", # user
PASSWORD: u"", # pass
DOMAIN: p[1], # example.com
PORT: u"", # 992
PATH: p[2], # [animal]
PAGE: u"", # bird
QUERY: urldecode(p[3]), # {"species": "seagull", "q": None}
ANCHOR: p[4] # wings
}
# Split the username and password from the domain.
if "@" in P[DOMAIN]:
P[USERNAME], \
P[PASSWORD] = (p[1].split("@")[0].split(":")+[u""])[:2]
P[DOMAIN] = p[1].split("@")[1]
# Split the port number from the domain.
if ":" in P[DOMAIN]:
P[DOMAIN], \
P[PORT] = P[DOMAIN].split(":")
P[PORT] = int(P[PORT])
# Split the base page from the path.
if "/" in P[PATH]:
P[PAGE] = p[2].split("/")[-1]
P[PATH] = p[2][:len(p[2])-len(P[PAGE])].strip("/").split("/")
P[PATH] = filter(lambda v: v != "", P[PATH])
else:
P[PAGE] = p[2].strip("/")
P[PATH] = []
self.__dict__["_parts"] = P
# URL.string yields unicode(URL) by joining the different parts,
# if the URL parts have been modified.
def _get_string(self): return unicode(self)
def _set_string(self, v):
self.__dict__["_string"] = u(v)
self.__dict__["_parts"] = None
string = property(_get_string, _set_string)
@property
def parts(self):
""" Yields a dictionary with the URL parts.
"""
if not self._parts: self._parse()
return self._parts
@property
def querystring(self):
""" Yields the URL querystring: "www.example.com?page=1" => "page=1"
"""
s = self.parts[QUERY].items()
s = dict((bytestring(k), bytestring(v if v is not None else "")) for k, v in s)
s = urllib.urlencode(s)
return s
def __getattr__(self, k):
if k in self.__dict__ : return self.__dict__[k]
if k in self.parts : return self.__dict__["_parts"][k]
raise AttributeError, "'URL' object has no attribute '%s'" % k
def __setattr__(self, k, v):
if k in self.__dict__ : self.__dict__[k] = u(v); return
if k == "string" : self._set_string(v); return
if k == "query" : self.parts[k] = v; return
if k in self.parts : self.__dict__["_parts"][k] = u(v); return
raise AttributeError, "'URL' object has no attribute '%s'" % k
def open(self, timeout=10, proxy=None, user_agent=USER_AGENT, referrer=REFERRER, authentication=None):
""" Returns a connection to the url from which data can be retrieved with connection.read().
When the timeout amount of seconds is exceeded, raises a URLTimeout.
When an error occurs, raises a URLError (e.g. HTTP404NotFound).
"""
url = self.string
# Use basic urllib.urlopen() instead of urllib2.urlopen() for local files.
if os.path.exists(url):
return urllib.urlopen(url)
# Get the query string as a separate parameter if method=POST.
post = self.method == POST and self.querystring or None
socket.setdefaulttimeout(timeout)
if proxy:
proxy = urllib2.ProxyHandler({proxy[1]: proxy[0]})
proxy = urllib2.build_opener(proxy, urllib2.HTTPHandler)
urllib2.install_opener(proxy)
try:
request = urllib2.Request(bytestring(url), post, {
"User-Agent": user_agent,
"Referer": referrer
})
# Basic authentication is established with authentication=(username, password).
if authentication is not None:
request.add_header("Authorization", "Basic %s" %
base64.encodestring('%s:%s' % authentication))
return urllib2.urlopen(request)
except urllib2.HTTPError, e:
if e.code == 301: raise HTTP301Redirect
if e.code == 400: raise HTTP400BadRequest
if e.code == 401: raise HTTP401Authentication
if e.code == 403: raise HTTP403Forbidden
if e.code == 404: raise HTTP404NotFound
if e.code == 420: raise HTTP420Error
if e.code == 500: raise HTTP500InternalServerError
raise HTTPError
except socket.timeout:
raise URLTimeout
except urllib2.URLError, e:
if e.reason == "timed out" \
or e.reason[0] in (36, "timed out"):
raise URLTimeout
raise URLError, e.reason
except ValueError, e:
raise URLError, e
def download(self, timeout=10, cached=True, throttle=0, proxy=None, user_agent=USER_AGENT, referrer=REFERRER, authentication=None, unicode=False, **kwargs):
""" Downloads the content at the given URL (by default it will be cached locally).
With unicode=True, the content is returned as a unicode string.
"""
# Filter OAuth parameters from cache id (they will be unique for each request).
if self._parts is None and self.method == GET and "oauth_" not in self._string:
id = self._string
else:
id = repr(self.parts)
id = re.sub("u{0,1}'oauth_.*?': u{0,1}'.*?', ", "", id)
# Keep a separate cache of unicode and raw download for same URL.
if unicode is True:
id = "u" + id
if cached and id in cache:
if isinstance(cache, dict): # Not a Cache object.
return cache[id]
if unicode is True:
return cache[id]
if unicode is False:
return cache.get(id, unicode=False)
t = time.time()
# Open a connection with the given settings, read it and (by default) cache the data.
data = self.open(timeout, proxy, user_agent, referrer, authentication).read()
if unicode is True:
data = u(data)
if cached:
cache[id] = data
if throttle:
time.sleep(max(throttle-(time.time()-t), 0))
return data
def read(self, *args):
return self.open().read(*args)
@property
def exists(self, timeout=10):
""" Yields False if the URL generates a HTTP404NotFound error.
"""
try: self.open(timeout)
except HTTP404NotFound:
return False
except HTTPError, URLTimeoutError:
return True
except URLError:
return False
except:
return True
return True
@property
def mimetype(self, timeout=10):
""" Yields the MIME-type of the document at the URL, or None.
MIME is more reliable than simply checking the document extension.
You can then do: URL.mimetype in MIMETYPE_IMAGE.
"""
try:
return self.headers["content-type"].split(";")[0]
except KeyError:
return None
@property
def headers(self, timeout=10):
""" Yields a dictionary with the HTTP response headers.
"""
if self.__dict__["_headers"] is None:
try:
h = dict(self.open(timeout).info())
except URLError:
h = {}
self.__dict__["_headers"] = h
return self.__dict__["_headers"]
@property
def redirect(self, timeout=10):
""" Yields the redirected URL, or None.
"""
if self.__dict__["_redirect"] is None:
try:
r = self.open(timeout).geturl()
except URLError:
r = None
self.__dict__["_redirect"] = r != self.string and r or ""
return self.__dict__["_redirect"] or None
def __str__(self):
return bytestring(self.string)
def __unicode__(self):
# The string representation includes the query attributes with HTTP GET.
# This gives us the advantage of not having to parse the URL
# when no separate query attributes were given (e.g. all info is in URL._string):
if self._parts is None and self.method == GET:
return self._string
P = self._parts
u = []
if P[PROTOCOL]:
u.append("%s://" % P[PROTOCOL])
if P[USERNAME]:
u.append("%s:%s@" % (P[USERNAME], P[PASSWORD]))
if P[DOMAIN]:
u.append(P[DOMAIN])
if P[PORT]:
u.append(":%s" % P[PORT])
if P[PATH]:
u.append("/%s/" % "/".join(P[PATH]))
if P[PAGE] and len(u) > 0:
u[-1] = u[-1].rstrip("/")
if P[PAGE]:
u.append("/%s" % P[PAGE])
if P[QUERY] and self.method == GET:
u.append("?%s" % self.querystring)
if P[ANCHOR]:
u.append("#%s" % P[ANCHOR])
u = u"".join(u)
u = u.lstrip("/")
return u
def __repr__(self):
return "URL('%s', method='%s')" % (str(self), str(self.method))
def copy(self):
return URL(self.string, self.method, self.query)
def download(url=u"", method=GET, query={}, timeout=10, cached=True, throttle=0, proxy=None, user_agent=USER_AGENT, referrer=REFERRER, authentication=None, unicode=False):
""" Downloads the content at the given URL (by default it will be cached locally).
With unicode=True, the content is returned as a unicode string.
"""
return URL(url, method, query).download(timeout, cached, throttle, proxy, user_agent, referrer, authentication, unicode)
#url = URL("http://user:pass@example.com:992/animal/bird?species#wings")
#print url.parts
#print url.query
#print url.string
#--- STREAMING URL BUFFER --------------------------------------------------------------------------
def bind(object, method, function):
""" Attaches the function as a method with the given name to the given object.
"""
setattr(object, method, new.instancemethod(function, object))
class Stream(list):
def __init__(self, url, delimiter="\n", **kwargs):
""" Buffered stream of data from a given URL.
"""
self.socket = URL(url).open(**kwargs)
self.buffer = ""
self.delimiter = delimiter
def update(self, bytes=1024):
""" Reads a number of bytes from the stream.
If a delimiter is encountered, calls Stream.parse() on the packet.
"""
packets = []
self.buffer += self.socket.read(bytes)
self.buffer = self.buffer.split(self.delimiter, 1)
while len(self.buffer) > 1:
data = self.buffer[0]
data = self.parse(data)
packets.append(data)
self.buffer = self.buffer[-1]
self.buffer = self.buffer.split(self.delimiter, 1)
self.buffer = self.buffer[-1]
self.extend(packets)
return packets
def parse(self, data):
""" Must be overridden in a subclass.
"""
return data
def clear(self):
list.__init__(self, [])
def stream(url, delimiter="\n", parse=lambda data: data, **kwargs):
""" Returns a new Stream with the given parse method.
"""
stream = Stream(url, delimiter, **kwargs)
bind(stream, "parse", lambda stream, data: parse(data))
return stream
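# Usage sketch (the URL is hypothetical): buffer a line-delimited JSON feed and parse
# each packet with json.loads (the json module is imported at the top of this file).
#
# live = stream("http://example.com/live.json", delimiter="\n", parse=json.loads)
# packets = live.update(bytes=1024) # parsed packets received so far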
#--- FIND URLs -------------------------------------------------------------------------------------
RE_URL_PUNCTUATION = ("\"'{(>", "\"'.,;)}")
RE_URL_HEAD = r"[%s|\[|\s]" % "|".join(RE_URL_PUNCTUATION[0]) # Preceded by space, parenthesis or HTML tag.
RE_URL_TAIL = r"[%s|\]]*[\s|\<]" % "|".join(RE_URL_PUNCTUATION[1]) # Followed by space, punctuation or HTML tag.
RE_URL1 = r"(https?://.*?)" + RE_URL_TAIL # Starts with http:// or https://
RE_URL2 = RE_URL_HEAD + r"(www\..*?\..*?)" + RE_URL_TAIL # Starts with www.
RE_URL3 = RE_URL_HEAD + r"([\w|-]*?\.(com|net|org))" + RE_URL_TAIL # Ends with .com, .net, .org
RE_URL1, RE_URL2, RE_URL3 = (
re.compile(RE_URL1, re.I),
re.compile(RE_URL2, re.I),
re.compile(RE_URL3, re.I))
def find_urls(string, unique=True):
""" Returns a list of URLs parsed from the string.
Works on http://, https://, www. links or domain names ending in .com, .org, .net.
Links can be preceded by leading punctuation (open parens)
and followed by trailing punctuation (period, comma, close parens).
"""
string = u(string)
string = string.replace(u"\u2024", ".")
string = string.replace(" ", "  ")
matches = []
for p in (RE_URL1, RE_URL2, RE_URL3):
for m in p.finditer(" %s " % string):
s = m.group(1)
s = s.split("\">")[0].split("'>")[0] # google.com">Google => google.com
if not unique or s not in matches:
matches.append(s)
return matches
links = find_urls
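# Usage sketch:
# find_urls("See http://www.clips.ua.ac.be/pages/pattern (and www.python.org).")
# # => [u"http://www.clips.ua.ac.be/pages/pattern", u"www.python.org"]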
RE_EMAIL = re.compile(r"[\w\-\.\+]+@(\w[\w\-]+\.)+[\w\-]+") # tom.de+smedt@clips.ua.ac.be
def find_email(string, unique=True):
""" Returns a list of e-mail addresses parsed from the string.
"""
string = u(string).replace(u"\u2024", ".")
matches = []
for m in RE_EMAIL.finditer(string):
s = m.group(0)
if not unique or s not in matches:
matches.append(s)
return matches
def find_between(a, b, string):
""" Returns a list of substrings between a and b in the given string.
"""
p = "%s(.*?)%s" % (a, b)
p = re.compile(p, re.DOTALL | re.I)
return [m for m in p.findall(string)]
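# Usage sketch:
# find_between("<b>", "</b>", "<b>hello</b> <b>world</b>") # => ["hello", "world"]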
#### PLAIN TEXT ####################################################################################
BLOCK = [
"title", "h1", "h2", "h3", "h4", "h5", "h6", "p",
"center", "blockquote", "div", "table", "ul", "ol", "pre", "code", "form"
]
SELF_CLOSING = ["br", "hr", "img"]
# Element tag replacements for a stripped version of HTML source with strip_tags().
# Block-level elements are followed by linebreaks,
# list items are preceded by an asterisk ("*").
LIST_ITEM = "*"
blocks = dict.fromkeys(BLOCK+["br", "tr", "td"], ("", "\n\n"))
blocks.update({
"li": ("%s " % LIST_ITEM, "\n"),
"img": ("", ""),
"br": ("", "\n"),
"th": ("", "\n"),
"tr": ("", "\n"),
"td": ("", "\t"),
})
class HTMLParser(sgmllib.SGMLParser):
def __init__(self):
sgmllib.SGMLParser.__init__(self)
def handle_starttag(self, tag, attrs):
pass
def handle_endtag(self, tag):
pass
def unknown_starttag(self, tag, attrs):
self.handle_starttag(tag, attrs)
def unknown_endtag(self, tag):
self.handle_endtag(tag)
def clean(self, html):
html = decode_utf8(html)
html = html.replace("/>", " />")
html = html.replace("  />", " />")
html = html.replace("<!", "&lt;!")
html = html.replace("&lt;!DOCTYPE", "<!DOCTYPE")
html = html.replace("&lt;!doctype", "<!doctype")
html = html.replace("&lt;!--", "<!--")
return html
def parse_declaration(self, i):
# We can live without sgmllib's parse_declaration().
try:
return sgmllib.SGMLParser.parse_declaration(self, i)
except sgmllib.SGMLParseError:
return i + 1
def convert_charref(self, name):
# This fixes a bug in older versions of sgmllib when working with Unicode.
# Fix: ASCII ends at 127, not 255
try:
n = int(name)
except ValueError:
return
if not 0 <= n <= 127:
return
return chr(n)
class HTMLTagstripper(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
def strip(self, html, exclude=[], replace=blocks):
""" Returns the HTML string with all element tags (e.g. <p>) removed.
- exclude : a list of tags to keep. Element attributes are stripped.
To preserve attributes a dict of (tag name, [attribute])-items can be given.
- replace : a dictionary of (tag name, (replace_before, replace_after))-items.
By default, block-level elements are separated with linebreaks.
"""
if html is None:
return None
self._exclude = isinstance(exclude, dict) and exclude or dict.fromkeys(exclude, [])
self._replace = replace
self._data = []
self.feed(self.clean(html))
self.close()
self.reset()
return "".join(self._data)
def clean(self, html):
# Escape all entities (just strip tags).
return HTMLParser.clean(self, html).replace("&", "&amp;")
def handle_starttag(self, tag, attributes):
if tag in self._exclude:
# Create the tag attribute string,
# including attributes defined in the HTMLTagStripper._exclude dict.
a = len(self._exclude[tag]) > 0 and attributes or []
a = ["%s=\"%s\"" % (k,v) for k, v in a if k in self._exclude[tag]]
a = (" "+" ".join(a)).rstrip()
self._data.append("<%s%s>" % (tag, a))
if tag in self._replace:
self._data.append(self._replace[tag][0])
if tag in self._replace and tag in SELF_CLOSING:
self._data.append(self._replace[tag][1])
def handle_endtag(self, tag):
if tag in self._exclude and self._data and self._data[-1].startswith("<"+tag):
# Never keep empty elements (e.g. <a></a>).
self._data.pop(-1); return
if tag in self._exclude:
self._data.append("</%s>" % tag)
if tag in self._replace:
self._data.append(self._replace[tag][1])
def handle_data(self, data):
self._data.append(data.strip("\n\t"))
def handle_comment(self, comment):
if "comment" in self._exclude or \
"!--" in self._exclude:
self._data.append("<!--%s-->" % comment)
# As a function:
strip_tags = HTMLTagstripper().strip
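# Usage sketch: block-level tags are replaced with linebreaks, inline tags are dropped.
#
# strip_tags("<p>The <b>cat</b> sat.</p>") # => u"The cat sat.\n\n"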
def strip_element(string, tag, attributes=""):
""" Removes all elements with the given tagname and attributes from the string.
Open and close tags are kept in balance.
No HTML parser is used: strip_element(s, "a", "href='foo' class='bar'")
matches "<a href='foo' class='bar'" but not "<a class='bar' href='foo'".
"""
s = string.lower() # Case-insensitive.
t = tag.strip("</>")
a = (" " + attributes.lower().strip()).rstrip()
i = 0
j = 0
while j >= 0:
i = s.find("<%s%s" % (t, a), i)
j = s.find("</%s>" % t, i+1)
opened, closed = s[i:j].count("<%s" % t), 1
while opened > closed and j >= 0:
k = s.find("</%s>" % t, j+1)
opened += s[j:k].count("<%s" % t)
closed += 1
j = k
if i < 0: return string
if j < 0: return string[:i]
string = string[:i] + string[j+len(t)+3:]; s=string.lower()
return string
def strip_between(a, b, string):
""" Removes anything between (and including) string a and b inside the given string.
"""
p = "%s.*?%s" % (a, b)
p = re.compile(p, re.DOTALL | re.I)
return re.sub(p, "", string)
def strip_javascript(html):
return strip_between("<script.*?>", "</script>", html)
def strip_inline_css(html):
return strip_between("<style.*?>", "</style>", html)
def strip_comments(html):
return strip_between("<!--", "-->", html)
def strip_forms(html):
return strip_between("<form.*?>", "</form>", html)
RE_AMPERSAND = re.compile("\&(?!\#)") # & not followed by #
RE_UNICODE = re.compile(r'&(#?)(x|X?)(\w+);') # &#201;
def encode_entities(string):
""" Encodes HTML entities in the given string ("<" => "&lt;").
For example, to display "<em>hello</em>" in a browser,
we need to pass "&lt;em&gt;hello&lt;/em&gt;" (otherwise "hello" in italic is displayed).
"""
if isinstance(string, (str, unicode)):
string = RE_AMPERSAND.sub("&amp;", string)
string = string.replace("<", "&lt;")
string = string.replace(">", "&gt;")
string = string.replace('"', "&quot;")
string = string.replace("'", "&#39;")
return string
def decode_entities(string):
""" Decodes HTML entities in the given string ("&lt;" => "<").
"""
# http://snippets.dzone.com/posts/show/4569
def replace_entity(match):
hash, hex, name = match.group(1), match.group(2), match.group(3)
if hash == "#" or name.isdigit():
if hex == '' :
return unichr(int(name)) # "&#38;" => "&"
if hex in ("x","X"):
return unichr(int('0x'+name, 16)) # "&#x0026;" => "&"
else:
cp = htmlentitydefs.name2codepoint.get(name) # "&amp;" => "&"
return cp and unichr(cp) or match.group() # "&foo;" => "&foo;"
if isinstance(string, (str, unicode)):
return RE_UNICODE.subn(replace_entity, string)[0]
return string
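# Usage sketch:
# encode_entities("<em>5 > 3</em>") # => "&lt;em&gt;5 &gt; 3&lt;/em&gt;"
# decode_entities("caf&eacute; &amp; co") # => u"café & co"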
def encode_url(string):
return urllib.quote_plus(bytestring(string))
def decode_url(string):
return urllib.unquote_plus(string) # "black/white" => "black%2Fwhite".
RE_SPACES = re.compile("( |\xa0)+", re.M) # Matches one or more spaces.
RE_TABS = re.compile(r"\t+", re.M) # Matches one or more tabs.
def collapse_spaces(string, indentation=False, replace=" "):
""" Returns a string with consecutive spaces collapsed to a single space.
Whitespace on empty lines and at the end of each line is removed.
With indentation=True, retains leading whitespace on each line.
"""
p = []
for x in string.splitlines():
n = indentation and len(x) - len(x.lstrip()) or 0
p.append(x[:n] + RE_SPACES.sub(replace, x[n:]).strip())
return "\n".join(p)
def collapse_tabs(string, indentation=False, replace=" "):
""" Returns a string with (consecutive) tabs replaced by a single space.
Whitespace on empty lines and at the end of each line is removed.
With indentation=True, retains leading whitespace on each line.
"""
p = []
for x in string.splitlines():
n = indentation and len(x) - len(x.lstrip()) or 0
p.append(x[:n] + RE_TABS.sub(replace, x[n:]).strip())
return "\n".join(p)
def collapse_linebreaks(string, threshold=1):
""" Returns a string with consecutive linebreaks collapsed to at most the given threshold.
Whitespace on empty lines and at the end of each line is removed.
"""
n = "\n" * threshold
p = [s.rstrip() for s in string.splitlines()]
string = "\n".join(p)
string = re.sub(n+r"+", n, string)
return string
def plaintext(html, keep=[], replace=blocks, linebreaks=2, indentation=False):
""" Returns a string with all HTML tags removed.
Content inside HTML comments, the <style> tag and the <script> tags is removed.
- keep : a list of tags to keep. Element attributes are stripped.
To preserve attributes a dict of (tag name, [attribute])-items can be given.
- replace : a dictionary of (tag name, (replace_before, replace_after))-items.
By default, block-level elements are followed by linebreaks.
- linebreaks : the maximum amount of consecutive linebreaks,
- indentation : keep left line indentation (tabs and spaces)?
"""
if not keep.__contains__("script"):
html = strip_javascript(html)
if not keep.__contains__("style"):
html = strip_inline_css(html)
if not keep.__contains__("form"):
html = strip_forms(html)
if not keep.__contains__("comment") and \
not keep.__contains__("!--"):
html = strip_comments(html)
html = html.replace("\r", "\n")
html = strip_tags(html, exclude=keep, replace=replace)
html = decode_entities(html)
html = collapse_spaces(html, indentation)
html = collapse_tabs(html, indentation)
html = collapse_linebreaks(html, linebreaks)
html = html.strip()
return html
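# Usage sketch:
# plaintext("<div><h1>Title</h1><p>Hello <b>world</b> &amp; co!</p></div>")
# # => u"Title\n\nHello world & co!"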
#### SEARCH ENGINE #################################################################################
SEARCH = "search" # Query for pages (i.e. links to websites).
IMAGE = "image" # Query for images.
NEWS = "news" # Query for news items.
TINY = "tiny" # Image size around 100x100.
SMALL = "small" # Image size around 200x200.
MEDIUM = "medium" # Image size around 500x500.
LARGE = "large" # Image size around 1000x1000.
RELEVANCY = "relevancy" # Sort results by most relevant.
LATEST = "latest" # Sort results by most recent.
class Result(dict):
def __init__(self, url):
""" An item in a list of results returned by SearchEngine.search().
All dictionary entries are available as unicode string attributes.
- url : the URL of the referred web content,
- title : the title of the content at the URL,
- text : the content text,
- language: the content language,
- author : for news items and images, the author,
- date : for news items, the publication date.
"""
dict.__init__(self)
self.url = url
@property
def description(self):
return self.text # Backwards compatibility.
def download(self, *args, **kwargs):
""" Download the content at the given URL.
By default it will be cached - see URL.download().
"""
return URL(self.url).download(*args, **kwargs)
def __getattr__(self, k):
return self.get(k, u"")
def __getitem__(self, k):
return self.get(k, u"")
def __setattr__(self, k, v):
dict.__setitem__(self, u(k), v is not None and u(v) or u"") # Store strings as unicode.
def __setitem__(self, k, v):
dict.__setitem__(self, u(k), v is not None and u(v) or u"")
def setdefault(self, k, v):
dict.setdefault(self, u(k), u(v))
def update(self, *args, **kwargs):
map = dict()
map.update(*args, **kwargs)
dict.update(self, [(u(k), u(v)) for k, v in map.items()])
def __repr__(self):
return "Result(url=%s)" % repr(self.url)
class Results(list):
def __init__(self, source=None, query=None, type=SEARCH, total=0):
""" A list of results returned from SearchEngine.search().
- source: the service that yields the results (e.g. GOOGLE, TWITTER).
- query : the query that yields the results.
- type : the query type (SEARCH, IMAGE, NEWS).
- total : the total result count.
This is not the length of the list, but the total number of matches for the given query.
"""
self.source = source
self.query = query
self.type = type
self.total = total
class SearchEngine:
def __init__(self, license=None, throttle=1.0, language=None):
""" A base class for a web service.
- license : license key for the API,
- throttle : delay between requests (avoid hammering the server).
Inherited by: Google, Yahoo, Bing, Twitter, Wikipedia, Flickr.
"""
self.license = license
self.throttle = throttle # Amount of sleep time after executing a query.
self.language = language # Result.language restriction (e.g., "en").
self.format = lambda x: x # Formatter applied to each attribute of each Result.
def search(self, query, type=SEARCH, start=1, count=10, sort=RELEVANCY, size=None, cached=True, **kwargs):
return Results(source=None, query=query, type=type)
class SearchEngineError(HTTPError):
pass
class SearchEngineTypeError(SearchEngineError):
pass # Raised when an unknown type is passed to SearchEngine.search().
class SearchEngineLimitError(SearchEngineError):
pass # Raised when the query limit for a license is reached.
#--- GOOGLE ----------------------------------------------------------------------------------------
# Google Custom Search is a paid service.
# https://code.google.com/apis/console/
# http://code.google.com/apis/customsearch/v1/overview.html
GOOGLE = "https://www.googleapis.com/customsearch/v1?"
GOOGLE_LICENSE = api.license["Google"]
GOOGLE_CUSTOM_SEARCH_ENGINE = "000579440470800426354:_4qo2s0ijsi"
# Search results can start with: "Jul 29, 2007 ...",
# which is the date of the page parsed by Google from the content.
RE_GOOGLE_DATE = re.compile("^([A-Z][a-z]{2} [0-9]{1,2}, [0-9]{4}) {0,1}...")
class Google(SearchEngine):
def __init__(self, license=None, throttle=0.5, language=None):
SearchEngine.__init__(self, license or GOOGLE_LICENSE, throttle, language)
def search(self, query, type=SEARCH, start=1, count=10, sort=RELEVANCY, size=None, cached=True, **kwargs):
""" Returns a list of results from Google for the given query.
- type : SEARCH,
- start: maximum 100 results => start 1-10 with count=10,
- count: maximum 10,
There is a daily limit of 10,000 queries. Google Custom Search is a paid service.
"""
if type != SEARCH:
raise SearchEngineTypeError
if not query or count < 1 or start < 1 or start > (100 / count):
return Results(GOOGLE, query, type)
# 1) Create request URL.
url = URL(GOOGLE, query={
"key": self.license or GOOGLE_LICENSE,
"cx": GOOGLE_CUSTOM_SEARCH_ENGINE,
"q": query,
"start": 1 + (start-1) * count,
"num": min(count, 10),
"alt": "json"
})
# 2) Restrict language.
if self.language is not None:
url.query["lr"] = "lang_" + self.language
# 3) Parse JSON response.
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
data = url.download(cached=cached, **kwargs)
data = json.loads(data)
if data.get("error", {}).get("code") == 403:
raise SearchEngineLimitError
results = Results(GOOGLE, query, type)
results.total = int(data.get("queries", {}).get("request", [{}])[0].get("totalResults") or 0)
for x in data.get("items", []):
r = Result(url=None)
r.url = self.format(x.get("link"))
r.title = self.format(x.get("title"))
r.text = self.format(x.get("htmlSnippet").replace("<br> ","").replace("<b>...</b>", "..."))
r.language = self.language or ""
r.date = ""
if not r.date:
# Google Search results can start with a date (parsed from the content):
m = RE_GOOGLE_DATE.match(r.text)
if m:
r.date = m.group(1)
r.text = "..." + r.text[len(m.group(0)):]
results.append(r)
return results
def translate(self, string, input="en", output="fr", **kwargs):
""" Returns the translation of the given string in the desired output language.
Google Translate is a paid service, license without billing raises HTTP401Authentication.
"""
url = URL("https://www.googleapis.com/language/translate/v2?", method=GET, query={
"key": GOOGLE_LICENSE,
"q": string,
"source": input,
"target": output
})
kwargs.setdefault("cached", False)
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
try:
data = url.download(**kwargs)
except HTTP403Forbidden:
raise HTTP401Authentication, "Google translate API is a paid service"
data = json.loads(data)
data = data.get("data", {}).get("translations", [{}])[0].get("translatedText", "")
data = decode_entities(data)
return u(data)
def identify(self, string, **kwargs):
""" Returns a (language, confidence)-tuple for the given string.
Google Translate is a paid service, license without billing raises HTTP401Authentication.
"""
url = URL("https://www.googleapis.com/language/translate/v2/detect?", method=GET, query={
"key": GOOGLE_LICENSE,
"q": string[:1000]
})
kwargs.setdefault("cached", False)
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
try:
data = url.download(**kwargs)
except HTTP403Forbidden:
raise HTTP401Authentication, "Google translate API is a paid service"
data = json.loads(data)
data = data.get("data", {}).get("detections", [[{}]])[0][0]
data = u(data.get("language")), float(data.get("confidence"))
return data
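# Usage sketch (requires a valid, billed Google Custom Search API key; "your-api-key"
# is a placeholder):
#
# engine = Google(license="your-api-key", language="en")
# for result in engine.search("web mining", start=1, count=10, cached=False):
# print result.url, result.title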
#--- YAHOO -----------------------------------------------------------------------------------------
# Yahoo BOSS is a paid service.
# http://developer.yahoo.com/search/
YAHOO = "http://yboss.yahooapis.com/ysearch/"
YAHOO_LICENSE = api.license["Yahoo"]
class Yahoo(SearchEngine):
def __init__(self, license=None, throttle=0.5, language=None):
SearchEngine.__init__(self, license or YAHOO_LICENSE, throttle, language)
def search(self, query, type=SEARCH, start=1, count=10, sort=RELEVANCY, size=None, cached=True, **kwargs):
""" Returns a list of results from Yahoo for the given query.
- type : SEARCH, IMAGE or NEWS,
- start: maximum 1000 results => start 1-100 with count=10, 1000/count,
- count: maximum 50, or 35 for images.
There is no daily limit, however Yahoo BOSS is a paid service.
"""
if type not in (SEARCH, IMAGE, NEWS):
raise SearchEngineTypeError
if type == SEARCH:
url = YAHOO + "web"
if type == IMAGE:
url = YAHOO + "images"
if type == NEWS:
url = YAHOO + "news"
if not query or count < 1 or start < 1 or start > 1000 / count:
return Results(YAHOO, query, type)
# 1) Create request URL.
url = URL(url, method=GET, query={
"q": encode_url(query),
"start": 1 + (start-1) * count,
"count": min(count, type==IMAGE and 35 or 50),
"format": "json"
})
# 2) Restrict language.
if self.language is not None:
market = locale.market(self.language)
if market:
url.query["market"] = market.lower()
# 3) BOSS OAuth authentication.
url.query.update({
"oauth_version": "1.0",
"oauth_nonce": oauth.nonce(),
"oauth_timestamp": oauth.timestamp(),
"oauth_consumer_key": self.license[0],
"oauth_signature_method": "HMAC-SHA1"
})
url.query["oauth_signature"] = oauth.sign(url.string.split("?")[0], url.query, method=GET, secret=self.license[1])
# 3) Parse JSON response.
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
try:
data = url.download(cached=cached, **kwargs)
except HTTP401Authentication:
raise HTTP401Authentication, "Yahoo %s API is a paid service" % type
except HTTP403Forbidden:
raise SearchEngineLimitError
data = json.loads(data)
data = data.get("bossresponse") or {}
data = data.get({SEARCH:"web", IMAGE:"images", NEWS:"news"}[type], {})
results = Results(YAHOO, query, type)
results.total = int(data.get("totalresults") or 0)
for x in data.get("results", []):
r = Result(url=None)
r.url = self.format(x.get("url", x.get("clickurl")))
r.title = self.format(x.get("title"))
r.text = self.format(x.get("abstract"))
r.date = self.format(x.get("date"))
r.author = self.format(x.get("source"))
r.language = self.format(x.get("language") and \
x.get("language").split(" ")[0] or self.language or "")
results.append(r)
return results
#--- BING ------------------------------------------------------------------------------------------
# https://datamarket.azure.com/dataset/5BA839F1-12CE-4CCE-BF57-A49D98D29A44
# https://datamarket.azure.com/account/info
BING = "https://api.datamarket.azure.com/Bing/Search/"
BING_LICENSE = api.license["Bing"]
class Bing(SearchEngine):
def __init__(self, license=None, throttle=0.5, language=None):
SearchEngine.__init__(self, license or BING_LICENSE, throttle, language)
def search(self, query, type=SEARCH, start=1, count=10, sort=RELEVANCY, size=None, cached=True, **kwargs):
""" Returns a list of results from Bing for the given query.
- type : SEARCH, IMAGE or NEWS,
- start: maximum 1000 results => start 1-100 with count=10, 1000/count,
- count: maximum 50, or 15 for news,
- size : for images, either SMALL, MEDIUM or LARGE.
There is no daily query limit.
"""
if type not in (SEARCH, IMAGE, NEWS):
raise SearchEngineTypeError
if type == SEARCH:
src = "Web"
if type == IMAGE:
src = "Image"
if type == NEWS:
src = "News"
if not query or count < 1 or start < 1 or start > 1000 / count:
return Results(BING + src + "?", query, type)
# 1) Construct request URL.
url = URL(BING + "Composite", method=GET, query={
"Sources": "'" + src.lower() + "'",
"Query": "'" + query + "'",
"$skip": 1 + (start-1) * count,
"$top": min(count, type==NEWS and 15 or 50),
"$format": "json",
})
# 2) Restrict image size.
if size in (TINY, SMALL, MEDIUM, LARGE):
url.query["ImageFilters"] = {
TINY: "'Size:Small'",
SMALL: "'Size:Small'",
MEDIUM: "'Size:Medium'",
LARGE: "'Size:Large'" }[size]
# 3) Restrict language.
if type in (SEARCH, IMAGE) and self.language is not None:
url.query["Query"] = url.query["Query"][:-1] + " language: %s'" % self.language
#if self.language is not None:
# market = locale.market(self.language)
# if market:
# url.query["market"] = market
# 4) Parse JSON response.
kwargs["authentication"] = ("", self.license)
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
try:
data = url.download(cached=cached, **kwargs)
except HTTP401Authentication:
raise HTTP401Authentication, "Bing %s API is a paid service" % type
data = json.loads(data)
data = data.get("d", {})
data = data.get("results", [{}])[0]
results = Results(BING, query, type)
results.total = int(data.get(src+"Total", 0))
for x in data.get(src, []):
r = Result(url=None)
r.url = self.format(x.get("MediaUrl", x.get("Url")))
r.title = self.format(x.get("Title"))
r.text = self.format(x.get("Description", x.get("Snippet")))
r.language = self.language or ""
r.date = self.format(x.get("DateTime", x.get("Date")))
r.author = self.format(x.get("Source"))
results.append(r)
return results
#--- TWITTER ---------------------------------------------------------------------------------------
# http://apiwiki.twitter.com/
TWITTER = "http://search.twitter.com/"
TWITTER_STREAM = "https://stream.twitter.com/1/statuses/filter.json"
TWITTER_STATUS = "https://twitter.com/%s/status/%s"
TWITTER_LICENSE = api.license["Twitter"]
TWITTER_HASHTAG = re.compile(r"(\s|^)(#[a-z0-9_\-]+)", re.I) # Word starts with "#".
TWITTER_RETWEET = re.compile(r"(\s|^RT )(@[a-z0-9_\-]+)", re.I) # Word starts with "RT @".
class Twitter(SearchEngine):
def __init__(self, license=None, throttle=0.5, language=None):
SearchEngine.__init__(self, license or TWITTER_LICENSE, throttle, language)
def search(self, query, type=SEARCH, start=1, count=10, sort=RELEVANCY, size=None, cached=False, **kwargs):
""" Returns a list of results from Twitter for the given query.
- type : SEARCH or TRENDS,
- start: maximum 1500 results (10 for trends) => start 1-15 with count=100, 1500/count,
- count: maximum 100, or 10 for trends.
There is an hourly limit of 150+ queries (actual amount undisclosed).
"""
if type != SEARCH:
raise SearchEngineTypeError
if not query or count < 1 or start < 1 or start > 1500 / count:
return Results(TWITTER, query, type)
# 1) Construct request URL.
url = URL(TWITTER + "search.json?", method=GET)
url.query = {
"q": query,
"page": start,
"rpp": min(count, 100)
}
if "geo" in kwargs:
# Filter by location with geo=(latitude, longitude, radius).
# It can also be a (latitude, longitude)-tuple with default radius "10km".
url.query["geocode"] = ",".join((map(str, kwargs.pop("geo")) + ["10km"])[:3])
# 2) Restrict language.
url.query["lang"] = self.language or ""
# 3) Parse JSON response.
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
try:
data = URL(url).download(cached=cached, **kwargs)
except HTTP420Error:
raise SearchEngineLimitError
data = json.loads(data)
results = Results(TWITTER, query, type)
results.total = None
for x in data.get("results", data.get("trends", [])):
r = Result(url=None)
r.url = self.format(TWITTER_STATUS % (x.get("from_user"), x.get("id_str")))
r.text = self.format(x.get("text"))
r.date = self.format(x.get("created_at", data.get("as_of")))
r.author = self.format(x.get("from_user"))
r.profile = self.format(x.get("profile_image_url")) # Profile picture URL.
r.language = self.format(x.get("iso_language_code"))
results.append(r)
return results
def trends(self, **kwargs):
""" Returns a list with 10 trending topics on Twitter.
"""
url = URL("https://api.twitter.com/1/trends/1.json")
kwargs.setdefault("cached", False)
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
data = url.download(**kwargs)
data = json.loads(data)
return [u(x.get("name")) for x in data[0].get("trends", [])]
def stream(self, query, **kwargs):
""" Returns a live stream of Result objects for the given query.
"""
url = URL(TWITTER_STREAM)
url.query.update({
"track": query,
"oauth_version": "1.0",
"oauth_nonce": oauth.nonce(),
"oauth_timestamp": oauth.timestamp(),
"oauth_consumer_key": self.license[0],
"oauth_token": self.license[2][0],
"oauth_signature_method": "HMAC-SHA1"
})
url.query["oauth_signature"] = oauth.sign(url.string.split("?")[0], url.query, GET,
self.license[1],
self.license[2][1])
return TwitterStream(url, delimiter="\n", format=self.format, **kwargs)
class TwitterStream(Stream):
def __init__(self, socket, delimiter="\n", format=lambda s: s, **kwargs):
Stream.__init__(self, socket, delimiter, **kwargs)
self.format = format
def parse(self, data):
""" TwitterStream.queue will populate with Result objects as
TwitterStream.update() is called iteratively.
"""
x = json.loads(data)
r = Result(url=None)
r.url = self.format(TWITTER_STATUS % (x.get("user", {}).get("screen_name"), x.get("id_str")))
r.text = self.format(x.get("text"))
r.date = self.format(x.get("created_at"))
r.author = self.format(x.get("user", {}).get("screen_name"))
r.profile = self.format(x.get("profile_image_url"))
r.language = self.format(x.get("iso_language_code"))
return r
def author(name):
""" Returns a Twitter query-by-author-name that can be passed to Twitter.search().
For example: Twitter().search(author("tom_de_smedt"))
"""
return "from:%s" % name
def hashtags(string):
""" Returns a list of hashtags (words starting with a #hash) from a tweet.
"""
return [b for a, b in TWITTER_HASHTAG.findall(string)]
def retweets(string):
""" Returns a list of retweets (words starting with a RT @author) from a tweet.
"""
return [b for a, b in TWITTER_RETWEET.findall(string)]
#stream = Twitter().stream("cat")
#for i in range(10):
# stream.update()
# for tweet in reversed(stream):
# print tweet.text
# print tweet.url
# print
#stream.clear()
#--- MEDIAWIKI -------------------------------------------------------------------------------------
# http://en.wikipedia.org/w/api.php
WIKIA = "http://wikia.com"
WIKIPEDIA = "http://wikipedia.com"
WIKIPEDIA_LICENSE = api.license["Wikipedia"]
MEDIAWIKI_LICENSE = None
MEDIAWIKI = "http://{SUBDOMAIN}.{DOMAIN}{API}"
# Pattern for meta links (e.g. Special:RecentChanges).
# http://en.wikipedia.org/wiki/Main_namespace
MEDIAWIKI_NAMESPACE = ["Main", "User", "Wikipedia", "File", "MediaWiki", "Template", "Help", "Category", "Portal", "Book"]
MEDIAWIKI_NAMESPACE += [s+" talk" for s in MEDIAWIKI_NAMESPACE] + ["Talk", "Special", "Media"]
MEDIAWIKI_NAMESPACE += ["WP", "WT", "MOS", "C", "CAT", "Cat", "P", "T", "H", "MP", "MoS", "Mos"]
_mediawiki_namespace = re.compile(r"^"+"|".join(MEDIAWIKI_NAMESPACE)+":", re.I)
# Pattern to identify disambiguation pages.
MEDIAWIKI_DISAMBIGUATION = "<a href=\"/wiki/Help:Disambiguation\" title=\"Help:Disambiguation\">disambiguation</a> page"
# Pattern to identify references, e.g. [12]
MEDIAWIKI_REFERENCE = r"\s*\[[0-9]{1,3}\]"
class MediaWiki(SearchEngine):
def __init__(self, license=None, throttle=5.0, language="en"):
SearchEngine.__init__(self, license or MEDIAWIKI_LICENSE, throttle, language)
@property
def _url(self):
# Must be overridden in a subclass; see Wikia and Wikipedia.
return None
@property
def MediaWikiArticle(self):
return MediaWikiArticle
@property
def MediaWikiSection(self):
return MediaWikiSection
@property
def MediaWikiTable(self):
return MediaWikiTable
def __iter__(self):
return self.all()
def all(self, **kwargs):
""" Returns an iterator over all MediaWikiArticle objects.
Optional parameters can include those passed to
MediaWiki.list(), MediaWiki.search() and URL.download().
"""
for title in self.list(**kwargs):
yield self.search(title, **kwargs)
articles = all
def list(self, namespace=0, start=None, count=100, cached=True, **kwargs):
""" Returns an iterator over all article titles (for a given namespace id).
"""
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
# Fetch article titles (default) or a custom id.
id = kwargs.pop("_id", "title")
# Loop endlessly (= until the last request no longer yields an "apcontinue").
# See: http://www.mediawiki.org/wiki/API:Allpages
while start != -1:
url = URL(self._url, method=GET, query={
"action": "query",
"list": "allpages",
"apnamespace": namespace,
"apfrom": start or "",
"aplimit": min(count, 500),
"apfilterredir": "nonredirects",
"format": "json"
})
data = url.download(cached=cached, **kwargs)
data = json.loads(data)
for x in data.get("query", {}).get("allpages", {}):
if x.get(id):
yield x[id]
start = data.get("query-continue", {}).get("allpages", {})
start = start.get("apcontinue", start.get("apfrom", -1))
raise StopIteration
def search(self, query, type=SEARCH, start=1, count=1, sort=RELEVANCY, size=None, cached=True, **kwargs):
""" Returns a MediaWikiArticle for the given query.
The query is case-sensitive, for example on Wikipedia:
- "tiger" = Panthera tigris,
- "TIGER" = Topologically Integrated Geographic Encoding and Referencing.
"""
if type != SEARCH:
raise SearchEngineTypeError
if count < 1:
return None
# 1) Construct request URL (e.g., Wikipedia for a given language).
url = URL(self._url, method=GET, query={
"action": "parse",
"page": query.replace(" ","_"),
"redirects": 1,
"format": "json"
})
# 2) Parse JSON response.
kwargs.setdefault("unicode", True)
kwargs.setdefault("timeout", 30) # Parsing the article takes some time.
kwargs.setdefault("throttle", self.throttle)
data = url.download(cached=cached, **kwargs)
data = json.loads(data)
data = data.get("parse", {})
a = self._parse_article(data, query=query)
a = self._parse_article_sections(a, data)
a = self._parse_article_section_structure(a)
if not a.html or "id=\"noarticletext\"" in a.html:
return None
return a
def _parse_article(self, data, **kwargs):
return self.MediaWikiArticle(
title = plaintext(data.get("displaytitle", data.get("title", ""))),
source = data.get("text", {}).get("*", ""),
disambiguation = data.get("text", {}).get("*", "").find(MEDIAWIKI_DISAMBIGUATION) >= 0,
links = [x["*"] for x in data.get("links", []) if not _mediawiki_namespace.match(x["*"])],
categories = [x["*"] for x in data.get("categories", [])],
external = [x for x in data.get("externallinks", [])],
media = [x for x in data.get("images", [])],
languages = dict([(x["lang"], x["*"]) for x in data.get("langlinks", [])]),
language = self.language,
parser = self, **kwargs)
def _parse_article_sections(self, article, data):
# If "References" is a section in the article,
# the HTML will contain a marker <h*><span class="mw-headline" id="References">.
# http://en.wikipedia.org/wiki/Section_editing
t = article.title
d = 0
i = 0
for x in data.get("sections", {}):
a = x.get("anchor")
if a:
p = r"<h.>\s*.*?\s*<span class=\"mw-headline\" id=\"%s\">" % a
p = re.compile(p)
m = p.search(article.source, i)
if m:
j = m.start()
article.sections.append(self.MediaWikiSection(article,
title = t,
start = i,
stop = j,
level = d))
t = x.get("line", "")
d = int(x.get("level", 2)) - 1
i = j
return article
def _parse_article_section_structure(self, article):
# Sections with higher level are children of previous sections with lower level.
for i, s2 in enumerate(article.sections):
for s1 in reversed(article.sections[:i]):
if s1.level < s2.level:
s2.parent = s1
s1.children.append(s2)
break
return article
class MediaWikiArticle:
def __init__(self, title=u"", source=u"", links=[], categories=[], languages={}, disambiguation=False, **kwargs):
""" A MediaWiki article returned from MediaWiki.search().
MediaWikiArticle.string contains the HTML content.
"""
self.title = title # Article title.
self.source = source # Article HTML content.
self.sections = [] # Article sections.
self.links = links # List of titles of linked articles.
self.categories = categories # List of categories. As links, prepend "Category:".
self.external = [] # List of external links.
self.media = [] # List of linked media (images, sounds, ...)
self.disambiguation = disambiguation # True when the article is a disambiguation page.
self.languages = languages # Dictionary of (language, article)-items, e.g. Cat => ("nl", "Kat")
self.language = kwargs.get("language", "en")
self.parser = kwargs.get("parser", MediaWiki())
for k, v in kwargs.items():
setattr(self, k, v)
def _plaintext(self, string, **kwargs):
""" Strips HTML tags, whitespace and wiki markup from the HTML source, including:
metadata, info box, table of contents, annotations, thumbnails, disambiguation link.
This is called internally from MediaWikiArticle.string.
"""
s = string
s = strip_between("<table class=\"metadata", "</table>", s) # Metadata.
s = strip_between("<table id=\"toc", "</table>", s) # Table of contents.
s = strip_between("<table class=\"infobox", "</table>", s) # Infobox.
s = strip_between("<table class=\"wikitable", "</table>", s) # Table.
s = strip_element(s, "table", "class=\"navbox") # Navbox.
s = strip_between("<div id=\"annotation", "</div>", s) # Annotations.
s = strip_between("<div class=\"dablink", "</div>", s) # Disambiguation message.
s = strip_between("<div class=\"magnify", "</div>", s) # Thumbnails.
s = strip_between("<div class=\"thumbcaption", "</div>", s) # Thumbnail captions.
s = re.sub(r"<img class=\"tex\".*?/>", "[math]", s) # LaTex math images.
s = plaintext(s, **kwargs)
s = re.sub(r"\[edit\]\s*", "", s) # [edit] is language dependent (e.g. nl => "[bewerken]")
s = s.replace("[", " [").replace("  [", " [") # Space before inline references.
return s
def plaintext(self, **kwargs):
return self._plaintext(self.source, **kwargs)
@property
def html(self):
return self.source
@property
def string(self):
return self.plaintext()
def __repr__(self):
return "MediaWikiArticle(title=%s)" % repr(self.title)
class MediaWikiSection:
def __init__(self, article, title=u"", start=0, stop=0, level=1):
""" A (nested) section in the content of a MediaWikiArticle.
"""
self.article = article # MediaWikiArticle the section is part of.
self.parent = None # MediaWikiSection the section is part of.
self.children = [] # MediaWikiSections belonging to this section.
self.title = title # Section title.
self._start = start # Section start index in MediaWikiArticle.string.
self._stop = stop # Section stop index in MediaWikiArticle.string.
self._level = level # Section depth (main title + intro = level 0).
self._tables = None
def plaintext(self, **kwargs):
return self.article._plaintext(self.source, **kwargs)
@property
def source(self):
return self.article.source[self._start:self._stop]
@property
def html(self):
return self.source
@property
def string(self):
return self.plaintext()
@property
def content(self):
# ArticleSection.string, minus the title.
s = self.plaintext()
if s == self.title or s.startswith(self.title+"\n"):
return s[len(self.title):].lstrip()
return s
@property
def tables(self):
""" Yields a list of MediaWikiTable objects in the section.
"""
if self._tables is None:
self._tables = []
b = "<table class=\"wikitable\"", "</table>"
p = self.article._plaintext
f = find_between
for s in f(b[0], b[1], self.source):
t = self.article.parser.MediaWikiTable(self,
title = p((f(r"<caption.*?>", "</caption>", s) + [""])[0]),
source = b[0] + s + b[1]
)
for i, row in enumerate(f(r"<tr", "</tr>", s)):
# 1) Parse <td> and <th> content and format it as plain text.
# 2) Parse <td colspan=""> attribute, duplicate spanning cells.
# 3) For <th> in the first row, update MediaWikiTable.headers.
r1 = f(r"<t[d|h]", r"</t[d|h]>", row)
r1 = (((f(r'colspan="', r'"', v)+[1])[0], v[v.find(">")+1:]) for v in r1)
r1 = ((int(n), v) for n, v in r1)
r2 = []; [[r2.append(p(v)) for j in range(n)] for n, v in r1]
if i == 0 and "</th>" in row:
t.headers = r2
else:
t.rows.append(r2)
self._tables.append(t)
return self._tables
@property
def level(self):
return self._level
depth = level
def __repr__(self):
return "MediaWikiSection(title='%s')" % bytestring(self.title)
class MediaWikiTable:
def __init__(self, section, title=u"", headers=[], rows=[], source=u""):
""" A <table class="wikitable> in a MediaWikiSection.
"""
self.section = section # MediaWikiSection the table is part of.
self.source = source # Table HTML.
self.title = title # Table title.
self.headers = headers # List of table headers.
self.rows = rows # List of table rows, each a list of cells.
@property
def html(self):
return self.source
def __repr__(self):
return "MediaWikiTable(title='%s')" % bytestring(self.title)
#--- MEDIAWIKI: WIKIPEDIA --------------------------------------------------------------------------
class Wikipedia(MediaWiki):
def __init__(self, license=None, throttle=5.0, language="en"):
""" Mediawiki search engine for http://[language].wikipedia.org.
"""
SearchEngine.__init__(self, license or WIKIPEDIA_LICENSE, throttle, language)
self._subdomain = language
@property
def _url(self):
s = MEDIAWIKI
s = s.replace("{SUBDOMAIN}", self._subdomain)
s = s.replace("{DOMAIN}", "wikipedia.org")
s = s.replace("{API}", '/w/api.php')
return s
@property
def MediaWikiArticle(self):
return WikipediaArticle
@property
def MediaWikiSection(self):
return WikipediaSection
@property
def MediaWikiTable(self):
return WikipediaTable
class WikipediaArticle(MediaWikiArticle):
def download(self, media, **kwargs):
""" Downloads an item from MediaWikiArticle.media and returns the content.
Note: images on Wikipedia can be quite large, and this method uses screen-scraping,
so Wikipedia might not like it if you download media in this way.
To save the media in a file:
data = article.download(media)
open(filename+extension(media),"w").write(data)
"""
url = "http://%s.wikipedia.org/wiki/File:%s" % (self.__dict__.get("language", "en"), media)
if url not in cache:
time.sleep(1)
data = URL(url).download(**kwargs)
data = re.search(r"upload.wikimedia.org/.*?/%s" % media, data)
data = data and URL("http://" + data.group(0)).download(**kwargs) or None
return data
def __repr__(self):
return "WikipediaArticle(title=%s)" % repr(self.title)
class WikipediaSection(MediaWikiSection):
def __repr__(self):
return "WikipediaSection(title='%s')" % bytestring(self.title)
class WikipediaTable(MediaWikiTable):
def __repr__(self):
return "WikipediaTable(title='%s')" % bytestring(self.title)
#article = Wikipedia().search("cat")
#for section in article.sections:
# print " "*(section.level-1) + section.title
#if article.media:
# data = article.download(article.media[2])
# f = open(article.media[2], "w")
# f.write(data)
# f.close()
#
#article = Wikipedia(language="nl").search("borrelnootje")
#print article.string
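#
# Example (sketch): iterating the tables parsed from an article's sections.
# Requires network access to the MediaWiki API; "Belgium" is an arbitrary query.
#article = Wikipedia().search("Belgium")
#for section in article.sections:
#    for table in section.tables:
#        print table.title
#        print table.headers
#        print table.rows[:1]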
#--- MEDIAWIKI: WIKIA ------------------------------------------------------------------------------
class Wikia(MediaWiki):
def __init__(self, domain="www", license=None, throttle=5.0, language="en"):
""" Mediawiki search engine for http://[domain].wikia.com.
"""
SearchEngine.__init__(self, license or MEDIAWIKI_LICENSE, throttle, language)
self._subdomain = domain
@property
def _url(self):
s = MEDIAWIKI
s = s.replace("{SUBDOMAIN}", self._subdomain)
s = s.replace("{DOMAIN}", "wikia.com")
s = s.replace("{API}", '/api.php')
return s
@property
def MediaWikiArticle(self):
return WikiaArticle
@property
def MediaWikiSection(self):
return WikiaSection
@property
def MediaWikiTable(self):
return WikiaTable
def all(self, **kwargs):
if kwargs.pop("batch", True):
# We can take advantage of Wikia's search API to reduce bandwidth.
# Instead of executing a query to retrieve each article,
# we query for a batch of (10) articles.
iterator = self.list(_id="pageid", **kwargs)
while True:
batch, done = [], False
try:
for i in range(10): batch.append(iterator.next())
except StopIteration:
done = True # No more articles, finish batch and raise StopIteration.
url = URL(self._url.replace("api.php", "wikia.php"), method=GET, query={
"controller": "WikiaSearch",
"method": "getPages",
"ids": '|'.join(str(id) for id in batch),
"format": "json"
})
kwargs.setdefault("unicode", True)
kwargs.setdefault("cached", True)
kwargs["timeout"] = 10 * (1 + len(batch))
data = url.download(**kwargs)
data = json.loads(data)
for x in (data or {}).get("pages", {}).values():
yield WikiaArticle(title=x.get("title", ""), source=x.get("html", ""))
if done:
raise StopIteration
for title in self.list(**kwargs):
yield self.search(title, **kwargs)
class WikiaArticle(MediaWikiArticle):
def __repr__(self):
return "WikiaArticle(title=%s)" % repr(self.title)
class WikiaSection(MediaWikiSection):
def __repr__(self):
return "WikiaSection(title='%s')" % bytestring(self.title)
class WikiaTable(MediaWikiTable):
def __repr__(self):
return "WikiaTable(title='%s')" % bytestring(self.title)
#--- FLICKR ----------------------------------------------------------------------------------------
# http://www.flickr.com/services/api/
FLICKR = "http://api.flickr.com/services/rest/"
FLICKR_LICENSE = api.license["Flickr"]
INTERESTING = "interesting"
class Flickr(SearchEngine):
def __init__(self, license=None, throttle=5.0, language=None):
SearchEngine.__init__(self, license or FLICKR_LICENSE, throttle, language)
def search(self, query, type=IMAGE, start=1, count=10, sort=RELEVANCY, size=None, cached=True, **kwargs):
""" Returns a list of results from Flickr for the given query.
Retrieving the URL of a result (i.e. image) requires an additional query.
- type : SEARCH, IMAGE,
- start: maximum undefined,
- count: maximum 500,
- sort : RELEVANCY, LATEST or INTERESTING.
There is no daily limit.
"""
if type not in (SEARCH, IMAGE):
raise SearchEngineTypeError
if not query or count < 1 or start < 1 or start > 500/count:
return Results(FLICKR, query, IMAGE)
# 1) Construct request URL.
url = FLICKR+"?"
url = URL(url, method=GET, query={
"api_key": self.license or "",
"method": "flickr.photos.search",
"text": query.replace(" ", "_"),
"page": start,
"per_page": min(count, 500),
"sort": { RELEVANCY: "relevance",
LATEST: "date-posted-desc",
INTERESTING: "interestingness-desc" }.get(sort)
})
if kwargs.get("copyright", True) is False:
# With copyright=False, only returns Public Domain and Creative Commons images.
# http://www.flickr.com/services/api/flickr.photos.licenses.getInfo.html
# 5: "Attribution-ShareAlike License"
# 7: "No known copyright restriction"
url.query["license"] = "5,7"
# 2) Parse XML response.
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
data = url.download(cached=cached, **kwargs)
data = xml.dom.minidom.parseString(bytestring(data))
results = Results(FLICKR, query, IMAGE)
results.total = int(data.getElementsByTagName("photos")[0].getAttribute("total"))
for x in data.getElementsByTagName("photo"):
r = FlickrResult(url=None)
r.__dict__["_id"] = x.getAttribute("id")
r.__dict__["_size"] = size
r.__dict__["_license"] = self.license
r.__dict__["_throttle"] = self.throttle
r.text = self.format(x.getAttribute("title"))
r.author = self.format(x.getAttribute("owner"))
results.append(r)
return results
class FlickrResult(Result):
@property
def url(self):
# Retrieving the url of a FlickrResult (i.e. image location) requires another query.
# Note: the "Original" size no longer appears in the response,
# so Flickr might not like it if we download it.
url = FLICKR + "?method=flickr.photos.getSizes&photo_id=%s&api_key=%s" % (self._id, self._license)
data = URL(url).download(throttle=self._throttle, unicode=True)
data = xml.dom.minidom.parseString(bytestring(data))
size = { TINY: "Thumbnail",
SMALL: "Small",
MEDIUM: "Medium",
LARGE: "Original" }.get(self._size, "Medium")
for x in data.getElementsByTagName("size"):
if size == x.getAttribute("label"):
return x.getAttribute("source")
if size == "Original":
url = x.getAttribute("source")
url = url[:-len(extension(url))-2] + "_o" + extension(url)
return u(url)
#images = Flickr().search("kitten", count=10, size=SMALL)
#for img in images:
# print bytestring(img.description)
# print img.url
#
#data = img.download()
#f = open("kitten"+extension(img.url), "w")
#f.write(data)
#f.close()
#--- FACEBOOK --------------------------------------------------------------------------------------
# Facebook public status updates.
# https://developers.facebook.com/docs/reference/api/
FACEBOOK = "https://graph.facebook.com/"
FACEBOOK_LICENSE = api.license["Facebook"]
FEED = "feed" # Facebook timeline.
COMMENTS = "comments" # Facebook comments (for a given news feed post).
LIKES = "likes" # Facebook likes (for a given post or comment).
FRIENDS = "friends" # Facebook friends (for a given profile id).
class FacebookResult(Result):
def __repr__(self):
return "Result(id=%s)" % repr(self.id)
class Facebook(SearchEngine):
def __init__(self, license=None, throttle=1.0, language=None):
SearchEngine.__init__(self, license, throttle, language)
@property
def _token(self):
# Yields the "application access token" (stored in api.license["Facebook"]).
# With this license, we can view public content.
# To view more information, we need a "user access token" as license key.
# This token can be retrieved manually from:
# http://www.clips.ua.ac.be/media/pattern-fb.html
# Or parsed from this URL:
# https://graph.facebook.com/oauth/authorize?type=user_agent
# &client_id=332061826907464
# &redirect_uri=http%3A%2F%2Fwww.clips.ua.ac.be/media/pattern-facebook-token.html
# &scope=read_stream,user_birthday,user_likes,user_photos,friends_birthday,friends_likes
# The token is valid for a limited duration.
return URL(FACEBOOK + "oauth/access_token?", query={
"grant_type": "client_credentials",
"client_id": "332061826907464",
"client_secret": "81ff4204e73ecafcd87635a3a3683fbe"
}).download().split("=")[1]
def search(self, query, type=SEARCH, start=1, count=10, cached=False, **kwargs):
""" Returns a list of results from Facebook public status updates for the given query.
- query: string, or Result.id for NEWS and COMMENTS,
- type : SEARCH,
- start: 1,
- count: maximum 100 for SEARCH and NEWS, 1000 for COMMENTS and LIKES.
There is an hourly limit of +-600 queries (actual amount undisclosed).
"""
# Facebook.search(type=SEARCH) returns public posts + author.
# Facebook.search(type=NEWS) returns posts for the given author (id | alias | "me").
# Facebook.search(type=COMMENTS) returns comments for the given post id.
# Facebook.search(type=LIKES) returns authors for the given author, post or comments.
# An author is a Facebook user or other entity (e.g., a product page).
if type not in (SEARCH, NEWS, COMMENTS, LIKES, FRIENDS):
raise SearchEngineTypeError
if type in (SEARCH, NEWS):
max = 100
if type in (COMMENTS, LIKES):
max = 1000
if type in (FRIENDS,):
max = 10000
if not query or start < 1 or count < 1:
return Results(FACEBOOK, query, SEARCH)
if isinstance(query, FacebookResult):
query = query.id
# 1) Construct request URL.
if type == SEARCH:
url = FACEBOOK + type
url = URL(url, method=GET, query={
"q": query,
"type": "post",
"fields": ",".join(("id", "link", "message", "created_time", "from")),
"offset": (start-1) * min(count, max),
"limit": (start-0) * min(count, max),
})
if type in (NEWS, FEED, COMMENTS, LIKES, FRIENDS):
url = FACEBOOK + (u(query) or "me").replace(FACEBOOK, "") + "/" + type.replace("news", "feed")
url = URL(url, method=GET, query={
"access_token": self.license,
"offset": (start-1) * min(count, max),
"limit": (start-0) * min(count, max)
})
# 2) Parse JSON response.
kwargs.setdefault("cached", cached)
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
try:
data = URL(url).download(**kwargs)
except HTTP400BadRequest:
raise HTTP401Authentication
data = json.loads(data)
results = Results(FACEBOOK, query, SEARCH)
results.total = None
for x in data.get("data", []):
r = FacebookResult(url=None)
r.id = self.format(x.get("id"))
r.url = self.format(x.get("link"))
r.text = self.format(x.get("story", x.get("message")))
r.date = self.format(x.get("created_time"))
# Store likes & comments count as int, author as (id, name)-tuple
# (by default Result will store everything as Unicode strings).
s = lambda r, k, v: dict.__setitem__(r, k, v)
s(r, "likes", \
self.format(x.get("like_count", x.get("likes", {}).get("count", 0))) + 0)
s(r, "comments", \
self.format(x.get("comments", {}).get("count", 0)) + 0)
s(r, "author", (
u(self.format(x.get("from", {}).get("id", ""))), \
u(self.format(x.get("from", {}).get("name", "")))))
# Set Result.text to author name for likes.
if type in (LIKES, FRIENDS):
s(r, "author", (
u(self.format(x.get("id", ""))),
u(self.format(x.get("name", "")))))
r.text = \
self.format(x.get("name"))
# Set Result.url to full-size image.
if r.url.startswith("http://www.facebook.com/photo"):
r.url = x.get("picture").replace("_s", "_b") or r.url
# Set Result.title to object id.
if r.url.startswith("http://www.facebook.com/"):
r.title = r.url.split("/")[-1].split("?")[0]
results.append(r)
return results
def profile(self, id=None, **kwargs):
""" For the given author id or alias,
returns a (id, name, date of birth, gender, locale)-tuple.
"""
url = FACEBOOK + (u(id or "me")).replace(FACEBOOK, "")
url = URL(url, method=GET, query={"access_token": self.license})
kwargs.setdefault("cached", False)
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
try:
data = URL(url).download(**kwargs)
data = json.loads(data)
except HTTP400BadRequest:
raise HTTP401Authentication
return (
u(data.get("id", "")),
u(data.get("name", "")),
u(data.get("birthday", "")),
u(data.get("gender", "")[:1]),
u(data.get("locale", ""))
)
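# Example (sketch): searching public Facebook posts.
# Requires a valid access token in api.license["Facebook"]; "pattern" is an arbitrary query.
#fb = Facebook(license=FACEBOOK_LICENSE)
#for post in fb.search("pattern", count=10):
#    print post.author
#    print post.text
#    print post.date
#    print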
#--- PRODUCT REVIEWS -------------------------------------------------------------------------------
PRODUCTWIKI = "http://api.productwiki.com/connect/api.aspx"
PRODUCTWIKI_LICENSE = api.license["Products"]
class Products(SearchEngine):
def __init__(self, license=None, throttle=5.0, language=None):
SearchEngine.__init__(self, license or PRODUCTWIKI_LICENSE, throttle, language)
def search(self, query, type=SEARCH, start=1, count=10, sort=RELEVANCY, size=None, cached=True, **kwargs):
""" Returns a list of results from Productwiki for the given query.
Each Result.reviews is a list of (review, score)-items.
- type : SEARCH,
- start: maximum undefined,
- count: 20,
- sort : RELEVANCY.
There is no daily limit.
"""
if type != SEARCH:
raise SearchEngineTypeError
if not query or start < 1 or count < 1:
return Results(PRODUCTWIKI, query, type)
# 1) Construct request URL.
url = PRODUCTWIKI+"?"
url = URL(url, method=GET, query={
"key": self.license or "",
"q": query,
"page" : start,
"op": "search",
"fields": "proscons", # "description,proscons" is heavy.
"format": "json"
})
# 2) Parse JSON response.
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
data = URL(url).download(cached=cached, **kwargs)
data = json.loads(data)
results = Results(PRODUCTWIKI, query, type)
results.total = None
for x in data.get("products", [])[:count]:
r = Result(url=None)
r.__dict__["title"] = u(x.get("title"))
r.__dict__["text"] = u(x.get("text"))
r.__dict__["reviews"] = []
reviews = x.get("community_review") or {}
for p in reviews.get("pros", []):
r.reviews.append((p.get("text", ""), int(p.get("score")) or +1))
for p in reviews.get("cons", []):
r.reviews.append((p.get("text", ""), int(p.get("score")) or -1))
r.__dict__["score"] = int(sum(score for review, score in r.reviews))
results.append(r)
# Highest score first.
results.sort(key=lambda r: r.score, reverse=True)
return results
#for r in Products().search("tablet"):
# print r.title
# print r.score
# print r.reviews
# print
#--- NEWS FEED -------------------------------------------------------------------------------------
# Based on the Universal Feed Parser by Mark Pilgrim:
# http://www.feedparser.org/
class Newsfeed(SearchEngine):
def __init__(self, license=None, throttle=1.0, language=None):
SearchEngine.__init__(self, license, throttle, language)
def search(self, query, type=NEWS, start=1, count=10, sort=LATEST, size=SMALL, cached=True, **kwargs):
""" Returns a list of results from the given RSS or Atom newsfeed URL.
"""
if type != NEWS:
raise SearchEngineTypeError
if not query or start < 1 or count < 1:
return Results(query, query, NEWS)
# 1) Construct request URL.
# 2) Parse RSS/Atom response.
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
tags = kwargs.pop("tags", [])
data = URL(query).download(cached=cached, **kwargs)
data = feedparser.parse(bytestring(data))
results = Results(query, query, NEWS)
results.total = None
for x in data["entries"][:count]:
s = "\n\n".join([v.get("value") for v in x.get("content", [])]) or x.get("summary")
r = Result(url=None)
r.id = self.format(x.get("id"))
r.url = self.format(x.get("link"))
r.title = self.format(x.get("title"))
r.text = self.format(s)
r.date = self.format(x.get("updated"))
r.author = self.format(x.get("author"))
r.language = self.format(x.get("content") and \
x.get("content")[0].get("language") or \
data.get("language"))
for tag in tags:
# Parse custom tags.
# Newsfeed.search(tags=["dc:identifier"]) => Result.dc_identifier.
tag = tag.replace(":", "_")
r[tag] = self.format(x.get(tag))
results.append(r)
return results
feeds = {
"Nature": "http://feeds.nature.com/nature/rss/current",
"Science": "http://www.sciencemag.org/rss/podcast.xml",
"Herald Tribune": "http://www.iht.com/rss/frontpage.xml",
"TIME": "http://feeds.feedburner.com/time/topstories",
"CNN": "http://rss.cnn.com/rss/edition.rss",
}
#for r in Newsfeed().search(feeds["Nature"]):
# print r.title
# print r.author
# print r.url
# print plaintext(r.text)
# print
#--- QUERY -----------------------------------------------------------------------------------------
def query(string, service=GOOGLE, **kwargs):
""" Returns the list of search query results from the given service.
For service=WIKIPEDIA, this is a single WikipediaArticle or None.
"""
service = service.lower()
if service in (GOOGLE, "google", "g"):
engine = Google
if service in (YAHOO, "yahoo", "y!"):
engine = Yahoo
if service in (BING, "bing"):
engine = Bing
if service in (TWITTER, "twitter"):
engine = Twitter
if service in (FACEBOOK, "facebook", "fb"):
engine = Facebook
if service in (WIKIA, "wikia"):
engine = Wikia
if service in (WIKIPEDIA, "wikipedia", "wp"):
engine = Wikipedia
if service in (FLICKR, "flickr"):
engine = Flickr
try:
kw = {}
for a in ("license", "throttle", "language"):
if a in kwargs:
kw[a] = kwargs.pop(a)
return engine(**kw).search(string, **kwargs)
except UnboundLocalError:
raise SearchEngineError, "unknown search engine '%s'" % service
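# Example (sketch): query() dispatches to a search engine by name.
# Requires network access (and a license key for some services).
#for r in query("berlin", service="bing", count=5):
#    print r.title
#    print r.url
#    print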
#--- WEB SORT --------------------------------------------------------------------------------------
SERVICES = {
GOOGLE : Google,
YAHOO : Yahoo,
BING : Bing,
TWITTER : Twitter,
WIKIPEDIA : Wikipedia,
WIKIA : Wikia,
FLICKR : Flickr,
FACEBOOK : Facebook
}
def sort(terms=[], context="", service=GOOGLE, license=None, strict=True, reverse=False, **kwargs):
""" Returns a list of (percentage, term)-tuples for the given list of terms.
Sorts the terms in the list according to search result count.
When a context is defined, sorts according to relevancy to the context, e.g.:
sort(terms=["black", "green", "red"], context="Darth Vader") =>
yields "black" as the best candidate, because "black Darth Vader" is more common in search results.
- terms : list of search terms,
- context : term used for sorting,
- service : web service name (GOOGLE, YAHOO, BING),
- license : web service license id,
- strict : when True the query constructed from term + context is wrapped in quotes.
"""
service = SERVICES.get(service, SearchEngine)(license, language=kwargs.pop("language", None))
R = []
for word in terms:
q = reverse and context+" "+word or word+" "+context
q = q.strip()
q = strict and "\"%s\"" % q or q
r = service.search(q, count=1, **kwargs)
R.append(r)
s = float(sum([r.total or 1 for r in R])) or 1.0
R = [((r.total or 1)/s, r.query) for r in R]
R = sorted(R, reverse=True)
return R
#print sort(["black", "happy"], "darth vader", GOOGLE)
#### DOCUMENT OBJECT MODEL #########################################################################
# Tree traversal of HTML source code.
# The Document Object Model (DOM) is a cross-platform and language-independent convention
# for representing and interacting with objects in HTML, XHTML and XML documents.
# BeautifulSoup is wrapped in Document, Element and Text classes that resemble the Javascript DOM.
# BeautifulSoup can of course be used directly since it is imported here.
# http://www.crummy.com/software/BeautifulSoup/
SOUP = (
BeautifulSoup.BeautifulSoup,
BeautifulSoup.Tag,
BeautifulSoup.NavigableString,
BeautifulSoup.Comment
)
NODE, TEXT, COMMENT, ELEMENT, DOCUMENT = \
"node", "text", "comment", "element", "document"
#--- NODE ------------------------------------------------------------------------------------------
class Node:
def __init__(self, html, type=NODE, **kwargs):
""" The base class for Text, Comment and Element.
All DOM nodes can be navigated in the same way (e.g. Node.parent, Node.children, ...)
"""
self.type = type
self._p = not isinstance(html, SOUP) and BeautifulSoup.BeautifulSoup(u(html), **kwargs) or html
@property
def _beautifulSoup(self):
# If you must, access the BeautifulSoup object with Node._beautifulSoup.
return self._p
def __eq__(self, other):
# Two Node objects containing the same BeautifulSoup object are the same.
return isinstance(other, Node) and hash(self._p) == hash(other._p)
def _wrap(self, x):
# Navigating to other nodes yields either Text, Element or None.
if isinstance(x, BeautifulSoup.Comment):
return Comment(x)
if isinstance(x, BeautifulSoup.Declaration):
return Text(x)
if isinstance(x, BeautifulSoup.NavigableString):
return Text(x)
if isinstance(x, BeautifulSoup.Tag):
return Element(x)
@property
def parent(self):
return self._wrap(self._p.parent)
@property
def children(self):
return hasattr(self._p, "contents") and [self._wrap(x) for x in self._p.contents] or []
@property
def html(self):
return self.__unicode__()
@property
def source(self):
return self.__unicode__()
@property
def next_sibling(self):
return self._wrap(self._p.nextSibling)
@property
def previous_sibling(self):
return self._wrap(self._p.previousSibling)
next, previous = next_sibling, previous_sibling
def traverse(self, visit=lambda node: None):
""" Executes the visit function on this node and each of its child nodes.
"""
visit(self); [node.traverse(visit) for node in self.children]
def __len__(self):
return len(self.children)
def __iter__(self):
return iter(self.children)
def __getitem__(self, index):
return self.children[index]
def __repr__(self):
return "Node(type=%s)" % repr(self.type)
def __str__(self):
return bytestring(self.__unicode__())
def __unicode__(self):
return u(self._p)
#--- TEXT ------------------------------------------------------------------------------------------
class Text(Node):
""" Text represents a chunk of text without formatting in a HTML document.
For example: "the <b>cat</b>" is parsed to [Text("the"), Element("cat")].
"""
def __init__(self, string):
Node.__init__(self, string, type=TEXT)
def __repr__(self):
return "Text(%s)" % repr(self._p)
class Comment(Text):
""" Comment represents a comment in the HTML source code.
For example: "<!-- comment -->".
"""
def __init__(self, string):
Node.__init__(self, string, type=COMMENT)
def __repr__(self):
return "Comment(%s)" % repr(self._p)
#--- ELEMENT ---------------------------------------------------------------------------------------
class Element(Node):
def __init__(self, html):
""" Element represents an element or tag in the HTML source code.
For example: "<b>hello</b>" is a "b"-Element containing a child Text("hello").
"""
Node.__init__(self, html, type=ELEMENT)
@property
def tagname(self):
return self._p.name
tag = tagName = tagname
@property
def attributes(self):
return self._p._getAttrMap()
@property
def id(self):
return self.attributes.get("id")
def get_elements_by_tagname(self, v):
""" Returns a list of nested Elements with the given tag name.
The tag name can include a class (e.g. div.header) or an id (e.g. div#content).
"""
if isinstance(v, basestring) and "#" in v:
v1, v2 = v.split("#")
v1 = v1 in ("*","") or v1.lower()
return [Element(x) for x in self._p.findAll(v1, id=v2)]
if isinstance(v, basestring) and "." in v:
v1, v2 = v.split(".")
v1 = v1 in ("*","") or v1.lower()
return [Element(x) for x in self._p.findAll(v1, v2)]
return [Element(x) for x in self._p.findAll(v in ("*","") or v.lower())]
by_tag = getElementsByTagname = get_elements_by_tagname
def get_element_by_id(self, v):
""" Returns the first nested Element with the given id attribute value.
"""
return ([Element(x) for x in self._p.findAll(id=v, limit=1) or []]+[None])[0]
by_id = getElementById = get_element_by_id
def get_elements_by_classname(self, v):
""" Returns a list of nested Elements with the given class attribute value.
"""
return [Element(x) for x in (self._p.findAll(True, v))]
by_class = getElementsByClassname = get_elements_by_classname
def get_elements_by_attribute(self, **kwargs):
""" Returns a list of nested Elements with the given attribute value.
"""
return [Element(x) for x in (self._p.findAll(True, attrs=kwargs))]
by_attribute = getElementsByAttribute = get_elements_by_attribute
@property
def content(self):
""" Yields the element content as a unicode string.
"""
return u"".join([u(x) for x in self._p.contents])
@property
def source(self):
""" Yields the HTML source as a unicode string (tag + content).
"""
return u(self._p)
html = source
def __getattr__(self, k):
if k in self.__dict__:
return self.__dict__[k]
if k in self.attributes:
return self.attributes[k]
raise AttributeError, "'Element' object has no attribute '%s'" % k
def __repr__(self):
return "Element(tag='%s')" % bytestring(self.tagname)
#--- DOCUMENT --------------------------------------------------------------------------------------
class Document(Element):
def __init__(self, html, **kwargs):
""" Document is the top-level element in the Document Object Model.
It contains nested Element, Text and Comment nodes.
"""
# Aliases for BeautifulSoup optional parameters:
kwargs["selfClosingTags"] = kwargs.pop("self_closing", kwargs.get("selfClosingTags"))
Node.__init__(self, u(html).strip(), type=DOCUMENT, **kwargs)
@property
def declaration(self):
""" Yields the <!doctype> declaration, as a TEXT Node or None.
"""
for child in self.children:
if isinstance(child._p, BeautifulSoup.Declaration):
return child
@property
def head(self):
return self._wrap(self._p.head)
@property
def body(self):
return self._wrap(self._p.body)
@property
def tagname(self):
return None
tag = tagname
def __repr__(self):
return "Document()"
DOM = Document
#article = Wikipedia().search("Document Object Model")
#dom = DOM(article.html)
#print dom.get_element_by_id("References").source
#print [element.attributes["href"] for element in dom.get_elements_by_tagname("a")]
#print dom.get_elements_by_tagname("p")[0].next.previous.children[0].parent.__class__
#print
#### WEB CRAWLER ###################################################################################
# Tested with a crawl across 1,000 domains so far.
class Link:
def __init__(self, url, text="", relation="", referrer=""):
""" A hyperlink parsed from a HTML document, in the form:
<a href="url"", title="text", rel="relation">xxx</a>.
"""
self.url, self.text, self.relation, self.referrer = \
u(url), u(text), u(relation), u(referrer),
@property
def description(self):
return self.text
def __repr__(self):
return "Link(url=%s)" % repr(self.url)
# Used for sorting in Spider.links:
def __eq__(self, link):
return self.url == link.url
def __ne__(self, link):
return self.url != link.url
def __lt__(self, link):
return self.url < link.url
def __gt__(self, link):
return self.url > link.url
class HTMLLinkParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
def parse(self, html, url=""):
""" Returns a list of Links parsed from the given HTML string.
"""
if html is None:
return None
self._url = url
self._data = []
self.feed(self.clean(html))
self.close()
self.reset()
return self._data
def handle_starttag(self, tag, attributes):
if tag == "a":
attributes = dict(attributes)
if "href" in attributes:
link = Link(url = attributes.get("href"),
text = attributes.get("title"),
relation = attributes.get("rel", ""),
referrer = self._url)
self._data.append(link)
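# Example (sketch): extracting hyperlinks from a string of HTML.
# Note that Link.text is filled from the <a title=""> attribute.
#html = '<a href="/wiki/Web_crawler" title="Web crawler" rel="nofollow">crawlers</a>'
#for link in HTMLLinkParser().parse(html, url="http://en.wikipedia.org/wiki/Spider"):
#    print link.url, link.text, link.relation, link.referrer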
def base(url):
""" Returns the URL domain name:
http://en.wikipedia.org/wiki/Web_crawler => en.wikipedia.org
"""
return urlparse.urlparse(url).netloc
def abs(url, base=None):
""" Returns the absolute URL:
../media + http://en.wikipedia.org/wiki/ => http://en.wikipedia.org/media
"""
if url.startswith("#") and not base is None and not base.endswith("/"):
if not re.search("[^/]/[^/]", base):
base += "/"
return urlparse.urljoin(base, url)
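# Example (sketch): resolving a relative URL against its base.
#print base("http://en.wikipedia.org/wiki/Web_crawler")      # "en.wikipedia.org"
#print abs("../media", base="http://en.wikipedia.org/wiki/") # "http://en.wikipedia.org/media"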
DEPTH = "depth"
BREADTH = "breadth"
FIFO = "fifo" # First In, First Out.
FILO = "filo" # First In, Last Out.
LIFO = "lifo" # Last In, First Out (= FILO).
class Spider:
def __init__(self, links=[], domains=[], delay=20.0, parser=HTMLLinkParser().parse, sort=FIFO):
""" A spider can be used to browse the web in an automated manner.
It visits the list of starting URLs, parses links from their content, visits those, etc.
- Links can be prioritized by overriding Spider.priority().
- Links can be ignored by overriding Spider.follow().
- Each visited link is passed to Spider.visit(), which can be overridden.
"""
self.parse = parser
self.delay = delay # Delay between visits to the same (sub)domain.
self.domains = domains # Domains the spider is allowed to visit.
self.history = {} # Domain name => time last visited.
self.visited = {} # URLs visited.
self._queue = [] # URLs scheduled for a visit: (priority, time, Link).
self._queued = {} # URLs scheduled so far, lookup dictionary.
self.QUEUE = 10000 # Increase or decrease according to available memory.
self.sort = sort
# Queue given links in given order:
for link in (isinstance(links, basestring) and [links] or links):
self.push(link, priority=1.0, sort=FIFO)
@property
def done(self):
""" Yields True if no further links are scheduled to visit.
"""
return len(self._queue) == 0
def push(self, link, priority=1.0, sort=FILO):
""" Pushes the given link to the queue.
Position in the queue is determined by priority.
Equal ranks are sorted FIFO or FILO.
With priority=1.0 and FILO, the link is inserted at the front of the queue.
With priority=0.0 and FIFO, the link is appended at the end of the queue.
"""
if not isinstance(link, Link):
link = Link(url=link)
dt = time.time()
dt = sort == FIFO and dt or 1 / dt
bisect.insort(self._queue, (1 - priority, dt, link))
self._queued[link.url] = True
def pop(self, remove=True):
""" Returns the next Link queued to visit and removes it from the queue.
Links on a recently visited (sub)domain are skipped until Spider.delay has elapsed.
"""
now = time.time()
for i, (priority, dt, link) in enumerate(self._queue):
if self.delay <= now - self.history.get(base(link.url), 0):
if remove is True:
self._queue.pop(i)
self._queued.pop(link.url, None)
return link
@property
def next(self):
""" Returns the next Link queued to visit (without removing it).
"""
return self.pop(remove=False)
def crawl(self, method=DEPTH, **kwargs):
""" Visits the next link in Spider._queue.
If the link is on a domain recently visited (< Spider.delay) it is skipped.
Parses the content at the link for new links and adds them to the queue,
according to their Spider.priority().
Visited links (and content) are passed to Spider.visit().
"""
link = self.pop()
if link is None:
return False
if link.url not in self.visited:
t = time.time()
url = URL(link.url)
if url.mimetype == "text/html":
try:
kwargs.setdefault("unicode", True)
html = url.download(**kwargs)
for new in self.parse(html, url=link.url):
new.url = abs(new.url, base=url.redirect or link.url)
new.url = self.normalize(new.url)
# 1) Parse new links from HTML web pages.
# 2) Schedule unknown links for a visit.
# 3) Only links that are not already queued are queued.
# 4) Only links for which Spider.follow() is True are queued.
# 5) Only links on Spider.domains are queued.
if new.url in self.visited:
continue
if new.url in self._queued:
continue
if self.follow(new) is False:
continue
if self.domains and not base(new.url).endswith(tuple(self.domains)):
continue
# 6) Limit the queue (remove tail), unless you are Google.
if self.QUEUE is not None and \
self.QUEUE * 1.25 < len(self._queue):
self._queue = self._queue[:self.QUEUE]
self._queued.clear()
self._queued.update(dict((q[2].url, True) for q in self._queue))
# 7) Position in the queue is determined by Spider.priority().
# 8) Equal ranks are sorted FIFO or FILO.
self.push(new, priority=self.priority(new, method=method), sort=self.sort)
self.visit(link, source=html)
except URLError:
# URL can not be reached (HTTP404NotFound, URLTimeout).
self.fail(link)
else:
# URL MIME-type is not HTML, don't know how to handle.
self.fail(link)
# Log the current time visited for the domain (see Spider.pop()).
# Log the URL as visited.
self.history[base(link.url)] = time.time()
self.visited[link.url] = True
return True
# Nothing happened, we already visited this link.
return False
def normalize(self, url):
""" Called from Spider.crawl() to normalize URLs.
For example: return url.split("?")[0]
"""
# All links pass through here (visited or not).
# This can be a place to count backlinks.
return url
def follow(self, link):
""" Called from Spider.crawl() to determine if it should follow this link.
For example: return "nofollow" not in link.relation
"""
return True
def priority(self, link, method=DEPTH):
""" Called from Spider.crawl() to determine the priority of this link,
as a number between 0.0-1.0. Links with higher priority are visited first.
"""
# Depth-first search dislikes external links to other (sub)domains.
external = base(link.url) != base(link.referrer)
if external is True:
if method == DEPTH:
return 0.75
if method == BREADTH:
return 0.85
return 0.80
def visit(self, link, source=None):
""" Called from Spider.crawl() when the link is crawled.
When source=None, the link is not a web page (and was not parsed),
or possibly a URLTimeout occurred (content size too big).
"""
pass
def fail(self, link):
""" Called from Spider.crawl() for link whose MIME-type could not be determined,
or which raised a URLError on download.
"""
pass
#class Spiderling(Spider):
# def visit(self, link, source=None):
# print "visited:", link.url, "from:", link.referrer
# def fail(self, link):
# print "failed:", link.url
#
#s = Spiderling(links=["http://nodebox.net/"], domains=["nodebox.net"], delay=5)
#while not s.done:
# s.crawl(method=DEPTH, cached=True, throttle=5)
#--- CRAWL FUNCTION --------------------------------------------------------------------------------
# Functional approach to crawling.
Crawler = Spider
def crawl(links=[], domains=[], delay=20.0, parser=HTMLLinkParser().parse, sort=FIFO, method=DEPTH, **kwargs):
""" Returns a generator that yields (Link, source)-tuples of visited pages.
When the crawler is busy, it yields (None, None).
When the crawler is done, the generator stops.
"""
# The scenarios below define "busy":
# - crawl(delay=10, throttle=0)
# The crawler will wait 10 seconds before visiting the same subdomain.
# The crawler will not throttle downloads, so the next link is visited instantly.
# So sometimes (None, None) is returned while it waits for an available subdomain.
# - crawl(delay=0, throttle=10)
# The crawler will halt 10 seconds after each visit.
# The crawler will not delay before visiting the same subdomain.
# So usually a result is returned each crawl.next(), but each call takes 10 seconds.
# - asynchronous(crawl().next)
# AsynchronousRequest.value is set to (Link, source) once AsynchronousRequest.done=True.
# The program will not halt in the meantime (i.e., the next crawl is threaded).
crawler = Crawler(links, domains, delay, parser, sort)
bind(crawler, "visit", \
lambda crawler, link, source=None: \
setattr(crawler, "crawled", (link, source))) # Define Crawler.visit() on-the-fly.
while not crawler.done:
crawler.crawled = (None, None)
crawler.crawl(method, **kwargs)
yield crawler.crawled
#for link, source in crawl("http://www.nodebox.net/", delay=0, throttle=10):
# print link
#g = crawl("http://www.nodebox.net/")
#for i in range(10):
# p = asynchronous(g.next)
# while not p.done:
# print "zzz..."
# time.sleep(0.1)
# link, source = p.value
# print link
#### PDF PARSER ####################################################################################
# Yusuke Shinyama, PDFMiner, http://www.unixuser.org/~euske/python/pdfminer/
class PDFParseError(Exception):
pass
class PDF:
def __init__(self, data, format=None):
""" Plaintext parsed from the given PDF data.
"""
self.content = self._parse(data, format)
@property
def string(self):
return self.content
def __unicode__(self):
return self.content
def _parse(self, data, format=None):
# The output will be ugly: it may be useful for mining but probably not for displaying.
# You can also try PDF(data, format="html") to preserve some layout information.
from pdf.pdfinterp import PDFResourceManager, process_pdf
from pdf.converter import TextConverter, HTMLConverter
from pdf.layout import LAParams
s = ""
m = PDFResourceManager()
try:
# Given data is a PDF file path.
data = os.path.exists(data) and open(data) or StringIO.StringIO(data)
except TypeError:
# Given data is a PDF string.
data = StringIO.StringIO(data)
try:
stream = StringIO.StringIO()
parser = format=="html" and HTMLConverter or TextConverter
parser = parser(m, stream, codec="utf-8", laparams=LAParams())
process_pdf(m, parser, data, set(), maxpages=0, password="")
except Exception, e:
raise PDFParseError, str(e)
s = stream.getvalue()
s = decode_utf8(s)
s = s.strip()
s = re.sub(r"([a-z])\-\n", "\\1", s) # Join hyphenated words.
s = s.replace("\n\n", "<!-- paragraph -->") # Preserve paragraph spacing.
s = s.replace("\n", " ")
s = s.replace("<!-- paragraph -->", "\n\n")
s = collapse_spaces(s)
return s
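# Example (sketch): extracting plain text from a local PDF file.
# "paper.pdf" is a placeholder path; requires the bundled PDFMiner package (pdf/).
#pdf = PDF(open("paper.pdf", "rb").read())
#print pdf.string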
#### DBPedia Search Engine ####################################################################################
# Kenneth Koch kkoch986@gmail.com 2013
# the result object returned by this will have the following functions set (see RDFResult class)
# result.vars() => a list containing the names of all variables bound in this result
# result.types(var) => the type for <var> in this row
# result.data(var) => the value of <var> in this result
#
# Some Usage Examples:
# >>> from pattern.web import DBPedia
# >>> DBPedia().search('select distinct ?Concept where {[] a ?Concept}')
# >>> results = DBPedia().search('PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> PREFIX : <http://dbpedia.org/resource/> select * where {?a rdf:type ?s.}',"SPARQL", 1,1)
# >>>
# >>> for row in results:
# ... print "Entity: " + row.data('a') + " ( " + row.data('s') + " )"
# ... print row.download('a')
class DBPedia(SearchEngine):
def __init__(self, license=None, throttle=0.5, language=None):
SearchEngine.__init__(self, license, throttle, language)
def search(self, query, type="SPARQL", start=1, count=10, sort=RELEVANCY, size=None, cached=False, **kwargs):
query = query + " OFFSET " + start.__str__() + " LIMIT " + count.__str__()
# Search will accept a db pedia sparql query
url = URL("http://dbpedia.org/sparql?", method=GET)
url.query = {
"query": query,
"format": "json"
}
try:
data = URL(url).download(cached=cached, timeout=30, **kwargs)
data = json.loads(data)
except HTTP400BadRequest:
raise SyntaxError("Invalid Query Syntax")
# now build a results object to return the results
total = data["results"]["bindings"].__len__()
results = Results(url, query, type, total)
vars = data["head"]["vars"]
for row in data["results"]["bindings"]:
types = dict((k,row[k]["type"]) for k in vars)
data = dict((k,row[k]["value"]) for k in vars)
result = RDFResult(vars, types, data)
results.append(result)
return results
class RDFResult:
def __init__(self, vars, types, data):
self._vars = vars
self._types = types
self._data = data
def data(self, var = None):
if(var is None):
return self._data
else:
return self._data[var]
def types(self, var = None):
if(var is None):
return self._types
else:
return self._types[var]
def vars(self):
return self._vars
def download(self, var, *args, **kwargs):
if(self.types(var).lower() != "uri"):
raise TypeError(var + " is not a URI.")
return URL(self.data(var)).download(*args, **kwargs)
launch_live.py
""" Script for running pyspace live controlling
.. image:: ../../graphics/launch_live.png
:width: 500
A script for running pyspace live. The script contains
a class to control the other related classes needed in the online mode,
and several methods that are used for the general startup of the suite.
"""
import sys
import os
import time
import traceback
import logging
import yaml
import datetime
import optparse
import multiprocessing
from collections import defaultdict
from select import select
file_path = os.path.dirname(os.path.realpath(__file__))
pyspace_path = file_path[:file_path.rfind('pySPACE')-1]
if not pyspace_path in sys.path:
sys.path.append(pyspace_path)
import pySPACE
# create logger with handlers
from pySPACE.environments.live import online_utilities
online_logger = logging.getLogger("OnlineLogger")
class LiveController(object):
""" Controlling suite.
This class provides a clean interface to the live environment.
It contains objects of the classes that are used
for the online mode and configures them as needed.
The controller uses the config files for user-related configuration,
and additional parameter files for scenario/task-specific parameterization.
"""
def __init__(self, parameters_, live_processing=None):
# create two level defaultdict for parameters!
parameters = defaultdict(lambda : defaultdict(lambda:None))
for key,value in parameters_.items():
#if the dict value is also a dict, convert this also to a defaultdict
if type(value) is dict:
value = self.convert_dict_to_defaultdict(value)
parameters[key] = value
# fetch parameters that would be nice to have
datafile_info = parameters["data_files"]
data_source = parameters["data_source"]
potentials = parameters["potentials"]
self.flow_persistency_directory = parameters["flow_persistency_directory"]
self.prewindowed_data_directory = parameters["prewindowed_data_directory"]
self.datafile_train = datafile_info["eeg_data_file_train"]
self.datafile_test = datafile_info["eeg_data_file_test"]
self.eeg_server_nullmarker_stride_ms = data_source["nullmarker_stride_ms"]
self.eeg_server_eeg_port = data_source["default_port"]
self.eeg_server_offline_predict_ip = data_source["predict_offline"]["ip"]
self.eeg_server_predict_ip = data_source["predict"]["ip"]
self.eeg_server_train_ip = data_source["train"]["ip"]
self.eeg_server_prewindow_ip = data_source["prewindow"]["ip"]
self.eeg_server_record = data_source["record"]
# settings for recording
self.subject = parameters["record"]["subject"]
self.experiment = parameters["record"]["experiment"]
# try to fetch optional parameters, set them to None if not present
# live_server_info = parameters["live_server"]
# self.live_server_ip = live_server_info["live_server_ip"]
# self.live_xmlrpc_port = live_server_info["live_xmlrpc_port"]
self.configuration = pySPACE.configuration
self.label = None
self.prediction_process = None
self.live_processing = None
self.live_prewindower = None
# figure out all potentials and
# store all relevant information
self.erps = dict()
if not isinstance(potentials, list):
for (_, potential) in potentials.iteritems():
if type(potential) is dict:
potential = self.convert_dict_to_defaultdict(potential)
potential["configuration"] = self.configuration
self.erps[potential["flow_id"]] = potential
else:
for potential in potentials:
if type(potential) is dict:
potential = self.convert_dict_to_defaultdict(potential)
potential["configuration"] = self.configuration
# online_logger.info(potential)
self.erps[potential["flow_id"]] = potential
results_to_send = []
for key in self.erps:
if self.erps[key].has_key('online_result_messenger') and self.erps[key]['online_result_messenger']:
results_to_send.append(key)
if live_processing == None:
if len(results_to_send) > 0:
self.messenger = pySPACE.environments.live.communication.socket_messenger.SocketMessenger(key=results_to_send)
online_logger.info('Prediction results from %s will be sent via socket.', ' and '.join(results_to_send))
else:
self.messenger = pySPACE.environments.live.communication.log_messenger.LogMessenger()
# self.messenger = pySPACE.environments.live.communication.socket_messenger.SocketMessenger(key="P3")
# self.messenger = pySPACE.environments.live.communication.socket_messenger.EmbeddedSocketManager(key="P3")
else:
self.messenger = live_processing
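# Example (sketch): minimal shape of the parameters dict expected by LiveController.
# All values below are placeholders; real setups load them from a YAML file
# in the live_settings spec directory (see read_parameter_file below).
#parameters = {
#    "data_files": {"eeg_data_file_train": "train.eeg", "eeg_data_file_test": "test.eeg"},
#    "data_source": {"nullmarker_stride_ms": 1000, "default_port": 51244,
#                    "predict_offline": {"ip": "127.0.0.1"}, "predict": {"ip": "127.0.0.1"},
#                    "train": {"ip": "127.0.0.1"}, "prewindow": {"ip": "127.0.0.1"},
#                    "record": {"ip": "127.0.0.1", "port": 51244}},
#    "potentials": [{"flow_id": "P3"}],
#    "flow_persistency_directory": "flow_storage",
#    "prewindowed_data_directory": "prewindowed_data",
#    "record": {"subject": "subject1", "experiment": "experiment1"},
#}
#controller = LiveController(parameters)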
def convert_dict_to_defaultdict(self, dict_to_convert):
if type(dict_to_convert) is not dict:
online_logger.warn('CARE: trying to convert a %s to defaultdict' % type(dict_to_convert))
return dict_to_convert
mydefaultdict = defaultdict(lambda:None)
for key in dict_to_convert:
mydefaultdict[key] = dict_to_convert[key]
return mydefaultdict
def prewindowing(self, online = True):
""" Prewindows the pyspace flows on the data streamed from
an external EEG-Server
"""
online_logger.info("starting prewindowing")
# Create prewindower
self.live_prewindower = trainer.LiveTrainer()
# online_logger.info(self.erps)
prewindowing_files = []
if online:
# create the stream manager
stream_manager = eeg_stream_manager.LiveEegStreamManager(online_logger)
stream_manager.initialize_eeg_server(ip=self.eeg_server_prewindow_ip,
port=self.eeg_server_eeg_port)
# setup recording if option is set
if self.subject is not None and \
self.experiment is not None:
stream_manager.record_with_options(self.subject, self.experiment)
else:
online_logger.error("RAW DATA IS NOT RECORDED!")
# set the stream manager into the trainer object
self.live_prewindower.set_eeg_stream_manager(stream_manager)
# in online case just connect to the streaming server
self.live_prewindower.prepare_training(prewindowing_files,
self.erps,
"prewindowing",
nullmarker_stride_ms = self.eeg_server_nullmarker_stride_ms)
else:
# when running offline prepare local streaming
if isinstance(self.datafile_train, str):
prewindowing_files = \
os.path.join(self.configuration.storage, self.datafile_train)
else:
for datafile in self.datafile_train:
if os.path.isabs(datafile):
prewindowing_files = prewindowing_files + [datafile]
else:
prewindowing_files = prewindowing_files + \
[os.path.join(self.configuration.storage, datafile)]
online_logger.info("prewindowing files:")
online_logger.info(prewindowing_files)
self.live_prewindower.prepare_training(prewindowing_files,
self.erps,
"prewindowing_offline",
nullmarker_stride_ms = self.eeg_server_nullmarker_stride_ms)
self.start_prewindowing(online)
def start_prewindowing(self, online = True):
""" Start the prewindowing process """
online_logger.info("Start prewindowing")
self.live_prewindower.start_training("prewindowing") # pass an additional True for profiling
def stop_prewindowing(self):
""" Create pyspace live processing server """
self.live_prewindower.process_external_command("STOP")
def prewindowed_train(self):
""" Trains the pyspace flows which have been prewindowed using the prewindower"""
# Create trainer and initialize the eeg data stream
pw_trainer = trainer.LiveTrainer()
postprocessing_files = []
pw_trainer.prepare_training(postprocessing_files,
self.erps,
"prewindowed_train",
nullmarker_stride_ms = self.eeg_server_nullmarker_stride_ms)
# Let pyspace live train on this data
online_logger.info("Start pyspace live training")
pw_trainer.start_training("prewindowed_train") # pass an additional True for profiling
def train(self):
""" Trains the pyspace flows on the data streamed from
an external EEG-Server
"""
# Create trainer and initialize the eeg data stream
online_trainer = trainer.LiveTrainer()
stream_manager = \
eeg_stream_manager.LiveEegStreamManager(online_logger)
stream_manager.initialize_eeg_server(ip=self.eeg_server_train_ip,
port=self.eeg_server_eeg_port)
# Prepare trainer for training
online_trainer.set_eeg_stream_manager(stream_manager)
training_files = []
if isinstance(self.datafile_train, str):
training_files = self.datafile_train
else:
for datafile in self.datafile_train:
if os.path.isabs(datafile):
datafile_train = [datafile]
else:
datafile_train = \
[os.path.join(self.configuration.storage, datafile)]
training_files = training_files + datafile_train
online_logger.info(training_files)
online_logger.info("#"*30)
online_trainer.prepare_training(training_files,
self.erps,
"train",
nullmarker_stride_ms = self.eeg_server_nullmarker_stride_ms)
# Let pyspace live train on this data
online_logger.info("Start pyspace live training")
online_trainer.start_training("train") # pass an additional True for profiling
def adapt_classification_threshold(self, load_model=True):
""" Adapts classification threshold on a special function """
# Create pyspace live processing server
live_adaptor = adaptation.LiveAdaptor()
# Reloading stored models
if load_model:
online_logger.info("Reloading Models")
live_adaptor.load_model(self.flow_persistency_directory, self.erps)
online_logger.info("Creating eeg stream")
# Start EEG server that streams data for testing
stream_manager = \
eeg_stream_manager.LiveEegStreamManager(online_logger)
stream_manager.initialize_eeg_server(ip=self.eeg_server_train_ip,
port=self.eeg_server_eeg_port)
# Prepare live_adaptor for adaptation
live_adaptor.set_eeg_stream_manager(stream_manager)
adaptation_files = []
if isinstance(self.datafile_train, str):
adaptation_files = self.datafile_train
else:
for datafile in self.datafile_train:
if os.path.isabs(datafile):
datafile_train = [datafile]
else:
datafile_train = \
[os.path.join(self.configuration.storage, datafile)]
adaptation_files = adaptation_files + datafile_train
online_logger.info(adaptation_files)
online_logger.info("#"*30)
live_adaptor.prepare_adaptation(adaptation_files,
self.erps)
# Prepare for adaptation
# register the pyspace live module with the ControlManager
# Let pyspace live train on this data
online_logger.info("Start pyspace live adaptation")
live_adaptor.start_adaptation()
# We block and wait until either adaptation is finished or
# the user enters a 'X'
time.sleep(5)
try:
while live_adaptor.is_adaptation_active():
time.sleep(1)
except Exception as exc:
online_logger.log(logging.ERROR,"Training interrupted")
exc_type, exc_value, exc_traceback = sys.exc_info()
online_logger.log(logging.ERROR, repr(
traceback.format_exception(
exc_type, exc_value, exc_traceback)))
online_logger.log(logging.ERROR, str(exc))
live_adaptor.stop_adaptation()
online_logger.info("Adaptation Finished")
def predict(self, load_model = True, online = True, remote = False):
""" Classifies new instances based on the trained pyspace flows"""
# do all preparations only if there is no prepared prediction process
if self.live_processing == None:
# create pyspace live processing server
self.live_processing = prediction.Predictor(self.messenger)
self.live_processing.set_controller(self)
self.prediction_process = self.live_processing
# reloading stored models
if load_model:
online_logger.info("Reloading Models")
self.live_processing.load_model(self.flow_persistency_directory, self.erps)
# connect to the server
if online:
# init eeg streaming and recording
stream_manager = eeg_stream_manager.LiveEegStreamManager(online_logger)
stream_manager.initialize_eeg_server(ip=self.eeg_server_predict_ip,
port=self.eeg_server_eeg_port)
# set the stream manager into the trainer object
self.live_processing.set_eeg_stream_manager(stream_manager)
# prepare the prediction
self.live_processing.prepare_predicting(self.erps, nullmarker_stride_ms=self.eeg_server_nullmarker_stride_ms)
# setup recording if option is set
if self.subject is not None and self.experiment is not None:
stream_manager.record_with_options(self.subject, self.experiment, online=True)
else:
online_logger.warn("RAW DATA IS NOT RECORDED!")
else:
# when running offline prepare local streaming
if isinstance(self.datafile_test, str):
testing_file = \
os.path.join(self.configuration.storage, self.datafile_test)
elif isinstance(self.datafile_test, list):
testing_file = \
os.path.join(self.configuration.storage, self.datafile_test[0])
else:
raise Exception, "could not determine testing data!"
online_logger.info(str("testing file: %s " % testing_file))
self.live_processing.prepare_predicting(self.erps, testing_file, nullmarker_stride_ms = self.eeg_server_nullmarker_stride_ms)
online_logger.info("Finished")
if not remote and not online:
raw_input("\nPress Enter to start predicting ")
# Let pyspace live classify the test data
online_logger.info("Start pyspace live classification")
# pass an additional True for profiling
self.live_processing.start_predicting()
def stop_prediction(self):
# stop running prediction
self.live_processing.process_external_command("STOP")
def record(self):
# just record incoming data - press enter to stop
online_logger.info("recording .. (press enter to stop)")
data_stream = eeg_stream_manager.LiveEegStreamManager(online_logger)
data_stream.initialize_eeg_server(**self.eeg_server_record)
window_stream = data_stream.request_window_stream(window_spec=None, no_overlap=True)
data_stream.record_with_options(subject=self.subject,
experiment=self.experiment,
online=False)
for window, label in window_stream:
if select([sys.stdin], [], [], 0) == ([sys.stdin], [], []):
break
online_logger.info("Received window with label %s" % label)
data_stream.stop()
online_logger.info("stopped recording!")
def parse_arguments():
""" Parses the command line arguments to create options object"""
usage = "Usage: %prog [--config <configuration.yaml>] "\
"[--params <params.yaml>] "
parser = optparse.OptionParser(usage=usage)
parser.add_option("-c", "--configuration",
help="Choose the configuration file",
action="store")
parser.add_option("-p", "--params",
help="Specify parameter file that contains information about the data and environment",
action="store")
parser.add_option("-t","--train",
help="Train a flow according to parameters in parameter file",
action="store_true",
dest="train",
default=False)
parser.add_option("--prewindowing",
help="Prewindow a flow according to parameters in parameter file",
action="store_true",
dest="prewindowing",
default=False)
parser.add_option("--prewindowing_offline",
help="Prewindow an offline flow for test purpose",
action="store_true",
dest="prewindowing_offline",
default=False)
parser.add_option("--prewindowed_train",
help="Train a prewindowed flow according to parameters in parameter file",
action="store_true",
dest="prewindowed_train",
default=False)
parser.add_option("-a","--adapt",
help="Adapt the threshold of the flow according to parameters in parameter file",
action="store_true",
dest="adapt",
default=False)
parser.add_option("--predict",
help="Predict with trained flow",
action="store_true",
dest="predict",
default=False)
parser.add_option("--predict_offline",
help="Prediction using an offline flow for testing purposes",
action="store_true",
dest="predict_offline",
default=False)
parser.add_option("--all",
help="First train a flow according to parameters in parameter file and then do prediction using the trained flow",
action="store_true",
dest="all",
default=False)
parser.add_option("--remote",
help="Start remote control",
action="store_true",
dest="remote",
default=False)
parser.add_option("--record",
help="Just record data into the specified storage dir",
action="store_true",
dest="record",
default=False)
(parse_options, parse_args) = parser.parse_args()
return (parse_options, parse_args)
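# Example (sketch): typical invocations of this script; file names are placeholders
# (the parameter file is resolved relative to <spec_dir>/live_settings):
#   python launch_live.py --config config.yaml --params example_params.yaml --prewindowing
#   python launch_live.py -c config.yaml -p example_params.yaml --predict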
def read_parameter_file(parameter_file_name):
""" Reads and interprets the given parameter file """
# interpret parameter file
online_logger.info(parameter_file_name)
param_path = os.path.join(pySPACE.configuration.spec_dir, "live_settings", parameter_file_name)
stream = file(param_path, 'r')
online_logger.info( "Loading parameter file..")
parameters = yaml.load(stream)
online_logger.info( "Done.")
online_logger.debug(yaml.dump(parameters))
return parameters
def create_and_start_rpc_server(controller_instance, rpc_port=16254):
""" Creates and starts the server for the remote procedure calls """
# starting rpc server
rpc_server_ip = "localhost"
rpc_server_port = rpc_port
online_logger.info(str("Starting RPC server on port %d .." % rpc_server_port))
from SimpleXMLRPCServer import SimpleXMLRPCServer
server = \
SimpleXMLRPCServer((rpc_server_ip, rpc_server_port), logRequests=False)
online_logger.info( "RPCServer listens on "+str(rpc_server_ip)+":"+str(rpc_server_port))
# register and start
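# register_instance() exposes the controller's public methods over XML-RPC, so a remote
# client (e.g. xmlrpclib.ServerProxy("http://localhost:16254")) can invoke methods such
# as train() or predict() on the running controller (client example is illustrative).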
server.register_instance(controller_instance)
server_process = multiprocessing.Process(target = server.serve_forever)
server_process.start()
return server_process
def create_backup(liveControl, options):
"""Create backup files"""
online_logger.info( "Creating backup...")
#path to be created
path = os.path.realpath(__file__)
dir_path = os.path.dirname(path)
newdir = dir_path + os.path.sep + "backup"
if not os.path.exists(newdir):
os.makedirs (newdir)
date_time = datetime.datetime.now()
path_datetime = newdir + os.path.sep + date_time.strftime("%Y%m%d_%H%M%S")
os.mkdir (path_datetime)
path_flow = path_datetime + os.path.sep + "flow_storage"
path_node_chain = path_datetime + os.path.sep + "node_chains"
path_windower = path_datetime + os.path.sep + "windower"
path_param = path_datetime + os.path.sep + "live_settings"
os.mkdir (path_flow)
os.mkdir (path_node_chain)
os.mkdir (path_windower)
os.mkdir (path_param)
import distutils.dir_util
distutils.dir_util.copy_tree(
liveControl.flow_persistency_directory, path_flow)
if os.path.isdir (path_flow):
online_logger.info( "flow storage backup successful!")
param_path = os.path.join(pySPACE.configuration.spec_dir, "live_settings", options.params)
if param_path is None:
return
distutils.file_util.copy_file(param_path, path_param)
if os.path.isdir (path_param):
online_logger.info( "parameters file backup successful!")
online_logger.info("Creating backup finished!")
if __name__ == "__main__":
(options,args) = parse_arguments()
server_process = None
if options.remote:
online_logger.info("Starting remote modus")
conf_file_name = options.configuration
conf = pySPACE.load_configuration(conf_file_name)
adrf = pySPACE.environments.live.communication.adrf_messenger.AdrfMessenger()
adrf.register()
# register the interface with ADRF
online_logger.info("Starting event loop")
while True:
online_logger.info("Check register status")
time.sleep(0.5)
while adrf.is_registered():
#online_logger.info("Get command")
command = adrf.adrf_receive_command()
if command[0] == 3: # 3 = C_CONFIGURE
online_logger.info( "received command: C_CONFIGURE")
online_logger.info( "Loading parameter file..")
online_logger.info( "Done")
adrf.set_state(5) # 5 = S_CONFIGURED
# starting controller
cfg = adrf.get_config()
online_logger.info( "Constructing Controller...")
liveControl = LiveController(cfg, adrf)
online_logger.info( "Constructing Controller finished")
if server_process is None:
online_logger.info("Starting XMLRPCServer.. ")
server_process = create_and_start_rpc_server(liveControl)
else :
online_logger.info(str("XMLRPCServer already running (%s)" % server_process))
elif command[0] == 4: # 4 = C_STARTAPP
online_logger.info( "received command: C_STARTAPP")
adrf.set_state(6) # 6 = S_RUNNING
cfg = adrf.get_config()
# mode can be defined in the configuration file, predict_offline as an example
if cfg["mode"] == 'prewindowing_offline':
liveControl.prewindowing(online=False)
create_backup(liveControl, options)
elif cfg["mode"] == 'prewindowing':
# first start eegclient
liveControl.prewindowing(online=True)
create_backup(liveControl, options)
elif cfg["mode"] == 'prewindowed_train':
liveControl.prewindowed_train()
create_backup(liveControl, options)
elif cfg["mode"] == 'train':
liveControl.train()
create_backup(liveControl, options)
elif cfg["mode"] == 'adapt':
liveControl.adapt_classification_threshold()
elif cfg["mode"] == 'predict':
liveControl.predict(online=True, remote=True)
elif cfg["mode"] == 'predict_offline':
liveControl.predict(online=False, remote=True)
elif cfg["mode"] == 'all':
liveControl.train()
create_backup(liveControl, options)
liveControl.predict(online=False, remote=True)
else :
online_logger.warn(str("mode \'%s\' was not recognized!" % cfg["mode"]))
elif command[0] == 5: # 5 = C_STOPAPP
online_logger.info( "received command: C_STOPAPP")
adrf.set_state(8) # 8 = S_STOPPED
if cfg["mode"] in ('prewindowing', 'prewindowing_offline'):
liveControl.stop_prewindowing()
elif cfg["mode"] in ('predict', 'predict_offline'):
liveControl.stop_prediction()
else:
pass
adrf.undo_registration()
elif options.all:
online_logger.info("Starting training and then predicting...")
param_file_name = options.params
parameters = read_parameter_file(param_file_name)
conf_file_name = options.configuration
if conf_file_name is not None:
conf = pySPACE.load_configuration(conf_file_name)
else:
conf = None
# starting controller
online_logger.info( "Constructing Controller...")
liveControl = LiveController(parameters)
online_logger.info( "Constructing Controller finished")
server_process = create_and_start_rpc_server(liveControl)
liveControl.prewindowing()
liveControl.prewindowed_train()
create_backup(liveControl, options)
server_process.terminate()
server_process.join()
liveControl.predict(online=False)
server_process.terminate()
server_process.join()
else:
pySPACE.load_configuration(options.configuration)
conf = pySPACE.configuration
param_file_name = options.params
parameters = read_parameter_file(param_file_name)
from pySPACE.environments.live import eeg_stream_manager, prediction, adaptation, communication, trainer
import pySPACE.environments.live.communication.log_messenger
# starting controller
online_logger.info( "Constructing Controller...")
liveControl = LiveController(parameters)
online_logger.info( "Constructing Controller finished")
server_process = create_and_start_rpc_server(liveControl)
# start main work....
if options.prewindowing:
# first start eegclient
liveControl.prewindowing(online=True)
create_backup(liveControl, options)
elif options.prewindowing_offline:
liveControl.prewindowing(online=False)
create_backup(liveControl, options)
elif options.prewindowed_train:
liveControl.prewindowed_train()
create_backup(liveControl, options)
elif options.train:
liveControl.train()
create_backup(liveControl, options)
elif options.adapt:
liveControl.adapt_classification_threshold()
create_backup(liveControl, options)
elif options.predict:
liveControl.predict(online=True)
elif options.predict_offline:
liveControl.predict(online=False)
elif options.record:
liveControl.record()
server_process.terminate()
server_process.join()
|
rovio.py
|
#!/usr/bin/env python
# rovio.py v0.0.1 alpha -- text-mode Rovio client for Linux
#
# a Rudforce Intragalactic endeavor
# http://www.rudforce.com/
#
# copyleft 2008 Del Rudolph
# see http://www.gnu.org/copyleft/gpl.html for latest license
# Set up the important stuff
# need to make this an interactive, first-run thing
# that gives option of saving to a config file
#
# theurl is in the form [IP_number:Port] (or just IP_Number if not using a port)
# this should be Rovio's LAN IP, running this over the internets would probably
# be too laggy to bother with
theurl = 'rovio.ip.number:port'
# an Admin user account on Rovio. Needs to be Admin for all functions to work!
username = 'adminUsername'
# said Admin user's password
password = 'adminPassword'
# nothing needs set below here, unless you wanna mess with it :)
################################################################
import curses, curses.wrapper
import urllib2
import time
import threading, Queue
import subprocess, os
# had to use DefaultRealm because Rovio uses different realm for Admin
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
passman.add_password(None, theurl, username, password)
authhandler = urllib2.HTTPBasicAuthHandler(passman)
opener = urllib2.build_opener(authhandler)
urllib2.install_opener(opener)
# fix these so they're figured from updateStats?
# nope, haven't figured out how to tell if headlight is already on
light = '0'
# set up some globals
battList = [126,126,126,126,126]
emailok = 0
vlcon = 0
head = 1
# set up all the curses UI stuff
import curses
stdscr = curses.initscr()
curses.noecho()
curses.cbreak()
stdscr.keypad(1)
curses.curs_set(0)
mainwin = curses.newwin(21, 66, 0, 0)
mainwin.border()
mainwin.overwrite(stdscr)
msgwin = curses.newwin(7, 13, 0, 66)
msgwin.border()
msgwin.addstr(1, 1, " Rovio ", curses.A_REVERSE);
msgwin.overwrite(stdscr)
lightwin = curses.newwin(7, 13, 7, 66)
lightwin.border()
lightwin.addstr(1, 2, " _")
lightwin.addstr(2, 2, " / |")
lightwin.addstr(3, 2, "(> |")
lightwin.addstr(4, 2, " \_|")
lightwin.overwrite(stdscr)
headwin = curses.newwin(7, 13, 14, 66)
headwin.border()
headwin.addstr(4, 1, "<I______/")
headwin.addstr(5, 2, '(_)---`=[]')
headwin.overwrite(stdscr)
battwin = curses.newwin(3, 26, 21, 1)
battwin.border()
battwin.addstr(0, 1, "Battery")
battwin.overwrite(stdscr)
sswin = curses.newwin(3, 26, 21, 27)
sswin.border()
sswin.addstr(0, 1, "Signal")
sswin.overwrite(stdscr)
wifiwin = curses.newwin(3, 26, 21, 53)
wifiwin.border()
wifiwin.addstr(0, 1, "Wifi")
wifiwin.overwrite(stdscr)
# bunch o' functions from here
# found at http://code.activestate.com/recipes/65222/ and tweaked a bit
# using a thread, run a function every n seconds
class PeriodicExecutor(threading.Thread):
def __init__(self,sleep,func):
self.func = func
self.sleep = sleep
threading.Thread.__init__(self,name = "PeriodicExecutor")
self.setDaemon(1)
def run(self):
while 1:
time.sleep(self.sleep)
self.func()
def spawnVlc():
global vlcon
if os.access("/usr/bin/vlc", os.X_OK) and vlcon == 0:
FNULL = open('/dev/null', 'w')
vlcon = subprocess.Popen(["/usr/bin/vlc", "rtsp://"+username+":"+password+"@"+theurl+"/webcam"], stderr=FNULL, stdout=FNULL).pid
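# builds a fixed-width text progress bar with the percentage spliced into its middle;
# e.g. makeProgBar(24, 100, 127, 113) yields a bar roughly half filled, labelled "48%"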
def makeProgBar(width,min,max,val):
if val < min: val = min
if val > max: val = max
diff = float(val - min)
span = float(max - min)
# figure percent done
pDone = (diff / span) * 100.0
pDone = round(pDone)
pDone = int(pDone)
# Figure out how many # in percent done
numEq = (pDone / 100.0) * width
numEq = int(round(numEq))
# build bar with = and -
bar = '#'*numEq + '-'*(width-numEq)
# place percentage in center
pPlace = (len(bar) / 2) - len(str(pDone))
pString = str(pDone) + "%"
# slice the percentage into the bar
bar = bar[0:pPlace] + pString + bar[pPlace+len(pString):]
return str(bar)
def updateVid():
# get vid, print to screen
# expects ascii converter to be /usr/bin/jp2a
picUrl = "http://"+username+":"+password+"@"+theurl+"/Jpeg/CamImg1234.jpg"
output = subprocess.Popen(["/usr/bin/jp2a", "--width=64", "--height=20", picUrl], stdout=subprocess.PIPE).communicate()[0]
imgtext = output.split("\n")
for i in range(19):
stdscr.addstr(i+1, 1, imgtext[i])
def returnConfirm(inText):
shq = curses.newwin(3, 30, 10, 25)
shq.border()
shq.addstr(0, 1, "Confirm", curses.A_REVERSE);
shq.addstr(1, 2, inText, curses.A_BOLD)
shq.addstr(2, 16, "'y' or 'n'")
c = shq.getch()
if c in (ord('y'), ord('Y')):
shq.clear()
shq.refresh()
return 1
else:
shq.clear()
shq.refresh()
return 0
def headPos(inpos):
global head
if inpos == head: return
data = ""
# call thing to lower head /rev.cgi?Cmd=nav&action=18&drive=[down=12,mid=13,up=11]
if inpos == '1' and head != '1':
data = "12"
head = "1"
if inpos == '2' and head != '2':
data = "13"
head = "2"
if inpos == '3' and head != '3':
data = "11"
head = "3"
SendRequest("Cmd=nav&action=18&drive="+data+"")
def setHome():
if returnConfirm("Overwrite Home position?") == 1:
SendRequest("Cmd=nav&action=14")
def Light():
global light
if light == "1":
light = "0"
for i in (2,3,4):
lightwin.addstr(i, 6, " ")
lightwin.refresh()
else:
light = "1"
for i in (2,3,4):
lightwin.addstr(i, 6, "===")
lightwin.refresh()
SendRequest("Cmd=nav&action=19&LIGHT="+light+"")
def updateStats():
# batt 100=dead in the water, <106 go home, 127 full
# wifi 0-254
# nav_ss 0-65535, <5000 no signal, >47000 strong signal
#Cmd = nav
#responses = 0|x=-1339|y=-5592|theta=-1.953|room=0|ss=8263
#|beacon=0|beacon_x=0|next_room=9|next_room_ss=38
#|state=0|resistance=0|sm=15|pp=0|flags=0005
#|brightness=6|resolution=3|video_compression=1|frame_rate=20
#|privilege=0|user_check=1|speaker_volume=15|mic_volume=17
#|wifi_ss=233|show_time=0|ddns_state=0|email_state=0
#|battery=126|charging=80|head_position=203|ac_freq=2
# makeProgBar(width,min,max,val)
stats = {}
statstr = SendRequest("Cmd=nav&action=1", 1)
statstr = statstr.replace("Cmd = nav\nresponses = 0|", '')
statstr = statstr.replace("\n", "")
for item in statstr.split('|'):
a,b = item.split('=')
stats[a] = b
global emailok
emailok = int(stats['email_state'])
i = 0
battavg = 0
del battList[0]
battList.append(int(stats['battery']))
for item in battList: i += item
battavg = i / 5
battstr = makeProgBar(24, 100, 127, battavg)
battwin.addstr(1, 1, battstr)
battwin.refresh()
if battavg < 108:
curses.flash()
msgwin.addstr(5, 1, "Low Battery", curses.A_STANDOUT)
msgwin.refresh()
ssstr = makeProgBar(24, 5000, 47000, int(stats['ss']))
sswin.addstr(1, 1, ssstr)
sswin.refresh()
wifistr = makeProgBar(24, 0, 254, int(stats['wifi_ss']))
wifiwin.addstr(1, 1, wifistr)
wifiwin.refresh()
# Going Home state doesn't seem to be used?
state = (' Roaming ', 'Going Home', ' Docking ', ' ', 'No Connection')
msgwin.addstr(3, 1, state[int(stats['state'])])
if int(stats['charging']) > 63: msgwin.addstr(3, 1, " Docked ")
if int(stats['charging']) >= 80: msgwin.addstr(3, 1, " Charging ")
msgwin.refresh()
# keep the module-level head position in sync so headPos() can compare against it
global head
hp = int(stats['head_position'])
if hp > 195 and hp < 205:
head = "1"
headwin.addstr(1, 4, ' ')
headwin.addstr(2, 3, ' ')
headwin.addstr(3, 3, "_______")
headwin.refresh()
if hp > 130 and hp < 150:
head = "2"
headwin.addstr(1, 1, ' ___')
headwin.addstr(2, 1, ' `--.\\')
headwin.addstr(3, 1, ' ____\\\\_')
headwin.refresh()
if hp > 60 and hp < 70:
head = "3"
headwin.addstr(1, 1, " '\\")
headwin.addstr(2, 1, ' \\\\')
headwin.addstr(3, 3, '____\\\\_')
headwin.refresh()
def emailImage():
if emailok == 1:
# send email
SendRequest("SendMail")
msgwin.addstr(3,1, " Photo Sent")
msgwin.refresh()
# need to clean this up, make it more flexible
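# e.g. SendRequest("Cmd=nav&action=1", 1) returns the raw status string parsed by
# updateStats(), while SendRequest("SendMail") posts to /SendMail.cgi and returns None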
def SendRequest(indata, myreturn=0):
if indata == 'SendMail':
thefile = '/SendMail.cgi'
thedata = ""
else:
thefile = "/rev.cgi"
thedata = indata
req = urllib2.Request("http://"+theurl+thefile, thedata)
handle = urllib2.urlopen(req)
if myreturn == 1:
return handle.read()
def ShowHelp():
shq = curses.newwin(16, 45, 3, 8)
shq.border()
shq.addstr(0, 1, "Help", curses.A_REVERSE);
shq.addstr(1, 1, "[w] Forward [s] Backward")
shq.addstr(2, 1, "[a] Strafe Left [d] Strafe Right")
shq.addstr(3, 1, "[q] Rotate Left [e] Rotate Right")
shq.addstr(4, 1, "[l] Light [1,2,3] Head Position")
shq.addstr(5, 1, "[Arrow Keys] Directional")
shq.addstr(7, 1, "[h] Go Home [H] Set Home")
shq.addstr(9, 1, "[Spacebar] Stop ") # [p] Preferences goes here
shq.addstr(10, 1, "[i] Email Image [v] Start VLC")
shq.addstr(12, 1, "[x] Quit [/ or ?] Show Help")
shq.addstr(14, 1, " Press any key to continue")
c = shq.getch()
# just adds a drive command to the queue
# command is in the form "drive_command:speed"
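# e.g. Drive("1:1") queues one forward move at speed 1; the worker thread turns it into
# the request Cmd=nav&action=18&drive=1&speed=1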
def Drive(command):
if q.qsize() < 1:
q.put(command)
else:
return
def doUpdate():
updateVid()
updateStats()
# this runs in a separate thread
# handles drive commands
def worker():
tRevUrl = "http://"+theurl+"/rev.cgi"
while True:
item = q.get()
fields = item.split(':')
mydata = "Cmd=nav&action=18&drive="+fields[0]+"&speed="+fields[1]+""
myreq = urllib2.Request(tRevUrl, mydata)
urllib2.urlopen(myreq)
time.sleep(0.15)
# this runs in a separate thread
# handles periodic refresh of stats and video
def timerThread():
pe = PeriodicExecutor(1, doUpdate)
pe.run()
q = Queue.Queue()
t = threading.Thread(target=worker)
t.setDaemon(1)
t.start()
t2 = threading.Thread(target=timerThread)
t2.setDaemon(1)
t2.start()
# now run the actual program loop
while 1:
stdscr.timeout(500)
c = stdscr.getch()
if c in (ord('/'), ord('?')): ShowHelp()
elif c in (ord('x'), ord('X')):
if returnConfirm("Exit the program?") == 1: break
#elif c == curses.KEY_UP: Drive("1:1")
elif c in (ord('w'), curses.KEY_UP): Drive("1:1")
#elif c == curses.KEY_DOWN: Drive("2:1")
elif c in (ord('s'), curses.KEY_DOWN): Drive("2:1")
#elif c == curses.KEY_LEFT: Drive("3:1")
elif c in (ord('a'), curses.KEY_LEFT): Drive("3:1")
#elif c == curses.KEY_RIGHT: Drive("4:1")
elif c in (ord('d'), curses.KEY_RIGHT): Drive("4:1")
elif c == ord('q'): Drive("5:5")
elif c == ord('e'): Drive("6:5")
elif c == ord('l'): Light()
elif c == ord('h'): SendRequest("Cmd=nav&action=13")
elif c == ord('H'): setHome()
elif c == ord(' '): SendRequest("Cmd=nav&action=17")
elif c == ord('1'): headPos('1')
elif c == ord('2'): headPos('2')
elif c == ord('3'): headPos('3')
elif c in (ord('v'), ord('V')): spawnVlc()
elif c in (ord('i'), ord('I')): emailImage()
#elif c in (ord('p'), ord('P')): # will eventually run preferences window
# if we get here the program is ending
curses.nocbreak(); stdscr.keypad(0); curses.echo()
curses.endwin()
|
kinect.py
|
import ctypes
import _ctypes
import pygame
import pygame.freetype
import sys
import time
import math
import threading
from queue import Empty, Queue
from dataclasses import dataclass
from pykinect2 import PyKinectV2, PyKinectRuntime
from pykinect2.PyKinectV2 import *
from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot
from PyQt5.QtWidgets import QApplication
if sys.hexversion >= 0x03000000:
import _thread as thread
else:
import thread
# colors for drawing different bodies
SKELETON_COLORS = [pygame.color.THECOLORS["red"],
pygame.color.THECOLORS["blue"],
pygame.color.THECOLORS["green"],
pygame.color.THECOLORS["orange"],
pygame.color.THECOLORS["purple"],
pygame.color.THECOLORS["yellow"],
pygame.color.THECOLORS["violet"]]
class BodyGameRuntime(QObject):
swipe_signal = pyqtSignal(str, str)
hand_gesture_signal = pyqtSignal(str, str)
get_status_signal = pyqtSignal(dict)
request_status_signal = pyqtSignal(dict)
def __init__(self, fps, hand_data_queue):
super().__init__()
pygame.init()
# Used to manage how fast the screen updates
self._clock = pygame.time.Clock()
# Set the width and height of the screen [width, height]
self._infoObject = pygame.display.Info()
self._screen = pygame.display.set_mode((self._infoObject.current_w >> 1, self._infoObject.current_h >> 1),
pygame.HWSURFACE|pygame.DOUBLEBUF|pygame.RESIZABLE, 32)
pygame.display.set_caption("Kinect for Windows v2 Body Game")
# Loop until the user clicks the close button.
self._done = False
# Kinect runtime object, we want only color and body frames
self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color | PyKinectV2.FrameSourceTypes_Body)
# back buffer surface for getting Kinect color frames, 32bit color, width and height equal to the Kinect color frame size
self._frame_surface = pygame.Surface((self._kinect.color_frame_desc.Width, self._kinect.color_frame_desc.Height), 0, 32)
# here we will store skeleton data
self._bodies = None
self.status = {}
self.right_hand = GestureDetector(200, 900)
self.left_hand = GestureDetector(200, 900)
self.left_hand_gesture = None
self.right_hand_gesture = None
self.fps = fps
self.hand_data_queue = hand_data_queue
def timed_call(self, callback, calls_per_second):
time_time = time.time
start = time_time()
period = 1.0 / calls_per_second
while not self._done:
if (time_time() - start) > period:
start += period
callback()
self._kinect.close()
pygame.quit()
def draw_body_bone(self, joints, jointPoints, color, joint0, joint1):
joint0State = joints[joint0].TrackingState;
joint1State = joints[joint1].TrackingState;
# both joints are not tracked
if (joint0State == PyKinectV2.TrackingState_NotTracked) or (joint1State == PyKinectV2.TrackingState_NotTracked):
return
# both joints are not *really* tracked
if (joint0State == PyKinectV2.TrackingState_Inferred) and (joint1State == PyKinectV2.TrackingState_Inferred):
return
# ok, at least one is good
start = (jointPoints[joint0].x, jointPoints[joint0].y)
end = (jointPoints[joint1].x, jointPoints[joint1].y)
try:
pygame.draw.line(self._frame_surface, color, start, end, 8)
except: # need to catch it due to possible invalid positions (with inf)
pass
def draw_body(self, joints, jointPoints, color):
# Torso
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_Head, PyKinectV2.JointType_Neck);
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_Neck, PyKinectV2.JointType_SpineShoulder);
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_SpineShoulder, PyKinectV2.JointType_SpineMid);
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_SpineMid, PyKinectV2.JointType_SpineBase);
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_SpineShoulder, PyKinectV2.JointType_ShoulderRight);
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_SpineShoulder, PyKinectV2.JointType_ShoulderLeft);
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_SpineBase, PyKinectV2.JointType_HipRight);
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_SpineBase, PyKinectV2.JointType_HipLeft);
# Right Arm
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_ShoulderRight, PyKinectV2.JointType_ElbowRight);
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_ElbowRight, PyKinectV2.JointType_WristRight);
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_WristRight, PyKinectV2.JointType_HandRight);
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_HandRight, PyKinectV2.JointType_HandTipRight);
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_WristRight, PyKinectV2.JointType_ThumbRight);
# Left Arm
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_ShoulderLeft, PyKinectV2.JointType_ElbowLeft);
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_ElbowLeft, PyKinectV2.JointType_WristLeft);
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_WristLeft, PyKinectV2.JointType_HandLeft);
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_HandLeft, PyKinectV2.JointType_HandTipLeft);
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_WristLeft, PyKinectV2.JointType_ThumbLeft);
# Right Leg
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_HipRight, PyKinectV2.JointType_KneeRight);
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_KneeRight, PyKinectV2.JointType_AnkleRight);
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_AnkleRight, PyKinectV2.JointType_FootRight);
# Left Leg
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_HipLeft, PyKinectV2.JointType_KneeLeft);
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_KneeLeft, PyKinectV2.JointType_AnkleLeft);
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_AnkleLeft, PyKinectV2.JointType_FootLeft);
def draw_color_frame(self, frame, target_surface):
target_surface.lock()
address = self._kinect.surface_as_array(target_surface.get_buffer())
ctypes.memmove(address, frame.ctypes.data, frame.size)
del address
target_surface.unlock()
def run(self):
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # If user clicked close
self._done = True # Flag that we are done so we exit this loop
elif event.type == pygame.VIDEORESIZE: # window resized
self._screen = pygame.display.set_mode(event.dict['size'],
pygame.HWSURFACE|pygame.DOUBLEBUF|pygame.RESIZABLE, 32)
# --- Getting frames and drawing
# --- Woohoo! We've got a color frame! Let's fill out back buffer surface with frame's data
if self._kinect.has_new_color_frame():
frame = self._kinect.get_last_color_frame()
self.draw_color_frame(frame, self._frame_surface)
frame = None
# --- Cool! We have a body frame, so can get skeletons
if self._kinect.has_new_body_frame():
self._bodies = self._kinect.get_last_body_frame()
# --- draw skeletons to _frame_surface
if self._bodies is not None:
for i in range(0, self._kinect.max_body_count):
body = self._bodies.bodies[i]
if not body.is_tracked:
continue
joints = body.joints
# convert joint coordinates to color space
joint_points = self._kinect.body_joints_to_color_space(joints)
self.draw_body(joints, joint_points, SKELETON_COLORS[i])
for i in range(0, self._kinect.max_body_count):
body = self._bodies.bodies[i]
if not body.is_tracked:
continue
self.detect_gestures(joint_points, body.hand_left_state, body.hand_right_state)
break
# --- copy back buffer surface pixels to the screen, resize it if needed and keep aspect ratio
# --- (screen size may be different from Kinect's color frame size)
h_to_w = float(self._frame_surface.get_height()) / self._frame_surface.get_width()
target_height = int(h_to_w * self._screen.get_width())
surface_to_draw = pygame.transform.scale(self._frame_surface, (self._screen.get_width(), target_height));
self._screen.blit(surface_to_draw, (0,0))
surface_to_draw = None
pygame.display.update()
# --- Go ahead and update the screen with what we've drawn.
pygame.display.flip()
# --- Limit to self.fps frames per second
self._clock.tick(self.fps)
def set_disabled_area(self, area):
self.right_hand.disabled_area = area
self.left_hand.disabled_area = area
def detect_gestures(self, joint_points, hand_left_state, hand_right_state):
#print((joint_points[PyKinectV2.JointType_HandRight].x - joint_points[PyKinectV2.JointType_SpineShoulder].x))
self.right_hand.update_series(joint_points[PyKinectV2.JointType_HandRight].x, joint_points[PyKinectV2.JointType_HandRight].y, joint_points[PyKinectV2.JointType_SpineShoulder].x, hand_right_state)
self.status["right"] = self.right_hand.update_status()
if self.status["right"]["swipe"]:
self.swipe_signal.emit("right", self.status["right"]["swipe"])
if not None in self.status['right']['hand_gesture']:
gesture = self.status['right']['hand_gesture'][0]
if not gesture == self.right_hand_gesture:
self.hand_gesture_signal.emit("right", gesture)
self.right_hand_gesture = gesture
self.left_hand.update_series(joint_points[PyKinectV2.JointType_HandLeft].x, joint_points[PyKinectV2.JointType_HandLeft].y, joint_points[PyKinectV2.JointType_SpineShoulder].x, hand_left_state)
self.status["left"] = self.left_hand.update_status()
if self.status["left"]["swipe"]:
self.swipe_signal.emit("left", self.status["left"]["swipe"])
if not None in self.status['left']['hand_gesture']:
gesture = self.status['left']['hand_gesture'][0]
if not gesture == self.left_hand_gesture:
self.hand_gesture_signal.emit("left", gesture)
self.left_hand_gesture = gesture
if self.hand_data_queue.full():
try:
self.hand_data_queue.get_nowait()
except Empty:
pass
self.hand_data_queue.put(self.status)
@dataclass
class BodyPoint:
x: int = 0
y: int = 0
central: int = 0
status: int = 0
class GestureDetector:
def __init__(self, y_min, y_max) -> None:
self.disabled_area = None
self.point_series = [None] * 30
self.dead_time = 30
self.calibration = [y_min, y_max]
self.hand_status = {
"percentage": 0,
"swipe" : None,
"hand_gesture" : [None, None],
"hand_area" : 0
}
def update_series(self, x_hand, y_hand, x_central, status):
self.point_series.insert(0, BodyPoint(x_hand, y_hand, x_central, status))
self.point_series.pop()
self.dead_time = self.dead_time - 1
def update_status(self):
self.hand_status["swipe"] = self.swipe_action(self.point_series)
self.hand_status["percentage"] = self.current_percentage(self.point_series[0].y)
tmp = self.hand_status["hand_gesture"][0]
self.hand_status["hand_gesture"][0] = self.hand_gesture(self.point_series)
if not (tmp == self.hand_status["hand_gesture"][0]):
self.hand_status["hand_gesture"][1] = tmp
self.hand_status["hand_area"] = self.hand_in_area(self.point_series[0])
return self.hand_status
def swipe_action(self, point_series):
if not None in point_series:
x_movement = point_series[0].x - point_series[-10].x
y_movement = abs(point_series[0].y - point_series[-10].y)
if y_movement < 100 and self.dead_time <= 0:
self.dead_time = 30
if x_movement < -350:
if self.check_straight_swipe(point_series[0:10]):
#print(f"swipe left detected nr. {x_movement}, {y_movement}")
return "left"
if x_movement > 350:
if self.check_straight_swipe(point_series[0:10]):
#print(f"swipe right detected nr. {x_movement}, {y_movement}")
#print(f"x1, x2 {point_series[0].x}, {point_series[-10].x}")
#print(f"y1, y2 {point_series[0].y}, {point_series[-10].y}")
return "right"
return None
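# fits the straight line y = m*x + n through the first and last sampled points and
# rejects the swipe if any intermediate sample deviates from it by more than 15%;
# note this assumes the first and last x positions differ (otherwise the slope is undefined)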
def check_straight_swipe(self, point_series):
x1 = point_series[0].x
y1 = point_series[0].y
x2 = point_series[-1].x
y2 = point_series[-1].y
m = (y2 - y1)/(x2-x1)
n = y1 - m*x1
for point in point_series[1:9]:
y = m*point.x +n
if y/point.y < 0.85 or y/point.y > 1.15:
print("not in a straight line")
return False
return True
def hand_gesture(self, point_series):
if not None in point_series:
#valid_area = self.hand_in_area(point_series)
valid_area = True
if valid_area:
any_check = []
for point in point_series:
any_check.append(point.status)
if any_check.count(2) >= len(any_check)-5:
return "open"
if any_check.count(3) >= len(any_check)-5:
return "closed"
if any_check.count(4) >= len(any_check)-5:
return "thumbs"
return 'undefined'
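# maps |x_hand - x_spine| to one of three zones (0 = nearest the body, 2 = farthest);
# when a disabled_area is set, a single threshold of 350 splits the two remaining zones,
# and None means the hand is outside the calibrated vertical range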
def hand_in_area(self, point):
if point is not None:
if not (point.y > self.calibration[0] and point.y < self.calibration[1]):
return None
distance = abs(point.x - point.central)
if self.disabled_area is None:
if (distance <= 250):
return 0
if (distance > 250 and distance <= 450):
return 1
if (distance > 450):
return 2
else:
if self.disabled_area == 0:
if (distance <= 350):
return 1
else:
return 2
if self.disabled_area == 1:
if (distance <= 350):
return 0
else:
return 2
if self.disabled_area == 2:
if (distance <= 350):
return 0
else:
return 1
return None
def current_percentage(self, value):
percentage = self.convert_to_percent(value)
if percentage > 100:
return 100
elif percentage < 0:
return 0
return percentage
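# linear mapping from the calibrated y-range onto 0-100%; e.g. with calibration [200, 900]
# a hand y-position of 550 converts to 50.0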
def convert_to_percent(self, value):
percentage = round(100/(self.calibration[1]-self.calibration[0]) * (self.calibration[1] - value),1)
if math.isinf(percentage):
return 0
return percentage
if __name__ == "__main__":
def kinect_thread_runner(request_status):
game = BodyGameRuntime(20, request_status);
game.swipe_signal.connect(swipe_detected)
game.hand_gesture_signal.connect(hand_gesture_detected)
while not game._done:
game.run()
def get_hand_data():
try:
return hand_data.get_nowait()
except Empty as e:
return None
@pyqtSlot(str, str)
def swipe_detected(side, direction):
print(f"{side} Hand: {direction} swipe")
pass
@pyqtSlot(str, str)
def hand_gesture_detected(side, gesture):
print(f"{side} Hand: {gesture} detected")
data = get_hand_data()
if not data:
return
data = data[side]
if data["hand_gesture"][0] == "closed":
if not data["hand_gesture"][1] == "closed":
area = data["hand_area"]
print(area)
"""
hand_data dictionary contains dictionary with hand data:
keys: left, right --> contains dictionary to corresponding hand
containing dictionary:
keys: swipe --> if swipe has been detected (better use emitted signal) - str: left, right
percentage --> where y_pos of hand is from 0-100% - int: 0-100
hand_gesture --> last detected hand gesture - str: open, closed, thumbs
"""
hand_data = Queue(maxsize=1)
kinect_thread = threading.Thread(target=kinect_thread_runner, args=(hand_data,))
kinect_thread.setDaemon(False)
kinect_thread.start()
|
runner.py
|
#!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs fuzzer for trial."""
from collections import namedtuple
import importlib
import json
import os
import posixpath
import shlex
import shutil
import subprocess
import sys
import tarfile
import threading
import time
import zipfile
from common import environment
from common import experiment_utils
from common import filesystem
from common import filestore_utils
from common import fuzzer_utils
from common import fuzzer_stats
from common import logs
from common import new_process
from common import retry
from common import utils
NUM_RETRIES = 3
RETRY_DELAY = 3
FUZZ_TARGET_DIR = '/out'
# These paths are ignored when checking whether the corpus directory has changed
# (so that we only sync when it is needed): they are temporary files generated
# during fuzzer runtime and are not part of the actual corpora.
EXCLUDE_PATHS = set([
# AFL excludes.
'.cur_input',
'.state',
'fuzz_bitmap',
'fuzzer_stats',
'plot_data',
# QSYM excludes.
'bitmap',
])
CORPUS_ELEMENT_BYTES_LIMIT = 1 * 1024 * 1024
SEED_CORPUS_ARCHIVE_SUFFIX = '_seed_corpus.zip'
File = namedtuple('File', ['path', 'modified_time', 'change_time'])
fuzzer_errored_out = False # pylint:disable=invalid-name
def _clean_seed_corpus(seed_corpus_dir):
"""Prepares |seed_corpus_dir| for the trial. This ensures that it can be
used by AFL which is picky about the seed corpus. Moves seed corpus files
from sub-directories into the corpus directory root. Also, deletes any files
that exceed the 1 MB limit. If the NO_SEEDS env var is specified then the
seed corpus files are deleted."""
if not os.path.exists(seed_corpus_dir):
return
if environment.get('NO_SEEDS'):
logs.info('NO_SEEDS specified, deleting seed corpus files.')
shutil.rmtree(seed_corpus_dir)
os.mkdir(seed_corpus_dir)
return
failed_to_move_files = []
for root, _, files in os.walk(seed_corpus_dir):
for filename in files:
file_path = os.path.join(root, filename)
if os.path.getsize(file_path) > CORPUS_ELEMENT_BYTES_LIMIT:
os.remove(file_path)
logs.warning('Removed seed file %s as it exceeds the 1 MB limit.',
file_path)
continue
sha1sum = utils.file_hash(file_path)
new_file_path = os.path.join(seed_corpus_dir, sha1sum)
try:
shutil.move(file_path, new_file_path)
except OSError:
failed_to_move_files.append((file_path, new_file_path))
if failed_to_move_files:
logs.error('Failed to move seed corpus files: %s', failed_to_move_files)
def get_clusterfuzz_seed_corpus_path(fuzz_target_path):
"""Returns the path of the clusterfuzz seed corpus archive if one exists.
Otherwise returns None."""
fuzz_target_without_extension = os.path.splitext(fuzz_target_path)[0]
seed_corpus_path = (fuzz_target_without_extension +
SEED_CORPUS_ARCHIVE_SUFFIX)
return seed_corpus_path if os.path.exists(seed_corpus_path) else None
def _unpack_clusterfuzz_seed_corpus(fuzz_target_path, corpus_directory):
"""If a clusterfuzz seed corpus archive is available, unpack it into the
corpus directory if it exists. Copied from unpack_seed_corpus in
engine_common.py in ClusterFuzz.
"""
oss_fuzz_corpus = environment.get('OSS_FUZZ_CORPUS')
if oss_fuzz_corpus:
benchmark = environment.get('BENCHMARK')
corpus_archive_filename = f'{benchmark}.zip'
oss_fuzz_corpus_archive_path = posixpath.join(
experiment_utils.get_oss_fuzz_corpora_filestore_path(),
corpus_archive_filename)
seed_corpus_archive_path = posixpath.join(FUZZ_TARGET_DIR,
corpus_archive_filename)
filestore_utils.cp(oss_fuzz_corpus_archive_path,
seed_corpus_archive_path)
else:
seed_corpus_archive_path = get_clusterfuzz_seed_corpus_path(
fuzz_target_path)
if not seed_corpus_archive_path:
return
with zipfile.ZipFile(seed_corpus_archive_path) as zip_file:
# Unpack seed corpus recursively into the root of the main corpus
# directory.
idx = 0
for seed_corpus_file in zip_file.infolist():
if seed_corpus_file.filename.endswith('/'):
# Ignore directories.
continue
# Allow callers to opt-out of unpacking large files.
if seed_corpus_file.file_size > CORPUS_ELEMENT_BYTES_LIMIT:
continue
output_filename = '%016d' % idx
output_file_path = os.path.join(corpus_directory, output_filename)
zip_file.extract(seed_corpus_file, output_file_path)
idx += 1
logs.info('Unarchived %d files from seed corpus %s.', idx,
seed_corpus_archive_path)
def run_fuzzer(max_total_time, log_filename):
"""Runs the fuzzer using its script. Logs stdout and stderr of the fuzzer
script to |log_filename| if provided."""
input_corpus = environment.get('SEED_CORPUS_DIR')
output_corpus = environment.get('OUTPUT_CORPUS_DIR')
fuzz_target_name = environment.get('FUZZ_TARGET')
target_binary = fuzzer_utils.get_fuzz_target_binary(FUZZ_TARGET_DIR,
fuzz_target_name)
if not target_binary:
logs.error('Fuzz target binary not found.')
return
_unpack_clusterfuzz_seed_corpus(target_binary, input_corpus)
_clean_seed_corpus(input_corpus)
if max_total_time is None:
logs.warning('max_total_time is None. Fuzzing indefinitely.')
runner_niceness = environment.get('RUNNER_NICENESS', 0)
try:
# Because the runner is launched at a higher priority,
# set it back to the default(0) for fuzzing processes.
command = [
'nice', '-n',
str(0 - runner_niceness), 'python3', '-u', '-c',
('from fuzzers.{fuzzer} import fuzzer; '
'fuzzer.fuzz('
"'{input_corpus}', '{output_corpus}', '{target_binary}')").format(
fuzzer=environment.get('FUZZER'),
input_corpus=shlex.quote(input_corpus),
output_corpus=shlex.quote(output_corpus),
target_binary=shlex.quote(target_binary))
]
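# The assembled command looks roughly like (fuzzer name and paths here are illustrative,
# real values are taken from the environment at runtime):
#   nice -n 0 python3 -u -c "from fuzzers.afl import fuzzer; fuzzer.fuzz('/seeds', '/corpus', '/out/target')"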
# Write output to stdout if user is fuzzing from command line.
# Otherwise, write output to the log file.
if environment.get('FUZZ_OUTSIDE_EXPERIMENT'):
new_process.execute(command,
timeout=max_total_time,
write_to_stdout=True,
kill_children=True)
else:
with open(log_filename, 'wb') as log_file:
new_process.execute(command,
timeout=max_total_time,
output_file=log_file,
kill_children=True)
except subprocess.CalledProcessError:
global fuzzer_errored_out # pylint:disable=invalid-name
fuzzer_errored_out = True
logs.error('Fuzz process returned nonzero.')
class TrialRunner: # pylint: disable=too-many-instance-attributes
"""Class for running a trial."""
def __init__(self):
self.fuzzer = environment.get('FUZZER')
if not environment.get('FUZZ_OUTSIDE_EXPERIMENT'):
benchmark = environment.get('BENCHMARK')
trial_id = environment.get('TRIAL_ID')
self.gcs_sync_dir = experiment_utils.get_trial_bucket_dir(
self.fuzzer, benchmark, trial_id)
filestore_utils.rm(self.gcs_sync_dir, force=True, parallel=True)
else:
self.gcs_sync_dir = None
self.cycle = 1
self.corpus_dir = 'corpus'
self.corpus_archives_dir = 'corpus-archives'
self.results_dir = 'results'
self.unchanged_cycles_path = os.path.join(self.results_dir,
'unchanged-cycles')
self.log_file = os.path.join(self.results_dir, 'fuzzer-log.txt')
self.last_sync_time = None
self.corpus_dir_contents = set()
def initialize_directories(self):
"""Initialize directories needed for the trial."""
directories = [
self.corpus_dir,
self.corpus_archives_dir,
self.results_dir,
]
for directory in directories:
filesystem.recreate_directory(directory)
def conduct_trial(self):
"""Conduct the benchmarking trial."""
self.initialize_directories()
logs.info('Starting trial.')
max_total_time = environment.get('MAX_TOTAL_TIME')
args = (max_total_time, self.log_file)
fuzz_thread = threading.Thread(target=run_fuzzer, args=args)
fuzz_thread.start()
if environment.get('FUZZ_OUTSIDE_EXPERIMENT'):
# Hack so that the fuzz_thread has some time to fail if something is
# wrong. Without this we will sleep for a long time before checking
# if the fuzz thread is alive.
time.sleep(5)
while fuzz_thread.is_alive():
self.sleep_until_next_sync()
self.do_sync()
self.cycle += 1
logs.info('Doing final sync.')
self.do_sync(final_sync=True)
fuzz_thread.join()
def sleep_until_next_sync(self):
"""Sleep until it is time to do the next sync."""
if self.last_sync_time is not None:
next_sync_time = (self.last_sync_time +
experiment_utils.get_snapshot_seconds())
sleep_time = next_sync_time - time.time()
if sleep_time < 0:
# Log error if a sync has taken longer than
# get_snapshot_seconds() and messed up our time
# synchronization.
logs.warning('Sleep time on cycle %d is %d', self.cycle,
sleep_time)
sleep_time = 0
else:
sleep_time = experiment_utils.get_snapshot_seconds()
logs.debug('Sleeping for %d seconds.', sleep_time)
time.sleep(sleep_time)
# last_sync_time is recorded before the sync so that each sync happens
# roughly get_snapshot_seconds() after each other.
self.last_sync_time = time.time()
def _set_corpus_dir_contents(self):
"""Set |self.corpus_dir_contents| to the current contents of
|self.corpus_dir|. Don't include files or directories excluded by
|EXCLUDE_PATHS|."""
self.corpus_dir_contents = set()
corpus_dir = os.path.abspath(self.corpus_dir)
for root, _, files in os.walk(corpus_dir):
# Check if root is excluded.
relpath = os.path.relpath(root, corpus_dir)
if _is_path_excluded(relpath):
continue
for filename in files:
# Check if filename is excluded first.
if _is_path_excluded(filename):
continue
file_path = os.path.join(root, filename)
stat_info = os.stat(file_path)
last_modified_time = stat_info.st_mtime
# Warning: ctime means creation time on Win and may not work as
# expected.
last_changed_time = stat_info.st_ctime
file_tuple = File(file_path, last_modified_time,
last_changed_time)
self.corpus_dir_contents.add(file_tuple)
def is_corpus_dir_same(self):
"""Sets |self.corpus_dir_contents| to the current contents and returns
True if it is the same as the previous contents."""
logs.debug('Checking if corpus dir is the same.')
prev_contents = self.corpus_dir_contents.copy()
self._set_corpus_dir_contents()
return prev_contents == self.corpus_dir_contents
def do_sync(self, final_sync=False):
"""Save corpus archives and results to GCS."""
try:
if not final_sync and self.is_corpus_dir_same():
logs.debug('Cycle: %d unchanged.', self.cycle)
filesystem.append(self.unchanged_cycles_path, str(self.cycle))
else:
logs.debug('Cycle: %d changed.', self.cycle)
self.archive_and_save_corpus()
self.record_stats()
self.save_results()
logs.debug('Finished sync.')
except Exception: # pylint: disable=broad-except
logs.error('Failed to sync cycle: %d.', self.cycle)
def record_stats(self):
"""Use fuzzer.get_stats if it is offered, validate the stats and then
save them to a file so that they will be synced to the filestore."""
# TODO(metzman): Make this more resilient so we don't wait forever and
# so that breakages in stats parsing don't break the runner.
fuzzer_module = get_fuzzer_module(self.fuzzer)
fuzzer_module_get_stats = getattr(fuzzer_module, 'get_stats', None)
if fuzzer_module_get_stats is None:
# Stats support is optional.
return
try:
output_corpus = environment.get('OUTPUT_CORPUS_DIR')
stats_json_str = fuzzer_module_get_stats(output_corpus,
self.log_file)
except Exception: # pylint: disable=broad-except
logs.error('Call to %s failed.', fuzzer_module_get_stats)
return
try:
fuzzer_stats.validate_fuzzer_stats(stats_json_str)
except (ValueError, json.decoder.JSONDecodeError):
logs.error('Stats are invalid.')
return
stats_filename = experiment_utils.get_stats_filename(self.cycle)
stats_path = os.path.join(self.results_dir, stats_filename)
with open(stats_path, 'w') as stats_file_handle:
stats_file_handle.write(stats_json_str)
def archive_corpus(self):
"""Archive this cycle's corpus."""
archive = os.path.join(
self.corpus_archives_dir,
experiment_utils.get_corpus_archive_name(self.cycle))
directories = [self.corpus_dir]
if self.cycle == 1:
# Some fuzzers like eclipser and LibFuzzer don't actually copy the
# seed/input corpus to the output corpus (which AFL does), which
# results in their coverage being undercounted.
seed_corpus = environment.get('SEED_CORPUS_DIR')
directories.append(seed_corpus)
archive_directories(directories, archive)
return archive
def save_corpus_archive(self, archive):
"""Save corpus |archive| to GCS and delete when done."""
if not self.gcs_sync_dir:
return
basename = os.path.basename(archive)
gcs_path = posixpath.join(self.gcs_sync_dir, self.corpus_dir, basename)
# Don't use parallel to avoid stability issues.
filestore_utils.cp(archive, gcs_path)
# Delete corpus archive so disk doesn't fill up.
os.remove(archive)
@retry.wrap(NUM_RETRIES, RETRY_DELAY,
'experiment.runner.TrialRunner.archive_and_save_corpus')
def archive_and_save_corpus(self):
"""Archive and save the current corpus to GCS."""
archive = self.archive_corpus()
self.save_corpus_archive(archive)
@retry.wrap(NUM_RETRIES, RETRY_DELAY,
'experiment.runner.TrialRunner.save_results')
def save_results(self):
"""Save the results directory to GCS."""
if not self.gcs_sync_dir:
return
# Copy results directory before rsyncing it so that we don't get an
# exception from uploading a file that changes in size. Files can change
# in size because the log file containing the fuzzer's output is in this
# directory and can be written to by the fuzzer at any time.
results_copy = filesystem.make_dir_copy(self.results_dir)
filestore_utils.rsync(
results_copy, posixpath.join(self.gcs_sync_dir, self.results_dir))
def get_fuzzer_module(fuzzer):
"""Returns the fuzzer.py module for |fuzzer|. We made this function so that
we can mock the module, because importing modules makes it hard to undo changes
to the python process."""
fuzzer_module_name = 'fuzzers.{fuzzer}.fuzzer'.format(fuzzer=fuzzer)
fuzzer_module = importlib.import_module(fuzzer_module_name)
return fuzzer_module
def archive_directories(directories, archive_path):
"""Create a tar.gz file named |archive_path| containing the contents of each
directory in |directories|."""
with tarfile.open(archive_path, 'w:gz') as tar:
for directory in directories:
tar_directory(directory, tar)
def tar_directory(directory, tar):
"""Add the contents of |directory| to |tar|. Note that this should not
exception just because files and directories are being deleted from
|directory| while this function is being executed."""
directory = os.path.abspath(directory)
directory_name = os.path.basename(directory)
for root, _, files in os.walk(directory):
for filename in files:
file_path = os.path.join(root, filename)
arcname = os.path.join(directory_name,
os.path.relpath(file_path, directory))
try:
tar.add(file_path, arcname=arcname)
except (FileNotFoundError, OSError):
# We will get these errors if files or directories are being
# deleted from |directory| as we archive it. Don't bother
# rescanning the directory, new files will be archived in the
# next sync.
pass
except Exception: # pylint: disable=broad-except
logs.error('Unexpected exception occurred when archiving.')
def _is_path_excluded(path):
"""Is any part of |path| in |EXCLUDE_PATHS|."""
path_parts = path.split(os.sep)
for part in path_parts:
if not part:
continue
if part in EXCLUDE_PATHS:
return True
return False
def experiment_main():
"""Do a trial as part of an experiment."""
logs.info('Doing trial as part of experiment.')
try:
runner = TrialRunner()
runner.conduct_trial()
except Exception as error: # pylint: disable=broad-except
logs.error('Error doing trial.')
raise error
def main():
"""Do an experiment on a development machine or on a GCP runner instance."""
logs.initialize(
default_extras={
'benchmark': environment.get('BENCHMARK'),
'component': 'runner',
'fuzzer': environment.get('FUZZER'),
'trial_id': str(environment.get('TRIAL_ID')),
})
experiment_main()
if fuzzer_errored_out:
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
|
anime.py
|
from .stream import Stream
from .cacher import Cacher
from .events import EventEmitter
from .handling import Result, Status
from typing import Union
from threading import Thread #, Semaphore, Lock
# from queue import Queue
import os, time
class Anime(EventEmitter):
def __init__(self, name: str):
EventEmitter.__init__(self, ['spawn', 'download_start',
'download_end', 'receive'])
with Cacher() as cache:
if not (descriptor := cache.get_anime_by_name(name)):
raise NameError("Anime '%s' is not registered in the database" % name)
else:
self.descriptor = descriptor
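# Illustrative usage (assumes the show is registered in the local cache database):
# Anime("Some Show").spawn([1, 2], path="downloads", block=True) starts one download
# thread per episode and joins them before returning.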
def spawn(self, episodes: Union[int, list[int]], path: str = os.curdir,
name_tmp: str = "%(name)s-Episode-%(ep)i-1080p.mp4",
              chsize: int = None, block: bool = False) -> Result:
if isinstance(episodes, int):
episodes = [episodes]
chsize = chsize or 1024 << 2
threads = []
for ep in episodes:
thread = Thread(target=self.download, args=(ep, path, name_tmp, chsize))
thread.start()
self.emit('spawn', thread)
threads.append(thread)
time.sleep(0.5)
if block:
list(map(Thread.join, threads))
return Result(Status.OK, threads, "Anime(s) downloaded successfully")
def download(self, ep: int, directory: str, template: str, chsize: int):
stream = Stream(self.descriptor, ep, chsize)
path = '%s/%s' % (directory,
template % dict(name=self.descriptor.name, ep=ep))
stream.on('download',
lambda result: self.emit('download_start', stream, result))
stream.on('end',
lambda result: self.emit('download_end', stream, result))
stream.on('data', lambda data: self.emit('receive', stream, data))
file = open(path, 'ab+' if os.path.isfile(path) else 'wb+')
if result := stream.download(file):
result.value.close()
else:
print("Failed downloading '%s' episode %i" % (stream._anime.name,
stream._ep))
def __len__(self):
return self.descriptor.episode_count
def __repr__(self):
return repr(self.descriptor)
def __str__(self):
return self.descriptor.name
|
control.py
|
import asyncio
import functools
import os
import json
import logging
from aiohttp import web
from threading import Thread
from opentrons import instruments
from opentrons.config import pipette_config
from opentrons.trackers import pose_tracker
from opentrons.config import feature_flags as ff
from opentrons.types import Mount, Point
from opentrons.hardware_control.types import Axis, CriticalPoint
log = logging.getLogger(__name__)
def _motion_lock(func):
@functools.wraps(func)
async def decorated(request):
async with request.app['com.opentrons.motion_lock']:
return await func(request)
return decorated
def hw_from_req(req):
""" Utility function to get the hardware resource from requests """
return req.app['com.opentrons.hardware']
async def get_attached_pipettes(request):
"""
Query robot for model strings on 'left' and 'right' mounts, and return a
dict with the results keyed by mount. By default, this endpoint provides
cached values, which will not interrupt a running session. WARNING: if the
caller supplies the "refresh=true" query parameter, this method will
interrupt a sequence of Smoothie operations that are in progress, such as a
protocol run.
Example:
```
{
'left': {
'model': 'p300_single_v1',
'name': 'p300_single',
'tip_length': 51.7,
'mount_axis': 'z',
'plunger_axis': 'b',
'id': '<pipette id string>'
},
'right': {
'model': 'p10_multi_v1',
'name': 'p10_multi',
'tip_length': 40,
'mount_axis': 'a',
'plunger_axis': 'c',
'id': '<pipette id string>'
}
}
```
If a pipette is "uncommissioned" (e.g.: does not have a model string
written to on-board memory), or if no pipette is present, the corresponding
mount will report `'model': null`
"""
hw = hw_from_req(request)
if request.url.query.get('refresh') == 'true':
if ff.use_protocol_api_v2():
await hw.cache_instruments()
else:
hw.cache_instrument_models()
response = {}
if ff.use_protocol_api_v2():
attached = await hw.get_attached_pipettes()
else:
attached = hw.get_attached_pipettes()
for mount, data in attached.items():
response[mount] = {
'model': data['model'],
'name': data['name'],
'mount_axis': str(data['mount_axis']).lower(),
'plunger_axis': str(data['plunger_axis']).lower(),
'id': data['id']
}
if 'tip_length' in data:
response[mount]['tip_length'] = data.get('tip_length', 0)
return web.json_response(response, status=200)
async def get_attached_modules(request):
"""
On success (including an empty "modules" list if no modules are detected):
# status: 200
{
"modules": [
{
# machine readable identifying name of module
"name": "string",
# human-presentable name of module
"displayName": "string",
# module system port path
"port": "string",
# unique serial number
"serial": "string",
# model identifier (i.e. part number)
"model": "string",
# current firmware version
"fwVersion": "string",
# human readable status
"status": "string",
# live module data
"data": "dict",
},
// ...
],
}
On failure:
# status: 500
{
"message": "..."
}
"""
hw = hw_from_req(request)
if ff.use_protocol_api_v2():
hw_mods = await hw.discover_modules()
module_data = [
{
'name': mod.name(),
'displayName': mod.display_name(),
'port': mod.port,
'serial': mod.device_info.get('serial'),
'model': mod.device_info.get('model'),
'fwVersion': mod.device_info.get('version'),
**mod.live_data
}
for mod in hw_mods
]
else:
hw.discover_modules()
hw_mods = hw.attached_modules.values()
module_data = [
{
'name': mod.name(),
'displayName': mod.display_name(),
'port': mod.port,
'serial': mod.device_info and mod.device_info.get('serial'),
'model': mod.device_info and mod.device_info.get('model'),
'fwVersion': mod.device_info
and mod.device_info.get('version'),
**mod.live_data
}
for mod in hw_mods
]
return web.json_response(data={"modules": module_data},
status=200)
async def get_module_data(request):
"""
Query a module (by its serial number) for its live data
"""
hw = hw_from_req(request)
requested_serial = request.match_info['serial']
res = None
if ff.use_protocol_api_v2():
hw_mods = await hw.discover_modules()
else:
hw_mods = hw.attached_modules.values()
for module in hw_mods:
is_serial_match = module.device_info.get('serial') == requested_serial
if is_serial_match and hasattr(module, 'live_data'):
res = module.live_data
if res:
return web.json_response(res, status=200)
else:
return web.json_response({"message": "Module not found"}, status=404)
async def execute_module_command(request):
"""
Execute a command on a given module by its serial number
"""
hw = hw_from_req(request)
requested_serial = request.match_info['serial']
data = await request.json()
command_type = data.get('command_type')
args = data.get('args')
if ff.use_protocol_api_v2():
hw_mods = await hw.discover_modules()
else:
hw_mods = hw.attached_modules.values()
if len(hw_mods) == 0:
return web.json_response({"message": "No connected modules"},
status=404)
matching_mod = next((mod for mod in hw_mods if
mod.device_info.get('serial') == requested_serial),
None)
if not matching_mod:
return web.json_response({"message": "Specified module not found"},
status=404)
if hasattr(matching_mod, command_type):
clean_args = args or []
method = getattr(matching_mod, command_type)
if asyncio.iscoroutinefunction(method):
val = await method(*clean_args)
else:
val = method(*clean_args)
return web.json_response(
{'message': 'Success', 'returnValue': val},
status=200)
else:
return web.json_response(
{'message': f'Module does not have command: {command_type}'},
status=400)
async def get_engaged_axes(request):
"""
Query driver for engaged state by axis. Response keys will be axes XYZABC
and keys will be True for engaged and False for disengaged. Axes must be
manually disengaged, and are automatically re-engaged whenever a "move" or
"home" command is called on that axis.
Response shape example:
{"x": {"enabled": true}, "y": {"enabled": false}, ...}
"""
hw = hw_from_req(request)
if ff.use_protocol_api_v2():
engaged = await hw.engaged_axes
else:
engaged = hw.engaged_axes
return web.json_response(
{str(k).lower(): {'enabled': v}
for k, v in engaged.items()})
async def disengage_axes(request):
"""
Disengage axes (turn off power), primarily in order to reduce heat
build-up.
:param request: Must contain an "axes" field with a list of axes
to disengage (["x", "y", "z", "a", "b", "c"])
:return: message and status code
"""
hw = hw_from_req(request)
data = await request.text()
axes = json.loads(data).get('axes')
invalid_axes = [ax for ax in axes if ax.lower() not in 'xyzabc']
if invalid_axes:
message = "Invalid axes: {}".format(', '.join(invalid_axes))
status = 400
else:
await hw.disengage_axes([ax.upper() for ax in axes])
message = "Disengaged axes: {}".format(', '.join(axes))
status = 200
return web.json_response({"message": message}, status=status)
async def position_info(request):
"""
Positions determined experimentally by issuing move commands. Change
pipette position offsets the mount to the left or right such that a user
can easily access the pipette mount screws with a screwdriver. Attach tip
position places either pipette roughly in the front-center of the deck area
"""
return web.json_response({
'positions': {
'change_pipette': {
'target': 'mount',
'left': (325, 40, 30),
'right': (65, 40, 30)
},
'attach_tip': {
'target': 'pipette',
'point': (200, 90, 150)
}
}
})
def _validate_move_data(data):
error = False
message = ''
target = data.get('target')
if target not in ['mount', 'pipette']:
message = "Invalid target key: '{}' (target must be one of " \
"'mount' or 'pipette'".format(target)
error = True
point = data.get('point')
if type(point) == list:
point = tuple(point)
if type(point) is not tuple:
message = "Point must be an ordered iterable. Got: {}".format(
type(point))
error = True
if point is not None and len(point) != 3:
message = "Point must have 3 values--got {}".format(point)
error = True
if target == 'mount' and float(point[2]) < 30:
message = "Sending a mount to a z position lower than 30 can cause " \
"a collision with the deck or reach the end of the Z axis " \
"movement screw. Z values for mount movement must be >= 30"
error = True
mount = data.get('mount')
if mount not in ['left', 'right']:
message = "Mount '{}' not supported, must be 'left' or " \
"'right'".format(mount)
error = True
if target == 'pipette':
model = data.get('model')
if model not in pipette_config.config_models:
message = "Model '{}' not recognized, must be one " \
"of {}".format(model, pipette_config.config_models)
error = True
else:
model = None
return target, point, mount, model, message, error
@_motion_lock
async def move(request):
"""
Moves the robot to the specified position as provided by the `control.info`
endpoint response
Post body must include the following keys:
- 'target': either 'mount' or 'pipette'
- 'point': a tuple of 3 floats for x, y, z
- 'mount': must be 'left' or 'right'
If 'target' is 'pipette', body must also contain:
- 'model': must be a valid pipette model (as defined in `pipette_config`)
"""
hw = hw_from_req(request)
req = await request.text()
data = json.loads(req)
target, point, mount, model, message, error = _validate_move_data(data)
if error:
status = 400
else:
status = 200
if ff.use_protocol_api_v2():
await hw.cache_instruments()
if target == 'mount':
critical_point = CriticalPoint.MOUNT
else:
critical_point = None
mount = Mount[mount.upper()]
target = Point(*point)
await hw.home_z()
pos = await hw.gantry_position(mount, critical_point)
await hw.move_to(mount, target._replace(z=pos.z),
critical_point=critical_point)
await hw.move_to(mount, target,
critical_point=critical_point)
pos = await hw.gantry_position(mount)
message = 'Move complete. New position: {}'.format(pos)
else:
if target == 'mount':
message = _move_mount(hw, mount, point)
elif target == 'pipette':
message = _move_pipette(hw, mount, model, point)
return web.json_response({"message": message}, status=status)
def _move_pipette(robot, mount, model, point):
pipette, _ = _fetch_or_create_pipette(robot, mount, model)
pipette.move_to((robot.deck, point), strategy='arc')
new_position = tuple(
pose_tracker.absolute(pipette.robot.poses, pipette))
return "Move complete. New position: {}".format(new_position)
def _fetch_or_create_pipette(robot, mount, model=None):
existing_pipettes = robot.get_instruments()
pipette = None
should_remove = True
for existing_mount, existing_pipette in existing_pipettes:
if existing_mount == mount:
pipette = existing_pipette
should_remove = False
if pipette is None:
if model is None:
pipette = instruments.Pipette(
mount=mount, max_volume=1000, ul_per_mm=1000)
else:
config = pipette_config.load(model)
pipette = instruments._create_pipette_from_config(
config=config,
mount=mount,
name=model)
return pipette, should_remove
def _move_mount(robot, mount, point):
"""
The carriage moves the mount in the Z axis, and the gantry moves in X and Y
Mount movements do not have the same protections calculated in to an
existing `move` command like Pipette does, so the safest thing is to home
the Z axis, then move in X and Y, then move down to the specified Z height
"""
carriage = robot._actuators[mount]['carriage']
# Home both carriages, to prevent collisions and to ensure that the other
# mount doesn't block the one being moved (mount moves are primarily for
# changing pipettes, so we don't want the other pipette blocking access)
robot.poses = carriage.home(robot.poses)
other_mount = 'left' if mount == 'right' else 'right'
robot.poses = robot._actuators[other_mount]['carriage'].home(robot.poses)
robot.gantry.move(
robot.poses, x=point[0], y=point[1])
robot.poses = carriage.move(
robot.poses, z=point[2])
# These x and y values are hard to interpret because of some internals of
# pose tracker. It's mostly z that matters for this operation anyway
x, y, _ = tuple(
pose_tracker.absolute(
robot.poses, robot._actuators[mount]['carriage']))
_, _, z = tuple(
pose_tracker.absolute(
robot.poses, robot.gantry))
new_position = (x, y, z)
return "Move complete. New position: {}".format(new_position)
@_motion_lock
async def home(request):
"""
This initializes a call to pipette.home() which, as a side effect will:
1. Check the pipette is actually connected (will throw an error if you
try to home a non-connected pipette)
2. Re-engages the motor
:param request: Information obtained from a POST request.
The content type is application/json.
The correct packet form should be as follows:
{
'target': Can be, 'robot' or 'pipette'
'mount': 'left' or 'right', only used if target is pipette
}
:return: A success or non-success message.
"""
hw = hw_from_req(request)
req = await request.text()
data = json.loads(req)
target = data.get('target')
if target == 'robot':
if ff.use_protocol_api_v2():
await hw.home()
else:
hw.home()
status = 200
message = "Homing robot."
elif target == 'pipette':
mount = data.get('mount')
if mount in ['left', 'right']:
if ff.use_protocol_api_v2():
await hw.home([Axis.by_mount(Mount[mount.upper()])])
await hw.home_plunger(Mount[mount.upper()])
status = 200
                message = 'Pipette on {} homed successfully'.format(mount)
else:
pipette, should_remove = _fetch_or_create_pipette(hw,
mount)
pipette.home()
if should_remove:
hw.remove_instrument(mount)
status = 200
message = "Pipette on {} homed successfully.".format(mount)
else:
status = 400
message = "Expected 'left' or 'right' as values for mount" \
"got {} instead.".format(mount)
else:
status = 400
message = "Expected 'robot' or 'pipette' got {}.".format(target)
return web.json_response({"message": message}, status=status)
async def identify(request):
hw = hw_from_req(request)
blink_time = int(request.query.get('seconds', '10'))
if ff.use_protocol_api_v2():
asyncio.ensure_future(hw.identify(blink_time))
else:
Thread(target=lambda: hw.identify(blink_time)).start()
return web.json_response({"message": "identifying"})
async def get_rail_lights(request):
hw = hw_from_req(request)
if ff.use_protocol_api_v2():
on = await hw.get_lights()
else:
on = hw.get_lights()
return web.json_response({'on': on['rails']})
async def set_rail_lights(request):
hw = hw_from_req(request)
data = await request.json()
on = data.get('on')
if on is None:
return web.json_response(
{'message': '"on" must be true or false, got {}'.format(on)},
status=400)
if ff.use_protocol_api_v2():
await hw.set_lights(rails=on)
else:
hw.set_lights(rails=on)
return web.json_response({'on': on})
async def take_picture(request):
filename = './picture.jpg'
if os.path.exists(filename):
try:
os.remove(filename)
except OSError:
pass
cmd = 'ffmpeg -f video4linux2 -s 640x480 -i /dev/video0 -ss 0:0:1 -frames 1' # NOQA
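    # Breakdown of the command above (for reference): read from the V4L2
    # device /dev/video0 at 640x480, seek 1 second in (-ss 0:0:1, presumably to
    # let the sensor settle) and capture a single frame (-frames 1) to the
    # output file appended below.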
proc = await asyncio.create_subprocess_shell(
'{} {}'.format(cmd, filename),
stdout=asyncio.subprocess.PIPE,
loop=request.loop)
res = await proc.stdout.read()
res = res.decode().strip()
await proc.wait()
# TODO (andy - 2018-04-23) find better way of ensuring picture was taken
# TODO and properly saved by ffmpeg
if 'video:' in res and 'audio:' in res and 'subtitle:' in res:
return web.json_response({'message': res}, status=500)
if not os.path.exists(filename):
return web.json_response({'message': 'picture not saved'}, status=500)
return web.FileResponse(filename)
|
server.py
|
# -*- coding: utf-8 -*-
"""TLS server
"""
import os
import socket
import ssl
import time
from threading import Thread
from pyssldemo.peer import Peer
class Server(Peer):
def __init__(self, context=None, port=0):
super(Server, self).__init__(context)
self.context.sni_callback = self.sni_callback
self.port = port
self.s_socket = None # Server-side SSL socket
self.c_socket = None # Accepted SSL socket
self.server_name = None # Selected SNI server name
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def sni_callback(self, socket, server_name, context):
"""
It just records the indicated server name,
but DOESN'T verify the certificate chain on this server name.
"""
self.log(f'Indicated server name: {server_name}')
self.server_name = server_name
def get_session(self):
return self.s_socket.session
def is_session_resumed(self):
return self.s_socket.session_reused
def get_app_protocol(self):
return self.s_socket.selected_alpn_protocol()
def start(self):
self.s_socket = self.context.wrap_socket(
socket.socket(
socket.AF_INET,
socket.SOCK_STREAM),
server_side=True)
self.s_socket.bind(('127.0.0.1', self.port))
self.s_socket.listen()
real_port = self.s_socket.getsockname()[1]
self.log(f'Listening {real_port}')
_port_log = self.get_port_log_path()
with open(_port_log, 'w') as f:
f.write(f'{real_port}')
self.log(f'Generated port log: {os.path.abspath(_port_log)}')
def accept(self):
self.log('Accepting connection ...')
while True:
try:
self.c_socket, _addr = self.s_socket.accept()
self.log(f'Client address: {_addr}')
                Thread(target=self.connect, args=(self.c_socket,)).start()
except Exception:
self.log('Server was stopped')
break
def connect(self, c_socket):
self.log(f'Negotiated protocol: {c_socket.version()}')
self.log(f'Negotiated cipher suite: {c_socket.cipher()}')
with c_socket:
request = c_socket.recv(1024)
self.log(f'Request: {request}')
c_socket.sendall(b'Client said: ' + request)
self.log('Send response')
def close(self):
self.s_socket.close()
_port_log = self.get_port_log_path()
if os.path.isfile(_port_log):
os.remove(_port_log)
self.log(f'Removed port log: {os.path.abspath(_port_log)}')
self.log('Closed')
def get_log_path(self):
return 'server.log'
def get_port_log_path(self):
return 'port.log'
def get_port(self):
"""
Read port from the local port log file.
        If the file is not available yet, the caller blocks until it appears.
"""
_port_log = self.get_port_log_path()
        # Wait until the port log file exists
while not os.path.isfile(_port_log):
self.log('Waiting for port ...')
time.sleep(1)
with open(_port_log, 'r') as f:
return int(f.readline())
class ServerThread(Thread):
def __init__(self, server):
Thread.__init__(self)
self.server = server
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.server.close()
def run(self):
self.server.start()
self.server.accept()
if __name__ == '__main__':
print(ssl.OPENSSL_VERSION)
try:
with Server(port=65443) as _server:
_server.start()
_server.accept()
except KeyboardInterrupt:
print('Server exited')
|
worker.py
|
import threading
tasks = []
MAX_TASKS = 1
def worker(task):
task.run()
def spawn_task(task):
while (len(tasks) >= MAX_TASKS):
tasks.pop().join()
t = threading.Thread(target=worker, args=[task])
t.start()
tasks.append(t)
def wait_tasks():
while (len(tasks) > 0):
tasks.pop().join()
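# Usage sketch (illustrative; assumes task objects expose a run() method, which
# is what worker() calls):
#
#   class PrintTask:
#       def run(self):
#           print("working")
#
#   spawn_task(PrintTask())
#   spawn_task(PrintTask())  # with MAX_TASKS = 1 this joins the previous thread first
#   wait_tasks()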
|
game_modes.py
|
# For Python 3 compatibility
from __future__ import division, absolute_import, print_function
try: input = raw_input
except NameError: pass
from getch import getch
import game_structure as gs
from musictools import (play_progression, random_progression,
random_key, isvalidnote, resolve_with_chords, chordname,
random_chord, easy_play, play_wait)
import settings as st
# External Dependencies
import time, random, sys
from copy import copy
from collections import OrderedDict
from multiprocessing import Process
from mingus.midi import fluidsynth  # requires FluidSynth to be installed
from mingus.core import progressions, intervals, chords as ch
import mingus.core.notes as notes
from mingus.containers import NoteContainer, Note, Bar
# Decorators
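# Both decorators below wrap a menu action and set st.NEWQUESTION before
# calling it, so the game loop knows whether to generate a fresh question
# afterwards (new_question) or keep/replay the current one (repeat_question).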
def repeat_question(func):
def func_wrapper(*args, **kwargs):
st.NEWQUESTION = False
return func(*args, **kwargs)
return func_wrapper
def new_question(func):
def func_wrapper(*args, **kwargs):
st.NEWQUESTION = True
return func(*args, **kwargs)
return func_wrapper
# Menu Command Actions
@repeat_question
def play_cadence():
play_progression(st.CADENCE, st.KEY, Iup=st.I, bpm=st.BPM)
play_wait()
# time.sleep(2 * st.DELAY)
# @repeat_question
# def set_delay():
# st.DELAY = float(input("Enter the desired delay time (in seconds): "))
@repeat_question
def set_bpm():
st.BPM = float(input("Enter the desired BPM: "))
@new_question
def toggle_triads7ths():
if st.I == "I7":
st.I, st.II, st.III, st.IV, st.V, st.VI, st.VII = \
"I", "II", "III", "IV", "V", "VI", "VII"
else:
st.I, st.II, st.III, st.IV, st.V, st.VI, st.VII = \
"I7", "II7", "III7", "IV7", "V7", "VI7", "VII7"
st.NUMERALS = st.I, st.II, st.III, st.IV, st.V, st.VI, st.VII
@new_question
def set_key(reset_score=True):
mes = ("Enter the desired key, use upper-case for major "
"and lower-case for minor (e.g. C or c).\n"
"Enter R/r for a random major/minor key.")
newkey = input(mes)
keys = ['A', 'Bb', 'B', 'C', 'C#', 'D', 'Eb', 'E', 'F', 'F#', 'G', 'Ab']
if newkey == 'R':
st.KEY = random.choice(keys)
elif newkey == 'r':
st.KEY = random.choice(keys).lower()
elif notes.is_valid_note(newkey):
st.KEY = newkey
else:
print("Input key not understood, key unchanged.")
st.CURRENT_MODE.intro()
if reset_score:
st.COUNT = 0
st.SCORE = 0
@repeat_question
def toggle_many_octaves():
st.MANY_OCTAVES = not st.MANY_OCTAVES
print("MANY_OCTAVE : {}".format(st.MANY_OCTAVES))
@repeat_question
def arpeggiate(invert=False, descending=False, chord=None, bpm=None,
durations=None):
if not bpm:
bpm = st.BPM
# if not delay:
# delay = st.DELAY/2
if chord:
pass
elif st.CURRENT_MODE.name in ['single_chord', 'chord_tone']:
chord = st.CURRENT_Q_INFO["chord"]
elif st.CURRENT_MODE.name in ['interval']:
chord = st.CURRENT_Q_INFO["interval"]
else:
print("Arpeggiation not available in {} mode.".format(st.CURRENT_MODE))
return
arpeggiation = [x for x in chord]
if invert:
arpeggiation = [arpeggiation[i] for i in invert]
elif descending:
arpeggiation.reverse()
# Play
easy_play(arpeggiation, durations, bpm)
play_wait() # play wait
# bar = Bar()
# if not durations:
# durations = [4]*len(arpeggiation)
# for x, d in zip(arpeggiation, durations):
# bar.place_notes(x, d)
# fluidsynth.play_Bar(bar, bpm=bpm)
# for x in arpeggiation:
# fluidsynth.play_Note(x)
# time.sleep(delay)
def change_mode_settings(mode):
if mode == "interval":
interval_modes = \
["triads", "sevenths", "ascending", "descending", "mixed"]
mes = "Enter:\n"
mes += "\n".join(["{} for {}".format(k, m)
for k, m in enumerate(interval_modes)])
        user_response = getch(mes)
        st.INTERVAL_MODE = interval_modes[int(user_response)]
else:
pass
def change_game_mode(new_mode):
@new_question
def _change_mode():
st.COUNT = 0
st.SCORE = 0
if new_mode == st.CURRENT_MODE.name:
change_mode_settings(new_mode)
st.CURRENT_MODE = game_modes[new_mode]
return _change_mode
@repeat_question
def play_question_again():
return
@repeat_question
def toggle_alt_chord_tone_res():
st.ALTERNATIVE_CHORD_TONE_RESOLUTION = \
(st.ALTERNATIVE_CHORD_TONE_RESOLUTION + 1) % 3
print("Switching to chord tone resolution "
"option {}".format(st.ALTERNATIVE_CHORD_TONE_RESOLUTION))
menu_commands = [
gs.MenuCommand("v", "hear the cadence",
play_cadence),
gs.MenuCommand("w", "change the BPM",
set_bpm),
gs.MenuCommand("s", "toggle between hearing triads and hearing seventh "
"chords",
toggle_triads7ths),
gs.MenuCommand("k", "change the key",
set_key),
gs.MenuCommand("o", "toggle between using one octave or many",
toggle_many_octaves),
gs.MenuCommand("m", "to arpeggiate chord (not available in progression "
"mode)",
arpeggiate),
gs.MenuCommand("p", "switch to random progression mode (experimental)",
change_game_mode('progression')),
gs.MenuCommand("n", "switch to interval mode",
change_game_mode('interval')),
gs.MenuCommand("t", "switch to chord tone mode",
change_game_mode('chord_tone')),
gs.MenuCommand("h", "switch to single chord mode",
change_game_mode('single_chord')),
gs.MenuCommand("i", "toggle between chord tone resolutions",
toggle_alt_chord_tone_res),
gs.MenuCommand("x", "quit",
sys.exit),
gs.MenuCommand("", "hear the chord or progression again",
play_question_again,
input_description="Press Enter"),
]
menu_commands = OrderedDict([(mc.command, mc) for mc in menu_commands])
# Game Mode Intro Functions
def intro(play_cadence=True):
print("\n" + "~" * 20 + "\n")
# List menu_commands
print("Note: At any time enter")
for mc in menu_commands.values():
print(mc.input_description, "to", mc.description)
print("\n" + "-" * 10 + "\n")
# Display key
if st.KEY == st.KEY.lower():
print("KEY:", st.KEY.upper(), "min")
else:
print("KEY:", st.KEY, "Maj")
print("-" * 10)
# Play cadence
if play_cadence:
play_progression(st.CADENCE, st.KEY, Iup=st.I)
play_wait()
# time.sleep(st.DELAY)
# time.sleep(st.DELAY)
return
###############################################################################
### Interval ##################################################################
###############################################################################
@new_question
def eval_interval_name(user_answer, interval, diatonic):
semitone_distance = int(interval[1]) - int(interval[0])
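    # The table below maps semitone distance (mod 12) to a scale-degree style
    # name, e.g. 0 -> '8' (unison/octave), 1 -> '2b', 4 -> '3', 7 -> '5'
    # (my reading of the naming scheme used throughout this file).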
names = ['8', '2b', '2', '3b', '3', '4', '5b', '5', '6b', '6', '7b', '7']
correct_answer = names[semitone_distance % 12]
user_answer = user_answer.strip()
print("Your answer: ", user_answer)
print("Correct Answer:", correct_answer)
note_nums = [diatonic.note2num(x) for x in interval]
print("Interval Notes:", " ".join([str(x) for x in note_nums]))
if user_answer == correct_answer:
st.SCORE += 1
print("Good Job!")
print()
else:
print("It's ok, you'll get 'em next time.")
print()
play_wait()
@new_question
def eval_interval(ans, interval, diatonic):
try:
int(ans)
answers = [x for x in ans]
except:
answers = ans.split(" ")
def parse_answer(ans):
try:
return int(ans) % 8
except:
try:
                note_name = ans[0].upper() + ans[1:]  # capitalize for mingus
                return diatonic.note2num(Note(note_name))
except:
return "Err"
user_answers = [parse_answer(ans) for ans in answers]
correct_answers = [diatonic.note2num(x) for x in interval]
if len(answers) < len(interval):
print("too few answers")
if len(answers) > len(interval):
print("too many answers")
print("Your answer: ", " ".join([str(x) for x in user_answers]))
print("Correct Answer:", " ".join([str(x) for x in correct_answers]))
###debug (safe to delete)
try:
semitone_distance = int(interval[1]) - int(interval[0])
except:
print()
print(interval)
print()
raise
###end of debug (safe to delete)
semitone_distance = int(interval[1]) - int(interval[0])
names = ['8', '2b', '2','3b', '3', '4', '5b', '5', '6b', '6', '7b', '7']
print("Interval:", names[semitone_distance % 12])
if all([x == y for x, y in zip(user_answers, correct_answers)]):
st.SCORE += 1
print("Good Job!")
print()
else:
print("It's ok, you'll get 'em next time.")
print()
play_wait()
def new_question_interval():
if st.NEWQUESTION:
if st.COUNT:
print("score: {} / {} = {:.2%}"
"".format(st.SCORE, st.COUNT, st.SCORE/st.COUNT))
st.COUNT += 1
# Pick Ioctave
if st.MANY_OCTAVES:
Ioctave = random.choice(st.OCTAVES)
else:
Ioctave = st.DEFAULT_IOCTAVE
from musictools import Diatonic
diatonic = Diatonic(key=st.KEY, Ioctave=Ioctave)
# pick first note
if st.FIXED_ROOT:
first_note = diatonic.notes[st.FIXED_ROOT - 1]
else:
first_note = random.choice(diatonic.notes)
# pick second note
if st.INTERVAL_MODE == 'triads':
number = random.choice([3, 5, 8])
interval = diatonic.interval(number, root=first_note,
ascending=True)
elif st.INTERVAL_MODE == 'sevenths':
number = random.choice([3, 5, 7, 8])
interval = diatonic.interval(number, root=first_note,
ascending=True)
elif st.INTERVAL_MODE == 'ascending':
number = random.choice(st.INTERVALS)
interval = diatonic.interval(number, root=first_note,
ascending=True)
        elif st.INTERVAL_MODE == 'descending':  # redundant for harmonic intervals
number = random.choice(st.INTERVALS)
interval = diatonic.interval(number, root=first_note,
ascending=False)
elif st.INTERVAL_MODE == 'mixed': # redundant for harmonic intervals
number = random.choice(st.INTERVALS)
interval = diatonic.interval(number, root=first_note,
ascending=bool(random.choice([0, 1])))
else:
raise Exception("Can't understand. st.INTERVAL_MODE = {}"
"".format(st.INTERVAL_MODE))
# change Unison intervals to P8 intervals
if len(interval) == 1:
P8 = copy(interval[0])
P8.octave += 1
interval = NoteContainer([interval[0], P8])
# store question info
st.CURRENT_Q_INFO = {'interval': interval,
'Ioctave': Ioctave,
'diatonic': diatonic}
else:
interval = st.CURRENT_Q_INFO['interval']
Ioctave = st.CURRENT_Q_INFO['Ioctave']
diatonic = st.CURRENT_Q_INFO['diatonic']
# Play interval
if st.HARMONIC_INTERVALS:
easy_play(interval)
else:
easy_play([x for x in interval])
# Request user's answer
ans = input("Enter 1-7 or note names separated by spaces: ").strip()
if ans in menu_commands:
menu_commands[ans].action()
else:
if st.NAME_INTERVAL:
eval_interval_name(ans, interval, diatonic)
else:
eval_interval(ans, interval, diatonic)
return
###############################################################################
### single chord ##############################################################
###############################################################################
@new_question
def eval_single_chord(usr_ans, correct_numeral, root_note):
correct_ = False
if usr_ans == str(st.NUMERALS.index(correct_numeral) + 1):
correct_ = True
else:
try:
usr_note_val = int(Note(usr_ans[0].upper() + usr_ans[1:])) % 12
correct_note_val = int(Note(root_note)) % 12
if usr_note_val == correct_note_val:
correct_ = True
except:
pass
return correct_
def new_question_single_chord():
# Choose new chord+octave/Progression
# Single chord mode
if st.NEWQUESTION:
if st.COUNT:
print("score: {} / {} = {:.2%}".format(st.SCORE, st.COUNT,
st.SCORE/st.COUNT))
st.COUNT += 1
# Pick random chord/octave
numeral, chord, Ioctave = random_chord()
# store question info
st.CURRENT_Q_INFO = {'numeral': numeral,
'chord': chord,
'Ioctave': Ioctave}
else:
numeral = st.CURRENT_Q_INFO['numeral']
chord = st.CURRENT_Q_INFO['chord']
Ioctave = st.CURRENT_Q_INFO['Ioctave']
# Play chord
play_progression([numeral], st.KEY, Ioctave=Ioctave)
# Request user's answer
ans = getch("Enter 1-7 or root of chord: ").strip()
if ans in menu_commands:
menu_commands[ans].action()
else:
if isvalidnote(ans):
if eval_single_chord(ans, numeral, chord[0].name):
st.SCORE += 1
print("Yes!", chordname(chord, numeral))
if st.RESOLVE_WHEN_CORRECT:
resolve_with_chords(numeral, key=st.KEY, Ioctave=Ioctave,
numerals=st.NUMERALS, bpm=st.BPM*2)
play_wait()
else:
print("No!", chordname(chord, numeral))
if st.RESOLVE_WHEN_INCORRECT:
resolve_with_chords(numeral, key=st.KEY, Ioctave=Ioctave,
numerals=st.NUMERALS, bpm=st.BPM*2)
play_wait()
else:
print("User input not understood. Please try again.")
return
###############################################################################
### progression ###############################################################
###############################################################################
@new_question
def eval_progression(ans, prog, prog_strums):
try:
int(ans)
answers = [x for x in ans]
except:
answers = ans.split(" ")
answers_correct = []
for i, answer in enumerate(answers):
try:
correct_numeral = prog[i]
tmp = progressions.to_chords([correct_numeral], st.KEY)[0]
root = NoteContainer(tmp)[0].name
user_correct = eval_single_chord(answer, correct_numeral, root)
print(user_correct)
answers_correct.append(user_correct)
except IndexError:
print("too many answers")
if len(answers) < len(prog):
print("too few answers")
print("Progression:", " ".join(prog_strums))
print("Your answer: ", " ".join(answers))
print("Correct Answer:", " ".join(
[str(st.NUMERALS.index(x) + 1) for x in prog]))
if all(answers_correct):
st.SCORE += 1
print("Good Job!")
print()
else:
print("It's ok, you'll get 'em next time.")
print()
# time.sleep(st.DELAY)
play_wait()
def new_question_progression():
if st.NEWQUESTION:
if st.COUNT:
print("score: {} / {} = {:.2%}".format(st.SCORE, st.COUNT,
st.SCORE/st.COUNT))
st.COUNT += 1
# Find random chord progression
prog_length = random.choice(st.PROG_LENGTHS)
prog, prog_strums = random_progression(prog_length, st.NUMERALS,
st.CHORD_LENGTHS)
# store question info
st.CURRENT_Q_INFO = {'prog': prog,
'prog_strums': prog_strums}
else:
prog = st.CURRENT_Q_INFO['prog']
prog_strums = st.CURRENT_Q_INFO['prog_strums']
# Play chord/progression
play_progression(prog_strums, st.KEY)
# Request user's answer
ans = input("Enter your answer using root note names "
"or numbers 1-7 seperated by spaces: ").strip()
if ans in menu_commands:
menu_commands[ans].action()
else:
eval_progression(ans, prog, prog_strums)
# # Request user's answer
# ans = input("Enter your answer using root note names "
# "or numbers 1-7 seperated by spaces: ").strip()
# if ans in menu_commands:
# menu_commands[ans].action()
# else:
# eval_progression(ans, prog, prog_strums)
# @new_question
# def eval_chord_tone(ans, chord, tone):
# tone_idx = [n for n in chord].index(tone)
# correct_ans = st.TONES[tone_idx]
# return ans == correct_ans
###############################################################################
### chord tone ################################################################
###############################################################################
def resolve_chord_tone(chord, tone, Ioctave):
# play_progression([numeral], st.KEY, Ioctave=Ioctave)
if st.ALTERNATIVE_CHORD_TONE_RESOLUTION == 1:
fluidsynth.play_NoteContainer(chord)
play_wait()
fluidsynth.play_Note(tone)
play_wait()
root = chord[0]
interval = NoteContainer([root, tone])
fluidsynth.play_NoteContainer(interval)
elif st.ALTERNATIVE_CHORD_TONE_RESOLUTION == 2:
fluidsynth.play_NoteContainer(chord)
play_wait()
tone_idx = [x for x in chord].index(tone)
if tone_idx == 0:
arpeggiate()
elif tone_idx == 1:
arpeggiate(invert=[1, 0, 2])
elif tone_idx == 2:
arpeggiate(descending=True)
else:
raise Exception("This chord tone resolutions mode is only "
"implemented for triads.")
# fluidsynth.play_Note(Iup_note)
# Iup_note = Note(st.KEY)
# Iup_note.octave += 1
# fluidsynth.play_Note(Iup_note)
else:
fluidsynth.play_NoteContainer(chord)
play_wait()
fluidsynth.play_Note(tone)
play_wait()
arpeggiate() # sets NEWQUESTION = False
def new_question_chord_tone():
if st.NEWQUESTION:
if st.COUNT:
print("score: {} / {} = {:.2%}".format(st.SCORE, st.COUNT,
st.SCORE/st.COUNT))
st.COUNT += 1
# Pick random chord/octave
numeral, chord, Ioctave = random_chord()
# Pick a random tone in the chord
tone = random.choice(chord)
# store question info
st.CURRENT_Q_INFO = {'numeral': numeral,
'chord': chord,
'Ioctave': Ioctave,
'tone': tone}
else:
numeral = st.CURRENT_Q_INFO['numeral']
chord = st.CURRENT_Q_INFO['chord']
Ioctave = st.CURRENT_Q_INFO['Ioctave']
tone = st.CURRENT_Q_INFO['tone']
# Play chord, then tone
def playfcn():
play_progression([numeral], st.KEY, Ioctave=Ioctave)
play_wait()
fluidsynth.play_Note(tone)
    p = Process(target=playfcn)
p.start()
# Request user's answer
mes = ("Which tone did you hear?\n""Enter {}, or {}: ".format(
", ".join([str(t) for t in st.TONES[:-1]]),
st.TONES[-1]))
ans = getch(mes).strip()
p.terminate()
if ans in menu_commands:
menu_commands[ans].action()
else:
        try:
            ans = int(ans)
        except ValueError:
            print("User input not understood. Please try again.")
            st.NEWQUESTION = False
            return
if ans in st.TONES:
tone_idx = [n for n in chord].index(tone)
correct_ans = st.TONES[tone_idx]
if ans == correct_ans:
st.SCORE += 1
print("Yes! The {} tone of".format(correct_ans),
chordname(chord, numeral))
if st.ARPEGGIATE_WHEN_CORRECT:
resolve_chord_tone(chord, tone, Ioctave)
play_wait()
st.NEWQUESTION = True
else:
print("No! The {} tone of".format(correct_ans),
chordname(chord, numeral))
if st.ARPEGGIATE_WHEN_INCORRECT:
resolve_chord_tone(chord, tone, Ioctave)
play_wait()
st.NEWQUESTION = True
# secret option
elif ans in [8, 9, 0]:
tone_idx = [8, 9, 0].index(ans)
for num in st.NUMERALS:
tmp = progressions.to_chords([num], st.KEY)[0]
num_chord = NoteContainer(tmp)
play_progression([num], st.KEY, Ioctave=Ioctave)
play_wait()
fluidsynth.play_Note(num_chord[tone_idx])
play_wait()
play_wait()
st.NEWQUESTION = False
else:
print("User input not understood. Please try again.")
st.NEWQUESTION = False
return
###############################################################################
### game modes ################################################################
###############################################################################
game_modes = {
'single_chord': gs.GameMode('single_chord',
intro,
new_question_single_chord
),
'progression': gs.GameMode('progression',
intro,
new_question_progression
),
'chord_tone': gs.GameMode('chord_tone',
lambda: intro(play_cadence=False),
new_question_chord_tone
),
'interval': gs.GameMode('interval',
intro,
new_question_interval
),
}
|
semaphore_test.py
|
import threading
import time
from parameterized import parameterized
from hazelcast import HazelcastClient
from hazelcast.errors import (
DistributedObjectDestroyedError,
IllegalStateError,
)
from tests.integration.backward_compatible.proxy.cp import CPTestCase
from tests.util import get_current_timestamp, random_string
SEMAPHORE_TYPES = [
"sessionless",
"sessionaware",
]
class SemaphoreTest(CPTestCase):
def setUp(self):
self.semaphore = None
def tearDown(self):
if self.semaphore:
self.semaphore.destroy()
@parameterized.expand(SEMAPHORE_TYPES)
def test_semaphore_in_another_group(self, semaphore_type):
semaphore = self.get_semaphore(semaphore_type, 1)
another_semaphore = self.client.cp_subsystem.get_semaphore(
semaphore._proxy_name + "@another"
).blocking()
self.assertEqual(1, semaphore.available_permits())
self.assertEqual(0, another_semaphore.available_permits())
semaphore.acquire()
self.assertEqual(0, semaphore.available_permits())
        self.assertEqual(0, another_semaphore.available_permits())
@parameterized.expand(SEMAPHORE_TYPES)
def test_use_after_destroy(self, semaphore_type):
semaphore = self.get_semaphore(semaphore_type)
semaphore.destroy()
# the next destroy call should be ignored
semaphore.destroy()
with self.assertRaises(DistributedObjectDestroyedError):
semaphore.init(1)
semaphore2 = self.client.cp_subsystem.get_semaphore(semaphore._proxy_name).blocking()
with self.assertRaises(DistributedObjectDestroyedError):
semaphore2.init(1)
def test_session_aware_semaphore_after_client_shutdown(self):
semaphore = self.get_semaphore("sessionaware", 1)
another_client = HazelcastClient(cluster_name=self.cluster.id)
another_semaphore = another_client.cp_subsystem.get_semaphore(
semaphore._proxy_name
).blocking()
another_semaphore.acquire(1)
self.assertEqual(0, another_semaphore.available_permits())
self.assertEqual(0, semaphore.available_permits())
another_client.shutdown()
def assertion():
self.assertEqual(1, semaphore.available_permits())
self.assertTrueEventually(assertion)
@parameterized.expand(SEMAPHORE_TYPES)
def test_init(self, semaphore_type):
semaphore = self.get_semaphore(semaphore_type)
self.assertEqual(0, semaphore.available_permits())
self.assertTrue(semaphore.init(10))
self.assertEqual(10, semaphore.available_permits())
@parameterized.expand(SEMAPHORE_TYPES)
def test_init_when_already_initialized(self, semaphore_type):
semaphore = self.get_semaphore(semaphore_type)
self.assertTrue(semaphore.init(5))
self.assertFalse(semaphore.init(7))
self.assertEqual(5, semaphore.available_permits())
@parameterized.expand(SEMAPHORE_TYPES)
def test_acquire(self, semaphore_type):
semaphore = self.get_semaphore(semaphore_type, 42)
self.assertIsNone(semaphore.acquire(2))
self.assertEqual(40, semaphore.available_permits())
self.assertIsNone(semaphore.acquire())
self.assertEqual(39, semaphore.available_permits())
@parameterized.expand(SEMAPHORE_TYPES)
def test_acquire_when_not_enough_permits(self, semaphore_type):
semaphore = self.get_semaphore(semaphore_type, 5)
f = semaphore._wrapped.acquire(10)
self.assertFalse(f.done())
time.sleep(2)
self.assertFalse(f.done())
semaphore.destroy()
with self.assertRaises(DistributedObjectDestroyedError):
f.result()
@parameterized.expand(SEMAPHORE_TYPES)
def test_acquire_blocks_until_someone_releases(self, semaphore_type):
semaphore = self.get_semaphore(semaphore_type, 1)
event = threading.Event()
event2 = threading.Event()
def run():
semaphore.acquire(1)
event.set()
event2.wait()
time.sleep(1)
semaphore.release()
t = threading.Thread(target=run)
t.start()
event.wait()
start = get_current_timestamp()
f = semaphore._wrapped.acquire()
event2.set()
f.result()
self.assertGreaterEqual(get_current_timestamp() - start, 1)
t.join()
@parameterized.expand(SEMAPHORE_TYPES)
def test_acquire_blocks_until_semaphore_is_destroyed(self, semaphore_type):
semaphore = self.get_semaphore(semaphore_type, 1)
event = threading.Event()
event2 = threading.Event()
def run():
semaphore.acquire(1)
event.set()
event2.wait()
time.sleep(1)
semaphore.destroy()
t = threading.Thread(target=run)
t.start()
event.wait()
start = get_current_timestamp()
f = semaphore._wrapped.acquire()
event2.set()
with self.assertRaises(DistributedObjectDestroyedError):
f.result()
self.assertGreaterEqual(get_current_timestamp() - start, 1)
t.join()
@parameterized.expand(SEMAPHORE_TYPES)
def test_available_permits(self, semaphore_type):
semaphore = self.get_semaphore(semaphore_type)
self.assertEqual(0, semaphore.available_permits())
semaphore.init(5)
self.assertEqual(5, semaphore.available_permits())
semaphore.acquire(3)
self.assertEqual(2, semaphore.available_permits())
@parameterized.expand(SEMAPHORE_TYPES)
def test_drain_permits(self, semaphore_type):
semaphore = self.get_semaphore(semaphore_type, 20)
semaphore.acquire(5)
self.assertEqual(15, semaphore.drain_permits())
self.assertEqual(0, semaphore.available_permits())
@parameterized.expand(SEMAPHORE_TYPES)
def test_drain_permits_when_no_permits(self, semaphore_type):
semaphore = self.get_semaphore(semaphore_type, 0)
self.assertEqual(0, semaphore.drain_permits())
@parameterized.expand(SEMAPHORE_TYPES)
def test_reduce_permits(self, semaphore_type):
semaphore = self.get_semaphore(semaphore_type, 10)
self.assertIsNone(semaphore.reduce_permits(5))
self.assertEqual(5, semaphore.available_permits())
self.assertIsNone(semaphore.reduce_permits(0))
self.assertEqual(5, semaphore.available_permits())
def test_reduce_permits_on_negative_permits_counter_sessionless(self):
semaphore = self.get_semaphore("sessionless", 10)
semaphore.reduce_permits(15)
self.assertEqual(-5, semaphore.available_permits())
semaphore.release(10)
self.assertEqual(5, semaphore.available_permits())
def test_reduce_permits_on_negative_permits_counter_juc_sessionless(self):
semaphore = self.get_semaphore("sessionless", 0)
semaphore.reduce_permits(100)
semaphore.release(10)
self.assertEqual(-90, semaphore.available_permits())
self.assertEqual(-90, semaphore.drain_permits())
semaphore.release(10)
self.assertEqual(10, semaphore.available_permits())
self.assertEqual(10, semaphore.drain_permits())
def test_reduce_permits_on_negative_permits_counter_session_aware(self):
semaphore = self.get_semaphore("sessionaware", 10)
semaphore.reduce_permits(15)
self.assertEqual(-5, semaphore.available_permits())
def test_reduce_permits_on_negative_permits_counter_juc_session_aware(self):
semaphore = self.get_semaphore("sessionaware", 0)
semaphore.reduce_permits(100)
self.assertEqual(-100, semaphore.available_permits())
self.assertEqual(-100, semaphore.drain_permits())
@parameterized.expand(SEMAPHORE_TYPES)
def test_increase_permits(self, semaphore_type):
semaphore = self.get_semaphore(semaphore_type, 10)
self.assertEqual(10, semaphore.available_permits())
self.assertIsNone(semaphore.increase_permits(100))
self.assertEqual(110, semaphore.available_permits())
self.assertIsNone(semaphore.increase_permits(0))
self.assertEqual(110, semaphore.available_permits())
@parameterized.expand(SEMAPHORE_TYPES)
def test_release(self, semaphore_type):
semaphore = self.get_semaphore(semaphore_type, 2)
semaphore.acquire(2)
self.assertIsNone(semaphore.release(2))
self.assertEqual(2, semaphore.available_permits())
def test_release_when_acquired_by_another_client_sessionless(self):
semaphore = self.get_semaphore("sessionless")
another_client = HazelcastClient(cluster_name=self.cluster.id)
another_semaphore = another_client.cp_subsystem.get_semaphore(
semaphore._proxy_name
).blocking()
self.assertTrue(another_semaphore.init(1))
another_semaphore.acquire()
try:
semaphore.release(1)
self.assertEqual(1, semaphore.available_permits())
finally:
another_client.shutdown()
def test_release_when_not_acquired_session_aware(self):
semaphore = self.get_semaphore("sessionaware", 3)
semaphore.acquire(1)
with self.assertRaises(IllegalStateError):
semaphore.release(2)
def test_release_when_there_is_no_session_session_aware(self):
semaphore = self.get_semaphore("sessionaware", 3)
with self.assertRaises(IllegalStateError):
semaphore.release()
@parameterized.expand(SEMAPHORE_TYPES)
    def test_try_acquire(self, semaphore_type):
semaphore = self.get_semaphore(semaphore_type, 5)
self.assertTrue(semaphore.try_acquire())
self.assertEqual(4, semaphore.available_permits())
@parameterized.expand(SEMAPHORE_TYPES)
def test_try_acquire_with_given_permits(self, semaphore_type):
semaphore = self.get_semaphore(semaphore_type, 5)
self.assertTrue(semaphore.try_acquire(3))
self.assertEqual(2, semaphore.available_permits())
@parameterized.expand(SEMAPHORE_TYPES)
def test_try_acquire_when_not_enough_permits(self, semaphore_type):
semaphore = self.get_semaphore(semaphore_type, 1)
self.assertFalse(semaphore.try_acquire(2))
self.assertEqual(1, semaphore.available_permits())
@parameterized.expand(SEMAPHORE_TYPES)
def test_try_acquire_when_not_enough_permits_with_timeout(self, semaphore_type):
semaphore = self.get_semaphore(semaphore_type, 1)
start = get_current_timestamp()
self.assertFalse(semaphore.try_acquire(2, 1))
self.assertGreaterEqual(get_current_timestamp() - start, 1)
self.assertEqual(1, semaphore.available_permits())
def get_semaphore(self, semaphore_type, initialize_with=None):
semaphore = self.client.cp_subsystem.get_semaphore(
semaphore_type + random_string()
).blocking()
if initialize_with is not None:
semaphore.init(initialize_with)
self.semaphore = semaphore
return semaphore
|
main.py
|
from annotatePDF import *
from tkinter import *
from tkinter.ttk import *
from tkinter import filedialog
from PyInstaller.utils.hooks import collect_data_files
def functiona():
print("a")
def loadPDFfile(master):
master.filename = filedialog.askopenfilename(initialdir="/", title="Select file",
filetypes=(("PDF files", "*.pdf"), ("all files", "*.*")))
path = master.filename
weight_v = "configs/Glycan_300img_5000iterations.weights"
print("Loaded file:", path, type(path))
try:
checkpath()
except PermissionError:
time.sleep(2)
checkpath()
annotatePDFGlycan(path, weight_v)
print("Scrip Finished annotated:",path)
os.startfile(r"test\\0000page.pdf")
def loadImagefile(master):
# Instruct pyinstaller to collect data files from resources package.
datas = collect_data_files('pygly3')
master.filename = filedialog.askopenfilename(initialdir="/", title="Select file",
filetypes=(("png files", "*.png"),("jpeg files", "*.jpg"), ("all files", "*.*")))
    path = master.filename
img_file = cv2.imread(path)
monoCount_dict, final, origin, mask_dict, return_contours = countcolors(img_file)
mono_dict, a, b = extractGlycanTopology(mask_dict, return_contours, origin)
for mono_id in mono_dict.keys():
print(mono_id, mono_dict[mono_id][4])
print(a.shape)
cv2.imshow('a', cv2.resize(a, None, fx=1, fy=1))
cv2.waitKey(0)
cv2.imshow('b', cv2.resize(b, None, fx=2, fy=2))
cv2.waitKey(0)
cv2.destroyAllWindows()
glycoCT=buildglycan(mono_dict)
print("Condensed GlycoCT:\n", glycoCT)
#select place to save
folder_selected = filedialog.askdirectory()
print(folder_selected)
cv2.imwrite(f"{folder_selected}/annotated_a.png",a)
cv2.imwrite(f"{folder_selected}/annotated_b.png",b)
f = open(f"{folder_selected}/GlycoCT.txt", "w+")
f.write(glycoCT)
f.close()
os.startfile(f"{folder_selected}/GlycoCT.txt")
accession = searchGlycoCT(glycoCT)
f = open(f"{folder_selected}/Accession_hits.txt", "w+")
f.write(f"{accession}\nhttps://gnome.glyomics.org/StructureBrowser.html?focus={accession}")
f.close()
os.startfile(f"{folder_selected}/Accession_hits.txt")
if __name__ == '__main__':
#findImageonDesktopVisual()
f = open("run.bat", "w+")
f.write("@ECHO OFF\nECHO Nhat Duong. Glycan extractor \nmain.exe\nPAUSE")
f.close()
    # GUI
master = Tk()
master.title("Glycan extractor")
BKbotIcon=PhotoImage(file="Data/Images/BKbotIcon_small.png")
background = PhotoImage(file="Data/Images/BKbotBackground_dim.png")
background_label = Label(master, image=background)
background_label.place(x=0, y=0, relwidth=1, relheight=1)
master.iconphoto(False, BKbotIcon)
# sets the geometry of
# main root window
master.geometry("672x480")
label = Label(master, text="Welcome to Nhat Duong's Glycan Extraction Tool")
label.pack(side=TOP, pady=10)
# a button widget which will
# open a new window on button click
#cairos_thread = threading.Thread(target=runloop)
#riftb_thread = threading.Thread(target=farmrift)
# make test_loop terminate when the user exits the window
#cairos_thread.daemon = True
#riftb_thread.daemon = True
#c_btn = Button(master,text="Click to run Cairos.",command=lambda: cairos_thread_func(cairos_thread))
#r_btn = Button(master,text="Click to run Rift Beast.", command=lambda: riftb_thread_func(riftb_thread))
# Following line will bind click event
# On any click left / right button
# of mouse a new window will be opened
pdf_btn = Button(master, text="Open pdf file",command=lambda: loadPDFfile(master))
image_btn = Button(master, text="Open image file", command=lambda: loadImagefile(master))
quit_btn = Button(master, text="Quit", width=8,
command=master.quit)
pdf_btn.pack(pady=10)
image_btn.pack(pady=10)
quit_btn.pack(pady=10)
# mainloop, runs infinitely
mainloop()
|
euler357mp.py
|
"""Prime generating integers
Problem 357
Consider the divisors of 30: 1,2,3,5,6,10,15,30.
It can be seen that for every divisor d of 30, d+30/d is prime.
Find the sum of all positive integers n not exceeding 100 000 000
such that for every divisor d of n, d+n/d is prime.
"""
# Multiprocessing version!!!!!
from eulerlib import generatePrimesSieve
import multiprocessing
def isPrimeGeneratingInteger(n):
global isPrime
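    # Last-digit filter (my reading of this heuristic, not from the original
    # comments): for n > 10, d=1 forces n+1 to be prime, so n must be even; a
    # last digit of 4 makes n+1 end in 5 (divisible by 5), and a last digit of
    # 6 makes 2 + n/2 end in 0 or 5, so only 0, 2 and 8 can pass.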
    if n > 10 and n % 10 not in (0, 2, 8):
return False
for d in range(1, n+1):
if n % d != 0:
continue
nd = n // d
if d > nd:
break
x = d + n // d
#print(d, "+", n, "/", d, "=", d, "+", n // d, "=",x)
if not isPrime[x]:
return False
return True
def solvePartial(start, end, out_q):
total = 0
for i in range(start, end+1):
if isPrimeGeneratingInteger(i):
print(i)
total += i
    out_q.put(total)
if __name__ == '__main__':
LIMIT = 1000
#LIMIT = 100000000
print("Generating primes... ", end="")
primes, isPrime = generatePrimesSieve(LIMIT+2)
print("done.")
    procs = []
    nprocs = 1
    # A single queue shared by all worker processes to report their partial sums
    out_q = multiprocessing.Queue()
for i in range(nprocs):
# calc the range to pass for the given process
start = (LIMIT // nprocs) * i + 1
end = (LIMIT // nprocs) * (i+1)
if i == nprocs - 1:
end = LIMIT
# create a child process
print("process", i, start, end)
        p = multiprocessing.Process(target=solvePartial, args=(start, end, out_q))
procs.append(p)
p.start()
total = 0
for i in range(nprocs):
total += out_q.get()
for p in procs:
p.join()
print("soluction: ", total)
|
piotroski_greenblatt_more_piotroski.py
|
#!/usr/bin/env python3
# The analysis applies principles from Joseph D. Piotroski,
# set out in the book: "Value Investing: The Use of Historical Financial Statement Information to Separate Winners from Losers"
# In Piotroski's original study, over 20 years (1976-1996), an investment strategy based on this score (buying companies with a high F-Score and selling companies with a low F-Score) produced an annual return of 23%, well above the market average.
# Piotroski built a score called the "Piotroski F-Score" that ranges from 0 to 9; the higher it is, the more filters the stock has passed.
# Principles used:
# Piotroski:
# 1. ROA > 0 (current year)
# 2. Operating cash flow (FCO) > 0 (current year)
# 3. FCO > net income (current year)
# 4. Current ROA > previous year's ROA
# 5. Current leverage < previous year's (net debt / shareholders' equity)
# 6. Current ratio > previous year's current ratio
# 7. Current number of shares = previous year's number of shares
# 8. Current gross margin > previous year's gross margin
# 9. Current asset turnover > previous year's asset turnover
# Greenblatt:
# * Adapted Magic Formula (both at the same time):
# 1) higher ROIC and lower EV/EBIT
# 2) higher ROE and lower P/L (P/E)
# Reference: https://medium.com/@gutenbergn/piotroski-d9a722b8ef9a
# Backtesting in the USA: https://www.quant-investing.com/blogs/backtests/2018/11/06/piotroski-f-score-back-test
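# Illustrative note (not part of the original header): in calculate() below each
# share gets a placement (0 = best) for ROE, P/L, ROIC and EV/EBIT, and
# 'Magic Formula' is the sum of the four placements, so lower totals rank
# better; e.g. placements of 3, 10, 1 and 7 give a score of 3 + 10 + 1 + 7 = 21.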
import sys, os
sys.path.extend([f'../{name}' for name in os.listdir("..") if os.path.isdir(f'../{name}')])
import fundamentus
import bovespa
import backtest
import browser
import pandas
import numpy
import http.cookiejar
import urllib.request
import json
import threading
import time
import pyperclip
# def print(thing):
# import pprint
# return pprint.PrettyPrinter(indent=4).pprint(thing)
def populate_shares(year):
globals()['year'] = year
globals()['infos'] = {}
if year == current_year():
shares = bovespa.shares()
else:
shares = fundamentus.shares(year)
shares = shares[shares['Cotação'] > 0]
shares = shares[shares['Liquidez 2 meses'] > 10000]
shares['Ranking (Piotroski)'] = 0
shares['Ranking (Greenblatt)'] = 0
shares['Ranking (Final)'] = 0
fill_infos(shares)
remove_bad_shares(shares)
shares = add_ratings(shares)
shares = reorder_columns(shares)
return shares
# infos = {
# 'TRPL4': {
# 'roa_positivo': True/False,
# 'fco_positivo': True/False,
# 'fco_saudavel': True/False,
# 'roa_crescente': True/False,
# 'alavancagem_decrescente': True/False,
# 'liquidez_crescente': True/False,
# 'no_acoes_constante': True/False,
# 'margem_bruta_crescente': True/False,
# 'giro_ativo_crescente': True/False
# }
# }
def fill_infos(shares):
cookie_jar = http.cookiejar.CookieJar()
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cookie_jar))
opener.addheaders = [('User-agent', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; rv:2.2) Gecko/20110201'),
('Accept', 'text/html, text/plain, text/css, text/sgml, */*;q=0.01')]
tickers = list(shares.index)
# import pry; pry()
threads = [threading.Thread(target=fill_infos_by_ticker, args=(ticker,opener,)) for ticker in tickers]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
def fill_infos_by_ticker(ticker, opener):
infos[ticker] = {
'roa_positivo': False,
'fco_positivo': False,
'fco_saudavel': False,
'roa_crescente': False,
'alavancagem_decrescente': False,
'liquidez_crescente': False,
'no_acoes_constante': False,
'margem_bruta_crescente': False,
'giro_ativo_crescente': False
}
current_year = year
# Fetching Current Year Indicators
current_indicators_url = f'https://api-analitica.sunoresearch.com.br/api/Indicator/GetIndicatorsDashboard?ticker={ticker}'
with opener.open(current_indicators_url) as link:
company_indicators = link.read().decode('ISO-8859-1')
company_indicators = json.loads(company_indicators)
# Fetching Previous Years Indicators
yearly_indicators_url = f'https://api-analitica.sunoresearch.com.br/api/Indicator/GetIndicatorsYear?ticker={ticker}'
with opener.open(yearly_indicators_url) as link:
yearly_indicators = link.read().decode('ISO-8859-1')
yearly_indicators = json.loads(yearly_indicators)
company_indicators.extend(yearly_indicators)
    # Only consider company indicators from years at or before current_year (robust for backtesting purposes)
company_indicators = [ci for ci in company_indicators if ci['year'] <= current_year]
if (len(company_indicators) > 0):
infos[ticker]['roa_positivo'] = company_indicators[0]['roa'] > 0
infos[ticker]['fco_positivo'] = company_indicators[0]['fco'] > 0
infos[ticker]['fco_saudavel'] = company_indicators[0]['fco'] > company_indicators[0]['lucroLiquido']
if (len(company_indicators) > 1):
infos[ticker]['roa_crescente'] = company_indicators[0]['roa'] > company_indicators[1]['roa']
infos[ticker]['alavancagem_decrescente'] = company_indicators[0]['dlpl'] < company_indicators[1]['dlpl']
infos[ticker]['liquidez_crescente'] = company_indicators[0]['liqCorrent'] > company_indicators[1]['liqCorrent']
infos[ticker]['no_acoes_constante'] = company_indicators[0]['qntAcoes'] == company_indicators[1]['qntAcoes']
infos[ticker]['margem_bruta_crescente'] = company_indicators[0]['margBruta'] > company_indicators[1]['margBruta']
infos[ticker]['giro_ativo_crescente'] = company_indicators[0]['giroAtivos'] > company_indicators[1]['giroAtivos']
def remove_bad_shares(shares):
shares.drop(shares[shares['P/L'] <= 0].index, inplace=True)
shares.drop(shares[shares['ROE'] <= 0].index, inplace=True)
shares.drop(shares[shares['EV/EBIT'] <= 0].index, inplace=True)
shares.drop(shares[shares['ROIC'] <= 0].index, inplace=True)
def add_ratings(shares):
add_strategy_columns(shares)
shares = calculate(shares)
return fill_special_infos(shares)
# Initialize the indicator columns
def add_strategy_columns(shares):
shares['Piotroski Score'] = 0
shares['ROA positivo'] = False
shares['FCO positivo'] = False
shares['FCO > Lucro Líquido'] = False
shares['ROA crescente'] = False
shares['Alavancagem decrescente'] = False
shares['Liquidez Corrente crescente'] = False
shares['No Ações constante'] = False
shares['Margem Bruta crescente'] = False
shares['Giro Ativo crescente'] = False
shares['ROE placement'] = 0
shares['P/L placement'] = 0
shares['ROIC placement'] = 0
shares['EV/EBIT placement'] = 0
def calculate(shares):
shares['Magic Formula'] = 0
shares.sort_values(by='ROE', ascending=False, inplace=True)
shares['ROE placement'] = range(0, len(shares))
shares.sort_values(by='P/L', ascending=True, inplace=True)
shares['P/L placement'] = range(0, len(shares))
shares['Magic Formula'] += shares['ROE placement'] + shares['P/L placement']
shares.sort_values(by='ROIC', ascending=False, inplace=True)
shares['ROIC placement'] = range(0, len(shares))
shares.sort_values(by='EV/EBIT', ascending=True, inplace=True)
shares['EV/EBIT placement'] = range(0, len(shares))
shares['Magic Formula'] += shares['ROIC placement'] + shares['EV/EBIT placement']
return shares
def fill_special_infos(shares):
for index in range(len(shares)):
ticker = shares.index[index]
shares['Piotroski Score'][index] += int(infos[ticker]['roa_positivo'])
shares['ROA positivo'][index] = infos[ticker]['roa_positivo']
shares['Piotroski Score'][index] += int(infos[ticker]['fco_positivo'])
shares['FCO positivo'][index] = infos[ticker]['fco_positivo']
shares['Piotroski Score'][index] += int(infos[ticker]['fco_saudavel'])
shares['FCO > Lucro Líquido'][index] = infos[ticker]['fco_saudavel']
shares['Piotroski Score'][index] += int(infos[ticker]['roa_crescente'])
shares['ROA crescente'][index] = infos[ticker]['roa_crescente']
shares['Piotroski Score'][index] += int(infos[ticker]['alavancagem_decrescente'])
shares['Alavancagem decrescente'][index] = infos[ticker]['alavancagem_decrescente']
shares['Piotroski Score'][index] += int(infos[ticker]['liquidez_crescente'])
shares['Liquidez Corrente crescente'][index] = infos[ticker]['liquidez_crescente']
shares['Piotroski Score'][index] += int(infos[ticker]['no_acoes_constante'])
shares['No Ações constante'][index] = infos[ticker]['no_acoes_constante']
shares['Piotroski Score'][index] += int(infos[ticker]['margem_bruta_crescente'])
shares['Margem Bruta crescente'][index] = infos[ticker]['margem_bruta_crescente']
shares['Piotroski Score'][index] += int(infos[ticker]['giro_ativo_crescente'])
shares['Giro Ativo crescente'][index] = infos[ticker]['giro_ativo_crescente']
return shares
def reorder_columns(shares):
columns = ['Ranking (Final)', 'Ranking (Piotroski)', 'Ranking (Greenblatt)', 'Cotação', 'Piotroski Score', 'Magic Formula', 'P/L', 'ROE', 'EV/EBIT', 'ROIC']
return shares[columns + [col for col in shares.columns if col not in tuple(columns)]]
# Get the current_year integer value, for example: 2020
def current_year():
return int(time.strftime("%Y"))
# python3 piotroski_greenblatt_more_piotroski.py "{ 'year': 2015 }"
if __name__ == '__main__':
# Opening these URLs to automatically allow this API to receive more requests from local IP
browser.open('https://api-analitica.sunoresearch.com.br/api/Indicator/GetIndicatorsDashboard?ticker=BBAS3')
browser.open('https://api-analitica.sunoresearch.com.br/api/Indicator/GetIndicatorsYear?ticker=BBAS3')
year = current_year()
if len(sys.argv) > 1:
year = int(eval(sys.argv[1])['year'])
shares = populate_shares(year)
shares.sort_values(by=['Piotroski Score', 'Cotação'], ascending=[False, True], inplace=True)
shares['Ranking (Piotroski)'] = range(1, len(shares) + 1)
shares.sort_values(by=['Magic Formula', 'Cotação'], ascending=[True, True], inplace=True)
shares['Ranking (Greenblatt)'] = range(1, len(shares) + 1)
shares.sort_values(by=['Ranking (Piotroski)', 'Ranking (Greenblatt)'], ascending=[True, True], inplace=True)
shares['Ranking (Final)'] = range(1, len(shares) + 1)
print(shares)
pyperclip.copy(shares.to_markdown())
if year != current_year():
backtest.run_all(fundamentus.start_date(year), list(shares.index[:20]))
|
test_executors.py
|
import multiprocessing
import threading
import time
from datetime import timedelta
from unittest.mock import MagicMock
import pytest
import prefect
from prefect.utilities.executors import Heartbeat, timeout_handler
def test_heartbeat_calls_function_on_interval():
class A:
def __init__(self):
self.called = 0
def __call__(self):
self.called += 1
a = A()
timer = Heartbeat(0.09, a)
timer.start()
time.sleep(0.2)
timer.cancel()
timer.join()
assert a.called == 2
def test_timeout_handler_times_out():
slow_fn = lambda: time.sleep(2)
with pytest.raises(TimeoutError):
timeout_handler(slow_fn, timeout=1)
def test_timeout_handler_passes_args_and_kwargs_and_returns():
def do_nothing(x, y=None):
return x, y
assert timeout_handler(do_nothing, 5, timeout=1, y="yellow") == (5, "yellow")
def test_timeout_handler_doesnt_swallow_bad_args():
def do_nothing(x, y=None):
return x, y
with pytest.raises(TypeError):
timeout_handler(do_nothing, timeout=1)
with pytest.raises(TypeError):
timeout_handler(do_nothing, 5, timeout=1, z=10)
with pytest.raises(TypeError):
timeout_handler(do_nothing, 5, timeout=1, y="s", z=10)
def test_timeout_handler_reraises():
def do_something():
raise ValueError("test")
with pytest.raises(ValueError) as exc:
timeout_handler(do_something, timeout=1)
assert "test" in exc
def test_timeout_handler_allows_function_to_spawn_new_process():
def my_process():
p = multiprocessing.Process(target=lambda: 5)
p.start()
p.join()
p.terminate()
assert timeout_handler(my_process, timeout=1) is None
def test_timeout_handler_allows_function_to_spawn_new_thread():
def my_thread():
t = threading.Thread(target=lambda: 5)
t.start()
t.join()
assert timeout_handler(my_thread, timeout=1) is None
def test_timeout_handler_doesnt_do_anything_if_no_timeout(monkeypatch):
monkeypatch.delattr(prefect.utilities.executors, "ThreadPoolExecutor")
with pytest.raises(NameError): # to test the test's usefulness...
timeout_handler(lambda: 4, timeout=1)
assert timeout_handler(lambda: 4) == 4
def test_timeout_handler_preserves_context():
def my_fun(x, **kwargs):
return prefect.context.get("test_key")
with prefect.context(test_key=42):
res = timeout_handler(my_fun, 2, timeout=1)
assert res == 42
def test_timeout_handler_preserves_logging(caplog):
timeout_handler(prefect.Flow("logs").run, timeout=2)
assert len(caplog.records) >= 2 # 1 INFO to start, 1 INFO to end
|
benchmark.py
|
# Usage:
# PYTHONPATH=. DJANGO_SETTINGS_MODULE=sequences.test_postgresql_settings django-admin migrate
# PYTHONPATH=. DJANGO_SETTINGS_MODULE=sequences.test_postgresql_settings python benchmark.py
import threading
import time
import django
from django.db import connection
from sequences import get_next_value
django.setup()
LOOPS = 500
THREADS = 20
def get_values():
for _ in range(LOOPS):
# Add `reset_value=1000` to use SELECT + UPDATE instead of INSERT ON CONFLICT.
get_next_value()
connection.close()
threads = [threading.Thread(target=get_values) for _ in range(THREADS)]
t0 = time.perf_counter()
for thread in threads:
thread.start()
for thread in threads:
thread.join()
t1 = time.perf_counter()
print("{} loops x {} threads in {:.2f} seconds = {:.0f} values / second"
.format(LOOPS, THREADS, t1 - t0, LOOPS * THREADS / (t1 - t0)))
|
advanced.py
|
#!./venv_nano_local/bin/python
import unittest
from src.nano_block_ops import BlockGenerator, BlockAsserts, BlockReadWrite
from src.parse_nano_local_config import ConfigReadWrite, ConfigParser, Helpers
import time
import json
from multiprocessing import Process, Queue, Value
def is_not_in_config(module, qual_name, function_name):
return ConfigParser().skip_testcase('{}.{}.{}'.format( module, qual_name, function_name))
class ReplayLedgers(unittest.TestCase):
from testcases.setup.advanced import Init
def setUp(self) -> None:
self.bg = BlockGenerator(broadcast_blocks=True, default_rpc_index=1)
self.ba = BlockAsserts(default_rpc_index=1)
self.brw = BlockReadWrite()
self.conf = ConfigParser()
self.nano_rpc = self.bg.get_nano_rpc_default()
@unittest.skipIf(is_not_in_config(__module__, __qualname__,
"test_N1_1_publish_10_change_blocks"), "according to nano_local_config.toml")
def test_N1_1_publish_10_change_blocks(self):
ini = self.Init(1)
ini.setup_ledger(ini.pre_gen_files["ledger_file"], use_nanoticker = False)
blocks = self.brw.read_blocks_from_disk(ini.pre_gen_files["json_file"])
first_round_blocks = blocks["b"][0][:10]
first_round_block_hashes = blocks["h"][0][:10]
self.ba.assert_blocks_published(first_round_blocks)
self.ba.assert_blocks_confirmed(first_round_block_hashes, sleep_on_stall_s=0.5, log_to_console=True)
@unittest.skipIf(is_not_in_config(__module__, __qualname__,
"test_N1_2_publish_bucket_saturation"), "according to nano_local_config.toml")
def test_N1_2_publish_bucket_saturation(self):
ini = self.Init(2)
ini.setup_ledger(ini.pre_gen_files["ledger_file"], use_nanoticker = not ini.debug)
blocks = self.brw.read_blocks_from_disk(ini.pre_gen_files["json_file"])
block_count_start = self.bg.get_nano_rpc_default().block_count()["count"]
mp_procs = []
mp_q = Queue()
h = Helpers()
first_round_blocks = blocks["b"][0]
#first_round_block_hashes = blocks["h"][0]
spam_round_blocks = [x for x in blocks["b"][1:len(blocks["b"])]]
spam_block_count = sum([len(b) for b in spam_round_blocks])
t1 = time.time()
#Every spam account broadcasts a recent change block, so its priority should be reduced relative to older blocks
# aiohttp gets stuck if an mp_ process follows a non-mp_ process, so run everything in multiprocessing mode.
proc_round1_spam = Process(target=self.ba.assert_blocks_published, args=(first_round_blocks,), kwargs={"sync" : True})
#proc_round1_confirmed = Process(target=self.ba.assert_blocks_confirmed, args=(first_round_block_hashes,)) #not important for this test
proc_round1_spam.start()
#proc_round1_confirmed.start()
proc_round1_spam.join()
#proc_round1_confirmed.join()
first_round_duration = time.time() - t1
#Start multiple processes in parallel.
#1)Start spam with pre_generated blocks. All spam accounts have a recent transaction from blocks published in the previous step
#2)Broadcast 1 genuine block from different accounts. Monitor confirmation duration for each block and move to next account.
t2 = time.time()
mp_spam_running = Value('i', True)
spam_proc = Process(target=self.ba.assert_list_of_blocks_published, args=(spam_round_blocks,), kwargs={"sync" : False, "is_running" : mp_spam_running})
legit_proc = Process(target=ini.online_bucket_main, args=(mp_q, ini.single_tx_timeout ,mp_spam_running,))
spam_proc.start()
legit_proc.start()
spam_proc.join()
spam_duration = time.time() - t2 #measure time when spam has ended
legit_proc.join() #wait for last confirmation after spam has ended
block_count_end = self.bg.get_nano_rpc_default().block_count()
#Convert result of online_bucket_main() from mp_q to list.
mp_q.put(None)
conf_lst = list(iter(mp_q.get, None))
confirmations = [x["conf_duration"] for x in conf_lst if x["timeout"] == False]
print(confirmations[:25])
timeouts = [x for x in conf_lst if x["timeout"]]
test_duration = time.time() - t1
res = { "confs":len(confirmations),
"spam_s": spam_duration,
"bps" : spam_block_count / spam_duration,
"main_cps" : len(confirmations) / test_duration,
"min" : min(confirmations),
"max" : max(confirmations),
"timeouts": len(timeouts) ,
"timeout_s" : ini.single_tx_timeout,
"perc_50":h.percentile(confirmations,50),
"perc_75":h.percentile(confirmations,75),
"perc_90":h.percentile(confirmations,90),
"perc_99":h.percentile(confirmations,99),
"spam_block_count" : spam_block_count,
"round1_s" : first_round_duration,
"test_s" : test_duration,
"blocks_start" : block_count_start,
"blocks_end" : block_count_end["count"],
"blocks_cemented" : block_count_end["cemented"] }
print(json.dumps(res, indent=4))
return res
@unittest.skipIf(is_not_in_config(__module__, __qualname__,
"test_N1_3_loop_2_10x"), "according to nano_local_config.toml")
def test_N1_3_loop_2_10x(self):
import pandas as pd
import traceback
from tabulate import tabulate
from datetime import datetime
ini = self.Init(2)
res = []
for i in range(0, 3):
try:
res.append(self.test_N1_2_publish_bucket_saturation())
print(pd.DataFrame(res))
except Exception as e:
traceback.print_exc()
pass
df = pd.DataFrame(res)
content = tabulate(df.values.tolist(), list(df.columns), tablefmt="plain", floatfmt=".3f")
day_of_year = datetime.now().strftime('%j')
exec_time = datetime.now().strftime("%H%M%S")
file_path = f"{ini.path}/{ini.network_dir}/{day_of_year}_{exec_time}.txt"
f = open(file_path, "w")
f.write(content)
f.close()
print(f"Stats available at {file_path}" )
if __name__ == '__main__':
unittest.main()
|
test_read_parsers.py
|
from __future__ import print_function
from __future__ import absolute_import
#
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2015. It is licensed under
# the three-clause BSD license; see LICENSE.
# Contact: khmer-project@idyll.org
#
# Tests for the ReadParser and Read classes.
import khmer
from khmer import ReadParser
from . import khmer_tst_utils as utils
from nose.plugins.attrib import attr
from functools import reduce
def test_read_properties():
# Note: Using a data file with only one read.
rparser = ReadParser(utils.get_test_data("single-read.fq"))
# Check the properties of the single read in the data set.
for read in rparser:
assert read.name == "895:1:1:1246:14654 1:N:0:NNNNN"
assert read.sequence == "CAGGCGCCCACCACCGTGCCCTCCAACCTGATGGT"
assert read.annotations == ""
assert read.quality == """][aaX__aa[`ZUZ[NONNFNNNNNO_____^RQ_"""
def test_with_default_arguments():
read_names = []
# Note: Using a data file where read names are just integers on [0,99).
rparser = ReadParser(utils.get_test_data("random-20-a.fa"))
for read in rparser:
read_names.append(int(read.name))
# "Derandomize".
read_names.sort()
# Each read number should match the corresponding name.
for m, n in enumerate(read_names):
assert m == n
def test_num_reads():
"""Test ReadParser.num_reads"""
reads_count = 0
rparser = ReadParser(utils.get_test_data("100-reads.fq.gz"))
for _ in rparser:
reads_count += 1
assert reads_count == 100
assert rparser.num_reads == 100
@attr('multithread')
def test_num_reads_threads():
"""Test threadsaftey of ReadParser's read counting"""
import threading
def count_reads(rparser):
for _ in rparser:
pass
n_threads = 4
threads = []
rparser = ReadParser(utils.get_test_data("100-reads.fq.gz"))
for _ in range(n_threads):
thr = threading.Thread(target=count_reads, args=[rparser, ])
threads.append(thr)
thr.start()
for thr in threads:
thr.join()
assert rparser.num_reads == 100
def test_num_reads_truncated():
n_reads = 0
rparser = ReadParser(utils.get_test_data("truncated.fq"))
try:
for read in rparser:
n_reads += 1
except IOError as err:
assert "Sequence is empty" in str(err), str(err)
assert rparser.num_reads == 1, "%d valid reads in file, got %d" % (
n_reads, rparser.num_reads)
def test_gzip_decompression():
reads_count = 0
rparser = ReadParser(utils.get_test_data("100-reads.fq.gz"))
for read in rparser:
reads_count += 1
assert 100 == reads_count
def test_gzip_decompression_truncated():
rparser = ReadParser(utils.get_test_data("100-reads.fq.truncated.gz"))
try:
for read in rparser:
pass
assert 0, "this should fail"
except IOError as err:
print(str(err))
def test_gzip_decompression_truncated_pairiter():
rparser = ReadParser(utils.get_test_data("100-reads.fq.truncated.gz"))
try:
for read in rparser.iter_read_pairs():
pass
assert 0, "this should fail"
except IOError as err:
print(str(err))
def test_bzip2_decompression():
reads_count = 0
rparser = ReadParser(utils.get_test_data("100-reads.fq.bz2"))
for read in rparser:
reads_count += 1
assert 100 == reads_count
def test_bzip2_decompression_truncated():
rparser = ReadParser(utils.get_test_data("100-reads.fq.truncated.bz2"))
try:
for read in rparser:
pass
assert 0, "this should fail"
except IOError as err:
print(str(err))
def test_bzip2_decompression_truncated_pairiter():
rparser = ReadParser(utils.get_test_data("100-reads.fq.truncated.bz2"))
try:
for read in rparser.iter_read_pairs():
pass
assert 0, "this should fail"
except IOError as err:
print(str(err))
def test_badbzip2():
try:
rparser = ReadParser(utils.get_test_data("test-empty.fa.bz2"))
for read in rparser:
pass
assert 0, "this should fail"
except IOError as err:
print(str(err))
except ValueError as err:
print(str(err))
@attr('multithread')
def test_with_multiple_threads(testfile="test-reads.fq.bz2"):
import operator
import threading
reads_count_1thr = 0
rparser = ReadParser(utils.get_test_data(testfile))
for read in rparser:
reads_count_1thr += 1
def count_reads(rparser, counters, tnum):
counters[tnum] = reduce(operator.add, (1 for read in rparser))
N_THREADS = 4
threads = []
reads_counts_per_thread = [0] * N_THREADS
rparser = ReadParser(utils.get_test_data(testfile))
for tnum in range(N_THREADS):
t = \
threading.Thread(
target=count_reads,
args=[rparser, reads_counts_per_thread, tnum]
)
threads.append(t)
t.start()
for t in threads:
t.join()
assert reads_count_1thr == sum(reads_counts_per_thread), \
reads_counts_per_thread
@attr('multithread')
def test_with_multiple_threads_big():
test_with_multiple_threads(testfile="test-large.fa")
@attr('multithread')
def test_old_illumina_pair_mating():
import threading
rparser = ReadParser(utils.get_test_data("test-reads.fa"))
def thread_1_runtime(rparser):
for read in rparser:
pass
def thread_2_runtime(rparser):
for readnum, read in enumerate(rparser):
if 0 == readnum:
pass
t1 = threading.Thread(target=thread_1_runtime, args=[rparser])
t2 = threading.Thread(target=thread_2_runtime, args=[rparser])
t1.start()
t2.start()
t1.join()
t2.join()
@attr('multithread')
def test_casava_1_8_pair_mating():
import threading
# Note: This file, when used in conjunction with a 64 KiB per-thread
# prefetch buffer, tests the paired read mating logic with the
# Casava >= 1.8 read name format.
rparser = ReadParser(utils.get_test_data("test-reads.fq.bz2"))
def thread_1_runtime(rparser):
for read in rparser:
pass
def thread_2_runtime(rparser):
for readnum, read in enumerate(rparser):
if 0 == readnum:
pass
# assert "895:1:1:1761:13189 2:N:0:NNNNN" == read.name, read.name
t1 = threading.Thread(target=thread_1_runtime, args=[rparser])
t2 = threading.Thread(target=thread_2_runtime, args=[rparser])
t1.start()
t2.start()
t1.join()
t2.join()
def test_read_truncated():
rparser = ReadParser(utils.get_test_data("truncated.fq"))
try:
for read in rparser:
pass
assert 0, "No exception raised on a truncated file"
except IOError as err:
assert "Sequence is empty" in str(err), str(err)
def test_iterator_identities():
rparser = \
ReadParser(utils.get_test_data("test-abund-read-paired.fa"))
assert rparser is rparser.__iter__()
assert rparser is rparser.iter_reads()
@attr('known_failing')
def test_read_pair_iterator_in_error_mode():
assert 0
rparser = \
ReadParser(utils.get_test_data("test-abund-read-paired.fa"))
# If it walks like an iterator and quacks like an iterator...
rpi = rparser.iter_read_pairs()
assert "__iter__" in dir(rpi)
assert "next" in dir(rpi)
# Are the alleged pairs actually pairs?
read_pairs_1 = []
for read_1, read_2 in rpi:
read_pairs_1.append([read_1, read_2])
assert read_1.name[: 19] == read_2.name[: 19]
# Reload parser.
# Note: No 'rewind' or 'reset' capability at the time of this writing.
rparser = \
ReadParser(utils.get_test_data("test-abund-read-paired.fa"))
# Ensure that error mode is the default mode.
read_pairs_2 = []
for read_1, read_2 \
in rparser.iter_read_pairs(ReadParser.PAIR_MODE_ERROR_ON_UNPAIRED):
read_pairs_2.append([read_1, read_2])
matches = \
list(map(
lambda rp1, rp2: rp1[0].name == rp2[0].name,
read_pairs_1, read_pairs_2
))
assert all(matches) # Assert ALL the matches. :-]
def test_read_pair_iterator_in_error_mode_xfail():
rparser = \
ReadParser(utils.get_test_data("test-abund-read-impaired.fa"))
failed = True
try:
for rpair in rparser.iter_read_pairs():
pass
failed = False
except IOError as exc:
pass
assert failed
@attr('known_failing')
def test_read_pair_iterator_in_ignore_mode():
assert 0
rparser = \
ReadParser(utils.get_test_data("test-abund-read-impaired.fa"))
read_pairs = []
for read_1, read_2 \
in rparser.iter_read_pairs(ReadParser.PAIR_MODE_IGNORE_UNPAIRED):
read_pairs.append([read_1, read_2])
assert read_1.name[: 19] == read_2.name[: 19]
assert 2 == len(read_pairs)
def test_constructor():
# Note: Using a data file with only one read.
try:
rparser = ReadParser(utils.get_test_data("single-read.fq"), "a")
assert 0, ("ReadParser's constructor shouldn't accept a character for "
"the number of threads")
except TypeError as err:
print(str(err))
try:
rparser = ReadParser("non-existent-file-name")
assert 0, "ReadParser shouldn't accept a non-existant file name"
except ValueError as err:
print(str(err))
def test_iternext():
try:
rparser = ReadParser(utils.get_test_data("fakelump.fa.stoptags.txt"))
read_pairs = []
for read_1, read_2 in rparser.iter_read_pairs():
read_pairs.append((read_1, read_2))
assert 0, "Shouldn't be able to iterate over a non-FASTA file"
except IOError as err:
print(str(err))
except ValueError as err:
print(str(err))
# vim: set ft=python ts=4 sts=4 sw=4 et tw=79:
|
api.py
|
import pytorch_quik as pq
import multiprocessing as mp
from urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
from requests import Session
from typing import List, OrderedDict
from multiprocessing.connection import Connection
import pandas as pd
import numpy as np
from math import ceil
from pandas.io.json import json_normalize
import logging
import sys
logging.basicConfig(stream=sys.stdout)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
URL = "http://deepshadow.gsslab.rdu2.redhat.com:8080/predictions/my_tc"
JSON_HEADER = {"Content-Type": "application/json"}
def requests_session() -> Session:
"""Create an API session that can queue and recieve multiple
requests. It can also retry when a request returns a 507 instead
of a 200.
Returns:
Session: A requests session
"""
retry_strategy = Retry(
total=10,
backoff_factor=1,
status_forcelist=[507],
method_whitelist=["POST"],
)
adapter = HTTPAdapter(max_retries=retry_strategy)
sesh = Session()
sesh.mount("http://", adapter)
return sesh
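# With the strategy above, a POST that keeps answering 507 is retried up to 10 times,
# with urllib3 sleeping roughly backoff_factor * 2**(retry - 1) seconds between
# attempts, before the final response or exception is returned to the caller.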
def request_post(data_batch: str, sesh: Session, conn: Connection, num: int):
"""send a POST request based on a requests_session and a connection
Args:
data_batch (str): A batch of data to be predicted
sesh (Session): A session from requests_session
conn (Connection): The input_pipe from mp.Pipe
num (int): the batch number for when the data is recombined.
"""
r = sesh.post(URL, data=bytes(data_batch, "utf-8"), headers=JSON_HEADER)
logger.info(f"Batch {num}, status_code: {r.status_code}")
conn.send(r)
def split_and_format(arr: np.array, length: int) -> List[str]:
"""Taking a numpy array of text, split into batches for separate API
posts, and format into the torch serve required "instances" and "data."
Args:
arr (np.array): An array of text to be predicted
length (int): The length of the batch, or batch size
Returns:
List[str]: A list of strings formatted for a Transformer handler.
"""
splits = ceil(len(arr) / length)
arr_list = np.array_split(arr.flatten(), splits)
data_list = [pq.utils.txt_format(arr) for arr in arr_list]
return data_list
def batch_inference(
responses: np.array, indexed_labels: OrderedDict, batch_size: int
) -> pd.DataFrame:
"""Take an array of text fields, and return predictions via API
Args:
responses (np.array): The set of text (or survey responses) to
be predicted
indexed_labels (OrderedDict): An ordered dict of labels, for instance
0: Negative, 1: Positive
batch_size (int): The size of each batch request
Returns:
pd.DataFrame: A dataframe with the original text, logits, and
predicted label.
"""
data_list = split_and_format(responses, batch_size)
processes = []
r_list = []
sesh = requests_session()
for num, batch in enumerate(data_list):
output_pipe, input_pipe = mp.Pipe(duplex=False)
proc = mp.Process(
target=request_post, args=(batch, sesh, input_pipe, num)
)
processes.append(proc)
r_list.append(output_pipe)
proc.start()
[proc.join() for proc in processes]
r_list = [
json_normalize(r.recv().json()["predictions"], sep="_") for r in r_list
]
return pd.concat(r_list, ignore_index=True)
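if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): assumes the TorchServe endpoint in
    # URL is reachable; the sample text and label mapping below are made up.
    from collections import OrderedDict as ODict
    sample = np.array(["great product", "would not recommend"])
    labels = ODict([(0, "Negative"), (1, "Positive")])
    preds = batch_inference(sample, labels, batch_size=1)
    print(preds.head())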
|
spider2.py
|
#coding=utf-8
# Download all of a model's pictures
#Usage:
# python3 spider2.py https://www.nvshens.com/girl/24282/ or python3 spider2.py https://www.nvshens.com/girl/25361/
# TODO:
#
import requests
from lxml import etree
import os
import sys
import time
# import multiprocessing
from multiprocessing import Pool
import re
import math
# import numpy as np
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36", "Referer": 'http://www.996mb.com/',"Connection":"close"}
ref = 'https://www.nvshens.com'
class retry(object):
def __init__(self,*,times):
self._cnt=times
def __call__(self,func):
def wrapper(*args,**kw):
data=None
cnt=self._cnt
while data is None and cnt > 0:
data=func(*args,**kw)
cnt-=1
print('Retrying request')
return data
return wrapper
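# Intended usage of the decorator above (it only appears in commented-out form
# further down); `fetch` here is a hypothetical wrapper, shown for illustration:
# @retry(times=3)
# def fetch(url):
#     return crawl(url, '//*[@id="hgallery"]/img')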
def crawl(url, path):
if url:
try:
time.sleep(0.1)
# requests.adapters.DEFAULT_RETRIES = 3
# s = requests.session()
# s.keep_alive = False
res = requests.get(url, headers=headers, timeout=7)
print(res.status_code)
res = res.content
html = etree.HTML(res)
page = html.xpath(path, stream=True)
# if html.xpath('//*[@id="htilte"]', stream=True):
# title = html.xpath('//*[@id="htilte"]', stream=True)[0].text
return page
# print(res)
except Exception as e:
print('Error1:', e)
pass
# page.pop()
def get_title(url):
if url:
try:
res = requests.get(url, headers=headers, timeout=7)
res = res.content
html = etree.HTML(res)
if html.xpath('//*[@id="htilte"]', stream=True):
title = html.xpath('//*[@id="htilte"]', stream=True)[0].text
return title
# print(res)
except Exception as e:
print('Error2:', e)
pass
def get_nums(url):
if url:
try:
res = requests.get(url, headers=headers, timeout=7)
res = res.content
html = etree.HTML(res)
if html.xpath('//*[@id="dinfo"]/span', stream=True):
nums = re.match(r'\d{0,3}', html.xpath('//*[@id="dinfo"]/span', stream=True)[0].text).group()
return nums
# print(res)
except Exception as e:
print('Error3:', e)
pass
def div_arr(ls,n):
result = []
cut = int(len(ls)/n)
if cut == 0:
ls = [[x] for x in ls]
none_array = [[] for i in range(0, n-len(ls))]
return ls+none_array
for i in range(0, n-1):
result.append(ls[cut*i:cut*(1+i)])
result.append(ls[cut*(n-1):len(ls)])
return result
def flatten(a):
if not isinstance(a, (list, )):
return [a]
else:
b = []
for item in a:
b += flatten(item)
return b
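# For reference, the two helpers above behave like this:
# div_arr(['a', 'b', 'c', 'd', 'e'], 2) -> [['a', 'b'], ['c', 'd', 'e']]
# flatten([['x', ['y']], 'z']) -> ['x', 'y', 'z']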
# def change_path(url):
# @retry(times=3)
def download_img(url):
for index,item in enumerate(url):
if item:
try:
html = requests.get(item, headers=headers, timeout=15)
# print('img_status: ', html.status_code)
html = html.content
except Exception as e:
print('Error4: ', e)
pass
# return None
img_name = str(index) + str(item.split('/')[-1])
with open(img_name, 'wb') as file: # write the image data to disk as bytes
file.write(html)
file.flush()
file.close() # close the file
print('Image %d downloaded, name: %s' % (index + 1, img_name))
time.sleep(1) # custom delay between downloads
if __name__ == '__main__':
n = 8
argv = sys.argv
if len(argv) < 2:
print('check your params: python3 spider2.py <url>')
sys.exit()
url = argv[1]
main_gallery = crawl(url, '//*[@id="photo_list"]/ul/li/div[1]/a') if re.match(r'.*?/\d+/\w+/', url) else crawl(url, '//*[@id="post"]/div[8]/div/div[3]/ul/li/div[1]/a')
# main_gallery = crawl(url, '//*[@id="post"]/div[8]/div/div[3]/ul/li/div[1]/a') ? crawl(url, '//*[@id="post"]/div[8]/div/div[3]/ul/li/div[1]/a')
# print('main:',main_gallery)
main_gallery_a = []
for item in main_gallery:
_ = ref + item.attrib['href']
main_gallery_a.append(_)
pages = int(len(main_gallery_a))
path = os.getcwd()
for item1 in main_gallery_a:
img_src = list()
single_page_img_nums = len(crawl(item1, '//*[@id="hgallery"]/img'))
nums = get_nums(item1)
nums = math.ceil(int(nums)/single_page_img_nums)
single_nav = []
for num in range(nums):
num = str(num + 1)
page = item1 + num + '.html'
single_nav.append(page)
for item2 in single_nav:
img = crawl(item2, '//*[@id="hgallery"]/img')
src = []
for i in img:
src.append(i.attrib['src'])
img_src.append(src)
img_src = list(set(flatten(img_src)))
div_a = div_arr(img_src, n)
title = get_title(item1)
# path = os.path.abspath('.')
folder_path = path + '/' + str(title) + '/'
if not os.path.exists(folder_path): # check whether the folder already exists
os.makedirs(folder_path) # create the folder
os.chdir(folder_path) # change into the folder
pool = Pool(processes=n)
# datas = list()
# datas.append(div_a[i])
datas = (data for data in div_a)
pool.map(download_img, datas)
pool.close()
pool.join()
# process = []
# for i in range(n):
# arr = []
# arr.append(div_a[i])
# p = multiprocessing.Process(target=download_img,args=arr)
# p.start()
# process.append(p)
# for p in process:
# p.join()
|
weather.py
|
#!/usr/bin/env python
from lib.forecast import DarkskyWeather
from lib.view import LCDView
from time import sleep
import threading
apikey = 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
weather = DarkskyWeather(apikey, 71.0281183, -8.1249335, units = 'auto', lang = 'en')
CONDITION_REFRESH_INTERVAL = 30
FORECAST_REFRESH_INTERVAL = 600
def condition(lcd, mutex):
lastCond = None
while True:
cond = weather.condition
if cond != lastCond:
mutex.acquire()
lcd.condition(cond)
lastCond = cond
mutex.release()
sleep(CONDITION_REFRESH_INTERVAL)
def forecast(lcd, mutex):
lastFcst = None
while True:
fcst = weather.forecast
if fcst != lastFcst:
mutex.acquire()
lcd.forecast(fcst)
lastFcst = fcst
mutex.release()
sleep(FORECAST_REFRESH_INTERVAL)
lcd = LCDView()
lcd.initScreen()
threads = []
mutex = threading.Lock()
threads.append(threading.Thread(target = condition, args = [lcd, mutex]))
threads.append(threading.Thread(target = forecast, args = [lcd, mutex]))
for thrd in threads:
thrd.start()
|
debug.py
|
import sys
import time
import threading
class Debug(object):
def __init__(self, arg):
super(Debug, self).__init__()
self.debug_verbose = arg
self.stop_spinner = False
def log(self, text, log=False, pre=True, new=False, end='\n'):
if log and pre:
if new: print(f"\n\033[1;34m[i]\033[0m {text}", end='\n')
if not new: print(f"\033[1;34m[i]\033[0m {text}", end='\n')
elif log and not pre:
print(f"{text}")
elif self.debug_verbose:
print(f"[DEBUG] {text}")
def good(self, text):
print("\033[1;32m[+]\033[0m", text)
def error(self, text):
print("\033[1;31m[!]\033[0m", text)
def do_spinner(self, text):
spin = ["|", "/", "-", "\\"]
self.stop_spinner = False
while self.stop_spinner == False:
for s in spin:
sys.stdout.write(f"\r\033[1;34m[i]\033[0m {text} {s} \r")
time.sleep(0.1)
sys.stdout.write("\r")
def spinner(self, text):
spin_thread = threading.Thread(target=self.do_spinner, args=(text,))
spin_thread.daemon = False
spin_thread.start()
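if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): start the spinner,
    # let it run briefly, then stop it by flipping the flag that do_spinner polls.
    dbg = Debug(True)
    dbg.good("something worked")
    dbg.spinner("working")
    time.sleep(1)
    dbg.stop_spinner = True
    dbg.error("spinner stopped")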
|
main.py
|
import time
import threading
import mido
import sys
import MRQ1
#import duplexPort
# globals
MIDI_PORT = False
MIDI_note_mapping = [None] * 128 # MIDI notes range 0-127
MIDI_note_mapping[91] = [MRQ1.droneSnare,16]
MIDI_note_mapping[93] = [MRQ1.droneBongo,16]
MIDI_note_mapping[95] = [MRQ1.droneBass, 16]
MIDI_note_mapping[96] = [MRQ1.droneBrush,16]
MIDI_note_mapping[98] = [MRQ1.droneBlock,16]
MIDI_note_mapping[100] = [MRQ1.triggerSnare,None]
MIDI_note_mapping[101] = [MRQ1.triggerBongo,None]
MIDI_note_mapping[105] = [MRQ1.triggerBlock,None]
MIDI_note_mapping[107] = [MRQ1.triggerBass ,None]
MIDI_note_mapping[103] = [MRQ1.triggerBrush,None]
MIDI_note_mapping[119] = [MRQ1.toggleExternalClock,None]
MIDI_note_mapping[120] = [MRQ1.togglePower,None]
MIDI_CC_mapping = [None] * 128 # MIDI CC numbers range 0-127
MIDI_CC_mapping[74] = [MRQ1.setClockOscillator,0]
MIDI_CC_mapping[71] = [MRQ1.setClockOscillator,1]
MIDI_CC_mapping[91] = [MRQ1.setClockOscillator,2]
MIDI_CC_mapping[93] = [MRQ1.setVolume,3]
MIDI_CC_mapping[73] = [MRQ1.setBalance,4]
# init simurgh
"""
# init duplex port
def TestCallback():
while True:
time.sleep(1)
#duplexPort.init(testcallback)
testcallback = threading.Thread(target=TestCallback)
testcallback.start()
duplexPort.init(testcallback)
"""
# init MIDI
def mido_init():
midiInputs_l = mido.get_input_names()
print ">> MIDI Inputs", midiInputs_l
if len(midiInputs_l) < 2:
print "MIDI inputs not found. Check USB connection."
sys.exit(0)
else:
global MIDI_PORT
#MIDI_PORT = mido.open_output(midiOutputs_l[0])
MIDI_PORT = mido.open_input(midiInputs_l[1],callback=mapMIDI)
#MIDI_PORT = mido.open_input(midiInputs_l[0],callback=mapMIDI)
#MIDI_PORT.callback = mapMIDI
print(MIDI_PORT)
# MIDI mappings
def mapMIDI(msg):
print(msg)
if msg.type == "note_on":
mapping_l = MIDI_note_mapping[msg.note]
if mapping_l:
mapping_l[0](msg, mapping_l[1])
#fpgaParams = mapping_l[0](msg, mapping_l[1])
#print "fpgaParams",fpgaParams
#duplexPort.send(fpgaParams[0],fpgaParams[1])
if msg.type == "control_change":
mapping_l = MIDI_CC_mapping[msg.control]
if mapping_l:
mapping_l[0](msg, mapping_l[1])
#fpgaParams = mapping_l[0](msg, mapping_l[1])
#print "fpgaParams",fpgaParams
#duplexPort.send(fpgaParams[0],fpgaParams[1])
if msg.type == "note_off" and msg.note == 103:
print "asdf"
mapping_l = MIDI_note_mapping[msg.note]
print mapping_l
if mapping_l:
mapping_l[0](msg, mapping_l[1])
# signal functions
mido_init()
|
__init__.py
|
import time
import datetime
import threading
from .hardware import Sensor, GPIOSwitch, GPIOState
from flask import Flask, request
from flask_restful import Api, Resource, abort
app = Flask(__name__)
app.secret_key = 'x6TbAJ5QLWPtDtElwDpZu64XjvcrVV_w'
app.config['DEBUG'] = True
class HardwareState(object):
def __init__(self):
self.sensors = {
'wort': Sensor('28-00000542d319'),
'manual': Sensor('28-000009b7f883'),
}
self.switches = {
'stirrer': GPIOSwitch(5),
}
self.states = {
'valve': GPIOState(6),
'stirrer': self.switches['stirrer'],
}
self.confirm = False
self.thread = threading.Thread(target=self.run)
self.thread.start()
@property
def wort_sensor(self):
return self.sensors['wort']
@property
def stirrer(self):
return self.switches['stirrer']
def run(self):
while True:
for sensor in self.sensors.values():
sensor.read_temperature()
time.sleep(1)
class Stage(object):
def __init__(self, description):
self.started = None
self.finished = None
self.description = description
def enter(self):
self.started = datetime.datetime.now()
print("{} enter at {}".format(self, self.started))
def exit(self):
self.finished = datetime.datetime.now()
print("{} exit at {}".format(self, self.finished))
def complete(self, state):
raise NotImplementedError
def to_dict(self):
return dict(started=self.started.isoformat() if self.started else None,
finished=self.finished.isoformat() if self.finished else None,
name=self.description)
class Setup(Stage):
def __init__(self, description):
super(Setup, self).__init__(description)
def complete(self, state):
state.stirrer.switch(False)
return True
class Confirmation(Stage):
def __init__(self, description):
super(Confirmation, self).__init__(description)
def complete(self, state):
r = state.confirm
state.confirm = False
return r
def to_dict(self):
d = super(Confirmation, self).to_dict()
d['needsInteraction'] = True
return d
def __repr__(self):
return '<Stage:Confirm>'
class Wait(Stage):
def __init__(self, duration, description):
super(Wait, self).__init__(description)
self.duration = duration
def complete(self, state):
now = datetime.datetime.now()
return now >= self.started + self.duration
def __repr__(self):
return '<Stage:Wait({})>'.format(self.duration)
class TemperatureReached(Stage):
def __init__(self, temperature, description):
super(TemperatureReached, self).__init__(description)
self.temperature = temperature
self.recorded = {}
def complete(self, state):
now = datetime.datetime.now()
self.recorded[now.isoformat()] = state.wort_sensor.temperature
reached = state.wort_sensor.temperature >= self.temperature
if not reached:
state.stirrer.switch(True)
return reached
def __repr__(self):
return '<Stage:Reached({} degC)>'.format(self.temperature)
def to_dict(self):
d = super(TemperatureReached, self).to_dict()
d['temps'] = self.recorded
return d
class Brew(object):
def __init__(self, name, stages, state):
self.name = name
self.stages = stages
self.state = state
self.thread = threading.Thread(target=self.run)
self.thread.start()
self.current_stage = 0
def run(self):
for i, stage in enumerate(self.stages):
stage.enter()
self.current_stage = i
while not stage.complete(self.state):
time.sleep(1)
stage.exit()
brews = []
state = HardwareState()
class BrewList(Resource):
def get(self):
return dict(brews=[i for i,_ in enumerate(brews)])
class BrewControl(Resource):
def post(self):
data = request.get_json()
stages = [
Setup("Initialize system"),
Confirmation("Waiting for user input"),
Wait(datetime.timedelta(seconds=5), "Waiting 5 seconds"),
TemperatureReached(25.0, "Waiting for temperature to reach 25 C"),
]
brew_id = len(brews)
brews.append(Brew(data['name'], stages, state))
return dict(id=brew_id)
class BrewInteraction(Resource):
def put(self, brew_id):
if brew_id >= len(brews):
abort(404, message="Brew does not exist")
brew = brews[brew_id]
brew.state.confirm = True
return {}
def get(self, brew_id):
if brew_id >= len(brews):
abort(404, message="Brew does not exist")
brew = brews[brew_id]
stages = [s.to_dict() for s in brew.stages]
return dict(stages=stages)
class Sensors(Resource):
def get(self):
temps = {k: v.temperature for k, v in state.sensors.items()}
states = {k: v.state for k, v in state.states.items()}
return dict(temps=temps, states=states)
class Switch(Resource):
def put(self, name):
if name not in state.switches:
abort(404, message="Switch not found")
data = request.get_json()
if 'on' not in data:
abort(400, message="Payload wrong")
state.switches[name].switch(data['on'])
api = Api(app)
api.add_resource(Sensors, '/control/sensors')
api.add_resource(Switch, '/control/switch/<string:name>')
api.add_resource(BrewList, '/control/brews')
api.add_resource(BrewControl, '/control/brew')
api.add_resource(BrewInteraction, '/control/brew/<int:brew_id>')
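if __name__ == '__main__':
    # Development-only sketch: serve the API directly with Flask's built-in server.
    # Host and port are assumptions; a production deployment would use a WSGI server.
    app.run(host='0.0.0.0', port=5000)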
|
worker.py
|
import asyncio
import inspect
import threading
import time
import traceback
from functools import partial
from queue import Queue
from threading import Thread
from typing import Callable
from .yqueue import Queuey, from_coroutine
class Worker:
def __init__(self, n: int, queue_maxsize=0, ignore_exceptions=()):
self.threads = [Thread(target=self._w, daemon=True) for _ in range(n)]
self.q = Queuey(queue_maxsize)
self.ignore_exceptions = ignore_exceptions
for t in self.threads:
t.start()
def push_work(self, f: Callable, *args, **kwargs):
self.q.put((f, args, kwargs))
def _w(self):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
threadLocal = threading.local()
threadLocal.wait_futures = []
threadLocal.async_tasks = []
threadLocal.sync_tasks = []
def handle_item(f_tuple1):
f1, args1, kwargs1 = f_tuple1
if inspect.iscoroutinefunction(f1):
task1 = f1(*args1, **kwargs1)
threadLocal.async_tasks.append(task1)
# loop.run_until_complete(asyncio.sleep(0))
# print("hello world")
else:
task1 = partial(f1, *args1, **kwargs1)
threadLocal.sync_tasks.append(task1)
while True:
if threadLocal.wait_futures:
for fut in threadLocal.wait_futures:
f_tuple1 = fut.result()
handle_item(f_tuple1)
# futures carried over from the previous iteration have now been consumed; clear the list
threadLocal.wait_futures = []
# TODO: make this batch size of 5 configurable
for i in range(5):
f_tuple1, fut1 = self.q.get_noblock()
if fut1:
threadLocal.wait_futures.append(fut1)
else:
handle_item(f_tuple1)
try:
# the _w is sync func
# if from_coroutine():
task_count = len(threadLocal.async_tasks) + len(threadLocal.sync_tasks)
if threadLocal.async_tasks:
t = asyncio.gather(*threadLocal.async_tasks)
loop.run_until_complete(t)
threadLocal.async_tasks = []
if threadLocal.sync_tasks:
for task in threadLocal.sync_tasks:
task()
threadLocal.sync_tasks = []
except self.ignore_exceptions as e:
print(e, '...........abort')
except Exception:
print('...........error')
print(traceback.format_exc())
print('...........error end')
import _thread
_thread.interrupt_main()
finally:
for i in range(task_count):
self.q.task_done()
def join(self):
self.q.join()
def sleep_print(sleep_time):
# This is the part that async actually speeds up.
time.sleep(sleep_time)
print("----: {} {}".format(threading.get_ident(), sleep_time))
return
async def async_sleep_print(sleep_time):
await asyncio.sleep(sleep_time)
print("----: {} {}".format(threading.get_ident(), sleep_time))
return
if __name__ == "__main__":
worker = Worker(16, queue_maxsize=4)
for i in range(100):
worker.push_work(partial(async_sleep_print, i % 10))
worker.join()
|