plant_view.py
|
from remi.gui import *
import threading, random, time
class Container( Container ): # check: maybe rename this class after the root container
    def __init__(self, AppInst=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.AppInst = AppInst
        self.constructUI()
        self.userInit(*args, **kwargs)  # register events, add custom widgets, add CSS classes, etc.
        # args and kwargs are passed on to userInit in case the user code needs them.
def constructUI(self):
self.attr_editor_newclass = False
self.css_height = "600px"
self.css_left = "px"
self.css_margin = "0px"
self.css_position = "absolute"
self.css_top = "px"
self.css_width = "1024px"
self.variable_name = "container0"
image0 = Image()
image0.attr_editor_newclass = False
image0.attr_src = "/static:images/plant_view/visu_test.jpg"
image0.css_height = "500px"
image0.css_left = "75.0px"
image0.css_margin = "0px"
image0.css_position = "absolute"
image0.css_top = "45.0px"
image0.css_width = "px"
image0.variable_name = "image0"
self.append(image0,'image0')
vl_temp = Label()
vl_temp.attr_editor_newclass = False
vl_temp.css_align_content = "stretch"
vl_temp.css_background_color = "rgb(0,200,0)"
vl_temp.css_border_color = "rgb(0,0,0)"
vl_temp.css_border_style = "solid"
vl_temp.css_border_width = "1px"
vl_temp.css_direction = "none"
vl_temp.css_font_family = ""
vl_temp.css_font_size = "16px"
vl_temp.css_font_weight = "bold"
vl_temp.css_height = "30px"
vl_temp.css_justify_content = "flex-start"
vl_temp.css_left = "445.0px"
vl_temp.css_margin = "0px"
vl_temp.css_position = "absolute"
vl_temp.css_text_align = "center"
vl_temp.css_top = "540.0px"
vl_temp.css_width = "50px"
vl_temp.text = "6°C"
vl_temp.variable_name = "vl_temp"
self.append(vl_temp,'vl_temp')
rl_temp = Label()
rl_temp.attr_editor_newclass = False
rl_temp.css_background_color = "rgb(0,200,0)"
rl_temp.css_border_style = "solid"
rl_temp.css_border_width = "1px"
rl_temp.css_font_size = "16px"
rl_temp.css_font_weight = "bold"
rl_temp.css_height = "30px"
rl_temp.css_left = "445.0px"
rl_temp.css_margin = "0px"
rl_temp.css_position = "absolute"
rl_temp.css_text_align = "center"
rl_temp.css_top = "375.0px"
rl_temp.css_width = "50px"
rl_temp.text = "12°C"
        rl_temp.variable_name = "rl_temp"
self.append(rl_temp,'rl_temp')
p1_status = Label()
p1_status.attr_editor_newclass = False
p1_status.css_background_color = "rgb(0,200,0)"
p1_status.css_border_style = "solid"
p1_status.css_border_width = "1px"
p1_status.css_font_size = "14px"
p1_status.css_font_weight = "bold"
p1_status.css_height = "20px"
p1_status.css_left = "635.0px"
p1_status.css_margin = "0px"
p1_status.css_position = "absolute"
p1_status.css_text_align = "center"
p1_status.css_top = "495.0px"
p1_status.css_width = "80px"
p1_status.text = "AN (Auto)"
p1_status.variable_name = "p1_status"
self.append(p1_status,'p1_status')
p2_status = Label()
p2_status.attr_editor_newclass = False
p2_status.css_background_color = "rgb(200,0,0)"
p2_status.css_border_style = "solid"
p2_status.css_border_width = "1px"
p2_status.css_font_size = "14px"
p2_status.css_font_weight = "bold"
p2_status.css_height = "20px"
p2_status.css_left = "775.0px"
p2_status.css_margin = "0px"
p2_status.css_position = "absolute"
p2_status.css_text_align = "center"
p2_status.css_top = "495.0px"
p2_status.css_width = "80px"
p2_status.text = "AUS (Auto)"
p2_status.variable_name = "p2_status"
self.append(p2_status,'p2_status')
km2_status = Label()
km2_status.attr_editor_newclass = False
km2_status.css_background_color = "rgb(0,200,0)"
km2_status.css_border_style = "solid"
km2_status.css_border_width = "1px"
km2_status.css_font_size = "16px"
km2_status.css_font_weight = "bold"
km2_status.css_height = "30px"
km2_status.css_left = "350.0px"
km2_status.css_margin = "0px"
km2_status.css_position = "absolute"
km2_status.css_text_align = "center"
km2_status.css_top = "15.0px"
km2_status.css_width = "150px"
km2_status.text = "AN (Auto)"
km2_status.variable_name = "km2_status"
self.append(km2_status,'km2_status')
km1_status = Label()
km1_status.attr_editor_newclass = False
km1_status.css_background_color = "rgb(200,0,0)"
km1_status.css_border_style = "solid"
km1_status.css_border_width = "1px"
km1_status.css_font_size = "16px"
km1_status.css_font_weight = "bold"
km1_status.css_height = "30px"
km1_status.css_left = "125.0px"
km1_status.css_margin = "0px"
km1_status.css_position = "absolute"
km1_status.css_text_align = "center"
km1_status.css_top = "15.0px"
km1_status.css_width = "150px"
km1_status.text = "AUS (Auto)"
km1_status.variable_name = "km1_status"
self.append(km1_status,'km1_status')
km2_setup = Button()
km2_setup.attr_editor_newclass = False
km2_setup.css_background_color = "rgb(0,0,0)"
km2_setup.css_height = "30px"
km2_setup.css_left = "400.0px"
km2_setup.css_margin = "0px"
km2_setup.css_position = "absolute"
km2_setup.css_top = "205.0px"
km2_setup.css_width = "80px"
km2_setup.text = "Einstellen"
km2_setup.variable_name = "km2_setup"
self.append(km2_setup,'km2_setup')
km1_setup = Button()
km1_setup.attr_editor_newclass = False
km1_setup.css_background_color = "rgb(0,0,0)"
km1_setup.css_height = "30px"
km1_setup.css_left = "175.0px"
km1_setup.css_margin = "0px"
km1_setup.css_position = "absolute"
km1_setup.css_top = "205.0px"
km1_setup.css_width = "80px"
km1_setup.text = "Einstellen"
km1_setup.variable_name = "km1_setup"
self.append(km1_setup,'km1_setup')
p1_setup = Button()
p1_setup.attr_editor_newclass = False
p1_setup.css_background_color = "rgb(0,0,0)"
p1_setup.css_height = "30px"
p1_setup.css_left = "660.0px"
p1_setup.css_margin = "0px"
p1_setup.css_position = "absolute"
p1_setup.css_top = "550.0px"
p1_setup.css_width = "80px"
p1_setup.text = "Einstellen"
p1_setup.variable_name = "p1_setup"
self.append(p1_setup,'p1_setup')
p2_setup = Button()
p2_setup.attr_editor_newclass = False
p2_setup.css_background_color = "rgb(0,0,0)"
p2_setup.css_height = "30px"
p2_setup.css_left = "755.0px"
p2_setup.css_margin = "0px"
p2_setup.css_position = "absolute"
p2_setup.css_top = "550.0px"
p2_setup.css_width = "80px"
p2_setup.text = "Einstellen"
p2_setup.variable_name = "p2_setup"
self.append(p2_setup,'p2_setup')
def userInit(self, *args, **kwargs):
self.shownInMenu = 'My Example Menu'
self.menuTitle = 'Plant View'
self.children['km1_setup'].onclick.do(self.handleButtons)
self.children['km2_setup'].onclick.do(self.handleButtons)
self.children['p1_setup'].onclick.do(self.handleButtons)
self.children['p2_setup'].onclick.do(self.handleButtons)
self.data = {} # Dict for Datapoints
self.data_aquisition_thread_started = False
def updateView(self):
        if not self.data_aquisition_thread_started and self.AppInst.connection_established:
            # run the data acquisition in its own thread
            t = threading.Thread(target=self.dummy_values)
            t.daemon = True
            t.name = str(self.AppInst.session) + '_plantview_thread'
            t.start()
return
# Update the UI controls with the new values if UI is idle
# Update Temperatures
self.children['vl_temp'].set_text(f"{self.data['vl_temp']:.1f}°C")
self.children['rl_temp'].set_text(f"{self.data['rl_temp']:.1f}°C")
# Update chiller status
self.children['km1_status'].set_text(f"{self.data['km1_status']}")
if self.data['km1_run'] == 'AUS':
self.children['km1_status'].css_background_color = 'rgb(200,0,0)'
else:
self.children['km1_status'].css_background_color = 'rgb(0,200,0)'
self.children['km2_status'].set_text(f"{self.data['km2_status']}")
if self.data['km2_run'] == 'AUS':
self.children['km2_status'].css_background_color = 'rgb(200,0,0)'
else:
self.children['km2_status'].css_background_color = 'rgb(0,200,0)'
def handleButtons(self, emittingButton):
if emittingButton == self.children['km1_setup']:
self.AppInst.showDialog(emittingButton, 'setup_km', title='Setup KM 1', km='km1')
if emittingButton == self.children['km2_setup']:
self.AppInst.showDialog(emittingButton, 'setup_km', title='Setup KM 2', km='km2')
def dummy_values(self):
        # Generate random values to simulate data acquisition from a bus system.
        # This is only a stand-in for real data acquisition; the UI itself does not need it.
self.data_aquisition_thread_started = True
self.data['km1_run'] = 'AUS'
self.data['km1_setting'] = 'AUTO'
self.data['km1_setpoint'] = 6.0
self.data['km2_run'] = 'AN'
self.data['km2_setting'] = 'AUTO'
self.data['km2_setpoint'] = 6.0
        # Check whether the instance that opened the view is still connected; stop the thread once it is gone.
        while self.AppInst.connection_established:
self.data['vl_temp'] = random.uniform(4.5, 8.5)
self.data['rl_temp'] = random.uniform(8.0, 12.0)
if self.data['km1_setting'] == 'AN MAN':
self.data['km1_run'] = 'AN'
if self.data['km1_setting'] == 'AUS MAN':
self.data['km1_run'] = 'AUS'
if self.data['km2_setting'] == 'AN MAN':
self.data['km2_run'] = 'AN'
if self.data['km2_setting'] == 'AUS MAN':
self.data['km2_run'] = 'AUS'
self.data['km1_status'] = self.data['km1_run'] + ' (' + self.data['km1_setting'] + ')'
self.data['km2_status'] = self.data['km2_run'] + ' (' + self.data['km2_setting'] + ')'
time.sleep(1.0)
self.data_aquisition_thread_started = False # Set the flag back for next visit
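# --- Added usage sketch (not part of the original file) ---
# A minimal remi App that could host the Container above. This is only a sketch:
# the project's real hosting application is not shown, so the attributes it is
# expected to provide (connection_established, session, showDialog) are
# illustrative stand-ins here, not the project's actual implementation.
import remi

class PlantViewApp(remi.App):
    def main(self):
        self.connection_established = True   # assumed flag polled by updateView()
        self.session = id(self)              # assumed per-session id used in the thread name
        self.view = Container(AppInst=self)
        return self.view

    def idle(self):
        # remi calls idle() periodically; refresh the widgets with the latest data
        if hasattr(self, 'view'):
            self.view.updateView()

    def showDialog(self, emitter, dialog_name, **kwargs):
        # placeholder for the project's dialog mechanism (hypothetical)
        print('open dialog:', dialog_name, kwargs)

if __name__ == '__main__':
    remi.start(PlantViewApp, address='0.0.0.0', port=8081, start_browser=False)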
|
MangatownClass.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# Import required modules
import requests
from bs4 import BeautifulSoup
import re
import threading
class Mangatown:
def __init__(self,):
self.lock = threading.Lock()
self.Mangalink = ''
self.manga_name = 'No Name'
def getAllChapters(self,):
try:
page = requests.get(self.Mangalink)
s = BeautifulSoup(page.content,'html.parser')
chaptersAll = s.find("ul","chapter_list")
chaptersLinks = chaptersAll.find_all("li")
arr = []
for chap in chaptersLinks:
arr.append('https://www.mangatown.com'+chap.a.get('href'))
return arr
except Exception as e: print('Mangatown getAllChapters:'+str(e))
    def __conseguirCantidadPaginas(self,page):
        # "conseguir cantidad de paginas" = get the chapter's total page count from the page source
        num = re.search(r"total_pages\s=\s(\d+)", page.text)
return int(num.group(1))
def __subprocess(self,link,num,arr):
page = requests.get(link+str(num+1)+".html")
s = BeautifulSoup(page.content,'html.parser')
img = s.find(id="image")
        with self.lock:
            arr.append((num, img["src"]))  # keep the page number so the results can be ordered reliably
def __conseguirimagen(self,link,paginas):
arr = []
jobs = []
for num in range(paginas):
x = threading.Thread(target=self.__subprocess, args=(link,num,arr,))
jobs.append(x)
x.start()
for x in jobs:
x.join()
        arr.sort(key=lambda item: item[0])  # threads finish in arbitrary order; sort by page number
        return [src for _, src in arr]
def getChapter(self,numCap):
try:
"""
if numCap < 10:
cap = "00"+str(numCap)
elif numCap < 100:
cap = "0"+str(numCap)
else:
cap = str(numCap)
"""
Fulllink = self.Mangalink + numCap+'/'
page = requests.get(Fulllink)
paginas = self.__conseguirCantidadPaginas(page)
links = self.__conseguirimagen(Fulllink,paginas)
return links
except Exception as e: print("mangatown getrequestChap: "+str(e))
def parselink(self,link):
try:
sp = link.split('/')
if len(sp) < 7:
link = "https://www.mangatown.com/"+"/".join([sp[3],sp[4],sp[5]])
print("este es el linl que se guarda "+link)
self.Mangalink = link
nombremanga = sp[-2]
self.manga_name = nombremanga
return [nombremanga]
elif len(sp) < 8:
nombremanga = sp[-3]
cap = sp[-2]
#self.manga_name = nombremanga
return [nombremanga,cap]
except Exception as e: print('mangatown parselink'+str(e))
def recontruccion(self,name):
link = 'https://www.mangatown.com/manga/'+name+'/'
return link
#getrequestChap('https://www.mangatown.com/manga/','wortenia_senki',33)
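# --- Added usage sketch (not part of the original file) ---
# Illustrative only: the manga URL is just an example, and the chapter path
# passed to getChapter() (e.g. 'c001') is an assumption about the site's URL format.
if __name__ == '__main__':
    mt = Mangatown()
    info = mt.parselink('https://www.mangatown.com/manga/wortenia_senki/')
    print('parsed:', info, '->', mt.Mangalink)
    chapters = mt.getAllChapters()       # list of chapter URLs
    print('found', len(chapters or []), 'chapters')
    pages = mt.getChapter('c001')        # image URLs of one chapter (path format assumed)
    print('first chapter has', len(pages or []), 'image URLs')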
|
server.py
|
from multiprocessing import Process
from fastapi import FastAPI, WebSocket
from listenerImplementation import rabbitMQ_Implementation as Listener
import data_handler as d_handler
import query_handler as q_handler
from models import *
from constants import *
# ----------- App initialisation -------------------------------------
app = FastAPI()
# RabbitMQ, on the other hand, mainly handles the system's internal data; it runs in its own process.
listener = Process(target=Listener, args=(RMQ_EXCHG,))
listener.start()
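# --- Added note (not part of the original file) ---
# `Query` is imported from the local `models` module, which is not shown here.
# Judging from how the handlers below use it, a minimal pydantic model would
# look roughly like this (field names come from the code, the types are assumptions):
#
#     class Query(BaseModel):
#         api: str
#         content: dict
#         roadmap: list = []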
# ------------- Routes -----------------------------------------------
@app.get("/")
async def root():
return {"message": "Welcome to phoros' services"}
@app.get("/storage")
def storage():
return {"message": "Hello World"}
@app.post("/storage/query/data")
async def storage_submit_query(query: Query):
return await q_handler.handle_query(query.api,query.content,query.roadmap)
# A websocket variant of this route (e.g. @app.websocket("/ws")) was prototyped
# for better real-time responses; the plain POST endpoint below is the one currently in use.
@app.post("/storage/query/search")
def submit_search_query(query: Query):
    data = q_handler.handle_search(query.api, query.content)
    return {"content": data}
@app.post("/storage/insert/graph")
async def storage_insert_graph(query: Query):
return await d_handler.handle_data(query.api,query.content,query.roadmap)
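# --- Added usage sketch (not part of the original file) ---
# Example client call against the search route above; the host/port and the
# payload values are placeholders, not part of the original service.
#
#     import requests
#     payload = {"api": "example_api", "content": {"q": "demo"}, "roadmap": []}
#     resp = requests.post("http://localhost:8000/storage/query/search", json=payload)
#     print(resp.json())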
|
test_nachi_robot.py
|
from mock import patch
import socket
import rospy
import sys
from struct import pack, unpack
import time
import math
from threading import Thread, currentThread
from sensor_msgs.msg import JointState
import src.drivers.nachi_robot
from std_srvs.srv import EmptyRequest
from rosweld_drivers.msg import Move, RobotState
from rosweld_drivers.srv import MoveAlongRequest, MoveBetweenRequest, SetSpeedRequest
from ..drivers.misc.udp import UdpConnector
from mocks import *
import src.drivers.misc.status
sendPlayCallParams = None
def mock_send_play(start = 1, end = -1, d = 1, poses = None):
"""Mock the send play command
Keyword Arguments:
start {int} -- start index of the step (default: {1})
end {int} -- end index of the step (default: {-1})
d {int} -- direction: 1 - forward, -1 - backward (default: {1})
poses {Move[]} -- Moves to follow (default: {None})
"""
global sendPlayCallParams
sendPlayCallParams = { 'start': start, 'end': end, 'direction': d, 'poses': poses }
def mock_appendToQueue_setspeed(self, msg, label, handler = None):
"""Mock UDP's appendToQueue and check for setting the speed
Arguments:
msg {bytearray} -- the message object as a bytearray
label {string} -- label of the message
Keyword Arguments:
handler {function} -- callback function (default: {None})
"""
command = unpack('>i',msg[0:4])[0]
assert label == "send_set_speed"
assert command == src.drivers.nachi_robot.Commands.setSpeed
def mock_appendToQueue_abort(self, msg, label, handler = None):
"""Mock UDP's appendToQueue and check for calling the abort
Arguments:
msg {bytearray} -- the message object as a bytearray
label {string} -- label of the message
Keyword Arguments:
handler {function} -- callback function (default: {None})
"""
command = unpack('>i',msg[0:4])[0]
assert label == "send_abort"
assert command == src.drivers.nachi_robot.Commands.abort
def mock_appendToQueue_sendupdate(self, msg, label, handler = None):
"""Mock UDP's appendToQueue and check for requesting update
Arguments:
msg {bytearray} -- the message object as a bytearray
label {string} -- label of the message
Keyword Arguments:
handler {function} -- callback function (default: {None})
"""
command = unpack('>i',msg[0:4])[0]
check_command(src.drivers.nachi_robot.Commands, command)
assert label == "send_update"
assert command == src.drivers.nachi_robot.Commands.update
class TestNachiRobot(object):
"""Unit tests for the NACHI Robot driver
"""
def get_moves(self, count):
"""Generate moves list
Arguments:
count {int} -- Amount of the generated moves
Returns:
MoveAlongRequest -- Moves list
"""
moves = []
req = MoveAlongRequest()
for i in range(0, count):
moves.append(Move())
req.moves = moves
return req
def test_store_poses(self):
"""Test the store poses functionality
Requirements:
- The driver has to store all of the given moves.
- Only stores the last moves list, drops the old one.
- Sends the moves in batch to the robot with UDP
"""
# generate 500 moves
req = self.get_moves(500)
with patch.object(socket.socket, 'connect', return_value=True) as mock_connect, \
patch.object(socket.socket, 'bind', return_value=True) as mock_bind:
src.drivers.nachi_robot.udp["command"] = UdpConnector("localhost", 8000)
src.drivers.nachi_robot.udp["update"] = UdpConnector("localhost", 8000)
with patch.object(UdpConnector, 'appendToQueue') as u:
# call store poses
src.drivers.nachi_robot.store_poses(req)
# the driver has to return with the same amount of moves
assert len(src.drivers.nachi_robot.getPositions()) == len(req.moves)
# the amount of the udp calls is the smallest integer which is greater than
# the length of the move list / batch size
assert u.call_count == math.ceil( len(req.moves) / src.drivers.nachi_robot.batchSize )
# call store poses again
src.drivers.nachi_robot.store_poses(req)
# the old poses has to be removed
assert len(src.drivers.nachi_robot.getPositions()) != 2 * len(req.moves)
#stop udp threads
src.drivers.nachi_robot.udp["command"].stopConsumeThread()
src.drivers.nachi_robot.udp["update"].stopConsumeThread()
def test_move_between(self):
"""Test move between function
Requirements:
- Work with stored poses
- The robot side driver's indexing is +1
- The direction is based on the start and end index, 1 - forward, -1 backward
"""
global sendPlayCallParams
# get moves
req = self.get_moves(50)
with patch.object(socket.socket, 'connect', return_value=True) as mock_connect, \
patch.object(socket.socket, 'bind', return_value=True) as mock_bind:
src.drivers.nachi_robot.udp["command"] = UdpConnector("localhost", 8000)
src.drivers.nachi_robot.udp["update"] = UdpConnector("localhost", 8000)
with patch('src.drivers.nachi_robot.sendPlay', side_effect = mock_send_play) as m, \
patch.object(UdpConnector, 'appendToQueue') as u:
src.drivers.nachi_robot.allPositions = []
# calling the move_between without stored poses will return without performing the action
ret = src.drivers.nachi_robot.move_between(MoveBetweenRequest( start = 2, end = 31 ))
assert len(src.drivers.nachi_robot.allPositions) == 0 and ret.result != "OK"
# store poses
src.drivers.nachi_robot.store_poses(req)
assert u.call_count == len(req.moves) / src.drivers.nachi_robot.batchSize
# call move between the 2nd and 3rd step
src.drivers.nachi_robot.move_between(MoveBetweenRequest( start = 2, end = 3 ))
assert sendPlayCallParams['start'] == 3
assert sendPlayCallParams['end'] == 4
assert sendPlayCallParams['direction'] == 1
assert sendPlayCallParams['poses'] == None
assert m.called
                # call move between the 3rd and 1st step (backward)
src.drivers.nachi_robot.move_between(MoveBetweenRequest( start = 3, end = 1 ))
assert sendPlayCallParams['start'] == 4
assert sendPlayCallParams['end'] == 2
assert sendPlayCallParams['direction'] == -1
assert sendPlayCallParams['poses'] == None
assert m.call_count == 2
# stop udp threads
src.drivers.nachi_robot.udp["command"].stopConsumeThread()
src.drivers.nachi_robot.udp["update"].stopConsumeThread()
def test_move_along(self):
"""Test to move along a given path
Requirements:
- Uploads the moves to the robot
- Go through the path from 0->len(allPositions)
"""
global sendPlayCallParams
# generate moves
req = self.get_moves(5)
with patch.object(socket.socket, 'connect', return_value=True) as mock_connect, \
patch.object(socket.socket, 'bind', return_value=True) as mock_bind:
src.drivers.nachi_robot.udp["command"] = UdpConnector("localhost", 8000)
src.drivers.nachi_robot.udp["update"] = UdpConnector("localhost", 8000)
with patch('src.drivers.nachi_robot.sendPlay', side_effect = mock_send_play) as m, \
patch.object(UdpConnector, 'appendToQueue') as u:
# upload the moves and start the movement
src.drivers.nachi_robot.move_along(req)
assert m.called
assert m.call_count == 1
# start from the 1st point
assert sendPlayCallParams['start'] == 1
# go 'til the end
assert sendPlayCallParams['end'] == -1
# go forward on the path
assert sendPlayCallParams['direction'] == 1
assert sendPlayCallParams['poses'] == None
src.drivers.nachi_robot.udp["command"].stopConsumeThread()
src.drivers.nachi_robot.udp["update"].stopConsumeThread()
def test_move_pose(self):
"""Go to a specific move/pose
Requirements:
- go to the robot with 1 move
"""
global sendPlayCallParams
# generate a move
req = self.get_moves(1)
with patch.object(socket.socket, 'connect', return_value=True) as mock_connect, \
patch.object(socket.socket, 'bind', return_value=True) as mock_bind:
src.drivers.nachi_robot.udp["command"] = UdpConnector("localhost", 8000)
src.drivers.nachi_robot.udp["update"] = UdpConnector("localhost", 8000)
with patch.object(UdpConnector, 'appendToQueue') as u:
# command the driver to move the robot
src.drivers.nachi_robot.move_pose(req)
# the command is forwarded through UDP
assert u.call_count == 1
src.drivers.nachi_robot.udp["command"].stopConsumeThread()
src.drivers.nachi_robot.udp["update"].stopConsumeThread()
def test_set_speed(self):
"""Set the speed on the robot
Requirements:
- the set speed command is forwarded to the robot
"""
with patch.object(socket.socket, 'connect', return_value=True) as mock_connect, \
patch.object(socket.socket, 'bind', return_value=True) as mock_bind, \
patch.object(src.drivers.nachi_robot, 'sendSetSpeed') as setSpeed, \
patch.object(UdpConnector, 'appendToQueue', mock_appendToQueue_setspeed):
src.drivers.nachi_robot.udp["command"] = UdpConnector("localhost", 8000)
src.drivers.nachi_robot.udp["update"] = UdpConnector("localhost", 8000)
# create the command
req = SetSpeedRequest()
req.value = 5
# forward to the robot
src.drivers.nachi_robot.set_speed(req)
src.drivers.nachi_robot.udp["command"].stopConsumeThread()
src.drivers.nachi_robot.udp["update"].stopConsumeThread()
# the command is appended to the queue
assert setSpeed.called
assert setSpeed.call_count == 1
def test_abort(self):
"""Test abort command
Requirements: the command is forwarded to the robot
"""
with patch.object(socket.socket, 'connect', return_value=True) as mock_connect, \
patch.object(socket.socket, 'bind', return_value=True) as mock_bind, \
patch.object(src.drivers.nachi_robot, 'sendAbort') as abort, \
patch.object(UdpConnector, 'appendToQueue', mock_appendToQueue_abort):
src.drivers.nachi_robot.udp["command"] = UdpConnector("localhost", 8000)
src.drivers.nachi_robot.udp["update"] = UdpConnector("localhost", 8000)
# create command
req = EmptyRequest()
# send to the driver
src.drivers.nachi_robot.abort(req)
src.drivers.nachi_robot.udp["command"].stopConsumeThread()
src.drivers.nachi_robot.udp["update"].stopConsumeThread()
# the command is in the queue
assert abort.called
assert abort.call_count == 1
def test_init(self):
"""Test the driver initalization
Requirements:
- all of the services are advertised
- all of the topics are published
        - the UDP connection is initialized
"""
with patch.object(rospy, "wait_for_service", return_value=True), \
patch.object(rospy, "get_param", mock_get_param), \
patch.object(rospy, "init_node", return_value=None), \
patch.object(rospy, 'spin', return_value=None), \
patch.object(rospy.Service, '__init__', return_value=None) as mock_service_init, \
patch.object(rospy.Publisher, '__init__', return_value=None) as mock_publisher_init, \
patch.object(Thread, 'start', return_value=None) as mock_start_thread, \
patch.object(Thread, 'join', return_value=None), \
patch.object(socket.socket, 'connect', return_value=True) as mock_connect, \
patch.object(socket.socket, 'bind', return_value=True) as mock_bind:
src.drivers.nachi_robot.init()
for sn in ["move_along", "abort", "store_poses", "move_pose", "set_speed", "move_between"]:
                # each required service is advertised exactly once
assert len([call for call in mock_service_init.mock_calls if call[1][0] == sn]) == 1
            # both topics (robot state and joint state) are advertised
assert mock_publisher_init.call_count == 2
assert mock_publisher_init.mock_calls[0][1][0] == "robot_state"
assert mock_publisher_init.mock_calls[0][1][1] == RobotState
assert mock_publisher_init.mock_calls[1][1][0] == "robot_controller_joint_state"
assert mock_publisher_init.mock_calls[1][1][1] == JointState
def test_init_fail(self):
with patch.object(rospy, "wait_for_service", return_value=True), \
patch.object(rospy, "get_param", return_value=None), \
patch.object(rospy, "init_node", return_value=None), \
patch.object(rospy, 'spin', return_value=None), \
patch.object(sys, 'exit', return_value=None) as mock_exit, \
patch.object(rospy.Service, '__init__', return_value=None) as mock_service_init, \
patch.object(rospy.Publisher, '__init__', return_value=None) as mock_publisher_init, \
patch.object(Thread, 'start', return_value=None) as mock_start_thread, \
patch.object(Thread, 'join', return_value=None), \
patch.object(socket.socket, 'connect', return_value=True) as mock_connect, \
patch.object(socket.socket, 'bind', return_value=True) as mock_bind:
try:
                # this will fail because no IP is set
src.drivers.nachi_robot.init()
except:
assert True
def test_send_play(self):
"""Test send play command
Requirements:
- Command the robot to move
"""
with patch.object(socket.socket, 'connect', return_value=True) as mock_connect, \
patch.object(socket.socket, 'bind', return_value=True) as mock_bind:
src.drivers.nachi_robot.udp["command"] = UdpConnector("localhost", 8000)
with patch.object(UdpConnector, 'appendToQueue') as mock_appendToQueue:
src.drivers.nachi_robot.allPositions = self.get_moves(50).moves
src.drivers.nachi_robot.move_between(MoveBetweenRequest( start = 2, end = 3 ))
assert mock_appendToQueue.called
# append the command to the UDP queue
msg = mock_appendToQueue.mock_calls[0][1][0]
# check the udp package for the right params
assert src.drivers.nachi_robot.Commands.playback == unpack('>i',msg[0:4])[0]
assert 3 == unpack('>i',msg[4:8])[0]
assert 4 == unpack('>i',msg[8:12])[0]
assert 1 == unpack('>i',msg[12:16])[0]
src.drivers.nachi_robot.udp["command"].stopConsumeThread()
def test_robot_update(self):
"""Test the robot state update handler
Requirements:
- The robot state is called
- The topic is updating after each robot state update, if the state is different
"""
rospy.Rate = MockRate
src.drivers.nachi_robot.p_robot_state = rospy.Publisher("test1", RobotState)
src.drivers.nachi_robot.p_joint_states = rospy.Publisher("test2", JointState)
with patch.object(rospy.Publisher, 'publish', return_value=None) as mock_publish, \
patch.object(UdpConnector, 'appendToQueue', mock_appendToQueue_sendupdate), \
patch.object(socket.socket, 'connect', return_value=True) as mock_connect, \
            patch.object(socket.socket, 'bind', return_value=True) as mock_bind:
src.drivers.nachi_robot.udp['update'] = UdpConnector("localhost", 8000)
update_thread = Thread(target = src.drivers.nachi_robot.robot_state_publisher)
update_thread.do_run = True
update_thread.start()
            # feed synthetic state updates into the handler in a loop, then stop the publisher thread
i = 0.
updates = 0
while i < 4:
# build up a test message
msg = bytearray()
msg.extend(pack('<i', 0)[::-1]) #seq
msg.extend(pack('<i', 0)[::-1]) #cmd
msg.extend(pack('<f', 0)[::-1]) #speed
msg.extend(pack('<f', i)[::-1]) #x
msg.extend(pack('<f', i)[::-1]) #y
msg.extend(pack('<f', 0)[::-1]) #z
msg.extend(pack('<f', 0)[::-1]) #rx
msg.extend(pack('<f', 0)[::-1]) #ry
msg.extend(pack('<f', 0)[::-1]) #rz
msg.extend(pack('<i', 0)[::-1]) #step
msg.extend(pack('<i', 0)[::-1]) #storedPoses
msg.extend(pack('<i', 0)[::-1]) #robotprogstate
msg.extend(pack('<i', 0)[::-1]) #mode
msg.extend(pack('<f', 1 * i)[::-1]) #j1
msg.extend(pack('<f', 2)[::-1]) #j2
msg.extend(pack('<f', 3)[::-1]) #j3
msg.extend(pack('<f', 4)[::-1]) #j4
msg.extend(pack('<f', 5)[::-1]) #j5
msg.extend(pack('<f', 6)[::-1]) #j6
msg.extend(pack('<f', 7)[::-1]) #j7
# call handler
src.drivers.nachi_robot.handleUpdateResponse(msg)
# go to sleep
time.sleep(0.0005)
i = i + 0.05
updates = updates + 1
# stop the thread
update_thread.do_run = False
update_thread.join()
            # every handled update should trigger one joint-state and one robot-state publish
            assert mock_publish.call_count == updates * 2
src.drivers.nachi_robot.udp["update"].stopConsumeThread()
def test_broken_robot_update(self):
"""Test a broken robot update
Requirements:
- The driver keeps running
"""
rospy.Rate = MockRate
src.drivers.nachi_robot.p_robot_state = rospy.Publisher("test1", RobotState)
src.drivers.nachi_robot.p_joint_states = rospy.Publisher("test2", JointState)
with patch.object(rospy.Publisher, 'publish', return_value=None) as mock_publish, \
patch.object(UdpConnector, 'appendToQueue', mock_appendToQueue_sendupdate), \
patch.object(socket.socket, 'connect', return_value=True) as mock_connect, \
            patch.object(socket.socket, 'bind', return_value=True) as mock_bind, \
patch.object(src.drivers.nachi_robot, "status", return_value=None) as mock_status:
src.drivers.nachi_robot.udp['update'] = UdpConnector("localhost", 8000)
update_thread = Thread(target = src.drivers.nachi_robot.robot_state_publisher)
update_thread.do_run = True
update_thread.start()
            # feed synthetic (truncated) state updates into the handler in a loop, then stop the publisher thread
i = 0.
updates = 0
while i < 4:
# build up a test message
msg = bytearray()
msg.extend(pack('<i', 0)[::-1]) #seq
msg.extend(pack('<i', 0)[::-1]) #cmd
msg.extend(pack('<f', 0)[::-1]) #speed
msg.extend(pack('<f', i)[::-1]) #x
# call handler
src.drivers.nachi_robot.handleUpdateResponse(msg)
# go to sleep
time.sleep(0.0005)
i = i + 0.05
updates = updates + 1
# stop the thread
update_thread.do_run = False
update_thread.join()
            # nothing was published because every update failed to parse, but the driver kept running
assert mock_publish.call_count == 0
for i in range(updates):
call = mock_status.mock_calls[i]
assert call[1][2] == src.drivers.misc.status.STATE.ERROR
src.drivers.nachi_robot.udp["update"].stopConsumeThread()
|
data_loader.py
|
# Original code from https://github.com/araffin/robotics-rl-srl
# Authors: Antonin Raffin, René Traoré, Ashley Hill
from __future__ import absolute_import, division, print_function
import random
import time
from multiprocessing import Process, Queue
import cv2 # pytype: disable=import-error
import imgaug
import numpy as np
import torchvision.transforms.functional as vision_fn
from imgaug import augmenters as iaa
from imgaug.augmenters import Sometimes
from joblib import Parallel, delayed
from PIL import Image, ImageChops
from six.moves import queue
from ae.autoencoder import preprocess_image, preprocess_input
class CheckFliplrPostProcessor(object):
def __init__(self):
super(CheckFliplrPostProcessor, self).__init__()
self.flipped = False
def __call__(self, images, augmenter, parents):
if "Fliplr" in augmenter.name:
self.flipped = True
return images
def get_image_augmenter():
"""
:return: (iaa.Sequential) Image Augmenter
"""
return iaa.Sequential(
[
Sometimes(0.5, iaa.Fliplr(1)),
# TODO: add shadows, see: https://markku.ai/post/data-augmentation/
# Add shadows (from https://github.com/OsamaMazhar/Random-Shadows-Highlights)
Sometimes(0.3, RandomShadows(1.0)),
# Sometimes(0.3, iaa.MultiplyBrightness((0.8, 1.2))),
Sometimes(0.5, iaa.GaussianBlur(sigma=(0, 2.0))),
Sometimes(0.5, iaa.MotionBlur(k=(3, 11), angle=(0, 360))),
# Sometimes(0.5, iaa.Sharpen(alpha=(0.0, 1.0), lightness=(0.75, 2.0))),
Sometimes(0.4, iaa.Add((-25, 25), per_channel=0.5)),
# Sometimes(0.5, iaa.Multiply((0.6, 1.4), per_channel=0.5)),
# Sometimes(0.2, iaa.CoarseDropout((0.0, 0.05), size_percent=(0.02, 0.10), per_channel=0.5)),
# 20% of the corresponding size of the height and width
Sometimes(0.3, iaa.Cutout(nb_iterations=(1, 5), size=0.2, squared=False)),
# Sometimes(0.5, iaa.contrast.LinearContrast((0.5, 1.8), per_channel=0.5)),
# Sometimes(0.1, iaa.AdditiveGaussianNoise(scale=10, per_channel=True))
],
random_order=True,
)
# Adapted from https://github.com/OsamaMazhar/Random-Shadows-Highlights
class RandomShadows(iaa.meta.Augmenter):
def __init__(
self,
p=0.5,
high_ratio=(1, 2),
low_ratio=(0.01, 0.5),
left_low_ratio=(0.4, 0.6),
left_high_ratio=(0, 0.2),
right_low_ratio=(0.4, 0.6),
right_high_ratio=(0, 0.2),
seed=None,
name=None,
):
super(RandomShadows, self).__init__(seed=seed, name=name)
self.p = p
self.high_ratio = high_ratio
self.low_ratio = low_ratio
self.left_low_ratio = left_low_ratio
self.left_high_ratio = left_high_ratio
self.right_low_ratio = right_low_ratio
self.right_high_ratio = right_high_ratio
def _augment_batch_(self, batch, random_state, parents, hooks):
for i in range(batch.nb_rows):
if random.uniform(0, 1) < self.p:
batch.images[i] = self.process(
batch.images[i],
self.high_ratio,
self.low_ratio,
self.left_low_ratio,
self.left_high_ratio,
self.right_low_ratio,
self.right_high_ratio,
)
return batch
@staticmethod
def process(
img,
high_ratio,
low_ratio,
left_low_ratio,
left_high_ratio,
right_low_ratio,
right_high_ratio,
):
img = Image.fromarray(img)
w, h = img.size
# h, w, c = img.shape
high_bright_factor = random.uniform(high_ratio[0], high_ratio[1])
low_bright_factor = random.uniform(low_ratio[0], low_ratio[1])
left_low_factor = random.uniform(left_low_ratio[0] * h, left_low_ratio[1] * h)
left_high_factor = random.uniform(left_high_ratio[0] * h, left_high_ratio[1] * h)
right_low_factor = random.uniform(right_low_ratio[0] * h, right_low_ratio[1] * h)
right_high_factor = random.uniform(right_high_ratio[0] * h, right_high_ratio[1] * h)
tl = (0, left_high_factor)
bl = (0, left_high_factor + left_low_factor)
tr = (w, right_high_factor)
br = (w, right_high_factor + right_low_factor)
contour = np.array([tl, tr, br, bl], dtype=np.int32)
mask = np.zeros([h, w, 3], np.uint8)
cv2.fillPoly(mask, [contour], (255, 255, 255))
inverted_mask = cv2.bitwise_not(mask)
        # convert the cv2 masks to PIL images
        # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # the color conversion above is skipped because the mask is plain black and white
mask_pil = Image.fromarray(mask)
inverted_mask_pil = Image.fromarray(inverted_mask)
low_brightness = vision_fn.adjust_brightness(img, low_bright_factor)
low_brightness_masked = ImageChops.multiply(low_brightness, mask_pil)
high_brightness = vision_fn.adjust_brightness(img, high_bright_factor)
high_brightness_masked = ImageChops.multiply(high_brightness, inverted_mask_pil)
return np.array(ImageChops.add(low_brightness_masked, high_brightness_masked))
def get_parameters(self):
"""See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
return []
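# --- Added example (not part of the original file) ---
# Quick smoke test of the augmentation pipeline above on a synthetic image;
# the image size and the printed output are arbitrary and purely illustrative.
if __name__ == "__main__":
    _demo_img = (np.random.rand(80, 160, 3) * 255).astype(np.uint8)
    _augmented = get_image_augmenter().augment_image(_demo_img)
    print("augmented image:", _augmented.shape, _augmented.dtype)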
class DataLoader(object):
"""
    A custom dataloader to preprocess images and feed them to the network.
    :param minibatchlist: ([np.array]) list of observation indices (grouped per minibatch)
    :param images_path: (np.array) Array of paths to images
    :param n_workers: (int) number of preprocessing workers (each loads and preprocesses one image)
    :param infinite_loop: (bool) whether to loop over the data indefinitely; if False, the iterator is exhausted after a single pass
    :param max_queue_len: (int) Max number of minibatches that can be preprocessed at the same time
    :param is_training: (bool) whether to shuffle the minibatches (training mode)
    :param augment: (bool) Whether to use image augmentation or not
"""
def __init__(
self,
minibatchlist,
images_path,
n_workers=1,
infinite_loop=True,
max_queue_len=4,
is_training=False,
augment=True,
):
super(DataLoader, self).__init__()
self.n_workers = n_workers
self.infinite_loop = infinite_loop
self.n_minibatches = len(minibatchlist)
self.minibatchlist = minibatchlist
self.images_path = images_path
self.shuffle = is_training
self.queue = Queue(max_queue_len)
self.process = None
self.augmenter = None
if augment:
self.augmenter = get_image_augmenter()
self.start_process()
@staticmethod
def create_minibatch_list(n_samples, batch_size):
"""
Create list of minibatches.
:param n_samples: (int)
:param batch_size: (int)
:return: ([np.array])
"""
minibatchlist = []
        # ceil division: include the last (possibly smaller) batch without appending an empty one
        for i in range((n_samples + batch_size - 1) // batch_size):
start_idx = i * batch_size
end_idx = min(n_samples, (i + 1) * batch_size)
minibatchlist.append(np.arange(start_idx, end_idx))
return minibatchlist
def start_process(self):
"""Start preprocessing process"""
self.process = Process(target=self._run)
        # Make it a daemon process, so it is terminated together
        # with the main process
self.process.daemon = True
self.process.start()
def _run(self):
start = True
with Parallel(n_jobs=self.n_workers, batch_size="auto", backend="threading") as parallel:
while start or self.infinite_loop:
start = False
if self.shuffle:
indices = np.random.permutation(self.n_minibatches).astype(np.int64)
else:
indices = np.arange(len(self.minibatchlist), dtype=np.int64)
for minibatch_idx in indices:
images = self.images_path[self.minibatchlist[minibatch_idx]]
if self.n_workers <= 1:
batch = [self._make_batch_element(image_path, self.augmenter) for image_path in images]
else:
batch = parallel(
delayed(self._make_batch_element)(image_path, self.augmenter) for image_path in images
)
batch_input = np.concatenate([batch_elem[0] for batch_elem in batch], axis=0)
batch_target = np.concatenate([batch_elem[1] for batch_elem in batch], axis=0)
if self.shuffle:
self.queue.put((minibatch_idx, batch_input, batch_target))
else:
self.queue.put((batch_input, batch_target))
# Free memory
del batch_input, batch_target, batch
self.queue.put(None)
@classmethod
def _make_batch_element(cls, image_path, augmenter=None):
"""
:param image_path: (str) path to an image
:param augmenter: (iaa.Sequential) Image augmenter
:return: (np.ndarray, np.ndarray)
"""
# TODO: use mp4 video directly instead of images
# cf https://stackoverflow.com/questions/33650974/opencv-python-read-specific-frame-using-videocapture
im = cv2.imread(image_path)
if im is None:
raise ValueError("tried to load {}.jpg, but it was not found".format(image_path))
postprocessor = CheckFliplrPostProcessor()
if augmenter is not None:
input_img = augmenter.augment_image(
preprocess_image(im.copy(), normalize=False), hooks=imgaug.HooksImages(postprocessor=postprocessor)
)
# Normalize
input_img = preprocess_input(input_img.astype(np.float32), mode="rl")
input_img = input_img.reshape((1,) + input_img.shape)
if postprocessor.flipped:
target_img = preprocess_image(im, normalize=False)
target_img = iaa.Fliplr(1).augment_image(target_img)
target_img = preprocess_input(target_img.astype(np.float32), mode="rl")
target_img = target_img.reshape((1,) + target_img.shape)
else:
target_img = preprocess_image(im)
target_img = target_img.reshape((1,) + target_img.shape)
if augmenter is None:
input_img = target_img.copy()
return input_img, target_img
def __len__(self):
return self.n_minibatches
def __iter__(self):
return self
def __next__(self):
while True:
try:
val = self.queue.get_nowait()
break
except queue.Empty:
time.sleep(0.001)
continue
if val is None:
raise StopIteration
return val
def __del__(self):
if self.process is not None:
self.process.terminate()
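# --- Added usage sketch (not part of the original file) ---
# Illustrative only: the image paths below are hypothetical placeholders; the
# loader will only yield batches if the files actually exist on disk.
if __name__ == "__main__":
    demo_paths = np.array(["data/recording_1/frame_000001.jpg",
                           "data/recording_1/frame_000002.jpg"])
    minibatches = DataLoader.create_minibatch_list(len(demo_paths), batch_size=2)
    loader = DataLoader(minibatches, demo_paths, n_workers=1,
                        infinite_loop=False, is_training=True, augment=True)
    for minibatch_idx, batch_input, batch_target in loader:
        print(minibatch_idx, batch_input.shape, batch_target.shape)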
|
codec.py
|
#
# Copyright (c) 2016-2017, The OpenThread Authors.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Module providing a Spinel coder / decoder class.
"""
import binascii
import time
import logging
import threading
import traceback
import queue
import importlib
from struct import pack
from struct import unpack
from collections import namedtuple
from collections import defaultdict
import ipaddress
import spinel.util as util
import spinel.config as CONFIG
from spinel.const import kThread
from spinel.const import SPINEL
from spinel.const import SPINEL_LAST_STATUS_MAP
from spinel.hdlc import Hdlc
FEATURE_USE_HDLC = 1
FEATURE_USE_SLACC = 1
TIMEOUT_PROP = 2
#=========================================
# SpinelCodec
#=========================================
# 0: DATATYPE_NULL
#'.': DATATYPE_VOID: Empty data type. Used internally.
#'b': DATATYPE_BOOL: Boolean value. Encoded in 8-bits as either 0x00 or 0x01.
# All other values are illegal.
#'C': DATATYPE_UINT8: Unsigned 8-bit integer.
#'c': DATATYPE_INT8: Signed 8-bit integer.
#'S': DATATYPE_UINT16: Unsigned 16-bit integer. (Little-endian)
#'s': DATATYPE_INT16: Signed 16-bit integer. (Little-endian)
#'L': DATATYPE_UINT32: Unsigned 32-bit integer. (Little-endian)
#'l': DATATYPE_INT32: Signed 32-bit integer. (Little-endian)
#'i': DATATYPE_UINT_PACKED: Packed Unsigned Integer. (See section 7.2)
#'6': DATATYPE_IPv6ADDR: IPv6 Address. (Big-endian)
#'E': DATATYPE_EUI64: EUI-64 Address. (Big-endian)
#'e': DATATYPE_EUI48: EUI-48 Address. (Big-endian)
#'D': DATATYPE_DATA: Arbitrary Data. (See section 7.3)
#'d': DATATYPE_DATA_WLEN: Arbitrary Data with Prepended Length. (See section 7.3)
#'U': DATATYPE_UTF8: Zero-terminated UTF8-encoded string.
#'t': DATATYPE_STRUCT: Structured datatype. Compound type. Length prepended. (See section 7.4)
#'A': DATATYPE_ARRAY: Array of datatypes. Compound type. (See section 7.5)
class SpinelCodec(object):
""" A general coder / decoder class for Spinel protocol. """
@classmethod
def parse_b(cls, payload): return unpack("<B", payload[:1])[0]
@classmethod
def parse_c(cls, payload): return unpack("<b", payload[:1])[0]
@classmethod
def parse_C(cls, payload): return unpack("<B", payload[:1])[0]
@classmethod
def parse_s(cls, payload): return unpack("<h", payload[:2])[0]
@classmethod
def parse_S(cls, payload): return unpack("<H", payload[:2])[0]
@classmethod
def parse_l(cls, payload): return unpack("<l", payload[:4])[0]
@classmethod
def parse_L(cls, payload): return unpack("<L", payload[:4])[0]
@classmethod
def parse_X(cls, payload): return unpack("<Q", payload[:8])[0]
@classmethod
def parse_6(cls, payload): return payload[:16]
@classmethod
def parse_E(cls, payload): return payload[:8]
@classmethod
def parse_e(cls, payload): return payload[:6]
@classmethod
def parse_U(cls, payload):
payload = payload.decode('utf-8')
nullchar = '\0'
if payload.find(nullchar) >= 0:
return payload[:payload.index(nullchar)] # strip null
else:
return payload
@classmethod
def parse_D(cls, payload): return payload
@classmethod
def parse_d(cls, payload): return payload[2:2+unpack("<H", payload[:2])[0]]
@classmethod
def parse_i(cls, payload):
""" Decode EXI integer format. """
value = 0
value_len = 0
value_mul = 1
while value_len < 4:
byte = payload[value_len]
value += (byte & 0x7F) * value_mul
if byte < 0x80:
break
value_mul *= 0x80
value_len += 1
return (value, value_len + 1)
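# Added worked example (not in the original): the packed integer format stores
# 7 bits per byte, least-significant group first, with the top bit set on every
# byte except the last. For instance 1337 (0b10100111001) encodes as
# b'\xb9\x0a', and parse_i(b'\xb9\x0a') returns (1337, 2).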
@classmethod
def parse_i_len(cls, payload):
""" Decode length of EXI integer format. """
return cls.parse_i(payload)[1];
@classmethod
def index_of_ending_brace(cls, spinel_format, idx):
""" Determines the index of the matching closing brace. """
count = 1
orig_idx = idx
while count > 0 and idx < len(spinel_format)-1:
idx += 1
if spinel_format[idx] == ')':
count -= 1
elif spinel_format[idx] == '(':
count += 1
if count != 0:
            raise ValueError('Unbalanced parentheses in format string "' + spinel_format + '", idx=' + str(idx))
return idx;
@classmethod
def parse_field(cls, payload, spinel_format):
map_decode = {
'b': cls.parse_b,
'c': cls.parse_c,
'C': cls.parse_C,
's': cls.parse_s,
'S': cls.parse_S,
'L': cls.parse_L,
'l': cls.parse_l,
'6': cls.parse_6,
'X': cls.parse_X,
'E': cls.parse_E,
'e': cls.parse_e,
'U': cls.parse_U,
'D': cls.parse_D,
'd': cls.parse_d,
'i': cls.parse_i,
}
try:
return map_decode[spinel_format[0]](payload)
except KeyError:
print(traceback.format_exc())
return None
@classmethod
def get_payload_size(cls, payload, spinel_format):
map_lengths = {
'b': 1,
'c': 1,
'C': 1,
's': 2,
'S': 2,
'l': 4,
'L': 4,
'6': 16,
'X': 8,
'E': 8,
'e': 6,
}
result = 0
idx = 0
while idx < len(spinel_format):
format = spinel_format[idx]
if format == 't':
if spinel_format[idx+1] != '(':
raise ValueError('Invalid structure format')
struct_end = cls.index_of_ending_brace(spinel_format, idx + 1);
result += 2 + cls.parse_S(payload[result:]);
idx = struct_end + 1
elif format == 'd':
result += 2 + cls.parse_S(payload[result:]);
idx += 1
elif format == 'D' or format == 'A':
if idx != len(spinel_format) - 1:
raise ValueError('Invalid type syntax for "' + format + '", must go at end of format string')
result = len(payload);
idx += 1
elif format == 'U':
result += payload[result:].index(0) + 1;
idx += 1
elif format == 'i':
result += cls.parse_i_len(payload[result:])
idx += 1
else:
result += map_lengths[format]
idx += 1
return result
@classmethod
def parse_fields(cls, payload, spinel_format):
result = []
idx = 0
while idx < len(spinel_format):
format = spinel_format[idx]
if format == 'A':
if spinel_format[idx+1] != '(':
raise ValueError('Invalid structure format')
array_end = cls.index_of_ending_brace(spinel_format, idx + 1);
array_format = spinel_format[idx+2:array_end]
array = []
while len(payload):
array.append(cls.parse_fields(payload, array_format))
payload = payload[cls.get_payload_size(payload, array_format):]
result.append(tuple(array))
idx = array_end + 1
elif format == 't':
if spinel_format[idx+1] != '(':
raise ValueError('Invalid structure format')
struct_end = cls.index_of_ending_brace(spinel_format, idx + 1);
struct_format = spinel_format[idx+2:struct_end]
struct_len = cls.parse_S(payload);
result.append(cls.parse_fields(payload[2:struct_len+2], struct_format))
payload = payload[struct_len+2:]
idx = struct_end + 1
else:
result.append(cls.parse_field(payload, format))
payload = payload[cls.get_payload_size(payload, format):]
idx += 1
return tuple(result)
@classmethod
def encode_i(cls, data):
""" Encode EXI integer format. """
result = bytes()
while data:
value = data & 0x7F
data >>= 7
if data:
value |= 0x80
result = result + pack("<B", value)
return result
@classmethod
def encode_b(cls, value): return pack('B', value)
@classmethod
def encode_c(cls, value): return pack('B', value)
@classmethod
def encode_C(cls, value): return pack('B', value)
@classmethod
def encode_s(cls, value): return pack('<h', value)
@classmethod
def encode_S(cls, value): return pack('<H', value)
@classmethod
def encode_l(cls, value): return pack('<l', value)
@classmethod
def encode_L(cls, value): return pack('<L', value)
@classmethod
def encode_6(cls, value): return value[:16]
@classmethod
def encode_E(cls, value): return value[:8]
@classmethod
def encode_e(cls, value): return value[:6]
@classmethod
def encode_U(cls, value): return value + '\0'
@classmethod
def encode_D(cls, value): return value
@classmethod
def encode_d(cls, value): return cls.encode_S(len(value)) + value
@classmethod
def encode_field(cls, code, value):
map_encode = {
'b': cls.encode_b,
'c': cls.encode_c,
'C': cls.encode_C,
's': cls.encode_s,
'S': cls.encode_S,
'L': cls.encode_L,
'l': cls.encode_l,
'6': cls.encode_6,
'E': cls.encode_E,
'e': cls.encode_e,
'U': cls.encode_U,
'D': cls.encode_D,
'd': cls.encode_d,
'i': cls.encode_i,
}
try:
return map_encode[code](value)
except KeyError:
print(traceback.format_exc())
return None
def next_code(self, spinel_format):
code = spinel_format[0]
spinel_format = spinel_format[1:]
# TODO: Handle T() and A()
return code, spinel_format
def encode_fields(self, spinel_format, *fields):
packed = bytes()
for field in fields:
code, spinel_format = self.next_code(spinel_format)
if not code:
break
packed += self.encode_field(code, field)
return packed
def encode_packet(self, command_id, payload=bytes(), tid=SPINEL.HEADER_DEFAULT):
""" Encode the given payload as a Spinel frame. """
header = pack(">B", tid)
cmd = self.encode_i(command_id)
pkt = header + cmd + payload
return pkt
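# --- Added usage sketch (not part of the original file) ---
# Round-trip a couple of fields through the codec above. The values are
# arbitrary; CMD_PROP_VALUE_GET is taken from spinel.const as an example command id.
if __name__ == '__main__':
    _codec = SpinelCodec()
    _frame = _codec.encode_fields('CS', 3, 0x1234)   # a uint8 followed by a uint16
    assert SpinelCodec.parse_fields(_frame, 'CS') == (3, 0x1234)
    _pkt = _codec.encode_packet(SPINEL.CMD_PROP_VALUE_GET, _frame)
    print('encoded packet:', binascii.hexlify(_pkt))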
#=========================================
class SpinelPropertyHandler(SpinelCodec):
def LAST_STATUS(self, _, payload): return self.parse_i(payload)[0]
def PROTOCOL_VERSION(self, _wpan_api, payload): pass
def NCP_VERSION(self, _, payload): return self.parse_U(payload)
def INTERFACE_TYPE(self, _, payload): return self.parse_i(payload)[0]
def VENDOR_ID(self, _, payload): return self.parse_i(payload)[0]
def CAPS(self, _wpan_api, payload): return self.parse_fields(payload, 'A(i)')
def INTERFACE_COUNT(self, _, payload): return self.parse_C(payload)
def POWER_STATE(self, _, payload): return self.parse_C(payload)
def HWADDR(self, _, payload): return self.parse_E(payload)
def LOCK(self, _, payload): return self.parse_b(payload)
def HBO_MEM_MAX(self, _, payload): return self.parse_L(payload)
def HBO_BLOCK_MAX(self, _, payload): return self.parse_S(payload)
def PHY_ENABLED(self, _, payload): return self.parse_b(payload)
def PHY_CHAN(self, _, payload): return self.parse_C(payload)
def PHY_CHAN_SUPPORTED(self, _wpan_api, payload): pass
def PHY_FREQ(self, _, payload): return self.parse_L(payload)
def PHY_CCA_THRESHOLD(self, _, payload): return self.parse_c(payload)
def PHY_TX_POWER(self, _, payload): return self.parse_c(payload)
def PHY_RSSI(self, _, payload): return self.parse_c(payload)
def MAC_SCAN_STATE(self, _, payload): return self.parse_C(payload)
def MAC_SCAN_MASK(self, _, payload): return self.parse_U(payload)
def MAC_SCAN_PERIOD(self, _, payload): return self.parse_S(payload)
def MAC_SCAN_BEACON(self, _, payload): return self.parse_U(payload)
def MAC_15_4_LADDR(self, _, payload): return self.parse_E(payload)
def MAC_15_4_SADDR(self, _, payload): return self.parse_S(payload)
def MAC_15_4_PANID(self, _, payload): return self.parse_S(payload)
def MAC_FILTER_MODE(self, _, payload): return self.parse_C(payload)
def MAC_RAW_STREAM_ENABLED(self, _, payload):
return self.parse_b(payload)
def MAC_WHITELIST(self, _, payload): pass
def MAC_WHITELIST_ENABLED(self, _, payload):
return self.parse_b(payload)
def MAC_BLACKLIST(self, _, payload): pass
def MAC_BLACKLIST_ENABLED(self, _, payload):
return self.parse_b(payload)
def MAC_FIXED_RSS(self, _, payload): pass
def MAC_MAX_RETRY_NUMBER_DIRECT(self, _, payload): return self.parse_C(payload)
def MAC_MAX_RETRY_NUMBER_INDIRECT(self, _, payload): return self.parse_C(payload)
def NET_SAVED(self, _, payload): return self.parse_b(payload)
def NET_IF_UP(self, _, payload): return self.parse_b(payload)
def NET_STACK_UP(self, _, payload): return self.parse_C(payload)
def NET_ROLE(self, _, payload): return self.parse_C(payload)
def NET_NETWORK_NAME(self, _, payload): return self.parse_U(payload)
def NET_XPANID(self, _, payload): return self.parse_D(payload)
def NET_MASTER_KEY(self, _, payload): return self.parse_D(payload)
def NET_KEY_SEQUENCE_COUNTER(self, _, payload): return self.parse_L(payload)
def NET_PARTITION_ID(self, _, payload): return self.parse_L(payload)
def NET_KEY_SWITCH_GUARDTIME(self, _, payload): return self.parse_L(payload)
def THREAD_LEADER_ADDR(self, _, payload): return self.parse_6(payload)
def THREAD_PARENT(self, _wpan_api, payload): return self.parse_fields(payload, "ES")
def THREAD_CHILD_TABLE(self, _, payload): return self.parse_fields(payload, "A(t(ESLLCCcCc))")
def THREAD_LEADER_RID(self, _, payload): return self.parse_C(payload)
def THREAD_LEADER_WEIGHT(self, _, payload):
return self.parse_C(payload)
def THREAD_LOCAL_LEADER_WEIGHT(self, _, payload):
return self.parse_C(payload)
def THREAD_NETWORK_DATA(self, _, payload):
return self.parse_D(payload)
def THREAD_NETWORK_DATA_VERSION(self, _wpan_api, payload):
return self.parse_C(payload)
def THREAD_STABLE_NETWORK_DATA(self, _wpan_api, payload): pass
def THREAD_STABLE_NETWORK_DATA_VERSION(self, _wpan_api, payload):
return self.parse_C(payload)
def __init__(self):
self.autoAddresses = set()
self.wpan_api = None
self.__queue_prefix = queue.Queue()
self.prefix_thread = threading.Thread(target=self.__run_prefix_handler)
self.prefix_thread.setDaemon(True)
self.prefix_thread.start()
def handle_prefix_change(self, payload):
""" Automatically ipaddr add / remove addresses for each new prefix. """
# As done by cli.cpp Interpreter::HandleNetifStateChanged
# First parse payload and extract slaac prefix information.
pay = payload
Prefix = namedtuple("Prefix", "prefix prefixlen stable flags is_local")
prefixes = []
slaacPrefixSet = set()
while len(pay) >= 22:
(_structlen) = unpack('<H', pay[:2])
struct_len = _structlen[0]
pay = pay[2:]
prefix = Prefix(*unpack('16sBBBB', pay[:20]))
if prefix.flags & kThread.PrefixSlaacFlag:
net6 = ipaddress.IPv6Network(prefix.prefix)
net6 = net6.supernet(new_prefix=prefix.prefixlen)
slaacPrefixSet.add(net6)
prefixes.append(prefix)
pay = pay[struct_len:]
if CONFIG.DEBUG_LOG_PROP:
print("\n========= PREFIX ============")
print("ipaddrs: " + str(self.autoAddresses))
print("slaac prefix set: " + str(slaacPrefixSet))
print("==============================\n")
def __run_prefix_handler(self):
while 1:
(wpan_api, payload) = self.__queue_prefix.get(True)
self.wpan_api = wpan_api
self.handle_prefix_change(payload)
self.__queue_prefix.task_done()
def THREAD_ON_MESH_NETS(self, wpan_api, payload):
if FEATURE_USE_SLACC:
# Kick prefix handler thread to allow serial rx thread to work.
self.__queue_prefix.put_nowait((wpan_api, payload))
return self.parse_D(payload)
def THREAD_LOCAL_ROUTES(self, _wpan_api, payload): pass
def THREAD_ASSISTING_PORTS(self, _wpan_api, payload): pass
def THREAD_ALLOW_LOCAL_NET_DATA_CHANGE(self, _, payload):
return self.parse_b(payload)
def THREAD_MODE(self, _, payload): return self.parse_C(payload)
def THREAD_CHILD_COUNT_MAX(self, _, payload): return self.parse_C(payload)
def THREAD_CHILD_TIMEOUT(self, _, payload): return self.parse_L(payload)
def THREAD_RLOC16(self, _, payload): return self.parse_S(payload)
def THREAD_ROUTER_UPGRADE_THRESHOLD(self, _, payload):
return self.parse_C(payload)
def THREAD_ROUTER_DOWNGRADE_THRESHOLD(self, _, payload):
return self.parse_C(payload)
def THREAD_ROUTER_SELECTION_JITTER(self, _, payload):
return self.parse_C(payload)
def THREAD_NEIGHBOR_TABLE(self, _, payload):
return self.parse_fields(payload, 'A(t(ESLCcCbLL))')
def THREAD_CONTEXT_REUSE_DELAY(self, _, payload):
return self.parse_L(payload)
def THREAD_NETWORK_ID_TIMEOUT(self, _, payload):
return self.parse_C(payload)
def THREAD_ACTIVE_ROUTER_IDS(self, _, payload):
return self.parse_D(payload)
def THREAD_RLOC16_DEBUG_PASSTHRU(self, _, payload):
return self.parse_b(payload)
def MESHCOP_JOINER_ENABLE(self, _, payload):
return self.parse_b(payload)
def MESHCOP_JOINER_CREDENTIAL(self, _, payload):
return self.parse_D(payload)
def MESHCOP_JOINER_URL(self, _, payload):
return self.parse_U(payload)
def MESHCOP_BORDER_AGENT_ENABLE(self, _, payload):
return self.parse_b(payload)
def IPV6_LL_ADDR(self, _, payload): return self.parse_6(payload)
def IPV6_ML_ADDR(self, _, payload): return self.parse_6(payload)
def IPV6_ML_PREFIX(self, _, payload): return self.parse_E(payload)
def IPV6_ADDRESS_TABLE(self, _, payload): return self.parse_D(payload)
def IPV6_ROUTE_TABLE(self, _, payload): return self.parse_D(payload)
def IPv6_ICMP_PING_OFFLOAD(self, _, payload):
return self.parse_b(payload)
def STREAM_DEBUG(self, _, payload): return self.parse_D(payload)
def STREAM_RAW(self, _, payload): return self.parse_D(payload)
def STREAM_NET(self, _, payload): return self.parse_d(payload)
def STREAM_NET_INSECURE(self, _, payload): return self.parse_d(payload)
def STREAM_LOG(self, _, payload): return self.parse_fields(payload, "UD")
def PIB_PHY_CHANNELS_SUPPORTED(self, _wpan_api, payload): pass
def PIB_MAC_PROMISCUOUS_MODE(self, _wpan_api, payload): pass
def PIB_MAC_SECURITY_ENABLED(self, _wpan_api, payload): pass
def MSG_BUFFER_COUNTERS(self, _wpan_api, payload): return self.parse_fields(payload, "SSSSSSSSSSSSSSSS")
def ALL_MAC_COUNTERS(self, _wpan_api, payload): return self.parse_fields(payload, "t(A(L))t(A(L))")
def MLE_COUNTERS(self, _wpan_api, payload): return self.parse_fields(payload, "SSSSSSSSS")
def MAC_RETRY_HISTOGRAM(self, _wpan_api, payload): return self.parse_fields(payload, "t(A(L))t(A(L))")
def NEST_STREAM_MFG(self, _wpan_api, payload): return self.parse_U(payload)
#=========================================
class SpinelCommandHandler(SpinelCodec):
def handle_prop(self, wpan_api, name, payload, tid):
(prop_id, prop_len) = self.parse_i(payload)
if prop_id in SPINEL_PROP_DISPATCH:
handler = SPINEL_PROP_DISPATCH[prop_id]
prop_name = handler.__name__
# Skip any VALUE_INSERTED(CHILD_TABLE) or VALUE_REMOVED(CHILD_TABLE)
if prop_id == SPINEL.PROP_THREAD_CHILD_TABLE:
if name in ["INSERTED", "REMOVED"]:
return
prop_value = handler(wpan_api, payload[prop_len:])
if CONFIG.DEBUG_LOG_PROP:
# Generic output
if isinstance(prop_value, str):
prop_value_str = util.hexify_str(prop_value)
CONFIG.LOGGER.debug("PROP_VALUE_%s [tid=%d]: %s = %s",
name, (tid & 0xF), prop_name, prop_value_str)
else:
prop_value_str = str(prop_value)
CONFIG.LOGGER.debug("PROP_VALUE_%s [tid=%d]: %s = %s",
name, (tid & 0xF), prop_name, prop_value_str)
# Extend output for certain properties.
if prop_id == SPINEL.PROP_LAST_STATUS:
CONFIG.LOGGER.debug(SPINEL_LAST_STATUS_MAP[prop_value])
if CONFIG.DEBUG_LOG_PKT:
if ((prop_id == SPINEL.PROP_STREAM_NET) or
(prop_id == SPINEL.PROP_STREAM_NET_INSECURE)):
CONFIG.LOGGER.debug("PROP_VALUE_" + name + ": " + prop_name)
elif prop_id == SPINEL.PROP_STREAM_DEBUG:
CONFIG.LOGGER.debug("DEBUG: " + prop_value)
if wpan_api:
wpan_api.queue_add(prop_id, prop_value, tid)
else:
print("no wpan_api")
elif CONFIG.DEBUG_LOG_PROP:
prop_name = "Property Unknown"
CONFIG.LOGGER.info("\n%s (%i): ", prop_name, prop_id)
def PROP_VALUE_IS(self, wpan_api, payload, tid):
self.handle_prop(wpan_api, "IS", payload, tid)
def PROP_VALUE_INSERTED(self, wpan_api, payload, tid):
self.handle_prop(wpan_api, "INSERTED", payload, tid)
def PROP_VALUE_REMOVED(self, wpan_api, payload, tid):
self.handle_prop(wpan_api, "REMOVED", payload, tid)
WPAN_CMD_HANDLER = SpinelCommandHandler()
SPINEL_COMMAND_DISPATCH = {
SPINEL.RSP_PROP_VALUE_IS: WPAN_CMD_HANDLER.PROP_VALUE_IS,
SPINEL.RSP_PROP_VALUE_INSERTED: WPAN_CMD_HANDLER.PROP_VALUE_INSERTED,
SPINEL.RSP_PROP_VALUE_REMOVED: WPAN_CMD_HANDLER.PROP_VALUE_REMOVED,
}
try:
codec = importlib.import_module('vendor.codec')
cls = type(codec.VendorSpinelPropertyHandler.__name__, (SpinelPropertyHandler, codec.VendorSpinelPropertyHandler),
{'__name__': codec.VendorSpinelPropertyHandler.__name__})
WPAN_PROP_HANDLER = cls()
except ImportError:
codec = None
WPAN_PROP_HANDLER = SpinelPropertyHandler()
SPINEL_PROP_DISPATCH = {
SPINEL.PROP_LAST_STATUS: WPAN_PROP_HANDLER.LAST_STATUS,
SPINEL.PROP_PROTOCOL_VERSION: WPAN_PROP_HANDLER.PROTOCOL_VERSION,
SPINEL.PROP_NCP_VERSION: WPAN_PROP_HANDLER.NCP_VERSION,
SPINEL.PROP_INTERFACE_TYPE: WPAN_PROP_HANDLER.INTERFACE_TYPE,
SPINEL.PROP_VENDOR_ID: WPAN_PROP_HANDLER.VENDOR_ID,
SPINEL.PROP_CAPS: WPAN_PROP_HANDLER.CAPS,
SPINEL.PROP_INTERFACE_COUNT: WPAN_PROP_HANDLER.INTERFACE_COUNT,
SPINEL.PROP_POWER_STATE: WPAN_PROP_HANDLER.POWER_STATE,
SPINEL.PROP_HWADDR: WPAN_PROP_HANDLER.HWADDR,
SPINEL.PROP_LOCK: WPAN_PROP_HANDLER.LOCK,
SPINEL.PROP_HBO_MEM_MAX: WPAN_PROP_HANDLER.HBO_MEM_MAX,
SPINEL.PROP_HBO_BLOCK_MAX: WPAN_PROP_HANDLER.HBO_BLOCK_MAX,
SPINEL.PROP_PHY_ENABLED: WPAN_PROP_HANDLER.PHY_ENABLED,
SPINEL.PROP_PHY_CHAN: WPAN_PROP_HANDLER.PHY_CHAN,
SPINEL.PROP_PHY_CHAN_SUPPORTED: WPAN_PROP_HANDLER.PHY_CHAN_SUPPORTED,
SPINEL.PROP_PHY_FREQ: WPAN_PROP_HANDLER.PHY_FREQ,
SPINEL.PROP_PHY_CCA_THRESHOLD: WPAN_PROP_HANDLER.PHY_CCA_THRESHOLD,
SPINEL.PROP_PHY_TX_POWER: WPAN_PROP_HANDLER.PHY_TX_POWER,
SPINEL.PROP_PHY_RSSI: WPAN_PROP_HANDLER.PHY_RSSI,
SPINEL.PROP_MAC_SCAN_STATE: WPAN_PROP_HANDLER.MAC_SCAN_STATE,
SPINEL.PROP_MAC_SCAN_MASK: WPAN_PROP_HANDLER.MAC_SCAN_MASK,
SPINEL.PROP_MAC_SCAN_PERIOD: WPAN_PROP_HANDLER.MAC_SCAN_PERIOD,
SPINEL.PROP_MAC_SCAN_BEACON: WPAN_PROP_HANDLER.MAC_SCAN_BEACON,
SPINEL.PROP_MAC_15_4_LADDR: WPAN_PROP_HANDLER.MAC_15_4_LADDR,
SPINEL.PROP_MAC_15_4_SADDR: WPAN_PROP_HANDLER.MAC_15_4_SADDR,
SPINEL.PROP_MAC_15_4_PANID: WPAN_PROP_HANDLER.MAC_15_4_PANID,
SPINEL.PROP_MAC_RAW_STREAM_ENABLED: WPAN_PROP_HANDLER.MAC_RAW_STREAM_ENABLED,
SPINEL.PROP_MAC_FILTER_MODE: WPAN_PROP_HANDLER.MAC_FILTER_MODE,
SPINEL.PROP_MAC_WHITELIST: WPAN_PROP_HANDLER.MAC_WHITELIST,
SPINEL.PROP_MAC_WHITELIST_ENABLED: WPAN_PROP_HANDLER.MAC_WHITELIST_ENABLED,
SPINEL.PROP_MAC_BLACKLIST: WPAN_PROP_HANDLER.MAC_BLACKLIST,
SPINEL.PROP_MAC_BLACKLIST_ENABLED: WPAN_PROP_HANDLER.MAC_BLACKLIST_ENABLED,
SPINEL.PROP_MAC_FIXED_RSS: WPAN_PROP_HANDLER.MAC_FIXED_RSS,
SPINEL.PROP_MAC_MAX_RETRY_NUMBER_DIRECT: WPAN_PROP_HANDLER.MAC_MAX_RETRY_NUMBER_DIRECT,
SPINEL.PROP_MAC_MAX_RETRY_NUMBER_INDIRECT: WPAN_PROP_HANDLER.MAC_MAX_RETRY_NUMBER_INDIRECT,
SPINEL.PROP_NET_SAVED: WPAN_PROP_HANDLER.NET_SAVED,
SPINEL.PROP_NET_IF_UP: WPAN_PROP_HANDLER.NET_IF_UP,
SPINEL.PROP_NET_STACK_UP: WPAN_PROP_HANDLER.NET_STACK_UP,
SPINEL.PROP_NET_ROLE: WPAN_PROP_HANDLER.NET_ROLE,
SPINEL.PROP_NET_NETWORK_NAME: WPAN_PROP_HANDLER.NET_NETWORK_NAME,
SPINEL.PROP_NET_XPANID: WPAN_PROP_HANDLER.NET_XPANID,
SPINEL.PROP_NET_MASTER_KEY: WPAN_PROP_HANDLER.NET_MASTER_KEY,
SPINEL.PROP_NET_KEY_SEQUENCE_COUNTER: WPAN_PROP_HANDLER.NET_KEY_SEQUENCE_COUNTER,
SPINEL.PROP_NET_PARTITION_ID: WPAN_PROP_HANDLER.NET_PARTITION_ID,
SPINEL.PROP_NET_KEY_SWITCH_GUARDTIME: WPAN_PROP_HANDLER.NET_KEY_SWITCH_GUARDTIME,
SPINEL.PROP_THREAD_LEADER_ADDR: WPAN_PROP_HANDLER.THREAD_LEADER_ADDR,
SPINEL.PROP_THREAD_PARENT: WPAN_PROP_HANDLER.THREAD_PARENT,
SPINEL.PROP_THREAD_CHILD_TABLE: WPAN_PROP_HANDLER.THREAD_CHILD_TABLE,
SPINEL.PROP_THREAD_LEADER_RID: WPAN_PROP_HANDLER.THREAD_LEADER_RID,
SPINEL.PROP_THREAD_LEADER_WEIGHT: WPAN_PROP_HANDLER.THREAD_LEADER_WEIGHT,
SPINEL.PROP_THREAD_LOCAL_LEADER_WEIGHT: WPAN_PROP_HANDLER.THREAD_LOCAL_LEADER_WEIGHT,
SPINEL.PROP_THREAD_NETWORK_DATA: WPAN_PROP_HANDLER.THREAD_NETWORK_DATA,
SPINEL.PROP_THREAD_NETWORK_DATA_VERSION: WPAN_PROP_HANDLER.THREAD_NETWORK_DATA_VERSION,
SPINEL.PROP_THREAD_STABLE_NETWORK_DATA: WPAN_PROP_HANDLER.THREAD_STABLE_NETWORK_DATA,
SPINEL.PROP_THREAD_STABLE_NETWORK_DATA_VERSION:
WPAN_PROP_HANDLER.THREAD_STABLE_NETWORK_DATA_VERSION,
SPINEL.PROP_THREAD_ON_MESH_NETS: WPAN_PROP_HANDLER.THREAD_ON_MESH_NETS,
SPINEL.PROP_THREAD_LOCAL_ROUTES: WPAN_PROP_HANDLER.THREAD_LOCAL_ROUTES,
SPINEL.PROP_THREAD_ASSISTING_PORTS: WPAN_PROP_HANDLER.THREAD_ASSISTING_PORTS,
SPINEL.PROP_THREAD_ALLOW_LOCAL_NET_DATA_CHANGE:
WPAN_PROP_HANDLER.THREAD_ALLOW_LOCAL_NET_DATA_CHANGE,
SPINEL.PROP_THREAD_MODE: WPAN_PROP_HANDLER.THREAD_MODE,
SPINEL.PROP_THREAD_CHILD_COUNT_MAX: WPAN_PROP_HANDLER.THREAD_CHILD_COUNT_MAX,
SPINEL.PROP_THREAD_CHILD_TIMEOUT: WPAN_PROP_HANDLER.THREAD_CHILD_TIMEOUT,
SPINEL.PROP_THREAD_RLOC16: WPAN_PROP_HANDLER.THREAD_RLOC16,
SPINEL.PROP_THREAD_ROUTER_UPGRADE_THRESHOLD: WPAN_PROP_HANDLER.THREAD_ROUTER_UPGRADE_THRESHOLD,
SPINEL.PROP_THREAD_ROUTER_DOWNGRADE_THRESHOLD:
WPAN_PROP_HANDLER.THREAD_ROUTER_DOWNGRADE_THRESHOLD,
SPINEL.PROP_THREAD_ROUTER_SELECTION_JITTER: WPAN_PROP_HANDLER.THREAD_ROUTER_SELECTION_JITTER,
SPINEL.PROP_THREAD_CONTEXT_REUSE_DELAY: WPAN_PROP_HANDLER.THREAD_CONTEXT_REUSE_DELAY,
SPINEL.PROP_THREAD_NETWORK_ID_TIMEOUT: WPAN_PROP_HANDLER.THREAD_NETWORK_ID_TIMEOUT,
SPINEL.PROP_THREAD_ACTIVE_ROUTER_IDS: WPAN_PROP_HANDLER.THREAD_ACTIVE_ROUTER_IDS,
SPINEL.PROP_THREAD_RLOC16_DEBUG_PASSTHRU: WPAN_PROP_HANDLER.THREAD_RLOC16_DEBUG_PASSTHRU,
SPINEL.PROP_THREAD_NEIGHBOR_TABLE: WPAN_PROP_HANDLER.THREAD_NEIGHBOR_TABLE,
SPINEL.PROP_MESHCOP_JOINER_ENABLE: WPAN_PROP_HANDLER.MESHCOP_JOINER_ENABLE,
SPINEL.PROP_MESHCOP_JOINER_CREDENTIAL: WPAN_PROP_HANDLER.MESHCOP_JOINER_CREDENTIAL,
SPINEL.PROP_MESHCOP_JOINER_URL: WPAN_PROP_HANDLER.MESHCOP_JOINER_URL,
SPINEL.PROP_MESHCOP_BORDER_AGENT_ENABLE: WPAN_PROP_HANDLER.MESHCOP_BORDER_AGENT_ENABLE,
SPINEL.PROP_IPV6_LL_ADDR: WPAN_PROP_HANDLER.IPV6_LL_ADDR,
SPINEL.PROP_IPV6_ML_ADDR: WPAN_PROP_HANDLER.IPV6_ML_ADDR,
SPINEL.PROP_IPV6_ML_PREFIX: WPAN_PROP_HANDLER.IPV6_ML_PREFIX,
SPINEL.PROP_IPV6_ADDRESS_TABLE: WPAN_PROP_HANDLER.IPV6_ADDRESS_TABLE,
SPINEL.PROP_IPV6_ROUTE_TABLE: WPAN_PROP_HANDLER.IPV6_ROUTE_TABLE,
SPINEL.PROP_IPv6_ICMP_PING_OFFLOAD: WPAN_PROP_HANDLER.IPv6_ICMP_PING_OFFLOAD,
SPINEL.PROP_STREAM_DEBUG: WPAN_PROP_HANDLER.STREAM_DEBUG,
SPINEL.PROP_STREAM_RAW: WPAN_PROP_HANDLER.STREAM_RAW,
SPINEL.PROP_STREAM_NET: WPAN_PROP_HANDLER.STREAM_NET,
SPINEL.PROP_STREAM_NET_INSECURE: WPAN_PROP_HANDLER.STREAM_NET_INSECURE,
SPINEL.PROP_STREAM_LOG: WPAN_PROP_HANDLER.STREAM_LOG,
SPINEL.PROP_PIB_15_4_PHY_CHANNELS_SUPPORTED: WPAN_PROP_HANDLER.PIB_PHY_CHANNELS_SUPPORTED,
SPINEL.PROP_PIB_15_4_MAC_PROMISCUOUS_MODE: WPAN_PROP_HANDLER.PIB_MAC_PROMISCUOUS_MODE,
SPINEL.PROP_PIB_15_4_MAC_SECURITY_ENABLED: WPAN_PROP_HANDLER.PIB_MAC_SECURITY_ENABLED,
SPINEL.PROP_MSG_BUFFER_COUNTERS: WPAN_PROP_HANDLER.MSG_BUFFER_COUNTERS,
SPINEL.PROP_CNTR_ALL_MAC_COUNTERS: WPAN_PROP_HANDLER.ALL_MAC_COUNTERS,
SPINEL.PROP_CNTR_MLE_COUNTERS: WPAN_PROP_HANDLER.MLE_COUNTERS,
SPINEL.PROP_CNTR_MAC_RETRY_HISTOGRAM: WPAN_PROP_HANDLER.MAC_RETRY_HISTOGRAM,
SPINEL.PROP_NEST_STREAM_MFG: WPAN_PROP_HANDLER.NEST_STREAM_MFG
}
if codec is not None:
SPINEL_PROP_DISPATCH.update(codec.VENDOR_SPINEL_PROP_DISPATCH)
class WpanApi(SpinelCodec):
""" Helper class to format wpan command packets """
def __init__(self, stream, nodeid, use_hdlc=FEATURE_USE_HDLC, timeout=TIMEOUT_PROP):
self.stream = stream
self.nodeid = nodeid
self.timeout = timeout
self.use_hdlc = use_hdlc
if self.use_hdlc:
self.hdlc = Hdlc(self.stream)
# PARSER state
self.rx_pkt = []
self.callback = defaultdict(list) # Map prop_id to list of callbacks.
# Fire up threads
self._reader_alive = True
self.tid_filter = set()
self.__queue_prop = defaultdict(queue.Queue) # Map tid to Queue.
self.queue_register()
self.__start_reader()
def __del__(self):
self._reader_alive = False
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._reader_alive = False
def __start_reader(self):
"""Start reader thread"""
self._reader_alive = True
# start serial->console thread
self.receiver_thread = threading.Thread(target=self.stream_rx)
        self.receiver_thread.daemon = True
self.receiver_thread.start()
def transact(self, command_id, payload=bytes(), tid=SPINEL.HEADER_DEFAULT):
pkt = self.encode_packet(command_id, payload, tid)
if CONFIG.DEBUG_LOG_SERIAL:
msg = "TX Pay: (%i) %s " % (len(pkt), binascii.hexlify(pkt).decode('utf-8'))
CONFIG.LOGGER.debug(msg)
if self.use_hdlc:
pkt = self.hdlc.encode(pkt)
self.stream_tx(pkt)
def parse_rx(self, pkt):
if not pkt:
return
if CONFIG.DEBUG_LOG_SERIAL:
msg = "RX Pay: (%i) %s " % (
len(pkt), binascii.hexlify(pkt).decode('utf-8'))
CONFIG.LOGGER.debug(msg)
length = len(pkt) - 2
if length < 0:
return
spkt = pkt
#if not isinstance(spkt, str):
# spkt = "".join(map(chr, spkt))
tid = self.parse_C(spkt[:1])
(cmd_id, cmd_length) = self.parse_i(spkt[1:])
pay_start = cmd_length + 1
payload = spkt[pay_start:]
try:
handler = SPINEL_COMMAND_DISPATCH[cmd_id]
cmd_name = handler.__name__
handler(self, payload, tid)
except Exception as _ex:
print(traceback.format_exc())
cmd_name = "CB_Unknown"
CONFIG.LOGGER.info("\n%s (%i): ", cmd_name, cmd_id)
if CONFIG.DEBUG_CMD_RESPONSE:
CONFIG.LOGGER.info("\n%s (%i): ", cmd_name, cmd_id)
CONFIG.LOGGER.info("===> %s", binascii.hexlify(payload).decode('utf-8'))
def stream_tx(self, pkt):
        # Encapsulate logging and Framer support in self.stream class.
self.stream.write(pkt)
def stream_rx(self):
""" Recieve thread and parser. """
try:
while self._reader_alive:
if self.use_hdlc:
self.rx_pkt = self.hdlc.collect()
else:
# size=None: Assume stream will always deliver packets
pkt = self.stream.read(None)
self.rx_pkt = util.packed_to_array(pkt)
self.parse_rx(self.rx_pkt)
except:
if self._reader_alive:
raise
else:
# Ignore the error since we are exiting
pass
class PropertyItem(object):
""" Queue item for NCP response to property commands. """
def __init__(self, prop, value, tid):
self.prop = prop
self.value = value
self.tid = tid
def callback_register(self, prop, cb):
self.callback[prop].append(cb)
def queue_register(self, tid=SPINEL.HEADER_DEFAULT):
self.tid_filter.add(tid)
return self.__queue_prop[tid]
def queue_wait_prepare(self, _prop_id, tid=SPINEL.HEADER_DEFAULT):
self.queue_clear(tid)
def queue_add(self, prop, value, tid):
cb_list = self.callback[prop]
# Asynchronous handlers can consume message and not add to queue.
if len(cb_list) > 0:
consumed = cb_list[0](prop, value, tid)
if consumed: return
if tid not in self.tid_filter:
return
item = self.PropertyItem(prop, value, tid)
self.__queue_prop[tid].put_nowait(item)
def queue_clear(self, tid):
with self.__queue_prop[tid].mutex:
self.__queue_prop[tid].queue.clear()
def queue_get(self, tid, timeout = None):
try:
if (timeout):
item = self.__queue_prop[tid].get(True, timeout)
else:
item = self.__queue_prop[tid].get_nowait()
except queue.Empty:
item = None
return item
def queue_wait_for_prop(self, _prop, tid=SPINEL.HEADER_DEFAULT, timeout=None):
if _prop is None:
return None
if timeout is None:
timeout = self.timeout
processed_queue = queue.Queue()
timeout_time = time.time() + timeout
while time.time() < timeout_time:
item = self.queue_get(tid, timeout_time - time.time())
if item is None:
continue
if item.prop == _prop:
break
processed_queue.put_nowait(item)
else:
item = None
# To make sure that all received properties will be processed in the same order.
with self.__queue_prop[tid].mutex:
while self.__queue_prop[tid]._qsize() > 0:
processed_queue.put(self.__queue_prop[tid]._get())
while not processed_queue.empty():
self.__queue_prop[tid]._put(processed_queue.get_nowait())
return item
def ip_send(self, pkt):
pay = self.encode_i(SPINEL.PROP_STREAM_NET)
pkt_len = len(pkt)
pay += pack("<H", pkt_len) # Start with length of IPv6 packet
pkt_len += 2 # Increment to include length word
pay += pkt # Append packet after length
self.transact(SPINEL.CMD_PROP_VALUE_SET, pay)
def cmd_reset(self):
self.queue_wait_prepare(None, SPINEL.HEADER_ASYNC)
self.transact(SPINEL.CMD_RESET)
result = self.queue_wait_for_prop(SPINEL.PROP_LAST_STATUS, SPINEL.HEADER_ASYNC)
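        # 114 is the Spinel "reset due to software" status code (STATUS_RESET_SOFTWARE);
        # any other status value means the reset did not complete as expected.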
return (result is not None and result.value == 114)
def cmd_send(self, command_id, payload=bytes(), tid=SPINEL.HEADER_DEFAULT):
self.queue_wait_prepare(None, tid)
self.transact(command_id, payload, tid)
self.queue_wait_for_prop(None, tid)
def prop_change_async(self, cmd, prop_id, value, py_format='B',
tid=SPINEL.HEADER_DEFAULT):
pay = self.encode_i(prop_id)
        if py_format is not None:
pay += pack(py_format, value)
self.transact(cmd, pay, tid)
def prop_insert_async(self, prop_id, value, py_format='B',
tid=SPINEL.HEADER_DEFAULT):
self.prop_change_async(SPINEL.CMD_PROP_VALUE_INSERT, prop_id,
value, py_format, tid)
def prop_remove_async(self, prop_id, value, py_format='B',
tid=SPINEL.HEADER_DEFAULT):
self.prop_change_async(SPINEL.CMD_PROP_VALUE_REMOVE, prop_id,
value, py_format, tid)
def __prop_change_value(self, cmd, prop_id, value, py_format='B',
tid=SPINEL.HEADER_DEFAULT):
""" Utility routine to change a property value over SPINEL. """
self.queue_wait_prepare(prop_id, tid)
pay = self.encode_i(prop_id)
        if py_format is not None:
pay += pack(py_format, value)
self.transact(cmd, pay, tid)
result = self.queue_wait_for_prop(prop_id, tid)
if result:
return result.value
else:
return None
def prop_get_value(self, prop_id, tid=SPINEL.HEADER_DEFAULT):
""" Blocking routine to get a property value over SPINEL. """
if CONFIG.DEBUG_LOG_PROP:
handler = SPINEL_PROP_DISPATCH[prop_id]
prop_name = handler.__name__
print("PROP_VALUE_GET [tid=%d]: %s" % (tid & 0xF, prop_name))
return self.__prop_change_value(SPINEL.CMD_PROP_VALUE_GET, prop_id,
None, None, tid)
def prop_set_value(self, prop_id, value, py_format='B',
tid=SPINEL.HEADER_DEFAULT):
""" Blocking routine to set a property value over SPINEL. """
if CONFIG.DEBUG_LOG_PROP:
handler = SPINEL_PROP_DISPATCH[prop_id]
prop_name = handler.__name__
print("PROP_VALUE_SET [tid=%d]: %s" % (tid & 0xF, prop_name))
return self.__prop_change_value(SPINEL.CMD_PROP_VALUE_SET, prop_id,
value, py_format, tid)
def prop_insert_value(self, prop_id, value, py_format='B',
tid=SPINEL.HEADER_DEFAULT):
""" Blocking routine to insert a property value over SPINEL. """
if CONFIG.DEBUG_LOG_PROP:
handler = SPINEL_PROP_DISPATCH[prop_id]
prop_name = handler.__name__
print("PROP_VALUE_INSERT [tid=%d]: %s" % (tid & 0xF, prop_name))
return self.__prop_change_value(SPINEL.CMD_PROP_VALUE_INSERT, prop_id,
value, py_format, tid)
def prop_remove_value(self, prop_id, value, py_format='B',
tid=SPINEL.HEADER_DEFAULT):
""" Blocking routine to remove a property value over SPINEL. """
if CONFIG.DEBUG_LOG_PROP:
handler = SPINEL_PROP_DISPATCH[prop_id]
prop_name = handler.__name__
print("PROP_VALUE_REMOVE [tid=%d]: %s" % (tid & 0xF, prop_name))
return self.__prop_change_value(SPINEL.CMD_PROP_VALUE_REMOVE, prop_id,
value, py_format, tid)
def get_ipaddrs(self, tid=SPINEL.HEADER_DEFAULT):
"""
Return current list of ip addresses for the device.
"""
value = self.prop_get_value(SPINEL.PROP_IPV6_ADDRESS_TABLE, tid)
# TODO: clean up table parsing to be less hard-coded magic.
if value is None:
return None
size = 0x1B
addrs = [value[i:i + size] for i in range(0, len(value), size)]
ipaddrs = []
for addr in addrs:
addr = addr[2:18]
ipaddrs.append(ipaddress.IPv6Address(addr))
return ipaddrs
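# Illustrative sketch (not part of the original module): typical blocking property
# access once a WpanApi instance exists. `stream` stands for any object providing the
# read()/write() interface used by stream_rx()/stream_tx() above (e.g. a serial or
# socket wrapper supplied by the caller); the concrete stream class lives elsewhere.
def _demo_wpan_props(stream, nodeid=1):
    wpan = WpanApi(stream, nodeid)
    ncp_version = wpan.prop_get_value(SPINEL.PROP_NCP_VERSION)
    wpan.prop_set_value(SPINEL.PROP_PHY_CHAN, 11)                  # single byte, default 'B' format
    wpan.prop_set_value(SPINEL.PROP_MAC_15_4_PANID, 0x1234, 'H')   # 16-bit value, struct format 'H'
    return ncp_version, wpan.get_ipaddrs()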
|
rkclusterlock_sample.py
|
import time
from threading import Thread, current_thread
from rkclusterlock import RKClusterLock
def runClient():
#rlock = RKClusterLock('localhost', 9191,'FKAPP')
rlock = RKClusterLock('13.251.32.176', 9191,'FKAPP')
cur_thread = current_thread()
    while True:
data = ""
resp, data = rlock.acquire(wait=True, acquire_wait_time=5, max_release_time=5)
if resp:
try:
print(f"Got Lock for thread {cur_thread.name} with data {data}")
# Here is what we will do during the lock mode
if data:
try:
int_data = int(data)
int_data = int_data + 1
data = str(int_data)
except:
data = "1"
else:
data = "1"
#time.sleep(10)
finally:
rlock.release(data)
else:
print(f"Failed to get Lock for thread {cur_thread.name}")
time.sleep(1)
"""
print("Lets start some clients now")
rlock = RKClusterLock('13.251.32.176', 9191,'FKAPP')
data = ""
resp, data = rlock.acquire(wait=True, acquire_wait_time=5, max_release_time=5)
print(data)
if data:
try:
int_data = int(data)
int_data = int_data + 1
data = str(int_data)
except:
data = "1"
else:
data = "1"
rlock.release(str(data))
"""
for i in range(200):
print(f"Creating Client {i}")
client = Thread(target=runClient)
client.daemon = False
client.start()
|
mupen64plus_env.py
|
import sys
PY3_OR_LATER = sys.version_info[0] >= 3
if PY3_OR_LATER:
# Python 3 specific definitions
from http.server import BaseHTTPRequestHandler, HTTPServer
else:
# Python 2 specific definitions
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import abc
import array
from contextlib import contextmanager
import inspect
import itertools
import json
import os
import subprocess
import threading
import time
from termcolor import cprint
import yaml
import gym
from gym import error, spaces, utils
from gym.utils import seeding
import numpy as np
import mss
###############################################
class ImageHelper:
def GetPixelColor(self, image_array, x, y):
base_pixel = image_array[y][x]
red = base_pixel[0]
green = base_pixel[1]
blue = base_pixel[2]
return (red, green, blue)
###############################################
### Variables & Constants ###
###############################################
# The width, height, and depth of the emulator window:
SCR_W = 640
SCR_H = 480
SCR_D = 3
MILLISECOND = 1.0 / 1000.0
IMAGE_HELPER = ImageHelper()
###############################################
class Mupen64PlusEnv(gym.Env):
__metaclass__ = abc.ABCMeta
metadata = {'render.modes': ['human']}
def __init__(self):
self.viewer = None
self.reset_count = 0
self.step_count = 0
self.running = True
self.episode_over = False
self.pixel_array = None
self.np_random = np.random.RandomState()
self._base_load_config()
self._base_validate_config()
self.frame_skip = self.config['FRAME_SKIP']
if self.frame_skip < 1:
self.frame_skip = 1
self.controller_server, self.controller_server_thread = self._start_controller_server()
initial_disp = os.environ["DISPLAY"]
cprint('Initially on DISPLAY %s' % initial_disp, 'red')
# If the EXTERNAL_EMULATOR environment variable is True, we are running the
# emulator out-of-process (likely via docker/docker-compose). If not, we need
# to start the emulator in-process here
external_emulator = "EXTERNAL_EMULATOR" in os.environ and os.environ["EXTERNAL_EMULATOR"] == 'True'
if not external_emulator:
self.xvfb_process, self.emulator_process = \
self._start_emulator(rom_name=self.config['ROM_NAME'],
gfx_plugin=self.config['GFX_PLUGIN'],
input_driver_path=self.config['INPUT_DRIVER_PATH'])
# TODO: Test and cleanup:
# May need to initialize this after the DISPLAY env var has been set
# so it attaches to the correct X display; otherwise screenshots may
# come from the wrong place. This used to be true when we were using
# wxPython for screenshots. Untested after switching to mss.
cprint('Calling mss.mss() with DISPLAY %s' % os.environ["DISPLAY"], 'red')
self.mss_grabber = mss.mss()
time.sleep(10) # Give mss a couple seconds to initialize; also may not be necessary
# Restore the DISPLAY env var
os.environ["DISPLAY"] = initial_disp
cprint('Changed back to DISPLAY %s' % os.environ["DISPLAY"], 'red')
with self.controller_server.frame_skip_disabled():
self._navigate_menu()
self.observation_space = \
spaces.Box(low=0, high=255, shape=(SCR_H, SCR_W, SCR_D))
# Actions are as follows:
# [Joystick X, Joystick Y, A, B, RB, LB, Z, C Right, C Left, C Down, C UP,
# D-Pad Right, D-Pad Left, D-Pad Down, D-Pad Up, Start]
        # Joystick axes are signed (-80..80), so the dtype must be a signed integer
        # type; np.uint8 cannot represent the negative axis range.
        self.action_space = spaces.Box(np.array([-80, -80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
                                       np.array([80, 80, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]),
                                       shape=(16,),
                                       dtype=np.int8)
def _base_load_config(self):
        config_path = os.path.join(os.path.dirname(inspect.stack()[0][1]), "config.yml")
        with open(config_path) as config_file:
            self.config = yaml.safe_load(config_file)
self._load_config()
@abc.abstractmethod
def _load_config(self):
return
def _base_validate_config(self):
if 'ROM_NAME' not in self.config:
raise AssertionError('ROM_NAME configuration is required')
if 'GFX_PLUGIN' not in self.config:
raise AssertionError('GFX_PLUGIN configuration is required')
self._validate_config()
@abc.abstractmethod
def _validate_config(self):
return
def step(self, action):
#cprint('Step %i: %s' % (self.step_count, action), 'green')
self._act(action)
obs = self.observe()
self.episode_over = self._evaluate_end_state()
reward = self._get_reward()
self.step_count += 1
return obs, reward, self.episode_over, {}
def _act(self, action, count=1):
for _ in itertools.repeat(None, count):
self.controller_server.send_controls(ControllerState(action))
def _wait(self, count=1, wait_for='Unknown'):
self._act(ControllerState.NO_OP, count=count)
def _press_button(self, button, times=1):
for _ in itertools.repeat(None, times):
self._act(button) # Press
self._act(ControllerState.NO_OP) # and release
def observe(self):
#cprint('Observe called!', 'yellow')
if self.config['USE_XVFB']:
offset_x = 0
offset_y = 0
else:
offset_x = self.config['OFFSET_X']
offset_y = self.config['OFFSET_Y']
image_array = \
np.array(self.mss_grabber.grab({"top": offset_y,
"left": offset_x,
"width": SCR_W,
"height": SCR_H}),
dtype=np.uint8)
# drop the alpha channel and flip red and blue channels (BGRA -> RGB)
self.pixel_array = np.flip(image_array[:, :, :3], 2)
return self.pixel_array
@abc.abstractmethod
def _navigate_menu(self):
return
@abc.abstractmethod
def _get_reward(self):
#cprint('Get Reward called!', 'yellow')
return 0
@abc.abstractmethod
def _evaluate_end_state(self):
#cprint('Evaluate End State called!', 'yellow')
return False
@abc.abstractmethod
def reset(self):
cprint('Reset called!', 'yellow')
self.reset_count += 1
self.step_count = 0
return self.observe()
def render(self, mode='human', close=False):
if close:
if hasattr(self, 'viewer') and self.viewer is not None:
self.viewer.close()
self.viewer = None
return
img = self.pixel_array
if mode == 'rgb_array':
return img
elif mode == 'human':
if not hasattr(self, 'viewer') or self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.SimpleImageViewer()
self.viewer.imshow(img)
def close(self):
cprint('Close called!', 'yellow')
self.running = False
self._kill_emulator()
self._stop_controller_server()
def _start_controller_server(self):
server = ControllerHTTPServer(server_address = ('', self.config['PORT_NUMBER']),
control_timeout = self.config['ACTION_TIMEOUT'],
frame_skip = self.frame_skip) # TODO: Environment argument (with issue #26)
server_thread = threading.Thread(target=server.serve_forever, args=())
server_thread.daemon = True
server_thread.start()
print('ControllerHTTPServer started on port ', self.config['PORT_NUMBER'])
return server, server_thread
def _stop_controller_server(self):
#cprint('Stop Controller Server called!', 'yellow')
if hasattr(self, 'controller_server'):
self.controller_server.shutdown()
def _start_emulator(self,
rom_name,
gfx_plugin,
input_driver_path,
res_w=SCR_W,
res_h=SCR_H,
res_d=SCR_D):
rom_path = os.path.abspath(
os.path.join(os.path.dirname(inspect.stack()[0][1]),
'../ROMs',
rom_name))
if not os.path.isfile(rom_path):
msg = "ROM not found: " + rom_path
cprint(msg, 'red')
raise Exception(msg)
input_driver_path = os.path.abspath(os.path.expanduser(input_driver_path))
if not os.path.isfile(input_driver_path):
msg = "Input driver not found: " + input_driver_path
cprint(msg, 'red')
raise Exception(msg)
cmd = [self.config['MUPEN_CMD'],
"--nospeedlimit",
"--nosaveoptions",
"--resolution",
"%ix%i" % (res_w, res_h),
"--gfx", gfx_plugin,
"--audio", "dummy",
"--input", input_driver_path,
rom_path]
xvfb_proc = None
if self.config['USE_XVFB']:
display_num = -1
success = False
# If we couldn't find an open display number after 15 attempts, give up
while not success and display_num <= 15:
display_num += 1
xvfb_cmd = [self.config['XVFB_CMD'],
":" + str(display_num),
"-screen",
"0",
"%ix%ix%i" % (res_w, res_h, res_d * 8),
"-fbdir",
self.config['TMP_DIR']]
cprint('Starting xvfb with command: %s' % xvfb_cmd, 'yellow')
xvfb_proc = subprocess.Popen(xvfb_cmd, shell=False, stderr=subprocess.STDOUT)
time.sleep(2) # Give xvfb a couple seconds to start up
# Poll the process to see if it exited early
# (most likely due to a server already active on the display_num)
if xvfb_proc.poll() is None:
success = True
print('') # new line
if not success:
msg = "Failed to initialize Xvfb!"
cprint(msg, 'red')
raise Exception(msg)
os.environ["DISPLAY"] = ":" + str(display_num)
cprint('Using DISPLAY %s' % os.environ["DISPLAY"], 'blue')
cprint('Changed to DISPLAY %s' % os.environ["DISPLAY"], 'red')
cmd = [self.config['VGLRUN_CMD'], "-d", ":" + str(display_num)] + cmd
        cprint('Starting emulator with command: %s' % cmd, 'yellow')
emulator_process = subprocess.Popen(cmd,
env=os.environ.copy(),
shell=False,
stderr=subprocess.STDOUT)
emu_mon = EmulatorMonitor()
monitor_thread = threading.Thread(target=emu_mon.monitor_emulator,
args=[emulator_process])
monitor_thread.daemon = True
monitor_thread.start()
return xvfb_proc, emulator_process
def _kill_emulator(self):
#cprint('Kill Emulator called!', 'yellow')
try:
self._act(ControllerState.NO_OP)
if self.emulator_process is not None:
self.emulator_process.kill()
if self.xvfb_process is not None:
self.xvfb_process.terminate()
except AttributeError:
            pass # We may be shut down during initialization before these attributes have been set
###############################################
class EmulatorMonitor:
def monitor_emulator(self, emulator):
emu_return = emulator.poll()
while emu_return is None:
time.sleep(2)
if emulator is not None:
emu_return = emulator.poll()
else:
print('Emulator reference is no longer valid. Shutting down?')
return
# TODO: this means our environment died... need to die too
print('Emulator closed with code: ' + str(emu_return))
###############################################
class ControllerState(object):
# Controls [ JX, JY, A, B, RB, LB, Z, CR, CL, CD, CU, DR, DL, DD, DU, S]
NO_OP = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
START_BUTTON = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
A_BUTTON = [ 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
B_BUTTON = [ 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
RB_BUTTON = [ 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
CR_BUTTON = [ 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]
CL_BUTTON = [ 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]
CD_BUTTON = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0]
CU_BUTTON = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
JOYSTICK_UP = [ 0, 127, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
JOYSTICK_DOWN = [ 0, -128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
JOYSTICK_LEFT = [-128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
JOYSTICK_RIGHT = [ 127, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
def __init__(self, controls=NO_OP):
self.X_AXIS = controls[0]
self.Y_AXIS = controls[1]
self.A_BUTTON = controls[2]
self.B_BUTTON = controls[3]
self.R_TRIG = controls[4]
self.L_TRIG = controls[5]
self.Z_TRIG = controls[6]
self.R_CBUTTON = controls[7]
self.L_CBUTTON = controls[8]
self.D_CBUTTON = controls[9]
self.U_CBUTTON = controls[10]
self.R_DPAD = controls[11]
self.L_DPAD = controls[12]
self.D_DPAD = controls[13]
self.U_DPAD = controls[14]
self.START_BUTTON = controls[15]
def to_json(self):
return json.dumps(self.__dict__)
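# Illustrative sketch (not part of the original module): building a controller state
# from one of the predefined control arrays and serializing it to the JSON form that
# ControllerHTTPServer below hands back to the emulator's input plugin.
def _demo_controller_state():
    state = ControllerState(ControllerState.A_BUTTON)
    assert state.A_BUTTON == 1 and state.START_BUTTON == 0
    return state.to_json()   # e.g. '{"X_AXIS": 0, "Y_AXIS": 0, "A_BUTTON": 1, ...}'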
###############################################
class ControllerHTTPServer(HTTPServer, object):
def __init__(self, server_address, control_timeout, frame_skip):
self.control_timeout = control_timeout
self.controls = ControllerState()
self.controls_updated = threading.Event()
self.response_sent = threading.Event()
self.running = True
self.responses_sent = 0
self.frame_skip = frame_skip
self.frame_skip_enabled = True
super(ControllerHTTPServer, self).__init__(server_address, self.ControllerRequestHandler)
def send_controls(self, controls):
self.responses_sent = 0
self.controls = controls
# Tell the request handler that the controls have been updated so it can send the response now:
self.controls_updated.set()
# Wait for response to actually be sent before returning:
if self.running:
self.response_sent.wait()
self.response_sent.clear()
def shutdown(self):
self.running = False
# Make sure we aren't blocking on anything:
self.response_sent.set()
self.controls_updated.set()
# Shutdown the server:
if PY3_OR_LATER:
super().shutdown()
super().server_close()
else:
super(ControllerHTTPServer, self).shutdown()
super(ControllerHTTPServer, self).server_close()
# http://preshing.com/20110920/the-python-with-statement-by-example/#implementing-the-context-manager-as-a-generator
@contextmanager
def frame_skip_disabled(self):
self.frame_skip_enabled = False
yield True
self.frame_skip_enabled = True
class ControllerRequestHandler(BaseHTTPRequestHandler, object):
def log_message(self, fmt, *args):
pass
def write_response(self, resp_code, resp_data):
self.send_response(resp_code)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(resp_data.encode())
def do_GET(self):
# Wait for the controls to be updated before responding:
if self.server.running:
self.server.controls_updated.wait()
if not self.server.running:
print('Sending SHUTDOWN response')
# TODO: This sometimes fails with a broken pipe because
# the emulator has already stopped. Should handle gracefully (Issue #4)
self.write_response(500, "SHUTDOWN")
else:
### respond with controller output
self.write_response(200, self.server.controls.to_json())
self.server.responses_sent += 1
# If we have sent the controls 'n' times now...
if self.server.responses_sent >= self.server.frame_skip or not self.server.frame_skip_enabled:
# ...we fire the response_sent event so the next action can happen:
self.server.controls_updated.clear()
self.server.response_sent.set()
###############################################
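# Illustrative sketch (not part of the original module, Python 3 only): exercising the
# request/response handshake without an emulator. A background HTTP GET stands in for
# the input plugin's poll; send_controls() blocks until that poll has been answered.
# The port number is arbitrary and assumed to be free.
def _demo_controller_server(port=8321):
    server = ControllerHTTPServer(('', port), control_timeout=30, frame_skip=1)
    threading.Thread(target=server.serve_forever, daemon=True).start()
    result = {}
    def fake_plugin_poll():
        import urllib.request
        with urllib.request.urlopen('http://127.0.0.1:%d/' % port) as resp:
            result['body'] = resp.read().decode()
    poller = threading.Thread(target=fake_plugin_poll, daemon=True)
    poller.start()
    server.send_controls(ControllerState(ControllerState.A_BUTTON))  # returns once the GET is served
    poller.join()
    server.shutdown()
    return result['body']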
|
load14yidata.py
|
"""
加载14亿数据并存入数据库
by judy 2020/06/17
email和pwd设置为长字符串
"""
import queue
import threading
import time
import traceback
from pathlib import Path
import pymysql
class MailSecret(object):
def __init__(self):
self.datafile_queue = queue.Queue()
self.res_queue = queue.Queue()
self.data_path = Path(r"./data")
self.get_data_file(self.data_path)
self.allcount = 0
        # Long-lived (persistent) database connection
self.db = pymysql.connect(host="192.168.90.66", user="root", password="123456", database="14yidata")
        # Insert flag; rows are only queued for insertion while this is True
self._insert_flag = True
def get_data_file(self, datapath: Path):
"""
        Collect all data files under the given directory (recursively).
:return:
"""
for el in datapath.iterdir():
if el.is_dir():
self.get_data_file(el)
else:
self.datafile_queue.put(el)
def process_datafile(self):
"""
        Process data files from the queue.
        Each line is expected to look like email:pwd.
:return:
"""
while True:
if self.datafile_queue.empty():
break
try:
datafile: Path = self.datafile_queue.get()
print(f'Process file, file:{datafile.as_posix()}')
with datafile.open('r', encoding='utf-8', errors="ignore") as fp:
for line in fp.readlines():
line = line.strip()
# errors = self.detect_decoding_errors_line(line)
# if errors:
# print(f'Error encoding, line:{line}')
# continue
try:
if ':' in line:
splite_res = line.split(':')
elif ';' in line:
splite_res = line.split(';')
elif len(line.split('\t')) == 3:
splite_res = line.split('\t')[1:]
else:
                                print(f'Unknown line format, cannot split: {line}')
continue
email, pwd = splite_res[0], splite_res[1]
# if not self._insert_flag:
# if email == 'lollyman@gmail.com' and pwd == 'cocowow2':
# self._insert_flag = True
# print("Find the last record")
# continue
while self.res_queue.qsize() > 100000:
                                print('Too much queued data, waiting 5 seconds')
time.sleep(5)
if self._insert_flag:
self.res_queue.put((email, pwd))
except:
print(f'error line:{line}')
continue
self.datafile_queue.task_done()
except Exception as err:
print(f'read file error, err:{traceback.format_exc()}')
def store_data(self, manydata: list):
"""
        Save a batch of (email, pwd) rows into MySQL.
:return:
"""
db_curs = self.db.cursor()
try:
sql = '''
INSERT INTO allb(Email, Password) VALUES (%s, %s);
'''
db_curs.executemany(sql, manydata)
self.db.commit()
            print(f'Inserted a batch of {len(manydata)} rows')
except Exception as error:
self.db.rollback()
print(f"insert error, err:{error}")
finally:
db_curs.close()
def store_data_to_sqlite(self):
"""
        Consume the result queue and save rows into MySQL in batches (despite the function name, the target is MySQL).
:return:
"""
save_res = []
count = 0
stop = 10
while True:
if stop == 0:
break
if self.datafile_queue.empty() and self.res_queue.empty():
stop -= 1
                print(f'No more data, will exit in {stop} seconds')
time.sleep(1)
continue
if count > 10000:
self.allcount += count
self.store_data(save_res)
save_res = []
count = 0
data = self.res_queue.get()
save_res.append(data)
count += 1
self.res_queue.task_done()
        # After the loop ends, flush the remaining partial batch
if count > 0:
self.allcount += count
self.store_data(save_res)
        print(f'All data stored, roughly {self.allcount} lines')
self.db.close()
def start(self):
for i in range(5):
t1 = threading.Thread(target=self.process_datafile, name='getalldata')
t1.start()
t2 = threading.Thread(target=self.store_data_to_sqlite, name='store_data')
t2.start()
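# Illustrative sketch (not part of the original script): the same line-splitting rules
# applied in process_datafile(), pulled out as a standalone helper for clarity.
# Returns (email, pwd) or None when the line format is not recognised.
def _demo_split_line(line: str):
    line = line.strip()
    if ':' in line:
        parts = line.split(':')
    elif ';' in line:
        parts = line.split(';')
    elif len(line.split('\t')) == 3:
        parts = line.split('\t')[1:]
    else:
        return None
    return parts[0], parts[1]
# Example: _demo_split_line('user@example.com:hunter2') -> ('user@example.com', 'hunter2')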
if __name__ == '__main__':
ms = MailSecret()
ms.start()
|
connection.py
|
# Copyright (c) 2020 DeNA Co., Ltd.
# Licensed under The MIT License [see LICENSE for details]
import io
import struct
import socket
import pickle
import threading
import queue
import multiprocessing as mp
import multiprocessing.connection as connection
def send_recv(conn, sdata):
conn.send(sdata)
rdata = conn.recv()
return rdata
class PickledConnection:
def __init__(self, conn):
self.conn = conn
def __del__(self):
self.close()
def close(self):
if self.conn is not None:
self.conn.close()
self.conn = None
def fileno(self):
return self.conn.fileno()
def _recv(self, size):
buf = io.BytesIO()
while size > 0:
chunk = self.conn.recv(size)
if len(chunk) == 0:
raise ConnectionResetError
size -= len(chunk)
buf.write(chunk)
return buf
def recv(self):
buf = self._recv(4)
size, = struct.unpack("!i", buf.getvalue())
buf = self._recv(size)
return pickle.loads(buf.getvalue())
def _send(self, buf):
size = len(buf)
while size > 0:
n = self.conn.send(buf)
size -= n
buf = buf[n:]
def send(self, msg):
buf = pickle.dumps(msg)
n = len(buf)
header = struct.pack("!i", n)
if n > 16384:
chunks = [header, buf]
elif n > 0:
chunks = [header + buf]
else:
chunks = [header]
for chunk in chunks:
self._send(chunk)
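# Illustrative sketch (not part of the original module): round-tripping a message
# through a socketpair to show the 4-byte length-prefixed pickle framing implemented
# by PickledConnection, driven with the send_recv() helper defined above.
def _demo_pickled_roundtrip():
    raw_a, raw_b = socket.socketpair()
    conn_a, conn_b = PickledConnection(raw_a), PickledConnection(raw_b)
    echo_server = threading.Thread(
        target=lambda: conn_b.send({'echo': conn_b.recv()}), daemon=True)
    echo_server.start()
    reply = send_recv(conn_a, {'msg': 'hello'})   # -> {'echo': {'msg': 'hello'}}
    echo_server.join()
    return reply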
def open_socket_connection(port, reuse=False):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR,
sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) | 1
)
sock.bind(('', int(port)))
return sock
def accept_socket_connection(sock):
try:
conn, _ = sock.accept()
return PickledConnection(conn)
except socket.timeout:
return None
def listen_socket_connections(n, port):
sock = open_socket_connection(port)
sock.listen(n)
return [accept_socket_connection(sock) for _ in range(n)]
def connect_socket_connection(host, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect((host, int(port)))
except ConnectionRefusedError:
print('failed to connect %s %d' % (host, port))
return PickledConnection(sock)
def accept_socket_connections(port, timeout=None, maxsize=1024):
sock = open_socket_connection(port)
sock.listen(maxsize)
sock.settimeout(timeout)
cnt = 0
while cnt < maxsize:
conn = accept_socket_connection(sock)
if conn is not None:
cnt += 1
yield conn
def open_multiprocessing_connections(num_process, target, args_func):
# open connections
s_conns, g_conns = [], []
for _ in range(num_process):
conn0, conn1 = mp.Pipe(duplex=True)
s_conns.append(conn0)
g_conns.append(conn1)
# open workers
for i, conn in enumerate(g_conns):
mp.Process(target=target, args=args_func(i, conn)).start()
conn.close()
return s_conns
class MultiProcessJobExecutor:
def __init__(self, func, send_generator, num_workers, postprocess=None):
self.send_generator = send_generator
self.postprocess = postprocess
self.conns = []
self.waiting_conns = queue.Queue()
self.output_queue = queue.Queue(maxsize=8)
for i in range(num_workers):
conn0, conn1 = mp.Pipe(duplex=True)
mp.Process(target=func, args=(conn1, i), daemon=True).start()
conn1.close()
self.conns.append(conn0)
self.waiting_conns.put(conn0)
def recv(self):
return self.output_queue.get()
def start(self):
threading.Thread(target=self._sender, daemon=True).start()
threading.Thread(target=self._receiver, daemon=True).start()
def _sender(self):
print('start sender')
while True:
data = next(self.send_generator)
conn = self.waiting_conns.get()
conn.send(data)
print('finished sender')
def _receiver(self):
print('start receiver')
while True:
conns = connection.wait(self.conns)
for conn in conns:
data = conn.recv()
self.waiting_conns.put(conn)
if self.postprocess is not None:
data = self.postprocess(data)
self.output_queue.put(data)
print('finished receiver')
class QueueCommunicator:
def __init__(self, conns=[]):
self.input_queue = queue.Queue(maxsize=256)
self.output_queue = queue.Queue(maxsize=256)
self.conns = set()
for conn in conns:
self.add_connection(conn)
threading.Thread(target=self._send_thread, daemon=True).start()
threading.Thread(target=self._recv_thread, daemon=True).start()
def connection_count(self):
return len(self.conns)
def recv(self, timeout=None):
return self.input_queue.get(timeout=timeout)
def send(self, conn, send_data):
self.output_queue.put((conn, send_data))
def add_connection(self, conn):
self.conns.add(conn)
def disconnect(self, conn):
print('disconnected')
self.conns.discard(conn)
def _send_thread(self):
while True:
conn, send_data = self.output_queue.get()
try:
conn.send(send_data)
except ConnectionResetError:
self.disconnect(conn)
except BrokenPipeError:
self.disconnect(conn)
def _recv_thread(self):
while True:
conns = connection.wait(self.conns, timeout=0.3)
for conn in conns:
try:
recv_data = conn.recv()
except ConnectionResetError:
self.disconnect(conn)
continue
except EOFError:
self.disconnect(conn)
continue
self.input_queue.put((conn, recv_data))
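# Illustrative sketch (not part of the original module): driving a QueueCommunicator
# with one end of a local multiprocessing Pipe standing in for a remote peer.
def _demo_queue_communicator():
    local, remote = mp.Pipe(duplex=True)
    comm = QueueCommunicator([local])
    remote.send({'type': 'hello'})
    conn, message = comm.recv()          # (connection, data) taken off the input queue
    comm.send(conn, {'type': 'ack'})     # queued; delivered by the background send thread
    reply = remote.recv()
    return message, reply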
|
run.py
|
import threading
import os
def process():
    # os.sys is the sys module, not a callable; os.system() is what actually runs a
    # shell command. The original second command had a PowerShell prompt pasted into
    # the string, so it is split here into a directory change plus the interpreter
    # invocation, and raw strings avoid backslash escapes in the Windows paths.
    # (Note that 'conda activate' in a separate shell does not persist to the next call.)
    os.system('conda activate py3.7')
    os.chdir(r'G:\code\Projects\AI\Competation\Kaggle\003_MNIST')
    os.system(r'D:/ProgramData/Anaconda3/envs/py3.7/python.exe g:/code/Projects/AI/Competation/Kaggle/003_MNIST/tf_MNIST.py')
for i in range(3):
    t1 = threading.Thread(target=process, args=[])
    t1.start()
|
cashacct.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Electron Cash - A Bitcoin Cash SPV Wallet
# This file Copyright (c) 2019 Calin Culianu <calin.culianu@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''
DeVault IDs related classes and functions.
Note that this file also contains a unique class called `ScriptOutput` (which
inherits from address.py's own ScriptOutput), so always import this file
carefully if also importing address.py.
'''
import re
import requests
import threading
import queue
import random
import time
from collections import defaultdict, namedtuple
from typing import List, Tuple, Dict
from . import bitcoin
from . import util
from .address import Address, OpCodes, Script, ScriptError, UnknownAddress
from .address import ScriptOutput as ScriptOutputBase
from .transaction import BCDataStream, Transaction
from . import verifier
from . import blockchain
from . import caches
# DeVault IDs protocol code prefix is 0x01010101
# See OP_RETURN prefix guideline: https://github.com/devaultorg/devault.org/blob/master/spec/op_return-prefix-guideline.md
protocol_code = bytes.fromhex("01010101")
activation_height = 84001 # all cash acct registrations are invalid if they appear before this block height
height_modification = activation_height - 1 # compute the cashacct.number by subtracting this value from tx block height
collision_hash_length = 10 # DO NOT MODIFY -- this is hard-coded in spec
# This RE is used to accept/reject names
name_accept_re = re.compile(r'^[a-zA-Z0-9_]{1,99}$')
# Accept/reject collision_hash -- must be a number string of precisely length 10
collision_hash_accept_re = re.compile(f'^[0-9]{{{collision_hash_length}}}$')
# mapping of Address.kind -> cash account data types
_addr_kind_data_types = { Address.ADDR_P2PKH : 0x1, Address.ADDR_P2SH : 0x2 }
_unsupported_types = { 0x03, 0x04, 0x83, 0x84 }
# negative lengths here indicate advisory and not enforced.
_data_type_lengths = { 0x1 : 20, 0x2 : 20, 0x3 : 80, 0x4 : -66, 0x81 : 20, 0x82 : 20, 0x83 : 80, 0x84 : -66 }
_data_types_addr_kind = {
0x1 : Address.ADDR_P2PKH, 0x2 : Address.ADDR_P2SH,
0x81 : Address.ADDR_P2PKH, 0x82 : Address.ADDR_P2SH, # FIXME: These should really map to SLP addresses, but this works too.
}
_preferred_types = { 0x1, 0x2 } # these take precedence over 0x81, 0x82 in the case of multi registrations containing more than 1 type
assert set(_unsupported_types) | set(_data_types_addr_kind) == set(_data_type_lengths)
def _i2b(val): return bytes((val,))
class ArgumentError(ValueError):
'''Raised by various CashAcct functions if the supplied args are bad or
out of spec.'''
class ScriptOutput(ScriptOutputBase):
'''A class to encapsulate a DeVault IDs script output. Use the __new__ or
@classmethod factory methods to create instances. Suitable for including in
a Transaction as an output.
Note: This class is named ScriptOutput like its base. This is intentional
and client code should import this file such that referring to this class
is module-qualified, eg cashacct.ScriptOutput.
Note2: that the Transaction class automatically deserializes TYPE_SCRIPT
outputs to instances of this class if the script contents match the
CashAccounts protocol (based on boolean result of protocol_match() below).
See the address.ScriptOutput 'protocol' mechanism (in address.py).'''
_protocol_prefix = _i2b(OpCodes.OP_RETURN) + _i2b(4) + protocol_code
# Additional attributes outside of the base class tuple's 1 attribute
attrs_extra = ( 'name', 'address', 'addresses', 'number', 'collision_hash', 'emoji' )
@classmethod
def _protocol_match_fast(cls, script_bytes):
'''Returns true iff the `script_bytes` at least START with the correct
protocol code. Useful for fast-matching script outputs and testing
if they are potential CashAcct registrations.
`script_bytes` should be the full script as a bytes-like-object,
including the OP_RETURN byte prefix.'''
return script_bytes.startswith(cls._protocol_prefix)
@classmethod
def protocol_match(cls, script_bytes):
'''Returns true iff the `script_bytes` is a valid DeVault IDs
registration script (has all the requisite fields, etc).'''
try:
res = cls.parse_script(script_bytes)
return bool(res)
except (ValueError, TypeError):
return False
@classmethod
def is_valid(cls, script):
'''Alias for protocol_match. Returns true if script is a valid CashAcct
registration script.'''
return cls.protocol_match(script)
def __new__(cls, script, *, number=None, collision_hash=None, emoji=None):
'''Instantiate from a script (or address.ScriptOutput) you wish to parse.
Use number=, collision_hash=, emoji= kwargs if you also have that
information and want to store it in this instance.
The script will be parsed and self.name and self.address will be set
regardless. Raises ArgumentError on invalid script.
Always has the following attributes defined (even if None):
name, address, number, collision_hash, emoji
'''
if isinstance(script, cls) and not any((number, collision_hash, emoji)):
# copy constructor work-alike
number, collision_hash, emoji = script.number, script.collision_hash, script.emoji
script = cls._ensure_script(script)
self = super(__class__, cls).__new__(cls, script)
self.name, self.address, self.addresses = self.parse_script(self.script) # raises on error
assert self.address in self.addresses
self.number, self.collision_hash, self.emoji = None, None, None # ensure attributes defined
self.make_complete2(number, collision_hash, emoji=emoji) # raises if number bad and/or if collision_hash is bad, otherwise just sets attributes. None ok for args.
return self
def copy(self):
''' Creates a copy. '''
return ScriptOutput(self)
@staticmethod
def _check_name_address(name, address, *, allow_unknown=False, addresses=None):
'''Raises ArgumentError if either name or address are somehow invalid.'''
if not isinstance(name, str) or not name_accept_re.match(name):
raise ArgumentError('Invalid name specified: must be an alphanumeric ascii string of length 1-99', name)
if name != name.encode('ascii', errors='ignore').decode('ascii', errors='ignore'): # <-- ensure ascii. Note that this test is perhaps superfluous but the mysteries of unicode and how re's deal with it elude me, so it's here just in case.
raise ArgumentError('Name must be pure ascii', name)
if addresses is None:
addresses = [address]
if address not in addresses:
raise ArgumentError('Address not in address list', address, addresses)
for address in addresses:
allowed_classes = (Address, UnknownAddress) if allow_unknown else (Address,)
if not isinstance(address, allowed_classes):
raise ArgumentError(f'Address of type \'{allowed_classes}\' expected', address)
if isinstance(address, Address) and address.kind not in _addr_kind_data_types:
raise ArgumentError('Invalid or unsupported address type', address)
return True
@staticmethod
def _check_number_collision_hash(number, collision_hash):
'''Raises ArgumentError if either number or collision_hash aren't to spec.'''
if number is not None: # We don't raise on None
if not isinstance(number, int) or number < 1:
raise ArgumentError('Number must be an int >= 1')
if collision_hash is not None: # We don't raise on None
if isinstance(collision_hash, int): collision_hash = str(collision_hash) # grr.. it was an int
if not isinstance(collision_hash, str) or not collision_hash_accept_re.match(collision_hash):
raise ArgumentError('Collision hash must be a number string, right-padded with zeroes, of length 10')
return number is not None and collision_hash is not None
def is_complete(self, fast_check=False):
'''Returns true iff we have the number and collision_hash data for this
instance, as well as valid name and valid address.'''
if fast_check:
return self.name and self.address and self.number and self.collision_hash
try:
return self._check_name_address(self.name, self.address, allow_unknown=True, addresses=self.addresses) and self._check_number_collision_hash(self.number, self.collision_hash)
except ArgumentError:
return False
def make_complete2(self, number, collision_hash, *, emoji=None):
'''Make this ScriptOutput instance complete by filling in the number and
collision_hash info. Raises ArgumentError on bad/out-of-spec args (None
args are ok though, the cashacct just won't be complete).'''
ok = self._check_number_collision_hash(number, collision_hash)
self.number = number
self.collision_hash = collision_hash
self.emoji = emoji or self.emoji
return ok
def make_complete(self, block_height=None, block_hash=None, txid=None):
'''Make this ScriptOutput instance complete by specifying block height,
block_hash (hex string or bytes), and txid (hex string or bytes)'''
ch = collision_hash(block_hash, txid) if block_hash and txid else None
num = bh2num(block_height) if block_height is not None else None
em = emoji(block_hash, txid) if ch else None
return self.make_complete2(num, ch, emoji=em)
def clear_completion(self):
'''Make this ScriptOutput incomplete again.'''
self.number = self.collision_hash = self.emoji = None
def to_ui_string(self, ignored=True):
''' Overrides super to add cashaccount data '''
s = super().to_ui_string(ignored)
extra = []
for a in __class__.attrs_extra:
val = getattr(self, a, None)
if val is not None:
if a == "addresses":
# For the addresses list, we just show how many there are
# in the list. We do not support more than the primary
# address anyway. If list is 1 or empty, skip
a, val = "num_addresses", len(val)
if val < 2:
continue
extra.append(f'{a}={val}')
extra = ' '.join(extra)
return f'{s} [CashAcct: {extra}]' if extra else f'{s} [CashAcct]'
def block_height(self) -> int:
''' Convenience method to returns the block_height.
Requires that this class have its 'number' attribute not None, otherwise
returns 0. '''
return self.number + height_modification if self.number else 0
def __repr__(self):
return f'<ScriptOutput (CashAcct) {self.__str__()}>'
def __eq__(self, other):
res = super().__eq__(other)
if res and isinstance(other, __class__) and self is not other:
# awkward.. we do a deep check if self and other are both this type
for a in __class__.attrs_extra:
res = res and getattr(self, a, None) == getattr(other, a, None)
if not res:
break
return res
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
l = [self.script]
for name in __class__.attrs_extra:
v = getattr(self, name, None)
if isinstance(v, list):
v = tuple(v)
l.append(v)
return hash(tuple(l))
@staticmethod
def _ensure_script(script):
'''Returns script or script.script if script is a ScriptOutput instance.
Raises if script is not bytes and/or not ScriptOutput. Always returns
a bytes-like-object.'''
if isinstance(script, ScriptOutputBase):
script = script.script
script = _ensure_bytes(script, "Script")
return script
@classmethod
def parse_script(cls, script):
'''Parses `script`, which may be either a ScriptOutput class, or raw
bytes data. Will raise various exceptions if it cannot parse. Returns
(name: str, address: Address) as a tuple. '''
script = cls._ensure_script(script)
# Check prefix, length, and that the 'type' byte is one we know about
if not cls._protocol_match_fast(script) or len(script) < 30:
raise ArgumentError('Not a valid CashAcct registration script')
script_short = script
try:
script_short = script[len(cls._protocol_prefix):] # take off the already-validated prefix
ops = Script.get_ops(script_short) # unpack ops
except Exception as e:
raise ArgumentError('Bad CashAcct script', script_short.hex()) from e
# Check for extra garbage at the end, too few items and/or other nonsense
if not ops or not len(ops) >= 2 or not all(len(op) == 2 and op[1] for op in ops):
raise ArgumentError('CashAcct script parse error', ops)
name_bytes = ops[0][1]
try:
name = name_bytes.decode('ascii')
except UnicodeError as e:
raise ArgumentError('CashAcct names must be ascii encoded', name_bytes) from e
addresses = []
addresses_preferred = [] # subset of above with types either 0x1 or 0x2, all valid Address instances (may be empty if registration contained no 0x1/0x2)
try:
# parse the list of payment data (more than 1), and try and grab
# the first address we understand (type 1 or 2)
for op in ops[1:]:
def get_address(op):
type_byte = op[1][0]
hash160_bytes = op[1][1:]
req_len = _data_type_lengths.get(type_byte) or 0
strict = req_len >= 0
req_len = abs(req_len)
if type_byte in _data_types_addr_kind:
if len(hash160_bytes) != req_len:
if strict:
raise AssertionError('hash160 had wrong length')
else:
util.print_error(f"parse_script: type 0x{type_byte:02x} had length {len(hash160_bytes)} != expected length of {req_len}, will proceed anyway")
return Address(hash160_bytes, _data_types_addr_kind[type_byte]), type_byte
elif type_byte in _unsupported_types:
# unsupported type, just acknowledge this registration but
# mark the address as unknown
if len(hash160_bytes) != req_len:
msg = f"parse_script: unsupported type 0x{type_byte:02x} has unexpected length {len(hash160_bytes)}, expected {req_len}"
util.print_error(msg)
if strict:
raise AssertionError(msg)
return UnknownAddress(hash160_bytes), type_byte
else:
raise ValueError(f'unknown cash address type 0x{type_byte:02x}')
# / get_address
adr, type_byte = get_address(op)
addresses.append(adr)
if type_byte in _preferred_types and isinstance(adr, Address):
addresses_preferred.append(adr)
del adr, type_byte # defensive programming
assert addresses
maybes = [a for a in (addresses_preferred or addresses) if isinstance(a, Address)]
address = (maybes and maybes[0]) or addresses[0]
except Exception as e:
# Paranoia -- this branch should never be reached at this point
raise ArgumentError('Bad address or address could not be parsed') from e
cls._check_name_address(name, address, addresses=addresses, allow_unknown=True) # raises if invalid
return name, address, addresses
############################################################################
# FACTORY METHODS #
############################################################################
@classmethod
def create_registration(cls, name, address):
'''Generate a CashAccounts registration script output for a given
address. Raises ArgumentError (a ValueError subclass) if args are bad,
otherwise returns an instance of this class.'''
cls._check_name_address(name, address)
# prepare payload
# From: https://gitlab.com/cash-accounts/specification/blob/master/SPECIFICATION.md
#
# Sample payload (hex bytes) for registration of 'bv1' -> devault:qzgvpjawln2l8wfmsg2qwnnytcua02hy45vpdvrqu5
# (This example is a real tx with txid: 4a2da2a69fba3ac07b7047dd17927a890091f13a9e89440a4cd4cfb4c009de1f)
#
# hex bytes:
# 6a040101010103627631150190c0cbaefcd5f3b93b8214074e645e39d7aae4ad
# | | |......|| |....|| | |......................................|
# | | |......|| |....|| | ↳ hash160 of devault:qzgvpjawln2l8wfmsg2qwnnytcua02hy45vpdvrqu5
# | | |......|| |....|| |
# | | |......|| |....|| ↳ type (01 = p2pkh)
# | | |......|| |....||
# | | |......|| |....|↳ OP_PUSH(0x15 = 21)
# | | |......|| |....|
# | | |......|| ↳'bv1'
# | | |......||
# | | |......|↳OP_PUSH(3)
# | | |......|
# | | ↳protocol_code = 0x01010101
# | |
# | ↳OP_PUSH(4)
# |
# ↳OP_RETURN
class MyBCDataStream(BCDataStream):
def push_data(self, data):
self.input = self.input or bytearray()
self.input += Script.push_data(data)
bcd = MyBCDataStream()
bcd.write(cls._protocol_prefix) # OP_RETURN -> 0x6a + 0x4 (pushdata 4 bytes) + 0x01010101 (protocol code)
bcd.push_data(name.encode('ascii'))
bcd.push_data(
# type byte: 0x1 for ADDR_P2PKH, 0x2 for ADDR_P2SH
_i2b(_addr_kind_data_types[address.kind])
            # 20 byte hash160
+ address.hash160
)
return cls(bytes(bcd.input))
@classmethod
def from_script(cls, script, *,
# these two optional args, if specified, take precedence
number=None, collision_hash=None,
# additionally these other args can be specified to
# have this class calculate number and collision_hash
# for you. Use either set of optional args but not both.
block_height=None, # if set, self.number will be set. Cannot specify this & number
# Cannot specify these & collision_hash at the same time
block_hash=None, txid=None # if block_hash and txid are set, .emoji will be set too on returned class (along with .collision_hash)
):
'''Create an instance from a `script`, which may be either a
ScriptOutput class, or raw bytes data. Will raise various exceptions if
it cannot parse and/or script or args are invalid.'''
if block_height is not None:
if number is not None:
raise ArgumentError('Cannot specify both block_height and number')
number = number_from_block_height(block_height)
tup = (block_hash, txid)
myemoji=None
if any(tup):
if not all(tup):
raise ArgumentError('block_hash and txid must both be specified or not specified at all')
if collision_hash is not None:
raise ArgumentError('Cannot specify collision_hash, block_hash & txid together')
collision_hash = chash(block_hash, txid)
myemoji = emoji(block_hash, txid)
return cls(script, number=number, collision_hash=collision_hash, emoji=myemoji)
@classmethod
def from_dict(cls, d: dict) -> object:
        ''' Create an instance from a dict created by to_dict. '''
return cls(d['script'], # hex -> bytes will get auto-converted in c'tor
number=d.get('number'), collision_hash=d.get('collision_hash'),
emoji=d.get('emoji'))
def to_dict(self) -> dict:
assert self.script
d = { 'script' : self.script.hex() }
if self.number is not None: d['number'] = self.number
if self.collision_hash is not None: d['collision_hash'] = self.collision_hash
if self.emoji is not None: d['emoji'] = self.emoji
return d
# register the above class with the ScriptOutput protocol system
ScriptOutputBase.protocol_classes.add(ScriptOutput)
# Helper Functions
def _ensure_bytes(arg, argname='Arg'):
if isinstance(arg, str):
try:
arg = bytes.fromhex(arg)
except ValueError as e:
raise ArgumentError(f'{argname} could not be binhex decoded', arg) from e
if not isinstance(arg, (bytes, bytearray)):
raise ArgumentError(f'{argname} argument not a bytes-like-object', arg)
if isinstance(arg, bytearray):
arg = bytes(arg) # ensure actual bytes so hash() works.
return arg
def _collision_hash(block_hash, txid):
''' Returns the full sha256 collision hash as bytes given the hex strings
and/or raw bytes as input. May raise ValueError or other. '''
bh = _ensure_bytes(block_hash, 'block_hash')
tx = _ensure_bytes(txid, 'txid')
if not all( len(x) == 32 for x in (bh, tx) ):
raise ArgumentError('Invalid arguments', block_hash, txid)
return bitcoin.sha256(bh + tx)
def collision_hash(block_hash, txid):
''' May raise if block_hash and txid are not valid hex-encoded strings
and/or raw bytes, otherwise returns the 0-padded collision hash string
(always a str of length 10).'''
ch = _collision_hash(block_hash, txid)[:4]
ch = ''.join(reversed(str(int.from_bytes(ch, byteorder='big')))) # convert int to string, reverse it
ch += '0' * (10 - len(ch)) # pad with 0's at the end
return ch
chash = collision_hash # alias.
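# Illustration of the string derivation above (made-up value): if the first four
# bytes of sha256(block_hash + txid) encode the big-endian integer 1234567, then
# str() gives '1234567', reversing gives '7654321', and right-padding with zeros
# to length 10 yields the collision hash string '7654321000'.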
def emoji_index(block_hash, txid):
''' May raise. Otherwise returns an emoji index from 0 to 99. '''
ch = _collision_hash(block_hash, txid)[-4:]
return int.from_bytes(ch, byteorder='big') % 100
emoji_list = ( 128123, 128018, 128021, 128008, 128014, 128004, 128022, 128016,
128042, 128024, 128000, 128007, 128063, 129415, 128019, 128039,
129414, 129417, 128034, 128013, 128031, 128025, 128012, 129419,
128029, 128030, 128375, 127803, 127794, 127796, 127797, 127809,
127808, 127815, 127817, 127819, 127820, 127822, 127826, 127827,
129373, 129381, 129365, 127805, 127798, 127812, 129472, 129370,
129408, 127850, 127874, 127853, 127968, 128663, 128690, 9973,
9992, 128641, 128640, 8986, 9728, 11088, 127752, 9730, 127880,
127872, 9917, 9824, 9829, 9830, 9827, 128083, 128081, 127913,
128276, 127925, 127908, 127911, 127928, 127930, 129345, 128269,
128367, 128161, 128214, 9993, 128230, 9999, 128188, 128203,
9986, 128273, 128274, 128296, 128295, 9878, 9775, 128681,
128099, 127838 )
emoji_set = frozenset(chr(o) for o in emoji_list)
def emoji(block_hash, txid):
    ''' Returns the emoji character given a block hash and txid. May raise.'''
return chr(emoji_list[emoji_index(block_hash, txid)])
_emoji = emoji # alias for internal use if names clash
def number_from_block_height(block_height):
''' Given a block height, returns the cash account 'number' (as int).
This is simply the block height minus 563620. '''
return int(block_height - height_modification)
def number_to_block_height(number):
    ''' Inverse of number_from_block_height. '''
return int(number + height_modification)
bh2num = number_from_block_height # alias
num2bh = number_to_block_height # alias
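# Quick sanity check of the number <-> height mapping (assuming, per the
# docstring above, that height_modification == 563620):
#
#   bh2num(563621) == 1
#   num2bh(1)      == 563621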
#### Lookup & Verification
class Info(namedtuple("Info", "name, address, number, collision_hash, emoji, txid")):
@classmethod
def from_script(cls, script, txid):
''' Converts a script to an Info object. Note that ideally the passed-in
script.is_complete() should be True otherwise most of the fields of the
returned Info object will be None.'''
return cls(name=script.name,
address=script.address,
number=script.number,
collision_hash=script.collision_hash,
emoji=script.emoji,
txid=txid)
def to_script(self):
''' Inverse of from_script, returns a (script, txid) tuple. '''
script = ScriptOutput.create_registration(name=self.name, address=self.address)
script.make_complete2(number=self.number, collision_hash=self.collision_hash,
emoji=self.emoji)
return script, self.txid
@classmethod
def from_regtx(cls, regtx):
return cls.from_script(regtx.script, regtx.txid)
servers = [
"https://api.devaultid.com" # Runs official 'cash-accounts' lookup server software
]
debug = False # network debug setting. Set to True when developing to see more verbose information about network operations.
timeout = 12.5 # default timeout used in various network functions, in seconds.
def lookup(server, number, name=None, collision_prefix=None, timeout=timeout, exc=[], debug=debug) -> tuple:
''' Synchronous lookup, returns a tuple of:
block_hash, List[ RegTx(txid, script) namedtuples ]
or None on error. Note the .script in each returned RegTx will always have
.is_complete() == True (has all fields filled-in from the lookup server).
Optionally, pass a list as the `exc` parameter and the exception encountered
will be returned to caller by appending to the list.
Use `collision_prefix` and `name` to narrow the search, otherwise all
results (if any) for a particular block (number) are returned.
Name matching is case-insensitive. Additionally, as of the time of this
writing, collision_prefix without a specified name will always return no
results from the lookup server. Also, name should be a complete name and not
a substring.
Note:
Resulting tx's are not verified (in the SPV sense) by this function and
further verification (SPV) is necessary before presenting any results to the
user for the purposes of sending funds.'''
url = f'{server}/lookup/{number}'
if name:
name = name.strip().lower()
url += f'/{name}'
if collision_prefix:
collision_prefix = collision_prefix.strip()
url += f'/{collision_prefix}'
try:
ret = []
r = requests.get(url, allow_redirects=True, timeout=timeout) # will raise requests.exceptions.Timeout on timeout
r.raise_for_status()
d = r.json()
if not isinstance(d, dict) or not d.get('results') or not isinstance(d.get('block'), int):
raise RuntimeError('Unexpected response', r.text)
res, block = d['results'], int(d['block'])
bnumber = bh2num(block)
if bnumber != number:
raise RuntimeError('Bad response')
if not isinstance(res, list) or number < 1:
raise RuntimeError('Bad response')
block_hash, header_prev = None, None
unparseable = set()
for d in res:
txraw = d['transaction']
header_hex = d['inclusion_proof'][:blockchain.HEADER_SIZE*2].lower()
header_prev = header_prev or header_hex
if len(header_hex)//2 != blockchain.HEADER_SIZE:
raise AssertionError('Could not get header')
if not block_hash:
block_hash = blockchain.hash_header_hex(header_hex)
elif header_prev != header_hex:
raise AssertionError('Differing headers in results')
tx = Transaction(txraw)
txid = Transaction._txid(txraw)
op_return_count = 0
            tx_regs = [] # there should be exactly 1 of these per tx, as per the cash account spec; we reject tx's with more than 1 op_return
for _typ, script, value in tx.outputs():
if isinstance(script, ScriptOutputBase):
if script.is_opreturn():
op_return_count += 1
                    if isinstance(script, ScriptOutput): # note ScriptOutput here is our subclass defined at the top of this file, not address.ScriptOutput
script.make_complete(block_height=block, block_hash=block_hash, txid=txid)
tx_regs.append(CashAcct.RegTx(txid, script))
if len(tx_regs) == 1 and op_return_count == 1:
# we only accept tx's with exactly 1 OP_RETURN, as per the spec
ret.extend(tx_regs)
else:
if debug:
util.print_error(f"lookup: {txid} had no valid registrations in it using server {server} (len(tx_regs)={len(tx_regs)} op_return_count={op_return_count})")
unparseable.add(txid)
if unparseable:
util.print_error(f"lookup: Warning for block number {number}: got "
f"{len(res)} transactions from the server but "
f"unable to parse {len(unparseable)} of them."
" See if the DeVault IDs spec has changed!", unparseable)
if debug:
util.print_error(f"lookup: found {len(ret)} reg txs at block height {block} (number={number})")
return block_hash, ret
except Exception as e:
if debug:
util.print_error("lookup:", repr(e))
if isinstance(exc, list):
exc.append(e)
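# Synchronous usage sketch (illustrative; the number/name values are placeholders):
#
#   exc = []
#   res = lookup(servers[0], 100, name='bv1', exc=exc)
#   if res is None:
#       print("lookup failed:", exc and repr(exc[-1]))
#   else:
#       block_hash, regtxs = res
#       for rtx in regtxs:
#           print(rtx.txid, rtx.script.name, rtx.script.collision_hash)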
def lookup_asynch(server, number, success_cb, error_cb=None,
name=None, collision_prefix=None, timeout=timeout, debug=debug):
''' Like lookup() above, but spawns a thread and does its lookup
asynchronously.
success_cb - will be called on successful completion with a single arg:
a tuple of (block_hash, the results list).
error_cb - will be called on failure with a single arg: the exception
(guaranteed to be an Exception subclass).
In either case one of the two callbacks will be called. It's ok for
success_cb and error_cb to be the same function (in which case it should
inspect the arg passed to it). Note that the callbacks are called in the
    context of the spawned thread, so e.g. Qt GUI code using this function
    should not modify the GUI directly from the callbacks but should instead
    emit a Qt signal from within the callbacks to be delivered to the main
    thread as usual.'''
def thread_func():
exc = []
res = lookup(server=server, number=number, name=name, collision_prefix=collision_prefix, timeout=timeout, exc=exc, debug=debug)
called = False
if res is None:
if callable(error_cb) and exc:
error_cb(exc[-1])
called = True
else:
success_cb(res)
called = True
if not called:
# this should never happen
util.print_error("WARNING: no callback called for ", threading.current_thread().name)
t = threading.Thread(name=f"CashAcct lookup_asynch: {server} {number} ({name},{collision_prefix},{timeout})",
target=thread_func, daemon=True)
t.start()
def lookup_asynch_all(number, success_cb, error_cb=None, name=None,
collision_prefix=None, timeout=timeout, debug=debug):
''' Like lookup_asynch above except it tries *all* the hard-coded servers
from `servers` and if all fail, then calls the error_cb exactly once.
If any succeed, calls success_cb exactly once.
Note: in this function success_cb is called with TWO args:
- first arg is the tuple of (block_hash, regtx-results-list)
- the second arg is the 'server' that was successful (server string)
One of the two callbacks are guaranteed to be called in either case.
Callbacks are called in another thread context so GUI-facing code should
    be aware of that fact (see notes for lookup_asynch above). '''
assert servers, "No servers hard-coded in cashacct.py. FIXME!"
my_servers = servers.copy()
random.shuffle(my_servers)
N = len(my_servers)
q = queue.Queue()
lock = threading.Lock()
n_ok, n_err = 0, 0
def on_succ(res, server):
nonlocal n_ok
q.put(None)
with lock:
if debug: util.print_error("success", n_ok+n_err, server)
if n_ok:
return
n_ok += 1
success_cb(res, server)
def on_err(exc, server):
nonlocal n_err
q.put(None)
with lock:
if debug: util.print_error("error", n_ok+n_err, server, exc)
if n_ok:
return
n_err += 1
if n_err < N:
return
if error_cb:
error_cb(exc)
def do_lookup_all_staggered():
''' Send req. out to all servers, staggering the requests every 200ms,
and stopping early after the first success. The goal here is to
maximize the chance of successful results returned, with tolerance for
some servers being unavailable, while also conserving on bandwidth a
little bit and not unconditionally going out to ALL servers.'''
t0 = time.time()
for i, server in enumerate(my_servers):
if debug: util.print_error("server:", server, i)
lookup_asynch(server, number = number,
success_cb = lambda res, _server=server: on_succ(res, _server),
error_cb = lambda exc, _server=server: on_err(exc, _server),
name = name, collision_prefix = collision_prefix, timeout = timeout,
debug = debug)
try:
q.get(timeout=0.200)
while True:
# Drain queue in case previous iteration's servers also
# wrote to it while we were sleeping, so that next iteration
# the queue is hopefully empty, to increase the chances
# we get to sleep.
q.get_nowait()
except queue.Empty:
pass
with lock:
if n_ok: # check for success
if debug:
util.print_error(f"do_lookup_all_staggered: returning "
f"early on server {i} of {len(my_servers)} after {(time.time()-t0)*1e3} msec")
return
t = threading.Thread(daemon=True, target=do_lookup_all_staggered)
t.start()
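# Asynchronous usage sketch (illustrative). Both callbacks run in a worker
# thread, so GUI code should marshal results back to the main thread as noted
# in the docstrings above:
#
#   def on_ok(res, server):
#       block_hash, regtxs = res
#       print(f"{server} returned {len(regtxs)} registration(s)")
#   def on_err(exc):
#       print("all servers failed:", repr(exc))
#   lookup_asynch_all(100, success_cb=on_ok, error_cb=on_err)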
class ProcessedBlock:
__slots__ = ( 'hash', # str binhex block header hash
'height', # int blockchain block height
'status_hash', # str binhex computed value derived from Hash(hash + height + reg_txs..) see compute_status_hash
'reg_txs' ) # dict of txid -> RegTx(txid, script) namedtuple
def __init__(self, *args, **kwargs):
assert not args, "This class only takes kwargs"
assert all(k in self.__slots__ for k in kwargs), "Unknown kwarg specified"
for s in self.__slots__:
setattr(self, s, kwargs.get(s))
assert self.reg_txs is None or (isinstance(self.reg_txs, dict) and all(bytes.fromhex(k).hex() == bytes.fromhex(v.txid).hex() for k,v in self.reg_txs.items()))
assert self.hash is None or (isinstance(self.hash, str) and bytes.fromhex(self.hash).hex())
assert self.height is None or (isinstance(self.height, int) and self.height >= activation_height)
self.status_hash or self.set_status_hash() # tries to recompute if not provided
assert self.status_hash is None or (isinstance(self.status_hash, str) and bytes.fromhex(self.status_hash))
def __repr__(self):
return ( f'<ProcessedBlock at 0x{id(self):x} hash={self.hash} height={self.height} status_hash={self.status_hash}'
+ f' with {0 if not self.reg_txs else len(self.reg_txs)} registration(s)>')
def set_status_hash(self) -> str:
self.status_hash = self.compute_status_hash(self.hash, self.height, self.reg_txs)
return self.status_hash
def set_hash_from_raw_header_hex(self, rawhex : str) -> str:
assert len(rawhex) >= blockchain.HEADER_SIZE * 2
self.hash = blockchain.hash_header_hex(rawhex[:blockchain.HEADER_SIZE*2])
return self.hash
@staticmethod
def compute_status_hash(hash_hex : str, height : int, reg_txs : dict) -> str:
if hash_hex and isinstance(height, int) and isinstance(reg_txs, dict):
ba = bytearray()
ba.extend(int.to_bytes(height, length=4, byteorder='little'))
ba.extend(bytes.fromhex(hash_hex))
for txid in sorted(reg_txs.keys()):
ba.extend(bytes.fromhex(txid))
status_hash = bitcoin.hash_encode(bitcoin.Hash(ba))
return status_hash
def __eq__(self, other):
if other is self: return True
if isinstance(other, ProcessedBlock):
return bool(self.hash == other.hash and self.height == other.height and (self.status_hash or self.set_status_hash()) == (other.status_hash or other.set_status_hash()))
return False
    def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
l = []
for name in self.__slots__:
v = getattr(self, name, None)
if isinstance(v, dict):
# Python really needs a frozendict type. :) This dict doesn't
# mutate anyway once constructed, so this is safe.
v = tuple(v.items())
# uncomment below if we add a list to this class
#elif isinstance(v, list):
# v = tuple(v)
l.append(v)
return hash(tuple(l))
class CashAcct(util.PrintError, verifier.SPVDelegate):
''' Class implementing cash account subsystem such as verification, etc. '''
# info for a registration tx. may or may not be currently verified
RegTx = namedtuple("RegTx", "txid, script")
# info for a verified RegTx. Invariant should be all VerifTx's have a
    # corresponding RegTx but not necessarily vice-versa.
VerifTx = namedtuple("VerifTx", "txid, block_height, block_hash")
def __init__(self, wallet):
assert wallet, "CashAcct cannot be instantiated without a wallet"
self.wallet = wallet
self.network = None
self.verifier = None
self.lock = threading.Lock() # note, this lock is subordinate to wallet.lock and should always be taken AFTER wallet.lock and never before
self._init_data()
# below is used by method self.verify_block_asynch:
self._blocks_in_flight = defaultdict(list) # number (eg 100-based-modified height) -> List[tuple(success_cb, error_cb)]; guarded with lock
def _init_data(self):
self.wallet_reg_tx = dict() # dict of txid -> RegTx
self.ext_reg_tx = dict() # dict of txid -> RegTx
self.v_tx = dict() # dict of txid -> VerifTx
self.v_by_addr = defaultdict(set) # dict of addr -> set of txid
self.v_by_name = defaultdict(set) # dict of lowercased name -> set of txid
self.ext_unverif = dict() # ephemeral (not saved) dict of txid -> block_height. This is however re-computed in load() (TODO: see if this should not be the case)
self.ext_incomplete_tx = dict() # ephemeral (not saved) dict of txid -> RegTx (all regtx's are incomplete here)
# minimal collision hash encodings cache. keyed off (name.lower(), number, collision_hash) -> '03' string or '' string, serialized to disk for good UX on startup.
self.minimal_ch_cache = caches.ExpiringCache(name=f"{self.wallet.diagnostic_name()} - CashAcct minimal collision_hash cache", timeout=3600.0)
# Dict of block_height -> ProcessedBlock (not serialized to disk)
self.processed_blocks = caches.ExpiringCache(name=f"{self.wallet.diagnostic_name()} - CashAcct processed block cache", maxlen=5000, timeout=3600.0)
def diagnostic_name(self):
return f'{self.wallet.diagnostic_name()}.{__class__.__name__}'
def start(self, network):
assert network, "CashAcct start requires a valid network instance"
if not self.network:
assert not self.verifier
self.network = network
# our own private verifier, we give it work via the delegate methods
self.verifier = verifier.SPV(self.network, self)
self.network.add_jobs([self.verifier])
util.finalization_print_error(self.verifier)
self.network.register_callback(self._fw_wallet_updated, ['wallet_updated'])
def stop(self):
if self.verifier:
assert self.network
self.network.unregister_callback(self._fw_wallet_updated)
self.verifier.release()
self.verifier = None
self.network = None
def fmt_info(self, info : Info, minimal_chash: str = None, emoji=False) -> str:
''' Given an Info object, returns a string of the form:
name#123.1234;
name2#100;
name3#101.1234567890;
If emoji=True, then we will append the emoji character like so:
"NilacTheGrim#123.45; 🌶"
(Note that the returned string will always end in a semicolon.)
Will implicitly go out to network to cache the minimal_chash value
        if minimal_chash==None, such that subsequent calls may return
a shortened version once the minimal_chash is computed.'''
name, number, chash = info.name, info.number, info.collision_hash
if minimal_chash is None:
minimal_chash = self.get_minimal_chash(name, number, chash)
if minimal_chash: minimal_chash = '.' + minimal_chash
emojipart = f' {info.emoji}' if emoji and info.emoji else ''
return f"{name}#{number}{minimal_chash};{emojipart}"
_number_re = re.compile(r'^[0-9]{1,}$')
_collision_re = re.compile(r'^[0-9]{0,10}$')
@staticmethod
def strip_emoji(s : str) -> str:
return ''.join(filter(lambda x: x not in emoji_set, s))
@classmethod
def parse_string(cls, s : str) -> tuple:
''' Returns a (name, number, collision_prefix) tuple on parse success
of a string of the form: "name#100" or "name#100.12" or "name#100.123;"
(trailing ; is ignored).
Returns None on parse failure.
Note:
        - number must always be >= 1 otherwise None is returned. e.g.
          mark#0 is bad but mark#1 is good.
- collision_prefix must be empty or length <= 10 otherwise None is
returned. e.g. mark#100.01234567899 is too long but mark#100.0123456789 is ok
Does not raise, merely returns None on all errors.'''
s = s.strip()
while s and s[-1] in emoji_set:
s = s[:-1].strip() # strip trailing "<space><emoji>"
while s.endswith(';'):
s = s[:-1] # strip trailing ;
parts = s.split('#')
if len(parts) != 2:
return None
name, therest = parts
if name and name[0] in emoji_set: # support a custom style string with "emoji name#number.123" as the format
name = name[1:].strip()
if not name_accept_re.match(name):
return None
parts = therest.split('.')
if len(parts) == 1:
number = parts[0]
collision_prefix = ''
elif len(parts) == 2:
number, collision_prefix = parts
else:
return None
if not cls._number_re.match(number):
return None
if not cls._collision_re.match(collision_prefix):
return None
try:
number = int(number)
except:
return None
if number < 1:
return None
return name, number, collision_prefix
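    # parse_string examples (these follow directly from the rules documented above):
    #
    #   CashAcct.parse_string('bv1#100')        -> ('bv1', 100, '')
    #   CashAcct.parse_string('bv1#100.12;')    -> ('bv1', 100, '12')
    #   CashAcct.parse_string('bv1#0')          -> None  (number must be >= 1)
    #   CashAcct.parse_string('no number sign') -> None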
def resolve_verify(self, ca_string : str, timeout: float = timeout, exc: list = None) -> List[Tuple[Info, str]]:
''' Blocking resolver for DeVault ID names. Given a ca_string of the
form: name#number[.123], will verify the block it is on and do other
magic. It will return a list of tuple of (Info, minimal_chash).
This goes out to the network each time, so use it in GUI code that
really needs to know verified CashAccount tx's (eg before sending funds),
but not in advisory GUI code, since it can be slow (on the order of less
than a second to several seconds depending on network speed).
        timeout is a timeout in seconds. Returns None on failure, if the
        timer expires, or if nothing is found.
Optional arg `exc` is where to put the exception on network or other
failure. '''
tup = self.parse_string(ca_string)
if not tup:
return
name, number, chash = tup
specified_chash = chash or ''
done = threading.Event()
pb = None
def done_cb(thing):
nonlocal pb
if isinstance(thing, ProcessedBlock) and thing.reg_txs:
pb = thing
elif isinstance(thing, Exception) and isinstance(exc, list):
exc.append(thing)
done.set()
self.verify_block_asynch(number, success_cb=done_cb, error_cb=done_cb, timeout=timeout)
if not done.wait(timeout=timeout) or not pb:
return
matches = list()
found = None
lname = name.lower()
for txid, rtx in pb.reg_txs.items():
rtx_lname = rtx.script.name.lower()
if rtx_lname == lname:
matches.append((txid, rtx_lname, rtx.script.collision_hash))
if not matches:
return # no match
d = self._calc_minimal_chashes_for_sorted_lcased_tups(sorted(t[1:] for t in matches))
ret = []
empty_dict = dict()
for txid, lname, chash in matches:
min_chash = d.get(lname, empty_dict).get(chash, None)
if min_chash is None:
self.print_error(f"resolve_verify: WARNING! Internal Error! Did not find calculated minimal chash for {lname}.{chash}. FIXME!")
min_chash = chash
rtx = pb.reg_txs[txid]
if rtx.script.collision_hash.startswith(specified_chash):
info = Info.from_regtx(rtx)
ret.append((info, min_chash))
return ret or None
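    # Blocking resolution sketch (illustrative; intended for worker threads, not
    # the GUI thread). `wallet.cashacct` is assumed to be the wallet's CashAcct
    # instance:
    #
    #   exc = []
    #   results = wallet.cashacct.resolve_verify('bv1#100', exc=exc)
    #   if results:
    #       for info, minimal_chash in results:
    #           print(wallet.cashacct.fmt_info(info, minimal_chash), info.address)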
def get_minimal_chash(self, name, number, collision_hash, *,
success_cb = None, skip_caches = False, only_cached = False) -> str:
''' Returns a string of the minimal collision hash for a given
name, number, collision_hash combination. This initially will just
return collision_hash, but will go out to the network and
subsequent calls will return the cached results from the asynch. network
lookup should it complete successfully. Note that cached results get
saved to wallet storage, so over the course of the life of a wallet
at least the GUI for the wallet's own addresses should contain correct
results here.
Client code can use the 'ca_updated_minimal_chash' network callback
(see below) to be notified asynchronously when minimal_chash's are
updated.
Optionally client code can supply a success_cb callback function which
will be passed 2 args: (name, number, collision_hash), minimal_collision_hash
Callback if specified is guaranteed to be called before or after this
function returns, but it may be called in another thread.'''
key = (name.lower(), number, collision_hash)
def call_success_cb(min_ch):
''' Inform caller if they supplied a callback that the process is done. '''
if success_cb: success_cb((name, number, collision_hash), min_ch)
found, pb_cached = None, None
if not skip_caches:
with self.lock:
found = self.minimal_ch_cache.get(key)
if found is None:
# See if we have the block cached
pb_cached = self.processed_blocks.get(num2bh(number))
if found is None and pb_cached is not None:
# We didn't have the chash but we do have the block, use that
# immediately without going out to network
tup = self._calc_minimal_chash(name, collision_hash, pb_cached)
if tup:
found = tup[1]
with self.lock:
# Cache result
self.minimal_ch_cache.put(key, found)
# clean up after ourselves
del tup
if found is not None:
call_success_cb(found)
return found
elif only_cached:
call_success_cb(collision_hash)
return collision_hash
else:
def do_lookup():
t0 = time.time()
def on_success(pb : ProcessedBlock):
minimal_chash = collision_hash # start with worst-case, so finally block below has data no matter what happens..
try:
if bh2num(pb.height) != number:
self.print_error(f"get_minimal_chash: WARNING - Internal error. pb.height: {pb.height} != num2bh: {num2bh(number)}")
return
tup = self._calc_minimal_chash(name, collision_hash, pb)
if not tup:
# hmm. empty results.. or bad lookup. in either case,
# don't cache anything.
self.print_error("get_minimal_chash: no results found for", name, number, collision_hash)
return
rtx, minimal_chash = tup
with self.lock:
self.minimal_ch_cache.put(key, minimal_chash)
self.print_error(f"get_minimal_chash: network lookup completed in {time.time()-t0:1.2f} seconds")
network = self.network # capture network obj to avoid race conditions with self.stop()
if network and rtx and minimal_chash != collision_hash:
network.trigger_callback('ca_updated_minimal_chash', self, Info.from_regtx(rtx), minimal_chash)
finally:
call_success_cb(minimal_chash)
# /on_success
self.verify_block_asynch(number=number, success_cb=on_success)
if self.network: # only do this if not 'offline'
do_lookup() # start the asynch lookup
else:
# no network, just call success_cb anyway with what we have so caller doesn't block on waiting for callback...
call_success_cb(collision_hash)
# Immediately return the long-form chash so we give the caller a
# result immediately, even if it is not the final result.
# The caller should subscribe to the ca_updated_minimal_chash
# network signal to get final minimal_chash when it is ready.
return collision_hash
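    # Callback-based usage sketch (illustrative; `ca` is assumed to be a CashAcct
    # instance). The long collision_hash is returned immediately; the shortened
    # form arrives later via success_cb and/or the 'ca_updated_minimal_chash'
    # network signal:
    #
    #   def on_min_chash(key_tup, min_chash):
    #       name, number, chash = key_tup
    #       print(f"{name}#{number}.{min_chash}")
    #   ca.get_minimal_chash('bv1', 100, '1234567890', success_cb=on_min_chash)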
def get_cashaccounts(self, domain=None, inv=False) -> List[Info]:
''' Returns a list of Info objects for verified cash accounts in domain.
Domain must be an iterable of addresses (either wallet or external).
If domain is None, every verified cash account we know about is returned.
If inv is True, then domain specifies addresses NOT to include
        in the results (i.e. every verified cash account we know about that is
        not in domain will be returned). '''
if domain is None:
domain = self.v_by_addr if not inv else set()
ret = []
seen = set()
with self.lock:
if inv:
domain = set(self.v_by_addr) - set(domain)
for addr in domain:
txids = self.v_by_addr.get(addr, set())
for txid in txids:
script = self._find_script(txid)
if script and txid not in seen:
seen.add(txid)
ret.append(Info.from_script(script, txid))
return ret
def get_wallet_cashaccounts(self) -> List[Info]:
''' Convenience method, returns all the verified cash accounts we
know about for wallet addresses only. '''
return self.get_cashaccounts(domain=self.wallet.get_addresses())
def get_external_cashaccounts(self) -> List[Info]:
        ''' Convenience method, returns all the verified cash accounts we
know about that are not for wallet addresses. '''
return self.get_cashaccounts(domain=self.wallet.get_addresses(), inv=True)
def load(self):
''' Note: loading should happen before threads are started, so no lock
is needed.'''
self._init_data()
dd = self.wallet.storage.get('cash_accounts_data', {})
wat_d = dd.get('wallet_reg_tx', {})
eat_d = dd.get('ext_reg_tx', {})
vtx_d = dd.get('verified_tx', {})
min_enc_l = dd.get('minimal_ch_cache', [])
seen_scripts = {}
for txid, script_dict in wat_d.items():
txid = txid.lower()
script = ScriptOutput.from_dict(script_dict)
if script.is_complete():
# sanity check
seen_scripts[txid] = script
# Note we allow incomplete scripts in the wallet_reg_tx dict because
# the user may close wallet and restart and then verifier will see
# the tx as verified as it synchs, thus completing it.
# This is safe since by default _find_script() only returns complete
# scripts unless incomplete=True is specified.
self.wallet_reg_tx[txid] = self.RegTx(txid, script)
for txid, script_dict in eat_d.items():
script = ScriptOutput.from_dict(script_dict)
if script.is_complete() and txid not in seen_scripts:
# sanity check
seen_scripts[txid] = script
# allow incomplete scripts to be loaded here too, in case
# verification comes in later.
self.ext_reg_tx[txid] = self.RegTx(txid, script)
for txid, info in vtx_d.items():
block_height, block_hash = info
script = seen_scripts.get(txid)
if script:
self._add_vtx(self.VerifTx(txid, block_height, block_hash), script)
for item in min_enc_l:
value = item[-1]
key = item[:-1]
self.minimal_ch_cache.put(tuple(key), value) # re-populate the cache
# Re-enqueue previously unverified for verification.
# they may come from either wallet or external source, but we
# enqueue them with the private verifier here.
# Note that verification failures will cause the tx's to get popped
# and thus they shouldn't forever verify (see verification_failed et al).
d = self.ext_reg_tx.copy()
d.update(self.wallet_reg_tx)
for txid, item in d.items():
if txid not in self.v_tx and item.script.number is not None and item.script.number >= 1:
self.ext_unverif[txid] = num2bh(item.script.number)
# Note that 'wallet.load_transactions' will be called after this point
# in the wallet c'tor and it will take care of removing wallet_reg_tx
# and v_tx entries from self if it detects unreferenced transactions in
# history (via the remove_transaction_hook callback).
def save(self, write=False):
'''
FYI, current data model is:
RegTx = namedtuple("RegTx", "txid, script")
VerifTx = namedtuple("VerifTx", "txid, block_height, block_hash")
self.wallet_reg_tx = dict() # dict of txid -> RegTx
self.ext_reg_tx = dict() # dict of txid -> RegTx
self.v_tx = dict() # dict of txid -> VerifTx
self.v_by_addr = defaultdict(set) # dict of addr -> set of txid
self.v_by_name = defaultdict(set) # dict of lowercased name -> set of txid
'''
wat_d, eat_d, vtx_d = dict(), dict(), dict()
min_enc_l = list()
with self.lock:
for txid, rtx in self.wallet_reg_tx.items():
wat_d[txid] = rtx.script.to_dict()
for txid, rtx in self.ext_reg_tx.items():
eat_d[txid] = rtx.script.to_dict()
for txid, vtx in self.v_tx.items():
vtx_d[txid] = [vtx.block_height, vtx.block_hash]
for key, tup in self.minimal_ch_cache.copy_dict().items():
value = tup[-1]
if value is None:
# we sometimes write 'None' to the cache to invalidate
# items but don't delete the entry. Skip these.
continue
min_enc_l.append([*key, value])
data = {
'wallet_reg_tx' : wat_d,
'ext_reg_tx' : eat_d,
'verified_tx' : vtx_d,
'minimal_ch_cache' : min_enc_l,
}
self.wallet.storage.put('cash_accounts_data', data)
if write:
self.wallet.storage.write()
def get_verified(self, ca_name) -> Info:
''' Returns the Info object for ca_name of the form: Name#123.1234
or None if not found in self.v_tx '''
tup = self.parse_string(ca_name)
if tup:
name, num, cp = tup
l = self.find_verified(name=name, number=num, collision_prefix=cp)
if len(l) == 1:
return l[0]
def find_verified(self, name: str, number: int = None, collision_prefix: str = None) -> List[Info]:
''' Returns a list of Info objects for verified cash accounts matching
lowercased name. Optionally you can narrow the search by specifying
number (int) and a collision_prefix (str of digits) '''
ret = []
with self.lock:
name = name.lower()
s = self.v_by_name.get(name, set())
for txid in s:
script = self._find_script(txid, False)
if script:
if script.name.lower() != name:
self.print_error(f"find: FIXME -- v_by_name has inconsistent data for {txid}, name {name} != {script.name}")
continue
if not script.is_complete():
self.print_error(f"find: FIXME -- v_by_name has a script that is not 'complete' for {txid} name='{name}'")
continue
if number is not None and script.number != number:
continue
if collision_prefix is not None and not script.collision_hash.startswith(collision_prefix):
continue
ret.append(Info.from_script(script, txid))
return ret
def add_ext_tx(self, txid : str, script : ScriptOutput):
''' This will add txid to our ext_tx cache, and kick off verification,
but only if it's not verified already and/or not in wallet_reg_tx. '''
if not isinstance(script, ScriptOutput) or not script.is_complete():
raise ArgumentError("Please pass an 'is_complete' script to add_ext_tx")
with self.lock:
if txid not in self.wallet_reg_tx:
self.ext_reg_tx[txid] = self.RegTx(txid, script)
if txid not in self.v_tx:
self.ext_unverif[txid] = num2bh(script.number)
def has_tx(self, txid: str) -> bool:
''' Returns true if we know about a complete tx, whether verified or not. '''
with self.lock:
return bool(self._find_script(txid, False))
def is_verified(self, txid: str) -> bool:
with self.lock:
return txid in self.v_tx
def add_ext_incomplete_tx(self, txid : str, block_height : int, script : ScriptOutput):
if not isinstance(script, ScriptOutput) or not isinstance(block_height, (int, float)) or not txid or not isinstance(txid, str):
raise ArgumentError("bad args to add_ext_incomplete_tx")
script.number = bh2num(block_height)
if script.number < 1:
raise ArgumentError("bad block height")
with self.lock:
self.ext_incomplete_tx[txid] = self.RegTx(txid, script)
self.ext_unverif[txid] = block_height
@staticmethod
def _do_verify_block_argchecks(network, number, exc=[], server='https://unknown'):
if not isinstance(number, int) or number < 1:
raise ArgumentError('number must be >= 1')
if not isinstance(server, str) or not server:
raise ArgumentError('bad server arg')
if not isinstance(exc, list):
raise ArgumentError('bad exc arg')
if not network:
exc.append(RuntimeError('no network'))
return False
return True
def verify_block_asynch(self, number : int, success_cb=None, error_cb=None, timeout=timeout, debug=debug):
''' Tries all servers. Calls success_cb with the verified ProcessedBlock
as the single argument on first successful retrieval of the block.
Calls error_cb with the exc as the only argument on failure. Guaranteed
to call 1 of the 2 callbacks in either case. Callbacks are optional
and won't be called if specified as None. '''
network = self.network # capture network object in case it goes away while we are running
exc = []
if not self._do_verify_block_argchecks(network=network, number=number, exc=exc):
if error_cb: error_cb((exc and exc[-1]) or RuntimeError('error'))
return
def on_error(exc):
with self.lock:
l = self._blocks_in_flight.pop(number, [])
ct = 0
for success_cb, error_cb in l:
if error_cb:
error_cb(exc)
ct += 1
if debug: self.print_error(f"verify_block_asynch: called {ct} error callbacks for #{number}")
def on_success(res, server):
pb = self._verify_block_inner(res, network, server, number, True, timeout, exc, debug=debug)
if pb:
with self.lock:
l = self._blocks_in_flight.pop(number, [])
ct = 0
for success_cb, error_cb in l:
if success_cb:
success_cb(pb)
ct += 1
if debug: self.print_error(f"verify_block_asynch: called {ct} success callbacks for #{number}")
else:
on_error(exc[-1])
with self.lock:
l = self._blocks_in_flight[number]
l.append((success_cb, error_cb))
if len(l) == 1:
if debug: self.print_error(f"verify_block_asynch: initiating new lookup_asynch_all on #{number}")
lookup_asynch_all(number=number, success_cb=on_success, error_cb=on_error, timeout=timeout, debug=debug)
else:
if debug: self.print_error(f"verify_block_asynch: #{number} already in-flight, will just enqueue callbacks")
def verify_block_synch(self, server : str, number : int, verify_txs=True, timeout=timeout, exc=[], debug=debug) -> ProcessedBlock:
''' Processes a whole block from the lookup server and returns it.
Returns None on failure, and puts the Exception in the exc parameter.
Note if this returns successfully, then all the tx's in the returned ProcessedBlock
are guaranteed to have verified successfully. '''
network = self.network # just in case network goes away, capture it
if not self._do_verify_block_argchecks(network=network, number=number, exc=exc, server=server):
return
res = lookup(server=server, number=number, timeout=timeout, exc=exc, debug=debug)
if not res:
return
return self._verify_block_inner(res, network, server, number, verify_txs, timeout, exc, debug=debug)
def _verify_block_inner(self, res, network, server, number, verify_txs, timeout, exc, debug=debug) -> ProcessedBlock:
''' Do not call this from the Network thread, as it actually relies on
the network thread being another thread (it waits for callbacks from it
to proceed). Caller should NOT hold any locks. '''
pb = ProcessedBlock(hash=res[0], height=num2bh(number), reg_txs={ r.txid : r for r in res[1] })
if len(pb.reg_txs) == 0:
self.print_error(f"Warning, received a block from server with number {number}"
"but we didn't recognize any tx's in it. "
"To the dev reading this: See if the DeVault ID spec has changed!")
# REORG or BAD SERVER CHECK
def check_sanity_detect_reorg_etc():
minimal_ch_removed = []
with self.lock:
pb_cached = self.processed_blocks.get(pb.height)
if pb_cached and pb != pb_cached:
# Poor man's reorg detection below...
self.processed_blocks.put(pb.height, None)
self.print_error(f"Warning, retrieved block info from server {server} is {pb} which differs from cached version {pb_cached}! Reverifying!")
keys = set() # (lname, number, collision_hash) tuples
chash_rtxs = dict() # chash_key_tuple -> regtx
for txid in set(set(pb_cached.reg_txs or set()) | set(pb.reg_txs or set())):
self._rm_vtx(txid, rm_from_verifier=True)
script = self._find_script(txid, False)
if script:
k = (script.name.lower(), script.number, script.collision_hash)
keys.add(k)
rtx = pb.reg_txs.get(txid) or pb_cached.reg_txs.get(txid)
if rtx: chash_rtxs[k] = rtx
# invalidate minimal_chashes for block
for k in keys:
if self.minimal_ch_cache.get(k):
self.print_error("invalidated minimal_chash", k)
self.minimal_ch_cache.put(k, None) # invalidate cache item
rtx = chash_rtxs.get(k)
if rtx:
minimal_ch_removed.append((Info.from_regtx(rtx), rtx.script.collision_hash))
verify_txs = True
# finally, inform interested GUI code about the invalidations so that
# it may re-enqueue some refreshes of the minimal collision hashes
for info, long_chash in minimal_ch_removed:
if debug:
self.print_error("triggering ca_updated_minimal_chash for", info, long_chash)
network.trigger_callback('ca_updated_minimal_chash', self, info, long_chash)
check_sanity_detect_reorg_etc()
# /REORG or BAD SERVER CHECK
def num_needed():
with self.lock:
return len(set(pb.reg_txs) - set(self.v_tx))
if verify_txs and pb.reg_txs and num_needed():
q = queue.Queue()
class VFail(RuntimeWarning): pass
def on_verified(event, *args):
if not args or args[0] is not self:
# all the events we care about pass self as arg
return
if event == 'ca_verified_tx':
                    if not num_needed(): # this implicitly checks if the tx's we care about are ready
q.put('done')
elif event == 'ca_verification_failed' and len(args) >= 3 and args[1] in pb.reg_txs:
q.put(('failed', args[1], args[2]))
if args[2] == 'tx_not_found':
ctr = 0
with self.lock:
for txid in pb.reg_txs:
if txid not in self.v_tx:
self._wipe_tx(txid, rm_from_verifier=True)
ctr += 1
if ctr:
self.print_error(f"_verify_block_inner: Block number {number} from server {server} appears to be invalid on this chain: '{args[2]}' undid {ctr} verification requests")
try:
network.register_callback(on_verified, ['ca_verified_tx', 'ca_verification_failed'])
for txid, regtx in pb.reg_txs.items():
self.add_ext_tx(txid, regtx.script) # NB: this is a no-op if already verified and/or in wallet_reg_txs
if num_needed():
thing = q.get(timeout=timeout)
if thing == 'done':
pass # ok, success!
elif isinstance(thing, tuple) and thing[0] == 'failed':
raise VFail(thing[1], thing[2])
else:
self.print_error("INTERNAL ERROR: Got unknown thing from an internal queue in _verify_block_inner. FIXME!")
raise VFail("INTERNAL ERROR", "_verify_block_inner")
except (queue.Empty, VFail) as e:
if num_needed():
exc.append(e)
return
finally:
network.unregister_callback(on_verified)
with self.lock:
self.processed_blocks.put(pb.height, pb)
return pb
############################
# UI / Prefs / Convenience #
############################
def get_address_default(self, infos : List[Info]) -> Info:
''' Returns the preferred Info object for a particular address from
a given list. `infos' is a list of Info objects pertaining to a
particular address (they should all pertain to said address, but this
is not checked). '''
if infos:
last = infos[-1]
d = self.wallet.storage.get('cash_accounts_address_defaults')
if isinstance(d, dict) and isinstance(last.address, Address): # sanity check, .address may not always be Address but may be UnknownAddress
tup = d.get(last.address.to_storage_string())
if isinstance(tup, (tuple, list)) and len(tup) == 3:
name, number, chash = tup
if isinstance(name, str) and isinstance(number, (int, float)) and isinstance(chash, str):
# find the matching one in the list
for info in infos:
if (name.lower(), number, chash) == (info.name.lower(), info.number, info.collision_hash):
return info
# just return the latest one if no default specified
return last
def set_address_default(self, info : Info):
''' Set the default CashAccount for a particular address. Pass the Info
object pertaining to the DeVault ID / Address in question. '''
if not isinstance(info.address, Address):
self.print_error("Warning: Info object does not have an Address", info)
return
d = self.wallet.storage.get('cash_accounts_address_defaults', {})
addr_str = info.address.to_storage_string()
new_value = [info.name, info.number, info.collision_hash]
d[addr_str] = new_value
self.wallet.storage.put('cash_accounts_address_defaults', d)
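    # The stored layout under 'cash_accounts_address_defaults' is simply
    # { address_storage_string: [name, number, collision_hash], ... }, which is
    # what get_address_default() above reads back when picking the preferred Info.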
###################
# Private Methods #
###################
@classmethod
def _calc_minimal_chash(cls, name: str, collision_hash: str, pb : ProcessedBlock) -> Tuple[RegTx, str]:
''' returns None on failure, otherwise returns (RegTx, minimal_chash) tuple '''
num_res = int(bool(pb.reg_txs) and len(pb.reg_txs))
pb_num = bh2num(pb.height)
if not num_res:
util.print_error(f"_calc_minimal_chash: no results in block {pb_num}!")
return
lc_name = name.lower()
d = cls._calc_minimal_chashes_for_block(pb, lc_name)
minimal_chash = d.get(lc_name, {}).get(collision_hash, None)
if minimal_chash is None:
util.print_error(f"_calc_minimal_chash: WARNING INTERNAL ERROR: Could not find the minimal_chash for {pb_num} {lc_name}!")
return
found = None
for rtx in pb.reg_txs.values():
if lc_name == rtx.script.name.lower() and collision_hash == rtx.script.collision_hash:
found = rtx
break
if not found:
util.print_error(f"_calc_minimal_chash: WARNING INTERNAL ERROR: Could not find the minimal_chash for {pb_num} {lc_name}!")
return
if found.script.number != pb_num:
util.print_error(f"_calc_minimal_chash: WARNING: script number differs from block number for block {pb_num} {lc_name} {found.txid}!")
return found, minimal_chash
@classmethod
def _calc_minimal_chashes_for_block(cls, pb : ProcessedBlock, name: str = None) -> Dict[str, Dict[str, str]]:
''' Given a ProcessedBlock, returns a dict of:
lc_name -> dict of collision_hash -> minimal_collision_hash.
Optionally, pass a name to filter by name. '''
if name is not None:
name = name.lower()
tups = sorted( (rtx.script.name.lower(), rtx.script.collision_hash)
for rtx in pb.reg_txs.values()
if rtx.script.name.lower() == name )
else:
tups = sorted( (rtx.script.name.lower(), rtx.script.collision_hash)
for rtx in pb.reg_txs.values() )
# tups is now a sorted list of (name, collision_hash)
return cls._calc_minimal_chashes_for_sorted_lcased_tups(tups)
@staticmethod
def _calc_minimal_chashes_for_sorted_lcased_tups(tups : List[Tuple[str,str]]) -> Dict[str, Dict[str, str]]:
        ''' Given a list of sorted tuples, with names already all lowercased,
        returns a dict of:
        lc_name -> dict of collision_hash -> minimal_collision_hash '''
ret = defaultdict(dict)
N = collision_hash_length
idxs = [0] * len(tups)
for i in range(len(tups)-1):
pnam, pch = tups[i]
nam, ch = tups[i+1]
j = 0
if pnam == nam:
while j < N and ch[:j] == pch[:j]:
j += 1
idxs[i] = max(idxs[i], j)
idxs[i+1] = max(idxs[i+1], j)
for n, tupe in enumerate(tups):
nam, ch = tupe
ret[nam][ch] = ch[:idxs[n]]
return ret
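    # Worked example (traced from the loop above): two registrations of the same
    # lowercased name whose collision hashes first differ at the 4th digit both
    # get minimal collision hashes of length 4:
    #
    #   CashAcct._calc_minimal_chashes_for_sorted_lcased_tups(
    #       [('bob', '0123456789'), ('bob', '0124567890')])
    #   # -> {'bob': {'0123456789': '0123', '0124567890': '0124'}} (as a defaultdict)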
def _fw_wallet_updated(self, evt, *args):
''' Our private verifier is done. Propagate updated signal to parent
wallet so that the GUI will refresh. '''
if evt == 'wallet_updated' and args and args[0] is self:
self.print_error("forwarding 'wallet_updated' as parent wallet")
self.network.trigger_callback('wallet_updated', self.wallet)
def _find_script(self, txid, print_if_missing=True, *, incomplete=False, giveto=None):
''' lock should be held by caller '''
maybes = (self.wallet_reg_tx.get(txid), self.ext_reg_tx.get(txid))
item = None
for maybe in maybes:
if maybe and (not item or (not item.script.is_complete() and maybe.script.is_complete())):
item = maybe
del maybe, maybes
if not item and incomplete:
item = self.ext_incomplete_tx.get(txid)
if item and not item.script.is_complete() and not incomplete:
item = None # refuse to return an incomplete tx unless incomplete=True
if item:
# Note the giveto with incomplete=True is fragile and requires
# a call to _add_verified_tx_common right after this
# _find_script call.
# Also note: we intentionally don't pop the ext_incomplete_tx
# dict here as perhaps client code is maintaining a reference
# and we want to update that reference later in add_verified_common.
if giveto == 'e':
self.wallet_reg_tx.pop(txid, None)
self.ext_reg_tx[txid] = item
elif giveto == 'w':
self.ext_reg_tx.pop(txid, None)
self.wallet_reg_tx[txid] = item
return item.script
if print_if_missing:
self.print_error("_find_script: could not find script for txid", txid)
def _add_vtx(self, vtx, script):
''' lock should be held by caller '''
self.v_tx[vtx.txid] = vtx
self.v_by_addr[script.address].add(vtx.txid)
self.v_by_name[script.name.lower()].add(vtx.txid)
def _rm_vtx(self, txid, *, force=False, rm_from_verifier=False):
''' lock should be held by caller '''
vtx = self.v_tx.pop(txid, None)
if not vtx:
# was not relevant, abort early
return
assert txid == vtx.txid
script = self._find_script(txid, print_if_missing=not force) # will print_error if script not found
if script:
addr, name = script.address, script.name.lower()
self.v_by_addr[addr].discard(txid)
if not self.v_by_addr[addr]: self.v_by_addr.pop(addr, None)
self.v_by_name[name].discard(txid)
if not self.v_by_name[name]: self.v_by_name.pop(name, None)
elif force:
self.print_error("force remove v_tx", txid)
empty = set()
for a, s in self.v_by_addr.items():
s.discard(txid)
if not s:
empty.add(a)
for a in empty:
self.v_by_addr.pop(a, None)
empty.clear()
for n, s in self.v_by_name.items():
s.discard(txid)
if not s:
empty.add(n)
for n in empty:
self.v_by_name.pop(n, None)
if rm_from_verifier:
verifier = self.verifier
if verifier:
verifier.remove_spv_proof_for_tx(txid)
def _wipe_tx(self, txid, rm_from_verifier=False):
''' called to completely forget a tx from all caches '''
self._rm_vtx(txid, force=True, rm_from_verifier=rm_from_verifier)
self.wallet_reg_tx.pop(txid, None)
self.ext_reg_tx.pop(txid, None)
self.ext_incomplete_tx.pop(txid, None)
self.ext_unverif.pop(txid, None)
def _add_verified_tx_common(self, script, txid, height, header):
''' caller must hold locks '''
if not script or height < activation_height:
# no-op or not relevant callback
return
block_hash = blockchain.hash_header(header)
v = self.VerifTx(txid=txid, block_height=height, block_hash=block_hash)
# update/completeify
script.make_complete(block_height=v.block_height, block_hash=v.block_hash, txid=v.txid)
rtx = self.ext_incomplete_tx.pop(txid, None)
if rtx:
# in case client code somewhere has a copy of this script ..
# update it to 'complete' so GUI can reflect change.
# (relevant to TxDialog class)
rtx.script.make_complete(block_height=v.block_height, block_hash=v.block_hash, txid=v.txid)
if txid not in self.ext_reg_tx and txid not in self.wallet_reg_tx:
# save this is_complete RegTx to ext_reg_tx dict which gets saved to disk
self.ext_reg_tx[txid] = rtx
# register this tx as verified
self._add_vtx(v, script)
def _add_vtx_chk_height(self, txid, height_ts_pos_tup):
''' caller must hold locks '''
height = height_ts_pos_tup[0]
if not isinstance(height, (int, float)) or height < activation_height:
self.print_error(f"Warning: Got a tx {txid} with height {height} < activation height {activation_height}!")
self._wipe_tx(txid)
return 0
return int(height)
#########################
# Wallet hook callbacks #
#########################
def add_verified_tx_hook(self, txid: str, height_ts_pos_tup: tuple, header: dict):
''' Called by wallet when it itself got a verified tx from its own
verifier. We need to know about tx's that the parent wallet verified
so we don't do the same work again. '''
with self.lock:
# Note: precondition here is that the tx exists in one of our RegTx
# dicts, otherwise the tx is not relevant to us (contains no cash
# account registrations). We need this check because we are called
# a lot for every tx the wallet verifies.
script = self._find_script(txid, False, giveto='w', incomplete=True)
if not script:
return
self.print_error("verified internal:", txid, height_ts_pos_tup)
height = self._add_vtx_chk_height(txid, height_ts_pos_tup) # prints to print_error and wipes tx on error
if not height:
return
self._add_verified_tx_common(script, txid, height, header)
# this needs to be done without the lock held
if self.network and script.is_complete(): # paranoia checks
self.network.trigger_callback('ca_verified_tx', self, Info.from_script(script, txid))
def verification_failed_hook(self, txid, reason):
''' Called by wallet when it receives a verification_failed callback
from its verifier. We must check if the tx is relevant and if so,
        forward the information on with a callback. '''
with self.lock:
script = self._find_script(txid, False, giveto='w', incomplete=True)
if not script:
# not relevant to us
return
if self.network:
self.network.trigger_callback('ca_verification_failed', self, txid, reason)
def undo_verifications_hook(self, txs: set):
        ''' Called by wallet when it itself got called to undo_verifications by
its verifier. We need to be told what set of tx_hash was undone. '''
if not txs: return
with self.lock:
for txid in txs:
self._rm_vtx(txid) # this is safe as a no-op if txid was not relevant
self._find_script(txid, False, giveto='w')
# Since we have a chain reorg, invalidate the processed block and
# minimal_ch_cache to force revalidation of our collision hashes.
# FIXME: Do this more elegantly. This casts a pretty wide net.
            # NB: I believe assigning a new {} to .d is safer than d.clear()
# in this case as the caches._ExpiringCacheMgr doesn't like it
# when you remove items from the existing dict, but should run ok
# if you just assign a new dict (it keeps a working reference as
# it flushes the cache)... so assigning to .d is safer in this case.
self.minimal_ch_cache.d = {}
self.processed_blocks.d = {}
def add_transaction_hook(self, txid: str, tx: object, out_n: int, script: ScriptOutput):
''' Called by wallet inside add_transaction (with wallet.lock held) to
notify us about transactions that were added containing a cashacct
scriptoutput. Note these tx's aren't yet in the verified set. '''
assert isinstance(script, ScriptOutput)
with self.lock:
self.wallet_reg_tx[txid] = self.RegTx(txid=txid, script=script)
self._find_script(txid, giveto='w') # makes sure there is only 1 copy in wallet_reg_tx
def remove_transaction_hook(self, txid: str):
''' Called by wallet inside remove_transaction (with wallet.lock held)
to tell us about a transaction that was removed. '''
with self.lock:
self._rm_vtx(txid)
self.wallet_reg_tx.pop(txid, None)
def add_unverified_tx_hook(self, txid: str, block_height: int):
''' This is called by wallet when we expect a future subsequent
verification to happen. So let's pop the vtx from our data structure
in anticipation of a possible future verification coming in. '''
with self.lock:
self._rm_vtx(txid)
self._find_script(txid, False, giveto='w', incomplete=True)
def on_address_addition(self, address):
''' Called by wallet when a new address is added in imported wallet.'''
def on_address_deletion(self, address):
''' Called by wallet when an existing address is deleted in imported wallet.'''
def on_clear_history(self):
''' Called by wallet rebuild history mechanism to clear everything. '''
with self.lock:
self._init_data()
def save_verified_tx_hook(self, write=False):
self.save(write)
# /Wallet hook callbacks
#######################
# SPVDelegate Methods #
#######################
def get_unverified_txs(self) -> dict:
''' Return a dict of tx_hash (hex encoded) -> height (int)'''
with self.lock:
return self.ext_unverif.copy()
def add_verified_tx(self, tx_hash : str, height_ts_pos_tup : tuple, header : dict) -> None:
''' Called when a verification is successful.
Params:
#1 tx_hash - hex string
#2 tuple of: (tx_height: int, timestamp: int, pos : int)
#3 the header - dict. This can be subsequently serialized using
        blockchain.serialize_header if so desired, or it can be ignored.
'''
self.print_error('verified external:', tx_hash, height_ts_pos_tup)
with self.wallet.lock: # thread safety, even though for 1-liners in CPython it hardly matters.
# maintain invariant -- this is because pvt verifier can get kicked
# off on .load() for any missing unverified tx (wallet or external)
            # so we have to determine here where the final tx should live
giveto = 'w' if tx_hash in self.wallet.transactions else 'e'
with self.lock:
self.ext_unverif.pop(tx_hash, None) # pop it off unconditionally
height = self._add_vtx_chk_height(tx_hash, height_ts_pos_tup) # prints to print_error and wipes tx on error
if not height:
return
script = self._find_script(tx_hash, incomplete=True, giveto=giveto)
# call back into the same codepath that registers tx's as verified, and completes them...
self._add_verified_tx_common(script, tx_hash, height, header)
# this needs to be done without the lock held
if self.network and script and script.is_complete(): # paranoia checks
self.network.trigger_callback('ca_verified_tx', self, Info.from_script(script, tx_hash))
def is_up_to_date(self) -> bool:
'''Return True to kick off network wallet_updated callback and
save_verified_tx callback to us, only when nothing left to verify. '''
return not self.ext_unverif
def save_verified_tx(self, write : bool = False):
''' Save state. Called by ext verified when it's done. '''
self.save(write)
def undo_verifications(self, bchain : object, height : int) -> set:
''' Called when the blockchain has changed to tell the wallet to undo
verifications when a reorg has happened. Returns a set of tx_hash. '''
txs = set()
with self.lock:
for txid, vtx in self.v_tx.copy().items():
if txid in self.wallet_reg_tx:
# wallet verifier will take care of this one
continue
if vtx.block_height >= height:
header = bchain.read_header(vtx.block_height)
if not header or vtx.block_hash != blockchain.hash_header(header):
self._rm_vtx(txid)
self.ext_unverif[txid] = vtx.block_height # re-enqueue for verification with private verifier...? TODO: how to detect tx's dropped out of new chain?
txs.add(txid)
return txs
def verification_failed(self, tx_hash, reason):
''' TODO.. figure out what to do here. Or with wallet verification in
general in this error case. '''
self.print_error(f"SPV failed for {tx_hash}, reason: '{reason}'")
try:
with self.lock:
script = self._find_script(tx_hash)
idx = self.verifier.failure_reasons.index(reason)
if idx < 3 or not script or not script.is_complete():
# actual verification failure.. remove this tx
self.print_error("removing tx from ext_reg_tx cache")
self.ext_unverif.pop(tx_hash, None)
self.ext_reg_tx.pop(tx_hash, None)
elif idx == 5:
# tx not found -- might be either we are testnet and lookup
# server was mainnet *OR* some other strangeness. Not sure
# what to do here, so we just wipe the tx from our caches
# because keeping it around will cause the client to DoS
# itself versus the ElectrumX server each time it connects.
self.print_error("tx appears to be completely unknown to server, wiping from cache")
self._wipe_tx(tx_hash)
else:
# Note that the above ^ branch can also be reached due to a
# misbehaving server so .. not really sure what to do here.
# TODO: Determine best strategy for verification failures.
self.print_error("ignoring failure due misc. error response from server.. will try again next session")
except ValueError:
self.print_error(f"Cannot find '{reason}' in verifier reason list! FIXME!")
if self.network:
self.network.trigger_callback('ca_verification_failed', self, tx_hash, reason)
# /SPVDelegate Methods
###############################################
# Experimental Methods (stuff we may not use) #
###############################################
def scan_servers_for_registrations(self, start=1, stop=None, progress_cb=None, error_cb=None, timeout=timeout,
add_only_mine=True, debug=debug):
''' This is slow and not particularly useful. Will maybe delete this
code soon. I used it for testing to populate wallet.
progress_cb is called with (progress : float, num_added : int, number : int) as args!
error_cb is called with no arguments to indicate failure.
Upon completion, either progress_cb(1.0 ..) will be called to indicate
successful completion of the task. Or, error_cb() will be called to
indicate error abort (usually due to timeout).
Returned object can be used to stop the process. obj.stop() is the
method.
'''
if not self.network:
return
cancel_evt = threading.Event()
stop = num2bh(stop) if stop is not None else stop
start = num2bh(max(start or 0, 1))
def stop_height():
return stop or self.wallet.get_local_height()+1
def progress(h, added):
if progress_cb:
progress_cb(max((h-start)/(stop_height() - start), 0.0), added, bh2num(h))
def thread_func():
q = queue.Queue()
h = start
added = 0
while self.network and not cancel_evt.is_set() and h < stop_height():
num = bh2num(h)
lookup_asynch_all(number=num,
success_cb = lambda res,server: q.put(res),
error_cb = q.put,
timeout=timeout, debug=debug)
try:
thing = q.get(timeout=timeout)
if isinstance(thing, Exception):
e = thing
if debug:
self.print_error(f"Height {h} got exception in lookup: {repr(e)}")
elif isinstance(thing, tuple):
block_hash, res = thing
for rtx in res:
if rtx.txid not in self.wallet_reg_tx and rtx.txid not in self.ext_reg_tx and (not add_only_mine or self.wallet.is_mine(rtx.script.address)):
self.add_ext_tx(rtx.txid, rtx.script)
added += 1
progress(h, added)
except queue.Empty:
self.print_error("Could not complete request, timed out!")
if error_cb:
error_cb()
return
h += 1
progress(h, added)
t = threading.Thread(daemon=True, target=thread_func)
t.start()
class ScanStopper(namedtuple("ScanStopper", "thread, event")):
def is_alive(self):
return self.thread.is_alive()
def stop(self):
if self.is_alive():
self.event.set()
self.thread.join()
return ScanStopper(t, cancel_evt)
|
caffe_mnist_client.py
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#!/usr/bin/env python2.7
"""A client that talks to tensorflow_model_server loaded with mnist model.
The client downloads test images of mnist data set, queries the service with
such test images to get predictions, and calculates the inference error rate.
Typical usage example:
mnist_client.py --num_tests=100 --server=localhost:9000
"""
from __future__ import print_function
import time
import sys
import threading
import numpy as np
# This is a placeholder for a Google-internal import.
from grpc.beta import implementations
import numpy
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2
import mnist_input_data
tf.app.flags.DEFINE_integer('concurrency', 1,
'maximum number of concurrent inference requests')
tf.app.flags.DEFINE_integer('num_tests', 100, 'Number of test images')
tf.app.flags.DEFINE_string('server', '', 'PredictionService host:port')
tf.app.flags.DEFINE_string('work_dir', '/tmp', 'Working directory. ')
FLAGS = tf.app.flags.FLAGS
# class _ResultCounter(object):
# """Counter for the prediction results."""
# def __init__(self, num_tests, concurrency):
# self._num_tests = num_tests
# self._concurrency = concurrency
# self._error = 0
# self._done = 0
# self._active = 0
# self._condition = threading.Condition()
# def inc_error(self):
# with self._condition:
# self._error += 1
# def inc_done(self):
# with self._condition:
# self._done += 1
# self._condition.notify()
# def dec_active(self):
# with self._condition:
# self._active -= 1
# self._condition.notify()
# def get_error_rate(self):
# with self._condition:
# while self._done != self._num_tests:
# self._condition.wait()
# return self._error / float(self._num_tests)
# def throttle(self):
# with self._condition:
# while self._active == self._concurrency:
# self._condition.wait()
# self._active += 1
# def _create_rpc_callback(label, result_counter):
# """Creates RPC callback function.
# Args:
# label: The correct label for the predicted example.
# result_counter: Counter for the prediction result.
# Returns:
# The callback function.
# """
# def _callback(result_future):
# """Callback function.
# Calculates the statistics for the prediction result.
# Args:
# result_future: Result future of the RPC.
# """
# exception = result_future.exception()
# if exception:
# result_counter.inc_error()
# print(exception)
# else:
# sys.stdout.write('.')
# sys.stdout.flush()
# response = numpy.array(
# result_future.result().outputs['scores'].float_val)
# prediction = numpy.argmax(response)
# if label != prediction:
# result_counter.inc_error()
# result_counter.inc_done()
# result_counter.dec_active()
# return _callback
# def do_inference(hostport, work_dir, concurrency, num_tests):
# """Tests PredictionService with concurrent requests.
# Args:
# hostport: Host:port address of the PredictionService.
# work_dir: The full path of working directory for test data set.
# concurrency: Maximum number of concurrent requests.
# num_tests: Number of test images to use.
# Returns:
# The classification error rate.
# Raises:
# IOError: An error occurred processing test data set.
# """
# test_data_set = mnist_input_data.read_data_sets(work_dir).test
# host, port = hostport.split(':')
# channel = implementations.insecure_channel(host, int(port))
# stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
# start = time.time()
# for _ in range(num_tests):
# request = predict_pb2.PredictRequest()
# request.model_spec.name = 'caffe_mnist'
# request.model_spec.signature_name = 'predict_images'
# image, label = test_data_set.next_batch(5)
# # tmp_image, _ = test_data_set.next_batch(1)
# # image = np.reshape(tmp_image[0], (1, 28, 28, 1))
# # print("[Yitao] %s" % image[0].size)
# # print(image[0].shape)
# # print(image[0].dtype)
# # print(label.shape)
# # print(label.dtype)
# request.inputs['images'].CopyFrom(
# tf.contrib.util.make_tensor_proto(image[0], shape=[5, 28, 28, 1]))
# # print("Bangbangbang")
# tmp_result = stub.Predict(request, 10.0) # 5 seconds
# # print(tmp_result)
# sys.stdout.write('.')
# sys.stdout.flush()
# end = time.time()
# print('\nFinished!')
# print('It takes %s sec to run %d images by using MNIST' % (str(end - start), num_tests))
def myFuncWarmUp(stub, i, test_data_set):
request = predict_pb2.PredictRequest()
request.model_spec.name = 'caffe_mnist'
request.model_spec.signature_name = 'predict_images'
batchSize = 1
durationSum = 0.0
runNum = 13
# test_data_set = mnist_input_data.read_data_sets(FLAGS.work_dir).test
for k in range(runNum):
start = time.time()
image, label = test_data_set.next_batch(batchSize)
request.inputs['images'].CopyFrom(
tf.contrib.util.make_tensor_proto(image[0], shape=[batchSize, 28, 28, 1]))
tmp_result = stub.Predict(request, 10.0) # 10 second timeout
end = time.time()
duration = (end - start)
print("it takes %s sec" % str(duration))
if (k != 0 and k != 3 and k != 8):
durationSum += duration
print("[Warm up] on average, it takes %s sec to run a batch of %d images over %d runs" % (str(durationSum / (runNum - 3)), batchSize, (runNum - 3)))
def myFuncParallel(stub, i, test_data_set):
request = predict_pb2.PredictRequest()
request.model_spec.name = 'caffe_mnist'
request.model_spec.signature_name = 'predict_images'
batchSize = 1
durationSum = 0.0
runNum = 10
# test_data_set = mnist_input_data.read_data_sets(FLAGS.work_dir).test
for k in range(runNum):
start = time.time()
image, label = test_data_set.next_batch(batchSize)
request.inputs['images'].CopyFrom(
tf.contrib.util.make_tensor_proto(image[0], shape=[batchSize, 28, 28, 1]))
tmp_result = stub.Predict(request, 10.0) # 10 second timeout
end = time.time()
duration = (end - start)
print("[thread-%d-%d] it takes %s sec" % (i, k, str(duration)))
if True:
durationSum += duration
print("[Parallel-thread-%d] on average, it takes %s sec to run a batch of %d images over %d runs" % (i, str(durationSum / runNum), batchSize, runNum))
def main(_):
# if FLAGS.num_tests > 10000:
# print('num_tests should not be greater than 10k')
# return
# if not FLAGS.server:
# print('please specify server host:port')
# return
# do_inference(FLAGS.server, FLAGS.work_dir,
# FLAGS.concurrency, FLAGS.num_tests)
hostport = FLAGS.server
work_dir = FLAGS.work_dir
concurrency = FLAGS.concurrency
num_tests = FLAGS.num_tests
test_data_set = mnist_input_data.read_data_sets(work_dir).test
host, port = hostport.split(':')
channel = implementations.insecure_channel(host, int(port))
stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
myFuncWarmUp(stub, 0, test_data_set)
num_tests = 0
tPool = []
for i in range(num_tests):
tPool.append(threading.Thread(target = myFuncParallel, args = (stub, i, test_data_set)))
start = time.time()
for i in range(num_tests):
t = tPool[i]
t.start()
# time.sleep(2.0)
for i in range(num_tests):
t = tPool[i]
t.join()
end = time.time()
print('\nFinished!')
print('[Parallel] The total running time to run %d concurrent jobs is %s' % (num_tests, str(end - start)))
if __name__ == '__main__':
tf.app.run()
|
python_lang.py
|
"""
**Source** Kevin Markham https://github.com/justmarkham/python-reference
"""
###############################################################################
# Import libraries
# ----------------
#
# 'generic import' of math module
import math
math.sqrt(25)
# import a function
from math import sqrt
sqrt(25) # no longer have to reference the module
# import multiple functions at once
from math import cos, floor
# import all functions in a module (generally discouraged)
# from os import *
# define an alias
import numpy as np
# show all functions in math module
content = dir(math)
###############################################################################
# Basic operations
# ----------------
#
# Numbers
10 + 4 # add (returns 14)
10 - 4 # subtract (returns 6)
10 * 4 # multiply (returns 40)
10 ** 4 # exponent (returns 10000)
10 / 4 # true division (returns 2.5 in Python 3)
10 / float(4) # divide (returns 2.5)
5 % 4 # modulo (returns 1) - also known as the remainder
10 // 4 # floor division (returns 2)
# Boolean operations
# comparisons (these return True)
5 > 3
5 >= 3
5 != 3
5 == 5
# boolean operations (these return True)
5 > 3 and 6 > 3
5 > 3 or 5 < 3
not False
False or not False and True # evaluation order: not, and, or
###############################################################################
# Data types
# ----------
#
# determine the type of an object
type(2) # returns 'int'
type(2.0) # returns 'float'
type('two') # returns 'str'
type(True) # returns 'bool'
type(None) # returns 'NoneType'
# check if an object is of a given type
isinstance(2.0, int) # returns False
isinstance(2.0, (int, float)) # returns True
# convert an object to a given type
float(2)
int(2.9)
str(2.9)
# zero, None, and empty containers are converted to False
bool(0)
bool(None)
bool('') # empty string
bool([]) # empty list
bool({}) # empty dictionary
# non-empty containers and non-zeros are converted to True
bool(2)
bool('two')
bool([2])
###############################################################################
# Lists
# ~~~~~
#
# Different objects categorized along a certain ordered sequence, lists
# are ordered, iterable, mutable (adding or removing objects changes the
# list size), can contain multiple data types.
# create an empty list (two ways)
empty_list = []
empty_list = list()
# create a list
simpsons = ['homer', 'marge', 'bart']
# examine a list
simpsons[0] # print element 0 ('homer')
len(simpsons) # returns the length (3)
# modify a list (does not return the list)
simpsons.append('lisa') # append element to end
simpsons.extend(['itchy', 'scratchy']) # append multiple elements to end
simpsons.insert(0, 'maggie') # insert element at index 0 (shifts everything right)
simpsons.remove('bart') # searches for first instance and removes it
simpsons.pop(0) # removes element 0 and returns it
del simpsons[0] # removes element 0 (does not return it)
simpsons[0] = 'krusty' # replace element 0
# concatenate lists (slower than 'extend' method)
neighbors = simpsons + ['ned','rod','todd']
# find elements in a list
'lisa' in simpsons
simpsons.count('lisa') # counts the number of instances
simpsons.index('itchy') # returns index of first instance
# list slicing [start:end:stride]
weekdays = ['mon','tues','wed','thurs','fri']
weekdays[0] # element 0
weekdays[0:3] # elements 0, 1, 2
weekdays[:3] # elements 0, 1, 2
weekdays[3:] # elements 3, 4
weekdays[-1] # last element (element 4)
weekdays[::2] # every 2nd element (0, 2, 4)
weekdays[::-1] # backwards (4, 3, 2, 1, 0)
# alternative method for returning the list backwards
list(reversed(weekdays))
# sort a list in place (modifies but does not return the list)
simpsons.sort()
simpsons.sort(reverse=True) # sort in reverse
simpsons.sort(key=len) # sort by a key
# return a sorted list (but does not modify the original list)
sorted(simpsons)
sorted(simpsons, reverse=True)
sorted(simpsons, key=len)
# create a second reference to the same list
num = [1, 2, 3]
same_num = num
same_num[0] = 0 # modifies both 'num' and 'same_num'
# copy a list (three ways)
new_num = num.copy()
new_num = num[:]
new_num = list(num)
# examine objects
id(num) == id(same_num) # returns True
id(num) == id(new_num) # returns False
num is same_num # returns True
num is new_num # returns False
num == same_num # returns True
num == new_num # returns True (their contents are equivalent)
# concatenate +, replicate *
[1, 2, 3] + [4, 5, 6]
["a"] * 2 + ["b"] * 3
###############################################################################
# Tuples
# ~~~~~~
#
# Like lists, but their size cannot change: ordered, iterable, immutable,
# can contain multiple data types
#
# create a tuple
digits = (0, 1, 'two') # create a tuple directly
digits = tuple([0, 1, 'two']) # create a tuple from a list
zero = (0,) # trailing comma is required to indicate it's a tuple
# examine a tuple
digits[2] # returns 'two'
len(digits) # returns 3
digits.count(0) # counts the number of instances of that value (1)
digits.index(1) # returns the index of the first instance of that value (1)
# elements of a tuple cannot be modified
# digits[2] = 2 # throws an error
# concatenate tuples
digits = digits + (3, 4)
# create a single tuple with elements repeated (also works with lists)
(3, 4) * 2 # returns (3, 4, 3, 4)
# tuple unpacking
bart = ('male', 10, 'simpson') # create a tuple
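# unpacking assigns each element to a variable (a small illustrative addition)
(sex, age, surname) = bart # sex = 'male', age = 10, surname = 'simpson'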
###############################################################################
# Strings
# ~~~~~~~
#
# A sequence of characters, they are iterable, immutable
#
# create a string
s = str(42) # convert another data type into a string
s = 'I like you'
# examine a string
s[0] # returns 'I'
len(s) # returns 10
# string slicing like lists
s[:6] # returns 'I like'
s[7:] # returns 'you'
s[-1] # returns 'u'
# basic string methods (does not modify the original string)
s.lower() # returns 'i like you'
s.upper() # returns 'I LIKE YOU'
s.startswith('I') # returns True
s.endswith('you') # returns True
s.isdigit() # returns False (returns True if every character in the string is a digit)
s.find('like') # returns index of first occurrence (2), but doesn't support regex
s.find('hate') # returns -1 since not found
s.replace('like','love') # replaces all instances of 'like' with 'love'
# split a string into a list of substrings separated by a delimiter
s.split(' ') # returns ['I','like','you']
s.split() # same thing
s2 = 'a, an, the'
s2.split(',') # returns ['a',' an',' the']
# join a list of strings into one string using a delimiter
stooges = ['larry','curly','moe']
' '.join(stooges) # returns 'larry curly moe'
# concatenate strings
s3 = 'The meaning of life is'
s4 = '42'
s3 + ' ' + s4 # returns 'The meaning of life is 42'
s3 + ' ' + str(42) # same thing
# remove whitespace from start and end of a string
s5 = ' ham and cheese '
s5.strip() # returns 'ham and cheese'
# string substitutions: all of these return 'raining cats and dogs'
'raining %s and %s' % ('cats','dogs') # old way
'raining {} and {}'.format('cats','dogs') # new way
'raining {arg1} and {arg2}'.format(arg1='cats',arg2='dogs') # named arguments
# string formatting
# more examples: http://mkaz.com/2012/10/10/python-string-format/
'pi is {:.2f}'.format(3.14159) # returns 'pi is 3.14'
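# f-strings (Python 3.6+) give the same result inline; a brief illustrative aside
f'pi is {3.14159:.2f}' # returns 'pi is 3.14'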
###############################################################################
# Strings 2/2
# ~~~~~~~~~~~
###############################################################################
# Normal strings allow for escaped characters
#
print('first line\nsecond line')
###############################################################################
# raw strings treat backslashes as literal characters
#
print(r'first line\nfirst line')
###############################################################################
# Sequences of bytes are not strings; they should be decoded before some operations
#
s = b'first line\nsecond line'
print(s)
print(s.decode('utf-8').split())
###############################################################################
# Dictionaries
# ~~~~~~~~~~~~
#
# Dictionaries are structures which can contain multiple data types,
# organized as key-value pairs: for each (unique) key, the dictionary
# outputs one value. Keys can be strings, numbers, or tuples, while the
# corresponding values can be any Python object. Dictionaries are:
# iterable, mutable, and preserve insertion order in Python 3.7+
#
# create an empty dictionary (two ways)
empty_dict = {}
empty_dict = dict()
# create a dictionary (two ways)
family = {'dad':'homer', 'mom':'marge', 'size':6}
family = dict(dad='homer', mom='marge', size=6)
# convert a list of tuples into a dictionary
list_of_tuples = [('dad','homer'), ('mom','marge'), ('size', 6)]
family = dict(list_of_tuples)
# examine a dictionary
family['dad'] # returns 'homer'
len(family) # returns 3
family.keys() # returns a view of the keys: dict_keys(['dad', 'mom', 'size'])
family.values() # returns a view of the values: dict_values(['homer', 'marge', 6])
family.items() # returns a view of (key, value) pairs:
# dict_items([('dad', 'homer'), ('mom', 'marge'), ('size', 6)])
'mom' in family # returns True
'marge' in family # returns False (only checks keys)
# modify a dictionary (does not return the dictionary)
family['cat'] = 'snowball' # add a new entry
family['cat'] = 'snowball ii' # edit an existing entry
del family['cat'] # delete an entry
family['kids'] = ['bart', 'lisa'] # value can be a list
family.pop('dad') # removes an entry and returns the value ('homer')
family.update({'baby':'maggie', 'grandpa':'abe'}) # add multiple entries
# accessing values more safely with 'get'
family['mom'] # returns 'marge'
family.get('mom') # same thing
try:
family['grandma'] # throws an error
except KeyError as e:
print("Error", e)
family.get('grandma') # returns None
family.get('grandma', 'not found') # returns 'not found' (the default)
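# a related helper (illustrative aside, not in the original): setdefault returns the
# value if the key exists, otherwise inserts the key with the given default and returns it
family.setdefault('grandma', 'unknown') # adds 'grandma' and returns 'unknown'
family.setdefault('mom', 'other') # returns 'marge'; the existing value is kept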
# accessing a list element within a dictionary
family['kids'][0] # returns 'bart'
family['kids'].remove('lisa') # removes 'lisa'
# string substitution using a dictionary
'youngest child is %(baby)s' % family # returns 'youngest child is maggie'
###############################################################################
# Sets
# ~~~~
#
# Like dictionaries, but with unique keys only (no corresponding values).
# They are: unordered, iterable, mutable, and can contain multiple data
# types, as long as the elements are unique and hashable (strings, numbers, or tuples)
#
# create an empty set
empty_set = set()
# create a set
languages = {'python', 'r', 'java'} # create a set directly
snakes = set(['cobra', 'viper', 'python']) # create a set from a list
# examine a set
len(languages) # returns 3
'python' in languages # returns True
# set operations
languages & snakes # returns intersection: {'python'}
languages | snakes # returns union: {'cobra', 'r', 'java', 'viper', 'python'}
languages - snakes # returns set difference: {'r', 'java'}
snakes - languages # returns set difference: {'cobra', 'viper'}
# modify a set (does not return the set)
languages.add('sql') # add a new element
languages.add('r') # try to add an existing element (ignored, no error)
languages.remove('java') # remove an element
try:
languages.remove('c') # try to remove a non-existing element (throws an error)
except KeyError as e:
print("Error", e)
languages.discard('c') # removes an element if present, but ignored otherwise
languages.pop() # removes and returns an arbitrary element
languages.clear() # removes all elements
languages.update(['go', 'spark']) # add multiple elements (pass a list or set; a bare string would add its individual characters)
# get a sorted list of unique elements from a list
sorted(set([9, 0, 2, 1, 0])) # returns [0, 1, 2, 9]
###############################################################################
# Execution control statements
# ----------------------------
#
###############################################################################
# Conditional statements
# ~~~~~~~~~~~~~~~~~~~~~~
x = 3
# if statement
if x > 0:
print('positive')
# if/else statement
if x > 0:
print('positive')
else:
print('zero or negative')
# if/elif/else statement
if x > 0:
print('positive')
elif x == 0:
print('zero')
else:
print('negative')
# single-line if statement (sometimes discouraged)
if x > 0: print('positive')
# single-line if/else statement (sometimes discouraged)
# known as a 'ternary operator'
sign = 'positive' if x > 0 else 'zero or negative'
###############################################################################
# Loops
# ~~~~~
#
# Loops are a set of instructions which repeat until termination
# conditions are met. This can include iterating through all values in an
# object, go through a range of values, etc
#
# range returns an iterable of integers (wrap it in list() to materialize the values)
list(range(0, 3)) # returns [0, 1, 2]: includes first value but excludes second value
list(range(3)) # same thing: starting at zero is the default
list(range(0, 5, 2)) # returns [0, 2, 4]: third argument specifies the 'stride'
# for loop
fruits = ['apple', 'banana', 'cherry']
for i in range(len(fruits)):
print(fruits[i].upper())
# alternative for loop (recommended style)
for fruit in fruits:
print(fruit.upper())
# range is lazy in Python 3: iterating over a large range does not create the integer list in memory
v = 0
for i in range(10 ** 6):
v += 1
###############################################################################
# List comprehensions, iterators, etc.
# ------------------------------------
#
# List comprehensions
# ~~~~~~~~~~~~~~~~~~~
#
# Process which affects whole lists without iterating through loops. For
# more:
# http://python-3-patterns-idioms-test.readthedocs.io/en/latest/Comprehensions.html
#
# for loop to create a list of cubes
nums = [1, 2, 3, 4, 5]
cubes = []
for num in nums:
cubes.append(num**3)
# equivalent list comprehension
cubes = [num**3 for num in nums] # [1, 8, 27, 64, 125]
# for loop to create a list of cubes of even numbers
cubes_of_even = []
for num in nums:
if num % 2 == 0:
cubes_of_even.append(num**3)
# equivalent list comprehension
# syntax: [expression for variable in iterable if condition]
cubes_of_even = [num**3 for num in nums if num % 2 == 0] # [8, 64]
# for loop to cube even numbers and square odd numbers
cubes_and_squares = []
for num in nums:
if num % 2 == 0:
cubes_and_squares.append(num**3)
else:
cubes_and_squares.append(num**2)
# equivalent list comprehension (using a ternary expression)
# syntax: [true_condition if condition else false_condition for variable in iterable]
cubes_and_squares = [num**3 if num % 2 == 0 else num**2 for num in nums] # [1, 8, 9, 64, 25]
# for loop to flatten a 2d-matrix
matrix = [[1, 2], [3, 4]]
items = []
for row in matrix:
for item in row:
items.append(item)
# equivalent list comprehension
items = [item for row in matrix
for item in row] # [1, 2, 3, 4]
# set comprehension
fruits = ['apple', 'banana', 'cherry']
unique_lengths = {len(fruit) for fruit in fruits} # {5, 6}
# dictionary comprehension
fruit_lengths = {fruit:len(fruit) for fruit in fruits} # {'apple': 5, 'banana': 6, 'cherry': 6}
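# generator expressions (illustrative aside, not in the original): same syntax with
# parentheses, but values are produced lazily instead of being stored in a list
gen = (num**3 for num in nums)
sum(gen) # 225 = 1 + 8 + 27 + 64 + 125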
###############################################################################
# Exercise: upper-case names and add 1 year to all simpsons
simpsons = {'Homer': 45, 'Marge': 45, 'Bart': 10, 'Lisa': 10}
simpsons_older = {k.upper(): v + 1 for k, v in simpsons.items()}
print(simpsons_older)
###############################################################################
# Exercise: count words in a sentence
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
quote = """Tick-tow
our incomes are like our shoes; if too small they gall and pinch us
but if too large they cause us to stumble and to trip
"""
count = {word: 0 for word in set(quote.split())}
for word in quote.split():
count[word] += 1
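# a standard-library alternative (illustrative aside, not part of the exercise):
# collections.Counter builds the same word counts in one line
from collections import Counter
count_alt = Counter(quote.split())
assert count_alt == count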
# iterate through two things at once (using tuple unpacking)
family = {'dad': 'homer', 'mom': 'marge', 'size': 6}
for key, value in family.items():
print(key, value)
# use enumerate if you need to access the index value within the loop
for index, fruit in enumerate(fruits):
print(index, fruit)
# for/else loop
for fruit in fruits:
if fruit == 'banana':
print("Found the banana!")
break # exit the loop and skip the 'else' block
else:
# this block executes ONLY if the for loop completes without hitting
# 'break'
print("Can't find the banana")
# while loop
count = 0
while count < 5:
print("This will print 5 times")
count += 1 # equivalent to 'count = count + 1'
###############################################################################
# Exceptions handling
# ~~~~~~~~~~~~~~~~~~~
#
dct = dict(a=[1, 2], b=[4, 5])
key = 'c'
try:
dct[key]
except KeyError:
print("Key %s is missing. Add it with empty value" % key)
dct[key] = []
print(dct)
###############################################################################
# Functions
# ---------
#
# Functions are sets of instructions launched when called upon, they can
# have multiple input values and a return value
#
# define a function with no arguments and no return values
def print_text():
print('this is text')
# call the function
print_text()
# define a function with one argument and no return values
def print_this(x):
print(x)
# call the function
print_this(3) # prints 3
n = print_this(3) # prints 3, but doesn't assign 3 to n
# because the function has no return statement
def add(a, b):
return a + b
add(2, 3)
add("deux", "trois")
add(["deux", "trois"], [2, 3])
# define a function with one argument and one return value
def square_this(x):
return x ** 2
# include an optional docstring to describe the effect of a function
def square_this(x):
"""Return the square of a number."""
return x ** 2
# call the function
square_this(3) # returns 9
var = square_this(3) # assigns 9 to var, but does not print 9
# default arguments
def power_this(x, power=2):
return x ** power
power_this(2) # 4
power_this(2, 3) # 8
# use 'pass' as a placeholder if you haven't written the function body
def stub():
pass
# return two values from a single function
def min_max(nums):
return min(nums), max(nums)
# return values can be assigned to a single variable as a tuple
nums = [1, 2, 3]
min_max_num = min_max(nums) # min_max_num = (1, 3)
# return values can be assigned into multiple variables using tuple unpacking
min_num, max_num = min_max(nums) # min_num = 1, max_num = 3
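# a brief addition (not in the original): arguments can be passed by keyword, and
# *args / **kwargs collect extra positional and keyword arguments
power_this(x=2, power=5) # keyword arguments can be given in any order: returns 32
def describe(name, *args, **kwargs):
    return name, args, kwargs
describe('homer', 1, 2, job='safety inspector') # ('homer', (1, 2), {'job': 'safety inspector'})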
###############################################################################
# Regular expression
# ------------------
#
import re
# 1. Compile a regular expression with a pattern
regex = re.compile("^.+(sub-.+)_(ses-.+)_(mod-.+)")
###############################################################################
# 2. Match compiled RE on string
#
# Capture the pattern ```anyprefixsub-<subj id>_ses-<session id>_<modality>```
strings = ["abcsub-033_ses-01_mod-mri", "defsub-044_ses-01_mod-mri", "ghisub-055_ses-02_mod-ctscan"]
print([regex.findall(s)[0] for s in strings])
###############################################################################
# Match methods on compiled regular expression
#
# +------------------+----------------------------------------------------------------------------+
# | Method/Attribute | Purpose |
# +==================+============================================================================+
# | match(string) | Determine if the RE matches at the beginning of the string. |
# +------------------+----------------------------------------------------------------------------+
# | search(string) | Scan through a string, looking for any location where this RE matches. |
# +------------------+----------------------------------------------------------------------------+
# | findall(string) | Find all substrings where the RE matches, and returns them as a list. |
# +------------------+----------------------------------------------------------------------------+
# | finditer(string) | Find all substrings where the RE matches, and returns them as an iterator. |
# +------------------+----------------------------------------------------------------------------+
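# a short illustration (not in the original) of match vs search on the compiled RE
m = regex.match(strings[0]) # match() is anchored at the beginning of the string
print(m.groups() if m else None) # ('sub-033', 'ses-01', 'mod-mri')
print(regex.search("no pattern here")) # None when nothing matches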
###############################################################################
# 2. Replace compiled RE on string
regex = re.compile("(sub-[^_]+)") # match (sub-...)_
print([regex.sub("SUB-", s) for s in strings])
regex.sub("SUB-", "toto")
###############################################################################
# Remove all non-alphanumeric characters in a string
re.sub('[^0-9a-zA-Z]+', '', 'h^&ell`.,|o w]{+orld')
###############################################################################
# System programming
# ------------------
#
###############################################################################
# Operating system interfaces (os)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
import os
###############################################################################
# Current working directory
#
# Get the current working directory
cwd = os.getcwd()
print(cwd)
# Set the current working directory
os.chdir(cwd)
###############################################################################
# Temporary directory
#
import tempfile
tmpdir = tempfile.gettempdir()
###############################################################################
# Join paths
#
mytmpdir = os.path.join(tmpdir, "foobar")
###############################################################################
# Create a directory
os.makedirs(os.path.join(tmpdir, "foobar", "plop", "toto"), exist_ok=True)
# list containing the names of the entries in the directory given by path.
os.listdir(mytmpdir)
###############################################################################
# File input/output
# ~~~~~~~~~~~~~~~~~
#
filename = os.path.join(mytmpdir, "myfile.txt")
print(filename)
# Write
lines = ["Dans python tout est bon", "Enfin, presque"]
## write line by line
fd = open(filename, "w")
fd.write(lines[0] + "\n")
fd.write(lines[1]+ "\n")
fd.close()
## use a context manager to automatically close your file
with open(filename, 'w') as f:
for line in lines:
f.write(line + '\n')
# Read
## read one line at a time (entire file does not have to fit into memory)
f = open(filename, "r")
f.readline() # one string per line (including newlines)
f.readline() # next line
f.close()
## read the whole file at once, return a list of lines
f = open(filename, 'r')
f.readlines() # one list, each line is one string
f.close()
## use list comprehension to duplicate readlines without reading entire file at once
f = open(filename, 'r')
[line for line in f]
f.close()
## use a context manager to automatically close your file
with open(filename, 'r') as f:
lines = [line for line in f]
###############################################################################
# Explore, list directories
# ~~~~~~~~~~~~~~~~~~~~~~~~~
#
###############################################################################
# Walk
#
import os
WD = os.path.join(tmpdir, "foobar")
for dirpath, dirnames, filenames in os.walk(WD):
print(dirpath, dirnames, filenames)
###############################################################################
# glob, basename and file extension
import tempfile
import glob
tmpdir = tempfile.gettempdir()
filenames = glob.glob(os.path.join(tmpdir, "*", "*.txt"))
print(filenames)
# take basename then remove extension
basenames = [os.path.splitext(os.path.basename(f))[0] for f in filenames]
print(basenames)
###############################################################################
# shutil - High-level file operations
#
import shutil
src = os.path.join(tmpdir, "foobar", "myfile.txt")
dst = os.path.join(tmpdir, "foobar", "plop", "myfile.txt")
print("copy %s to %s" % (src, dst))
shutil.copy(src, dst)
print("File %s exists ?" % dst, os.path.exists(dst))
src = os.path.join(tmpdir, "foobar", "plop")
dst = os.path.join(tmpdir, "plop2")
print("copy tree %s under %s" % (src, dst))
try:
shutil.copytree(src, dst)
shutil.rmtree(dst)
shutil.move(src, dst)
except (FileExistsError, FileNotFoundError) as e:
pass
###############################################################################
# Command execution with subprocess
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# - For more advanced use cases, the underlying Popen interface can be used directly.
# - Run the command described by args.
# - Wait for command to complete
# - return a CompletedProcess instance.
# - Does not capture stdout or stderr by default. To do so, pass PIPE for the stdout and/or stderr arguments.
import subprocess
# doesn't capture output
p = subprocess.run(["ls", "-l"])
print(p.returncode)
# Run through the shell.
subprocess.run("ls -l", shell=True)
# Capture output
out = subprocess.run(["ls", "-a", "/"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# out.stdout is a sequence of bytes that should be decoded into a utf-8 string
print(out.stdout.decode('utf-8').split("\n")[:5])
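# an illustrative aside (not in the original): check=True makes run() raise
# CalledProcessError on a non-zero exit status ("/nonexistent-path" is just a
# hypothetical path used to force the failure)
try:
    subprocess.run(["ls", "/nonexistent-path"], check=True,
                   stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except subprocess.CalledProcessError as e:
    print("command failed with return code", e.returncode)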
###############################################################################
# Multiprocessing and multithreading
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# **Process**
#
# A process is a name given to a program instance that has been loaded into memory
# and managed by the operating system.
#
# Process = address space + execution context (thread of control)
#
# Process address space (segments):
#
# - Code.
# - Data (static/global).
# - Heap (dynamic memory allocation).
# - Stack.
#
# Execution context:
#
# - Data registers.
# - Stack pointer (SP).
# - Program counter (PC).
# - Working Registers.
#
# OS Scheduling of processes: context switching (ie. save/load Execution context)
#
# Pros/cons
#
# - Context switching is expensive.
# - (potentially) complex data sharing (not necessarily true).
# - Cooperating processes - no need for memory protection (separate address spaces).
# - Relevant for parallel computation with separate memory allocation.
#
# **Threads**
#
# - Threads share the same address space: access to code, heap and (global) data.
# - Separate execution stack, PC and working registers per thread.
#
# Pros/cons
#
# - Faster context switching: only the SP, PC and working registers need to be swapped.
# - Can exploit fine-grain concurrency.
# - Simple data sharing through the shared address space.
# - Precautions (e.g. locks) have to be taken so that two threads do not write to the
#   same memory at the same time; in CPython the **global interpreter lock (GIL)**
#   additionally ensures that only one thread executes Python bytecode at a time.
# - Relevant for GUI, I/O (Network, disk) concurrent operation
#
# **In Python**
#
# - The ``threading`` module uses threads.
# - The ``multiprocessing`` module uses processes.
###############################################################################
# Multithreading
#
import time
import threading
def list_append(count, sign=1, out_list=None):
if out_list is None:
out_list = list()
for i in range(count):
out_list.append(sign * i)
sum(out_list) # do some computation
return out_list
size = 10000 # Number of numbers to add
out_list = list() # result is a simple list
thread1 = threading.Thread(target=list_append, args=(size, 1, out_list, ))
thread2 = threading.Thread(target=list_append, args=(size, -1, out_list, ))
startime = time.time()
# Will execute both in parallel
thread1.start()
thread2.start()
# Joins threads back to the parent process
thread1.join()
thread2.join()
print("Threading ellapsed time ", time.time() - startime)
print(out_list[:10])
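# as mentioned above, shared state needs protection; a minimal sketch with
# threading.Lock (an added illustration, not part of the original example)
lock = threading.Lock()
counter = 0
def safe_increment(n):
    global counter
    for _ in range(n):
        with lock: # only one thread may update counter at a time
            counter += 1
t1 = threading.Thread(target=safe_increment, args=(10000,))
t2 = threading.Thread(target=safe_increment, args=(10000,))
t1.start(); t2.start()
t1.join(); t2.join()
print("counter =", counter) # 20000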
###############################################################################
# Multiprocessing
#
import multiprocessing
# Sharing requires a specific mechanism
out_list1 = multiprocessing.Manager().list()
p1 = multiprocessing.Process(target=list_append, args=(size, 1, None))
out_list2 = multiprocessing.Manager().list()
p2 = multiprocessing.Process(target=list_append, args=(size, -1, None))
startime = time.time()
p1.start()
p2.start()
p1.join()
p2.join()
print("Multiprocessing ellapsed time ", time.time() - startime)
# print(out_list[:10]) is not availlable
###############################################################################
# Sharing object between process with Managers
#
# Managers provide a way to create data which can be shared between
# different processes, including sharing over a network between processes
# running on different machines. A manager object controls a server process
# which manages shared objects.
import multiprocessing
import time
size = int(size / 100) # Number of numbers to add
# Sharing requires a specific mechanism
out_list = multiprocessing.Manager().list()
p1 = multiprocessing.Process(target=list_append, args=(size, 1, out_list))
p2 = multiprocessing.Process(target=list_append, args=(size, -1, out_list))
startime = time.time()
p1.start()
p2.start()
p1.join()
p2.join()
print(out_list[:10])
print("Multiprocessing with shared object ellapsed time ", time.time() - startime)
###############################################################################
# Scripts and argument parsing
# -----------------------------
#
# Example, the word count script ::
#
# import os
# import os.path
# import argparse
# import re
# import pandas as pd
#
# if __name__ == "__main__":
# # parse command line options
# output = "word_count.csv"
# parser = argparse.ArgumentParser()
# parser.add_argument('-i', '--input',
# help='list of input files.',
# nargs='+', type=str)
# parser.add_argument('-o', '--output',
# help='output csv file (default %s)' % output,
# type=str, default=output)
# options = parser.parse_args()
#
# if options.input is None :
# parser.print_help()
# raise SystemExit("Error: input files are missing")
# else:
# filenames = [f for f in options.input if os.path.isfile(f)]
#
# # Match words
# regex = re.compile("[a-zA-Z]+")
#
# count = dict()
# for filename in filenames:
# fd = open(filename, "r")
# for line in fd:
# for word in regex.findall(line.lower()):
# if not word in count:
# count[word] = 1
# else:
# count[word] += 1
#
# fd = open(options.output, "w")
#
# # Pandas
# df = pd.DataFrame([[k, count[k]] for k in count], columns=["word", "count"])
# df.to_csv(options.output, index=False)
###############################################################################
# Networking
# ----------
#
# TODO
###############################################################################
# FTP
# ~~~
#
# Full FTP features with ftplib
import ftplib
ftp = ftplib.FTP("ftp.cea.fr")
ftp.login()
ftp.cwd('/pub/unati/people/educhesnay/pystatml')
ftp.retrlines('LIST')
fd = open(os.path.join(tmpdir, "README.md"), "wb")
ftp.retrbinary('RETR README.md', fd.write)
fd.close()
ftp.quit()
# File download urllib
import urllib.request
ftp_url = 'ftp://ftp.cea.fr/pub/unati/people/educhesnay/pystatml/README.md'
urllib.request.urlretrieve(ftp_url, os.path.join(tmpdir, "README2.md"))
###############################################################################
# HTTP
# ~~~~
#
# TODO
###############################################################################
# Sockets
# ~~~~~~~
#
# TODO
###############################################################################
# xmlrpc
# ~~~~~~
#
# TODO
###############################################################################
# Modules and packages
# --------------------
#
# A module is a Python file.
# A package is a directory which MUST contain a special file called ``__init__.py``
#
# To import, extend variable `PYTHONPATH`::
#
# export PYTHONPATH=path_to_parent_python_module:${PYTHONPATH}
#
# Or
import sys
sys.path.append("path_to_parent_python_module")
###############################################################################
#
# The ``__init__.py`` file can be empty. But you can set which modules the
# package exports as the API, while keeping other modules internal,
# by overriding the __all__ variable, like so:
###############################################################################
# ``parentmodule/__init__.py`` file::
#
# from . import submodule1
# from . import submodule2
#
# from .submodule3 import function1
# from .submodule3 import function2
#
# __all__ = ["submodule1", "submodule2",
# "function1", "function2"]
#
# User can import::
#
# import parentmodule.submodule1
# import parentmodule.function1
###############################################################################
# Python Unit Testing
#
# TODO
###############################################################################
# Object Oriented Programming (OOP)
# ---------------------------------
#
# **Sources**
#
# - http://python-textbok.readthedocs.org/en/latest/Object\_Oriented\_Programming.html
#
# **Principles**
#
# - **Encapsulate** data (attributes) and code (methods) into objects.
#
# - **Class** = template or blueprint that can be used to create objects.
#
# - An **object** is a specific instance of a class.
#
# - **Inheritance**: OOP allows classes to inherit commonly used state
# and behaviour from other classes. Reduce code duplication
#
# - **Polymorphism**: (usually obtained through inheritance) calling
# code is agnostic as to whether an object belongs to a parent class or
# one of its descendants (abstraction, modularity). The same method
# called on 2 objects of 2 different classes may behave differently.
#
import math
class Shape2D:
def area(self):
raise NotImplementedError()
# __init__ is a special method called the constructor
# Inheritance + Encapsulation
class Square(Shape2D):
def __init__(self, width):
self.width = width
def area(self):
return self.width ** 2
class Disk(Shape2D):
def __init__(self, radius):
self.radius = radius
def area(self):
return math.pi * self.radius ** 2
shapes = [Square(2), Disk(3)]
# Polymorphism
print([s.area() for s in shapes])
s = Shape2D()
try:
s.area()
except NotImplementedError as e:
print("NotImplementedError", e)
###############################################################################
# Style guide for Python programming
# ----------------------------------
#
# See `PEP 8 <https://www.python.org/dev/peps/pep-0008/>`_
#
# - Spaces (four) are the preferred indentation method.
# - Two blank lines for top level function or classes definition.
# - One blank line to indicate logical sections.
# - Never use: ``from lib import *``
# - Bad: ``Capitalized_Words_With_Underscores``
# - Function and Variable Names: ``lower_case_with_underscores``
# - Class Names: ``CapitalizedWords`` (aka: ``CamelCase``)
###############################################################################
# Documenting
# -----------
#
# See `Documenting Python <https://realpython.com/documenting-python-code//>`_
# Documenting = comments + docstrings (Python documentation string)
#
# - `Docstrings <https://www.datacamp.com/community/tutorials/docstrings-python>`_
# are used as documentation for the class, module, and packages.
# See it as "living documentation".
# - Comments are used to explain non-obvious portions of the code. "Dead documentation".
#
# Docstrings for functions (same for classes and methods):
def my_function(a, b=2):
"""
This function ...
Parameters
----------
a : float
First operand.
b : float, optional
Second operand. The default is 2.
Returns
-------
Sum of operands.
Example
-------
>>> my_function(3)
5
"""
# Add a with b (this is a comment)
return a + b
print(help(my_function))
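# the Example section above is doctest-compatible (an illustrative aside): the doctest
# module can execute it and check the expected output
import doctest
doctest.run_docstring_examples(my_function, {"my_function": my_function})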
###############################################################################
# Docstrings for scripts:
#
# At the beginning of a script add a preamble::
#
# """
# Created on Thu Nov 14 12:08:41 CET 2019
#
# @author: firstname.lastname@email.com
#
# Some description
# """
###############################################################################
# Exercises
# ---------
#
###############################################################################
# Exercise 1: functions
# ~~~~~~~~~~~~~~~~~~~~~
#
# Create a function that acts as a simple calculator. If the operation is
# not specified, default to addition. If the operation is misspecified,
# return an error message.
#
# - Ex: ``calc(4, 5, "multiply")`` returns 20
# - Ex: ``calc(3, 5)`` returns 8
# - Ex: ``calc(1, 2, "something")`` returns an error message
#
###############################################################################
# Exercise 2: functions + list + loop
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Given a list of numbers, return a list where all adjacent duplicate
# elements have been reduced to a single element. Ex: ``[1, 2, 2, 3, 2]``
# returns ``[1, 2, 3, 2]``. You may create a new list or modify the passed
# in list.
#
# Remove all duplicate values (adjacent or not) Ex: ``[1, 2, 2, 3, 2]``
# returns ``[1, 2, 3]``
#
###############################################################################
# Exercise 3: File I/O
# ~~~~~~~~~~~~~~~~~~~~
#
# 1. Copy/paste the BSD 4 clause license (https://en.wikipedia.org/wiki/BSD_licenses)
# into a text file. Read the file and count the occurrences of each
# word within the file. Store the words' occurrence number in a dictionary.
#
# 2. Write an executable python command ``count_words.py`` that parses
# a list of input files provided after the ``--input`` parameter.
# The dictionary of occurrences is saved in a csv file provided by ``--output``,
# with default value word_count.csv.
# Use:
# - open
# - regular expression
# - argparse (https://docs.python.org/3/howto/argparse.html)
###############################################################################
# Exercise 4: OOP
# ~~~~~~~~~~~~~~~
#
# 1. Create a class ``Employee`` with 2 attributes provided in the
# constructor: ``name``, ``years_of_service``, and one method
# ``salary`` which returns ``1500 + 100 * years_of_service``.
#
# 2. Create a subclass ``Manager`` which redefine ``salary`` method
# ``2500 + 120 * years_of_service``.
#
# 3. Create a small dictionary-based database where the key is the
# employee's name. Populate the database with: samples =
# Employee('lucy', 3), Employee('john', 1), Manager('julie', 10),
# Manager('paul', 3)
#
# 4. Return a table made of (name, salary) rows, i.e. a list of lists
# [[name, salary], ...]
#
# 5. Compute the average salary
|
bot.py
|
import socket
import time
import threading
from lib.evil import bitcoin_mine, harvest_user_pass
from lib.p2p import find_bot, bot_server
from lib.files import download_from_pastebot, filestore, p2p_upload_file, save_valuable, upload_valuables_to_pastebot, valuables
def p2p_upload(fn):
sconn = find_bot()
sconn.send(bytes("FILE", "ascii"))
p2p_upload_file(sconn, fn)
def p2p_echo():
try:
sconn = find_bot()
# Set verbose to true so we can view the encoded packets
sconn.verbose = True
sconn.send(bytes("ECHO", "ascii"))
while 1:
# Read a message and send it to the other bot
msg = input("Echo> ")
byte_msg = bytes(msg, "ascii")
sconn.send(byte_msg)
# This other bot should echo it back to us
echo = sconn.recv()
# Ensure that what we sent is what we got back
assert(echo == byte_msg)
# If the msg is X, then terminate the connection
if msg.lower() == 'x' or msg.lower() == "exit" or msg.lower() == "quit":
sconn.close()
break
except socket.error:
print("Connection closed unexpectedly")
if __name__ == "__main__":
# Start a new thread to accept P2P echo or P2P upload requests
thr = threading.Thread(target=bot_server)
# Daemon threads exit when the main program exits
# This means the server will shut down automatically when we quit
thr.daemon = True  # setDaemon() is deprecated; attribute assignment is the modern form
thr.start()
# Wait for a small amount of time so that the output
# doesn't play around with our "command prompt"
time.sleep(0.3)
while 1:
# Naive command loop
# There are better ways to do this, but the code should be clear
raw_cmd = input("Enter command: ")
cmd = raw_cmd.split()
if not cmd:
print("You need to enter a command...")
continue
# P2P Commands
# Echo is primarily meant for testing (the receiver will echo what it hears back)
# Upload allows for peer-to-peer file transfer to other bots
if cmd[0].lower() == "p2p":
if len(cmd) > 1:
if cmd[1].lower() == "echo":
p2p_echo()
if cmd[1].lower() == "upload":
if len(cmd) == 3:
p2p_upload(cmd[2])
else:
print("Format is 'p2p upload <filename>'")
else:
print("The p2p command requires either 'echo' or 'upload' after it")
# Download a file (update or data) from pastebot.net
elif cmd[0].lower() == "download":
if len(cmd) == 2:
download_from_pastebot(cmd[1])
else:
print("The download command requires a filename afterwards")
# Upload the valuables/secrets the bot has discovered to pastebot.net
elif cmd[0].lower() == "upload":
if len(cmd) == 2:
upload_valuables_to_pastebot(cmd[1])
else:
print("The upload command requires a filename afterwards")
# Mine for Bitcoins
# This is far more intensive in the real world, but we'll just pretend ;)
elif cmd[0].lower() == "mine":
print("Mining for Bitcoins...")
bit_addr = bitcoin_mine()
save_valuable("Bitcoin: %s" % bit_addr)
print("Mined and found Bitcoin address: %s" % bit_addr)
# Harvest a user's username and password (userpass)
elif cmd[0].lower() == "harvest":
userpass = harvest_user_pass()
save_valuable("Username/Password: %s %s" % userpass)
print("Found user pass: %s" % (userpass,))
# List files and valuables (secrets such as userpass & bitcoins) the bot has
elif cmd[0].lower() == "list":
print("Files stored by this bot: %s" % ", ".join(filestore.keys()))
print("Valuables stored by this bot: %s" % valuables)
# Exit command
elif cmd[0].lower() == "quit" or cmd[0].lower() == "exit":
break
else:
print("Command not recognised")
|
Chap10_Example10.28.py
|
from threading import *
class abc:
def __init__(self,seat_available):
self.seat_available = seat_available
self.mylock = Lock()# LO1
print(type(self.mylock))
def abc_reserveseat(self, seat_required):
self.mylock.acquire()# LO2
print("Number of seats remaining : ", self.seat_available)
if self.seat_available >= seat_required:
print(f"{current_thread().name } was alloted the seat No. L{self.seat_available}")
self.seat_available = self.seat_available - 1
else:
print("All the seats are booked now Sorry !")
self.mylock.release()# LO3
obj_abc = abc(2)
myt1 = Thread(target = obj_abc.abc_reserveseat, args = (1,), name ='Saurabh')
myt2 = Thread(target = obj_abc.abc_reserveseat, args = (1,), name ='Nilesh')
myt3 = Thread(target = obj_abc.abc_reserveseat, args = (1,), name ='Divya')
myt1.start()
myt2.start()
myt3.start()
|
parasol.py
|
# Copyright (C) 2015-2021 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import re
import subprocess
import sys
import tempfile
import time
from queue import Empty, Queue
from shutil import which
from threading import Thread
from toil.batchSystems.abstractBatchSystem import (BatchSystemSupport,
UpdatedBatchJobInfo)
from toil.common import Toil
from toil.test import get_temp_file
from toil.lib.iterables import concat
logger = logging.getLogger(__name__)
class ParasolBatchSystem(BatchSystemSupport):
"""
The interface for Parasol.
"""
@classmethod
def supportsWorkerCleanup(cls):
return False
@classmethod
def supportsAutoDeployment(cls):
return False
def __init__(self, config, maxCores, maxMemory, maxDisk):
super(ParasolBatchSystem, self).__init__(config, maxCores, maxMemory, maxDisk)
if maxMemory != sys.maxsize:
logger.warning('The Parasol batch system does not support maxMemory.')
# Keep the name of the results file for the pstat2 command..
command = config.parasolCommand
if os.path.sep not in command:
resolved = which(command)
if resolved is None:
# shutil.which returns None when the executable is not on PATH
raise RuntimeError("Can't find %s on PATH." % command)
command = resolved
logger.debug('Using Parasol at %s', command)
self.parasolCommand = command
jobStoreType, path = Toil.parseLocator(config.jobStore)
if jobStoreType != 'file':
raise RuntimeError("The parasol batch system doesn't currently work with any "
"jobStore type except file jobStores.")
self.parasolResultsDir = tempfile.mkdtemp(dir=os.path.abspath(path))
logger.debug("Using parasol results dir: %s", self.parasolResultsDir)
# In Parasol, each results file corresponds to a separate batch, and all jobs in a batch
# have the same cpu and memory requirements. The keys to this dictionary are the (cpu,
# memory) tuples for each batch. A new batch is created whenever a job has a new unique
# combination of cpu and memory requirements.
self.resultsFiles = dict()
self.maxBatches = config.parasolMaxBatches
# Allows the worker process to send back the IDs of jobs that have finished, so the batch
# system can decrease its used cpus counter
self.cpuUsageQueue = Queue()
# Also stores finished job IDs, but is read by getUpdatedJobIDs().
self.updatedJobsQueue = Queue()
# Use this to stop the worker when shutting down
self.running = True
self.worker = Thread(target=self.updatedJobWorker, args=())
self.worker.start()
self.usedCpus = 0
self.jobIDsToCpu = {}
# Set of jobs that have been issued but aren't known to have finished or been killed yet.
# Jobs that end by themselves are removed in getUpdatedJob, and jobs that are killed are
# removed in killBatchJobs.
self.runningJobs = set()
def _runParasol(self, command, autoRetry=True):
"""
Issues a parasol command using popen to capture the output. If the command fails then it
will try pinging parasol until it gets a response. When it gets a response it will
recursively call the issue parasol command, repeating this pattern for a maximum of N
times. The final exit value will reflect this.
"""
command = list(concat(self.parasolCommand, command))
while True:
logger.debug('Running %r', command)
process = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=-1)
stdout, stderr = process.communicate()
status = process.wait()
for line in stderr.decode('utf-8').split('\n'):
if line: logger.warning(line)
if status == 0:
return 0, stdout.decode('utf-8').split('\n')
message = 'Command %r failed with exit status %i' % (command, status)
if autoRetry:
logger.warning(message)
else:
logger.error(message)
return status, None
logger.warning('Waiting 10s before trying again')
time.sleep(10)
parasolOutputPattern = re.compile("your job ([0-9]+).*")
def issueBatchJob(self, jobDesc):
"""
Issues parasol with job commands.
"""
self.checkResourceRequest(jobDesc.memory, jobDesc.cores, jobDesc.disk)
MiB = 1 << 20
truncatedMemory = jobDesc.memory // MiB * MiB
# Look for a batch for jobs with these resource requirements, with
# the memory rounded down to the nearest megabyte. Rounding down
# means the new job can't ever decrease the memory requirements
# of jobs already in the batch.
if len(self.resultsFiles) >= self.maxBatches:
raise RuntimeError( 'Number of batches reached limit of %i' % self.maxBatches)
try:
results = self.resultsFiles[(truncatedMemory, jobDesc.cores)]
except KeyError:
results = get_temp_file(rootDir=self.parasolResultsDir)
self.resultsFiles[(truncatedMemory, jobDesc.cores)] = results
# Prefix the command with environment overrides, optionally looking them up from the
# current environment if the value is None
command = ' '.join(concat('env', self.__environment(), jobDesc.command))
parasolCommand = ['-verbose',
'-ram=%i' % jobDesc.memory,
'-cpu=%i' % jobDesc.cores,
'-results=' + results,
'add', 'job', command]
# Deal with the cpus
self.usedCpus += jobDesc.cores
while True: # Process finished results with no wait
try:
jobID = self.cpuUsageQueue.get_nowait()
except Empty:
break
if jobID in list(self.jobIDsToCpu.keys()):
self.usedCpus -= self.jobIDsToCpu.pop(jobID)
assert self.usedCpus >= 0
while self.usedCpus > self.maxCores: # If we are still waiting
jobID = self.cpuUsageQueue.get()
if jobID in list(self.jobIDsToCpu.keys()):
self.usedCpus -= self.jobIDsToCpu.pop(jobID)
assert self.usedCpus >= 0
# Now keep going
while True:
line = self._runParasol(parasolCommand)[1][0]
match = self.parasolOutputPattern.match(line)
if match is None:
# This is because parasol add job will return success, even if the job was not
# properly issued!
                logger.debug('We failed to properly add the job; we will try again in 5s.')
time.sleep(5)
else:
jobID = int(match.group(1))
self.jobIDsToCpu[jobID] = jobDesc.cores
self.runningJobs.add(jobID)
logger.debug("Got the parasol job id: %s from line: %s" % (jobID, line))
return jobID
def setEnv(self, name, value=None):
if value and ' ' in value:
raise ValueError('Parasol does not support spaces in environment variable values.')
return super(ParasolBatchSystem, self).setEnv(name, value)
def __environment(self):
return (k + '=' + (os.environ[k] if v is None else v) for k, v in list(self.environment.items()))
def killBatchJobs(self, jobIDs):
"""Kills the given jobs, represented as Job ids, then checks they are dead by checking
they are not in the list of issued jobs.
"""
while True:
for jobID in jobIDs:
if jobID in self.runningJobs:
self.runningJobs.remove(jobID)
exitValue = self._runParasol(['remove', 'job', str(jobID)],
autoRetry=False)[0]
logger.debug("Tried to remove jobID: %i, with exit value: %i" % (jobID, exitValue))
runningJobs = self.getIssuedBatchJobIDs()
if set(jobIDs).difference(set(runningJobs)) == set(jobIDs):
break
            logger.warning('Tried to kill some jobs, but something happened and they are still '
                           'going; will try again in 5s.')
time.sleep(5)
# Update the CPU usage, because killed jobs aren't written to the results file.
for jobID in jobIDs:
if jobID in list(self.jobIDsToCpu.keys()):
self.usedCpus -= self.jobIDsToCpu.pop(jobID)
runningPattern = re.compile(r'r\s+([0-9]+)\s+[\S]+\s+[\S]+\s+([0-9]+)\s+[\S]+')
def getJobIDsForResultsFile(self, resultsFile):
"""
Get all queued and running jobs for a results file.
"""
jobIDs = []
for line in self._runParasol(['-extended', 'list', 'jobs'])[1]:
fields = line.strip().split()
if len(fields) == 0 or fields[-1] != resultsFile:
continue
jobID = fields[0]
jobIDs.append(int(jobID))
return set(jobIDs)
def getIssuedBatchJobIDs(self):
"""
Gets the list of jobs issued to parasol in all results files, but not including jobs
created by other users.
"""
issuedJobs = set()
for resultsFile in self.resultsFiles.values():
issuedJobs.update(self.getJobIDsForResultsFile(resultsFile))
return list(issuedJobs)
def getRunningBatchJobIDs(self):
"""
Returns map of running jobIDs and the time they have been running.
"""
# Example lines..
# r 5410186 benedictpaten worker 1247029663 localhost
# r 5410324 benedictpaten worker 1247030076 localhost
runningJobs = {}
issuedJobs = self.getIssuedBatchJobIDs()
for line in self._runParasol(['pstat2'])[1]:
if line != '':
match = self.runningPattern.match(line)
if match is not None:
jobID = int(match.group(1))
startTime = int(match.group(2))
if jobID in issuedJobs: # It's one of our jobs
runningJobs[jobID] = time.time() - startTime
return runningJobs
def getUpdatedBatchJob(self, maxWait):
while True:
try:
item = self.updatedJobsQueue.get(timeout=maxWait)
except Empty:
return None
try:
self.runningJobs.remove(item.jobID)
except KeyError:
# We tried to kill this job, but it ended by itself instead, so skip it.
pass
else:
return item
def updatedJobWorker(self):
"""
We use the parasol results to update the status of jobs, adding them
to the list of updated jobs.
Results have the following structure.. (thanks Mark D!)
int status; /* Job status - wait() return format. 0 is good. */
char *host; /* Machine job ran on. */
char *jobId; /* Job queuing system job ID */
char *exe; /* Job executable file (no path) */
int usrTicks; /* 'User' CPU time in ticks. */
int sysTicks; /* 'System' CPU time in ticks. */
unsigned submitTime; /* Job submission time in seconds since 1/1/1970 */
unsigned startTime; /* Job start time in seconds since 1/1/1970 */
unsigned endTime; /* Job end time in seconds since 1/1/1970 */
char *user; /* User who ran job */
char *errFile; /* Location of stderr file on host */
Plus you finally have the command name.
"""
resultsFiles = set()
resultsFileHandles = []
try:
while self.running:
# Look for any new results files that have been created, and open them
newResultsFiles = set(os.listdir(self.parasolResultsDir)).difference(resultsFiles)
for newFile in newResultsFiles:
newFilePath = os.path.join(self.parasolResultsDir, newFile)
resultsFileHandles.append(open(newFilePath, 'r'))
resultsFiles.add(newFile)
for fileHandle in resultsFileHandles:
while self.running:
line = fileHandle.readline()
if not line:
break
assert line[-1] == '\n'
(status, host, jobId, exe, usrTicks, sysTicks, submitTime, startTime,
endTime, user, errFile, command) = line[:-1].split(None, 11)
status = int(status)
jobId = int(jobId)
if os.WIFEXITED(status):
status = os.WEXITSTATUS(status)
else:
status = -status
self.cpuUsageQueue.put(jobId)
startTime = int(startTime)
endTime = int(endTime)
if endTime == startTime:
                            # Both start and end time are integers, so to get sub-second
# accuracy we use the ticks reported by Parasol as an approximation.
# This isn't documented but what Parasol calls "ticks" is actually a
# hundredth of a second. Parasol does the unit conversion early on
# after a job finished. Search paraNode.c for ticksToHundreths. We
# also cheat a little by always reporting at least one hundredth of a
# second.
usrTicks = int(usrTicks)
sysTicks = int(sysTicks)
wallTime = float( max( 1, usrTicks + sysTicks) ) * 0.01
else:
wallTime = float(endTime - startTime)
self.updatedJobsQueue.put(UpdatedBatchJobInfo(jobID=jobId, exitStatus=status, wallTime=wallTime, exitReason=None))
time.sleep(1)
except:
logger.warning("Error occurred while parsing parasol results files.")
raise
finally:
for fileHandle in resultsFileHandles:
fileHandle.close()
def shutdown(self):
self.killBatchJobs(self.getIssuedBatchJobIDs()) # cleanup jobs
for results in self.resultsFiles.values():
exitValue = self._runParasol(['-results=' + results, 'clear', 'sick'],
autoRetry=False)[0]
            if exitValue != 0:
logger.warning("Could not clear sick status of the parasol batch %s" % results)
exitValue = self._runParasol(['-results=' + results, 'flushResults'],
autoRetry=False)[0]
            if exitValue != 0:
logger.warning("Could not flush the parasol batch %s" % results)
self.running = False
logger.debug('Joining worker thread...')
self.worker.join()
logger.debug('... joined worker thread.')
for results in list(self.resultsFiles.values()):
os.remove(results)
os.rmdir(self.parasolResultsDir)
@classmethod
def setOptions(cls, setOption):
from toil.common import iC
setOption("parasolCommand", None, None, 'parasol')
setOption("parasolMaxBatches", int, iC(1), 10000)
|
lam_helper.py
|
import threading
import collections
from rplidar import RPLidar
try:
import Queue
except ImportError:
import queue as Queue
class AsynchronousGenerator:
"""
The AsynchronousGenerator class is used to buffer output of a
generator between iterable.__next__ or iterable.next calls. This
allows the generator to continue producing output even if the
previous output has not yet been consumed. The buffered structure is
particularly useful when a process that consumes data from a
generator is unable to finish its task at a rate comparable to which
data is produced such as writing a large amount of data to a
low-bandwidth I/O stream at the same time the data is produced.
>>> for chunk in AsynchronousGenerator(function=makes_lots_of_data):
... really_slow_iostream.write(chunk)
source: https://www.reddit.com/r/Python/comments/ew9is/buffered_asynchronous_generators_for_parallel/
"""
def __init__(self, function, args=(), kwargs={}, start=True, maxsize=0):
self.generator = iter(function(*args, **kwargs))
self.thread = threading.Thread(target=self._generatorcall)
self.q = Queue.Queue(maxsize=maxsize)
self.next = self.__next__
if start:
self.thread.start()
def __iter__(self):
return self
def __next__(self):
done, item = self.q.get()
if done:
raise StopIteration
else:
return item
def _generatorcall(self):
try:
for output in self.generator:
self.q.put((False, output))
finally:
self.q.put((True, None))
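

# --- Illustration (not used by the module) ------------------------------------
# A minimal, self-contained sketch of the buffering behaviour described in the
# docstring above: the producer thread keeps filling the queue while a slower
# consumer drains it. The producer and the artificial delay are made up for the
# example.
def _buffering_demo():
    import time

    def produce_chunks():
        for i in range(5):
            yield 'chunk-%d' % i  # produced as fast as the queue accepts them

    for chunk in AsynchronousGenerator(function=produce_chunks):
        time.sleep(0.1)  # pretend the consumer (e.g. slow I/O) lags behind
        print(chunk)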
class wrapper(object):
def __init__(self, generator):
self.__gen = generator()
def __iter__(self):
return self
def __next__(self):
self.current = None
        while self.current is None:
            try:
                self.current = next(self.__gen)
            except Exception:
print("ERROR: Lidar init failed. Please restart.")
quit()
return self.current
def __call__(self):
return self
PORT_NAME = 'COM15'
@wrapper
def gen():
lidar = RPLidar(PORT_NAME)
return lidar.iter_scans()
|
chameleon.py
|
# -*- coding: utf-8 -*-
# (c) Nelen & Schuurmans, see LICENSE.rst.
import RPi.GPIO as GPIO
import base64
import json
import random
import time
import urllib2
from datetime import datetime
from threading import Thread
BASE_URL = 'https://jenkins.lizard.net/job/nens/job'
USERNAME = 'sa_stoplicht_jenk'
PASSWORD = 'A9TXRfzy6QwoZnGrMFI2'
STATUS = 'startup'
ALIVE = True
NORTH_RED = 5
NORTH_GREEN = 3
EAST_RED = 19
EAST_ORANGE = 21
EAST_GREEN = 15
SOUTH_RED = 11
SOUTH_GREEN = 7
WEST_RED = 13
WEST_ORANGE = 29
WEST_GREEN = 23
ALL = [3, 5, 7, 11, 13, 15, 19, 21, 23, 29]
REDS = [5, 19, 11, 13]
ORANGES = [21, 29]
GREENS = [3, 15, 7, 23]
NORTH = [5, 3]
EAST = [19, 21, 15]
SOUTH = [11, 7]
WEST = [13, 29, 23]
NONE = []
NEIGHBOURS = {
NORTH_RED : [NORTH_GREEN, EAST_RED, EAST_ORANGE],
NORTH_GREEN : [NORTH_RED, EAST_GREEN, EAST_ORANGE],
EAST_RED : [EAST_ORANGE, NORTH_RED],
EAST_ORANGE : [EAST_RED, EAST_GREEN] + NORTH,
EAST_GREEN : [EAST_ORANGE, NORTH_GREEN]
}
ACROSS = {
NORTH_RED : 11,
NORTH_GREEN : 7,
EAST_RED : 13,
EAST_ORANGE : 29,
EAST_GREEN : 23
}
ON = 8
OFF = 10
MANUAL = 12
AUTO = 16
BUTTON = 18
IN = [ON, OFF, MANUAL, AUTO, BUTTON]
BUTTON_PRESSED = False
MODE_OFF = 1
MODE_MANUAL = 2
MODE_STANDUP = 3
MODE_LUNCH = 4
MODE_STATUS = 5
MORSE = {
'a':'.-',
'b':'-...',
'c':'-.-.',
'd':'-..',
'e':'.',
'f':'..-.',
'g':'--.',
'h':'....',
'i':'..',
'j':'.---',
'k':'-.-',
'l':'.-..',
'm':'--',
'n':'-.',
'o':'---',
'p':'.--.',
'q':'--.-',
'r':'.-.',
's':'...',
't':'-',
'u':'..-',
'v':'...-',
'w':'.--',
'x':'-..-',
'y':'-.--',
'z':'--..',
}
def getjenkins(uri):
req = urllib2.Request("{}/{}".format(BASE_URL, uri))
base64string = base64.b64encode('{}:{}'.format(USERNAME, PASSWORD))
req.add_header("Authorization", "Basic {}".format(base64string))
f = urllib2.urlopen(req)
r = f.read()
return json.loads(r)
def fetchstatus():
global STATUS
while ALIVE:
jobs = ["hydra-core", "lizard-client", "lizard-nxt", "threedi"]
response = []
for job in jobs:
res = getjenkins("{}/api/json?pretty=true".format(job))
for branch in res["jobs"]:
if branch["name"] == "master" or branch["name"].startswith("fixes"):
uri = "{}/job/{}/lastBuild/api/json?pretty=true".format(job, branch["name"])
try:
res = getjenkins(uri)
response.append(res["result"])
if res["building"]:
response.append("BUILDING")
except:
response.append("ERROR")
new = 'none'
        for name, status in [('Failure', 'broken'), ('Aborted', 'broken'), ('Building', 'building'),
                             ('Unstable', 'unstable'), ('Disabled', 'unstable'), ('Success', 'stable')]:
            if name.upper() in response:
new = status
break
STATUS = new
time.sleep(15)
def setup():
GPIO.setmode(GPIO.BOARD)
for pin in ALL:
GPIO.setup(pin, GPIO.OUT)
GPIO.output(pin, False)
for pin in IN:
GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.add_event_detect(BUTTON, GPIO.RISING, callback=pressed, bouncetime=500)
def setall(on):
if not get(ON):
on = NONE
for pin in ALL:
GPIO.output(pin, pin in on)
def get(pin):
return True if GPIO.input(pin) == 0 else False
def pressed(channel):
global BUTTON_PRESSED
BUTTON_PRESSED = True
def getmode():
if not get(ON):
return MODE_OFF
if get(MANUAL):
return MODE_MANUAL
now = datetime.now()
if now.weekday() > 4:
return MODE_OFF
if now.hour < 7 or now.hour > 17:
return MODE_OFF
if now.hour == 12 and now.minute > 14 and now.minute < 30:
return MODE_STANDUP
if now.hour == 12 and now.minute > 29:
return MODE_LUNCH
return MODE_STATUS
def loop(t, lists, mode):
while getmode() == mode:
walk(t, lists)
def walk(t, lists):
for lights in lists:
setall(lights)
time.sleep(t)
def dance(mode):
prev_last = EAST_GREEN
last = EAST_ORANGE
i = 0
while getmode() == mode and STATUS == 'building':
n = list(NEIGHBOURS[last])
n.remove(prev_last)
r = int(random.random() * len(n))
setall([last, ACROSS[last], n[r], ACROSS[n[r]]])
time.sleep(0.08)
setall([n[r], ACROSS[n[r]]])
time.sleep(0.08)
prev_last = last
last = n[r]
i += 1
def traffic(mode):
global BUTTON_PRESSED
if getmode() != mode:
return
setall([NORTH_RED, EAST_GREEN, SOUTH_RED, WEST_GREEN])
while getmode() == mode and not BUTTON_PRESSED:
time.sleep(0.1)
BUTTON_PRESSED = False
time.sleep(1)
if getmode() != mode:
return
setall([NORTH_RED, EAST_ORANGE, SOUTH_RED, WEST_ORANGE])
time.sleep(2)
if getmode() != mode:
return
setall(REDS)
time.sleep(1)
if getmode() != mode:
return
setall([NORTH_GREEN, EAST_RED, SOUTH_GREEN, WEST_RED])
while getmode() == mode and not BUTTON_PRESSED:
time.sleep(0.1)
BUTTON_PRESSED = False
time.sleep(1)
if getmode() != mode:
return
for i in range(0, 5):
walk(0.2, [[EAST_RED, WEST_RED], [NORTH_GREEN, EAST_RED, SOUTH_GREEN, WEST_RED]])
if getmode() != mode:
return
setall(REDS)
time.sleep(1)
def morse(unit, msg, lights, mode):
if getmode() == mode:
for char in msg:
if char == ' ':
walk(unit * 7, [NONE])
continue
for code in MORSE[char]:
if code == '.':
walk(unit, [lights])
if code == '-':
walk(unit * 3, [lights])
walk(unit, [NONE])
walk(unit * 2, [NONE])
walk(unit * 11, [NONE])
def available():
now = datetime.now()
return now.hour < 17
def dontwalk(lights):
return intersect(lights, NORTH + EAST + WEST) + [SOUTH_RED]
def intersect(a, b):
return list(set(a) & set(b))
def status(mode):
while getmode() == mode:
if STATUS == 'building':
dance(mode)
setall(NONE)
elif STATUS == 'stable':
setall(GREENS if available() else dontwalk(GREENS))
elif STATUS == 'unstable':
setall(ORANGES if available() else dontwalk(ORANGES))
elif STATUS == 'startup':
ZERO = []
ONE = [WEST_GREEN, EAST_GREEN]
TWO = ONE + ORANGES
THREE = TWO + [WEST_RED, EAST_RED]
walk(0.37, [ZERO, ONE, TWO, THREE])
elif STATUS == 'none':
HELP = [SOUTH_RED, WEST_ORANGE]
FLASH = HELP + NORTH + EAST
walk(0.02, [FLASH, HELP, HELP, HELP, FLASH, HELP])
else:
setall(REDS)
time.sleep(1)
setup()
t = Thread(target=fetchstatus)
t.start()
try:
while True:
loop(0.37, [NONE], MODE_OFF)
traffic(MODE_MANUAL)
loop(0.74, [NONE, ORANGES], MODE_STANDUP)
morse(0.125, 'de gebruiker is pi het wachtwoord is niethuilen', ORANGES, MODE_LUNCH)
status(MODE_STATUS)
except KeyboardInterrupt:
GPIO.cleanup()
ALIVE = False
print (" - Killed! Waiting for the kid to die...")
except Exception as e:
GPIO.cleanup()
ALIVE = False
raise e
|
wxTerminal.py
|
#!/usr/bin/env python
#
# A simple terminal application with wxPython.
#
# (C) 2001-2020 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
import codecs
from serial.tools.miniterm import unichr
import serial
import threading
import wx
import wx.lib.newevent
import wxSerialConfigDialog
import time
from datetime import datetime
try:
unichr
except NameError:
unichr = chr
# ----------------------------------------------------------------------
# Create an own event type, so that GUI updates can be delegated
# this is required as on some platforms only the main thread can
# access the GUI without crashing. wxMutexGuiEnter/wxMutexGuiLeave
# could be used too, but an event is more elegant.
SerialRxEvent, EVT_SERIALRX = wx.lib.newevent.NewEvent()
SerialTxEvent, EVT_SERIALTX = wx.lib.newevent.NewEvent()
SERIALRX = wx.NewEventType()
SERIALTX = wx.NewEventType()
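
# --- Illustration (not used by the application) --------------------------------
# A minimal sketch of the pattern described above: a worker thread never touches
# widgets directly, it only posts one of the custom events; the handler bound
# with EVT_SERIALRX then runs later in the GUI thread. The helper below is
# hypothetical and mirrors what ComPortRcvThread does further down.
def _post_rx_from_worker(window, payload):
    """Hand *payload* (bytes) from any thread to *window* without touching the GUI."""
    wx.PostEvent(window, SerialRxEvent(data=payload))
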
# ----------------------------------------------------------------------
ID_CLEAR = wx.NewIdRef()
ID_SAVEAS = wx.NewIdRef()
ID_SETTINGS = wx.NewIdRef()
ID_TERM = wx.NewIdRef()
ID_EXIT = wx.NewIdRef()
ID_RTS = wx.NewIdRef()
ID_DTR = wx.NewIdRef()
NEWLINE_CR = 0
NEWLINE_LF = 1
NEWLINE_CRLF = 2
class TerminalSetup:
"""
Placeholder for various terminal settings. Used to pass the
options to the TerminalSettingsDialog.
"""
def __init__(self):
self.echo = False
self.unprintable = False
self.newline = NEWLINE_CRLF
class TerminalSettingsDialog(wx.Dialog):
"""Simple dialog with common terminal settings like echo, newline mode."""
def __init__(self, *args, **kwds):
self.settings = kwds['settings']
del kwds['settings']
# begin wxGlade: TerminalSettingsDialog.__init__
kwds["style"] = wx.DEFAULT_DIALOG_STYLE
wx.Dialog.__init__(self, *args, **kwds)
self.checkbox_echo = wx.CheckBox(self, -1, "Local Echo")
self.checkbox_unprintable = wx.CheckBox(self, -1, "Show unprintable characters")
self.radio_box_newline = wx.RadioBox(self, -1, "Newline Handling", choices=["CR only", "LF only", "CR+LF"], majorDimension=0, style=wx.RA_SPECIFY_ROWS)
self.sizer_4_staticbox = wx.StaticBox(self, -1, "Input/Output")
self.button_ok = wx.Button(self, wx.ID_OK, "")
self.button_cancel = wx.Button(self, wx.ID_CANCEL, "")
self.__set_properties()
self.__do_layout()
# end wxGlade
self.__attach_events()
self.checkbox_echo.SetValue(self.settings.echo)
self.checkbox_unprintable.SetValue(self.settings.unprintable)
self.radio_box_newline.SetSelection(self.settings.newline)
def __set_properties(self):
# begin wxGlade: TerminalSettingsDialog.__set_properties
self.SetTitle("Terminal Settings")
self.radio_box_newline.SetSelection(0)
self.button_ok.SetDefault()
# end wxGlade
def __do_layout(self):
# begin wxGlade: TerminalSettingsDialog.__do_layout
sizer_2 = wx.BoxSizer(wx.VERTICAL)
sizer_3 = wx.BoxSizer(wx.HORIZONTAL)
self.sizer_4_staticbox.Lower()
sizer_4 = wx.StaticBoxSizer(self.sizer_4_staticbox, wx.VERTICAL)
sizer_4.Add(self.checkbox_echo, 0, wx.ALL, 4)
sizer_4.Add(self.checkbox_unprintable, 0, wx.ALL, 4)
sizer_4.Add(self.radio_box_newline, 0, 0, 0)
sizer_2.Add(sizer_4, 0, wx.EXPAND, 0)
sizer_3.Add(self.button_ok, 0, 0, 0)
sizer_3.Add(self.button_cancel, 0, 0, 0)
sizer_2.Add(sizer_3, 0, wx.ALL | wx.ALIGN_RIGHT, 4)
self.SetSizer(sizer_2)
sizer_2.Fit(self)
self.Layout()
# end wxGlade
def __attach_events(self):
self.Bind(wx.EVT_BUTTON, self.OnOK, id=self.button_ok.GetId())
self.Bind(wx.EVT_BUTTON, self.OnCancel, id=self.button_cancel.GetId())
def OnOK(self, events):
"""Update data wil new values and close dialog."""
self.settings.echo = self.checkbox_echo.GetValue()
self.settings.unprintable = self.checkbox_unprintable.GetValue()
self.settings.newline = self.radio_box_newline.GetSelection()
self.EndModal(wx.ID_OK)
def OnCancel(self, events):
"""Do not update data but close dialog."""
self.EndModal(wx.ID_CANCEL)
# end of class TerminalSettingsDialog
class TerminalFrame(wx.Frame):
"""Simple terminal program for wxPython"""
def __init__(self, *args, **kwds):
self.serial = serial.Serial()
self.serial.timeout = 0.5 # make sure that the alive event can be checked from time to time
self.settings = TerminalSetup() # placeholder for the settings
self.rcv_thread = None
self.trns_thread = None
self.rcv_alive = threading.Event()
self.trns_alive = threading.Event()
# begin wxGlade: TerminalFrame.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
# Menu Bar
self.frame_terminal_menubar = wx.MenuBar()
wxglade_tmp_menu = wx.Menu()
wxglade_tmp_menu.Append(ID_CLEAR, "&Clear", "", wx.ITEM_NORMAL)
wxglade_tmp_menu.Append(ID_SAVEAS, "&Save Text As...", "", wx.ITEM_NORMAL)
wxglade_tmp_menu.AppendSeparator()
wxglade_tmp_menu.Append(ID_TERM, "&Terminal Settings...", "", wx.ITEM_NORMAL)
wxglade_tmp_menu.AppendSeparator()
wxglade_tmp_menu.Append(ID_EXIT, "&Exit", "", wx.ITEM_NORMAL)
self.frame_terminal_menubar.Append(wxglade_tmp_menu, "&File")
wxglade_tmp_menu = wx.Menu()
wxglade_tmp_menu.Append(ID_RTS, "RTS", "", wx.ITEM_CHECK)
wxglade_tmp_menu.Append(ID_DTR, "&DTR", "", wx.ITEM_CHECK)
wxglade_tmp_menu.Append(ID_SETTINGS, "&Port Settings...", "", wx.ITEM_NORMAL)
self.frame_terminal_menubar.Append(wxglade_tmp_menu, "Serial Port")
self.SetMenuBar(self.frame_terminal_menubar)
# Menu Bar end
self.text_ctrl_output = wx.TextCtrl(self, -1, "", style=wx.TE_MULTILINE | wx.TE_READONLY)
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_MENU, self.OnClear, id=ID_CLEAR)
self.Bind(wx.EVT_MENU, self.OnSaveAs, id=ID_SAVEAS)
self.Bind(wx.EVT_MENU, self.OnTermSettings, id=ID_TERM)
self.Bind(wx.EVT_MENU, self.OnExit, id=ID_EXIT)
self.Bind(wx.EVT_MENU, self.OnRTS, id=ID_RTS)
self.Bind(wx.EVT_MENU, self.OnDTR, id=ID_DTR)
self.Bind(wx.EVT_MENU, self.OnPortSettings, id=ID_SETTINGS)
# end wxGlade
self.__attach_events() # register events
self.OnPortSettings(None) # call setup dialog on startup, opens port
if not self.rcv_alive.isSet():
self.Close()
def StartRcvThread(self):
"""Start the receiver thread"""
self.rcv_thread = threading.Thread(target=self.ComPortRcvThread)
self.rcv_thread.setDaemon(1)
self.rcv_alive.set()
self.rcv_thread.start()
self.serial.rts = True
self.serial.dtr = True
self.frame_terminal_menubar.Check(ID_RTS, self.serial.rts)
self.frame_terminal_menubar.Check(ID_DTR, self.serial.dtr)
def StopRcvThread(self):
"""Stop the receiver thread, wait until it's finished."""
if self.rcv_thread is not None:
self.rcv_alive.clear() # clear alive event for thread
self.rcv_thread.join() # wait until thread has finished
self.rcv_thread = None
def StartTrnsThread(self):
"""Start the receiver thread"""
self.trns_thread = threading.Thread(target=self.ComPortTrnsThread)
self.trns_thread.setDaemon(1)
self.trns_alive.set()
self.trns_thread.start()
def StopTrnsThread(self):
"""Stop the transmission thread, wait until it's finished."""
if self.trns_thread is not None:
self.trns_alive.clear() # clear alive event for thread
self.trns_thread.join() # wait until thread has finished
self.trns_thread = None
def __set_properties(self):
# begin wxGlade: TerminalFrame.__set_properties
self.SetTitle("Serial Terminal")
self.SetSize((546, 383))
self.text_ctrl_output.SetFont(wx.Font(9, wx.MODERN, wx.NORMAL, wx.NORMAL, 0, ""))
# end wxGlade
def __do_layout(self):
# begin wxGlade: TerminalFrame.__do_layout
sizer_1 = wx.BoxSizer(wx.VERTICAL)
sizer_1.Add(self.text_ctrl_output, 1, wx.EXPAND, 0)
self.SetSizer(sizer_1)
self.Layout()
# end wxGlade
def __attach_events(self):
# register events at the controls
self.Bind(wx.EVT_MENU, self.OnClear, id=ID_CLEAR)
self.Bind(wx.EVT_MENU, self.OnSaveAs, id=ID_SAVEAS)
self.Bind(wx.EVT_MENU, self.OnExit, id=ID_EXIT)
self.Bind(wx.EVT_MENU, self.OnPortSettings, id=ID_SETTINGS)
self.Bind(wx.EVT_MENU, self.OnTermSettings, id=ID_TERM)
self.text_ctrl_output.Bind(wx.EVT_CHAR, self.OnKey)
self.Bind(wx.EVT_CHAR_HOOK, self.OnKey)
self.Bind(EVT_SERIALRX, self.OnSerialRead)
self.Bind(EVT_SERIALTX, self.OnSerialWrite)
self.Bind(wx.EVT_CLOSE, self.OnClose)
def OnExit(self, event): # wxGlade: TerminalFrame.<event_handler>
"""Menu point Exit"""
self.Close()
def OnClose(self, event):
"""Called on application shutdown."""
self.StopRcvThread() # stop reader thread
self.serial.close() # cleanup
self.Destroy() # close windows, exit app
def OnSaveAs(self, event): # wxGlade: TerminalFrame.<event_handler>
"""Save contents of output window."""
with wx.FileDialog(
None,
"Save Text As...",
".",
"",
"Text File|*.txt|All Files|*",
                wx.FD_SAVE) as dlg:
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
with codecs.open(filename, 'w', encoding='utf-8') as f:
                    text = self.text_ctrl_output.GetValue()
f.write(text)
def OnClear(self, event): # wxGlade: TerminalFrame.<event_handler>
"""Clear contents of output window."""
self.text_ctrl_output.Clear()
def OnPortSettings(self, event): # wxGlade: TerminalFrame.<event_handler>
"""
Show the port settings dialog. The reader thread is stopped for the
settings change.
"""
if event is not None: # will be none when called on startup
self.StopRcvThread()
self.serial.close()
ok = False
while not ok:
with wxSerialConfigDialog.SerialConfigDialog(
self,
-1,
"",
show=wxSerialConfigDialog.SHOW_BAUDRATE | wxSerialConfigDialog.SHOW_FORMAT | wxSerialConfigDialog.SHOW_FLOW,
serial=self.serial) as dialog_serial_cfg:
dialog_serial_cfg.CenterOnParent()
result = dialog_serial_cfg.ShowModal()
# open port if not called on startup, open it on startup and OK too
if result == wx.ID_OK or event is not None:
try:
self.serial.open()
except serial.SerialException as e:
with wx.MessageDialog(self, str(e), "Serial Port Error", wx.OK | wx.ICON_ERROR)as dlg:
dlg.ShowModal()
else:
self.StartRcvThread()
self.StartTrnsThread()
self.SetTitle("Serial Terminal on {} [{},{},{},{}{}{}]".format(
self.serial.portstr,
self.serial.baudrate,
self.serial.bytesize,
self.serial.parity,
self.serial.stopbits,
' RTS/CTS' if self.serial.rtscts else '',
' Xon/Xoff' if self.serial.xonxoff else '',
))
ok = True
else:
# on startup, dialog aborted
self.rcv_alive.clear()
ok = True
def OnTermSettings(self, event): # wxGlade: TerminalFrame.<event_handler>
"""\
Menu point Terminal Settings. Show the settings dialog
with the current terminal settings.
"""
with TerminalSettingsDialog(self, -1, "", settings=self.settings) as dialog:
dialog.CenterOnParent()
dialog.ShowModal()
def OnKey(self, event):
"""\
Key event handler. If the key is in the ASCII range, write it to the
serial port. Newline handling and local echo is also done here.
"""
code = event.GetUnicodeKey()
# if code < 256: # XXX bug in some versions of wx returning only capital letters
# code = event.GetKeyCode()
if code == 13: # is it a newline? (check for CR which is the RETURN key)
if self.settings.echo: # do echo if needed
self.text_ctrl_output.AppendText('\n')
if self.settings.newline == NEWLINE_CR:
self.serial.write(b'\r') # send CR
elif self.settings.newline == NEWLINE_LF:
self.serial.write(b'\n') # send LF
elif self.settings.newline == NEWLINE_CRLF:
self.serial.write(b'\r\n') # send CR+LF
else:
char = unichr(code)
if self.settings.echo: # do echo if needed
self.WriteText(char)
self.serial.write(char.encode('UTF-8', 'replace')) # send the character
event.StopPropagation()
def WriteText(self, text):
if self.settings.unprintable:
text = ''.join([c if (c >= ' ' and c != '\x7f') else unichr(0x2400 + ord(c)) for c in text])
self.text_ctrl_output.AppendText(text)
def OnSerialRead(self, event):
"""Handle input from the serial port."""
self.WriteText("<<" + event.data.decode('UTF-8', 'replace'))
def OnSerialWrite(self, event):
"""Handle output to serial port."""
self.WriteText(">>" + str(event.data) + "\r\n")
def ComPortRcvThread(self):
"""\
Thread that handles the incoming traffic. Does the basic input
transformation (newlines) and generates an SerialRxEvent
"""
while self.rcv_alive.isSet():
b = self.serial.read(self.serial.in_waiting or 1)
if b:
# newline transformation
if self.settings.newline == NEWLINE_CR:
b = b.replace(b'\r', b'\n')
elif self.settings.newline == NEWLINE_LF:
pass
elif self.settings.newline == NEWLINE_CRLF:
b = b.replace(b'\r\n', b'\n')
wx.PostEvent(self, SerialRxEvent(data=b))
def GetTime(self):
now = datetime.now() # current date and time
time = now.strftime("%H:%M:%S")
return time
def GetHour(self):
now = datetime.now() # current date and time
hour = now.strftime("%H")
return int(hour)
def GetMin(self):
now = datetime.now() # current date and time
min = now.strftime("%M")
return int(min)
def GetSec(self):
now = datetime.now() # current date and time
sec = now.strftime("%S")
return int(sec)
def ComPortTrnsThread(self):
"""\
Thread that handles the outgoing traffic
"""
        i = 0
        while self.trns_alive.isSet():
            # trans = 'a'
            val = i % 3
            if val == 0:
                trans = self.GetHour()
            elif val == 1:
                trans = self.GetMin()
            elif val == 2:
                trans = self.GetSec()
            i += 1
            # b = self.serial.write(bytes(trans, 'utf-8'))
            b = self.serial.write(trans.to_bytes(1, byteorder='big'))
            if b > 0:
                wx.PostEvent(self, SerialTxEvent(data=trans))
            time.sleep(1)
def OnRTS(self, event): # wxGlade: TerminalFrame.<event_handler>
self.serial.rts = event.IsChecked()
def OnDTR(self, event): # wxGlade: TerminalFrame.<event_handler>
self.serial.dtr = event.IsChecked()
# end of class TerminalFrame
class MyApp(wx.App):
def OnInit(self):
frame_terminal = TerminalFrame(None, -1, "")
self.SetTopWindow(frame_terminal)
frame_terminal.Show(True)
return 1
# end of class MyApp
if __name__ == "__main__":
app = MyApp(0)
app.MainLoop()
|
config.py
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
r"""
A Python module to maintain unique, run-wide *nibabies* settings.
This module implements the memory structures to keep a consistent, singleton config.
Settings are passed across processes via filesystem, and a copy of the settings for
each run and subject is left under
``<output_dir>/sub-<participant_id>/log/<run_unique_id>/nibabies.toml``.
Settings are stored using :abbr:`TOML (Tom's Obvious, Minimal Language)`.
The module has a :py:func:`~nibabies.config.to_filename` function to allow writing out
the settings to hard disk in *TOML* format, which looks like:
.. literalinclude:: ../nibabies/data/tests/config.toml
:language: toml
:name: nibabies.toml
:caption: **Example file representation of nibabies settings**.
This config file is used to pass the settings across processes,
using the :py:func:`~nibabies.config.load` function.
Configuration sections
----------------------
.. autoclass:: environment
:members:
.. autoclass:: execution
:members:
.. autoclass:: workflow
:members:
.. autoclass:: nipype
:members:
Usage
-----
A config file is used to pass settings and collect information as the execution
graph is built across processes.
.. code-block:: Python
from nibabies import config
config_file = config.execution.work_dir / '.nibabies.toml'
config.to_filename(config_file)
# Call build_workflow(config_file, retval) in a subprocess
with Manager() as mgr:
from .workflow import build_workflow
retval = mgr.dict()
p = Process(target=build_workflow, args=(str(config_file), retval))
p.start()
p.join()
config.load(config_file)
# Access configs from any code section as:
value = config.section.setting
Logging
-------
.. autoclass:: loggers
:members:
Other responsibilities
----------------------
The :py:mod:`config` module is responsible for other convenience actions.
* Switching Python's :obj:`multiprocessing` to *forkserver* mode.
* Set up a filter for warnings as early as possible.
* Automated I/O magic operations. Some conversions need to happen in the
store/load processes (e.g., from/to :obj:`~pathlib.Path` \<-\> :obj:`str`,
:py:class:`~bids.layout.BIDSLayout`, etc.)
"""
import os
import sys
from multiprocessing import set_start_method
# Disable NiPype etelemetry always
_disable_et = bool(
os.getenv("NO_ET") is not None or os.getenv("NIPYPE_NO_ET") is not None
)
os.environ["NIPYPE_NO_ET"] = "1"
os.environ["NO_ET"] = "1"
CONFIG_FILENAME = "nibabies.toml"
try:
set_start_method("forkserver")
except RuntimeError:
pass # context has been already set
finally:
# Defer all custom import for after initializing the forkserver and
# ignoring the most annoying warnings
import random
from uuid import uuid4
from time import strftime
from pathlib import Path
from nipype import __version__ as _nipype_ver
from templateflow import __version__ as _tf_ver
from . import __version__
if not hasattr(sys, "_is_pytest_session"):
sys._is_pytest_session = False # Trick to avoid sklearn's FutureWarnings
# Disable all warnings in main and children processes only on production versions
if not any(
(
"+" in __version__,
__version__.endswith(".dirty"),
os.getenv("NIBABIES_DEV", "0").lower() in ("1", "on", "true", "y", "yes"),
)
):
from ._warnings import logging
os.environ["PYTHONWARNINGS"] = "ignore"
elif os.getenv("NIBABIES_WARNINGS", "0").lower() in ("1", "on", "true", "y", "yes"):
# allow disabling warnings on development versions
# https://github.com/nipreps/fmriprep/pull/2080#discussion_r409118765
from ._warnings import logging
os.environ["PYTHONWARNINGS"] = "ignore"
else:
import logging
logging.addLevelName(25, "IMPORTANT") # Add a new level between INFO and WARNING
logging.addLevelName(15, "VERBOSE") # Add a new level between INFO and DEBUG
DEFAULT_MEMORY_MIN_GB = 0.01
# Ping NiPype eTelemetry once if env var was not set
# workers on the pool will have the env variable set from the master process
if not _disable_et:
# Just get so analytics track one hit
from contextlib import suppress
from requests import get as _get_url, ConnectionError, ReadTimeout
with suppress((ConnectionError, ReadTimeout)):
_get_url("https://rig.mit.edu/et/projects/nipy/nipype", timeout=0.05)
# Execution environment
_exec_env = os.name
_docker_ver = None
# special variable set in the container
if os.getenv("IS_DOCKER_8395080871"):
_exec_env = "singularity"
_cgroup = Path("/proc/1/cgroup")
if _cgroup.exists() and "docker" in _cgroup.read_text():
_docker_ver = os.getenv("DOCKER_VERSION_8395080871")
_exec_env = "nibabies-docker" if _docker_ver else "docker"
del _cgroup
_fs_license = os.getenv("FS_LICENSE")
if not _fs_license and os.getenv("FREESURFER_HOME"):
_fs_home = os.getenv("FREESURFER_HOME")
if _fs_home and (Path(_fs_home) / "license.txt").is_file():
_fs_license = str(Path(_fs_home) / "license.txt")
del _fs_home
_templateflow_home = Path(
os.getenv(
"TEMPLATEFLOW_HOME", os.path.join(os.getenv("HOME"), ".cache", "templateflow")
)
)
try:
from psutil import virtual_memory
_free_mem_at_start = round(virtual_memory().free / 1024 ** 3, 1)
except Exception:
_free_mem_at_start = None
_oc_limit = "n/a"
_oc_policy = "n/a"
try:
# Memory policy may have a large effect on types of errors experienced
_proc_oc_path = Path("/proc/sys/vm/overcommit_memory")
if _proc_oc_path.exists():
_oc_policy = {"0": "heuristic", "1": "always", "2": "never"}.get(
_proc_oc_path.read_text().strip(), "unknown"
)
if _oc_policy != "never":
_proc_oc_kbytes = Path("/proc/sys/vm/overcommit_kbytes")
if _proc_oc_kbytes.exists():
_oc_limit = _proc_oc_kbytes.read_text().strip()
if (
_oc_limit in ("0", "n/a")
and Path("/proc/sys/vm/overcommit_ratio").exists()
):
_oc_limit = "{}%".format(
Path("/proc/sys/vm/overcommit_ratio").read_text().strip()
)
except Exception:
pass
# Debug modes are names that influence the exposure of internal details to
# the user, either through additional derivatives or increased verbosity
DEBUG_MODES = ("compcor", "registration", "fieldmaps")
class _Config:
"""An abstract class forbidding instantiation."""
_paths = tuple()
def __init__(self):
"""Avert instantiation."""
raise RuntimeError("Configuration type is not instantiable.")
@classmethod
def load(cls, settings, init=True, ignore=None):
"""Store settings from a dictionary."""
ignore = ignore or {}
for k, v in settings.items():
if k in ignore or v is None:
continue
if k in cls._paths:
setattr(cls, k, Path(v).absolute())
elif hasattr(cls, k):
setattr(cls, k, v)
if init and hasattr(cls, 'init'):
cls.init()
@classmethod
def get(cls):
"""Return defined settings."""
from niworkflows.utils.spaces import SpatialReferences, Reference
out = {}
for k, v in cls.__dict__.items():
if k.startswith("_") or v is None:
continue
if callable(getattr(cls, k)):
continue
if k in cls._paths:
v = str(v)
if isinstance(v, SpatialReferences):
v = " ".join([str(s) for s in v.references]) or None
if isinstance(v, Reference):
v = str(v) or None
out[k] = v
return out
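

# --- Illustration (not a real nibabies section) --------------------------------
# A minimal sketch of how a ``_Config`` section behaves: settings live on the
# class itself, ``load`` fills them from a dictionary (turning anything listed
# in ``_paths`` into absolute :obj:`~pathlib.Path` objects), and ``get`` dumps
# them back out as plain values. The section name and fields are made up.
class _example_section(_Config):
    """Hypothetical section used only to illustrate the pattern."""

    work_dir = None
    """Where intermediate files would go."""
    n_threads = 1
    """How many threads the hypothetical tool would use."""

    _paths = ("work_dir",)

# _example_section.load({"work_dir": "scratch", "n_threads": 4})
# _example_section.get()  # -> {"work_dir": "/abs/.../scratch", "n_threads": 4}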
class environment(_Config):
"""
Read-only options regarding the platform and environment.
Crawls runtime descriptive settings (e.g., default FreeSurfer license,
execution environment, nipype and *nibabies* versions, etc.).
The ``environment`` section is not loaded in from file,
only written out when settings are exported.
This config section is useful when reporting issues,
and these variables are tracked whenever the user does not
opt-out using the ``--notrack`` argument.
"""
cpu_count = os.cpu_count()
"""Number of available CPUs."""
exec_docker_version = _docker_ver
"""Version of Docker Engine."""
exec_env = _exec_env
"""A string representing the execution platform."""
free_mem = _free_mem_at_start
"""Free memory at start."""
overcommit_policy = _oc_policy
"""Linux's kernel virtual memory overcommit policy."""
overcommit_limit = _oc_limit
"""Linux's kernel virtual memory overcommit limits."""
nipype_version = _nipype_ver
"""Nipype's current version."""
templateflow_version = _tf_ver
"""The TemplateFlow client version installed."""
version = __version__
"""*NiBabies*'s version."""
class nipype(_Config):
"""Nipype settings."""
crashfile_format = "txt"
"""The file format for crashfiles, either text or pickle."""
get_linked_libs = False
"""Run NiPype's tool to enlist linked libraries for every interface."""
memory_gb = None
"""Estimation in GB of the RAM this workflow can allocate at any given time."""
nprocs = os.cpu_count()
"""Number of processes (compute tasks) that can be run in parallel (multiprocessing only)."""
omp_nthreads = None
"""Number of CPUs a single process can access for multithreaded execution."""
plugin = "MultiProc"
"""NiPype's execution plugin."""
plugin_args = {
"maxtasksperchild": 1,
"raise_insufficient": False,
}
"""Settings for NiPype's execution plugin."""
resource_monitor = False
"""Enable resource monitor."""
stop_on_first_crash = True
"""Whether the workflow should stop or continue after the first error."""
@classmethod
def get_plugin(cls):
"""Format a dictionary for Nipype consumption."""
out = {
"plugin": cls.plugin,
"plugin_args": cls.plugin_args,
}
if cls.plugin in ("MultiProc", "LegacyMultiProc"):
out["plugin_args"]["n_procs"] = int(cls.nprocs)
if cls.memory_gb:
out["plugin_args"]["memory_gb"] = float(cls.memory_gb)
return out
@classmethod
def init(cls):
"""Set NiPype configurations."""
from nipype import config as ncfg
# Configure resource_monitor
if cls.resource_monitor:
ncfg.update_config(
{
"monitoring": {
"enabled": cls.resource_monitor,
"sample_frequency": "0.5",
"summary_append": True,
}
}
)
ncfg.enable_resource_monitor()
# Nipype config (logs and execution)
ncfg.update_config(
{
"execution": {
"crashdump_dir": str(execution.log_dir),
"crashfile_format": cls.crashfile_format,
"get_linked_libs": cls.get_linked_libs,
"stop_on_first_crash": cls.stop_on_first_crash,
"check_version": False, # disable future telemetry
}
}
)
if cls.omp_nthreads is None:
cls.omp_nthreads = min(
cls.nprocs - 1 if cls.nprocs > 1 else os.cpu_count(), 8
)
class execution(_Config):
"""Configure run-level settings."""
anat_derivatives = None
"""A path where anatomical derivatives are found to fast-track *sMRIPrep*."""
bids_dir = None
"""An existing path to the dataset, which must be BIDS-compliant."""
bids_database_dir = None
"""Path to the directory containing SQLite database indices for the input BIDS dataset."""
bids_description_hash = None
"""Checksum (SHA256) of the ``dataset_description.json`` of the BIDS dataset."""
bids_filters = None
"""A dictionary of BIDS selection filters."""
boilerplate_only = False
"""Only generate a boilerplate."""
sloppy = False
"""Run in sloppy mode (meaning, suboptimal parameters that minimize run-time)."""
debug = []
"""Debug mode(s)."""
echo_idx = None
"""Select a particular echo for multi-echo EPI datasets."""
fs_license_file = _fs_license
"""An existing file containing a FreeSurfer license."""
fs_subjects_dir = None
"""FreeSurfer's subjects directory."""
layout = None
"""A :py:class:`~bids.layout.BIDSLayout` object, see :py:func:`init`."""
log_dir = None
"""The path to a directory that contains execution logs."""
log_level = 25
"""Output verbosity."""
low_mem = None
"""Utilize uncompressed NIfTIs and other tricks to minimize memory allocation."""
md_only_boilerplate = False
"""Do not convert boilerplate from MarkDown to LaTex and HTML."""
nibabies_dir = None
"""Root of NiBabies BIDS Derivatives dataset. Depends on output_layout."""
notrack = False
"""Do not monitor *nibabies* using Sentry.io."""
output_dir = None
"""Folder where derivatives will be stored."""
output_layout = None
"""Layout of derivatives within output_dir."""
output_spaces = None
"""List of (non)standard spaces designated (with the ``--output-spaces`` flag of
the command line) as spatial references for outputs."""
reports_only = False
"""Only build the reports, based on the reportlets found in a cached working directory."""
run_uuid = f"{strftime('%Y%m%d-%H%M%S')}_{uuid4()}"
"""Unique identifier of this particular run."""
segmentation_atlases_dir = None
"""Directory with atlases to use for JLF segmentations"""
participant_label = None
"""List of participant identifiers that are to be preprocessed."""
task_id = None
"""Select a particular task from all available in the dataset."""
templateflow_home = _templateflow_home
"""The root folder of the TemplateFlow client."""
work_dir = Path("work").absolute()
"""Path to a working directory where intermediate results will be available."""
write_graph = False
"""Write out the computational graph corresponding to the planned preprocessing."""
_layout = None
_paths = (
"anat_derivatives",
"bids_dir",
"bids_database_dir",
"fs_license_file",
"fs_subjects_dir",
"layout",
"log_dir",
"nibabies_dir",
"output_dir",
"segmentation_atlases_dir",
"templateflow_home",
"work_dir",
)
@classmethod
def init(cls):
"""Create a new BIDS Layout accessible with :attr:`~execution.layout`."""
if cls.fs_license_file and Path(cls.fs_license_file).is_file():
os.environ["FS_LICENSE"] = str(cls.fs_license_file)
if cls._layout is None:
import re
from bids.layout import BIDSLayout
_db_path = cls.bids_database_dir or (
cls.work_dir / cls.run_uuid / "bids_db"
)
_db_path.mkdir(exist_ok=True, parents=True)
cls._layout = BIDSLayout(
str(cls.bids_dir),
validate=False,
database_path=_db_path,
reset_database=cls.bids_database_dir is None,
ignore=(
"code",
"stimuli",
"sourcedata",
"models",
re.compile(r"^\."),
),
)
cls.bids_database_dir = _db_path
cls.layout = cls._layout
if cls.bids_filters:
from bids.layout import Query
# unserialize pybids Query enum values
for acq, filters in cls.bids_filters.items():
cls.bids_filters[acq] = {
k: getattr(Query, v[7:-4])
if not isinstance(v, Query) and "Query" in v
else v
for k, v in filters.items()
}
if "all" in cls.debug:
cls.debug = list(DEBUG_MODES)
# These variables are not necessary anymore
del _fs_license
del _exec_env
del _nipype_ver
del _templateflow_home
del _tf_ver
del _free_mem_at_start
del _oc_limit
del _oc_policy
class workflow(_Config):
"""Configure the particular execution graph of this workflow."""
age_months = None
"""Age (in months)"""
anat_only = False
"""Execute the anatomical preprocessing only."""
aroma_err_on_warn = None
"""Cast AROMA warnings to errors."""
aroma_melodic_dim = None
"""Number of ICA components to be estimated by MELODIC
(positive = exact, negative = maximum)."""
bold2t1w_dof = None
"""Degrees of freedom of the BOLD-to-T1w registration steps."""
bold2t1w_init = "register"
"""Whether to use standard coregistration ('register') or to initialize coregistration from the
BOLD image-header ('header')."""
cifti_output = None
"""Generate HCP Grayordinates, accepts either ``'91k'`` (default) or ``'170k'``."""
dummy_scans = None
"""Set a number of initial scans to be considered nonsteady states."""
fd_radius = 45
"""Head radius in mm for framewise displacement calculation"""
fmap_bspline = None
"""Regularize fieldmaps with a field of B-Spline basis."""
fmap_demean = None
"""Remove the mean from fieldmaps."""
force_syn = None
"""Run *fieldmap-less* susceptibility-derived distortions estimation."""
hires = None
"""Run FreeSurfer ``recon-all`` with the ``-hires`` flag."""
ignore = None
"""Ignore particular steps for *nibabies*."""
longitudinal = False
"""Run FreeSurfer ``recon-all`` with the ``-logitudinal`` flag."""
medial_surface_nan = None
"""Fill medial surface with :abbr:`NaNs (not-a-number)` when sampling."""
regressors_all_comps = None
"""Return all CompCor components."""
regressors_dvars_th = None
"""Threshold for DVARS."""
regressors_fd_th = None
"""Threshold for :abbr:`FD (frame-wise displacement)`."""
run_reconall = True
"""Run FreeSurfer's surface reconstruction."""
skull_strip_fixed_seed = False
"""Fix a seed for skull-stripping."""
skull_strip_template = "UNCInfant:cohort-1"
"""Change default brain extraction template."""
skull_strip_t1w = "force"
"""Skip brain extraction of the T1w image (default is ``force``, meaning that
*nibabies* will run brain extraction of the T1w)."""
spaces = None
"""Keeps the :py:class:`~niworkflows.utils.spaces.SpatialReferences`
instance keeping standard and nonstandard spaces."""
use_aroma = None
"""Run ICA-:abbr:`AROMA (automatic removal of motion artifacts)`."""
use_bbr = False
"""Run boundary-based registration for BOLD-to-T1w registration."""
use_syn_sdc = None
"""Run *fieldmap-less* susceptibility-derived distortions estimation
in the absence of any alternatives."""
class loggers:
"""Keep loggers easily accessible (see :py:func:`init`)."""
_fmt = "%(asctime)s,%(msecs)d %(name)-2s " "%(levelname)-2s:\n\t %(message)s"
_datefmt = "%y%m%d-%H:%M:%S"
default = logging.getLogger()
"""The root logger."""
cli = logging.getLogger("cli")
"""Command-line interface logging."""
workflow = logging.getLogger("nipype.workflow")
"""NiPype's workflow logger."""
interface = logging.getLogger("nipype.interface")
"""NiPype's interface logger."""
utils = logging.getLogger("nipype.utils")
"""NiPype's utils logger."""
@classmethod
def init(cls):
"""
Set the log level, initialize all loggers into :py:class:`loggers`.
* Add new logger levels (25: IMPORTANT, and 15: VERBOSE).
* Add a new sub-logger (``cli``).
* Logger configuration.
"""
from nipype import config as ncfg
_handler = logging.StreamHandler(stream=sys.stdout)
_handler.setFormatter(logging.Formatter(fmt=cls._fmt, datefmt=cls._datefmt))
cls.cli.addHandler(_handler)
cls.default.setLevel(execution.log_level)
cls.cli.setLevel(execution.log_level)
cls.interface.setLevel(execution.log_level)
cls.workflow.setLevel(execution.log_level)
cls.utils.setLevel(execution.log_level)
ncfg.update_config(
{"logging": {"log_directory": str(execution.log_dir), "log_to_file": True}}
)
class seeds(_Config):
"""Initialize the PRNG and track random seed assignments"""
_random_seed = None
master = None
"""Master random seed to initialize the Pseudorandom Number Generator (PRNG)"""
ants = None
"""Seed used for antsRegistration, antsAI, antsMotionCorr"""
@classmethod
def init(cls):
if cls._random_seed is not None:
cls.master = cls._random_seed
if cls.master is None:
cls.master = random.randint(1, 65536)
random.seed(cls.master) # initialize the PRNG
# functions to set program specific seeds
cls.ants = _set_ants_seed()
def _set_ants_seed():
"""Fix random seed for antsRegistration, antsAI, antsMotionCorr"""
val = random.randint(1, 65536)
os.environ["ANTS_RANDOM_SEED"] = str(val)
return val
def from_dict(settings):
"""Read settings from a flat dictionary."""
nipype.load(settings)
execution.load(settings)
workflow.load(settings)
seeds.load(settings)
loggers.init()
def load(filename, skip=None):
"""Load settings from file."""
from toml import loads
skip = skip or {}
filename = Path(filename)
settings = loads(filename.read_text())
for sectionname, configs in settings.items():
if sectionname != "environment":
section = getattr(sys.modules[__name__], sectionname)
ignore = skip.get(sectionname)
section.load(configs, ignore=ignore)
init_spaces()
def get(flat=False):
"""Get config as a dict."""
settings = {
"environment": environment.get(),
"execution": execution.get(),
"workflow": workflow.get(),
"nipype": nipype.get(),
"seeds": seeds.get(),
}
if not flat:
return settings
return {
".".join((section, k)): v
for section, configs in settings.items()
for k, v in configs.items()
}
def dumps():
"""Format config into toml."""
from toml import dumps
return dumps(get())
def to_filename(filename):
"""Write settings to file."""
filename = Path(filename)
filename.write_text(dumps())
def init_spaces(checkpoint=True):
"""Initialize the :attr:`~workflow.spaces` setting."""
from niworkflows.utils.spaces import Reference, SpatialReferences
spaces = execution.output_spaces or SpatialReferences()
if not isinstance(spaces, SpatialReferences):
spaces = SpatialReferences(
[ref for s in spaces.split(" ") for ref in Reference.from_string(s)]
)
if checkpoint and not spaces.is_cached():
spaces.checkpoint()
if workflow.age_months is not None:
from .utils.misc import cohort_by_months
# cohort workaround
if any(
"MNIInfant" in space.split(":")[0]
for space in spaces.get_spaces(nonstandard=False, dim=(3,))
):
cohort = cohort_by_months("MNIInfant", workflow.age_months)
spaces.add(Reference("MNIInfant", {"cohort": cohort}))
# Ensure user-defined spatial references for outputs are correctly parsed.
# Certain options require normalization to a space not explicitly defined by users.
# These spaces will not be included in the final outputs.
if workflow.use_aroma:
# Make sure there's a normalization to FSL for AROMA to use.
spaces.add(Reference("MNI152NLin6Asym", {"res": "2"}))
if workflow.cifti_output:
# CIFTI grayordinates to corresponding FSL-MNI resolutions.
vol_res = "2" if workflow.cifti_output == "91k" else "1"
spaces.add(Reference("fsaverage", {"den": "164k"}))
spaces.add(Reference("MNI152NLin6Asym", {"res": vol_res}))
# Make the SpatialReferences object available
workflow.spaces = spaces
|
scanner1.py
|
import socket
import sys
import threading
import queue
import time
from datetime import datetime
print("""
*************************************************************
* ____ ____ ____ ___ ____ ____ ____ _ _ _ _ ____ ____ *
* |__] | | |__/ | [__ | |__| |\ | |\ | |___ |__/ *
* | |__| | \ | ___] |___ | | | \| | \| |___ | \ *
* *
*************************************************************""")
print("Made By: MOHAMMED FAHAD MUSHAHID")
#Defining Dictionary of common ports
common_ports = {
"21": "FTP",
"22": "SSH",
"23": "Telnet",
"25": "SMTP",
"53": "DNS",
"67":"DHCP",
"68":"DHCP",
"69":"TFTP",
"80": "HTTP",
"110":"POPv3",
"123":"NTP",
"143":"IMAP",
"194": "IRC",
"389":"LDAP",
"443": "HTTPS",
"3306": "MySQL",
"25565": "Minecraft"
}
# printing basic info about the scan target
print("\n[*]Host: {} IP: {} ".format(sys.argv[1], socket.gethostbyname(sys.argv[1])))
# returns the host, start port and end port taken from the command-line arguments
def get_scan_args():
if len(sys.argv) == 2:
print("\n[*]Starting Port: {} Ending Port: {}".format(0, 1024))
return (sys.argv[1], 0, 1024)
elif len(sys.argv) == 3:
print("\n[*]Starting Port: {} Ending Port: {}".format(sys.argv[2], 1024))
        return (sys.argv[1], int(sys.argv[2]), 1024)
elif len(sys.argv) == 4:
print("\n[*]Starting Port: {} Ending Port: {}".format(sys.argv[2], sys.argv[3]))
return (sys.argv[1], int(sys.argv[2]), int(sys.argv[3]))
def is_port_open(host, port): #Return boolean
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(0.5)
sock.connect((host, port))
except socket.error:
return False
return True
def scanner_worker_thread(host):
while True:
port = port_queue.get()
if is_port_open(host, port):
if str(port) in common_ports:
print("{}({}) is OPEN!".format(str(port), common_ports[str(port)]))
else:
print("{} is OPEN!".format(port))
port_queue.task_done()
scan_args = get_scan_args()
port_queue = queue.Queue()
for _ in range(20):
t = threading.Thread(target=scanner_worker_thread, kwargs={"host": scan_args[0]})
t.daemon = True
t.start()
start_time = time.time()
for port in range(scan_args[1], scan_args[2]):
port_queue.put(port)
port_queue.join()
end_time = time.time()
print("Done! Scanning took {:.3f} seconds.".format(end_time - start_time))
|
main .py
|
import threading
import tkinter as tk
from tkinter import messagebox
import time
import queue
def running(queue):
for x in range(5):
text = 'message ' + str(x)
print('PUT:', text)
queue.put(text)
time.sleep(4)
queue.put('last')
def check_queue():
global t
text = ''
if not queue.empty():
text = queue.get()
print('get:', text)
l['text'] = text
else:
print('get: - empty -')
if text == 'last':
t = None
else:
root.after(500, check_queue)
def on_click():
global t
if not t:
t = threading.Thread(target=running, args=(queue,))
t.start()
check_queue()
else:
messagebox.showinfo('INFO', 'Process still running')
# --- main ---
t = None
queue = queue.Queue()
root = tk.Tk()
l = tk.Label(root, text='', width=15, height=2)
l.pack()
b = tk.Button(root, text='Start', command=on_click, width=15, height=2)
b.pack()
root.mainloop()
|
setup_window.py
|
#
# Copyright (C) 2016 UAVCAN Development Team <uavcan.org>
#
# This software is distributed under the terms of the MIT License.
#
# Author: Pavel Kirienko <pavel.kirienko@zubax.com>
#
import sys
import glob
import time
import threading
import copy
from .widgets import show_error, get_monospace_font
from PyQt5.QtWidgets import QComboBox, QCompleter, QDialog, QDirModel, QFileDialog, QGroupBox, QHBoxLayout, QLabel, \
QLineEdit, QPushButton, QSpinBox, QVBoxLayout, QGridLayout
from PyQt5.QtCore import Qt, QTimer
from PyQt5.QtGui import QIntValidator
from logging import getLogger
from collections import OrderedDict
from itertools import count
STANDARD_BAUD_RATES = 9600, 115200, 460800, 921600, 1000000, 3000000
DEFAULT_BAUD_RATE = 115200
assert DEFAULT_BAUD_RATE in STANDARD_BAUD_RATES
RUNNING_ON_LINUX = 'linux' in sys.platform.lower()
logger = getLogger(__name__)
def _linux_parse_proc_net_dev(out_ifaces):
with open('/proc/net/dev') as f:
for line in f:
if ':' in line:
name = line.split(':')[0].strip()
out_ifaces.insert(0 if 'can' in name else len(out_ifaces), name)
return out_ifaces
def _linux_parse_ip_link_show(out_ifaces):
import re
import subprocess
import tempfile
with tempfile.TemporaryFile() as f:
proc = subprocess.Popen('ip link show', shell=True, stdout=f)
if 0 != proc.wait(10):
raise RuntimeError('Process failed')
f.seek(0)
out = f.read().decode()
return re.findall(r'\d+?: ([a-z0-9]+?): <[^>]*UP[^>]*>.*\n *link/can', out) + out_ifaces
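

# --- Illustration (not used at runtime) -----------------------------------------
# A minimal sketch of what the regular expression above extracts, applied to a
# hypothetical ``ip link show`` fragment: only interfaces that are UP and whose
# link type is "can" are returned.
def _ip_link_show_regex_demo():
    import re
    sample = ('3: can0: <NOARP,UP,LOWER_UP,ECHO> mtu 16 qdisc pfifo_fast state UP mode DEFAULT qlen 10\n'
              '    link/can \n'
              '2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP mode DEFAULT qlen 1000\n'
              '    link/ether 00:11:22:33:44:55 brd ff:ff:ff:ff:ff:ff\n')
    return re.findall(r'\d+?: ([a-z0-9]+?): <[^>]*UP[^>]*>.*\n *link/can', sample)  # -> ['can0']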
def list_ifaces():
"""Returns dictionary, where key is description, value is the OS assigned name of the port"""
logger.debug('Updating iface list...')
if RUNNING_ON_LINUX:
# Linux system
ifaces = glob.glob('/dev/serial/by-id/*')
try:
ifaces = list(sorted(ifaces,
key=lambda s: not ('zubax' in s.lower() and 'babel' in s.lower())))
except Exception:
logger.warning('Sorting failed', exc_info=True)
# noinspection PyBroadException
try:
ifaces = _linux_parse_ip_link_show(ifaces) # Primary
except Exception as ex:
logger.warning('Could not parse "ip link show": %s', ex, exc_info=True)
ifaces = _linux_parse_proc_net_dev(ifaces) # Fallback
out = OrderedDict()
for x in ifaces:
out[x] = x
return out
else:
# Windows, Mac, whatever
from PyQt5 import QtSerialPort
out = OrderedDict()
for port in QtSerialPort.QSerialPortInfo.availablePorts():
out[port.description()] = port.systemLocation()
return out
class BackgroundIfaceListUpdater:
UPDATE_INTERVAL = 0.5
def __init__(self):
self._ifaces = list_ifaces()
self._thread = threading.Thread(target=self._run, name='iface_lister', daemon=True)
self._keep_going = True
self._lock = threading.Lock()
def __enter__(self):
logger.debug('Starting iface list updater')
self._thread.start()
return self
def __exit__(self, *_):
logger.debug('Stopping iface list updater...')
self._keep_going = False
self._thread.join()
logger.debug('Stopped iface list updater')
def _run(self):
while self._keep_going:
time.sleep(self.UPDATE_INTERVAL)
new_list = list_ifaces()
with self._lock:
self._ifaces = new_list
def get_list(self):
with self._lock:
return copy.copy(self._ifaces)
class DirectorySelectionWidget(QGroupBox):
def __init__(self, parent, dsdl_path=None):
super(DirectorySelectionWidget, self).__init__('Location of custom DSDL definitions [optional]', parent)
self._dir_selection = dsdl_path
dir_textbox = QLineEdit(self)
dir_textbox.setText(self._dir_selection)
dir_text_completer = QCompleter(self)
dir_text_completer.setCaseSensitivity(Qt.CaseSensitive)
dir_text_completer.setModel(QDirModel(self))
dir_textbox.setCompleter(dir_text_completer)
def on_edit():
self._dir_selection = str(dir_textbox.text())
dir_textbox.textChanged.connect(on_edit)
dir_browser = QPushButton('Browse', self)
def on_browse():
self._dir_selection = str(QFileDialog.getExistingDirectory(self, 'Select Directory'))
dir_textbox.setText(self._dir_selection)
dir_browser.clicked.connect(on_browse)
layout = QHBoxLayout(self)
layout.addWidget(dir_textbox)
layout.addWidget(dir_browser)
self.setLayout(layout)
def get_selection(self):
return self._dir_selection
def run_setup_window(icon, dsdl_path=None):
win = QDialog()
win.setWindowTitle('Application Setup')
win.setWindowIcon(icon)
win.setWindowFlags(Qt.CustomizeWindowHint | Qt.WindowTitleHint | Qt.WindowCloseButtonHint)
win.setAttribute(Qt.WA_DeleteOnClose) # This is required to stop background timers!
combo = QComboBox(win)
combo.setEditable(True)
combo.setInsertPolicy(QComboBox.NoInsert)
combo.setSizeAdjustPolicy(QComboBox.AdjustToContents)
combo.setFont(get_monospace_font())
combo_completer = QCompleter()
combo_completer.setCaseSensitivity(Qt.CaseSensitive)
combo_completer.setModel(combo.model())
combo.setCompleter(combo_completer)
bitrate = QSpinBox(win)
bitrate.setMaximum(1000000)
bitrate.setMinimum(10000)
bitrate.setValue(1000000)
baudrate = QComboBox(win)
baudrate.setEditable(True)
baudrate.setInsertPolicy(QComboBox.NoInsert)
baudrate.setSizeAdjustPolicy(QComboBox.AdjustToContents)
baudrate.setFont(get_monospace_font())
baudrate_completer = QCompleter(win)
baudrate_completer.setModel(baudrate.model())
baudrate.setCompleter(baudrate_completer)
baudrate.setValidator(QIntValidator(min(STANDARD_BAUD_RATES), max(STANDARD_BAUD_RATES)))
baudrate.insertItems(0, map(str, STANDARD_BAUD_RATES))
baudrate.setCurrentText(str(DEFAULT_BAUD_RATE))
dir_selection = DirectorySelectionWidget(win, dsdl_path)
ok = QPushButton('OK', win)
def update_slcan_options_visibility():
if RUNNING_ON_LINUX:
slcan_active = '/' in combo.currentText()
else:
slcan_active = True
slcan_group.setEnabled(slcan_active)
combo.currentTextChanged.connect(update_slcan_options_visibility)
ifaces = None
def update_iface_list():
nonlocal ifaces
ifaces = iface_lister.get_list()
known_keys = set()
remove_indices = []
was_empty = combo.count() == 0
# Marking known and scheduling for removal
for idx in count():
tx = combo.itemText(idx)
if not tx:
break
known_keys.add(tx)
if tx not in ifaces:
logger.debug('Removing iface %r', tx)
remove_indices.append(idx)
# Removing - starting from the last item in order to retain indexes
for idx in remove_indices[::-1]:
combo.removeItem(idx)
# Adding new items - starting from the last item in order to retain the final order
for key in list(ifaces.keys())[::-1]:
if key not in known_keys:
logger.debug('Adding iface %r', key)
combo.insertItem(0, key)
# Updating selection
if was_empty:
combo.setCurrentIndex(0)
result = None
kwargs = {}
def on_ok():
nonlocal result, kwargs
try:
baud_rate_value = int(baudrate.currentText())
except ValueError:
show_error('Invalid parameters', 'Could not parse baud rate', 'Please specify correct baud rate',
parent=win)
return
if not (min(STANDARD_BAUD_RATES) <= baud_rate_value <= max(STANDARD_BAUD_RATES)):
show_error('Invalid parameters', 'Baud rate is out of range',
'Baud rate value should be within [%s, %s]' %
(min(STANDARD_BAUD_RATES), max(STANDARD_BAUD_RATES)),
parent=win)
return
kwargs['baudrate'] = baud_rate_value
kwargs['bitrate'] = int(bitrate.value())
result_key = str(combo.currentText()).strip()
if not result_key:
show_error('Invalid parameters', 'Interface name cannot be empty', 'Please select a valid interface',
parent=win)
return
try:
result = ifaces[result_key]
except KeyError:
result = result_key
win.close()
ok.clicked.connect(on_ok)
can_group = QGroupBox('CAN interface setup', win)
can_layout = QVBoxLayout()
can_layout.addWidget(QLabel('Select CAN interface'))
can_layout.addWidget(combo)
slcan_group = QGroupBox('SLCAN adapter settings', win)
slcan_layout = QGridLayout()
slcan_layout.addWidget(QLabel('CAN bus bit rate:'), 0, 0)
slcan_layout.addWidget(bitrate, 0, 1)
slcan_layout.addWidget(QLabel('Adapter baud rate (not applicable to USB-CAN adapters):'), 1, 0)
slcan_layout.addWidget(baudrate, 1, 1)
slcan_group.setLayout(slcan_layout)
can_layout.addWidget(slcan_group)
can_group.setLayout(can_layout)
layout = QVBoxLayout()
layout.addWidget(can_group)
layout.addWidget(dir_selection)
layout.addWidget(ok)
layout.setSizeConstraint(layout.SetFixedSize)
win.setLayout(layout)
with BackgroundIfaceListUpdater() as iface_lister:
update_slcan_options_visibility()
update_iface_list()
timer = QTimer(win)
timer.setSingleShot(False)
timer.timeout.connect(update_iface_list)
timer.start(int(BackgroundIfaceListUpdater.UPDATE_INTERVAL / 2 * 1000))
win.exec()
return result, kwargs, dir_selection.get_selection()
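# Illustrative sketch only (not part of the original tool): running the setup
# dialog standalone. A QApplication must exist before the dialog is created;
# the empty QIcon is a placeholder and the helper name is hypothetical.
def _example_run_setup_standalone(dsdl_path=None):
    from PyQt5.QtWidgets import QApplication
    from PyQt5.QtGui import QIcon
    app = QApplication([])  # keep a reference so the Qt event loop machinery stays alive
    iface, adapter_kwargs, dsdl_dir = run_setup_window(QIcon(), dsdl_path)
    logger.info('Selected iface=%r kwargs=%r dsdl_dir=%r', iface, adapter_kwargs, dsdl_dir)
    return iface, adapter_kwargs, dsdl_dir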
|
test_cuda.py
|
from itertools import repeat, chain, product
from typing import NamedTuple
import collections
import contextlib
import ctypes
import gc
import io
import pickle
import queue
import sys
import tempfile
import threading
import unittest
import torch
import torch.cuda
import torch.cuda.comm as comm
from torch.nn.parallel import scatter_gather
from torch.utils.checkpoint import checkpoint_sequential
from torch._six import inf, nan
from test_torch import AbstractTestCases
from torch.testing._internal.common_methods_invocations import tri_tests_args, tri_large_tests_args, \
_compare_trilu_indices, _compare_large_trilu_indices
from torch.testing._internal.common_utils import TestCase, freeze_rng_state, run_tests, \
NO_MULTIPROCESSING_SPAWN, skipIfRocm, load_tests, IS_REMOTE_GPU, IS_SANDCASTLE, IS_WINDOWS, \
slowTest, skipCUDANonDefaultStreamIf, skipCUDAMemoryLeakCheckIf, TEST_WITH_ROCM, TEST_NUMPY, \
get_cycles_per_ms
from torch.testing._internal.autocast_test_lists import AutocastTestLists
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
# We cannot import TEST_CUDA and TEST_MULTIGPU from torch.testing._internal.common_cuda here,
# because if we do that, the TEST_CUDNN line from torch.testing._internal.common_cuda will be executed
# multiple times as well during the execution of this test suite, and it will
# cause a CUDA OOM error on Windows.
TEST_CUDA = torch.cuda.is_available()
TEST_MULTIGPU = TEST_CUDA and torch.cuda.device_count() >= 2
if not TEST_CUDA:
print('CUDA not available, skipping tests', file=sys.stderr)
TestCase = object # noqa: F811
TEST_LARGE_TENSOR = TEST_CUDA
TEST_MEDIUM_TENSOR = TEST_CUDA
TEST_CUDNN = TEST_CUDA
TEST_BF16 = False
if TEST_CUDA:
torch.ones(1).cuda() # initialize cuda context
TEST_CUDNN = TEST_CUDA and (TEST_WITH_ROCM or
torch.backends.cudnn.is_acceptable(torch.tensor(1., device=torch.device('cuda:0'))))
TEST_LARGE_TENSOR = torch.cuda.get_device_properties(0).total_memory >= 12e9
TEST_MEDIUM_TENSOR = torch.cuda.get_device_properties(0).total_memory >= 6e9
TEST_BF16 = torch.cuda.is_bf16_supported()
types = [
torch.FloatTensor,
torch.DoubleTensor,
torch.LongTensor,
torch.IntTensor,
torch.ShortTensor,
torch.CharTensor,
torch.ByteTensor,
torch.HalfTensor,
]
def make_sparse_tensor(t, n, *sizes):
assert t.is_sparse
tensor = t()
i = tensor._indices()
i = i.new(len(sizes), n).copy_(
torch.cat([torch.LongTensor(1, n).random_(s) for s in sizes], 0))
v = tensor._values()
v = v.new(n).copy_(torch.randn(n))
return t(i, v, torch.Size(sizes))
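# Illustrative sketch only (not part of the original test file): make_sparse_tensor
# expects a legacy sparse tensor type and scatters `n` random entries over the
# given dense shape. The helper name below is hypothetical.
def _example_make_sparse_tensor():
    s = make_sparse_tensor(torch.cuda.sparse.FloatTensor, 10, 4, 4)  # 4x4 with 10 entries
    return s.is_sparse, s._nnz()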
_cycles_per_ms = None
class TestCuda(TestCase):
_do_cuda_memory_leak_check = True
_do_cuda_non_default_stream = True
FIFTY_MIL_CYCLES = 50000000
def setUp(self):
super(TestCuda, self).setUp()
self.autocast_lists = AutocastTestLists(torch.device('cuda:0'))
def tearDown(self):
del self.autocast_lists
super(TestCuda, self).tearDown()
def _check_memory_stat_consistency(self):
snapshot = torch.cuda.memory_snapshot()
expected_each_device = collections.defaultdict(lambda: collections.defaultdict(int))
for segment in snapshot:
expected = expected_each_device[segment["device"]]
pool_str = segment["segment_type"] + "_pool"
expected["segment.all.current"] += 1
expected["segment." + pool_str + ".current"] += 1
expected["allocated_bytes.all.current"] += segment["allocated_size"]
expected["allocated_bytes." + pool_str + ".current"] += segment["allocated_size"]
expected["reserved_bytes.all.current"] += segment["total_size"]
expected["reserved_bytes." + pool_str + ".current"] += segment["total_size"]
expected["active_bytes.all.current"] += segment["active_size"]
expected["active_bytes." + pool_str + ".current"] += segment["active_size"]
is_split = len(segment["blocks"]) > 1
for block in segment["blocks"]:
if block["state"] == "active_allocated":
expected["allocation.all.current"] += 1
expected["allocation." + pool_str + ".current"] += 1
if block["state"].startswith("active_"):
expected["active.all.current"] += 1
expected["active." + pool_str + ".current"] += 1
if block["state"] == "inactive" and is_split:
expected["inactive_split.all.current"] += 1
expected["inactive_split." + pool_str + ".current"] += 1
expected["inactive_split_bytes.all.current"] += block["size"]
expected["inactive_split_bytes." + pool_str + ".current"] += block["size"]
for device, expected in expected_each_device.items():
stats = torch.cuda.memory_stats(device)
for k, v in expected.items():
self.assertEqual(v, stats[k])
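    # The keys assembled above follow the torch.cuda.memory_stats() naming
    # scheme "<metric>.<pool>.<kind>" (e.g. "allocated_bytes.all.current",
    # "segment.large_pool.current"), so the snapshot-derived expectations can
    # be compared against the reported stats one key at a time.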
@staticmethod
def _test_memory_stats_generator(self, device=None, N=35):
if device is None:
device = torch.cuda.current_device()
m0 = torch.cuda.memory_allocated(device)
last_m_arr = [torch.cuda.memory_allocated(device)]
max_m_arr = [torch.cuda.max_memory_allocated(device)]
last_r_arr = [torch.cuda.memory_reserved(device)]
max_r_arr = [torch.cuda.max_memory_reserved(device)]
def alloc(*size):
with torch.cuda.device(device):
# NOTE: do **not** use methods that can have additional
# memory overhead, e.g., inplace random sampling methods.
                # They can leave some memory occupied even after being
                # deallocated, e.g., an initialized RNG state, causing some
                # memory checks below to fail.
return torch.cuda.FloatTensor(*size)
def assert_change(comp=1, empty_cache=False, reset_peak=False):
# comp > 0: increased
# comp = 0: equal
# comp < 0: decreased
new_m = torch.cuda.memory_allocated(device)
new_max_m = torch.cuda.max_memory_allocated(device)
if comp > 0:
self.assertGreater(new_m, last_m_arr[0])
elif comp < 0:
self.assertLess(new_m, last_m_arr[0])
else:
self.assertEqual(new_m, last_m_arr[0])
self.assertLessEqual(new_m, new_max_m)
self.assertGreaterEqual(new_max_m, max_m_arr[0])
last_m_arr[0] = new_m
max_m_arr[0] = new_max_m
new_r = torch.cuda.memory_reserved(device)
new_max_r = torch.cuda.max_memory_reserved(device)
            # emptying the cache may happen (due to allocation or empty_cache), so
            # we can't assert new_r >= last_r_arr[0]
self.assertLessEqual(new_r, new_max_r)
self.assertGreaterEqual(new_max_r, max_r_arr[0])
last_r_arr[0] = new_r
max_r_arr[0] = new_max_r
if empty_cache:
torch.cuda.empty_cache()
new_r = torch.cuda.memory_reserved(device)
new_max_r = torch.cuda.max_memory_reserved(device)
self.assertLessEqual(new_r, last_r_arr[0])
self.assertLessEqual(new_r, new_max_r)
self.assertEqual(new_max_r, max_r_arr[0])
last_r_arr[0] = new_r
if reset_peak:
torch.cuda.reset_peak_memory_stats(device)
self.assertEqual(torch.cuda.memory_allocated(device), last_m_arr[0])
self.assertEqual(torch.cuda.max_memory_allocated(device), last_m_arr[0])
max_m_arr[0] = last_m_arr[0]
self.assertEqual(torch.cuda.memory_reserved(device), last_r_arr[0])
self.assertEqual(torch.cuda.max_memory_reserved(device), last_r_arr[0])
max_r_arr[0] = last_r_arr[0]
assert_change(0)
assert_change(0, reset_peak=True)
assert_change(0, empty_cache=True)
assert_change(0, reset_peak=True)
assert_change(0)
yield
tensors1 = [alloc(1), alloc(10, 20), alloc(200, 300, 2000)]
m1 = torch.cuda.memory_allocated(device)
assert_change(1)
yield
tensors2 = []
for i in range(1, int(N / 2) + 1):
# small ones
tensors2.append(alloc(i, i * 4))
assert_change(1)
yield
for i in range(5, int(N / 2) + 5):
# large ones
tensors2.append(alloc(i, i * 7, i * 9, i * 11))
assert_change(1, reset_peak=(i % 2 == 0))
yield
tensors2.append(alloc(0, 0, 0))
assert_change(0)
yield
permute = []
for i in torch.randperm(len(tensors2)):
permute.append(tensors2[i])
assert_change(0)
yield
del tensors2
assert_change(0)
yield
tensors2 = permute
assert_change(0)
yield
del permute
assert_change(0, reset_peak=True)
yield
for i in range(int(N / 2)):
x = tensors2[i].numel()
del tensors2[i]
assert_change(-x) # in case that tensors2[i] is empty
yield
for i in range(2, int(2 * N / 3) + 2):
tensors2.append(alloc(i, i * 3, i * 8))
assert_change(1)
yield
del tensors2
assert_change(-1, reset_peak=True)
assert_change(0)
self.assertEqual(torch.cuda.memory_allocated(device), m1)
yield True
del tensors1
assert_change(-1, reset_peak=True)
self.assertEqual(torch.cuda.memory_allocated(device), m0)
# test empty_cache and reset_peak
assert_change(0, empty_cache=True)
assert_change(0, reset_peak=True)
def test_cudart_register(self):
t = torch.ones(20)
self.assertFalse(t.is_pinned())
cudart = torch.cuda.cudart()
r = cudart.cudaHostRegister(t.data_ptr(), t.numel() * t.element_size(), 0)
self.assertEqual(r, 0)
self.assertTrue(t.is_pinned())
r = cudart.cudaHostUnregister(t.data_ptr())
self.assertEqual(r, 0)
self.assertFalse(t.is_pinned())
def test_memory_stats(self):
gc.collect()
torch.cuda.empty_cache()
for _ in self._test_memory_stats_generator(self):
self._check_memory_stat_consistency()
def test_memory_allocation(self):
gc.collect()
torch.cuda.empty_cache()
mem = None
size = 1
prev = 0
try:
prev = torch.cuda.memory_allocated()
mem = torch.cuda.caching_allocator_alloc(size)
self.assertGreater(torch.cuda.memory_allocated(), prev)
finally:
if mem is not None:
torch.cuda.caching_allocator_delete(mem)
self.assertEqual(torch.cuda.memory_allocated(), prev)
def test_check_error(self):
# Assert this call doesn't raise.
torch.cuda.check_error(0)
with self.assertRaisesRegex(torch.cuda.CudaError,
"out of memory|hipErrorOutOfMemory"):
torch.cuda.check_error(2)
def test_cuda_get_device_name(self):
# Testing the behaviour with None as an argument
current_device = torch.cuda.current_device()
current_device_name = torch.cuda.get_device_name(current_device)
device_name_None = torch.cuda.get_device_name(None)
self.assertEqual(current_device_name, device_name_None)
# Testing the behaviour for No argument
device_name_no_argument = torch.cuda.get_device_name()
self.assertEqual(current_device_name, device_name_no_argument)
def test_cuda_get_device_capability(self):
# Testing the behaviour with None as an argument
current_device = torch.cuda.current_device()
current_device_capability = torch.cuda.get_device_capability(current_device)
device_capability_None = torch.cuda.get_device_capability(None)
self.assertEqual(current_device_capability, device_capability_None)
# Testing the behaviour for No argument
device_capability_no_argument = torch.cuda.get_device_capability()
self.assertEqual(current_device_capability, device_capability_no_argument)
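    # Note: get_device_capability() returns a (major, minor) compute-capability
    # tuple, e.g. (7, 0) for Volta-class devices, so the explicit-device, None,
    # and no-argument forms can be compared for equality directly.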
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_memory_stats_multigpu(self):
        # advance a generator with an end flag
def advance(gen, end):
if not end:
try:
next(gen)
except StopIteration:
end = True
return end
# interlace
torch.cuda.empty_cache()
gen0 = self._test_memory_stats_generator(self, device='cuda:0', N=35)
gen1 = self._test_memory_stats_generator(self, device=torch.device('cuda:1'), N=35)
end0 = end1 = False
while not (end0 and end1):
end0 = advance(gen0, end0)
end1 = advance(gen1, end1)
# semi-random order
torch.cuda.empty_cache()
gen0 = self._test_memory_stats_generator(self, device=0, N=35)
gen1 = self._test_memory_stats_generator(self, device=torch.device('cuda:1'), N=35)
end0 = end1 = False
while not (end0 and end1):
end0 = advance(gen0, end0)
if not end0:
gen1_max_times = torch.LongTensor(1).random_(0, 3)[0]
else:
gen1_max_times = inf
t = 0
while t < gen1_max_times and not end1:
end1 = advance(gen1, end1)
t += 1
def test_out_of_memory(self):
tensor = torch.zeros(1024, device='cuda')
with self.assertRaisesRegex(RuntimeError, "Tried to allocate 800000000.00 GiB"):
torch.empty(1024 * 1024 * 1024 * 800000000, dtype=torch.int8, device='cuda')
with self.assertRaisesRegex(RuntimeError, "Tried to allocate more than 1EB memory"):
torch.empty(1024 * 1024 * 1024 * 8000000000, dtype=torch.int8, device='cuda')
        # ensure the out-of-memory error doesn't disturb the subsequent kernel
tensor.fill_(1)
self.assertTrue((tensor == 1).all())
def test_set_per_process_memory_fraction(self):
# test invalid fraction value.
with self.assertRaisesRegex(TypeError, "Invalid type"):
torch.cuda.set_per_process_memory_fraction(int(1))
with self.assertRaisesRegex(ValueError, "Invalid fraction value"):
torch.cuda.set_per_process_memory_fraction(-0.1)
with self.assertRaisesRegex(ValueError, "Invalid fraction value"):
torch.cuda.set_per_process_memory_fraction(2.0)
tensor = torch.zeros(1024, device='cuda')
torch.cuda.empty_cache()
total_memory = torch.cuda.get_device_properties(0).total_memory
torch.cuda.set_per_process_memory_fraction(0.5, 0)
# test 0.499 allocation is ok.
application = int(total_memory * 0.499) - torch.cuda.max_memory_reserved()
tmp_tensor = torch.empty(application, dtype=torch.int8, device='cuda')
del tmp_tensor
torch.cuda.empty_cache()
application = int(total_memory * 0.5)
# it will get OOM when try to allocate more than half memory.
with self.assertRaisesRegex(RuntimeError, "out of memory"):
torch.empty(application, dtype=torch.int8, device='cuda')
        # ensure the out-of-memory error doesn't disturb the subsequent kernel
tensor.fill_(1)
self.assertTrue((tensor == 1).all())
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_autogpu(self):
x = torch.randn(5, 5).cuda()
y = torch.randn(5, 5).cuda()
self.assertEqual(x.get_device(), 0)
self.assertEqual(x.get_device(), 0)
with torch.cuda.device(1):
z = torch.randn(5, 5).cuda()
self.assertEqual(z.get_device(), 1)
q = x.add(y)
self.assertEqual(q.get_device(), 0)
w = torch.randn(5, 5).cuda()
self.assertEqual(w.get_device(), 1)
self.assertEqual(y.cuda().get_device(), 1)
z = z.cuda()
self.assertEqual(z.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_new(self):
x = torch.randn(3, 3).cuda()
self.assertEqual(x.new([0, 1, 2]).get_device(), 0)
self.assertEqual(x.new([0, 1, 2], device=1).get_device(), 1)
with torch.cuda.device(1):
self.assertEqual(x.new([0, 1, 2]).get_device(), 0)
self.assertEqual(x.new([0, 1, 2], device=1).get_device(), 1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_copy_device(self):
x = torch.randn(5, 5).cuda()
with torch.cuda.device(1):
y = x.cuda()
self.assertEqual(y.get_device(), 1)
self.assertIs(y.cuda(), y)
z = y.cuda(0)
self.assertEqual(z.get_device(), 0)
self.assertIs(z.cuda(0), z)
x = torch.randn(5, 5)
with torch.cuda.device(1):
y = x.cuda()
self.assertEqual(y.get_device(), 1)
self.assertIs(y.cuda(), y)
z = y.cuda(0)
self.assertEqual(z.get_device(), 0)
self.assertIs(z.cuda(0), z)
def _test_copy_sync_current_stream(self, x, y):
x_plus_one = x + 1
s0 = torch.cuda.Stream(device=x.device)
s1 = torch.cuda.Stream(device=y.device)
s2 = torch.cuda.Stream(device=x.device)
s3 = torch.cuda.Stream(device=y.device)
# same dst stream different src streams
with torch.cuda.stream(s0):
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
with torch.cuda.stream(s1):
y.copy_(x_plus_one)
with torch.cuda.stream(s2), torch.cuda.stream(s1):
y.copy_(x)
s1.synchronize()
# The copy() is synchronized on the current streams of both src and dst.
# In the above test, the _sleep() op on s0 will not block the copy() on
# s2, but both copies are synchronized on s1 in the dst device. Hence,
# x is copied to y after x_plus_one is copied to y. If x and y are on
# the same device, both copy() ops are synchronized on s1.
self.assertEqual(y, x)
# same src stream different dst streams
with torch.cuda.stream(s1):
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
with torch.cuda.stream(s0):
y.copy_(x_plus_one)
with torch.cuda.stream(s3), torch.cuda.stream(s0):
y.copy_(x)
s0.synchronize()
# Similarly, both copy() ops are synchronized on s0.
self.assertEqual(y, x)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_copy_streams(self):
d0 = torch.device('cuda:0')
x0 = torch.zeros(5, 5, device=d0)
d1 = torch.device('cuda:1')
x1 = torch.zeros(5, 5, device=d1)
self._test_copy_sync_current_stream(x0, x1)
x2 = torch.zeros(5, 5, device=d0)
self._test_copy_sync_current_stream(x0, x2)
def test_copy_non_blocking(self):
def _test_copy_non_blocking(a, b):
event = torch.cuda.Event()
a.copy_(b, non_blocking=True)
event.record()
event.synchronize()
self.assertEqual(a, b)
# 10MB copies
x = torch.ones(10000000, dtype=torch.uint8).cuda()
y = torch.zeros(10000000, dtype=torch.uint8).pin_memory()
_test_copy_non_blocking(x, y)
x = torch.zeros(10000000, dtype=torch.uint8).pin_memory()
y = torch.ones(10000000, dtype=torch.uint8).cuda()
_test_copy_non_blocking(x, y)
def test_to_non_blocking(self):
stream = torch.cuda.current_stream()
def _test_to_non_blocking(a, non_blocking, dst):
torch.cuda.synchronize()
            # Pushes a 0.1 second spin to the stream so that, if the copy is
            # non-blocking, the stream will almost surely be active when we query().
torch.cuda._sleep(int(100 * get_cycles_per_ms()))
b = a.to(device=dst, non_blocking=non_blocking)
self.assertEqual(stream.query(), not non_blocking)
stream.synchronize()
self.assertEqual(a, b)
self.assertTrue(b.is_pinned() == (non_blocking and dst == "cpu"))
for dst, try_non_blocking in product(("cuda", "cpu"), (True, False)):
# Creates source on the opposite device from destination.
src = torch.randn(1000000,
device="cuda" if dst == "cpu" else "cpu",
pin_memory=True if dst == "cuda" else False)
_test_to_non_blocking(src, try_non_blocking, dst)
def test_to_cpu_blocking_by_default(self):
src = torch.randn(1000000, device="cuda")
torch.cuda.synchronize()
torch.cuda._sleep(int(100 * get_cycles_per_ms()))
dst = src.to(device="cpu")
self.assertEqual(torch.cuda.current_stream().query(), True)
self.assertEqual(src, dst)
self.assertFalse(dst.is_pinned())
def test_serialization_array_with_storage(self):
x = torch.randn(5, 5).cuda()
y = torch.IntTensor(2, 5).fill_(0).cuda()
q = [x, y, x, y.storage()]
with tempfile.NamedTemporaryFile() as f:
torch.save(q, f)
f.seek(0)
q_copy = torch.load(f)
self.assertEqual(q_copy, q, atol=0, rtol=0)
q_copy[0].fill_(5)
self.assertEqual(q_copy[0], q_copy[2], atol=0, rtol=0)
self.assertTrue(isinstance(q_copy[0], torch.cuda.FloatTensor))
self.assertTrue(isinstance(q_copy[1], torch.cuda.IntTensor))
self.assertTrue(isinstance(q_copy[2], torch.cuda.FloatTensor))
self.assertTrue(isinstance(q_copy[3], torch.storage.TypedStorage))
self.assertTrue(isinstance(q_copy[3]._storage, torch.cuda.UntypedStorage))
q_copy[1].fill_(10)
self.assertEqual(q_copy[3], torch.cuda.IntStorage(10).fill_(10))
def test_cublas_allow_tf32_get_set(self):
orig = torch.backends.cuda.matmul.allow_tf32
self.assertEqual(torch._C._get_cublas_allow_tf32(), orig)
torch.backends.cuda.matmul.allow_tf32 = not orig
self.assertEqual(torch._C._get_cublas_allow_tf32(), not orig)
torch.backends.cuda.matmul.allow_tf32 = orig
def test_cudnn_allow_tf32_get_set(self):
with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=False):
self.assertFalse(torch.backends.cudnn.allow_tf32)
with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=True):
self.assertTrue(torch.backends.cudnn.allow_tf32)
def test_type_conversions(self):
x = torch.randn(5, 5)
self.assertIsInstance(x.float(), torch.FloatTensor)
self.assertIsInstance(x.cuda().double(), torch.cuda.DoubleTensor)
self.assertIsInstance(x.cuda().float(), torch.cuda.FloatTensor)
self.assertIsInstance(x.cuda().float().cpu(), torch.FloatTensor)
self.assertIsInstance(x.cuda().float().cpu().int(), torch.IntTensor)
y = x.storage()
self.assertIsInstance(y.float(), torch.FloatStorage)
self.assertIsInstance(y.cuda().double(), torch.cuda.DoubleStorage)
self.assertIsInstance(y.cuda().float(), torch.cuda.FloatStorage)
self.assertIsInstance(y.cuda().float().cpu(), torch.FloatStorage)
self.assertIsInstance(y.cuda().float().cpu().int(), torch.IntStorage)
@unittest.skip("was disabled due to not enough memory, but actually it always fail")
def test_arithmetic_large_tensor(self):
x = torch.empty(2**30, device='cuda')
x.fill_(1)
self.assertEqual(x.sum(), 2**30)
x += 1
self.assertEqual(x.sum(), 2**31)
x.fill_(1)
x -= 0.5
self.assertEqual(x.sum(), 2**29)
x.fill_(1)
x *= 2
self.assertEqual(x.sum(), 2**31)
x.fill_(1)
x /= 2
self.assertEqual(x.sum(), 2**29)
def test_gather_bool(self):
t = torch.tensor([[False, True], [True, True]], device='cuda')
self.assertEqual(torch.gather(t, 1, torch.tensor([[0, 0], [1, 0]], device='cuda')),
torch.tensor([[False, False], [True, True]], device='cuda'))
def test_torch_manual_seed_seeds_cuda_devices(self):
with freeze_rng_state():
x = torch.zeros(4, 4).float().cuda()
torch.manual_seed(2)
self.assertEqual(torch.cuda.initial_seed(), 2)
x.uniform_()
torch.manual_seed(2)
y = x.clone().uniform_()
self.assertEqual(x, y)
self.assertEqual(torch.cuda.initial_seed(), 2)
def test_manual_seed(self):
with freeze_rng_state():
x = torch.zeros(4, 4).float().cuda()
torch.cuda.manual_seed(2)
self.assertEqual(torch.cuda.initial_seed(), 2)
x.uniform_()
a = torch.bernoulli(torch.full_like(x, 0.5))
torch.cuda.manual_seed(2)
y = x.clone().uniform_()
b = torch.bernoulli(torch.full_like(x, 0.5))
self.assertEqual(x, y)
self.assertEqual(a, b)
self.assertEqual(torch.cuda.initial_seed(), 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_cat_autogpu(self):
x = torch.randn(4, 4).cuda(1)
y = torch.randn(4, 4).cuda(1)
z = torch.cat([x, y], 0)
self.assertEqual(z.get_device(), x.get_device())
@unittest.skipIf(torch.cuda.device_count() >= 10, "Loading a cuda:9 tensor")
def test_load_nonexistent_device(self):
# Setup: create a serialized file object with a 'cuda:9' restore location
tensor = torch.randn(2, device='cuda')
buf = io.BytesIO()
torch.save(tensor, buf)
# NB: this might not work in the future if serialization changes
buf = io.BytesIO(buf.getvalue().replace(b'cuda:0', b'cuda:9'))
msg = r'Attempting to deserialize object on CUDA device 9'
with self.assertRaisesRegex(RuntimeError, msg):
_ = torch.load(buf)
def test_specify_improper_device_name(self):
import os
fname = "tempfile.pt"
try:
with self.assertRaisesRegex(RuntimeError, "Invalid device string"):
torch.save([torch.nn.Parameter(torch.randn(10, 10))], fname,
_use_new_zipfile_serialization=True)
torch.load(fname, 'cuda0')
finally:
if os.path.exists(fname):
os.remove(fname)
def test_get_device_index(self):
from torch.cuda._utils import _get_device_index
with self.assertRaisesRegex(RuntimeError, "Invalid device string"):
_get_device_index('cuda0', optional=True)
with self.assertRaisesRegex(ValueError, "Expected a cuda device"):
cpu_device = torch.device('cpu')
_get_device_index(cpu_device, optional=True)
def test_serialization_array_with_empty(self):
x = [torch.randn(4, 4).cuda(), torch.cuda.FloatTensor()]
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f)
f.seek(0)
x_copy = torch.load(f)
for original, copy in zip(x, x_copy):
self.assertEqual(copy, original)
self.assertIs(type(copy), type(original))
self.assertEqual(copy.get_device(), original.get_device())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_serialization_remap(self):
x = [torch.randn(4, 4).cuda(0), torch.randn(4, 4).cuda(1)]
def gpu_remap(storage, location):
if location == 'cuda:1':
return storage.cuda(0)
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f)
f.seek(0)
x_copy = torch.load(f, map_location=gpu_remap)
for original, copy in zip(x, x_copy):
self.assertEqual(copy, original)
self.assertIs(type(copy), type(original))
self.assertEqual(copy.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_serialization_remap_dict(self):
x = [torch.randn(4, 4).cuda(0), torch.randn(4, 4).cuda(1)]
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f)
f.seek(0)
x_copy = torch.load(f, map_location={'cuda:1': 'cuda:0'})
for original, copy in zip(x, x_copy):
self.assertEqual(copy, original)
self.assertIs(type(copy), type(original))
self.assertEqual(copy.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_storage_clone(self):
x = torch.randn(4, 4, device='cuda:1').storage()
y = x.clone()
self.assertEqual(x.get_device(), y.get_device())
for t in ['byte', 'char', 'short', 'int', 'long', 'half', 'double']:
self.assertEqual(getattr(x, t)().get_device(), x.get_device())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_cuda_set_device(self):
x = torch.randn(5, 5)
with torch.cuda.device(1):
self.assertEqual(x.cuda().get_device(), 1)
torch.cuda.set_device(0)
self.assertEqual(x.cuda().get_device(), 0)
with torch.cuda.device(1):
self.assertEqual(x.cuda().get_device(), 1)
self.assertEqual(x.cuda().get_device(), 0)
torch.cuda.set_device(1)
self.assertEqual(x.cuda().get_device(), 0)
def test_cuda_synchronize(self):
torch.cuda.synchronize()
torch.cuda.synchronize('cuda')
torch.cuda.synchronize('cuda:0')
torch.cuda.synchronize(0)
torch.cuda.synchronize(torch.device('cuda:0'))
if TEST_MULTIGPU:
torch.cuda.synchronize('cuda:1')
torch.cuda.synchronize(1)
torch.cuda.synchronize(torch.device('cuda:1'))
with self.assertRaisesRegex(ValueError, "Expected a cuda device, but"):
torch.cuda.synchronize(torch.device("cpu"))
with self.assertRaisesRegex(ValueError, "Expected a cuda device, but"):
torch.cuda.synchronize("cpu")
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_current_stream(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
s0 = torch.cuda.current_stream()
s1 = torch.cuda.current_stream(device=1)
s2 = torch.cuda.current_stream(device=0)
self.assertEqual(d0, s0.device)
self.assertEqual(d1, s1.device)
self.assertEqual(d0, s2.device)
self.assertEqual(s0, s2)
with torch.cuda.device(d1):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.current_stream(1)
s2 = torch.cuda.current_stream(d0)
self.assertEqual(d1, s0.device)
self.assertEqual(d1, s1.device)
self.assertEqual(d0, s2.device)
self.assertEqual(s0, s1)
with self.assertRaisesRegex(ValueError,
"Expected a cuda device, but got: cpu"):
torch.cuda.current_stream(torch.device('cpu'))
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
@skipCUDANonDefaultStreamIf(True)
def test_default_stream(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.default_stream()
with torch.cuda.device(d1):
s1 = torch.cuda.default_stream()
s2 = torch.cuda.default_stream(device=0)
s3 = torch.cuda.default_stream(d1)
self.assertEqual(d0, s0.device)
self.assertEqual(d1, s1.device)
self.assertEqual(d0, s2.device)
self.assertEqual(d1, s3.device)
self.assertEqual(s0, s2)
self.assertEqual(s1, s3)
with torch.cuda.device(d0):
self.assertEqual(torch.cuda.current_stream(), s0)
with torch.cuda.device(d1):
self.assertEqual(torch.cuda.current_stream(), s1)
with self.assertRaisesRegex(ValueError,
"Expected a cuda device, but got: cpu"):
torch.cuda.default_stream(torch.device('cpu'))
@skipCUDANonDefaultStreamIf(True)
def test_streams(self):
default_stream = torch.cuda.current_stream()
user_stream = torch.cuda.Stream()
self.assertEqual(torch.cuda.current_stream(), default_stream)
self.assertNotEqual(default_stream, user_stream)
self.assertEqual(default_stream.cuda_stream, 0)
self.assertNotEqual(user_stream.cuda_stream, 0)
with torch.cuda.stream(user_stream):
self.assertEqual(torch.cuda.current_stream(), user_stream)
self.assertTrue(user_stream.query())
tensor1 = torch.ByteTensor(5).pin_memory()
tensor2 = tensor1.cuda(non_blocking=True) + 1
default_stream.synchronize()
self.assertTrue(default_stream.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_event_device(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
e0 = torch.cuda.Event()
self.assertEqual(None, e0.device)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
s0.record_event(e0)
with torch.cuda.device(d1):
s1 = torch.cuda.Stream()
e1 = s1.record_event()
self.assertEqual(s0.device, torch.device('cuda:0'))
self.assertEqual(e0.device, torch.device('cuda:0'))
self.assertEqual(s1.device, torch.device('cuda:1'))
self.assertEqual(e1.device, torch.device('cuda:1'))
def test_stream_event_repr(self):
s = torch.cuda.current_stream()
self.assertTrue("torch.cuda.Stream" in s.__repr__())
e = torch.cuda.Event()
self.assertTrue("torch.cuda.Event" in e.__repr__())
s.record_event(e)
self.assertTrue("torch.cuda.Event" in e.__repr__())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_context(self):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.Stream(device=1)
s2 = torch.cuda.Stream(device=0)
with torch.cuda.device(s1.device):
prev_stream_on_cuda1 = torch.cuda.current_stream()
self.assertEqual(torch.cuda.current_stream(), s0)
self.assertEqual(0, torch.cuda.current_device())
with torch.cuda.stream(s1):
self.assertEqual(torch.cuda.current_stream(), s1)
self.assertEqual(1, torch.cuda.current_device())
with torch.cuda.stream(s2):
self.assertEqual(torch.cuda.current_stream(), s2)
self.assertEqual(0, torch.cuda.current_device())
with torch.cuda.stream(s0):
self.assertEqual(torch.cuda.current_stream(), s0)
self.assertEqual(0, torch.cuda.current_device())
self.assertEqual(torch.cuda.current_stream(), s2)
self.assertEqual(0, torch.cuda.current_device())
self.assertEqual(torch.cuda.current_stream(), s1)
self.assertEqual(1, torch.cuda.current_device())
with torch.cuda.device(s1.device):
self.assertEqual(prev_stream_on_cuda1, torch.cuda.current_stream())
self.assertEqual(torch.cuda.current_stream(), s0)
self.assertEqual(0, torch.cuda.current_device())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu(self):
default_stream = torch.cuda.current_stream()
self.assertEqual(default_stream.device, torch.device('cuda:0'))
stream = torch.cuda.Stream(device=1)
self.assertEqual(stream.device, torch.device('cuda:1'))
with torch.cuda.device(1):
self.assertEqual(
torch.cuda.current_stream().device, torch.device('cuda:1'))
self.assertNotEqual(torch.cuda.current_stream(), default_stream)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu_query(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
torch.cuda.synchronize(d0)
torch.cuda.synchronize(d1)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
self.assertTrue(s0.query())
self.assertFalse(s1.query())
with torch.cuda.device(d0):
self.assertTrue(s0.query())
self.assertFalse(s1.query())
with torch.cuda.device(d1):
self.assertTrue(s0.query())
self.assertFalse(s1.query())
# deliberately using a different device
with torch.cuda.device(d0):
s1.synchronize()
self.assertTrue(s0.query())
self.assertTrue(s1.query())
with torch.cuda.device(d0):
self.assertTrue(s0.query())
self.assertTrue(s1.query())
with torch.cuda.device(d1):
self.assertTrue(s0.query())
self.assertTrue(s1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu_eq(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.current_stream()
with torch.cuda.device(d1):
s2 = torch.cuda.current_stream()
s3 = torch.cuda.current_stream()
self.assertTrue(s0 == s0)
self.assertTrue(s0 == s1)
self.assertTrue(s2 == s2)
self.assertTrue(s2 == s3)
self.assertFalse(s0 == s2)
self.assertFalse(s1 == s3)
self.assertEqual(s0.device, s1.device)
self.assertEqual(s0.cuda_stream, s1.cuda_stream)
self.assertEqual(s2.device, s3.device)
self.assertEqual(s2.cuda_stream, s3.cuda_stream)
self.assertNotEqual(s0.device, s3.device)
self.assertEqual(hash(s0), hash(s1))
self.assertEqual(hash(s2), hash(s3))
self.assertNotEqual(hash(s0), hash(s3))
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_streams_priority(self):
low, high = torch.cuda.Stream.priority_range()
s0 = torch.cuda.Stream(device=0, priority=low)
self.assertEqual(low, s0.priority)
self.assertEqual(torch.device('cuda:0'), s0.device)
s1 = torch.cuda.Stream(device=1, priority=high)
self.assertEqual(high, s1.priority)
self.assertEqual(torch.device('cuda:1'), s1.device)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_tensor_device(self):
self.assertEqual(torch.cuda.FloatTensor(1).get_device(), 0)
self.assertEqual(torch.cuda.FloatTensor(1, device=1).get_device(), 1)
with torch.cuda.device(1):
self.assertEqual(torch.cuda.FloatTensor(1).get_device(), 1)
self.assertEqual(torch.cuda.FloatTensor(1, device=0).get_device(), 0)
self.assertEqual(torch.cuda.FloatTensor(1, device=None).get_device(), 1)
def test_events(self):
stream = torch.cuda.current_stream()
event = torch.cuda.Event(enable_timing=True)
self.assertTrue(event.query())
start_event = torch.cuda.Event(enable_timing=True)
stream.record_event(start_event)
torch.cuda._sleep(int(50 * get_cycles_per_ms()))
stream.record_event(event)
self.assertFalse(event.query())
event.synchronize()
self.assertTrue(event.query())
self.assertGreater(start_event.elapsed_time(event), 0)
@staticmethod
def _stream_synchronize(self, spin_time_cycles):
s = torch.cuda.current_stream()
e_tik = torch.cuda.Event(enable_timing=True)
e_tok = torch.cuda.Event(enable_timing=True)
e_tik.record(s)
torch.cuda._sleep(spin_time_cycles)
e_tok.record(s)
s.synchronize()
self.assertTrue(s.query())
# not necessary to check e_tik and e_tok, as elapsed_time would throw
# exception if otherwise.
return e_tik.elapsed_time(e_tok)
@staticmethod
def _event_synchronize(self, spin_time_cycles):
s = torch.cuda.current_stream()
e_tik = torch.cuda.Event(enable_timing=True)
e_tok = torch.cuda.Event(enable_timing=True)
e_tik.record(s)
torch.cuda._sleep(spin_time_cycles)
s.record_event(e_tok)
e_tok.synchronize()
self.assertTrue(s.query())
# not necessary to check e_tik and e_tok, as elapsed_time would throw
# exception if otherwise.
return e_tik.elapsed_time(e_tok)
@staticmethod
def _event_wait(self, spin_time_cycles):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.Stream()
e_tik = torch.cuda.Event(blocking=True, enable_timing=True)
e_tok = torch.cuda.Event(blocking=True, enable_timing=True)
e_tik.record(s0)
torch.cuda._sleep(spin_time_cycles - 10)
e_sync = torch.cuda.Event(blocking=True)
e_sync.record()
e_sync.wait(s1)
with torch.cuda.stream(s1):
torch.cuda._sleep(10)
s1.synchronize()
e_tok.record()
e_tok.synchronize()
self.assertTrue(s0.query())
self.assertTrue(s1.query())
self.assertTrue(e_sync.query())
# not necessary to check e_tik and e_tok, as elapsed_time would throw
# exception if otherwise.
return e_tik.elapsed_time(e_tok)
@staticmethod
def _test_stream_event_nogil(self, sync_func, p2c, c2p):
with torch.cuda.device('cuda:1'):
c2p.put(0)
p2c.get()
c2p.put(sync_func(self, TestCuda.FIFTY_MIL_CYCLES))
# Skip the test for ROCm as per https://github.com/pytorch/pytorch/issues/53190
@skipIfRocm
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_event_nogil(self):
for sync_func in [TestCuda._stream_synchronize,
TestCuda._event_synchronize,
TestCuda._event_wait]:
p2c = queue.Queue()
c2p = queue.Queue()
e_tik = torch.cuda.Event(enable_timing=True)
e_tok = torch.cuda.Event(enable_timing=True)
t = threading.Thread(
target=TestCuda._test_stream_event_nogil,
args=(self, sync_func, p2c, c2p))
t.daemon = True
t.start()
c2p.get()
with torch.cuda.device('cuda:0'):
e_tik.record()
p2c.put(0)
parent_time = sync_func(self, TestCuda.FIFTY_MIL_CYCLES)
child_time = c2p.get()
e_tok.record()
e_tok.synchronize()
total_time = e_tik.elapsed_time(e_tok)
            # Without the GIL, synchronizations in the parent and child threads
            # can overlap. The total execution time should be a little longer
            # than spinning fifty million cycles and much shorter than twice
            # that. However, testing absolute execution time is not reliable as
            # it may vary on different hardware and in different environments.
            # Therefore, this test uses a relative comparison, checking that the
            # sum of the parent and child threads' execution times exceeds the
            # real execution time by at least 40%.
self.assertGreater(parent_time + child_time, total_time * 1.4)
# This test is flaky for ROCm, see issue #62602
@skipIfRocm
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_events_wait(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
torch.cuda.synchronize(d0)
torch.cuda.synchronize(d1)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
e0 = torch.cuda.Event()
s0.record_event(e0)
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
self.assertFalse(s0.query())
self.assertTrue(s1.query())
s1.wait_event(e0)
s1.synchronize()
self.assertTrue(e0.query())
self.assertTrue(s0.query())
self.assertTrue(s1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_events_multi_gpu_query(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
e0 = s0.record_event()
s0.synchronize()
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
e1 = s1.record_event()
self.assertTrue(e0.query())
self.assertFalse(e1.query())
with torch.cuda.device(d0):
self.assertTrue(e0.query())
self.assertFalse(e1.query())
with torch.cuda.device(d1):
self.assertTrue(e0.query())
self.assertFalse(e1.query())
# deliberately using a different device
with torch.cuda.device(d0):
e1.synchronize()
self.assertTrue(e0.query())
self.assertTrue(e1.query())
with torch.cuda.device(d0):
self.assertTrue(e0.query())
self.assertTrue(e1.query())
with torch.cuda.device(d1):
self.assertTrue(e0.query())
self.assertTrue(e1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
@skipIfRocm
def test_events_multi_gpu_elapsed_time(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
e0 = torch.cuda.Event(enable_timing=True)
torch.cuda._sleep(10)
s0.record_event(e0)
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
e1 = torch.cuda.Event(enable_timing=True)
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
s1.record_event(e1)
e0.synchronize()
e1.synchronize()
with torch.cuda.device(d0):
with self.assertRaises(RuntimeError):
self.assertGreater(e0.elapsed_time(e1), 0)
with torch.cuda.device(d1):
with self.assertRaises(RuntimeError):
self.assertGreater(e0.elapsed_time(e1), 0)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
e2 = torch.cuda.Event(enable_timing=True)
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
s0.record_event(e2)
s0.synchronize()
self.assertGreater(e0.elapsed_time(e2), 0)
# deliberately calling from a different device
with torch.cuda.device(d1):
self.assertGreater(e0.elapsed_time(e2), 0)
def test_record_stream(self):
cycles_per_ms = get_cycles_per_ms()
t = torch.FloatTensor([1, 2, 3, 4]).pin_memory()
result = torch.cuda.FloatTensor(t.size())
stream = torch.cuda.Stream()
ptr = [None]
# Performs the CPU->GPU copy in a background stream
def perform_copy():
with torch.cuda.stream(stream):
tmp = t.cuda(non_blocking=True)
ptr[0] = tmp.data_ptr()
torch.cuda.current_stream().wait_stream(stream)
tmp.record_stream(torch.cuda.current_stream())
torch.cuda._sleep(int(50 * cycles_per_ms)) # delay the copy
result.copy_(tmp)
perform_copy()
with torch.cuda.stream(stream):
tmp2 = torch.cuda.FloatTensor(t.size())
tmp2.zero_()
            self.assertNotEqual(tmp2.data_ptr(), ptr[0], msg='allocation re-used too soon')
self.assertEqual(result.tolist(), [1, 2, 3, 4])
# Check that the block will be re-used after the main stream finishes
torch.cuda.current_stream().synchronize()
with torch.cuda.stream(stream):
tmp3 = torch.cuda.FloatTensor(t.size())
self.assertEqual(tmp3.data_ptr(), ptr[0], msg='allocation not re-used')
def test_record_stream_on_shifted_view(self):
# See issue #27366
        # This test detects unexpected block reallocation. For a reliable test,
        # the stream used to allocate tensors is isolated; the allocator will
        # not reuse free blocks that were allocated on another stream.
stream_alloc = torch.cuda.Stream()
with torch.cuda.stream(stream_alloc):
base = torch.cuda.FloatTensor([10, 10])
# Record another stream on a shifted view tensor.
view = base[5:]
assert view.storage_offset() > 0
stream_record = torch.cuda.Stream()
with torch.cuda.stream(stream_record):
torch.cuda._sleep(int(50 * get_cycles_per_ms()))
view.record_stream(stream_record)
# Delete those tensors to make the block free soon.
data_ptr = base.data_ptr()
del base, view
# A new tensor should not be allocated to the block above.
stream_alloc.synchronize()
with torch.cuda.stream(stream_alloc):
try_realloc = torch.cuda.FloatTensor([10, 10])
self.assertNotEqual(try_realloc.data_ptr(), data_ptr)
@contextlib.contextmanager
def _get_external_stream(self, device):
cudart = torch.cuda.cudart()
stream = ctypes.c_ulonglong(0)
stream_p = ctypes.POINTER(ctypes.c_void_p)(stream)
stream_p_int = ctypes.cast(stream_p, ctypes.c_void_p).value
with device:
try:
out = cudart.cudaStreamCreate(stream_p_int)
self.assertEqual(out, 0)
self.assertNotEqual(stream.value, 0)
yield stream.value
finally:
out = cudart.cudaStreamDestroy(stream.value)
self.assertEqual(out, 0)
@skipIfRocm
def test_external_streams(self):
device = torch.cuda.device(0)
with self._get_external_stream(device) as stream_v:
ext_stream = torch.cuda.streams.ExternalStream(stream_v)
self.assertEqual(stream_v, ext_stream.cuda_stream)
self.assertEqual(ext_stream.device.index, device.idx)
@skipIfRocm
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_external_streams_multi_device(self):
device = torch.cuda.device(1)
with self._get_external_stream(device) as stream_v:
ext_stream = torch.cuda.streams.ExternalStream(
stream_v, device=device)
self.assertEqual(stream_v, ext_stream.cuda_stream)
self.assertEqual(ext_stream.device.index, device.idx)
def test_noncontiguous_pinned_memory(self):
# See issue #3266
x = torch.arange(0, 10).view((2, 5))
self.assertEqual(x.t(), x.t().pin_memory())
def test_caching_pinned_memory(self):
cycles_per_ms = get_cycles_per_ms()
# check that allocations are re-used after deletion
t = torch.FloatTensor([1]).pin_memory()
ptr = t.data_ptr()
del t
t = torch.FloatTensor([1]).pin_memory()
self.assertEqual(t.data_ptr(), ptr, msg='allocation not reused')
# check that the allocation is not re-used if it's in-use by a copy
gpu_tensor = torch.cuda.FloatTensor([0])
torch.cuda._sleep(int(50 * cycles_per_ms)) # delay the copy
gpu_tensor.copy_(t, non_blocking=True)
del t
t = torch.FloatTensor([1]).pin_memory()
self.assertNotEqual(t.data_ptr(), ptr, msg='allocation re-used too soon')
self.assertEqual(list(gpu_tensor), [1])
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_caching_pinned_memory_multi_gpu(self):
# checks that the events preventing pinned memory from being re-used
# too early are recorded on the correct GPU
cycles_per_ms = get_cycles_per_ms()
t = torch.FloatTensor([1]).pin_memory()
ptr = t.data_ptr()
gpu_tensor0 = torch.cuda.FloatTensor([0], device=0)
gpu_tensor1 = torch.cuda.FloatTensor([0], device=1)
with torch.cuda.device(1):
torch.cuda._sleep(int(50 * cycles_per_ms)) # delay the copy
gpu_tensor1.copy_(t, non_blocking=True)
del t
t = torch.FloatTensor([2]).pin_memory()
self.assertNotEqual(t.data_ptr(), ptr, msg='allocation re-used too soon')
with torch.cuda.device(0):
gpu_tensor0.copy_(t, non_blocking=True)
self.assertEqual(gpu_tensor1[0], 1)
self.assertEqual(gpu_tensor0[0], 2)
def test_caching_allocator_record_stream_oom(self):
"""allocations delayed by a record_stream call should still be freed on
an out-of-memory in cuda_malloc_retry. see issue #19219"""
stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
y = torch.zeros(40 * 1024 * 1024, device='cuda')
for _ in range(100):
x = torch.empty(40 * 1024 * 1024, device='cuda')
with torch.cuda.stream(stream):
y += x
# delays re-use of `x` until after all operations in `stream`
x.record_stream(stream)
del x
# we've made a mess by allocating up to the device capacity. free any
# cached blocks in case it affects future tests.
torch.cuda.empty_cache()
# Tests for historic illegal memory access, see #17040.
def test_reduction_gpu_memory_accessing(self):
x = torch.ones(512, 8, dtype=torch.float32, device='cuda')
torch.sum(x, 0)
def test_sum_fp16(self):
x = torch.zeros(10, device='cuda', dtype=torch.float16)
self.assertEqual(x.sum(), 0)
x = torch.ones(65504, device='cuda', dtype=torch.float16)
self.assertEqual(x.sum(), 65504)
self.assertEqual(x.sum(dtype=torch.float32), 65504)
x = torch.ones(65536, device='cuda', dtype=torch.float16)
self.assertEqual(x.sum(dtype=torch.float32), 65536)
a = torch.zeros(1203611).bernoulli_(0.0005)
x = a.to(device='cuda', dtype=torch.float16)
self.assertEqual(x.sum().item(), a.sum().item())
a = torch.zeros(100, 121, 80).bernoulli_(0.0005)
x = a.to(device='cuda', dtype=torch.float16)
self.assertEqual(x.sum((0, 2)).float().cpu(), a.sum((0, 2)))
def test_mean_fp16(self):
x = torch.ones(65536, device='cuda', dtype=torch.float16)
self.assertEqual(x.mean(), 1)
x = torch.ones(65536, device='cuda', dtype=torch.float16)
self.assertEqual(x.mean(dtype=torch.float32), 1)
def test_prod_large(self):
# tests global reduction (should_global_reduce = true) in case of non-zero identity element
x = torch.ones(240000, device='cuda', dtype=torch.float32)
self.assertEqual(x.prod(), 1)
# test for complex types. Note 240k is divisible by 4
for dtype in [torch.cfloat, torch.cdouble]:
x = torch.ones(240000, device='cuda', dtype=dtype) * (0 + 1j)
self.assertEqual(x.prod(), 1)
def test_multinomial_ext(self):
# Test two corner cases from older PyTorch (Issue #4858)
freqs = torch.cuda.FloatTensor([
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.03178183361887932, 0.027680952101945877, 0.033176131546497345,
0.046052902936935425, 0.07742464542388916, 0.11543981730937958,
0.14148041605949402, 0.15784293413162231, 0.13180233538150787,
0.08271478116512299, 0.049702685326337814, 0.027557924389839172,
0.018125897273421288, 0.011851548217236996, 0.010252203792333603,
0.007422595750540495, 0.005372154992073774, 0.0045109698548913,
0.0036087757907807827, 0.0035267581697553396, 0.0018864056328311563,
0.0024605290964245796, 0.0022964938543736935, 0.0018453967059031129,
0.0010662291897460818, 0.0009842115687206388, 0.00045109697384759784,
0.0007791675161570311, 0.00020504408166743815, 0.00020504408166743815,
0.00020504408166743815, 0.00012302644609007984, 0.0,
0.00012302644609007984, 4.100881778867915e-05, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0])
torch.cuda.manual_seed(11042)
sample = torch.multinomial(freqs, 1000, True)
self.assertNotEqual(freqs[sample].min(), 0)
p = torch.zeros(3421, 2, device="cuda", dtype=torch.float)
p[:, 1] = 1
torch.cuda.manual_seed(5214)
r = torch.multinomial(p, 1)
self.assertNotEqual(r.min().item(), 0)
# test corner case from Issue #13867
torch.cuda.manual_seed(33)
probs = torch.randn(1000000, device='cuda').clamp(min=0) * 3e-5
samples = probs.multinomial(1000000, replacement=True)
self.assertGreater(probs[samples].min().item(), 0)
def _spawn_test_multinomial_invalid_probs_cuda(self, probs):
import subprocess
try:
p = subprocess.Popen([sys.executable, '-c', f"""\
import sys
import torch
from torch._six import inf, nan
try:
with torch.random.fork_rng(devices=[0]):
torch.multinomial(torch.tensor({probs}).to('cuda'), 2, replacement=True)
torch.cuda.synchronize()
sys.exit(-1) # Should not be reached
except RuntimeError as e:
sys.exit(-2)
"""], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
out, err = p.communicate(timeout=10)
p.wait(timeout=10)
except subprocess.TimeoutExpired as e:
p.kill()
out, err = p.communicate()
expected_messages = [
'device-side assert triggered', # CUDA
'Assertion', # CUDA
'HSA_STATUS_ERROR_EXCEPTION', # ROCm
'Device-side assertion' # ROCm
]
self.assertTrue(any([msg in out or msg in err for msg in expected_messages]))
@slowTest
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
def test_multinomial_invalid_probs_cuda(self):
self._spawn_test_multinomial_invalid_probs_cuda([1., -1., 1.])
self._spawn_test_multinomial_invalid_probs_cuda([1., inf, 1.])
self._spawn_test_multinomial_invalid_probs_cuda([1., -inf, 1.])
self._spawn_test_multinomial_invalid_probs_cuda([1., 1., nan])
@slowTest
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
def test_huge_index(self):
src = torch.empty(15000000, 45, device='cuda', dtype=torch.long).random_(0, 2**22)
idx = torch.randperm(src.shape[0], device='cuda')
res = src[idx]
res_cpu = src.cpu()[idx.cpu()]
self.assertEqual(res.cpu(), res_cpu)
def test_tensor_gather(self):
AbstractTestCases._TestTorchMixin._test_gather(self, lambda t: t.cuda(), False)
def test_tensor_scatter(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(), 'scatter_', test_bounds=False)
def test_tensor_scatterAdd(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(), 'scatter_add_', test_bounds=False)
def test_scatter_add_mult_index_base(self):
AbstractTestCases._TestTorchMixin._test_scatter_add_mult_index_base(self, lambda t: t.cuda())
def test_tensor_scatterFill(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(),
'scatter_', True, test_bounds=False)
def test_tensor_scatter_complex(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(),
'scatter_', test_bounds=False, test_complex=True)
def test_tensor_scatterAdd_complex(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(),
'scatter_add_', test_bounds=False, test_complex=True)
def test_tensor_scatterFill_complex(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(),
'scatter_', True, test_bounds=False, test_complex=True)
def test_min_max_inits(self):
# Testing if THC_reduceAll received the correct index initialization.
# This affects the result of THC_reduceAll operations at extreme values
x = torch.cuda.ByteTensor([0])
y = torch.cuda.ByteTensor([255])
expected = torch.cuda.LongTensor([0])[0]
_, v = x.max(dim=0)
self.assertEqual(v, expected)
_, v = y.min(dim=0)
self.assertEqual(v, expected)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_get_set_rng_state_all(self):
states = torch.cuda.get_rng_state_all()
before0 = torch.cuda.FloatTensor(100, device=0).normal_()
before1 = torch.cuda.FloatTensor(100, device=1).normal_()
torch.cuda.set_rng_state_all(states)
after0 = torch.cuda.FloatTensor(100, device=0).normal_()
after1 = torch.cuda.FloatTensor(100, device=1).normal_()
self.assertEqual(before0, after0, atol=0, rtol=0)
self.assertEqual(before1, after1, atol=0, rtol=0)
def test_nvtx(self):
# Just making sure we can see the symbols
torch.cuda.nvtx.range_push("foo")
torch.cuda.nvtx.mark("bar")
torch.cuda.nvtx.range_pop()
def test_bincount_ext(self):
# ensure CUDA code coverage
input_size = (5000,)
w = torch.randn(input_size, dtype=torch.double, device='cuda')
w_cpu = w.cpu()
# test shared memory impl
t = torch.randint(50, input_size, dtype=torch.int8, device='cuda')
self.assertEqual(t.cpu().bincount(), t.bincount())
self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))
# test multi block memory impl
# see `THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM` in SummaryOps.cu
t = torch.randint(500, input_size, dtype=torch.int64, device='cuda')
self.assertEqual(t.cpu().bincount(), t.bincount())
self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))
# test global memory impl
# see `THRESH_NUMBER_BINS_FOR_GLOBAL_MEM` in SummaryOps.cu
t = torch.randint(2000, input_size, dtype=torch.int64, device='cuda')
self.assertEqual(t.cpu().bincount(), t.bincount())
self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))
t = torch.zeros([10], dtype=torch.int32, device='cuda')
        # 35488 * 65536 as int32 would overflow to a negative value,
        # giving a negative bin offset
t[0] = 35488
counted = t.bincount(minlength=65536)
self.assertEqual(torch.sum(counted), 10)
def test_tiny_half_norm_(self):
a = torch.arange(25).cuda().float()
a /= 100000000
b = a.half()
self.assertGreater(b.norm().item(), 0)
def test_norm_type_conversion(self):
a = torch.ones(65536).cuda().half()
self.assertEqual(a.norm(p=0, dtype=torch.float32), 65536)
# Test that wrap_with_cuda_memory_check successfully detects leak
def test_cuda_memory_leak_detection(self):
l = []
@self.wrap_with_cuda_memory_check
def no_leak():
pass
@self.wrap_with_cuda_memory_check
def leak_gpu0():
l.append(torch.tensor(10, device=torch.device("cuda:0")))
no_leak()
with self.assertRaisesRegex(AssertionError, r"leaked \d+ bytes CUDA memory on device 0"):
leak_gpu0()
if TEST_MULTIGPU:
@self.wrap_with_cuda_memory_check
def leak_gpu1():
l.append(torch.tensor(10, device=torch.device("cuda:1")))
with self.assertRaisesRegex(AssertionError, r"leaked \d+ bytes CUDA memory on device 1"):
leak_gpu1()
def test_cuda_memory_leak_detection_propagates_errors(self):
with self.assertRaisesRegex(RuntimeError, r"The size of tensor a \(3\) must match"):
with self.assertLeaksNoCudaTensors():
x = torch.randn(3, 1, device='cuda')
y = torch.randn(2, 1, device='cuda')
z = x + y
def test_trilu_indices(self):
for test_args in tri_tests_args:
_compare_trilu_indices(self, *test_args, device='cuda')
# test default options
x = torch.ones(
3, 3, dtype=torch.long, device='cuda', layout=torch.strided)
self.assertEqual(
x.tril(0).nonzero().transpose(0, 1),
torch.tril_indices(3, 3, device='cuda'))
self.assertEqual(
x.triu(0).nonzero().transpose(0, 1),
torch.triu_indices(3, 3, device='cuda'))
def test_large_trilu_indices(self):
for test_args in tri_large_tests_args:
_compare_large_trilu_indices(self, *test_args, device='cuda')
@unittest.skipIf(not TEST_MEDIUM_TENSOR, "not enough memory")
def test_cuda_kernel_loop_overflow(self):
# Issue #24309: In extreme cases, the loop variable could overflow and continue
# the kernel loop with a negative index, causing a RuntimeError (invalid write):
x = torch.randn(1, 1, 1, 2**30 + 1, dtype=torch.float16, device="cuda")
expected = x[0, 0, 0, 2**30]
y = torch.nn.functional.avg_pool2d(x, kernel_size=1)
torch.cuda.synchronize()
self.assertEqual(y[0, 0, 0, 2**30], expected)
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
def test_cuda_kernel_loop_overflow_large(self):
# Make sure input.numel() > INT_MAX is handled:
x = torch.randn(1, 1, 1, 2**31, dtype=torch.float16, device="cuda")
with self.assertRaisesRegex(RuntimeError, "integer out of range"):
y = torch.nn.functional.avg_pool2d(x, kernel_size=1)
# Issue #24309: In extreme cases, the loop variable could overflow and continue
# the kernel loop with a negative index, causing a RuntimeError (invalid write):
x = torch.randn(1, 1, 1, 2**31 - 1, dtype=torch.float16, device="cuda")
expected = x[0, 0, 0, 2**31 - 2]
y = torch.nn.functional.avg_pool2d(x, kernel_size=1)
torch.cuda.synchronize()
self.assertEqual(y[0, 0, 0, 2**31 - 2], expected)
# this might create a reference cycle on self...
def _make_multiply_in_stream(self):
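# Returns a custom autograd Function whose backward asserts it runs on the same
# stream its forward ran on, then sleeps to widen any synchronization race window.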
class MultiplyInStream(torch.autograd.Function):
@staticmethod
def forward(ctx, x, val):
ctx.val = val
ctx.stream = torch.cuda.current_stream()
return x * val
@staticmethod
def backward(ctx, grad):
self.assertEqual(torch.cuda.current_stream(), ctx.stream)
# delays the operation in the background stream
torch.cuda._sleep(1000 * 5000)
return grad * ctx.val, None
return MultiplyInStream
@skipCUDANonDefaultStreamIf(True)
def test_streaming_backwards_sync(self):
default_stream = torch.cuda.current_stream()
stream = torch.cuda.Stream()
MultiplyInStream = self._make_multiply_in_stream()
# Tests using grads outside the backward() stream context
# See "Stream semantics of backward passes" on https://pytorch.org/docs/stable/notes/cuda.html
x = torch.randn(5, 5, device='cuda', requires_grad=True)
with torch.cuda.stream(stream):
stream.wait_stream(default_stream)
output = MultiplyInStream.apply(x, 2)
output.sum().backward()
# sync needed
default_stream.wait_stream(stream)
self.assertEqual(x.grad, torch.ones_like(x) * 2)
self.assertEqual(torch.cuda.current_stream(), default_stream)
# Tests that using grads in the same stream context as backward()
# is safe regardless what streams bwd ops ran on
bwd_ambient_stream = torch.cuda.Stream()
x = torch.randn(5, 5, device='cuda', requires_grad=True)
with torch.cuda.stream(stream):
stream.wait_stream(default_stream)
output = MultiplyInStream.apply(x, 3)
with torch.cuda.stream(bwd_ambient_stream):
bwd_ambient_stream.wait_stream(stream)
output.sum().backward()
# x was first used on "stream" so its AccumulateGrad leaf should run on "stream".
# The end of backward() should have synced "bwd_ambient_stream" with "stream"
# so it should be safe to use x.grad here without any syncs.
self.assertEqual(x.grad, torch.ones_like(x) * 3)
self.assertEqual(torch.cuda.current_stream(), bwd_ambient_stream)
# Skip the test for ROCm as per https://github.com/pytorch/pytorch/issues/53190
@skipIfRocm
def test_streaming_backwards_multiple_streams(self):
MultiplyInStream = self._make_multiply_in_stream()
class StreamModel(torch.nn.Module):
def __init__(self):
super(StreamModel, self).__init__()
self.event = torch.cuda.Event()
self.stream0 = torch.cuda.Stream()
self.stream1 = torch.cuda.Stream()
def forward(self, x, x_first_use_on_ambient):
if x_first_use_on_ambient:
x0 = x.clone()
self.stream0.wait_stream(torch.cuda.current_stream())
self.stream1.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(self.stream0):
if not x_first_use_on_ambient:
x0 = x.clone()
y0 = MultiplyInStream.apply(x0, 2)
self.event.record(stream=torch.cuda.current_stream())
with torch.cuda.stream(self.stream1):
y1 = MultiplyInStream.apply(x, 3)
self.stream1.wait_event(self.event)
return y0 + y1
stream = torch.cuda.Stream()
for x_first_use_on_ambient in (True, False):
# the out_of_place=False, iters=1 case stresses whether proper syncs are inserted
# when grads are initially None and stolen by backward ops.
for out_of_place, iters in ((True, 1),
(False, 1),
(False, 5)):
with torch.cuda.stream(stream):
x = torch.randn(5, 5, device='cuda', requires_grad=True)
model = StreamModel().cuda()
x.register_hook(lambda grad: self.assertEqual(torch.cuda.current_stream(),
stream if x_first_use_on_ambient else model.stream0))
for p in model.parameters():
self.assertTrue(p.grad is None)
for i in range(iters):
loss = model(x, x_first_use_on_ambient).sum()
if out_of_place:
x_grad = torch.autograd.grad((loss,), (x,))[0]
else:
loss.backward()
# See "Stream semantics of backward passes" on https://pytorch.org/docs/stable/notes/cuda.html
torch.cuda.current_stream().wait_stream(stream)
if out_of_place:
self.assertEqual(x_grad, torch.ones_like(x) * 5 * iters)
else:
self.assertEqual(x.grad, torch.ones_like(x) * 5 * iters)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_streaming_backwards_device_transfer(self):
# This function must run with non-default current streams on all devices, otherwise it's meaningless.
# The intention is to test that to()'s backward (CopyBackward) interacts properly with the
# synchronization logic in torch/csrc/autograd/input_buffer.cpp.
dev0 = torch.device("cuda:0")
dev1 = torch.device("cuda:1")
# Unfortunately I need to make the tensors largeish.
# Bigger tensors = longer D2D transfers = more likely to expose races.
size = 2**26
a = torch.full((size,), 1, device=dev1, dtype=torch.float64, requires_grad=True)
b = torch.full((size,), 1, device=dev1, dtype=torch.float64, requires_grad=True)
# Here to_backward_recipient = a*b is used only once, so MulBackward's InputBuffer slot only expects 1 input.
# This tests the situation where we don't call InputBuffer::accumulate for MulBackward's InputBuffer.
to_backward_recipient = a * b
s = to_backward_recipient.to(device="cuda:0").sum()
torch.cuda.synchronize(device=dev0)
torch.cuda.synchronize(device=dev1)
s.backward()
self.assertTrue(a.grad.sum().item() == size)
self.assertTrue(b.grad.sum().item() == size)
# Here to_backward_recipient = a*b is used twice, so MulBackward's InputBuffer slot expects 2 inputs.
# This tests the situation where we do call InputBuffer::accumulate for MulBackward's InputBuffer.
a.grad = None
b.grad = None
to_backward_recipient = a * b
# Multiply by 2 here so to's backward creates gradient values that are different from the case above,
# to mitigate weirdness if the caching allocator happens to reuse memory regions that were populated
# with 1s by the case above
s0 = to_backward_recipient.to(device="cuda:0").sum() * 2.
s1 = to_backward_recipient.to(device="cuda:0").sum() * 2.
torch.cuda.synchronize(device=dev0)
torch.cuda.synchronize(device=dev1)
s0.backward(retain_graph=True)
s1.backward()
self.assertTrue(a.grad.sum().item() == 4 * size)
self.assertTrue(b.grad.sum().item() == 4 * size)
def test_streaming_backwards_sync_graph_root(self):
# This function tests if bwd ops running on a side stream properly sync with the GraphRoot.
# The potential bug it targets is a race condition. The test uses multiple trials and
# torch.cuda._sleep such that if the race condition exists, the test will almost certainly fail,
# but there's a chance it may spuriously pass. Passing does not guarantee the backend is bug-free,
# but failure does guarantee there is a bug.
fwd_bwd_op_stream = torch.cuda.Stream()
bwd_ambient_stream = torch.cuda.Stream()
# We need these streams to be different otherwise the test is meaningless.
self.assertTrue(fwd_bwd_op_stream != bwd_ambient_stream)
size = int(1e3)
a = torch.full((size,), 2.0, device="cuda", requires_grad=True)
b = torch.full((size,), 3.0, device="cuda", requires_grad=True)
# I don't think we need any manual record_streams below.
# a and b remain in scope for the entire test.
# c and grad remain in scope for each iteration, and there's a full sync between iterations.
for trial in range(5):
torch.cuda.synchronize()
a.grad = b.grad = None
with torch.cuda.stream(fwd_bwd_op_stream):
c = a * b
with torch.cuda.stream(bwd_ambient_stream):
torch.cuda.synchronize()
# Long-running dummy kernel on bwd_ambient_stream delays filling of grad
torch.cuda._sleep(int(50 * get_cycles_per_ms()))
# Fills grad on bwd_ambient_stream
grad = torch.full((size,), float(trial + 1), device="cuda")
# Bwd ops still run on fwd_bwd_op_stream, so the following will likely fail if
# bwd ops don't sync with bwd_ambient_stream before consuming grad.
torch.autograd.backward(tensors=c, grad_tensors=grad)
# See https://github.com/pytorch/pytorch/issues/47028
# The assertEqual checks below run on bwd_ambient_stream, so this test may also fail
# if backward() fails to sync with bwd_ambient_stream at the end.
# Synchronizing here works around the issue until a proper fix can be made.
torch.cuda.synchronize()
with torch.no_grad():
self.assertEqual(a.grad, grad * b)
self.assertEqual(b.grad, grad * a)
def test_streaming_backwards_callback(self):
# Tests if autograd callbacks sync properly with respect to leaf streams and
# the user-facing stream surrounding backward(). If it fails, first suspect is
# sync logic where "final_callbacks_" are called in torch/csrc/autograd/engine.cpp
MultiplyInStream = self._make_multiply_in_stream()
size = int(1e3)
a = torch.full((size,), 1, device="cuda", dtype=torch.float, requires_grad=True)
b = torch.full((size,), 1, device="cuda", dtype=torch.float, requires_grad=True)
s0 = torch.cuda.Stream()
s1 = torch.cuda.Stream()
s2 = torch.cuda.Stream()
stash = []
# sets up a nontrivial structure of leaf streams
s0.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s0):
c = MultiplyInStream.apply(a, 2)
s1.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s1):
d = MultiplyInStream.apply(b, 3)
s1.wait_stream(s0)
e = c * d
def clone_leaf_grads():
stash.append(a.grad.clone())
stash.append(b.grad.clone())
# Use a hook on e to install the callback
e.register_hook(lambda grad: torch.autograd.Variable._execution_engine.queue_callback(clone_leaf_grads))
s2.wait_stream(s1)
with torch.cuda.stream(s2):
e.sum().backward()
# The autograd engine should sync s2 with all leaf streams then run the callback clone_leaf_grads on s2.
# If those things happened properly, checking the values of the cloned grads on s2 should be safe:
self.assertEqual(stash[0], torch.full_like(a, 6))
self.assertEqual(stash[1], torch.full_like(a, 6))
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
@unittest.skipIf(IS_SANDCASTLE or IS_REMOTE_GPU, "Does not work on Sandcastle")
def test_cuda_init_race(self):
# See https://github.com/pytorch/pytorch/issues/16559
import subprocess
subprocess.check_call([sys.executable, '-c', """\
import torch
import threading
def worker(rank):
torch.tensor([1.]).cuda(rank)
t1 = threading.Thread(target=worker, args=(0,))
t2 = threading.Thread(target=worker, args=(1,))
t1.start()
t2.start()
"""])
def test_fixed_cuda_assert_async(self):
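# _assert_async should raise eagerly for tensors whose truthiness is ambiguous,
# accept truthy scalars, and trip a device-side assert (failing the subprocess)
# for falsy scalars.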
with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with no values is ambiguous"):
torch._assert_async(torch.tensor([], device="cuda"))
with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with more than one value is ambiguous"):
torch._assert_async(torch.tensor([0, 0], device="cuda"))
torch._assert_async(torch.tensor(1, device="cuda"))
torch._assert_async(torch.tensor(0.1, device="cuda"))
torch._assert_async(torch.tensor(-0.1, device="cuda"))
torch._assert_async(torch.tensor(True, device="cuda"))
torch._assert_async(torch.tensor(0 + 0.1j, device="cuda"))
fail_stmts = [
"torch._assert_async(torch.tensor(0, device='cuda'))",
"torch._assert_async(torch.tensor(0.0, device='cuda'))",
"torch._assert_async(torch.tensor(False, device='cuda'))",
"torch._assert_async(torch.tensor(0 + 0j, device='cuda'))",
]
import subprocess
for stmt in fail_stmts:
with self.subTest(stmt=stmt):
r = subprocess.call([sys.executable, '-c', f"""\
import torch
{stmt}
torch.cuda.synchronize()
"""])
self.assertTrue(r != 0)
def test_grad_scaling_unscale(self, dtype=torch.float):
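# Exercises the raw torch._amp_foreach_non_finite_check_and_unscale_ op on grad
# lists with assorted memory layouts, dtypes, and devices, then checks that
# GradScaler._unscale_grads_ groups a mixed "perfect storm" of grads correctly.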
inv_scale = torch.full((1,), 0.25, dtype=torch.float, device="cuda:0")
found_inf = torch.full((1,), 0.0, dtype=torch.float, device="cuda:0")
size = 10
g = torch.full((size, size), 4.0, dtype=dtype, device="cuda:0")
ginf = g.clone()
ginf[2, 2] = float('inf')
gnan = g.clone()
gnan[2, 2] = float('nan')
# Tries selected combinations of
# - contiguous grads
# - g.clone().t(), which is not contiguous but still non-overlapping and dense
# - variants of g.clone()[:, :5], which do not satisfy the non-overlapping-and-dense check
# Non-overlapping-and-dense grads route into a multi-tensor-apply kernel,
# others use a fallback per-tensor kernel, so we should try both.
cases = (
([g.clone(), g.clone()], False),
([g.clone(), g.clone().t()], False),
([g.clone(), g.clone()[:, :5]], False),
([g.clone()[:, :5], g.clone()[:, :5]], False),
([g.clone(), ginf.clone()], True),
([g.clone(), gnan.clone()], True),
([g.clone(), ginf.clone()[:, :5]], True),
([g.clone(), gnan.clone()[:, :5]], True),
([ginf.clone(), g.clone()[:, :5]], True),
([ginf.clone()[:, :5], g.clone()[:, :5]], True),
)
for grads, has_inf in cases:
found_inf.zero_()
torch._amp_foreach_non_finite_check_and_unscale_(grads, found_inf, inv_scale)
if has_inf:
self.assertEqual(found_inf, 1.0)
else:
self.assertEqual(found_inf, 0.0)
for grad in grads:
self.assertEqual(grad, torch.ones_like(grad), rtol=1e-5, atol=1e-7)
# When passing lists with mismatched dtypes to a raw
# _amp_foreach_non_finite_check_and_unscale_ call,
# it's expected to fall back to single-tensor TensorIterator kernel.
grads = [g.clone(), g.to(dtype=torch.float16)]
torch._amp_foreach_non_finite_check_and_unscale_(grads, found_inf, inv_scale)
for grad in grads:
self.assertEqual(grad, torch.ones_like(grad), rtol=1e-5, atol=1e-7)
# Passing lists with mismatched devices to a raw
# _amp_foreach_non_finite_check_and_unscale_ call should raise errors.
if TEST_MULTIGPU:
with self.assertRaisesRegex(RuntimeError, r"Expected all tensors to be on the same device"):
torch._amp_foreach_non_finite_check_and_unscale_([g.clone(), g.to(device="cuda:1")],
found_inf,
inv_scale)
# Creates a list of grads with mismatched dtypes and devices, to ensure
# scaler._unscale_grads_ organizes grads by dtype and device before calling
# _amp_foreach_non_finite_check_and_unscale_ on each set.
# If inject_inf >= 0, writes an inf into one grad for _unscale_grads_ to find.
def perfect_storm_grads(inject_inf):
grads = [g.clone(), g.clone()[:, :5], g.to(dtype=torch.float16), g.to(dtype=torch.float16)]
if TEST_MULTIGPU:
grads += [g.to(device="cuda:1"),
g.to(device="cuda:1")[:, :5],
g.to(device="cuda:1", dtype=torch.float16),
g.to(device="cuda:1", dtype=torch.float16)]
if inject_inf >= 0:
grads[inject_inf][2, 2] = float('inf')
return grads
scaler = torch.cuda.amp.GradScaler()
dummy_params = [torch.empty_like(g) for g in perfect_storm_grads(-1)]
dummy_opt = torch.optim.SGD(dummy_params, lr=1.)
# Ensures the inf/nan checking can find an inf injected onto any grad in the perfect storm.
for inject_inf in range(-1, len(dummy_params)):
found_inf = torch.full((1,), 0.0, dtype=torch.float, device="cuda:0")
grads = perfect_storm_grads(inject_inf)
for i, p in enumerate(dummy_params):
p.grad = grads[i]
found_inf_per_device = scaler._unscale_grads_(dummy_opt, inv_scale, found_inf, True)
if inject_inf < 0:
# No inf was injected, ensures unscaling worked normally.
self.assertTrue(sum(v.item() for v in found_inf_per_device.values()) == 0)
for grad in grads:
self.assertEqual(grad, torch.ones_like(grad), rtol=1e-5, atol=1e-7)
else:
# inf was injected, ensures inf was found.
self.assertTrue(sum(v.item() for v in found_inf_per_device.values()) == 1)
def test_grad_scaling_update_scale(self, device="cuda", dtype=torch.float):
growth = 2.0
backoff = 0.25
growth_interval = 2
scale = torch.full((1,), 4.0, dtype=dtype, device=device)
growth_tracker = torch.full((1,), 0.0, dtype=torch.int32, device=device)
found_inf = torch.full((1,), 0.0, dtype=torch.float, device="cuda:0")
# Simulates 2 consecutive unskipped iterations
torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)
self.assertEqual(growth_tracker, 1)
self.assertEqual(scale, 4.0)
torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)
self.assertEqual(growth_tracker, 0)
self.assertEqual(scale, 8.0)
# Simulates a skipped iteration
found_inf.fill_(1.0)
torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)
self.assertEqual(growth_tracker, 0)
self.assertEqual(scale, 2.0)
def test_grad_scaling_unscale_sparse(self, device="cuda", dtype=torch.float):
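# Verifies that _unscale_grads_ handles sparse gradients: it should unscale the
# values, flag infs/nans, and catch fp16 overflow that only appears once
# duplicated indices are coalesced.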
scaler = torch.cuda.amp.GradScaler()
inv_scale = torch.full((1,), 0.25, dtype=dtype, device=device)
found_inf = torch.empty((1,), dtype=dtype, device=device)
cur = found_inf.device
# As of d0c925f (4/16/20), docs are unclear about best API for sparse cuda tensor construction.
# https://pytorch.org/docs/master/tensors.html shows torch.sparse_coo_tensor(...), but it has no docstring.
# The same page shows several tensors with layout=torch.sparse_coo, but no constructors using that layout.
# Meanwhile, https://pytorch.org/docs/master/sparse.html shows torch.sparse.FloatTensor(...), which looks
# legacy and does not accept a device="cuda" kwarg. Going with torch.sparse_coo_tensor.
i = torch.tensor([[0, 1, 1],
[2, 0, 2]], device="cuda", dtype=torch.int64)
v = torch.tensor([16., 32., 64.], device="cuda", dtype=torch.float)
s = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=dtype)
p = s.clone()
assert p.is_sparse
opt = torch.optim.SGD([p], lr=1.)
p.grad = s.clone()
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]
self.assertEqual(found_inf, 0.0)
self.assertEqual(p.grad.to_dense(), (s / 4).to_dense())
v = torch.FloatTensor([16., 32., float('inf')])
p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=dtype)
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]
self.assertEqual(found_inf, 1.0)
v = torch.FloatTensor([16., 32., float('nan')])
p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=dtype)
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]
self.assertEqual(found_inf, 1.0)
p = s.clone().half()
assert p.is_sparse
opt = torch.optim.SGD([p], lr=1.)
p.grad = s.clone().half()
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, True)[cur]
self.assertEqual(found_inf, 0.0)
self.assertEqual(p.grad.to_dense(), (s.half() / 4).to_dense())
# Creates fp16 sparse tensor with duplicated indices (uncoalesced). The uncoalesced representation
# does not overflow in fp16, but the coalesced representation would, because 64000 + 64000 > fp16 max.
# _amp_non_finite_check_and_unscale_ should report an overflow here.
i = torch.LongTensor([[0, 1, 0],
[2, 0, 2]])
v = torch.FloatTensor([64000., 32., 64000.])
p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=torch.float16)
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, True)[cur]
self.assertEqual(found_inf, 1.0)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_device_as_key(self):
# Ensure that different instances of "device" objects that point to the same device
# are treated as identical keys by dicts. GradScaler relies on this behavior, and may
# error otherwise in a way that's difficult to detect (a silent performance hit).
d = {}
t = torch.empty((1,), device="cuda:0")
dev0a = torch.device("cuda:0")
dev0b = torch.device("cuda:0")
dev1a = torch.device("cuda:1")
dev1b = torch.device("cuda:1")
self.assertTrue(hash(dev0a) == hash(dev0b))
self.assertTrue(hash(dev1a) == hash(dev1b))
d[dev0a] = "0a"
d[dev0b] = "0b"
self.assertTrue(len(d) == 1)
self.assertTrue(d[dev0a] == "0b")
d[t.device] = "t"
self.assertTrue(len(d) == 1)
self.assertTrue(d[dev0a] == "t")
d[dev1a] = "1a"
d[dev1b] = "1b"
self.assertTrue(len(d) == 2)
self.assertTrue(d[dev1a] == "1b")
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_scale(self):
scaler = torch.cuda.amp.GradScaler(init_scale=2.)
t0 = torch.full((1,), 4.0, dtype=torch.float32, device="cuda:0")
t1 = torch.full((1,), 4.0, dtype=torch.float32, device="cuda:1")
# Create some nested iterables of tensors on different devices.
outputs = (t1.clone(), (t0.clone(), t1.clone()), [t0.clone(), (t1.clone(), t0.clone())])
outputs = scaler.scale(outputs)
self.assertTrue(outputs[0] == 8.0 and outputs[1][0] == 8.0 and outputs[1][1] == 8.0 and
outputs[2][0] == 8.0 and outputs[2][1][0] == 8.0 and outputs[2][1][1] == 8.0)
self.assertTrue(scaler._scale.device == t1.device)
def test_grad_scaling_state_dict(self):
for lazy_init_scale in True, False:
s0 = torch.cuda.amp.GradScaler(init_scale=3., growth_factor=4., backoff_factor=.5, growth_interval=2)
s1 = torch.cuda.amp.GradScaler(init_scale=6., growth_factor=7., backoff_factor=.8, growth_interval=1)
# sets a random value for load_state_dict to overwrite
s1._init_growth_tracker = 7
if lazy_init_scale:
# Dummy scale() call to ensure the scale tensor is lazily initialized.
s1.scale(torch.full((1,), 4.0, dtype=torch.float32, device="cuda:0"))
self.assertTrue(isinstance(s1._scale, torch.cuda.FloatTensor))
s1.load_state_dict(s0.state_dict())
self.assertEqual(s1.get_scale(), 3.)
self.assertEqual(s1.get_growth_factor(), 4.)
self.assertEqual(s1.get_backoff_factor(), .5)
self.assertEqual(s1.get_growth_interval(), 2)
self.assertEqual(s1._init_growth_tracker, 0)
def _create_scaling_models_optimizers(self, device="cuda"):
# Create a module+optimizer that will use scaling, and a control module+optimizer
# that will not use scaling, against which the scaling-enabled module+optimizer can be compared.
mod_control = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device)
mod_scaling = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device)
for c, s in zip(mod_control.parameters(), mod_scaling.parameters()):
s.data.copy_(c.data)
opt_control = torch.optim.SGD(mod_control.parameters(), lr=1.0)
opt_scaling = torch.optim.SGD(mod_scaling.parameters(), lr=1.0)
return mod_control, mod_scaling, opt_control, opt_scaling
def _create_scaling_case(self, device="cuda", dtype=torch.float):
data = [(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device))]
loss_fn = torch.nn.MSELoss().cuda()
skip_iter = 2
return self._create_scaling_models_optimizers(device=device) + (data, loss_fn, skip_iter)
# _run_scaling_case generalizes some single-optimizer test logic to avoid too much copy-pasting below.
def _run_scaling_case(self, run, unskipped, skipped, atol=1e-7):
# Ensure scaling can be disabled without changing user control flow.
for enabled in True, False:
mod_control, mod_scaling, opt_control, opt_scaling, data, loss_fn, skip_iter = self._create_scaling_case()
# For functionality, test with a modest initial scale, and an unrealistically-large growth factor
# so any potential errors with the growth factor handling will be magnified.
scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)
_ = run(data, mod_control, opt_control, scaler, loss_fn, skip_iter, False)
ret = run(data, mod_scaling, opt_scaling, scaler, loss_fn, skip_iter, True)
# Allows run() to optionally return a different scaler instance.
scaler = ret if ret else scaler
# If scaling was enabled, the scale factor should have been multiplied by the growth factor
# len(data) - skipped times and the backoff factor "skipped" times.
if enabled:
net_growth = scaler.get_growth_factor()**unskipped if unskipped > 0 else 1.0
net_backoff = scaler.get_backoff_factor()**skipped if skipped > 0 else 1.0
self.assertTrue(scaler.get_scale() == (128. * net_growth * net_backoff))
else:
self.assertTrue(scaler.get_scale() == 1.0)
for c, s in zip(mod_control.parameters(), mod_scaling.parameters()):
self.assertEqual(c, s, atol=atol, rtol=1e-05)
# Compares no scaling + no autocasting against scaling + autocasting.
def test_grad_scaling_autocast(self):
try_pickle = False
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
with torch.autocast('cuda', enabled=try_scaling_api):
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
scaler.scale(loss).backward()
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.step(optimizer)
scaler.update()
if try_pickle:
scaler = pickle.loads(pickle.dumps(scaler))
else:
loss.backward()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
return scaler
# sets atol=1e-3 because we're comparing pure fp32 arithmetic vs a mixture of fp16 and fp32
self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-3)
# this will be picked up by try_pickle within run():
try_pickle = True
self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-3)
def test_grad_scaling_clipping(self):
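# Like the autocast scaling test, but clips gradients. With scaling enabled the
# grads are still scaled when clipped, so max_norm is multiplied by the current scale.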
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
max_norm = 0.2 # A reasonable value that actually has an effect, based on printouts of grads
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
scaler.scale(loss).backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm * scaler.get_scale())
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-5)
def test_grad_scaling_clipping_separate_unscale(self):
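# Variant that calls scaler.unscale_(optimizer) first, so clipping can use the
# unscaled max_norm directly.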
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
max_norm = 0.2 # A reasonable value that actually has an effect, based on printouts of grads
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
scaler.scale(loss).backward()
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm, error_if_nonfinite=False)
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
self._run_scaling_case(run, unskipped=3, skipped=1)
@unittest.skipIf(IS_WINDOWS, 'FIXME: fix this test for Windows')
def test_grad_scaling_penalty(self):
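# Adds a gradient-penalty term to the loss. With scaling enabled, the intermediate
# grads from autograd.grad are scaled, so they are divided by the scale before
# the penalty is formed; the penalized loss is then scaled again for backward().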
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
grad_params = torch.autograd.grad(scaler.scale(loss),
model.parameters(), create_graph=True)
inv_scale = 1. / scaler.get_scale()
grad_params = [p * inv_scale for p in grad_params]
else:
grad_params = torch.autograd.grad(loss, model.parameters(), create_graph=True)
grad_norm = 0
for grad in grad_params:
grad_norm += grad.pow(2).sum()
grad_norm = grad_norm.sqrt()
loss = loss + grad_norm
if try_scaling_api:
scaler.scale(loss).backward()
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
self._run_scaling_case(run, unskipped=3, skipped=1)
def test_grad_scaling_accumulation(self):
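# Accumulates gradients over iters_to_accumulate mini-iterations; step()/update()
# and zero_grad() only run on the final iteration of each accumulation window.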
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
iters_to_accumulate = 2
for i, (input, target) in enumerate(data):
output = model(input)
loss = loss_fn(output, target)
loss = loss / iters_to_accumulate
if try_scaling_api:
scaler.scale(loss).backward()
else:
loss.backward()
if (i + 1) % iters_to_accumulate == 0:
if try_scaling_api:
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()
else:
optimizer.step()
optimizer.zero_grad()
self._run_scaling_case(run, unskipped=2, skipped=0)
def test_grad_scaling_multiple(self):
# Tests gradient scaling with 2 models and 2 optimizers that both receive gradients from 2 losses.
# Some of the logic here cannot reuse the generic helper functions created for the 1-optimizer cases.
for enabled in True, False:
mod_control0, mod_scaling0, opt_control0, opt_scaling0, data, loss_fn, skip_iter = \
self._create_scaling_case()
mod_control1, mod_scaling1, opt_control1, opt_scaling1 = \
self._create_scaling_models_optimizers()
scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)
def run(model0, model1, optimizer0, optimizer1, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer0.zero_grad()
optimizer1.zero_grad()
output0 = model0(input)
output1 = model1(input)
loss0 = loss_fn(0.3 * output0 + 0.7 * output1, target)
loss1 = loss_fn(0.6 * output0 - 0.4 * output1, target)
if try_scaling_api:
scaler.scale(loss0).backward(retain_graph=True)
scaler.scale(loss1).backward()
if i == skip_iter and scaler.is_enabled():
model1[1].weight.grad.data.fill_(float('inf'))
# As an additional stress test, separately unscale for one of the optimizers.
scaler.unscale_(optimizer0)
scaler.step(optimizer0)
scaler.step(optimizer1)
scaler.update()
else:
loss0.backward(retain_graph=True)
loss1.backward()
optimizer0.step()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer1.step()
run(mod_control0, mod_control1, opt_control0, opt_control1, False)
run(mod_scaling0, mod_scaling1, opt_scaling0, opt_scaling1, True)
# The loss scale should have been multiplied by the growth factor 3 times and the backoff factor once.
self.assertTrue(scaler.get_scale() == ((128. * scaler.get_growth_factor()**3 *
scaler.get_backoff_factor()**1) if enabled else 1.0))
for c, s in zip(chain(mod_control0.parameters(), mod_control1.parameters()),
chain(mod_scaling0.parameters(), mod_scaling1.parameters())):
self.assertEqual(c, s, rtol=1e-5, atol=1e-7)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_multigpu(self):
# Same as above, but runs some of the models on device 1.
# GradScaler should transparently handle losses and gradients on multiple devices.
# This test could be combined with the test above, but I think it makes sense to treat
# multi-GPU operations separately.
dev0 = torch.device("cuda:0")
dev1 = torch.device("cuda:1")
for enabled in True, False:
mod_control0, mod_scaling0, opt_control0, opt_scaling0, data, loss_fn, skip_iter = \
self._create_scaling_case()
mod_control1, mod_scaling1, opt_control1, opt_scaling1 = \
self._create_scaling_models_optimizers(device=dev1)
scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)
def run(model0, model1, optimizer0, optimizer1, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer0.zero_grad()
optimizer1.zero_grad()
output0 = model0(input)
output1 = model1(input.to(dev1))
loss0 = loss_fn(0.3 * output0 + 0.7 * output1.to(dev0), target)
loss1 = loss_fn(0.6 * output0.to(dev1) - 0.4 * output1, target.to(dev1))
if try_scaling_api:
scaler.scale(loss0).backward(retain_graph=True)
scaler.scale(loss1).backward()
if i == skip_iter and scaler.is_enabled():
model1[1].weight.grad.data.fill_(float('inf'))
# As an additional stress test, separately unscale for one of the optimizers.
scaler.unscale_(optimizer0)
scaler.step(optimizer0)
scaler.step(optimizer1)
# Make sure the found_infs were collected properly across optimizers and devices.
if scaler.is_enabled():
self.assertTrue(len(scaler._found_inf_per_device(optimizer0)) == 1)
self.assertTrue(len(scaler._found_inf_per_device(optimizer1)) == 1)
self.assertTrue(scaler._found_inf_per_device(optimizer0)[dev0].item() == 0.)
self.assertTrue(scaler._found_inf_per_device(optimizer1)[dev1].item() ==
float(i == skip_iter))
scaler.update()
else:
loss0.backward(retain_graph=True)
loss1.backward()
optimizer0.step()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer1.step()
run(mod_control0, mod_control1, opt_control0, opt_control1, False)
run(mod_scaling0, mod_scaling1, opt_scaling0, opt_scaling1, True)
# The loss scale should have been multiplied by the growth factor 3 times and the backoff factor once.
self.assertTrue(scaler.get_scale() == ((128. * scaler.get_growth_factor()**3 *
scaler.get_backoff_factor()**1) if enabled else 1.0))
# Copy mod_control1 and mod_scaling1 back to device 0 for comparison
mod_control1.to(dev0)
mod_scaling1.to(dev0)
for c, s in zip(chain(mod_control0.parameters(), mod_control1.parameters()),
chain(mod_scaling0.parameters(), mod_scaling1.parameters())):
self.assertEqual(c, s, rtol=1e-5, atol=1e-7)
def test_cublas_multiple_threads_same_device(self):
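# Launches matmul/div_ loops from several threads on one device to check that
# concurrent cublas usage on per-thread streams does not race or corrupt results.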
# Note: these parameters should be tuned very carefully.
# Too small a size makes it hard for the race condition to occur,
# while too large a size can sometimes cause the test to hang.
size = 1024
num_threads = 2
trials = 3
test_iters = 100
weight = torch.ones((size, size), device='cuda')
results = {}
barrier = threading.Barrier(num_threads)
def _worker(t):
my_stream = torch.cuda.Stream()
# Hard sync so we don't need to worry about creating and using tensors
# across streams or the fact that default streams are thread-local.
# Those issues are not the target of this test.
torch.cuda.synchronize()
# Line up threads to increase likelihood of race conditions.
barrier.wait()
with torch.cuda.stream(my_stream):
for i in range(test_iters):
# If all threads are sharing the same cublas handle,
# the following sequence may occur:
# thread 0 calls cublasSetStream()
# thread 1 calls cublasSetStream()
# thread 0 launches its raw gemm, which it thinks is in
# its own stream, but is actually in thread 1's stream.
# thread 0 enqueues its div_, which IS in its own stream,
# but actually now races with its gemm.
results[t] = torch.mm(results[t], weight)
results[t].div_(float(size))
torch.cuda.synchronize()
for _ in range(trials):
for t in range(num_threads):
results[t] = torch.ones((size, size), device='cuda')
threads = [threading.Thread(target=_worker,
args=(t,)) for t in range(num_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
for t in range(num_threads):
self.assertEqual(results[t].sum().item(), size * size)
# Test is flaky on Windows (https://github.com/pytorch/pytorch/issues/57401)
@unittest.skipIf(IS_WINDOWS, 'Test is flaky on Windows (see issue 57401)')
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
@skipIfRocm
def test_cudnn_multiple_threads_same_device(self):
# This function is intended to test the lazy creation and reuse of per-thread
# cudnn handles on each device in aten/src/ATen/cudnn/Handles.cpp.
# Failure here likely indicates something wrong with that logic.
weight = torch.ones((1, 1, 2, 2), device='cuda')
results = {}
num_threads = 2
trials = 3
test_iters = 1000
barrier = threading.Barrier(num_threads)
with torch.backends.cudnn.flags(enabled=True):
def _worker(t):
my_stream = torch.cuda.Stream()
# Hard sync so we don't need to worry about creating and using tensors
# across streams or the fact that default streams are thread-local.
# Those issues are not the target of this test.
torch.cuda.synchronize()
# Line up threads to increase likelihood of race conditions.
barrier.wait()
with torch.cuda.stream(my_stream):
for _ in range(test_iters):
# If all threads are sharing the same cudnn handle,
# the following sequence may occur:
# thread 0 calls setCuDNNStreamToCurrent()
# thread 1 calls setCuDNNStreamToCurrent()
# thread 0 launches its raw convolution, which it thinks is in
# its own stream, but is actually in thread 1's stream.
# thread 0 enqueues its div_, which IS in its own stream,
# but now races with its convolution.
results[t] = torch.nn.functional.conv2d(results[t], weight, padding=0)
results[t].div_(4.0)
torch.cuda.synchronize()
for _ in range(trials):
for t in range(num_threads):
results[t] = torch.ones((1, 1, 2048, 2048), device='cuda')
threads = [threading.Thread(target=_worker,
args=(t,)) for t in range(num_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
for t in range(num_threads):
self.assertEqual(results[t].sum().item(),
(2048 - test_iters) * (2048 - test_iters))
def test_cusparse_multiple_threads_same_device(self):
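# Same structure as the cublas test above, but stresses sparse @ dense mm from
# multiple threads on the same device.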
size = 1024
num_threads = 2
trials = 3
test_iters = 500
def ones_sparse(size):
a = torch.arange(size, device='cuda')
indices = torch.cartesian_prod(a, a).t()
values = torch.ones(size * size, device='cuda')
return torch.sparse_coo_tensor(indices, values)
weight = ones_sparse(size)
results = {}
barrier = threading.Barrier(num_threads)
def _worker(t):
my_stream = torch.cuda.Stream()
# Hard sync so we don't need to worry about creating and using tensors
# across streams or the fact that default streams are thread-local.
# Those issues are not the target of this test.
torch.cuda.synchronize()
# Line up threads to increase likelihood of race conditions.
barrier.wait()
with torch.cuda.stream(my_stream):
for i in range(test_iters):
# If all threads are sharing the same cusparse handle,
# the following sequence may occur:
# thread 0 calls cusparseSetStream()
# thread 1 calls cusparseSetStream()
# thread 0 launches its raw sparse mm, which it thinks is in
# its own stream, but is actually in thread 1's stream.
# thread 0 enqueues its div_, which IS in its own stream,
# but actually now races with its mm.
results[t] = weight.mm(results[t])
results[t].div_(float(size))
torch.cuda.synchronize()
for _ in range(trials):
for t in range(num_threads):
results[t] = torch.ones((size, size), device='cuda')
threads = [threading.Thread(target=_worker,
args=(t,)) for t in range(num_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
for t in range(num_threads):
self.assertEqual(results[t].sum().item(), size * size)
def _run_autocast_outofplace(self, op, args, run_as_type, out_type=None, module=torch, add_kwargs=None):
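# Runs `op` under autocast via the module-level and/or Tensor-method variants,
# checks the output dtype, then compares against a control computed with
# explicit casts while autocast is disabled.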
# helper to cast args
def cast(val, to_type):
if isinstance(val, torch.Tensor):
return val.to(to_type) if val.is_floating_point() else val
elif isinstance(val, collections.abc.Iterable):
return type(val)(cast(v, to_type) for v in val)
else:
return val
if add_kwargs is None:
add_kwargs = {}
fast_dtype = torch.bfloat16 if run_as_type == torch.bfloat16 else torch.float16
self.assertFalse(torch.is_autocast_enabled())
with torch.autocast('cuda', dtype=fast_dtype):
self.assertTrue(torch.is_autocast_enabled())
out_type = out_type if out_type is not None else run_as_type
output = output_method = None
# Try module.* variant, if requested:
if module is not None and hasattr(module, op):
output = getattr(module, op)(*args, **add_kwargs)
if isinstance(output, torch.Tensor):
self.assertTrue(out_type == output.dtype,
"autocast for torch.{} produced {}, should produce {}"
.format(op, output.dtype, out_type))
# Try Tensor.* variant:
if hasattr(torch.Tensor, op):
output_method = getattr(args[0], op)(*args[1:], **add_kwargs)
if isinstance(output_method, torch.Tensor):
self.assertTrue(out_type == output_method.dtype,
"autocast for Tensor.{} produced {}, should produce {}"
.format(op, output_method.dtype, out_type))
self.assertTrue((output is not None) or (output_method is not None),
"{} not found as an attribute on either Tensor or the requested module {}".format(
op, module))
# Accounts for ops that return Tensors, iterables, and other non-Tensors.
# For example, lstm_cell returns a tuple and equal returns bool.
def compare(first, second):
if isinstance(first, torch.Tensor):
return torch.equal(first, second)
elif isinstance(first, collections.abc.Iterable):
return all(compare(f, s) for f, s in zip(first, second))
else:
return first == second
# If both torch.* and Tensor.* variants were found, check outputs are identical
if (output is not None) and (output_method is not None):
self.assertTrue(type(output) == type(output_method))
comparison = compare(output, output_method)
self.assertTrue(comparison, "torch.{0} result did not match Tensor.{0} result".format(op))
# Compare numerics to Python-side "autocasting" that (we expect) does the same thing
# as the C++-side autocasting, and should be bitwise accurate.
output_to_compare = output if output is not None else output_method
with torch.autocast('cuda', enabled=False):
self.assertFalse(torch.is_autocast_enabled())
if module is not None and hasattr(module, op):
control = getattr(module, op)(*cast(args, run_as_type), **add_kwargs)
else:
control = getattr(args[0].to(run_as_type), op)(*cast(args[1:], run_as_type), **add_kwargs)
self.assertTrue(type(output_to_compare) == type(control))
comparison = compare(output_to_compare, control)
self.assertTrue(comparison, "torch.{} result did not match control".format(op))
self.assertTrue(torch.is_autocast_enabled())
self.assertFalse(torch.is_autocast_enabled())
def args_maybe_kwargs(self, op_with_args):
if len(op_with_args) == 2:
return op_with_args[0], op_with_args[1], {}
else:
return op_with_args[0], op_with_args[1], op_with_args[2]
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op_with_args in self.autocast_lists.torch_fp16:
skip_test = False
op, args = op_with_args[0], op_with_args[1]
if len(op_with_args) == 3:
skip_test = op_with_args[2] # TEST_WITH_ROCM
if not skip_test:
self._run_autocast_outofplace(op, args, torch.float16)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_bf16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op_with_args in self.autocast_lists.torch_fp16:
skip_test = False
op, args = op_with_args[0], op_with_args[1]
if len(op_with_args) == 3:
skip_test = op_with_args[2] # TEST_WITH_ROCM
should_error_from_not_implemented = 'cudnn' in op or 'prelu' in op or 'thnn' in op \
or 'fused' in op or 'gru' in op or op == '_thnn_fused_lstm_cell' or op == 'lstm_cell'
if not skip_test:
if should_error_from_not_implemented:
with self.assertRaises(RuntimeError, msg=str(op) + ' should not be supported for bfloat16!'):
self._run_autocast_outofplace(op, args, torch.bfloat16)
else:
if torch.cuda.is_bf16_supported():
self._run_autocast_outofplace(op, args, torch.bfloat16)
else:
with self.assertRaisesRegex(RuntimeError, 'Device does not support bfloat16'):
self._run_autocast_outofplace(op, args, torch.bfloat16)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_fp32(self):
for op_with_args in self.autocast_lists.torch_fp32:
op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
self._run_autocast_outofplace(op, args, torch.float32, add_kwargs=maybe_kwargs)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_need_autocast_promote(self):
for op, args in self.autocast_lists.torch_need_autocast_promote:
self._run_autocast_outofplace(op, args, torch.float32)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_expect_builtin_promote(self):
for op, args, out_type in self.autocast_lists.torch_expect_builtin_promote:
self._run_autocast_outofplace(op, args, torch.float32, out_type=out_type)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_nn_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.nn_fp16:
self._run_autocast_outofplace(op, args, torch.float16, module=torch._C._nn)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_nn_bf16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.nn_fp16:
if torch.cuda.is_bf16_supported():
self._run_autocast_outofplace(op, args, torch.bfloat16, module=torch._C._nn)
else:
with self.assertRaisesRegex(RuntimeError, 'Device does not support bfloat16'):
self._run_autocast_outofplace(op, args, torch.bfloat16, module=torch._C._nn)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_nn_fp32(self):
for op, args in self.autocast_lists.nn_fp32:
self._run_autocast_outofplace(op, args, torch.float32, module=torch._C._nn)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_linalg_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.linalg_fp16:
self._run_autocast_outofplace(op, args, torch.float16, module=torch._C._linalg)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_methods_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.methods_fp16:
self._run_autocast_outofplace(op, args, torch.float16, module=None)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_methods_fp32(self):
for op, args in self.autocast_lists.methods_fp32:
self._run_autocast_outofplace(op, args, torch.float32, module=None)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_methods_expect_builtin_promote(self):
for op, args, out_type in self.autocast_lists.methods_expect_builtin_promote:
self._run_autocast_outofplace(op, args, torch.float32, module=None, out_type=out_type)
def test_autocast_banned(self):
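# Ops on the banned list are expected to raise a RuntimeError when called
# inside an autocast-enabled region.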
with torch.autocast('cuda'):
for op, args, module in self.autocast_lists.banned:
with self.assertRaises(RuntimeError):
getattr(module, op)(*args)
def test_autocast_ignored_types(self):
with torch.autocast('cuda'):
for ignore_type in (torch.double, torch.int32):
a_ignore = torch.ones((8, 8), dtype=ignore_type, device="cuda:0")
b_ignore = torch.ones((8, 8), dtype=ignore_type, device="cuda:0")
c_16 = torch.ones((8, 8), dtype=torch.float16, device="cuda:0")
# Tests if CastPolicy::fp16 ops ignore double and int
# Currently, no ops belonging to this policy support integer inputs.
if ignore_type is torch.double:
with self.assertRaises(RuntimeError):
torch.mm(a_ignore, c_16)
with torch.autocast('cuda', enabled=False):
type_no_autocast = torch.mm(a_ignore, b_ignore).dtype
self.assertTrue(torch.mm(a_ignore, b_ignore).dtype is type_no_autocast)
# Tests if CastPolicy::fp32 ops ignore double and int
with torch.autocast('cuda', enabled=False):
type_no_autocast = torch.pow(a_ignore, 2.0).dtype
self.assertTrue(torch.pow(a_ignore, 2.0).dtype is type_no_autocast)
# Tests if CastPolicy::fp32_set_opt_dtype ops ignore double and int
with torch.autocast('cuda', enabled=False):
type_no_autocast = torch.sum(a_ignore).dtype
self.assertTrue(torch.sum(a_ignore).dtype is type_no_autocast)
# Tests if CastPolicy::fp32_append_dtype ops ignore double and int
# Currently, no ops belonging to this policy support integer inputs.
if ignore_type is torch.double:
with torch.autocast('cuda', enabled=False):
type_no_autocast = torch.norm(a_ignore).dtype
self.assertTrue(torch.norm(a_ignore).dtype is type_no_autocast)
def test_autocast_custom_enabled(self):
class MyMM(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd
def forward(ctx, a, b):
self.assertTrue(a.dtype is torch.float32)
self.assertTrue(b.dtype is torch.float32)
self.assertTrue(torch.is_autocast_enabled())
ctx.save_for_backward(a, b)
return a.mm(b)
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad):
self.assertTrue(torch.is_autocast_enabled())
a, b = ctx.saved_tensors
return grad.mm(b.t()), a.t().mm(grad)
mymm = MyMM.apply
x = torch.randn((8, 8), device="cuda", dtype=torch.float32, requires_grad=True)
y = torch.randn((8, 8), device="cuda", dtype=torch.float32, requires_grad=True)
with torch.cuda.amp.autocast():
output = mymm(x, y)
self.assertTrue(output.dtype is torch.float16)
loss = output.sum()
loss.backward()
def test_autocast_custom_cast_inputs(self):
class MyMM(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.float32)
def forward(ctx, a, container, expect_type):
b = container[1][0]
self.assertTrue(a.dtype is expect_type)
self.assertTrue(b.dtype is expect_type)
self.assertFalse(torch.is_autocast_enabled())
ctx.save_for_backward(a, b)
return a.mm(b)
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad):
self.assertFalse(torch.is_autocast_enabled())
a, b = ctx.saved_tensors
return grad.mm(b.t()), None, None
mymm = MyMM.apply
x = torch.randn((8, 8), device="cuda", dtype=torch.float16, requires_grad=True)
# Puts one input tensor in a nested container. y's contained Tensor won't receive a gradient,
# because torch.autograd.Function can't hand gradients back to non-Tensor forward arguments.
# Sets requires_grad=False explicitly so we don't lie about expecting a gradient.
y = (0, {0: torch.randn((8, 8), device="cuda", dtype=torch.float16, requires_grad=False)})
with torch.autocast('cuda'):
output = mymm(x, y, torch.float32)
self.assertTrue(output.dtype is torch.float32)
loss = output.sum()
loss.backward()
# Tests if custom_fwd becomes a no-op when mymm runs outside an autocast-enabled region.
output = mymm(x, y, torch.float16)
self.assertTrue(output.dtype is torch.float16)
loss = output.sum()
loss.backward()
def test_autocast_cat_jit(self):
# Reported at https://github.com/pytorch/pytorch/issues/38958
class Model(torch.nn.Module):
def forward(self):
a = torch.randn(1)
b = torch.randn(1)
c = torch.cat((a, b), 0)
d = torch.stack([c, c], 0)
return d
# The JIT here doesn't really matter; we just need to call
# cat via the boxed API
model = Model()
model_jit_script = torch.jit.script(model)
with torch.autocast('cuda', enabled=True):
model()
model_jit_script()
# cudnn RNNs require special backend handling (weights are cast to FP16 and reflattened)
# so they get a dedicated test.
# Despite the large number of RNN cases it tries, the test takes < 15 seconds on a Titan V (similar to V100).
@skipIfRocm
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_rnn(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
# seq, batch, features, hidden size
clses = ("RNN", "GRU", "LSTM")
T, B, F, H = 3, 4, 5, 6
dtypes = (torch.float16, torch.float32)
input_layouts = ("seq_first", "batch_first", "packed")
for (cls, num_layers, bias, input_layout, bidirectional, try_nonpreflattened_weights,
input_dtype, hidden_dtype, weight_dtype) in \
product(clses, (1, 2), (True, False), input_layouts, (True, False), (True, False),
dtypes, dtypes, dtypes):
if input_layout == "seq_first":
batch_first = False
x = torch.randn((T, B, F), device="cuda", dtype=input_dtype)
elif input_layout == "batch_first":
batch_first = True
x = torch.randn((B, T, F), device="cuda", dtype=input_dtype)
elif input_layout == "packed":
batch_first = False
x = torch.randn((T, B, F), device="cuda", dtype=input_dtype)
x = torch.nn.utils.rnn.pack_padded_sequence(torch.randn((T, B, F),
device="cuda", dtype=input_dtype),
lengths=(3, 2, 1, 3),
enforce_sorted=False)
rnn = getattr(torch.nn, cls)(F, H, num_layers=num_layers, bidirectional=bidirectional,
bias=bias, batch_first=batch_first).cuda().to(dtype=weight_dtype)
if try_nonpreflattened_weights:
for p in rnn.parameters():
with torch.no_grad():
p.set_(p.clone())
h = torch.randn((num_layers * (2 if bidirectional else 1), B, H),
device="cuda", dtype=hidden_dtype)
if cls == "LSTM":
c = torch.randn((num_layers * (2 if bidirectional else 1), B, H),
device="cuda", dtype=hidden_dtype)
h = (h, c)
with torch.autocast('cuda'):
out, h_out = rnn(x, h)
out = out.data if input_layout == "packed" else out
self.assertEqual(out.dtype, torch.float16)
# The autocast wrapper requires that at::_cudnn_rnn is autograd-exposed. This check can't guarantee
# at::_cudnn_rnn is autograd-exposed, but if it fires, it indicates some funny business has
# occurred and we should double check that at::_cudnn_rnn remains autograd-exposed.
self.assertEqual(out.grad_fn.name(), "CudnnRnnBackward0")
out.sum().backward()
grads = [p.grad.clone() for p in rnn.parameters()]
rnn.zero_grad()
if cls == "LSTM":
out_control, h_out_control = rnn.to(dtype=torch.float16)(x.half(), (h[0].half(), h[1].half()))
else:
out_control, h_out_control = rnn.to(dtype=torch.float16)(x.half(), h.half())
out_control = out_control.data if input_layout == "packed" else out_control
out_control.sum().backward()
grads_control = [p.grad.clone() for p in rnn.parameters()]
# Compares with default tolerances, even for FP16 execution. Barring nondeterminism,
# autocast and control results should be bitwise identical.
self.assertEqual(out, out_control)
if cls == "LSTM":
self.assertTrue(h_out[0].dtype is torch.float16 and h_out[1].dtype is torch.float16)
self.assertEqual(h_out[0], h_out_control[0])
self.assertEqual(h_out[1], h_out_control[1])
else:
self.assertEqual(h_out.dtype, torch.float16)
self.assertEqual(h_out, h_out_control)
for grad, grad_control in zip(grads, grads_control):
self.assertEqual(grad.half(), grad_control)
def test_autocast_cache_leak(self):
# Reported at https://github.com/pytorch/pytorch/issues/48049
# Checks that autocast does not keep re-caching the same parameters (i.e. leaking memory)
# when the model is executed repeatedly inside a `torch.no_grad()` block.
linear = torch.nn.Linear(10, 10).to('cuda')
data = torch.randn(1, 10, device='cuda')
with torch.autocast('cuda'):
with torch.no_grad():
out = linear(data)
first_iter_mem = torch.cuda.memory_allocated()
for _ in range(3):
out = linear(data)
self.assertTrue(first_iter_mem == torch.cuda.memory_allocated())
def test_autocast_checkpointing(self):
model = torch.nn.Sequential(torch.nn.Linear(8, 8),
torch.nn.Linear(8, 8),
torch.nn.Linear(8, 8)).cuda()
input = torch.rand((8, 8), device="cuda", dtype=torch.float16, requires_grad=True)
with torch.autocast('cuda'):
output = checkpoint_sequential(model, 2, input)
self.assertTrue(output.requires_grad)
self.assertTrue(output.dtype is torch.float16)
output.sum().backward()
@slowTest
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
def test_max_large_axis(self):
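# Reduces over a dimension with 2**32 elements; both the max value and its
# (large) index must be reported correctly.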
x = torch.zeros(2**32, device='cuda', dtype=torch.int8)
x[-1] = 1
val, idx = x.max(0)
self.assertEqual(val, 1)
self.assertEqual(idx, x.shape[0] - 1)
@unittest.skipIf(not TEST_NUMPY, "Numpy not found")
def test_to_numpy(self):
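# .numpy() is only supported for CPU tensors, so calling it on a CUDA tensor
# should raise a TypeError.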
self.assertRaises(TypeError, lambda: torch.empty(1, device="cuda").numpy())
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_capture_simple(self):
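# Captures ten out-of-place increments of `a` into a CUDA graph on a side stream;
# a single replay should leave every element of b equal to 11, giving a sum of 11000.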
s = torch.cuda.Stream()
with torch.cuda.stream(s):
a = torch.full((1000,), 1, device="cuda")
g = torch.cuda.CUDAGraph()
torch.cuda.empty_cache()
g.capture_begin()
b = a
for _ in range(10):
b = b + 1
g.capture_end()
torch.cuda.current_stream().wait_stream(s)
g.replay()
self.assertTrue(b.sum().item() == 11000.)
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_rng_functional(self):
ops_with_kwargs = ((torch.nn.functional.dropout, {"p": 0.1}),
(torch.nn.functional.rrelu, {"training": True}),)
size = 10000
def run(op, kwargs):
a = torch.randn((size,), device="cuda", dtype=torch.float)
# Control
torch.cuda.manual_seed(5)
eager_out = a
for _ in range(6):
eager_out = op(eager_out, **kwargs)
graph_in = a.clone()
stream = torch.cuda.Stream()
stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(stream):
torch.cuda.manual_seed(5)
g = torch.cuda.CUDAGraph()
torch.cuda.empty_cache()
g.capture_begin()
graph_out = graph_in
for _ in range(2):
graph_out = op(graph_out, **kwargs)
g.capture_end()
torch.cuda.current_stream().wait_stream(stream)
# Runs a graphed->eager->graphed sequence of RNG ops.
# replay() plays 2 invocations of the op, so the sequence has 6
# invocations total, matching Control.
# replay() reads from graph_in and writes to graph_out.
g.replay()
out = op(graph_out, **kwargs)
out = op(out, **kwargs)
graph_in.copy_(out)
g.replay()
# If replay() updated RNG state correctly, graph_out
# should now hold data equal to eager_out.
try:
self.assertEqual(eager_out, graph_out)
except Exception as e:
raise RuntimeError("Failed on ", op) from e
# We hold references to all tensors used across streams up til this sync,
# so no need to call record_stream on those tensors.
torch.cuda.synchronize()
for op, kwargs in ops_with_kwargs:
run(op, kwargs)
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_rng_distributions(self):
size = 10000
input = torch.rand((size,), device="cuda", dtype=torch.float)
alloc = torch.empty((size,), device="cuda", dtype=torch.float)
# Torch ops to test with sample args (tuple) and kwargs (dict)
torch_with_args = (("bernoulli", (input.clone(),), {}),
# multinomial uses some uncapturable CUDA calls.
# TODO: reenable multinomial tests if/when the implementation is capturable.
# ("multinomial", (input.clone(), size, True), {}),
# ("multinomial", (input.clone(), size // 2, False), {}),
# TODO: reenable normal test, where std is a device
# tensor, when graph test failures are fixed
# ("normal", (input.clone() + 1, input.clone()), {}),
("normal", (input.clone() + 1, 1.0), {}),
("poisson", (input.clone(),), {}),
("rand", (size,), {"device": "cuda", "dtype": torch.float}),
("randint", (0, 3, (size,)), {"device": "cuda", "dtype": torch.float}),
("randn", (size,), {"device": "cuda", "dtype": torch.float}),)
# Tensor methods to test with sample args (tuple)
tensor_with_args = (("bernoulli_", (input.clone(),)),
("cauchy_", ()),
("exponential_", ()),
("geometric_", (0.3,)),
("log_normal_", ()),
("normal_", ()),
("random_", ()),
("uniform_", ()),)
def run(module, op, args, kwargs):
torch.cuda.manual_seed(5)
# Each path runs a dummy op to increment the state a bit before creating controls.
if (module == "torch"):
dummy = getattr(torch, op)(*args, **kwargs)
control1 = getattr(torch, op)(*args, **kwargs)
control2 = getattr(torch, op)(*args, **kwargs)
else:
dummy = alloc.clone()
control1 = alloc.clone()
control2 = alloc.clone()
getattr(dummy, op)(*args)
getattr(control1, op)(*args)
getattr(control2, op)(*args)
stream = torch.cuda.Stream()
stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(stream):
torch.cuda.manual_seed(5)
g = torch.cuda.CUDAGraph()
torch.cuda.empty_cache()
if (module == "torch"):
g.capture_begin()
t1 = getattr(torch, op)(*args, **kwargs)
t2 = getattr(torch, op)(*args, **kwargs)
g.capture_end()
else:
t1 = alloc.clone()
t2 = alloc.clone()
g.capture_begin()
getattr(t1, op)(*args)
getattr(t2, op)(*args)
g.capture_end()
torch.cuda.current_stream().wait_stream(stream)
try:
self.assertNotEqual(control1, t1)
self.assertNotEqual(control2, t2)
except Exception as e:
raise RuntimeError("Failed on " + module + "." + op) from e
# Runs a dummy op prelude, as for controls, to make sure replay()
# picks up the dummy op's state increment.
if module == "torch":
dummy = getattr(torch, op)(*args, **kwargs)
else:
dummy = alloc.clone()
getattr(dummy, op)(*args)
# Runs RNG ops that fill t1 and t2.
g.replay()
try:
self.assertEqual(control1, t1)
self.assertEqual(control2, t2)
except Exception as e:
raise RuntimeError("Failed on " + module + "." + op) from e
# We hold references to all tensors used across streams up til this sync,
# so no need to call record_stream on those tensors.
torch.cuda.synchronize()
for op_with_args in torch_with_args:
run("torch", *op_with_args)
for meth_with_args in tensor_with_args:
# Adds an empty dict for kwargs, which none of the Tensor methods use
run("Tensor", *(meth_with_args + ({},)))
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_two_successive(self):
torch.cuda.empty_cache()
size = 1000
kSmallBuffer = 2097152
def func_with_temps(t, val):
x = t.clone() + val
y = t.clone() + val
return x + y
s = torch.cuda.Stream()
for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
g0 = torch.cuda.CUDAGraph()
g1 = torch.cuda.CUDAGraph()
a = torch.ones((size,), device="cuda")
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g0_args = (torch.cuda.graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
g0.capture_begin(*g0_args)
b = a.clone()
for _ in range(5):
b = func_with_temps(b, 1)
g0.capture_end()
g1_args = (g0.pool(),) if share_mem == "via pool()" else g0_args
g1.capture_begin(*g1_args)
for _ in range(5):
b = func_with_temps(b, 1)
g1.capture_end()
torch.cuda.current_stream().wait_stream(s)
# mixes unrelated eager ops with replays
c = a.clone()
for _ in range(2):
c = func_with_temps(c, 3)
g0.replay()
for _ in range(2):
c = func_with_temps(c, 3)
g1.replay()
for _ in range(2):
c = func_with_temps(c, 3)
self.assertEqual(b.sum().item(), size * 3070)
self.assertEqual(c.sum().item(), size * 442)
if share_mem != "Don't share":
self.assertEqual(reserved_no_sharing - torch.cuda.memory_stats()["reserved_bytes.all.current"],
kSmallBuffer)
else:
reserved_no_sharing = torch.cuda.memory_stats()["reserved_bytes.all.current"]
del a, b, c, g0, g1
# Tensors used across streams (a and b) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
@unittest.skip("Temporarily disabled due to a graphs bug in libcuda.so, " +
"see https://github.com/pytorch/pytorch/pull/57556")
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_concurrent_replay(self):
torch.cuda.empty_cache()
size = 1000000 # largeish to help expose race conditions
def func_with_temps(t, val):
x = t.clone() + val
y = t.clone() + val
return x + y
s = torch.cuda.Stream()
for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
g0 = torch.cuda.CUDAGraph()
g1 = torch.cuda.CUDAGraph()
s0 = torch.cuda.Stream()
s1 = torch.cuda.Stream()
a = torch.ones((size,), device="cuda")
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g0_args = (torch.cuda.graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
g0.capture_begin(*g0_args)
b = a.clone()
for _ in range(5):
b = func_with_temps(b, 1)
g0.capture_end()
g1_args = (g0.pool(),) if share_mem == "via pool()" else g0_args
g1.capture_begin(*g1_args)
c = a.clone()
for _ in range(5):
c = func_with_temps(c, 2)
g1.capture_end()
# To reproduce data corruption, I need g0 and g1's kernels to run concurrently.
# But replay() (especially cudaGraphLaunch) can incur significant CPU overhead.
# The following pattern helps align device-side execution of g0 and g1's kernels.
torch.cuda.synchronize()
with torch.cuda.stream(s0):
torch.cuda._sleep(1000000)
s1.wait_stream(s0)
g0.replay()
with torch.cuda.stream(s1):
g1.replay()
torch.cuda.current_stream().wait_stream(s0)
torch.cuda.current_stream().wait_stream(s1)
if share_mem != "Don't share":
# Confirms concurrent replays using the same mempool corrupted each other.
self.assertNotEqual(b.sum().item(), size * 94)
self.assertNotEqual(c.sum().item(), size * 156)
else:
# Confirms concurrent replays using different mempools did not corrupt each other.
self.assertEqual(b.sum().item(), size * 94)
self.assertEqual(c.sum().item(), size * 156)
del a, b, c, g0, g1
# Tensors used across streams (a, b, c) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_three_successive(self):
torch.cuda.empty_cache()
size = 1000
s = torch.cuda.Stream()
for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
a = torch.ones((size,), device="cuda")
g0 = torch.cuda.CUDAGraph()
g1 = torch.cuda.CUDAGraph()
g2 = torch.cuda.CUDAGraph()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g0_args = (torch.cuda.graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
g0.capture_begin(*g0_args)
b = a.clone()
c = b + 1
d = b + 2
g0.capture_end()
args = (g0.pool(),) if share_mem == "via pool()" else g0_args
g1.capture_begin(*args)
e = c + 3
del c
g1.capture_end()
g2.capture_begin(*args)
f = d + 4
g2.capture_end()
torch.cuda.current_stream().wait_stream(s)
# Tests that replaying in capture order is valid
g0.replay()
g1.replay()
g2.replay()
self.assertEqual(e.sum().item(), size * 5)
self.assertEqual(f.sum().item(), size * 7)
# Tests that replaying as g0, g2, g1 is only valid if they don't share a pool
g0.replay()
g2.replay()
g1.replay()
# If share_mem is True, g2's capture should have reused c's memory for f. We replayed g2 then g1,
# so we expect g1's captured "e = c + 3" mistakenly filled e with "f's vals + 3".
self.assertEqual(e.sum().item(), size * (7 + 3) if share_mem != "Don't share" else size * 5)
self.assertEqual(f.sum().item(), size * 7)
del a, b, d, e, f, g0, g1, g2
# Tensors used across streams (a, e, f) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_memory_stats_and_use_result_after_destroy_graph(self):
kSmallSize = 1048576
kSmallBuffer = 2097152
kLargeBuffer = 20971520
kMinLargeAlloc = 10485760
kRoundLarge = 2097152
elem = 4
# this was annoying to write but stresses the expectations pretty rigorously
cases = ((512 // elem, 1, kSmallBuffer, kSmallBuffer, "small_pool"),
(kSmallSize // elem, 2, 2 * kSmallBuffer, kSmallBuffer, "small_pool"),
((kSmallSize + 512) // elem, 1, kLargeBuffer, kLargeBuffer, "large_pool"),
((kMinLargeAlloc - 512) // elem, 2, 2 * kLargeBuffer, kLargeBuffer, "large_pool"),
((kMinLargeAlloc + 512) // elem, 3,
3 * (kRoundLarge * ((kMinLargeAlloc + 512 + kRoundLarge - 1) // kRoundLarge)),
kRoundLarge * ((kMinLargeAlloc + 512 + kRoundLarge - 1) // kRoundLarge),
"large_pool"),)
stats_to_check = ("segment.",
"reserved_bytes.",
"active.",
"active_bytes.")
gc.collect()
torch.cuda.empty_cache()
s = torch.cuda.Stream()
for (numel,
delta_cudaMallocs,
delta_cudaMalloc_bytes,
delta_cudaMalloc_bytes_post_del_g,
pool_string) in cases:
if pool_string == "small_pool":
delta_active_blocks = 2 # one from "b" plus a sneaky one from CUDAGraph's one-element rng offset holder
delta_active_bytes = numel * elem + 512 # + 512 for CUDAGraph's rng offset holder
else:
delta_active_blocks = 1 # We only check the large pool, which isn't affected by rng offset holder
delta_active_bytes = numel * elem
g = torch.cuda.CUDAGraph()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
# Allocation stat estimates assume input is created on the same stream as capture_begin()
# (in other words, the same stream silo as the rng offset holder, which is not allocated from the
# capture's private pool).
a = torch.ones((numel,), device="cuda")
precapture_stats = torch.cuda.memory_stats()
g.capture_begin()
b = a.clone()
for _ in range(5):
b = b.clone() + 1
g.capture_end()
torch.cuda.current_stream().wait_stream(s)
gc.collect()
postcapture_stats = torch.cuda.memory_stats()
expecteds = (delta_cudaMallocs,
delta_cudaMalloc_bytes,
delta_active_blocks,
delta_active_bytes)
# Double checks replay and stats before and after a call to empty_cache
for i in range(2):
for stat, expected in zip(stats_to_check, expecteds):
stat = stat + pool_string + ".current"
current = postcapture_stats[stat] - precapture_stats[stat]
self.assertEqual(current, expected, "Pre to post capture delta of " +
stat + " = {}, expected = {}, numel = {}".format(current, expected, numel))
g.replay()
self.assertEqual(b.sum().item(), 6 * numel)
if i == 0:
torch.cuda.empty_cache()
del g
gc.collect()
torch.cuda.empty_cache()
postdel_stats = torch.cuda.memory_stats()
# Uses graph result b after graph has been deleted
self.assertEqual(b.sum().item(), 6 * numel)
# b should be the only live reference remaining from the graph's private pool
expecteds = (1, delta_cudaMalloc_bytes_post_del_g, 1, numel * elem)
for stat, expected in zip(stats_to_check, expecteds):
stat = stat + pool_string + ".current"
current = postdel_stats[stat] - precapture_stats[stat]
self.assertEqual(current, expected, "Pre capture to post graph delete delta of " +
stat + " = {}, expected = {}, numel = {}".format(current, expected, numel))
# del a, b before the next case is essential, otherwise overwriting a and b in the next case
# can throw off its allocation/deallocation counts.
del a, b
# Tensors used across streams (a and b) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_record_stream(self):
# Makes sure graph capture defers attempting to reclaim allocations used across streams. See
# "Q. Why skip process_events if a capture might be underway?" in c10/cuda/CUDACachingAllocator.cpp
torch.cuda.empty_cache()
potential_problem = torch.zeros((3,), device="cuda")
a = torch.zeros((3,), device="cuda")
s0 = torch.cuda.Stream()
s1 = torch.cuda.Stream()
s2 = torch.cuda.Stream()
g = torch.cuda.CUDAGraph()
torch.cuda.synchronize()
with torch.cuda.stream(s0):
potential_problem.record_stream(s0)
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
potential_problem.fill_(1.)
del potential_problem
with torch.cuda.stream(s1):
g.capture_begin()
# potential_problem's allocation should still be outstanding. if DeviceCachingAllocator::malloc
# mistakenly calls process_events, it will trigger cudaEventQueries on potential_problem's end-of-life
# event, which will cause the capture to error.
b = a.clone()
# Let's also see what happens if we record_stream on a tensor during capture.
s2.wait_stream(s1)
with torch.cuda.stream(s2):
b.fill_(1.)
b.record_stream(s2) # dummy record_stream
del b
s1.wait_stream(s2)
g.capture_end()
torch.cuda.synchronize()
        # A dummy allocation triggers process_events, which should successfully process b's end-of-life event.
c = torch.zeros((3,), device="cuda")
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
# If this test is the first in the process to try cudnn rnns with dropout, it'll initialize
# DropoutState's long-lived internal buffer. Calling code perceives this (correct) behavior
# as a memory leak unless we skip the leak check.
@skipCUDAMemoryLeakCheckIf(True)
def test_graph_cudnn_dropout(self):
# Tests the interaction of cuda graph capture with DropoutState's syncs in ATen/native/cudnn/RNN.cpp.
# In particular, if user runs a sequence of captured and noncaptured cudnn rnns, DropoutState should
# avoid syncing noncapturing streams with captured events or vice versa.
torch.cuda.empty_cache()
model = torch.nn.LSTM(512, 512, 2, dropout=0.5).cuda()
x = torch.ones(100, 192, 512, device="cuda")
y = model(x)
g = torch.cuda.CUDAGraph()
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g.capture_begin()
y = model(x)
g.capture_end()
torch.cuda.current_stream().wait_stream(s)
y = model(x)
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_grad_scaling(self):
torch.cuda.empty_cache()
scaler = torch.cuda.amp.GradScaler(init_scale=4.)
g = torch.cuda.CUDAGraph()
s = torch.cuda.Stream()
weight = torch.ones((100,), device="cuda", requires_grad=True)
opt = torch.optim.SGD([weight], lr=0.1)
static_input = torch.ones_like(weight)
static_grad = torch.ones_like(weight)
# warmup
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
loss = (weight.half() * static_input).sum()
scaler.scale(loss).backward()
torch.cuda.current_stream().wait_stream(s)
opt.zero_grad(set_to_none=True)
# capture
with torch.cuda.graph(g):
loss = (weight.half() * static_input).sum()
scaler.scale(loss).backward()
input_vals = [5, 20000, 5, 40000]
# If the scale gets updated properly, these are the scale, growth tracker,
# and grad values we expect.
expected_scales = [4, 2, 2, 1]
expected_growth_trackers = [1, 0, 1, 0]
expected_grad_vals = [5 * 4, float("inf"), 5 * 2, float("inf")]
for data, scale, growth_tracker, grad_val in zip(input_vals,
expected_scales,
expected_growth_trackers,
expected_grad_vals):
static_input.fill_(data)
g.replay()
self.assertEqual(weight.grad, torch.full_like(weight.grad, grad_val))
scaler.step(opt)
scaler.update()
self.assertEqual(scaler._scale, scale)
self.assertEqual(scaler._growth_tracker, growth_tracker)
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_make_graphed_callables(self):
torch.manual_seed(5)
torch.cuda.manual_seed(5)
N, D_in, H, D_out = 640, 4096, 2048, 1024
models = []
for _ in range(2):
model_section1 = torch.nn.Sequential(torch.nn.Linear(D_in, H),
torch.nn.Dropout(p=0.1)).cuda()
model_section2 = torch.nn.Sequential(torch.nn.Linear(H, D_out),
torch.nn.Dropout(p=0.2)).cuda()
models.append(torch.nn.Sequential(model_section1, model_section2))
model_graphed = models[0]
model_control = models[1]
model_graphed.load_state_dict(model_control.state_dict())
opt_graphed = torch.optim.SGD(model_graphed.parameters(), lr=0.1)
opt_control = torch.optim.SGD(model_control.parameters(), lr=0.1)
x = torch.randn(N, D_in, device='cuda')
h = torch.randn(N, H, device='cuda', requires_grad=True)
y_pred = torch.randn(N, D_out, device='cuda', requires_grad=True)
y = torch.randn(N, D_out, device='cuda')
loss_fn_control = torch.nn.functional.mse_loss
relu_control = torch.nn.functional.relu
# This is a good stress test. It graphs four callables: two Modules and two python functions.
model_graphed[0], model_graphed[1], relu_graphed, loss_fn_graphed = \
torch.cuda.make_graphed_callables((model_graphed[0], model_graphed[1], relu_control, loss_fn_control),
((x,), (h,), (y_pred,), (y_pred, y)))
real_inputs = [torch.rand_like(x) for _ in range(10)]
real_targets = [torch.rand_like(y) for _ in range(10)]
for m, opt, relu, loss_fn in zip((model_graphed, model_control),
(opt_graphed, opt_control),
(relu_graphed, relu_control),
(loss_fn_graphed, loss_fn_control)):
            # Resets RNG states before iterations for graphed and ungraphed models,
# so dropout math should be bitwise identical for both.
torch.manual_seed(5)
torch.cuda.manual_seed(5)
for data, target in zip(real_inputs, real_targets):
opt.zero_grad(set_to_none=True)
y_pred = m(data)
y_pred = relu(y_pred)
loss = loss_fn(y_pred, target)
loss.backward()
opt.step()
for p, pc in zip(model_graphed.parameters(), model_control.parameters()):
self.assertEqual(p, pc)
# We graphed the models in training mode. Eval should still run ungraphed.
model_graphed.eval()
model_control.eval()
self.assertEqual(model_graphed(real_inputs[0]), model_control(real_inputs[0]))
def test_batch_norm_gather_stats(self):
input = torch.randn(1, 3, 3, 3, device='cuda')
mean, invstd = torch.batch_norm_gather_stats(
input, mean=torch.ones(2, 3, device='cuda'), invstd=torch.ones(2, 3, device='cuda'),
running_mean=None, running_var=None , momentum=.1, eps=1e-5, count=2
)
self.assertEqual(mean, torch.ones(3, device='cuda'))
self.assertEqual(invstd, torch.ones(3, device='cuda'))
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_cuda_device_memory_allocated(self):
from torch.cuda import memory_allocated
device_count = torch.cuda.device_count()
current_alloc = [memory_allocated(idx) for idx in range(device_count)]
x = torch.ones(10, device="cuda:0")
self.assertTrue(memory_allocated(0) > current_alloc[0])
self.assertTrue(all(memory_allocated(torch.cuda.device(idx)) == current_alloc[idx] for idx in range(1, device_count)))
def test_matmul_memory_use(self):
def get_max_used():
torch.cuda.synchronize()
val = torch.cuda.max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
return val
a = torch.rand(1, 32, 32, device="cuda")
b = torch.rand(24, 32, 1, device="cuda")
get_max_used()
torch.matmul(a, b)
matmul_mem = get_max_used()
a = a.expand(24, 32, 32)
torch.matmul(a, b)
matmul_expand_mem = get_max_used()
torch.bmm(a, b)
bmm_mem = get_max_used()
self.assertEqual(matmul_expand_mem, matmul_mem)
self.assertEqual(bmm_mem, matmul_mem)
class TestCudaComm(TestCase):
def _test_broadcast(self, input):
if not TEST_MULTIGPU:
raise unittest.SkipTest("only one GPU detected")
# test regular
results = comm.broadcast(input, (0, 1))
for i, t in enumerate(results):
self.assertEqual(t.get_device(), i)
self.assertEqual(t, input)
if input.is_cuda and input.get_device() == i: # test not copying on same device
self.assertEqual(t.data_ptr(), input.data_ptr())
# test out=
for inplace in [True, False]:
if inplace:
outputs = [torch.empty_like(input, device=0), torch.empty_like(input, device=1)]
else:
outputs = [input.cuda(0), torch.empty_like(input, device=1)]
results = comm.broadcast(input, out=outputs)
for r, o in zip(results, outputs):
self.assertIs(r, o)
for i, t in enumerate(results):
self.assertEqual(t.get_device(), i)
self.assertEqual(t, input)
# test error msg
with self.assertRaisesRegex(RuntimeError, r"Exactly one of 'devices' and 'out'"):
comm.broadcast(input, (0, 1), out=outputs)
with self.assertRaisesRegex(RuntimeError,
r"Expected all output tensors to be CUDA tensors, but output tensor at index 1"):
comm.broadcast(input, out=[input.cuda(0), input.cpu()])
with self.assertRaisesRegex(RuntimeError,
r"Expected all output tensors to have same shape as the source .+ at index 1"):
comm.broadcast(input, out=[input.cuda(0), input.cuda(1).unsqueeze(0)])
def test_broadcast_cpu(self):
self._test_broadcast(torch.randn(5, 5))
def test_broadcast_gpu(self):
self._test_broadcast(torch.randn(5, 5).cuda())
def _test_broadcast_coalesced(self, tensors, buffer_size):
b_tensors = [comm.broadcast(t, (0, 1)) for t in tensors]
for (_, bt), t in zip(b_tensors, tensors):
self.assertEqual(bt.get_device(), 1)
self.assertEqual(bt, t)
self.assertIsInstance(bt, type(t))
bc_tensors = comm.broadcast_coalesced(tensors, (0, 1), buffer_size=buffer_size)
bc_tensors_t = list(zip(*bc_tensors))
self.assertEqual(b_tensors, bc_tensors_t)
for (_, bt), (_, bct) in zip(b_tensors, bc_tensors_t):
self.assertEqual(bt.get_device(), bct.get_device())
self.assertIsInstance(bct, type(bt))
# check that tensors on device[0] are returned as-is
for out_tensors in (b_tensors, bc_tensors_t):
for inp_t, (out_t, _) in zip(tensors, out_tensors):
self.assertIs(inp_t, out_t)
# check that the tensors not on device[0] have different version counters
# NOTE [ Version Counter in comm.*_coalesced ]
versions = [t._version for _, t in bc_tensors_t]
for old_version, (_, t) in zip(versions, bc_tensors_t):
self.assertEqual(t._version, old_version)
t.zero_()
self.assertEqual(t._version, old_version + 1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
# Note: fails sometimes on the CI, passes on dual gfx906
def test_broadcast_coalesced(self):
numel = 5
num_bytes = numel * 8
tensors = [
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 1, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 10, 2, 3),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 5, 2, 3),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 7, 3, 3),
make_sparse_tensor(torch.cuda.sparse.FloatTensor, 2, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 3, 2, 7),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_broadcast_coalesced(tensors, num_bytes * 5 // 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_broadcast_coalesced_dense_only(self):
numel = 5
num_bytes = numel * 8
tensors = [
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_broadcast_coalesced(tensors, num_bytes * 5 // 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_broadcast_coalesced_empty_tensors(self):
tensors = [
torch.tensor([]).byte().cuda(),
torch.randn(5).cuda(),
torch.randn(5).double().cuda()
]
self._test_broadcast_coalesced(tensors, 256)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_reduce_add(self):
x = torch.randn(5, 5)
y = torch.randn(5, 5)
x_cuda = x.cuda(0)
y_cuda = y.cuda(1)
result = comm.reduce_add((x_cuda, y_cuda))
self.assertEqual(result.get_device(), 0)
self.assertEqual(result.cpu(), x + y)
def _test_reduce_add_coalesced(self, tensors, buffer_size):
dup_tensors = [tensors, [t.cuda(1) for t in tensors]]
r_tensors = [comm.reduce_add(t) for t in zip(*dup_tensors)]
for r, t in zip(r_tensors, tensors):
self.assertEqualTypeString(r, t)
self.assertEqual(r, t * 2)
rc_tensors = comm.reduce_add_coalesced(dup_tensors, buffer_size=buffer_size)
self.assertEqual(r_tensors, rc_tensors)
for r, rc in zip(r_tensors, rc_tensors):
self.assertEqualTypeString(rc, r)
# Since we have both cuda:0 and cuda:1 inputs, the outputs must be new.
# We can check that they have different version counters.
# NOTE [ Version Counter in comm.*_coalesced ]
versions = [t._version for t in rc_tensors]
for old_version, t in zip(versions, rc_tensors):
self.assertEqual(t._version, old_version)
t.zero_()
self.assertEqual(t._version, old_version + 1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_reduce_add_coalesced(self):
numel = 5
num_bytes = numel * 8
tensors = [
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 1, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 10, 2, 3),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 5, 2, 3),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 7, 3, 3),
make_sparse_tensor(torch.cuda.sparse.FloatTensor, 2, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 3, 2, 7),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_reduce_add_coalesced(tensors, num_bytes * 5 // 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_reduce_add_coalesced_dense_only(self):
numel = 5
num_bytes = numel * 8
tensors = [
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_reduce_add_coalesced(tensors, num_bytes * 5 // 2)
def _test_scatter(self, input, chunk_sizes=None, dim=0):
if not TEST_MULTIGPU:
raise unittest.SkipTest("only one GPU detected")
if chunk_sizes is None:
ref_chunk_sizes = tuple(repeat(input.size(dim) // 2, 2))
else:
ref_chunk_sizes = chunk_sizes
# test regular
result = comm.scatter(input, (0, 1), chunk_sizes, dim)
self.assertEqual(len(result), 2)
chunk_start = 0
for i, r in enumerate(result):
chunk_end = chunk_start + ref_chunk_sizes[i]
index = [slice(None, None) for _ in range(input.dim())]
index[dim] = slice(chunk_start, chunk_end)
self.assertEqual(r, input[tuple(index)], atol=0, rtol=0)
chunk_start = chunk_end
if r.device == input.device:
self.assertEqual(r.data_ptr(), input.data_ptr()) # for target @ same device, a view should be returned
# test out
out = [torch.empty_like(t) for t in result]
result = comm.scatter(input, dim=dim, out=out)
self.assertEqual(len(result), 2)
chunk_start = 0
for i, r in enumerate(result):
self.assertIs(r, out[i])
chunk_end = chunk_start + ref_chunk_sizes[i]
index = [slice(None, None) for _ in range(input.dim())]
index[dim] = slice(chunk_start, chunk_end)
self.assertEqual(r, input[tuple(index)], atol=0, rtol=0)
chunk_start = chunk_end
# test error msg
if chunk_sizes is not None:
with self.assertRaisesRegex(RuntimeError, r"Expected devices and chunk_sizes to be of same length"):
comm.scatter(input, [0 for _ in range(len(chunk_sizes) + 1)], dim=dim, chunk_sizes=chunk_sizes)
with self.assertRaisesRegex(RuntimeError, r"'devices' must not be specified"):
comm.scatter(input, (0, 1), dim=dim, out=out)
with self.assertRaisesRegex(RuntimeError, r"Expected at least one device to scatter to"):
comm.scatter(input, (), dim=dim)
with self.assertRaisesRegex(RuntimeError, r"Expected at least one output tensor to scatter to"):
comm.scatter(input, dim=dim, out=[])
with self.assertRaisesRegex(RuntimeError,
r"Expected all output tensors to be CUDA tensors, but output tensor at index 0"):
comm.scatter(input, dim=dim, out=([out[0].cpu()] + out[1:]))
with self.assertRaisesRegex(RuntimeError, r"Output tensor at index 0 has incorrect shape"):
comm.scatter(input, dim=dim, out=([out[0].unsqueeze(0)] + out[1:]))
with self.assertRaisesRegex(RuntimeError, r"Total size for output tensors along scatter dim \d+ does not match"):
index = [slice(None, None) for _ in range(input.dim())]
index[dim] = slice(1, None)
comm.scatter(input, dim=dim, out=([out[0][tuple(index)]] + out[1:]))
def test_scatter_cpu(self):
self._test_scatter(torch.randn(4, 4), dim=0)
def test_scatter_cpu_dim(self):
self._test_scatter(torch.randn(4, 4), dim=1)
def test_scatter_cpu_neg_dim(self):
self._test_scatter(torch.randn(4, 4), dim=-2)
def test_scatter_cpu_sizes(self):
self._test_scatter(torch.randn(6, 4), chunk_sizes=(2, 4))
def test_scatter_gpu(self):
self._test_scatter(torch.randn(4, 4).cuda(), dim=0)
def test_scatter_gpu_dim(self):
self._test_scatter(torch.randn(4, 4).cuda(), dim=1)
def test_scatter_gpu_neg_dim(self):
self._test_scatter(torch.randn(4, 4).cuda(), dim=-2)
def test_scatter_gpu_sizes(self):
self._test_scatter(torch.randn(6, 4).cuda(), chunk_sizes=(2, 4))
def _test_gather(self, dim):
if not TEST_MULTIGPU:
raise unittest.SkipTest("only one GPU detected")
x = torch.randn(2, 5, device=0)
y = torch.randn(2, 5, device=1)
expected_size = list(x.size())
expected_size[dim] += y.size(dim)
expected_size = torch.Size(expected_size)
destinations = [None, torch.device('cuda:0'), torch.device('cpu')]
if torch.cuda.device_count() > 2:
destinations.append(torch.device('cuda:2'))
with torch.cuda.device(1):
for destination in destinations:
if destination is None:
expected_device = torch.device('cuda', torch.cuda.current_device())
else:
expected_device = destination
for use_out in [True, False]:
if use_out:
out = torch.empty(expected_size, device=expected_device)
result = comm.gather((x, y), dim, out=out)
self.assertIs(out, result)
else:
result = comm.gather((x, y), dim, destination=destination)
self.assertEqual(result.device, expected_device)
self.assertEqual(result.size(), expected_size)
index = [slice(None, None), slice(None, None)]
index[dim] = slice(0, x.size(dim))
self.assertEqual(result[tuple(index)], x)
index[dim] = slice(x.size(dim), x.size(dim) + y.size(dim))
self.assertEqual(result[tuple(index)], y)
# test error msg
with self.assertRaisesRegex(RuntimeError, r"'destination' must not be specified"):
comm.gather((x, y), dim, destination='cpu', out=torch.empty(expected_size, device='cpu'))
with self.assertRaisesRegex(RuntimeError, r"Expected at least one tensor to gather from"):
comm.gather(())
with self.assertRaisesRegex(RuntimeError, r"Expected all input tensors to be CUDA tensors, "):
comm.gather((x.cpu(), y))
with self.assertRaisesRegex(RuntimeError, r"Expected all input tensors to have the same number of dimensions"):
comm.gather((x, y.unsqueeze(0)))
with self.assertRaisesRegex(RuntimeError, r"Input tensor at index 1 has invalid shape"):
if dim in [0, -2]:
comm.gather((x, y[:, 1:]), dim=dim)
elif dim in [1, -1]:
comm.gather((x, y[1:, :]), dim=dim)
def test_gather(self):
self._test_gather(0)
def test_gather_dim(self):
self._test_gather(1)
def test_gather_neg_dim(self):
self._test_gather(-1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_memory_format_scatter_gather(self):
nhwc = torch.randn((10, 3, 32, 32), device='cpu').contiguous(memory_format=torch.channels_last)
results = torch.cuda.comm.scatter(nhwc, (0, 1), None, 0)
for result in results:
self.assertFalse(result.is_contiguous())
self.assertTrue(result.is_contiguous(memory_format=torch.channels_last))
gathered = torch.cuda.comm.gather(results)
self.assertTrue(gathered.is_contiguous(memory_format=torch.channels_last))
def test_matmul_device_mismatch(self):
cpu = torch.rand((10, 10))
cuda = cpu.cuda()
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
cpu @ cuda
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
cuda @ cpu
for s, m1, m2 in product((cpu, cuda), repeat=3):
if s.device == m1.device == m2.device:
torch.addmm(s, m1, m2)
else:
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
torch.addmm(s, m1, m2)
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_scatter_namedtuple(self):
# tests ability to scatter namedtuples and retrieve a list where each
# element is of the expected namedtuple type.
fields = ("a", "b")
TestNamedTupleInput_0 = collections.namedtuple("NamedTuple", fields)
num_gpus = torch.cuda.device_count()
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=0)
a_tensors_for_gpu = [a[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
b_tensors_for_gpu = [b[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
inp = TestNamedTupleInput_0(a, b)
target_gpus = [torch.device(i) for i in range(num_gpus)]
scatter_out = scatter_gather.scatter(inp, target_gpus)
for i, x in enumerate(scatter_out):
self.assertTrue(isinstance(x, type(inp)))
self.assertEqual(x._fields, fields)
expected_a = a_tensors_for_gpu[i]
expected_b = b_tensors_for_gpu[i]
self.assertEqual(expected_a, x.a)
self.assertEqual(expected_b, x.b)
class TestNamedTupleInput_1(NamedTuple):
a: torch.tensor
b: torch.tensor
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=0)
a_tensors_for_gpu = [a[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
b_tensors_for_gpu = [b[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
inp = TestNamedTupleInput_1(a, b)
scatter_out = scatter_gather.scatter(inp, target_gpus)
for i, x in enumerate(scatter_out):
self.assertTrue(isinstance(x, type(inp)))
self.assertEqual(x._fields, fields)
expected_a = a_tensors_for_gpu[i]
expected_b = b_tensors_for_gpu[i]
self.assertEqual(expected_a, x.a)
self.assertEqual(expected_b, x.b)
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_gather_namedtuple(self):
# tests ability to gather a list of namedtuples and return a namedtuple where each
# element is of the expected tensor type.
fields = ['a', 'b']
TestNamedTupleInput_0 = collections.namedtuple('NamedTuple', fields)
num_gpus = torch.cuda.device_count()
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=1)
out1 = TestNamedTupleInput_0(a, b)
a = torch.rand(num_gpus * 2, device=1)
b = torch.rand(num_gpus * 2, device=0)
out2 = TestNamedTupleInput_0(a, b)
outputs = [out1, out2]
out = scatter_gather.gather(outputs, 'cpu') # test on CPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1]))) # x must be a tensor
cat = torch.cat((outputs[0][i].to('cpu'), outputs[1][i].to('cpu')))
self.assertTrue(torch.equal(x, cat))
out = scatter_gather.gather(outputs, 0) # test on GPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1])))
cat = torch.cat((outputs[0][i].to(0), outputs[1][i].to(0)))
self.assertTrue(torch.equal(x, cat))
class TestNamedTupleInput_1(NamedTuple):
a: torch.tensor
b: torch.tensor
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=1)
out1 = TestNamedTupleInput_1(a, b)
a = torch.rand(num_gpus * 2, device=1)
b = torch.rand(num_gpus * 2, device=0)
out2 = TestNamedTupleInput_1(a, b)
outputs = [out1, out2]
out = scatter_gather.gather(outputs, 0) # test on GPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1])))
cat = torch.cat((outputs[0][i].to(0), outputs[1][i].to(0)))
self.assertTrue(torch.equal(x, cat))
out = scatter_gather.gather(outputs, 'cpu') # test on CPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1])))
cat = torch.cat((outputs[0][i].to('cpu'), outputs[1][i].to('cpu')))
self.assertTrue(torch.equal(x, cat))
if __name__ == '__main__':
run_tests()
|
BlinkCount.py
|
import threading as thread
import time
import Project.OpenCV.mysql as db
def StartThreading():
    global second, flag
    flag = True
    second = 0
    t = thread.Thread(target=Timer)
    t.start()
def Timer():
    global second, flag
    check5min, sum, avg = 0, 0, 0.0
    while True:
        while flag:
            if second < 60 and (check5min % 5 != 0 or check5min == 0):  # one minute
                second += 1
                time.sleep(1)
            else:
                # store the result in the DB every five minutes
                if check5min > 4:
                    avg = sum / 5
                    print("average blink count: ", avg)
                    # current date and time
                    now = time.localtime()
                    nowdate = "%04d-%02d-%02d" % (now.tm_year, now.tm_mon, now.tm_mday)
                    nowtime = "%02d:%02d:%02d" % (now.tm_hour, now.tm_min, now.tm_sec)
                    # save to the DB
                    db.init()
                    db.insert_data(getName(), nowdate, nowtime, avg)  # insert_data(id_, date_, time_, count_)
                    sum = 0
                    check5min = -1
                    setCount(0)
                sum += getCount()
                print("sum : ", sum)
                check5min += 1
                second = 0
                setCount(0)
def Pause():
    global flag
    flag = False
def ReStart():
    global flag
    flag = True
def getSecond():
    global second
    return second
def setCount(cnt):
    global count
    count = cnt
def getCount():
    global count
    print("getcount : ", count)
    return count
def setName(n):
    global name
    name = n
def getName():
    global name
    return name
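# Illustrative sketch (added for clarity, not part of the original file): this module is
# meant to be driven by an external blink detector. A plausible call pattern, assuming
# such a detector loop exists elsewhere, is:
#
#   setName("user01")          # user id later passed to db.insert_data
#   setCount(0)                # initialize the per-minute blink counter
#   StartThreading()           # start the Timer thread defined above
#   ...
#   setCount(getCount() + 1)   # called by the detector on each blink
#
# Pause()/ReStart() toggle the timer loop via the shared `flag`.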
|
server.py
|
import select
import socket
import threading
import copy
import uuid
from ceptic.network import SocketCeptic
from ceptic.common import CepticRequest, CepticResponse, CepticStatusCode
from ceptic.common import command_settings
from ceptic.endpointmanager import EndpointManager, EndpointManagerException
from ceptic.certificatemanager import CertificateManager, CertificateManagerException, create_ssl_config
from ceptic.streammanager import StreamManager, StreamException, StreamTotalDataSizeException
from ceptic.encode import EncodeGetter, UnknownEncodingException
def server_settings(port=9000, version="1.0.0",
headers_min_size=1024000, headers_max_size=1024000,
frame_min_size=1024000, frame_max_size=1024000,
body_max=102400000,
stream_min_timeout=5, stream_timeout=5,
send_buffer_size=102400000, read_buffer_size=102400000,
handler_max_count=0, block_on_start=False,
request_queue_size=10, verbose=False):
settings = {
"port": int(port),
"version": str(version),
"headers_min_size": int(headers_min_size),
"headers_max_size": int(headers_max_size),
"frame_min_size": int(frame_min_size),
"frame_max_size": int(frame_max_size),
"body_max": int(body_max),
"stream_min_timeout": int(stream_min_timeout),
"stream_timeout": int(stream_timeout),
"send_buffer_size": int(send_buffer_size),
"read_buffer_size": int(read_buffer_size),
"handler_max_count": int(handler_max_count),
"block_on_start": bool(block_on_start),
"request_queue_size": int(request_queue_size),
"verbose": bool(verbose)
}
if settings["frame_min_size"] > settings["frame_max_size"]:
settings["frame_min_size"] = settings["frame_max_size"]
if settings["frame_min_size"] < 1000:
raise ValueError("frame_min_size must be at least 1000; was {}".format(settings["frame_min_size"]))
if settings["send_buffer_size"] < settings["frame_max_size"] + 38 or \
settings["read_buffer_size"] < settings["frame_max_size"] + 38:
raise ValueError("send and read buffer size must be greater than "
"frame_max_size+38 ({}); were {} and {}".format(settings["frame_max_size"] + 38,
settings["send_buffer_size"],
settings["read_buffer_size"]))
return settings
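# Illustrative note (added for clarity, not part of the original module): server_settings
# simply returns a validated dict; all keyword names come from the signature above.
#
#   settings = server_settings(port=10000, verbose=True)
#
# frame_min_size is clamped down to frame_max_size if it was larger, and a ValueError is
# raised if frame_min_size ends up below 1000 or if either buffer size is smaller than
# frame_max_size + 38.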
def begin_exchange(request):
"""
Sends CepticResponse to client to start continuous exchange with server
:param request: CepticRequest instance
:return: StreamHandler instance from CepticRequest (request.stream)
"""
response = CepticResponse(status=200)
response.exchange = True
request.stream.send_response(response)
return request.stream
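# Illustrative sketch (added for clarity, not part of the original module): a hypothetical
# endpoint that keeps the stream open with begin_exchange. The route "/live" and the server
# variable `app` are assumptions for the example; send_data/get_full_data/send_close are the
# stream methods used elsewhere in this module, and the exchange details are elided.
#
#   @app.route_get("/live")
#   def live(request):
#       stream = begin_exchange(request)   # sends the 200 "exchange" response
#       stream.send_data("ready")
#       ...
#       stream.send_close("done")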
def basic_server_command(stream, request, endpoint_func, endpoint_dict):
# get body if content length header is present
if request.content_length:
# TODO: Add file transfer functionality
try:
request.body = stream.get_full_data(max_length=request.content_length)
except StreamTotalDataSizeException:
stream.send_close("body received is greater than reported content_length")
return
except StreamException:
return
# set request stream to local stream
request.stream = stream
# perform command function with appropriate params
try:
response = endpoint_func(request, **endpoint_dict)
except Exception as e:
stream.send_close("ENDPOINT_FUNC caused Exception {},{}".format(type(e), str(e)))
return
# if CepticResponse not returned, try to parse as tuple and create CepticResponse
if not isinstance(response, CepticResponse):
try:
if not isinstance(response, int):
response_tuple = tuple(response)
else:
response_tuple = (response,)
status = int(response_tuple[0])
body = None
headers = None
errors = None
# if error status, assume error message included
if len(response_tuple) > 1:
if CepticStatusCode.is_error(status):
errors = str(response_tuple[1])
else:
body = str(response_tuple[1])
# assume third item is headers
if len(response_tuple) > 2:
if not isinstance(response_tuple[2], dict):
raise ValueError("3rd argument must be type dict")
headers = response_tuple[2]
response = CepticResponse(status, body, headers, errors)
except Exception as e:
error_response = CepticResponse(500,
errors="endpoint returned invalid data type '{}'' on server".format(
type(response)))
if request.config_settings["verbose"]:
print("Exception type ({}) raised while generating response: {}".format(type(e), str(e)))
stream.send_response(error_response)
return
stream.send_response(response)
# if Content-Length header present, send response body
if response.content_length:
# TODO: Add file transfer functionality
try:
stream.send_data(response.body)
except StreamException as e:
stream.send_close("SERVER STREAM EXCEPTION: {},{}".format(type(e), str(e)))
if request.config_settings["verbose"]:
print("StreamException type ({}) raised while sending response body: {}".format(type(e), str(e)))
# close connection
stream.send_close("BASIC_SERVER_COMMAND COMPLETE")
def check_if_setting_bounded(client_min, client_max, server_min, server_max, name):
error = None
value = None
if client_max <= server_max:
if client_max < server_min:
error = "client max {0} ({1}) is less than server's min {0} ({2})".format(
name, client_max, server_min)
else:
value = client_max
else:
# since client max is greater than server max, check if server max is appropriate
if client_min > server_max:
# client min greater than server max, so not compatible
error = "client min {0} ({1}) is greater than server's max {0} ({2})".format(
name, client_min, server_max)
# otherwise use server version
else:
value = server_max
return error, value
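# Illustrative note (added for clarity, not part of the original module): with
# client_min=1000, client_max=5000, server_min=2000, server_max=4000, the client max
# exceeds the server max but the client min does not, so the server max wins:
#
#   error, value = check_if_setting_bounded(1000, 5000, 2000, 4000, "frame size")
#   # error is None, value == 4000
#
# If instead client_min were 6000 (above server_max), the ranges would not overlap and an
# error string would be returned with value left as None.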
class CepticServer(object):
def __init__(self, settings=None, certfile=None, keyfile=None, cafile=None, secure=True):
"""
General purpose Ceptic server
:param settings: dict generated by server_settings
:param certfile: server public key file (certificate)
:param keyfile: server private key file
:param cafile: optional client certificate for client verification
:param secure: toggles if certificates/encryption should be used (default: True); recommended to be kept True
except for specific development purposes
"""
if not settings:
self.settings = server_settings()
else:
self.settings = settings
self.shouldStop = threading.Event()
self.isDoneRunning = threading.Event()
# set up endpoint manager
self.endpointManager = EndpointManager.server()
# set up certificate manager
self.certificateManager = CertificateManager.server()
self.setup_certificate_manager(certfile, keyfile, cafile, secure)
# create StreamManager dict
self.managerDict = {}
self.manager_closed_event = threading.Event()
self.clean_timeout = 0.5
# initialize
self.initialize()
def setup_certificate_manager(self, certfile=None, keyfile=None, cafile=None, secure=True):
if certfile is None or keyfile is None:
secure = False
ssl_config = create_ssl_config(certfile=certfile, keyfile=keyfile, cafile=cafile, secure=secure)
self.certificateManager.set_ssl_config(ssl_config)
def initialize(self):
"""
Initialize server configuration and processes
:return: None
"""
# set up config
self.certificateManager.generate_context_tls()
# add get command
self.add_command("get")
# add post command
self.add_command("post")
# add update command
self.add_command("update")
# add delete command
self.add_command("delete")
def add_command(self, command):
"""
        Add command name to endpoint manager; registers the command with basic_server_command
:param command: string command name
:return: None
"""
self.endpointManager.add_command(
str(command),
basic_server_command,
command_settings(body_max=self.settings["body_max"])
)
def start(self):
"""
Start running server
:return: None
"""
# run processes
try:
self.start_server()
except Exception as e:
self.stop()
raise e
def start_server(self):
if self.settings["block_on_start"]:
self.run_server()
else:
server_thread = threading.Thread(target=self.run_server)
server_thread.daemon = True
server_thread.start()
def run_server(self, delay_time=0.1):
"""
        Start the server loop, accepting connections until signalled to stop
:param delay_time: time to wait for a connection before repeating, default is 0.1 seconds
:return: None
"""
if self.settings["verbose"]:
print('ceptic server started - version {} on port {}'.format(
self.settings["version"], self.settings["port"]))
# create a socket object
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# server_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
server_socket.settimeout(5)
socket_list = []
# get local machine name
host = ""
port = self.settings["port"]
# bind to the port
try:
server_socket.bind((host, port))
except Exception as e:
if self.settings["verbose"]:
print("Error while binding server_socket: {}".format(str(e)))
self.shouldStop.set()
# queue up to specified number of requests
server_socket.listen(self.settings["request_queue_size"])
socket_list.append(server_socket)
# start clean thread
clean_thread = threading.Thread(target=self.clean_managers)
clean_thread.daemon = True
clean_thread.start()
while not self.shouldStop.is_set():
ready_to_read, ready_to_write, in_error = select.select(socket_list, [], [], delay_time)
for sock in ready_to_read:
# establish a connection
if sock == server_socket:
s, addr = server_socket.accept()
# enable socket blocking
s.setblocking(True)
# s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
new_thread = threading.Thread(target=self.handle_new_socket, args=(s, addr))
new_thread.daemon = True
new_thread.start()
# shut down managers
self.close_all_managers()
# wait for clean thread to finish
clean_thread.join()
# shut down server socket
try:
server_socket.shutdown(socket.SHUT_RDWR)
except IOError as e:
if self.settings["verbose"]:
print("Error while shutting down server_socket: {}".format(str(e)))
server_socket.close()
self.isDoneRunning.set()
def handle_new_socket(self, s, addr):
"""
        Handles a particular request; executed by another thread or process so it does not block the main server loop
:param s: basic socket instance
:param addr: socket address
:return: None
"""
if self.settings["verbose"]:
print("Got a connection from {}".format(addr))
# wrap socket with TLS, handshaking happens automatically
try:
s = self.certificateManager.wrap_socket(s)
except CertificateManagerException as e:
if self.settings["verbose"]:
print("CertificateManagerException caught, connection terminated: {}".format(str(e)))
s.close()
return
# wrap socket with SocketCeptic, to send length of message first
s = SocketCeptic.wrap_socket(s)
# get version
client_version = s.recv_raw(16).strip()
# get client frame_min_size
client_frame_min_size_str = s.recv_raw(16).strip()
# get client frame_max_size
client_frame_max_size_str = s.recv_raw(16).strip()
# get client headers_min_size
client_headers_min_size_str = s.recv_raw(16).strip()
# get client headers_max_size
client_headers_max_size_str = s.recv_raw(16).strip()
# get client stream_min_timeout
client_stream_min_timeout_str = s.recv_raw(4).strip()
# get client stream timeout
client_stream_timeout_str = s.recv_raw(4).strip()
# see if values are acceptable
stream_settings = {"verbose": self.settings["verbose"],
"send_buffer_size": self.settings["send_buffer_size"],
"read_buffer_size": self.settings["read_buffer_size"]}
errors = []
# convert received values to int
client_frame_min_size = None
client_frame_max_size = None
client_headers_min_size = None
client_headers_max_size = None
client_stream_min_timeout = None
client_stream_timeout = None
try:
client_frame_min_size = int(client_frame_min_size_str)
client_frame_max_size = int(client_frame_max_size_str)
client_headers_min_size = int(client_headers_min_size_str)
client_headers_max_size = int(client_headers_max_size_str)
client_stream_min_timeout = int(client_stream_min_timeout_str)
client_stream_timeout = int(client_stream_timeout_str)
except ValueError:
errors.append("received value must be an int, not string")
if not errors:
# check if server's frame size is acceptable
error, value = check_if_setting_bounded(client_frame_min_size, client_frame_max_size,
self.settings["frame_min_size"], self.settings["frame_max_size"],
"frame size")
if error:
errors.append(error)
else:
stream_settings["frame_max_size"] = value
# check if server's header size is acceptable
error, value = check_if_setting_bounded(client_headers_min_size, client_headers_max_size,
self.settings["headers_min_size"],
self.settings["headers_max_size"],
"headers size")
if error:
errors.append(error)
else:
stream_settings["headers_max_size"] = value
# check if server's timeout is acceptable
error, value = check_if_setting_bounded(client_stream_min_timeout, client_stream_timeout,
self.settings["stream_min_timeout"],
self.settings["stream_timeout"],
"stream timeout")
if error:
errors.append(error)
else:
stream_settings["stream_timeout"] = value
# send response
# if errors present, send negative response with explanation
if errors:
s.send_raw("n")
error_string = str(errors)[:1024]
s.sendall(error_string)
if self.settings["verbose"]:
print("client not compatible with server settings, connection terminated")
s.close()
return
# otherwise send positive response along with decided values
else:
stream_settings["handler_max_count"] = self.settings["handler_max_count"]
s.send_raw("y")
s.send_raw(format(stream_settings["frame_max_size"], ">16"))
s.send_raw(format(stream_settings["headers_max_size"], ">16"))
s.send_raw(format(stream_settings["stream_timeout"], ">4"))
s.send_raw(format(stream_settings["handler_max_count"], ">4"))
# create StreamManager
manager_uuid = str(uuid.uuid4())
manager = StreamManager.server(s, manager_uuid, stream_settings, CepticServer.handle_new_connection,
(self.endpointManager,), self.manager_closed_event)
self.managerDict[manager_uuid] = manager
manager.daemon = True
manager.start()
@staticmethod
def handle_new_connection(stream, local_settings, endpoint_manager):
# store errors in request
errors = []
# get request from request data
request = None
command_func = handler = variable_dict = None
try:
request = CepticRequest.from_data(stream.get_full_header_data())
except UnknownEncodingException as e:
errors.append(str(e))
if not errors:
            # begin checking validity of the request
# check that command and endpoint are of valid length
if len(request.command) > 128:
errors.append(
"command too long; should be no more than 128 characters, but was {}".format(len(request.command)))
if len(request.endpoint) > 128:
errors.append(
"endpoint too long; should be no more than 128 characters, but was {}".format(
len(request.endpoint)))
# try to get endpoint objects from endpointManager
try:
command_func, handler, variable_dict, settings, settings_override = endpoint_manager.get_endpoint(
request.command, request.endpoint)
# merge settings; endpoint settings take precedence over command settings
settings_merged = copy.deepcopy(settings)
if settings_override is not None:
settings_merged.update(settings_override)
# set request settings to merged settings
request.settings = settings_merged
# set server settings as request's config settings
request.config_settings = local_settings
except KeyError as e:
errors.append(str(e))
except EndpointManagerException as e:
errors.append(str(e))
# check that headers are valid/proper
errors.extend(CepticServer.check_new_connection_headers(request))
# if no errors, send positive response and continue
if not errors:
stream.send_response(CepticResponse(200))
# set stream compression, based on request header
stream.set_encode(request.encoding)
command_func(stream, request, handler, variable_dict)
# otherwise send info back
else:
# send frame with error and bad status
stream.send_response(CepticResponse(400, errors=errors))
stream.send_close()
@staticmethod
def check_new_connection_headers(request):
errors = []
# check that content_length is of allowed length
if request.content_length:
# if content length is longer than set max body length, invalid
if request.content_length > request.max_content_length:
errors.append("Content-Length ({}) exceeds server's allowed max body length of {}".format(
request.content_length,
request.max_content_length))
# check that encoding is recognized and valid
if request.encoding:
valid, error = EncodeGetter.check(request.encoding)
if not valid:
errors.append("Encoding is not valid; {}".format(error))
return errors
def route(self, endpoint, command, settings=None):
"""
Decorator for adding endpoints to server instance
:param endpoint: string url for route to be added as an endpoint
:param command: string Ceptic command name (get, post, update, delete)
        :param settings: optional dict generated by command_settings
"""
def decorator_route(func):
self.add_route(endpoint, command, func, settings)
return func
return decorator_route
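    # Illustrative sketch (added for clarity, not part of the original module): registering
    # an endpoint with the decorator. The path "/hello" and the variable `app` are assumptions;
    # per basic_server_command above, an endpoint may return a CepticResponse or a plain
    # (status, body) tuple.
    #
    #   app = CepticServer()
    #
    #   @app.route("/hello", "get")
    #   def hello(request):
    #       return 200, "hello world"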
def route_get(self, endpoint, settings=None):
"""
Decorator for adding GET endpoints to server instance
:param endpoint: string url for route to be added as an endpoint
        :param settings: optional dict generated by command_settings
"""
def decorator_route(func):
self.add_route(endpoint, "get", func, settings)
return func
return decorator_route
def route_post(self, endpoint, settings=None):
"""
Decorator for adding POST endpoints to server instance
:param endpoint: string url for route to be added as an endpoint
        :param settings: optional dict generated by command_settings
"""
def decorator_route(func):
self.add_route(endpoint, "post", func, settings)
return func
return decorator_route
def route_update(self, endpoint, settings=None):
"""
Decorator for adding UPDATE endpoints to server instance
:param endpoint: string url for route to be added as an endpoint
        :param settings: optional dict generated by command_settings
"""
def decorator_route(func):
self.add_route(endpoint, "update", func, settings)
return func
return decorator_route
def route_delete(self, endpoint, settings=None):
"""
Decorator for adding DELETE endpoints to server instance
:param endpoint: string url for route to be added as an endpoint
        :param settings: optional dict generated by command_settings
"""
def decorator_route(func):
self.add_route(endpoint, "delete", func, settings)
return func
return decorator_route
def add_route(self, endpoint, command, func, settings=None):
"""
Non-decorator func for adding endpoints to server instance
:param endpoint: string url for route to be added as an endpoint
:param command: string Ceptic command name (get, post, update, delete)
:param func: endpoint function
        :param settings: optional dict generated by command_settings
:return:
"""
self.endpointManager.add_endpoint(command, endpoint, func, settings)
def add_route_get(self, endpoint, func, settings=None):
"""
Non-decorator func for adding GET endpoints to server instance
:param endpoint: string url for route to be added as an endpoint
:param func: endpoint function
        :param settings: optional dict generated by command_settings
:return:
"""
self.add_route(endpoint, "get", func, settings)
def add_route_post(self, endpoint, func, settings=None):
"""
Non-decorator func for adding POST endpoints to server instance
:param endpoint: string url for route to be added as an endpoint
:param func: endpoint function
        :param settings: optional dict generated by command_settings
:return:
"""
self.add_route(endpoint, "post", func, settings)
def add_route_update(self, endpoint, func, settings=None):
"""
Non-decorator func for adding UPDATE endpoints to server instance
:param endpoint: string url for route to be added as an endpoint
:param func: endpoint function
        :param settings: optional dict generated by command_settings
:return:
"""
self.add_route(endpoint, "update", func, settings)
def add_route_delete(self, endpoint, func, settings=None):
"""
Non-decorator func for adding DELETE endpoints to server instance
:param endpoint: string url for route to be added as an endpoint
:param func: endpoint function
        :param settings: optional dict generated by command_settings
:return:
"""
self.add_route(endpoint, "delete", func, settings)
def stop(self, blocking=True):
"""
Properly begin stopping the server; tells the server loop to stop.
If blocking is True, waits until the server has finished closing.
:param blocking: whether to block until the server fully closes
:return: None
"""
self.shouldStop.set()
if blocking and not self.is_stopped():
self.wait_until_not_running()
def wait_until_not_running(self):
"""
Blocks until server fully closes
:return: None
"""
self.isDoneRunning.wait()
def is_stopped(self):
"""
Returns True if server is not running
"""
return self.shouldStop.is_set() and self.isDoneRunning.is_set()
def is_running(self):
"""
Returns True if server is running
"""
return not self.shouldStop.is_set() and not self.isDoneRunning.is_set()
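# Illustrative sketch of the stop/wait API above (assumes a constructed and
# started server instance named `server`):
#
#   server.stop(blocking=False)       # signal the server loop to stop
#   server.wait_until_not_running()   # block until shutdown has completed
#   assert server.is_stopped()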
def close_all_managers(self):
"""
Stops and removes all managers
:return: None
"""
keys = list(self.managerDict)
for key in keys:
try:
self.managerDict[key].stop()
except KeyError:
pass
for key in keys:
try:
self.managerDict[key].wait_until_not_running()
except KeyError:
pass
self.remove_manager(key)
def clean_managers(self):
"""
Loop for cleaning closed or timed out managers until server is signalled to stop
:return: None
"""
while not self.shouldStop.is_set():
manager_closed = self.manager_closed_event.wait(self.clean_timeout)
if manager_closed:
self.manager_closed_event.clear()
managers = list(self.managerDict)
for manager_name in managers:
manager = self.managerDict.get(manager_name)
if manager and manager.is_stopped():
self.remove_manager(manager_name)
def remove_manager(self, manager_uuid):
"""
Removes manager with corresponding UUID from managerDict
:param manager_uuid: string form of UUID
:return: None
"""
try:
self.managerDict.pop(manager_uuid)
except KeyError:
pass
|
__init__.py
|
"""
Cobra RMI Framework
Cobra is a remote method invocation interface that is very "pythony". It is
MUCH like its inspiration pyro, but slimmer and safer for things like threading
and object de-registration. Essentially, cobra allows you to call methods from
and get/set attributes on objects that exist on a remote system.
"""
# Copyright (C) 2011 Invisigoth - See LICENSE file for details
import os
import sys
import json
import time
import errno
import types
import Queue
import socket
import struct
import urllib2
import traceback
try:
import msgpack
dumpargs = {}
loadargs = {'use_list':0}
if msgpack.version >= (0,4,1):
dumpargs['use_bin_type'] = 1
loadargs['encoding'] = 'utf-8'
except ImportError:
msgpack = None
import cPickle as pickle
from threading import currentThread,Thread,RLock,Timer,Lock,Event
from SocketServer import ThreadingTCPServer, BaseRequestHandler
daemon = None
verbose = False
version = "Cobra2"
COBRA_PORT=5656
COBRASSL_PORT=5653
cobra_retrymax = None # Optional *global* retry max count
socket_builders = {} # Registered socket builders
# Message Types
COBRA_HELLO = 0
COBRA_CALL = 1
COBRA_GETATTR = 2
COBRA_SETATTR = 3
COBRA_ERROR = 4
COBRA_GOODBYE = 5
COBRA_AUTH = 6
COBRA_NEWOBJ = 7 # Used to return object references
SFLAG_MSGPACK = 0x0001
SFLAG_JSON = 0x0002
class CobraException(Exception):
"""Base for Cobra exceptions"""
pass
class CobraClosedException(CobraException):
"""Raised when a connection is unexpectedly closed."""
pass
class CobraRetryException(CobraException):
"""Raised when the retrymax (if present) for a proxy object is exceeded."""
pass
class CobraPickleException(CobraException):
"""Raised when pickling fails."""
pass
class CobraAuthException(CobraException):
'''Raised when specified auth data is rejected'''
pass
class CobraPermDenied(CobraException):
'''Raised when a call/setattr/getattr is not allowed'''
class CobraErrorException(Exception):
'''
Raised when we receive a COBRA_ERROR message and the current options
dont support serializing exception objects.
'''
def connectSocket(host, port, timeout=None):
"""
Make the long names go away....
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if timeout is not None:
s.settimeout(timeout)
s.connect((host, port))
return s
def getCallerInfo():
"""
This function may be used from *inside* a method being called
by a remote caller. It will return a tuple of host,port for the
other side of the connection... use wisely ;)
"""
return getattr(currentThread(), "_cobra_caller_info", None)
def getLocalInfo():
"""
This function returns the local host,port combination being
used in the socket servicing the current request
"""
return getattr(currentThread(), "_cobra_local_info", None)
def getUserInfo():
'''
Get the cobra authenticated username of the current user
( or None if no user was authenticated )
'''
return getattr(currentThread(), "_cobra_authuser", None)
def setCallerInfo(callerinfo):
"""
This is necessary because of crazy python method call
name munging for thread attributes ;)
"""
currentThread()._cobra_caller_info = callerinfo
def setUserInfo(authuser):
currentThread()._cobra_authuser = authuser
def setLocalInfo(localinfo):
currentThread()._cobra_local_info = localinfo
def nocobra(f):
f.__no_cobra__ = True
return f
def newobj(f):
f._cobra_newobj = True
return f
def newobjwith(f):
f._cobra_newobj = True
f._cobra_newobjwith = True
return f
class CobraMethod:
def __init__(self, proxy, methname):
self.proxy = proxy
self.methname = methname
self.__name__ = methname
def __call__(self, *args, **kwargs):
name = self.proxy._cobra_name
if verbose: print "CALLING:",name,self.methname,repr(args)[:20],repr(kwargs)[:20]
async = kwargs.pop('_cobra_async',None)
if async:
csock = self.proxy._cobra_getsock()
return csock.cobraAsyncTransaction(COBRA_CALL, name, (self.methname, args, kwargs))
with self.proxy._cobra_getsock() as csock:
mtype, name, data = csock.cobraTransaction(COBRA_CALL, name, (self.methname, args, kwargs))
if mtype == COBRA_CALL:
return data
if mtype == COBRA_NEWOBJ:
uri = swapCobraObject(self.proxy._cobra_uri,data)
return CobraProxy(uri)
raise data
def pickledumps(o):
return pickle.dumps( o, protocol=pickle.HIGHEST_PROTOCOL )
def jsonloads(b):
return json.loads(b)
def jsondumps(b):
return json.dumps(b)
def toUtf8(s):
if type(s) == unicode:
return s.encode('utf8')
return s
class CobraSocket:
def __init__(self, socket, sflags=0):
self.sflags = sflags
self.socket = socket
self.dumps = pickledumps
self.loads = pickle.loads
if sflags & SFLAG_MSGPACK:
if not msgpack:
raise Exception('Missing "msgpack" python module ( http://visi.kenshoto.com/viki/Msgpack )')
def msgpackloads(b):
return msgpack.loads(b, **loadargs)
def msgpackdumps(b):
return msgpack.dumps(b, **dumpargs)
self.dumps = msgpackdumps
self.loads = msgpackloads
if sflags & SFLAG_JSON:
self.dumps = jsondumps
self.loads = jsonloads
def getSockName(self):
return self.socket.getsockname()
def getPeerName(self):
return self.socket.getpeername()
def sendMessage(self, mtype, objname, data):
"""
sendMessage is responsible for the transmission of cobra messages,
and socket reconnection in the event that the send fails for network
reasons.
"""
#NOTE: for errors while using msgpack, we must send only the str
if mtype == COBRA_ERROR and self.sflags & (SFLAG_MSGPACK | SFLAG_JSON):
data = str(data)
try:
buf = self.dumps(data)
except Exception, e:
raise CobraPickleException("The arguments/attributes must be serializable: %s" % e)
objname = toUtf8(objname)
self.sendExact(struct.pack("<III", mtype, len(objname), len(buf)) + objname + buf)
def recvMessage(self):
"""
Returns tuple of mtype, objname, and data
This method is *NOT* responsible for re-connection, because there
is no context on the server side for what to send on re-connect.
Client-side uses of the CobraSocket object should use cobraTransaction
to ensure re-transmission of the request on reception errors.
"""
s = self.socket
hdr = self.recvExact(12)
mtype, nsize, dsize = struct.unpack("<III", hdr)
name = self.recvExact(nsize)
data = self.loads(self.recvExact(dsize))
#NOTE: for errors while using msgpack, we must send only the str
if mtype == COBRA_ERROR and self.sflags & (SFLAG_MSGPACK | SFLAG_JSON):
data = CobraErrorException(data)
return (mtype, name, data)
def recvExact(self, size):
buf = ""
s = self.socket
while len(buf) != size:
x = s.recv(size - len(buf))
if len(x) == 0:
raise CobraClosedException("Socket closed in recvExact...")
buf += x
return buf
def sendExact(self, buf):
self.socket.sendall(buf)
class SocketBuilder:
def __init__(self, host, port, timeout=None):
self.host = host
self.port = port
self.timeout = timeout
self.retrymax = None
self.ssl = False
self.sslca = None
self.sslcrt = None
self.sslkey = None
def setTimeout(self, timeout):
'''
Set the timeout for newly created sockets.
'''
self.timeout = timeout
def setSslEnabled(self, status):
self.ssl = status
def setSslCa(self, crtfile):
'''
Set the SSL Certificate Authority for this socket builder.
( This enables checking the server's presented cert )
'''
self.ssl = True
self.sslca = crtfile
def setSslClientCert(self, crtfile, keyfile):
'''
Set the cert/key used by this client to negotiate SSL.
'''
self.ssl = True
self.sslcrt = crtfile
self.sslkey = keyfile
def __call__(self):
host = self.host
port = self.port
timeout = self.timeout
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.timeout is not None:
sock.settimeout(self.timeout)
if self.ssl:
import ssl
sslkwargs = {}
if self.sslca:
sslkwargs['ca_certs'] = self.sslca
sslkwargs['cert_reqs']=ssl.CERT_REQUIRED
if self.sslcrt and self.sslkey:
sslkwargs['keyfile'] = self.sslkey
sslkwargs['certfile'] = self.sslcrt
sock = ssl.wrap_socket(sock, **sslkwargs)
sock.connect((self.host, self.port))
return sock
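# Illustrative sketch of building an SSL-enabled client socket with the class
# above; the host and certificate file names are hypothetical.
#
#   builder = SocketBuilder('server.example.com', COBRASSL_PORT, timeout=30)
#   builder.setSslCa('ca.crt')                            # verify the server cert
#   builder.setSslClientCert('client.crt', 'client.key')
#   sock = builder()                                      # connect and wrap in SSL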
class CobraAsyncTrans:
def __init__(self, csock, mtype, objname, data):
self.data = data
self.csock = csock
self.mtype = mtype
self.objname = objname
# Issue the call..
self.asyncCobraTransaction()
def asyncCobraTransaction(self):
"""
This is an API for clients to use. It will retransmit
a sendMessage() automagically on receipt of an exception
in recvMessage()
"""
while True:
try:
self.csock.sendMessage(self.mtype, self.objname, self.data)
return
except CobraAuthException, e:
raise
except (socket.error,CobraClosedException), e:
self.csock.reConnect()
def wait(self):
try:
while True:
try:
mtype,name,data = self.csock.recvMessage()
if mtype == COBRA_CALL:
return data
raise data
except CobraAuthException, e:
raise
except (socket.error,CobraClosedException), e:
# force a reconnect
self.csock.reConnect()
self.asyncCobraTransaction()
finally:
if self.csock.pool:
self.csock.pool.put(self.csock)
self.csock = None
class CobraClientSocket(CobraSocket):
def __init__(self, sockctor, retrymax=cobra_retrymax, sflags=0, authinfo=None, pool=None):
CobraSocket.__init__(self, sockctor(), sflags=sflags)
self.sockctor = sockctor
self.retries = 0
self.trashed = False
self.retrymax = retrymax
self.authinfo = authinfo
self.pool = pool
def __enter__(self):
return self
def __exit__(self, extype, value, tb):
if self.pool:
self.pool.put(self)
def reConnect(self):
"""
Handle the event where we need to reconnect
"""
while self.retrymax is None or self.retries < self.retrymax:
if verbose: sys.stderr.write("COBRA: Reconnection Attempt\n")
try:
self.socket = self.sockctor()
# A bit messy but... a fix for now...
# If we have authinfo lets authenticate
authinfo = self.authinfo
if authinfo != None:
self.sendMessage(COBRA_AUTH, '', authinfo)
mtype,rver,data = self.recvMessage()
if mtype != COBRA_AUTH:
raise CobraAuthException('Authentication Failed!')
self.retries = 0
return
except CobraAuthException, e:
raise
except Exception, e:
traceback.print_exc()
time.sleep( max(2 ** self.retries, 10) )
self.retries += 1
self.trashed = True
raise CobraRetryException()
def cobraAsyncTransaction(self, mtype, objname, data):
return CobraAsyncTrans(self, mtype, objname, data)
def cobraTransaction(self, mtype, objname, data):
"""
This is an API for clients to use. It will retransmit
a sendMessage() automagically on receipt of an exception
in recvMessage()
"""
while True:
try:
self.sendMessage(mtype, objname, data)
return self.recvMessage()
except CobraAuthException, e:
raise
except CobraClosedException, e:
self.reConnect()
except socket.error, e:
self.reConnect()
class CobraDaemon(ThreadingTCPServer):
def __init__(self, host="", port=COBRA_PORT, sslcrt=None, sslkey=None, sslca=None, msgpack=False, json=False):
'''
Construct a cobra daemon object.
Parameters:
host - Optional hostname/ip to bind the service to (default: inaddr_any)
port - The port to bind (Default: COBRA_PORT)
msgpack - Use msgpack serialization
# SSL Options
sslcrt / sslkey - Specify sslcrt and sslkey to enable SSL server side
sslca - Specify an SSL CA key to use validating client certs
'''
self.thr = None
self.run = True
self.shared = {}
self.dowith = {}
self.host = host
self.port = port
self.reflock = RLock()
self.refcnts = {}
self.authmod = None
self.sflags = 0
if msgpack and json:
raise Exception('CobraDaemon can not use both msgpack *and* json!')
if msgpack:
requireMsgpack()
self.sflags |= SFLAG_MSGPACK
if json:
self.sflags |= SFLAG_JSON
# SSL Options
self.sslca = sslca
self.sslcrt = sslcrt
self.sslkey = sslkey
self.cansetattr = True
self.cangetattr = True
if sslcrt and not os.path.isfile(sslcrt):
raise Exception('CobraDaemon: sslcrt param must be a file!')
if sslkey and not os.path.isfile(sslkey):
raise Exception('CobraDaemon: sslkey param must be a file!')
if sslca and not os.path.isfile(sslca):
raise Exception('CobraDaemon: sslca param must be a file!')
self.allow_reuse_address = True
ThreadingTCPServer.__init__(self, (host, port), CobraRequestHandler)
if port == 0:
self.port = self.socket.getsockname()[1]
self.daemon_threads = True
self.recvtimeout = None
def logCallerError(self, oname, args, msg=""):
pass
def setGetAttrEnabled(self, status):
self.cangetattr = status
def setSetAttrEnabled(self, status):
self.cansetattr = status
def setSslCa(self, crtfile):
'''
Set the SSL Certificate Authority used by this server.
( to validate client certs )
'''
self.sslca = crtfile
def setSslServerCert(self, crtfile, keyfile):
'''
Set the cert/key used by this server to negotiate SSL.
'''
self.sslcrt = crtfile
self.sslkey = keyfile
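# Illustrative sketch of an SSL-enabled daemon; the certificate paths are
# hypothetical.
#
#   daemon = CobraDaemon(port=COBRASSL_PORT, sslcrt='server.crt', sslkey='server.key')
#   daemon.setSslCa('ca.crt')    # additionally require valid client certificates
#   daemon.fireThread()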
def fireThread(self):
self.thr = Thread(target=self.serve_forever)
self.thr.setDaemon(True)
self.thr.start()
def stopServer(self):
self.run = False
self.server_close()
self.thr.join()
def serve_forever(self):
try:
ThreadingTCPServer.serve_forever(self)
except Exception, e:
if not self.run:
return
raise
def setAuthModule(self, authmod):
'''
Enable an authentication module for this server
( all connections *must* be authenticated through the authmod )
NOTE: See cobra.auth.* for various auth module implementations
Example:
import cobra.auth.shadow as c_a_shadow
authmod = c_a_shadow.ShadowFileAuth('passwdfile.txt')
cdaemon = CobraDaemon()
cdaemon.setAuthModule(authmod)
'''
self.authmod = authmod
def getSharedObject(self, name):
return self.shared.get(name, None)
def getSharedObjects(self):
'''
Return a list of (name, obj) for the currently shared objects.
Example:
for name,obj in daemon.getSharedObjects():
print('%s: %r' % (name,obj))
'''
return self.shared.items()
def getSharedName(self, obj):
'''
If this object is shared already, get the name...
'''
for name, sobj in self.shared.items():
if sobj == obj:
return name
return None
def getRandomName(self):
ret = ""
for byte in os.urandom(16):
ret += "%.2x" % ord(byte)
return ret
def shareObject(self, obj, name=None, doref=False, dowith=False):
"""
Share an object in this cobra server. By specifying
doref=True you will let CobraProxy objects decide that
the object is done and should be un-shared. Also, if
name == None a random name is chosen. Use dowith=True
to cause sharing/unsharing to enter/exit (requires doref=True).
Returns: name (or the newly generated random one)
"""
refcnt = None
if dowith and not doref:
raise Exception('dowith *requires* doref!')
if doref:
refcnt = 0
if dowith:
obj.__enter__()
if name == None:
name = self.getRandomName()
self.shared[name] = obj
self.dowith[name] = dowith
self.refcnts[name] = refcnt
return name
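# Illustrative sketch of ref-counted sharing (the `session` object is a
# hypothetical context manager):
#
#   name = daemon.shareObject(session, doref=True, dowith=True)
#   # A CobraProxy for `name` used in a `with` block sends COBRA_GOODBYE on
#   # exit, which decrefs the object and, at refcount 0, unshares it and
#   # calls its __exit__.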
def getObjectRefCount(self, name):
return self.refcnts.get(name)
def decrefObject(self, name, ok=True):
"""
Decref this object and if it reaches 0, unshare it.
"""
if verbose: print "DECREF:",name
self.reflock.acquire()
try:
refcnt = self.refcnts.get(name, None)
if refcnt != None:
refcnt -= 1
self.refcnts[name] = refcnt
if refcnt == 0:
self.unshareObject(name,ok=ok)
finally:
self.reflock.release()
def increfObject(self, name):
if verbose: print "INCREF:",name
self.reflock.acquire()
try:
refcnt = self.refcnts.get(name, None)
if refcnt != None:
refcnt += 1
self.refcnts[name] = refcnt
finally:
self.reflock.release()
def unshareObject(self, name, ok=True):
if verbose: print 'UNSHARE',name
self.refcnts.pop(name, None)
obj = self.shared.pop(name, None)
# If we are using a with block, notify it
if self.dowith.pop(name, False):
args = (None,None,None)
if not ok:
args = (Exception, Exception('with boom'), None)
obj.__exit__(*args)
return obj
class CobraRequestHandler(BaseRequestHandler):
def handle(self):
c = CobraConnectionHandler(self.server, self.request)
c.handleClient()
class CobraConnectionHandler:
def __init__(self, daemon, socket):
self.daemon = daemon
self.socket = socket
self.handlers = (
self.handleHello,
self.handleCall,
self.handleGetAttr,
self.handleSetAttr,
self.handleError,
self.handleGoodbye,
self.handleError,
)
def handleClient(self):
peer = self.socket.getpeername()
me = self.socket.getsockname()
if verbose: print "GOT A CONNECTIONN",peer
sock = self.socket
if self.daemon.sslkey:
import ssl
sslca = self.daemon.sslca
keyfile = self.daemon.sslkey
certfile = self.daemon.sslcrt
sslreq = ssl.CERT_NONE
# If they specify a CA key, require valid client certs
if sslca:
sslreq=ssl.CERT_REQUIRED
sock = ssl.wrap_socket(sock,
keyfile=keyfile, certfile=certfile,
ca_certs=sslca, cert_reqs=sslreq,
server_side=True)
if self.daemon.recvtimeout:
sock.settimeout( self.daemon.recvtimeout )
authuser = None
csock = CobraSocket(sock, sflags=self.daemon.sflags)
setCallerInfo(peer)
setLocalInfo(me)
# If we have an authmod, they must send an auth message first
if self.daemon.authmod:
mtype,name,data = csock.recvMessage()
if mtype != COBRA_AUTH:
csock.sendMessage(COBRA_ERROR, '', CobraAuthException('Authentication Required!'))
return
authuser = self.daemon.authmod.authCobraUser( data )
if not authuser:
csock.sendMessage(COBRA_ERROR, '', CobraAuthException('Authentication Failed!'))
return
csock.sendMessage(COBRA_AUTH, '', authuser)
setUserInfo( authuser )
while True:
try:
mtype,name,data = csock.recvMessage()
except CobraClosedException:
break
except socket.error:
if verbose: traceback.print_exc()
break
# If they re-auth ( app layer ) later, lets handle it...
if mtype == COBRA_AUTH and self.daemon.authmod:
authuser = self.daemon.authmod.authCobraUser(data)
if not authuser:
csock.sendMessage(COBRA_ERROR,'',CobraAuthException('Authentication Failed!'))
continue
setUserInfo(authuser)
csock.sendMessage(COBRA_AUTH, '', authuser)
continue
if self.daemon.authmod and not self.daemon.authmod.checkUserAccess( authuser, name ):
csock.sendMessage(COBRA_ERROR, name, Exception('Access Denied For User: %s' % authuser))
continue
obj = self.daemon.getSharedObject(name)
if verbose: print "MSG FOR:",name,type(obj)
if obj == None:
try:
csock.sendMessage(COBRA_ERROR, name, Exception("Unknown object requested: %s" % name))
except CobraClosedException:
pass
if verbose: print "WARNING: Got request for unknown object",name
continue
try:
handler = self.handlers[mtype]
except:
try:
csock.sendMessage(COBRA_ERROR, name, Exception("Invalid Message Type"))
except CobraClosedException:
pass
if verbose: print "WARNING: Got Invalid Message Type: %d for %s" % (mtype, data)
continue
try:
handler(csock, name, obj, data)
except Exception, e:
if verbose: traceback.print_exc()
try:
csock.sendMessage(COBRA_ERROR, name, e)
except TypeError, typee:
# Probably about pickling...
csock.sendMessage(COBRA_ERROR, name, Exception(str(e)))
except CobraClosedException:
pass
def handleError(self, csock, oname, obj, data):
print "THIS SHOULD NEVER HAPPEN"
def handleHello(self, csock, oname, obj, data):
"""
Hello messages are used to get the initial cache of
method names for the newly connected object.
"""
if verbose: print "GOT A HELLO"
self.daemon.increfObject(oname)
ret = {}
for name in dir(obj):
attr = getattr(obj, name, None)
if isinstance(attr, (types.MethodType, types.BuiltinMethodType, types.FunctionType, CobraMethod)):
ret[name] = True
try:
csock.sendMessage(COBRA_HELLO, version, ret)
except CobraClosedException:
pass
def handleCall(self, csock, oname, obj, data):
if verbose: print "GOT A CALL",data
methodname, args, kwargs = data
meth = getattr(obj, methodname)
if getattr(meth,'__no_cobra__',False):
raise CobraPermDenied('%s is tagged nocall!' % methodname)
try:
ret = meth(*args, **kwargs)
if getattr(meth,'_cobra_newobj',None):
dowith = getattr(meth,'_cobra_newobjwith',False)
objname = self.daemon.shareObject(ret, doref=True, dowith=dowith)
csock.sendMessage(COBRA_NEWOBJ, "", objname)
return
csock.sendMessage(COBRA_CALL, "", ret)
except CobraClosedException:
pass
except Exception as e:
self.daemon.logCallerError(oname, data, msg=traceback.format_exc())
raise
def handleGetAttr(self, csock, oname, obj, name):
if verbose: print "GETTING ATTRIBUTE:",name
if not self.daemon.cangetattr: raise CobraPermDenied('getattr disallowed!')
try:
csock.sendMessage(COBRA_GETATTR, "", getattr(obj, name))
except CobraClosedException:
pass
def handleSetAttr(self, csock, oname, obj, data):
if verbose: print "SETTING ATTRIBUTE:",data
if not self.daemon.cansetattr: raise CobraPermDenied('setattr disallowed!')
name,value = data
setattr(obj, name, value)
try:
csock.sendMessage(COBRA_SETATTR, "", "")
except CobraClosedException:
pass
def handleGoodbye(self, csock, oname, obj, data):
if verbose: print 'GOODBYE!',oname,obj,data
self.daemon.decrefObject(oname,ok=data)
try:
csock.sendMessage(COBRA_GOODBYE, "", "")
except CobraClosedException:
pass
def isCobraUri(uri):
try:
x = urllib2.Request(uri)
if x.get_type() not in ["cobra","cobrassl"]:
return False
except Exception, e:
return False
return True
def chopCobraUri(uri):
req = urllib2.Request(uri)
scheme = req.get_type()
host = req.get_host()
sel = req.get_selector()
# URL options are parsed later
selparts = sel.split('?', 1)
name = selparts[0].strip("/")
port = COBRA_PORT
if host.find(':') != -1:
host,portstr = host.split(":")
port = int(portstr)
# Do we have any URL options?
urlparams = {}
if len(selparts) > 1:
for urlopt in selparts[1].split('&'):
urlval = 1
if urlopt.find('=') != -1:
urlopt,urlval = urlopt.split('=',1)
urlopt = urlopt.lower()
urlparams[urlopt] = urlval
return scheme,host,port,name,urlparams
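# For example (illustrative values only):
#
#   chopCobraUri('cobra://host.example.com:5656/foo?msgpack=1')
#   # -> ('cobra', 'host.example.com', 5656, 'foo', {'msgpack': '1'})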
class CobraProxy:
'''
A proxy object for remote objects shared with Cobra
A few optional keyword arguments are handled by all cobra protocols:
retrymax - Max transparent reconnect attempts
timeout - Socket timeout for a cobra socket
authinfo - A dict, probably like {'user':'username','passwd':'mypass'}
( but it can be auth module specific )
msgpack - Use msgpack serialization
sockpool - Fixed sized pool of cobra sockets (not socket per thread)
Also, the following protocol options may be passed through the URI:
msgpack=1
authinfo=<base64( json( <authinfo dict> ))>
'''
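# Illustrative construction sketch; the host, object name and credentials are
# hypothetical:
#
#   proxy = CobraProxy('cobra://host.example.com:5656/foo?msgpack=1',
#                      retrymax=3, timeout=30,
#                      authinfo={'user': 'username', 'passwd': 'mypass'})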
def __init__(self, URI, retrymax=None, timeout=None, **kwargs):
scheme, host, port, name, urlparams = chopCobraUri( URI )
if verbose: print "HOST",host,"PORT",port,"OBJ",name
self._cobra_uri = URI
self._cobra_scheme = scheme
self._cobra_host = host
self._cobra_port = port
self._cobra_slookup = (host,port)
self._cobra_name = name
self._cobra_retrymax = urlparams.get('retrymax', retrymax)
self._cobra_timeout = urlparams.get('timeout', timeout)
self._cobra_kwargs = kwargs
self._cobra_gothello = False
self._cobra_sflags = 0
self._cobra_spoolcnt = int(urlparams.get('sockpool', 0))
self._cobra_sockpool = None
if self._cobra_timeout != None:
self._cobra_timeout = int(self._cobra_timeout)
if self._cobra_retrymax != None:
self._cobra_retrymax = int(self._cobra_retrymax)
if urlparams.get('msgpack'):
requireMsgpack()
self._cobra_sflags |= SFLAG_MSGPACK
if urlparams.get('json'):
self._cobra_sflags |= SFLAG_JSON
urlauth = urlparams.get('authinfo')
if urlauth:
authinfo = json.loads(urlauth.decode('base64'))
self._cobra_kwargs['authinfo'] = authinfo
# If they asked for msgpack
if kwargs.get('msgpack'):
requireMsgpack()
self._cobra_sflags |= SFLAG_MSGPACK
if kwargs.get('json'):
self._cobra_sflags |= SFLAG_JSON
if self._cobra_spoolcnt:
self._cobra_sockpool = Queue.Queue()
# timeout required for pool usage
if not self._cobra_timeout:
self._cobra_timeout = 60
# retry max required on pooling
if not self._cobra_retrymax:
self._cobra_retrymax = 3
[self._cobra_sockpool.put(self._cobra_newsock()) for i in range(self._cobra_spoolcnt)]
# If we got passed as user/passwd in our kwargs
with self._cobra_getsock() as csock:
mtype,rver,data = csock.cobraTransaction(COBRA_HELLO, name, "")
if mtype == COBRA_ERROR:
csock.trashed = True
if self._cobra_sflags & (SFLAG_MSGPACK|SFLAG_JSON):
data = Exception(data)
raise data
if rver != version:
csock.trashed = True
raise Exception("Server Version Not Supported: %s" % rver)
if mtype != COBRA_HELLO:
csock.trashed = True
raise Exception("Invalid Cobra Hello Response")
self._cobra_gothello = True
self._cobra_methods = data
def cobraAuthenticate(self, authinfo):
'''
Re-authenticate to the server ( and store auth info for reconnect ).
'''
with self._cobra_getsock() as csock:
mtype,rver,data = csock.cobraTransaction(COBRA_AUTH, '', authinfo)
if mtype == COBRA_AUTH:
self._cobra_kwargs['authinfo'] = authinfo
return True
return False
def _cobra_getsock(self, thr=None):
if self._cobra_spoolcnt:
sock = self._cobra_sockpool.get()
else:
if not thr: # if thread isn't specified, use the current thread
thr = currentThread()
tsocks = getattr(thr, 'cobrasocks', None)
if tsocks == None:
tsocks = {}
thr.cobrasocks = tsocks
sock = tsocks.get(self._cobra_slookup)
if not sock or sock.trashed:
# Lets build a new socket... shall we?
sock = self._cobra_newsock()
# If we have authinfo lets authenticate
authinfo = self._cobra_kwargs.get('authinfo')
if authinfo != None:
mtype,rver,data = sock.cobraTransaction(COBRA_AUTH, '', authinfo)
if mtype != COBRA_AUTH:
raise CobraAuthException('Authentication Failed!')
if not self._cobra_spoolcnt:
tsocks[self._cobra_slookup] = sock
return sock
def _cobra_newsock(self):
"""
This is only used by *clients*
"""
host = self._cobra_host
port = self._cobra_port
timeout = self._cobra_timeout
retrymax = self._cobra_retrymax
builder = getSocketBuilder(host,port)
if builder == None:
builder = SocketBuilder(host,port)
builder.setTimeout(timeout) # Might be None...
if self._cobra_scheme == 'cobrassl':
builder.setSslEnabled(True)
addSocketBuilder(host, port, builder)
authinfo = self._cobra_kwargs.get('authinfo')
return CobraClientSocket(builder, retrymax=retrymax, sflags=self._cobra_sflags, authinfo=authinfo, pool=self._cobra_sockpool)
def __dir__(self):
'''
return a list of proxied method names
'''
return self._cobra_methods.keys()
def __getstate__(self):
return self.__dict__
def __setstate__(self, sdict):
self.__dict__.update(sdict)
def __hash__(self):
return hash(self._cobra_uri)
def __nonzero__(self):
return True
def __repr__(self):
return str(self)
def __str__(self):
return "<CobraProxy %s>" % self._cobra_uri
def __eq__(self, obj):
ouri = getattr(obj, '_cobra_uri', None)
return self._cobra_uri == ouri
def __ne__(self, obj):
if self == obj:
return False
return True
def __setattr__(self, name, value):
if verbose: print "SETATTR %s %s" % (name, repr(value)[:20])
if name.startswith('_cobra_'):
self.__dict__[name] = value
return
with self._cobra_getsock() as csock:
mtype,name,data = csock.cobraTransaction(COBRA_SETATTR, self._cobra_name, (name, value))
if mtype == COBRA_ERROR:
raise data
elif mtype == COBRA_SETATTR:
return
else:
raise Exception("Invalid Cobra Response")
def __getattr__(self, name):
if verbose: print "GETATTR",name
if name == "__getinitargs__":
raise AttributeError()
# Handle methods
if self._cobra_methods.get(name, False):
return CobraMethod(self, name)
with self._cobra_getsock() as csock:
mtype,name,data = csock.cobraTransaction(COBRA_GETATTR, self._cobra_name, name)
if mtype == COBRA_ERROR:
raise data
return data
# For use with ref counted proxies
def __enter__(self):
return self
def __exit__(self, extype, value, tb):
with self._cobra_getsock() as csock:
#print traceback.print_tb(tb)
ok = True
if extype != None: # Tell the server we broke...
ok = False
csock.cobraTransaction(COBRA_GOODBYE, self._cobra_name, ok)
def addSocketBuilder( host, port, builder ):
'''
Register a global socket builder which should be used
when constructing sockets to the given host/port.
'''
socket_builders[ (host,port) ] = builder
def getSocketBuilder(host, port):
'''
Retrieve the registered socket builder for the given host/port.
'''
return socket_builders.get((host,port))
def initSocketBuilder(host,port):
'''
Retrieve or initialize a socket builder for the host/port.
'''
builder = socket_builders.get((host,port))
if builder == None:
builder = SocketBuilder(host,port)
socket_builders[ (host,port) ] = builder
return builder
def startCobraServer(host="", port=COBRA_PORT):
global daemon
if daemon == None:
daemon = CobraDaemon(host,port)
daemon.fireThread()
return daemon
def runCobraServer(host='', port=COBRA_PORT):
daemon = CobraDaemon(host,port)
daemon.serve_forever()
def shareObject(obj, name=None, doref=False):
"""
If shareObject is called before startCobraServer
or startCobraSslServer, it will call startCobraServer
"""
global daemon
if daemon == None:
startCobraServer()
return daemon.shareObject(obj, name, doref=doref)
def unshareObject(name):
return daemon.unshareObject(name)
def swapCobraObject(uri, newname):
'''
Parse out the object name from a given cobra
URI and return a newly constructed URI for
the shared object <newname> on the same server.
'''
scheme, host, port, name, urlparams = chopCobraUri( uri )
paramstr = ''
if urlparams:
paramstr = '?' + ('&'.join(['%s=%s' % (k,v) for (k,v) in urlparams.items()]))
return '%s://%s:%d/%s%s' % (scheme,host,port,newname,paramstr)
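# For example (illustrative values only):
#
#   swapCobraObject('cobra://host.example.com:5656/foo?msgpack=1', 'bar')
#   # -> 'cobra://host.example.com:5656/bar?msgpack=1'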
def requireMsgpack():
try:
import msgpack
except ImportError, e:
raise Exception('Missing "msgpack" python module ( http://visi.kenshoto.com/viki/Msgpack )')
|
test_file2k.py
|
import sys
import os
import unittest
import itertools
import time
from array import array
from weakref import proxy
try:
import threading
except ImportError:
threading = None
from test import test_support
from test.test_support import TESTFN, run_unittest
from UserList import UserList
class AutoFileTests(unittest.TestCase):
# file tests for which a test file is automatically set up
def setUp(self):
self.f = open(TESTFN, 'wb')
def tearDown(self):
if self.f:
self.f.close()
os.remove(TESTFN)
def testWeakRefs(self):
# verify weak references
p = proxy(self.f)
p.write('teststring')
self.assertEqual(self.f.tell(), p.tell())
self.f.close()
self.f = None
self.assertRaises(ReferenceError, getattr, p, 'tell')
def testAttributes(self):
# verify expected attributes exist
f = self.f
with test_support.check_py3k_warnings():
softspace = f.softspace
f.name # merely shouldn't blow up
f.mode # ditto
f.closed # ditto
with test_support.check_py3k_warnings():
# verify softspace is writable
f.softspace = softspace # merely shouldn't blow up
# verify the others aren't
for attr in 'name', 'mode', 'closed':
self.assertRaises((AttributeError, TypeError), setattr, f, attr, 'oops')
def testReadinto(self):
# verify readinto
self.f.write('12')
self.f.close()
a = array('c', 'x'*10)
self.f = open(TESTFN, 'rb')
n = self.f.readinto(a)
self.assertEqual('12', a.tostring()[:n])
def testWritelinesUserList(self):
# verify writelines with instance sequence
l = UserList(['1', '2'])
self.f.writelines(l)
self.f.close()
self.f = open(TESTFN, 'rb')
buf = self.f.read()
self.assertEqual(buf, '12')
def testWritelinesIntegers(self):
# verify writelines with integers
self.assertRaises(TypeError, self.f.writelines, [1, 2, 3])
def testWritelinesIntegersUserList(self):
# verify writelines with integers in UserList
l = UserList([1,2,3])
self.assertRaises(TypeError, self.f.writelines, l)
def testWritelinesNonString(self):
# verify writelines with non-string object
class NonString:
pass
self.assertRaises(TypeError, self.f.writelines,
[NonString(), NonString()])
def testRepr(self):
# verify repr works
self.assertTrue(repr(self.f).startswith("<open file '" + TESTFN))
def testErrors(self):
self.f.close()
self.f = open(TESTFN, 'rb')
f = self.f
self.assertEqual(f.name, TESTFN)
self.assertTrue(not f.isatty())
self.assertTrue(not f.closed)
self.assertRaises(TypeError, f.readinto, "")
f.close()
self.assertTrue(f.closed)
def testMethods(self):
methods = ['fileno', 'flush', 'isatty', 'next', 'read', 'readinto',
'readline', 'readlines', 'seek', 'tell', 'truncate',
'write', '__iter__']
deprecated_methods = ['xreadlines']
if sys.platform.startswith('atheos'):
methods.remove('truncate')
# __exit__ should close the file
self.f.__exit__(None, None, None)
self.assertTrue(self.f.closed)
for methodname in methods:
method = getattr(self.f, methodname)
# should raise on closed file
self.assertRaises(ValueError, method)
with test_support.check_py3k_warnings():
for methodname in deprecated_methods:
method = getattr(self.f, methodname)
self.assertRaises(ValueError, method)
self.assertRaises(ValueError, self.f.writelines, [])
# file is closed, __exit__ shouldn't do anything
self.assertEqual(self.f.__exit__(None, None, None), None)
# it must also return None if an exception was given
try:
1 // 0
except:
self.assertEqual(self.f.__exit__(*sys.exc_info()), None)
def testReadWhenWriting(self):
self.assertRaises(IOError, self.f.read)
def testNastyWritelinesGenerator(self):
def nasty():
for i in range(5):
if i == 3:
self.f.close()
yield str(i)
self.assertRaises(ValueError, self.f.writelines, nasty())
def testIssue5677(self):
# Remark: Do not perform more than one test per open file,
# since that does NOT catch the readline error on Windows.
data = 'xxx'
for mode in ['w', 'wb', 'a', 'ab']:
for attr in ['read', 'readline', 'readlines']:
self.f = open(TESTFN, mode)
self.f.write(data)
self.assertRaises(IOError, getattr(self.f, attr))
self.f.close()
self.f = open(TESTFN, mode)
self.f.write(data)
self.assertRaises(IOError, lambda: [line for line in self.f])
self.f.close()
self.f = open(TESTFN, mode)
self.f.write(data)
self.assertRaises(IOError, self.f.readinto, bytearray(len(data)))
self.f.close()
for mode in ['r', 'rb', 'U', 'Ub', 'Ur', 'rU', 'rbU', 'rUb']:
self.f = open(TESTFN, mode)
self.assertRaises(IOError, self.f.write, data)
self.f.close()
self.f = open(TESTFN, mode)
self.assertRaises(IOError, self.f.writelines, [data, data])
self.f.close()
self.f = open(TESTFN, mode)
self.assertRaises(IOError, self.f.truncate)
self.f.close()
class OtherFileTests(unittest.TestCase):
def testOpenDir(self):
this_dir = os.path.dirname(__file__) or os.curdir
for mode in (None, "w"):
try:
if mode:
f = open(this_dir, mode)
else:
f = open(this_dir)
except IOError as e:
self.assertEqual(e.filename, this_dir)
else:
self.fail("opening a directory didn't raise an IOError")
def testModeStrings(self):
# check invalid mode strings
for mode in ("", "aU", "wU+"):
try:
f = open(TESTFN, mode)
except ValueError:
pass
else:
f.close()
self.fail('%r is an invalid file mode' % mode)
# Some invalid modes fail on Windows, but pass on Unix
# Issue3965: avoid a crash on Windows when filename is unicode
for name in (TESTFN, unicode(TESTFN), unicode(TESTFN + '\t')):
try:
f = open(name, "rr")
except (IOError, ValueError):
pass
else:
f.close()
def testStdin(self):
# This causes the interpreter to exit on OSF1 v5.1.
if sys.platform != 'osf1V5':
self.assertRaises(IOError, sys.stdin.seek, -1)
else:
print >>sys.__stdout__, (
' Skipping sys.stdin.seek(-1), it may crash the interpreter.'
' Test manually.')
self.assertRaises(IOError, sys.stdin.truncate)
def testUnicodeOpen(self):
# verify repr works for unicode too
f = open(unicode(TESTFN), "w")
self.assertTrue(repr(f).startswith("<open file u'" + TESTFN))
f.close()
os.unlink(TESTFN)
def testBadModeArgument(self):
# verify that we get a sensible error message for bad mode argument
bad_mode = "qwerty"
try:
f = open(TESTFN, bad_mode)
except ValueError, msg:
if msg.args[0] != 0:
s = str(msg)
if TESTFN in s or bad_mode not in s:
self.fail("bad error message for invalid mode: %s" % s)
# if msg.args[0] == 0, we're probably on Windows where there may
# be no obvious way to discover why open() failed.
else:
f.close()
self.fail("no error for invalid mode: %s" % bad_mode)
def testSetBufferSize(self):
# make sure that explicitly setting the buffer size doesn't cause
# misbehaviour especially with repeated close() calls
for s in (-1, 0, 1, 512):
try:
f = open(TESTFN, 'w', s)
f.write(str(s))
f.close()
f.close()
f = open(TESTFN, 'r', s)
d = int(f.read())
f.close()
f.close()
except IOError, msg:
self.fail('error setting buffer size %d: %s' % (s, str(msg)))
self.assertEqual(d, s)
def testTruncateOnWindows(self):
os.unlink(TESTFN)
def bug801631():
# SF bug <http://www.python.org/sf/801631>
# "file.truncate fault on windows"
f = open(TESTFN, 'wb')
f.write('12345678901') # 11 bytes
f.close()
f = open(TESTFN,'rb+')
data = f.read(5)
if data != '12345':
self.fail("Read on file opened for update failed %r" % data)
if f.tell() != 5:
self.fail("File pos after read wrong %d" % f.tell())
f.truncate()
if f.tell() != 5:
self.fail("File pos after ftruncate wrong %d" % f.tell())
f.close()
size = os.path.getsize(TESTFN)
if size != 5:
self.fail("File size after ftruncate wrong %d" % size)
try:
bug801631()
finally:
os.unlink(TESTFN)
def testIteration(self):
# Test the complex interaction when mixing file-iteration and the
# various read* methods. Ostensibly, the mixture could just be tested
# to work when it should work according to the Python language,
# instead of fail when it should fail according to the current CPython
# implementation. People don't always program Python the way they
should, though, and the implementation might change in subtle ways,
# so we explicitly test for errors, too; the test will just have to
# be updated when the implementation changes.
dataoffset = 16384
filler = "ham\n"
assert not dataoffset % len(filler), \
"dataoffset must be multiple of len(filler)"
nchunks = dataoffset // len(filler)
testlines = [
"spam, spam and eggs\n",
"eggs, spam, ham and spam\n",
"saussages, spam, spam and eggs\n",
"spam, ham, spam and eggs\n",
"spam, spam, spam, spam, spam, ham, spam\n",
"wonderful spaaaaaam.\n"
]
methods = [("readline", ()), ("read", ()), ("readlines", ()),
("readinto", (array("c", " "*100),))]
try:
# Prepare the testfile
bag = open(TESTFN, "w")
bag.write(filler * nchunks)
bag.writelines(testlines)
bag.close()
# Test for appropriate errors mixing read* and iteration
for methodname, args in methods:
f = open(TESTFN)
if f.next() != filler:
self.fail, "Broken testfile"
meth = getattr(f, methodname)
try:
meth(*args)
except ValueError:
pass
else:
self.fail("%s%r after next() didn't raise ValueError" %
(methodname, args))
f.close()
# Test to see if harmless (by accident) mixing of read* and
# iteration still works. This depends on the size of the internal
# iteration buffer (currently 8192), but we can test it in a
# flexible manner. Each line in the bag o' ham is 4 bytes
# ("h", "a", "m", "\n"), so 4096 lines of that should get us
# exactly on the buffer boundary for any power-of-2 buffersize
# between 4 and 16384 (inclusive).
f = open(TESTFN)
for i in range(nchunks):
f.next()
testline = testlines.pop(0)
try:
line = f.readline()
except ValueError:
self.fail("readline() after next() with supposedly empty "
"iteration-buffer failed anyway")
if line != testline:
self.fail("readline() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
testline = testlines.pop(0)
buf = array("c", "\x00" * len(testline))
try:
f.readinto(buf)
except ValueError:
self.fail("readinto() after next() with supposedly empty "
"iteration-buffer failed anyway")
line = buf.tostring()
if line != testline:
self.fail("readinto() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
testline = testlines.pop(0)
try:
line = f.read(len(testline))
except ValueError:
self.fail("read() after next() with supposedly empty "
"iteration-buffer failed anyway")
if line != testline:
self.fail("read() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
try:
lines = f.readlines()
except ValueError:
self.fail("readlines() after next() with supposedly empty "
"iteration-buffer failed anyway")
if lines != testlines:
self.fail("readlines() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
# Reading after iteration hit EOF shouldn't hurt either
f = open(TESTFN)
try:
for line in f:
pass
try:
f.readline()
f.readinto(buf)
f.read()
f.readlines()
except ValueError:
self.fail("read* failed after next() consumed file")
finally:
f.close()
finally:
os.unlink(TESTFN)
class FileSubclassTests(unittest.TestCase):
def testExit(self):
# test that exiting with context calls subclass' close
class C(file):
def __init__(self, *args):
self.subclass_closed = False
file.__init__(self, *args)
def close(self):
self.subclass_closed = True
file.close(self)
with C(TESTFN, 'w') as f:
pass
self.assertTrue(f.subclass_closed)
@unittest.skipUnless(threading, 'Threading required for this test.')
class FileThreadingTests(unittest.TestCase):
# These tests check the ability to call various methods of file objects
# (including close()) concurrently without crashing the Python interpreter.
# See #815646, #595601
def setUp(self):
self._threads = test_support.threading_setup()
self.f = None
self.filename = TESTFN
with open(self.filename, "w") as f:
f.write("\n".join("0123456789"))
self._count_lock = threading.Lock()
self.close_count = 0
self.close_success_count = 0
self.use_buffering = False
def tearDown(self):
if self.f:
try:
self.f.close()
except (EnvironmentError, ValueError):
pass
try:
os.remove(self.filename)
except EnvironmentError:
pass
test_support.threading_cleanup(*self._threads)
def _create_file(self):
if self.use_buffering:
self.f = open(self.filename, "w+", buffering=1024*16)
else:
self.f = open(self.filename, "w+")
def _close_file(self):
with self._count_lock:
self.close_count += 1
self.f.close()
with self._count_lock:
self.close_success_count += 1
def _close_and_reopen_file(self):
self._close_file()
# if close raises an exception that's fine, self.f remains valid so
# we don't need to reopen.
self._create_file()
def _run_workers(self, func, nb_workers, duration=0.2):
with self._count_lock:
self.close_count = 0
self.close_success_count = 0
self.do_continue = True
threads = []
try:
for i in range(nb_workers):
t = threading.Thread(target=func)
t.start()
threads.append(t)
for _ in xrange(100):
time.sleep(duration/100)
with self._count_lock:
if self.close_count-self.close_success_count > nb_workers+1:
if test_support.verbose:
print 'Q',
break
time.sleep(duration)
finally:
self.do_continue = False
for t in threads:
t.join()
def _test_close_open_io(self, io_func, nb_workers=5):
def worker():
self._create_file()
funcs = itertools.cycle((
lambda: io_func(),
lambda: self._close_and_reopen_file(),
))
for f in funcs:
if not self.do_continue:
break
try:
f()
except (IOError, ValueError):
pass
self._run_workers(worker, nb_workers)
if test_support.verbose:
# Useful verbose statistics when tuning this test to take
# less time to run while still ensuring that it remains useful.
#
# the percent of close calls that raised an error
percent = 100. - 100.*self.close_success_count/self.close_count
print self.close_count, ('%.4f ' % percent),
def test_close_open(self):
def io_func():
pass
self._test_close_open_io(io_func)
def test_close_open_flush(self):
def io_func():
self.f.flush()
self._test_close_open_io(io_func)
def test_close_open_iter(self):
def io_func():
list(iter(self.f))
self._test_close_open_io(io_func)
def test_close_open_isatty(self):
def io_func():
self.f.isatty()
self._test_close_open_io(io_func)
def test_close_open_print(self):
def io_func():
print >> self.f, ''
self._test_close_open_io(io_func)
def test_close_open_print_buffered(self):
self.use_buffering = True
def io_func():
print >> self.f, ''
self._test_close_open_io(io_func)
def test_close_open_read(self):
def io_func():
self.f.read(0)
self._test_close_open_io(io_func)
def test_close_open_readinto(self):
def io_func():
a = array('c', 'xxxxx')
self.f.readinto(a)
self._test_close_open_io(io_func)
def test_close_open_readline(self):
def io_func():
self.f.readline()
self._test_close_open_io(io_func)
def test_close_open_readlines(self):
def io_func():
self.f.readlines()
self._test_close_open_io(io_func)
def test_close_open_seek(self):
def io_func():
self.f.seek(0, 0)
self._test_close_open_io(io_func)
def test_close_open_tell(self):
def io_func():
self.f.tell()
self._test_close_open_io(io_func)
def test_close_open_truncate(self):
def io_func():
self.f.truncate()
self._test_close_open_io(io_func)
def test_close_open_write(self):
def io_func():
self.f.write('')
self._test_close_open_io(io_func)
def test_close_open_writelines(self):
def io_func():
self.f.writelines('')
self._test_close_open_io(io_func)
class StdoutTests(unittest.TestCase):
def test_move_stdout_on_write(self):
# Issue 3242: sys.stdout can be replaced (and freed) during a
# print statement; prevent a segfault in this case
save_stdout = sys.stdout
class File:
def write(self, data):
if '\n' in data:
sys.stdout = save_stdout
try:
sys.stdout = File()
print "some text"
finally:
sys.stdout = save_stdout
def test_del_stdout_before_print(self):
# Issue 4597: 'print' with no argument wasn't reporting when
# sys.stdout was deleted.
save_stdout = sys.stdout
del sys.stdout
try:
print
except RuntimeError as e:
self.assertEqual(str(e), "lost sys.stdout")
else:
self.fail("Expected RuntimeError")
finally:
sys.stdout = save_stdout
def test_unicode(self):
import subprocess
def get_message(encoding, *code):
code = '\n'.join(code)
env = os.environ.copy()
env['PYTHONIOENCODING'] = encoding
process = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, env=env)
stdout, stderr = process.communicate()
self.assertEqual(process.returncode, 0)
return stdout
def check_message(text, encoding, expected):
stdout = get_message(encoding,
"import sys",
"sys.stdout.write(%r)" % text,
"sys.stdout.flush()")
self.assertEqual(stdout, expected)
# test the encoding
check_message(u'15\u20ac', "iso-8859-15", "15\xa4")
check_message(u'15\u20ac', "utf-8", '15\xe2\x82\xac')
check_message(u'15\u20ac', "utf-16-le", '1\x005\x00\xac\x20')
# test the error handler
check_message(u'15\u20ac', "iso-8859-1:ignore", "15")
check_message(u'15\u20ac', "iso-8859-1:replace", "15?")
check_message(u'15\u20ac', "iso-8859-1:backslashreplace", "15\\u20ac")
# test the buffer API
for objtype in ('buffer', 'bytearray'):
stdout = get_message('ascii',
'import sys',
r'sys.stdout.write(%s("\xe9"))' % objtype,
'sys.stdout.flush()')
self.assertEqual(stdout, "\xe9")
def test_main():
# Historically, these tests have been sloppy about removing TESTFN.
# So get rid of it no matter what.
try:
run_unittest(AutoFileTests, OtherFileTests, FileSubclassTests,
FileThreadingTests, StdoutTests)
finally:
if os.path.exists(TESTFN):
os.unlink(TESTFN)
if __name__ == '__main__':
test_main()
|
socketTestServer.py
|
#!/usr/bin/env python
#-*- encoding:utf-8 -*-
import sys
import socket
import threading
import time
def tcplink(sock, addr):
print 'Accept new connection from %s:%s...' % addr
sock.send('Welcome!')
while True:
data = sock.recv(1024)
time.sleep(1)
if data == 'exit' or not data:
break
sock.send('Hello, %s!' % data)
sock.close()
print 'Connection from %s:%s closed.' % addr
if __name__ == '__main__':
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('127.0.0.1', 9999))
s.listen(5)
print 'Waiting for connection...'
while True:
# Accept a new connection:
sock, addr = s.accept()
# Create a new thread to handle the TCP connection:
t = threading.Thread(target=tcplink, args=(sock, addr))
t.start()
|
gui.py
|
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Raspberry Pi Sense HAT Emulator library for the Raspberry Pi
# Copyright (c) 2016 Raspberry Pi Foundation <info@raspberrypi.org>
#
# This package is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This package is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
nstr = str
str = type('')
import io
import os
import sys
import atexit
import struct
import math
import errno
import subprocess
import webbrowser
import datetime as dt
from time import time, sleep
from threading import Thread, Lock, Event
import gi
gi.require_version('cairo', '1.0')
gi.require_version('Gdk', '3.0')
gi.require_version('GdkPixbuf', '2.0')
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gdk, GdkPixbuf, Gio, GLib, GObject, cairo
import numpy as np
import pkg_resources
from . import __project__, __version__, __author__, __author_email__, __url__
from .i18n import init_i18n, _
from .screen import ScreenClient
from .imu import IMUServer
from .pressure import PressureServer
from .humidity import HumidityServer
from .stick import StickServer, SenseStick
from .lock import EmulatorLock
from .common import HEADER_REC, DATA_REC, DataRecord, slow_pi
def main():
init_i18n()
# threads_init isn't required since PyGObject 3.10.2, but just in case
# we're on something ancient...
GObject.threads_init()
app = EmuApplication()
app.run(sys.argv)
def load_image(filename, format='png'):
loader = GdkPixbuf.PixbufLoader.new_with_type(format)
loader.write(pkg_resources.resource_string(__name__, filename))
loader.close()
return loader.get_pixbuf()
class EmuApplication(Gtk.Application):
def __init__(self, *args, **kwargs):
super(EmuApplication, self).__init__(
*args, application_id='org.raspberrypi.sense_emu_gui',
flags=Gio.ApplicationFlags.HANDLES_COMMAND_LINE,
**kwargs)
GLib.set_application_name(_('Sense HAT Emulator'))
self.window = None
def do_startup(self):
# super-call needs to be in this form?!
Gtk.Application.do_startup(self)
# Get the emulator lock and terminate if something already has it
self.lock = EmulatorLock('sense_emu_gui')
try:
self.lock.acquire()
except:
dialog = Gtk.MessageDialog(
message_type=Gtk.MessageType.ERROR,
title=_('Error'),
text=_(
'Another process is currently acting as the Sense HAT '
'emulator'),
buttons=Gtk.ButtonsType.CLOSE)
try:
dialog.run()
finally:
dialog.destroy()
self.quit()
return
def make_action(action_id, handler, param_type=None):
action = Gio.SimpleAction.new(action_id, param_type)
action.connect('activate', handler)
self.add_action(action)
make_action('example', self.on_example, GLib.VariantType.new('s'))
make_action('play', self.on_play)
make_action('prefs', self.on_prefs)
make_action('help', self.on_help)
make_action('about', self.on_about)
make_action('quit', self.on_quit)
builder = Gtk.Builder(translation_domain=__project__)
builder.add_from_string(
pkg_resources.resource_string(__name__, 'menu.ui').decode('utf-8'))
self.props.menubar = builder.get_object('app-menu')
# Construct the open examples sub-menu
for directory, label in [
# I18N: Easy examples
('basic', _('Simple')),
# I18N: Intermediate skill examples
('intermediate', _('Intermediate')),
# I18N: Difficult examples
('advanced', _('Advanced')),
]:
examples = Gio.Menu.new()
# NOTE: The use of literal "/" below is correct; resource paths
# are not file-system paths and always use "/"
for example in sorted(
pkg_resources.resource_listdir(__name__, 'examples/%s' % directory)):
if example.endswith('.py'):
examples.append(
example.replace('_', '__'),
Gio.Action.print_detailed_name(
'app.example',
GLib.Variant.new_string('%s/%s' % (directory, example))
)
)
builder.get_object('example-submenu').append_submenu(label, examples)
# Construct the settings database and tweak initial value of
# simulate-imu and simulate-env if we're running on a slow Pi, and the
# user hasn't explicitly set a value yet
if pkg_resources.resource_exists(__name__, 'gschemas.compiled'):
source = Gio.SettingsSchemaSource.new_from_directory(
os.path.dirname(pkg_resources.resource_filename(__name__, 'gschemas.compiled')),
Gio.SettingsSchemaSource.get_default(), True)
else:
source = Gio.SettingsSchemaSource.get_default()
schema = Gio.SettingsSchemaSource.lookup(
source, self.props.application_id, False)
assert schema is not None
self.settings = Gio.Settings.new_full(schema, None, None)
if self.settings.get_user_value('simulate-imu') is None:
enable_simulators = not slow_pi()
self.settings.set_boolean('simulate-imu', enable_simulators)
self.settings.set_boolean('simulate-env', enable_simulators)
# Construct the emulator servers
self.imu = IMUServer(simulate_world=self.settings.get_boolean('simulate-imu'))
self.pressure = PressureServer(simulate_noise=self.settings.get_boolean('simulate-env'))
self.humidity = HumidityServer(simulate_noise=self.settings.get_boolean('simulate-env'))
self.screen = ScreenClient()
self.stick = StickServer()
# Connect the settings to the components
self.settings.connect('changed', self.settings_changed)
def settings_changed(self, settings, key):
if key == 'simulate-env':
self.pressure.simulate_noise = settings.get_boolean(key)
self.humidity.simulate_noise = settings.get_boolean(key)
elif key == 'simulate-imu':
self.imu.simulate_world = settings.get_boolean(key)
elif key == 'orientation-scale':
# Force the orientation sliders to redraw
self.window.ui.yaw_scale.queue_draw()
self.window.ui.pitch_scale.queue_draw()
self.window.ui.roll_scale.queue_draw()
elif key == 'screen-fps':
self.window.ui.screen_widget.screen_update_delay = 1 / settings.get_int(key)
def do_shutdown(self):
if self.lock.mine:
self.lock.release()
if self.window:
self.window.destroy()
self.window = None
self.stick.close()
self.screen.close()
self.humidity.close()
self.pressure.close()
self.imu.close()
Gtk.Application.do_shutdown(self)
def do_activate(self):
if not self.window and self.lock.mine:
self.window = MainWindow(application=self)
# Force a read of settings specific to the main window
self.settings_changed(self.settings, 'screen-fps')
self.settings_changed(self.settings, 'orientation-scale')
# Position the window according to the settings
self.window.set_default_size(
self.settings.get_int('window-width'),
self.settings.get_int('window-height')
)
if self.settings.get_boolean('window-maximized'):
self.window.maximize()
if self.window:
self.window.present()
def do_command_line(self, command_line):
options = command_line.get_options_dict()
# do stuff with switches
self.activate()
return 0
def on_help(self, action, param):
local_help = '/usr/share/doc/python-sense-emu-doc/html/index.html'
remote_help = 'https://sense-emu.readthedocs.io/'
if os.path.exists(local_help):
webbrowser.open('file://' + local_help)
else:
webbrowser.open(remote_help)
def on_about(self, action, param):
logo = load_image('sense_emu_gui.svg', format='svg')
about_dialog = Gtk.AboutDialog(
transient_for=self.window,
authors=['%s <%s>' % (__author__, __author_email__)],
license_type=Gtk.License.GPL_2_0, logo=logo,
version=__version__, website=__url__)
about_dialog.run()
about_dialog.destroy()
def on_example(self, action, param):
# NOTE: The use of a bare "/" below is correct: resource paths are
# *not* file-system paths and always use "/" path separators
filename = param.unpack()
source = pkg_resources.resource_stream(
__name__, '/'.join(('examples', filename)))
# Write to a filename in the user's home-dir with the timestamp
# appended to ensure uniqueness (ish)
filename = os.path.splitext(os.path.basename(filename))[0]
filename = '{filename}-{timestamp:%Y-%m-%d-%H-%M-%S}.py'.format(
filename=filename, timestamp=dt.datetime.now())
filename = os.path.join(os.path.expanduser('~'), filename)
target = io.open(filename, 'w', encoding='utf-8')
# Write a note at the top of the file to explain things
target.write("""\
# This file has been written to your home directory for convenience. It is
# saved as "{filename}"
""".format(filename=filename))
target.write(source.read().decode('utf-8'))
# Spawn IDLE; if this seems like a crazy way to spawn IDLE, you're
# right but it's also cross-platform and cross-version compatible
# (works on Py 2.x on Windows and UNIX, and Py 3.x on Windows and UNIX;
# almost any other variant fails for some combination)
subprocess.Popen([
sys.executable,
'-c', 'from idlelib.PyShell import main; main()',
filename])
def on_play(self, action, param):
open_dialog = Gtk.FileChooserDialog(
transient_for=self.window,
title=_('Select the recording to play'),
action=Gtk.FileChooserAction.OPEN)
open_dialog.add_button(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL)
open_dialog.add_button(Gtk.STOCK_OPEN, Gtk.ResponseType.ACCEPT)
try:
response = open_dialog.run()
open_dialog.hide()
if response == Gtk.ResponseType.ACCEPT:
self.window.play(open_dialog.get_filename())
finally:
open_dialog.destroy()
def on_prefs(self, action, param):
prefs_dialog = PrefsDialog(
transient_for=self.window,
title=_('Preferences'),
settings=self.settings)
try:
prefs_dialog.run()
finally:
prefs_dialog.destroy()
def on_quit(self, action, param):
self.quit()
class BuilderUi(object):
def __init__(self, owner, filename):
# Load the GUI definitions (see __getattr__ for how we tie the loaded
# objects into instance variables) and connect all handlers to methods
# on this object
self._builder = Gtk.Builder(translation_domain=__project__)
self._builder.add_from_string(
pkg_resources.resource_string(__name__, filename).decode('utf-8'))
self._builder.connect_signals(owner)
def __getattr__(self, name):
result = self._builder.get_object(name)
if result is None:
raise AttributeError(_('No such attribute %r') % name)
setattr(self, name, result)
return result
class ScreenWidget(Gtk.DrawingArea):
__gtype_name__ = 'ScreenWidget'
def __init__(self, *args, **kwargs):
super(ScreenWidget, self).__init__(*args, **kwargs)
self.set_has_window(True)
self.set_size_request(265, 265)
# Load graphics assets
self._board_full = load_image('sense_emu.png')
self._board_scaled = self._board_full
self._orient_full = load_image('orientation.png')
        self._orient_scaled = self._orient_full
self._grid_full = load_image('pixel_grid.png')
self._grid_scaled = self._grid_full
# Set up a thread to constantly refresh the pixels from the screen
# client object
self.screen_update_delay = 0.04
self._size_lock = Lock()
self._ratio = 1.0
self._rotation = 0
self._show_orientation = False
self._draw_pending = Event()
self._draw_image = None
self._draw_timestamp = 0.0
self._stop = Event()
self._update_thread = Thread(target=self._update_run)
self._update_thread.daemon = True
self.connect('realize', self.realized)
self.connect('size-allocate', self.resized)
self.connect('draw', self.drawn)
def realized(self, widget):
self._update_thread.start()
def resized(self, widget, rect):
with self._size_lock:
if self._rotation in (0, 180):
ratio = min(
rect.width / self._board_full.props.width,
rect.height / self._board_full.props.height)
else:
ratio = min(
rect.width / self._board_full.props.height,
rect.height / self._board_full.props.width)
ratio = min(ratio, 1.0) # never resize larger than native
if ratio != self._ratio:
# Only resize if necessary (plenty of resizes wind up with the
# same ratio)
self._board_scaled = self._board_full.scale_simple(
self._board_full.props.width * ratio,
self._board_full.props.height * ratio,
GdkPixbuf.InterpType.BILINEAR)
self._grid_scaled = self._grid_full.scale_simple(
self._grid_full.props.width * ratio,
self._grid_full.props.height * ratio,
GdkPixbuf.InterpType.BILINEAR)
self._orient_scaled = self._orient_full.scale_simple(
self._orient_full.props.width * ratio,
self._orient_full.props.height * ratio,
GdkPixbuf.InterpType.BILINEAR)
self._ratio = ratio
def drawn(self, widget, cr):
if self._draw_image is None:
return
with self._size_lock:
img = self._draw_image
if self._show_orientation:
img = img.copy()
self._orient_scaled.composite(
img, 0, 0, img.props.width, img.props.height, 0, 0, 1, 1,
GdkPixbuf.InterpType.NEAREST, 215)
img = img.rotate_simple(self._rotation)
rect = self.get_allocation()
Gdk.cairo_set_source_pixbuf(cr, img,
(rect.width - img.props.width) // 2,
(rect.height - img.props.height) // 2)
cr.paint()
self._draw_pending.clear()
@GObject.Property(type=object)
def client(self):
return self._screen_client
@client.setter
def client(self, value):
self._screen_client = value
@GObject.Property(type=int, default=0)
def rotation(self):
return self._rotation
@rotation.setter
def rotation(self, value):
self._rotation = value
self.resized(self, self.get_allocation())
self._force_update()
@GObject.Property(type=bool, default=False)
def orientation(self):
return self._show_orientation
@orientation.setter
def orientation(self, value):
self._show_orientation = value
self._force_update()
def _force_update(self):
self._draw_pending.clear()
self._draw_timestamp = 0
# Wait for the background thread to update the pixels image (this
# should never take more than a second)
self._draw_pending.wait(1)
self.props.window.invalidate_rect(None, False)
def _update_run(self):
# This method runs in the background _update_thread
while True:
# Only update the screen if do_draw's finished the last update;
# this effectively serves to "drop frames" if the system's too
# busy
if self._draw_pending.wait(self.screen_update_delay):
# The wait period above enforces the maximum update rate; if
# a draw is still pending, wait on the stop event instead
if self._stop.wait(self.screen_update_delay):
break
else:
# Only update if the screen's modification timestamp indicates
# that the data has changed since last time
ts = self._screen_client.timestamp
if ts > self._draw_timestamp:
with self._size_lock:
img = self._board_scaled.copy()
pixels = GdkPixbuf.Pixbuf.new_from_bytes(
                            GLib.Bytes.new(self._screen_client.rgb_array.tobytes()),
colorspace=GdkPixbuf.Colorspace.RGB, has_alpha=False,
bits_per_sample=8, width=8, height=8, rowstride=8 * 3)
pixel_rect = Gdk.Rectangle()
pixel_rect.x = int(126 * self._ratio)
pixel_rect.y = int(155 * self._ratio)
pixel_rect.width = int(512 * self._ratio)
pixel_rect.height = pixel_rect.width
pixels.composite(
img,
pixel_rect.x, pixel_rect.y,
pixel_rect.width, pixel_rect.height,
pixel_rect.x, pixel_rect.y,
# Why 8.1? With 8.0 (which is what it should be),
# registration errors crop up at the far right (try
# it and see); no idea why 8.1 is required to
# correct them, but I'm too knackered to argue with
# Gdk any more...
pixel_rect.width / 8.1, pixel_rect.height / 8.1,
GdkPixbuf.InterpType.NEAREST, 255)
self._grid_scaled.composite(
img,
pixel_rect.x, pixel_rect.y,
pixel_rect.width, pixel_rect.height,
pixel_rect.x, pixel_rect.y,
1, 1,
GdkPixbuf.InterpType.NEAREST, 255)
self._draw_image = img
self._draw_timestamp = ts
self._draw_pending.set()
# Schedule a redraw when the app is next idle; like Gtk
# methods, Gdk methods must only be called from the main
# thread (otherwise the app locks up)
try:
GLib.idle_add(self.props.window.invalidate_rect, None, False)
except AttributeError:
# Our Gdk window has been destroyed; don't whinge, just
# exit the thread as we're obviously shutting down
break
def do_destroy(self):
self._stop.set()
self._update_thread.join()
class MainWindow(Gtk.ApplicationWindow):
def __init__(self, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
# Build the UI; this is a bit round-about because of Gtk's weird UI
# handling. One can't just use a UI file to setup an existing Window
# instance (as in Qt); instead one must use a separate handler object
# (in which case overriding do_destroy is impossible) or construct a
# whole new Window, remove its components, add them to ourselves and
# then ditch the Window.
self._ui = BuilderUi(self, 'main_window.ui')
self.ui.window.remove(self.ui.root_grid)
self.add(self.ui.root_grid)
self.ui.window.destroy()
# Set the window icon
icon = load_image('sense_emu_gui.png')
self.props.icon = icon
# Set up the objects for the playback thread
self._play_update_lock = Lock()
self._play_update_id = 0
self._play_event = Event()
self._play_thread = None
self._play_restore = (True, True, True)
# Set up the custom screen widget
self.ui.screen_widget = ScreenWidget(visible=True, client=self.props.application.screen)
self.ui.screen_box.pack_start(self.ui.screen_widget, True, True, 0)
self.ui.screen_widget.show()
# Set initial positions on sliders (and add some marks)
self.ui.pitch_scale.add_mark(0, Gtk.PositionType.BOTTOM, None)
self.ui.roll_scale.add_mark(0, Gtk.PositionType.BOTTOM, None)
self.ui.yaw_scale.add_mark(0, Gtk.PositionType.BOTTOM, None)
self.ui.roll.props.value = self.props.application.imu.orientation[0]
self.ui.pitch.props.value = self.props.application.imu.orientation[1]
self.ui.yaw.props.value = self.props.application.imu.orientation[2]
self.ui.humidity.props.value = self.props.application.humidity.humidity
self.ui.pressure.props.value = self.props.application.pressure.pressure
self.ui.temperature.props.value = self.props.application.humidity.temperature
# Set up attributes for the joystick buttons
self._stick_held_lock = Lock()
self._stick_held_id = 0
self.ui.left_button.direction = SenseStick.KEY_LEFT
self.ui.right_button.direction = SenseStick.KEY_RIGHT
self.ui.up_button.direction = SenseStick.KEY_UP
self.ui.down_button.direction = SenseStick.KEY_DOWN
self.ui.enter_button.direction = SenseStick.KEY_ENTER
self._stick_map = {
Gdk.KEY_Return: self.ui.enter_button,
Gdk.KEY_Left: self.ui.left_button,
Gdk.KEY_Right: self.ui.right_button,
Gdk.KEY_Up: self.ui.up_button,
Gdk.KEY_Down: self.ui.down_button,
}
# Set up attributes for the screen rotation controls
self.ui.screen_rotate_clockwise.angle = -90
self.ui.screen_rotate_anticlockwise.angle = 90
self._stick_rotations = {
SenseStick.KEY_LEFT: SenseStick.KEY_UP,
SenseStick.KEY_UP: SenseStick.KEY_RIGHT,
SenseStick.KEY_RIGHT: SenseStick.KEY_DOWN,
SenseStick.KEY_DOWN: SenseStick.KEY_LEFT,
SenseStick.KEY_ENTER: SenseStick.KEY_ENTER,
}
# Connect some handlers for window size and state
self._current_width = -1
self._current_height = -1
self._is_maximized = False
self.connect('size-allocate', self.window_resized)
self.connect('window-state-event', self.window_state_changed)
def window_resized(self, widget, rect):
if not self.is_maximized():
self.props.application.settings.set_int('window-width', rect.width)
self.props.application.settings.set_int('window-height', rect.height)
def window_state_changed(self, widget, event):
if event.type == Gdk.EventType.WINDOW_STATE:
self.props.application.settings.set_boolean(
'window-maximized', event.new_window_state & Gdk.WindowState.MAXIMIZED)
return False
@property
def ui(self):
return self._ui
def do_destroy(self):
try:
self._play_stop()
except AttributeError:
            # do_destroy gets called multiple times, and on subsequent calls
            # the Python-added instance attributes are missing
pass
Gtk.ApplicationWindow.do_destroy(self)
def format_pressure(self, scale, value):
return '%.1fmbar' % value
def pressure_changed(self, adjustment):
if not self._play_thread:
self.props.application.pressure.set_values(
self.ui.pressure.props.value,
self.ui.temperature.props.value,
)
def format_humidity(self, scale, value):
return '%.1f%%' % value
def humidity_changed(self, adjustment):
if not self._play_thread:
self.props.application.humidity.set_values(
self.ui.humidity.props.value,
self.ui.temperature.props.value,
)
def format_temperature(self, scale, value):
return '%.1f°C' % value
def temperature_changed(self, adjustment):
if not self._play_thread:
self.pressure_changed(adjustment)
self.humidity_changed(adjustment)
def format_orientation(self, scale, value):
mode = self.props.application.settings.get_string('orientation-scale')
return '%.1f°' % (
value if mode == 'balance' else
value + 180 if mode == 'circle' else
value % 360 if mode == 'modulo' else
999 # should never happen
)
def orientation_changed(self, adjustment):
if not self._play_thread:
self.props.application.imu.set_orientation((
self.ui.roll.props.value,
self.ui.pitch.props.value,
self.ui.yaw.props.value,
))
def stick_key_pressed(self, button, event):
try:
button = self._stick_map[event.keyval]
except KeyError:
return False
else:
self.stick_pressed(button, event)
return True
def stick_key_released(self, button, event):
try:
button = self._stick_map[event.keyval]
except KeyError:
return False
else:
self.stick_released(button, event)
return True
def stick_pressed(self, button, event):
# When a button is double-clicked, GTK fires two pressed events for the
# second click with no intervening released event (so there's one
# pressed event for the first click, followed by a released event, then
# two pressed events for the second click followed by a single released
# event). This isn't documented, so it could be a bug, but it seems
# more like a deliberate behaviour. Anyway, we work around the
# redundant press by detecting it with the non-zero stick_held_id and
# ignoring the redundant event
button.grab_focus()
with self._stick_held_lock:
if self._stick_held_id:
return True
self._stick_held_id = GLib.timeout_add(250, self.stick_held_first, button)
self._stick_send(button.direction, SenseStick.STATE_PRESS)
button.set_active(True)
return True
def stick_released(self, button, event):
with self._stick_held_lock:
if self._stick_held_id:
GLib.source_remove(self._stick_held_id)
self._stick_held_id = 0
self._stick_send(button.direction, SenseStick.STATE_RELEASE)
button.set_active(False)
return True
def stick_held_first(self, button):
with self._stick_held_lock:
self._stick_held_id = GLib.timeout_add(50, self.stick_held, button)
self._stick_send(button.direction, SenseStick.STATE_HOLD)
return False
def stick_held(self, button):
self._stick_send(button.direction, SenseStick.STATE_HOLD)
return True
def _stick_send(self, direction, action):
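        # Rotate the logical joystick direction so it matches the current
        # on-screen rotation, then pack a Linux evdev-style input event record
        # (seconds, microseconds, event type, key code, key state) and hand it
        # to the stick server for delivery to emulated clients.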
tv_usec, tv_sec = math.modf(time())
tv_usec *= 1000000
r = self.ui.screen_widget.props.rotation // 90
while r:
direction = self._stick_rotations[direction]
r -= 1
event_rec = struct.pack(SenseStick.EVENT_FORMAT,
int(tv_sec), int(tv_usec), SenseStick.EV_KEY, direction, action)
self.props.application.stick.send(event_rec)
def rotate_screen(self, button):
self.ui.screen_widget.props.rotation = (self.ui.screen_widget.props.rotation + button.angle) % 360
self.ui.screen_rotate_label.props.label = '%d°' % self.ui.screen_widget.props.rotation
def toggle_orientation(self, button):
self.ui.screen_widget.props.orientation = not self.ui.screen_widget.props.orientation
def _play_run(self, f):
err = None
try:
# Calculate how many records are in the file; we'll use this later
# when updating the progress bar
rec_total = (f.seek(0, io.SEEK_END) - HEADER_REC.size) // DATA_REC.size
f.seek(0)
skipped = 0
for rec, data in enumerate(self._play_source(f)):
now = time()
if data.timestamp < now:
skipped += 1
continue
else:
if self._play_event.wait(data.timestamp - now):
break
self.props.application.pressure.set_values(data.pressure, data.ptemp)
self.props.application.humidity.set_values(data.humidity, data.htemp)
self.props.application.imu.set_imu_values(
(data.ax, data.ay, data.az),
(data.gx, data.gy, data.gz),
(data.cx, data.cy, data.cz),
(data.ox, data.oy, data.oz),
)
# Again, would be better to use custom signals here but
# attempting to do so just results in seemingly random
# segfaults during playback
with self._play_update_lock:
if self._play_update_id == 0:
self._play_update_id = GLib.idle_add(self._play_update_controls, rec / rec_total)
except Exception as e:
err = e
finally:
f.close()
# Must ensure that controls are only re-enabled *after* all pending
# control updates have run
with self._play_update_lock:
if self._play_update_id:
GLib.source_remove(self._play_update_id)
self._play_update_id = 0
# Get the main thread to re-enable the controls at the end of
# playback
GLib.idle_add(self._play_controls_finish, err)
def _play_update_controls(self, fraction):
with self._play_update_lock:
self._play_update_id = 0
self.ui.play_progressbar.props.fraction = fraction
if not math.isnan(self.props.application.humidity.temperature):
self.ui.temperature.props.value = self.props.application.humidity.temperature
if not math.isnan(self.props.application.pressure.pressure):
self.ui.pressure.props.value = self.props.application.pressure.pressure
if not math.isnan(self.props.application.humidity.humidity):
self.ui.humidity.props.value = self.props.application.humidity.humidity
self.ui.yaw.props.value = math.degrees(self.props.application.imu.orientation[2])
self.ui.pitch.props.value = math.degrees(self.props.application.imu.orientation[1])
self.ui.roll.props.value = math.degrees(self.props.application.imu.orientation[0])
return False
def play_stop_clicked(self, button):
self._play_stop()
def _play_stop(self):
if self._play_thread:
self._play_event.set()
self._play_thread.join()
self._play_thread = None
def _play_source(self, f):
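        # Validate the recording header, then yield DataRecord tuples whose
        # timestamps are rebased from recording time to the current wall-clock
        # time, so playback proceeds in real time relative to "now".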
magic, ver, offset = HEADER_REC.unpack(f.read(HEADER_REC.size))
if magic != b'SENSEHAT':
raise IOError(_('%s is not a Sense HAT recording') % f.name)
if ver != 1:
raise IOError(_('%s has unrecognized file version number') % f.name)
offset = time() - offset
while True:
buf = f.read(DATA_REC.size)
if not buf:
break
elif len(buf) < DATA_REC.size:
raise IOError(_('Incomplete data record at end of %s') % f.name)
else:
data = DataRecord(*DATA_REC.unpack(buf))
yield data._replace(timestamp=data.timestamp + offset)
def _play_controls_setup(self, filename):
# Disable all the associated user controls while playing back
self.ui.environ_box.props.sensitive = False
self.ui.gyro_grid.props.sensitive = False
# Disable simulation threads as we're going to manipulate the
# values precisely
self._play_restore = (
self.props.application.pressure.simulate_noise,
self.props.application.humidity.simulate_noise,
self.props.application.imu.simulate_world,
)
self.props.application.pressure.simulate_noise = False
self.props.application.humidity.simulate_noise = False
self.props.application.imu.simulate_world = False
# Show the playback bar
self.ui.play_label.props.label = _('Playing %s') % os.path.basename(filename)
self.ui.play_progressbar.props.fraction = 0.0
self.ui.play_box.props.visible = True
def _play_controls_finish(self, exc):
# Reverse _play_controls_setup
self.ui.play_box.props.visible = False
( self.props.application.pressure.simulate_noise,
self.props.application.humidity.simulate_noise,
self.props.application.imu.simulate_world,
) = self._play_restore
self.ui.environ_box.props.sensitive = True
self.ui.gyro_grid.props.sensitive = True
self._play_thread = None
# If an exception occurred in the background thread, display the
# error in an appropriate dialog
if exc:
dialog = Gtk.MessageDialog(
transient_for=self,
message_type=Gtk.MessageType.ERROR,
title=_('Error'),
text=_('Error while replaying recording'),
buttons=Gtk.ButtonsType.CLOSE)
dialog.format_secondary_text(str(exc))
dialog.run()
dialog.destroy()
def play(self, filename):
self._play_stop()
self._play_controls_setup(filename)
self._play_thread = Thread(target=self._play_run, args=(io.open(filename, 'rb'),))
self._play_event.clear()
self._play_thread.start()
class PrefsDialog(Gtk.Dialog):
def __init__(self, *args, **kwargs):
self.settings = kwargs.pop('settings')
super(PrefsDialog, self).__init__(*args, **kwargs)
# See comments in MainWindow...
self._ui = BuilderUi(self, 'prefs_dialog.ui')
self.ui.window.remove(self.ui.dialog_vbox)
self.remove(self.get_content_area())
self.add(self.ui.dialog_vbox)
self.ui.window.destroy()
self.props.resizable = False
self.ui.close_button.grab_default()
self.settings.bind(
'simulate-env', self.ui.env_check, 'active', Gio.SettingsBindFlags.DEFAULT)
self.settings.bind(
'simulate-imu', self.ui.imu_check, 'active', Gio.SettingsBindFlags.DEFAULT)
self.settings.bind(
'screen-fps', self.ui.screen_fps, 'value', Gio.SettingsBindFlags.DEFAULT)
self.ui.orientation_balance.value = 'balance'
self.ui.orientation_circle.value = 'circle'
self.ui.orientation_modulo.value = 'modulo'
s = self.settings.get_string('orientation-scale')
for c in self.ui.orientation_balance.get_group():
c.props.active = (c.value == s)
@property
def ui(self):
return self._ui
def close_clicked(self, button):
self.response(Gtk.ResponseType.ACCEPT)
def orientation_changed(self, button):
if button.props.active:
self.settings.set_string('orientation-scale', button.value)
|
server_ping_pong_task.py
|
import csv
import json
import os
import threading
from time import monotonic, time
import pygame
from common import record_metadata, request_clients_end
from config import CLIENT_WINDOW_HEIGHT, CLIENT_WINDOW_WIDTH, UPDATE_RATE
from network import receive, send
from .config_ping_pong_task import (COUNT_DOWN_MESSAGE, SECONDS_COUNT_DOWN,
SESSION_TIME_SECONDS)
from .utils import (BALL_SIZE, LEFT_TEAM, RIGHT_TEAM, WINDOW_HEIGHT,
WINDOW_WIDTH, Ball, Paddle)
class ServerPingPongTask:
def __init__(self, to_client_connections: list, from_client_connection_teams: dict, easy_mode: bool = True, session_name: str = '') -> None:
self._to_client_connections = to_client_connections
if easy_mode:
from . import config_easy_mode as cfg
else:
from . import config_hard_mode as cfg
self._paddle_height = cfg.PADDLE_HEIGHT
self._paddle_width = cfg.PADDLE_WIDTH
self._ball_bounce_on_paddle_scale = cfg.BALL_BOUNCE_ON_PADDLE_SCALE
self._game_y_lower_bound = int((CLIENT_WINDOW_HEIGHT - WINDOW_HEIGHT) / 2)
self._game_y_upper_bound = self._game_y_lower_bound + WINDOW_HEIGHT
self._game_x_lower_bound = int((CLIENT_WINDOW_WIDTH - WINDOW_WIDTH) / 2)
self._game_x_upper_bound = self._game_x_lower_bound + WINDOW_WIDTH
metadata = {}
metadata["left_team"] = []
metadata["right_team"] = []
self._from_client_connections = {}
self._paddles = {}
from_client_connection_team_left, from_client_connection_team_right = from_client_connection_teams
# spread the segment distance uniformly
segment_length_left = int((self._game_y_upper_bound - self._paddle_height - self._game_y_lower_bound) /
(len(from_client_connection_team_left) + 1))
segment_length_right = int((self._game_y_upper_bound - self._paddle_height - self._game_y_lower_bound) /
(len(from_client_connection_team_right) + 1))
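        # Worked example (illustrative figures only): with a 400 px playfield,
        # a 100 px paddle and two players on a team, segment_length is
        # (400 - 100) / 3 = 100, so the paddles sit 100 px and 200 px below
        # the top edge of the playfield.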
# setup paddles for each team
for count, (from_client_connection, client_name) in enumerate(from_client_connection_team_left.items()):
self._from_client_connections[from_client_connection] = client_name
self._paddles[client_name] = Paddle(position=(self._game_x_lower_bound,
self._game_y_lower_bound + segment_length_left * (count + 1)),
paddle_width=self._paddle_width,
paddle_height=self._paddle_height,
upper_bound=self._game_y_upper_bound - self._paddle_height,
lower_bound=self._game_y_lower_bound,
paddle_speed_scaling=cfg.PADDLE_SPEED_SCALING,
paddle_max_speed=cfg.PADDLE_MAX_SPEED,
team=LEFT_TEAM)
metadata["left_team"].append(client_name)
for count, (from_client_connection, client_name) in enumerate(from_client_connection_team_right.items()):
self._from_client_connections[from_client_connection] = client_name
self._paddles[client_name] = Paddle(position=(self._game_x_upper_bound - self._paddle_width,
self._game_y_lower_bound + segment_length_right * (count + 1)),
paddle_width=self._paddle_width,
paddle_height=self._paddle_height,
upper_bound=self._game_y_upper_bound - self._paddle_height,
lower_bound=self._game_y_lower_bound,
paddle_speed_scaling=cfg.PADDLE_SPEED_SCALING,
paddle_max_speed=cfg.PADDLE_MAX_SPEED,
team=RIGHT_TEAM)
metadata["right_team"].append(client_name)
# setup ball
self._ball = Ball(BALL_SIZE, cfg.BALL_X_SPEED)
self._ball.rect.y = self._game_y_lower_bound + int((WINDOW_HEIGHT + BALL_SIZE) / 2)
self._ball.rect.x = self._game_x_lower_bound + int((WINDOW_WIDTH + BALL_SIZE) / 2)
self._score_left = 0
self._score_right = 0
csv_data_path = "./data/ping_pong"
if not os.path.exists(csv_data_path):
os.makedirs(csv_data_path)
csv_file_name = csv_data_path + '/' + session_name + '_' + str(int(time()))
        self._csv_file = open(csv_file_name + ".csv", 'w', newline='')
self._csv_writer = csv.writer(self._csv_file, delimiter=';')
metadata["client_window_height"] = CLIENT_WINDOW_HEIGHT
metadata["client_window_width"] = CLIENT_WINDOW_WIDTH
metadata["session_time_seconds"] = SESSION_TIME_SECONDS
metadata["seconds_count_down"] = SECONDS_COUNT_DOWN
metadata["count_down_message"] = COUNT_DOWN_MESSAGE
metadata["paddle_width"] = self._paddle_width
metadata["paddle_height"] = self._paddle_height
metadata["ai_paddle_max_speed"] = cfg.AI_PADDLE_MAX_SPEED
metadata["paddle_speed_scaling"] = cfg.PADDLE_SPEED_SCALING
metadata["paddle_max_speed"] = cfg.PADDLE_MAX_SPEED
metadata["ball_x_speed"] = cfg.BALL_X_SPEED
metadata["ball_bounce_on_paddle_scale"] = cfg.BALL_BOUNCE_ON_PADDLE_SCALE
json_file_name = csv_file_name + "_metadata"
record_metadata(json_file_name, metadata)
self._running = False
def run(self):
self._running = True
to_client_update_state_thread = threading.Thread(target=self._to_client_update_state, daemon=True)
to_client_update_state_thread.start()
from_client_commands_thread = threading.Thread(target=self._from_client_commands, daemon=True)
from_client_commands_thread.start()
print("[STATUS] Running ping pong task")
# Wait for threads to finish
to_client_update_state_thread.join()
from_client_commands_thread.join()
self._csv_file.close()
extra_data = {}
extra_data["score_left"] = self._score_left
extra_data["score_right"] = self._score_right
request_clients_end(self._to_client_connections, extra_data)
print("[STATUS] Ping pong task ended")
def _to_client_update_state(self):
"""Update the state of the game and reply to clients
"""
counter_target = SECONDS_COUNT_DOWN
game_started = False
start_ticks = pygame.time.get_ticks()
seconds = 0.0
clock = pygame.time.Clock()
while self._running:
# manage timer
if seconds >= counter_target:
if game_started:
self._running = False
break
else:
counter_target = SESSION_TIME_SECONDS
start_ticks = pygame.time.get_ticks()
game_started = True
# Update state of the ball
if game_started:
self._ball.update()
# Check for collision between ball and paddles
paddle_collide_ball = False
for paddle in self._paddles.values():
if pygame.sprite.collide_mask(self._ball, paddle):
if self._ball.velocity[0] > 0 and paddle.team == LEFT_TEAM:
self._ball.velocity[1] = -self._ball.velocity[1]
self._ball.rect.x = paddle.rect.x + self._paddle_width
elif self._ball.velocity[0] < 0 and paddle.team == RIGHT_TEAM:
self._ball.velocity[1] = -self._ball.velocity[1]
self._ball.rect.x = paddle.rect.x - BALL_SIZE
else:
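                            # Ball struck the paddle face: give it a vertical
                            # speed proportional to how far from the paddle's
                            # centre the ball's centre hit, scaled by the
                            # difficulty's bounce factor.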
ball_bound_y_velocity = int(((self._ball.rect.y + BALL_SIZE / 2.0) -
(paddle.rect.y + self._paddle_height / 2.0))
* self._ball_bounce_on_paddle_scale)
                            # Prevent the ball from ever moving purely horizontally
ball_bound_y_velocity = 1 if ball_bound_y_velocity == 0 else ball_bound_y_velocity
self._ball.bounce(ball_bound_y_velocity)
if self._ball.rect.x < CLIENT_WINDOW_WIDTH / 2:
self._ball.rect.x = self._game_x_lower_bound + self._paddle_width
else:
self._ball.rect.x = self._game_x_upper_bound - self._paddle_width - BALL_SIZE
paddle_collide_ball = True
break
# If ball has not collided with paddle, check if it collides with the wall
if not paddle_collide_ball:
# Collides with right wall
if self._ball.rect.x >= self._game_x_upper_bound - BALL_SIZE:
self._score_left += 1
self._ball.bounce()
# Offset the ball to avoid collision with paddle
self._ball.rect.x = self._game_x_upper_bound - BALL_SIZE
# Collides left wall
elif self._ball.rect.x <= self._game_x_lower_bound:
self._score_right += 1
self._ball.bounce()
# Offset the ball to avoid collision with paddle
self._ball.rect.x = self._game_x_lower_bound
# Collides with bottom wall
elif self._ball.rect.y >= self._game_y_upper_bound - BALL_SIZE:
self._ball.rect.y = self._game_y_upper_bound - BALL_SIZE - 1
self._ball.velocity[1] = -self._ball.velocity[1]
# Collides with top wall
elif self._ball.rect.y <= self._game_y_lower_bound:
self._ball.rect.y = self._game_y_lower_bound + 1
self._ball.velocity[1] = -self._ball.velocity[1]
data = {}
data["type"] = "state"
data["score_left"] = self._score_left
data["score_right"] = self._score_right
data["started"] = game_started
data["state"] = {}
data["state"]["ball"] = (self._ball.rect.x, self._ball.rect.y)
for client_name, paddle in self._paddles.items():
data["state"][client_name] = (paddle.rect.x, paddle.rect.y)
seconds_to_send = int(counter_target) - int(seconds)
data["seconds"] = 1 if seconds_to_send <= 0 else seconds_to_send
# Record state of the game
            self._csv_writer.writerow([time(), monotonic(), json.dumps(data)])
send(self._to_client_connections, data)
seconds = (pygame.time.get_ticks() - start_ticks) / 1000.0
clock.tick(UPDATE_RATE)
def _from_client_commands(self):
"""Update state of paddles from user commands
"""
while self._running:
all_data = receive(self._from_client_connections.keys(), 0.1)
for data in all_data:
if data["type"] == "change":
self._paddles[data["sender"]].update_location(data["change"])
|
utils.py
|
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2017, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness of any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
"""VOLTTRON platform™ agent helper classes/functions."""
import argparse
import calendar
import errno
import logging
import sys
import syslog
import traceback
from datetime import datetime, tzinfo, timedelta
import psutil
import gevent
import os
import pytz
import re
import stat
import time
import yaml
from volttron.platform import get_home, get_address
from volttron.utils.prompt import prompt_response
from dateutil.parser import parse
from dateutil.tz import tzutc, tzoffset
from tzlocal import get_localzone
from volttron.platform.agent import json as jsonapi
from ConfigParser import ConfigParser
import subprocess
from subprocess import Popen
try:
from ..lib.inotify.green import inotify, IN_MODIFY
except AttributeError:
# inotify library is not available on OS X/MacOS.
# @TODO Integrate with the OS X FS Events API
inotify = None
IN_MODIFY = None
__all__ = ['load_config', 'run_agent', 'start_agent_thread',
'is_valid_identity', 'load_platform_config', 'get_messagebus',
'get_fq_identity', 'execute_command']
__author__ = 'Brandon Carpenter <brandon.carpenter@pnnl.gov>'
__copyright__ = 'Copyright (c) 2016, Battelle Memorial Institute'
__license__ = 'FreeBSD'
_comment_re = re.compile(
r'((["\'])(?:\\?.)*?\2)|(/\*.*?\*/)|((?:#|//).*?(?=\n|$))',
re.MULTILINE | re.DOTALL)
_log = logging.getLogger(__name__)
# The following are the only allowable characters for identities.
_VALID_IDENTITY_RE = re.compile(r"^[A-Za-z0-9_.\-]+$")
def is_valid_identity(identity_to_check):
""" Checks the passed identity to see if it contains invalid characters
A None value for identity_to_check will return False
@:param: string: The vip_identity to check for validity
@:return: boolean: True if values are in the set of valid characters.
"""
if identity_to_check is None:
return False
return _VALID_IDENTITY_RE.match(identity_to_check)
def normalize_identity(pre_identity):
if is_valid_identity(pre_identity):
return pre_identity
if pre_identity is None:
raise ValueError("Identity cannot be none.")
norm = ""
for s in pre_identity:
if _VALID_IDENTITY_RE.match(s):
norm += s
else:
norm += '_'
return norm
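# Illustrative usage sketch for the two identity helpers above: valid
# identities contain only letters, digits, underscores, dots and hyphens;
# normalize_identity() maps every other character to an underscore.
def _identity_helpers_example():
    assert is_valid_identity('platform.historian')
    assert not is_valid_identity('my agent!')      # space and '!' are invalid
    return normalize_identity('my agent!')         # -> 'my_agent_'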
def _repl(match):
"""Replace the matched group with an appropriate string."""
# If the first group matched, a quoted string was matched and should
# be returned unchanged. Otherwise a comment was matched and the
# empty string should be returned.
return match.group(1) or ''
def strip_comments(string):
"""Return string with all comments stripped.
Both JavaScript-style comments (//... and /*...*/) and hash (#...)
comments are removed.
"""
return _comment_re.sub(_repl, string)
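# Illustrative usage sketch: strip_comments() removes //, /* */ and # comments
# but leaves comment markers inside quoted strings untouched.
def _strip_comments_example():
    raw = ('{\n'
           '    "interval": 5, // poll every 5 seconds\n'
           '    "topic": "devices/#", # hash comments are removed too\n'
           '    /* block comments as well */ "unit": "degF"\n'
           '}\n')
    return strip_comments(raw)   # the "#" inside "devices/#" survives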
def load_config(config_path):
"""Load a JSON-encoded configuration file."""
if config_path is None:
_log.info("AGENT_CONFIG does not exist in environment. load_config returning empty configuration.")
return {}
if not os.path.exists(config_path):
_log.info("Config file specified by AGENT_CONFIG does not exist. load_config returning empty configuration.")
return {}
# First attempt parsing the file with a yaml parser (allows comments natively)
# Then if that fails we fallback to our modified json parser.
try:
with open(config_path) as f:
return yaml.safe_load(f.read())
except yaml.scanner.ScannerError as e:
try:
with open(config_path) as f:
return parse_json_config(f.read())
except StandardError as e:
_log.error("Problem parsing agent configuration")
raise
def load_platform_config(vhome=None):
"""Loads the platform config file if the path exists."""
config_opts = {}
if not vhome:
vhome = get_home()
path = os.path.join(vhome, 'config')
if os.path.exists(path):
parser = ConfigParser()
parser.read(path)
options = parser.options('volttron')
for option in options:
config_opts[option] = parser.get('volttron', option)
return config_opts
def get_platform_instance_name(vhome=None, prompt=False):
platform_config = load_platform_config(vhome)
instance_name = platform_config.get('instance-name', None)
if instance_name is not None:
instance_name = instance_name.strip('"')
if prompt:
if not instance_name:
instance_name = 'volttron1'
instance_name = prompt_response("Name of this volttron instance:",
mandatory=True, default=instance_name)
else:
if not instance_name:
_log.warning("Using hostname as instance name.")
if os.path.isfile('/etc/hostname'):
with open('/etc/hostname') as f:
instance_name = f.read().strip()
bus = platform_config.get('message-bus')
if bus is None:
bus = get_messagebus()
store_message_bus_config(bus, instance_name)
else:
err = "No instance-name is configured in $VOLTTRON_HOME/config. Please set instance-name in " \
"$VOLTTRON_HOME/config"
_log.error(err)
raise KeyError(err)
return instance_name
def get_fq_identity(identity, platform_instance_name=None):
"""
Return the fully qualified identity for the passed core identity.
Fully qualified identities are instance_name.identity
:param identity:
:param platform_instance_name: str The name of the platform.
:return:
"""
if not platform_instance_name:
platform_instance_name = get_platform_instance_name()
return "{}.{}".format(platform_instance_name, identity)
def get_messagebus():
"""Get type of message bus - zeromq or rabbbitmq."""
message_bus = os.environ.get('MESSAGEBUS')
if not message_bus:
config = load_platform_config()
message_bus = config.get('message-bus', 'zmq')
return message_bus
def store_message_bus_config(message_bus, instance_name):
# If there is no config file or home directory yet, create volttron_home
# and config file
if not instance_name:
raise ValueError("Instance name should be a valid string and should "
"be unique within a network of volttron instances "
"that communicate with each other. start volttron "
"process with '--instance-name <your instance>' if "
"you are running this instance for the first time. "
"Or add instance-name = <instance name> in "
"vhome/config")
    v_home = get_home()
config_path = os.path.join(v_home, "config")
if os.path.exists(config_path):
config = ConfigParser()
config.read(config_path)
config.set('volttron', 'message-bus', message_bus)
        config.set('volttron', 'instance-name', instance_name)
with open(config_path, 'w') as configfile:
config.write(configfile)
else:
if not os.path.exists(v_home):
os.makedirs(v_home, 0o755)
config = ConfigParser()
config.add_section('volttron')
config.set('volttron', 'message-bus', message_bus)
config.set('volttron', 'instance-name', instance_name)
with open(config_path, 'w') as configfile:
config.write(configfile)
def update_kwargs_with_config(kwargs, config):
"""
Loads the user defined configurations into kwargs.
1. Converts any dash/hyphen in config variables into underscores
2. Checks for configured "identity" value. Prints a deprecation
warning and uses it.
3. Checks for configured "agentid" value. Prints a deprecation warning
and ignores it
:param kwargs: kwargs to be updated
:param config: dictionary of user/agent configuration
"""
if config.get('identity') is not None:
_log.warning("DEPRECATION WARNING: Setting a historian's VIP IDENTITY"
" from its configuration file will no longer be supported"
" after VOLTTRON 4.0")
_log.warning(
"DEPRECATION WARNING: Using the identity configuration setting "
"will override the value provided by the platform. This new value "
"will not be reported correctly by 'volttron-ctl status'")
_log.warning("DEPRECATION WARNING: Please remove 'identity' from your "
"configuration file and use the new method provided by "
"the platform to set an agent's identity. See "
"scripts/core/make-mongo-historian.sh for an example of "
"how this is done.")
if config.get('agentid') is not None:
_log.warning("WARNING: Agent id cannot be configured. It is a unique "
"id assigned by VOLTTRON platform. Ignoring configured "
"agentid")
config.pop('agentid')
for k, v in config.items():
kwargs[k.replace("-","_")] = v
def parse_json_config(config_str):
"""Parse a JSON-encoded configuration file."""
return jsonapi.loads(strip_comments(config_str))
def run_agent(cls, subscribe_address=None, publish_address=None,
config_path=None, **kwargs):
"""Instantiate an agent and run it in the current thread.
Attempts to get keyword parameters from the environment if they
are not set.
"""
if not subscribe_address:
subscribe_address = os.environ.get('AGENT_SUB_ADDR')
if subscribe_address:
kwargs['subscribe_address'] = subscribe_address
if not publish_address:
publish_address = os.environ.get('AGENT_PUB_ADDR')
if publish_address:
kwargs['publish_address'] = publish_address
if not config_path:
config_path = os.environ.get('AGENT_CONFIG')
if config_path:
kwargs['config_path'] = config_path
agent = cls(**kwargs)
agent.run()
def start_agent_thread(cls, **kwargs):
"""Instantiate an agent class and run it in a new daemon thread.
Returns the thread object.
"""
import threading
agent = cls(**kwargs)
thread = threading.Thread(target=agent.run)
thread.daemon = True
thread.start()
return thread
def isapipe(fd):
fd = getattr(fd, 'fileno', lambda: fd)()
return stat.S_ISFIFO(os.fstat(fd).st_mode)
def default_main(agent_class, description=None, argv=sys.argv,
parser_class=argparse.ArgumentParser, **kwargs):
"""Default main entry point implementation for legacy agents.
    description and parser_class are deprecated. Please avoid using them.
"""
try:
# If stdout is a pipe, re-open it line buffered
if isapipe(sys.stdout):
# Hold a reference to the previous file object so it doesn't
# get garbage collected and close the underlying descriptor.
stdout = sys.stdout
sys.stdout = os.fdopen(stdout.fileno(), 'w', 1)
try:
sub_addr = os.environ['AGENT_SUB_ADDR']
pub_addr = os.environ['AGENT_PUB_ADDR']
except KeyError as exc:
sys.stderr.write(
'missing environment variable: {}\n'.format(exc.args[0]))
sys.exit(1)
if sub_addr.startswith('ipc://') and sub_addr[6:7] != '@':
if not os.path.exists(sub_addr[6:]):
sys.stderr.write('warning: subscription socket does not '
'exist: {}\n'.format(sub_addr[6:]))
if pub_addr.startswith('ipc://') and pub_addr[6:7] != '@':
if not os.path.exists(pub_addr[6:]):
sys.stderr.write('warning: publish socket does not '
'exist: {}\n'.format(pub_addr[6:]))
config = os.environ.get('AGENT_CONFIG')
agent = agent_class(subscribe_address=sub_addr,
publish_address=pub_addr,
config_path=config, **kwargs)
agent.run()
except KeyboardInterrupt:
pass
def vip_main(agent_class, identity=None, version='0.1', **kwargs):
"""Default main entry point implementation for VIP agents."""
try:
# If stdout is a pipe, re-open it line buffered
if isapipe(sys.stdout):
# Hold a reference to the previous file object so it doesn't
# get garbage collected and close the underlying descriptor.
stdout = sys.stdout
sys.stdout = os.fdopen(stdout.fileno(), 'w', 1)
# Quiet printing of KeyboardInterrupt by greenlets
Hub = gevent.hub.Hub
Hub.NOT_ERROR = Hub.NOT_ERROR + (KeyboardInterrupt,)
config = os.environ.get('AGENT_CONFIG')
identity = os.environ.get('AGENT_VIP_IDENTITY', identity)
message_bus = os.environ.get('MESSAGEBUS', 'zmq')
if identity is not None:
if not is_valid_identity(identity):
                _log.warning('Deprecation warning')
                _log.warning(
                    'Not all characters in {identity} are in the valid set.'
                    .format(identity=identity))
address = get_address()
agent_uuid = os.environ.get('AGENT_UUID')
volttron_home = get_home()
from volttron.platform.certs import Certs
certs = Certs()
if os.path.isfile(certs.remote_cert_bundle_file()):
os.environ['REQUESTS_CA_BUNDLE'] = certs.remote_cert_bundle_file()
agent = agent_class(config_path=config, identity=identity,
address=address, agent_uuid=agent_uuid,
volttron_home=volttron_home,
version=version,
message_bus=message_bus, **kwargs)
try:
run = agent.run
except AttributeError:
run = agent.core.run
task = gevent.spawn(run)
try:
task.join()
finally:
task.kill()
except KeyboardInterrupt:
pass
class SyslogFormatter(logging.Formatter):
_level_map = {logging.DEBUG: syslog.LOG_DEBUG,
logging.INFO: syslog.LOG_INFO,
logging.WARNING: syslog.LOG_WARNING,
logging.ERROR: syslog.LOG_ERR,
logging.CRITICAL: syslog.LOG_CRIT}
def format(self, record):
level = self._level_map.get(record.levelno, syslog.LOG_INFO)
return '<{}>'.format(level) + super(SyslogFormatter, self).format(
record)
class JsonFormatter(logging.Formatter):
def format(self, record):
dct = record.__dict__.copy()
dct["msg"] = record.getMessage()
dct.pop('args')
exc_info = dct.pop('exc_info', None)
if exc_info:
dct['exc_text'] = ''.join(traceback.format_exception(*exc_info))
return jsonapi.dumps(dct)
class AgentFormatter(logging.Formatter):
def __init__(self, fmt=None, datefmt=None):
if fmt is None:
fmt = '%(asctime)s %(composite_name)s %(levelname)s: %(message)s'
super(AgentFormatter, self).__init__(fmt=fmt, datefmt=datefmt)
def composite_name(self, record):
if record.name == 'agents.log':
cname = '(%(processName)s %(process)d) %(remote_name)s'
elif record.name.startswith('agents.std'):
cname = '(%(processName)s %(process)d) <{}>'.format(
record.name.split('.', 2)[1])
else:
cname = '() %(name)s'
return cname % record.__dict__
def format(self, record):
if 'composite_name' not in record.__dict__:
record.__dict__['composite_name'] = self.composite_name(record)
if len(record.args) > 0 \
and 'tornado.access' in record.__dict__['composite_name']:
record.__dict__['msg'] = ','.join([str(b) for b in record.args])
record.__dict__['args'] = []
return super(AgentFormatter, self).format(record)
def setup_logging(level=logging.DEBUG):
root = logging.getLogger()
if not root.handlers:
handler = logging.StreamHandler()
if isapipe(sys.stderr) and '_LAUNCHED_BY_PLATFORM' in os.environ:
handler.setFormatter(JsonFormatter())
else:
fmt = '%(asctime)s %(name)s %(levelname)s: %(message)s'
handler.setFormatter(logging.Formatter(fmt))
root.addHandler(handler)
root.setLevel(level)
def format_timestamp(time_stamp):
"""Create a consistent datetime string representation based on
ISO 8601 format.
YYYY-MM-DDTHH:MM:SS.mmmmmm for unaware datetime objects.
YYYY-MM-DDTHH:MM:SS.mmmmmm+HH:MM for aware datetime objects
:param time_stamp: value to convert
:type time_stamp: datetime
:returns: datetime in string format
:rtype: str
"""
time_str = time_stamp.strftime("%Y-%m-%dT%H:%M:%S.%f")
if time_stamp.tzinfo is not None:
sign = '+'
td = time_stamp.tzinfo.utcoffset(time_stamp)
if td.days < 0:
sign = '-'
td = -td
seconds = td.seconds
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
time_str += "{sign}{HH:02}:{MM:02}".format(sign=sign,
HH=hours,
MM=minutes)
return time_str
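# Illustrative usage sketch: naive datetimes are rendered without an offset,
# aware datetimes get a +HH:MM / -HH:MM suffix.
def _format_timestamp_example():
    naive = datetime(2017, 6, 1, 12, 30, 0, 250000)
    aware = pytz.UTC.localize(naive)
    return format_timestamp(naive), format_timestamp(aware)
    # -> ('2017-06-01T12:30:00.250000', '2017-06-01T12:30:00.250000+00:00')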
def parse_timestamp_string(time_stamp_str):
"""
Create a datetime object from the supplied date/time string.
Uses dateutil.parse with no extra parameters.
For performance reasons we try
YYYY-MM-DDTHH:MM:SS.mmmmmm
or
YYYY-MM-DDTHH:MM:SS.mmmmmm+HH:MM
based on the string length before falling back to dateutil.parse.
    @param time_stamp_str: the date/time string to convert
    @return: datetime object
"""
if len(time_stamp_str) == 26:
try:
return datetime.strptime(time_stamp_str, "%Y-%m-%dT%H:%M:%S.%f")
except ValueError:
pass
elif len(time_stamp_str) == 32:
try:
base_time_stamp_str = time_stamp_str[:26]
time_zone_str = time_stamp_str[26:]
time_stamp = datetime.strptime(base_time_stamp_str, "%Y-%m-%dT%H:%M:%S.%f")
# Handle most common case.
if time_zone_str == "+00:00":
return time_stamp.replace(tzinfo=pytz.UTC)
hours_offset = int(time_zone_str[1:3])
minutes_offset = int(time_zone_str[4:6])
seconds_offset = hours_offset * 3600 + minutes_offset * 60
if time_zone_str[0] == "-":
seconds_offset = -seconds_offset
return time_stamp.replace(tzinfo=tzoffset("", seconds_offset))
except ValueError:
pass
return parse(time_stamp_str)
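# Illustrative usage sketch: the two fixed-width fast paths cover the strings
# produced by format_timestamp(); anything else falls back to dateutil.parse.
def _parse_timestamp_string_example():
    fast = parse_timestamp_string('2017-06-01T12:30:00.250000+00:00')
    assert fast.tzinfo is pytz.UTC
    return parse_timestamp_string('June 1 2017 12:30 PM')  # dateutil fallback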
def get_aware_utc_now():
"""Create a timezone aware UTC datetime object from the system time.
:returns: an aware UTC datetime object
:rtype: datetime
"""
utcnow = datetime.utcnow()
utcnow = pytz.UTC.localize(utcnow)
return utcnow
def get_utc_seconds_from_epoch(timestamp=None):
"""
    Convert a given timestamp to seconds from the epoch based on UTC time. If
    the given time is a naive datetime it is considered to be local to where
    this code is running.
@param timestamp: datetime object
@return: seconds from epoch
"""
if timestamp is None:
timestamp = datetime.now(tz=tzutc())
if timestamp.tzinfo is None:
local_tz = get_localzone()
        # Do not use datetime.replace(tzinfo=local_tz); use localize() instead
timestamp = local_tz.localize(timestamp)
# utctimetuple can be called on aware timestamps and it will
# convert to UTC first.
seconds_from_epoch = calendar.timegm(timestamp.utctimetuple())
# timetuple loses microsecond accuracy so we have to put it back.
seconds_from_epoch += timestamp.microsecond / 1000000.0
return seconds_from_epoch
def process_timestamp(timestamp_string, topic=''):
"""
    Convert a timestamp string to a timezone-aware UTC timestamp.
@param timestamp_string: datetime string to parse
@param topic: topic to which parse errors are published
@return: UTC datetime object and the original timezone of input datetime
"""
if timestamp_string is None:
_log.error("message for {topic} missing timetamp".format(topic=topic))
return
try:
timestamp = parse_timestamp_string(timestamp_string)
except (ValueError, TypeError):
_log.error("message for {topic} bad timetamp string: {ts_string}"
.format(topic=topic, ts_string=timestamp_string))
return
if timestamp.tzinfo is None:
timestamp = timestamp.replace(tzinfo=pytz.UTC)
original_tz = None
else:
original_tz = timestamp.tzinfo
timestamp = timestamp.astimezone(pytz.UTC)
return timestamp, original_tz
def watch_file(fullpath, callback):
"""Run callback method whenever the file changes
Not available on OS X/MacOS.
"""
dirname, filename = os.path.split(fullpath)
if inotify is None:
_log.warning("Runtime changes to: %s not supported on this platform.", fullpath)
else:
with inotify() as inot:
inot.add_watch(dirname, IN_MODIFY)
for event in inot:
if event.name == filename and event.mask & IN_MODIFY:
callback()
def watch_file_with_fullpath(fullpath, callback):
"""Run callback method whenever the file changes
Not available on OS X/MacOS.
"""
dirname, filename = os.path.split(fullpath)
if inotify is None:
_log.warning("Runtime changes to: %s not supported on this platform.", fullpath)
else:
with inotify() as inot:
inot.add_watch(dirname, IN_MODIFY)
for event in inot:
if event.name == filename and event.mask & IN_MODIFY:
callback(fullpath)
def create_file_if_missing(path, permission=0o660, contents=None):
dirname = os.path.dirname(path)
if dirname and not os.path.exists(dirname):
try:
os.makedirs(dirname)
except OSError as e:
if e.errno != errno.EEXIST:
raise
try:
open(path)
except IOError as exc:
if exc.errno != errno.ENOENT:
raise
_log.debug('missing file %s', path)
_log.info('creating file %s', path)
fd = os.open(path, os.O_CREAT | os.O_WRONLY, permission)
try:
if contents:
os.write(fd, contents)
finally:
os.close(fd)
def fix_sqlite3_datetime(sql=None):
"""Primarily for fixing the base historian cache on certain versions
of python.
    Registers a new datetime converter that uses dateutil parse. This should
    better resolve #216, #174, and #91 without the goofy workarounds that
    change data.
Optional sql argument is for testing only.
"""
if sql is None:
import sqlite3 as sql
sql.register_adapter(datetime, format_timestamp)
sql.register_converter("timestamp", parse_timestamp_string)
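# Illustrative usage sketch: once fix_sqlite3_datetime() has been called,
# connections opened with detect_types use the adapter/converter pair
# registered above for columns declared with a "timestamp" type.
def _fix_sqlite3_datetime_example():
    import sqlite3
    fix_sqlite3_datetime()
    return sqlite3.connect(':memory:', detect_types=sqlite3.PARSE_DECLTYPES)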
def execute_command(cmds, env=None, cwd=None, logger=None, err_prefix=None):
_, output = execute_command_p(cmds, env, cwd, logger, err_prefix)
return output
def execute_command_p(cmds, env=None, cwd=None, logger=None, err_prefix=None):
""" Executes a given command. If commands return code is 0 return stdout.
If not logs stderr and raises RuntimeException"""
process = Popen(cmds, env=env, cwd=cwd, stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
(output, error) = process.communicate()
if not err_prefix:
err_prefix = "Error executing command"
if process.returncode != 0:
err_message = "\n{}: Below Command failed with non zero exit code.\n" \
"Command:{} \nStderr:\n{}\n".format(err_prefix,
" ".join(cmds),
error)
if logger:
logger.exception(err_message)
raise RuntimeError()
else:
raise RuntimeError(err_message)
return process.returncode, output
def is_volttron_running(volttron_home):
"""
    Checks if volttron is running for the given volttron home. Checks if a VOLTTRON_PID file exists and, if it
    does, checks whether the PID in the file corresponds to a running process. If so, returns True; else returns False.
    :param volttron_home: volttron home directory
    :return: True if VOLTTRON_PID file exists and points to a valid process id
"""
pid_file = os.path.join(volttron_home, 'VOLTTRON_PID')
if os.path.exists(pid_file):
running = False
with open(pid_file, 'r') as pf:
pid = int(pf.read().strip())
running = psutil.pid_exists(pid)
return running
else:
return False
|
test_rest_v2_0_0.py
|
import json
import random
import string
import subprocess
import unittest
from multiprocessing import Process
import requests
import sys
import time
from dateutil.parser import parse
from test.apiv2.rest_api import Podman
PODMAN_URL = "http://localhost:8080"
def _url(path):
return PODMAN_URL + "/v2.0.0/libpod" + path
def ctnr(path):
    r = None
    try:
r = requests.get(_url("/containers/json?all=true"))
ctnrs = json.loads(r.text)
except Exception as e:
msg = f"Bad container response: {e}"
if r is not None:
msg = msg + " " + r.text
sys.stderr.write(msg + "\n")
raise
return path.format(ctnrs[0]["Id"])
def validateObjectFields(buffer):
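    # Accepts either a JSON array of objects or a single JSON object and
    # asserts (via KeyError on failure) that each object carries an "Id" field.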
objs = json.loads(buffer)
if not isinstance(objs, dict):
for o in objs:
_ = o["Id"]
else:
_ = objs["Id"]
return objs
class TestApi(unittest.TestCase):
podman = None # initialized podman configuration for tests
service = None # podman service instance
def setUp(self):
super().setUp()
try:
TestApi.podman.run("run", "alpine", "/bin/ls", check=True)
except subprocess.CalledProcessError as e:
if e.stdout:
sys.stdout.write("\nRun Stdout:\n" + e.stdout.decode("utf-8"))
if e.stderr:
sys.stderr.write("\nRun Stderr:\n" + e.stderr.decode("utf-8"))
raise
@classmethod
def setUpClass(cls):
super().setUpClass()
TestApi.podman = Podman()
TestApi.service = TestApi.podman.open("system", "service", "tcp:localhost:8080", "--time=0")
# give the service some time to be ready...
time.sleep(2)
returncode = TestApi.service.poll()
if returncode is not None:
raise subprocess.CalledProcessError(returncode, "podman system service")
r = requests.post(_url("/images/pull?reference=docker.io%2Falpine%3Alatest"))
if r.status_code != 200:
raise subprocess.CalledProcessError(
r.status_code, f"podman images pull docker.io/alpine:latest {r.text}"
)
@classmethod
def tearDownClass(cls):
TestApi.service.terminate()
stdout, stderr = TestApi.service.communicate(timeout=0.5)
if stdout:
sys.stdout.write("\nService Stdout:\n" + stdout.decode("utf-8"))
if stderr:
sys.stderr.write("\nService Stderr:\n" + stderr.decode("utf-8"))
return super().tearDownClass()
def test_info(self):
r = requests.get(_url("/info"))
self.assertEqual(r.status_code, 200)
self.assertIsNotNone(r.content)
_ = json.loads(r.text)
def test_events(self):
r = requests.get(_url("/events?stream=false"))
self.assertEqual(r.status_code, 200, r.text)
self.assertIsNotNone(r.content)
for line in r.text.splitlines():
obj = json.loads(line)
# Actor.ID is uppercase for compatibility
_ = obj["Actor"]["ID"]
def test_containers(self):
r = requests.get(_url("/containers/json"), timeout=5)
self.assertEqual(r.status_code, 200, r.text)
obj = json.loads(r.text)
self.assertEqual(len(obj), 0)
def test_containers_all(self):
r = requests.get(_url("/containers/json?all=true"))
self.assertEqual(r.status_code, 200, r.text)
validateObjectFields(r.text)
def test_inspect_container(self):
r = requests.get(_url(ctnr("/containers/{}/json")))
self.assertEqual(r.status_code, 200, r.text)
obj = validateObjectFields(r.content)
_ = parse(obj["Created"])
def test_stats(self):
r = requests.get(_url(ctnr("/containers/{}/stats?stream=false")))
self.assertIn(r.status_code, (200, 409), r.text)
if r.status_code == 200:
validateObjectFields(r.text)
def test_delete_containers(self):
r = requests.delete(_url(ctnr("/containers/{}")))
self.assertEqual(r.status_code, 204, r.text)
def test_stop_containers(self):
r = requests.post(_url(ctnr("/containers/{}/start")))
self.assertIn(r.status_code, (204, 304), r.text)
r = requests.post(_url(ctnr("/containers/{}/stop")))
self.assertIn(r.status_code, (204, 304), r.text)
def test_start_containers(self):
r = requests.post(_url(ctnr("/containers/{}/stop")))
self.assertIn(r.status_code, (204, 304), r.text)
r = requests.post(_url(ctnr("/containers/{}/start")))
self.assertIn(r.status_code, (204, 304), r.text)
def test_restart_containers(self):
r = requests.post(_url(ctnr("/containers/{}/start")))
self.assertIn(r.status_code, (204, 304), r.text)
r = requests.post(_url(ctnr("/containers/{}/restart")), timeout=5)
self.assertEqual(r.status_code, 204, r.text)
def test_resize(self):
r = requests.post(_url(ctnr("/containers/{}/resize?h=43&w=80")))
self.assertIn(r.status_code, (200, 409), r.text)
if r.status_code == 200:
self.assertIsNone(r.text)
def test_attach_containers(self):
self.skipTest("FIXME: Test timeouts")
r = requests.post(_url(ctnr("/containers/{}/attach")), timeout=5)
self.assertIn(r.status_code, (101, 500), r.text)
def test_logs_containers(self):
r = requests.get(_url(ctnr("/containers/{}/logs?stdout=true")))
self.assertEqual(r.status_code, 200, r.text)
# TODO Need to support Docker-py order of network/container creates
def test_post_create_compat_connect(self):
"""Create network and container then connect to network"""
net_default = requests.post(
PODMAN_URL + "/v1.40/networks/create", json={"Name": "TestDefaultNetwork"}
)
self.assertEqual(net_default.status_code, 201, net_default.text)
create = requests.post(
PODMAN_URL + "/v1.40/containers/create?name=postCreate",
json={
"Cmd": ["top"],
"Image": "alpine:latest",
"NetworkDisabled": False,
# FIXME adding these 2 lines cause: (This is sampled from docker-py)
# "network already exists","message":"container
# 01306e499df5441560d70071a54342611e422a94de20865add50a9565fd79fb9 is already connected to CNI
# network \"TestDefaultNetwork\": network already exists"
# "HostConfig": {"NetworkMode": "TestDefaultNetwork"},
# "NetworkingConfig": {"EndpointsConfig": {"TestDefaultNetwork": None}},
# FIXME These two lines cause:
# CNI network \"TestNetwork\" not found","message":"error configuring network namespace for container
# 369ddfa7d3211ebf1fbd5ddbff91bd33fa948858cea2985c133d6b6507546dff: CNI network \"TestNetwork\" not
# found"
# "HostConfig": {"NetworkMode": "TestNetwork"},
# "NetworkingConfig": {"EndpointsConfig": {"TestNetwork": None}},
# FIXME no networking defined cause: (note this error is from the container inspect below)
# "internal libpod error","message":"network inspection mismatch: asked to join 2 CNI network(s) [
# TestDefaultNetwork podman], but have information on 1 network(s): internal libpod error"
},
)
self.assertEqual(create.status_code, 201, create.text)
payload = json.loads(create.text)
self.assertIsNotNone(payload["Id"])
start = requests.post(PODMAN_URL + f"/v1.40/containers/{payload['Id']}/start")
self.assertEqual(start.status_code, 204, start.text)
connect = requests.post(
PODMAN_URL + "/v1.40/networks/TestDefaultNetwork/connect",
json={"Container": payload["Id"]},
)
self.assertEqual(connect.status_code, 200, connect.text)
self.assertEqual(connect.text, "OK\n")
inspect = requests.get(f"{PODMAN_URL}/v1.40/containers/{payload['Id']}/json")
self.assertEqual(inspect.status_code, 200, inspect.text)
payload = json.loads(inspect.text)
self.assertFalse(payload["Config"].get("NetworkDisabled", False))
self.assertEqual(
"TestDefaultNetwork",
payload["NetworkSettings"]["Networks"]["TestDefaultNetwork"]["NetworkID"],
)
# TODO restore this to test, when joining multiple networks possible
# self.assertEqual(
# "TestNetwork",
# payload["NetworkSettings"]["Networks"]["TestNetwork"]["NetworkID"],
# )
# TODO Need to support network aliases
# self.assertIn(
# "test_post_create",
# payload["NetworkSettings"]["Networks"]["TestNetwork"]["Aliases"],
# )
def test_post_create_compat(self):
"""Create network and connect container during create"""
net = requests.post(PODMAN_URL + "/v1.40/networks/create", json={"Name": "TestNetwork"})
self.assertEqual(net.status_code, 201, net.text)
create = requests.post(
PODMAN_URL + "/v1.40/containers/create?name=postCreate",
json={
"Cmd": ["date"],
"Image": "alpine:latest",
"NetworkDisabled": False,
"HostConfig": {"NetworkMode": "TestNetwork"},
},
)
self.assertEqual(create.status_code, 201, create.text)
payload = json.loads(create.text)
self.assertIsNotNone(payload["Id"])
inspect = requests.get(f"{PODMAN_URL}/v1.40/containers/{payload['Id']}/json")
self.assertEqual(inspect.status_code, 200, inspect.text)
payload = json.loads(inspect.text)
self.assertFalse(payload["Config"].get("NetworkDisabled", False))
self.assertEqual(
"TestNetwork",
payload["NetworkSettings"]["Networks"]["TestNetwork"]["NetworkID"],
)
def test_commit(self):
r = requests.post(_url(ctnr("/commit?container={}")))
self.assertEqual(r.status_code, 200, r.text)
obj = json.loads(r.content)
self.assertIsInstance(obj, dict)
self.assertIn("Id", obj)
def test_images_compat(self):
r = requests.get(PODMAN_URL + "/v1.40/images/json")
self.assertEqual(r.status_code, 200, r.text)
# See https://docs.docker.com/engine/api/v1.40/#operation/ImageList
required_keys = (
"Id",
"ParentId",
"RepoTags",
"RepoDigests",
"Created",
"Size",
"SharedSize",
"VirtualSize",
"Labels",
"Containers",
)
objs = json.loads(r.content)
self.assertIn(type(objs), (list,))
for o in objs:
self.assertIsInstance(o, dict)
for k in required_keys:
self.assertIn(k, o)
def test_inspect_image_compat(self):
r = requests.get(PODMAN_URL + "/v1.40/images/alpine/json")
self.assertEqual(r.status_code, 200, r.text)
# See https://docs.docker.com/engine/api/v1.40/#operation/ImageInspect
required_keys = (
"Id",
"Parent",
"Comment",
"Created",
"Container",
"DockerVersion",
"Author",
"Architecture",
"Os",
"Size",
"VirtualSize",
"GraphDriver",
"RootFS",
"Metadata",
)
obj = json.loads(r.content)
self.assertIn(type(obj), (dict,))
for k in required_keys:
self.assertIn(k, obj)
_ = parse(obj["Created"])
def test_delete_image_compat(self):
r = requests.delete(PODMAN_URL + "/v1.40/images/alpine?force=true")
self.assertEqual(r.status_code, 200, r.text)
obj = json.loads(r.content)
self.assertIn(type(obj), (list,))
def test_pull(self):
r = requests.post(_url("/images/pull?reference=alpine"), timeout=15)
self.assertEqual(r.status_code, 200, r.status_code)
text = r.text
keys = {
"error": False,
"id": False,
"images": False,
"stream": False,
}
        # Read and record stanzas from the pull output
for line in str.splitlines(text):
obj = json.loads(line)
key_list = list(obj.keys())
for k in key_list:
keys[k] = True
self.assertFalse(keys["error"], "Expected no errors")
self.assertTrue(keys["id"], "Expected to find id stanza")
self.assertTrue(keys["images"], "Expected to find images stanza")
        self.assertTrue(keys["stream"], "Expected to find stream progress stanzas")
def test_search_compat(self):
        # This test has hung when registries were unresponsive, so run the search in a separate process with a hard timeout
def do_search():
r = requests.get(PODMAN_URL + "/v1.40/images/search?term=alpine", timeout=5)
self.assertEqual(r.status_code, 200, r.text)
objs = json.loads(r.text)
self.assertIn(type(objs), (list,))
search = Process(target=do_search)
search.start()
search.join(timeout=10)
self.assertFalse(search.is_alive(), "/images/search took too long")
def test_ping(self):
r = requests.get(PODMAN_URL + "/_ping")
self.assertEqual(r.status_code, 200, r.text)
r = requests.head(PODMAN_URL + "/_ping")
self.assertEqual(r.status_code, 200, r.text)
r = requests.get(_url("/_ping"))
self.assertEqual(r.status_code, 200, r.text)
        r = requests.head(_url("/_ping"))
self.assertEqual(r.status_code, 200, r.text)
def test_history_compat(self):
r = requests.get(PODMAN_URL + "/v1.40/images/alpine/history")
self.assertEqual(r.status_code, 200, r.text)
# See https://docs.docker.com/engine/api/v1.40/#operation/ImageHistory
required_keys = ("Id", "Created", "CreatedBy", "Tags", "Size", "Comment")
objs = json.loads(r.content)
self.assertIn(type(objs), (list,))
for o in objs:
self.assertIsInstance(o, dict)
for k in required_keys:
self.assertIn(k, o)
def test_network_compat(self):
name = "Network_" + "".join(random.choice(string.ascii_letters) for i in range(10))
# Cannot test for 0 existing networks because default "podman" network always exists
create = requests.post(PODMAN_URL + "/v1.40/networks/create", json={"Name": name})
self.assertEqual(create.status_code, 201, create.content)
obj = json.loads(create.content)
self.assertIn(type(obj), (dict,))
self.assertIn("Id", obj)
ident = obj["Id"]
self.assertNotEqual(name, ident)
ls = requests.get(PODMAN_URL + "/v1.40/networks")
self.assertEqual(ls.status_code, 200, ls.content)
objs = json.loads(ls.content)
self.assertIn(type(objs), (list,))
found = False
for network in objs:
if network["Name"] == name:
found = True
self.assertTrue(found, f"Network {name} not found")
inspect = requests.get(PODMAN_URL + f"/v1.40/networks/{ident}")
self.assertEqual(inspect.status_code, 200, inspect.content)
        obj = json.loads(inspect.content)
self.assertIn(type(obj), (dict,))
inspect = requests.delete(PODMAN_URL + f"/v1.40/networks/{ident}")
self.assertEqual(inspect.status_code, 204, inspect.content)
inspect = requests.get(PODMAN_URL + f"/v1.40/networks/{ident}")
self.assertEqual(inspect.status_code, 404, inspect.content)
prune = requests.post(PODMAN_URL + "/v1.40/networks/prune")
self.assertEqual(prune.status_code, 405, prune.content)
def test_volumes_compat(self):
name = "Volume_" + "".join(random.choice(string.ascii_letters) for i in range(10))
ls = requests.get(PODMAN_URL + "/v1.40/volumes")
self.assertEqual(ls.status_code, 200, ls.content)
# See https://docs.docker.com/engine/api/v1.40/#operation/VolumeList
required_keys = (
"Volumes",
"Warnings",
)
obj = json.loads(ls.content)
self.assertIn(type(obj), (dict,))
for k in required_keys:
self.assertIn(k, obj)
create = requests.post(PODMAN_URL + "/v1.40/volumes/create", json={"Name": name})
self.assertEqual(create.status_code, 201, create.content)
# See https://docs.docker.com/engine/api/v1.40/#operation/VolumeCreate
# and https://docs.docker.com/engine/api/v1.40/#operation/VolumeInspect
required_keys = (
"Name",
"Driver",
"Mountpoint",
"Labels",
"Scope",
"Options",
)
obj = json.loads(create.content)
self.assertIn(type(obj), (dict,))
for k in required_keys:
self.assertIn(k, obj)
self.assertEqual(obj["Name"], name)
inspect = requests.get(PODMAN_URL + f"/v1.40/volumes/{name}")
self.assertEqual(inspect.status_code, 200, inspect.content)
        obj = json.loads(inspect.content)
self.assertIn(type(obj), (dict,))
for k in required_keys:
self.assertIn(k, obj)
rm = requests.delete(PODMAN_URL + f"/v1.40/volumes/{name}")
self.assertEqual(rm.status_code, 204, rm.content)
prune = requests.post(PODMAN_URL + "/v1.40/volumes/prune")
self.assertEqual(prune.status_code, 200, prune.content)
if __name__ == "__main__":
unittest.main()
|
thread_pool.py
|
# SPDX-FileCopyrightText: 2020 Splunk Inc.
#
# SPDX-License-Identifier: Apache-2.0
"""
A simple thread pool implementation
"""
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import range
from builtins import object
import threading
import queue
import multiprocessing
import traceback
from time import time
from splunktalib.common import log
class ThreadPool(object):
"""
A simple thread pool implementation
"""
_high_watermark = 0.2
_resize_window = 10
def __init__(self, min_size=1, max_size=128, task_queue_size=1024, daemon=True):
assert task_queue_size
if not min_size or min_size <= 0:
min_size = multiprocessing.cpu_count()
if not max_size or max_size <= 0:
max_size = multiprocessing.cpu_count() * 8
self._min_size = min_size
self._max_size = max_size
self._daemon = daemon
self._work_queue = queue.Queue(task_queue_size)
self._thrs = []
for _ in range(min_size):
thr = threading.Thread(target=self._run)
self._thrs.append(thr)
self._admin_queue = queue.Queue()
self._admin_thr = threading.Thread(target=self._do_admin)
self._last_resize_time = time()
self._last_size = min_size
self._lock = threading.Lock()
self._occupied_threads = 0
self._count_lock = threading.Lock()
self._started = False
def start(self):
"""
Start threads in the pool
"""
with self._lock:
if self._started:
return
self._started = True
for thr in self._thrs:
thr.daemon = self._daemon
thr.start()
self._admin_thr.start()
log.logger.info("ThreadPool started.")
def tear_down(self):
"""
Tear down thread pool
"""
with self._lock:
if not self._started:
return
self._started = False
for thr in self._thrs:
self._work_queue.put(None, block=True)
self._admin_queue.put(None)
if not self._daemon:
log.logger.info("Wait for threads to stop.")
for thr in self._thrs:
thr.join()
self._admin_thr.join()
log.logger.info("ThreadPool stopped.")
def enqueue_funcs(self, funcs, block=True):
"""
        run jobs in a fire-and-forget way, no result will be handed
        back to clients
:param funcs: tuple/list-like or generator like object, func shall be
callable
"""
if not self._started:
log.logger.info("ThreadPool has already stopped.")
return
for func in funcs:
self._work_queue.put(func, block)
def apply_async(self, func, args=(), kwargs=None, callback=None):
"""
:param func: callable
:param args: free params
:param kwargs: named params
        :param callback: called when func finishes without raising an exception
        :return: AsyncResult; clients can poll or wait for the result through it
"""
if not self._started:
log.logger.info("ThreadPool has already stopped.")
return None
res = AsyncResult(func, args, kwargs, callback)
self._work_queue.put(res)
return res
def apply(self, func, args=(), kwargs=None):
"""
:param func: callable
:param args: free params
:param kwargs: named params
        :return: whatever func returns
"""
if not self._started:
log.logger.info("ThreadPool has already stopped.")
return None
res = self.apply_async(func, args, kwargs)
return res.get()
def size(self):
return self._last_size
def resize(self, new_size):
"""
Resize the pool size, spawn or destroy threads if necessary
"""
if new_size <= 0:
return
if self._lock.locked() or not self._started:
            log.logger.info(
                "Cannot resize the thread pool while it is locked or stopped, do nothing"
            )
return
with self._lock:
self._remove_exited_threads_with_lock()
size = self._last_size
self._last_size = new_size
if new_size > size:
for _ in range(new_size - size):
thr = threading.Thread(target=self._run)
thr.daemon = self._daemon
thr.start()
self._thrs.append(thr)
elif new_size < size:
for _ in range(size - new_size):
self._work_queue.put(None)
log.logger.info("Finished ThreadPool resizing. New size=%d", new_size)
def _remove_exited_threads_with_lock(self):
"""
        Join threads that have exited since the last call to resize
"""
joined_thrs = set()
for thr in self._thrs:
if not thr.is_alive():
try:
if not thr.daemon:
thr.join(timeout=0.5)
joined_thrs.add(thr.ident)
except RuntimeError:
pass
if joined_thrs:
live_thrs = []
for thr in self._thrs:
if thr.ident not in joined_thrs:
live_thrs.append(thr)
self._thrs = live_thrs
def _do_resize_according_to_loads(self):
if (
self._last_resize_time
and time() - self._last_resize_time < self._resize_window
):
return
thr_size = self._last_size
free_thrs = thr_size - self._occupied_threads
work_size = self._work_queue.qsize()
log.logger.debug(
"current_thr_size=%s, free_thrs=%s, work_size=%s",
thr_size,
free_thrs,
work_size,
)
if work_size and work_size > free_thrs:
if thr_size < self._max_size:
thr_size = min(thr_size * 2, self._max_size)
self.resize(thr_size)
elif free_thrs > 0:
free = free_thrs * 1.0
if free / thr_size >= self._high_watermark and free_thrs >= 2:
# 20 % thrs are idle, tear down half of the idle ones
thr_size = thr_size - int(free_thrs // 2)
if thr_size > self._min_size:
self.resize(thr_size)
self._last_resize_time = time()
def _do_admin(self):
admin_q = self._admin_queue
resize_win = self._resize_window
while 1:
try:
wakup = admin_q.get(timeout=resize_win + 1)
except queue.Empty:
self._do_resize_according_to_loads()
continue
if wakup is None:
break
else:
self._do_resize_according_to_loads()
log.logger.info(
"ThreadPool admin thread=%s stopped.", threading.current_thread().getName()
)
def _run(self):
"""
Threads callback func, run forever to handle jobs from the job queue
"""
work_queue = self._work_queue
count_lock = self._count_lock
while 1:
log.logger.debug("Going to get job")
func = work_queue.get()
if func is None:
break
if not self._started:
break
log.logger.debug("Going to exec job")
with count_lock:
self._occupied_threads += 1
try:
func()
except Exception:
log.logger.error(traceback.format_exc())
with count_lock:
self._occupied_threads -= 1
log.logger.debug("Done with exec job")
log.logger.info("Thread work_queue_size=%d", work_queue.qsize())
log.logger.debug(
"Worker thread %s stopped.", threading.current_thread().getName()
)
class AsyncResult(object):
def __init__(self, func, args, kwargs, callback):
self._func = func
self._args = args
self._kwargs = kwargs
self._callback = callback
self._q = queue.Queue()
def __call__(self):
try:
if self._args and self._kwargs:
res = self._func(*self._args, **self._kwargs)
elif self._args:
res = self._func(*self._args)
elif self._kwargs:
res = self._func(**self._kwargs)
else:
res = self._func()
except Exception as e:
self._q.put(e)
return
else:
self._q.put(res)
if self._callback is not None:
self._callback()
def get(self, timeout=None):
"""
Return the result when it arrives. If timeout is not None and the
result does not arrive within timeout seconds then
multiprocessing.TimeoutError is raised. If the remote call raised an
exception then that exception will be reraised by get().
"""
try:
res = self._q.get(timeout=timeout)
except queue.Empty:
raise multiprocessing.TimeoutError("Timed out")
if isinstance(res, Exception):
raise res
return res
def wait(self, timeout=None):
"""
Wait until the result is available or until timeout seconds pass.
"""
try:
res = self._q.get(timeout=timeout)
except queue.Empty:
pass
else:
self._q.put(res)
def ready(self):
"""
Return whether the call has completed.
"""
        return not self._q.empty()
def successful(self):
"""
Return whether the call completed without raising an exception.
Will raise AssertionError if the result is not ready.
"""
if not self.ready():
raise AssertionError("Function is not ready")
res = self._q.get()
self._q.put(res)
if isinstance(res, Exception):
return False
return True
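# The block below is an illustrative usage sketch, not part of the original
# module: it shows the intended flow of ThreadPool with apply_async/AsyncResult
# plus a fire-and-forget job via enqueue_funcs. It assumes splunktalib's logger
# is importable, exactly as the module's own import at the top already requires.
if __name__ == "__main__":
    def _square(x):
        return x * x
    pool = ThreadPool(min_size=2, max_size=4)
    pool.start()
    async_res = pool.apply_async(_square, args=(3,))
    print(async_res.get(timeout=5))  # prints 9 once a worker has run the job
    pool.enqueue_funcs([lambda: _square(5)])  # fire-and-forget, result discarded
    pool.tear_down()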
|
start.py
|
# Copyright BigchainDB GmbH and BigchainDB contributors
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
import logging
import setproctitle
import bigchaindb
from bigchaindb.lib import BigchainDB
from bigchaindb.core import App
from bigchaindb.web import server, websocket_server
from bigchaindb import event_stream
from bigchaindb.events import Exchange, EventTypes
from bigchaindb.utils import Process
logger = logging.getLogger(__name__)
BANNER = """
****************************************************************************
* *
* ┏┓ ╻┏━╸┏━╸╻ ╻┏━┓╻┏┓╻╺┳┓┏┓ ┏━┓ ┏━┓ ╺┳┓┏━╸╻ ╻ *
* ┣┻┓┃┃╺┓┃ ┣━┫┣━┫┃┃┗┫ ┃┃┣┻┓ ┏━┛ ┃┃┃ ┃┃┣╸ ┃┏┛ *
* ┗━┛╹┗━┛┗━╸╹ ╹╹ ╹╹╹ ╹╺┻┛┗━┛ ┗━╸╹┗━┛╹╺┻┛┗━╸┗┛ *
* codename "fluffy cat" *
* Initialization complete. BigchainDB Server is ready and waiting. *
* *
* You can send HTTP requests via the HTTP API documented in the *
* BigchainDB Server docs at: *
* https://bigchaindb.com/http-api *
* *
* Listening to client connections on: {:<15} *
* *
****************************************************************************
"""
def start():
# Exchange object for event stream api
logger.info('Starting BigchainDB')
exchange = Exchange()
# start the web api
app_server = server.create_server(
settings=bigchaindb.config['server'],
log_config=bigchaindb.config['log'],
bigchaindb_factory=BigchainDB)
p_webapi = Process(name='bigchaindb_webapi', target=app_server.run, daemon=True)
p_webapi.start()
# start message
logger.info(BANNER.format(bigchaindb.config['server']['bind']))
# start websocket server
p_websocket_server = Process(name='bigchaindb_ws',
target=websocket_server.start,
daemon=True,
args=(exchange.get_subscriber_queue(EventTypes.BLOCK_VALID),))
p_websocket_server.start()
# connect to tendermint event stream
p_websocket_client = Process(name='bigchaindb_ws_to_tendermint',
target=event_stream.start,
daemon=True,
args=(exchange.get_publisher_queue(),))
p_websocket_client.start()
p_exchange = Process(name='bigchaindb_exchange', target=exchange.run, daemon=True)
p_exchange.start()
    # We need to import this after spawning the web server
    # because importing ABCIServer will monkey-patch all sockets
    # for gevent.
from abci import ABCIServer
setproctitle.setproctitle('bigchaindb')
# Start the ABCIServer
app = ABCIServer(app=App())
app.run()
if __name__ == '__main__':
start()
|
main.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import time
import json
import telegram.ext
import telegram
import sys
import datetime
import os
import logging
import threading
# reload(sys)
# sys.setdefaultencoding('utf8')
Version_Code = 'v1.0.0'
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
PATH = os.path.dirname(os.path.realpath(__file__)) + '/'
CONFIG = json.loads(open(PATH + 'config.json', 'r').read())
DATA_LOCK = False
submission_list = json.loads(open(PATH + 'data.json', 'r').read())
def save_data():
global DATA_LOCK
while DATA_LOCK:
time.sleep(0.05)
DATA_LOCK = True
f = open(PATH + 'data.json', 'w')
f.write(json.dumps(submission_list, ensure_ascii=False))
f.close()
DATA_LOCK = False
def save_config():
f = open(PATH + 'config.json', 'w')
f.write(json.dumps(CONFIG, indent=4))
f.close()
updater = telegram.ext.Updater(token=CONFIG['Token'])
dispatcher = updater.dispatcher
me = updater.bot.get_me()
CONFIG['ID'] = me.id
CONFIG['Username'] = '@' + me.username
print('Starting... (ID: ' + str(CONFIG['ID']) + ', Username: ' \
+ CONFIG['Username'] + ')')
def process_msg(bot, update):
if update.channel_post != None:
return
if update.message.chat_id == CONFIG['Group_ID'] \
and update.message.reply_to_message != None:
if update.message.reply_to_message.from_user.id == CONFIG['ID'] \
and (update.message.reply_to_message.forward_from != None
or update.message.reply_to_message.forward_from_chat
!= None):
msg = update.message.reply_to_message
global submission_list
if submission_list[str(CONFIG['Group_ID']) + ':'
+ str(msg.message_id)]['posted'] == True:
return
if submission_list[str(CONFIG['Group_ID']) + ':'
+ str(msg.message_id)]['type'] == 'real':
post = real_name_post(bot, msg,
update.message.from_user)
elif submission_list[str(CONFIG['Group_ID']) + ':'
+ str(msg.message_id)]['type'] \
== 'anonymous':
post = anonymous_post(bot, msg,
update.message.from_user)
if update.message.text != None:
bot.send_message(chat_id=CONFIG['Publish_Channel_ID'],
text=update.message.text,
reply_to_message_id=post.message_id)
return
if update.message.from_user.id == update.message.chat_id:
markup = \
telegram.InlineKeyboardMarkup([[telegram.InlineKeyboardButton("是"
, callback_data='submission_type:real'),
telegram.InlineKeyboardButton("否",
callback_data='submission_type:anonymous')],
[telegram.InlineKeyboardButton("取消投稿",
callback_data='cancel:submission')]])
if update.message.forward_from != None \
or update.message.forward_from_chat != None:
if update.message.forward_from_chat != None:
markup = \
telegram.InlineKeyboardMarkup([[telegram.InlineKeyboardButton("是"
, callback_data='submission_type:real')],
[telegram.InlineKeyboardButton("取消投稿",
callback_data='cancel:submission')]])
elif update.message.forward_from.id \
!= update.message.from_user.id:
markup = \
telegram.InlineKeyboardMarkup([[telegram.InlineKeyboardButton("是"
, callback_data='submission_type:real')],
[telegram.InlineKeyboardButton("取消投稿",
callback_data='cancel:submission')]])
bot.send_message(chat_id=update.message.chat_id,
text="即将完成投稿...\n您是否想要保留消息来源(保留消息发送者用户名)",
reply_to_message_id=update.message.message_id,
reply_markup=markup)
def process_command(bot, update):
if update.channel_post != None:
return
command = update.message.text[1:].replace(CONFIG['Username'], ''
).lower()
if command == 'start':
bot.send_message(chat_id=update.message.chat_id,
text="""可接收的投稿类型:
文字
图片
音频/语音
视频
文件""")
return
if command == 'version':
bot.send_message(chat_id=update.message.chat_id,
text='Telegram Submission Bot\n'
+ Version_Code
+ '\nhttps://github.com/Netrvin/telegram-submission-bot'
)
return
if update.message.from_user.id == CONFIG['Admin']:
if command == 'setgroup':
CONFIG['Group_ID'] = update.message.chat_id
save_config()
bot.send_message(chat_id=update.message.chat_id,
text="已设置本群为审稿群")
return
def anonymous_post(bot, msg, editor):
if msg.audio != None:
r = bot.send_audio(chat_id=CONFIG['Publish_Channel_ID'],
audio=msg.audio, caption=msg.caption)
elif msg.document != None:
r = bot.send_document(chat_id=CONFIG['Publish_Channel_ID'],
document=msg.document,
caption=msg.caption)
elif msg.voice != None:
r = bot.send_voice(chat_id=CONFIG['Publish_Channel_ID'],
voice=msg.voice, caption=msg.caption)
elif msg.video != None:
r = bot.send_video(chat_id=CONFIG['Publish_Channel_ID'],
video=msg.video, caption=msg.caption)
elif msg.photo:
r = bot.send_photo(chat_id=CONFIG['Publish_Channel_ID'],
photo=msg.photo[0], caption=msg.caption)
else:
r = bot.send_message(chat_id=CONFIG['Publish_Channel_ID'],
text=msg.text_markdown,
parse_mode=telegram.ParseMode.MARKDOWN)
submission_list[str(CONFIG['Group_ID']) + ':'
+ str(msg.message_id)]['posted'] = True
bot.edit_message_text(text="新投稿\n投稿人: ["
+ submission_list[str(CONFIG['Group_ID'])
+ ':' + str(msg.message_id)]['Sender_Name']
+ '](tg://user?id='
+ str(submission_list[str(CONFIG['Group_ID'])
+ ':' + str(msg.message_id)]['Sender_ID'])
+ """)
来源: 保留
审稿人: [""" + editor.name
+ '](tg://user?id=' + str(editor.id)
+ ")\n已采用", chat_id=CONFIG['Group_ID'],
parse_mode=telegram.ParseMode.MARKDOWN,
message_id=submission_list[str(CONFIG['Group_ID'
]) + ':' + str(msg.message_id)]['Markup_ID'])
bot.send_message(chat_id=submission_list[str(CONFIG['Group_ID'])
+ ':' + str(msg.message_id)]['Sender_ID'],
text="您的稿件已过审,感谢您对我们的支持",
reply_to_message_id=submission_list[str(CONFIG['Group_ID'
]) + ':' + str(msg.message_id)]['Original_MsgID'])
threading.Thread(target=save_data).start()
return r
def real_name_post(bot, msg, editor):
global submission_list
r = bot.forward_message(chat_id=CONFIG['Publish_Channel_ID'],
from_chat_id=CONFIG['Group_ID'],
message_id=msg.message_id)
submission_list[str(CONFIG['Group_ID']) + ':'
+ str(msg.message_id)]['posted'] = True
bot.edit_message_text(text="新投稿\n投稿人: ["
+ submission_list[str(CONFIG['Group_ID'])
+ ':' + str(msg.message_id)]['Sender_Name']
+ '](tg://user?id='
+ str(submission_list[str(CONFIG['Group_ID'])
+ ':' + str(msg.message_id)]['Sender_ID'])
+ """)
来源: 保留
审稿人: [""" + editor.name
+ '](tg://user?id=' + str(editor.id)
+ ")\n已采用", chat_id=CONFIG['Group_ID'],
parse_mode=telegram.ParseMode.MARKDOWN,
message_id=submission_list[str(CONFIG['Group_ID'
]) + ':' + str(msg.message_id)]['Markup_ID'])
bot.send_message(chat_id=submission_list[str(CONFIG['Group_ID'])
+ ':' + str(msg.message_id)]['Sender_ID'],
text="您的稿件已过审,感谢您对我们的支持",
reply_to_message_id=submission_list[str(CONFIG['Group_ID'
]) + ':' + str(msg.message_id)]['Original_MsgID'])
threading.Thread(target=save_data).start()
return r
def process_callback(bot, update):
if update.channel_post != None:
return
global submission_list
query = update.callback_query
if query.message.chat_id == CONFIG['Group_ID'] and query.data \
== 'receive:real':
real_name_post(bot, query.message.reply_to_message,
query.from_user)
return
if query.message.chat_id == CONFIG['Group_ID'] and query.data \
== 'receive:anonymous':
anonymous_post(bot, query.message.reply_to_message,
query.from_user)
return
if query.data == 'cancel:submission':
bot.edit_message_text(text="已取消投稿",
chat_id=query.message.chat_id,
message_id=query.message.message_id)
return
msg = "新投稿\n投稿人: [" + query.message.reply_to_message.from_user.name \
+ '](tg://user?id=' \
+ str(query.message.reply_to_message.from_user.id) + ")\n来源: "
fwd_msg = bot.forward_message(chat_id=CONFIG['Group_ID'],
from_chat_id=query.message.chat_id,
message_id=query.message.reply_to_message.message_id)
submission_list[str(CONFIG['Group_ID']) + ':'
+ str(fwd_msg.message_id)] = {}
submission_list[str(CONFIG['Group_ID']) + ':'
+ str(fwd_msg.message_id)]['posted'] = False
submission_list[str(CONFIG['Group_ID']) + ':'
+ str(fwd_msg.message_id)]['Sender_Name'] = \
query.message.reply_to_message.from_user.name
submission_list[str(CONFIG['Group_ID']) + ':'
+ str(fwd_msg.message_id)]['Sender_ID'] = \
query.message.reply_to_message.from_user.id
submission_list[str(CONFIG['Group_ID']) + ':'
+ str(fwd_msg.message_id)]['Original_MsgID'] = \
query.message.reply_to_message.message_id
if query.data == 'submission_type:real':
msg += "保留"
submission_list[str(CONFIG['Group_ID']) + ':'
+ str(fwd_msg.message_id)]['type'] = 'real'
markup = \
telegram.InlineKeyboardMarkup([[telegram.InlineKeyboardButton("采用"
, callback_data='receive:real')]])
markup_msg = bot.send_message(chat_id=CONFIG['Group_ID'],
text=msg, reply_to_message_id=fwd_msg.message_id,
reply_markup=markup,
parse_mode=telegram.ParseMode.MARKDOWN)
submission_list[str(CONFIG['Group_ID']) + ':'
+ str(fwd_msg.message_id)]['Markup_ID'] = \
markup_msg.message_id
elif query.data == 'submission_type:anonymous':
msg += "匿名"
submission_list[str(CONFIG['Group_ID']) + ':'
+ str(fwd_msg.message_id)]['type'] = 'anonymous'
markup = \
telegram.InlineKeyboardMarkup([[telegram.InlineKeyboardButton("采用"
, callback_data='receive:anonymous')]])
markup_msg = bot.send_message(chat_id=CONFIG['Group_ID'],
text=msg, reply_to_message_id=fwd_msg.message_id,
reply_markup=markup,
parse_mode=telegram.ParseMode.MARKDOWN)
submission_list[str(CONFIG['Group_ID']) + ':'
+ str(fwd_msg.message_id)]['Markup_ID'] = \
markup_msg.message_id
bot.edit_message_text(text="感谢您的投稿", chat_id=query.message.chat_id,
message_id=query.message.message_id)
threading.Thread(target=save_data).start()
dispatcher.add_handler(telegram.ext.MessageHandler(telegram.ext.Filters.text
| telegram.ext.Filters.audio
| telegram.ext.Filters.photo
| telegram.ext.Filters.video
| telegram.ext.Filters.voice
| telegram.ext.Filters.document, process_msg))
dispatcher.add_handler(telegram.ext.MessageHandler(telegram.ext.Filters.command,
process_command))
dispatcher.add_handler(telegram.ext.CallbackQueryHandler(process_callback))
updater.start_polling()
print('Started')
updater.idle()
print('Stopping...')
save_data()
print('Data saved.')
print('Stopped.')
|
util.py
|
import webbrowser
from threading import Thread
from time import sleep
import os
def open_browser_tab(url):
def _open_tab():
sleep(1)
webbrowser.open_new_tab(url)
thread = Thread(target=_open_tab)
thread.daemon = True
thread.start()
def list_pdf(path):
for (dirpath, dirnames, filenames) in os.walk(path):
for filename in filenames:
if os.path.splitext(filename)[1].lower() == '.pdf' and filename[0] != '.':
yield os.path.join(dirpath, filename)
|
web_app_mapper.py
|
import Queue
import threading
import os
import urllib2
threads = 10
target = "http://xperblueray.com"
directory = "/usr/blueray/Downloads"
filters = [".jpg", ".gif", ".png", ".css"]
os.chdir(directory)
web_paths = Queue.Queue()
for r,d,f in os.walk("."):
for files in f:
remote_path = "%s/%s" % (r,files)
if remote_path.startswith("."):
remote_path = remote_path[1:]
if os.path.splitext(files)[1] not in filters:
web_paths.put(remote_path)
def test_remote():
while not web_paths.empty():
path = web_paths.get()
url = "%s%s" % (target, path)
request = urllib2.Request(url)
try:
response = urllib2.urlopen(request)
content = response.read()
print "[%d] => %s" % (response.code,path)
response.close()
except urllib2.HTTPError as error:
pass
for i in range(threads):
print "Spawning thread: %d" % i
t = threading.Thread(target=test_remote)
t.start()
|
server.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""RPC server implementation.
Note
----
Server is TCP based with the following protocol:
- Initial handshake to the peer
- [RPC_MAGIC, keysize(int32), key-bytes]
- The key is in format
- {server|client}:device-type[:random-key] [-timeout=timeout]
"""
# pylint: disable=invalid-name
import os
import ctypes
import socket
import select
import struct
import logging
import threading
import multiprocessing
import time
import errno
import tvm._ffi
from tvm._ffi.base import py_str
from tvm._ffi.libinfo import find_lib_path
from tvm.runtime.module import load_module as _load_module
from tvm.contrib import utils
from tvm.contrib.popen_pool import PopenWorker
from . import _ffi_api
from . import base
# pylint: disable=unused-import
from . import testing
from .base import TrackerCode
logger = logging.getLogger("RPCServer")
def _server_env(load_library, work_path=None):
"""Server environment function return temp dir"""
if work_path:
temp = work_path
else:
temp = utils.tempdir()
# pylint: disable=unused-variable
@tvm._ffi.register_func("tvm.rpc.server.workpath", override=True)
def get_workpath(path):
return temp.relpath(path)
@tvm._ffi.register_func("tvm.rpc.server.load_module", override=True)
def load_module(file_name):
"""Load module from remote side."""
path = temp.relpath(file_name)
m = _load_module(path)
logger.info("load_module %s", path)
return m
@tvm._ffi.register_func("tvm.rpc.server.download_linked_module", override=True)
def download_linked_module(file_name):
"""Load module from remote side."""
# c++ compiler/linker
cc = os.environ.get("CXX", "g++")
# pylint: disable=import-outside-toplevel
path = temp.relpath(file_name)
if path.endswith(".o"):
# Extra dependencies during runtime.
from tvm.contrib import cc as _cc
_cc.create_shared(path + ".so", path, cc=cc)
path += ".so"
elif path.endswith(".tar"):
# Extra dependencies during runtime.
from tvm.contrib import cc as _cc, tar as _tar
tar_temp = utils.tempdir(custom_path=path.replace(".tar", ""))
_tar.untar(path, tar_temp.temp_dir)
files = [tar_temp.relpath(x) for x in tar_temp.listdir()]
_cc.create_shared(path + ".so", files, cc=cc)
path += ".so"
elif path.endswith(".dylib") or path.endswith(".so"):
pass
else:
raise RuntimeError("Do not know how to link %s" % file_name)
logger.info("Send linked module %s to client", path)
return bytearray(open(path, "rb").read())
libs = []
load_library = load_library.split(":") if load_library else []
for file_name in load_library:
file_name = find_lib_path(file_name)[0]
libs.append(ctypes.CDLL(file_name, ctypes.RTLD_GLOBAL))
logger.info("Load additional library %s", file_name)
temp.libs = libs
return temp
def _serve_loop(sock, addr, load_library, work_path=None):
"""Server loop"""
sockfd = sock.fileno()
temp = _server_env(load_library, work_path)
_ffi_api.ServerLoop(sockfd)
if not work_path:
temp.remove()
logger.info("Finish serving %s", addr)
def _parse_server_opt(opts):
# parse client options
ret = {}
for kv in opts:
if kv.startswith("-timeout="):
ret["timeout"] = float(kv[9:])
return ret
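# Illustrative sketch (not part of the original module): the client half of the
# handshake described in the module docstring, written against an already
# connected socket. "key" would be e.g. "client:<matchkey>"; the helpers used
# here (struct, base.recvall, py_str and the RPC_* codes) are the same ones this
# module already imports and uses in _accept_conn/_connect_proxy_loop.
def _example_client_handshake(sock, key):
    sock.sendall(struct.pack("<i", base.RPC_MAGIC))
    sock.sendall(struct.pack("<i", len(key)))
    sock.sendall(key.encode("utf-8"))
    code = struct.unpack("<i", base.recvall(sock, 4))[0]
    if code != base.RPC_CODE_SUCCESS:
        raise RuntimeError("handshake rejected with code %d" % code)
    keylen = struct.unpack("<i", base.recvall(sock, 4))[0]
    # the server answers with its own key, e.g. "server:<rpc_key> [options]"
    return py_str(base.recvall(sock, keylen))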
def _listen_loop(sock, port, rpc_key, tracker_addr, load_library, custom_addr):
"""Listening loop of the server."""
def _accept_conn(listen_sock, tracker_conn, ping_period=2):
"""Accept connection from the other places.
Parameters
----------
listen_sock: Socket
The socket used by listening process.
tracker_conn : connection to tracker
Tracker connection
ping_period : float, optional
            Ping the tracker every `ping_period` seconds if no connection is accepted.
"""
old_keyset = set()
# Report resource to tracker
if tracker_conn:
matchkey = base.random_key(rpc_key + ":")
base.sendjson(tracker_conn, [TrackerCode.PUT, rpc_key, (port, matchkey), custom_addr])
assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS
else:
matchkey = rpc_key
unmatch_period_count = 0
unmatch_timeout = 4
# Wait until we get a valid connection
while True:
if tracker_conn:
trigger = select.select([listen_sock], [], [], ping_period)
                if listen_sock not in trigger[0]:
base.sendjson(tracker_conn, [TrackerCode.GET_PENDING_MATCHKEYS])
pending_keys = base.recvjson(tracker_conn)
old_keyset.add(matchkey)
# if match key not in pending key set
# it means the key is acquired by a client but not used.
if matchkey not in pending_keys:
unmatch_period_count += 1
else:
unmatch_period_count = 0
# regenerate match key if key is acquired but not used for a while
if unmatch_period_count * ping_period > unmatch_timeout + ping_period:
logger.info("no incoming connections, regenerate key ...")
matchkey = base.random_key(rpc_key + ":", old_keyset)
base.sendjson(
tracker_conn, [TrackerCode.PUT, rpc_key, (port, matchkey), custom_addr]
)
assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS
unmatch_period_count = 0
continue
conn, addr = listen_sock.accept()
magic = struct.unpack("<i", base.recvall(conn, 4))[0]
if magic != base.RPC_MAGIC:
conn.close()
continue
keylen = struct.unpack("<i", base.recvall(conn, 4))[0]
key = py_str(base.recvall(conn, keylen))
arr = key.split()
expect_header = "client:" + matchkey
server_key = "server:" + rpc_key
if arr[0] != expect_header:
conn.sendall(struct.pack("<i", base.RPC_CODE_MISMATCH))
conn.close()
logger.warning("mismatch key from %s", addr)
continue
conn.sendall(struct.pack("<i", base.RPC_CODE_SUCCESS))
conn.sendall(struct.pack("<i", len(server_key)))
conn.sendall(server_key.encode("utf-8"))
return conn, addr, _parse_server_opt(arr[1:])
# Server logic
tracker_conn = None
while True:
try:
# step 1: setup tracker and report to tracker
if tracker_addr and tracker_conn is None:
tracker_conn = base.connect_with_retry(tracker_addr)
tracker_conn.sendall(struct.pack("<i", base.RPC_TRACKER_MAGIC))
magic = struct.unpack("<i", base.recvall(tracker_conn, 4))[0]
if magic != base.RPC_TRACKER_MAGIC:
raise RuntimeError("%s is not RPC Tracker" % str(tracker_addr))
# report status of current queue
cinfo = {"key": "server:" + rpc_key, "addr": (custom_addr, port)}
base.sendjson(tracker_conn, [TrackerCode.UPDATE_INFO, cinfo])
assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS
# step 2: wait for in-coming connections
conn, addr, opts = _accept_conn(sock, tracker_conn)
except (socket.error, IOError):
# retry when tracker is dropped
if tracker_conn:
tracker_conn.close()
tracker_conn = None
continue
except RuntimeError as exc:
raise exc
# step 3: serving
work_path = utils.tempdir()
logger.info("connection from %s", addr)
server_proc = multiprocessing.Process(
target=_serve_loop, args=(conn, addr, load_library, work_path)
)
server_proc.start()
# close from our side.
conn.close()
# wait until server process finish or timeout
server_proc.join(opts.get("timeout", None))
if server_proc.is_alive():
logger.info("Timeout in RPC session, kill..")
# pylint: disable=import-outside-toplevel
import psutil
parent = psutil.Process(server_proc.pid)
# terminate worker children
for child in parent.children(recursive=True):
child.terminate()
# terminate the worker
server_proc.terminate()
work_path.remove()
def _connect_proxy_loop(addr, key, load_library):
key = "server:" + key
retry_count = 0
max_retry = 5
retry_period = 5
while True:
try:
sock = socket.socket(base.get_addr_family(addr), socket.SOCK_STREAM)
sock.connect(addr)
sock.sendall(struct.pack("<i", base.RPC_MAGIC))
sock.sendall(struct.pack("<i", len(key)))
sock.sendall(key.encode("utf-8"))
magic = struct.unpack("<i", base.recvall(sock, 4))[0]
if magic == base.RPC_CODE_DUPLICATE:
raise RuntimeError("key: %s has already been used in proxy" % key)
if magic == base.RPC_CODE_MISMATCH:
logger.warning("RPCProxy do not have matching client key %s", key)
elif magic != base.RPC_CODE_SUCCESS:
raise RuntimeError("%s is not RPC Proxy" % str(addr))
keylen = struct.unpack("<i", base.recvall(sock, 4))[0]
remote_key = py_str(base.recvall(sock, keylen))
opts = _parse_server_opt(remote_key.split()[1:])
logger.info("connected to %s", str(addr))
process = multiprocessing.Process(target=_serve_loop, args=(sock, addr, load_library))
process.start()
sock.close()
process.join(opts.get("timeout", None))
if process.is_alive():
logger.info("Timeout in RPC session, kill..")
process.terminate()
retry_count = 0
except (socket.error, IOError) as err:
retry_count += 1
logger.warning("Error encountered %s, retry in %g sec", str(err), retry_period)
if retry_count > max_retry:
raise RuntimeError("Maximum retry error: last error: %s" % str(err))
time.sleep(retry_period)
class PopenRPCServerState(object):
"""Internal PopenRPCServer State"""
current = None
def __init__(
self,
host,
port=9091,
port_end=9199,
is_proxy=False,
tracker_addr=None,
key="",
load_library=None,
custom_addr=None,
silent=False,
):
# start update
self.host = host
self.port = port
self.libs = []
self.custom_addr = custom_addr
if silent:
logger.setLevel(logging.ERROR)
if not is_proxy:
sock = socket.socket(base.get_addr_family((host, port)), socket.SOCK_STREAM)
self.port = None
for my_port in range(port, port_end):
try:
sock.bind((host, my_port))
self.port = my_port
break
except socket.error as sock_err:
if sock_err.errno in [errno.EADDRINUSE]:
continue
raise sock_err
if not self.port:
raise ValueError("cannot bind to any port in [%d, %d)" % (port, port_end))
logger.info("bind to %s:%d", host, self.port)
sock.listen(1)
self.sock = sock
self.thread = threading.Thread(
target=_listen_loop,
args=(self.sock, self.port, key, tracker_addr, load_library, self.custom_addr),
)
self.thread.start()
else:
self.thread = threading.Thread(
target=_connect_proxy_loop, args=((host, port), key, load_library)
)
self.thread.start()
def _popen_start_rpc_server(
host,
port=9091,
port_end=9199,
is_proxy=False,
tracker_addr=None,
key="",
load_library=None,
custom_addr=None,
silent=False,
no_fork=False,
):
if no_fork:
multiprocessing.set_start_method("spawn")
# This is a function that will be sent to the
# Popen worker to run on a separate process.
# Create and start the server in a different thread
state = PopenRPCServerState(
host, port, port_end, is_proxy, tracker_addr, key, load_library, custom_addr, silent
)
PopenRPCServerState.current = state
# returns the port so that the main can get the port number.
return state.port
class Server(object):
"""Start RPC server on a separate process.
This is a simple python implementation based on multi-processing.
It is also possible to implement a similar C based server with
TVM runtime which does not depend on the python.
Parameters
----------
host : str
The host url of the server.
port : int
The port to be bind to
port_end : int, optional
The end port to search
is_proxy : bool, optional
Whether the address specified is a proxy.
If this is true, the host and port actually corresponds to the
address of the proxy server.
tracker_addr: Tuple (str, int) , optional
        The address of RPC Tracker in tuple(host, port) format.
        If it is not None, the server will register itself to the tracker.
key : str, optional
The key used to identify the device type in tracker.
load_library : str, optional
List of additional libraries to be loaded during execution.
custom_addr: str, optional
Custom IP Address to Report to RPC Tracker
silent: bool, optional
Whether run this server in silent mode.
no_fork: bool, optional
Whether forbid fork in multiprocessing.
"""
def __init__(
self,
host="0.0.0.0",
port=9091,
port_end=9199,
is_proxy=False,
tracker_addr=None,
key="",
load_library=None,
custom_addr=None,
silent=False,
no_fork=False,
):
try:
if _ffi_api.ServerLoop is None:
raise RuntimeError("Please compile with USE_RPC=1")
except NameError:
raise RuntimeError("Please compile with USE_RPC=1")
self.proc = PopenWorker()
# send the function
self.proc.send(
_popen_start_rpc_server,
[
host,
port,
port_end,
is_proxy,
tracker_addr,
key,
load_library,
custom_addr,
silent,
no_fork,
],
)
# receive the port
self.port = self.proc.recv()
self.host = host
def terminate(self):
"""Terminate the server process"""
if self.proc:
self.proc.kill()
self.proc = None
def __del__(self):
self.terminate()
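if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module), assuming TVM was
    # built with USE_RPC=1: start a standalone server on localhost, connect to
    # it through the high-level client API, and shut it down again. The port
    # range below is an arbitrary choice for the sketch.
    from tvm import rpc as _rpc
    _demo_server = Server(host="127.0.0.1", port=9000, port_end=9010)
    _remote = _rpc.connect("127.0.0.1", _demo_server.port)
    print(_remote.cpu(0))  # a remote device handle shows the session is live
    _demo_server.terminate()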
|
tk_zzc.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__author__ = 'AJay'
__mtime__ = '2019/6/22 0022'
"""
import psutil
from tkinter import messagebox
from time import sleep
import threading
import tkinter as tk
import os
from queue import Queue
from tkinter import *
from tkinter import scrolledtext
from tkinter import messagebox
from tkinter.filedialog import askdirectory
class MainPage(object):
def __init__(self, master):
self.window = master
sw = self.window.winfo_screenwidth()
sh = self.window.winfo_screenheight()
ww = 400
wh = 650
x = (sw - ww) / 2
y = (sh - wh) / 2
        self.window.geometry('%dx%d+%d+%d' % (ww, wh, x, y))  # main window size and position
self.domainVar = tk.StringVar()
self.spidermunVar = tk.IntVar()
self.threadnumVar = tk.IntVar()
self.timeVar = tk.IntVar()
self.save_pathVar = tk.StringVar()
self.base_path=os.path.abspath(os.path.dirname(__file__))
self.goon_flag = 0
self.logMessage = Queue()
self.config_queue= Queue()
self.flag_queue = Queue()
self.url_queue = Queue()
        self.create_page()  # build the UI
        self.show_logs()  # start the log thread
        self.init_monitor()  # start the progress-bar monitor
def create_page(self):
        self.loading()  # progress bar
        self.config()  # configuration panel
        self.log()  # log panel
        self.meun()  # menu
def monitor_task(self):
while True:
start_url = self.url_queue.get()
self.url_queue.put(start_url)
print('进度条时间',self.url_queue.qsize())
sleep(0.8)
self.change_schedule(self.url_queue.qsize(),self.spidernum)
self.url_queue.task_done()
def change_schedule(self, now_schedule, all_schedule):
self.canvas.coords(self.fill_rec, (5, 5, 6 + (1- now_schedule / all_schedule) * 325, 25))
self.window.update()
self.loadingVar.set(str(round(now_schedule / all_schedule * 100, 2)) + '%')
def loading(self):
        loadingInformation = tk.LabelFrame(self.window, text="爬取任务进度", padx=10, pady=5)  # 10px horizontal and vertical padding
loadingInformation.place(x=3, y=20)
        frame = Frame(loadingInformation).grid(row=0, column=0)  # reposition this frame as needed when used
self.canvas = Canvas(loadingInformation, width=330, height=30, bg="white")
self.canvas.grid(row=0, column=0)
self.loadingVar = StringVar()
        # progress bar and completion percentage
self.out_rec = self.canvas.create_rectangle(5, 5, 325, 25, outline="green", width=1)
self.fill_rec = self.canvas.create_rectangle(5, 5, 5, 25, outline="", width=0, fill="green")
tk.Label(loadingInformation, textvariable=self.loadingVar).grid(column=1, row=0, sticky='w', pady=2, padx=2) #
self.loadingVar.set(str(00.00) + '%')
def config(self):
        Config = tk.LabelFrame(self.window, text="配置", padx=25, pady=2)  # horizontal/vertical padding
Config.place(x=3, y=100)
tk.Label(Config, text="解析域名:").grid(column=0, row=0, sticky='w', pady=2) #
tk.Label(Config, text="解析次数:").grid(column=0, row=1, sticky='w', pady=2) #
tk.Label(Config, text="爬取线程:").grid(column=0, row=2, sticky='w', pady=2) #
        tk.Label(Config, text="爬取频率/s:").grid(column=0, row=3, sticky='w', pady=2)  # crawl-frequency label
        tk.Label(Config, text="保存路径:").grid(column=0, row=4, sticky='w', pady=2)  # save-path label
        with open(os.path.join(self.base_path, 'config.ini'), 'r+') as f:  # read the saved configuration
config =eval(f.read())
self.domainVar.set(config.get('domain','https://www.baidu.com/'))
self.spidermunVar.set(config.get('spidernum',1000))
self.threadnumVar.set(config.get('threadnum',30))
self.timeVar.set(config.get('timenum',0))
            self.save_pathVar.set(config.get('path', os.path.join(self.base_path, 'title')))  # user-adjustable defaults
self.domainEntry = tk.Entry(Config, textvariable=self.domainVar, width=22)
self.domainEntry.grid(column=1, row=0, pady=2)
self.spider_numEntry = tk.Entry(Config, textvariable=self.spidermunVar, width=22)
self.spider_numEntry.grid(column=1, row=1, pady=2)
self.threadEntry = tk.Entry(Config, textvariable=self.threadnumVar, width=22)
self.threadEntry.grid(column=1, row=2, pady=2)
self.timeEntry = tk.Entry(Config, textvariable=self.timeVar, width=22)
self.timeEntry.grid(column=1, row=3, pady=2)
self.pathEntry = tk.Entry(Config, textvariable=self.save_pathVar, width=22)
self.pathEntry.grid(column=1, row=4, pady=2)
self.pathBtn = tk.Button(Config, text="选择路径", command=self.check_path)
self.pathBtn.grid(column=2, row=4, pady=2, ipadx=15, padx=10)
self.config_queue.put(config)
        Config_start = tk.LabelFrame(self.window, text="操作", padx=10, pady=2)  # horizontal/vertical padding
Config_start.place(x=3, y=275)
tk.Button(Config_start, text="更新配置", command=self.updata_config).grid(column=0, row=0, pady=2, ipadx=40,
padx=15)
self.startBtn = tk.Button(Config_start, text="开始采集", command=self.start_spider)
self.startBtn.grid(column=0, row=1, pady=2, ipadx=40, padx=22)
self.stopBtn = tk.Button(Config_start, text="暂停采集", command=self.stop_spider)
self.stopBtn.config(state=tk.DISABLED)
self.stopBtn.grid(column=1, row=1, pady=2, ipadx=40, padx=22)
    def log(self):  # log panel
self.logMessage.put('欢迎使用【新闻网采集器器定制版ByAjay13】')
version_message = " 2019年6月20日 版本:V0.1 \n" \
" - 支持爬虫暂停,继续 \n" \
" - 支持进度可视化 \n" \
" - 支持多线程爬取 "
self.logMessage.put(version_message)
        logInformation = tk.LabelFrame(self.window, text="日志", padx=10, pady=10)  # horizontal/vertical padding
logInformation.place(x=3, y=380)
self.logInformation_Window = scrolledtext.ScrolledText(logInformation, width=47, height=13, padx=10, pady=10,
wrap=tk.WORD)
self.logInformation_Window.grid()
    # menu bar
def meun(self):
menubar = tk.Menu(self.window)
aboutmemu = tk.Menu(menubar, tearoff=0)
menubar.add_cascade(label='关于', menu=aboutmemu)
aboutmemu.add_command(label='软件说明', command=self.show_Description)
aboutmemu.add_command(label='版本', command=self.show_Version)
aboutmemu.add_command(label='开发者', command=self.show_Developer)
window.config(menu=menubar)
def show_Description(self):
Description(self.window)
def show_Version(self):
Version(self.window)
def show_Developer(self):
Developer(self.window)
def check_path(self):
path_ = askdirectory()
        if path_ != '':
self.save_pathVar.set(path_)
    # update the configuration
def updata_config(self):
config=dict()
self.config_queue.queue.clear()
domain = self.domainEntry.get()
spidernum= self.spider_numEntry.get()
threadnum = self.threadEntry.get()
timenum = self.timeEntry.get()
path = self.pathEntry.get()
if domain=='' or spidernum=='' or threadnum=='' or timenum=='' or path=='':
tk.messagebox.showerror(title='配置', message='配置信息不能为空!')
self.logMessage.put('配置信息不能为空')
else:
config.update({"domain":domain,"spidernum":int(spidernum),"threadnum":int(threadnum),"timenum":int(timenum),"path":path})
with open(os.path.join(self.base_path,'config.ini'),'w')as f:
f.write(str(config))
self.config_queue.put(config)
self.logMessage.put('更新配置:[域名]{};[数量]{};[线程]{};[频率]{};[路径]{};'.format(domain,spidernum,threadnum,timenum,path))
tk.messagebox.showinfo(title='配置', message='配置信息更新成功!')
def log_queue(self):
while True:
log = self.logMessage.get()
self.logInformation_Window.insert(END, '【{log}】'.format( log=log) + '\n')
self.logInformation_Window.see(END)
def show_logs(self):
Tlog_queue = threading.Thread(target=self.log_queue, args=())
Tlog_queue.daemon = True
Tlog_queue.start()
def start_spider(self):
config = self.config_queue.get()
self.spidernum = config.get('spidernum')
self.config_queue.put(config)
self.flag_queue.queue.clear()
self.flag_queue.put(1)
self.startBtn.config(state=tk.DISABLED,text='正在采集',bg='coral',)
self.stopBtn.config(state=tk.NORMAL,text='暂停采集',bg='#F5F5F5',)
from zz_spider import ZZSpider
zzs = ZZSpider()
t = threading.Thread(target=zzs.run, args=(config,self.url_queue,self.flag_queue,self.logMessage,self.startBtn,self.stopBtn,tk))# config,url_queue,flag_queue,logMessage
t.start()
self.logMessage.put('开始采集')
print('下发完成')
def stop_spider(self):
if self.goon_flag==0:
self.flag_queue.queue.clear()
self.flag_queue.put(0)
self.stopBtn.config(state=tk.NORMAL, text='继续采集', bg='coral', )
self.startBtn.config(state=tk.DISABLED, text='暂停采集', bg='#F5F5F5', )
self.goon_flag+=1
self.logMessage.put('暂停采集')
else:
self.flag_queue.queue.clear()
self.flag_queue.put(1)
self.stopBtn.config(state=tk.NORMAL, text='暂停采集', bg='#F5F5F5', )
self.startBtn.config(state=tk.DISABLED, text='正在采集', bg='coral', )
self.goon_flag-=1
self.logMessage.put('继续采集')
    # Start a thread that records the url_queue length and feeds it back as a progress signal
def monitor_thread(self):
m = threading.Thread(target=self.monitor_task)
m.setDaemon(True)
m.start()
self.url_queue.join()
def init_monitor(self):
t = threading.Thread(target=self.monitor_thread )
t.start()
# Kill a process by name
def kill_pro(name):
for proc in psutil.process_iter():
# print("pid-%d,name:%s" % (proc.pid, proc.name()))
if proc.name().find(name) == 0:
print('杀死进程')
killcmd = 'taskkill /f /im {}'.format(proc.name())
os.system(killcmd)
print(killcmd)
def close_windows():
if messagebox.askyesno(title='关闭程序', message='是否关闭程序?'):
window.destroy()
        # call the process-kill helper
print('调用杀死进程的方法')
kill_pro(name='tk_zzc')
# Usage instructions window
class Description():
    '''
    Window describing the software and how to use it
    '''
def __init__(self, master):
self.master = master
self.window = tk.Toplevel(master)
self.window.wm_attributes('-topmost', 1)
sw = self.window.winfo_screenwidth()
sh = self.window.winfo_screenheight()
ww = 650
wh = 720
x = (sw - ww) / 3
y = (sh - wh) / 3
        self.window.geometry('%dx%d+%d+%d' % (ww, wh, x, y))  # window size and position
self.window.title('使用说明')
self.create_page()
def create_page(self):
        Dev = tk.LabelFrame(self.window, text="关于使用说明", padx=10, pady=5)  # horizontal/vertical padding
Dev.place(x=50, y=50)
        text = "【使用前仔细阅读使用说明】 \n\n" \
               "使用说明\n" \
               "本项目采用多线程爬取引导蜘蛛池,爬取速度快,效率高。\n" \
               "**注意事项**\n\n" \
               "- 爬取频率:为多久进行一次爬取,默认数值0s,可以根据需求设置,时间间隔太小会封ip\n\n" \
               "- 爬取线程: 爬取的线程与电脑的性能有关、一般电脑20个线程,\n电脑性能高可以开50、100个\n\n" \
               "- 爬取的路径:爬取路径错误或者路径不设置将会文件将导出到title文件夹下面\n\n" \
               "- 关闭程序后结束爬取\n\n" \
               " \n"
        tk.Label(Dev, text=text, justify='left').grid(column=0, row=0, sticky='w', pady=5, padx=5)  # description text label
# Version info window
class Version():
    '''
    Window showing the software version history
    '''
def __init__(self, master):
self.master = master
self.window = tk.Toplevel(master)
self.window.wm_attributes('-topmost', 1)
sw = self.window.winfo_screenwidth()
sh = self.window.winfo_screenheight()
ww = 400
wh = 300
x = (sw - ww) / 3
y = (sh - wh) / 3
        self.window.geometry('%dx%d+%d+%d' % (ww, wh, x, y))  # window size and position
self.window.title('软件版本')
self.create_page()
def create_page(self):
        Dev = tk.LabelFrame(self.window, text="关于版本更新", padx=10, pady=5)  # horizontal/vertical padding
Dev.place(x=50, y=50)
text = " 2019年6月 20日 版本:V1.0 \n"
        tk.Label(Dev, text=text).grid(column=0, row=0, sticky='w', pady=5, padx=5)  # version text label
# Developer info window
class Developer():
    '''
    Window introducing the developer of the software
    '''
def __init__(self, master):
self.master = master
self.window = tk.Toplevel(master)
self.window.wm_attributes('-topmost', 1)
sw = self.window.winfo_screenwidth()
sh = self.window.winfo_screenheight()
ww = 400
wh = 300
x = (sw - ww) / 3
y = (sh - wh) / 3
        self.window.geometry('%dx%d+%d+%d' % (ww, wh, x, y))  # window size and position
self.window.title('开发者')
self.create_page()
def create_page(self):
        Dev = tk.LabelFrame(self.window, text="关于开发者", padx=10, pady=5)  # horizontal/vertical padding
Dev.place(x=50, y=50)
text = " 作者:AJay13\n" \
" 技能:熟悉各项爬虫与反爬虫,数据清洗,\n 网站搭建,软件编写\n" \
" 联系:BoeSKh5446sa23sadKJH84ads5\n"
        tk.Label(Dev, text=text, justify='left').grid(column=0, row=0, sticky='w', pady=5, padx=5)  # developer text label
# Path check
# Trial/licence expiry check
def test_time(over_time):
from datetime import datetime
d2 = datetime.strptime(over_time, '%Y-%m-%d %H:%M:%S')
now = datetime.now()
if d2 > now:
return True
else:
return False
def create_config_ini():
print('base_path基础路径', basePath)
config_path = os.path.join(basePath, 'config.ini')
if not os.path.exists(config_path):
with open(config_path, 'a+')as f:
f.write(str({'spidernum': '1000', 'timenum': '0', 'path':os.path.join(basePath,'title'), 'threadnum': '30', 'domain': 'https://www.baidu.com/'}))
def create_title_dir():
if not os.path.exists(os.path.join(basePath,'title')):
os.makedirs(os.path.join(basePath,'title'))
if __name__ == '__main__':
    window = tk.Tk()  # main window
print('开始')
    window.title("网站泛域名解析定制版v0.1-ByAjay13")  # main window title
basePath = os.path.abspath(os.path.dirname(__file__))
    if test_time('2020-5-11 16:00:00'):  # check the licence expiry date
        create_config_ini()  # create the config file if missing
        create_title_dir()  # create the default save directory
MainPage(window)
print('监听')
window.protocol('WM_DELETE_WINDOW', close_windows)
window.mainloop()
else:
window.wm_attributes('-topmost', 1)
sw = window.winfo_screenwidth()
sh = window.winfo_screenheight()
ww = 400
wh = 300
x = (sw - ww) / 3
y = (sh - wh) / 3
        window.geometry('%dx%d+%d+%d' % (ww, wh, x, y))  # window size and position
        Dev = tk.LabelFrame(window, text="授权超时", padx=10, pady=2)  # horizontal/vertical padding
Dev.place(x=50, y=50)
text = " 你已经超出授权使用期限\n" \
" 请联系管理员进行提权\n \n" \
" 联系:BoeSKh5446sa23sadKJH84ads5\n"
        tk.Label(Dev, text=text, justify='left').grid(column=0, row=0, sticky='w', pady=2, padx=5)  # notice text label
window.mainloop()
|
sawyer_host_RMH.py
|
#!/usr/bin/env python
import roslib
roslib.load_manifest('sawyer_rr_bridge')
import rospy
import intera_interface
from std_msgs.msg import Empty
import sys, argparse
import struct
import time
import RobotRaconteur as RR
import thread
import threading
import numpy
from geometry_msgs.msg import (
PoseStamped,
Pose,
Point,
Quaternion,
)
from std_msgs.msg import Header
from intera_core_msgs.srv import (
SolvePositionIK,
SolvePositionIKRequest,
)
from sawyer_pykdl import sawyer_kinematics
sawyer_servicedef="""
#Service to provide simple interface to Sawyer
service SawyerJoint_Interface
option version 0.4
object Sawyer
property double[] joint_positions
property double[] joint_velocities
property double[] joint_torques
property double[] endeffector_positions
property double[] endeffector_orientations
property double[] endeffector_twists
property double[] endeffector_wrenches
property double[] endeffector_velocity
property double[] pseudoinverse_Jacobian
property double[] Jacobian
property double[] inertia
function void setControlMode(uint8 mode)
function void setJointCommand(string limb, double[] command)
function void setPositionModeSpeed(double speed)
function void readJointPositions()
function void Sawyer_movetoNeutral()
function double[] solveIKfast(double[] positions, double[] quaternions, string limb_choice)
end object
"""
class Sawyer_impl(object):
def __init__(self):
print "Initializing Node"
rospy.init_node('sawyer_jointstates')
print "Enabling Robot"
rs = intera_interface.RobotEnable()
rs.enable()
# self._valid_limb_names = {'left': 'left',
# 'l': 'left',
# 'right': 'right',
# 'r': 'right'}
self._valid_limb_names = {'right': 'right',
'r': 'right'}
# get information from the SDK
# self._left = intera_interface.Limb('left')
self._right = intera_interface.Limb('right')
#self._l_jnames = self._left.joint_names()
self._r_jnames = self._right.joint_names()
self.kin=sawyer_kinematics('right')
# data initializations
self._jointpos = [0]*7
self._jointvel = [0]*7
self._jointtor = [0]*7
self._ee_pos = [0]*3
self._ee_or = [0]*4
self._ee_tw = [0]*6
self._ee_wr = [0]*6
self._ee_vel = [0]*6
self._pijac=[] #numpy.matrix('0,0,0,0,0,0;0,0,0,0,0,0;0,0,0,0,0,0;0,0,0,0,0,0;0,0,0,0,0,0;0,0,0,0,0,0;0,0,0,0,0,0')
self._jac=[] #numpy.matrix('0,0,0,0,0,0;0,0,0,0,0,0;0,0,0,0,0,0;0,0,0,0,0,0;0,0,0,0,0,0;0,0,0,0,0,0;0,0,0,0,0,0')
self._inertia_mat=[] #numpy.matrix('0,0,0,0,0,0,0;0,0,0,0,0,0,0;0,0,0,0,0,0,0;0,0,0,0,0,0,0;0,0,0,0,0,0,0;0,0,0,0,0,0,0;0,0,0,0,0,0,0')
#self._l_joint_command = dict(zip(self._l_jnames,[0.0]*7))
self._r_joint_command = dict(zip(self._r_jnames,[0.0]*7))
self.MODE_POSITION = 0;
self.MODE_VELOCITY = 1;
self.MODE_TORQUE = 2;
self._mode = self.MODE_POSITION
self.RMH_delay=.01
# initial joint command is current pose
self.readJointPositions()
#self.setJointCommand('left',self._jointpos[0:7])
self.setJointCommand('right',self._jointpos[0:7])
# Start background threads
self._running = True
self._t_joints = threading.Thread(target=self.jointspace_worker)
self._t_joints.daemon = True
self._t_joints.start()
self._t_effector = threading.Thread(target=self.endeffector_worker)
self._t_effector.daemon = True
self._t_effector.start()
self._t_command = threading.Thread(target=self.command_worker)
self._t_command.daemon = True
self._t_command.start()
def close(self):
self._running = False
self._t_joints.join()
self._t_effector.join()
self._t_command.join()
if (self._mode != self.MODE_POSITION):
self._right.exit_control_mode()
@property
def joint_positions(self):
return self._jointpos
@property
def joint_velocities(self):
return self._jointvel
@property
def joint_torques(self):
return self._jointtor
@property
def endeffector_positions(self):
return self._ee_pos
@property
def endeffector_orientations(self):
return self._ee_or
@property
def endeffector_twists(self):
return self._ee_tw
@property
def endeffector_wrenches(self):
return self._ee_wr
@property
def endeffector_velocity(self):
return self._ee_vel
@property
def pseudoinverse_Jacobian(self):
return self._pijac
@property
def Jacobian(self):
return self._jac
@property
def inertia(self):
return self._inertia_mat
def readJointPositions(self):
#l_angles = self._left.joint_angles()
r_angles = self._right.joint_angles()
#if l_angles:
# for i in xrange(0,len(self._l_jnames)):
# self._jointpos[i] = l_angles[self._l_jnames[i]]
# if r_angles:
# for i in xrange(0,len(self._r_jnames)):
# self._jointpos[i+7] = r_angles[self._r_jnames[i]]
if r_angles:
for i in xrange(0,len(self._r_jnames)):
self._jointpos[i] = r_angles[self._r_jnames[i]]
def readJointVelocities(self):
#l_velocities = self._left.joint_velocities()
r_velocities = self._right.joint_velocities()
#if l_velocities:
# for i in xrange(0,len(self._l_jnames)):
# self._jointvel[i] = l_velocities[self._l_jnames[i]]
# if r_velocities:
# for i in xrange(0,len(self._r_jnames)):
# self._jointvel[i+7] = r_velocities[self._r_jnames[i]]
if r_velocities:
for i in xrange(0,len(self._r_jnames)):
self._jointvel[i] = r_velocities[self._r_jnames[i]]
def readJointTorques(self):
#l_efforts = self._left.joint_efforts()
r_efforts = self._right.joint_efforts()
#if l_efforts:
# for i in xrange(0,len(self._l_jnames)):
# self._jointtor[i] = l_efforts[self._l_jnames[i]]
if r_efforts:
for i in xrange(0,len(self._r_jnames)):
self._jointtor[i] = r_efforts[self._r_jnames[i]]
def readEndEffectorPoses(self):
# l_pose = self._left.endpoint_pose()
# if l_pose:
# self._ee_pos[0] = l_pose['position'].x
# self._ee_pos[1] = l_pose['position'].y
# self._ee_pos[2] = l_pose['position'].z
# self._ee_or[0] = l_pose['orientation'].w
# self._ee_or[1] = l_pose['orientation'].x
# self._ee_or[2] = l_pose['orientation'].y
# self._ee_or[3] = l_pose['orientation'].z
r_pose = self._right.endpoint_pose()
if r_pose:
self._ee_pos[0] = r_pose['position'].x
self._ee_pos[1] = r_pose['position'].y
self._ee_pos[2] = r_pose['position'].z
self._ee_or[0] = r_pose['orientation'].w
self._ee_or[1] = r_pose['orientation'].x
self._ee_or[2] = r_pose['orientation'].y
self._ee_or[3] = r_pose['orientation'].z
def readKDL(self):
temppij=self.kin.jacobian_pseudo_inverse()
tempj=self.kin.jacobian()
tempi=self.kin.inertia()
self._pijac=numpy.array(temppij).flatten()
self._jac=numpy.array(tempj).flatten()
self._inertia_mat=numpy.array(tempi).flatten()
def readEndEffectorTwists(self):
# l_twist = self._left.endpoint_velocity()
# if l_twist:
# self._ee_tw[0] = l_twist['angular'].x
# self._ee_tw[1] = l_twist['angular'].y
# self._ee_tw[2] = l_twist['angular'].z
# self._ee_tw[3] = l_twist['linear'].x
# self._ee_tw[4] = l_twist['linear'].y
# self._ee_tw[5] = l_twist['linear'].z
r_twist = self._right.endpoint_velocity()
if r_twist:
self._ee_tw[0] = r_twist['angular'].x
self._ee_tw[1] = r_twist['angular'].y
self._ee_tw[2] = r_twist['angular'].z
self._ee_tw[3] = r_twist['linear'].x
self._ee_tw[4] = r_twist['linear'].y
self._ee_tw[5] = r_twist['linear'].z
def readEndEffectorWrenches(self):
# l_wrench = self._left.endpoint_effort()
# if l_wrench:
# self._ee_wr[0] = l_wrench['torque'].x
# self._ee_wr[1] = l_wrench['torque'].y
# self._ee_wr[2] = l_wrench['torque'].z
# self._ee_wr[3] = l_wrench['force'].x
# self._ee_wr[4] = l_wrench['force'].y
# self._ee_wr[5] = l_wrench['force'].z
r_wrench = self._right.endpoint_effort()
if r_wrench:
self._ee_wr[0] = r_wrench['torque'].x
self._ee_wr[1] = r_wrench['torque'].y
self._ee_wr[2] = r_wrench['torque'].z
self._ee_wr[3] = r_wrench['force'].x
self._ee_wr[4] = r_wrench['force'].y
self._ee_wr[5] = r_wrench['force'].z
def readEndEffectorVelocity(self):
r_ee_vel = self._right.endpoint_velocity()
# fixed: the original checked an undefined r_pose and read a nonexistent
# 'w' component from the angular velocity vector
if r_ee_vel:
self._ee_vel[0] = r_ee_vel['linear'].x
self._ee_vel[1] = r_ee_vel['linear'].y
self._ee_vel[2] = r_ee_vel['linear'].z
self._ee_vel[3] = r_ee_vel['angular'].x
self._ee_vel[4] = r_ee_vel['angular'].y
self._ee_vel[5] = r_ee_vel['angular'].z
def setControlMode(self, mode):
if mode != self.MODE_POSITION and \
mode != self.MODE_VELOCITY and \
mode != self.MODE_TORQUE:
return
if mode == self.MODE_POSITION:
# self._left.exit_control_mode()
self._right.exit_control_mode()
# set command to current joint positions
# self.setJointCommand('left',self._jointpos[0:7])
self.setJointCommand('right',self._jointpos[0:7])
elif mode == self.MODE_VELOCITY:
# set command to zeros
# self.setJointCommand('left',[0]*7)
self.setJointCommand('right',[0]*7)
elif mode == self.MODE_TORQUE:
# set command to zeros
# self.setJointCommand('left',[0]*7)
self.setJointCommand('right',[0]*7)
self._mode = mode
# This function calls RSDK ikFast Service
def solveIKfast(self, positions, quaternions, limb_choice):
ns = "ExternalTools/" + limb_choice + "/PositionKinematicsNode/IKService"
iksvc = rospy.ServiceProxy(ns, SolvePositionIK)
ikreq = SolvePositionIKRequest()
hdr = Header(stamp=rospy.Time.now(), frame_id='base')
poses = {}
# if (limb_choice == 'left' or limb_choice == 'l'):
# limb_choice = 'left'
# poses = {
# 'left': PoseStamped(
# header=hdr,
# pose=Pose(
# position = Point(
# x = positions[0],
# y = positions[1],
# z = positions[2],
# ),
# orientation = Quaternion(
# x = quaternions[1],
# y = quaternions[2],
# z = quaternions[3],
# w = quaternions[0],
# ),
# ),
# ),
# 'right': PoseStamped(
# header=hdr,
# pose=Pose(
# position = Point(
# x = self._ee_pos[3],
# y = self._ee_pos[4],
# z = self._ee_pos[5],
# ),
# orientation = Quaternion(
# x = self._ee_or[5],
# y = self._ee_or[6],
# z = self._ee_or[7],
# w = self._ee_or[4],
# ),
# ),
# ),
# }
if (limb_choice == 'right' or limb_choice == 'r'):
limb_choice = 'right'
poses = {
'left': PoseStamped(
header=hdr,
pose=Pose(
position = Point(
x = self._ee_pos[0],
y = self._ee_pos[1],
z = self._ee_pos[2],
),
orientation = Quaternion(
x = self._ee_or[1],
y = self._ee_or[2],
z = self._ee_or[3],
w = self._ee_or[0],
),
),
),
'right': PoseStamped(
header=hdr,
pose=Pose(
position = Point(
x = positions[0],
y = positions[1],
z = positions[2],
),
orientation = Quaternion(
x = quaternions[1],
y = quaternions[2],
z = quaternions[3],
w = quaternions[0],
),
),
),
}
else:
print "Not a valid arm"
return
# begin the solving process
ikreq.pose_stamp.append(poses[limb_choice])
try:
rospy.wait_for_service(ns, 5.0)
resp = iksvc(ikreq)
except (rospy.ServiceException, rospy.ROSException), e:
rospy.logerr("Service call failed: %s" % (e,))
return 1
# Check if result valid, and type of seed ultimately used to get solution
# convert rospy's string representation of uint8[]'s to int's
resp_seeds = struct.unpack('<%dB' % len(resp.result_type),
resp.result_type)
seed_dict = {
ikreq.SEED_USER: 'User Provided Seed',
ikreq.SEED_CURRENT: 'Current Joint Angles',
ikreq.SEED_NS_MAP: 'Nullspace Setpoints',
}
if (resp_seeds[0] != resp.RESULT_INVALID):
seed_str = seed_dict.get(resp_seeds[0], 'None')
print("SUCCESS - Valid Joint Solution Found from Seed Type: %s" %
(seed_str,))
# Format solution into Limb API-compatible dictionary
limb_joints = dict(zip(resp.joints[0].name, resp.joints[0].position))
print "\nIK Joint Solution:\n", limb_joints
print "------------------"
print "Response Message:\n", resp
# if no valid solution was found
else:
print("INVALID POSE - No Valid Joint Solution Found.")
return resp.joints[0].position
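# Hypothetical example call (illustrative values only), e.g. through the
# Robot Raconteur client proxy; note the quaternion is ordered w,x,y,z to
# match how the Pose above is built:
#   joints = sawyer.solveIKfast([0.6, 0.0, 0.4], [1.0, 0.0, 0.0, 0.0], 'right')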
def setJointCommand(self, limb, command):
limb = limb.lower()
if not limb in self._valid_limb_names.keys():
return
# if self._valid_limb_names[limb] == 'left':
# for i in xrange(0,len(self._l_jnames)):
# self._l_joint_command[self._l_jnames[i]] = command[i]
if self._valid_limb_names[limb] == 'right':
for i in xrange(0,len(self._r_jnames)):
self._r_joint_command[self._r_jnames[i]] = command[i]
def setPositionModeSpeed(self, speed):
if speed < 0.0:
speed = 0.0
elif speed > 1.0:
speed = 1.0
# self._left.set_joint_position_speed(speed)
self._right.set_joint_position_speed(speed)
# worker function to request and update joint data for sawyer
# maintain 100 Hz read rate
# TODO: INCORPORATE USER-DEFINED JOINT PUBLISH RATE
def jointspace_worker(self):
while self._running:
t1 = time.time()
self.readJointPositions()
self.readJointVelocities()
self.readJointTorques()
self.readKDL()
while (time.time() - t1 < self.RMH_delay):
# idle
time.sleep(0.001)
# worker function to request and update end effector data for sawyer
# Try to maintain 100 Hz operation
def endeffector_worker(self):
while self._running:
t1 = time.time()
self.readEndEffectorPoses()
self.readEndEffectorTwists()
self.readEndEffectorWrenches()
while (time.time() - t1 < self.RMH_delay):
# idle
time.sleep(0.001)
# worker function to continuously issue commands to sawyer
# Try to maintain 100 Hz operation
# TODO: INCLUDE CLOCK JITTER CORRECTION
def command_worker(self):
while self._running:
t1 = time.time()
if (self._mode == self.MODE_POSITION):
# self._left.set_joint_positions(self._l_joint_command)
self._right.set_joint_positions(self._r_joint_command)
elif (self._mode == self.MODE_VELOCITY):
# self._left.set_joint_velocities(self._l_joint_command)
self._right.set_joint_velocities(self._r_joint_command)
elif (self._mode == self.MODE_TORQUE):
#self._supp_cuff_int_pubs['left'].publish()
#self._supp_cuff_int_pubs['right'].publish()
# self._left.set_joint_torques(self._l_joint_command)
self._right.set_joint_torques(self._r_joint_command)
while (time.time() - t1 < self.RMH_delay):
# idle
#.01
time.sleep(0.001)
def main(argv):
# parse command line arguments
parser = argparse.ArgumentParser(
description='Initialize Joint Controller.')
parser.add_argument('--port', type=int, default = 0,
help='TCP port to host service on' + \
'(will auto-generate if not specified)')
args = parser.parse_args(argv)
#Enable numpy
RR.RobotRaconteurNode.s.UseNumPy=True
#Set the Node name
RR.RobotRaconteurNode.s.NodeName="SawyerRMHServer"
#Initialize object
sawyer_obj = Sawyer_impl()
#Create transport, register it, and start the server
print "Registering Transport"
t = RR.TcpTransport()
t.EnableNodeAnnounce(RR.IPNodeDiscoveryFlags_NODE_LOCAL |
RR.IPNodeDiscoveryFlags_LINK_LOCAL |
RR.IPNodeDiscoveryFlags_SITE_LOCAL)
RR.RobotRaconteurNode.s.RegisterTransport(t)
t.StartServer(args.port)
port = args.port
if (port == 0):
port = t.GetListenPort()
#Register the service type and the service
print "Starting Service"
RR.RobotRaconteurNode.s.RegisterServiceType(sawyer_servicedef)
RR.RobotRaconteurNode.s.RegisterService("Sawyer",
"SawyerJoint_Interface.Sawyer",
sawyer_obj)
print "Service started, connect via"
print "tcp://localhost:" + str(port) + "/SawyerRMHServer/Sawyer"
raw_input("press enter to quit...\r\n")
sawyer_obj.close()
# This must be here to prevent segfault
RR.RobotRaconteurNode.s.Shutdown()
if __name__ == '__main__':
main(sys.argv[1:])
|
runtests.py
|
#!/usr/bin/env python
#
# Copyright (c) 2009, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
#
import sys
import contextlib
import traceback
import unittest
import time
import os
import subprocess
import errno
import signal
import urllib2
import threading
import Queue
PREFIX = os.environ.get("AFDT_TEST_PREFIX", "").split()
class SubprocessTestCase(unittest.TestCase):
def setUp(self):
def sigchld_handler(signum, frame):
while True:
status = os.waitpid(-1, os.WNOHANG | os.WUNTRACED | os.WCONTINUED)
if status == (0, 0):
break
if os.WIFSTOPPED(status[1]) or os.WIFCONTINUED(status[1]):
# Ignore SIGCHLDs due to stopping and starting a child
continue
raise Exception("child died unexpectedly: %r" % (status,))
signal.signal(signal.SIGCHLD, sigchld_handler)
def killChildren(self, children):
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
for proc in children:
try:
if proc is not None:
os.kill(proc.pid, signal.SIGTERM)
except OSError, err:
if err.errno != errno.ESRCH:
traceback.print_exc()
class EvhttpTest(SubprocessTestCase):
prod_port = 8080
port0 = 9090
port1 = 9191
# Number of requests to send to the production port to verify
# the set of servers that are listening on it.
# This is nondeterministic, but we take our chances.
iterations = 0x10000
def setUp(self):
SubprocessTestCase.setUp(self)
def startserver(port):
return subprocess.Popen(PREFIX +
["./server", "-a", str(port), "-s", "p" + str(port)])
self.proc0 = None
self.proc1 = None
self.proc0 = startserver(self.port0)
self.proc1 = startserver(self.port1)
# TODO(dreiss): Check statuses so we can stop sleeping early
time.sleep(1.0/2)
status = os.waitpid(-1, os.WNOHANG)
assert status == (0, 0)
def tearDown(self):
self.killChildren([self.proc0, self.proc1])
def testServers(self):
def openurl(port, path):
with contextlib.closing(urllib2.urlopen(
"http://localhost:%d/%s" % (port, path))) as handle:
return handle.read()
def checkret(port, path, content):
self.assertEqual(openurl(port, path), content)
def putret(port, path, q):
q.put(openurl(port, path))
def checkset(port, path, expect):
results = set()
iter = 0
while iter < self.iterations:
results.add(openurl(port, path))
self.assert_(results <= expect)
if results == expect:
break
iter += 1
self.assertNotEqual(iter, self.iterations)
# Check basic status responses
checkret(self.port0, "status", "p%d" % self.port0)
checkret(self.port1, "status", "p%d" % self.port1)
# Have one server bind to production
checkret(self.port0, "bind_prod", "bind")
# Verify production
checkret(self.prod_port, "status", "p%d" % self.port0)
# Rebind detection
checkret(self.port0, "bind_prod", "already_open")
# Close production
checkret(self.port0, "close_prod", "closed")
# Verify close production
checkret(self.port0, "close_prod", "no_prod")
# Repeat with the other server
checkret(self.port1, "bind_prod", "bind")
checkret(self.prod_port, "status", "p%d" % self.port1)
checkret(self.port1, "bind_prod", "already_open")
checkret(self.port1, "close_prod", "closed")
checkret(self.port1, "close_prod", "no_prod")
# Have one server bind to production
checkret(self.port0, "bind_prod", "bind")
# Verify production
checkret(self.prod_port, "status", "p%d" % self.port0)
# Have the other server grab the socket
checkret(self.port1, "bind_prod", "afdt")
# Verify that both are listening
checkset(self.prod_port, "status",
set(["p%d" % port for port in [self.port0, self.port1]]))
# Close the socket on the original server
checkret(self.port0, "close_prod", "closed")
# Verify that only the second is listening
checkset(self.prod_port, "status",
set(["p%d" % port for port in [self.port1]]))
# Have the first server get the socket back
checkret(self.port0, "bind_prod", "afdt")
# Verify that both are listening
checkset(self.prod_port, "status",
set(["p%d" % port for port in [self.port0, self.port1]]))
# Close the socket on the second server
checkret(self.port1, "close_prod", "closed")
# Verify that only the first is listening
checkset(self.prod_port, "status",
set(["p%d" % port for port in [self.port0]]))
# Close the socket on the first server
checkret(self.port0, "close_prod", "closed")
# Repeat the simple case with the second server
checkret(self.port1, "bind_prod", "bind")
checkret(self.prod_port, "status", "p%d" % self.port1)
checkret(self.port1, "bind_prod", "already_open")
checkret(self.port1, "close_prod", "closed")
checkret(self.port1, "close_prod", "no_prod")
# Have the first server bind to production
checkret(self.port0, "bind_prod", "bind")
# Verify production
checkret(self.prod_port, "status", "p%d" % self.port0)
# Suspend that process
self.proc0.send_signal(signal.SIGSTOP)
# Use a background thread to have the second server grab the socket
q = Queue.Queue()
t = threading.Thread(target=putret, args=(self.port1, "bind_prod", q))
t.start()
# After a half second, we should still be waiting
time.sleep(0.5)
self.assert_(q.empty())
# The second server should still be able to respond to requests
checkret(self.port1, "status", "p%d" % self.port1)
# Let the first server wake up and transfer the socket
self.proc0.send_signal(signal.SIGCONT)
# The second server should receive the socket quickly
self.assertEqual(q.get(timeout=1.0/16), "afdt")
t.join(1.0/16)
self.assertFalse(t.isAlive())
# Close the socket on the first server
checkret(self.port0, "close_prod", "closed")
# Verify that the second is listening
checkret(self.prod_port, "status", "p%d" % self.port1)
# Remove the signal handler
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
# Shut both servers down
checkret(self.port0, "shutdown", "shutting_down")
checkret(self.port1, "shutdown", "shutting_down")
# Make sure they both go down in a reasonable time
def sigalrm_handler(signum, frame):
raise Exception("waitpid timed out")
signal.signal(signal.SIGALRM, sigalrm_handler)
signal.alarm(1)
self.assertEqual(self.proc0.wait(), 0)
self.assertEqual(self.proc1.wait(), 0)
self.proc0 = None
self.proc1 = None
signal.alarm(0)
signal.signal(signal.SIGALRM, signal.SIG_DFL)
class CatterTest(SubprocessTestCase):
svport = 9090
client = "catter"
def setUp(self):
SubprocessTestCase.setUp(self)
def tearDown(self):
self.killChildren([self.svproc, self.clproc])
def testCatter(self):
self.svproc = None
self.clproc = None
self.svproc = subprocess.Popen(PREFIX +
["./catter", "-s"], stdout=subprocess.PIPE)
time.sleep(1.0/4)
self.clproc = subprocess.Popen(PREFIX +
["./" + self.client], stdin=subprocess.PIPE)
time.sleep(1.0/4)
self.clproc.stdin.write("TEST1")
time.sleep(1.0/4)
# Remove the signal handler
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
# Let the server exit
time.sleep(1.0/2)
self.clproc.stdin.write("TEST2")
self.clproc.stdin.close()
time.sleep(1.0/4)
self.assertEqual(self.svproc.stdout.read(), "TEST1TEST2")
# Make sure they both go down in a reasonable time
# TODO(dreiss): Factor out subprocs?
def sigalrm_handler(signum, frame):
raise Exception("waitpid timed out")
signal.signal(signal.SIGALRM, sigalrm_handler)
signal.alarm(1)
self.assertEqual(self.svproc.wait(), 0)
self.assertEqual(self.clproc.wait(), 0)
self.svproc = None
self.clproc = None
signal.alarm(0)
signal.signal(signal.SIGALRM, signal.SIG_DFL)
class SyncCatterTest(CatterTest):
client = "sync_catter"
# The evhttp test relies on some really new features of libevent,
# so allow it to be disabled independently.
if os.environ.get("NO_HTTP_TEST", False):
del EvhttpTest
if __name__ == "__main__":
unittest.main()
|
main.py
|
#!/usr/bin/python
import paho.mqtt.client as paho
import psutil
import pywapi
import dweepy
import signal
import sys
import time
import pyupm_grove as grove
from TH02 import *
from flask import Flask
from flask_restful import Api, Resource
from threading import Thread
DeviceID = "90b68610b05b"
#RESTFull support class
app = Flask(__name__)
api = Api(app)
class DataSensorRestApi(Resource):
def get(self):
data = 'Temperature: %d *C '%sensTH.readTemperature()
data = data + 'Humidity: %d%% '%sensTH.readHumidity()
data = data + 'Light: %d Lux'%sensL.value()
return data
#End RESTFull support
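# Usage sketch (assumption, not part of the original): once app.run() at the
# bottom is serving on Flask's default port 5000, the reading registered at
# /sensor can be fetched with e.g.:
#   curl http://<device-ip>:5000/sensor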
sensTH = TH02(1)
sensL = grove.GroveLight(0)
def functionApiWeather():
data = pywapi.get_weather_from_weather_com('MXJO0043', 'metric')
message = data['location']['name']
message = message + ", Temperature " + \
data['current_conditions']['temperature']
message = message + ", Atmospheric Pressure " + \
data['current_conditions']['temperature']
return message
def functionDataActuator(status):
print "Data Actuator Status %s" % status
def functionDataActuatorMqttOnMessage(mosq, obj, msg):
print "Data Sensor Mqtt Subscribe Message!"
functionDataActuator(msg.payload)
def functionDataActuatorMqttSubscribe():
mqttclient = paho.Client()
mqttclient.on_message = functionDataActuatorMqttOnMessage
mqttclient.connect("test.mosquitto.org", 1883, 60)
mqttclient.subscribe("IoT101/"+DeviceID+"/DataActuator", 0)
while mqttclient.loop() == 0:
pass
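# Test sketch (assumption, not part of the original): the actuator topic can be
# driven from any MQTT client pointed at the same public broker, e.g.:
#   mosquitto_pub -h test.mosquitto.org -t "IoT101/90b68610b05b/DataActuator" -m "ON"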
def functionDataSensor():
# netdata = psutil.net_io_counters()
# data = netdata.packets_sent + netdata.packets_recv
temp = sensTH.readTemperature()
hum = sensTH.readHumidity()
lig = sensL.value()
# build a summary string to return; the original returned None, so the MQTT
# publisher and the status loop below had nothing to send (format chosen here)
data = 'Temp:%s Hum:%s Lig:%s' % (str(temp), str(hum), str(lig))
dweepy.dweet_for('IoT'+DeviceID, {'Temp':str(temp), \
'Hum':str(hum),'Lig':str(lig)})
print dweepy.get_latest_dweet_for('IoT'+DeviceID)
return data
def functionDataSensorMqttOnPublish(mosq, obj, msg):
print "Data Sensor Mqtt Published!"
def functionDataSensorMqttPublish():
mqttclient = paho.Client()
mqttclient.on_publish = functionDataSensorMqttOnPublish
mqttclient.connect("test.mosquitto.org", 1883, 60)
while True:
data = functionDataSensor()
topic = "IoT101/"+DeviceID+"DataSensor"
mqttclient.publish(topic, data)
time.sleep(1)
def functionSignalHandler(signal, frame):
sys.exit(0)
if __name__ == '__main__':
signal.signal(signal.SIGINT, functionSignalHandler)
threadmqttpublish = Thread(target=functionDataSensorMqttPublish)
threadmqttpublish.start()
threadmqttsubscribe = Thread(target=functionDataActuatorMqttSubscribe)
threadmqttsubscribe.start()
api.add_resource(DataSensorRestApi, '/sensor')
app.run(host='0.0.0.0', debug=True)
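# NOTE: app.run() blocks, so the status loop below only runs if the Flask
# server stops; the MQTT publish/subscribe work is handled by the threads above.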
while True:
print "Hello Internet of Things 101"
print "Data Sensor: %s " % functionDataSensor()
print "API Weather: %s " % functionApiWeather()
time.sleep(2)
# End of File
|
client.py
|
# built in
import socket
import os.path
from time import sleep
import logging
import zlib
from collections import deque  # deque lives in collections; importing it from queue relies on an implementation detail
import threading
from typing import List
# my own
from src.utilities import rsa_utility
from src.utilities import AESCipher
from src.utilities.config_utility import network_configuration_loader
# dependencies
import msgpack
import pyDH
from Crypto.Cipher import PKCS1_OAEP
from Crypto.PublicKey.RSA import importKey
logging.basicConfig(level=logging.DEBUG)
# NOTE: to add port forwarding automation
# NOTE: to add 2 FA with email or etc
# NOTE: to check auth using the server's permanent pem
class Client(object):
def __init__(self, user_id: str = "DUMMY"):
self.localhost: str = None
self.port: int = None
self.client_socket: socket.socket = None
self.localhost, self.port = network_configuration_loader()
self.port = int(self.port)
self.user_id = user_id
self.connected = False
self.publicKey = None
self.privateKey = None
self.serverPublicKey = None
self.directory = os.path.dirname(os.path.realpath(__file__))
self.load_keys()
self.decryptor = PKCS1_OAEP.new(self.privateKey)
self.encryptor = None
self.__aes256key: bytes = b""
self.__internal_deque = deque()
self.__external_deque = deque()
self.my_supported_actions = [""]
self.run_recv_thread = False
self.recv_thread_obj = threading.Thread(
target=self.recv_thread, args=[])
self.rec_thread_exit = True
self.run_sending_thread = False
self.sending_thread_obj = threading.Thread(
target=self.sending_thread, args=[])
self.send_thread_exit = True # forcefully close?
def load_keys(self):
"""
load the client keys if created,
if not create and loads
"""
if not os.path.exists('./private.pem') or \
not os.path.exists('./public.pem'):
logging.debug("keys not found so will be created")
rsa_utility.createAndSaveKeys(self.directory)
logging.debug("loading keys")
self.publicKey = rsa_utility.loadKeyFromFile(
f'{self.directory}/public.pem')
self.privateKey = rsa_utility.loadKeyFromFile(
f'{self.directory}/private.pem')
if os.path.exists('./server.pem'):
logging.debug("server key was found and now is loaded")
self.serverPublicKey = rsa_utility.loadKeyFromFile('server.pem')
else:
logging.debug("server key was not found, handshake now being held")
def init_connection(self):
"""
init the client socket
"""
logging.debug("initing connection")
self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.client_socket.connect((self.localhost, self.port))
self.connected = True
def secure_connection(self):
"""
create secure connection to the server that at
the end the communication is based on aes encryption
"""
self.init_connection()
self.handshake()
self.secure_connection_setup()
def handshake(self):
"""
handle rsa exchange with the server
"""
data = {'Action': "EXCHANGE", 'PubKey':
rsa_utility.rsaKeyToBase64String(self.publicKey.exportKey())}
self.client_socket.send(msgpack.dumps(data))
self.serverPublicKey = self.client_socket.recv(4096)
self.serverPublicKey = rsa_utility.rsaKeyFromBase64String(
self.serverPublicKey)
logging.debug(f"server key: {self.serverPublicKey}")
self.encryptor = PKCS1_OAEP.new(importKey(self.serverPublicKey))
def secure_connection_setup(self) -> str:
"""
does rsa keys exchange then does diffie hellman
algorithem to generate keys that will be used for
AES encryption
"""
logging.debug("secure_connection_setup was called")
server_data = self.client_socket.recv(4096)
if server_data in ['', b'']: # client disconnected
return "disconnected" # client disconnected
# from now on the chat is rsa encrypted
# NOTE: should i check for rsa verification?
# start dffie hellman
logging.debug("start diffie hellman")
privateKey = pyDH.DiffieHellman()
bytesPubKey = str(privateKey.gen_public_key()).encode('utf-8')
bytesPubKey = zlib.compress(bytesPubKey)
data_to_send = {'Action': 'DiffieHellman', 'PubKey': bytesPubKey}
data_to_send = msgpack.dumps(data_to_send)
# logging.debug(self.decryptor.decrypt(server_data))
self.client_socket.send(
self.encryptor.encrypt(data_to_send))
logging.debug("end diffie hellman")
logging.debug(self.decryptor.decrypt(server_data))
server_data = msgpack.loads(self.decryptor.decrypt(server_data))
serverPubKey = server_data['PubKey']
serverPubKey = int(zlib.decompress(serverPubKey).decode('utf-8'))
secret = privateKey.gen_shared_key(serverPubKey)
logging.debug(f"aes key is {secret}")
self.__aes256key = secret[:32].encode('utf-8')
return secret[:32] # 256 bit key
def login(self, password) -> bool:
"""
login to server action
"""
# need to go to user in db check if password and hash can verify
logging.debug(f"aes key is {self.__aes256key}")
data = {'Action': 'LOGIN', 'Data': {
"user_id": self.user_id, "password": password}}
self.send(data)
response = self.client_socket.recv(1024)
response = msgpack.loads(response)
if response:
logging.debug("initiating recv thread in client inner")
self.run_recv_thread = True
self.recv_thread_obj.start()
self.run_sending_thread = True
self.sending_thread_obj.start()
return response
def sign_up(self, password: str) -> bool:  # does not need the background threads
"""
handle steps for account creation
"""
data = {'Action': 'SIGN_UP', 'Data': {
"user_id": self.user_id, "password": password}}
self.send(data)
answer = self.client_socket.recv(1024)
if answer:
self.login(password)
return msgpack.loads(answer)
def send(self, data: dict, none_blocking=False):
"""
send data to server encrypted and with header of size
"""
if none_blocking:
self.__internal_deque.append(data)
else:
data = AESCipher.encrypt_data_to_bytes(data, self.__aes256key)
header = Client.send_header(data)
try:
self.client_socket.send(header + data)
logging.debug("sent data to server")
except Exception as e:
logging.debug(f"error in send {e}")
def recv_thread(self):
"""
thread that receives messages from the server
"""
self.rec_thread_exit = False # NOTE: change to recv exit
logging.debug("recv_thread called inner client")
while self.run_recv_thread:
sleep(0.05)
try:
logging.debug("block size recv call")
data_size = self.client_socket.recv(5)
if len(data_size) != 5: # FIXME: if server crashed, wont close
continue
except Exception as e:
logging.debug(f"exception in recv thread {e}")
continue
try:
data_size = int(msgpack.loads(data_size))
except msgpack.exceptions.ExtraData:
continue
except ValueError:
continue
data = self.client_socket.recv(data_size)
logging.debug(f"recv thread got {data}")
# NOTE: move to a separated thread? the decrypt and handling? nah
data = AESCipher.decrypt_data_from_bytes(data, self.__aes256key)
if data["Action"] not in self.my_supported_actions:
ac = data["Action"]
logging.debug(f"unsupported action {ac}")
self.__external_deque.append(data)
else:
self.__internal_deque.append(data)
self.rec_thread_exit = True
logging.debug("exiting recv threading in client inner")
exit(0)
def sending_thread(self):
"""
thread that sends messages
"""
while self.run_sending_thread:
sleep(0.05)
if self.__internal_deque:
data_to_send = self.__internal_deque.popleft()
logging.debug("sending data")
self.send(data_to_send, none_blocking=False)
logging.debug("data sent")
logging.debug("exiting sending thread")
exit(0)
def create_group(self, group_name: str, group_members: List[str],
group_admin: str = "me"):
"""
send the server a task to create a group
"""
if group_admin == "me":
group_admin = self.user_id
action_to_send = {'Action': 'CREATE_GROUP', 'Data': {
"members": group_members,
'admin': group_admin,
'group_name': group_name
}}
self.send(action_to_send, none_blocking=True)
def edit_group(self, group_name: str, group_members: List[str],
group_admin: str = "me"):
"""
send the server a task to edit an existing group
"""
if group_admin == 'me':
group_admin = self.user_id  # fixed: was a no-op comparison (==) instead of an assignment
action_to_send = {'Action': "EDIT_GROUP", 'Data': {
'members': group_members,
'admin': group_admin,
'origin_name': group_name
}}
self.send(action_to_send, none_blocking=True)
def pass_message(self, target: str, message: str):
"""
send to the server text to pass for
both group and user
"""
data = {'Action': 'PASS_TO', 'Data': {
'target': target, 'text': message}}
self.send(data, none_blocking=True)
def add_member(self, member: str):
"""
send to server request to add member
"""
action_to_send = {'Action': "ADD_MEMBER", "Data": {"user_id":
member}}
self.send(action_to_send, none_blocking=True)
def is_online(self, user_id: str):
"""
ask server for user id and return boolean
of server answer
"""
data = {'Action': 'SEARCH', 'Data': {'user_id': user_id}}
self.send(data, none_blocking=True)
# NOTE: this will be handled in the thread cuz its blocking
""" answer = self.client_socket.recv(4096)
logging.debug(f"asked server if {user_id} is online: {answer}")
answer = AESCipher.decrypt_data_from_bytes(answer, self.__aes256key)
return answer"""
def group_search(self, group_name: str, member: str = "me"):
"""
task the server with searching a group
that contains the current user
"""
if member == 'me':
member = self.user_id
action_to_send = {'Action': 'GROUP_SEARCH',
'Data': {'group_name': group_name,
'member_id': member}}
self.send(action_to_send, none_blocking=True)
def set_username(self, username: str):
# set the username if not logged into the server
self.user_id = username
def set_password(self, password: str):
# set the password, not needed?
pass
def get_username(self) -> str:
return self.user_id
def handle_internal_queue(self):
"""
action that the client socket need to handle
"""
pass
def get_external_queue_task(self):
"""
actions that the gui need to handle
"""
if self.__external_deque:
return self.__external_deque.popleft()
return None
def get_existed_group_data(self, group_name: str):
""" ask the server to get existing group data
the server will answer only if the asking the the admin
"""
request = {'Action': 'GROUP_INFO_REQUEST', 'Data': {
'group_name': group_name}}
self.send(request, none_blocking=True)
def close(self):
"""
close the connection
"""
logging.debug("client inner close call")
if self.connected:
self.run_recv_thread = False
self.run_sending_thread = False
data = {'Action': 'EXIT'}
self.send(data)
while not self.rec_thread_exit:
sleep(0.05)
self.client_socket.close()
def exit_group(self, group_name: str):
"""
ask the server to leave the group
"""
request = {'Action': 'LEAVE_GROUP', 'Data': {'group_name': group_name}}
self.send(request, none_blocking=True)
@staticmethod
def send_header(data: bytes) -> bytes:
"""
return the msg size header
"""
header = str(len(data)).zfill(4)
return msgpack.dumps(header)
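# Wire-format note (added commentary, not part of the original): every message
# is the 5-byte msgpack'ed length header produced by send_header() followed by
# the AES-encrypted payload, which is why recv_thread() first calls
# self.client_socket.recv(5), msgpack.loads() that header, and then
# recv(<length>) for the body. Because zfill(4) fixes the header at four
# digits, payloads longer than 9999 bytes would break this framing.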
if __name__ == '__main__':
a = Client("yoram")
a.secure_connection()
a.login("123")
a.close()
print("end")
|
train.py
|
from dataloader import EvalDataset, TrainDataset, NewBidirectionalOneShotIterator
from dataloader import get_dataset
import argparse
import os
import logging
import time
backend = os.environ.get('DGLBACKEND', '') # default to the PyTorch path below when DGLBACKEND is unset
if backend.lower() == 'mxnet':
import multiprocessing as mp
from train_mxnet import load_model
from train_mxnet import train
from train_mxnet import test
else:
import torch.multiprocessing as mp
from train_pytorch import load_model
from train_pytorch import train
from train_pytorch import test
class ArgParser(argparse.ArgumentParser):
def __init__(self):
super(ArgParser, self).__init__()
self.add_argument('--model_name', default='TransE',
choices=['TransE', 'TransH', 'TransR', 'TransD',
'RESCAL', 'DistMult', 'ComplEx', 'RotatE', 'pRotatE'],
help='model to use')
self.add_argument('--data_path', type=str, default='data',
help='root path of all dataset')
self.add_argument('--dataset', type=str, default='FB15k',
help='dataset name, under data_path')
self.add_argument('--format', type=str, default='1',
help='the format of the dataset.')
self.add_argument('--save_path', type=str, default='ckpts',
help='place to save models and logs')
self.add_argument('--save_emb', type=str, default=None,
help='save the embeddings in the specific location.')
self.add_argument('--max_step', type=int, default=80000,
help='train xx steps')
self.add_argument('--warm_up_step', type=int, default=None,
help='for learning rate decay')
self.add_argument('--batch_size', type=int, default=1024,
help='batch size')
self.add_argument('--batch_size_eval', type=int, default=8,
help='batch size used for eval and test')
self.add_argument('--neg_sample_size', type=int, default=128,
help='negative sampling size')
self.add_argument('--neg_sample_size_valid', type=int, default=1000,
help='negative sampling size for validation')
self.add_argument('--neg_sample_size_test', type=int, default=-1,
help='negative sampling size for testing')
self.add_argument('--hidden_dim', type=int, default=256,
help='hidden dim used by relation and entity')
self.add_argument('--lr', type=float, default=0.0001,
help='learning rate')
self.add_argument('-g', '--gamma', type=float, default=12.0,
help='margin value')
self.add_argument('--eval_percent', type=float, default=1,
help='sample some percentage for evaluation.')
self.add_argument('--gpu', type=int, default=-1,
help='use GPU')
self.add_argument('--mix_cpu_gpu', action='store_true',
help='mix CPU and GPU training')
self.add_argument('-de', '--double_ent', action='store_true',
help='double entity dim for complex number')
self.add_argument('-dr', '--double_rel', action='store_true',
help='double relation dim for complex number')
self.add_argument('--seed', type=int, default=0,
help='set random seed for reproducibility')
self.add_argument('-log', '--log_interval', type=int, default=1000,
help='print training logs every x steps')
self.add_argument('--eval_interval', type=int, default=10000,
help='do evaluation after every x steps')
self.add_argument('-adv', '--neg_adversarial_sampling', type=bool, default=True,
help='if use negative adversarial sampling')
self.add_argument('-a', '--adversarial_temperature', default=1.0, type=float)
self.add_argument('--valid', type=bool, default=True,
help='if valid a model')
self.add_argument('--test', type=bool, default=True,
help='if test a model')
self.add_argument('-rc', '--regularization_coef', type=float, default=0.000002,
help='set value > 0.0 if regularization is used')
self.add_argument('-rn', '--regularization_norm', type=int, default=3,
help='norm used in regularization')
self.add_argument('--num_worker', type=int, default=16,
help='number of workers used for loading data')
self.add_argument('--non_uni_weight', action='store_true',
help='if use uniform weight when computing loss')
self.add_argument('--init_step', type=int, default=0,
help='DONT SET MANUALLY, used for resume')
self.add_argument('--step', type=int, default=0,
help='DONT SET MANUALLY, track current step')
self.add_argument('--pickle_graph', action='store_true',
help='pickle built graph, building a huge graph is slow.')
self.add_argument('--num_proc', type=int, default=1,
help='number of process used')
self.add_argument('--rel_part', action='store_true',
help='enable relation partitioning')
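# Hypothetical invocation sketch (flag values are illustrative only):
#   DGLBACKEND=pytorch python train.py --model_name TransE --dataset FB15k \
#       --batch_size 1024 --neg_sample_size 128 --hidden_dim 256
# Note that the __main__ block below also expects the SageMaker environment
# variables SM_NUM_GPUS and SM_MODEL_DIR to be set.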
def get_logger(args):
if not os.path.exists(args.save_path):
os.mkdir(args.save_path)
folder = '{}_{}_'.format(args.model_name, args.dataset)
n = len([x for x in os.listdir(args.save_path) if x.startswith(folder)])
folder += str(n)
args.save_path = os.path.join(args.save_path, folder)
if not os.path.exists(args.save_path):
os.makedirs(args.save_path)
log_file = os.path.join(args.save_path, 'train.log')
logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S',
filename=log_file,
filemode='w'
)
logger = logging.getLogger(__name__)
print("Logs are being recorded at: {}".format(log_file))
return logger
def run(args, logger):
# load dataset and samplers
dataset = get_dataset(args.data_path, args.dataset, args.format)
n_entities = dataset.n_entities
n_relations = dataset.n_relations
if args.neg_sample_size_test < 0:
args.neg_sample_size_test = n_entities
train_data = TrainDataset(dataset, args, ranks=args.num_proc)
if args.num_proc > 1:
train_samplers = []
for i in range(args.num_proc):
train_sampler_head = train_data.create_sampler(args.batch_size, args.neg_sample_size,
mode='PBG-head',
num_workers=args.num_worker,
shuffle=True,
exclude_positive=True,
rank=i)
train_sampler_tail = train_data.create_sampler(args.batch_size, args.neg_sample_size,
mode='PBG-tail',
num_workers=args.num_worker,
shuffle=True,
exclude_positive=True,
rank=i)
train_samplers.append(NewBidirectionalOneShotIterator(train_sampler_head, train_sampler_tail,
True, n_entities))
else:
train_sampler_head = train_data.create_sampler(args.batch_size, args.neg_sample_size,
mode='PBG-head',
num_workers=args.num_worker,
shuffle=True,
exclude_positive=True)
train_sampler_tail = train_data.create_sampler(args.batch_size, args.neg_sample_size,
mode='PBG-tail',
num_workers=args.num_worker,
shuffle=True,
exclude_positive=True)
train_sampler = NewBidirectionalOneShotIterator(train_sampler_head, train_sampler_tail,
True, n_entities)
if args.valid or args.test:
eval_dataset = EvalDataset(dataset, args)
if args.valid:
# Here we want to use the regular negative sampler because we need to ensure that
# all positive edges are excluded.
if args.num_proc > 1:
valid_sampler_heads = []
valid_sampler_tails = []
for i in range(args.num_proc):
valid_sampler_head = eval_dataset.create_sampler('valid', args.batch_size_eval,
args.neg_sample_size_valid,
mode='PBG-head',
num_workers=args.num_worker,
rank=i, ranks=args.num_proc)
valid_sampler_tail = eval_dataset.create_sampler('valid', args.batch_size_eval,
args.neg_sample_size_valid,
mode='PBG-tail',
num_workers=args.num_worker,
rank=i, ranks=args.num_proc)
valid_sampler_heads.append(valid_sampler_head)
valid_sampler_tails.append(valid_sampler_tail)
else:
valid_sampler_head = eval_dataset.create_sampler('valid', args.batch_size_eval,
args.neg_sample_size_valid,
mode='PBG-head',
num_workers=args.num_worker,
rank=0, ranks=1)
valid_sampler_tail = eval_dataset.create_sampler('valid', args.batch_size_eval,
args.neg_sample_size_valid,
mode='PBG-tail',
num_workers=args.num_worker,
rank=0, ranks=1)
if args.test:
# Here we want to use the regular negative sampler because we need to ensure that
# all positive edges are excluded.
if args.num_proc > 1:
test_sampler_tails = []
test_sampler_heads = []
for i in range(args.num_proc):
test_sampler_head = eval_dataset.create_sampler('test', args.batch_size_eval,
args.neg_sample_size_test,
mode='PBG-head',
num_workers=args.num_worker,
rank=i, ranks=args.num_proc)
test_sampler_tail = eval_dataset.create_sampler('test', args.batch_size_eval,
args.neg_sample_size_test,
mode='PBG-tail',
num_workers=args.num_worker,
rank=i, ranks=args.num_proc)
test_sampler_heads.append(test_sampler_head)
test_sampler_tails.append(test_sampler_tail)
else:
test_sampler_head = eval_dataset.create_sampler('test', args.batch_size_eval,
args.neg_sample_size_test,
mode='PBG-head',
num_workers=args.num_worker,
rank=0, ranks=1)
test_sampler_tail = eval_dataset.create_sampler('test', args.batch_size_eval,
args.neg_sample_size_test,
mode='PBG-tail',
num_workers=args.num_worker,
rank=0, ranks=1)
# We need to free all memory referenced by dataset.
eval_dataset = None
dataset = None
# load model
model = load_model(logger, args, n_entities, n_relations)
if args.num_proc > 1:
model.share_memory()
# train
start = time.time()
if args.num_proc > 1:
procs = []
for i in range(args.num_proc):
valid_samplers = [valid_sampler_heads[i], valid_sampler_tails[i]] if args.valid else None
proc = mp.Process(target=train, args=(args, model, train_samplers[i], valid_samplers))
procs.append(proc)
proc.start()
for proc in procs:
proc.join()
else:
valid_samplers = [valid_sampler_head, valid_sampler_tail] if args.valid else None
train(args, model, train_sampler, valid_samplers)
print('training takes {} seconds'.format(time.time() - start))
if args.save_emb is not None:
if not os.path.exists(args.save_emb):
os.mkdir(args.save_emb)
model.save_emb(args.save_emb, args.dataset)
# test
if args.test:
if args.num_proc > 1:
procs = []
for i in range(args.num_proc):
proc = mp.Process(target=test, args=(args, model, [test_sampler_heads[i], test_sampler_tails[i]]))
procs.append(proc)
proc.start()
for proc in procs:
proc.join()
else:
test(args, model, [test_sampler_head, test_sampler_tail])
if __name__ == '__main__':
args = ArgParser().parse_args()
# sagemaker related args
num_gpus = int(os.environ['SM_NUM_GPUS'])
if num_gpus == 0:
args.gpu = -1
else:
# only use gpu0 now
args.gpu = 0
# specify model save location
args.save_path = str(os.environ['SM_MODEL_DIR'])
args.save_emb = os.path.join(args.save_path, 'emb')
print(args)
logger = get_logger(args)
run(args, logger)
|
callbacks_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras callbacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import re
import shutil
import tempfile
import threading
import unittest
import numpy as np
from tensorflow.core.framework import summary_pb2
from tensorflow.python import keras
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import adam
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
try:
import requests # pylint:disable=g-import-not-at-top
except ImportError:
requests = None
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
NUM_CLASSES = 2
INPUT_DIM = 3
NUM_HIDDEN = 5
BATCH_SIZE = 5
class KerasCallbacksTest(test.TestCase):
def test_ModelCheckpoint(self):
if h5py is None:
return # Skip test if models cannot be saved.
with self.cached_session():
np.random.seed(1337)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'checkpoint.h5')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
# case 1
monitor = 'val_loss'
save_best_only = False
mode = 'auto'
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 2
mode = 'min'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 3
mode = 'max'
monitor = 'val_acc'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 4
save_best_only = True
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# Case: metric not available.
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor='unknown',
save_best_only=True)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# File won't be written.
assert not os.path.exists(filepath)
# case 5
save_best_only = False
period = 2
mode = 'auto'
filepath = os.path.join(temp_dir, 'checkpoint.{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
period=period)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=4,
verbose=1)
assert os.path.exists(filepath.format(epoch=2))
assert os.path.exists(filepath.format(epoch=4))
os.remove(filepath.format(epoch=2))
os.remove(filepath.format(epoch=4))
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=3))
# Invalid use: this will raise a warning but not an Exception.
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode='unknown')
def test_EarlyStopping(self):
with self.cached_session():
np.random.seed(123)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cases = [
('max', 'val_acc'),
('min', 'val_loss'),
('auto', 'val_acc'),
('auto', 'loss'),
('unknown', 'unknown')
]
for mode, monitor in cases:
patience = 0
cbks = [
keras.callbacks.EarlyStopping(
patience=patience, monitor=monitor, mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
def test_EarlyStopping_reuse(self):
with self.cached_session():
np.random.seed(1337)
patience = 3
data = np.random.random((100, 1))
labels = np.where(data > 0.5, 1, 0)
model = keras.models.Sequential((keras.layers.Dense(
1, input_dim=1, activation='relu'), keras.layers.Dense(
1, activation='sigmoid'),))
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
weights = model.get_weights()
stopper = keras.callbacks.EarlyStopping(monitor='acc', patience=patience)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
# This should allow training to go for at least `patience` epochs
model.set_weights(weights)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_with_baseline(self):
with self.cached_session():
np.random.seed(1337)
baseline = 0.5
(data, labels), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=50,
input_shape=(1,),
num_classes=NUM_CLASSES)
model = testing_utils.get_small_sequential_mlp(
num_hidden=1, num_classes=1, input_dim=1)
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
stopper = keras.callbacks.EarlyStopping(monitor='acc',
baseline=baseline)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) == 1
patience = 3
stopper = keras.callbacks.EarlyStopping(monitor='acc',
patience=patience,
baseline=baseline)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_final_weights_when_restoring_model_weights(self):
class DummyModel(object):
def __init__(self):
self.stop_training = False
self.weights = -1
def get_weights(self):
return self.weights
def set_weights(self, weights):
self.weights = weights
def set_weight_to_epoch(self, epoch):
self.weights = epoch
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
patience=2,
restore_best_weights=True)
early_stop.model = DummyModel()
losses = [0.2, 0.15, 0.1, 0.11, 0.12]
# The best configuration is in epoch 2 (loss = 0.1000).
epochs_trained = 0
early_stop.on_train_begin()
for epoch in range(len(losses)):
epochs_trained += 1
early_stop.model.set_weight_to_epoch(epoch=epoch)
early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
if early_stop.model.stop_training:
break
# The best configuration is in epoch 2 (loss = 0.1000),
# and while patience = 2, we're restoring the best weights,
# so we end up at the epoch with the best weights, i.e. epoch 2
self.assertEqual(early_stop.model.get_weights(), 2)
def test_RemoteMonitor(self):
if requests is None:
return
monitor = keras.callbacks.RemoteMonitor()
# This will raise a warning since the default address is unreachable:
monitor.on_epoch_end(0, logs={'loss': 0.})
def test_LearningRateScheduler(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.2) < keras.backend.epsilon()
cbks = [keras.callbacks.LearningRateScheduler(lambda x, lr: lr / 2)]
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.01 / 4) < keras.backend.epsilon()
@test_util.run_deprecated_v1
def test_ReduceLROnPlateau(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def make_model():
random_seed.set_random_seed(1234)
np.random.seed(1337)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer=keras.optimizers.SGD(lr=0.1))
return model
model = make_model()
# This should reduce the LR after the first epoch (due to high epsilon).
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.1,
min_delta=10,
patience=1,
cooldown=5)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
self.assertAllClose(
float(keras.backend.get_value(model.optimizer.lr)),
0.01,
atol=1e-4)
model = make_model()
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.1,
min_delta=0,
patience=1,
cooldown=5)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=2)
self.assertAllClose(
float(keras.backend.get_value(model.optimizer.lr)), 0.1, atol=1e-4)
def test_ReduceLROnPlateau_patience(self):
class DummyOptimizer(object):
def __init__(self):
self.lr = keras.backend.variable(1.0)
class DummyModel(object):
def __init__(self):
self.optimizer = DummyOptimizer()
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', patience=2)
reduce_on_plateau.model = DummyModel()
losses = [0.0860, 0.1096, 0.1040]
lrs = []
for epoch in range(len(losses)):
reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
lrs.append(keras.backend.get_value(reduce_on_plateau.model.optimizer.lr))
# The learning rates should be 1.0 except the last one
for lr in lrs[:-1]:
self.assertEqual(lr, 1.0)
self.assertLess(lrs[-1], 1.0)
def test_ReduceLROnPlateau_backwards_compatibility(self):
with test.mock.patch.object(logging, 'warning') as mock_log:
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(epsilon=1e-13)
self.assertRegexpMatches(
str(mock_log.call_args), '`epsilon` argument is deprecated')
self.assertFalse(hasattr(reduce_on_plateau, 'epsilon'))
self.assertTrue(hasattr(reduce_on_plateau, 'min_delta'))
self.assertEqual(reduce_on_plateau.min_delta, 1e-13)
def test_CSVLogger(self):
with self.cached_session():
np.random.seed(1337)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'log.tsv')
sep = '\t'
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def make_model():
np.random.seed(1337)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer=keras.optimizers.SGD(lr=0.1),
metrics=['accuracy'])
return model
# case 1, create new file with defined separator
model = make_model()
cbks = [keras.callbacks.CSVLogger(filepath, separator=sep)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
with open(filepath) as csvfile:
dialect = csv.Sniffer().sniff(csvfile.read())
assert dialect.delimiter == sep
del model
del cbks
# case 2, append data to existing file, skip header
model = make_model()
cbks = [keras.callbacks.CSVLogger(filepath, separator=sep, append=True)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# case 3, reuse of CSVLogger object
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
with open(filepath) as csvfile:
list_lines = csvfile.readlines()
for line in list_lines:
assert line.count(sep) == 4
assert len(list_lines) == 5
output = ' '.join(list_lines)
assert len(re.findall('epoch', output)) == 1
os.remove(filepath)
def test_stop_training_csv(self):
# Test that using the CSVLogger callback with the TerminateOnNaN callback
# does not result in invalid CSVs.
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
with self.cached_session():
fp = os.path.join(tmpdir, 'test.csv')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
cbks = [keras.callbacks.TerminateOnNaN(), keras.callbacks.CSVLogger(fp)]
model = keras.models.Sequential()
for _ in range(5):
model.add(keras.layers.Dense(2, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='linear'))
model.compile(loss='mean_squared_error',
optimizer='rmsprop')
def data_generator():
i = 0
max_batch_index = len(x_train) // BATCH_SIZE
tot = 0
while 1:
if tot > 3 * len(x_train):
yield (np.ones([BATCH_SIZE, INPUT_DIM]) * np.nan,
np.ones([BATCH_SIZE, NUM_CLASSES]) * np.nan)
else:
yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
i += 1
tot += 1
i %= max_batch_index
history = model.fit_generator(data_generator(),
len(x_train) // BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
assert len(loss) > 1
assert loss[-1] == np.inf or np.isnan(loss[-1])
values = []
with open(fp) as f:
for x in csv.reader(f):
          # On Windows, \r\n line endings may cause empty lines to be read
          # after each row. Skip empty lines.
if x:
values.append(x)
assert 'nan' in values[-1], 'The last epoch was not logged.'
def test_TerminateOnNaN(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
cbks = [keras.callbacks.TerminateOnNaN()]
model = keras.models.Sequential()
initializer = keras.initializers.Constant(value=1e5)
for _ in range(5):
model.add(
keras.layers.Dense(
2,
input_dim=INPUT_DIM,
activation='relu',
kernel_initializer=initializer))
model.add(keras.layers.Dense(NUM_CLASSES))
model.compile(loss='mean_squared_error', optimizer='rmsprop')
history = model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
self.assertEqual(len(loss), 1)
self.assertEqual(loss[0], np.inf)
@test_util.run_deprecated_v1
def test_TensorBoard(self):
np.random.seed(1337)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def data_generator(train):
if train:
max_batch_index = len(x_train) // BATCH_SIZE
else:
max_batch_index = len(x_test) // BATCH_SIZE
i = 0
while 1:
if train:
yield (x_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
y_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])
else:
yield (x_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
y_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])
i += 1
i %= max_batch_index
# case: Sequential
with self.cached_session():
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
# non_trainable_weights: moving_variance, moving_mean
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
tsb = keras.callbacks.TensorBoard(
log_dir=temp_dir, histogram_freq=1, write_images=True,
write_grads=True, batch_size=5)
cbks = [tsb]
# fit with validation data
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=3,
verbose=0)
# fit with validation data and accuracy
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
# fit generator with validation data
model.fit_generator(
data_generator(True),
len(x_train),
epochs=2,
validation_data=(x_test, y_test),
callbacks=cbks,
verbose=0)
# fit generator without validation data
# histogram_freq must be zero
tsb.histogram_freq = 0
model.fit_generator(
data_generator(True),
len(x_train),
epochs=2,
callbacks=cbks,
verbose=0)
# fit generator with validation data and accuracy
tsb.histogram_freq = 1
model.fit_generator(
data_generator(True),
len(x_train),
epochs=2,
validation_data=(x_test, y_test),
callbacks=cbks,
verbose=0)
# fit generator without validation data and accuracy
tsb.histogram_freq = 0
model.fit_generator(
data_generator(True), len(x_train), epochs=2, callbacks=cbks)
assert os.path.exists(temp_dir)
@test_util.run_deprecated_v1
def test_TensorBoard_histogram_freq_must_have_validation_data(self):
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
with self.cached_session():
filepath = os.path.join(tmpdir, 'logs')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def data_generator(train):
if train:
max_batch_index = len(x_train) // BATCH_SIZE
else:
max_batch_index = len(x_test) // BATCH_SIZE
i = 0
while 1:
if train:
# simulate multi-input/output models
yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
else:
yield (x_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
y_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
i += 1
i %= max_batch_index
inp = keras.Input((INPUT_DIM,))
hidden = keras.layers.Dense(2, activation='relu')(inp)
hidden = keras.layers.Dropout(0.1)(hidden)
output = keras.layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
model = keras.models.Model(inputs=inp, outputs=output)
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# we must generate new callbacks for each test, as they aren't stateless
def callbacks_factory(histogram_freq):
return [keras.callbacks.TensorBoard(
log_dir=filepath,
histogram_freq=histogram_freq,
write_images=True, write_grads=True,
batch_size=5)]
# fit w/o validation data should raise ValueError if histogram_freq > 0
cbs = callbacks_factory(histogram_freq=1)
with self.assertRaises(ValueError):
model.fit(
x_train, y_train, batch_size=BATCH_SIZE, callbacks=cbs, epochs=3)
for cb in cbs:
cb.on_train_end()
# fit generator without validation data should raise ValueError if
# histogram_freq > 0
cbs = callbacks_factory(histogram_freq=1)
with self.assertRaises(ValueError):
model.fit_generator(
data_generator(True), len(x_train), epochs=2, callbacks=cbs)
for cb in cbs:
cb.on_train_end()
# Make sure file writer cache is clear to avoid failures during cleanup.
writer_cache.FileWriterCache.clear()
@test_util.run_deprecated_v1
def test_TensorBoard_multi_input_output(self):
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
with self.cached_session():
filepath = os.path.join(tmpdir, 'logs')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def data_generator(train):
if train:
max_batch_index = len(x_train) // BATCH_SIZE
else:
max_batch_index = len(x_test) // BATCH_SIZE
i = 0
while 1:
if train:
# simulate multi-input/output models
yield ([x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2,
[y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2)
else:
yield ([x_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2,
[y_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2)
i += 1
i %= max_batch_index
inp1 = keras.Input((INPUT_DIM,))
inp2 = keras.Input((INPUT_DIM,))
inp = keras.layers.add([inp1, inp2])
hidden = keras.layers.Dense(2, activation='relu')(inp)
hidden = keras.layers.Dropout(0.1)(hidden)
output1 = keras.layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
output2 = keras.layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
model = keras.models.Model([inp1, inp2], [output1, output2])
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# we must generate new callbacks for each test, as they aren't stateless
def callbacks_factory(histogram_freq):
return [keras.callbacks.TensorBoard(log_dir=filepath,
histogram_freq=histogram_freq,
write_images=True, write_grads=True,
batch_size=5)]
# fit without validation data
model.fit([x_train] * 2, [y_train] * 2, batch_size=BATCH_SIZE,
callbacks=callbacks_factory(histogram_freq=0), epochs=3)
# fit with validation data and accuracy
model.fit([x_train] * 2, [y_train] * 2, batch_size=BATCH_SIZE,
validation_data=([x_test] * 2, [y_test] * 2),
callbacks=callbacks_factory(histogram_freq=1), epochs=2)
# fit generator without validation data
model.fit_generator(data_generator(True), len(x_train), epochs=2,
callbacks=callbacks_factory(histogram_freq=0))
# fit generator with validation data and accuracy
model.fit_generator(data_generator(True), len(x_train), epochs=2,
validation_data=([x_test] * 2, [y_test] * 2),
callbacks=callbacks_factory(histogram_freq=1))
assert os.path.isdir(filepath)
@test_util.run_deprecated_v1
def test_Tensorboard_histogram_summaries_in_test_function(self):
class FileWriterStub(object):
def __init__(self, logdir, graph=None):
self.logdir = logdir
self.graph = graph
self.steps_seen = []
def add_summary(self, summary, global_step):
summary_obj = summary_pb2.Summary()
# ensure a valid Summary proto is being sent
if isinstance(summary, bytes):
summary_obj.ParseFromString(summary)
else:
assert isinstance(summary, summary_pb2.Summary)
summary_obj = summary
# keep track of steps seen for the merged_summary op,
# which contains the histogram summaries
if len(summary_obj.value) > 1:
self.steps_seen.append(global_step)
def flush(self):
pass
def close(self):
pass
def _init_writer(obj):
obj.writer = FileWriterStub(obj.log_dir)
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
with self.cached_session():
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
# non_trainable_weights: moving_variance, moving_mean
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
keras.callbacks.TensorBoard._init_writer = _init_writer
tsb = keras.callbacks.TensorBoard(
log_dir=tmpdir,
histogram_freq=1,
write_images=True,
write_grads=True,
batch_size=5)
cbks = [tsb]
# fit with validation data
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=3,
verbose=0)
self.assertAllEqual(tsb.writer.steps_seen, [0, 0.5, 1, 1.5, 2, 2.5])
@test_util.run_deprecated_v1
def test_Tensorboard_histogram_summaries_with_generator(self):
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
def generator():
x = np.random.randn(10, 100).astype(np.float32)
y = np.random.randn(10, 10).astype(np.float32)
while True:
yield x, y
with self.cached_session():
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=10, input_dim=100)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
tsb = keras.callbacks.TensorBoard(
log_dir=tmpdir,
histogram_freq=1,
write_images=True,
write_grads=True,
batch_size=5)
cbks = [tsb]
# fit with validation generator
model.fit_generator(
generator(),
steps_per_epoch=2,
epochs=2,
validation_data=generator(),
validation_steps=2,
callbacks=cbks,
verbose=0)
with self.assertRaises(ValueError):
# fit with validation generator but no
# validation_steps
model.fit_generator(
generator(),
steps_per_epoch=2,
epochs=2,
validation_data=generator(),
callbacks=cbks,
verbose=0)
self.assertTrue(os.path.exists(tmpdir))
@unittest.skipIf(
os.name == 'nt',
'use_multiprocessing=True does not work on windows properly.')
def test_LambdaCallback(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
      # Start an arbitrary thread that should run during model
      # training and be terminated after training has completed.
e = threading.Event()
def target():
e.wait()
t = threading.Thread(target=target)
t.start()
cleanup_callback = keras.callbacks.LambdaCallback(
on_train_end=lambda logs: e.set())
cbks = [cleanup_callback]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
t.join()
assert not t.is_alive()
def test_TensorBoard_with_ReduceLROnPlateau(self):
with self.cached_session():
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', factor=0.5, patience=4, verbose=1),
keras.callbacks.TensorBoard(log_dir=temp_dir)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
assert os.path.exists(temp_dir)
@test_util.run_deprecated_v1
def test_Tensorboard_batch_logging(self):
class FileWriterStub(object):
def __init__(self, logdir, graph=None):
self.logdir = logdir
self.graph = graph
self.batches_logged = []
self.summary_values = []
self.summary_tags = []
def add_summary(self, summary, step):
self.summary_values.append(summary.value[0].simple_value)
self.summary_tags.append(summary.value[0].tag)
self.batches_logged.append(step)
def flush(self):
pass
def close(self):
pass
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
tb_cbk = keras.callbacks.TensorBoard(temp_dir, update_freq='batch')
tb_cbk.writer = FileWriterStub(temp_dir)
for batch in range(5):
tb_cbk.on_batch_end(batch, {'acc': batch})
self.assertEqual(tb_cbk.writer.batches_logged, [0, 1, 2, 3, 4])
self.assertEqual(tb_cbk.writer.summary_values, [0., 1., 2., 3., 4.])
self.assertEqual(tb_cbk.writer.summary_tags, ['batch_acc'] * 5)
@test_util.run_deprecated_v1
def test_Tensorboard_epoch_and_batch_logging(self):
class FileWriterStub(object):
def __init__(self, logdir, graph=None):
self.logdir = logdir
self.graph = graph
def add_summary(self, summary, step):
if 'batch_' in summary.value[0].tag:
self.batch_summary = (step, summary)
elif 'epoch_' in summary.value[0].tag:
self.epoch_summary = (step, summary)
def flush(self):
pass
def close(self):
pass
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
tb_cbk = keras.callbacks.TensorBoard(temp_dir, update_freq='batch')
tb_cbk.writer = FileWriterStub(temp_dir)
tb_cbk.on_batch_end(0, {'acc': 5.0})
batch_step, batch_summary = tb_cbk.writer.batch_summary
self.assertEqual(batch_step, 0)
self.assertEqual(batch_summary.value[0].simple_value, 5.0)
tb_cbk = keras.callbacks.TensorBoard(temp_dir, update_freq='epoch')
tb_cbk.writer = FileWriterStub(temp_dir)
tb_cbk.on_epoch_end(0, {'acc': 10.0})
epoch_step, epoch_summary = tb_cbk.writer.epoch_summary
self.assertEqual(epoch_step, 0)
self.assertEqual(epoch_summary.value[0].simple_value, 10.0)
@test_util.run_in_graph_and_eager_modes
def test_Tensorboard_eager(self):
temp_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='binary_crossentropy',
optimizer=adam.AdamOptimizer(0.01),
metrics=['accuracy'])
cbks = [keras.callbacks.TensorBoard(log_dir=temp_dir)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
self.assertTrue(os.path.exists(temp_dir))
@test_util.run_deprecated_v1
def test_TensorBoard_update_freq(self):
class FileWriterStub(object):
def __init__(self, logdir, graph=None):
self.logdir = logdir
self.graph = graph
self.batch_summaries = []
self.epoch_summaries = []
def add_summary(self, summary, step):
if 'batch_' in summary.value[0].tag:
self.batch_summaries.append((step, summary))
elif 'epoch_' in summary.value[0].tag:
self.epoch_summaries.append((step, summary))
def flush(self):
pass
def close(self):
pass
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
# Epoch mode
tb_cbk = keras.callbacks.TensorBoard(temp_dir, update_freq='epoch')
tb_cbk.writer = FileWriterStub(temp_dir)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 1})
self.assertEqual(tb_cbk.writer.batch_summaries, [])
tb_cbk.on_epoch_end(0, {'acc': 10.0, 'size': 1})
self.assertEqual(len(tb_cbk.writer.epoch_summaries), 1)
# Batch mode
tb_cbk = keras.callbacks.TensorBoard(temp_dir, update_freq='batch')
tb_cbk.writer = FileWriterStub(temp_dir)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 1})
self.assertEqual(len(tb_cbk.writer.batch_summaries), 1)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 1})
self.assertEqual(len(tb_cbk.writer.batch_summaries), 2)
self.assertFalse(tb_cbk.writer.epoch_summaries)
# Integer mode
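    # With an integer update_freq, a summary is written once every `update_freq`
    # samples; each batch below reports a size of 10, so with update_freq=20 a
    # summary is expected after every second batch.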
tb_cbk = keras.callbacks.TensorBoard(temp_dir, update_freq=20)
tb_cbk.writer = FileWriterStub(temp_dir)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10})
self.assertFalse(tb_cbk.writer.batch_summaries)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10})
self.assertEqual(len(tb_cbk.writer.batch_summaries), 1)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10})
self.assertEqual(len(tb_cbk.writer.batch_summaries), 1)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10})
self.assertEqual(len(tb_cbk.writer.batch_summaries), 2)
tb_cbk.on_batch_end(0, {'acc': 10.0, 'size': 10})
self.assertEqual(len(tb_cbk.writer.batch_summaries), 2)
self.assertFalse(tb_cbk.writer.epoch_summaries)
def test_RemoteMonitorWithJsonPayload(self):
if requests is None:
self.skipTest('`requests` required to run this test')
with self.cached_session():
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.np_utils.to_categorical(y_test)
y_train = keras.utils.np_utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [keras.callbacks.RemoteMonitor(send_as_json=True)]
with test.mock.patch.object(requests, 'post'):
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1)
@test_util.run_deprecated_v1
def test_fit_generator_with_callback(self):
class TestCallback(keras.callbacks.Callback):
def set_model(self, model):
        # Check the model's graph for the optimizer operations that
        # _make_train_function adds under a named scope for the
        # optimizer. This ensures the full model is populated before the
        # set_model callback is called.
optimizer_name_scope = 'training/' + model.optimizer.__class__.__name__
graph_def = ops.get_default_graph().as_graph_def()
for node in graph_def.node:
if node.name.startswith(optimizer_name_scope):
return
raise RuntimeError('The optimizer operations are not present in the '
'model graph when the Callback.set_model function '
'is called')
np.random.seed(1337)
def generator():
x = np.random.randn(10, 100).astype(np.float32)
y = np.random.randn(10, 10).astype(np.float32)
while True:
yield x, y
with self.cached_session():
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=10, input_dim=100)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
model.fit_generator(
generator(),
steps_per_epoch=2,
epochs=1,
validation_data=generator(),
validation_steps=2,
callbacks=[TestCallback()],
verbose=0)
if __name__ == '__main__':
test.main()
|
main.py
|
from drivebuildclient.AIExchangeService import AIExchangeService
from drivebuildclient.aiExchangeMessages_pb2 import SimulationID, VehicleID
service = AIExchangeService("defender.fim.uni-passau.de", 8383)
ego = VehicleID()
ego.vid = "ego"
non_ego = VehicleID()
non_ego.vid = "nonEgo"
def start(sid: SimulationID, vid: VehicleID) -> None:
from drivebuildclient.aiExchangeMessages_pb2 import SimStateResponse, DataRequest
request = DataRequest()
request.request_ids.extend([vid.vid + "Position"])
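    # Poll the simulator: each cycle waits until the AI may act, requests and
    # prints the vehicle's position data, and stops once the simulation is no
    # longer RUNNING.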
while True:
sim_state = service.wait_for_simulator_request(sid, vid)
data = service.request_data(sid, vid, request)
print(data)
if sim_state != SimStateResponse.SimState.RUNNING:
break
def main() -> None:
from pathlib import Path
from threading import Thread
submission_result = service.run_tests("test", "test", Path("criteria0.dbc.xml"), Path("environment.dbe.xml"))
if submission_result and submission_result.submissions:
for _, sid in submission_result.submissions.items():
ego_thread = Thread(target=start, args=(sid, ego))
ego_thread.start()
non_ego_thread = Thread(target=start, args=(sid, non_ego))
non_ego_thread.start()
ego_thread.join()
non_ego_thread.join()
print(service.get_result(sid))
else:
print("Submitted tests were invalid.")
print(submission_result.message.message)
if __name__ == "__main__":
main()
|
processes.py
|
from subprocess import PIPE, STDOUT, CalledProcessError, Popen
from threading import Thread
class ProcessesContextManager:
"""A context manager that kills any processes given to it on exit from its context."""
def __init__(self, processes):
self.processes = processes
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
for process in self.processes:
process.kill()
def run_subprocess_and_log_stdout_and_stderr(command, logger, log_level="info", *args, **kwargs):
"""Run a subprocess, sending its stdout and stderr output to the given logger. Extra `args` and `kwargs` are
    passed on to the `subprocess.Popen` call.
:param iter(str) command: command to run
:param logging.Logger logger: logger to use to log stdout and stderr
:param str log_level: level to log output at
:raise CalledProcessError: if the subprocess fails (i.e. if it doesn't exit with a 0 return code)
    :return subprocess.Popen: the process once it has finished
"""
def _log_lines_from_stream(stream, logger):
"""Log lines from the given stream.
:param io.BufferedReader stream:
:param logging.Logger logger:
:return None:
"""
with stream:
for line in iter(stream.readline, b""):
getattr(logger, log_level.lower())(line.decode().strip())
process = Popen(command, stdout=PIPE, stderr=STDOUT, *args, **kwargs)
Thread(target=_log_lines_from_stream, args=[process.stdout, logger]).start()
process.wait()
if process.returncode != 0:
raise CalledProcessError(returncode=process.returncode, cmd=" ".join(command))
return process
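# Usage sketch (added for illustration; not part of the original module). The
# command names and logger below are placeholder assumptions for a Unix-like
# environment; adjust them for the commands you actually want to run.
if __name__ == "__main__":
    import logging

    logging.basicConfig(level=logging.INFO)
    demo_logger = logging.getLogger("subprocess-demo")

    # Stream the output of a short command into the logger at INFO level.
    run_subprocess_and_log_stdout_and_stderr(["echo", "hello"], demo_logger)

    # Any process handed to the context manager is killed when the block exits.
    with ProcessesContextManager([Popen(["sleep", "60"])]):
        pass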
|
ui.py
|
import wx
import cv2
import numpy as np
import core
import matplotlib.pyplot as plt
from multiprocessing import Process
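# Module-level state shared between the two panels:
#   IMAGE        - the image as loaded from disk (converted to RGB)
#   IMAGE_BUFFER - the working copy that the filter buttons modify and that
#                  PanelWithImage draws
#   ANALYSIS     - the result of the automatic segmentation, later fed to the
#                  connected-component analysis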
IMAGE = None
IMAGE_BUFFER = None
ANALYSIS = None
class AppFrame(wx.Frame):
def __init__(self, parent, title):
super().__init__(parent, title=title, size=(1400, 900))
self.initialize_GUI()
def initialize_GUI(self):
self.sp = wx.SplitterWindow(self, style=wx.SP_3DBORDER)
self.p1 = PanelWithButtons(self.sp)
self.p2 = PanelWithImage(self.sp)
self.sp.SplitVertically(self.p1, self.p2, 300)
menubar = wx.MenuBar()
filemenu = wx.Menu()
fileopen = filemenu.Append(wx.ID_OPEN, "Open", "Open an image")
filequit = filemenu.Append(wx.ID_EXIT, "Quit", "Quit application")
menubar.Append(filemenu, "&File")
self.SetMenuBar(menubar)
self.Bind(wx.EVT_MENU, self.OnBrowse, fileopen)
self.Bind(wx.EVT_MENU, self.OnQuit, filequit)
self.Centre()
def OnQuit(self, e):
self.Close()
def OnBrowse(self, e):
        with wx.FileDialog(None, "Choose a file", style=wx.FD_OPEN) as dialog:
if dialog.ShowModal() == wx.ID_OK:
self.image_path = dialog.GetPaths()
self.load_file()
def load_file(self):
global IMAGE, IMAGE_BUFFER
img = cv2.imread(self.image_path[0])
IMAGE = cv2.cvtColor(np.uint8(img), cv2.COLOR_BGR2RGB)
IMAGE_BUFFER = IMAGE.copy()
frame.p2.Refresh()
class PanelWithButtons(wx.Panel):
def __init__(self, parent):
super().__init__(parent)
vbox = wx.BoxSizer(orient=wx.VERTICAL)
b1 = wx.Button(self, label="Geometric filter")
b2 = wx.Button(self, label="Erosion")
b3 = wx.Button(self, label="Dilation")
b4 = wx.Button(self, label="Averaging")
b5 = wx.Button(self, label="Gamma")
b6 = wx.Button(self, label="Logarithmic")
b7 = wx.Button(self, label="Run automatic segmentation")
b8 = wx.Button(self, label="Draw area histogram")
vbox.Add(b1, wx.ID_ANY, wx.EXPAND | wx.ALL, 20)
vbox.Add(b2, wx.ID_ANY, wx.EXPAND | wx.ALL, 20)
vbox.Add(b3, wx.ID_ANY, wx.EXPAND | wx.ALL, 20)
vbox.Add(b4, wx.ID_ANY, wx.EXPAND | wx.ALL, 20)
vbox.Add(b5, wx.ID_ANY, wx.EXPAND | wx.ALL, 20)
vbox.Add(b6, wx.ID_ANY, wx.EXPAND | wx.ALL, 20)
vbox.Add(b7, wx.ID_ANY, wx.EXPAND | wx.ALL, 20)
vbox.Add(b8, wx.ID_ANY, wx.EXPAND | wx.ALL, 20)
self.SetSizer(vbox)
self.Bind(wx.EVT_BUTTON, self.OnGeometricPress, b1)
self.Bind(wx.EVT_BUTTON, self.OnErosionPress, b2)
self.Bind(wx.EVT_BUTTON, self.OnDilationPress, b3)
self.Bind(wx.EVT_BUTTON, self.OnAveragePress, b4)
self.Bind(wx.EVT_BUTTON, self.OnGammaPress, b5)
self.Bind(wx.EVT_BUTTON, self.OnLogarithmicPress, b6)
self.Bind(wx.EVT_BUTTON, self.OnSegmentationPress, b7)
self.Bind(wx.EVT_BUTTON, self.OnHistogramPress, b8)
self.SetBackgroundColour((225, 225, 225))
def OnErosionPress(self, e):
print("Erosion")
global IMAGE_BUFFER
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
IMAGE_BUFFER = core.apply_erosion(IMAGE_BUFFER, kernel)
frame.p2.Refresh()
def OnDilationPress(self, e):
print("Dilation")
global IMAGE_BUFFER
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
IMAGE_BUFFER = core.apply_dilation(IMAGE_BUFFER, kernel)
frame.p2.Refresh()
def OnGeometricPress(self, e):
print("Geometric")
global IMAGE_BUFFER
IMAGE_BUFFER = core.apply_geometric_spatial_filter(IMAGE_BUFFER, (3, 3))
frame.p2.Refresh()
def OnAveragePress(self, e):
print("Average")
global IMAGE_BUFFER
IMAGE_BUFFER = core.apply_averaging_spatial_filter(IMAGE_BUFFER, (4, 4))
frame.p2.Refresh()
def OnGammaPress(self, e):
print("Gamma")
global IMAGE_BUFFER
IMAGE_BUFFER = core.apply_gamma_transform(IMAGE_BUFFER, 1, 2)
frame.p2.Refresh()
def OnLogarithmicPress(self, e):
print("Logarithmic")
global IMAGE_BUFFER
IMAGE_BUFFER = core.apply_log_transform(IMAGE_BUFFER, 1)
frame.p2.Refresh()
def OnSegmentationPress(self, e):
print("Segmentation")
global IMAGE_BUFFER, ANALYSIS
ANALYSIS, IMAGE_BUFFER = core.run_automatic_segmentation(IMAGE_BUFFER)
frame.p2.Refresh()
def OnHistogramPress(self, e):
print("Histogram")
global ANALYSIS
total_count, raw_count = core.analyze_connected_components(ANALYSIS)
print(total_count)
        def draw_histogram(counts):
            plt.hist(counts)
            plt.show()
        # Draw the histogram in a separate process so the blocking plt.show()
        # call does not freeze the wx event loop.
        p = Process(target=draw_histogram, args=(total_count,))
        p.start()
class PanelWithImage(wx.Panel):
def __init__(self, parent):
super().__init__(parent)
        self.Bind(wx.EVT_PAINT, self.on_paint)
self.SetBackgroundColour((225, 225, 225))
def on_paint(self, e):
        canvas = wx.PaintDC(self)
        if IMAGE_BUFFER is None:
            return  # nothing has been loaded yet
        h, w = IMAGE_BUFFER.shape[:2]
bmp = wx.Bitmap.FromBuffer(w, h, IMAGE_BUFFER)
canvas.DrawBitmap(bmp, 30, 20)
if __name__ == "__main__":
app = wx.App()
frame = AppFrame(None, "Decryptococcus")
frame.Show()
app.MainLoop()
|
main.py
|
# DIY Async
# Examples of async use: network services (e.g., reading a web API), database retrieval
# def countdown(n):
# while n > 0:
# print('Down', n)
# time.sleep(1)
# n -= 1
#
#
# def countup(stop):
# x = 0
# while x < stop:
# print('Up', x)
# time.sleep(1)
# x += 1
# Sequential execution
# countdown(5)
# countup(5)
# Concurrent execution
# Classic solution: use threads
# import threading
#
# threading.Thread(target=countdown, args=(5,)).start()
# threading.Thread(target=countup, args=(5,)).start()
# Note on threads in Python: as in C, they are real hardware (POSIX) threads,
# but the GIL (Global Interpreter Lock) prevents Python threads from running in
# parallel; they can only execute on one CPU at a time.
# Suppose we don't have threads: how do you get concurrency without them?
# Why do it? Answers: scaling and control (threads can't be killed or cancelled once started).
# Issue: figure out how to switch between tasks, i.e. how to interrupt the loops.
# Approach below: scheduling of callback funcs.
import time
from collections import deque
import heapq
class Scheduler:
def __init__(self):
self.ready = deque() # Functions ready to execute
self.sleeping = [] # Sleeping functions
self.sequence = 0 # sequence number avoids case when deadlines are identical
def call_soon(self, func):
self.ready.append(func)
def call_later(self, delay, func):
self.sequence += 1
deadline = time.time() + delay # Expiration time
# priority queue
heapq.heappush(self.sleeping, (deadline, self.sequence, func))
# self.sleeping.append((deadline, func))
# self.sleeping.sort() # Sort by closest deadline
def run(self):
while self.ready or self.sleeping:
if not self.ready:
# Find the nearest deadline
# Use of heapq is more efficient and includes the sorting bit
deadline, _, func = heapq.heappop(self.sleeping)
# deadline, func = self.sleeping.pop(0)
delta = deadline - time.time()
if delta > 0:
time.sleep(delta)
self.ready.append(func)
while self.ready:
func = self.ready.popleft()
func()
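# run() keeps draining the ready queue; when only sleeping callbacks remain, it
# sleeps until the nearest deadline and then moves that callback onto the ready queue.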
sched = Scheduler() # Behind the scenes scheduler object
# countdown is effectively a recursive call driven by the scheduler: each step re-schedules itself via call_later
def countdown(n):
if n > 0:
print('Down', n)
# time.sleep(4) # Blocking call (nothing else can run until sleep finishes)
sched.call_later(4, lambda: countdown(n - 1))
# sched.call_soon(lambda: countdown(5))
# sched.run()
def countup(stop, x=0):  # x was first made a default arg because it carries internal state
    def _run(x):  # ...then replaced by recursively calling _run with the next value
if x < stop:
print('Up', x)
# time.sleep(1) Also switch from call_soon to call_later
sched.call_later(1, lambda: _run(x + 1))
_run(0)
sched.call_soon(lambda: countdown(5))
sched.call_soon(lambda: countup(20)) # arg of 5 to 20 since up is running much faster than down
sched.run()
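# Expected behaviour: 'Up' lines appear roughly every second while 'Down' lines
# appear every four seconds, interleaved by the scheduler without any threads.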
|
driver 4-4-2019.py
|
import tkinter as tk, threading
from tkinter import *
import tkinter.font
from tkinter import filedialog
from tkinter.font import *
import imageio
from imageio import *
from PIL import *
import cv2
from cv2 import *
import PIL
from PIL import Image, ImageTk
import os, sys
import time
from time import *
import json
from json import *
import requests
from requests import *
import moviepy
from moviepy import *
import moviepy.editor
from moviepy.editor import VideoFileClip
import matplotlib
from matplotlib import *
# import matplotlib.pyplot
# from matplotlib.pyplot import *
import math
from math import *
import numpy
from numpy import *
mainFile = None
fileName = None
directory = None
frames = None
video = None
video2 = None
width = None #the width of the video (note: never assigned in this file; checkPerson() expects it to be set)
class Application(tk.Frame):
def __init__(self, master=None):
super().__init__(master)
self.master = master
master.title("Bowling Analysis")
master.resizable(False, False)
self.pack()
self.create_widgets()
def create_widgets(self):
self.categoryFont = Font(family="Times New Roman", size=14, underline=True)
self.normalFont = Font(family="Times New Roman", size=12, underline=False)
self.buttonFont = Font(family="Times New Roman", size=16, underline=False)
self.labelInfo = Label(root, text="Please Enter Information:", font=self.categoryFont)
self.labelInfo.pack()
self.labelInfo.place(x=20, y=10, anchor=NW)
self.labelName = Label(root, text="Name:", font=self.normalFont)
self.labelName.pack()
self.labelName.place(x=20, y=40, anchor=NW)
self.labelGender = Label(root, text="Gender:", font=self.normalFont)
self.labelGender.pack()
self.labelGender.place(x=20, y=70, anchor=NW)
self.labelAge = Label(root, text="Age:", font=self.normalFont)
self.labelAge.pack()
self.labelAge.place(x=20, y=100, anchor=NW)
self.labelHeight = Label(root, text="Height:", font=self.normalFont)
self.labelHeight.pack()
self.labelHeight.place(x=20, y=130, anchor=NW)
self.boxName = Text(root, height=1, width=14)
self.boxName.pack()
self.boxName.place(x=95, y=43, anchor=NW)
genderChoices = ["Male", "Female"]
genderMF = StringVar(root)
genderMF.set("Male")
self.boxGender = OptionMenu(root, genderMF, *genderChoices)
self.boxGender.config(width=12)
self.boxGender.pack()
self.boxGender.place(x=95, y=68, anchor=NW)
self.boxAge = Text(root, height=1, width=14)
self.boxAge.pack()
self.boxAge.place(x=95, y=103, anchor=NW)
self.labelFeet = Label(root, text="FT:", font=self.normalFont)
self.labelFeet.pack()
self.labelFeet.place(x=95, y=130, anchor=NW)
self.labelInches = Label(root, text="IN:", font=self.normalFont)
self.labelInches.pack()
self.labelInches.place(x=160, y=130, anchor=NW)
self.boxFeet = Text(root, height=1, width=2)
self.boxFeet.pack()
self.boxFeet.place(x=125, y=133, anchor=NW)
self.boxInches = Text(root, height=1, width=2)
self.boxInches.pack()
self.boxInches.place(x=190, y=133, anchor=NW)
self.startButton = Button(root, height=2, width=15, text='Start', font=self.buttonFont, command=playVideo)
self.startButton.pack()
self.startButton.place(x=20, y=200)
self.restartButton = Button(root, height=2, width=15, text='Restart', font=self.buttonFont,
command=restartVideo)
self.restartButton.pack()
self.restartButton.place(x=225, y=200)
self.fileButton = Button(root, height=1, width=4, text='File:', font=self.normalFont, command=selectFile)
self.fileButton.pack()
self.fileButton.place(x=230, y=36, anchor=NW)
self.fileBox = Text(root, height=1, width=14)
self.fileBox.pack()
self.fileBox.place(x=305, y=43, anchor=NW)
self.labelFrames = Label(root, text="Frames:", font=self.normalFont)
self.labelFrames.pack()
self.labelFrames.place(x=230, y=70, anchor=NW)
self.framesBox = Text(root, height=1, width=14)
self.framesBox.pack()
self.framesBox.place(x=305, y=73, anchor=NW)
self.labelPerceived = Label(root, text="Perceived Values:", font=self.categoryFont)
self.labelPerceived.pack()
self.labelPerceived.place(x=500, y=10, anchor=NW)
self.labelRknee = Label(root, text="Right Leg Angle at Release:", font=self.normalFont)
self.labelRknee.pack()
self.labelRknee.place(x=500, y=40, anchor=NW)
self.answerRknee = Label(root, text="", font=self.normalFont)
self.answerRknee.pack()
self.answerRknee.place(x=725, y=40, anchor=NE)
self.labelRelbow = Label(root, text="Right Arm Angle at Release:", font=self.normalFont)
self.labelRelbow.pack()
self.labelRelbow.place(x=500, y=70, anchor=NW)
self.answerRelbow = Label(root, text="", font=self.normalFont)
self.answerRelbow.pack()
self.answerRelbow.place(x=725, y=70, anchor=NE)
self.labelLknee = Label(root, text="Left Leg Angle at Release:", font=self.normalFont)
self.labelLknee.pack()
self.labelLknee.place(x=500, y=100, anchor=NW)
self.answerLknee = Label(root, text="", font=self.normalFont)
self.answerLknee.pack()
self.answerLknee.place(x=725, y=100, anchor=NE)
self.labelLelbow = Label(root, text="Left Arm Angle at Release:", font=self.normalFont)
self.labelLelbow.pack()
self.labelLelbow.place(x=500, y=130, anchor=NW)
self.answerLelbow = Label(root, text="", font=self.normalFont)
self.answerLelbow.pack()
self.answerLelbow.place(x=725, y=130, anchor=NE)
self.labelBack = Label(root, text="Back Angle at Release:", font=self.normalFont)
self.labelBack.pack()
self.labelBack.place(x=500, y=160, anchor=NW)
self.answerBack = Label(root, text="", font=self.normalFont)
self.answerBack.pack()
self.answerBack.place(x=725, y=160, anchor=NE)
self.labelRkneeLS = Label(root, text="Right Leg Angle at Last Step:", font=self.normalFont)
self.labelRkneeLS.pack()
self.labelRkneeLS.place(x=750, y=40, anchor=NW)
self.answerRkneeLS = Label(root, text="", font=self.normalFont)
self.answerRkneeLS.pack()
self.answerRkneeLS.place(x=1000, y=40, anchor=NE)
self.labelRelbowLS = Label(root, text="Right Arm Angle at Last Step:", font=self.normalFont)
self.labelRelbowLS.pack()
self.labelRelbowLS.place(x=750, y=70, anchor=NW)
self.answerRelbowLS = Label(root, text="", font=self.normalFont)
self.answerRelbowLS.pack()
self.answerRelbowLS.place(x=1000, y=70, anchor=NE)
self.labelLkneeLS = Label(root, text="Left Leg Angle at Last Step:", font=self.normalFont)
self.labelLkneeLS.pack()
self.labelLkneeLS.place(x=750, y=100, anchor=NW)
self.answerLkneeLS = Label(root, text="", font=self.normalFont)
self.answerLkneeLS.pack()
self.answerLkneeLS.place(x=1000, y=100, anchor=NE)
self.labelLelbowLS = Label(root, text="Left Arm Angle at Last Step", font=self.normalFont)
self.labelLelbowLS.pack()
self.labelLelbowLS.place(x=750, y=130, anchor=NW)
self.answerLelbowLS = Label(root, text="", font=self.normalFont)
self.answerLelbowLS.pack()
self.answerLelbowLS.place(x=1000, y=130, anchor=NE)
self.labelBackLS = Label(root, text="Back Angle at Last Step:", font=self.normalFont)
self.labelBackLS.pack()
self.labelBackLS.place(x=750, y=160, anchor=NW)
self.answerBackLS = Label(root, text="", font=self.normalFont)
self.answerBackLS.pack()
self.answerBackLS.place(x=1000, y=160, anchor=NE)
def cosineLaw(a,mid,c):
# (mid^2) = (a^2)+(c^2)-(2*a*c)*cos(midAngle)
midAngle = acos(((mid**2)-(a**2)-(c**2))/(-2*a*c))
midAngle = midAngle * 180 / math.pi
return midAngle
def pythag(x1,x2,y1,y2):
distance = sqrt(pow((x1-x2),2)+pow((y1-y2),2))
return distance
def pythagAngle(x1,x2,y1,y2):
    angle = atan(abs(y1-y2)/abs(x1-x2)) # arctangent of rise over run gives the angle from horizontal
    angle = angle * 180 / math.pi
return angle
def checkPerson(jsonData):
    # Pick the detected person whose rough x coordinate is closest to the middle of the frame.
    personNum = None
    roughX = None
    if len(jsonData["people"]) == 1:
        personNum = 0
    else:
        peopleArray = []
        middle = width/2 # requires the global video width to have been set
        for i in range(len(jsonData["people"])):
            if jsonData["people"][i]["pose_keypoints_2d"][36] > 0:
                roughX = jsonData["people"][i]["pose_keypoints_2d"][36]
            elif jsonData["people"][i]["pose_keypoints_2d"][3] > 0:
                roughX = jsonData["people"][i]["pose_keypoints_2d"][3]
            elif jsonData["people"][i]["pose_keypoints_2d"][6] > 0:
                roughX = jsonData["people"][i]["pose_keypoints_2d"][6]
            peopleArray.append(abs(roughX - middle))
        personNum = numpy.argmin(peopleArray)
    return personNum
def trackingAlgo(jsonNew,jsonOld):
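    # If the new frame's right knee is closer to the previous frame's left knee than to the
    # previous right knee, assume OpenPose swapped the person's legs between frames and swap
    # the right/left hip, knee, ankle, toe and heel keypoints back.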
#jsonNew["people"][0]["pose_keypoints_2d"][0]
try:
#Legs
rr = pythag(jsonNew["people"][0]["pose_keypoints_2d"][30],jsonOld["people"][0]["pose_keypoints_2d"][30],jsonNew["people"][0]["pose_keypoints_2d"][31],jsonOld["people"][0]["pose_keypoints_2d"][31])
        rl = pythag(jsonNew["people"][0]["pose_keypoints_2d"][30],jsonOld["people"][0]["pose_keypoints_2d"][39],jsonNew["people"][0]["pose_keypoints_2d"][31],jsonOld["people"][0]["pose_keypoints_2d"][40]) # compare the new right knee to the previous left knee (x=39, y=40)
if rr > rl:
rhipx = jsonNew["people"][0]["pose_keypoints_2d"][27]
rhipy = jsonNew["people"][0]["pose_keypoints_2d"][28]
rhipc = jsonNew["people"][0]["pose_keypoints_2d"][29]
rkneex = jsonNew["people"][0]["pose_keypoints_2d"][30]
rkneey = jsonNew["people"][0]["pose_keypoints_2d"][31]
rkneec = jsonNew["people"][0]["pose_keypoints_2d"][32]
ranklex = jsonNew["people"][0]["pose_keypoints_2d"][33]
rankley = jsonNew["people"][0]["pose_keypoints_2d"][34]
ranklec = jsonNew["people"][0]["pose_keypoints_2d"][35]
rbigtoex = jsonNew["people"][0]["pose_keypoints_2d"][66]
rbigtoey = jsonNew["people"][0]["pose_keypoints_2d"][67]
rbigtoec = jsonNew["people"][0]["pose_keypoints_2d"][68]
rsmalltoex = jsonNew["people"][0]["pose_keypoints_2d"][69]
rsmalltoey = jsonNew["people"][0]["pose_keypoints_2d"][70]
rsmalltoec = jsonNew["people"][0]["pose_keypoints_2d"][71]
rheelx = jsonNew["people"][0]["pose_keypoints_2d"][72]
rheely = jsonNew["people"][0]["pose_keypoints_2d"][73]
rheelc = jsonNew["people"][0]["pose_keypoints_2d"][74]
jsonNew["people"][0]["pose_keypoints_2d"][27] = jsonNew["people"][0]["pose_keypoints_2d"][36]
jsonNew["people"][0]["pose_keypoints_2d"][28] = jsonNew["people"][0]["pose_keypoints_2d"][37]
jsonNew["people"][0]["pose_keypoints_2d"][29] = jsonNew["people"][0]["pose_keypoints_2d"][38]
jsonNew["people"][0]["pose_keypoints_2d"][30] = jsonNew["people"][0]["pose_keypoints_2d"][39]
jsonNew["people"][0]["pose_keypoints_2d"][31] = jsonNew["people"][0]["pose_keypoints_2d"][40]
jsonNew["people"][0]["pose_keypoints_2d"][32] = jsonNew["people"][0]["pose_keypoints_2d"][41]
jsonNew["people"][0]["pose_keypoints_2d"][33] = jsonNew["people"][0]["pose_keypoints_2d"][42]
jsonNew["people"][0]["pose_keypoints_2d"][34] = jsonNew["people"][0]["pose_keypoints_2d"][43]
jsonNew["people"][0]["pose_keypoints_2d"][35] = jsonNew["people"][0]["pose_keypoints_2d"][44]
jsonNew["people"][0]["pose_keypoints_2d"][66] = jsonNew["people"][0]["pose_keypoints_2d"][57]
jsonNew["people"][0]["pose_keypoints_2d"][67] = jsonNew["people"][0]["pose_keypoints_2d"][58]
jsonNew["people"][0]["pose_keypoints_2d"][68] = jsonNew["people"][0]["pose_keypoints_2d"][59]
jsonNew["people"][0]["pose_keypoints_2d"][69] = jsonNew["people"][0]["pose_keypoints_2d"][60]
jsonNew["people"][0]["pose_keypoints_2d"][70] = jsonNew["people"][0]["pose_keypoints_2d"][61]
jsonNew["people"][0]["pose_keypoints_2d"][71] = jsonNew["people"][0]["pose_keypoints_2d"][62]
jsonNew["people"][0]["pose_keypoints_2d"][72] = jsonNew["people"][0]["pose_keypoints_2d"][63]
jsonNew["people"][0]["pose_keypoints_2d"][73] = jsonNew["people"][0]["pose_keypoints_2d"][64]
jsonNew["people"][0]["pose_keypoints_2d"][74] = jsonNew["people"][0]["pose_keypoints_2d"][65]
jsonNew["people"][0]["pose_keypoints_2d"][36] = rhipx
jsonNew["people"][0]["pose_keypoints_2d"][37] = rhipy
jsonNew["people"][0]["pose_keypoints_2d"][38] = rhipc
jsonNew["people"][0]["pose_keypoints_2d"][39] = rkneex
jsonNew["people"][0]["pose_keypoints_2d"][40] = rkneey
jsonNew["people"][0]["pose_keypoints_2d"][41] = rkneec
jsonNew["people"][0]["pose_keypoints_2d"][42] = ranklex
jsonNew["people"][0]["pose_keypoints_2d"][43] = rankley
jsonNew["people"][0]["pose_keypoints_2d"][44] = ranklec
jsonNew["people"][0]["pose_keypoints_2d"][57] = rbigtoex
jsonNew["people"][0]["pose_keypoints_2d"][58] = rbigtoey
jsonNew["people"][0]["pose_keypoints_2d"][59] = rbigtoec
jsonNew["people"][0]["pose_keypoints_2d"][60] = rsmalltoex
jsonNew["people"][0]["pose_keypoints_2d"][61] = rsmalltoey
jsonNew["people"][0]["pose_keypoints_2d"][62] = rsmalltoec
jsonNew["people"][0]["pose_keypoints_2d"][63] = rheelx
jsonNew["people"][0]["pose_keypoints_2d"][64] = rheely
jsonNew["people"][0]["pose_keypoints_2d"][65] = rheelc
except:
return jsonNew
return jsonNew
def analyzeFrame(jsonData,analysisNum):
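    # Compute the right/left knee and elbow angles and the back angle for one frame using the
    # law of cosines on the OpenPose keypoints, then write them to the GUI labels
    # (analysisNum 1 = ball-release frame, analysisNum 2 = last-step frame).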
rTibia = pythag(jsonData["people"][0]["pose_keypoints_2d"][33],
jsonData["people"][0]["pose_keypoints_2d"][30],
jsonData["people"][0]["pose_keypoints_2d"][34],
jsonData["people"][0]["pose_keypoints_2d"][31])
rFemur = pythag(jsonData["people"][0]["pose_keypoints_2d"][30],
jsonData["people"][0]["pose_keypoints_2d"][27],
jsonData["people"][0]["pose_keypoints_2d"][31],
jsonData["people"][0]["pose_keypoints_2d"][28])
mid = pythag(jsonData["people"][0]["pose_keypoints_2d"][33],
jsonData["people"][0]["pose_keypoints_2d"][27],
jsonData["people"][0]["pose_keypoints_2d"][34],
jsonData["people"][0]["pose_keypoints_2d"][28])
rkneeAngle = cosineLaw(rTibia, mid, rFemur)
rHumerus = pythag(jsonData["people"][0]["pose_keypoints_2d"][6],
jsonData["people"][0]["pose_keypoints_2d"][9],
jsonData["people"][0]["pose_keypoints_2d"][7],
jsonData["people"][0]["pose_keypoints_2d"][10])
rRadius = pythag(jsonData["people"][0]["pose_keypoints_2d"][9],
jsonData["people"][0]["pose_keypoints_2d"][12],
jsonData["people"][0]["pose_keypoints_2d"][10],
jsonData["people"][0]["pose_keypoints_2d"][13])
mid = pythag(jsonData["people"][0]["pose_keypoints_2d"][6],
jsonData["people"][0]["pose_keypoints_2d"][12],
jsonData["people"][0]["pose_keypoints_2d"][7],
jsonData["people"][0]["pose_keypoints_2d"][13])
relbowAngle = cosineLaw(rHumerus, mid, rRadius)
lTibia = pythag(jsonData["people"][0]["pose_keypoints_2d"][42],
jsonData["people"][0]["pose_keypoints_2d"][39],
jsonData["people"][0]["pose_keypoints_2d"][43],
jsonData["people"][0]["pose_keypoints_2d"][40])
lFemur = pythag(jsonData["people"][0]["pose_keypoints_2d"][39],
jsonData["people"][0]["pose_keypoints_2d"][36],
jsonData["people"][0]["pose_keypoints_2d"][40],
jsonData["people"][0]["pose_keypoints_2d"][37])
mid = pythag(jsonData["people"][0]["pose_keypoints_2d"][42],
jsonData["people"][0]["pose_keypoints_2d"][36],
jsonData["people"][0]["pose_keypoints_2d"][43],
jsonData["people"][0]["pose_keypoints_2d"][37])
lkneeAngle = cosineLaw(lTibia, mid, lFemur)
lHumerus = pythag(jsonData["people"][0]["pose_keypoints_2d"][15],
jsonData["people"][0]["pose_keypoints_2d"][18],
jsonData["people"][0]["pose_keypoints_2d"][16],
jsonData["people"][0]["pose_keypoints_2d"][19])
lRadius = pythag(jsonData["people"][0]["pose_keypoints_2d"][18],
jsonData["people"][0]["pose_keypoints_2d"][21],
jsonData["people"][0]["pose_keypoints_2d"][19],
jsonData["people"][0]["pose_keypoints_2d"][22])
mid = pythag(jsonData["people"][0]["pose_keypoints_2d"][15],
jsonData["people"][0]["pose_keypoints_2d"][21],
jsonData["people"][0]["pose_keypoints_2d"][16],
jsonData["people"][0]["pose_keypoints_2d"][22])
lelbowAngle = cosineLaw(lHumerus, mid, lRadius)
imaginaryX = abs(
jsonData["people"][0]["pose_keypoints_2d"][3] - jsonData["people"][0]["pose_keypoints_2d"][24])
imaginaryY = abs(
jsonData["people"][0]["pose_keypoints_2d"][4] - jsonData["people"][0]["pose_keypoints_2d"][25])
back = pythag(jsonData["people"][0]["pose_keypoints_2d"][3],
jsonData["people"][0]["pose_keypoints_2d"][24],
jsonData["people"][0]["pose_keypoints_2d"][4],
jsonData["people"][0]["pose_keypoints_2d"][25])
backAngle = cosineLaw(imaginaryX, imaginaryY, back)
rkneeAngle = "{0:.2f}".format(rkneeAngle)
relbowAngle = "{0:.2f}".format(relbowAngle)
lkneeAngle = "{0:.2f}".format(lkneeAngle)
lelbowAngle = "{0:.2f}".format(lelbowAngle)
backAngle = "{0:.2f}".format(backAngle)
if analysisNum == 1:
app.answerRknee.config(text=rkneeAngle)
app.answerRelbow.config(text=relbowAngle)
app.answerLknee.config(text=lkneeAngle)
app.answerLelbow.config(text=lelbowAngle)
app.answerBack.config(text=backAngle)
elif analysisNum ==2:
app.answerRkneeLS.config(text=rkneeAngle)
app.answerRelbowLS.config(text=relbowAngle)
app.answerLkneeLS.config(text=lkneeAngle)
app.answerLelbowLS.config(text=lelbowAngle)
app.answerBackLS.config(text=backAngle)
def selectFile():
file = filedialog.askopenfilename(initialdir="/", title="Select file", filetypes=(
("All Files", "*.*"), ("MOV files", "*.MOV"), ("MP4 files", "*.mp4"), ("AVI files", "*.avi")))
app.fileBox.insert(END, file)
def playVideo(): # Creates the threads where the videos are played
thread = None
thread2 = None
if app.startButton.cget("text") == "Start":
global mainFile
global fileName
global directory
global video
global video2
mainFile = app.fileBox.get("1.0", 'end-1c')
fileName = os.path.basename(mainFile)
directory = os.path.splitext(mainFile)[0]
newClip = VideoFileClip(mainFile)
newFile = moviepy.video.fx.all.gamma_corr(newClip, .5)
newFile = moviepy.video.fx.all.lum_contrast(newFile,0,1,.15)
newFile.write_videofile(directory + "_Processed" + ".mp4")
if not os.path.exists(directory):
os.makedirs(directory)
# openPose = r"C:\Users\okeefel\Documents\openpose-1.4.0-win64-gpu-binaries\bin\OpenPoseDemo.exe"
openPose = r"bin\OpenPoseDemo.exe"
        fileFlag = r" --video " + directory + "_Processed" + ".mp4" #the processed file to run through OpenPose
dataFlag = r" --write_json " + directory #where it saves the raw data
videoFlag = r" --write_video " + directory + "_Processed" + ".MOV"
# framesFlag = r" --frame_step " + app.framesBox.get("1.0", 'end-1c')#skips however many frames
displayFlag = r" --display 0" #Will not run on screen
peopleFlag = r" --number_people_max 2"
# trackingFlag = r" --tracking 0"
scaleFlag = r" --keypoint_scale 0"
os.chdir(r"C:\Users\okeefel\Documents\openpose-1.4.0-win64-gpu-binaries")
os.system(openPose + fileFlag + dataFlag + videoFlag + displayFlag + peopleFlag + scaleFlag)
video = imageio.get_reader(mainFile)
video2 = imageio.get_reader(directory + "_Processed" + ".MOV")
videoLabel = tk.Label()
videoLabel.pack()
videoLabel.place(x=20, y=300, anchor=NW)
thread = threading.Thread(target=stream, args=(videoLabel,))
thread.daemon = 1
thread.start()
videoLabel2 = tk.Label()
videoLabel2.pack()
videoLabel2.place(x=520, y=300, anchor=NW)
thread2 = threading.Thread(target=stream2, args=(videoLabel2,))
thread2.daemon = 1
thread2.start()
#Parse through all data
lastFrame = None
fileCount = 0
for file in os.listdir(directory): #file will be the json files
if file.endswith(".json"):
fileCount = fileCount + 1
lastFrame = None # frame before the current one being analyzed
badFrames = 0 # number of bad frames between good ones
lastfootLangle = None
lastfootRangle = None
lastFramePerson = None
personNum =None
step = 1
for fileNum in range(fileCount,0,-1): #file will be the json files
            # OpenPose names its output files with 12-digit, zero-padded frame numbers.
            fileNum = str(fileNum - 1)
            jsonName = directory + r"/" + os.path.splitext(fileName)[0] + "_Processed_" + fileNum.zfill(12) + "_keypoints.json"
with open(jsonName) as handle:
jsonData = json.loads(handle.read())
personNum = checkPerson(jsonData)
#236 to 237 on jonathans frames to test
if lastFrame != None: #Check all of the important points against previous points as an attempt at tracking
jsonData = trackingAlgo(jsonData,lastFrame)
#fill arrays then save graph
try:
x_list = [jsonData["people"][0]["pose_keypoints_2d"][0], \
jsonData["people"][0]["pose_keypoints_2d"][3], \
jsonData["people"][0]["pose_keypoints_2d"][6], \
jsonData["people"][0]["pose_keypoints_2d"][9], \
jsonData["people"][0]["pose_keypoints_2d"][12], \
jsonData["people"][0]["pose_keypoints_2d"][15], \
jsonData["people"][0]["pose_keypoints_2d"][18], \
jsonData["people"][0]["pose_keypoints_2d"][21], \
jsonData["people"][0]["pose_keypoints_2d"][24], \
jsonData["people"][0]["pose_keypoints_2d"][27], \
jsonData["people"][0]["pose_keypoints_2d"][30], \
jsonData["people"][0]["pose_keypoints_2d"][33], \
jsonData["people"][0]["pose_keypoints_2d"][36], \
jsonData["people"][0]["pose_keypoints_2d"][39], \
jsonData["people"][0]["pose_keypoints_2d"][42], \
jsonData["people"][0]["pose_keypoints_2d"][45], \
jsonData["people"][0]["pose_keypoints_2d"][48], \
jsonData["people"][0]["pose_keypoints_2d"][51], \
jsonData["people"][0]["pose_keypoints_2d"][54], \
jsonData["people"][0]["pose_keypoints_2d"][57], \
jsonData["people"][0]["pose_keypoints_2d"][60], \
jsonData["people"][0]["pose_keypoints_2d"][63], \
jsonData["people"][0]["pose_keypoints_2d"][66], \
jsonData["people"][0]["pose_keypoints_2d"][69], \
jsonData["people"][0]["pose_keypoints_2d"][72], \
]
y_list = [jsonData["people"][0]["pose_keypoints_2d"][1]*-1+1000, \
jsonData["people"][0]["pose_keypoints_2d"][4]*-1+1000, \
jsonData["people"][0]["pose_keypoints_2d"][7]*-1+1000, \
jsonData["people"][0]["pose_keypoints_2d"][10]*-1+1000, \
jsonData["people"][0]["pose_keypoints_2d"][13]*-1+1000, \
jsonData["people"][0]["pose_keypoints_2d"][16]*-1+1000, \
jsonData["people"][0]["pose_keypoints_2d"][19]*-1+1000, \
jsonData["people"][0]["pose_keypoints_2d"][22]*-1+1000, \
jsonData["people"][0]["pose_keypoints_2d"][25]*-1+1000, \
jsonData["people"][0]["pose_keypoints_2d"][28]*-1+1000, \
jsonData["people"][0]["pose_keypoints_2d"][31]*-1+1000, \
jsonData["people"][0]["pose_keypoints_2d"][34]*-1+1000, \
jsonData["people"][0]["pose_keypoints_2d"][37]*-1+1000, \
jsonData["people"][0]["pose_keypoints_2d"][40]*-1+1000, \
jsonData["people"][0]["pose_keypoints_2d"][43]*-1+1000, \
jsonData["people"][0]["pose_keypoints_2d"][46]*-1+1000, \
jsonData["people"][0]["pose_keypoints_2d"][49]*-1+1000, \
jsonData["people"][0]["pose_keypoints_2d"][52]*-1+1000, \
jsonData["people"][0]["pose_keypoints_2d"][55]*-1+1000, \
jsonData["people"][0]["pose_keypoints_2d"][58]*-1+1000, \
jsonData["people"][0]["pose_keypoints_2d"][61]*-1+1000, \
jsonData["people"][0]["pose_keypoints_2d"][64]*-1+1000, \
jsonData["people"][0]["pose_keypoints_2d"][67]*-1+1000, \
jsonData["people"][0]["pose_keypoints_2d"][70]*-1+1000, \
jsonData["people"][0]["pose_keypoints_2d"][73]*-1+1000, \
]
words = ["nose", \
"neck", \
"Rshoulder", \
"Relbow", \
"Rwrist", \
"Lshoulder", \
"Lelbow", \
"Lwrist", \
"Midhip", \
"Rhip", \
"Rknee", \
"Rankle", \
"Lhip", \
"Lknee", \
"Lankle", \
"Reye", \
"Leye", \
"Rear", \
"Lear", \
"LBigtoe", \
"LSmalltoe", \
"Lheel", \
"Rbigtoe", \
"Rsmalltoe", \
"Rheel", \
]
fig, ax = matplotlib.pyplot.subplots()
ax.scatter(x_list,y_list)
for i, txt in enumerate(words):
ax.annotate(txt,(x_list[i],y_list[i]))
matplotlib.pyplot.axis([numpy.amin(x_list)-.1,numpy.amax(x_list)+.1,numpy.amin(y_list)-.1,numpy.amax(y_list)-.3])
fig.savefig(directory + r"/" + os.path.splitext(fileName)[0] + "_Processed_000" + fileNum + ".png")
except:
badFrames = badFrames+1
if fileNum == str(fileCount-1): #The first frame starts with when ball is being released
analyzeFrame(jsonData,1)
skipFrame = 0
try:
if jsonData["people"][0]["pose_keypoints_2d"][59] >= .2 :
footLfrontX = jsonData["people"][0]["pose_keypoints_2d"][57]
footLfrontY = jsonData["people"][0]["pose_keypoints_2d"][58]
elif jsonData["people"][0]["pose_keypoints_2d"][62] >= .2 :
footLfrontX = jsonData["people"][0]["pose_keypoints_2d"][60]
footLfrontY= jsonData["people"][0]["pose_keypoints_2d"][61]
else:
skipFrame = 1
if jsonData["people"][0]["pose_keypoints_2d"][65] >= .2 :
footLrearX = jsonData["people"][0]["pose_keypoints_2d"][63]
footLrearY = jsonData["people"][0]["pose_keypoints_2d"][64]
elif jsonData["people"][0]["pose_keypoints_2d"][44] >= .2 :
footLrearX = jsonData["people"][0]["pose_keypoints_2d"][42]
footLrearY= jsonData["people"][0]["pose_keypoints_2d"][43]
else:
skipFrame = 1
except:
skipFrame = 1
if skipFrame == 0 :
imaginaryX = abs(footLfrontX - footLrearX)
imaginaryY = abs(footLfrontY - footLrearY)
foot = pythag(footLfrontX,footLrearX,footLfrontY,footLrearY)
footLangle = cosineLaw(imaginaryX, imaginaryY, foot)
if (step == 1) & (fileNum != str(fileCount-1)):
if footLangle > lastfootLangle+8:
print(fileNum)
analyzeFrame(lastFrame,2)
step = step + 1
lastfootLangle = footLangle
lastFrame = jsonData
lastFramePerson = personNum
os.remove(directory + "_Processed" + ".mp4")
app.startButton.config(text="Pause")
elif app.startButton.cget("text") == "Pause":
app.startButton.config(text="Continue")
elif app.startButton.cget("text") == "Continue":
app.startButton.config(text="Pause")
def restartVideo():
app.startButton.config(text="Start")
playVideo()
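# stream() and stream2() pull frames from the two video readers (apparently imageio objects, given iter_data()),
# resize them to 500x500 with Pillow, and push them into their Tk labels; playback pauses while the start
# button reads "Continue".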
def stream(label): # First Video
for image in video.iter_data():
while app.startButton.cget("text") == "Continue":
sleep(1)
height, width = image.shape[:2]  # cv2.GetSize() does not exist in the cv2 module; read the frame size from the array shape
img = Image.fromarray(image)
img2 = img.resize((500, 500), Image.ANTIALIAS)
img3 = ImageTk.PhotoImage(img2)
label.config(image=img3)
label.image = img3
def stream2(label): # Second Video
for image in video2.iter_data():
while app.startButton.cget("text") == "Continue":
sleep(1)
img = Image.fromarray(image)
img2 = img.resize((500, 500), Image.ANTIALIAS)
img3 = ImageTk.PhotoImage(img2)
label.config(image=img3)
label.image = img3
root = tk.Tk()
root.state('zoomed')
app = Application(master=root)
app.mainloop()
|
livelock.py
|
#!/usr/bin/env python3
""" Three philosophers, thinking and eating sushi """
import threading
chopstick_a = threading.Lock()
chopstick_b = threading.Lock()
chopstick_c = threading.Lock()
sushi_count = 500
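# Note: each philosopher grabs the chopsticks in a circular order (a->b, b->c, c->a), so this version can
# deadlock with every thread holding one lock and waiting on the next; a livelock variant would instead
# release the first chopstick and retry whenever the second one cannot be acquired.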
def philosopher(name, first_chopstick, second_chopstick):
global sushi_count
while sushi_count > 0: # eat sushi until it's all gone
first_chopstick.acquire()
second_chopstick.acquire()
try:
if sushi_count > 0:
sushi_count -= 1
print(name, 'took a piece! Sushi remaining:', sushi_count)
finally:
second_chopstick.release()
first_chopstick.release()
if __name__ == '__main__':
threading.Thread(target=philosopher, args=('Barron', chopstick_a, chopstick_b)).start()
threading.Thread(target=philosopher, args=('Olivia', chopstick_b, chopstick_c)).start()
threading.Thread(target=philosopher, args=('Steve', chopstick_c, chopstick_a)).start()
|
bench.py
|
import sys
from bigchaindb_driver import BigchainDB
from bigchaindb_driver.crypto import generate_keypair
import queue, threading, time
if len(sys.argv) != 5:
print('Usage: python3 bench.py load_file_path run_file_path endpoints nthread')
sys.exit(1)
alice, bob = generate_keypair(), generate_keypair()
metadata = {alice.public_key: bob.public_key}
loadFile, runFile, urls, threadNum = sys.argv[1], sys.argv[2], sys.argv[3].split(','), int(sys.argv[4])
bdbs = []
for url in urls:
bdb = BigchainDB(url)
bdbs.append(bdb)
print("BigchainDB with {} threads and {} servers.".format(threadNum, len(urls)))
def readFile(filepath, outQueue):
with open(filepath, 'r', encoding='UTF-8') as f:
line = f.readline()
num = 0
while line is not None and line != '':
if not line.startswith(('INSERT', 'READ', 'UPDATE')):
line = f.readline()
continue
outQueue.put(line)
line = f.readline()
num = num + 1
# if num == 10000:
# break
def sendTxn(lineQueue, latQueue, driver):
while not lineQueue.empty():
start = time.time()
try:
line = lineQueue.get(block=False, timeout=0)
except queue.Empty:
continue
args = line.split(' ', 3)
if "INSERT" in line or "UPDATE" in line:
data = {
'data': {
args[2]: {
args[2]: args[3],
},
},
}
prepared_creation_tx = driver.transactions.prepare(
operation='CREATE',
signers=alice.public_key,
asset=data,
metadata=metadata,
)
fulfilled_creation_tx = driver.transactions.fulfill(
prepared_creation_tx, private_keys=alice.private_key)
sent_creation_tx = driver.transactions.send_async(fulfilled_creation_tx)
else:
driver.assets.get(search=args[2])
end = time.time()
if latQueue is not None:
latQueue.put(end-start)
print("Start loading init data...")
loadQueue = queue.Queue(maxsize=100000)
readFile(loadFile, loadQueue)
#tLoadRead = threading.Thread(target=readFile, args=(loadFile, loadQueue,))
#tLoadRead.start()
#time.sleep(5)
num = loadQueue.qsize()
start = time.time()
loadThreadList = []
for i in range(32):
t = threading.Thread(target=sendTxn, args=(loadQueue, None, bdbs[i%len(bdbs)],))
loadThreadList.append(t)
t.start()
#tLoadRead.join()
for t in loadThreadList:
t.join()
end = time.time()
print("Load throughput {} TPS".format(num/(end - start)))
print("Start running experiments...")
runQueue = queue.Queue(maxsize=100000)
latencyQueue = queue.Queue(maxsize=100000)
#tRunRead = threading.Thread(target=readFile, args=(runFile, runQueue,))
#tRunRead.start()
#time.sleep(5)
readFile(runFile, runQueue)
time.sleep(5)
runThreadList = []
for i in range(threadNum):
t = threading.Thread(target=sendTxn, args=(runQueue, latencyQueue, bdbs[i%len(bdbs)],))
runThreadList.append(t)
start = time.time()
for t in runThreadList:
t.start()
time.sleep(1)
for t in runThreadList:
t.join()
end = time.time()
#allLatency = []
#def getLatency(latQueue):
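# Drain the latency queue to compute the mean per-request latency across all worker threads.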
lat = 0
num = 0
while not latencyQueue.empty():
ts = latencyQueue.get()
lat = lat + ts
num = num + 1
# allLatency.append(ts)
#tLatency = threading.Thread(target=getLatency, args=(latencyQueue,))
#tLatency.start()
# print("Before join...")
# tRunRead.join()
#for t in runThreadList:
# t.join()
print('Throughput of {} txn: {} txn/s'.format(num, num/(end-start)))
print('Latency: {} ms'.format(lat/num*1000))
|
OSINTData.py
|
import json
import requests
import threading
import time
class OSINTData:
def __init__(self):
self.headers = {'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
#'From': '@gmail.com'
def reader(self, URLs):
print("----------")
x=""
y=""
for i in URLs:
try:
x = requests.get(i, headers=self.headers).json()
except:
print(requests.get(i, headers=self.headers).status_code)
print(requests.get(i, headers=self.headers).text)
x={"error": i}
y += json.dumps(x, indent=4)
print(y)
def talosintelligence(self, ip):
URLs = ["https://talosintelligence.com/cloud_intel/query_suggestion?query="+ip,
"https://talosintelligence.com/cloud_intel/ip_reputation?ip="+ip,
"https://talosintelligence.com/cloud_intel/whois?whois_query="+ip]
self.reader(URLs)
def greynoise(self, ip):
URLs = ["https://www.greynoise.io/api/"+ip]
self.reader(URLs)
def haveibeenpwned(self, email):
URLs = ["https://haveibeenpwned.com/unifiedsearch/"+email]
self.reader(URLs)
def mispFullSearch(self, value):
# https://www.dan.me.uk/torlist/
AllFeeds = ["https://raw.githubusercontent.com/stamparm/ipsum/master/levels/1.txt",
"https://raw.githubusercontent.com/stamparm/ipsum/master/levels/2.txt",
"https://raw.githubusercontent.com/stamparm/ipsum/master/levels/3.txt",
"https://raw.githubusercontent.com/stamparm/ipsum/master/levels/4.txt",
"https://raw.githubusercontent.com/stamparm/ipsum/master/levels/5.txt",
"https://raw.githubusercontent.com/stamparm/ipsum/master/levels/6.txt",
"https://raw.githubusercontent.com/stamparm/ipsum/master/levels/7.txt",
"https://raw.githubusercontent.com/stamparm/ipsum/master/levels/8.txt",
"https://rules.emergingthreats.net/blockrules/compromised-ips.txt",
"https://check.torproject.org/torbulkexitlist",
"https://cybercrime-tracker.net/all.php",
"https://raw.githubusercontent.com/pan-unit42/iocs/master/diamondfox/diamondfox_panels.txt",
"https://home.nuug.no/~peter/pop3gropers.txt",
"https://openphish.com/feed.txt",
"https://raw.githubusercontent.com/ktsaou/blocklist-ipsets/master/firehol_level1.netset",
"https://cinsscore.com/list/ci-badguys.txt",
"https://lists.blocklist.de/lists/all.txt",
"https://dataplane.org/vncrfb.txt",
"https://dataplane.org/sshpwauth.txt",
"https://dataplane.org/sipregistration.txt",
"https://dataplane.org/sipquery.txt",
"https://dataplane.org/sipinvitation.txt",
"http://vxvault.net/URL_List.php",
"https://sslbl.abuse.ch/blacklist/sslipblacklist.csv",
"https://cybercrime-tracker.net/ccamlist.php",
"https://cybercrime-tracker.net/ccamgate.php",
"https://blocklist.greensnow.co/greensnow.txt",
"https://mirai.security.gives/data/ip_list.txt",
"https://malsilo.gitlab.io/feeds/dumps/url_list.txt",
"https://malsilo.gitlab.io/feeds/dumps/ip_list.txt",
"https://malsilo.gitlab.io/feeds/dumps/domain_list.txt",
"https://malshare.com/daily/malshare.current.all.txt"]
j=-1
for i in AllFeeds:
j+=1
proc=threading.Thread(target=OSINTData.loopfeeds, args=(self, i, value))
proc.start()
if (j%4 == 0):
time.sleep(4)
#proc.terminate()
time.sleep(2)
print("Scan Complete:")
def loopfeeds(self, i, value):
state=""
x=requests.get(i, headers=self.headers).text
if value in x: print("ALERT: " + i)
if __name__ == "__main__":
while True:
value=input("Value: ")
investigation = OSINTData()
investigation.talosintelligence(value)
investigation.greynoise(value)
#investigation.haveibeenpwned(value)
investigation.mispFullSearch(value)
|
labels.py
|
import hashlib
import requests
import threading
import json
import sys
import traceback
import base64
import vialectrum as electrum
from vialectrum.plugins import BasePlugin, hook
from vialectrum.i18n import _
class LabelsPlugin(BasePlugin):
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.target_host = 'labels.bauerj.eu'
self.wallets = {}
def encode(self, wallet, msg):
password, iv, wallet_id = self.wallets[wallet]
encrypted = electrum.bitcoin.aes_encrypt_with_iv(password, iv,
msg.encode('utf8'))
return base64.b64encode(encrypted).decode()
def decode(self, wallet, message):
password, iv, wallet_id = self.wallets[wallet]
decoded = base64.b64decode(message)
decrypted = electrum.bitcoin.aes_decrypt_with_iv(password, iv, decoded)
return decrypted.decode('utf8')
def get_nonce(self, wallet):
# nonce is the nonce to be used with the next change
nonce = wallet.storage.get('wallet_nonce')
if nonce is None:
nonce = 1
self.set_nonce(wallet, nonce)
return nonce
def set_nonce(self, wallet, nonce):
self.print_error("set", wallet.basename(), "nonce to", nonce)
wallet.storage.put("wallet_nonce", nonce)
@hook
def set_label(self, wallet, item, label):
if not wallet in self.wallets:
return
if not item:
return
nonce = self.get_nonce(wallet)
wallet_id = self.wallets[wallet][2]
bundle = {"walletId": wallet_id,
"walletNonce": nonce,
"externalId": self.encode(wallet, item),
"encryptedLabel": self.encode(wallet, label)}
t = threading.Thread(target=self.do_request,
args=["POST", "/label", False, bundle])
t.setDaemon(True)
t.start()
# Caller will write the wallet
self.set_nonce(wallet, nonce + 1)
def do_request(self, method, url = "/labels", is_batch=False, data=None):
url = 'https://' + self.target_host + url
kwargs = {'headers': {}}
if method == 'GET' and data:
kwargs['params'] = data
elif method == 'POST' and data:
kwargs['data'] = json.dumps(data)
kwargs['headers']['Content-Type'] = 'application/json'
response = requests.request(method, url, **kwargs)
if response.status_code != 200:
raise BaseException(response.status_code, response.text)
response = response.json()
if "error" in response:
raise BaseException(response["error"])
return response
def push_thread(self, wallet):
wallet_id = self.wallets[wallet][2]
bundle = {"labels": [],
"walletId": wallet_id,
"walletNonce": self.get_nonce(wallet)}
for key, value in wallet.labels.items():
try:
encoded_key = self.encode(wallet, key)
encoded_value = self.encode(wallet, value)
except:
self.print_error('cannot encode', repr(key), repr(value))
continue
bundle["labels"].append({'encryptedLabel': encoded_value,
'externalId': encoded_key})
self.do_request("POST", "/labels", True, bundle)
def pull_thread(self, wallet, force):
wallet_id = self.wallets[wallet][2]
nonce = 1 if force else self.get_nonce(wallet) - 1
self.print_error("asking for labels since nonce", nonce)
try:
response = self.do_request("GET", ("/labels/since/%d/for/%s" % (nonce, wallet_id) ))
if response["labels"] is None:
self.print_error('no new labels')
return
result = {}
for label in response["labels"]:
try:
key = self.decode(wallet, label["externalId"])
value = self.decode(wallet, label["encryptedLabel"])
except:
continue
try:
json.dumps(key)
json.dumps(value)
except:
self.print_error('error: no json', key)
continue
result[key] = value
for key, value in result.items():
if force or not wallet.labels.get(key):
wallet.labels[key] = value
self.print_error("received %d labels" % len(response))
# do not write to disk because we're in a daemon thread
wallet.storage.put('labels', wallet.labels)
self.set_nonce(wallet, response["nonce"] + 1)
self.on_pulled(wallet)
except Exception as e:
traceback.print_exc(file=sys.stderr)
self.print_error("could not retrieve labels")
def start_wallet(self, wallet):
nonce = self.get_nonce(wallet)
self.print_error("wallet", wallet.basename(), "nonce is", nonce)
mpk = wallet.get_fingerprint()
if not mpk:
return
mpk = mpk.encode('ascii')
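# Derive the sync credentials from the wallet fingerprint (mpk): the AES password is the first 32 hex chars
# of sha1(mpk), the IV is the first 16 bytes of sha256(password), and the wallet id sent to the label server
# is sha256(mpk).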
password = hashlib.sha1(mpk).hexdigest()[:32].encode('ascii')
iv = hashlib.sha256(password).digest()[:16]
wallet_id = hashlib.sha256(mpk).hexdigest()
self.wallets[wallet] = (password, iv, wallet_id)
# If there is an auth token we can try to actually start syncing
t = threading.Thread(target=self.pull_thread, args=(wallet, False))
t.setDaemon(True)
t.start()
def stop_wallet(self, wallet):
self.wallets.pop(wallet, None)
|
qzContigFilterServer.py
|
#!/usr/bin/env python
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, ServerError, InvalidRequestError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import biokbase.nexus
import requests as _requests
import urlparse as _urlparse
import random as _random
import os
import requests.packages.urllib3
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
# Note that the error fields do not match the 2.0 JSONRPC spec
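# This appears to be a KBase SDK-style generated service wrapper targeting Python 2 (print statements,
# ConfigParser, urlparse and "except Exception, e" syntax).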
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'qzContigFilter'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from qzContigFilter.qzContigFilterImpl import qzContigFilter
impl_qzContigFilter = qzContigFilter(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
sync_methods = {}
async_run_methods = {}
async_check_methods = {}
async_run_methods['qzContigFilter.filter_contigs_async'] = ['qzContigFilter', 'filter_contigs']
async_check_methods['qzContigFilter.filter_contigs_check'] = ['qzContigFilter', 'filter_contigs']
sync_methods['qzContigFilter.filter_contigs'] = True
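# filter_contigs can be called directly (sync_methods) or routed through the KBase job service via the
# *_async / *_check method names registered above.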
class AsyncJobServiceClient(object):
def __init__(self, timeout=30 * 60, token=None,
ignore_authrc=True, trust_all_ssl_certificates=False):
url = environ.get('KB_JOB_SERVICE_URL', None)
if url is None and config is not None:
url = config.get('job-service-url')
if url is None:
raise ValueError('Neither \'job-service-url\' parameter is defined in '+
'configuration nor \'KB_JOB_SERVICE_URL\' variable is defined in system')
scheme, _, _, _, _, _ = _urlparse.urlparse(url)
if scheme not in ['http', 'https']:
raise ValueError(url + " isn't a valid http url")
self.url = url
self.timeout = int(timeout)
self._headers = dict()
self.trust_all_ssl_certificates = trust_all_ssl_certificates
if token is None:
raise ValueError('Authentication is required for async methods')
self._headers['AUTHORIZATION'] = token
if self.timeout < 1:
raise ValueError('Timeout value must be at least 1 second')
def _call(self, method, params, json_rpc_call_context = None):
arg_hash = {'method': method,
'params': params,
'version': '1.1',
'id': str(_random.random())[2:]
}
if json_rpc_call_context:
arg_hash['context'] = json_rpc_call_context
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
ret = _requests.post(self.url, data=body, headers=self._headers,
timeout=self.timeout,
verify=not self.trust_all_ssl_certificates)
if ret.status_code == _requests.codes.server_error:
if 'content-type' in ret.headers and ret.headers['content-type'] == 'application/json':
err = json.loads(ret.text)
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, ret.text)
else:
raise ServerError('Unknown', 0, ret.text)
if ret.status_code != _requests.codes.OK:
ret.raise_for_status()
resp = json.loads(ret.text)
if 'result' not in resp:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
return resp['result']
def run_job(self, run_job_params, json_rpc_call_context = None):
return self._call('KBaseJobService.run_job', [run_job_params], json_rpc_call_context)[0]
def check_job(self, job_id, json_rpc_call_context = None):
return self._call('KBaseJobService.check_job', [job_id], json_rpc_call_context)[0]
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = ServerError()
newerr.trace = traceback.format_exc()
newerr.data = e.__str__()
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if self.method_data[request['method']].has_key('types'): # @IgnorePep8
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'qzContigFilter'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_qzContigFilter.filter_contigs,
name='qzContigFilter.filter_contigs',
types=[dict])
self.method_authentication['qzContigFilter.filter_contigs'] = 'required'
self.auth_client = biokbase.nexus.Client(
config={'server': 'nexus.api.globusonline.org',
'verify_ssl': True,
'client': None,
'client_secret': None})
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {'call_stack': [{'time':self.now_in_utc(), 'method': req['method']}]}
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
if method_name in async_run_methods:
method_name = async_run_methods[method_name][0] + "." + async_run_methods[method_name][1]
if method_name in async_check_methods:
method_name = async_check_methods[method_name][0] + "." + async_check_methods[method_name][1]
auth_req = self.method_authentication.get(method_name,
"none")
if auth_req != "none":
if token is None and auth_req == 'required':
err = ServerError()
err.data = "Authentication required for " + \
"qzContigFilter but no authentication header was passed"
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user, _, _ = \
self.auth_client.validate_token(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception, e:
if auth_req == 'required':
err = ServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
method_name = req['method']
if method_name in async_run_methods or method_name in async_check_methods:
if method_name in async_run_methods:
orig_method_pair = async_run_methods[method_name]
else:
orig_method_pair = async_check_methods[method_name]
orig_method_name = orig_method_pair[0] + '.' + orig_method_pair[1]
if 'required' != self.method_authentication.get(orig_method_name, 'none'):
err = ServerError()
err.data = 'Async method ' + orig_method_name + ' should require ' + \
'authentication, but it has authentication level: ' + \
self.method_authentication.get(orig_method_name, 'none')
raise err
job_service_client = AsyncJobServiceClient(token = ctx['token'])
if method_name in async_run_methods:
run_job_params = {
'method': orig_method_name,
'params': req['params']}
if 'rpc_context' in ctx:
run_job_params['rpc_context'] = ctx['rpc_context']
job_id = job_service_client.run_job(run_job_params)
respond = {'version': '1.1', 'result': [job_id], 'id': req['id']}
rpc_result = json.dumps(respond, cls=JSONObjectEncoder)
status = '200 OK'
else:
job_id = req['params'][0]
job_state = job_service_client.check_job(job_id)
finished = job_state['finished']
if finished != 0 and 'error' in job_state and job_state['error'] is not None:
err = {'error': job_state['error']}
rpc_result = self.process_error(err, ctx, req, None)
else:
respond = {'version': '1.1', 'result': [job_state], 'id': req['id']}
rpc_result = json.dumps(respond, cls=JSONObjectEncoder)
status = '200 OK'
elif method_name in sync_methods or (method_name + '_async') not in async_run_methods:
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
else:
err = ServerError()
err.data = 'Method ' + method_name + ' cannot be run synchronously'
raise err
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception, e:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print 'The request method was %s\n' % environ['REQUEST_METHOD']
# print 'The environment dictionary is:\n%s\n' % pprint.pformat(environ) @IgnorePep8
# print 'The request body was: %s' % request_body
# print 'The result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result)
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
if 'error' not in error['error'] or error['error']['error'] is None:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh,mm = divmod((delta.days * 24*60*60 + delta.seconds + 30) // 60, 60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print "Monkeypatching std libraries for async"
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {
'': application
}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
in the main thread. Excecution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print "Listening on port %s" % port
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user, _, _ = application.auth_client.validate_token(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception, e:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
requests.packages.urllib3.disable_warnings()
if len(sys.argv) >= 3 and len(sys.argv) <= 4 and os.path.isfile(sys.argv[1]):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print "Host set to %s" % host
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print "Listening on port %s" % port
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
botakw2.py
|
# -*- coding: utf-8 -*-
from linepy import *
from datetime import datetime, timedelta
from time import sleep
from bs4 import BeautifulSoup
from gtts import gTTS
from humanfriendly import format_timespan, format_size, format_number, format_length
import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib3, urllib, urllib.parse
from threading import Thread
from googletrans import Translator
#==============================================================================#
mulai = time.time()
line = LINE("token")
line.log("Auth Token : " + str(line.authToken))
line.log("Timeline Token : " + str(line.tl.channelAccessToken))
ki = LINE(token, appName="2")
ki.log("Auth Token : " + str(ki.authToken))
ki.log("Timeline Token : " + str(ki.tl.channelAccessToken))
ki2 = LINE(token, appName="3")
ki2.log("Auth Token : " + str(ki2.authToken))
ki2.log("Timeline Token : " + str(ki2.tl.channelAccessToken))
ki3 = LINE(token, appName="4")
ki3.log("Auth Token : " + str(ki3.authToken))
ki3.log("Timeline Token : " + str(ki3.tl.channelAccessToken))
ki4 = LINE(token, appName="5")
ki4.log("Auth Token : " + str(ki4.authToken))
ki4.log("Timeline Token : " + str(ki4.tl.channelAccessToken))
ki5 = LINE(token, appName="6")
ki5.log("Auth Token : " + str(ki5.authToken))
ki5.log("Timeline Token : " + str(ki5.tl.channelAccessToken))
ki6 = LINE(token, appName="7")
ki6.log("Auth Token : " + str(ki6.authToken))
ki6.log("Timeline Token : " + str(ki6.tl.channelAccessToken))
print ("Login succes ")
KAC = [line,ki,ki2,ki3,ki4,ki5,ki6]
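# KAC groups the primary account with the six helper bots; many of the command handlers below reply through
# the individual handles (line, ki..ki6) so that every account responds.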
lineMID = line.profile.mid
kiMID = ki.profile.mid
ki2MID = ki2.profile.mid
ki3MID = ki3.profile.mid
ki4MID = ki4.profile.mid
ki5MID = ki5.profile.mid
ki6MID = ki6.profile.mid
lineProfile = line.getProfile()
kiProfile = ki.getProfile()
ki2Profile = ki2.getProfile()
ki3Profile = ki3.getProfile()
ki4Profile = ki4.getProfile()
ki5Profile = ki5.getProfile()
ki6Profile = ki6.getProfile()
Bots = [lineMID,kiMID,ki2MID,ki3MID,ki4MID,ki5MID,ki6MID]
lineSettings = line.getSettings()
kiSettings = ki.getSettings()
ki2Settings = ki2.getSettings()
ki3Settings = ki3.getSettings()
ki4Settings = ki4.getSettings()
ki5Settings = ki5.getSettings()
ki6Settings = ki6.getSettings()
admin =["u7e99c5b3e4f01c95c104d0993fc41998"]
oepoll = OEPoll(line)
oepoll1 = OEPoll(ki)
oepoll2 = OEPoll(ki2)
oepoll3 = OEPoll(ki3)
oepoll4 = OEPoll(ki4)
oepoll5 = OEPoll(ki5)
oepoll6 = OEPoll(ki6)
responsename = line.getProfile().displayName
responsename1 = ki.getProfile().displayName
responsename2 = ki2.getProfile().displayName
responsename3 = ki3.getProfile().displayName
responsename4 = ki4.getProfile().displayName
responsename5 = ki5.getProfile().displayName
responsename6 = ki6.getProfile().displayName
welcome = []
responPc = []
autoRespon = []
autoResponImage = []
autoResponPm = []
msg_dict = {}
#==============================================================================#
settings = {
"autoAdd": False,
"autoJoin": False,
"contact":False,
"autoblock": False,
"autoRespon": False,
"autoResponImage": False,
"autoResponPm": False,
"simiSimi": {},
"autoLeave": False,
"autojj": False,
"leavemsg": False,
"welcomemsg": False,
"responPc": False,
"keluar":"sᴇʟᴀᴍᴀᴛ ᴊᴀʟᴀɴ ....\nsᴇᴍᴏɢᴀ ᴋᴀᴍᴜ ʙᴀɪᴋ2 ᴅɪʟᴜᴀʀ sᴀɴᴀ\nsᴀᴍᴘᴀɪ ᴊᴜᴍᴘᴀ 👌👌👌",
"autoRead": False,
"protect": False,
"qrprotect": False,
"cancelprotect": False,
"inviteprotect": False,
"tag": "Maaf aku sedang sibuk ",
"tag2": "Ada apa kak tag saya",
"tag3": "Ada apasih ka Pm mulu",
"detectMention": False,
"autorejc": False,
"welcome":"sᴇᴍᴏɢᴀ ʙᴇᴛᴀʜ ʏᴀ...\nsᴀʟᴀᴍ ᴋᴇɴᴀʟ ᴅᴀʀɪ sᴀʏᴀ 😘",
"responpc": "Tag terus",
"checkSticker": False,
"TagMention": False,
"TagMention2": False,
"unsendMessage":False,
"autoBalas": False,
'wellcome':False,
'bymsg':{},
"lang":"JP",
"autoJoinTicket": {},
"changeGroupPicture":True,
"Mute": True,
"changePicture": {},
"userAgent": [
"Mozilla/5.0 (X11; U; Linux i586; de; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (X11; U; Linux amd64; rv:5.0) Gecko/20100101 Firefox/5.0 (Debian)",
"Mozilla/5.0 (X11; U; Linux amd64; en-US; rv:5.0) Gecko/20110619 Firefox/5.0",
"Mozilla/5.0 (X11; Linux) Gecko Firefox/5.0",
"Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 FirePHP/0.5",
"Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 Firefox/5.0",
"Mozilla/5.0 (X11; Linux x86_64) Gecko Firefox/5.0",
"Mozilla/5.0 (X11; Linux ppc; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (X11; Linux AMD64) Gecko Firefox/5.0",
"Mozilla/5.0 (X11; FreeBSD amd64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.2; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:5.0) Gecko/20110619 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1; rv:6.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1.1; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.2; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.1; U; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.1; rv:2.0.1) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.0; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.0; rv:5.0) Gecko/20100101 Firefox/5.0"
],
"mimic": {
"copy": False,
"status": False,
"target": {}
}
}
wait = {
"Sider":{},
"limit": 1,
"Mute": False,
"contact": False,
"timeline":False,
"selfbot":True,
"sukaPost":True,
"comment":"Autolike by: Team Dk Protection",
"welcomeOn":False,
"lang":"JP",
}
like = {
"like":True,
"likeOn":True,
"liked":True,
}
tikel = {
'sid':"48198",
'spkg':"2000000"
}
read = {
"readPoint": {},
"readMember": {},
"readTime": {},
"ROM": {}
}
myProfile = {
"displayName": "",
"statusMessage": "",
"pictureStatus": ""
}
cctv={
"cyduk":{},
"point":{},
"sidermem":{}
}
myProfile["displayName"] = lineProfile.displayName
myProfile["statusMessage"] = lineProfile.statusMessage
myProfile["pictureStatus"] = lineProfile.pictureStatus
myProfile["displayName"] = kiProfile.displayName
myProfile["statusMessage"] = kiProfile.statusMessage
myProfile["pictureStatus"] = kiProfile.pictureStatus
myProfile["displayName"] = ki2Profile.displayName
myProfile["statusMessage"] = ki2Profile.statusMessage
myProfile["pictureStatus"] = ki2Profile.pictureStatus
myProfile["displayName"] = ki3Profile.displayName
myProfile["statusMessage"] = ki3Profile.statusMessage
myProfile["pictureStatus"] = ki3Profile.pictureStatus
myProfile["displayName"] = ki4Profile.displayName
myProfile["statusMessage"] = ki4Profile.statusMessage
myProfile["pictureStatus"] = ki4Profile.pictureStatus
myProfile["displayName"] = ki5Profile.displayName
myProfile["statusMessage"] = ki5Profile.statusMessage
myProfile["pictureStatus"] = ki5Profile.pictureStatus
myProfile["displayName"] = ki6Profile.displayName
myProfile["statusMessage"] = ki6Profile.statusMessage
myProfile["pictureStatus"] = ki6Profile.pictureStatus
#==============================================================================#
def restartBot():
print ("[ INFO ] BOT RESETTED")
time.sleep(3)
python = sys.executable
os.execl(python, python, *sys.argv)
def restart_program():
print ("[ INFO ] BOT RESETTED")
time.sleep(3)
python = sys.executable
os.execl(python, python, *sys.argv)
def sendMessageWithMention(to, mid):
try:
aa = '{"S":"0","E":"3","M":'+json.dumps(mid)+'}'
text_ = '@x '
line.sendMessage(to, text_, contentMetadata={'MENTION':'{"MENTIONEES":['+aa+']}'}, contentType=0)
except Exception as error:
logError(error)
def waktu(secs):
mins, secs = divmod(secs,60)
hours, mins = divmod(mins,60)
return '%02d : Jam, ♪ %02d : Menit, ♪ %02d : Detik ♪' % (hours, mins, secs)
def waktu(secs):
mins, secs = divmod(secs,60)
hours, mins = divmod(mins,60)
days, hours = divmod(hours,24)
weaks, days = divmod(days,7)
if days == 0:
return '%02d Jam %02d Menit %02d Detik' % (hours, mins, secs)
elif days > 0 and weaks == 0:
return '%02d Hari %02d Jam %02d Menit %02d Detik' %(days, hours, mins, secs)
elif days > 0 and weaks > 0:
return '%02d Minggu %02d Hari %02d Jam %02d Menit %02d Detik' %(weaks, days, hours, mins, secs)
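# waktu() formats an elapsed number of seconds into a human-readable Indonesian duration string
# (weeks/days/hours/minutes/seconds); this second definition shadows the shorter one above.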
def a2():
now2 = datetime.now()  # datetime is imported as the class, so datetime.datetime.now() would raise AttributeError
nowT = datetime.strftime(now2, "%M")
if nowT in ["10","20","30","40","50","00"]:  # nowT is only two characters, so the original [14:] slice was always empty
return False
else:
return True
def sendMentionV2(to, text="", mids=[]):
arrData = ""
arr = []
mention = "@zeroxyuuki "
if mids == []:
raise Exception("Invalid mids")
if "@!" in text:
if text.count("@!") != len(mids):
raise Exception("Invalid mids")
texts = text.split("@!")
textx = ""
for mid in mids:
textx += str(texts[mids.index(mid)])
slen = len(textx)
elen = len(textx) + 15
arrData = {'S':str(slen), 'E':str(elen - 4), 'M':mid}
arr.append(arrData)
textx += mention
textx += str(texts[len(mids)])
else:
textx = ""
slen = len(textx)
elen = len(textx) + 15
arrData = {'S':str(slen), 'E':str(elen - 4), 'M':mids[0]}
arr.append(arrData)
textx += mention + str(text)
line.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
def mentionMembers(to, mid):
try:
arrData = ""
textx = "❨✪❩ ᴅk mentions ❨✪❩ \n\n1. ".format(str(len(mid)))
textx2 ="╭════════════════╮\n ✍ ᴛᴏᴛᴀʟ {} ᴍᴇᴍʙᴇʀs".format(str(len(mid)))
arr = []
no = 1
num = 2
for i in mid:
mention = "@x\n"
slen = str(len(textx))
elen = str(len(textx) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':i}
arr.append(arrData)
textx += mention
if no < len(mid):
no += 1
textx += "%i. " % (num)
num=(num+1)
else:
try:
no = "\n╚══[ {} ]".format(str(line.getGroup(to).name))
except:
no = "\n╚══[ Success ]"
line.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
jp1 = line.getContact(lineMID).displayName
line.sendMessage(to, textx2 + "\n ✍ ᴍᴇɴᴛɪᴏɴᴇs ʙʏ : " + jp1 + "\n╰════════════════╯")
except Exception as error:
line.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def mentionMembers(to, mid):
try:
arrData = ""
textx = "╭════════════════╮\n ❨✪❩ ᴅk mentions ❨✪❩ \n║\n║◍ 1. ".format(str(len(mid)))
ginfo = line.getGroup(to)
arr = []
no = 1
for i in mid:
mention = "@x\n"
slen = str(len(textx))
elen = str(len(textx) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':i}
arr.append(arrData)
textx += mention
if no < len(mid):
no += 1
textx += "║◍ {}. ".format(str(no))
else:
textx += "\n「 Total {} Member 」\n╰════════════════╯".format(str(len(mid)))
line.sendMessage(to, textx, {'AGENT_NAME':'「 Creator 」', 'AGENT_LINK': 'line://ti/p/~eg_2'.format(line.getProfile().userid), 'AGENT_ICON': "http://dl.profile.line-cdn.net/" + line.getProfile().picturePath, 'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
line.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def sendMentionV2(to, text="", mids=[]):
arrData = ""
arr = []
mention = "@jeck "
if mids == []:
raise Exception("Invalid mids")
if "@!" in text:
if text.count("@!") != len(mids):
raise Exception("Invalid mids")
texts = text.split("@!")
textx = ""
for mid in mids:
textx += str(texts[mids.index(mid)])
slen = len(textx)
elen = len(textx) + 15
arrData = {'S':str(slen), 'E':str(elen - 4), 'M':mid}
arr.append(arrData)
textx += mention
textx += str(texts[len(mids)])
else:
textx = ""
slen = len(textx)
elen = len(textx) + 15
arrData = {'S':str(slen), 'E':str(elen - 4), 'M':mids[0]}
arr.append(arrData)
textx += mention + str(text)
line.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
def sendMention(to, mid, firstmessage):
try:
arrData = ""
text = "%s " %(str(firstmessage))
arr = []
mention = "@x \n"
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':mid}
arr.append(arrData)
today = datetime.today()
future = datetime(2018,3,1)
hari = (str(future - today))
comma = hari.find(",")
hari = hari[:comma]
teman = line.getAllContactIds()
gid = line.getGroupIdsJoined()
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
eltime = time.time() - mulai
bot = waktu(eltime)  # runtime() is not defined anywhere; waktu() is the elapsed-time formatter
text += mention+"◐ Jam : "+datetime.strftime(timeNow,'%H:%M:%S')+" Wib\n🔰 Group : "+str(len(gid))+"\n🔰 Teman : "+str(len(teman))+"\n🔰 Expired : In "+hari+"\n🔰 Version : Saints Bot\n🔰 Tanggal : "+datetime.strftime(timeNow,'%Y-%m-%d')+"\n🔰 Runtime : \n • "+bot
line.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
line.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def summon(to, nama):
aa = ""
bb = ""
strt = int(0)
akh = int(0)
nm = nama
myid = line.getProfile().mid
if myid in nm:
nm.remove(myid)
for mm in nm:
akh = akh + 6
aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
strt = strt + 7
akh = akh + 1
bb += "@nrik "
aa = (aa[:int(len(aa)-1)])
text = bb
try:
line.sendMessage(to, text, contentMetadata={'MENTION':'{"MENTIONEES":['+aa+']}'}, contentType=0)
except Exception as error:
print(error)
def logError(text):
line.log("[ ERROR ] " + str(text))
time_ = datetime.now()
with open("errorLog.txt","a") as error:
error.write("\n[%s] %s" % (str(time), text))
def download_page(url):
version = (3,0)
cur_version = sys.version_info
if cur_version >= version:
try:
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
req = urllib.request.Request(url, headers = headers)
resp = urllib.request.urlopen(req)
respData = str(resp.read())
return respData
except Exception as e:
print(str(e))
else:
import urllib2
try:
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
req = urllib2.Request(url, headers = headers)
response = urllib2.urlopen(req)
page = response.read()
return page
except:
return"Page Not found"
def _images_get_next_item(s):
start_line = s.find('rg_di')
if start_line == -1:
end_quote = 0
link = "no_links"
return link, end_quote
else:
start_line = s.find('"class="rg_meta"')
start_content = s.find('"ou"',start_line+1)
end_content = s.find(',"ow"',start_content+1)
content_raw = str(s[start_content+6:end_content-1])
return content_raw, end_content
def _images_get_all_items(page):
items = []
while True:
item, end_content = _images_get_next_item(page)
if item == "no_links":
break
else:
items.append(item)
time.sleep(0.1)
page = page[end_content:]
return items
def _images_get_all_items(page):
items = []
while True:
item, end_content = _images_get_next_item(page)
if item == "no_links":
break
else:
items.append(item) #Append all the links in the list named 'Links'
#time.sleep(0.1) #Timer could be used to slow down the request for#image downloads
page = page[end_content:]
return items
def cTime_to_datetime(unixtime):
return datetime.fromtimestamp(int(str(unixtime)[:len(str(unixtime))-3]))
def dt_to_str(dt):
return dt.strftime('%H:%M:%S')
def delete_log():
ndt = datetime.now()
for data in msg_dict:
if (datetime.utcnow() - cTime_to_datetime(msg_dict[data]["createdTime"])) > timedelta(1):
if "path" in msg_dict[data]:
line.deleteFile(msg_dict[data]["path"])
del msg_dict[data]
#atexit.register(atend)
def helpmessage():
helpMessage = """
╭═════════════
║[🔰 Ⓓⓚ~ⒷⓄⓣ☯t]
╠═════════════
║✍ ᴍᴇ
║✍ sᴘ
║✍ sᴇᴛ
║✍ ᴘᴘ
║✍ ɴᴋ:
║✍ ɢɪᴅ
║✍ ᴋɪᴄᴋ @
║✍ ᴠᴋɪᴄᴋ @
║✍ ɴᴜᴋᴇ
║✍ Dkbots
║✍ ɢᴜʀʟ
║✍ ʜᴇʟᴘ
║✍ ᴍɪᴅ
║✍ ᴍɪᴅ @
║✍ ᴍᴜsɪᴄ
║✍ ᴍᴏᴠɪᴇ
║✍ ʀᴇᴊᴇᴄᴛ
║✍ ᴄᴀɴᴄᴇʟ
║✍ ɢᴘɪᴄᴛ
║✍ ᴄᴏᴠᴇʀ
║✍ ᴘɪᴄᴛ @
║✍ ᴄᴏᴠᴇʀ @
║✍ ᴄᴏᴘʏ @
║✍ ɢᴄᴀʟʟ
║✍ sᴘᴀᴍ
║✍ ʙᴀᴄᴋᴜᴘ
║✍ ʏᴏᴜᴛᴜʙᴇ
║✍ ɪᴍᴀɢᴇ:
║✍ ɪɴsᴛᴀɢʀᴀᴍ
║✍ ᴋᴀʟᴋᴜʟᴀᴛᴏʀ
║✍ ʙʀᴏᴀᴅᴄᴀsᴛ
║✍ Tag\Tag all\Desah
╠═════════════
║ʀᴇʟᴀᴛᴇᴅ ɢʀᴏᴜᴘ
╠═════════════
║✍ ʀᴇʙᴏᴏᴛ
║✍ ʀᴜɴᴛɪᴍᴇ
║✍ ᴀʙᴏᴜᴛ
║✍ ᴄʀᴇᴀᴛᴏʀ
║✍ ᴍʏɴᴀᴍᴇ
║✍ ᴍʏʙɪᴏ
║✍ ᴍʏᴠɪᴅ
║✍ ɢᴇᴛʙɪᴏ @
║✍ ɢᴄʀᴇᴀᴛᴏʀ
║✍ ɢɴᴀᴍᴇ
║✍ ᴍᴇᴍʟɪsᴛ
║✍ ɢʀᴏᴜᴘs
║✍ ᴀᴜᴛᴏʟɪᴋᴇ
║✍ ʟɪɴᴋ ᴏɴ/ᴏғғ
║✍ ɢᴇᴛɴᴀᴍᴇ @
║✍ ᴜᴘᴅᴀᴛᴇ ᴘɪᴄᴛ
║✍ ɢᴇᴛ ᴄᴏɴᴛᴀᴄᴛ @
║✍ ʀᴇᴍᴏᴠᴇᴄʜᴀᴛ
║✍ ɢᴇᴛ ᴠɪᴅᴇᴏᴘʀᴏғɪʟᴇ
║✍ ᴜᴘᴅᴀᴛᴇ ᴘɪᴄᴛ ɢʀᴏᴜᴘ
║✍ ᴀʟʟsᴇᴛᴛɪɴɢs ᴍᴏᴅᴇ ᴏɴ
║✍ ᴀʟʟsᴇᴛᴛɪɴɢs ᴍᴏᴅᴇ ᴏғғ
║✍ ᴛᴀɢ /ʜɪ /ʜᴀɪ /ʜᴇᴍ /ᴅᴋ
╠═════════════
║Mimic Command
╠═════════════
║✍ ᴍɪᴍɪᴄᴀᴅᴅ
║✍ ᴍɪᴍɪᴄᴅᴇʟ
║✍ ᴍɪᴍɪᴄʟɪsᴛ
║✍ ᴍɪᴍɪᴄ ᴏɴ/ᴏғғ
╠═════════════
║Set Respon Dk
╠═════════════
║✍ sᴇᴛ ʀᴇsᴘᴏɴ1
║✍ ᴄᴇᴋ ʀᴇsᴘᴏɴ1
║✍ sᴇᴛ ʀᴇsᴘᴏɴ2
║✍ ᴄᴇᴋ ʀᴇsᴘᴏɴ2
║✍ sᴇᴛ ʀᴇsᴘᴏɴ3
║✍ ᴄᴇᴋ ʀᴇsᴘᴏɴ3
║✍ sᴇᴛ ʀᴇsᴘᴏɴᴘᴄ
║✍ ᴄᴇᴋ ʀᴇsᴘᴏɴᴘᴄ
║✍ sᴇᴛ ᴡᴇʟᴄᴏᴍᴇ
║✍ ᴄᴇᴋ ᴡᴇᴋᴄᴏᴍᴇ
║✍ sᴇᴛ ʟᴇᴀᴠᴇᴍsɢ
║✍ ᴄᴇᴋ ʟᴇᴀᴠᴇᴍsɢ
╠═════════════
║Command Kicker DK
╠═════════════
║✍ ᴍʏʙᴏᴛ
║✍ ᴀʟʟʙᴏᴛ
║✍ ᴘɪɴɢ
║✍ ᴘᴏɴɢ
║✍ ʙᴏᴛs ᴍᴇ
║✍ ʀᴇsᴘᴏɴs
║✍ ɴᴜᴋᴇᴀʟʟ
║✍ ʙᴏᴛ①/⑤ ʜᴇʟᴘ
║✍ ʟᴇᴀᴠᴇᴀʟʟɢʀᴏᴜᴘs
║✍ ʙᴏᴛs ᴜᴘᴅɴᴀᴍᴇ
║✍ ʙᴏᴛs ᴜᴘᴅsᴛᴀᴛᴜs
║✍ Dk.masuk
║✍ Dk.pulang
║✍ ʙᴏᴛs ᴄᴏᴘʏ @
║✍ ʙᴏᴛs ʙᴀᴄᴋᴜᴘ
║✍ ʙᴏᴛs ɢʀᴏᴜᴘs
║✍ ᴘᴜʟᴀɴɢ/ʙʏᴇ ᴀʟʟ
╠═════════════
║Command Protect
╠═════════════
║✍ ᴘʀᴏᴛᴇᴄᴛ sᴇᴛ
║✍ ᴘʀᴏᴛᴇᴄᴛ ᴏɴ/ᴏғғ
║✍ ʟɪɴᴋᴘʀᴏᴛᴇᴄᴛ ᴏɴ/ᴏғғ
║✍ ᴄᴀɴᴄᴇʟᴀʟʟ ᴏɴ/ᴏғғ
║✍ ɪɴᴠɪᴛᴇᴘʀᴏᴛᴇᴄᴛ ᴏɴ/ᴏғғ
║✍ ᴘʀᴏᴛᴇᴄᴛᴀʟʟ ᴏɴ/ᴏғғ
║✍ Promo
║✍ Kibar
║✍ Dkbot
║✍ Mybot
╚════════════
"""
return helpMessage
#==============================================================================#
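# lineBot(op) is the operation dispatcher: OEPoll feeds it LINE operations (op.type 0/5/13/15/17/24/25),
# and the elif chain below matches lowercase text commands in incoming messages.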
def lineBot(op):
try:
if op.type == 0:
print ("[ 0 ] END OF OPERATION")
return
if op.type == 5:
print ("[ 5 ] NOTIFIED ADD CONTACT")
if settings["autoAdd"] == True:
line.sendMessage(op.param1, "ʜᴀʟʟᴏ {} ᴛx ғᴏʀ ᴀᴅᴅ ᴍᴇ\nʙʏ: ᴛᴇᴀᴍ ᴅᴋᴢ ᴘʀᴏᴛᴇᴄᴛɪᴏɴ ".format(str(line.getContact(op.param1).displayName)))
if op.type == 15:
print ("[ 15 ] MEMBER LEAVE GROUP")
if settings["leavemsg"] == True:
contact = line.getContact(op.param2)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
text = "ɢᴏᴏᴅ ʙʏᴇ @!\n{}".format(str(settings["keluar"]))
sendMentionV2(op.param1, text, [op.param2])
line.sendImageWithURL(op.param1,image)
#____________________
if op.type == 17:
print ("[ 17 ] MEMBER JOIN GROUP")
if settings["welcomemsg"] == True:
group = line.getGroup(op.param1)
contact = line.getContact(op.param2)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
text = "ᴡᴇʟᴄᴏᴍᴇ ᴛᴏ ɢʀᴏᴜᴘ ☛ " + str(group.name) + "\nʜᴀʟʟᴏ @!{}\n".format(str(settings["welcome"]))
sendMentionV2(op.param1, text, [op.param2])
line.sendImageWithURL(op.param1,image)
if op.type == 5:
print ("[ 5 ] NOTIFIED AUTO BLOCK CONTACT")
if settings["autoblock"] == True:
line.sendMessage(op.param1, "Halo {} \nThank yah \nSory akun saya Autoblock ".format(str(line.getContact(op.param1).displayName)))
line.blockContact(op.param1)
if op.type == 13:
print ("[ 13 ] NOTIFIED INVITE GROUP")
group = line.getGroup(op.param1)
if settings["autoJoin"] == True:
line.acceptGroupInvitation(op.param1)
if op.type == 24:
print ("[ 24 ] NOTIFIED LEAVE ROOM")
if settings["autoLeave"] == True:
line.leaveRoom(op.param1)
if op.type == 25:
print ("[ 25 ] SEND MESSAGE")
msg = op.message
text = msg.text
msg_id = msg.id
receiver = msg.to
sender = msg._from
if msg.toType == 0:
if sender != line.profile.mid:
to = sender
else:
to = receiver
else:
to = receiver
if msg.contentType == 0:
if text is None:
return
#==============================================================================#
elif text.lower() == 'menu':
helpMessage = helpmessage()
jp1 = line.getContact(lineMID).displayName
line.sendMessage(to,str(helpMessage) + "╭════════════════════╮\n❨✪❩ ᴠɪᴇᴡ sᴇᴛᴛɪɴɢs = sᴇᴛ \n❨✪❩ ᴠɪᴇᴡ ɪɴғᴏ ʙᴏᴛ = ᴀʙᴏᴜᴛ \n❨✪❩ ʜᴇʟᴘᴍᴇssᴀɢᴇ ʙʏ : " + jp1+ "\n╰════════════════════╯\n ™ ᴛᴇᴀᴍ ᴅᴋ ᴘʀᴏᴛᴇᴄᴛɪᴏɴ ™\n")
#line.sendContact(to, "u6c878e91cef2d76e95d5f63da51b2193")
elif text.lower() == 'bot1 help':
helpMessage = helpmessage()
jp1 = ki.getContact(kiMID).displayName
ki.sendMessage(to,str(helpMessage) + "╭════════════════════╮\n❨✪❩ ᴠɪᴇᴡ sᴇᴛᴛɪɴɢs = sᴇᴛ \n❨✪❩ ᴠɪᴇᴡ ɪɴғᴏ ʙᴏᴛ = ᴀʙᴏᴜᴛ \n❨✪❩ ʜᴇʟᴘᴍᴇssᴀɢᴇ ʙʏ : " + jp1+ "\n╰════════════════════╯\n ™ ᴛᴇᴀᴍ ᴅᴋ ᴘʀᴏᴛᴇᴄᴛɪᴏɴ ™\n")
#line.sendContact(to, "u6c878e91cef2d76e95d5f63da51b2193")
elif text.lower() == 'bot2 help':
helpMessage = helpmessage()
jp1 = ki2.getContact(ki2MID).displayName
ki2.sendMessage(to,str(helpMessage) + "╭════════════════════╮\n❨✪❩ ᴠɪᴇᴡ sᴇᴛᴛɪɴɢs = sᴇᴛ \n❨✪❩ ᴠɪᴇᴡ ɪɴғᴏ ʙᴏᴛ = ᴀʙᴏᴜᴛ \n❨✪❩ ʜᴇʟᴘᴍᴇssᴀɢᴇ ʙʏ : " + jp1+ "\n╰════════════════════╯\n ™ ᴛᴇᴀᴍ ᴅᴋ ᴘʀᴏᴛᴇᴄᴛɪᴏɴ ™\n")
#line.sendContact(to, "u6c878e91cef2d76e95d5f63da51b2193")
elif text.lower() == 'bot3 help':
helpMessage = helpmessage()
jp1 = ki3.getContact(ki3MID).displayName
ki3.sendMessage(to,str(helpMessage) + "╭════════════════════╮\n❨✪❩ ᴠɪᴇᴡ sᴇᴛᴛɪɴɢs = sᴇᴛ \n❨✪❩ ᴠɪᴇᴡ ɪɴғᴏ ʙᴏᴛ = ᴀʙᴏᴜᴛ \n❨✪❩ ʜᴇʟᴘᴍᴇssᴀɢᴇ ʙʏ : " + jp1+ "\n╰════════════════════╯\n ™ ᴛᴇᴀᴍ ᴅᴋ ᴘʀᴏᴛᴇᴄᴛɪᴏɴ ™\n")
#line.sendContact(to, "u6c878e91cef2d76e95d5f63da51b2193")
elif text.lower() == 'bot4 help':
helpMessage = helpmessage()
jp1 = ki4.getContact(ki4MID).displayName
ki4.sendMessage(to,str(helpMessage) + "╭════════════════════╮\n❨✪❩ ᴠɪᴇᴡ sᴇᴛᴛɪɴɢs = sᴇᴛ \n❨✪❩ ᴠɪᴇᴡ ɪɴғᴏ ʙᴏᴛ = ᴀʙᴏᴜᴛ \n❨✪❩ ʜᴇʟᴘᴍᴇssᴀɢᴇ ʙʏ : " + jp1+ "\n╰════════════════════╯\n ™ ᴛᴇᴀᴍ ᴅᴋ ᴘʀᴏᴛᴇᴄᴛɪᴏɴ ™\n")
#line.sendContact(to, "u6c878e91cef2d76e95d5f63da51b2193")
elif text.lower() == 'bot5 help':
helpMessage = helpmessage()
jp1 = ki5.getContact(ki5MID).displayName
ki5.sendMessage(to,str(helpMessage) + "╭════════════════════╮\n❨✪❩ ᴠɪᴇᴡ sᴇᴛᴛɪɴɢs = sᴇᴛ \n❨✪❩ ᴠɪᴇᴡ ɪɴғᴏ ʙᴏᴛ = ᴀʙᴏᴜᴛ \n❨✪❩ ʜᴇʟᴘᴍᴇssᴀɢᴇ ʙʏ : " + jp1+ "\n╰════════════════════╯\n ™ ᴛᴇᴀᴍ ᴅᴋ ᴘʀᴏᴛᴇᴄᴛɪᴏɴ ™\n")
#line.sendContact(to, "u6c878e91cef2d76e95d5f63da51b2193")
elif text.lower() == 'bot6 help':
helpMessage = helpmessage()
jp1 = ki6.getContact(ki5MID).displayName
ki6.sendMessage(to,str(helpMessage) + "╭════════════════════╮\n❨✪❩ ᴠɪᴇᴡ sᴇᴛᴛɪɴɢs = sᴇᴛ \n❨✪❩ ᴠɪᴇᴡ ɪɴғᴏ ʙᴏᴛ = ᴀʙᴏᴜᴛ \n❨✪❩ ʜᴇʟᴘᴍᴇssᴀɢᴇ ʙʏ : " + jp1+ "\n╰════════════════════╯\n ™ ᴛᴇᴀᴍ ᴅᴋ ᴘʀᴏᴛᴇᴄᴛɪᴏɴ ™\n")
#==============================================================================#
elif text.lower() == 'speed':
start = time.time()
line.sendMessage(to, "progres...")
elapsed_time = time.time() - start
line.sendMessage(to,format(str(elapsed_time))+" seconds")
ki.sendMessage(to,format(str(elapsed_time))+" seconds")
ki2.sendMessage(to,format(str(elapsed_time))+" seconds")
ki3.sendMessage(to,format(str(elapsed_time))+" seconds")
ki4.sendMessage(to,format(str(elapsed_time))+" seconds")
ki5.sendMessage(to,format(str(elapsed_time))+" seconds")
ki6.sendMessage(to,format(str(elapsed_time))+" seconds")
elif text.lower() == 'sp':
start = time.time()
line.sendMessage(to, "progres...")
elapsed_time = time.time() - start
line.sendMessage(to,format(str(elapsed_time))+" seconds")
ki.sendMessage(to,format(str(elapsed_time))+" seconds")
ki2.sendMessage(to,format(str(elapsed_time))+" seconds")
ki3.sendMessage(to,format(str(elapsed_time))+" seconds")
ki4.sendMessage(to,format(str(elapsed_time))+" seconds")
ki5.sendMessage(to,format(str(elapsed_time))+" seconds")
ki6.sendMessage(to,format(str(elapsed_time))+" seconds")
elif text.lower() == 'reboot':
line.sendMessage(to, "I'II come back latter")
line.sendMessage(to, "Restarted done ♪")
restartBot()
elif text.lower() == 'runtime':
eltime = time.time() -mulai
van = "╭════════════════════╮\n Mybot sudah berjalan selama\n " +waktu(eltime)+"\n╰════════════════════╯"
line.sendMessage(to,van)
#___________
elif text.lower() == 'tagmem' or text.lower() == 'up' or text.lower() == 'hi' or text.lower() == 'dk' or text.lower() == 'dor' or text.lower() == 'hem' or text.lower() == 'absen' or text.lower() == 'muach' or text.lower() == 'hai':
group = line.getGroup(msg.to)
members = [contact.mid for contact in group.members]
tags = []
for i in range(0, len(members), 20):
tags.append(list(members[i:i+20]))
for t in tags:
msg = ttypes.Message(to=to)
tst ="❨✪❩ ᴅᴋ ᴍᴇɴᴛɪᴏɴs ❨✪❩ \n\n"
tst += u''
s=len(tst)
d=[]
for i in range(len(t)):
d.append({"S":str(s), "E" :str(s+4), "M":t[i]})
s += 5
tst +=u'@jek\n'
msg.text = tst.rstrip()
msg.contentMetadata = {u'MENTION':json.dumps({"MENTIONEES":d})}
line.talk.sendMessage(0,msg)
jp = line.getContact(lineMID).displayName
else:
line.sendMessage(to,"╭════════════════╮\n❨✪❩ ᴍᴇɴᴛɪᴏɴᴇs ʙʏ DK : " + jp+"\n╰════════════════╯")
elif text.lower() == 'about':
try:
arr = []
owner = "u7e99c5b3e4f01c95c104d0993fc41998"
creator = line.getContact(owner)
contact = line.getContact(lineMID)
grouplist = line.getGroupIdsJoined()
contactlist = line.getAllContactIds()
blockedlist = line.getBlockedContactIds()
ret_ = "____________________________\n❨✪❩ Impormation Selfbot ❨✪❩\n____________________________"
ret_ += "\n┃❨✪❩ Line Name : {}".format(contact.displayName)
ret_ += "\n┃❨✪❩ Groups : {}".format(str(len(grouplist)))
ret_ += "\n┃❨✪❩ Friends : {}".format(str(len(contactlist)))
ret_ += "\n┃❨✪❩ Blocked : {}".format(str(len(blockedlist)))
ret_ += "\n┃❨✪❩ Version1 : Python3"
ret_ += "\n┃❨✪❩ Version2 : Premium server"
ret_ += "\n┃❨✪❩ Masa Aktif : 00-00-2018"
ret_ += "\n┃❨✪❩ Creator : {}".format(creator.displayName)
ret_ += "\n____________________________"
line.sendMessage(to, str(ret_))
except Exception as e:
line.sendMessage(msg.to, str(e))
#==============================================================================#
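# 'settings' / 'myset': dumps the current on/off state of every toggle in the
# settings dict as a numbered list.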
elif text.lower() == 'settings' or text.lower() == 'myset':
try:
ret_ = "╭═════════════════╮\n"
ret_ += " ❨✪❩ Settings Mybot ❨✪❩\n"
ret_ += "╰═════════════════╯\n"
if settings["autoAdd"] == True: ret_ += "01.┃❨✪❩ Autoadd On \n"
else: ret_ += "01.┃❨✪❩ Autoadd Off \n"
if settings["autoblock"] == True: ret_ += "02.┃❨✪❩ AutoBlock On \n"
else: ret_ += "02.┃❨✪❩ AutoBlock Off \n"
if settings["contact"] == True: ret_ += "03.┃❨✪❩ Contact On \n"
else: ret_ += "03.┃❨✪❩ Contact Off \n"
if settings["autoJoin"] == True: ret_ += "04.┃❨✪❩ AutoJoin On \n"
else: ret_ += "04.┃❨✪❩ AutoJoin Off \n"
if settings["mimic"]["status"] == True: ret_ += "05.┃❨✪❩ Mimic On \n"
else: ret_ += "05.┃❨✪❩ Mimic Off \n"
if settings["welcomemsg"] == True: ret_+= "06.┃❨✪❩ Welcome On \n"
else: ret_ +="06.┃❨✪❩ Welcome Off \n"
if settings["leavemsg"] == True: ret_+= "07.┃❨✪❩ Leavemsg On \n"
else: ret_ +="07.┃❨✪❩ Leavemsg Off \n"
if settings["autoLeave"] == True: ret_ += "08.┃❨✪❩ AutoLeave On \n"
else: ret_ += "08.┃❨✪❩ AutoLeave Off \n"
if settings["autoRead"] == True: ret_ += "09.┃❨✪❩ AutoRead On \n"
else: ret_ += "09.┃❨✪❩ AutoRead Off \n"
if settings["checkSticker"] == True: ret_ += "10.┃❨✪❩ CekSticker On \n"
else: ret_ += "10.┃❨✪❩ CekSticker Off \n"
if settings["autoRespon"] == True: ret_ += "11.┃❨✪❩ Respon1 On \n"
else: ret_ += "11.┃❨✪❩ Respon1 Off \n"
if settings["autoResponImage"] == True: ret_ += "12.┃❨✪❩ Respon2 On \n"
else: ret_ += "12.┃❨✪❩ Respon2 Off \n"
if settings["autoResponPm"] == True: ret_ += "13.┃❨✪❩ Respon3 On \n"
else: ret_ += "13.┃❨✪❩ Respon3 Off \n"
if settings["responPc"] == True: ret_ += "14.┃❨✪❩ ResponPc On \n"
else: ret_ += "14.┃❨✪❩ ResponPc Off \n"
if settings["autorejc"] == True: ret_ += "15.┃❨✪❩ AutoReject On \n"
else: ret_ += "15.┃❨✪❩ AutoReject Off "
ret_ += "\n╭═══════════════════╮"
jp = line.getContact(lineMID).displayName
line.sendMessage(to, str(ret_)+"\n ❨✪❩ Line Name: "+jp+"\n ❨✪❩ ᴅᴋ ᴘʀᴏᴛᴇᴄᴛɪᴏɴ ❨✪❩\n╰═══════════════════╯")
except Exception as e:
line.sendMessage(msg.to, str(e))
#==============================================================================#
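# 'protect set': shows only the group-protection toggles; the reply is broadcast
# from the main account and all six assist bots.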
elif text.lower() == 'protect set':
try:
ret_ = "Group Settings:\n\n"
if settings["protect"] == True: ret_ += " Protect : On \n"
else: ret_ += " Protect : Off\n"
if settings["qrprotect"] == True: ret_ += " Linkprotect : On \n"
else: ret_ += " Linkprotect : Off \n"
if settings["inviteprotect"] == True: ret_ += " Inviteprotect : On \n"
else: ret_ += " Inviteprotect : Off \n"
if settings["cancelprotect"] == True: ret_ += " Cancelall : On \n"
else: ret_ += " Cancelall : Off \n"
line.sendMessage(to, str(ret_))
ki.sendMessage(to, str(ret_))
ki2.sendMessage(to, str(ret_))
ki3.sendMessage(to, str(ret_))
ki4.sendMessage(to, str(ret_))
ki5.sendMessage(to, str(ret_))
ki6.sendMessage(to, str(ret_))
except Exception as e:
line.sendMessage(msg.to, str(e))
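# 'allsettings mode on/off': bulk toggles. Note that autoLeave is left False even
# in the "on" branch, presumably so the bot never leaves groups by itself.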
elif text.lower() == 'allsettings mode on':
settings["autoAdd"] = True
settings["autoblock"] = True
settings["contact"] = True
settings["autoJoin"] = True
settings["mimic"]["status"] = True
settings["welcomemsg"] = True
settings["leavemsg"] = True
settings["autoLeave"] = False
settings["autoRead"] = True
settings["checkSticker"] = True
settings["autoRespon"] = True
settings["autoResponImage"] = True
settings["autoResponPm"] = True
settings["responPc"] = True
settings["autorejc"] = True
line.sendMessage(to, "All Setting Bot Mode On")
elif text.lower() == 'allsettings mode off':
settings["autoAdd"] = False
settings["autoblock"] = False
settings["contact"] = False
settings["autoJoin"] = False
settings["mimic"]["status"] = False
settings["welcomemsg"] = False
settings["leavemsg"] = False
settings["autoLeave"] = False
settings["autoRead"] = False
settings["checkSticker"] = False
settings["autoRespon"] = False
settings["autoResponImage"] = False
settings["autoResponPm"] = False
settings["responPc"] = False
settings["autorejc"] = False
line.sendMessage(to, "All Setting Bot Mode Off")
elif text.lower() == 'autoadd on':
settings["autoAdd"] = True
line.sendMessage(to, "AutoAdd already On")
elif text.lower() == 'autoadd off':
settings["autoAdd"] = False
line.sendMessage(to, "AutoAdd already Off")
elif text.lower() == 'autoblock on':
settings["autoblock"] = True
line.sendMessage(to, "AutoBlock already On")
elif text.lower() == 'autoblock off':
settings["autoblock"] = False
line.sendMessage(to, "AutoBlock already Off")
elif text.lower() == 'autojoin on':
settings["autoJoin"] = True
line.sendMessage(to, "AutoJoin already On")
elif text.lower() == 'autojoin off':
settings["autoJoin"] = False
line.sendMessage(to, "AutoJoin already Off")
elif text.lower() == 'autoleave on':
settings["autoLeave"] = True
line.sendMessage(to, "AutoLeave already On")
elif text.lower() == 'autoleave off':
settings["autoLeave"] = False
line.sendMessage(to, "AutoLeave already Off")
elif text.lower() == 'autoread on':
settings["autoRead"] = True
line.sendMessage(to, "Autoread Chat already On")
elif text.lower() == 'autoread off':
settings["autoRead"] = False
line.sendMessage(to, "Autoread Chat already Off")
elif text.lower() == 'ceksticker on':
settings["checkSticker"] = True
line.sendMessage(to, "CekStiker already On")
elif text.lower() == 'ceksticker off':
settings["checkSticker"] = False
line.sendMessage(to, "CekStiker already Off")
elif text.lower() == 'respon1 on':
if sender in lineMID:
settings["autoRespon"] = True
line.sendMessage(to, "Autorespon1 Text di Aktifkan")
elif text.lower() == 'respon1 off':
if sender in lineMID:
settings["autoRespon"] = False
line.sendMessage(to, "Autorespon1 Text Off")
elif text.lower() == 'respon2 on':
if sender in lineMID:
settings["autoResponImage"] = True
line.sendMessage(to, "Autorespon2 TagImage di Aktifkan")
elif text.lower() == 'respon2 off':
if sender in lineMID:
settings["autoResponImage"] = False
line.sendMessage(to, "Autorespon2 Image Off")
elif text.lower() == 'respon3 on':
if sender in lineMID:
settings["autoResponPm"] = True
line.sendMessage(to, "Autorespon3 PM di Aktifkan")
elif text.lower() == 'respon3 off':
if sender in lineMID:
settings["autoResponPm"] = False
line.sendMessage(to, "Autorespon3 PM Off")
elif text.lower() == 'responpc on':
if sender in lineMID:
settings["responPc"] = True
line.sendMessage(to, "Autorespon Tagpc di Aktifkan")
elif text.lower() == 'responpc off':
if sender in lineMID:
settings["responPc"] = False
line.sendMessage(to, "Autorespon Tagpc Off")
#-------------------------------------------------------------------------------
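# Protection toggles below ('protect', 'linkprotect', 'inviteprotect', 'cancelall'):
# each one checks the current state, flips it if needed, and broadcasts a
# confirmation from the main account and the assist bots.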
elif text.lower() == 'protect on':
if sender in lineMID:
if settings["protect"] == True:
if settings["lang"] == "JP":
line.sendMessage(to," Protection Already On")
ki.sendMessage(to," Protection Already On")
ki2.sendMessage(to," Protection Already On")
ki3.sendMessage(to," Protection Already On")
ki4.sendMessage(to," Protection Already On")
ki5.sendMessage(to," Protection Already On")
ki6.sendMessage(to," Protection Already On")
else:
line.sendMessage(to,"Protection Already On")
ki.sendMessage(to," Protection Already On")
ki2.sendMessage(to," Protection Already On")
ki3.sendMessage(to," Protection Already On")
ki4.sendMessage(to," Protection Already On")
ki5.sendMessage(to," Protection Already On")
ki6.sendMessage(to," Protection Already On")
else:
settings["protect"] = True
if settings["lang"] == "JP":
line.sendMessage(to,"Protection Already On")
ki.sendMessage(to," Protection Already On")
ki2.sendMessage(to," Protection Already On")
ki3.sendMessage(to," Protection Already On")
ki4.sendMessage(to," Protection Already On")
ki5.sendMessage(to," Protection Already On")
ki6.sendMessage(to," Protection Already On")
else:
line.sendMessage(to,"Protection Already On")
ki.sendMessage(to," Protection Already On")
ki2.sendMessage(to," Protection Already On")
ki3.sendMessage(to," Protection Already On")
ki4.sendMessage(to," Protection Already On")
ki5.sendMessage(to," Protection Already On")
ki6.sendMessage(to," Protection Already On")
elif text.lower() == 'protect off':
if sender in lineMID:
if settings["protect"] == False:
if settings["lang"] == "JP":
line.sendMessage(to," Protection Already Off ")
ki.sendMessage(to," Protection Already Off ")
ki2.sendMessage(to," Protection Already Off ")
ki3.sendMessage(to," Protection Already Off ")
ki4.sendMessage(to," Protection Already Off ")
ki5.sendMessage(to," Protection Already Off ")
ki6.sendMessage(to," Protection Already Off ")
else:
line.sendMessage(to,"Protection Already Off ")
ki.sendMessage(to," Protection Already Off ")
ki2.sendMessage(to," Protection Already Off ")
ki3.sendMessage(to," Protection Already Off ")
ki4.sendMessage(to," Protection Already Off ")
ki5.sendMessage(to," Protection Already Off ")
ki6.sendMessage(to," Protection Already Off ")
else:
settings["protect"] = False
if settings["lang"] == "JP":
line.sendMessage(to,"Protection Already Off ")
ki.sendMessage(to," Protection Already Off ")
ki2.sendMessage(to," Protection Already Off ")
ki3.sendMessage(to," Protection Already Off ")
ki4.sendMessage(to," Protection Already Off ")
ki5.sendMessage(to," Protection Already Off ")
ki6.sendMessage(to," Protection Already Off ")
else:
line.sendMessage(to,"Protection Already Off ")
ki.sendMessage(to," Protection Already Off ")
ki2.sendMessage(to," Protection Already Off ")
ki3.sendMessage(to," Protection Already Off ")
ki4.sendMessage(to," Protection Already Off ")
ki5.sendMessage(to," Protection Already Off ")
ki6.sendMessage(to," Protection Already Off ")
#----------------------------------------------------------------------------------------
elif text.lower() == 'linkprotect on':
if sender in lineMID:
if settings["qrprotect"] == True:
if settings["lang"] == "JP":
line.sendMessage(to,"Linkprotect Already On ")
ki.sendMessage(to,"Linkprotect Already On ")
ki2.sendMessage(to,"Linkprotect Already On ")
ki3.sendMessage(to,"Linkprotect Already On ")
ki4.sendMessage(to,"Linkprotect Already On ")
ki5.sendMessage(to,"Linkprotect Already On ")
ki6.sendMessage(to," Protection Already On ")
else:
line.sendMessage(to,"Linkprotect Already On ")
ki.sendMessage(to,"Linkprotect Already On ")
ki2.sendMessage(to,"Linkprotect Already On ")
ki3.sendMessage(to,"Linkprotect Already On ")
ki4.sendMessage(to,"Linkprotect Already On ")
ki5.sendMessage(to,"Linkprotect Already On ")
ki6.sendMessage(to," Protection Already On ")
else:
settings["qrprotect"] = True
if settings["lang"] == "JP":
line.sendMessage(to,"Linkprotect Already On ")
ki.sendMessage(to,"Linkprotect Already On ")
ki2.sendMessage(to,"Linkprotect Already On ")
ki3.sendMessage(to,"Linkprotect Already On ")
ki4.sendMessage(to,"Linkprotect Already On ")
ki5.sendMessage(to,"Linkprotect Already On ")
ki6.sendMessage(to," Protection Already On ")
else:
line.sendMessage(to,"Linkprotect Already On ")
ki.sendMessage(to,"Linkprotect Already On ")
ki2.sendMessage(to,"Linkprotect Already On ")
ki3.sendMessage(to,"Linkprotect Already On ")
ki4.sendMessage(to,"Linkprotect Already On ")
ki5.sendMessage(to,"Linkprotect Already On ")
ki6.sendMessage(to," Protection Already On ")
elif text.lower() == 'linkprotect off':
if sender in lineMID:
if settings["qrprotect"] == False:
if settings["lang"] == "JP":
line.sendMessage(to,"Linkprotect Already Off ")
ki.sendMessage(to,"Linkprotect Already Off ")
ki2.sendMessage(to,"Linkprotect Already Off")
ki3.sendMessage(to,"Linkprotect Already Off ")
ki4.sendMessage(to,"Linkprotect Already Off")
ki5.sendMessage(to,"Linkprotect Already Off")
else:
line.sendMessage(to,"Linkprotect Already Off ")
ki.sendMessage(to,"Linkprotect Already Off ")
ki2.sendMessage(to,"Linkprotect Already Off")
ki3.sendMessage(to,"Linkprotect Already Off ")
ki4.sendMessage(to,"Linkprotect Already Off")
ki5.sendMessage(to,"Linkprotect Already Off")
else:
settings["qrprotect"] = False
if settings["lang"] == "JP":
line.sendMessage(to,"Linkprotect Already Off ")
ki.sendMessage(to,"Linkprotect Already Off ")
ki2.sendMessage(to,"Linkprotect Already Off")
ki3.sendMessage(to,"Linkprotect Already Off ")
ki4.sendMessage(to,"Linkprotect Already Off")
ki5.sendMessage(to,"Linkprotect Already Off")
else:
line.sendMessage(to,"Linkprotect Already Off ")
ki.sendMessage(to,"Linkprotect Already Off ")
ki2.sendMessage(to,"Linkprotect Already Off")
ki3.sendMessage(to,"Linkprotect Already Off ")
ki4.sendMessage(to,"Linkprotect Already Off")
ki5.sendMessage(to,"Linkprotect Already Off")
#-------------------------------------------------------------------------------
elif text.lower() == 'inviteprotect on':
if sender in lineMID:
if settings["inviteprotect"] == True:
if settings["lang"] == "JP":
line.sendMessage(msg.to,"Protection Invite Already On")
ki.sendMessage(msg.to,"Protection Invite Already On")
ki2.sendMessage(msg.to,"Protection Invite Already On")
ki3.sendMessage(msg.to,"Protection Invite Already On")
ki4.sendMessage(msg.to,"Protection Invite Already On")
ki5.sendMessage(msg.to,"Protection Invite Already On")
else:
line.sendMessage(msg.to,"Protection Invite Already On")
ki.sendMessage(msg.to,"Protection Invite Already On")
ki2.sendMessage(msg.to,"Protection Invite Already On")
ki3.sendMessage(msg.to,"Protection Invite Already On")
ki4.sendMessage(msg.to,"Protection Invite Already On")
ki5.sendMessage(msg.to,"Protection Invite Already On")
else:
settings["inviteprotect"] = True
if settings["lang"] == "JP":
line.sendMessage(msg.to,"Protection Invite Already On")
ki.sendMessage(msg.to,"Protection Invite Already On")
ki2.sendMessage(msg.to,"Protection Invite Already On")
ki3.sendMessage(msg.to,"Protection Invite Already On")
ki4.sendMessage(msg.to,"Protection Invite Already On")
ki5.sendMessage(msg.to,"Protection Invite Already On")
else:
line.sendMessage(msg.to,"Protection Invite Already On")
ki.sendMessage(msg.to,"Protection Invite Already On")
ki2.sendMessage(msg.to,"Protection Invite Already On")
ki3.sendMessage(msg.to,"Protection Invite Already On")
ki4.sendMessage(msg.to,"Protection Invite Already On")
ki5.sendMessage(msg.to,"Protection Invite Already On")
elif text.lower() == 'inviteprotect off':
if sender in lineMID:
if settings["inviteprotect"] == False:
if settings["lang"] == "JP":
line.sendMessage(msg.to,"Protection Invite Already Off")
ki.sendMessage(msg.to,"Protection Invite Already Off")
ki2.sendMessage(msg.to,"Protection Invite Already Off")
ki3.sendMessage(msg.to,"Protection Invite Already Off")
ki4.sendMessage(msg.to,"Protection Invite Already Off")
ki5.sendMessage(msg.to,"Protection Invite Already Off")
else:
line.sendMessage(msg.to,"Protection Invite Already Off")
ki.sendMessage(msg.to,"Protection Invite Already Off")
ki2.sendMessage(msg.to,"Protection Invite Already Off")
ki3.sendMessage(msg.to,"Protection Invite Already Off")
ki4.sendMessage(msg.to,"Protection Invite Already Off")
ki5.sendMessage(msg.to,"Protection Invite Already Off")
else:
settings["inviteprotect"] = False
if settings["lang"] == "JP":
line.sendMessage(msg.to,"Protection Invite Already Off")
ki.sendMessage(msg.to,"Protection Invite Already Off")
ki2.sendMessage(msg.to,"Protection Invite Already Off")
ki3.sendMessage(msg.to,"Protection Invite Already Off")
ki4.sendMessage(msg.to,"Protection Invite Already Off")
ki5.sendMessage(msg.to,"Protection Invite Already Off")
else:
line.sendMessage(msg.to,"Protection Invite Already Off")
ki.sendMessage(msg.to,"Protection Invite Already Off")
ki2.sendMessage(msg.to,"Protection Invite Already Off")
ki3.sendMessage(msg.to,"Protection Invite Already Off")
ki4.sendMessage(msg.to,"Protection Invite Already Off")
ki5.sendMessage(msg.to,"Protection Invite Already Off")
#-------------------------------------------------------------------------------
elif text.lower() == 'cancelall on':
if msg._from in admin:
if settings["cancelprotect"] == True:
if settings["lang"] == "JP":
line.sendMessage(msg.to,"Protection Cancel Invite Already On")
ki.sendMessage(msg.to,"Protection Cancel Invite Already On")
ki2.sendMessage(msg.to,"Protection Cancel Invite Already On")
ki3.sendMessage(msg.to,"Protection Cancel Invite Already On")
ki4.sendMessage(msg.to,"Protection Cancel Invite Already On")
ki5.sendMessage(msg.to,"Protection Cancel Invite Already On")
else:
ki.sendMessage(msg.to,"Protection Cancel Invite Already On")
ki2.sendMessage(msg.to,"Protection Cancel Invite Already On")
ki3.sendMessage(msg.to,"Protection Cancel Invite Already On")
ki4.sendMessage(msg.to,"Protection Cancel Invite Already On")
ki5.sendMessage(msg.to,"Protection Cancel Invite Already On")
line.sendMessage(msg.to,"Protection Cancel Invite Already On")
else:
settings["cancelprotect"] = True
if settings["lang"] == "JP":
line.sendMessage(msg.to,"Protection Cancel Invite Already On")
ki.sendMessage(msg.to,"Protection Cancel Invite Already On")
ki2.sendMessage(msg.to,"Protection Cancel Invite Already On")
ki3.sendMessage(msg.to,"Protection Cancel Invite Already On")
ki4.sendMessage(msg.to,"Protection Cancel Invite Already On")
ki5.sendMessage(msg.to,"Protection Cancel Invite Already On")
else:
line.sendMessage(msg.to,"Protection Cancel Invite Already On")
ki.sendMessage(msg.to,"Protection Cancel Invite Already On")
ki2.sendMessage(msg.to,"Protection Cancel Invite Already On")
ki3.sendMessage(msg.to,"Protection Cancel Invite Already On")
ki4.sendMessage(msg.to,"Protection Cancel Invite Already On")
ki5.sendMessage(msg.to,"Protection Cancel Invite Already On")
elif text.lower() == 'cancelall off':
if msg._from in admin:
if settings["cancelprotect"] == False:
if settings["lang"] == "JP":
line.sendMessage(msg.to,"Protection Cancel Invite Already Off")
ki.sendMessage(msg.to,"Protection Cancel Invite Already Off")
ki2.sendMessage(msg.to,"Protection Cancel Invite Already Off")
ki3.sendMessage(msg.to,"Protection Cancel Invite Already Off")
ki4.sendMessage(msg.to,"Protection Cancel Invite Already Off")
ki5.sendMessage(msg.to,"Protection Cancel Invite Already Off")
else:
line.sendMessage(msg.to,"Protection Cancel Invite Already Off")
ki.sendMessage(msg.to,"Protection Cancel Invite Already Off")
ki2.sendMessage(msg.to,"Protection Cancel Invite Already Off")
ki3.sendMessage(msg.to,"Protection Cancel Invite Already Off")
ki4.sendMessage(msg.to,"Protection Cancel Invite Already Off")
ki5.sendMessage(msg.to,"Protection Cancel Invite Already Off")
else:
settings["cancelprotect"] = False
if settings["lang"] == "JP":
line.sendMessage(msg.to,"Protection Cancel Invite Already Off")
ki.sendMessage(msg.to,"Protection Cancel Invite Already Off")
ki2.sendMessage(msg.to,"Protection Cancel Invite Already Off")
ki3.sendMessage(msg.to,"Protection Cancel Invite Already Off")
ki4.sendMessage(msg.to,"Protection Cancel Invite Already Off")
ki5.sendMessage(msg.to,"Protection Cancel Invite Already Off")
else:
line.sendMessage(msg.to,"Protection Cancel Invite Already Off")
ki.sendMessage(msg.to,"Protection Cancel Invite Already Off")
ki2.sendMessage(msg.to,"Protection Cancel Invite Already Off")
ki3.sendMessage(msg.to,"Protection Cancel Invite Already Off")
ki4.sendMessage(msg.to,"Protection Cancel Invite Already Off")
ki5.sendMessage(msg.to,"Protection Cancel Invite Already Off")
#-------------------------------------------------------------------------------
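# 'protectall on/off': admin-only shortcut that flips all four protection flags at once.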
elif text.lower() == 'protectall on':
if msg._from in admin:
settings["protect"] = True
settings["qrprotect"] = True
settings["inviteprotect"] = True
settings["cancelprotect"] = True
line.sendMessage(msg.to,"All Protect Turned On")
ki.sendMessage(msg.to,"All Protect Turned On")
ki2.sendMessage(msg.to,"All Protect Turned On")
ki3.sendMessage(msg.to,"All Protect Turned On")
ki4.sendMessage(msg.to,"All Protect Turned On")
ki5.sendMessage(msg.to,"All Protect Turned On")
else:
line.sendMessage(msg.to,"Just for Owner")
elif text.lower() == 'protectall off':
if msg._from in admin:
settings["protect"] = False
settings["qrprotect"] = False
settings["inviteprotect"] = False
settings["cancelprotect"] = False
line.sendMessage(msg.to,"All Protect Turned Off")
ki.sendMessage(msg.to,"All Protect Turned Off")
ki2.sendMessage(msg.to,"All Protect Turned Off")
ki3.sendMessage(msg.to,"All Protect Turned Off")
ki4.sendMessage(msg.to,"All Protect Turned Off")
ki5.sendMessage(msg.to,"All Protect Turned Off")
ki6.sendMessage(msg.to,"All Protect Turned Off")
else:
line.sendMessage(msg.to,"Just for Owner")
elif text.lower() == 'desah':
group = line.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
k = len(nama)//20
for a in range(k+1):
txt = u''
s=0
b=[]
for i in group.members[a*20 : (a+1)*20]:
b.append({"S":str(s), "E" :str(s+6), "M":i.mid})
s += 7
txt += u'@Zero \n'
line.sendMessage(to, text=txt, contentMetadata={u'MENTION': json.dumps({'MENTIONEES':b})}, contentType=0)
line.sendMessage(to, "╔══════════╗\n ❨✪Jumlah {} ✪❩\n╚══════════╝".format(str(len(nama))))
elif text.lower() == 'tag':
group = line.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
k = len(nama)//20
for a in range(k+1):
txt = u''
s=0
b=[]
for i in group.members[a*20 : (a+1)*20]:
b.append({"S":str(s), "E" :str(s+6), "M":i.mid})
s += 7
txt += u'@Zero \n'
line.sendMessage(to, text=txt, contentMetadata={u'MENTION': json.dumps({'MENTIONEES':b})}, contentType=0)
line.sendMessage(to, "╔══════════╗\n ❨✪Jumlah {} ✪❩\n╚══════════╝".format(str(len(nama))))
elif text.lower() == 'tag all':
group = line.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
k = len(nama)//20
for a in range(k+1):
txt = u''
s=0
b=[]
for i in group.members[a*20 : (a+1)*20]:
b.append({"S":str(s), "E" :str(s+6), "M":i.mid})
s += 7
txt += u'@Zero \n'
line.sendMessage(to, text=txt, contentMetadata={u'MENTION': json.dumps({'MENTIONEES':b})}, contentType=0)
line.sendMessage(to, "╔══════════╗\n ❨✪Jumlah {} ✪❩\n╚══════════╝".format(str(len(nama))))
elif text.lower() == 'dkbots':
if msg._from in admin:
settings["protect"] = True
settings["qrprotect"] = True
settings["inviteprotect"] = True
settings["cancelprotect"] = True
line.sendMessage(msg.to,"╚☆Ⓢⓘⓐⓟ☆╗")
ki.sendMessage(msg.to,"╚☆╚Ⓚⓞⓜⓐⓝⓓⓝ╮╗")
ki2.sendMessage(msg.to,"╚☆Ⓢⓘⓐⓟ☆╗")
ki3.sendMessage(msg.to,"╚☆╚Ⓚⓞⓜⓐⓝⓓⓝ╮╗")
ki4.sendMessage(msg.to,"╚☆Ⓐⓜⓐⓝ-☆╗")
ki5.sendMessage(msg.to,"╚ⒷⓄⓈ☆╮╗")
else:
line.sendMessage(msg.to,"Just for Owner")
#==============================================================================#
elif text.lower() == "responsname":
line.sendMessage(msg.to,responsename)
ki.sendMessage(msg.to,"꧁༺şℎαℎ̿̿ Żαίη࿐➻❥1")
ki2.sendMessage(msg.to,"꧁༺şℎαℎ̿̿ Żαίη࿐➻❥2")
ki3.sendMessage(msg.to,"꧁༺şℎαℎ̿̿ Żαίη࿐➻❥3")
ki4.sendMessage(msg.to,"꧁༺şℎαℎ̿̿ Żαίη࿐➻❥4")
ki5.sendMessage(msg.to,"꧁༺şℎαℎ̿̿ Żαίη࿐➻❥5")
ki6.sendMessage(msg.to,"꧁༺şℎαℎ̿̿ Żαίη࿐➻❥6")
elif msg.text.lower() == 'mybots':
if msg._from in lineMID:
line.sendContact(to, kiMID)
line.sendContact(to, ki2MID)
line.sendContact(to, ki3MID)
line.sendContact(to, ki4MID)
line.sendContact(to, ki5MID)
line.sendContact(to, ki6MID)
elif msg.text.lower() == 'allbot':
if msg._from in lineMID:
ki.sendContact(to, kiMID)
ki2.sendContact(to, ki2MID)
ki3.sendContact(to, ki3MID)
ki4.sendContact(to, ki4MID)
ki5.sendContact(to, ki5MID)
ki6.sendContact(to, ki6MID)
elif msg.text.lower() == 'mybot':
if msg._from in lineMID:
ki.sendContact(to, kiMID)
ki2.sendContact(to, ki2MID)
ki3.sendContact(to, ki3MID)
ki4.sendContact(to, ki4MID)
ki5.sendContact(to, ki5MID)
ki6.sendContact(to, ki6MID)
elif msg.text.lower() == 'bots me':
if msg._from in lineMID:
line.sendContact(to, kiMID)
ki.sendContact(to, kiMID)
ki2.sendContact(to, ki2MID)
ki3.sendContact(to, ki3MID)
ki4.sendContact(to, ki4MID)
ki5.sendContact(to, ki5MID)
ki6.sendContact(to, ki6MID)
elif msg.text.lower() == 'kibar':
if msg._from in lineMID:
line.sendContact(to, lineMID)
line.sendContact(to, kiMID)
line.sendContact(to, ki2MID)
line.sendContact(to, ki3MID)
line.sendContact(to, ki4MID)
line.sendContact(to, ki5MID)
line.sendContact(to, ki6MID)
line.sendMessage(to, "█░░╦─╦╔╗╦─╔╗╔╗╔╦╗╔╗░░█\n█░░║║║╠─║─║─║║║║║╠─░░█\n█░░╚╩╝╚╝╚╝╚╝╚╝╩─╩╚╝░░█\n"
"ASSALAMUALAIKUM\n"
" ╭━Ⓓ✒Ⓡ✒ⒼⓄ✒Ⓝ✒\n"
" ╰╮┏━┳┳┓┏┳┳┓┏┳┳┳┓\n"
" ┏┻╋━┻┻┫┣┻┻┫┣┻┻┻┫\n"
" ┃HLO▪┃KMI DTANG LGI┃\n"
" ┗ⓞⓞ┻┻ⓞ━━ⓞ┻┻ⓞ━╯\n"
"UNTUK MENGGUSUR\nROOM KALIAN\n"
".. (҂`_´)\n"
" <,︻╦̵̵̿╤━ ҉ ~ •"
"█۞███████]▄▄▄▄▄▄▃●●\n"
"▂▄▅█████████▅▄▃▂…"
"[██████████████████]\n"
"◥⊙⊙▲⊙▲⊙▲⊙▲⊙▲⊙\n"
"╭━╮╭━╮\n"
"┃┃╰╯┃┃\n"
"┃╭╮╭╮┣┳━╮╭━━┳━━┳┳━╮\n"
"┃┃┃┃┃┣┫╭╮┫╭╮┃╭╮┣┫╭╯\n"
"┃┃┃┃┃┃┃┃┃┃╰╯┃╰╯┃┃┃\n"
"╰╯╰╯╰┻┻╯╰┻━╮┣━╮┣┻╯\n"
"╱╱╱╱╱╱╱╱╱╭━╯┣━╯┃\n"
"╱╱╱╱╱╱╱╱╱╰━━┻━━╯\n"
"👿━━━━━━━━━━━━━👿"
"Ⓣⓜⓟⓐ Ⓑⓐⓢⓐ_Ⓑⓐⓢⓘ\n"
"Ⓡⓐⓣⓐ ⓖⓐ ⓡⓐⓣⓐ\n"
"Ⓨⓖ ⓟⓝⓣⓘⓝⓖ ⓚⓘⓑⓐⓡ\n"
"Ⓣⓐⓝⓖⓚⓘⓢ Ⓖⓞⓑⓛⓞⓚ\n"
"👿━━━━━━━━━━━━━👿\n"
"╔══╗╔═╗╔══╗╔═╦═╗\n"
"╚╗╔╝║╦╝║╔╗║║║║║║\n"
"━║║━║╩╗║╠╣║║║║║║\n"
"━╚╝━╚═╝╚╝╚╝╚╩═╩╝\n"
"👿━━━━━━━━━━━━━👿\n"
"╔══╗ ╔╦╗\n"
"╚╗╗║ ║╔╝\n"
"╔╩╝║ ║╚╗\n"
"╚══╝ ╚╩╝\n"
"👿━━━━━━━━━━━━━👿\n"
"Ⓓⓡⓐⓖⓞⓝ_Ⓚⓘⓛⓛⓔⓡ\n"
"Ⓟⓤⓝⓨⓐ👿━━👿Ⓡⓐⓣⓐ Ⓝⓘ\n"
"Ⓜⓐⓗ━👿━\n"
"╔═╗╔══╗╔══╗╔══╗\n"
"║╬║║╔╗║╚╗╔╝║╔╗║\n"
"║╗╣║╠╣║━║║━║╠╣║\n"
"╚╩╝╚╝╚╝━╚╝━╚╝╚╝\n"
"━━━━━━━━━━━━━━━\n"
"╔═╗╔══╗╔══╗╔══╗\n"
"║╬║║╔╗║╚╗╔╝║╔╗║\n"
"║╗╣║╠╣║━║║━║╠╣║\n"
"╚╩╝╚╝╚╝━╚╝━╚╝╚╝\n"
"━━━━━━━━━━━━━━━\n"
"╔═╗╔══╗╔══╗╔══╗\n"
"║╬║║╔╗║╚╗╔╝║╔╗║\n"
"║╗╣║╠╣║━║║━║╠╣║\n"
"╚╩╝╚╝╚╝━╚╝━╚╝╚╝\n"
"━━━━━━━━━━━━━━━\n"
">>>Ⓑⓨⓔ_Ⓑⓨⓔ ⒼⒸ Ⓛⓐⓚⓝⓐⓣ>><\nⒹⓝⓓⓐⓜ Ⓒⓐⓡⓘ Ⓚⓜⓘ\n<<<<<<<<<>>\nhttp://line.me/ti/p/~reza.p.i.p\nhttp://line.me/ti/p/ryansakra_m1")
elif msg.text.lower() == 'ping':
if msg._from in lineMID:
ki2.sendMessage(to, "pong")
ki3.sendMessage(to, "pong")
ki4.sendMessage(to, "pong")
ki5.sendMessage(to, "pong")
ki.sendMessage(to, "pong")
elif msg.text.lower() == 'dkbot':
if msg._from in lineMID:
line.sendMessage(to, "[🔰 Ⓓⓚ~ⒷⓄⓣ☯t]")
ki2.sendMessage(to, "╚☆Ⓢⓘⓐⓟ☆╗\n╚Ⓚⓞⓜⓐⓝⓓⓝ╮╗")
ki3.sendMessage(to, "╚☆Ⓢⓘⓐⓟ☆╗\n╚Ⓚⓞⓜⓐⓝⓓⓝ╮╗")
ki4.sendMessage(to, "╚☆Ⓢⓘⓐⓟ☆╗\n╚Ⓚⓞⓜⓐⓝⓓⓝ╮╗")
ki5.sendMessage(to, "╚☆Ⓢⓘⓐⓟ☆╗\n╚Ⓚⓞⓜⓐⓝⓓⓝ╮╗")
ki.sendMessage(to, "╚☆Ⓐⓜⓐⓝ-☆╗\n╚ⒷⓄⓈ☆╮╗")
elif msg.text.lower() == 'promo':
if msg._from in lineMID:
line.sendMessage(to, "──────┅❇͜͡❇͜͡☆͜͡❇͜͡❇┅──────\nᴼᴾᴱᴺ ᴼᴿᴰᴱᴿ\n────────┅┅───────\n➣ꜱᴇʟꜰʙᴏᴛ ᴏɴʟʏ\n➣ꜱᴇʟꜰʙᴏᴛ + ᴀꜱɪꜱᴛ\n➣1 ᴀᴋᴜɴ ᴜᴛᴀᴍᴀ\n➣1 ᴀᴋᴜɴ ᴜᴛᴀᴍᴀ + 2 ᴀꜱɪꜱᴛ\n➣1 ᴀᴋᴜɴ ᴜᴛᴀᴍᴀ + 3 ᴀꜱɪꜱᴛ\n➣1 ᴀᴋᴜɴ ᴜᴛᴀᴍᴀ + 4 ᴀꜱɪꜱᴛ\n➣1 ᴀᴋᴜɴ ᴜᴛᴀᴍᴀ + 5 ᴀꜱɪꜱᴛ\n➣ʙᴏᴛᴘʀᴏᴛᴇᴄᴛ 3-11 ʙᴏᴛ ᴀꜱɪꜱᴛ\n➣ɴᴇᴡ ꜱᴄʀɪᴘᴛ\n➣ʜʀɢᴀ ʙɪꜱᴀ ɴᴇɢᴏ\n─────────┅┅─────────\n ✯❇͜͡❇͜͡C͜͡r͜͡e͜͡a͜͡t͜͡o͜͡r✯͜͡$͜͡ë͜͡I͜͡F͜͡-͜͡฿͜͜͡͡o͜͡t͜͡ ͜͡❇͜͡❇✯\nline.me/ti/p/~reza.p.i.p\nline.me/ti/p/~reza.p.i.p\n➣ѕєʟғвот κɪcκєʀ_+_ᴘʀᴏᴛᴇᴄᴛ\n────────┅❇͜͡❇͜͡☆͜͡❇͜͡❇┅────────")
line.sendMessage(to, "╭══════════\n║⚫─[ DAFTAR HARGA ]─⚫ \n║SELFBOT ONLY = 75K /BLN\n║2 ASSIST = 100K /BLN\n║5 ASSIST = 200K /BLN\n║10 ASSIST = 300K /BLN\n║\n║PROTECT ANTIJS\n║\n║2 BOT + ANTIJS = 150K /BLN\n║5 BOT + ANTIJS = 300K /BLN\n║10 BOT + ANTIJS = 500K /BLN\n║\n║═ই\═ANDA BERMINAT\n║ SILAHKAN ADD CONTACT \n║ DIBAWAH INI \n║\n║http://line.me/ti/p/~reza.p.i.p\n║ TERIMA KASIH \n║\n╰════════════")
elif msg.text.lower() == 'pong':
if msg._from in lineMID:
ki2.sendMessage(to, "ping")
ki3.sendMessage(to, "ping")
ki4.sendMessage(to, "ping")
ki5.sendMessage(to, "ping")
ki.sendMessage(to, "ping")
elif text.lower() == 'dk.pulang' or text.lower() == '.byeall' or text.lower() == '0':
if msg._from in lineMID:
ki.leaveGroup(msg.to)
ki2.leaveGroup(msg.to)
ki3.leaveGroup(msg.to)
ki4.leaveGroup(msg.to)
ki5.leaveGroup(msg.to)
ki6.leaveGroup(msg.to)
elif text.lower() == 'dk.masuk' or text.lower() == '.join' or text.lower() == '1':
if msg._from in lineMID:
G = line.getGroup(msg.to)
ginfo = line.getGroup(msg.to)
G.preventedJoinByTicket = False
line.updateGroup(G)
invsend = 0
Ticket = line.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
ki6.acceptGroupInvitationByTicket(msg.to,Ticket)
G = line.getGroup(msg.to)
G.preventedJoinByTicket = True
line.updateGroup(G)
elif "tagall " in msg.text:
group = line.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
k = len(nama)//20
for a in range(k+1):
txt = u''
s=0
b=[]
for i in group.members[a*20 : (a+1)*20]:
b.append({"S":str(s), "E" :str(s+6), "M":i.mid})
s += 7
txt += u'@Zero \n'
line.sendMessage(msg.to, text=txt, contentMetadata={u'MENTION': json.dumps({'MENTIONEES':b})}, contentType=0)
line.sendMessage(msg.to, "The number of members is pushed".format(str(len(nama))))
elif "dk " in msg.text:
group = line.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
k = len(nama)//20
for a in range(k+1):
txt = u''
s=0
b=[]
for i in group.members[a*20 : (a+1)*20]:
b.append({"S":str(s), "E" :str(s+6), "M":i.mid})
s += 7
txt += u'@Zero \n'
line.sendMessage(msg.to, text=txt, contentMetadata={u'MENTION': json.dumps({'MENTIONEES':b})}, contentType=0)
line.sendMessage(msg.to, "Jumlah Member didesah".format(str(len(nama))))
#____________
elif "Bots updname " in msg.text:
if msg._from in lineMID:
separate = msg.text.split("Bots updname ")
string = msg.text.replace(separate[0] + "Bots updname ","")
if len(string) <= 10000000000:
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
profile = ki2.getProfile()
profile.displayName = string
ki2.updateProfile(profile)
profile = ki3.getProfile()
profile.displayName = string
ki3.updateProfile(profile)
profile = ki4.getProfile()
profile.displayName = string
ki4.updateProfile(profile)
profile = ki5.getProfile()
profile.displayName = string
ki5.updateProfile(profile)
profile = ki6.getProfile()
profile.displayName = string
ki6.updateProfile(profile)
ki.sendMessage(msg.to,"Name changed to " + string + "")
ki2.sendMessage(msg.to,"Name changed to " + string + "")
ki3.sendMessage(msg.to,"Name changed to " + string + "")
ki4.sendMessage(msg.to,"Name changed to " + string + "")
ki5.sendMessage(msg.to,"Name changed to " + string + "")
ki6.sendMessage(msg.to,"Name changed to " + string + "")
#____________
elif "Bots updstatus " in msg.text:
if msg._from in lineMID:
separate = msg.text.split("Bots updstatus ")
string = msg.text.replace(separate[0] + "Bots updstatus ","")
if len(string) <= 10000000000:
profile = ki.getProfile()
profile.statusMessage= string
ki.updateProfile(profile)
profile = ki2.getProfile()
profile.statusMessage= string
ki2.updateProfile(profile)
profile = ki3.getProfile()
profile.statusMessage= string
ki3.updateProfile(profile)
profile = ki4.getProfile()
profile.statusMessage= string
ki4.updateProfile(profile)
profile = ki5.getProfile()
profile.statusMessage= string
ki5.updateProfile(profile)
profile = ki6.getProfile()
profile.statusMessage= string
ki6.updateProfile(profile)
ki.sendMessage(msg.to,"Status replaced so " + string + "")
ki2.sendMessage(msg.to,"Status replaced so " + string + "")
ki3.sendMessage(msg.to,"Status replaced so " + string + "")
ki4.sendMessage(msg.to,"Status replaced so " + string + "")
ki5.sendMessage(msg.to,"Status replaced so " + string + "")
ki6.sendMessage(msg.to,"Status replaced so " + string + "")
#==============================================================================#
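# Own-profile commands: 'me'/'aku' send a mention, the contact card and the profile
# picture; picture/cover URLs are built from the dl.profile.line-cdn.net CDN.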
elif text.lower() == 'me':
sendMessageWithMention(to, lineMID)
line.sendContact(to, lineMID)
me = line.getContact(lineMID)
line.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + me.pictureStatus)
elif text.lower() == 'aku':
sendMessageWithMention(to, lineMID)
line.sendContact(to, lineMID)
me = line.getContact(lineMID)
line.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + me.pictureStatus)
elif text.lower() == 'mid':
line.sendMessage(msg.to,lineMID)
elif text.lower() == 'tagme':
sendMessageWithMention(to, lineMID)
elif text.lower() == 'creator':
line.sendContact(to, "u7e99c5b3e4f01c95c104d0993fc41998")
elif text.lower() == 'myname':
me = line.getContact(lineMID)
line.sendMessage(msg.to,"DisplayName:\n\n" + me.displayName)
elif text.lower() == "update pict":
settings["changePicture"] = True
line.sendMessage(to, "Send image")
elif text.lower() == "update pict group":
if msg.toType == 2:
if to not in settings["changeGroupPicture"]:
settings["changeGroupPicture"].append(to)
line.sendMessage(to, "Send image ")
elif text.lower() == 'mybio':
me = line.getContact(lineMID)
line.sendMessage(msg.to,"StatusMessage:\n\n" + me.statusMessage)
elif text.lower() == 'pp':
me = line.getContact(lineMID)
line.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + me.pictureStatus)
elif text.lower() == 'myvid':
me = line.getContact(lineMID)
line.sendVideoWithURL(msg.to,"http://dl.profile.line-cdn.net/" + me.pictureStatus + "/vp")
elif text.lower() == 'cover':
me = line.getContact(lineMID)
cover = line.getProfileCoverURL(lineMID)
line.sendImageWithURL(msg.to, cover)
#___________________UNSEND_______________
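# 'unsend:on/off': toggles the unsendMessage flag used elsewhere in the script.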
elif text.lower() == 'unsend:on':
if msg._from in lineMID:
settings["unsendMessage"] = True
line.sendMessage(msg.to, "Unsend message enable")
elif text.lower() == 'unsend:off':
if msg._from in lineMID:
settings["unsendMessage"] = False
line.sendMessage(msg.to, "Unsend message disable")
elif msg.text.lower().startswith("getcontact "):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
contact = line.getContact(ls)
mi_d = contact.mid
line.sendContact(msg.to, mi_d)
elif msg.text.lower().startswith("mid "):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
ret_ = ls
line.sendMessage(msg.to, str(ret_))
elif msg.text.lower().startswith("getname "):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
contact = line.getContact(ls)
line.sendMessage(msg.to, "[ Display Name ]\n" + contact.displayName)
elif msg.text.lower().startswith("getbio "):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
contact = line.getContact(ls)
line.sendMessage(msg.to, "[ Status Message ]\n{}" + contact.statusMessage)
elif msg.text.lower().startswith("pict "):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
path = "http://dl.profile.line.naver.jp/" + line.getContact(ls).pictureStatus
line.sendImageWithURL(msg.to, str(path))
elif msg.text.lower().startswith("get videoprofile "):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
path = "http://dl.profile.line.naver.jp/" + line.getContact(ls).pictureStatus + "/vp"
line.sendImageWithURL(msg.to, str(path))
#_______________VKICK______________________
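# 'vkick': kicks the mentioned user, immediately re-invites them and then cancels
# the invitation, which removes them without leaving a pending invite behind.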
elif "vkick" in text.lower():
if 'MENTION' in msg.contentMetadata.keys() != None:
names = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
try:
line.kickoutFromGroup(msg.to, [mention['M']])
line.inviteIntoGroup(msg.to,[mention['M']])
line.cancelGroupInvitation(msg.to,[mention['M']])
except:
line.sendMessage(to, "Error")
#_____________KICK____________________
elif "kick" in text.lower():
if 'MENTION' in msg.contentMetadata.keys() != None:
names = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
try:
line.kickoutFromGroup(msg.to, [mention['M']])
except:
line.kickoutFromGroup(msg.to, [mention['M']])
else:
pass
elif "dk1" in text.lower():
if 'MENTION' in msg.contentMetadata.keys() != None:
names = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
try:
ki.kickoutFromGroup(msg.to, [mention['M']])
except:
ki.kickoutFromGroup(msg.to, [mention['M']])
else:
pass
elif "dk2" in text.lower():
if 'MENTION' in msg.contentMetadata.keys() != None:
names = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
try:
ki2.kickoutFromGroup(msg.to, [mention['M']])
except:
ki2.kickoutFromGroup(msg.to, [mention['M']])
else:
pass
elif "dk3" in text.lower():
if 'MENTION' in msg.contentMetadata.keys() != None:
names = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
try:
ki3.kickoutFromGroup(msg.to, [mention['M']])
except:
ki3.kickoutFromGroup(msg.to, [mention['M']])
else:
pass
elif "dk4" in text.lower():
if 'MENTION' in msg.contentMetadata.keys() != None:
names = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
try:
ki4.kickoutFromGroup(msg.to, [mention['M']])
except:
ki4.kickoutFromGroup(msg.to, [mention['M']])
else:
pass
elif "dk5" in text.lower():
if 'MENTION' in msg.contentMetadata.keys() != None:
names = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
try:
ki5.kickoutFromGroup(msg.to, [mention['M']])
except:
ki5.kickoutFromGroup(msg.to, [mention['M']])
else:
pass
elif "dk6" in text.lower():
if 'MENTION' in msg.contentMetadata.keys() != None:
names = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
try:
ki6.kickoutFromGroup(msg.to, [mention['M']])
except:
ki6.kickoutFromGroup(msg.to, [mention['M']])
else:
pass
elif "dk" in text.lower():
if 'MENTION' in msg.contentMetadata.keys() != None:
names = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
try:
ki.kickoutFromGroup(msg.to, [mention['M']])
ki2.kickoutFromGroup(msg.to, [mention['M']])
ki3.kickoutFromGroup(msg.to, [mention['M']])
ki4.kickoutFromGroup(msg.to, [mention['M']])
ki5.kickoutFromGroup(msg.to, [mention['M']])
ki6.kickoutFromGroup(msg.to, [mention['M']])
except:
ki.kickoutFromGroup(msg.to, [mention['M']])
ki2.kickoutFromGroup(msg.to, [mention['M']])
ki3.kickoutFromGroup(msg.to, [mention['M']])
ki4.kickoutFromGroup(msg.to, [mention['M']])
ki5.kickoutFromGroup(msg.to, [mention['M']])
ki6.kickoutFromGroup(msg.to, [mention['M']])
else:
pass
#_____________NUKE_________________
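# 'Nuke <name>' / 'Nk:<name>': kick every member whose display name contains the
# given substring.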
elif "Nuke" in msg.text:
if msg.toType == 2:
#print "ok"
_name = msg.text.replace("Nuke","")
gs = line.getGroup(msg.to)
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
line.sendMessage(msg.to,"Not found.")
else:
for target in targets:
try:
line.kickoutFromGroup(msg.to,[target])
print (msg.to,[target])
except:
pass
#_____________NK________________
elif "Nk:" in msg.text:
if msg.toType == 2:
#print "ok"
_name = msg.text.replace("Nk:","")
gs = line.getGroup(msg.to)
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
line.sendMessage(msg.to,"Not found.")
else:
for target in targets:
try:
line.kickoutFromGroup(msg.to,[target])
print (msg.to,[target])
except:
pass
elif msg.text.lower().startswith("cover "):
if line != None:
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
path = line.getProfileCoverURL(ls)
line.sendImageWithURL(msg.to, str(path))
#____________COPY__________________________________
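# 'copy @user': clones the mentioned user's profile onto the main account;
# 'backup' restores the profile saved in myProfile.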
elif msg.text.lower().startswith("copy "):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
try:
line.cloneContactProfile(mention['M'])
jp = line.getContact(mention["M"]).displayName
line.sendMessage(to, "Succes Copy Profile "+jp)
except:
line.sendMessage(msg.to, "Eror")
elif text.lower() == 'backup':
try:
lineProfile.displayName = str(myProfile["displayName"])
lineProfile.statusMessage = str(myProfile["statusMessage"])
lineProfile.pictureStatus = str(myProfile["pictureStatus"])
line.updateProfileAttribute(8, lineProfile.pictureStatus)
line.updateProfile(lineProfile)
line.sendMessage(msg.to, "Done Backup Profile ")
except:
line.sendMessage(msg.to, "Invalid")
#____________COPY__________________________________
elif msg.text.lower().startswith("bots copy "):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
try:
ki.cloneContactProfile(mention['M'])
ki2.cloneContactProfile(mention['M'])
jp = line.getContact(mention["M"]).displayName
ki.sendMessage(to, "Succes Copy Profile "+jp)
ki2.sendMessage(to, "Succes Copy Profile "+jp)
except:
line.sendMessage(msg.to, "Eror")
elif text.lower() == 'bots backup':
try:
kiProfile.displayName = str(myProfile["displayName"])
kiProfile.statusMessage = str(myProfile["statusMessage"])
kiProfile.pictureStatus = str(myProfile["pictureStatus"])
ki.updateProfileAttribute(8, kiProfile.pictureStatus)
ki.updateProfile(kiProfile)
ki2Profile.displayName = str(myProfile["displayName"])
ki2Profile.statusMessage = str(myProfile["statusMessage"])
ki2Profile.pictureStatus = str(myProfile["pictureStatus"])
ki2.updateProfileAttribute(8, ki2Profile.pictureStatus)
ki2.updateProfile(ki2Profile)
ki3Profile.displayName = str(myProfile["displayName"])
ki3Profile.statusMessage = str(myProfile["statusMessage"])
ki3Profile.pictureStatus = str(myProfile["pictureStatus"])
ki3.updateProfileAttribute(8, ki3Profile.pictureStatus)
ki3.updateProfile(ki3Profile)
ki4Profile.displayName = str(myProfile["displayName"])
ki4Profile.statusMessage = str(myProfile["statusMessage"])
ki4Profile.pictureStatus = str(myProfile["pictureStatus"])
ki4.updateProfileAttribute(8, ki4Profile.pictureStatus)
ki4.updateProfile(ki4Profile)
ki5Profile.displayName = str(myProfile["displayName"])
ki5Profile.statusMessage = str(myProfile["statusMessage"])
ki5Profile.pictureStatus = str(myProfile["pictureStatus"])
ki5.updateProfileAttribute(8, ki5Profile.pictureStatus)
ki5.updateProfile(ki5Profile)
ki6Profile.displayName = str(myProfile["displayName"])
ki6Profile.statusMessage = str(myProfile["statusMessage"])
ki6Profile.pictureStatus = str(myProfile["pictureStatus"])
ki6.updateProfileAttribute(8, ki6Profile.pictureStatus)
ki6.updateProfile(ki6Profile)
ki.sendMessage(msg.to, "Done Backup Profile ")
ki2.sendMessage(msg.to, "Done Backup Profile ")
ki3.sendMessage(msg.to, "Done Backup Profile ")
ki4.sendMessage(msg.to, "Done Backup Profile ")
ki5.sendMessage(msg.to, "Done Backup Profile ")
ki6.sendMessage(msg.to, "Done Backup Profile ")
except:
line.sendMessage(msg.to, "Invalid")
#____________________________________________
#elif text.lower() == 'leaveallgroups':
#gid = ki.getGroupIdsJoined()
#gid = ki2.getGroupIdsJoined()
#gid = ki3.getGroupIdsJoined()
#gid = ki4.getGroupIdsJoined()
#gid = ki5.getGroupIdsJoined()
#for i in gid:
#ki.leaveGroup(i)
#ki2.leaveGroup(i)
#ki3.leaveGroup(i)
#ki4.leaveGroup(i)
#ki5.leaveGroup(i)
#if wait["lang"] == "JP":
#line.sendMessage(to,"Bots Sudah Leave Dari Semua Group")
#else:
#line.sendMessage(to,"He declined all invitations")
elif text.lower() == 'pmpict':
contact = line.getContact(msg.to)
path =("http://dl.profile.line-cdn.net/" + contact.pictureStatus)
line.sendImageWithURL(msg.to, path)
elif text.lower() == 'pmcover':
contact = line.getContact(msg.to)
cu = line.getProfileCoverURL(msg.to)
path = str(cu)
line.sendImageWithURL(msg.to, path)
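# 'gcall <n>': acquires a group call route and invites every member to the group
# call, repeated n times.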
elif msg.text.lower().startswith("gcall "):
if msg.toType == 2:
sep = text.split(" ")
strnum = text.replace(sep[0] + " ","")
num = int(strnum)
line.sendMessage(to, "Inviting in call group!" %str(num))
for var in range(0,num):
group = line.getGroup(to)
members = [contact.mid for contact in group.members]
line.acquireGroupCallRoute(to)
line.inviteIntoGroupCall(to, contactIds=members)
elif msg.text.lower().startswith("jumlahtag: "):
if wait["selfbot"] == True:
if sender in lineMID:
proses = text.split(":")
strnum = text.replace(proses[0] + ":","")
num = int(strnum)
Setmain["RAlimit"] = num
line.sendMessage(msg.to,"♻Total Spamtag Diubah Menjadi " +strnum)
elif msg.text.lower().startswith("colek "):
if wait["selfbot"] == True:
if sender in lineMID:
if 'MENTION' in msg.contentMetadata.keys()!=None:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
zx = ""
zxc = " "
zx2 = []
pesan2 = "@a"" "
xlen = str(len(zxc))
xlen2 = str(len(zxc)+len(pesan2)-1)
zx = {'S':xlen, 'E':xlen2, 'M':key1}
zx2.append(zx)
zxc += pesan2
msg.contentType = 0
msg.text = zxc
lol = {'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}
msg.contentMetadata = lol
jmlh = int(Setmain["RAlimit"])
if jmlh <= 1000:
for x in range(jmlh):
try:
line.sendMessage1(msg)
except Exception as e:
line.sendMessage(msg.to,str(e))
else:
line.sendMessage(msg.to,"Jumlah melebihi 1000")
elif "Spam " in msg.text:
txt = msg.text.split(" ")
jmlh = int(txt[2])
teks = msg.text.replace("Spam "+str(txt[1])+" "+str(jmlh)+" ","")
tulisan = jmlh * (teks+"\n")
if txt[1] == "on":
if jmlh <= 100000:
for x in range(jmlh):
line.sendMessage(msg.to, teks)
else:
line.sendMessage(msg.to, "")
elif txt[1] == "off":
if jmlh <= 100000:
line.sendMessage(msg.to, tulisan)
else:
line.sendMessage(msg.to, "")
elif msg.text in ["Hi"]:
line.sendMessage(msg.to,"السَّلاَمُ عَلَيْكُمْ وَرَحْمَةُ اللهِ وَبَرَكَاتُهُ")
elif msg.text in ["Hello"]:
line.sendMessage(msg.to,"السَّلاَمُ عَلَيْكُمْ وَرَحْمَةُ اللهِ وَبَرَكَاتُهُ")
#==============================================================================#
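# Mimic commands: maintain a target list in settings["mimic"]; messages from
# targets are presumably echoed elsewhere while mimic status is on.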
elif msg.text.lower().startswith("mimicadd "):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
settings["mimic"]["target"][target] = True
line.sendMessage(msg.to,"Target ditambahkan!")
break
except:
line.sendMessage(msg.to,"Added Target Fail !")
break
elif msg.text.lower().startswith("mimicdel "):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del settings["mimic"]["target"][target]
line.sendMessage(msg.to,"Target dihapuskan!")
break
except:
line.sendMessage(msg.to,"Deleted Target Fail !")
break
elif text.lower() == 'mimiclist':
if settings["mimic"]["target"] == {}:
line.sendMessage(msg.to,"Tidak Ada Target")
else:
mc = "╔══[ Mimic List ]"
for mi_d in settings["mimic"]["target"]:
mc += "\n╠ "+line.getContact(mi_d).displayName
line.sendMessage(msg.to,mc + "\n╚══[◑ The End ◑]")
elif "autoreject " in msg.text.lower():
xpesan = msg.text.lower()
xres = xpesan.replace("autoreject ","")
if xres == "off":
settings['autorejc'] = False
line.sendMessage(msg.to,"AutoReject already Off")
elif xres == "on":
settings['autorejc'] = True
line.sendMessage(msg.to,"AutoReject already On")
elif text.lower() == 'contact on':
if wait["contact"] == True:
if wait["lang"] == "JP":
line.sendMessage(to,"Contact turned On")
else:
line.sendMessage(to,"Contact turned On")
else:
wait["contact"] = True
if wait["lang"] == "JP":
line.sendMessage(to,"Contact turned On")
else:
line.sendMessage(to,"Contact turned On")
elif text.lower() == 'contact off':
if wait["contact"] == False:
if wait["lang"] == "JP":
line.sendMessage(to,"Contact turned Off")
else:
line.sendMessage(to,"Contact turned Off")
else:
wait["contact"] = False
if wait["lang"] == "JP":
line.sendMessage(to,"Contact turned Off")
else:
line.sendMessage(to,"Contact turned Off")
elif "mimic " in msg.text.lower():
sep = text.split(" ")
mic = text.replace(sep[0] + " ","")
if mic == "on":
if settings["mimic"]["status"] == False:
settings["mimic"]["status"] = True
line.sendMessage(msg.to,"Reply Message On")
elif mic == "off":
if settings["mimic"]["status"] == True:
settings["mimic"]["status"] = False
line.sendMessage(msg.to,"Reply Message Off")
elif msg.text.lower().startswith("sider on"):
try:
del cctv['point'][msg.to]
del cctv['sidermem'][msg.to]
del cctv['cyduk'][msg.to]
except:
pass
cctv['point'][msg.to] = msg.id
cctv['sidermem'][msg.to] = ""
cctv['cyduk'][msg.to]=True
line.sendMessage(msg.to,"Sider turned On")
elif msg.text.lower().startswith("sider off"):
if msg.to in cctv['point']:
cctv['cyduk'][msg.to]=False
line.sendMessage(msg.to, "ᴄᴄᴛv ʏᴀɴɢ ᴛᴇʀᴛᴀɴɢᴋᴀᴘ:\n"+cctv['sidermem'][msg.to])
line.sendMessage(to,"Sider turned Off")
else:
line.sendMessage(msg.to, "On aja belum ")
#==============================================================================#
elif text.lower() == 'welcome on':
if settings["welcomemsg"] == True:
if settings["lang"] == "JP":
line.sendMessage(to,"WelcomeMessage Turned On")
else:
settings["welcomemsg"] = True
if settings["lang"] == "JP":
line.sendMessage(to,"WelcomeMessage Turned On")
elif text.lower() == 'welcome off':
if settings["welcomemsg"] == False:
if settings["lang"] == "JP":
line.sendMessage(to,"WelcomeMessage Turned Off")
else:
settings["welcomemsg"] = False
if settings["lang"] == "JP":
line.sendMessage(to,"WelcomeMessage Turned Off")
#==============================================================================#
elif text.lower() == 'leavemsg on':
if settings["leavemsg"] == True:
if settings["lang"] == "JP":
line.sendMessage(to,"LeaveMessage Turned On")
else:
settings["leavemsg"] = True
if settings["lang"] == "JP":
line.sendMessage(to,"LeaveMessage Turned On")
elif text.lower() == 'leavemsg off':
if settings["leavemsg"] == False:
if settings["lang"] == "JP":
line.sendMessage(to,"LeaveMessage Turned Off")
else:
settings["leavemsg"] = False
if settings["lang"] == "JP":
line.sendMessage(to,"LeaveMessage Turned Off")
#--------------------------------------------------------
elif 'Set welcome ' in msg.text:
if msg._from in lineMID:
spl = msg.text.replace('Set welcome ','')
if spl in [""," ","\n",None]:
line.sendMessage(msg.to, "Failed to replace Welcome")
else:
settings["welcome"] = spl
line.sendMessage(msg.to, "WelcomeMessage is changed to :\n\n{}".format(str(spl)))
elif text.lower() == "cek welcome":
if msg._from in lineMID:
line.sendMessage(msg.to, "WelcomeMessage :\n\n" + str(settings["welcome"]))
#--------------------------------------------------------
elif 'Set leavemsg ' in msg.text:
if msg._from in lineMID:
spl = msg.text.replace('Set leavemsg ','')
if spl in [""," ","\n",None]:
line.sendMessage(msg.to, "Failed to replace LeaveMsg")
else:
settings["keluar"] = spl
line.sendMessage(msg.to, "LeaveMessage is changed to :\n\n{}".format(str(spl)))
elif text.lower() == "cek leavemsg":
if msg._from in lineMID:
line.sendMessage(msg.to, "LeaveMessage :\n\n" + str(settings["keluar"]))
#=============RESPON1=============================
elif 'Set respon1 ' in msg.text:
if sender in lineMID:
spl = msg.text.replace('Set respon1 ','')
if spl in [""," ","\n",None]:
line.sendMessage(msg.to, "Gagal mengganti Respon1")
else:
settings["tag"] = spl
line.sendMessage(msg.to, "Respon1 Text Diubah Menjadi :\n\n{}".format(str(spl)))
elif text.lower() == "cek respon1":
if sender in lineMID:
line.sendMessage(msg.to, "Respon1 Text Kamu :\n\n" + str(settings["tag"]))
#=============RESPON2=============================
elif 'Set respon2 ' in msg.text:
if sender in lineMID:
spl = msg.text.replace('Set respon2 ','')
if spl in [""," ","\n",None]:
line.sendMessage(msg.to, "Gagal mengganti Respon2")
else:
settings["tag2"] = spl
line.sendMessage(msg.to, "Respon2 Image Diubah Menjadi :\n\n{}".format(str(spl)))
elif text.lower() == "cek respon2":
if sender in lineMID:
line.sendMessage(msg.to, "Respon2 TagImage Kamu :\n\n" + str(settings["tag2"]))
#=============RESPON3============================
elif 'Set respon3 ' in msg.text:
if sender in lineMID:
spl = msg.text.replace('Set respon3 ','')
if spl in [""," ","\n",None]:
line.sendMessage(msg.to, "Gagal mengganti Respon3")
else:
settings["tag3"] = spl
line.sendMessage(msg.to, "Respon3 PM Diubah Menjadi :\n\n{}".format(str(spl)))
elif text.lower() == "cek respon3":
if sender in lineMID:
line.sendMessage(msg.to, "Respon3 PM Kamu :\n\n" + str(settings["tag3"]))
elif 'Set responpc ' in msg.text:
if sender in lineMID:
spl = msg.text.replace('Set responpc ','')
if spl in [""," ","\n",None]:
line.sendMessage(msg.to, "Gagal mengganti ResponPc")
else:
settings["responpc"] = spl
line.sendMessage(msg.to, "Respon Pc replaced so :\n\n".format(str(spl)))
elif text.lower() == "cek responpc":
if sender in lineMID:
line.sendMessage(msg.to, "Respon Pc mu :\n\n"+ str(settings["responpc"]))
elif text.lower() == 'gcreator':
group = line.getGroup(to)
GS = group.creator.mid
line.sendContact(to, GS)
elif text.lower() == 'gid':
gid = line.getGroup(to)
line.sendMessage(to, "[ID Group : ]\n" + gid.id)
elif text.lower() == 'gpict':
group = line.getGroup(to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
line.sendImageWithURL(to, path)
elif text.lower() == 'gname':
gid = line.getGroup(to)
line.sendMessage(to, "[Nama Group : ]\n" + gid.name)
elif text.lower() == 'url':
if msg.toType == 2:
group = line.getGroup(to)
if group.preventedJoinByTicket == False:
ticket = line.reissueGroupTicket(to)
line.sendMessage(to, "[ Group Ticket ]\nhttps://line.me/R/ti/g/{}".format(str(ticket)))
else:
line.sendMessage(to, "Grup qr tidak terbuka silahkan buka terlebih dahulu dengan perintah {}openqr".format(str(settings["keyCommand"])))
elif text.lower() == 'link on':
if msg.toType == 2:
group = line.getGroup(to)
if group.preventedJoinByTicket == False:
line.sendMessage(to, "Grup qr already opened")
else:
group.preventedJoinByTicket = False
line.updateGroup(group)
line.sendMessage(to, "Successful Open group qr")
elif text.lower() == 'link off':
if msg.toType == 2:
group = line.getGroup(to)
if group.preventedJoinByTicket == True:
line.sendMessage(to, "Grup qr already closed")
else:
group.preventedJoinByTicket = True
line.updateGroup(group)
line.sendMessage(to, "Successful Close qr")
elif text.lower() == 'reject':
gid = line.getGroupIdsInvited()
for i in gid:
line.rejectGroupInvitation(i)
if wait["lang"] == "JP":
line.sendMessage(msg.to,"Reject GroupInvited Done")
else:
line.sendMessage(msg.to,"Done")
elif text.lower() == 'cancelall':
if msg._from in lineMID:
if msg.toType == 2:
group = line.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
line.cancelGroupInvitation(msg.to,[_mid])
line.sendMessage(msg.to,"I pretended to cancel and canceled.")
elif msg.text.lower().startswith("Bots say "):
sep = text.split(" ")
say = text.replace(sep[0] + " ","")
line.sendMessage(to,say)
elif text.lower() == 'ginfo':
group = line.getGroup(to)
try:
gCreator = group.creator.displayName
except:
gCreator = "Tidak ditemukan"
if group.invitee is None:
gPending = "0"
else:
gPending = str(len(group.invitee))
if group.preventedJoinByTicket == True:
gQr = "Tertutup"
gTicket = "Tidak ada"
else:
gQr = "Terbuka"
gTicket = "https://line.me/R/ti/g/{}".format(str(line.reissueGroupTicket(group.id)))
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
ret_ = "╔══[ Group Info ]"
ret_ += "\n╠ Nama Group : {}".format(str(group.name))
ret_ += "\n╠ ID Group : {}".format(group.id)
ret_ += "\n╠ Pembuat : {}".format(str(gCreator))
ret_ += "\n╠ Jumlah Member : {}".format(str(len(group.members)))
ret_ += "\n╠ Jumlah Pending : {}".format(gPending)
ret_ += "\n╠ Group Qr : {}".format(gQr)
ret_ += "\n╠ Group Ticket : {}".format(gTicket)
ret_ += "\n╚══[ Finish ]"
line.sendMessage(to, str(ret_))
line.sendImageWithURL(to, path)
elif text.lower() == 'memlist':
if msg.toType == 2:
group = line.getGroup(to)
ret_ = "╔══[ Member List ]"
no = 1
for mem in group.members:
ret_ += "\n╠ {}. {}".format(str(no), str(mem.displayName))
no += 1
ret_ += "\n╚══[ Total {} ]".format(str(len(group.members)))
line.sendMessage(to, str(ret_))
elif text.lower() == 'groups':
groups = line.groups
ret_ = "╔══[ Group List ]"
no = 1
for gid in groups:
group = line.getGroup(gid)
ret_ += "\n╠ {}. {} | {}".format(str(no), str(group.name), str(len(group.members)))
no += 1
ret_ += "\n╚══[ Total {} Groups ]".format(str(len(groups)))
line.sendMessage(to, str(ret_))
elif text.lower() == 'bots groups':
    for bot in [ki, ki2, ki3, ki4, ki5, ki6]:
        groups = bot.groups
        ret_ = "╔══[ Group List ]"
        no = 1
        for gid in groups:
            group = bot.getGroup(gid)
            ret_ += "\n╠ {}. {} | {}".format(str(no), str(group.name), str(len(group.members)))
            no += 1
        ret_ += "\n╚══[ Total {} Groups ]".format(str(len(groups)))
        bot.sendMessage(to, str(ret_))
elif msg.text in ["Autolike"]:
if sender in lineMID:
print ("[Command]Like executed")
line.sendMessage(msg.to,"Auto Like\nDone Bos")
try:
autolike()
except:
pass
elif "/ti/g/" in msg.text.lower():
if settings["autoJoinTicket"] == True:
link_re = re.compile(r'(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
links = link_re.findall(text)
n_links = []
for l in links:
if l not in n_links:
n_links.append(l)
for ticket_id in n_links:
group = line.findGroupByTicket(ticket_id)
line.acceptGroupInvitationByTicket(group.id,ticket_id)
line.sendMessage(to, "Successful masuk ke group %s" % str(group.name))
# Check viewers command
#
#==============================================================================#
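# 'kalender': report the current date and time for Asia/Makassar, mapping the
# day and month names to Indonesian before sending the result.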
elif text.lower() == 'kalender':
tz = pytz.timezone("Asia/Makassar")
timeNow = datetime.now(tz=tz)
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
hr = timeNow.strftime("%A")
bln = timeNow.strftime("%m")
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
bln = bulan[int(bln) - 1]
readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]"
line.sendMessage(msg.to, readTime)
elif text.lower() == "remove chat":
#if wait["selfbot"] == True:
if msg._from in lineMID:
try:
line.removeAllMessages(op.param2)
line.sendMessage(msg.to,"Chat dibersihkan...")
except:
pass
#-------------------------------------------------------------------------------
elif text.lower() == '.nukeall':
if msg._from in admin:
if msg.toType == 2:
print ("[ 19 ] NUKE")
_name = msg.text.replace(".nukeall","")
gs = ki.getGroup(msg.to)
gs = ki2.getGroup(msg.to)
gs = ki3.getGroup(msg.to)
gs = ki4.getGroup(msg.to)
gs = ki5.getGroup(msg.to)
gs = ki6.getGroup(msg.to)
ki.sendMessage(msg.to,"Group cleansed")
ki2.sendMessage(msg.to,"Wan")
ki3.sendMessage(msg.to,"chu")
ki4.sendMessage(msg.to,"tri")
ki5.sendMessage(msg.to,"goooo!")
ki6.sendMessage(msg.to,"goooo!")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendMessage(msg.to,"Not Found")
ki2.sendMessage(msg.to,"Not Found")
ki3.sendMessage(msg.to,"Not Found")
ki4.sendMessage(msg.to,"Not Found")
ki5.sendMessage(msg.to,"Not Found")
ki6.sendMessage(msg.to,"Not Found")
else:
for target in targets:
if not target in Bots:
if not target in admin:
try:
klist=[ki,ki2,ki3,ki4,ki5,ki6]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[target])
except:
line.sendMessage(msg.to,"")
elif msg.text.lower().startswith("music "):
try:
search = msg.text.lower().replace("fullmusic ","")
r = requests.get("https://farzain.xyz/api/joox.php?apikey=412uH5fxAT7jsNaUwCkVwH4qEUn2Dz&id={}".format(urllib.parse.quote(search)))
data = r.text
data = json.loads(data)
info = data["info"]
audio = data["audio"]
hasil = "☠ Hasil Music ☠ \n"
hasil += "\nPenyanyi : {}".format(str(info["penyanyi"]))
hasil += "\nJudul : {}".format(str(info["judul"]))
hasil += "\nAlbum : {}".format(str(info["album"]))
hasil += "\n\nLink : \n1. Image : {}".format(str(data["gambar"]))
hasil += "\n\nLink : \n2. MP3 : {}".format(str(audio["mp3"]))
hasil += "\n\nLink : \n3. M4A : {}".format(str(audio["m4a"]))
hasil += "\n\n☠
Lyric Lagu:\n\n{}".format(str(data["lirik"]))
hasil += "\n\n☠ Please Wait For Downloading...!!! \n"
line.sendImageWithURL(msg.to, str(data["gambar"]))
line.sendMessage(msg.to, str(hasil))
line.sendMessage(msg.to, "Result MP3 ")
line.sendAudioWithURL(msg.to, str(audio["mp3"]))
except Exception as error:
line.sendMessage(msg.to, "Result Error:\n" + str(error))
elif "kalkulator" in msg.text.lower():
try:
sep = msg.text.split(" ")
cal = msg.text.replace(sep[0] + " ","")
result = requests.get("http://calcatraz.com/calculator/api?c="+urllib.parse.quote(cal))
data = result.text
line.sendMessage(to,"Hasil:\n\n"+ cal+ " = " +str(data))
except Exception as error:
logError(error)
line.sendMessage(to, str(error))
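# 'instagram <username>': fetch the public ?__a=1 profile JSON and summarise the
# account (name, bio, follower counts, verification, privacy, post count).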
elif "instagram" in msg.text.lower():
sep = text.split(" ")
search = text.replace(sep[0] + " ","")
with requests.session() as web:
web.headers["User-Agent"] = random.choice(settings["userAgent"])
r = web.get("https://www.instagram.com/{}/?__a=1".format(search))
try:
data = json.loads(r.text)
ret_ = "╔══[ Profile Instagram ]"
ret_ += "\n╠ Nama : {}".format(str(data["user"]["full_name"]))
ret_ += "\n╠ Username : {}".format(str(data["user"]["username"]))
ret_ += "\n╠ Bio : {}".format(str(data["user"]["biography"]))
ret_ += "\n╠ Pengikut : {}".format(format_number(data["user"]["followed_by"]["count"]))
ret_ += "\n╠ Diikuti : {}".format(format_number(data["user"]["follows"]["count"]))
if data["user"]["is_verified"] == True:
ret_ += "\n╠ Verifikasi : Sudah"
else:
ret_ += "\n╠ Verifikasi : Belum"
if data["user"]["is_private"] == True:
ret_ += "\n╠ Akun Pribadi : Iya"
else:
ret_ += "\n╠ Akun Pribadi : Tidak"
ret_ += "\n╠ Total Post : {}".format(format_number(data["user"]["media"]["count"]))
ret_ += "\n╚══[ https://www.instagram.com/{} ]".format(search)
path = data["user"]["profile_pic_url_hd"]
line.sendImageWithURL(to, str(path))
line.sendMessage(to, str(ret_))
except:
line.sendMessage(to, "Pengguna tidak ditemukan")
elif msg.text.lower().startswith("movie"):
try:
sep = msg.text.split(" ")
search = msg.text.replace(sep[0] + " ","")
apiKey = "zbYyMGhWy06DDsSHeUAK3GHQkEbCL8"
api = requests.get("https://farzain.xyz/api/film.php?apikey={}&id={}".format(str(apiKey), str(search)))
data = api.text
data = json.loads(data)
if data["status"] == "success":
anu = "[ Result Film ]"
anu += "\nTitle : {}".format(str(data["Title"]))
anu += "\nYear : {}".format(str(data["Year"]))
anu += "\nRated : {}".format(str(data["Rated"]))
anu += "\nReleased : {}".format(str(data["Released"]))
anu += "\nDuration : {}".format(str(data["Runtime"]))
anu += "\nGenre : {}".format(str(data["Genre"]))
path = str(data["Poster"])
line.sendImageWithURL(msg.to, str(path))
line.sendMessage(msg.to, str(anu))
else:
sendMentionV2(msg.to, "Maaf @!,hasil pencarin tidak ditemukan", [sender])
except Exception as error:
line.sendMessage(msg.to, str(error))
#___________BROADCAST_______________
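# 'broadcast <text>': send the given text to every group this account is a member of.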
elif msg.text.lower().startswith("broadcast "):
sep = text.split(" ")
txt = text.replace(sep[0] + " ","")
groups = line.groups
for group in groups:
line.sendMessage(group, "Broadcast:\n\n{}".format(str(txt)))
#____________________________________
elif "image: " in msg.text.lower():
separate = msg.text.split(" ")
search = msg.text.replace(separate[0] + " ","")
with requests.session() as web:
web.headers["User-Agent"] = random.choice(settings["userAgent"])
r = web.get("http://rahandiapi.herokuapp.com/imageapi?key=betakey&q={}".format(urllib.parse.quote(search)))
data = r.text
data = json.loads(data)
if data["result"] != []:
items = data["result"]
path = random.choice(items)
a = items.index(path)
b = len(items)
line.sendImageWithURL(to, str(path))
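# 'youtube <query>': scrape the YouTube search results page, pick the first hit,
# resolve the best stream via pafy and send the video plus its metadata.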
elif "youtube" in msg.text.lower():
if msg._from in lineMID:
try:
sep = msg.text.split(" ")
textToSearch = msg.text.replace(sep[0] + " ","")
query = urllib.parse.quote(textToSearch)
search_url="https://www.youtube.com/results?search_query="
mozhdr = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'}
sb_url = search_url + query
sb_get = requests.get(sb_url, headers = mozhdr)
soupeddata = BeautifulSoup(sb_get.content, "html.parser")
yt_links = soupeddata.find_all("a", class_ = "yt-uix-tile-link")
x = (yt_links[1])
yt_href = x.get("href")
yt_href = yt_href.replace("watch?v=", "")
qx = "https://youtu.be" + str(yt_href)
vid = pafy.new(qx)
stream = vid.streams
best = vid.getbest()
me = best.url
hasil = ""
title = "💿 Judul 🎼〘 " + vid.title + " 〙"
author = '\n\n✏ Author : ' + str(vid.author)
durasi = '\n📟 Duration : ' + str(vid.duration)
suka = '\n👍 Likes : ' + str(vid.likes)
rating = '\n⭐ Rating : ' + str(vid.rating)
deskripsi = '\n📋 Deskripsi : ' + str(vid.description)
line.sendVideoWithURL(msg.to, me)
line.sendMessage(msg.to,title+ author+ durasi+ suka+ rating+ deskripsi)
except Exception as e:
line.sendMessage(msg.to,str(e))
elif msg.contentType == 7:
if settings["checkSticker"] == True:
stk_id = msg.contentMetadata['STKID']
stk_ver = msg.contentMetadata['STKVER']
pkg_id = msg.contentMetadata['STKPKGID']
ret_ = "╔══[ Sticker Info ]"
ret_ += "\n╠ STICKER ID : {}".format(stk_id)
ret_ += "\n╠ STICKER PACKAGES ID : {}".format(pkg_id)
ret_ += "\n╠ STICKER VERSION : {}".format(stk_ver)
ret_ += "\n╠ STICKER URL : line://shop/detail/{}".format(pkg_id)
ret_ += "\n╚══[ Finish ]"
line.sendMessage(to, str(ret_))
elif wait["contact"] == True:
msg.contentType = 0
line.sendMessage(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = line.getContact(msg.contentMetadata["mid"])
try:
cu = line.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
line.sendMessage(to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
else:
contact = line.getContact(msg.contentMetadata["mid"])
try:
cu = line.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
line.sendMessage(to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
elif msg.contentType == 1:
if settings["changePicture"] == True:
path = line.downloadObjectMsg(msg_id)
settings["changePicture"] = False
line.updateProfilePicture(path)
line.sendMessage(to, "Successful mengubah foto profile")
if msg.toType == 2:
if to in settings["changeGroupPicture"]:
path = line.downloadObjectMsg(msg_id)
settings["changeGroupPicture"].remove(to)
line.updateGroupPicture(to, path)
line.sendMessage(to, "Successful mengubah foto group")
elif msg.contentType == 16:
mid = data["actorId"]
postId = data["activityExternalId"]
line.likePost(to, mid, postId, likeType=1001)
line.createComment(to, mid, postId, "AutoLike by: Team Dkz Protection ")
#==============================================================================#
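# op.type 17: a member joined a group. If the group is on the welcome list,
# greet the new member and post their profile picture.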
if op.type == 17:
    if op.param1 in welcome:
        if op.param2 not in lineMID:
            ginfo = line.getGroup(op.param1)
            contact = line.getContact(op.param2).picturePath
            image = 'http://dl.profile.line.naver.jp' + contact
            welcomeMembers(op.param1, [op.param2])
            line.sendImageWithURL(op.param1, image)
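# op.type 13: this account was invited to a group. Auto-join according to
# settings["autojj"] ("wl" accepts only whitelisted inviters, "all" accepts
# everyone); otherwise optionally auto-reject via settings['autorejc'].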
if op.type == 13:
if lineMID in op.param3:
if settings["autojj"] == "wl":
if op.param2 in periksa["wl"]:
line.acceptGroupInvitation(op.param1)
else:
if settings['autorejc'] == True:
line.rejectGroupInvitation(op.param1)
else:
pass
elif settings["autojj"] == "all":
line.acceptGroupInvitation(op.param1)
else:
if settings['autorejc'] == True:
line.rejectGroupInvitation(op.param1)
else:
pass
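# op.type 26: incoming message (processed only while wait["Mute"] is False).
# Covers auto-read, the private-chat auto-reply, mimic mode, the unsend log
# cache (msg_dict) and the mention responders below.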
if op.type == 26:
if wait["Mute"] == False:
print ("[ 26 ] RECEIVE MESSAGE")
msg = op.message
text = msg.text
msg_id = msg.id
receiver = msg.to
sender = msg._from
if msg.toType == 0:
if sender != line.profile.mid:
to = sender
if settings["autoBalas"] == True:
if msg.toType == 0:
line.sendChatChecked(sender,msg_id)
contact = line.getContact(sender)
mids = [contact.mid]
text = "[ Auto Respon ]\n\nHallo @!\nMohon Maaf Saya Sedang Sibuk, Ini Adalah Pesan Otomatis, Jika Ada Yang Penting Mohon Hubungi Saya Nanti, Terimakasih..."
sendMention(to, text, mids)
else:
to = receiver
else:
to = receiver
if msg.contentType == 0:
if settings["autoRead"] == True:
line.sendChatChecked(to, msg_id)
if to in read["readPoint"]:
if sender not in read["ROM"][to]:
read["ROM"][to][sender] = True
if sender in settings["mimic"]["target"] and settings["mimic"]["status"] == True and settings["mimic"]["target"][sender] == True:
text = msg.text
if text is not None:
line.sendMessage(msg.to,text)
if settings["unsendMessage"] == True:
try:
msg = op.message
if msg.toType == 0:
line.log("[{} : {}]".format(str(msg._from), str(msg.text)))
else:
line.log("[{} : {}]".format(str(msg.to), str(msg.text)))
msg_dict[msg.id] = {"text": msg.text, "from": msg._from, "createdTime": msg.createdTime, "contentType": msg.contentType, "contentMetadata": msg.contentMetadata}
except Exception as error:
logError(error)
if msg.contentType == 0:
if text is None:
return
if "/ti/g/" in msg.text.lower():
if settings["autoJoinTicket"] == True:
link_re = re.compile(r'(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
links = link_re.findall(text)
n_links = []
for l in links:
if l not in n_links:
n_links.append(l)
for ticket_id in n_links:
group = line.findGroupByTicket(ticket_id)
line.acceptGroupInvitationByTicket(group.id,ticket_id)
line.sendMessage(to, "Successful join ke group %s" % str(group.name))
#___________________RESPON TEXT__________________
if msg.contentType == 0 and sender not in lineMID and msg.toType == 2:
if 'MENTION' in msg.contentMetadata:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if lineMID in mention["M"]:
if settings["autoRespon"]:
contact = line.getContact(sender)
line.sendMessage(to, settings["tag"])
break
#___________________RESPON IMAGE_________________
if msg.contentType == 0 and sender not in lineMID and msg.toType == 2:
if 'MENTION' in msg.contentMetadata:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if lineMID in mention["M"]:
if settings["autoResponImage"]:
contact = line.getContact(sender)
anu = contact.displayName
path = "http://dl.profile.line.naver.jp/" + contact.pictureStatus
line.sendMessage(to, settings["tag2"])
line.sendImageWithURL(msg.to, str(path))
break
#___________________RESPON PM________________
if msg.contentType == 0 and sender not in lineMID and msg.toType == 2:
if 'MENTION' in msg.contentMetadata:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if lineMID in mention["M"]:
if settings["autoResponPm"]:
contact = line.getContact(sender)
line.sendMessage(sender, settings["tag3"])
break
#___________________________
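# Respond to mentions in groups listed in responPc: answer the sender in a
# private chat with settings["responpc"].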
if msg.contentType == 0 and sender not in lineMID and msg.toType == 2:
if 'MENTION' in msg.contentMetadata:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if lineMID in mention["M"]:
if msg.to in responPc:
G = line.getGroup(to)
contact = line.getContact(sender)
anu = contact.displayName
#sid = str(tikel["sid"])
#spkg = str(tikel["spkg"])
anu = contact.displayName
line.sendMessage(sender, settings["responpc"])
#line.sendSticker(sender, spkg, sid)
break
if op.type == 26:
print ("[ 26 ] RECEIVE MESSAGE")
msg = op.message
text = msg.text
msg_id = msg.id
receiver = msg.to
sender = msg._from
if msg.toType == 0:
if sender != line.profile.mid:
to = sender
else:
to = receiver
else:
to = receiver
if settings["autoRead"] == True:
line.sendChatChecked(to, msg_id)
if to in read["readPoint"]:
if sender not in read["ROM"][to]:
read["ROM"][to][sender] = True
if sender in settings["mimic"]["target"] and settings["mimic"]["status"] == True and settings["mimic"]["target"][sender] == True:
text = msg.text
if text is not None:
line.sendMessage(msg.to,text)
#________________
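# op.type 65: a message was unsent. If unsend logging is enabled, look the
# message up in msg_dict and repost its sender, send time, type and text.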
if op.type == 65:
print ("[ 65 ] NOTIFIED DESTROY MESSAGE")
if settings["unsendMessage"] == True:
try:
at = op.param1
msg_id = op.param2
if msg_id in msg_dict:
if msg_dict[msg_id]["from"]:
contact = line.getContact(msg_dict[msg_id]["from"])
if contact.displayNameOverridden != None:
name_ = contact.displayNameOverridden
else:
name_ = contact.displayName
ret_ = "Send Message cancelled."
ret_ += "\nSender : @!"
ret_ += "\nSend At : {}".format(str(dt_to_str(cTime_to_datetime(msg_dict[msg_id]["createdTime"]))))
ret_ += "\nType : {}".format(str(Type._VALUES_TO_NAMES[msg_dict[msg_id]["contentType"]]))
ret_ += "\nText : {}".format(str(msg_dict[msg_id]["text"]))
sendMention(at, str(ret_), [contact.mid])
del msg_dict[msg_id]
else:
line.sendMessage(at,"SentMessage cancelled,But I didn't have log data.\nSorry > <")
except Exception as error:
logError(error)
traceback.print_tb(error.__traceback__)
#============================================================================
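# op.type 19: kick-out notification. Each branch below lets one of the guard
# accounts reopen the group QR, pull every account back in via a reissued
# ticket, kick the kicker if it is not one of the bots, and close the QR again.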
if op.type == 19:
print ("[ 19 ] KICKOUT JP MESSAGE")
try:
if op.param3 in lineMID:
if op.param2 in kiMID:
G = ki.getGroup(op.param1)
G.preventedJoinByTicket = False
ki.updateGroup(G)
invsend = 0
Ticket = ki.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki.getGroup(op.param1)
G.preventedJoinByTicket = True
ki.updateGroup(G)
else:
G = ki.getGroup(op.param1)
ki.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki.updateGroup(G)
invsend = 0
Ticket = ki.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki.getGroup(op.param1)
G.preventedJoinByTicket = True
ki.updateGroup(G)
#------------------------------------------------------------------------------
elif op.param3 in lineMID:
if op.param2 in ki2MID:
G = ki2.getGroup(op.param1)
G.preventedJoinByTicket = False
ki2.updateGroup(G)
invsend = 0
Ticket = ki2.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki2.getGroup(op.param1)
G.preventedJoinByTicket = True
ki2.updateGroup(G)
else:
G = ki2.getGroup(op.param1)
ki2.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki2.updateGroup(G)
invsend = 0
Ticket = ki2.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki2.getGroup(op.param1)
G.preventedJoinByTicket = True
ki2.updateGroup(G)
#------------------------------------------------------------------------------
elif op.param3 in lineMID:
if op.param2 in ki3MID:
G = ki3.getGroup(op.param1)
G.preventedJoinByTicket = False
ki3.updateGroup(G)
invsend = 0
Ticket = ki3.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki3.getGroup(op.param1)
G.preventedJoinByTicket = True
ki3.updateGroup(G)
else:
G = ki3.getGroup(op.param1)
ki3.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki3.updateGroup(G)
invsend = 0
Ticket = ki3.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki3.getGroup(op.param1)
G.preventedJoinByTicket = True
ki3.updateGroup(G)
#-------------------------------------------------------------------------------
elif op.param3 in lineMID:
if op.param2 in ki4MID:
G = ki4.getGroup(op.param1)
G.preventedJoinByTicket = False
ki4.updateGroup(G)
invsend = 0
Ticket = ki4.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki4.getGroup(op.param1)
G.preventedJoinByTicket = True
ki4.updateGroup(G)
else:
G = ki4.getGroup(op.param1)
ki4.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki4.updateGroup(G)
invsend = 0
Ticket = ki4.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki4.getGroup(op.param1)
G.preventedJoinByTicket = True
ki4.updateGroup(G)
#------------------------------------------------------------------------------
elif op.param3 in lineMID:
if op.param2 in ki5MID:
G = ki5.getGroup(op.param1)
G.preventedJoinByTicket = False
ki5.updateGroup(G)
invsend = 0
Ticket = ki5.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki4.getGroup(op.param1)
G.preventedJoinByTicket = True
ki5.updateGroup(G)
else:
G = ki4.getGroup(op.param1)
ki4.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki4.updateGroup(G)
invsend = 0
Ticket = ki4.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki4.getGroup(op.param1)
G.preventedJoinByTicket = True
ki4.updateGroup(G)
#=============================================================================
elif op.param3 in lineMID:
if op.param2 in ki6MID:
G = ki6.getGroup(op.param1)
G.preventedJoinByTicket = False
ki5.updateGroup(G)
invsend = 0
Ticket = ki6.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki5.getGroup(op.param1)
G.preventedJoinByTicket = True
ki6.updateGroup(G)
else:
G = ki5.getGroup(op.param1)
ki5.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki5.updateGroup(G)
invsend = 0
Ticket = ki5.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki5.getGroup(op.param1)
G.preventedJoinByTicket = True
ki5.updateGroup(G)
#=============================================================================
if op.param3 in kiMID:
if op.param2 in lineMID:
G = line.getGroup(op.param1)
G.preventedJoinByTicket = False
line.updateGroup(G)
invsend = 0
Ticket = line.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G = line.getGroup(op.param1)
G.preventedJoinByTicket = True
line.updateGroup(G)
else:
G = line.getGroup(op.param1)
line.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
line.updateGroup(G)
invsend = 0
Ticket = line.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G = line.getGroup(op.param1)
G.preventedJoinByTicket = True
line.updateGroup(G)
#-------------------------------------------------------------------------------
elif op.param3 in kiMID:
if op.param2 in ki2MID:
G = ki2.getGroup(op.param1)
G.preventedJoinByTicket = False
ki2.updateGroup(G)
invsend = 0
Ticket = ki2.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki2.getGroup(op.param1)
G.preventedJoinByTicket = True
ki2.updateGroup(G)
else:
G = ki2.getGroup(op.param1)
ki2.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki2.updateGroup(G)
invsend = 0
Ticket = ki2.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki2.getGroup(op.param1)
G.preventedJoinByTicket = True
ki2.updateGroup(G)
#-------------------------------------------------------------------------------
elif op.param3 in kiMID:
if op.param2 in ki3MID:
G = ki3.getGroup(op.param1)
G.preventedJoinByTicket = False
ki3.updateGroup(G)
invsend = 0
Ticket = ki3.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki3.getGroup(op.param1)
G.preventedJoinByTicket = True
ki3.updateGroup(G)
else:
G = ki3.getGroup(op.param1)
ki3.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki3.updateGroup(G)
invsend = 0
Ticket = ki3.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki3.getGroup(op.param1)
G.preventedJoinByTicket = True
ki3.updateGroup(G)
#------------------------------------------------------------------------------
elif op.param3 in kiMID:
if op.param2 in ki4MID:
G = ki4.getGroup(op.param1)
G.preventedJoinByTicket = False
ki4.updateGroup(G)
invsend = 0
Ticket = ki4.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki4.getGroup(op.param1)
G.preventedJoinByTicket = True
ki4.updateGroup(G)
else:
G = ki4.getGroup(op.param1)
ki4.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki4.updateGroup(G)
invsend = 0
Ticket = ki4.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki4.getGroup(op.param1)
G.preventedJoinByTicket = True
ki4.updateGroup(G)
#=====================================================================
#------------------------------------------------------------------------------
elif op.param3 in kiMID:
if op.param2 in ki5MID:
G = ki5.getGroup(op.param1)
G.preventedJoinByTicket = False
ki5.updateGroup(G)
invsend = 0
Ticket = ki5.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki5.getGroup(op.param1)
G.preventedJoinByTicket = True
ki5.updateGroup(G)
else:
G = ki5.getGroup(op.param1)
ki5.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki5.updateGroup(G)
invsend = 0
Ticket = ki5.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki5.getGroup(op.param1)
G.preventedJoinByTicket = True
ki5.updateGroup(G)
#=============================================
elif op.param3 in kiMID:
if op.param2 in ki6MID:
G = ki6.getGroup(op.param1)
G.preventedJoinByTicket = False
ki6.updateGroup(G)
invsend = 0
Ticket = ki6.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki6.getGroup(op.param1)
G.preventedJoinByTicket = True
ki6.updateGroup(G)
else:
G = ki6.getGroup(op.param1)
ki6.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki6.updateGroup(G)
invsend = 0
Ticket = ki6.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki6.getGroup(op.param1)
G.preventedJoinByTicket = True
ki6.updateGroup(G)
#=============================================
if op.param3 in ki2MID:
if op.param2 in lineMID:
G = line.getGroup(op.param1)
G.preventedJoinByTicket = False
line.updateGroup(G)
invsend = 0
Ticket = line.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = line.getGroup(op.param1)
G.preventedJoinByTicket = True
line.updateGroup(G)
else:
G = line.getGroup(op.param1)
line.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
line.updateGroup(G)
invsend = 0
Ticket = line.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = line.getGroup(op.param1)
G.preventedJoinByTicket = True
line.updateGroup(G)
#-----------------------------------------------------------------------------
elif op.param3 in ki2MID:
if op.param2 in kiMID:
G = ki.getGroup(op.param1)
G.preventedJoinByTicket = False
ki.updateGroup(G)
invsend = 0
Ticket = ki.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki.getGroup(op.param1)
G.preventedJoinByTicket = True
ki.updateGroup(G)
else:
G = ki.getGroup(op.param1)
ki.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki.updateGroup(G)
invsend = 0
Ticket = ki.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki.getGroup(op.param1)
G.preventedJoinByTicket = True
ki.updateGroup(G)
#------------------------------------------------------------------------------
elif op.param3 in ki2MID:
if op.param2 in ki3MID:
G = ki3.getGroup(op.param1)
G.preventedJoinByTicket = False
ki3.updateGroup(G)
invsend = 0
Ticket = ki3.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki3.getGroup(op.param1)
G.preventedJoinByTicket = True
ki3.updateGroup(G)
else:
G = ki3.getGroup(op.param1)
ki3.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki3.updateGroup(G)
invsend = 0
Ticket = ki3.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki3.getGroup(op.param1)
G.preventedJoinByTicket = True
ki3.updateGroup(G)
#-----------------------------------------------------------------------------
elif op.param3 in ki2MID:
if op.param2 in ki4MID:
G = ki4.getGroup(op.param1)
G.preventedJoinByTicket = False
ki4.updateGroup(G)
invsend = 0
Ticket = ki4.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki4.getGroup(op.param1)
G.preventedJoinByTicket = True
ki4.updateGroup(G)
else:
G = ki4.getGroup(op.param1)
ki4.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki4.updateGroup(G)
invsend = 0
Ticket = ki4.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki4.getGroup(op.param1)
G.preventedJoinByTicket = True
ki4.updateGroup(G)
#------------------------------------------------------------------------------
elif op.param3 in ki2MID:
if op.param2 in ki5MID:
G = ki5.getGroup(op.param1)
G.preventedJoinByTicket = False
ki5.updateGroup(G)
invsend = 0
Ticket = ki5.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki5.getGroup(op.param1)
G.preventedJoinByTicket = True
ki5.updateGroup(G)
else:
G = ki5.getGroup(op.param1)
ki5.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki5.updateGroup(G)
invsend = 0
Ticket = ki5.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki5.getGroup(op.param1)
G.preventedJoinByTicket = True
ki5.updateGroup(G)
#=======================================
elif op.param3 in ki2MID:
if op.param2 in ki6MID:
G = ki6.getGroup(op.param1)
G.preventedJoinByTicket = False
ki6.updateGroup(G)
invsend = 0
Ticket = ki6.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki6.getGroup(op.param1)
G.preventedJoinByTicket = True
ki6.updateGroup(G)
else:
G = ki6.getGroup(op.param1)
ki6.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki6.updateGroup(G)
invsend = 0
Ticket = ki6.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki6.getGroup(op.param1)
G.preventedJoinByTicket = True
ki6.updateGroup(G)
#=======================================
if op.param3 in ki3MID:
if op.param2 in lineMID:
G = line.getGroup(op.param1)
G.preventedJoinByTicket = False
line.updateGroup(G)
invsend = 0
Ticket = line.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G = line.getGroup(op.param1)
G.preventedJoinByTicket = True
line.updateGroup(G)
else:
G = line.getGroup(op.param1)
line.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
line.updateGroup(G)
invsend = 0
Ticket = line.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G = line.getGroup(op.param1)
G.preventedJoinByTicket = True
line.updateGroup(G)
#-----------------------------------------------------------------------------
elif op.param3 in ki3MID:
if op.param2 in kiMID:
G = ki.getGroup(op.param1)
G.preventedJoinByTicket = False
ki.updateGroup(G)
invsend = 0
Ticket = ki.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki.getGroup(op.param1)
G.preventedJoinByTicket = True
ki.updateGroup(G)
else:
G = ki.getGroup(op.param1)
ki.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki.updateGroup(G)
invsend = 0
Ticket = ki.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki.getGroup(op.param1)
G.preventedJoinByTicket = True
ki.updateGroup(G)
#-----------------------------------------------------------------------------
elif op.param3 in ki3MID:
if op.param2 in ki2MID:
G = ki2.getGroup(op.param1)
G.preventedJoinByTicket = False
ki2.updateGroup(G)
invsend = 0
Ticket = ki2.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki2.getGroup(op.param1)
G.preventedJoinByTicket = True
ki2.updateGroup(G)
else:
G = ki2.getGroup(op.param1)
ki2.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki2.updateGroup(G)
invsend = 0
Ticket = ki2.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki2.getGroup(op.param1)
G.preventedJoinByTicket = True
ki2.updateGroup(G)
#-------------------------------------------------------------------------------
elif op.param3 in ki3MID:
if op.param2 in ki4MID:
G = ki4.getGroup(op.param1)
G.preventedJoinByTicket = False
ki4.updateGroup(G)
invsend = 0
Ticket = ki4.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki4.getGroup(op.param1)
G.preventedJoinByTicket = True
ki4.updateGroup(G)
else:
G = ki4.getGroup(op.param1)
ki4.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki4.updateGroup(G)
invsend = 0
Ticket = ki4.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki4.getGroup(op.param1)
G.preventedJoinByTicket = True
ki4.updateGroup(G)
#------------------------------------------------------------------------------
elif op.param3 in ki3MID:
if op.param2 in ki5MID:
G = ki5.getGroup(op.param1)
G.preventedJoinByTicket = False
ki5.updateGroup(G)
invsend = 0
Ticket = ki5.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki5.getGroup(op.param1)
G.preventedJoinByTicket = True
ki5.updateGroup(G)
else:
G = ki5.getGroup(op.param1)
ki5.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki5.updateGroup(G)
invsend = 0
Ticket = ki5.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki5.getGroup(op.param1)
G.preventedJoinByTicket = True
ki5.updateGroup(G)
#================================================================
elif op.param3 in ki3MID:
if op.param2 in ki6MID:
G = ki6.getGroup(op.param1)
G.preventedJoinByTicket = False
ki6.updateGroup(G)
invsend = 0
Ticket = ki6.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki6.getGroup(op.param1)
G.preventedJoinByTicket = True
ki6.updateGroup(G)
else:
G = ki6.getGroup(op.param1)
ki6.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki6.updateGroup(G)
invsend = 0
Ticket = ki6.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki6.getGroup(op.param1)
G.preventedJoinByTicket = True
ki6.updateGroup(G)
#================================================================
if op.param3 in ki4MID:
if op.param2 in lineMID:
G = line.getGroup(op.param1)
G.preventedJoinByTicket = False
line.updateGroup(G)
invsend = 0
Ticket = line.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
G = line.getGroup(op.param1)
G.preventedJoinByTicket = True
line.updateGroup(G)
else:
G = line.getGroup(op.param1)
line.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
line.updateGroup(G)
invsend = 0
Ticket = line.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
G = line.getGroup(op.param1)
G.preventedJoinByTicket = True
line.updateGroup(G)
#-------------------------------------------------------------------------------
elif op.param3 in ki4MID:
if op.param2 in kiMID:
G = ki.getGroup(op.param1)
G.preventedJoinByTicket = False
ki.updateGroup(G)
invsend = 0
Ticket = ki.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki.getGroup(op.param1)
G.preventedJoinByTicket = True
ki.updateGroup(G)
else:
G = ki.getGroup(op.param1)
ki.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki.updateGroup(G)
invsend = 0
Ticket = ki.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki.getGroup(op.param1)
G.preventedJoinByTicket = True
ki.updateGroup(G)
#-------------------------------------------------------------------------------
elif op.param3 in ki4MID:
if op.param2 in ki2MID:
G = ki2.getGroup(op.param1)
G.preventedJoinByTicket = False
ki2.updateGroup(G)
invsend = 0
Ticket = ki2.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki2.getGroup(op.param1)
G.preventedJoinByTicket = True
ki2.updateGroup(G)
else:
G = ki2.getGroup(op.param1)
ki2.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki2.updateGroup(G)
invsend = 0
Ticket = ki2.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki2.getGroup(op.param1)
G.preventedJoinByTicket = True
ki2.updateGroup(G)
#------------------------------------------------------------------------------
elif op.param3 in ki4MID:
if op.param2 in ki3MID:
G = ki3.getGroup(op.param1)
G.preventedJoinByTicket = False
ki3.updateGroup(G)
invsend = 0
Ticket = ki3.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki3.getGroup(op.param1)
G.preventedJoinByTicket = True
ki3.updateGroup(G)
else:
G = ki3.getGroup(op.param1)
ki3.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki3.updateGroup(G)
invsend = 0
Ticket = ki3.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki3.getGroup(op.param1)
G.preventedJoinByTicket = True
ki3.updateGroup(G)
#-------------------------------------------------------------------------------
elif op.param3 in ki4MID:
if op.param2 in ki5MID:
G = ki5.getGroup(op.param1)
G.preventedJoinByTicket = False
ki5.updateGroup(G)
invsend = 0
Ticket = ki5.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki5.getGroup(op.param1)
G.preventedJoinByTicket = True
ki5.updateGroup(G)
else:
G = ki5.getGroup(op.param1)
ki5.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki5.updateGroup(G)
invsend = 0
Ticket = ki5.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki5.getGroup(op.param1)
G.preventedJoinByTicket = True
ki5.updateGroup(G)
#==============================================
elif op.param3 in ki4MID:
if op.param2 in ki6MID:
G = ki6.getGroup(op.param1)
G.preventedJoinByTicket = False
ki6.updateGroup(G)
invsend = 0
Ticket = ki6.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki6.getGroup(op.param1)
G.preventedJoinByTicket = True
ki6.updateGroup(G)
else:
G = ki6.getGroup(op.param1)
ki6.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki6.updateGroup(G)
invsend = 0
Ticket = ki6.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki6.getGroup(op.param1)
G.preventedJoinByTicket = True
ki6.updateGroup(G)
#==============================================
if op.param3 in ki5MID:
if op.param2 in lineMID:
G = line.getGroup(op.param1)
G.preventedJoinByTicket = False
line.updateGroup(G)
invsend = 0
Ticket = line.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = line.getGroup(op.param1)
G.preventedJoinByTicket = True
line.updateGroup(G)
else:
G = line.getGroup(op.param1)
line.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
line.updateGroup(G)
invsend = 0
Ticket = line.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = line.getGroup(op.param1)
G.preventedJoinByTicket = True
line.updateGroup(G)
#------------------------------------------------------------------------------
elif op.param3 in ki5MID:
if op.param2 in kiMID:
G = ki.getGroup(op.param1)
G.preventedJoinByTicket = False
ki.updateGroup(G)
invsend = 0
Ticket = ki.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki.getGroup(op.param1)
G.preventedJoinByTicket = True
ki.updateGroup(G)
else:
G = ki.getGroup(op.param1)
ki.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki.updateGroup(G)
invsend = 0
Ticket = ki.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki.getGroup(op.param1)
G.preventedJoinByTicket = True
ki.updateGroup(G)
#-------------------------------------------------------------------------------
elif op.param3 in ki5MID:
if op.param2 in ki2MID:
G = ki2.getGroup(op.param1)
G.preventedJoinByTicket = False
ki2.updateGroup(G)
invsend = 0
Ticket = ki2.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki2.getGroup(op.param1)
G.preventedJoinByTicket = True
ki2.updateGroup(G)
else:
G = ki2.getGroup(op.param1)
ki2.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki2.updateGroup(G)
invsend = 0
Ticket = ki2.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki2.getGroup(op.param1)
G.preventedJoinByTicket = True
ki2.updateGroup(G)
#------------------------------------------------------------------------------
elif op.param3 in ki5MID:
if op.param2 in ki3MID:
G = ki3.getGroup(op.param1)
G.preventedJoinByTicket = False
ki3.updateGroup(G)
invsend = 0
Ticket = ki3.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki3.getGroup(op.param1)
G.preventedJoinByTicket = True
ki3.updateGroup(G)
else:
G = ki3.getGroup(op.param1)
ki3.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki3.updateGroup(G)
invsend = 0
Ticket = ki3.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki3.getGroup(op.param1)
G.preventedJoinByTicket = True
ki3.updateGroup(G)
#------------------------------------------------------------------------------
elif op.param3 in ki5MID:
if op.param2 in ki4MID:
G = ki4.getGroup(op.param1)
G.preventedJoinByTicket = False
ki4.updateGroup(G)
invsend = 0
Ticket = ki4.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki4.getGroup(op.param1)
G.preventedJoinByTicket = True
ki4.updateGroup(G)
else:
G = ki4.getGroup(op.param1)
ki4.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki4.updateGroup(G)
invsend = 0
Ticket = ki4.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki4.getGroup(op.param1)
G.preventedJoinByTicket = True
ki4.updateGroup(G)
#==============================================
elif op.param3 in ki5MID:
if op.param2 in ki6MID:
G = ki6.getGroup(op.param1)
G.preventedJoinByTicket = False
ki6.updateGroup(G)
invsend = 0
Ticket = ki6.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki6.getGroup(op.param1)
G.preventedJoinByTicket = True
ki6.updateGroup(G)
else:
G = ki6.getGroup(op.param1)
ki6.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki6.updateGroup(G)
invsend = 0
Ticket = ki6.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki6.getGroup(op.param1)
G.preventedJoinByTicket = True
ki6.updateGroup(G)
#==============================================
if op.param3 in ki6MID:
if op.param2 in lineMID:
G = line.getGroup(op.param1)
G.preventedJoinByTicket = False
line.updateGroup(G)
invsend = 0
Ticket = line.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = line.getGroup(op.param1)
G.preventedJoinByTicket = True
line.updateGroup(G)
else:
G = line.getGroup(op.param1)
line.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
line.updateGroup(G)
invsend = 0
Ticket = line.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = line.getGroup(op.param1)
G.preventedJoinByTicket = True
line.updateGroup(G)
#------------------------------------------------------------------------------
elif op.param3 in ki6MID:
if op.param2 in kiMID:
G = ki.getGroup(op.param1)
G.preventedJoinByTicket = False
ki.updateGroup(G)
invsend = 0
Ticket = ki.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki.getGroup(op.param1)
G.preventedJoinByTicket = True
ki.updateGroup(G)
else:
G = ki.getGroup(op.param1)
ki.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki.updateGroup(G)
invsend = 0
Ticket = ki.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki.getGroup(op.param1)
G.preventedJoinByTicket = True
ki.updateGroup(G)
#-------------------------------------------------------------------------------
elif op.param3 in ki6MID:
if op.param2 in ki2MID:
G = ki2.getGroup(op.param1)
G.preventedJoinByTicket = False
ki2.updateGroup(G)
invsend = 0
Ticket = ki2.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki2.getGroup(op.param1)
G.preventedJoinByTicket = True
ki2.updateGroup(G)
else:
G = ki2.getGroup(op.param1)
ki2.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki2.updateGroup(G)
invsend = 0
Ticket = ki2.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki2.getGroup(op.param1)
G.preventedJoinByTicket = True
ki2.updateGroup(G)
#------------------------------------------------------------------------------
elif op.param3 in ki6MID:
if op.param2 in ki3MID:
G = ki3.getGroup(op.param1)
G.preventedJoinByTicket = False
ki3.updateGroup(G)
invsend = 0
Ticket = ki3.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki3.getGroup(op.param1)
G.preventedJoinByTicket = True
ki3.updateGroup(G)
else:
G = ki3.getGroup(op.param1)
ki3.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki3.updateGroup(G)
invsend = 0
Ticket = ki3.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki3.getGroup(op.param1)
G.preventedJoinByTicket = True
ki3.updateGroup(G)
#------------------------------------------------------------------------------
elif op.param3 in ki6MID:
if op.param2 in ki4MID:
G = ki4.getGroup(op.param1)
G.preventedJoinByTicket = False
ki4.updateGroup(G)
invsend = 0
Ticket = ki4.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki4.getGroup(op.param1)
G.preventedJoinByTicket = True
ki4.updateGroup(G)
else:
G = ki4.getGroup(op.param1)
ki4.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki4.updateGroup(G)
invsend = 0
Ticket = ki4.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki4.getGroup(op.param1)
G.preventedJoinByTicket = True
ki4.updateGroup(G)
#------------------------------------------------------------------------------
elif op.param3 in ki6MID:
if op.param2 in ki5MID:
G = ki5.getGroup(op.param1)
G.preventedJoinByTicket = False
ki5.updateGroup(G)
invsend = 0
Ticket = ki5.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki5.getGroup(op.param1)
G.preventedJoinByTicket = True
ki5.updateGroup(G)
else:
G = ki5.getGroup(op.param1)
ki5.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
ki5.updateGroup(G)
invsend = 0
Ticket = ki5.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki5.getGroup(op.param1)
G.preventedJoinByTicket = True
ki5.updateGroup(G)
elif op.param2 not in Bots:
if op.param2 in admin:
pass
elif settings["protect"] == True:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
random.choice(KAC).sendText(op.param1,"(U)(P)")
else:
pass
except:
pass
#==============================================================================#
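# op.type 13 is a group-invitation event: when inviteprotect is on, a random
# bot cancels the pending invitation and kicks the inviter; when cancelprotect
# is on, only the invitation is cancelled.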
if op.type == 13:
if op.param2 not in Bots:
if op.param2 in admin:
pass
elif settings["inviteprotect"] == True:
random.choice(KAC).cancelGroupInvitation(op.param1,[op.param3])
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
if op.param2 not in Bots:
if op.param2 in admin:
pass
elif settings["cancelprotect"] == True:
random.choice(KAC).cancelGroupInvitation(op.param1,[op.param3])
#-------------------------------------------------------------------------------
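# op.type 11 is a group-settings update (for example the QR/join-ticket flag):
# when qrprotect is on, a random bot re-enables preventedJoinByTicket and kicks
# whoever changed the setting.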
if op.type == 11:
if op.param2 not in Bots:
if op.param2 in admin:
pass
elif settings["qrprotect"] == True:
G = random.choice(KAC).getGroup(op.param1)
G.preventedJoinByTicket = True
random.choice(KAC).updateGroup(G)
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
else:
line.sendMessage(op.param1,"Woiiiiiiiiiiiii")
else:
line.sendMessage(op.param1,"")
#==============================================================================#
#==============================================================================#
if op.type == 55:
print ("[ 55 ] NOTIFIED READ MESSAGE")
try:
if op.param1 in read['readPoint']:
if op.param2 in read['readMember'][op.param1]:
pass
else:
read['readMember'][op.param1] += op.param2
read['ROM'][op.param1][op.param2] = op.param2
backupData()
else:
pass
except:
pass
if op.type == 55:
try:
if cctv['cyduk'][op.param1]==True:
if op.param1 in cctv['point']:
Name = line.getContact(op.param2).displayName
if Name in cctv['sidermem'][op.param1]:
pass
else:
cctv['sidermem'][op.param1] += "\n☛ " + Name
summon(op.param1,[op.param2])
zxn=["Tercyduk juga dirimu\nSUBHANALLAH "," Sini ka jangan ngintip mele\nMASYA ALLAH","Mau lari kemana lo\nAnda ttap kecydut Bot\nSUBHANALLAH","Turun kak ikut chat sini\nYA ELLAH ","Dirimu krtangkap basah cctv\nSUBHANNALLAH","Sider MUlu dirimu\nASTAGHFIRULLAH","Istighfar bro\nJgn CCTV mele\nMasya allah","Loha\nSini ikut Ngopi ","Hai idolaku ayo sini ngobrol"]
say = random.choice(zxn) +" "+ Name
line.sendMessage(op.param1, say)
contact = line.getContact(op.param2).picturePath
image = 'http://dl.profile.line.naver.jp'+contact
line.sendImageWithURL(op.param1, image)
#cl.sendMessage(to, None, contentMetadata={"STKID":"26538898","STKPKGID":"10272","STKVER":"1"}, contentType=7)
else:
pass
else:
pass
except:
pass
else:
pass
if op.type == 26:
msg = op.message
if msg.to in settings["simiSimi"]:
if settings["simiSimi"][msg.to] == True:
if msg.text is not None:
text = msg.text
r = requests.get("http://api.ntcorp.us/chatbot/v1/?text=" + text.replace(" ","+") + "&key=beta1.nt")
data = r.text
data = json.loads(data)
if data['status'] == 200:
if data['result']['result'] == 100:
line.sendMessage(msg.to, "[̟]: " + data['result']['response'])
except Exception as error:
logError(error)
#==============================================================================#
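# The branches above all repeat the same recovery pattern: open the group's
# join-by-ticket flag, reissue a ticket, let every bot account accept it, then
# close the flag again. Below is a minimal sketch of a helper that factors this
# out. It is not part of the original bot: the name rejoinByTicket and the
# clients argument are hypothetical, and it only reuses the client methods
# already called above (getGroup, updateGroup, kickoutFromGroup,
# reissueGroupTicket, acceptGroupInvitationByTicket).
def rejoinByTicket(owner, clients, groupId, kickMid=None):
    G = owner.getGroup(groupId)
    if kickMid is not None:
        # Remove the offender first, exactly as the else-branches above do.
        owner.kickoutFromGroup(groupId, [kickMid])
    G.preventedJoinByTicket = False
    owner.updateGroup(G)
    Ticket = owner.reissueGroupTicket(groupId)
    for c in clients:
        try:
            c.acceptGroupInvitationByTicket(groupId, Ticket)
        except:
            # Ignore failures for clients that cannot join (e.g. already members).
            pass
    # Close the ticket again so nobody else can join with it.
    G = owner.getGroup(groupId)
    G.preventedJoinByTicket = True
    owner.updateGroup(G)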
def autolike():
count = 1
while True:
try:
for posts in line.activity(1)["result"]["posts"]:
if posts["postInfo"]["liked"] is False:
line.likePost(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
print ("Like")
if wait["commentOn"] == True:
if posts["userInfo"]["writerMid"] in wait["commentBlack"]:
pass
else:
line.createComment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
except:
count += 1
if(count == 50):
sys.exit(0)
else:
pass
thread2 = threading.Thread(target=autolike)
thread2.daemon = True
thread2.start()
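# autolike() runs as a daemon thread, so it stops automatically when the main
# poll loop at the bottom of this file exits or the process is restarted.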
def autoLike():
count = 1
while True:
try:
for posts in line.getFeed(postLimit=10, commentLimit=1, likeLimit=1, order='TIME')["result"]["feed"]:
    if posts["postInfo"]["liked"] is False:
        if wait["sukaPost"] == True:
            line.likePost(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], likeType=1001)
            print ("Like")
            if wait["commentOn"] == True:
                if posts["userInfo"]["writerMid"] in wait["commentBlack"]:
                    pass
                else:
                    line.createComment(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], wait["comment"])
except:
count += 1
if(count == 50):
sys.exit(0)
else:
pass
def autoLiked():
if settings["sukaPost"] == True:
lastTimeLiking = time.time()
if time.time() - lastTimeLiking >= 60*60:
listLikeType = 1001
myComment = "[ Auto Like by: Team Dk Protection ]"
feed = line.getFeed()
if feed["message"] != 'succes':
lastTimeLiking = time.time()
return True
del feed["result"]["feedInfos"]
result = feed["result"]["feeds"]
for res in result:
postInfo = res["post"]["postInfo"]
homeId = postInfo["homeId"]
postId = postInfo["postId"]
likeStat = postInfo["liked"]
if likeStat == True:
continue
else:
line.likePost(homeId, postId, listLikeType)
line.createComment(homeId, postId, myComment)
lastTimeLiking = time.time()
thread1 = threading.Thread(target=autoLike)
thread1.daemon = True
thread1.start()
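# Main long-poll loop: fetch up to 50 pending operations and, for every
# operation whose type is not 0, advance the local revision counter and hand it
# to lineBot(). If polling fails with an error mentioning "reason=None", the
# bot waits 60 seconds and restarts itself via restart_program().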
while True:
try:
Ops = line.poll.fetchOperations(line.revision, 50)
for op in Ops:
if op.type != 0:
line.revision = max(line.revision, op.revision)
lineBot(op)
except Exception as E:
E = str(E)
if "reason=None" in E:
print (E)
time.sleep(60)
restart_program()
|
main.py
|
try:
from auth import auth
except:
with open("auth.py","w") as a:
a.write("auth = ('<username>','<password>')")
print("Add login info to auth.py!")
quit()
import trainInfomation
import pygame
import datetime
import threading
import time
def firstLetterVowelDetect(string):
if string[0].lower() in ['a','e','i','o','u']:
return True
else:
return False
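# For example, firstLetterVowelDetect("Avanti") returns True while
# firstLetterVowelDetect("Great Western") returns False; TrainInfo() below uses
# this to choose between "an" and "a".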
def updateInfomation(code):
global station
while True:
station = trainInfomation.station(code)
if stopThread:
break
time.sleep(10)
def TrainInfo(station):
if firstLetterVowelDetect(station.trains[0].operator):
scrollText = f"This is an {station.trains[0].operator} service to {station.trains[0].destination}"
else:
scrollText = f"This is a {station.trains[0].operator} service to {station.trains[0].destination}"
scrollText = f"{scrollText}. Calling at: "
for i in station.trains[0].callingAt:
if i == station.trains[0].callingAt[-1]:
scrollText = f"{scrollText}and {i}"
else:
scrollText = f"{scrollText}{i}, "
return scrollText
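# Example of the scroll text this builds (operator, destination and calling
# points here are invented): "This is an Avanti West Coast service to London
# Euston. Calling at: Crewe, Milton Keynes Central, and London Euston"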
print("\n\nBritishTrainsDepartureBoard")
print("Powered by RealTimeTrains API (https://api.rtt.io/)")
print("--------------------------------------------------------")
code = input("Type in a station code: ")
print("Please wait")
station = trainInfomation.station(code)
shownStation = station
pygame.init()
clock = pygame.time.Clock() # Clock is capital C
height = 320
width = 1200
gameDisplay = pygame.display.set_mode((width, height))
pygame.display.set_caption(f'Train Infomation: {station.inputStaion} - {code.upper()}')
pygame.display.update()
font = pygame.font.SysFont(None, 75)
scrollTextAmount = 0
updateThread = threading.Thread(target=updateInfomation, args=(code,))
stopThread = False
updateThread.start()
while True:
current_time = datetime.datetime.now().strftime("%H:%M:%S")
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
stopThread = True
print("Closing update thread, this may take a few seconds...")
quit()
gameDisplay.fill((0,0,0))
TrainOrderIndicator1 = font.render("1st", True, (255, 165, 0))
gameDisplay.blit(TrainOrderIndicator1, (20, 20))
TrainTimeIndicator1 = font.render(f"{shownStation.trains[0].time}", True, (255, 165, 0))
gameDisplay.blit(TrainTimeIndicator1, (140, 20))
TrainDestinationIndicator1 = font.render(f"{shownStation.trains[0].destination}", True, (255, 165, 0))
gameDisplay.blit(TrainDestinationIndicator1, (320, 20))
TrainEstimation1 = font.render(f"{shownStation.trains[0].getExpectedInfo()}", True, (255, 165, 0))
gameDisplay.blit(TrainEstimation1, (width - TrainEstimation1.get_rect().width-20, 20))
TrainInfomation1 = font.render(f"{TrainInfo(shownStation)}", True, (255, 165, 0))
gameDisplay.blit(TrainInfomation1, (scrollTextAmount, 100))
scrollTextAmount -= 5
if scrollTextAmount < (TrainInfomation1.get_rect().width+5)*-1:
scrollTextAmount = width
shownStation = station
TrainOrderIndicator2 = font.render("2nd", True, (255, 165, 0))
gameDisplay.blit(TrainOrderIndicator2, (20, 180))
TrainTimeIndicator2 = font.render(f"{shownStation.trains[1].time}", True, (255, 165, 0))
gameDisplay.blit(TrainTimeIndicator2, (140, 180))
TrainDestinationIndicator2 = font.render(f"{shownStation.trains[1].destination}", True, (255, 165, 0))
gameDisplay.blit(TrainDestinationIndicator2, (320, 180))
TrainEstimation2 = font.render(f"{shownStation.trains[1].getExpectedInfo()}", True, (255, 165, 0))
gameDisplay.blit(TrainEstimation2, (width - TrainEstimation2.get_rect().width-20, 180))
CurrentTime = font.render(f"{current_time}", True, (255, 165, 0))
gameDisplay.blit(CurrentTime, ((width / 2) - (CurrentTime.get_rect().width / 2), height - CurrentTime.get_rect().height-20))
clock.tick(120)
pygame.display.update()
|
cross_device_ops_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for CrossDeviceOps."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import os
import threading
import time
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.distribute import cluster_resolver
from tensorflow.python.distribute import collective_all_reduce_strategy
from tensorflow.python.distribute import collective_util
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import cross_device_utils
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_utils
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import values as value_lib
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import remote
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import kernels
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
def _get_devices(devices):
if isinstance(devices, (tuple, list)):
return tuple(device_util.resolve(d) for d in devices)
elif isinstance(devices, value_lib.DistributedValues):
return devices._devices
elif isinstance(devices, ops.Tensor):
return (device_util.resolve(devices.device),)
return (device_util.resolve(devices),)
def _make_per_replica(values, devices, regroup=False):
devices = _get_devices(devices)
assert len(values) == len(devices)
# We simulate the result of regroup called on PerReplica which strips the
# PerReplica wrapper if it has only one value.
if len(values) == 1 and regroup:
with ops.device(devices[0]):
placed_v = array_ops.identity(values[0])
return placed_v
index = []
for d, v in zip(devices, values):
with ops.device(d):
placed_v = array_ops.identity(v)
index.append(placed_v)
return distribute_utils.regroup(index)
# pylint: disable=g-doc-args,g-doc-return-or-yield
def _fake_mirrored(value, devices):
"""Create a faked Mirrored object for testing.
All components of the returned Mirrored have the same objects, which is not
true in reality.
"""
devices = _get_devices(devices)
values = []
for d in devices:
with ops.device(d):
values.append(array_ops.identity(value))
return distribute_utils.regroup(
values,
wrap_class=value_lib.Mirrored)
def _make_indexed_slices(values, indices, dense_shape, device):
with ops.device(device):
tensor = ops.IndexedSlices(
values=constant_op.constant(values),
indices=constant_op.constant(indices),
dense_shape=constant_op.constant(dense_shape))
return tensor
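# For reference, _make_indexed_slices([[1., 2.]], [1], [5, 2], device) describes
# a sparse 5x2 tensor whose row 1 is [1., 2.] and whose other rows are zero.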
def _make_mirrored_indexed_slices(devices, values, indices, dense_shape):
values = [_make_indexed_slices(values, indices, dense_shape, d)
for d in devices]
return distribute_utils.regroup(
values,
wrap_class=value_lib.Mirrored)
_cpu_device = "/device:CPU:0"
class CrossDeviceOpsTestBase(test.TestCase, parameterized.TestCase):
def _assert_indexed_slices_equal(self, left, right):
self.assertIsInstance(left, ops.IndexedSlices)
self.assertIsInstance(right, ops.IndexedSlices)
self.assertEqual(
device_util.resolve(left.device), device_util.resolve(right.device))
self.assertAllEqual(
self.evaluate(ops.convert_to_tensor(left)),
self.evaluate(ops.convert_to_tensor(right)))
def _assert_mirrored_equal(self,
left_list,
right_list,
sess=None,
run_options=None):
if not isinstance(left_list, list):
left_list, right_list = [left_list], [right_list]
for left, right in zip(left_list, right_list):
self.assertEqual(type(left), type(right))
# Convert Mirrored to a list since sess.run(Mirrored) only returns one
# value.
if isinstance(left, value_lib.Mirrored):
left, right = left.values, right.values
else:
# When there's only one replica Mirrored is automatically unwrapped.
left, right = [left], [right]
for left_value, right_value in zip(left, right):
self.assertEqual(
device_util.resolve(left_value.device),
device_util.resolve(right_value.device))
# Densify IndexedSlices.
left = [ops.convert_to_tensor(v) for v in left]
right = [ops.convert_to_tensor(v) for v in right]
if not context.executing_eagerly():
left, right = sess.run((left, right), options=run_options)
for left_value, right_value in zip(left, right):
self.assertAllEqual(left_value, right_value)
def _testReductionAndBroadcast(self, cross_device_ops, devices):
if context.num_gpus() < sum(1 for d in devices if "GPU" in d.upper()):
self.skipTest("Not enough GPUs")
with self.cached_session() as sess:
values = [constant_op.constant(float(d)) for d in range(len(devices))]
per_replica = _make_per_replica(values, devices)
mean = (len(devices) - 1.) / 2.
values_2 = [constant_op.constant(d + 1.0) for d in range(len(devices))]
per_replica_2 = _make_per_replica(values_2, devices)
mean_2 = mean + 1.
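# With two devices the per-replica values are 0. and 1., so mean == 0.5 and
# mean_2 == 1.5; the SUM assertions below use mean * len(devices) and
# mean_2 * len(devices) accordingly.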
destination_mirrored = _fake_mirrored(1., devices)
destination_different = _fake_mirrored(1.,
device_util.resolve(_cpu_device))
destination_str = device_util.resolve(_cpu_device)
all_destinations = [
destination_mirrored,
destination_different,
destination_str,
]
# test reduce()
for destinations in all_destinations:
self._assert_mirrored_equal(
cross_device_ops.reduce(
reduce_util.ReduceOp.MEAN,
per_replica,
destinations=destinations), _fake_mirrored(mean, destinations),
sess)
self._assert_mirrored_equal(
cross_device_ops.reduce(
reduce_util.ReduceOp.MEAN,
per_replica_2,
destinations=destinations),
_fake_mirrored(mean_2, destinations), sess)
self._assert_mirrored_equal(
cross_device_ops.reduce(
reduce_util.ReduceOp.SUM,
per_replica,
destinations=destinations),
_fake_mirrored(mean * len(devices), destinations), sess)
self._assert_mirrored_equal(
cross_device_ops.reduce(
reduce_util.ReduceOp.SUM,
per_replica_2,
destinations=destinations),
_fake_mirrored(mean_2 * len(devices), destinations), sess)
# test batch_reduce()
for d1, d2 in itertools.product(all_destinations, all_destinations):
self._assert_mirrored_equal(
cross_device_ops.batch_reduce(reduce_util.ReduceOp.MEAN,
[(per_replica, d1),
(per_replica_2, d2)]),
[_fake_mirrored(mean, d1),
_fake_mirrored(mean_2, d2)], sess)
self._assert_mirrored_equal(
cross_device_ops.batch_reduce(reduce_util.ReduceOp.SUM,
[(per_replica, d1),
(per_replica_2, d2)]),
[
_fake_mirrored(mean * len(devices), d1),
_fake_mirrored(mean_2 * len(devices), d2)
], sess)
# test broadcast()
for destinations in all_destinations:
self._assert_mirrored_equal(
cross_device_ops.broadcast(constant_op.constant(1.), destinations),
_fake_mirrored(1., destinations), sess)
def _testIndexedSlicesAllReduce(self, devices, cross_device_ops_instance,
reduce_op, batch_reduce):
with self.cached_session() as sess:
dense_shape = [5, 2]
t0 = _make_indexed_slices([[1., 2.]], [1], dense_shape, devices[0])
t1 = _make_indexed_slices([[3., 4.], [5., 6.]], [1, 3], dense_shape,
devices[1])
per_replica = value_lib.PerReplica((t0, t1))
if batch_reduce:
result = cross_device_ops_instance.batch_reduce(
reduce_op, [(per_replica, per_replica)])
else:
result = cross_device_ops_instance.reduce(reduce_op, per_replica,
per_replica)
total_indices_with_dups = [1, 1, 3]
total_indices_without_dups = [1, 3]
if reduce_op == reduce_util.ReduceOp.SUM:
total_values_with_dups = [[1., 2.], [3., 4.], [5., 6.]]
total_values_without_dups = [[4., 6.], [5., 6.]]
else:
assert reduce_op == reduce_util.ReduceOp.MEAN
total_values_with_dups = [[0.5, 1.], [1.5, 2.], [2.5, 3.]]
total_values_without_dups = [[2., 3.], [2.5, 3.]]
total_mirrored_with_dups = _make_mirrored_indexed_slices(
devices, total_values_with_dups, total_indices_with_dups, dense_shape)
total_mirrored_without_dups = _make_mirrored_indexed_slices(
devices, total_values_without_dups, total_indices_without_dups,
dense_shape)
# Test that the result is semantically equal to both the concatenated
# IndexedSlices, as well as when the duplicate indices are summed up.
if batch_reduce:
total_mirrored_with_dups = [total_mirrored_with_dups]
total_mirrored_without_dups = [total_mirrored_without_dups]
self._assert_mirrored_equal(total_mirrored_with_dups, result, sess)
self._assert_mirrored_equal(total_mirrored_without_dups, result, sess)
class SingleWorkerCrossDeviceOpsTest(CrossDeviceOpsTestBase):
reduction_to_one_combinations = combinations.combine(
cross_device_ops=[
combinations.NamedObject("DefaultReductionToOneDevice",
cross_device_ops_lib.ReductionToOneDevice()),
combinations.NamedObject(
"ReductionToCPUDeviceCrossDeviceOps",
cross_device_ops_lib.ReductionToOneDevice(
reduce_to_device=_cpu_device)),
combinations.NamedObject(
"AccumulateNCrossDeviceOp",
cross_device_ops_lib.ReductionToOneDevice(
accumulation_fn=math_ops.add_n)),
],
devices=[
["/cpu:0"],
["/cpu:0", "/gpu:0"],
["/gpu:0", "/gpu:1"],
],
mode=["graph", "eager"])
allreduce_combinations = combinations.combine(
cross_device_ops=[
combinations.NamedObject(
"AllReduce",
cross_device_ops_lib.AllReduceCrossDeviceOps("nccl", 1)),
combinations.NamedObject(
"AllReduceNoGradientRepacking",
cross_device_ops_lib.AllReduceCrossDeviceOps("nccl", 0)),
combinations.NamedObject("NcclAllReduce",
cross_device_ops_lib.NcclAllReduce()),
combinations.NamedObject(
"HierarchicalCopy",
cross_device_ops_lib.HierarchicalCopyAllReduce(8)),
],
devices=[
["/gpu:0", "/gpu:1"],
],
mode=["graph", "eager"])
@combinations.generate(reduction_to_one_combinations + allreduce_combinations)
def testReductionAndBroadcast(self, cross_device_ops, devices):
if isinstance(
cross_device_ops._obj, # pylint: disable=protected-access
cross_device_ops_lib.AllReduceCrossDeviceOps
) and context.executing_eagerly():
self.skipTest("b/149881884")
self._testReductionAndBroadcast(cross_device_ops, devices)
def testChooseAlgorithm(self):
# Do not use NCCL if there is any CPU device.
self.assertIsInstance(
cross_device_ops_lib.choose_the_best(["/cpu:0"]),
cross_device_ops_lib.ReductionToOneDevice)
# Do not use NCCL if the requested device is not visible to TensorFlow.
# TODO(yuefengz): make `choose_the_best` work with device strings
# self.assertIsInstance(
# cross_device_ops_lib.choose_the_best(["/gpu:100"]),
# cross_device_ops_lib.ReductionToOneDevice)
if context.num_gpus() < 1:
return
devices = ["/gpu:0"]
def mock_get_registered_kernels_for_op(op):
if op == "NcclAllReduce":
return [object]
else:
return []
# Use nccl if nccl kernel is found.
with test.mock.patch.object(kernels, "get_registered_kernels_for_op",
mock_get_registered_kernels_for_op):
self.assertIsInstance(
cross_device_ops_lib.choose_the_best(devices),
cross_device_ops_lib.NcclAllReduce)
# Do not use NCCL if no NCCL kernel is found.
with test.mock.patch.object(kernels,
"get_registered_kernels_for_op", lambda _: []):
self.assertIsInstance(
cross_device_ops_lib.choose_the_best(devices),
cross_device_ops_lib.ReductionToOneDevice)
@combinations.generate(combinations.combine(
mode=["graph", "eager"],
required_gpus=1))
def testSimpleReduceWithIndexedSlices(self):
devices = ["/cpu:0", "/gpu:0"]
t0 = _make_indexed_slices([[1., 2.]], [1], [5, 2], devices[0])
t1 = _make_indexed_slices([[3., 4.], [5., 6.]], [1, 3], [5, 2], devices[1])
per_replica = value_lib.PerReplica((t0, t1))
result = cross_device_ops_lib._simple_reduce(
per_replica, devices[0], math_ops.add_n, reduce_util.ReduceOp.SUM)
# Test that the result is semantically equal to both the concatenated
# IndexedSlices with and without duplicate indices.
total_with_dups = _make_indexed_slices(
[[1., 2.], [3., 4.], [5., 6.]], [1, 1, 3], [5, 2], devices[0])
total_without_dups = _make_indexed_slices(
[[4., 6.], [5., 6.]], [1, 3], [5, 2], devices[0])
self._assert_indexed_slices_equal(total_with_dups, result)
self._assert_indexed_slices_equal(total_without_dups, result)
@combinations.generate(
combinations.combine(
cross_device_ops_instance=[
combinations.NamedObject(
"ReductionToOneDevice",
cross_device_ops_lib.ReductionToOneDevice()),
combinations.NamedObject(
"AllReduceCrossDeviceOps",
cross_device_ops_lib.AllReduceCrossDeviceOps())
],
reduce_op=[reduce_util.ReduceOp.SUM, reduce_util.ReduceOp.MEAN],
batch_reduce=[True, False],
mode=["graph", "eager"],
required_gpus=1))
def testIndexedSlicesAllReduce(self, cross_device_ops_instance, reduce_op,
batch_reduce):
devices = ["/cpu:0", "/gpu:0"]
self._testIndexedSlicesAllReduce(devices, cross_device_ops_instance,
reduce_op, batch_reduce)
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
cross_device_ops_instance=[
combinations.NamedObject(
"ReductionToOneDevice",
cross_device_ops_lib.ReductionToOneDevice()),
combinations.NamedObject(
"AllReduceCrossDeviceOps",
cross_device_ops_lib.AllReduceCrossDeviceOps("ring"))
],
batch_reduce=[True, False],
mode=["graph", "eager"]))
def testReduceDistributedVariable(self, distribution,
cross_device_ops_instance, batch_reduce):
with distribution.scope():
v = variables.Variable(1.)
if batch_reduce:
result = cross_device_ops_instance.batch_reduce(reduce_util.ReduceOp.MEAN,
[(v, v)])[0]
else:
result = cross_device_ops_instance.reduce(reduce_util.ReduceOp.MEAN, v, v)
for v in result.values:
self.assertIsInstance(v, ops.Tensor)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(self.evaluate(result.values), [1.0, 1.0])
class MultiWorkerCrossDeviceOpsTest(multi_worker_test_base.MultiWorkerTestBase,
CrossDeviceOpsTestBase):
worker_devices = [
"/job:worker/replica:0/task:0", "/job:worker/replica:0/task:1"
]
multi_worker_allreduce_combinations = combinations.combine(
cross_device_ops=[
combinations.NamedObject(
"MultiWorkerAllReduce",
cross_device_ops_lib.MultiWorkerAllReduce(worker_devices, 2,
("pscpu/pscpu", 2, -1),
0)),
combinations.NamedObject(
"MultiWorkerAllReducePack",
cross_device_ops_lib.MultiWorkerAllReduce(worker_devices, 2,
("pscpu/pscpu", 2, -1),
1)),
combinations.NamedObject(
"MultiWorkerAllReduceMultipleSpecs",
cross_device_ops_lib.MultiWorkerAllReduce(
worker_devices, 2, [("pscpu/pscpu", 2, 100),
("xring", 2, -1)], 0)),
],
devices=[
[
"/job:worker/replica:0/task:0/device:CPU:0",
"/job:worker/replica:0/task:1/device:CPU:0"
],
[
"/job:worker/replica:0/task:0/device:GPU:0",
"/job:worker/replica:0/task:1/device:GPU:0"
],
[
"/job:worker/replica:0/task:0/device:GPU:0",
"/job:worker/replica:0/task:0/device:GPU:1",
"/job:worker/replica:0/task:1/device:GPU:0",
"/job:worker/replica:0/task:1/device:GPU:1"
],
],
mode=["graph"])
@combinations.generate(multi_worker_allreduce_combinations)
def testReductionAndBroadcast(self, cross_device_ops, devices):
# Mimic the default device of multi-worker strategies.
with ops.device("/job:worker/replica:0/task:0"):
self._testReductionAndBroadcast(cross_device_ops, devices)
NUM_WORKERS = 3
CollectiveCommunication = cross_device_ops_lib.CollectiveCommunication
class CollectiveAllReduceTest(multi_worker_test_base.MultiWorkerTestBase,
CrossDeviceOpsTestBase):
collective_key_base = 100000
@classmethod
def setUpClass(cls):
"""Create a local cluster with 3 workers."""
cls._cluster_spec = multi_worker_test_base.create_in_process_cluster(
num_workers=NUM_WORKERS, num_ps=0)
def setUp(self):
super(CollectiveAllReduceTest, self).setUp()
# Reusing keys is not well supported, so we have to give a different
# collective key base for different tests.
CollectiveAllReduceTest.collective_key_base += 100000
def _get_test_objects(self,
task_type,
task_id,
num_gpus=0,
communication=CollectiveCommunication.AUTO,
use_strategy_object=False,
local_mode=False):
collective_keys = cross_device_utils.CollectiveKeys(
group_key_start=10 + CollectiveAllReduceTest.collective_key_base,
op_instance_key_start=100 + CollectiveAllReduceTest.collective_key_base,
variable_instance_key_start=10000 +
CollectiveAllReduceTest.collective_key_base)
if local_mode:
if num_gpus:
devices = ["/device:GPU:%d" % i for i in range(num_gpus)]
else:
devices = ["/device:CPU:0"]
if use_strategy_object:
strategy = (
collective_all_reduce_strategy.CollectiveAllReduceStrategy
._from_local_devices(devices, communication=communication)) # pylint: disable=protected-access
strategy.extended._collective_keys = collective_keys
strategy.extended._cross_device_ops._collective_keys = collective_keys
strategy.extended._host_cross_device_ops._collective_keys = (
collective_keys)
return strategy, devices, ""
else:
collective_all_reduce_ops = cross_device_ops_lib.CollectiveAllReduce(
devices=devices,
group_size=len(devices),
collective_keys=collective_keys,
communication=communication)
return collective_all_reduce_ops, devices, ""
else:
# NCCL requires physical GPUs for every replica, which we can't do with
# a simulated multi-host setup right now.
assert communication != CollectiveCommunication.NCCL
if num_gpus:
devices = [
"/job:%s/task:%d/replica:0/device:GPU:%d" % (task_type, task_id, i)
for i in range(num_gpus)
]
else:
devices = [
"/job:%s/task:%d/replica:0/device:CPU:0" % (task_type, task_id)
]
if use_strategy_object:
resolver = cluster_resolver.SimpleClusterResolver(
cluster_spec=multi_worker_util.normalize_cluster_spec(
self._cluster_spec),
task_type=task_type,
task_id=task_id,
num_accelerators={"GPU": num_gpus})
strategy = collective_all_reduce_strategy.CollectiveAllReduceStrategy(
cluster_resolver=resolver, communication=communication)
strategy.extended._collective_keys = collective_keys
strategy.extended._cross_device_ops._collective_keys = collective_keys
return (strategy, devices,
"grpc://" + self._cluster_spec[task_type][task_id])
else:
collective_all_reduce_ops = cross_device_ops_lib.CollectiveAllReduce(
devices=devices,
group_size=len(devices) * NUM_WORKERS,
collective_keys=collective_keys,
communication=communication)
return (collective_all_reduce_ops, devices,
"grpc://" + self._cluster_spec[task_type][task_id])
def _assert_mirrored_equal(self, left_list, right_list, sess=None):
if context.executing_eagerly():
run_options = None
else:
# TODO(b/151025792): figure out why missing run options would make the
# test flaky and whether this is a problem in TF 2.
run_options = config_pb2.RunOptions()
run_options.experimental.collective_graph_key = 5
super(CollectiveAllReduceTest, self)._assert_mirrored_equal(
left_list, right_list, sess, run_options=run_options)
def _test_reduction(self,
task_type,
task_id,
num_gpus,
communication,
use_strategy_object=False,
local_mode=False,
hints=None):
collective_all_reduce, devices, master_target = self._get_test_objects(
task_type,
task_id,
num_gpus,
communication=communication,
use_strategy_object=use_strategy_object,
local_mode=local_mode)
if local_mode:
num_workers = 1
worker_device = None
else:
num_workers = len(self._cluster_spec.get("chief", [])) + len(
self._cluster_spec.get("worker", []))
worker_device = "/job:%s/task:%d" % (task_type, task_id)
def _reduce(test_object, reduce_op, per_replica, destinations):
if use_strategy_object:
with test_object.scope():
return test_object.extended.reduce_to(reduce_op, per_replica,
destinations, hints)
else:
return test_object.reduce(reduce_op, per_replica, destinations, hints)
def _batch_reduce(test_object, reduce_op, value_destination_pairs):
if use_strategy_object:
with test_object.scope():
return test_object.extended.batch_reduce_to(reduce_op,
value_destination_pairs,
hints)
else:
return test_object.batch_reduce(reduce_op, value_destination_pairs,
hints)
with ops.Graph().as_default(), \
ops.device(worker_device), \
self.cached_session(target=master_target) as sess:
# Collective ops don't support scalar tensors, so we have to construct
# 1-d tensors.
values = [constant_op.constant([float(d)]) for d in range(len(devices))]
per_replica = _make_per_replica(values, devices)
mean = np.array([(len(devices) - 1.) / 2.])
values_2 = [constant_op.constant([d + 1.0]) for d in range(len(devices))]
per_replica_2 = _make_per_replica(values_2, devices)
mean_2 = np.array([mean[0] + 1.])
destination_mirrored = _fake_mirrored(1., devices)
destination_different = _fake_mirrored(1., _cpu_device)
destination_str = _cpu_device
all_destinations = [
destination_different, destination_mirrored, destination_str
]
# test reduce()
for destinations in all_destinations:
self._assert_mirrored_equal(
_reduce(
collective_all_reduce,
reduce_util.ReduceOp.MEAN,
per_replica,
destinations=destinations), _fake_mirrored(mean, destinations),
sess)
self._assert_mirrored_equal(
_reduce(
collective_all_reduce,
reduce_util.ReduceOp.MEAN,
per_replica_2,
destinations=destinations),
_fake_mirrored(mean_2, destinations), sess)
self._assert_mirrored_equal(
_reduce(
collective_all_reduce,
reduce_util.ReduceOp.SUM,
per_replica,
destinations=destinations),
_fake_mirrored(mean * len(devices) * num_workers, destinations),
sess)
self._assert_mirrored_equal(
_reduce(
collective_all_reduce,
reduce_util.ReduceOp.SUM,
per_replica_2,
destinations=destinations),
_fake_mirrored(mean_2 * len(devices) * num_workers, destinations),
sess)
# test batch_reduce()
for d1, d2 in itertools.product(all_destinations, all_destinations):
self._assert_mirrored_equal(
_batch_reduce(collective_all_reduce, reduce_util.ReduceOp.MEAN,
[(per_replica, d1), (per_replica_2, d2)]),
[_fake_mirrored(mean, d1),
_fake_mirrored(mean_2, d2)], sess)
self._assert_mirrored_equal(
_batch_reduce(collective_all_reduce, reduce_util.ReduceOp.SUM,
[(per_replica, d1), (per_replica_2, d2)]),
[
_fake_mirrored(mean * len(devices) * num_workers, d1),
_fake_mirrored(mean_2 * len(devices) * num_workers, d2)
], sess)
def _get_indexed_slices(self,
devices,
start_i,
variable_length,
as_per_replica=True):
dense_shape = [10, 2]
values = ([[1., 2.]], [[3., 4.]], [[2., 1.]], [[0., 0.]], [[3., 1.]],
[[2., 1.]])
indices = ([1], [2], [3], [4], [5], [6])
# values and indices that have variable lengths.
vl_values = ([[1., 2.], [3., 4.]], [[3., 4.]], [[2., 1.]], [[0., 0.]],
[[3., 1.], [2., 1.]], [[2., 1.]])
vl_indices = ([1, 2], [2], [3], [4], [5, 6], [6])
indexed_slices = []
for i, d in enumerate(devices):
idx = i + start_i
indexed_slices.append(
_make_indexed_slices(
vl_values[idx] if variable_length else values[idx],
vl_indices[idx] if variable_length else indices[idx], dense_shape,
d))
if as_per_replica:
per_replica = value_lib.PerReplica(indexed_slices)
return per_replica
else:
return indexed_slices
def _test_reduce_indexed_slices(self,
task_type,
task_id,
num_gpus,
communication,
batch_reduce,
variable_length,
local_mode=False):
collective_all_reduce, devices, master_target = self._get_test_objects(
task_type,
task_id,
num_gpus,
communication=communication,
local_mode=local_mode)
if local_mode:
num_workers = 1
worker_device = None
else:
num_workers = len(self._cluster_spec.get("chief", [])) + len(
self._cluster_spec.get("worker", []))
worker_device = "/job:%s/task:%d" % (task_type, task_id)
with ops.Graph().as_default(), \
ops.device(worker_device), \
self.cached_session(target=master_target) as sess:
per_replica = self._get_indexed_slices(devices,
(task_id or 0) * max(num_gpus, 1),
variable_length)
if batch_reduce:
result = collective_all_reduce.batch_reduce(
reduce_util.ReduceOp.SUM, [(per_replica, per_replica)])[0]
else:
result = collective_all_reduce.reduce(reduce_util.ReduceOp.SUM,
per_replica, per_replica)
if num_gpus > 1:
self.assertIsInstance(result, value_lib.Mirrored)
run_options = config_pb2.RunOptions()
run_options.experimental.collective_graph_key = 7
if num_gpus > 1:
result = sess.run([ops.convert_to_tensor(v) for v in result.values],
options=run_options)[0]
else:
result = sess.run(ops.convert_to_tensor(result), options=run_options)
# Reduce the same indexed slices on CPU locally as our expected results.
devices_cpu = [(worker_device or "") + "/device:CPU:0"] * (
max(num_gpus, 1) * num_workers)
per_replica_on_cpu = self._get_indexed_slices(
devices_cpu, 0, variable_length, as_per_replica=False)
expected_result = cross_device_utils.aggregate_tensors_or_indexed_slices(
per_replica_on_cpu)
expected_result = sess.run(ops.convert_to_tensor(expected_result))
self.assertAllEqual(expected_result, result)
@combinations.generate(
combinations.combine(
mode=["graph"],
required_gpus=[0, 1, 2],
use_strategy_object=[True, False],
bytes_per_pack=[0, 1, 4]))
def testReductionDistributed(self, required_gpus, use_strategy_object,
bytes_per_pack):
hints = collective_util.Hints(bytes_per_pack=bytes_per_pack)
self._run_between_graph_clients(
self._test_reduction,
self._cluster_spec,
required_gpus,
communication=CollectiveCommunication.RING,
use_strategy_object=use_strategy_object,
hints=hints)
@combinations.generate(
combinations.combine(
mode=["graph"],
required_gpus=[0, 1, 2],
variable_length=[True, False]))
def testReduceIndexedSlicesDistributed(self, required_gpus, variable_length):
self._run_between_graph_clients(
self._test_reduce_indexed_slices,
self._cluster_spec,
required_gpus,
communication=CollectiveCommunication.RING,
batch_reduce=True,
variable_length=variable_length)
# Collective ops don't support a strategy with only one device.
@combinations.generate(
combinations.combine(
mode=["graph"],
required_gpus=2,
communication=[
CollectiveCommunication.NCCL, CollectiveCommunication.RING
],
use_strategy_object=[True, False]))
def testReductionLocal(self, required_gpus, communication,
use_strategy_object):
self._test_reduction(
None,
None,
required_gpus,
communication=communication,
use_strategy_object=use_strategy_object,
local_mode=True)
@combinations.generate(
combinations.combine(
mode=["graph"],
required_gpus=2,
batch_reduce=[True, False],
variable_length=[True, False],
communication=[
CollectiveCommunication.NCCL, CollectiveCommunication.RING
]))
def testReduceIndexedSlicesLocal(self, required_gpus, batch_reduce,
variable_length, communication):
self._test_reduce_indexed_slices(
None,
None,
required_gpus,
communication=communication,
batch_reduce=batch_reduce,
variable_length=variable_length,
local_mode=True)
@combinations.generate(
combinations.combine(
required_gpus=2,
mode="eager",
communication=[
CollectiveCommunication.NCCL, CollectiveCommunication.RING
]))
def testEagerMultiThread(self, communication):
collective, devices, _ = self._get_test_objects(
None,
None,
num_gpus=2,
communication=communication,
use_strategy_object=False,
local_mode=True)
# We would like to simulate the following sequence:
# thread-0 device0 device1
# thread-1 device0 device1
# If the kernel launch sequence is as-is the program will deadlock since
# NCCL requires the launch order to be same on each device.
v0 = _make_per_replica([1.0 for _ in devices], devices)
v1 = _make_per_replica([2.0 for _ in devices], devices)
# Add a delay to collective_ops.all_reduce according to the input tensor's
# index in `sequence`.
sequence = [v0.values[0], v1.values[0], v1.values[1], v0.values[1]]
all_reduce = collective_ops.all_reduce
def delayed_all_reduce(input_tensor, *args, **kwargs):
for idx, v in enumerate(sequence):
if input_tensor is v:
time.sleep(idx)
break
return all_reduce(input_tensor, *args, **kwargs)
with test.mock.patch.object(collective_ops, "all_reduce",
delayed_all_reduce):
# We only use NCCL for batch reduce with two or more values, so we use two
# values here.
def thread_fn():
reduced = collective.batch_reduce(reduce_util.ReduceOp.SUM, [(v0, v0),
(v0, v0)])
self.assertAllEqual(reduced[0].values, [2.0, 2.0])
self.assertAllEqual(reduced[1].values, [2.0, 2.0])
t = threading.Thread(target=thread_fn)
t.start()
reduced = collective.batch_reduce(reduce_util.ReduceOp.SUM, [(v1, v1),
(v1, v1)])
self.assertAllEqual(reduced[0].values, [4.0, 4.0])
self.assertAllEqual(reduced[1].values, [4.0, 4.0])
t.join()
@combinations.generate(
combinations.combine(
required_gpus=2,
mode="eager",
communication=[
CollectiveCommunication.NCCL, CollectiveCommunication.RING
]))
def testInputsAreFunctionArgs(self, communication):
# Function inputs don't have device placement.
hints = collective_util.Hints(bytes_per_pack=1)
collective, devices, _ = self._get_test_objects(
None,
None,
num_gpus=2,
communication=communication,
use_strategy_object=False,
local_mode=True)
devices = [device_util.canonicalize(d) for d in devices]
@def_function.function
def reduce_fn(v):
self.assertEqual(v.values[0].device, "")
self.assertEqual(v.values[1].device, "")
# We only use NCCL for batch reduce with two or more values, so we use two
# values here.
reduced = collective.batch_reduce(
reduce_util.ReduceOp.SUM, [(v, v), (v, v)], experimental_hints=hints)
self.assertEqual(reduced[0].values[0].device, devices[0])
self.assertEqual(reduced[0].values[1].device, devices[1])
self.assertEqual(reduced[1].values[0].device, devices[0])
self.assertEqual(reduced[1].values[1].device, devices[1])
# Returning Mirrored only evaluates the primary value, which causes
      # hanging.
return [reduced[0].values, reduced[1].values]
v = _make_per_replica([1.0, 2.0], devices)
reduced = reduce_fn(v)
self.assertAllEqual(self.evaluate(reduced), [[3.0, 3.0], [3.0, 3.0]])
@combinations.generate(
combinations.combine(
required_gpus=[0, 1],
mode="eager",
communication=[CollectiveCommunication.RING]))
def testTimeoutReduceDense(self, communication, required_gpus):
hints = collective_util.Hints(timeout_seconds=1)
collective, devices, _ = self._get_test_objects(
"worker",
0,
num_gpus=required_gpus,
communication=communication,
use_strategy_object=False)
remote.connect_to_cluster(
multi_worker_util.normalize_cluster_spec(self._cluster_spec),
protocol="grpc")
devices = [device_util.canonicalize(d) for d in devices]
v = _make_per_replica([1.0], devices)
@def_function.function
def reduce_dense():
collective.reduce(reduce_util.ReduceOp.SUM, v, v, hints)
# The collective should time out because we only launch it on worker-0,
    # while there are three workers in total.
with self.assertRaises(errors.DeadlineExceededError):
reduce_dense()
# Reset since collective failures poison the context.
context._reset_context() # pylint: disable=protected-access
@combinations.generate(
combinations.combine(
required_gpus=[0, 1],
mode="eager",
communication=[CollectiveCommunication.RING]))
def testTimeoutBatchReduceDense(self, communication, required_gpus):
hints = collective_util.Hints(timeout_seconds=1)
collective, devices, _ = self._get_test_objects(
"worker",
0,
num_gpus=required_gpus,
communication=communication,
use_strategy_object=False)
remote.connect_to_cluster(
multi_worker_util.normalize_cluster_spec(self._cluster_spec),
protocol="grpc")
devices = [device_util.canonicalize(d) for d in devices]
v = _make_per_replica([1.0], devices)
@def_function.function
def batch_reduce_dense():
collective.batch_reduce(reduce_util.ReduceOp.SUM, [(v, v), (v, v)], hints)
# The collective should time out because we only launch it on worker-0,
    # while there are three workers in total.
with self.assertRaises(errors.DeadlineExceededError):
batch_reduce_dense()
# Reset since collective failures poison the context.
context._reset_context() # pylint: disable=protected-access
@combinations.generate(
combinations.combine(
required_gpus=[0, 1],
mode="eager",
communication=[CollectiveCommunication.RING]))
def testTimeoutReduceSparse(self, communication, required_gpus):
hints = collective_util.Hints(timeout_seconds=1)
collective, devices, _ = self._get_test_objects(
"worker",
0,
num_gpus=required_gpus,
communication=communication,
use_strategy_object=False)
remote.connect_to_cluster(
multi_worker_util.normalize_cluster_spec(self._cluster_spec),
protocol="grpc")
devices = [device_util.canonicalize(d) for d in devices]
v = value_lib.PerReplica([
_make_indexed_slices([[4., 6.], [5., 6.]], [1, 3], [5, 2], devices[0])
])
@def_function.function
def reduce_sparse():
collective.reduce(reduce_util.ReduceOp.SUM, v, v, hints)
# The collective should time out because we only launch it on worker-0,
    # while there are three workers in total.
with self.assertRaises(errors.DeadlineExceededError):
reduce_sparse()
# Reset since collective failures poison the context.
context._reset_context() # pylint: disable=protected-access
@combinations.generate(
combinations.combine(
required_gpus=[0, 1],
mode="eager",
communication=[CollectiveCommunication.RING]))
def testTimeoutBatchReduceSparse(self, communication, required_gpus):
hints = collective_util.Hints(timeout_seconds=1)
collective, devices, _ = self._get_test_objects(
"worker",
0,
num_gpus=required_gpus,
communication=communication,
use_strategy_object=False)
remote.connect_to_cluster(
multi_worker_util.normalize_cluster_spec(self._cluster_spec),
protocol="grpc")
devices = [device_util.canonicalize(d) for d in devices]
v = value_lib.PerReplica([
_make_indexed_slices([[4., 6.], [5., 6.]], [1, 3], [5, 2], devices[0])
])
@def_function.function
def batch_reduce_sparse():
collective.batch_reduce(reduce_util.ReduceOp.SUM, [(v, v), (v, v)], hints)
# The collective should time out because we only launch it on worker-0,
    # while there are three workers in total.
with self.assertRaises(errors.DeadlineExceededError):
batch_reduce_sparse()
# Reset since collective failures poison the context.
context._reset_context() # pylint: disable=protected-access
if __name__ == "__main__":
# Set default inter op thread pool size to one to ensure we don't exhaust the
# thread pool with the additional executors to run collectives in eager.
os.environ["TF_NUM_INTEROP_THREADS"] = "1"
combinations.main()
|
ncbidownload.py
|
#!/usr/bin/env python3
import pandas
from Bio import Entrez
from accessoryfunctions.accessoryFunctions import *
__author__ = 'adamkoziol'
Entrez.email = 'adam.koziol@inspection.gc.ca'
class Download(object):
def excelparse(self):
"""
Parses input excel file, and creates objects with headers as keys, and cell data as values for each row
"""
printtime('Loading accessions from file', self.start)
# A dictionary to store the parsed excel file in a more readable format
nesteddictionary = dict()
# Use pandas to read in the excel file, and subsequently convert the pandas data frame to a dictionary
# (.to_dict()). Only read the first fourteen columns (parse_cols=range(14)), as later columns are not
# relevant to this script
dictionary = pandas.read_excel(self.file).to_dict()
# Iterate through the dictionary - each header from the excel file
for header in dictionary:
# Sample is the primary key, and value is the value of the cell for that primary key + header combination
for sample, value in dictionary[header].items():
# Update the dictionary with the new data
try:
nesteddictionary[sample].update({header: value})
# Create the nested dictionary if it hasn't been created yet
except KeyError:
nesteddictionary[sample] = dict()
nesteddictionary[sample].update({header: value})
# Create objects for each of the samples, rather than using a nested dictionary. It may have been possible to
# skip the creation of the nested dictionary, and create the objects from the original dictionary, but there
# seemed to be too many possible places for something to go wrong
for line in nesteddictionary:
# Create an object for each sample
metadata = MetadataObject()
# Set the name of the metadata to be the primary key for the sample from the excel file
metadata.name = line
# Find the headers and values for every sample
for header, value in nesteddictionary[line].items():
# Create each attribute - use the header (in lowercase, and spaces removed) as the attribute name,
# and the value as the attribute value
setattr(metadata, header.replace(' ', '').lower(), value)
# Append the object to the list of objects
self.metadata.append(metadata)
self.download()
def download(self):
"""
Download Genbank files corresponding to the supplied accession number from NCBI
"""
printtime('Downloading and formatting Genbank files from NCBI', self.start)
from threading import Thread
# Create and start threads
for _ in self.metadata:
# Send the threads to the appropriate destination function
threads = Thread(target=self.downloadthreads, args=())
            # Set daemon to True so the worker threads exit when the main thread does
threads.setDaemon(True)
# Start the threading
threads.start()
for sample in self.metadata:
# Create the file name
sample.genbankfile = os.path.join(self.genbankpath, sample.accession + '.gbk')
# Add the sample to the queue
self.queue.put(sample)
# Join the threads
self.queue.join()
# Parse the Genbank files
self.fastaparse()
def downloadthreads(self):
from time import sleep
while True:
sample = self.queue.get()
# Attempt to download up to ten times
for i in range(10):
try:
_ = os.stat(sample.genbankfile).st_size
zero = False
except FileNotFoundError:
zero = True
if zero or not os.path.isfile(sample.genbankfile):
# https://stackoverflow.com/a/2083996
while True:
try:
                            # Use the efetch utility to download the record in
                            # GenBank text format from the specified Entrez database
handle = Entrez.efetch(db="assembly",
id=sample.accession,
rettype="gb",
retmode="text")
# Write the record to file to keep from having to re-download the file if the
# script needs to be run multiple times
with open(sample.genbankfile, 'w') as genbankfile:
genbankfile.write(handle.read())
# Sleep in order to keep from overloading NCBI servers with too many requests
sleep(0.5)
except Exception:
continue
break
self.queue.task_done()
def fastaparse(self):
"""
Parse the Genbank files to extract the desired FASTA sequences in the correct orientation
"""
from Bio import SeqIO
printtime('Parsing Genbank files', self.start)
for sample in self.metadata:
# Create the name of the FASTA-formatted output file
sample.outputfile = os.path.join(self.sequencepath, '{}_{}.fasta'.format(sample.gene, sample.accession))
# Read in the Genbank record from disk
record = SeqIO.read(sample.genbankfile, 'genbank')
# Set the header to be simply 'gene_accession'
record.id = '{}_{}'.format(sample.gene, sample.accession)
record.name = ''
record.description = ''
# Extract the sequence desired from the whole sequence (as the start position provided is 1-based,
            # subtract one to make it correspond to the 0-based Python index)
record.seq = record.seq[sample.start - 1:sample.stop]
# If the reverse complement is required, change the sequence accordingly
if sample.reverse:
record.seq = record.seq.reverse_complement()
# Write the record to file if the file doesn't already exist
if not os.path.isfile(sample.outputfile):
with open(sample.outputfile, 'w') as out:
SeqIO.write(record, out, 'fasta')
def __init__(self, args):
from queue import Queue
self.path = args.path
self.sequencepath = os.path.join(args.sequencepath, '')
make_path(self.sequencepath)
self.genbankpath = os.path.join(self.path, 'genbank')
make_path(self.genbankpath)
self.file = os.path.join(self.path, args.file)
self.start = args.start
self.metadata = list()
self.queue = Queue(maxsize=5)
self.excelparse()
if __name__ == '__main__':
# Argument parser for user-inputted values, and a nifty help menu
from argparse import ArgumentParser
import time
# Parser for arguments
parser = ArgumentParser(description='Download sequences from Genbank')
parser.add_argument('path',
help='Specify input directory')
parser.add_argument('-s', '--sequencepath',
required=True,
help='Path to store downloaded sequence files')
parser.add_argument('-f', '--file',
                        help='Name of file with: "Class", "Gene", "Accession", "Start", "Stop", "Reverse" as '
                             'the headers')
# Get the arguments into an object
arguments = parser.parse_args()
# Define the start time
arguments.start = time.time()
# Run it
Download(arguments)
# Print a bold, green exit statement
print('\033[92m' + '\033[1m' + "\nElapsed Time: %0.2f seconds" % (time.time() - arguments.start) + '\033[0m')
|
host_state.py
|
"""
Global shared state about the host.
"""
import threading
import utils
import time
import sys
CLIENT_VERSION = '1.0.3'
class HostState(object):
def __init__(self):
self.host_ip = None
self.host_mac = None
self.gateway_ip = None
self.packet_processor = None
self.user_key = None
self.secret_salt = None
self.client_version = CLIENT_VERSION
self.persistent_mode = True # Always persistent to remove local Flask
self.raspberry_pi_mode = False # If true, app does not auto-quit upon UI inactivity
# The following objects might be modified concurrently.
self.lock = threading.Lock()
self.ip_mac_dict = {} # IP -> MAC
self.pending_dhcp_dict = {} # device_id -> hostname
self.pending_resolver_dict = {} # device_id -> resolver_ip
self.pending_dns_dict = {} # (device_id, domain) -> ip_set
self.pending_flow_dict = {} # flow_key -> flow_stats
self.pending_ua_dict = {} # device_id -> ua_set
self.pending_tls_dict_list = [] # List of tls_dict
self.pending_netdisco_dict = {} # device_id -> device_info_list
self.pending_syn_scan_dict = {} # device_id -> port_list
self.status_text = None
self.device_whitelist = []
self.has_consent = False
self.byte_count = 0
self.is_inspecting_traffic = True
self.fast_arp_scan = True # Persists for first 5 mins
self.last_ui_contact_ts = time.time() # ts of /is_inspecting_traffic
self.quit = False
self.spoof_arp = True
# Constantly checks for IP changes on this host
thread = threading.Thread(target=self.update_ip_thread)
thread.daemon = True
thread.start()
def set_ip_mac_mapping(self, ip, mac):
with self.lock:
self.ip_mac_dict[ip] = mac
def get_ip_mac_dict_copy(self):
with self.lock:
return dict(self.ip_mac_dict)
def is_inspecting(self):
with self.lock:
return self.is_inspecting_traffic
def update_ip_thread(self):
prev_gateway_ip = None
prev_host_ip = None
while True:
try:
self.gateway_ip, _, self.host_ip = utils.get_default_route()
except Exception:
pass
# Upon network changes, clear ARP cache.
if self.gateway_ip != prev_gateway_ip or \
self.host_ip != prev_host_ip:
with self.lock:
self.ip_mac_dict = {}
prev_gateway_ip = self.gateway_ip
prev_host_ip = self.host_ip
time.sleep(15)
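if __name__ == '__main__':
    # Minimal, illustrative usage sketch of the shared host state; the IP and
    # MAC values below are made up for demonstration purposes only.
    state = HostState()
    state.set_ip_mac_mapping('192.168.1.10', 'aa:bb:cc:dd:ee:ff')
    print(state.get_ip_mac_dict_copy())
    print('inspecting traffic:', state.is_inspecting())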
|
condition.py
|
import threading
import time
class AsyncTaskManager:
    def __init__(self, target, args=(), kwargs=None):
        self.target = target
        self.args = args
        # Avoid a shared mutable default argument
        self.kwargs = kwargs if kwargs is not None else {}
        self.condition = threading.Condition()
        self.result = None
        # Set the stop flag before starting the worker so the thread never
        # reads an attribute that does not exist yet
        self.stopped = False
        self.thread = threading.Thread(target=self.worker)
        self.thread.start()
def worker(self):
while True:
self.condition.acquire()
while self.result is not None:
if self.stopped:
self.condition.release()
return
self.condition.notify()
self.condition.wait()
self.condition.notify()
self.condition.release()
result = (self.target(*self.args, **self.kwargs),)
self.condition.acquire()
self.result = result
self.condition.notify()
self.condition.release()
def get_next(self):
self.condition.acquire()
while self.result is None:
self.condition.notify()
self.condition.wait()
result = self.result[0]
self.result = None
self.condition.notify()
self.condition.release()
return result
def stop(self):
while self.thread.is_alive():
self.condition.acquire()
self.stopped = True
self.condition.notify()
self.condition.release()
def task():
    print('begin sleeping...')
    time.sleep(1)
    print('end sleeping.')
    task.i += 1
    print('returns', task.i)
    return task.i
task.i = 0
if __name__ == '__main__':
    # `async` is a reserved keyword in Python 3, so use a different name
    manager = AsyncTaskManager(task)
    t = time.time()
    for i in range(5):
        ret = manager.get_next()
        # ret = task()
        print('got', ret)
        time.sleep(1)
    manager.stop()
    print(time.time() - t)
|
helpers.py
|
"""
:copyright: Copyright 2013-2017 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
tests.support.helpers
~~~~~~~~~~~~~~~~~~~~~
Test support helpers
"""
import base64
import errno
import fnmatch
import functools
import inspect
import logging
import os
import random
import shutil
import socket
import string
import subprocess
import sys
import tempfile
import textwrap
import threading
import time
import types
from contextlib import contextmanager
import pytest
import salt.ext.tornado.ioloop
import salt.ext.tornado.web
import salt.utils.files
import salt.utils.platform
import salt.utils.pycrypto
import salt.utils.stringutils
import salt.utils.versions
from salt.ext import six
from salt.ext.six.moves import builtins
from saltfactories.exceptions import ProcessFailed
from saltfactories.utils.ports import get_unused_localhost_port
from saltfactories.utils.processes.bases import ProcessResult
from tests.support.mock import patch
from tests.support.runtests import RUNTIME_VARS
from tests.support.sminion import create_sminion
from tests.support.unit import SkipTest, _id, skip
log = logging.getLogger(__name__)
HAS_SYMLINKS = None
PRE_PYTEST_SKIP_OR_NOT = "PRE_PYTEST_DONT_SKIP" not in os.environ
PRE_PYTEST_SKIP_REASON = (
"PRE PYTEST - This test was skipped before running under pytest"
)
PRE_PYTEST_SKIP = pytest.mark.skipif(
PRE_PYTEST_SKIP_OR_NOT, reason=PRE_PYTEST_SKIP_REASON
)
def no_symlinks():
"""
Check if git is installed and has symlinks enabled in the configuration.
"""
global HAS_SYMLINKS
if HAS_SYMLINKS is not None:
return not HAS_SYMLINKS
output = ""
try:
        output = subprocess.Popen(
            ["git", "config", "--get", "core.symlinks"],
            cwd=RUNTIME_VARS.TMP,
            stdout=subprocess.PIPE,
            # Decode the output so the string comparison below works on Python 3
            universal_newlines=True,
        ).communicate()[0]
except OSError as exc:
if exc.errno != errno.ENOENT:
raise
except subprocess.CalledProcessError:
# git returned non-zero status
pass
HAS_SYMLINKS = False
if output.strip() == "true":
HAS_SYMLINKS = True
return not HAS_SYMLINKS
def destructiveTest(caller):
"""
Mark a test case as a destructive test for example adding or removing users
from your system.
.. code-block:: python
class MyTestCase(TestCase):
@destructiveTest
def test_create_user(self):
pass
"""
# Late import
from tests.support.runtests import RUNTIME_VARS
if RUNTIME_VARS.PYTEST_SESSION:
setattr(caller, "__destructive_test__", True)
if os.environ.get("DESTRUCTIVE_TESTS", "False").lower() == "false":
reason = "Destructive tests are disabled"
if not isinstance(caller, type):
@functools.wraps(caller)
def skip_wrapper(*args, **kwargs):
raise SkipTest(reason)
caller = skip_wrapper
caller.__unittest_skip__ = True
caller.__unittest_skip_why__ = reason
return caller
def expensiveTest(caller):
"""
Mark a test case as an expensive test, for example, a test which can cost
money(Salt's cloud provider tests).
.. code-block:: python
class MyTestCase(TestCase):
@expensiveTest
def test_create_user(self):
pass
"""
# Late import
from tests.support.runtests import RUNTIME_VARS
if RUNTIME_VARS.PYTEST_SESSION:
setattr(caller, "__expensive_test__", True)
if os.environ.get("EXPENSIVE_TESTS", "False").lower() == "false":
reason = "Expensive tests are disabled"
if not isinstance(caller, type):
@functools.wraps(caller)
def skip_wrapper(*args, **kwargs):
raise SkipTest(reason)
caller = skip_wrapper
caller.__unittest_skip__ = True
caller.__unittest_skip_why__ = reason
return caller
def slowTest(caller):
"""
Mark a test case as a slow test.
.. code-block:: python
class MyTestCase(TestCase):
@slowTest
def test_that_takes_much_time(self):
pass
"""
# Late import
from tests.support.runtests import RUNTIME_VARS
if RUNTIME_VARS.PYTEST_SESSION:
setattr(caller, "__slow_test__", True)
if os.environ.get("SLOW_TESTS", "False").lower() == "false":
reason = "Slow tests are disabled"
if not isinstance(caller, type):
@functools.wraps(caller)
def skip_wrapper(*args, **kwargs):
raise SkipTest(reason)
caller = skip_wrapper
caller.__unittest_skip__ = True
caller.__unittest_skip_why__ = reason
return caller
def flaky(caller=None, condition=True, attempts=4):
"""
    Mark a test as flaky. The test will be attempted up to ``attempts`` times
    (four by default), looking for a successful run. After an immediate second
    try, it backs off quadratically (1s, then 4s, ...) between attempts.
.. code-block:: python
class MyTestCase(TestCase):
@flaky
def test_sometimes_works(self):
pass
"""
if caller is None:
return functools.partial(flaky, condition=condition, attempts=attempts)
if isinstance(condition, bool) and condition is False:
# Don't even decorate
return caller
elif callable(condition):
if condition() is False:
# Don't even decorate
return caller
if inspect.isclass(caller):
attrs = [n for n in dir(caller) if n.startswith("test_")]
for attrname in attrs:
try:
function = getattr(caller, attrname)
if not inspect.isfunction(function) and not inspect.ismethod(function):
continue
setattr(
caller,
attrname,
flaky(caller=function, condition=condition, attempts=attempts),
)
except Exception as exc: # pylint: disable=broad-except
log.exception(exc)
continue
return caller
@functools.wraps(caller)
def wrap(cls):
for attempt in range(0, attempts):
try:
if attempt > 0:
# Run through setUp again
# We only run it after the first iteration(>0) because the regular
# test runner will have already ran setUp the first time
setup = getattr(cls, "setUp", None)
if callable(setup):
setup()
return caller(cls)
except SkipTest as exc:
cls.skipTest(exc.args[0])
except Exception as exc: # pylint: disable=broad-except
exc_info = sys.exc_info()
if isinstance(exc, SkipTest):
six.reraise(*exc_info)
if not isinstance(exc, AssertionError) and log.isEnabledFor(
logging.DEBUG
):
log.exception(exc, exc_info=exc_info)
if attempt >= attempts - 1:
# We won't try to run tearDown once the attempts are exhausted
# because the regular test runner will do that for us
six.reraise(*exc_info)
# Run through tearDown again
teardown = getattr(cls, "tearDown", None)
if callable(teardown):
teardown()
backoff_time = attempt ** 2
log.info("Found Exception. Waiting %s seconds to retry.", backoff_time)
time.sleep(backoff_time)
return cls
return wrap
def requires_sshd_server(caller):
"""
Mark a test as requiring the tests SSH daemon running.
.. code-block:: python
class MyTestCase(TestCase):
@requiresSshdServer
def test_create_user(self):
pass
"""
if inspect.isclass(caller):
# We're decorating a class
old_setup = getattr(caller, "setUp", None)
def setUp(self, *args, **kwargs):
if os.environ.get("SSH_DAEMON_RUNNING", "False").lower() == "false":
self.skipTest("SSH tests are disabled")
if old_setup is not None:
old_setup(self, *args, **kwargs)
caller.setUp = setUp
return caller
# We're simply decorating functions
@functools.wraps(caller)
def wrap(cls):
if os.environ.get("SSH_DAEMON_RUNNING", "False").lower() == "false":
cls.skipTest("SSH tests are disabled")
return caller(cls)
return wrap
class RedirectStdStreams:
"""
Temporarily redirect system output to file like objects.
Default is to redirect to `os.devnull`, which just mutes output, `stdout`
and `stderr`.
"""
def __init__(self, stdout=None, stderr=None):
# Late import
import salt.utils.files
if stdout is None:
# pylint: disable=resource-leakage
stdout = salt.utils.files.fopen(os.devnull, "w")
# pylint: enable=resource-leakage
if stderr is None:
# pylint: disable=resource-leakage
stderr = salt.utils.files.fopen(os.devnull, "w")
# pylint: enable=resource-leakage
self.__stdout = stdout
self.__stderr = stderr
self.__redirected = False
self.patcher = patch.multiple(sys, stderr=self.__stderr, stdout=self.__stdout)
def __enter__(self):
self.redirect()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.unredirect()
def redirect(self):
self.old_stdout = sys.stdout
self.old_stdout.flush()
self.old_stderr = sys.stderr
self.old_stderr.flush()
self.patcher.start()
self.__redirected = True
def unredirect(self):
if not self.__redirected:
return
try:
self.__stdout.flush()
self.__stdout.close()
except ValueError:
# already closed?
pass
try:
self.__stderr.flush()
self.__stderr.close()
except ValueError:
# already closed?
pass
self.patcher.stop()
def flush(self):
if self.__redirected:
try:
self.__stdout.flush()
except Exception: # pylint: disable=broad-except
pass
try:
self.__stderr.flush()
except Exception: # pylint: disable=broad-except
pass
class TstSuiteLoggingHandler:
"""
Simple logging handler which can be used to test if certain logging
messages get emitted or not:
.. code-block:: python
with TstSuiteLoggingHandler() as handler:
# (...) Do what ever you wish here
handler.messages # here are the emitted log messages
"""
def __init__(self, level=0, format="%(levelname)s:%(message)s"):
self.level = level
self.format = format
self.activated = False
self.prev_logging_level = None
def activate(self):
class Handler(logging.Handler):
def __init__(self, level):
logging.Handler.__init__(self, level)
self.messages = []
def emit(self, record):
self.messages.append(self.format(record))
self.handler = Handler(self.level)
formatter = logging.Formatter(self.format)
self.handler.setFormatter(formatter)
logging.root.addHandler(self.handler)
self.activated = True
# Make sure we're running with the lowest logging level with our
# tests logging handler
current_logging_level = logging.root.getEffectiveLevel()
if current_logging_level > logging.DEBUG:
self.prev_logging_level = current_logging_level
logging.root.setLevel(0)
def deactivate(self):
if not self.activated:
return
logging.root.removeHandler(self.handler)
# Restore previous logging level if changed
if self.prev_logging_level is not None:
logging.root.setLevel(self.prev_logging_level)
@property
def messages(self):
if not self.activated:
return []
return self.handler.messages
def clear(self):
self.handler.messages = []
def __enter__(self):
self.activate()
return self
def __exit__(self, type, value, traceback):
self.deactivate()
self.activated = False
# Mimic some handler attributes and methods
@property
def lock(self):
if self.activated:
return self.handler.lock
def createLock(self):
if self.activated:
return self.handler.createLock()
def acquire(self):
if self.activated:
return self.handler.acquire()
def release(self):
if self.activated:
return self.handler.release()
class ForceImportErrorOn:
"""
This class is meant to be used in mock'ed test cases which require an
``ImportError`` to be raised.
>>> import os.path
>>> with ForceImportErrorOn('os.path'):
... import os.path
...
Traceback (most recent call last):
File "<stdin>", line 2, in <module>
File "salttesting/helpers.py", line 263, in __import__
'Forced ImportError raised for {0!r}'.format(name)
ImportError: Forced ImportError raised for 'os.path'
>>>
>>> with ForceImportErrorOn(('os', 'path')):
... import os.path
... sys.modules.pop('os', None)
... from os import path
...
<module 'os' from '/usr/lib/python2.7/os.pyc'>
Traceback (most recent call last):
File "<stdin>", line 4, in <module>
File "salttesting/helpers.py", line 288, in __fake_import__
name, ', '.join(fromlist)
ImportError: Forced ImportError raised for 'from os import path'
>>>
>>> with ForceImportErrorOn(('os', 'path'), 'os.path'):
... import os.path
... sys.modules.pop('os', None)
... from os import path
...
Traceback (most recent call last):
File "<stdin>", line 2, in <module>
File "salttesting/helpers.py", line 281, in __fake_import__
'Forced ImportError raised for {0!r}'.format(name)
ImportError: Forced ImportError raised for 'os.path'
>>>
"""
def __init__(self, *module_names):
self.__module_names = {}
for entry in module_names:
if isinstance(entry, (list, tuple)):
modname = entry[0]
self.__module_names[modname] = set(entry[1:])
else:
self.__module_names[entry] = None
self.__original_import = builtins.__import__
self.patcher = patch.object(builtins, "__import__", self.__fake_import__)
def patch_import_function(self):
self.patcher.start()
def restore_import_funtion(self):
self.patcher.stop()
def __fake_import__(
self, name, globals_=None, locals_=None, fromlist=None, level=None
):
if six.PY2:
if globals_ is None:
globals_ = {}
if locals_ is None:
locals_ = {}
if level is None:
level = 0
if fromlist is None:
fromlist = []
if name in self.__module_names:
importerror_fromlist = self.__module_names.get(name)
if importerror_fromlist is None:
raise ImportError("Forced ImportError raised for {!r}".format(name))
if importerror_fromlist.intersection(set(fromlist)):
raise ImportError(
"Forced ImportError raised for {!r}".format(
"from {} import {}".format(name, ", ".join(fromlist))
)
)
return self.__original_import(name, globals_, locals_, fromlist, level)
def __enter__(self):
self.patch_import_function()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.restore_import_funtion()
class MockWraps:
"""
Helper class to be used with the mock library.
To be used in the ``wraps`` keyword of ``Mock`` or ``MagicMock`` where you
want to trigger a side effect for X times, and afterwards, call the
original and un-mocked method.
As an example:
>>> def original():
... print 'original'
...
>>> def side_effect():
... print 'side effect'
...
>>> mw = MockWraps(original, 2, side_effect)
>>> mw()
side effect
>>> mw()
side effect
>>> mw()
original
>>>
"""
def __init__(self, original, expected_failures, side_effect):
self.__original = original
self.__expected_failures = expected_failures
self.__side_effect = side_effect
self.__call_counter = 0
def __call__(self, *args, **kwargs):
try:
if self.__call_counter < self.__expected_failures:
if isinstance(self.__side_effect, types.FunctionType):
return self.__side_effect()
raise self.__side_effect
return self.__original(*args, **kwargs)
finally:
self.__call_counter += 1
def requires_network(only_local_network=False):
"""
    Simple decorator which skips a test case when there is no network
    connection to the internet (or, with ``only_local_network=True``, when
    there is no local network).
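    A minimal usage sketch (the test class and method names are illustrative):
    .. code-block:: python
        class MyTestCase(TestCase):
            @requires_network()
            def test_needs_internet(self):
                pass
            @requires_network(only_local_network=True)
            def test_needs_local_network(self):
                pass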
"""
def decorator(func):
@functools.wraps(func)
def wrapper(cls, *args, **kwargs):
has_local_network = False
            # First, let's check whether we have a local network. Inspired by
            # verify_socket
try:
pubsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
retsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
pubsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
pubsock.bind(("", 18000))
pubsock.close()
retsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
retsock.bind(("", 18001))
retsock.close()
has_local_network = True
except OSError:
# I wonder if we just have IPV6 support?
try:
pubsock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
retsock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
pubsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
pubsock.bind(("", 18000))
pubsock.close()
retsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
retsock.bind(("", 18001))
retsock.close()
has_local_network = True
except OSError:
# Let's continue
pass
if only_local_network is True:
if has_local_network is False:
# Since we're only supposed to check local network, and no
# local network was detected, skip the test
cls.skipTest("No local network was detected")
return func(cls)
if os.environ.get("NO_INTERNET"):
cls.skipTest("Environment variable NO_INTERNET is set.")
# We are using the google.com DNS records as numerical IPs to avoid
# DNS lookups which could greatly slow down this check
for addr in (
"173.194.41.198",
"173.194.41.199",
"173.194.41.200",
"173.194.41.201",
"173.194.41.206",
"173.194.41.192",
"173.194.41.193",
"173.194.41.194",
"173.194.41.195",
"173.194.41.196",
"173.194.41.197",
):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.settimeout(0.25)
sock.connect((addr, 80))
# We connected? Stop the loop
break
except OSError:
# Let's check the next IP
continue
else:
cls.skipTest("No internet network connection was detected")
finally:
sock.close()
return func(cls, *args, **kwargs)
return wrapper
return decorator
def with_system_user(
username, on_existing="delete", delete=True, password=None, groups=None
):
"""
Create and optionally destroy a system user to be used within a test
case. The system user is created using the ``user`` salt module.
The decorated testcase function must accept 'username' as an argument.
:param username: The desired username for the system user.
:param on_existing: What to do when the desired username is taken. The
available options are:
* nothing: Do nothing, act as if the user was created.
* delete: delete and re-create the existing user
* skip: skip the test case
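    A minimal usage sketch, assuming a test case class that provides
    ``run_function`` (the username below is illustrative):
    .. code-block:: python
        class MyTestCase(ModuleCase):
            @with_system_user("test-account", on_existing="delete")
            def test_something_as_user(self, username):
                self.assertTrue(self.run_function("user.info", [username]))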
"""
if on_existing not in ("nothing", "delete", "skip"):
raise RuntimeError(
"The value of 'on_existing' can only be one of, "
"'nothing', 'delete' and 'skip'"
)
if not isinstance(delete, bool):
raise RuntimeError("The value of 'delete' can only be 'True' or 'False'")
def decorator(func):
@functools.wraps(func)
def wrap(cls):
# Let's add the user to the system.
log.debug("Creating system user {!r}".format(username))
kwargs = {"timeout": 60, "groups": groups}
if salt.utils.platform.is_windows():
kwargs.update({"password": password})
create_user = cls.run_function("user.add", [username], **kwargs)
if not create_user:
log.debug("Failed to create system user")
# The user was not created
if on_existing == "skip":
cls.skipTest("Failed to create system user {!r}".format(username))
if on_existing == "delete":
log.debug("Deleting the system user {!r}".format(username))
delete_user = cls.run_function(
"user.delete", [username, True, True]
)
if not delete_user:
cls.skipTest(
"A user named {!r} already existed on the "
"system and re-creating it was not possible".format(
username
)
)
log.debug("Second time creating system user {!r}".format(username))
create_user = cls.run_function("user.add", [username], **kwargs)
if not create_user:
cls.skipTest(
"A user named {!r} already existed, was deleted "
"as requested, but re-creating it was not possible".format(
username
)
)
if not salt.utils.platform.is_windows() and password is not None:
if salt.utils.platform.is_darwin():
hashed_password = password
else:
hashed_password = salt.utils.pycrypto.gen_hash(password=password)
hashed_password = "'{}'".format(hashed_password)
add_pwd = cls.run_function(
"shadow.set_password", [username, hashed_password]
)
failure = None
try:
try:
return func(cls, username)
except Exception as exc: # pylint: disable=W0703
log.error(
"Running {!r} raised an exception: {}".format(func, exc),
exc_info=True,
)
# Store the original exception details which will be raised
# a little further down the code
failure = sys.exc_info()
finally:
if delete:
delete_user = cls.run_function(
"user.delete", [username, True, True], timeout=60
)
if not delete_user:
if failure is None:
log.warning(
"Although the actual test-case did not fail, "
"deleting the created system user {!r} "
"afterwards did.".format(username)
)
else:
log.warning(
"The test-case failed and also did the removal"
" of the system user {!r}".format(username)
)
if failure is not None:
# If an exception was thrown, raise it
raise failure[1].with_traceback(failure[2])
return wrap
return decorator
def with_system_group(group, on_existing="delete", delete=True):
"""
Create and optionally destroy a system group to be used within a test
    case. The system group is created using the ``group`` salt module.
    The decorated testcase function must accept 'group' as an argument.
    :param group: The desired name for the system group.
    :param on_existing: What to do when the desired group name is taken. The
        available options are:
        * nothing: Do nothing, act as if the group was created.
        * delete: delete and re-create the existing group
* skip: skip the test case
"""
if on_existing not in ("nothing", "delete", "skip"):
raise RuntimeError(
"The value of 'on_existing' can only be one of, "
"'nothing', 'delete' and 'skip'"
)
if not isinstance(delete, bool):
raise RuntimeError("The value of 'delete' can only be 'True' or 'False'")
def decorator(func):
@functools.wraps(func)
def wrap(cls):
# Let's add the user to the system.
log.debug("Creating system group {!r}".format(group))
create_group = cls.run_function("group.add", [group])
if not create_group:
log.debug("Failed to create system group")
# The group was not created
if on_existing == "skip":
cls.skipTest("Failed to create system group {!r}".format(group))
if on_existing == "delete":
log.debug("Deleting the system group {!r}".format(group))
delete_group = cls.run_function("group.delete", [group])
if not delete_group:
cls.skipTest(
"A group named {!r} already existed on the "
"system and re-creating it was not possible".format(group)
)
log.debug("Second time creating system group {!r}".format(group))
create_group = cls.run_function("group.add", [group])
if not create_group:
cls.skipTest(
"A group named {!r} already existed, was deleted "
"as requested, but re-creating it was not possible".format(
group
)
)
failure = None
try:
try:
return func(cls, group)
except Exception as exc: # pylint: disable=W0703
log.error(
"Running {!r} raised an exception: {}".format(func, exc),
exc_info=True,
)
# Store the original exception details which will be raised
# a little further down the code
failure = sys.exc_info()
finally:
if delete:
delete_group = cls.run_function("group.delete", [group])
if not delete_group:
if failure is None:
log.warning(
"Although the actual test-case did not fail, "
"deleting the created system group {!r} "
"afterwards did.".format(group)
)
else:
log.warning(
"The test-case failed and also did the removal"
" of the system group {!r}".format(group)
)
if failure is not None:
# If an exception was thrown, raise it
raise failure[1].with_traceback(failure[2])
return wrap
return decorator
def with_system_user_and_group(username, group, on_existing="delete", delete=True):
"""
Create and optionally destroy a system user and group to be used within a
    test case. The system user is created using the ``user`` salt module, and
the system group is created with the ``group`` salt module.
The decorated testcase function must accept both the 'username' and 'group'
arguments.
:param username: The desired username for the system user.
:param group: The desired name for the system group.
:param on_existing: What to do when the desired username is taken. The
available options are:
* nothing: Do nothing, act as if the user was created.
* delete: delete and re-create the existing user
* skip: skip the test case
"""
if on_existing not in ("nothing", "delete", "skip"):
raise RuntimeError(
"The value of 'on_existing' can only be one of, "
"'nothing', 'delete' and 'skip'"
)
if not isinstance(delete, bool):
raise RuntimeError("The value of 'delete' can only be 'True' or 'False'")
def decorator(func):
@functools.wraps(func)
def wrap(cls):
# Let's add the user to the system.
log.debug("Creating system user {!r}".format(username))
create_user = cls.run_function("user.add", [username])
log.debug("Creating system group {!r}".format(group))
create_group = cls.run_function("group.add", [group])
if not create_user:
log.debug("Failed to create system user")
# The user was not created
if on_existing == "skip":
cls.skipTest("Failed to create system user {!r}".format(username))
if on_existing == "delete":
log.debug("Deleting the system user {!r}".format(username))
delete_user = cls.run_function(
"user.delete", [username, True, True]
)
if not delete_user:
cls.skipTest(
"A user named {!r} already existed on the "
"system and re-creating it was not possible".format(
username
)
)
log.debug("Second time creating system user {!r}".format(username))
create_user = cls.run_function("user.add", [username])
if not create_user:
cls.skipTest(
"A user named {!r} already existed, was deleted "
"as requested, but re-creating it was not possible".format(
username
)
)
if not create_group:
log.debug("Failed to create system group")
# The group was not created
if on_existing == "skip":
cls.skipTest("Failed to create system group {!r}".format(group))
if on_existing == "delete":
log.debug("Deleting the system group {!r}".format(group))
delete_group = cls.run_function("group.delete", [group])
if not delete_group:
cls.skipTest(
"A group named {!r} already existed on the "
"system and re-creating it was not possible".format(group)
)
log.debug("Second time creating system group {!r}".format(group))
create_group = cls.run_function("group.add", [group])
if not create_group:
cls.skipTest(
"A group named {!r} already existed, was deleted "
"as requested, but re-creating it was not possible".format(
group
)
)
failure = None
try:
try:
return func(cls, username, group)
except Exception as exc: # pylint: disable=W0703
log.error(
"Running {!r} raised an exception: {}".format(func, exc),
exc_info=True,
)
# Store the original exception details which will be raised
# a little further down the code
failure = sys.exc_info()
finally:
if delete:
delete_user = cls.run_function(
"user.delete", [username, True, True]
)
delete_group = cls.run_function("group.delete", [group])
if not delete_user:
if failure is None:
log.warning(
"Although the actual test-case did not fail, "
"deleting the created system user {!r} "
"afterwards did.".format(username)
)
else:
log.warning(
"The test-case failed and also did the removal"
" of the system user {!r}".format(username)
)
if not delete_group:
if failure is None:
log.warning(
"Although the actual test-case did not fail, "
"deleting the created system group {!r} "
"afterwards did.".format(group)
)
else:
log.warning(
"The test-case failed and also did the removal"
" of the system group {!r}".format(group)
)
if failure is not None:
# If an exception was thrown, raise it
raise failure[1].with_traceback(failure[2])
return wrap
return decorator
class WithTempfile:
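    """
    Decorator which creates a temporary file (under ``RUNTIME_VARS.TMP`` by
    default) and passes its path to the decorated test method, removing the
    file afterwards. A minimal usage sketch (the test method name and keyword
    arguments below are illustrative):
    .. code-block:: python
        class MyTestCase(TestCase):
            @with_tempfile(suffix=".txt")
            def test_write_file(self, name):
                with salt.utils.files.fopen(name, "w") as wfh:
                    wfh.write("some data")
    """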
def __init__(self, **kwargs):
self.create = kwargs.pop("create", True)
if "dir" not in kwargs:
kwargs["dir"] = RUNTIME_VARS.TMP
if "prefix" not in kwargs:
kwargs["prefix"] = "__salt.test."
self.kwargs = kwargs
def __call__(self, func):
self.func = func
return functools.wraps(func)(
# pylint: disable=unnecessary-lambda
lambda testcase, *args, **kwargs: self.wrap(testcase, *args, **kwargs)
# pylint: enable=unnecessary-lambda
)
def wrap(self, testcase, *args, **kwargs):
name = salt.utils.files.mkstemp(**self.kwargs)
if not self.create:
os.remove(name)
try:
return self.func(testcase, name, *args, **kwargs)
finally:
try:
os.remove(name)
except OSError:
pass
with_tempfile = WithTempfile
class WithTempdir:
def __init__(self, **kwargs):
self.create = kwargs.pop("create", True)
if "dir" not in kwargs:
kwargs["dir"] = RUNTIME_VARS.TMP
self.kwargs = kwargs
def __call__(self, func):
self.func = func
return functools.wraps(func)(
# pylint: disable=unnecessary-lambda
lambda testcase, *args, **kwargs: self.wrap(testcase, *args, **kwargs)
# pylint: enable=unnecessary-lambda
)
def wrap(self, testcase, *args, **kwargs):
tempdir = tempfile.mkdtemp(**self.kwargs)
if not self.create:
os.rmdir(tempdir)
try:
return self.func(testcase, tempdir, *args, **kwargs)
finally:
shutil.rmtree(tempdir, ignore_errors=True)
with_tempdir = WithTempdir
def requires_system_grains(func):
"""
Function decorator which loads and passes the system's grains to the test
case.
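    A minimal usage sketch (the test method name is illustrative):
    .. code-block:: python
        @requires_system_grains
        def test_something(self, grains):
            assert "os" in grains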
"""
@functools.wraps(func)
def decorator(*args, **kwargs):
if not hasattr(requires_system_grains, "__grains__"):
# Late import
from tests.support.sminion import build_minion_opts
opts = build_minion_opts(minion_id="runtests-internal-sminion")
requires_system_grains.__grains__ = salt.loader.grains(opts)
kwargs["grains"] = requires_system_grains.__grains__
return func(*args, **kwargs)
return decorator
@requires_system_grains
def runs_on(grains=None, **kwargs):
"""
    Skip the test if the grains don't match the values passed in as **kwargs.
    If a kwarg value is a list, the test is skipped unless the grain matches
    at least one item in the list.
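    A minimal usage sketch (the grain values are illustrative):
    .. code-block:: python
        @runs_on(kernel="Linux", os=["Ubuntu", "Debian"])
        class LinuxOnlyTestCase(ModuleCase):
            pass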
"""
reason = kwargs.pop("reason", None)
for kw, value in kwargs.items():
if isinstance(value, list):
            if not any(str(grains.get(kw)).lower() == str(v).lower() for v in value):
if reason is None:
reason = "This test does not run on {}={}".format(
kw, grains.get(kw)
)
return skip(reason)
else:
if str(grains.get(kw)).lower() != str(value).lower():
if reason is None:
reason = "This test runs on {}={}, not {}".format(
kw, value, grains.get(kw)
)
return skip(reason)
return _id
@requires_system_grains
def not_runs_on(grains=None, **kwargs):
"""
Reverse of `runs_on`.
    Skip the test if the grains match the values passed in as **kwargs.
    If a kwarg value is a list, the test is skipped when the grain matches
    any item in the list.
"""
reason = kwargs.pop("reason", None)
for kw, value in kwargs.items():
if isinstance(value, list):
if any(str(grains.get(kw)).lower() == str(v).lower() for v in value):
if reason is None:
reason = "This test does not run on {}={}".format(
kw, grains.get(kw)
)
return skip(reason)
else:
if str(grains.get(kw)).lower() == str(value).lower():
if reason is None:
reason = "This test does not run on {}={}, got {}".format(
kw, value, grains.get(kw)
)
return skip(reason)
return _id
def _check_required_sminion_attributes(sminion_attr, *required_items):
"""
:param sminion_attr: The name of the sminion attribute to check, such as 'functions' or 'states'
:param required_items: The items that must be part of the designated sminion attribute for the decorated test
:return The packages that are not available
"""
# Late import
from tests.support.sminion import create_sminion
required_salt_items = set(required_items)
sminion = create_sminion(minion_id="runtests-internal-sminion")
available_items = list(getattr(sminion, sminion_attr))
not_available_items = set()
name = "__not_available_{items}s__".format(items=sminion_attr)
if not hasattr(sminion, name):
setattr(sminion, name, set())
cached_not_available_items = getattr(sminion, name)
for not_available_item in cached_not_available_items:
if not_available_item in required_salt_items:
not_available_items.add(not_available_item)
required_salt_items.remove(not_available_item)
for required_item_name in required_salt_items:
search_name = required_item_name
if "." not in search_name:
search_name += ".*"
if not fnmatch.filter(available_items, search_name):
not_available_items.add(required_item_name)
cached_not_available_items.add(required_item_name)
return not_available_items
def requires_salt_states(*names):
"""
    Makes sure the passed salt states are available. Skips the test if they are not.
.. versionadded:: 3000
"""
not_available = _check_required_sminion_attributes("states", *names)
if not_available:
return skip("Unavailable salt states: {}".format(*not_available))
return _id
def requires_salt_modules(*names):
"""
    Makes sure the passed salt modules are available. Skips the test if they are not.
.. versionadded:: 0.5.2
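    A minimal usage sketch (the module names are illustrative):
    .. code-block:: python
        @requires_salt_modules("cmd.run", "file.touch")
        class MyModuleTest(ModuleCase):
            pass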
"""
not_available = _check_required_sminion_attributes("functions", *names)
if not_available:
return skip("Unavailable salt modules: {}".format(*not_available))
return _id
def skip_if_binaries_missing(*binaries, **kwargs):
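    """
    Skip the test if none of the passed binaries can be found on the system
    (or, with ``check_all=True``, if any single one of them is missing). An
    optional ``message`` keyword argument is prepended to the skip reason.
    A minimal usage sketch (the binary name is illustrative):
    .. code-block:: python
        @skip_if_binaries_missing("git")
        class GitDependentTest(ModuleCase):
            pass
    """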
import salt.utils.path
if len(binaries) == 1:
if isinstance(binaries[0], (list, tuple, set, frozenset)):
binaries = binaries[0]
check_all = kwargs.pop("check_all", False)
message = kwargs.pop("message", None)
if kwargs:
raise RuntimeError(
"The only supported keyword argument is 'check_all' and "
"'message'. Invalid keyword arguments: {}".format(", ".join(kwargs.keys()))
)
if check_all:
for binary in binaries:
if salt.utils.path.which(binary) is None:
return skip(
"{}The {!r} binary was not found".format(
message and "{}. ".format(message) or "", binary
)
)
elif salt.utils.path.which_bin(binaries) is None:
return skip(
"{}None of the following binaries was found: {}".format(
message and "{}. ".format(message) or "", ", ".join(binaries)
)
)
return _id
def skip_if_not_root(func):
# Late import
from tests.support.runtests import RUNTIME_VARS
if RUNTIME_VARS.PYTEST_SESSION:
setattr(func, "__skip_if_not_root__", True)
if not sys.platform.startswith("win"):
if os.getuid() != 0:
func.__unittest_skip__ = True
func.__unittest_skip_why__ = (
"You must be logged in as root to run this test"
)
else:
current_user = salt.utils.win_functions.get_current_user()
if current_user != "SYSTEM":
if not salt.utils.win_functions.is_admin(current_user):
func.__unittest_skip__ = True
func.__unittest_skip_why__ = (
"You must be logged in as an Administrator to run this test"
)
return func
def repeat(caller=None, condition=True, times=5):
"""
    Repeat a test a given number of times (``times``, 5 by default), stopping at the first failure.
.. code-block:: python
class MyTestCase(TestCase):
@repeat
def test_sometimes_works(self):
pass
"""
if caller is None:
return functools.partial(repeat, condition=condition, times=times)
if isinstance(condition, bool) and condition is False:
# Don't even decorate
return caller
elif callable(condition):
if condition() is False:
# Don't even decorate
return caller
if inspect.isclass(caller):
attrs = [n for n in dir(caller) if n.startswith("test_")]
for attrname in attrs:
try:
function = getattr(caller, attrname)
if not inspect.isfunction(function) and not inspect.ismethod(function):
continue
setattr(
caller,
attrname,
repeat(caller=function, condition=condition, times=times),
)
except Exception as exc: # pylint: disable=broad-except
log.exception(exc)
continue
return caller
@functools.wraps(caller)
def wrap(cls):
result = None
for attempt in range(1, times + 1):
log.info("%s test run %d of %s times", cls, attempt, times)
caller(cls)
return cls
return wrap
def http_basic_auth(login_cb=lambda username, password: False):
"""
A crude decorator to force a handler to request HTTP Basic Authentication
Example usage:
.. code-block:: python
@http_basic_auth(lambda u, p: u == 'foo' and p == 'bar')
class AuthenticatedHandler(salt.ext.tornado.web.RequestHandler):
pass
"""
def wrapper(handler_class):
def wrap_execute(handler_execute):
def check_auth(handler, kwargs):
auth = handler.request.headers.get("Authorization")
if auth is None or not auth.startswith("Basic "):
# No username/password entered yet, we need to return a 401
# and set the WWW-Authenticate header to request login.
handler.set_status(401)
handler.set_header("WWW-Authenticate", "Basic realm=Restricted")
else:
# Strip the 'Basic ' from the beginning of the auth header
# leaving the base64-encoded secret
                    # b64decode returns bytes; decode before splitting on ":"
                    username, password = base64.b64decode(auth[6:]).decode("utf-8").split(":", 1)
if login_cb(username, password):
# Authentication successful
return
else:
# Authentication failed
handler.set_status(403)
handler._transforms = []
handler.finish()
def _execute(self, transforms, *args, **kwargs):
check_auth(self, kwargs)
return handler_execute(self, transforms, *args, **kwargs)
return _execute
handler_class._execute = wrap_execute(handler_class._execute)
return handler_class
return wrapper
def generate_random_name(prefix, size=6):
"""
Generates a random name by combining the provided prefix with a randomly generated
ascii string.
.. versionadded:: 2018.3.0
prefix
The string to prefix onto the randomly generated ascii string.
size
The number of characters to generate. Default: 6.
"""
salt.utils.versions.warn_until_date(
"20220101",
"Please replace your call 'generate_random_name({0})' with 'random_string({0}, lowercase=False)' as "
"'generate_random_name' will be removed after {{date}}".format(prefix),
)
return random_string(prefix, size=size, lowercase=False)
def random_string(prefix, size=6, uppercase=True, lowercase=True, digits=True):
"""
Generates a random string.
    .. versionadded:: 3001
Args:
prefix(str): The prefix for the random string
size(int): The size of the random string
uppercase(bool): If true, include uppercased ascii chars in choice sample
lowercase(bool): If true, include lowercased ascii chars in choice sample
digits(bool): If true, include digits in choice sample
Returns:
str: The random string
"""
if not any([uppercase, lowercase, digits]):
raise RuntimeError(
"At least one of 'uppercase', 'lowercase' or 'digits' needs to be true"
)
choices = []
if uppercase:
choices.extend(string.ascii_uppercase)
if lowercase:
choices.extend(string.ascii_lowercase)
if digits:
choices.extend(string.digits)
return prefix + "".join(random.choice(choices) for _ in range(size))
class Webserver:
"""
Starts a tornado webserver on 127.0.0.1 on a random available port
USAGE:
.. code-block:: python
from tests.support.helpers import Webserver
webserver = Webserver('/path/to/web/root')
webserver.start()
webserver.stop()
"""
def __init__(self, root=None, port=None, wait=5, handler=None):
"""
root
Root directory of webserver. If not passed, it will default to the
location of the base environment of the integration suite's file
roots (tests/integration/files/file/base/)
port
Port on which to listen. If not passed, a random one will be chosen
at the time the start() function is invoked.
wait : 5
Number of seconds to wait for the socket to be open before raising
an exception
handler
            Can be set to a subclass of tornado.web.StaticFileHandler,
            such as when enforcing authentication with the http_basic_auth
            decorator.
"""
if port is not None and not isinstance(port, int):
raise ValueError("port must be an integer")
if root is None:
root = RUNTIME_VARS.BASE_FILES
try:
self.root = os.path.realpath(root)
except AttributeError:
raise ValueError("root must be a string")
self.port = port
self.wait = wait
self.handler = (
handler if handler is not None else salt.ext.tornado.web.StaticFileHandler
)
self.web_root = None
def target(self):
"""
Threading target which stands up the tornado application
"""
self.ioloop = salt.ext.tornado.ioloop.IOLoop()
self.ioloop.make_current()
if self.handler == salt.ext.tornado.web.StaticFileHandler:
self.application = salt.ext.tornado.web.Application(
[(r"/(.*)", self.handler, {"path": self.root})]
)
else:
self.application = salt.ext.tornado.web.Application(
[(r"/(.*)", self.handler)]
)
self.application.listen(self.port)
self.ioloop.start()
@property
def listening(self):
if self.port is None:
return False
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
return sock.connect_ex(("127.0.0.1", self.port)) == 0
def url(self, path):
"""
Convenience function which, given a file path, will return a URL that
points to that path. If the path is relative, it will just be appended
to self.web_root.
"""
if self.web_root is None:
raise RuntimeError("Webserver instance has not been started")
err_msg = (
"invalid path, must be either a relative path or a path "
"within {}".format(self.root)
)
try:
relpath = (
path if not os.path.isabs(path) else os.path.relpath(path, self.root)
)
if relpath.startswith(".." + os.sep):
raise ValueError(err_msg)
return "/".join((self.web_root, relpath))
except AttributeError:
raise ValueError(err_msg)
def start(self):
"""
Starts the webserver
"""
if self.port is None:
self.port = get_unused_localhost_port()
self.web_root = "http://127.0.0.1:{}".format(self.port)
self.server_thread = threading.Thread(target=self.target)
self.server_thread.daemon = True
self.server_thread.start()
for idx in range(self.wait + 1):
if self.listening:
break
if idx != self.wait:
time.sleep(1)
else:
raise Exception(
"Failed to start tornado webserver on 127.0.0.1:{} within "
"{} seconds".format(self.port, self.wait)
)
def stop(self):
"""
Stops the webserver
"""
self.ioloop.add_callback(self.ioloop.stop)
self.server_thread.join()
class SaveRequestsPostHandler(salt.ext.tornado.web.RequestHandler):
"""
Save all requests sent to the server.
"""
received_requests = []
def post(self, *args): # pylint: disable=arguments-differ
"""
Handle the post
"""
self.received_requests.append(self.request)
def data_received(self): # pylint: disable=arguments-differ
"""
Streaming not used for testing
"""
raise NotImplementedError()
class MirrorPostHandler(salt.ext.tornado.web.RequestHandler):
"""
Mirror a POST body back to the client
"""
def post(self, *args): # pylint: disable=arguments-differ
"""
Handle the post
"""
body = self.request.body
log.debug("Incoming body: %s Incoming args: %s", body, args)
self.write(body)
def data_received(self): # pylint: disable=arguments-differ
"""
Streaming not used for testing
"""
raise NotImplementedError()
def dedent(text, linesep=os.linesep):
"""
A wrapper around textwrap.dedent that also sets line endings.
"""
linesep = salt.utils.stringutils.to_unicode(linesep)
unicode_text = textwrap.dedent(salt.utils.stringutils.to_unicode(text))
clean_text = linesep.join(unicode_text.splitlines())
if unicode_text.endswith("\n"):
clean_text += linesep
if not isinstance(text, str):
return salt.utils.stringutils.to_bytes(clean_text)
return clean_text
class PatchedEnviron:
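    """
    Context manager which temporarily patches ``os.environ`` with the given
    keyword arguments, removing any keys listed in ``__cleanup__`` first, and
    restores the original environment on exit. A minimal usage sketch (the
    environment variable names are illustrative):
    .. code-block:: python
        with patched_environ(LANG="C", __cleanup__=("LC_ALL",)):
            pass  # os.environ now contains LANG=C and no LC_ALL
    """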
def __init__(self, **kwargs):
self.cleanup_keys = kwargs.pop("__cleanup__", ())
self.kwargs = kwargs
self.original_environ = None
def __enter__(self):
self.original_environ = os.environ.copy()
for key in self.cleanup_keys:
os.environ.pop(key, None)
# Make sure there are no unicode characters in the self.kwargs if we're
# on Python 2. These are being added to `os.environ` and causing
# problems
if sys.version_info < (3,):
kwargs = self.kwargs.copy()
clean_kwargs = {}
for k in self.kwargs:
key = k
if isinstance(key, str):
key = key.encode("utf-8")
if isinstance(self.kwargs[k], str):
kwargs[k] = kwargs[k].encode("utf-8")
clean_kwargs[key] = kwargs[k]
self.kwargs = clean_kwargs
os.environ.update(**self.kwargs)
return self
def __exit__(self, *args):
os.environ.clear()
os.environ.update(self.original_environ)
patched_environ = PatchedEnviron
class VirtualEnv:
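    """
    Context manager which creates a throw-away virtualenv for a test and
    removes it on exit. A minimal usage sketch (the package name is
    illustrative):
    .. code-block:: python
        with VirtualEnv() as venv:
            venv.install("requests")
            venv.run(venv.venv_python, "-c", "import requests")
    """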
def __init__(self, venv_dir=None):
self.venv_dir = venv_dir or tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
if salt.utils.platform.is_windows():
self.venv_python = os.path.join(self.venv_dir, "Scripts", "python.exe")
else:
self.venv_python = os.path.join(self.venv_dir, "bin", "python")
self.venv_bin_dir = os.path.dirname(self.venv_python)
def __enter__(self):
try:
self._create_virtualenv()
except subprocess.CalledProcessError:
raise AssertionError("Failed to create virtualenv")
return self
def __exit__(self, *args):
salt.utils.files.rm_rf(self.venv_dir)
def install(self, *args, **kwargs):
return self.run(self.venv_python, "-m", "pip", "install", *args, **kwargs)
def run(self, *args, **kwargs):
check = kwargs.pop("check", True)
kwargs.setdefault("cwd", self.venv_dir)
kwargs.setdefault("stdout", subprocess.PIPE)
kwargs.setdefault("stderr", subprocess.PIPE)
kwargs.setdefault("universal_newlines", True)
proc = subprocess.run(args, check=False, **kwargs)
ret = ProcessResult(proc.returncode, proc.stdout, proc.stderr, proc.args)
log.debug(ret)
if check is True:
try:
proc.check_returncode()
except subprocess.CalledProcessError:
raise ProcessFailed(
"Command failed return code check",
cmdline=proc.args,
stdout=proc.stdout,
stderr=proc.stderr,
exitcode=proc.returncode,
)
return ret
def _get_real_python(self):
"""
        Virtualenv creation is proxied through this function mostly because, under
        Windows, we can't seem to properly create a virtualenv off of another
        virtualenv (we can on Linux), and also because we really don't want to
        test virtualenv creation off of another virtualenv; we want a virtualenv
        created from the original python.
        Also, on Windows, we must point to the virtualenv binary outside the existing
        virtualenv because it will fail otherwise.
"""
try:
if salt.utils.platform.is_windows():
return os.path.join(sys.real_prefix, os.path.basename(sys.executable))
else:
python_binary_names = [
"python{}.{}".format(*sys.version_info),
"python{}".format(*sys.version_info),
"python",
]
for binary_name in python_binary_names:
python = os.path.join(sys.real_prefix, "bin", binary_name)
if os.path.exists(python):
break
else:
raise AssertionError(
"Couldn't find a python binary name under '{}' matching: {}".format(
os.path.join(sys.real_prefix, "bin"), python_binary_names
)
)
return python
except AttributeError:
return sys.executable
def _create_virtualenv(self):
sminion = create_sminion()
sminion.functions.virtualenv.create(
self.venv_dir, python=self._get_real_python()
)
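# Illustrative sketch (not part of the original module): creating a throwaway
# virtualenv, installing a package into it and running its python binary.  The
# package name is hypothetical; run() raises ProcessFailed on a non-zero exit
# because check defaults to True.
def _example_virtualenv_usage():
    with VirtualEnv() as venv:
        venv.install("pep8")
        venv.run(venv.venv_python, "-c", "import pep8")
    # The venv directory is removed again when the with-block exits.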
@contextmanager
def change_cwd(path):
"""
Context manager helper to change CWD for a with code block and restore
it at the end
"""
old_cwd = os.getcwd()
try:
os.chdir(path)
# Do stuff
yield
finally:
# Restore Old CWD
os.chdir(old_cwd)
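# Illustrative sketch (not part of the original module): the working directory
# is switched only for the duration of the with-block and restored afterwards.
def _example_change_cwd_usage():
    original = os.getcwd()
    with change_cwd(tempfile.gettempdir()):
        pass  # work that depends on the CWD goes here
    assert os.getcwd() == original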
@functools.lru_cache(maxsize=1)
def get_virtualenv_binary_path():
# Under windows we can't seem to properly create a virtualenv off of another
# virtualenv, we can on linux but we will still point to the virtualenv binary
# outside the virtualenv running the test suite, if that's the case.
try:
real_prefix = sys.real_prefix
# The above attribute exists, this is a virtualenv
if salt.utils.platform.is_windows():
virtualenv_binary = os.path.join(real_prefix, "Scripts", "virtualenv.exe")
else:
# We need to remove the virtualenv from PATH or we'll get the virtualenv binary
# from within the virtualenv, we don't want that
path = os.environ.get("PATH")
if path is not None:
path_items = path.split(os.pathsep)
for item in path_items[:]:
if item.startswith(sys.base_prefix):
path_items.remove(item)
os.environ["PATH"] = os.pathsep.join(path_items)
virtualenv_binary = salt.utils.path.which("virtualenv")
if path is not None:
# Restore previous environ PATH
os.environ["PATH"] = path
if not virtualenv_binary.startswith(real_prefix):
virtualenv_binary = None
if virtualenv_binary and not os.path.exists(virtualenv_binary):
# It doesn't exist?!
virtualenv_binary = None
except AttributeError:
# We're not running inside a virtualenv
virtualenv_binary = None
return virtualenv_binary
|
submitty_autograding_shipper.py
|
#!/usr/bin/env python3
import os
import time
import signal
import json
import shutil
import contextlib
import datetime
import multiprocessing
from pathlib import Path
from submitty_utils import dateutils
import operator
import paramiko
import tempfile
import socket
import traceback
import subprocess
import urllib.parse
from autograder import autograding_utils
from autograder import packer_unpacker
CONFIG_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'config')
with open(os.path.join(CONFIG_PATH, 'submitty.json')) as open_file:
OPEN_JSON = json.load(open_file)
SUBMITTY_DATA_DIR = OPEN_JSON['submitty_data_dir']
SUBMITTY_INSTALL_DIR = OPEN_JSON['submitty_install_dir']
AUTOGRADING_LOG_PATH = OPEN_JSON['autograding_log_path']
AUTOGRADING_STACKTRACE_PATH = os.path.join(OPEN_JSON['site_log_path'], 'autograding_stack_traces')
with open(os.path.join(CONFIG_PATH, 'submitty_users.json')) as open_file:
OPEN_JSON = json.load(open_file)
DAEMON_UID = OPEN_JSON['daemon_uid']
INTERACTIVE_QUEUE = os.path.join(SUBMITTY_DATA_DIR, "to_be_graded_queue")
JOB_ID = '~SHIP~'
# ==================================================================================
def initialize(untrusted_queue):
"""
    Initializer function for all our processes. We get one untrusted user off our queue and
    set it on our Process. We cannot recycle the shipper process because otherwise the
    untrusted user we set for this process would be lost.
:param untrusted_queue: multiprocessing.queues.Queue that contains all untrusted users left to
assign
"""
multiprocessing.current_process().untrusted = untrusted_queue.get()
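# Illustrative sketch (not part of the original script): how an initializer like
# this would be wired up with a multiprocessing pool, each worker pulling one
# untrusted user name off the shared queue.  The pool below is hypothetical;
# this script actually launches plain Process objects further down.
def _example_initialize_wiring():
    untrusted_queue = multiprocessing.Queue()
    for i in range(4):
        untrusted_queue.put("untrusted" + str(i).zfill(2))
    pool = multiprocessing.Pool(4, initializer=initialize, initargs=(untrusted_queue,))
    # ... submit work to the pool here ...
    pool.close()
    pool.join()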
# ==================================================================================
def add_fields_to_autograding_worker_json(autograding_worker_json, entry):
submitty_config = os.path.join(SUBMITTY_INSTALL_DIR, 'config', 'version.json')
try:
with open(submitty_config) as infile:
submitty_details = json.load(infile)
installed_commit = submitty_details['installed_commit']
most_recent_tag = submitty_details['most_recent_git_tag']
except FileNotFoundError as e:
autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, trace=traceback.format_exc())
raise SystemExit("ERROR, could not locate the submitty.json:", e)
autograding_worker_json[entry]['server_name'] = socket.getfqdn()
autograding_worker_json[entry]['primary_commit'] = installed_commit
autograding_worker_json[entry]['most_recent_tag'] = most_recent_tag
return autograding_worker_json
# ==================================================================================
def update_all_foreign_autograding_workers():
success_map = dict()
all_workers_json = os.path.join(SUBMITTY_INSTALL_DIR, 'config', "autograding_workers.json")
try:
with open(all_workers_json, 'r') as infile:
autograding_workers = json.load(infile)
except FileNotFoundError as e:
autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, trace=traceback.format_exc())
raise SystemExit("ERROR, could not locate autograding_workers_json :", e)
for key, value in autograding_workers.items():
if value['enabled'] == False:
continue
formatted_entry = {key: value}
formatted_entry = add_fields_to_autograding_worker_json(formatted_entry, key)
success = update_worker_json(key, formatted_entry)
success_map[key] = success
return success_map
# ==================================================================================
# Updates the autograding_worker.json in a worker's autograding_TODO folder
# (tells it how many threads to run on startup).
def update_worker_json(name, entry):
fd, tmp_json_path = tempfile.mkstemp()
foreign_json = os.path.join(SUBMITTY_DATA_DIR, "autograding_TODO", "autograding_worker.json")
autograding_worker_to_ship = entry
try:
user = autograding_worker_to_ship[name]['username']
host = autograding_worker_to_ship[name]['address']
except Exception as e:
print("ERROR: autograding_workers.json entry for {0} is malformatted. {1}".format(e, name))
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="ERROR: autograding_workers.json entry for {0} is malformed. {1}".format(e, name))
autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())
return False
#create a new temporary json with only the entry for the current machine.
with open(tmp_json_path, 'w') as outfile:
json.dump(autograding_worker_to_ship, outfile, sort_keys=True, indent=4)
#if we are updating the current machine, we can just move the new json to the appropriate spot (no ssh needed)
if host == "localhost":
try:
shutil.move(tmp_json_path,foreign_json)
print("Successfully updated local autograding_TODO/autograding_worker.json")
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="Successfully updated local autograding_TODO/autograding_worker.json")
return True
except Exception as e:
autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="ERROR: could not mv to local autograding_TODO/autograding_worker.json due to the following error: "+str(e))
print("ERROR: could not mv to local autograding_worker.json due to the following error: {0}".format(e))
return False
finally:
os.close(fd)
#if we are updating a foreign machine, we must connect via ssh and use sftp to update it.
else:
#try to establish an ssh connection to the host
try:
ssh = establish_ssh_connection(None, user, host, only_try_once = True)
except Exception as e:
autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="ERROR: could not ssh to {0}@{1} due to following error: {2}".format(user, host,str(e)))
print("ERROR: could not ssh to {0}@{1} due to following error: {2}".format(user, host,str(e)))
return False
#try to copy the files over to the host
try:
sftp = ssh.open_sftp()
sftp.put(tmp_json_path,foreign_json)
sftp.close()
print("Successfully forwarded autograding_worker.json to {0}".format(name))
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="Successfully forwarded autograding_worker.json to {0}".format(name))
success = True
except Exception as e:
autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="ERROR: could not sftp to foreign autograding_TODO/autograding_worker.json due to the following error: "+str(e))
print("ERROR: could sftp to foreign autograding_TODO/autograding_worker.json due to the following error: {0}".format(e))
success = False
finally:
os.close(fd)
os.remove(tmp_json_path)
            # sftp may be undefined if ssh.open_sftp() itself failed above
            with contextlib.suppress(NameError):
                sftp.close()
ssh.close()
return success
def establish_ssh_connection(my_name, user, host, only_try_once = False):
"""
Returns a connected paramiko ssh session.
Tries to connect until a connection is established, unless only_try_once
is set to true. If only_try_once is true, raise whatever connection error is thrown.
"""
connected = False
ssh = None
retry_delay = .1
while not connected:
ssh = paramiko.SSHClient()
ssh.get_host_keys()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
ssh.connect(hostname = host, username = user, timeout=10)
connected = True
except:
if only_try_once:
raise
time.sleep(retry_delay)
            retry_delay = min(10, retry_delay * 2)
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message=f"{my_name} Could not establish connection with {user}@{host} going to re-try.")
autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())
return ssh
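# Illustrative sketch (not part of the original script): the usual
# connect/open_sftp/put/close sequence the shipper functions below build on.
# The user, host and paths are hypothetical placeholders.
def _example_ssh_copy(local_path, remote_path):
    ssh = establish_ssh_connection("example_shipper", "submitty_daemon",
                                   "worker.example.edu", only_try_once=True)
    try:
        sftp = ssh.open_sftp()
        try:
            sftp.put(local_path, remote_path)
        finally:
            sftp.close()
    finally:
        ssh.close()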
# ==================================================================================
def prepare_job(my_name,which_machine,which_untrusted,next_directory,next_to_grade):
# verify the DAEMON_USER is running this script
if not int(os.getuid()) == int(DAEMON_UID):
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="ERROR: must be run by DAEMON_USER")
raise SystemExit("ERROR: the submitty_autograding_shipper.py script must be run by the DAEMON_USER")
if which_machine == 'localhost':
address = which_machine
else:
address = which_machine.split('@')[1]
# prepare the zip files
try:
autograding_zip_tmp,submission_zip_tmp = packer_unpacker.prepare_autograding_and_submission_zip(which_machine,which_untrusted,next_directory,next_to_grade)
fully_qualified_domain_name = socket.getfqdn()
servername_workername = "{0}_{1}".format(fully_qualified_domain_name, address)
autograding_zip = os.path.join(SUBMITTY_DATA_DIR,"autograding_TODO",servername_workername+"_"+which_untrusted+"_autograding.zip")
submission_zip = os.path.join(SUBMITTY_DATA_DIR,"autograding_TODO",servername_workername+"_"+which_untrusted+"_submission.zip")
todo_queue_file = os.path.join(SUBMITTY_DATA_DIR,"autograding_TODO",servername_workername+"_"+which_untrusted+"_queue.json")
with open(next_to_grade, 'r') as infile:
queue_obj = json.load(infile)
queue_obj["which_untrusted"] = which_untrusted
queue_obj["which_machine"] = which_machine
queue_obj["ship_time"] = dateutils.write_submitty_date(microseconds=True)
except Exception as e:
autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="ERROR: failed preparing submission zip or accessing next to grade "+str(e))
print("ERROR: failed preparing submission zip or accessing next to grade ", e)
return False
if address == "localhost":
try:
shutil.move(autograding_zip_tmp,autograding_zip)
shutil.move(submission_zip_tmp,submission_zip)
with open(todo_queue_file, 'w') as outfile:
json.dump(queue_obj, outfile, sort_keys=True, indent=4)
except Exception as e:
autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="ERROR: could not move files due to the following error: "+str(e))
print("ERROR: could not move files due to the following error: {0}".format(e))
return False
else:
sftp = ssh = None
try:
user, host = which_machine.split("@")
ssh = establish_ssh_connection(my_name, user, host)
sftp = ssh.open_sftp()
sftp.put(autograding_zip_tmp,autograding_zip)
sftp.put(submission_zip_tmp,submission_zip)
with open(todo_queue_file, 'w') as outfile:
json.dump(queue_obj, outfile, sort_keys=True, indent=4)
sftp.put(todo_queue_file, todo_queue_file)
os.remove(todo_queue_file)
print("Successfully forwarded files to {0}".format(my_name))
success = True
except Exception as e:
autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="ERROR: could not move files due to the following error: "+str(e))
print("Could not move files due to the following error: {0}".format(e))
success = False
finally:
if sftp:
sftp.close()
if ssh:
ssh.close()
os.remove(autograding_zip_tmp)
os.remove(submission_zip_tmp)
return success
# log completion of job preparation
obj = packer_unpacker.load_queue_file_obj(JOB_ID,next_directory,next_to_grade)
partial_path = os.path.join(obj["gradeable"],obj["who"],str(obj["version"]))
item_name = os.path.join(obj["semester"],obj["course"],"submissions",partial_path)
is_batch = "regrade" in obj and obj["regrade"]
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, jobname=item_name, which_untrusted=which_untrusted,
is_batch=is_batch, message="Prepared job for " + which_machine)
return True
# ==================================================================================
# ==================================================================================
def unpack_job(which_machine,which_untrusted,next_directory,next_to_grade):
# variables needed for logging
obj = packer_unpacker.load_queue_file_obj(JOB_ID,next_directory,next_to_grade)
partial_path = os.path.join(obj["gradeable"],obj["who"],str(obj["version"]))
item_name = os.path.join(obj["semester"],obj["course"],"submissions",partial_path)
is_batch = "regrade" in obj and obj["regrade"]
# verify the DAEMON_USER is running this script
if not int(os.getuid()) == int(DAEMON_UID):
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="ERROR: must be run by DAEMON_USER")
raise SystemExit("ERROR: the submitty_autograding_shipper.py script must be run by the DAEMON_USER")
if which_machine == 'localhost':
address = which_machine
else:
address = which_machine.split('@')[1]
fully_qualified_domain_name = socket.getfqdn()
servername_workername = "{0}_{1}".format(fully_qualified_domain_name, address)
target_results_zip = os.path.join(SUBMITTY_DATA_DIR,"autograding_DONE",servername_workername+"_"+which_untrusted+"_results.zip")
target_done_queue_file = os.path.join(SUBMITTY_DATA_DIR,"autograding_DONE",servername_workername+"_"+which_untrusted+"_queue.json")
if which_machine == "localhost":
if not os.path.exists(target_done_queue_file):
return False
else:
local_done_queue_file = target_done_queue_file
local_results_zip = target_results_zip
else:
ssh = sftp = fd1 = fd2 = local_done_queue_file = local_results_zip = None
try:
user, host = which_machine.split("@")
ssh = establish_ssh_connection(which_machine, user, host)
sftp = ssh.open_sftp()
fd1, local_done_queue_file = tempfile.mkstemp()
fd2, local_results_zip = tempfile.mkstemp()
#remote path first, then local.
sftp.get(target_done_queue_file, local_done_queue_file)
sftp.get(target_results_zip, local_results_zip)
            #Because get works like cp rather than mv, we have to clean up.
sftp.remove(target_done_queue_file)
sftp.remove(target_results_zip)
success = True
#This is the normal case (still grading on the other end) so we don't need to print anything.
except (socket.timeout, TimeoutError) as e:
success = False
except FileNotFoundError:
# Remove results files
for var in [local_results_zip, local_done_queue_file]:
if var:
with contextlib.suppress(FileNotFoundError):
os.remove(var)
success = False
#In this more general case, we do want to print what the error was.
#TODO catch other types of exception as we identify them.
except Exception as e:
autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="ERROR: Could not retrieve the file from the foreign machine "+str(e))
print("ERROR: Could not retrieve the file from the foreign machine.\nERROR: {0}".format(e))
# Remove results files
for var in [local_results_zip, local_done_queue_file]:
if var:
with contextlib.suppress(FileNotFoundError):
os.remove(var)
success = False
finally:
# Close SSH connections
for var in [sftp, ssh]:
if var:
var.close()
# Close file descriptors
for var in [fd1, fd2]:
if var:
try:
os.close(var)
except Exception:
pass
if not success:
return False
# archive the results of grading
try:
success = packer_unpacker.unpack_grading_results_zip(which_machine,which_untrusted,local_results_zip)
except:
autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID,jobname=item_name,message="ERROR: Exception when unpacking zip. For more details, see traces entry.")
with contextlib.suppress(FileNotFoundError):
os.remove(local_results_zip)
success = False
with contextlib.suppress(FileNotFoundError):
os.remove(local_done_queue_file)
msg = "Unpacked job from " + which_machine if success else "ERROR: failure returned from worker machine"
print(msg)
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, jobname=item_name, which_untrusted=which_untrusted, is_batch=is_batch, message=msg)
return True
# ==================================================================================
def grade_queue_file(my_name, which_machine,which_untrusted,queue_file):
"""
Oversees the autograding of single item from the queue
:param queue_file: details of what to grade
:param which_machine: name of machine to send this job to (might be "localhost")
:param which_untrusted: specific untrusted user for this autograding job
"""
my_dir,my_file=os.path.split(queue_file)
pid = os.getpid()
directory = os.path.dirname(os.path.realpath(queue_file))
name = os.path.basename(os.path.realpath(queue_file))
grading_file = os.path.join(directory, "GRADING_" + name)
#TODO: break which_machine into id, address, and passphrase.
try:
# prepare the job
shipper_counter=0
#prep_job_success = prepare_job(my_name,which_machine, which_untrusted, my_dir, queue_file)
while not prepare_job(my_name,which_machine, which_untrusted, my_dir, queue_file):
time.sleep(5)
prep_job_success = True
if not prep_job_success:
print (my_name, " ERROR unable to prepare job: ", queue_file)
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message=str(my_name)+" ERROR unable to prepare job: " + queue_file)
else:
# then wait for grading to be completed
shipper_counter=0
while not unpack_job(which_machine, which_untrusted, my_dir, queue_file):
shipper_counter+=1
time.sleep(1)
if shipper_counter >= 10:
print (my_name,which_untrusted,"shipper wait for grade: ",queue_file)
shipper_counter=0
except Exception as e:
autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())
print (my_name, " ERROR attempting to grade item: ", queue_file, " exception=",str(e))
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message=str(my_name)+" ERROR attempting to grade item: " + queue_file + " exception " + repr(e))
# note: not necessary to acquire lock for these statements, but
# make sure you remove the queue file, then the grading file
try:
os.remove(queue_file)
except Exception as e:
autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())
print (my_name, " ERROR attempting to remove queue file: ", queue_file, " exception=",str(e))
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message=str(my_name)+" ERROR attempting to remove queue file: " + queue_file + " exception=" + str(e))
try:
os.remove(grading_file)
except Exception as e:
autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())
print (my_name, " ERROR attempting to remove grading file: ", grading_file, " exception=",str(e))
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message=str(my_name)+" ERROR attempting to remove grading file: " + grading_file + " exception=" + str(e))
# ==================================================================================
# ==================================================================================
def valid_github_user_id(userid):
# Github username may only contain alphanumeric characters or
# hyphens. Github username cannot have multiple consecutive
# hyphens. Github username cannot begin or end with a hyphen.
# Maximum is 39 characters.
#
# NOTE: We only scrub the input for allowed characters.
if (userid==''):
# GitHub userid cannot be empty
return False
checklegal = lambda char: char.isalnum() or char == '-'
filtered_userid = ''.join(list(filter(checklegal,userid)))
if not userid == filtered_userid:
return False
return True
def valid_github_repo_id(repoid):
# Only characters, numbers, dots, minus and underscore are allowed.
if (repoid==''):
# GitHub repoid cannot be empty
return False
checklegal = lambda char: char.isalnum() or char == '.' or char == '-' or char == '_'
filtered_repoid = ''.join(list(filter(checklegal,repoid)))
if not repoid == filtered_repoid:
return False
return True
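# Illustrative sketch (not part of the original script): the kinds of values the
# two validators above accept and reject.  Note that they only scrub characters;
# rules such as "no consecutive hyphens" are not enforced here.
def _example_github_id_checks():
    assert valid_github_user_id("octocat")
    assert valid_github_user_id("my-org-42")
    assert not valid_github_user_id("")             # empty is rejected
    assert not valid_github_user_id("bad name!")    # spaces / punctuation
    assert valid_github_repo_id("Submitty.test_repo-1")
    assert not valid_github_repo_id("repo/with/slashes")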
def checkout_vcs_repo(my_file):
print ("SHIPPER CHECKOUT VCS REPO ", my_file)
with open(my_file, 'r') as infile:
obj = json.load(infile)
partial_path = os.path.join(obj["gradeable"],obj["who"],str(obj["version"]))
course_dir = os.path.join(SUBMITTY_DATA_DIR, "courses", obj["semester"], obj["course"])
submission_path = os.path.join(course_dir, "submissions", partial_path)
checkout_path = os.path.join(course_dir, "checkout", partial_path)
results_path = os.path.join(course_dir, "results", partial_path)
is_vcs,vcs_type,vcs_base_url,vcs_subdirectory = packer_unpacker.get_vcs_info(SUBMITTY_DATA_DIR,obj["semester"],obj["course"],obj["gradeable"],obj["who"],obj["team"])
# cleanup the previous checkout (if it exists)
shutil.rmtree(checkout_path,ignore_errors=True)
os.makedirs(checkout_path, exist_ok=True)
job_id = "~VCS~"
try:
# If we are public or private github, we will have an empty vcs_subdirectory
if vcs_subdirectory == '':
with open (os.path.join(submission_path,".submit.VCS_CHECKOUT")) as submission_vcs_file:
VCS_JSON = json.load(submission_vcs_file)
git_user_id = VCS_JSON["git_user_id"]
git_repo_id = VCS_JSON["git_repo_id"]
if not valid_github_user_id(git_user_id):
raise Exception ("Invalid GitHub user/organization name: '"+git_user_id+"'")
if not valid_github_repo_id(git_repo_id):
raise Exception ("Invalid GitHub repository name: '"+git_repo_id+"'")
# construct path for GitHub
vcs_path="https://www.github.com/"+git_user_id+"/"+git_repo_id
# is vcs_subdirectory standalone or should it be combined with base_url?
elif vcs_subdirectory[0] == '/' or '://' in vcs_subdirectory:
vcs_path = vcs_subdirectory
else:
if '://' in vcs_base_url:
vcs_path = urllib.parse.urljoin(vcs_base_url, vcs_subdirectory)
else:
vcs_path = os.path.join(vcs_base_url, vcs_subdirectory)
# warning: --depth is ignored in local clones; use file:// instead.
if not '://' in vcs_path:
vcs_path = "file:///" + vcs_path
Path(results_path+"/logs").mkdir(parents=True, exist_ok=True)
checkout_log_file = os.path.join(results_path, "logs", "vcs_checkout.txt")
# grab the submission time
with open (os.path.join(submission_path,".submit.timestamp")) as submission_time_file:
submission_string = submission_time_file.read().rstrip()
# OPTION: A shallow clone with only the most recent commit
# from the submission timestamp.
#
# NOTE: if the student has set their computer time in the
# future, they could be confused that we don't grab their
# most recent code.
# NOTE: github repos currently fail (a bug?) with an error when
# --shallow-since is used:
# "fatal: The remote end hung up unexpectedly"
#
#clone_command = ['/usr/bin/git', 'clone', vcs_path, checkout_path, '--shallow-since='+submission_string, '-b', 'master']
# OPTION: A shallow clone, with just the most recent commit.
#
        # NOTE: If the server is busy, it might take seconds or
        # minutes for an available shipper to process the git
        # clone (and the timestamp might be slightly late).
#
# So we choose this option! (for now)
#
clone_command = ['/usr/bin/git', 'clone', vcs_path, checkout_path, '--depth', '1', '-b', 'master']
with open(checkout_log_file, 'a') as f:
print("VCS CHECKOUT", file=f)
print('vcs_base_url', vcs_base_url, file=f)
print('vcs_subdirectory', vcs_subdirectory, file=f)
print('vcs_path', vcs_path, file=f)
print(' '.join(clone_command), file=f)
print("\n====================================\n", file=f)
# git clone may fail -- because repository does not exist,
# or because we don't have appropriate access credentials
try:
subprocess.check_call(clone_command)
os.chdir(checkout_path)
# determine which version we need to checkout
# if the repo is empty or the master branch does not exist, this command will fail
try:
what_version = subprocess.check_output(['git', 'rev-list', '-n', '1', 'master'])
# old method: when we had the full history, roll-back to a version by date
#what_version = subprocess.check_output(['git', 'rev-list', '-n', '1', '--before="'+submission_string+'"', 'master'])
what_version = str(what_version.decode('utf-8')).rstrip()
if what_version == "":
# oops, pressed the grade button before a valid commit
shutil.rmtree(checkout_path, ignore_errors=True)
# old method:
#else:
# # and check out the right version
# subprocess.call(['git', 'checkout', '-b', 'grade', what_version])
subprocess.call(['ls', '-lR', checkout_path], stdout=open(checkout_log_file, 'a'))
print("\n====================================\n", file=open(checkout_log_file, 'a'))
subprocess.call(['du', '-skh', checkout_path], stdout=open(checkout_log_file, 'a'))
obj['revision'] = what_version
# exception on git rev-list
except subprocess.CalledProcessError as error:
autograding_utils.log_message(AUTOGRADING_LOG_PATH, job_id,message="ERROR: failed to determine version on master branch " + str(error))
os.chdir(checkout_path)
with open(os.path.join(checkout_path,"failed_to_determine_version_on_master_branch.txt"),'w') as f:
print(str(error),file=f)
print("\n",file=f)
print("Check to be sure the repository is not empty.\n",file=f)
print("Check to be sure the repository has a master branch.\n",file=f)
print("And check to be sure the timestamps on the master branch are reasonable.\n",file=f)
# exception on git clone
except subprocess.CalledProcessError as error:
autograding_utils.log_message(AUTOGRADING_LOG_PATH, job_id,message="ERROR: failed to clone repository " + str(error))
os.chdir(checkout_path)
with open(os.path.join(checkout_path,"failed_to_clone_repository.txt"),'w') as f:
print(str(error),file=f)
print("\n",file=f)
print("Check to be sure the repository exists.\n",file=f)
print("And check to be sure the submitty_daemon user has appropriate access credentials.\n",file=f)
# exception in constructing full git repository url/path
except Exception as error:
autograding_utils.log_message(AUTOGRADING_LOG_PATH, job_id,message="ERROR: failed to construct valid repository url/path" + str(error))
os.chdir(checkout_path)
with open(os.path.join(checkout_path,"failed_to_construct_valid_repository_url.txt"),'w') as f:
print(str(error),file=f)
print("\n",file=f)
print("Check to be sure the repository exists.\n",file=f)
print("And check to be sure the submitty_daemon user has appropriate access credentials.\n",file=f)
return obj
# ==================================================================================
def get_job(my_name,which_machine,my_capabilities,which_untrusted,overall_lock):
"""
Picks a job from the queue
:param overall_lock: a lock on the directory containing all queue files
"""
time_get_job_begin = dateutils.get_current_time()
overall_lock.acquire()
folder= INTERACTIVE_QUEUE
# ----------------------------------------------------------------
# Our first priority is to perform any awaiting VCS checkouts
# Note: This design is imperfect:
#
# * If all shippers are busy working on long-running autograding
# tasks there will be a delay of seconds or minutes between
# a student pressing the submission button and clone happening.
# This is a minor exploit allowing them to theoretically
# continue working on their submission past the deadline for
# the time period of the delay.
# -- This is not a significant, practical problem.
#
# * If multiple and/or large git submissions arrive close
# together, this shipper job will be tied up performing these
# clone operations. Because we don't release the lock, any
# other shippers that complete their work will also be blocked
# from either helping with the clones or tackling the next
# autograding job.
# -- Based on experience with actual submission patterns, we
# do not anticipate that this will be a significant
# bottleneck at this time.
#
# * If a git clone takes a very long time and/or hangs because of
# network problems, this could halt all work on the server.
# -- We'll need to monitor the production server.
#
# We plan to do a complete overhaul of the
# scheduler/shipper/worker and refactoring this design should be
# part of the project.
# Grab all the VCS files currently in the folder...
vcs_files = [str(f) for f in Path(folder).glob('VCS__*')]
for f in vcs_files:
vcs_file = f[len(folder)+1:]
no_vcs_file = f[len(folder)+1+5:]
# do the checkout
updated_obj = checkout_vcs_repo(folder+"/"+vcs_file)
# save the regular grading queue file
with open(os.path.join(folder,no_vcs_file), "w") as queue_file:
json.dump(updated_obj, queue_file)
# cleanup the vcs queue file
os.remove(folder+"/"+vcs_file)
# ----------------------------------------------------------------
# Grab all the files currently in the folder, sorted by creation
# time, and put them in the queue to be graded
files = [str(f) for f in Path(folder).glob('*')]
files_and_times = list()
for f in files:
try:
my_time = os.path.getctime(f)
except:
continue
tup = (f, my_time)
files_and_times.append(tup)
files_and_times = sorted(files_and_times, key=operator.itemgetter(1))
my_job=""
for full_path_file, file_time in files_and_times:
# get the file name (without the path)
just_file = full_path_file[len(folder)+1:]
# skip items that are already being graded
if (just_file[0:8]=="GRADING_"):
continue
grading_file = os.path.join(folder,"GRADING_"+just_file)
if grading_file in files:
continue
# found something to do
try:
with open(full_path_file, 'r') as infile:
queue_obj = json.load(infile)
except:
continue
#Check to make sure that we are capable of grading this submission
required_capabilities = queue_obj["required_capabilities"]
if not required_capabilities in my_capabilities:
continue
# prioritize interactive jobs over (batch) regrades
# if you've found an interactive job, exit early (since they are sorted by timestamp)
if not "regrade" in queue_obj or not queue_obj["regrade"]:
my_job = just_file
break
# otherwise it's a regrade, and if we don't already have a
# job, take it, but we have to search the rest of the list
if my_job == "":
my_job = just_file
if not my_job == "":
grading_file = os.path.join(folder, "GRADING_" + my_job)
# create the grading file
with open(os.path.join(grading_file), "w") as queue_file:
json.dump({"untrusted": which_untrusted}, queue_file)
overall_lock.release()
time_get_job_end = dateutils.get_current_time()
time_delta = time_get_job_end-time_get_job_begin
if time_delta > datetime.timedelta(milliseconds=100):
print (my_name, " WARNING: submitty_autograding shipper get_job time ", time_delta)
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message=str(my_name)+" WARNING: submitty_autograding shipper get_job time "+str(time_delta))
return (my_job)
# ==================================================================================
# ==================================================================================
def shipper_process(my_name,my_data,full_address,which_untrusted,overall_lock):
"""
Each shipper process spins in a loop, looking for a job that
matches the capabilities of this machine, and then oversees the
autograding of that job. Interactive jobs are prioritized over
batch (regrade) jobs. If no jobs are available, the shipper waits
on an event editing one of the queues.
"""
which_machine = full_address
my_capabilities = my_data[my_name]['capabilities']
# ignore keyboard interrupts in the shipper processes
signal.signal(signal.SIGINT, signal.SIG_IGN)
counter=0
while True:
try:
my_job = get_job(my_name,which_machine,my_capabilities,which_untrusted,overall_lock)
if not my_job == "":
counter=0
grade_queue_file(my_name,which_machine,which_untrusted,os.path.join(INTERACTIVE_QUEUE,my_job))
continue
else:
if counter == 0 or counter >= 10:
print ("{0} {1}: no available job".format(my_name, which_untrusted))
counter=0
counter+=1
time.sleep(1)
except Exception as e:
autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())
my_message = "ERROR in get_job {0} {1} {2}. For more details, see traces entry".format(which_machine,which_untrusted,str(e))
print (my_message)
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message=my_message)
time.sleep(1)
# ==================================================================================
# ==================================================================================
def launch_shippers(worker_status_map):
# verify the DAEMON_USER is running this script
if not int(os.getuid()) == int(DAEMON_UID):
raise SystemExit("ERROR: the submitty_autograding_shipper.py script must be run by the DAEMON_USER")
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="grade_scheduler.py launched")
# Clean up old files from previous shipping/autograding (any
# partially completed work will be re-done)
for file_path in Path(INTERACTIVE_QUEUE).glob("GRADING_*"):
file_path = str(file_path)
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="Remove old queue file: " + file_path)
os.remove(file_path)
for file_path in Path(SUBMITTY_DATA_DIR, "autograding_TODO").glob("untrusted*"):
file_path = str(file_path)
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="Remove autograding TODO file: " + file_path)
os.remove(file_path)
for file_path in Path(SUBMITTY_DATA_DIR, "autograding_DONE").glob("*"):
file_path = str(file_path)
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="Remove autograding DONE file: " + file_path)
os.remove(file_path)
# this lock will be used to edit the queue or new job event
overall_lock = multiprocessing.Lock()
# The names of the worker machines, the capabilities of each
# worker machine, and the number of workers per machine are stored
# in the autograding_workers json.
try:
autograding_workers_path = os.path.join(SUBMITTY_INSTALL_DIR, 'config', "autograding_workers.json")
with open(autograding_workers_path, 'r') as infile:
autograding_workers = json.load(infile)
except Exception as e:
autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())
raise SystemExit("ERROR: could not locate the autograding workers json: {0}".format(e))
# There must always be a primary machine, it may or may not have
# autograding workers.
if not "primary" in autograding_workers:
raise SystemExit("ERROR: autograding_workers.json contained no primary machine.")
# One (or more) of the machines must accept "default" jobs.
default_present = False
for name, machine in autograding_workers.items():
if "default" in machine["capabilities"]:
default_present = True
break
if not default_present:
raise SystemExit("ERROR: autograding_workers.json contained no machine with default capabilities")
# Launch a shipper process for every worker on the primary machine and each worker machine
total_num_workers = 0
processes = list()
for name, machine in autograding_workers.items():
if worker_status_map[name] == False:
print("{0} could not be reached, so we are not spinning up shipper threads.".format(name))
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="{0} could not be reached, so we are not spinning up shipper threads.".format(name))
continue
if 'enabled' in machine and machine['enabled'] == False:
print("{0} is disabled, so we are not spinning up shipper threads.".format(name))
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="{0} is disabled, so we are not spinning up shipper threads.")
continue
try:
full_address = ""
if machine["address"] != "localhost":
if machine["username"] == "":
raise SystemExit("ERROR: empty username for worker machine {0} ".format(machine["address"]))
full_address = "{0}@{1}".format(machine["username"], machine["address"])
else:
if not machine["username"] == "":
raise SystemExit('ERROR: username for primary (localhost) must be ""')
full_address = machine['address']
num_workers_on_machine = machine["num_autograding_workers"]
if num_workers_on_machine < 0:
raise SystemExit("ERROR: num_workers_on_machine for '{0}' must be non-negative.".format(machine))
single_machine_data = {name : machine}
single_machine_data = add_fields_to_autograding_worker_json(single_machine_data, name)
except Exception as e:
autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())
print("ERROR: autograding_workers.json entry for {0} contains an error: {1}. For more details, see trace entry.".format(name, e))
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="ERROR: autograding_workers.json entry for {0} contains an error: {1} For more details, see trace entry.".format(name,e))
continue
# launch the shipper threads
for i in range(0,num_workers_on_machine):
u = "untrusted" + str(i).zfill(2)
p = multiprocessing.Process(target=shipper_process,args=(name,single_machine_data,full_address, u,overall_lock))
p.start()
processes.append(p)
total_num_workers += num_workers_on_machine
# main monitoring loop
try:
while True:
alive = 0
for i in range(0,total_num_workers):
                if processes[i].is_alive():
alive = alive+1
else:
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="ERROR: process "+str(i)+" is not alive")
if alive != total_num_workers:
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="ERROR: #shippers="+str(total_num_workers)+" != #alive="+str(alive))
#print ("shippers= ",total_num_workers," alive=",alive)
time.sleep(1)
except KeyboardInterrupt:
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="grade_scheduler.py keyboard interrupt")
# just kill everything in this group id right now
        # NOTE: this may be a bug if the grandchildren have a different group id; they would not be killed
os.kill(-os.getpid(), signal.SIGKILL)
# run this to check if everything is dead
# ps xao pid,ppid,pgid,sid,comm,user | grep untrust
# everything's dead, including the main process so the rest of this will be ignored
# but this was mostly working...
# terminate the jobs
for i in range(0,total_num_workers):
processes[i].terminate()
# wait for them to join
for i in range(0,total_num_workers):
processes[i].join()
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="grade_scheduler.py terminated")
# ==================================================================================
if __name__ == "__main__":
# verify the DAEMON_USER is running this script
if not int(os.getuid()) == int(DAEMON_UID):
raise SystemExit("ERROR: the submitty_autograding_shipper.py script must be run by the DAEMON_USER")
worker_status_map = update_all_foreign_autograding_workers()
launch_shippers(worker_status_map)
|
ioloop_test.py
|
from concurrent.futures import ThreadPoolExecutor
from concurrent import futures
import contextlib
import datetime
import functools
import socket
import subprocess
import sys
import threading
import time
import types
from unittest import mock
import unittest
from tornado.escape import native_str
from tornado import gen
from tornado.ioloop import IOLoop, TimeoutError, PeriodicCallback
from tornado.log import app_log
from tornado.testing import AsyncTestCase, bind_unused_port, ExpectLog, gen_test
from tornado.test.util import skipIfNonUnix, skipOnTravis
import typing
if typing.TYPE_CHECKING:
from typing import List # noqa: F401
class TestIOLoop(AsyncTestCase):
def test_add_callback_return_sequence(self):
# A callback returning {} or [] shouldn't spin the CPU, see Issue #1803.
self.calls = 0
loop = self.io_loop
test = self
old_add_callback = loop.add_callback
def add_callback(self, callback, *args, **kwargs):
test.calls += 1
old_add_callback(callback, *args, **kwargs)
loop.add_callback = types.MethodType(add_callback, loop) # type: ignore
loop.add_callback(lambda: {}) # type: ignore
loop.add_callback(lambda: []) # type: ignore
loop.add_timeout(datetime.timedelta(milliseconds=50), loop.stop)
loop.start()
self.assertLess(self.calls, 10)
@skipOnTravis
def test_add_callback_wakeup(self):
# Make sure that add_callback from inside a running IOLoop
# wakes up the IOLoop immediately instead of waiting for a timeout.
def callback():
self.called = True
self.stop()
def schedule_callback():
self.called = False
self.io_loop.add_callback(callback)
# Store away the time so we can check if we woke up immediately
self.start_time = time.time()
self.io_loop.add_timeout(self.io_loop.time(), schedule_callback)
self.wait()
self.assertAlmostEqual(time.time(), self.start_time, places=2)
self.assertTrue(self.called)
@skipOnTravis
def test_add_callback_wakeup_other_thread(self):
def target():
# sleep a bit to let the ioloop go into its poll loop
time.sleep(0.01)
self.stop_time = time.time()
self.io_loop.add_callback(self.stop)
thread = threading.Thread(target=target)
self.io_loop.add_callback(thread.start)
self.wait()
delta = time.time() - self.stop_time
self.assertLess(delta, 0.1)
thread.join()
def test_add_timeout_timedelta(self):
self.io_loop.add_timeout(datetime.timedelta(microseconds=1), self.stop)
self.wait()
def test_multiple_add(self):
sock, port = bind_unused_port()
try:
self.io_loop.add_handler(
sock.fileno(), lambda fd, events: None, IOLoop.READ
)
# Attempting to add the same handler twice fails
# (with a platform-dependent exception)
self.assertRaises(
Exception,
self.io_loop.add_handler,
sock.fileno(),
lambda fd, events: None,
IOLoop.READ,
)
finally:
self.io_loop.remove_handler(sock.fileno())
sock.close()
def test_remove_without_add(self):
        # remove_handler should not throw an exception if called on an fd
        # that was never added.
sock, port = bind_unused_port()
try:
self.io_loop.remove_handler(sock.fileno())
finally:
sock.close()
def test_add_callback_from_signal(self):
# cheat a little bit and just run this normally, since we can't
# easily simulate the races that happen with real signal handlers
self.io_loop.add_callback_from_signal(self.stop)
self.wait()
def test_add_callback_from_signal_other_thread(self):
# Very crude test, just to make sure that we cover this case.
# This also happens to be the first test where we run an IOLoop in
# a non-main thread.
other_ioloop = IOLoop()
thread = threading.Thread(target=other_ioloop.start)
thread.start()
other_ioloop.add_callback_from_signal(other_ioloop.stop)
thread.join()
other_ioloop.close()
def test_add_callback_while_closing(self):
# add_callback should not fail if it races with another thread
# closing the IOLoop. The callbacks are dropped silently
# without executing.
closing = threading.Event()
def target():
other_ioloop.add_callback(other_ioloop.stop)
other_ioloop.start()
closing.set()
other_ioloop.close(all_fds=True)
other_ioloop = IOLoop()
thread = threading.Thread(target=target)
thread.start()
closing.wait()
for i in range(1000):
other_ioloop.add_callback(lambda: None)
@skipIfNonUnix # just because socketpair is so convenient
def test_read_while_writeable(self):
# Ensure that write events don't come in while we're waiting for
# a read and haven't asked for writeability. (the reverse is
# difficult to test for)
client, server = socket.socketpair()
try:
def handler(fd, events):
self.assertEqual(events, IOLoop.READ)
self.stop()
self.io_loop.add_handler(client.fileno(), handler, IOLoop.READ)
self.io_loop.add_timeout(
self.io_loop.time() + 0.01, functools.partial(server.send, b"asdf") # type: ignore
)
self.wait()
self.io_loop.remove_handler(client.fileno())
finally:
client.close()
server.close()
def test_remove_timeout_after_fire(self):
# It is not an error to call remove_timeout after it has run.
handle = self.io_loop.add_timeout(self.io_loop.time(), self.stop)
self.wait()
self.io_loop.remove_timeout(handle)
def test_remove_timeout_cleanup(self):
# Add and remove enough callbacks to trigger cleanup.
# Not a very thorough test, but it ensures that the cleanup code
# gets executed and doesn't blow up. This test is only really useful
# on PollIOLoop subclasses, but it should run silently on any
# implementation.
for i in range(2000):
timeout = self.io_loop.add_timeout(self.io_loop.time() + 3600, lambda: None)
self.io_loop.remove_timeout(timeout)
# HACK: wait two IOLoop iterations for the GC to happen.
self.io_loop.add_callback(lambda: self.io_loop.add_callback(self.stop))
self.wait()
def test_remove_timeout_from_timeout(self):
calls = [False, False]
# Schedule several callbacks and wait for them all to come due at once.
# t2 should be cancelled by t1, even though it is already scheduled to
# be run before the ioloop even looks at it.
now = self.io_loop.time()
def t1():
calls[0] = True
self.io_loop.remove_timeout(t2_handle)
self.io_loop.add_timeout(now + 0.01, t1)
def t2():
calls[1] = True
t2_handle = self.io_loop.add_timeout(now + 0.02, t2)
self.io_loop.add_timeout(now + 0.03, self.stop)
time.sleep(0.03)
self.wait()
self.assertEqual(calls, [True, False])
def test_timeout_with_arguments(self):
# This tests that all the timeout methods pass through *args correctly.
results = [] # type: List[int]
self.io_loop.add_timeout(self.io_loop.time(), results.append, 1)
self.io_loop.add_timeout(datetime.timedelta(seconds=0), results.append, 2)
self.io_loop.call_at(self.io_loop.time(), results.append, 3)
self.io_loop.call_later(0, results.append, 4)
self.io_loop.call_later(0, self.stop)
self.wait()
# The asyncio event loop does not guarantee the order of these
# callbacks.
self.assertEqual(sorted(results), [1, 2, 3, 4])
def test_add_timeout_return(self):
# All the timeout methods return non-None handles that can be
# passed to remove_timeout.
handle = self.io_loop.add_timeout(self.io_loop.time(), lambda: None)
self.assertFalse(handle is None)
self.io_loop.remove_timeout(handle)
def test_call_at_return(self):
handle = self.io_loop.call_at(self.io_loop.time(), lambda: None)
self.assertFalse(handle is None)
self.io_loop.remove_timeout(handle)
def test_call_later_return(self):
handle = self.io_loop.call_later(0, lambda: None)
self.assertFalse(handle is None)
self.io_loop.remove_timeout(handle)
def test_close_file_object(self):
"""When a file object is used instead of a numeric file descriptor,
        the object should be closed (by IOLoop.close(all_fds=True)),
        not just the fd.
"""
# Use a socket since they are supported by IOLoop on all platforms.
# Unfortunately, sockets don't support the .closed attribute for
# inspecting their close status, so we must use a wrapper.
class SocketWrapper(object):
def __init__(self, sockobj):
self.sockobj = sockobj
self.closed = False
def fileno(self):
return self.sockobj.fileno()
def close(self):
self.closed = True
self.sockobj.close()
sockobj, port = bind_unused_port()
socket_wrapper = SocketWrapper(sockobj)
io_loop = IOLoop()
io_loop.add_handler(socket_wrapper, lambda fd, events: None, IOLoop.READ)
io_loop.close(all_fds=True)
self.assertTrue(socket_wrapper.closed)
def test_handler_callback_file_object(self):
"""The handler callback receives the same fd object it passed in."""
server_sock, port = bind_unused_port()
fds = []
def handle_connection(fd, events):
fds.append(fd)
conn, addr = server_sock.accept()
conn.close()
self.stop()
self.io_loop.add_handler(server_sock, handle_connection, IOLoop.READ)
with contextlib.closing(socket.socket()) as client_sock:
client_sock.connect(("127.0.0.1", port))
self.wait()
self.io_loop.remove_handler(server_sock)
self.io_loop.add_handler(server_sock.fileno(), handle_connection, IOLoop.READ)
with contextlib.closing(socket.socket()) as client_sock:
client_sock.connect(("127.0.0.1", port))
self.wait()
self.assertIs(fds[0], server_sock)
self.assertEqual(fds[1], server_sock.fileno())
self.io_loop.remove_handler(server_sock.fileno())
server_sock.close()
def test_mixed_fd_fileobj(self):
server_sock, port = bind_unused_port()
def f(fd, events):
pass
self.io_loop.add_handler(server_sock, f, IOLoop.READ)
with self.assertRaises(Exception):
# The exact error is unspecified - some implementations use
# IOError, others use ValueError.
self.io_loop.add_handler(server_sock.fileno(), f, IOLoop.READ)
self.io_loop.remove_handler(server_sock.fileno())
server_sock.close()
def test_reentrant(self):
"""Calling start() twice should raise an error, not deadlock."""
returned_from_start = [False]
got_exception = [False]
def callback():
try:
self.io_loop.start()
returned_from_start[0] = True
except Exception:
got_exception[0] = True
self.stop()
self.io_loop.add_callback(callback)
self.wait()
self.assertTrue(got_exception[0])
self.assertFalse(returned_from_start[0])
def test_exception_logging(self):
"""Uncaught exceptions get logged by the IOLoop."""
self.io_loop.add_callback(lambda: 1 / 0)
self.io_loop.add_callback(self.stop)
with ExpectLog(app_log, "Exception in callback"):
self.wait()
def test_exception_logging_future(self):
"""The IOLoop examines exceptions from Futures and logs them."""
@gen.coroutine
def callback():
self.io_loop.add_callback(self.stop)
1 / 0
self.io_loop.add_callback(callback)
with ExpectLog(app_log, "Exception in callback"):
self.wait()
def test_exception_logging_native_coro(self):
"""The IOLoop examines exceptions from awaitables and logs them."""
async def callback():
# Stop the IOLoop two iterations after raising an exception
# to give the exception time to be logged.
self.io_loop.add_callback(self.io_loop.add_callback, self.stop)
1 / 0
self.io_loop.add_callback(callback)
with ExpectLog(app_log, "Exception in callback"):
self.wait()
def test_spawn_callback(self):
# Both add_callback and spawn_callback run directly on the IOLoop,
# so their errors are logged without stopping the test.
self.io_loop.add_callback(lambda: 1 / 0)
self.io_loop.add_callback(self.stop)
with ExpectLog(app_log, "Exception in callback"):
self.wait()
# A spawned callback is run directly on the IOLoop, so it will be
# logged without stopping the test.
self.io_loop.spawn_callback(lambda: 1 / 0)
self.io_loop.add_callback(self.stop)
with ExpectLog(app_log, "Exception in callback"):
self.wait()
@skipIfNonUnix
def test_remove_handler_from_handler(self):
# Create two sockets with simultaneous read events.
client, server = socket.socketpair()
try:
client.send(b"abc")
server.send(b"abc")
# After reading from one fd, remove the other from the IOLoop.
chunks = []
def handle_read(fd, events):
chunks.append(fd.recv(1024))
if fd is client:
self.io_loop.remove_handler(server)
else:
self.io_loop.remove_handler(client)
self.io_loop.add_handler(client, handle_read, self.io_loop.READ)
self.io_loop.add_handler(server, handle_read, self.io_loop.READ)
self.io_loop.call_later(0.1, self.stop)
self.wait()
# Only one fd was read; the other was cleanly removed.
self.assertEqual(chunks, [b"abc"])
finally:
client.close()
server.close()
@skipIfNonUnix
@gen_test
def test_init_close_race(self):
# Regression test for #2367
#
# Skipped on windows because of what looks like a bug in the
# proactor event loop when started and stopped on non-main
# threads.
def f():
for i in range(10):
loop = IOLoop()
loop.close()
yield gen.multi([self.io_loop.run_in_executor(None, f) for i in range(2)])
# Deliberately not a subclass of AsyncTestCase so the IOLoop isn't
# automatically set as current.
class TestIOLoopCurrent(unittest.TestCase):
def setUp(self):
self.io_loop = None # type: typing.Optional[IOLoop]
IOLoop.clear_current()
def tearDown(self):
if self.io_loop is not None:
self.io_loop.close()
def test_default_current(self):
self.io_loop = IOLoop()
# The first IOLoop with default arguments is made current.
self.assertIs(self.io_loop, IOLoop.current())
# A second IOLoop can be created but is not made current.
io_loop2 = IOLoop()
self.assertIs(self.io_loop, IOLoop.current())
io_loop2.close()
def test_non_current(self):
self.io_loop = IOLoop(make_current=False)
# The new IOLoop is not initially made current.
self.assertIsNone(IOLoop.current(instance=False))
# Starting the IOLoop makes it current, and stopping the loop
# makes it non-current. This process is repeatable.
for i in range(3):
def f():
self.current_io_loop = IOLoop.current()
assert self.io_loop is not None
self.io_loop.stop()
self.io_loop.add_callback(f)
self.io_loop.start()
self.assertIs(self.current_io_loop, self.io_loop)
# Now that the loop is stopped, it is no longer current.
self.assertIsNone(IOLoop.current(instance=False))
def test_force_current(self):
self.io_loop = IOLoop(make_current=True)
self.assertIs(self.io_loop, IOLoop.current())
with self.assertRaises(RuntimeError):
# A second make_current=True construction cannot succeed.
IOLoop(make_current=True)
# current() was not affected by the failed construction.
self.assertIs(self.io_loop, IOLoop.current())
class TestIOLoopCurrentAsync(AsyncTestCase):
@gen_test
def test_clear_without_current(self):
# If there is no current IOLoop, clear_current is a no-op (but
# should not fail). Use a thread so we see the threading.Local
# in a pristine state.
with ThreadPoolExecutor(1) as e:
yield e.submit(IOLoop.clear_current)
class TestIOLoopFutures(AsyncTestCase):
def test_add_future_threads(self):
with futures.ThreadPoolExecutor(1) as pool:
def dummy():
pass
self.io_loop.add_future(
pool.submit(dummy), lambda future: self.stop(future)
)
future = self.wait()
self.assertTrue(future.done())
self.assertTrue(future.result() is None)
@gen_test
def test_run_in_executor_gen(self):
event1 = threading.Event()
event2 = threading.Event()
def sync_func(self_event, other_event):
self_event.set()
other_event.wait()
# Note that return value doesn't actually do anything,
# it is just passed through to our final assertion to
# make sure it is passed through properly.
return self_event
# Run two synchronous functions, which would deadlock if not
# run in parallel.
res = yield [
IOLoop.current().run_in_executor(None, sync_func, event1, event2),
IOLoop.current().run_in_executor(None, sync_func, event2, event1),
]
self.assertEqual([event1, event2], res)
@gen_test
def test_run_in_executor_native(self):
event1 = threading.Event()
event2 = threading.Event()
def sync_func(self_event, other_event):
self_event.set()
other_event.wait()
return self_event
# Go through an async wrapper to ensure that the result of
# run_in_executor works with await and not just gen.coroutine
# (simply passing the underlying concurrent future would do that).
async def async_wrapper(self_event, other_event):
return await IOLoop.current().run_in_executor(
None, sync_func, self_event, other_event
)
res = yield [async_wrapper(event1, event2), async_wrapper(event2, event1)]
self.assertEqual([event1, event2], res)
@gen_test
def test_set_default_executor(self):
count = [0]
class MyExecutor(futures.ThreadPoolExecutor):
def submit(self, func, *args):
count[0] += 1
return super().submit(func, *args)
event = threading.Event()
def sync_func():
event.set()
executor = MyExecutor(1)
loop = IOLoop.current()
loop.set_default_executor(executor)
yield loop.run_in_executor(None, sync_func)
self.assertEqual(1, count[0])
self.assertTrue(event.is_set())
class TestIOLoopRunSync(unittest.TestCase):
def setUp(self):
self.io_loop = IOLoop()
def tearDown(self):
self.io_loop.close()
def test_sync_result(self):
with self.assertRaises(gen.BadYieldError):
self.io_loop.run_sync(lambda: 42)
def test_sync_exception(self):
with self.assertRaises(ZeroDivisionError):
self.io_loop.run_sync(lambda: 1 / 0)
def test_async_result(self):
@gen.coroutine
def f():
yield gen.moment
raise gen.Return(42)
self.assertEqual(self.io_loop.run_sync(f), 42)
def test_async_exception(self):
@gen.coroutine
def f():
yield gen.moment
1 / 0
with self.assertRaises(ZeroDivisionError):
self.io_loop.run_sync(f)
def test_current(self):
def f():
self.assertIs(IOLoop.current(), self.io_loop)
self.io_loop.run_sync(f)
def test_timeout(self):
@gen.coroutine
def f():
yield gen.sleep(1)
self.assertRaises(TimeoutError, self.io_loop.run_sync, f, timeout=0.01)
def test_native_coroutine(self):
@gen.coroutine
def f1():
yield gen.moment
async def f2():
await f1()
self.io_loop.run_sync(f2)
class TestPeriodicCallbackMath(unittest.TestCase):
def simulate_calls(self, pc, durations):
"""Simulate a series of calls to the PeriodicCallback.
Pass a list of call durations in seconds (negative values
work to simulate clock adjustments during the call, or more or
less equivalently, between calls). This method returns the
times at which each call would be made.
"""
calls = []
now = 1000
pc._next_timeout = now
for d in durations:
pc._update_next(now)
calls.append(pc._next_timeout)
now = pc._next_timeout + d
return calls
def dummy(self):
pass
def test_basic(self):
pc = PeriodicCallback(self.dummy, 10000)
self.assertEqual(
self.simulate_calls(pc, [0] * 5), [1010, 1020, 1030, 1040, 1050]
)
def test_overrun(self):
# If a call runs for too long, we skip entire cycles to get
# back on schedule.
call_durations = [9, 9, 10, 11, 20, 20, 35, 35, 0, 0, 0]
expected = [
1010,
1020,
1030, # first 3 calls on schedule
1050,
1070, # next 2 delayed one cycle
1100,
1130, # next 2 delayed 2 cycles
1170,
1210, # next 2 delayed 3 cycles
1220,
1230, # then back on schedule.
]
pc = PeriodicCallback(self.dummy, 10000)
self.assertEqual(self.simulate_calls(pc, call_durations), expected)
def test_clock_backwards(self):
pc = PeriodicCallback(self.dummy, 10000)
# Backwards jumps are ignored, potentially resulting in a
# slightly slow schedule (although we assume that when
# time.time() and time.monotonic() are different, time.time()
# is getting adjusted by NTP and is therefore more accurate)
self.assertEqual(
self.simulate_calls(pc, [-2, -1, -3, -2, 0]), [1010, 1020, 1030, 1040, 1050]
)
# For big jumps, we should perhaps alter the schedule, but we
# don't currently. This trace shows that we run callbacks
# every 10s of time.time(), but the first and second calls are
# 110s of real time apart because the backwards jump is
# ignored.
self.assertEqual(self.simulate_calls(pc, [-100, 0, 0]), [1010, 1020, 1030])
def test_jitter(self):
random_times = [0.5, 1, 0, 0.75]
expected = [1010, 1022.5, 1030, 1041.25]
call_durations = [0] * len(random_times)
pc = PeriodicCallback(self.dummy, 10000, jitter=0.5)
def mock_random():
return random_times.pop(0)
with mock.patch("random.random", mock_random):
self.assertEqual(self.simulate_calls(pc, call_durations), expected)
def test_timedelta(self):
pc = PeriodicCallback(lambda: None, datetime.timedelta(minutes=1, seconds=23))
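        # PeriodicCallback's callback_time is in milliseconds: 1 min 23 s = 83 s = 83000 ms.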
expected_callback_time = 83000
self.assertEqual(pc.callback_time, expected_callback_time)
class TestIOLoopConfiguration(unittest.TestCase):
def run_python(self, *statements):
stmt_list = [
"from tornado.ioloop import IOLoop",
"classname = lambda x: x.__class__.__name__",
] + list(statements)
args = [sys.executable, "-c", "; ".join(stmt_list)]
return native_str(subprocess.check_output(args)).strip()
def test_default(self):
# When asyncio is available, it is used by default.
cls = self.run_python("print(classname(IOLoop.current()))")
self.assertEqual(cls, "AsyncIOMainLoop")
cls = self.run_python("print(classname(IOLoop()))")
self.assertEqual(cls, "AsyncIOLoop")
def test_asyncio(self):
cls = self.run_python(
'IOLoop.configure("tornado.platform.asyncio.AsyncIOLoop")',
"print(classname(IOLoop.current()))",
)
self.assertEqual(cls, "AsyncIOMainLoop")
def test_asyncio_main(self):
cls = self.run_python(
"from tornado.platform.asyncio import AsyncIOMainLoop",
"AsyncIOMainLoop().install()",
"print(classname(IOLoop.current()))",
)
self.assertEqual(cls, "AsyncIOMainLoop")
if __name__ == "__main__":
unittest.main()
|
client.py
|
import socket
import sys
import threading
from threading import Thread
import pickle
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except socket.error, msg:
    print 'Socket could not be created. Error code: ' + str(msg[0]) + ', Message: ' + msg[1]
sys.exit()
ClientListener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#ClientListener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) used in Unix and Linux
portL = pickle.load(open("port", "rb"))
pickle.dump(portL + 1, open("port", "wb"))
try:
ClientListener.bind(('', portL))
except socket.error, msg:
print 'Bind failed. Error Code: ' + str(msg[0]) + ' Message: ' + msg[1]
sys.exit()
ClientListener.listen(10)
print "Socket now listening"
def client(host, port, s, portL):
try:
remote_ip = socket.gethostbyname(host)
except socket.gaierror:
print 'Hostname couldn\'t be resolved. Exiting'
sys.exit()
s.connect((remote_ip, port))
print 'Socket connected to ' + host + ' on ip ' + remote_ip
reply = s.recv(4096)
print reply
while True:
input = raw_input(">> ")
if not input:
continue
        elif input[0] == 'U':
            fileName = raw_input('Enter file name: ')
            if not fileName:
                print 'Invalid input'
                continue
# filePath = raw_input('Enter path: ')
# if not filePath:
# print 'Not valid input'
# continue
message = 'SHARE_FILES\n' + fileName
        elif input[0] == 'R':
            nickname = raw_input('Enter a nickname: ')
            if not nickname:
                print 'Invalid input'
                continue
message = 'REGISTER\n' + nickname
        elif input[0] == 'S':
            fileName = raw_input('Enter file name to be searched: ')
            if not fileName:
                print 'Invalid input'
                continue
message = 'SEARCH\n' + fileName
try:
s.sendall(message)
except socket.error:
print 'Send failed'
sys.exit()
reply = s.recv(4096)
if reply == "{}":
                print 'No such file'
continue
elif reply.split('\n')[0] == 'ERROR':
print reply.split('\n')[1]
sys.exit()
usersHavingFile = eval(reply) # convert reply into dictionary
# print usersHavingFile
if not usersHavingFile:
                print 'File not found'
continue
message = 'The following users have the file:\n'
for user in usersHavingFile.keys():
message = message + usersHavingFile[user]['nick'] + ' (' + user + ') (' + usersHavingFile[user][
'filePath'] + ')\n'
print message
            response = raw_input(
                'Write "Q", then a space, followed by the client IP shown above to download the file from that client: ')
            if not response:
                print 'Invalid input, please choose again'
continue
response = response.strip()
if response[0] == 'Q':
s1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
peerIP = response.split(' ')[1]
print peerIP
s1.connect(('127.0.0.1', portL))
queryMessage = 'DOWNLOAD\n' + fileName + '\n' + usersHavingFile[peerIP]['filePath']
print usersHavingFile[peerIP]['filePath']
try:
s1.sendall(queryMessage)
except socket.error:
print 'Send failed'
sys.exit()
                fileName = fileName + "_Received"
fw = open(fileName , 'wb+')
flag = 0
chunk = s1.recv(20480)
while chunk != 'SHUT_WR':
s1.send('received')
if chunk.split('\n')[0] == 'ERROR':
print chunk.split('\n')[0] + ' ' + chunk.split('\n')[1]
flag = 1
break
fw.write(chunk)
chunk = s1.recv(100)
if flag != 1:
print "\nFile saved in the receivedFiles folder inside your current folder"
else:
print "\nError while downloading the file"
fw.close()
s1.close()
continue
        elif input[0] == 'E':
s.close()
ClientListener.close()
sys.exit()
break
else:
print 'Unknown command'
continue
try:
s.sendall(message)
except socket.error:
print 'Send failed'
sys.exit()
reply = s.recv(4096)
print reply
s.close()
###########################################
def listenForSharing(ClientListener):
while True:
conn, addr = ClientListener.accept()# should get out the loop
data = conn.recv(1024)
if data.split('\n')[0] == 'DOWNLOAD':
fileName = data.split('\n')[1]
# filePath = data.split('\n')[2]
# print filePath + fileName
try:
fr = open( fileName, 'rb')
            except IOError:
conn.sendall('ERROR\nNo such file available')
continue
chunk = fr.read()
conn.send(chunk)
ack = conn.recv(100)
conn.sendall('SHUT_WR')
ClientListener.close()
###########################################
try:
host = 'localhost'
port = 5323
if __name__ == '__main__':
Thread(target=client, args=(host, port, s, portL)).start()
Thread(target=listenForSharing, args=(ClientListener,)).start()
except:
ClientListener.close()
|
email.py
|
from threading import Thread
from flask import current_app
from flask_mail import Message
from app import mail
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(subject, sender, recipients, text_body, html_body):
msg = Message(subject, sender=sender, recipients=recipients)
msg.body = text_body
msg.html = html_body
# mail.send(msg)
Thread(target=send_async_email, args=(current_app._get_current_object(), msg)).start()
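# --- Usage sketch (added for illustration; not part of the original module) ---
# send_email() builds the Message synchronously and hands the SMTP call to a
# background thread, passing the real app object because the worker thread has
# no application context of its own. A call might look like the commented
# sketch below; the addresses are assumptions, not values from this project.
#
#   send_email(subject='Password reset',
#              sender='noreply@example.com',
#              recipients=['user@example.com'],
#              text_body='Plain-text body',
#              html_body='<p>HTML body</p>')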
|
openshiftworkarounds.py
|
import os
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import SocketServer
import requests
import threading
from datetime import datetime
class S(BaseHTTPRequestHandler):
def _set_headers(self):
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.end_headers()
def do_GET(self):
self._set_headers()
uptime = str(datetime.now() - self.server.start_time).split('.')[0]
self.wfile.write("Tofbot uptime: %s" % uptime)
def do_HEAD(self):
self._set_headers()
def serve(server_class=HTTPServer, handler_class=S):
port = int(os.getenv("OPENSHIFT_PYTHON_PORT", "8080"))
ip = os.getenv('OPENSHIFT_PYTHON_IP', '')
server_address = (ip, port)
httpd = server_class(server_address, handler_class)
httpd.start_time = datetime.now()
httpd.serve_forever()
def enable():
server = threading.Thread(target=serve)
server.daemon = True
server.start()
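# --- Usage sketch (added for illustration; not part of the original module) ---
# enable() starts the keep-alive HTTP server in a daemon thread, so a caller
# must keep the main thread alive or the server dies with the process. A
# minimal way to run this module on its own might look like this (assuming the
# OPENSHIFT_* environment variables are set or the defaults are acceptable):
if __name__ == '__main__':
    import time
    enable()
    while True:
        time.sleep(60)  # keep the main (non-daemon) thread alive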
|
tcpserver.py
|
import socket
import threading
bind_ip = '0.0.0.0'
bind_port = 9999
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((bind_ip, bind_port))
server.listen(5)
print('[*] Listening on %s:%d' % (bind_ip, bind_port))
# this is our client-handling thread
def handle_client(client_socket):
# print out what the client sends
request = client_socket.recv(1024)
print('[*] Received: %s' % request)
# send back a packet
client_socket.send(b'ACK!')
client_socket.close()
while True:
client, addr = server.accept()
print('[*] Accepted connection from: %s:%d' % (addr[0], addr[1]))
# spin up our client thread to handle incoming data
client_handler = threading.Thread(target=handle_client, args=(client,))
client_handler.start()
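# --- Client sketch (added for illustration; not part of the original script) ---
# The accept loop above never returns, so a matching client has to run in a
# separate process or terminal. Something like the following exercises the
# server; the address and port simply mirror bind_ip/bind_port above.
#
#   import socket
#   client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   client.connect(('127.0.0.1', 9999))
#   client.send(b'Hello server')
#   print(client.recv(4096))  # expect b'ACK!'
#   client.close()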
|
ksl.py
|
import time
import json
import requests
import urllib3
from random import randint
from bs4 import BeautifulSoup
from threading import Thread
urllib3.disable_warnings()
BASE_URL = "https://jobs.ksl.com/search/posted/last-7-days"
HEADERS = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36"
}
JOBS = {}
def getJobDescriptions(url, headers):
data = requests.get(url=url, headers=headers, verify=False, timeout=20)
data.close()
soup = BeautifulSoup(data.text, "html.parser")
    descriptionTag = soup.find_all(
        "meta", {"property": "og:description"}
    )
description = descriptionTag[0]["content"]
JOBS[url]["description"] = description
def writeToFile():
global JOBS
with open("sample.json", "w") as outfile:
json.dump(JOBS, outfile)
def getJobListings(url, headers):
dataX = requests.get(url=url, headers=headers, verify=False, timeout=20)
soup = BeautifulSoup(dataX.text, "html.parser")
dataX.close()
script = soup.find_all('script', {'type': 'application/ld+json'})
content = script[0].contents[0]
jobsArray = json.loads(content)["itemListElement"]
threads = []
for job in jobsArray:
JOBS[job["url"]] = {
"name": job["title"],
"employer": job["hiringOrganization"]["name"],
"url": job["url"],
}
t = Thread(target=getJobDescriptions, args=(job["url"], headers))
threads.append(t)
for i in threads:
i.start()
    # Make sure all the job descriptions have been fetched
for i in threads:
i.join()
print(f"Number of jobs Scraped {len(JOBS)}")
writeToFile()
next_page = soup.find("a", {"class": "next link"})
if next_page is not None:
getJobListings(next_page.get("href"), HEADERS)
getJobListings(BASE_URL, HEADERS)
|
color_detector_node.py
|
#!/usr/bin/env python
from anti_instagram.AntiInstagram import *
from cv_bridge import CvBridge, CvBridgeError
from duckietown_msgs.msg import (AntiInstagramTransform, BoolStamped, Segment,
SegmentList, Vector2D, Twist2DStamped)
from duckietown_utils.instantiate_utils import instantiate
from duckietown_utils.jpg import image_cv_from_jpg
from geometry_msgs.msg import Point
from sensor_msgs.msg import CompressedImage, Image
from visualization_msgs.msg import Marker
from line_detector.line_detector_plot import *
from line_detector.timekeeper import TimeKeeper
import cv2
import numpy as np
import rospy
import threading
import time
class ColorDetectorNode(object):
def __init__(self):
self.node_name = "ColorDetectorNode"
# Thread lock
self.thread_lock = threading.Lock()
# Constructor of line detector
self.bridge = CvBridge()
self.active = True
self.stats = Stats()
        # Only log intermittently, once every intermittent_interval cycles
self.intermittent_interval = 100
self.intermittent_counter = 0
# color correction
self.ai = AntiInstagram()
# these will be added if it becomes verbose
self.pub_edge = None
self.pub_colorSegment = None
self.detector = None
self.verbose = None
self.updateParams(None)
# Publishers
self.pub_lane_recovery = rospy.Publisher("~lane_recovery", BoolStamped, queue_size=1)
self.pub_image_green = rospy.Publisher("~green_hsv", Image, queue_size=1)
self.pub_image_blue = rospy.Publisher("~blue_hsv", Image, queue_size=1)
self.pub_car_cmd = rospy.Publisher("~car_cmd",Twist2DStamped,queue_size=1)
# Subscribers
self.sub_image = rospy.Subscriber("~image", CompressedImage, self.cbImage, queue_size=1)
self.sub_transform = rospy.Subscriber("~transform", AntiInstagramTransform, self.cbTransform, queue_size=1)
self.sub_switch = rospy.Subscriber("~switch", BoolStamped, self.cbSwitch, queue_size=1)
rospy.loginfo("[%s] Initialized (verbose = %s)." %(self.node_name, self.verbose))
rospy.Timer(rospy.Duration.from_sec(2.0), self.updateParams)
def updateParams(self, _event):
old_verbose = self.verbose
self.verbose = rospy.get_param('~verbose', True)
# self.loginfo('verbose = %r' % self.verbose)
if self.verbose != old_verbose:
self.loginfo('Verbose is now %r' % self.verbose)
self.image_size = rospy.get_param('~img_size')
self.top_cutoff = rospy.get_param('~top_cutoff')
if self.detector is None:
c = rospy.get_param('~detector')
assert isinstance(c, list) and len(c) == 2, c
# if str(self.detector_config) != str(c):
self.loginfo('new detector config: %s' % str(c))
self.detector = instantiate(c[0], c[1])
# self.detector_config = c
def cbSwitch(self, switch_msg):
self.active = switch_msg.data
def cbImage(self, image_msg):
self.stop()
self.stats.received()
if not self.active:
#print "******************** no color detector *********************"
return
# Start a daemon thread to process the image
thread = threading.Thread(target=self.processImage,args=(image_msg,))
thread.setDaemon(True)
thread.start()
        # Returns right away
def cbTransform(self, transform_msg):
self.ai.shift = transform_msg.s[0:3]
self.ai.scale = transform_msg.s[3:6]
self.loginfo("AntiInstagram transform received")
def loginfo(self, s):
rospy.loginfo('[%s] %s' % (self.node_name, s))
def intermittent_log_now(self):
return self.intermittent_counter % self.intermittent_interval == 1
def intermittent_log(self, s):
if not self.intermittent_log_now():
return
self.loginfo('%3d:%s' % (self.intermittent_counter, s))
def processImage(self, image_msg):
if not self.thread_lock.acquire(False):
self.stats.skipped()
# Return immediately if the thread is locked
return
try:
self.processImage_(image_msg)
finally:
# Release the thread lock
self.thread_lock.release()
def processImage_(self, image_msg):
self.stats.processed()
if self.intermittent_log_now():
self.intermittent_log(self.stats.info())
self.stats.reset()
tk = TimeKeeper(image_msg)
self.intermittent_counter += 1
# Decode from compressed image with OpenCV
try:
image_cv = image_cv_from_jpg(image_msg.data)
except ValueError as e:
self.loginfo('Could not decode image: %s' % e)
return
tk.completed('decoded')
# Resize and crop image
hei_original, wid_original = image_cv.shape[0:2]
if self.image_size[0] != hei_original or self.image_size[1] != wid_original:
# image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
image_cv = cv2.resize(image_cv, (self.image_size[1], self.image_size[0]),
interpolation=cv2.INTER_NEAREST)
image_cv = image_cv[self.top_cutoff:,:,:]
tk.completed('resized')
# apply color correction: AntiInstagram
image_cv_corr = self.ai.applyTransform(image_cv)
image_cv_corr = cv2.convertScaleAbs(image_cv_corr)
tk.completed('corrected')
# set up parameter
hsv_green1 = np.array([70,100,60])
hsv_green2 = np.array([90,255,255])
hsv_blue1 = np.array([90,80,50])
hsv_blue2 = np.array([110,255,255])
# Set the image to be detected
hsv = cv2.cvtColor(image_cv_corr,cv2.COLOR_BGR2HSV)
green = cv2.inRange(hsv,hsv_green1,hsv_green2)
blue = cv2.inRange(hsv,hsv_blue1,hsv_blue2)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3, 3))
green = cv2.dilate(green, kernel)
blue = cv2.dilate(blue, kernel)
x = green[90:120,:]
y = blue[90:120,:]
msgg = BoolStamped()
if (x==255).sum() > 250:
print "green line detected!"
time.sleep(4)
print " 4 sec finish"
msgg.data = True
self.pub_lane_recovery.publish(msgg)
elif (y==255).sum() > 250:
print "blue line detected!"
time.sleep(7)
print " 7 sec finish"
msgg.data = True
self.pub_lane_recovery.publish(msgg)
else:
print "only red line detected"
time.sleep(1)
print " 1 sec finish"
msgg.data = True
self.pub_lane_recovery.publish(msgg)
tk.completed('prepared')
# VISUALIZATION only below
if self.verbose:
tk.completed('drawn')
# Publish the frame with lines
image_msg_out_green = self.bridge.cv2_to_imgmsg(green, "mono8")
image_msg_out_green.header.stamp = image_msg.header.stamp
self.pub_image_green.publish(image_msg_out_green)
image_msg_out_blue = self.bridge.cv2_to_imgmsg(blue, "mono8")
image_msg_out_blue.header.stamp = image_msg.header.stamp
self.pub_image_blue.publish(image_msg_out_blue)
tk.completed('pub_image')
self.intermittent_log(tk.getall())
def onShutdown(self):
self.loginfo("Shutdown.")
def stop(self):
car_control_msg = Twist2DStamped()
car_control_msg.v = 0.0
car_control_msg.omega = 0.0
self.pub_car_cmd.publish(car_control_msg)
class Stats():
def __init__(self):
self.nresets = 0
self.reset()
def reset(self):
self.nresets += 1
self.t0 = time.time()
self.nreceived = 0
self.nskipped = 0
self.nprocessed = 0
def received(self):
if self.nreceived == 0 and self.nresets == 1:
            rospy.loginfo('color_detector_node received first image.')
self.nreceived += 1
def skipped(self):
self.nskipped += 1
def processed(self):
if self.nprocessed == 0 and self.nresets == 1:
            rospy.loginfo('color_detector_node processing first image.')
self.nprocessed += 1
def info(self):
delta = time.time() - self.t0
if self.nreceived:
skipped_perc = (100.0 * self.nskipped / self.nreceived)
else:
skipped_perc = 0
def fps(x):
return '%.1f fps' % (x / delta)
m = ('In the last %.1f s: received %d (%s) processed %d (%s) skipped %d (%s) (%1.f%%)' %
(delta, self.nreceived, fps(self.nreceived),
self.nprocessed, fps(self.nprocessed),
self.nskipped, fps(self.nskipped), skipped_perc))
return m
if __name__ == '__main__':
rospy.init_node('color_detector',anonymous=False)
color_detector_node = ColorDetectorNode()
rospy.on_shutdown(color_detector_node.onShutdown)
rospy.spin()
|
dsr_service_motion_simple.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ##
# @brief [py example simple] motion basic test for doosan robot
# @author Kab Kyoum Kim (kabkyoum.kim@doosan.com)
import rospy
import os
import threading, time
import sys
sys.dont_write_bytecode = True
sys.path.append( os.path.abspath(os.path.join(os.path.dirname(__file__),"../../../../common/imp")) ) # get import path : DSR_ROBOT.py
# for single robot
ROBOT_ID = "dsr01"
ROBOT_MODEL = "m1013"
import DR_init
DR_init.__dsr__id = ROBOT_ID
DR_init.__dsr__model = ROBOT_MODEL
from DSR_ROBOT import *
def shutdown():
print "shutdown time!"
print "shutdown time!"
print "shutdown time!"
pub_stop.publish(stop_mode=STOP_TYPE_QUICK)
return 0
def msgRobotState_cb(msg):
msgRobotState_cb.count += 1
if (0==(msgRobotState_cb.count % 100)):
rospy.loginfo("________ ROBOT STATUS ________")
print(" robot_state : %d" % (msg.robot_state))
print(" robot_state_str : %s" % (msg.robot_state_str))
print(" actual_mode : %d" % (msg.actual_mode))
print(" actual_space : %d" % (msg.actual_space))
print(" current_posj : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.current_posj[0],msg.current_posj[1],msg.current_posj[2],msg.current_posj[3],msg.current_posj[4],msg.current_posj[5]))
print(" current_velj : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.current_velj[0],msg.current_velj[1],msg.current_velj[2],msg.current_velj[3],msg.current_velj[4],msg.current_velj[5]))
print(" joint_abs : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.joint_abs[0],msg.joint_abs[1],msg.joint_abs[2],msg.joint_abs[3],msg.joint_abs[4],msg.joint_abs[5]))
print(" joint_err : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.joint_err[0],msg.joint_err[1],msg.joint_err[2],msg.joint_err[3],msg.joint_err[4],msg.joint_err[5]))
print(" target_posj : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.target_posj[0],msg.target_posj[1],msg.target_posj[2],msg.target_posj[3],msg.target_posj[4],msg.target_posj[5]))
print(" target_velj : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.target_velj[0],msg.target_velj[1],msg.target_velj[2],msg.target_velj[3],msg.target_velj[4],msg.target_velj[5]))
print(" current_posx : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.current_posx[0],msg.current_posx[1],msg.current_posx[2],msg.current_posx[3],msg.current_posx[4],msg.current_posx[5]))
print(" current_velx : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.current_velx[0],msg.current_velx[1],msg.current_velx[2],msg.current_velx[3],msg.current_velx[4],msg.current_velx[5]))
print(" task_err : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.task_err[0],msg.task_err[1],msg.task_err[2],msg.task_err[3],msg.task_err[4],msg.task_err[5]))
print(" solution_space : %d" % (msg.solution_space))
sys.stdout.write(" rotation_matrix : ")
for i in range(0 , 3):
sys.stdout.write( "dim : [%d]"% i)
sys.stdout.write(" [ ")
for j in range(0 , 3):
sys.stdout.write("%d " % msg.rotation_matrix[i].data[j])
sys.stdout.write("] ")
print ##end line
print(" dynamic_tor : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.dynamic_tor[0],msg.dynamic_tor[1],msg.dynamic_tor[2],msg.dynamic_tor[3],msg.dynamic_tor[4],msg.dynamic_tor[5]))
print(" actual_jts : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.actual_jts[0],msg.actual_jts[1],msg.actual_jts[2],msg.actual_jts[3],msg.actual_jts[4],msg.actual_jts[5]))
print(" actual_ejt : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.actual_ejt[0],msg.actual_ejt[1],msg.actual_ejt[2],msg.actual_ejt[3],msg.actual_ejt[4],msg.actual_ejt[5]))
print(" actual_ett : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.actual_ett[0],msg.actual_ett[1],msg.actual_ett[2],msg.actual_ett[3],msg.actual_ett[4],msg.actual_ett[5]))
print(" sync_time : %7.3f" % (msg.sync_time))
print(" actual_bk : %d %d %d %d %d %d" % (msg.actual_bk[0],msg.actual_bk[1],msg.actual_bk[2],msg.actual_bk[3],msg.actual_bk[4],msg.actual_bk[5]))
print(" actual_bt : %d %d %d %d %d " % (msg.actual_bt[0],msg.actual_bt[1],msg.actual_bt[2],msg.actual_bt[3],msg.actual_bt[4]))
print(" actual_mc : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.actual_mc[0],msg.actual_mc[1],msg.actual_mc[2],msg.actual_mc[3],msg.actual_mc[4],msg.actual_mc[5]))
print(" actual_mt : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.actual_mt[0],msg.actual_mt[1],msg.actual_mt[2],msg.actual_mt[3],msg.actual_mt[4],msg.actual_mt[5]))
#print digital i/o
sys.stdout.write(" ctrlbox_digital_input : ")
for i in range(0 , 16):
sys.stdout.write("%d " % msg.ctrlbox_digital_input[i])
print ##end line
sys.stdout.write(" ctrlbox_digital_output: ")
for i in range(0 , 16):
sys.stdout.write("%d " % msg.ctrlbox_digital_output[i])
print
sys.stdout.write(" flange_digital_input : ")
for i in range(0 , 6):
sys.stdout.write("%d " % msg.flange_digital_input[i])
print
sys.stdout.write(" flange_digital_output : ")
for i in range(0 , 6):
sys.stdout.write("%d " % msg.flange_digital_output[i])
print
#print modbus i/o
sys.stdout.write(" modbus_state : " )
if len(msg.modbus_state) > 0:
for i in range(0 , len(msg.modbus_state)):
sys.stdout.write("[" + msg.modbus_state[i].modbus_symbol)
sys.stdout.write(", %d] " % msg.modbus_state[i].modbus_value)
print
print(" access_control : %d" % (msg.access_control))
print(" homming_completed : %d" % (msg.homming_completed))
print(" tp_initialized : %d" % (msg.tp_initialized))
print(" mastering_need : %d" % (msg.mastering_need))
print(" drl_stopped : %d" % (msg.drl_stopped))
print(" disconnected : %d" % (msg.disconnected))
msgRobotState_cb.count = 0
def thread_subscriber():
rospy.Subscriber('/'+ROBOT_ID +ROBOT_MODEL+'/state', RobotState, msgRobotState_cb)
rospy.spin()
#rospy.spinner(2)
if __name__ == "__main__":
rospy.init_node('dsr_service_motion_simple_py')
rospy.on_shutdown(shutdown)
t1 = threading.Thread(target=thread_subscriber)
t1.daemon = True
t1.start()
pub_stop = rospy.Publisher('/'+ROBOT_ID +ROBOT_MODEL+'/stop', RobotStop, queue_size=10)
set_velx(30,20) # set global task speed: 30(mm/sec), 20(deg/sec)
set_accx(60,40) # set global task accel: 60(mm/sec2), 40(deg/sec2)
velx=[50, 50]
accx=[100, 100]
p1= posj(0,0,0,0,0,0) #joint
p2= posj(0.0, 0.0, 90.0, 0.0, 90.0, 0.0) #joint
x1= posx(400, 500, 800.0, 0.0, 180.0, 0.0) #task
x2= posx(400, 500, 500.0, 0.0, 180.0, 0.0) #task
c1 = posx(559,434.5,651.5,0,180,0)
c2 = posx(559,434.5,251.5,0,180,0)
q0 = posj(0,0,0,0,0,0)
q1 = posj(10, -10, 20, -30, 10, 20)
q2 = posj(25, 0, 10, -50, 20, 40)
q3 = posj(50, 50, 50, 50, 50, 50)
q4 = posj(30, 10, 30, -20, 10, 60)
q5 = posj(20, 20, 40, 20, 0, 90)
qlist = [q0, q1, q2, q3, q4, q5]
x1 = posx(600, 600, 600, 0, 175, 0)
x2 = posx(600, 750, 600, 0, 175, 0)
x3 = posx(150, 600, 450, 0, 175, 0)
x4 = posx(-300, 300, 300, 0, 175, 0)
x5 = posx(-200, 700, 500, 0, 175, 0)
x6 = posx(600, 600, 400, 0, 175, 0)
xlist = [x1, x2, x3, x4, x5, x6]
X1 = posx(370, 670, 650, 0, 180, 0)
X1a = posx(370, 670, 400, 0, 180, 0)
X1a2= posx(370, 545, 400, 0, 180, 0)
X1b = posx(370, 595, 400, 0, 180, 0)
X1b2= posx(370, 670, 400, 0, 180, 0)
X1c = posx(370, 420, 150, 0, 180, 0)
X1c2= posx(370, 545, 150, 0, 180, 0)
X1d = posx(370, 670, 275, 0, 180, 0)
X1d2= posx(370, 795, 150, 0, 180, 0)
seg11 = posb(DR_LINE, X1, radius=20)
seg12 = posb(DR_CIRCLE, X1a, X1a2, radius=21)
seg14 = posb(DR_LINE, X1b2, radius=20)
seg15 = posb(DR_CIRCLE, X1c, X1c2, radius=22)
seg16 = posb(DR_CIRCLE, X1d, X1d2, radius=23)
b_list1 = [seg11, seg12, seg14, seg15, seg16]
while not rospy.is_shutdown():
movej(p2, vel=100, acc=100)
movejx(x1, vel=30, acc=60, sol=0)
movel(x2, velx, accx)
movec(c1, c2, velx, accx)
movesj(qlist, vel=100, acc=100)
movesx(xlist, vel=100, acc=100)
move_spiral(rev=9.5,rmax=20.0,lmax=50.0,time=20.0,axis=DR_AXIS_Z,ref=DR_TOOL)
move_periodic(amp =[10,0,0,0,30,0], period=1.0, atime=0.2, repeat=5, ref=DR_TOOL)
moveb(b_list1, vel=150, acc=250, ref=DR_BASE, mod=DR_MV_MOD_ABS)
print 'good bye!'
|
new_jumpcutter.py
|
import argparse
import misc_func
import playlist_list
import threading
import multiprocessing
import neverland
from pytube import Playlist
from tqdm import tqdm
from time import sleep
import os
import gc
import sys
from colorama import init, Fore, Style
init(autoreset=True)
gc.enable()
parser = argparse.ArgumentParser(description='Modifies a video file to play at different speeds when there is sound vs. silence.')
parser.add_argument('-i', type=str, help='the video file you want modified')
parser.add_argument('-u', type=str, help='A youtube url to download and process')
parser.add_argument('-t', type=int, default=int(multiprocessing.cpu_count()/4), help='Number of threads to use. DANGER: do not modify unless you understand the threading implementation in the code')
parser.add_argument('-o', type=str, default="", help="the output file. (optional. if not included, it'll use the input name)")
parser.add_argument('-dd', type=str, help="The directory to save the output to")
parser.add_argument('-p', type=str, help="A youtube playlist url to download and process")
parser.add_argument('--use_playlist_list', type=int, choices=[0, 1], default=0, help="Use Playlist List file")
parser.add_argument('--silent_threshold', type=float, default=0.03, help="the volume amount that frames' audio needs to surpass to be considered \"sounded\". It ranges from 0 (silence) to 1 (max volume)")
parser.add_argument('-sos', type=float, default=1.4, help="the speed that sounded (spoken) frames should be played at. Typically 1.")
parser.add_argument('-sis', type=float, default=20, help="the speed that silent frames should be played at. 999999 for jumpcutting.")
parser.add_argument('--frame_margin', type=float, default=5, help="some silent frames adjacent to sounded frames are included to provide context. How many frames on either the side of speech should be included? That's this variable.")
parser.add_argument('--sample_rate', type=float, default=44100, help="sample rate of the input and output videos")
parser.add_argument('--frame_rate', type=float, help="frame rate of the input and output videos. optional... I try to find it out myself, but it doesn't always work.")
parser.add_argument('--frame_quality', type=int, default=1, help="quality of frames to be extracted from input video. 1 is highest, 31 is lowest, 1 is the default.")
parser.add_argument('--playlist_init', type=int, default=0, help="If using a list of playlists, define the starting index; useful for resuming after an error. Default is 0 (zero-based).")
args = parser.parse_args()
#Globals
if args.frame_rate:
FRAME_RATE = args.frame_rate
else:
FRAME_RATE = None
SAMPLE_RATE = args.sample_rate
SILENT_THRESHOLD = args.silent_threshold
FRAME_SPREADAGE = args.frame_margin
NEW_SPEED = [args.sis, args.sos]
FRAME_QUALITY = args.frame_quality
use_playlist_list = args.use_playlist_list
playlist_itterator = args.playlist_init
threads = args.t
global processCount
processCount = 0
global processLock
if not (use_playlist_list):
playlist_list = [[args.dd, args.p]]
else:
playlist_list = playlist_list.playlist_list
pid_itterator = 0
def jumpcutter(pid, INPUT_FILE, DestiD, out, format, tqdm_func, color):
global processCount
global processLock
OUTPUT_FILE = DestiD + '/' + out + '.' + format
if os.path.exists(OUTPUT_FILE):
tqdm_func.write(color + '{} already exists..... Skipping file'.format(out))
pass
else:
neverland.process(pid, 2*threads, INPUT_FILE, OUTPUT_FILE, FRAME_RATE, SAMPLE_RATE, SILENT_THRESHOLD, FRAME_SPREADAGE, NEW_SPEED, FRAME_QUALITY)
        tqdm_func.write(color + 'Deleting Input File for Process {}'.format(pid))
os.remove(INPUT_FILE)
processLock.acquire()
processCount -= 1 #Locks prevent race condition when modifying global var
processLock.release()
tqdm_func.write(color + 'Process {} complete'.format(pid))
if __name__ == '__main__':
global processLock
processLock = threading.Lock()
for ddplaylist in playlist_list[playlist_itterator:]:
playlist = Playlist(ddplaylist[1])
playlist.populate_video_urls()
dd = ddplaylist[0]
if os.path.isdir(dd):
pass
else:
misc_func.createPath(dd)
print(Fore.RED + 'Processing {}'.format(dd))
for video in tqdm(playlist.video_urls):
try:
while processCount >= threads: #Limits Number Of Active threads, only start new thread after old one is finished
sleep(1)
if pid_itterator%2 == 0:
color = Fore.GREEN
elif pid_itterator%2 == 1:
color = Fore.CYAN
tqdm.write(color + 'Downloading File for process {}'.format(pid_itterator))
INPUT_FILE , OUTPUT_FILE, format = misc_func.downloadFile(video)
tqdm.write(color + '{} downloaded'.format(OUTPUT_FILE))
processLock.acquire()
processCount += 1 #Locks prevent race condition when modifying global var
processLock.release()
tqdm.write(color + 'Starting Process for Session {} on process thread {}'.format(pid_itterator, processCount))
P = threading.Thread(target=jumpcutter, args=(pid_itterator, INPUT_FILE, dd, OUTPUT_FILE, format, tqdm, color)) #Using threading instead of multiprocessing, allows global var modification
P.daemon=True
P.start()
except (KeyboardInterrupt, SystemExit):
print(Fore.RED + '\n! Received keyboard interrupt, quitting threads.\n')
sys.exit()
pid_itterator=pid_itterator+1
|
dag_processing.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import multiprocessing
import os
import re
import signal
import sys
import time
import zipfile
from abc import ABCMeta, abstractmethod
from datetime import datetime, timedelta
from importlib import import_module
import enum
from typing import NamedTuple, Iterable
import psutil
from setproctitle import setproctitle
import six
from six.moves import reload_module
from sqlalchemy import or_
from tabulate import tabulate
# To avoid circular imports
import airflow.models
from airflow.configuration import conf
from airflow.dag.base_dag import BaseDag, BaseDagBag
from airflow.exceptions import AirflowException
from airflow.settings import Stats
from airflow.models import errors
from airflow.settings import STORE_SERIALIZED_DAGS
from airflow.utils import timezone
from airflow.utils.helpers import reap_process_group
from airflow.utils.db import provide_session
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.state import State
if six.PY2:
ConnectionError = IOError
class SimpleDag(BaseDag):
"""
A simplified representation of a DAG that contains all attributes
required for instantiating and scheduling its associated tasks.
:param dag: the DAG
:type dag: airflow.models.DAG
:param pickle_id: ID associated with the pickled version of this DAG.
:type pickle_id: unicode
"""
def __init__(self, dag, pickle_id=None):
self._dag_id = dag.dag_id
self._task_ids = [task.task_id for task in dag.tasks]
self._full_filepath = dag.full_filepath
self._is_paused = dag.is_paused
self._concurrency = dag.concurrency
self._pickle_id = pickle_id
self._task_special_args = {}
for task in dag.tasks:
special_args = {}
if task.task_concurrency is not None:
special_args['task_concurrency'] = task.task_concurrency
if len(special_args) > 0:
self._task_special_args[task.task_id] = special_args
@property
def dag_id(self):
"""
:return: the DAG ID
:rtype: unicode
"""
return self._dag_id
@property
def task_ids(self):
"""
:return: A list of task IDs that are in this DAG
:rtype: list[unicode]
"""
return self._task_ids
@property
def full_filepath(self):
"""
:return: The absolute path to the file that contains this DAG's definition
:rtype: unicode
"""
return self._full_filepath
@property
def concurrency(self):
"""
:return: maximum number of tasks that can run simultaneously from this DAG
:rtype: int
"""
return self._concurrency
@property
def is_paused(self):
"""
:return: whether this DAG is paused or not
:rtype: bool
"""
return self._is_paused
@property
def pickle_id(self):
"""
:return: The pickle ID for this DAG, if it has one. Otherwise None.
:rtype: unicode
"""
return self._pickle_id
@property
def task_special_args(self):
return self._task_special_args
def get_task_special_arg(self, task_id, special_arg_name):
if task_id in self._task_special_args and special_arg_name in self._task_special_args[task_id]:
return self._task_special_args[task_id][special_arg_name]
else:
return None
class SimpleTaskInstance(object):
def __init__(self, ti):
self._dag_id = ti.dag_id
self._task_id = ti.task_id
self._execution_date = ti.execution_date
self._start_date = ti.start_date
self._end_date = ti.end_date
self._try_number = ti.try_number
self._state = ti.state
self._executor_config = ti.executor_config
if hasattr(ti, 'run_as_user'):
self._run_as_user = ti.run_as_user
else:
self._run_as_user = None
if hasattr(ti, 'pool'):
self._pool = ti.pool
else:
self._pool = None
if hasattr(ti, 'priority_weight'):
self._priority_weight = ti.priority_weight
else:
self._priority_weight = None
self._queue = ti.queue
self._key = ti.key
@property
def dag_id(self):
return self._dag_id
@property
def task_id(self):
return self._task_id
@property
def execution_date(self):
return self._execution_date
@property
def start_date(self):
return self._start_date
@property
def end_date(self):
return self._end_date
@property
def try_number(self):
return self._try_number
@property
def state(self):
return self._state
@property
def pool(self):
return self._pool
@property
def priority_weight(self):
return self._priority_weight
@property
def queue(self):
return self._queue
@property
def key(self):
return self._key
@property
def executor_config(self):
return self._executor_config
@provide_session
def construct_task_instance(self, session=None, lock_for_update=False):
"""
Construct a TaskInstance from the database based on the primary key
:param session: DB session.
:param lock_for_update: if True, indicates that the database should
lock the TaskInstance (issuing a FOR UPDATE clause) until the
session is committed.
"""
TI = airflow.models.TaskInstance
qry = session.query(TI).filter(
TI.dag_id == self._dag_id,
TI.task_id == self._task_id,
TI.execution_date == self._execution_date)
if lock_for_update:
ti = qry.with_for_update().first()
else:
ti = qry.first()
return ti
class SimpleDagBag(BaseDagBag):
"""
A collection of SimpleDag objects with some convenience methods.
"""
def __init__(self, simple_dags):
"""
Constructor.
        :param simple_dags: SimpleDag objects that should be in this bag
        :type simple_dags: list[airflow.utils.dag_processing.SimpleDag]
"""
self.simple_dags = simple_dags
self.dag_id_to_simple_dag = {}
for simple_dag in simple_dags:
self.dag_id_to_simple_dag[simple_dag.dag_id] = simple_dag
@property
def dag_ids(self):
"""
        :return: IDs of all the DAGs in this bag
:rtype: list[unicode]
"""
return self.dag_id_to_simple_dag.keys()
def get_dag(self, dag_id):
"""
:param dag_id: DAG ID
:type dag_id: unicode
:return: if the given DAG ID exists in the bag, return the BaseDag
corresponding to that ID. Otherwise, throw an Exception
:rtype: airflow.utils.dag_processing.SimpleDag
"""
if dag_id not in self.dag_id_to_simple_dag:
raise AirflowException("Unknown DAG ID {}".format(dag_id))
return self.dag_id_to_simple_dag[dag_id]
def correct_maybe_zipped(fileloc):
"""
If the path contains a folder with a .zip suffix, then
the folder is treated as a zip archive and path to zip is returned.
"""
_, archive, filename = re.search(
r'((.*\.zip){})?(.*)'.format(re.escape(os.sep)), fileloc).groups()
if archive and zipfile.is_zipfile(archive):
return archive
else:
return fileloc
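# Illustrative behaviour of correct_maybe_zipped (added as a usage note, not
# part of the original source), assuming '/dags/my_dags.zip' exists and is a
# valid zip archive:
#   correct_maybe_zipped('/dags/my_dags.zip/my_dag.py')  -> '/dags/my_dags.zip'
#   correct_maybe_zipped('/dags/my_dag.py')              -> '/dags/my_dag.py'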
COMMENT_PATTERN = re.compile(r"\s*#.*")
def list_py_file_paths(directory, safe_mode=conf.getboolean('core', 'DAG_DISCOVERY_SAFE_MODE', fallback=True),
include_examples=None):
"""
Traverse a directory and look for Python files.
:param directory: the directory to traverse
:type directory: unicode
:param safe_mode: whether to use a heuristic to determine whether a file
contains Airflow DAG definitions. If not provided, use the
core.DAG_DISCOVERY_SAFE_MODE configuration setting. If not set, default
to safe.
:type safe_mode: bool
:param include_examples: include example DAGs
:type include_examples: bool
:return: a list of paths to Python files in the specified directory
:rtype: list[unicode]
"""
if include_examples is None:
include_examples = conf.getboolean('core', 'LOAD_EXAMPLES')
file_paths = []
if directory is None:
return []
elif os.path.isfile(directory):
return [directory]
elif os.path.isdir(directory):
patterns_by_dir = {}
for root, dirs, files in os.walk(directory, followlinks=True):
patterns = patterns_by_dir.get(root, [])
ignore_file = os.path.join(root, '.airflowignore')
if os.path.isfile(ignore_file):
with open(ignore_file, 'r') as f:
# If we have new patterns create a copy so we don't change
# the previous list (which would affect other subdirs)
lines_no_comments = [COMMENT_PATTERN.sub("", line) for line in f.read().split("\n")]
patterns += [re.compile(line) for line in lines_no_comments if line]
# If we can ignore any subdirs entirely we should - fewer paths
# to walk is better. We have to modify the ``dirs`` array in
# place for this to affect os.walk
dirs[:] = [
d
for d in dirs
if not any(p.search(os.path.join(root, d)) for p in patterns)
]
# We want patterns defined in a parent folder's .airflowignore to
# apply to subdirs too
for d in dirs:
patterns_by_dir[os.path.join(root, d)] = list(patterns)
for f in files:
try:
file_path = os.path.join(root, f)
if not os.path.isfile(file_path):
continue
mod_name, file_ext = os.path.splitext(
os.path.split(file_path)[-1])
if file_ext != '.py' and not zipfile.is_zipfile(file_path):
continue
if any([re.findall(p, file_path) for p in patterns]):
continue
# Heuristic that guesses whether a Python file contains an
# Airflow DAG definition.
might_contain_dag = True
if safe_mode and not zipfile.is_zipfile(file_path):
with open(file_path, 'rb') as fp:
content = fp.read()
might_contain_dag = all(
[s in content for s in (b'DAG', b'airflow')])
if not might_contain_dag:
continue
file_paths.append(file_path)
except Exception:
log = LoggingMixin().log
log.exception("Error while examining %s", f)
if include_examples:
import airflow.example_dags
example_dag_folder = airflow.example_dags.__path__[0]
file_paths.extend(list_py_file_paths(example_dag_folder, safe_mode, False))
return file_paths
class AbstractDagFileProcessor(object):
"""
Processes a DAG file. See SchedulerJob.process_file() for more details.
"""
__metaclass__ = ABCMeta
@abstractmethod
def start(self):
"""
Launch the process to process the file
"""
raise NotImplementedError()
@abstractmethod
def terminate(self, sigkill=False):
"""
Terminate (and then kill) the process launched to process the file
"""
raise NotImplementedError()
@property
@abstractmethod
def pid(self):
"""
:return: the PID of the process launched to process the given file
"""
raise NotImplementedError()
@property
@abstractmethod
def exit_code(self):
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
raise NotImplementedError()
@property
@abstractmethod
def done(self):
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
raise NotImplementedError()
@property
@abstractmethod
def result(self):
"""
A list of simple dags found, and the number of import errors
:return: result of running SchedulerJob.process_file()
:rtype: tuple[list[airflow.utils.dag_processing.SimpleDag], int]
"""
raise NotImplementedError()
@property
@abstractmethod
def start_time(self):
"""
:return: When this started to process the file
:rtype: datetime
"""
raise NotImplementedError()
@property
@abstractmethod
def file_path(self):
"""
:return: the path to the file that this is processing
:rtype: unicode
"""
raise NotImplementedError()
DagParsingStat = NamedTuple('DagParsingStat', [
('file_paths', Iterable[str]),
('done', bool),
('all_files_processed', bool)
])
DagFileStat = NamedTuple('DagFileStat', [
('num_dags', int),
('import_errors', int),
('last_finish_time', datetime),
('last_duration', float),
('run_count', int),
])
class DagParsingSignal(enum.Enum):
AGENT_HEARTBEAT = 'agent_heartbeat'
TERMINATE_MANAGER = 'terminate_manager'
END_MANAGER = 'end_manager'
class DagFileProcessorAgent(LoggingMixin):
"""
Agent for DAG file processing. It is responsible for all DAG parsing
related jobs in scheduler process. Mainly it can spin up DagFileProcessorManager
in a subprocess, collect DAG parsing results from it and communicate
signal/DAG parsing stat with it.
This class runs in the main `airflow scheduler` process.
"""
def __init__(self,
dag_directory,
file_paths,
max_runs,
processor_factory,
processor_timeout,
async_mode):
"""
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:type dag_directory: unicode
:param file_paths: list of file paths that contain DAG definitions
:type file_paths: list[unicode]
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:type max_runs: int
:param processor_factory: function that creates processors for DAG
definition files. Arguments are (dag_definition_path, log_file_path)
:type processor_factory: (unicode, unicode, list) -> (AbstractDagFileProcessor)
:param processor_timeout: How long to wait before timing out a DAG file processor
:type processor_timeout: timedelta
:param async_mode: Whether to start agent in async mode
:type async_mode: bool
"""
self._file_paths = file_paths
self._file_path_queue = []
self._dag_directory = dag_directory
self._max_runs = max_runs
self._processor_factory = processor_factory
self._processor_timeout = processor_timeout
self._async_mode = async_mode
# Map from file path to the processor
self._processors = {}
# Pipe for communicating signals
self._process = None
self._done = False
# Initialized as true so we do not deactivate w/o any actual DAG parsing.
self._all_files_processed = True
self._parent_signal_conn = None
self._collected_dag_buffer = []
def start(self):
"""
Launch DagFileProcessorManager processor and start DAG parsing loop in manager.
"""
self._parent_signal_conn, child_signal_conn = multiprocessing.Pipe()
self._process = multiprocessing.Process(
target=type(self)._run_processor_manager,
args=(
self._dag_directory,
self._file_paths,
self._max_runs,
self._processor_factory,
self._processor_timeout,
child_signal_conn,
self._async_mode,
)
)
self._process.start()
self.log.info("Launched DagFileProcessorManager with pid: %s", self._process.pid)
def heartbeat(self):
"""
Should only be used when launched DAG file processor manager in sync mode.
Send agent heartbeat signal to the manager, requesting that it runs one
processing "loop".
Call wait_until_finished to ensure that any launched processors have
finished before continuing
"""
if not self._process.is_alive():
return
try:
self._parent_signal_conn.send(DagParsingSignal.AGENT_HEARTBEAT)
except ConnectionError:
            # If this died because of an error, it will be noticed and restarted
            # when harvest_simple_dags calls _heartbeat_manager.
pass
def wait_until_finished(self):
while self._parent_signal_conn.poll():
try:
result = self._parent_signal_conn.recv()
except EOFError:
break
self._process_message(result)
if isinstance(result, DagParsingStat):
# In sync mode we don't send this message from the Manager
# until all the running processors have finished
return
@staticmethod
def _run_processor_manager(dag_directory,
file_paths,
max_runs,
processor_factory,
processor_timeout,
signal_conn,
async_mode):
# Make this process start as a new process group - that makes it easy
# to kill all sub-process of this at the OS-level, rather than having
# to iterate the child processes
os.setpgid(0, 0)
setproctitle("airflow scheduler -- DagFileProcessorManager")
# Reload configurations and settings to avoid collision with parent process.
# Because this process may need custom configurations that cannot be shared,
# e.g. RotatingFileHandler. And it can cause connection corruption if we
# do not recreate the SQLA connection pool.
os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER'] = 'True'
os.environ['AIRFLOW__CORE__COLORED_CONSOLE_LOG'] = 'False'
# Replicating the behavior of how logging module was loaded
# in logging_config.py
reload_module(import_module(airflow.settings.LOGGING_CLASS_PATH.rsplit('.', 1)[0]))
reload_module(airflow.settings)
airflow.settings.initialize()
del os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER']
processor_manager = DagFileProcessorManager(dag_directory,
file_paths,
max_runs,
processor_factory,
processor_timeout,
signal_conn,
async_mode)
processor_manager.start()
def harvest_simple_dags(self):
"""
Harvest DAG parsing results from result queue and sync metadata from stat queue.
:return: List of parsing result in SimpleDag format.
"""
# Receive any pending messages before checking if the process has exited.
while self._parent_signal_conn.poll():
try:
result = self._parent_signal_conn.recv()
except (EOFError, ConnectionError):
break
self._process_message(result)
simple_dags = self._collected_dag_buffer
self._collected_dag_buffer = []
# If it died unexpectedly restart the manager process
self._heartbeat_manager()
return simple_dags
def _process_message(self, message):
self.log.debug("Received message of type %s", type(message).__name__)
if isinstance(message, DagParsingStat):
self._sync_metadata(message)
else:
self._collected_dag_buffer.append(message)
def _heartbeat_manager(self):
"""
Heartbeat DAG file processor and restart it if we are not done.
"""
if self._process and not self._process.is_alive():
self._process.join(timeout=0)
if not self.done:
self.log.warning(
"DagFileProcessorManager (PID=%d) exited with exit code %d - re-launching",
self._process.pid, self._process.exitcode
)
self.start()
def _sync_metadata(self, stat):
"""
Sync metadata from stat queue and only keep the latest stat.
"""
self._file_paths = stat.file_paths
self._done = stat.done
self._all_files_processed = stat.all_files_processed
@property
def file_paths(self):
return self._file_paths
@property
def done(self):
return self._done
@property
def all_files_processed(self):
return self._all_files_processed
def terminate(self):
"""
Send termination signal to DAG parsing processor manager
and expect it to terminate all DAG file processors.
"""
if self._process and self._process.is_alive():
self.log.info("Sending termination message to manager.")
try:
self._parent_signal_conn.send(DagParsingSignal.TERMINATE_MANAGER)
except ConnectionError:
pass
def end(self):
"""
Terminate (and then kill) the manager process launched.
:return:
"""
if not self._process:
self.log.warning('Ending without manager process.')
return
reap_process_group(self._process.pid, log=self.log)
self._parent_signal_conn.close()
class DagFileProcessorManager(LoggingMixin):
"""
Given a list of DAG definition files, this kicks off several processors
in parallel to process them and put the results to a multiprocessing.Queue
for DagFileProcessorAgent to harvest. The parallelism is limited and as the
processors finish, more are launched. The files are processed over and
over again, but no more often than the specified interval.
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:type dag_directory: unicode
:param file_paths: list of file paths that contain DAG definitions
:type file_paths: list[unicode]
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:type max_runs: int
:param processor_factory: function that creates processors for DAG
definition files. Arguments are (dag_definition_path)
:type processor_factory: (unicode, unicode, list) -> (AbstractDagFileProcessor)
:param processor_timeout: How long to wait before timing out a DAG file processor
:type processor_timeout: timedelta
:param signal_conn: connection to communicate signal with processor agent.
:type signal_conn: airflow.models.connection.Connection
:param async_mode: whether to start the manager in async mode
:type async_mode: bool
"""
def __init__(self,
dag_directory,
file_paths,
max_runs,
processor_factory,
processor_timeout,
signal_conn,
async_mode=True):
self._file_paths = file_paths
self._file_path_queue = []
self._dag_directory = dag_directory
self._max_runs = max_runs
self._processor_factory = processor_factory
self._signal_conn = signal_conn
self._async_mode = async_mode
self._parallelism = conf.getint('scheduler', 'max_threads')
if 'sqlite' in conf.get('core', 'sql_alchemy_conn') and self._parallelism > 1:
self.log.warning(
"Because we cannot use more than 1 thread (max_threads = {}) "
"when using sqlite. So we set parallelism to 1.".format(self._parallelism)
)
self._parallelism = 1
# Parse and schedule each file no faster than this interval.
self._file_process_interval = conf.getint('scheduler',
'min_file_process_interval')
# How often to print out DAG file processing stats to the log. Default to
# 30 seconds.
self.print_stats_interval = conf.getint('scheduler',
'print_stats_interval')
        # How many seconds to wait for a task heartbeat before marking it as a zombie.
self._zombie_threshold_secs = (
conf.getint('scheduler', 'scheduler_zombie_task_threshold'))
# Map from file path to the processor
self._processors = {}
self._heartbeat_count = 0
# Map from file path to stats about the file
self._file_stats = {} # type: dict(str, DagFileStat)
self._last_zombie_query_time = None
# Last time that the DAG dir was traversed to look for files
self.last_dag_dir_refresh_time = timezone.utcnow()
# Last time stats were printed
self.last_stat_print_time = timezone.datetime(2000, 1, 1)
# TODO: Remove magic number
self._zombie_query_interval = 10
self._zombies = []
# How long to wait before timing out a process to parse a DAG file
self._processor_timeout = processor_timeout
# How often to scan the DAGs directory for new files. Default to 5 minutes.
self.dag_dir_list_interval = conf.getint('scheduler',
'dag_dir_list_interval')
self._log = logging.getLogger('airflow.processor_manager')
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
def _exit_gracefully(self, signum, frame):
"""
Helper method to clean up DAG file processors to avoid leaving orphan processes.
"""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
self.terminate()
self.end()
self.log.debug("Finished terminating DAG processors.")
sys.exit(os.EX_OK)
def start(self):
"""
Use multiple processes to parse and generate tasks for the
DAGs in parallel. By processing them in separate processes,
we can get parallelism and isolation from potentially harmful
user code.
"""
# Start a new process group
os.setpgid(0, 0)
self.log.info("Processing files using up to %s processes at a time ", self._parallelism)
self.log.info("Process each file at most once every %s seconds", self._file_process_interval)
self.log.info(
"Checking for new files in %s every %s seconds", self._dag_directory, self.dag_dir_list_interval
)
# In sync mode we want timeout=None -- wait forever until a message is received
if self._async_mode:
poll_time = 0.0
else:
poll_time = None
# Used to track how long it takes us to get once around every file in the DAG folder.
self._parsing_start_time = timezone.utcnow()
while True:
loop_start_time = time.time()
if self._signal_conn.poll(poll_time):
agent_signal = self._signal_conn.recv()
self.log.debug("Received %s signal from DagFileProcessorAgent", agent_signal)
if agent_signal == DagParsingSignal.TERMINATE_MANAGER:
self.terminate()
break
elif agent_signal == DagParsingSignal.END_MANAGER:
self.end()
sys.exit(os.EX_OK)
elif agent_signal == DagParsingSignal.AGENT_HEARTBEAT:
# continue the loop to parse dags
pass
elif not self._async_mode:
# In "sync" mode we don't want to parse the DAGs until we
# are told to (as that would open another connection to the
                    # SQLite DB, which isn't good practice).
continue
self._refresh_dag_dir()
self._find_zombies()
simple_dags = self.heartbeat()
for simple_dag in simple_dags:
self._signal_conn.send(simple_dag)
if not self._async_mode:
self.log.debug(
"Waiting for processors to finish since we're using sqlite")
# Wait until the running DAG processors are finished before
# sending a DagParsingStat message back. This means the Agent
# can tell we've got to the end of this iteration when it sees
# this type of message
self.wait_until_finished()
# Collect anything else that has finished, but don't kick off any more processors
simple_dags = self.collect_results()
for simple_dag in simple_dags:
self._signal_conn.send(simple_dag)
self._print_stat()
all_files_processed = all(self.get_last_finish_time(x) is not None for x in self.file_paths)
max_runs_reached = self.max_runs_reached()
dag_parsing_stat = DagParsingStat(self._file_paths,
max_runs_reached,
all_files_processed,
)
self._signal_conn.send(dag_parsing_stat)
if max_runs_reached:
self.log.info("Exiting dag parsing loop as all files "
"have been processed %s times", self._max_runs)
break
if self._async_mode:
loop_duration = time.time() - loop_start_time
if loop_duration < 1:
poll_time = 1 - loop_duration
else:
poll_time = 0.0
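            # Loop pacing (illustrative): if this pass took 0.3 s, the signal
            # connection is polled for up to 0.7 s before the next pass, so in
            # async mode the manager iterates roughly once per second; a pass
            # slower than 1 s starts the next one immediately (poll_time = 0).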
def _refresh_dag_dir(self):
"""
        Refresh file paths from the dag directory if the refresh interval has elapsed.
"""
now = timezone.utcnow()
elapsed_time_since_refresh = (now - self.last_dag_dir_refresh_time).total_seconds()
if elapsed_time_since_refresh > self.dag_dir_list_interval:
# Build up a list of Python files that could contain DAGs
self.log.info("Searching for files in %s", self._dag_directory)
self._file_paths = list_py_file_paths(self._dag_directory)
self.last_dag_dir_refresh_time = now
self.log.info("There are %s files in %s", len(self._file_paths), self._dag_directory)
self.set_file_paths(self._file_paths)
try:
self.log.debug("Removing old import errors")
self.clear_nonexistent_import_errors()
except Exception:
self.log.exception("Error removing old import errors")
if STORE_SERIALIZED_DAGS:
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.dag import DagModel
SerializedDagModel.remove_deleted_dags(self._file_paths)
DagModel.deactivate_deleted_dags(self._file_paths)
def _print_stat(self):
"""
Occasionally print out stats about how fast the files are getting processed
"""
if self.print_stats_interval > 0 and (
timezone.utcnow() -
self.last_stat_print_time).total_seconds() > self.print_stats_interval:
if self._file_paths:
self._log_file_processing_stats(self._file_paths)
self.last_stat_print_time = timezone.utcnow()
@provide_session
def clear_nonexistent_import_errors(self, session):
"""
Clears import errors for files that no longer exist.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
"""
query = session.query(errors.ImportError)
if self._file_paths:
query = query.filter(
~errors.ImportError.filename.in_(self._file_paths)
)
query.delete(synchronize_session='fetch')
session.commit()
def _log_file_processing_stats(self, known_file_paths):
"""
Print out stats about how files are getting processed.
:param known_file_paths: a list of file paths that may contain Airflow
DAG definitions
:type known_file_paths: list[unicode]
:return: None
"""
# File Path: Path to the file containing the DAG definition
# PID: PID associated with the process that's processing the file. May
# be empty.
# Runtime: If the process is currently running, how long it's been
# running for in seconds.
# Last Runtime: If the process ran before, how long did it take to
# finish in seconds
# Last Run: When the file finished processing in the previous run.
headers = ["File Path",
"PID",
"Runtime",
"# DAGs",
"# Errors",
"Last Runtime",
"Last Run"]
rows = []
now = timezone.utcnow()
for file_path in known_file_paths:
last_runtime = self.get_last_runtime(file_path)
num_dags = self.get_last_dag_count(file_path)
num_errors = self.get_last_error_count(file_path)
file_name = os.path.basename(file_path)
file_name = os.path.splitext(file_name)[0].replace(os.sep, '.')
processor_pid = self.get_pid(file_path)
processor_start_time = self.get_start_time(file_path)
runtime = ((now - processor_start_time).total_seconds() if processor_start_time else None)
last_run = self.get_last_finish_time(file_path)
if last_run:
seconds_ago = (now - last_run).total_seconds()
Stats.gauge('dag_processing.last_run.seconds_ago.{}'.format(file_name), seconds_ago)
if runtime:
Stats.timing('dag_processing.last_duration.{}'.format(file_name), runtime)
# TODO: Remove before Airflow 2.0
Stats.timing('dag_processing.last_runtime.{}'.format(file_name), runtime)
rows.append((file_path,
processor_pid,
runtime,
num_dags,
num_errors,
last_runtime,
last_run))
        # Sort by longest last runtime. (Can't sort None values in python3)
        rows = sorted(rows, key=lambda x: x[5] or 0.0)
formatted_rows = []
for file_path, pid, runtime, num_dags, num_errors, last_runtime, last_run in rows:
formatted_rows.append((file_path,
pid,
"{:.2f}s".format(runtime) if runtime else None,
num_dags,
num_errors,
"{:.2f}s".format(last_runtime) if last_runtime else None,
last_run.strftime("%Y-%m-%dT%H:%M:%S") if last_run else None
))
log_str = ("\n" +
"=" * 80 +
"\n" +
"DAG File Processing Stats\n\n" +
tabulate(formatted_rows, headers=headers) +
"\n" +
"=" * 80)
self.log.info(log_str)
@property
def file_paths(self):
return self._file_paths
def get_pid(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the PID of the process processing the given file or None if
the specified file is not being processed
:rtype: int
"""
if file_path in self._processors:
return self._processors[file_path].pid
return None
def get_all_pids(self):
"""
:return: a list of the PIDs for the processors that are running
:rtype: List[int]
"""
return [x.pid for x in self._processors.values()]
def get_last_runtime(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the runtime (in seconds) of the process of the last run, or
None if the file was never processed.
:rtype: float
"""
stat = self._file_stats.get(file_path)
return stat.last_duration if stat else None
def get_last_dag_count(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the number of dags loaded from that file, or None if the file
was never processed.
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.num_dags if stat else None
def get_last_error_count(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the number of import errors from processing, or None if the file
was never processed.
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.import_errors if stat else None
def get_last_finish_time(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the finish time of the process of the last run, or None if the
file was never processed.
:rtype: datetime
"""
stat = self._file_stats.get(file_path)
return stat.last_finish_time if stat else None
def get_start_time(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the start time of the process that's processing the
specified file or None if the file is not currently being processed
:rtype: datetime
"""
if file_path in self._processors:
return self._processors[file_path].start_time
return None
def get_run_count(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the number of times the given file has been parsed
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.run_count if stat else 0
def set_file_paths(self, new_file_paths):
"""
Update this with a new set of paths to DAG definition files.
:param new_file_paths: list of paths to DAG definition files
:type new_file_paths: list[unicode]
:return: None
"""
self._file_paths = new_file_paths
self._file_path_queue = [x for x in self._file_path_queue
if x in new_file_paths]
# Stop processors that are working on deleted files
filtered_processors = {}
for file_path, processor in self._processors.items():
if file_path in new_file_paths:
filtered_processors[file_path] = processor
else:
self.log.warning("Stopping processor for %s", file_path)
Stats.decr('dag_processing.processes')
processor.terminate()
self._file_stats.pop(file_path)
self._processors = filtered_processors
def wait_until_finished(self):
"""
Sleeps until all the processors are done.
"""
for file_path, processor in self._processors.items():
while not processor.done:
time.sleep(0.1)
def collect_results(self):
"""
Collect the result from any finished DAG processors
:return: a list of SimpleDags that were produced by processors that
have finished since the last time this was called
:rtype: list[airflow.utils.dag_processing.SimpleDag]
"""
self._kill_timed_out_processors()
finished_processors = {}
""":type : dict[unicode, AbstractDagFileProcessor]"""
running_processors = {}
""":type : dict[unicode, AbstractDagFileProcessor]"""
for file_path, processor in self._processors.items():
if processor.done:
self.log.debug("Processor for %s finished", file_path)
Stats.decr('dag_processing.processes')
now = timezone.utcnow()
finished_processors[file_path] = processor
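                # processor.result is either None (the processor exited without
                # producing a result) or a (simple_dags, import_error_count)
                # tuple; DagFileStat fields are (num_dags, import_errors,
                # last_finish_time, last_duration, run_count).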
stat = DagFileStat(
len(processor.result[0]) if processor.result is not None else 0,
processor.result[1] if processor.result is not None else -1,
now,
(now - processor.start_time).total_seconds(),
self.get_run_count(file_path) + 1,
)
self._file_stats[file_path] = stat
else:
running_processors[file_path] = processor
self._processors = running_processors
self.log.debug("%s/%s DAG parsing processes running",
len(self._processors), self._parallelism)
self.log.debug("%s file paths queued for processing",
len(self._file_path_queue))
# Collect all the DAGs that were found in the processed files
simple_dags = []
for file_path, processor in finished_processors.items():
if processor.result is None:
self.log.error(
"Processor for %s exited with return code %s.",
processor.file_path, processor.exit_code
)
else:
for simple_dag in processor.result[0]:
simple_dags.append(simple_dag)
return simple_dags
def heartbeat(self):
"""
This should be periodically called by the manager loop. This method will
kick off new processes to process DAG definition files and read the
results from the finished processors.
:return: a list of SimpleDags that were produced by processors that
have finished since the last time this was called
:rtype: list[airflow.utils.dag_processing.SimpleDag]
"""
simple_dags = self.collect_results()
# Generate more file paths to process if we processed all the files
# already.
if len(self._file_path_queue) == 0:
self.emit_metrics()
self._parsing_start_time = timezone.utcnow()
# If the file path is already being processed, or if a file was
# processed recently, wait until the next batch
file_paths_in_progress = self._processors.keys()
now = timezone.utcnow()
file_paths_recently_processed = []
for file_path in self._file_paths:
last_finish_time = self.get_last_finish_time(file_path)
if (last_finish_time is not None and
(now - last_finish_time).total_seconds() <
self._file_process_interval):
file_paths_recently_processed.append(file_path)
files_paths_at_run_limit = [file_path
for file_path, stat in self._file_stats.items()
if stat.run_count == self._max_runs]
files_paths_to_queue = list(set(self._file_paths) -
set(file_paths_in_progress) -
set(file_paths_recently_processed) -
set(files_paths_at_run_limit))
for file_path, processor in self._processors.items():
self.log.debug(
"File path %s is still being processed (started: %s)",
processor.file_path, processor.start_time.isoformat()
)
self.log.debug(
"Queuing the following files for processing:\n\t%s",
"\n\t".join(files_paths_to_queue)
)
for file_path in files_paths_to_queue:
if file_path not in self._file_stats:
self._file_stats[file_path] = DagFileStat(0, 0, None, None, 0)
self._file_path_queue.extend(files_paths_to_queue)
# Start more processors if we have enough slots and files to process
while (self._parallelism - len(self._processors) > 0 and
len(self._file_path_queue) > 0):
file_path = self._file_path_queue.pop(0)
processor = self._processor_factory(file_path, self._zombies)
Stats.incr('dag_processing.processes')
processor.start()
self.log.debug(
"Started a process (PID: %s) to generate tasks for %s",
processor.pid, file_path
)
self._processors[file_path] = processor
# Update heartbeat count.
self._heartbeat_count += 1
return simple_dags
@provide_session
def _find_zombies(self, session):
"""
        Find zombie task instances (tasks that haven't had a heartbeat for too
        long) and update the current zombie list.
"""
now = timezone.utcnow()
zombies = []
if not self._last_zombie_query_time or \
(now - self._last_zombie_query_time).total_seconds() > self._zombie_query_interval:
# to avoid circular imports
from airflow.jobs import LocalTaskJob as LJ
self.log.info("Finding 'running' jobs without a recent heartbeat")
TI = airflow.models.TaskInstance
limit_dttm = timezone.utcnow() - timedelta(
seconds=self._zombie_threshold_secs)
self.log.info("Failing jobs without heartbeat after %s", limit_dttm)
tis = (
session.query(TI)
.join(LJ, TI.job_id == LJ.id)
.filter(TI.state == State.RUNNING)
.filter(
or_(
LJ.state != State.RUNNING,
LJ.latest_heartbeat < limit_dttm,
)
).all()
)
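            # The query above roughly corresponds to the following SQL
            # (illustrative only; the real statement is generated by SQLAlchemy
            # and the job table also carries a job_type discriminator):
            #   SELECT ti.* FROM task_instance ti
            #   JOIN job ON ti.job_id = job.id
            #   WHERE ti.state = 'running'
            #     AND (job.state != 'running' OR job.latest_heartbeat < :limit_dttm)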
self._last_zombie_query_time = timezone.utcnow()
for ti in tis:
sti = SimpleTaskInstance(ti)
self.log.info(
"Detected zombie job with dag_id %s, task_id %s, and execution date %s",
sti.dag_id, sti.task_id, sti.execution_date.isoformat())
zombies.append(sti)
self._zombies = zombies
def _kill_timed_out_processors(self):
"""
        Kill any file processors that have timed out, to defend against process hangs.
"""
now = timezone.utcnow()
for file_path, processor in self._processors.items():
duration = now - processor.start_time
if duration > self._processor_timeout:
self.log.error(
"Processor for %s with PID %s started at %s has timed out, "
"killing it.",
processor.file_path, processor.pid, processor.start_time.isoformat())
Stats.decr('dag_processing.processes')
Stats.incr('dag_processing.processor_timeouts')
                # TODO: Remove after Airflow 2.0
Stats.incr('dag_file_processor_timeouts')
processor.kill()
def max_runs_reached(self):
"""
:return: whether all file paths have been processed max_runs times
"""
if self._max_runs == -1: # Unlimited runs.
return False
for stat in self._file_stats.values():
if stat.run_count < self._max_runs:
return False
if self._heartbeat_count < self._max_runs:
return False
return True
def terminate(self):
"""
Stops all running processors
:return: None
"""
for processor in self._processors.values():
Stats.decr('dag_processing.processes')
processor.terminate()
def end(self):
"""
        Kill all child processes on exit since we don't want to leave
        them orphaned.
"""
pids_to_kill = self.get_all_pids()
if len(pids_to_kill) > 0:
# First try SIGTERM
this_process = psutil.Process(os.getpid())
# Only check child processes to ensure that we don't have a case
# where we kill the wrong process because a child process died
# but the PID got reused.
child_processes = [x for x in this_process.children(recursive=True)
if x.is_running() and x.pid in pids_to_kill]
for child in child_processes:
self.log.info("Terminating child PID: %s", child.pid)
child.terminate()
# TODO: Remove magic number
timeout = 5
self.log.info("Waiting up to %s seconds for processes to exit...", timeout)
try:
psutil.wait_procs(
child_processes, timeout=timeout,
callback=lambda x: self.log.info('Terminated PID %s', x.pid))
except psutil.TimeoutExpired:
self.log.debug("Ran out of time while waiting for processes to exit")
# Then SIGKILL
child_processes = [x for x in this_process.children(recursive=True)
if x.is_running() and x.pid in pids_to_kill]
if len(child_processes) > 0:
self.log.info("SIGKILL processes that did not terminate gracefully")
for child in child_processes:
self.log.info("Killing child PID: %s", child.pid)
child.kill()
child.wait()
def emit_metrics(self):
"""
        Emit metrics that summarize DAG parsing.
This is called once every time around the parsing "loop" - i.e. after
all files have been parsed.
"""
parse_time = (timezone.utcnow() - self._parsing_start_time).total_seconds()
Stats.gauge('dag_processing.total_parse_time', parse_time)
Stats.gauge('dagbag_size', sum(stat.num_dags for stat in self._file_stats.values()))
Stats.gauge('dag_processing.import_errors',
sum(stat.import_errors for stat in self._file_stats.values()))
# TODO: Remove before Airflow 2.0
Stats.gauge('collect_dags', parse_time)
Stats.gauge('dagbag_import_errors', sum(stat.import_errors for stat in self._file_stats.values()))
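# ---------------------------------------------------------------------------
# Illustrative sketch (not part of Airflow's API): the queueing decision in
# DagFileProcessorManager.heartbeat() is plain set arithmetic over file paths.
# The hypothetical helper below mirrors that logic in isolation; its name and
# signature are assumptions made for illustration only.
def _example_select_files_to_queue(all_paths, in_progress, last_finish_times,
                                   run_counts, now, process_interval, max_runs):
    """Return the file paths heartbeat() would add to its processing queue."""
    recently_processed = {
        path for path, finished_at in last_finish_times.items()
        if finished_at is not None
        and (now - finished_at).total_seconds() < process_interval
    }
    at_run_limit = {path for path, count in run_counts.items() if count == max_runs}
    return set(all_paths) - set(in_progress) - recently_processed - at_run_limit
# Example: with two known files, one currently being processed and none at the
# run limit, only the idle file is queued:
#   _example_select_files_to_queue(['a.py', 'b.py'], {'a.py'}, {}, {},
#                                  timezone.utcnow(), 30, -1) == {'b.py'}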
|
test_operator_gpu.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import sys
import os
import time
import multiprocessing as mp
import unittest
import mxnet as mx
import numpy as np
from nose.tools import assert_raises
from mxnet.test_utils import check_consistency, set_default_context, assert_almost_equal
from mxnet.base import MXNetError
from mxnet import autograd
from numpy.testing import assert_allclose
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../unittest'))
from common import setup_module, with_seed, teardown, assert_raises_cudnn_not_satisfied
from test_operator import *
from test_optimizer import *
from test_random import *
from test_exc_handling import *
#from test_rnn import *
from test_sparse_ndarray import *
from test_sparse_operator import *
from test_ndarray import *
from test_subgraph_op import *
from test_contrib_operator import test_multibox_target_op
set_default_context(mx.gpu(0))
del test_support_vector_machine_l1_svm # noqa
del test_support_vector_machine_l2_svm # noqa
def check_countsketch(in_dim,out_dim,n):
data = mx.sym.Variable("data")
h = mx.sym.Variable("h")
s = mx.sym.Variable("s")
sym = mx.sym.contrib.count_sketch(data=data, h=h, s=s, name='countsketch',out_dim = out_dim)
shape = [(n,in_dim), (1,in_dim),(1,in_dim)] #shape of input x, hash h and hash s
arr = [mx.nd.empty(shape[i]) for i in range(3)]
arr_grad = [mx.nd.empty(shape[i]) for i in range(3)]
x = np.random.uniform(-10, 10, shape[0])
arr[0][:] = x #input x
h = np.random.randint(0, out_dim, shape[1])
arr[1][:] = h #hash h
s = np.random.randint(0, 2, shape[2])*2-np.ones(shape[2])
arr[2][:] = s #hash s
locations = {"data": x, "h": h, "s": s}
a = np.zeros((n,out_dim))
temp = np.multiply(x, s)
for num_sample in np.arange(0,n):
for idx in np.arange(0,in_dim):
a[num_sample][h[0][idx]] += temp[num_sample][idx]
check_symbolic_forward(sym, locations, [a], rtol=1e-3, atol=1e-5, ctx=mx.gpu(0))
out_grad = mx.nd.empty((n,out_dim))
out_grad[:] = np.random.normal(-3, 3, (n,out_dim))
a = np.zeros((n,in_dim))
for j in np.arange(0,n):
for i in np.arange(0,in_dim):
a[j,i] = out_grad.asnumpy()[j, h[0,i]] * s[0,i]
check_symbolic_backward(sym, locations, [out_grad], [a], rtol=1e-3, atol=1e-5, ctx=mx.gpu(0))
@with_seed()
def test_countsketch():
minindim = 40
maxindim = 100
minoutdim = 5
maxoutdim = 30
maxn = 200
in_dim = np.random.randint(minindim, maxindim)
out_dim = np.random.randint(minoutdim, maxoutdim)
n = np.random.randint(1, maxn)
check_countsketch(in_dim, out_dim, n)
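# Illustrative note (not exercised by the tests above): the nested loops in
# check_countsketch compute the count-sketch projection
#     out[n, h[i]] += s[i] * x[n, i]
# A hypothetical vectorized NumPy equivalent, shown only as a reference for
# what the operator is expected to do, is:
def _countsketch_reference(x, h, s, out_dim):
    """Vectorized count-sketch forward; x: (n, in_dim), h and s: (1, in_dim)."""
    n, in_dim = x.shape
    out = np.zeros((n, out_dim))
    # Scatter-add each signed input column into its hashed output bucket.
    np.add.at(out, (np.arange(n)[:, None], h[0].astype(int)), x * s[0])
    return out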
def check_ifft(shape):
shape_old = shape
if len(shape) == 2:
if shape[1]%2 != 0:
lst = list(shape)
lst[1] = lst[1]*2
shape = tuple(lst)
shape_old = shape
shape = (shape[0],shape[1]*2)
if len(shape) == 4:
if shape[3]%2 != 0:
lst = list(shape)
lst[3] = lst[3]*2
shape = tuple(lst)
shape_old = shape
shape = (shape[0],shape[1],shape[2],shape[3]*2)
sym = mx.sym.contrib.ifft(name='ifft', compute_size = 128)
init = [np.random.normal(size=shape, scale=1.0)]
arr_grad = [mx.nd.empty(shape)]
ctx_list = [{'ctx': mx.gpu(0),'ifft_data': shape, 'type_dict': {'ifft_data': np.float32}}]
exe_list = [sym.simple_bind(args_grad=arr_grad,**ctx) for ctx in ctx_list]
for exe in exe_list:
for arr, iarr in zip(exe.arg_arrays, init):
arr[:] = iarr.astype(arr.dtype)
# forward
for exe in exe_list:
exe.forward(is_train= True)
out1 = [exe.outputs[0].asnumpy() for exe in exe_list]
if len(shape) == 2:
init_complex = np.zeros(shape_old,dtype = np.complex64)
for i in range(0,shape_old[1]):
init_complex.real[:,i] = init[0][:,2*i]
init_complex.imag[:,i] = init[0][:,2*i+1]
a = np.fft.ifft(init_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, out1[0]/shape_old[1],rtol=1e-3, atol=1e-5)
if len(shape) == 4:
init_complex = np.zeros(shape_old,dtype = np.complex64)
for i in range(0,shape_old[3]):
init_complex.real[:,:,:,i] = init[0][:,:,:,2*i]
init_complex.imag[:,:,:,i] = init[0][:,:,:,2*i+1]
a = np.fft.ifft(init_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, out1[0]/shape_old[3],rtol=1e-3, atol=1e-5)
# backward
if len(shape) == 2:
out_grad = mx.nd.empty(shape_old)
out_grad[:] = np.random.normal(-3, 3, shape_old)
for exe in exe_list:
exe.backward([out_grad])
temp = exe.grad_arrays[0].asnumpy()
temp = np.zeros(shape_old)
for i in range(shape_old[1]):
temp[:,i] = exe.grad_arrays[0].asnumpy()[:,2*i]
a = np.fft.fft(out_grad.asnumpy(), n=None, axis=-1, norm=None)
assert_almost_equal(a.real, temp, rtol=1e-3, atol=1e-5)
if len(shape) == 4:
out_grad = mx.nd.empty(shape_old)
out_grad[:] = np.random.normal(-3, 3, shape_old)
for exe in exe_list:
exe.backward([out_grad])
temp = exe.grad_arrays[0].asnumpy()
temp = np.zeros(shape_old)
for i in range(shape_old[3]):
temp[:,:,:,i] = exe.grad_arrays[0].asnumpy()[:,:,:,2*i]
a = np.fft.fft(out_grad.asnumpy(), n=None, axis=-1, norm=None)
assert_almost_equal(a.real, temp, rtol=1e-3, atol=1e-5)
@with_seed()
def test_ifft():
nrepeat = 2
maxdim = 10
for repeat in range(nrepeat):
for order in [2,4]:
shape = tuple(np.random.randint(1, maxdim, size=order))
check_ifft(shape)
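# Illustrative note: in these tests the complex signal is packed along the last
# axis as interleaved (real, imag) pairs, which is why check_ifft/check_fft
# assemble init_complex column by column. Hypothetical helpers (assumed names,
# not used by the test suite) showing the same layout conversion in NumPy:
def _interleaved_to_complex(arr):
    """(..., 2*k) real array [re0, im0, re1, im1, ...] -> (..., k) complex array."""
    return arr[..., 0::2] + 1j * arr[..., 1::2]
def _complex_to_interleaved(arr):
    """(..., k) complex array -> (..., 2*k) real array with interleaved re/im."""
    out = np.zeros(arr.shape[:-1] + (2 * arr.shape[-1],), dtype=arr.real.dtype)
    out[..., 0::2] = arr.real
    out[..., 1::2] = arr.imag
    return out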
def check_fft(shape):
sym = mx.sym.contrib.fft(name='fft', compute_size = 128)
if len(shape) == 2:
if shape[1]%2 != 0:
lst = list(shape)
lst[1] = lst[1]*2
shape = tuple(lst)
shape_old = shape
if len(shape) == 4:
if shape[3]%2 != 0:
lst = list(shape)
lst[3] = lst[3]*2
shape = tuple(lst)
shape_old = shape
init = [np.random.normal(size=shape, scale=1.0)]
arr_grad = [mx.nd.empty(shape)]
ctx_list = [{'ctx': mx.gpu(0),'fft_data': shape, 'type_dict': {'fft_data': np.float32}}]
exe_list = [sym.simple_bind(args_grad=arr_grad,**ctx) for ctx in ctx_list]
for exe in exe_list:
for arr, iarr in zip(exe.arg_arrays, init):
arr[:] = iarr.astype(arr.dtype)
# forward
for exe in exe_list:
exe.forward(is_train=True)
out1 = [exe.outputs[0].asnumpy() for exe in exe_list]
out = np.fft.fft(init, n=None, axis=-1, norm=None)
if len(shape) == 2:
out = np.reshape(out,(out.shape[1],out.shape[2]))
out2 = np.append(out.real, out.imag, axis = 1)
a = np.zeros(out1[0].shape)
p = 0
for i in range(out2.shape[1]//2):
a[:,p] = out2[:,i]
a[:,p+1] = out2[:,i+out2.shape[1]//2]
p = p+2
if len(shape) == 4:
out = np.reshape(out,(out.shape[1],out.shape[2],out.shape[3],out.shape[4]))
out2 = np.append(out.real, out.imag, axis = 1)
a = np.zeros(out1[0].shape)
for i in range(out1[0].shape[0]):
for j in range(out1[0].shape[1]):
p = 0
for k in range(out2.shape[3]):
a[i,j,:,p] = out2[i,j,:,k]
a[i,j,:,p+1] = out2[i,j+out1[0].shape[1],:,k]
p = p+2
assert_almost_equal(a, out1[0],rtol=1e-3, atol=1e-5)
# backward
if len(shape) == 2:
out_grad = mx.nd.empty((shape[0],2*shape[1]))
out_grad[:] = np.random.normal(-3, 3, (shape[0],2*shape[1]))
# out_grad_to_complex
out_grad_complex = np.zeros(shape,dtype = np.complex64)
for i in range(0,shape[1]):
out_grad_complex.real[:,i] = out_grad.asnumpy()[:,2*i]
out_grad_complex.imag[:,i] = out_grad.asnumpy()[:,2*i+1]
for exe in exe_list:
exe.backward([out_grad])
a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, exe.grad_arrays[0].asnumpy()/shape[1],rtol=1e-3, atol=1e-5)
if len(shape) == 4:
out_grad = mx.nd.empty(out1[0].shape)
out_grad[:] = np.random.normal(-3, 3, out1[0].shape)
# out_grad_to_complex
out_grad_complex = np.zeros(shape,dtype = np.complex64)
for i in range(0,shape[3]):
out_grad_complex.real[:,:,:,i] = out_grad.asnumpy()[:,:,:,2*i]
out_grad_complex.imag[:,:,:,i] = out_grad.asnumpy()[:,:,:,2*i+1]
for exe in exe_list:
exe.backward([out_grad])
a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, exe.grad_arrays[0].asnumpy()/shape[3],rtol=1e-3, atol=1e-5)
@with_seed()
def test_fft():
nrepeat = 2
maxdim = 10
for repeat in range(nrepeat):
for order in [2,4]:
shape = tuple(np.random.randint(1, maxdim, size=order))
check_fft(shape)
@with_seed()
def test_batchnorm_with_type():
ctx_list_v1_2D = [
{'ctx': mx.cpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
]
ctx_list_v2_2D = [
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float64}},
]
ctx_list_v2_1D = [
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float64}},
]
ctx_list_v2_3D = [
{'ctx': mx.cpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float64}}
]
# V1, 2D
sym = mx.sym.BatchNorm_v1(name='norm', fix_gamma=False)
check_consistency(sym, ctx_list_v1_2D)
sym = mx.sym.BatchNorm_v1(name='norm', fix_gamma=True)
check_consistency(sym, ctx_list_v1_2D)
# V2, 2D
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
# V2, 1D
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
    # V2, 3D
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_3D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_3D)
@with_seed()
def test_batchnorm_versions():
def test_batchnorm_versions_helper(batchnorm_op_list, data, fix_gamma, use_global_stats):
ctx_list = []
sym_list = []
# BatchNormV1 cpu
if 'batchnorm_v1_cpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm_v1(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
# BatchNormV1 gpu (organic)
if 'batchnorm_v1_gpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm_v1(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
# BatchNorm cpu
if 'batchnorm_cpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
# BatchNorm gpu (organic)
if 'batchnorm_gpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm', cudnn_off=True))
# BatchNorm gpu cudnn (if cudnn is enabled)
if 'batchnorm_cudnn' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm', cudnn_off=False))
check_consistency(sym_list, ctx_list)
def test_1d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 20)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',
'batchnorm_gpu', 'batchnorm_cudnn'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
def test_2d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 10, 10)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_v1_cpu', 'batchnorm_v1_gpu',
'batchnorm_cpu',
'batchnorm_gpu', 'batchnorm_cudnn'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
def test_3d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 3, 5, 5)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',
'batchnorm_gpu'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
test_1d_batchnorm(True, False)
test_1d_batchnorm(False, False)
test_1d_batchnorm(False, True)
test_1d_batchnorm(True, True)
test_2d_batchnorm(True, False)
test_2d_batchnorm(False, False)
test_2d_batchnorm(False, True)
test_2d_batchnorm(True, True)
test_3d_batchnorm(True, False)
test_3d_batchnorm(False, False)
test_3d_batchnorm(False, True)
test_3d_batchnorm(True, True)
@with_seed(1234)
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_convolution_with_type():
sym1 = mx.sym.Convolution(num_filter=3, kernel=(3,3), name='conv')
data = mx.sym.Variable('conv_data')
w = mx.sym.Variable('conv_weight')
b = mx.sym.Variable('conv_bias')
w = mx.sym.transpose(w, axes=(0,2,3,1))
sym2 = mx.sym.transpose(data, axes=(0,2,3,1))
sym2 = mx.sym.Convolution(sym2, w, b, layout='NHWC', num_filter=3, kernel=(3,3))
sym2 = mx.sym.transpose(sym2, axes=(0,3,1,2), name='conv')
sym = [sym1, sym1, sym1, sym1, sym1, sym2, sym2]
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
# NHWC
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
'type_dict': {'conv_data': np.float32, 'conv_weight': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
'type_dict': {'conv_data': np.float16, 'conv_weight': np.float16}}
]
# wider tolerance needed for true-fp16 NCHW test above
tol = {np.dtype(np.float16): 0.5,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
# test ability to turn off training on bias
check_consistency(sym, ctx_list, grad_req={'conv_data': 'write', 'conv_weight': 'write', 'conv_bias': 'null'}, tol=tol)
# Apply N symbols against each of M contexts, checking that all NxM combinations match.
def check_consistency_NxM(sym_list, ctx_list):
# e.g. if sym_list=[sym1, sym2] and ctx_list=[ctx1, ctx2, ctx3], then resulting lists are:
# sym_list=[sym1, sym1, sym1, sym2, sym2, sym2] and ctx_list=[ctx1, ctx2, ctx3, ctx1, ctx2, ctx3]
check_consistency(np.repeat(sym_list, len(ctx_list)), ctx_list * len(sym_list), scale=0.5)
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/10141")
@with_seed()
def test_convolution_options():
# 1D convolution
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), pad=(1,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), pad=(1,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), stride=(2,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), stride=(2,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), dilate=(2,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), dilate=(2,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(1,), pad=(0,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,), pad=(0,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 2D convolution
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), stride=(2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), stride=(2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(num_filter=3, kernel=(1,1), pad=(0,0), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1), pad=(0,0), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 3D convolution
ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# This test is designed to expose an issue with cudnn v7.1.4 algo find() when invoked with large c.
# Algos returned by find() can fail to run with grad_req='add' (wgrad kernel beta parameter == 1.0f).
@with_seed()
def test_convolution_large_c():
problematic_c = 64 * 1024
# The convolution accumulates many values, so set large tolerances.
tol = {np.dtype(np.float32): 1,
np.dtype(np.float64): 1}
def test_1D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, width), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, width), 'type_dict': {'conv_data': np.float64}}]
sym = mx.sym.Convolution(layout='NCW', num_filter=8, kernel=(2,), name='conv')
check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req)
def test_2D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, 2, width), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, 2, width), 'type_dict': {'conv_data': np.float64}}]
sym = mx.sym.Convolution(layout='NCHW', num_filter=4, kernel=(2,2), name='conv')
check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req)
# Run with different data tensor shapes to run cudnnFind() multiple times.
# First, populate algo and op caches with models that always use cudnnFind() (req == 'write').
# Then run models that must avoid cached cudnnFind() results in some cases (req == 'add').
widths = [4, 16, 64]
for req in ['write', 'add']:
for width in widths:
test_1D_with_width(width, req)
test_2D_with_width(width, req)
# This test is designed to expose an issue with cudnn v7.1.4 algo find() when invoked with large c.
# Algos returned by find() can fail to run with grad_req='add' (wgrad kernel beta parameter == 1.0f).
@with_seed()
def test_deconvolution_large_c():
problematic_c = 64 * 1024
# The deconvolution accumulates many values, so set large tolerances.
tol = {np.dtype(np.float32): 1,
np.dtype(np.float64): 1}
def test_1D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (1, 8, width), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (1, 8, width), 'type_dict': {'deconv_data': np.float64}}]
sym = mx.sym.Deconvolution(layout='NCW', num_filter=problematic_c, kernel=(2,), name='deconv')
check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req)
def test_2D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (1, 8, 2, width), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (1, 8, 2, width), 'type_dict': {'deconv_data': np.float64}}]
sym = mx.sym.Deconvolution(layout='NCHW', num_filter=problematic_c, kernel=(2,2), name='deconv')
check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req)
# Run with different data tensor shapes to run cudnnFind() multiple times.
# First, populate algo and op caches with models that always use cudnnFind() (req == 'write').
# Then run models that must avoid cached cudnnFind() results in some cases (req == 'add').
widths = [4, 16, 64]
for req in ['write', 'add']:
for width in widths:
test_1D_with_width(width, req)
test_2D_with_width(width, req)
@with_seed()
def test_convolution_versions():
# 2D convolution NCHW
ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}]
conv_v1_cpu = mx.sym.Convolution_v1(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_v1_gpu = mx.sym.Convolution_v1(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
conv_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_cpu = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_gpu = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
syms = [conv_v1_cpu, conv_v1_gpu, conv_cudnn, conv_cpu, conv_gpu]
check_consistency(syms, ctx_list)
# 3D convolution NCDHW
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
conv_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
conv_cpu = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
conv_gpu = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
syms = [conv_cudnn, conv_cpu, conv_gpu]
check_consistency(syms, ctx_list)
@with_seed()
def test_pooling_with_type():
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}},
{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float16}},
{'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}}]
sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='valid', name='pool')
check_consistency(sym, ctx_list, rand_type=np.float16)
sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='full', name='pool')
check_consistency(sym, ctx_list, rand_type=np.float16)
sym = mx.sym.Pooling(kernel=(300,300), pool_type='max', global_pool=True, name='pool')
check_consistency(sym, ctx_list, rand_type=np.float16)
@with_seed()
def test_deconvolution_with_type():
# Test basic deconvolution without exercising stride, pad or dilation.
# 1D deconvolution
sym = mx.sym.Deconvolution(num_filter=3, kernel=(3,), name='deconv')
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}]
# wider tolerance needed for true-fp16 test above
tol = {np.dtype(np.float16): 0.3,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
check_consistency(sym, ctx_list, tol=tol, grad_req="add")
# 2D deconvolution
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), name='deconv')
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}}]
# wider tolerance needed for true-fp16 test above
tol = {np.dtype(np.float16): 0.3,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
check_consistency(sym, ctx_list, tol=tol, grad_req="add")
@with_seed()
def test_deconvolution_options():
# 1D deconvolution
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), pad=(1,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), pad=(1,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), stride=(2,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), stride=(2,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), dilate=(2,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), dilate=(2,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 2D deconvolution
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), pad=(1,1), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), pad=(1,1), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), stride=(2,2), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), stride=(2,2), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# # 3D deconvolution (not yet enabled)
# ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# # Pad > 0
# sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
# sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
# check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# # Stride > 1
# sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), name='conv')
# sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), cudnn_off=True, name='conv')
# check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
@with_seed(1234)
def test_bilinear_sampler_with_type():
data = mx.sym.Variable('data')
grid = mx.sym.Variable('grid')
sym = mx.sym.BilinearSampler(data=data, grid=grid)
ctx_list = [{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float64}},
{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float32}},
{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float16}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float64}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
@with_seed()
def test_grid_generator_with_type():
data = mx.sym.Variable('data')
sym = mx.sym.GridGenerator(data=data, transform_type='affine', target_shape=(20, 20))
ctx_list = [{'ctx': mx.gpu(0), 'data': (3, 6), 'type_dict': {'data': np.float32}},
{'ctx': mx.cpu(0), 'data': (3, 6), 'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
sym = mx.sym.GridGenerator(data=data, transform_type='warp', target_shape=(20, 20))
ctx_list = [{'ctx': mx.gpu(0), 'data': (3, 2, 20, 20), 'type_dict': {'data': np.float32}},
{'ctx': mx.cpu(0), 'data': (3, 2, 20, 20), 'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
@with_seed()
def test_spatial_transformer_with_type():
data = mx.sym.Variable('data')
loc = mx.sym.Flatten(data)
loc = mx.sym.FullyConnected(data=loc, num_hidden=10)
loc = mx.sym.Activation(data=loc, act_type='relu')
loc = mx.sym.FullyConnected(data=loc, num_hidden=6)
sym = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=(10, 10),
transform_type="affine", sampler_type="bilinear", cudnn_off=True)
ctx_list = [{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'type_dict': {'data': np.float64}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'type_dict': {'data': np.float64}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
sym = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=(10, 10),
transform_type="affine", sampler_type="bilinear", cudnn_off=False)
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
@with_seed()
def test_pooling_with_type2():
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float32}},
{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float16}},
{'ctx': mx.cpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.cpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float32}}]
sym = mx.sym.Pooling(name='pool', kernel=(3,3), stride=(2,2), pool_type='max')
check_consistency(sym, ctx_list, rand_type=np.float16)
sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='avg')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(name='pool', kernel=(5,5), pad=(2,2), pool_type='max')
check_consistency(sym, ctx_list, rand_type=np.float16)
sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='sum')
check_consistency(sym, ctx_list)
@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/11517")
@with_seed()
def test_pooling_versions():
def test_pooling_versions_helper(pool_op_list, data, kernel, pool_type, pad, stride, pooling_convention='valid',
global_pool=False, p_value=2, count_include_pad=True, tol=None):
ctx_list = []
sym_list = []
# PoolingV1 cpu
if 'pool_v1_cpu' in pool_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
if not global_pool:
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, name='pool'))
else:
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pool_type=pool_type, global_pool=True, name='pool'))
# PoolingV1 gpu
if 'pool_v1_gpu' in pool_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
if not global_pool:
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, name='pool'))
else:
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pool_type=pool_type, global_pool=True, name='pool'))
# Pooling cpu
if 'pool_cpu' in pool_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
if not global_pool:
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, name='pool',
p_value=p_value, count_include_pad=count_include_pad))
else:
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, global_pool=True, name='pool',
p_value=p_value, count_include_pad=count_include_pad))
# Pooling gpu
if 'pool_gpu' in pool_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
if not global_pool:
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, cudnn_off=True, name='pool',
p_value=p_value, count_include_pad=count_include_pad))
else:
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, global_pool=True, cudnn_off=True,
name='pool', p_value=p_value, count_include_pad=count_include_pad))
# CuDNNPooling
if 'pool_cudnn' in pool_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
if not global_pool:
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, p_value=p_value, cudnn_off=False,
name='pool', count_include_pad=count_include_pad))
else:
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, global_pool=True, p_value=p_value,
cudnn_off=False, name='pool', count_include_pad=count_include_pad))
check_consistency(sym_list, ctx_list, equal_nan=(not count_include_pad), tol=tol)
def test_1d_pooling(pool_type, p_value=2, count_include_pad=True):
data = (2, 3, 20)
kernel = (4,)
pad = (0,)
stride = (1,)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False, p_value=p_value,
count_include_pad=count_include_pad)
pad = (2,)
stride = (2,)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False, p_value=p_value,
count_include_pad=count_include_pad)
pad = (0,)
stride = (1,)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False, p_value=p_value,
count_include_pad=count_include_pad)
pad = (2,)
stride = (2,)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False, p_value=p_value,
count_include_pad=count_include_pad)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
global_pool=True, p_value=p_value, count_include_pad=count_include_pad)
def test_2d_pooling(pool_type, p_value=2, count_include_pad=True):
data = (2, 3, 20, 20)
kernel = (4, 5)
pad = (0, 0)
stride = (1, 1)
if pool_type == 'lp':
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False, p_value=p_value)
else:
test_pooling_versions_helper(pool_op_list=['pool_v1_cpu', 'pool_v1_gpu', 'pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False, count_include_pad=count_include_pad)
# pool_v1 has bugs when pad is not 0, do not test PoolingV1 here
pad = (2, 3)
stride = (2, 3)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False, p_value=p_value,
count_include_pad=count_include_pad)
pad = (0, 0)
stride = (1, 1)
if pool_type == 'lp':
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False, p_value=p_value)
else:
if count_include_pad:
test_pooling_versions_helper(pool_op_list=['pool_v1_cpu', 'pool_v1_gpu', 'pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False,
count_include_pad=count_include_pad)
else:
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False,
count_include_pad=count_include_pad)
# pool_v1 has bugs when pad is not 0, do not test PoolingV1 here
pad = (2, 3)
stride = (2, 3)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False, p_value=p_value,
count_include_pad=count_include_pad)
if pool_type == 'lp':
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
global_pool=True, p_value=p_value)
else:
test_pooling_versions_helper(pool_op_list=['pool_v1_cpu', 'pool_v1_gpu', 'pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
global_pool=True, count_include_pad=count_include_pad)
def test_3d_pooling(pool_type, p_value=2, count_include_pad=True):
data = (2, 3, 20, 20, 20)
kernel = (4, 5, 3)
pad = (0, 0, 0)
stride = (1, 1, 1)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False, p_value=p_value,
count_include_pad=count_include_pad)
pad = (2, 3, 3)
stride = (2, 3, 1)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False, p_value=p_value,
count_include_pad=count_include_pad)
pad = (0, 0, 0)
stride = (1, 1, 1)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False, p_value=p_value,
count_include_pad=count_include_pad)
pad = (2, 3, 3)
stride = (2, 3, 1)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False, p_value=p_value,
count_include_pad=count_include_pad)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
global_pool=True, p_value=p_value, count_include_pad=count_include_pad)
test_1d_pooling('max')
test_1d_pooling('avg', count_include_pad=True)
test_1d_pooling('avg', count_include_pad=False)
test_1d_pooling('sum')
test_1d_pooling('lp', p_value=1)
test_1d_pooling('lp', p_value=2)
test_1d_pooling('lp', p_value=3)
test_2d_pooling('max')
test_2d_pooling('avg', count_include_pad=True)
test_2d_pooling('avg', count_include_pad=False)
test_2d_pooling('sum')
test_2d_pooling('lp', p_value=1)
test_2d_pooling('lp', p_value=2)
test_2d_pooling('lp', p_value=3)
test_3d_pooling('max')
test_3d_pooling('avg', count_include_pad=True)
test_3d_pooling('avg', count_include_pad=False)
test_3d_pooling('sum')
test_3d_pooling('lp', p_value=1)
test_3d_pooling('lp', p_value=2)
test_3d_pooling('lp', p_value=3)
@with_seed()
def test_pooling_full_2d():
def test_pooling_full_2d_type(pool_type):
data = (2, 2, 10, 10)
kernel = (4, 5)
pad = (1, 2)
stride = (3, 4)
convention = 'full'
ctx_list = []
sym_list = []
# o_h = ceil((10 + 1 + 1 - 4) / 3) + 1 = 4
# o_w = ceil((10 + 2 + 2 - 5) / 4) + 1 = 4
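        # Illustrative note (added for clarity, not part of the original test): with the 'full'
        # convention the output extent along each axis is o = ceil((i + 2*pad - kernel) / stride) + 1,
        # while 'valid' uses floor() in place of ceil(); the two hand-computed sizes above follow
        # directly from the 'full' form.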
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=convention, global_pool=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=convention, global_pool=False, name='pool'))
check_consistency(sym_list, ctx_list)
test_pooling_full_2d_type('max')
test_pooling_full_2d_type('avg')
test_pooling_full_2d_type('sum')
@with_seed()
def test_global_pooling():
def test_1d_pooling(pool_type, p_value=2):
data = (2, 3, 20)
kernel = (4,)
pad = (2,)
stride = (2,)
ctx_list = []
sym_list = []
pooling_convention = 'valid'
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
check_consistency(sym_list, ctx_list)
def test_2d_pooling(pool_type, p_value=2):
data = (2, 3, 20, 20)
kernel = (4, 4)
pad = (2, 2)
stride = (2, 2)
ctx_list = []
sym_list = []
pooling_convention = 'valid'
if pool_type != 'lp':
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
check_consistency(sym_list, ctx_list)
test_1d_pooling('max')
test_1d_pooling('avg')
test_1d_pooling('sum')
test_1d_pooling('lp', p_value=1)
test_1d_pooling('lp', p_value=2)
test_1d_pooling('lp', p_value=3)
test_2d_pooling('max')
test_2d_pooling('avg')
test_2d_pooling('sum')
test_2d_pooling('lp', p_value=1)
test_2d_pooling('lp', p_value=2)
test_2d_pooling('lp', p_value=3)
@with_seed()
def test_upsampling_with_type():
sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='nearest', num_args=1)
ctx_list = [{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float64}},
{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float32}},
{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float16}},
{'ctx': mx.cpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float64}},
{'ctx': mx.cpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_upsampling_bilinear_with_type():
sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='bilinear', num_args=1)
ctx_list = [{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float64}},
{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float32}},
{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float16}},
{'ctx': mx.cpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float64}},
{'ctx': mx.cpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_concat_with_type():
sym = mx.sym.Concat(name='concat', num_args=2)
ctx_list = [{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},
{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}},
{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float16, 'concat_arg1': np.float16}},
{'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},
{'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_elementwisesum_with_type():
dev_types = [[mx.gpu(0), [np.float64, np.float32, np.float16]],
[mx.cpu(0), [np.float64, np.float32]] ]
for num_args in range(1, 6):
ews_arg_shape = {}
for i in range(num_args):
ews_arg_shape['ews_arg'+str(i)] = (2, 10)
sym = mx.sym.ElementWiseSum(name='ews', num_args=num_args)
ctx_list = []
for dev, types in dev_types:
for dtype in types:
ews_arg_dtype = {'type_dict':{}}
for i in range(num_args):
ews_arg_dtype['type_dict']['ews_arg'+str(i)] = dtype
ctx_elem = {'ctx': dev}
ctx_elem.update(ews_arg_shape)
ctx_elem.update(ews_arg_dtype)
ctx_list.append(ctx_elem)
check_consistency(sym, ctx_list)
@with_seed()
def test_reshape_with_type():
sym = mx.sym.Reshape(name='reshape', shape=(-1,1,1,0))
ctx_list = [{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float64}},
{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float32}},
{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float16}},
{'ctx': mx.cpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float64}},
{'ctx': mx.cpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_blockgrad_with_type():
sym = mx.sym.BlockGrad(name='bg')
ctx_list = [{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float64}},
{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float32}},
{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float16}},
{'ctx': mx.cpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float64}},
{'ctx': mx.cpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_swapaxis_with_type():
sym = mx.sym.SwapAxis(name='swap', dim1=1)
ctx_list = [{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float64}},
{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float32}},
{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float16}},
{'ctx': mx.cpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float64}},
{'ctx': mx.cpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_fullyconnected_with_type():
sym = mx.sym.FullyConnected(num_hidden=3, name='inner')
ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},
{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}},
{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float16}},
{'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},
{'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}}]
check_consistency(sym, ctx_list)
# Sizes are divisible by 8 to test TensorCore on Volta GPU.
sym = mx.sym.FullyConnected(num_hidden=8, name='inner')
ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float16}},
{'ctx': mx.cpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_activation_with_type():
act_types = ['relu', 'sigmoid', 'tanh', 'softrelu', 'softsign']
shape = (2, 2, 10, 10)
for act_type in act_types:
sym = mx.sym.Activation(name='act', act_type=act_type)
ctx_list = [{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float64}},
{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float32}},
{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float16}},
{'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float64}},
{'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float32}},
{'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float16}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_lrn():
sym = mx.sym.LRN(alpha=0.0001, beta=0.75, knorm=2, nsize=5, name='lrn')
ctx_list = [{'ctx': mx.gpu(0), 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}},
{'ctx': mx.cpu(0), 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_embedding_with_type():
def test_embedding_helper(data_types, weight_types, low_pad, high_pad):
NVD = [[20, 10, 20], [200, 10, 300]]
for N, V, D in NVD:
sym = mx.sym.Embedding(name='embedding', input_dim=V, output_dim=D)
ctx_list = []
for data_type in data_types:
for weight_type in weight_types:
ctx_list.append({'ctx': mx.gpu(0), 'embedding_data': (N,),
'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})
ctx_list.append({'ctx': mx.cpu(0), 'embedding_data': (N,),
'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})
arg_params = {'embedding_data': np.random.randint(low=-low_pad, high=V+high_pad, size=(N,))}
check_consistency(sym, ctx_list, grad_req={'embedding_data': 'null','embedding_weight': 'write'},
arg_params=arg_params)
data_types = [np.float16, np.float32, np.float64, np.int32]
weight_types = [np.float16, np.float32, np.float64]
test_embedding_helper(data_types, weight_types, 5, 5)
data_types = [np.uint8]
weight_types = [np.float16, np.float32, np.float64]
test_embedding_helper(data_types, weight_types, 0, 5)
@with_seed()
def test_svmoutput_with_type():
sym = mx.sym.SVMOutput(name='svmoutput', use_linear=True)
ctx_list = [{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float64}},
{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float32}},
{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float16}},
{'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float64}},
{'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float32}},
{'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float16}}]
check_consistency(sym, ctx_list, use_uniform=True)
@with_seed()
def test_take_with_type():
sym = mx.sym.take(name='take')
for data_ndim in range(2, 5):
for idx_ndim in range(1, 4):
data_shape = ()
for _ in range(data_ndim):
data_shape += (np.random.randint(low=3, high=6), )
idx_shape = ()
for _ in range(idx_ndim):
idx_shape += (np.random.randint(low=3, high=5), )
ctx_list = [{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float64,
'take_a': np.float64}},
{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float32,
'take_a': np.float32}},
{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float16,
'take_a': np.float16}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float64,
'take_a': np.float64}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float32,
'take_a': np.float32}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float16,
'take_a': np.float16}}]
arg_params = {'take_indices': np.random.randint(low=0,
high=data_shape[0],
size=idx_shape),
'take_a': np.random.normal(size=data_shape)}
check_consistency(sym, ctx_list,
grad_req={'take_indices': 'null',
'take_a': 'write'},
arg_params=arg_params)
def check_rnn_consistency(cell1, cell2):
dshape = (32, 5, 200)
data = mx.sym.Variable('data')
sym1, _ = cell1.unroll(5, data, merge_outputs=True)
mod1 = mx.mod.Module(sym1, label_names=None, context=mx.gpu(0))
mod1.bind(data_shapes=[('data', dshape)], label_shapes=None)
sym2, _ = cell2.unroll(5, data, merge_outputs=True)
mod2 = mx.mod.Module(sym2, label_names=None, context=mx.gpu(0))
mod2.bind(data_shapes=[('data', dshape)], label_shapes=None)
mod1.init_params()
args, auxs = mod1.get_params()
args = cell1.unpack_weights(args)
args = cell2.pack_weights(args)
mod2.set_params(args, auxs)
batch=mx.io.DataBatch(data=[mx.random.uniform(shape=dshape)], label=[])
mod1.forward(batch, is_train=False)
mod2.forward(batch, is_train=False)
assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=1e-2, atol=1e-4)
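# Background note (added comment, not part of the original test): FusedRNNCell keeps all of its
# weights in a single flat parameter vector, so the unpack_weights()/pack_weights() round-trip
# above converts that flat layout into the per-layer, per-gate dictionary layout expected by the
# unfused cells before the parameters are copied into the second module.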
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnn():
fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='rnn_relu', prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.RNNCell(100, activation='relu', prefix='l0_'))
stack.add(mx.rnn.RNNCell(100, activation='relu', prefix='l1_'))
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_lstm_forget_bias():
forget_bias = 2.0
fused = mx.rnn.FusedRNNCell(10, forget_bias=forget_bias, num_layers=2, mode='lstm', prefix='')
dshape = (32, 1, 20)
data = mx.sym.Variable('data')
sym, _ = fused.unroll(1, data, merge_outputs=True)
mod = mx.mod.Module(sym, label_names=None, context=mx.gpu(0))
mod.bind(data_shapes=[('data', dshape)], label_shapes=None)
mod.init_params()
args, auxs = mod.get_params()
args = fused.unpack_weights(args)
bias_name = next(x for x in args if x.endswith('f_bias'))
expected_bias = forget_bias * np.ones(10, )
assert_allclose(args[bias_name].asnumpy(), expected_bias)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_gru():
fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='gru', prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.GRUCell(100, prefix='l0_'))
stack.add(mx.rnn.GRUCell(100, prefix='l1_'))
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_bidirectional():
fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='gru', prefix='',
bidirectional=True)
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(100, prefix='l0_'),
mx.rnn.GRUCell(100, prefix='r0_'),
output_prefix='bi_gru_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(100, prefix='l1_'),
mx.rnn.GRUCell(100, prefix='r1_'),
output_prefix='bi_gru_1_'))
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_unfuse():
for mode in ['rnn_tanh', 'rnn_relu', 'lstm', 'gru']:
fused = mx.rnn.FusedRNNCell(
100, num_layers=2, mode=mode,
prefix='test_%s'%mode,
bidirectional=True,
dropout=0.5)
stack = fused.unfuse()
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
def test_psroipooling_with_type():
arg_params = {
'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# plain psroipooling
sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
ctx_list = [{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},
{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},
{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
'psroipool_rois': 'null'}, arg_params=arg_params)
@with_seed()
def test_deformable_psroipooling_with_type():
tol = {np.dtype(np.float32): 1e-1,
np.dtype(np.float64): 1e-3,
np.dtype(np.float16): 1e-2}
arg_params = {
'deformable_psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# deformable psroipooling
sym = mx.sym.contrib.DeformablePSROIPooling(spatial_scale=0.0625, sample_per_part=4, group_size=3, pooled_size=3,
output_dim=2, trans_std=0.1, no_trans=False, name='deformable_psroipool')
ctx_list = [{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float64, 'deformable_psroipool_rois': np.float64,
'deformable_psroipool_trans': np.float64}},
{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float32, 'deformable_psroipool_rois': np.float32,
'deformable_psroipool_trans': np.float32}},
{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float16, 'deformable_psroipool_rois': np.float16,
'deformable_psroipool_trans': np.float16}},
]
check_consistency(sym, ctx_list, scale=0.1, tol=tol,
grad_req={'deformable_psroipool_data': 'write',
'deformable_psroipool_rois': 'null',
'deformable_psroipool_trans': 'write'}, arg_params=arg_params)
@with_seed()
def test_deformable_convolution_with_type():
tol = {np.dtype(np.float32): 1e-1,
np.dtype(np.float64): 1e-3}
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), name='deformable_conv')
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
# {'ctx': mx.gpu(0),
# 'deformable_conv_data': (2, 2, 10, 10),
# 'deformable_conv_offset': (2, 18, 8, 8),
# 'type_dict': {'deformable_conv_data': np.float16, 'deformable_conv_offset': np.float16}},
]
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# test ability to turn off training on bias
check_consistency(sym, ctx_list, scale=0.1, tol=tol,
grad_req={'deformable_conv_data': 'write',
'deformable_conv_offset': 'write',
'deformable_conv_weight': 'write',
'deformable_conv_bias': 'null'})
@with_seed()
def test_deformable_convolution_options():
tol = {np.dtype(np.float32): 1e-1,
np.dtype(np.float64): 1e-3}
# 2D convolution
# Pad > 0
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), pad=(1,1), name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# Stride > 1
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), stride=(2,2), name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# Dilate > 1
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# Deformable group > 1
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
# {'ctx': mx.gpu(0),
# 'deformable_conv_data': (2, 2, 7, 7),
# 'deformable_conv_offset': (2, 36, 5, 5),
# 'type_dict': {'deformable_conv_data': np.float16, 'deformable_offset': np.float16}},
]
    sym = mx.sym.contrib.DeformableConvolution(num_filter=4, kernel=(3,3), num_deformable_group=2,
                                               name='deformable_conv')
    # The symbol above was built but never checked; run the same consistency check as the
    # preceding cases so the num_deformable_group > 1 configuration is actually exercised.
    check_consistency(sym, ctx_list, scale=0.1, tol=tol)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_residual_fused():
cell = mx.rnn.ResidualCell(
mx.rnn.FusedRNNCell(50, num_layers=3, mode='lstm',
prefix='rnn_', dropout=0.5))
inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(2)]
outputs, _ = cell.unroll(2, inputs, merge_outputs=None)
assert sorted(cell.params._params.keys()) == \
['rnn_parameters']
args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10, 50), rnn_t1_data=(10, 50))
assert outs == [(10, 2, 50)]
outputs = outputs.eval(ctx=mx.gpu(0),
rnn_t0_data=mx.nd.ones((10, 50), ctx=mx.gpu(0))+5,
rnn_t1_data=mx.nd.ones((10, 50), ctx=mx.gpu(0))+5,
rnn_parameters=mx.nd.zeros((61200,), ctx=mx.gpu(0)))
expected_outputs = np.ones((10, 2, 50))+5
assert np.array_equal(outputs[0].asnumpy(), expected_outputs)
def check_rnn_layer(layer):
layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])
with mx.gpu(0):
x = mx.nd.ones((10, 16, 30))
states = layer.begin_state(16)
go, gs = layer(x, states)
with mx.cpu(0):
x = mx.nd.ones((10, 16, 30))
states = layer.begin_state(16)
co, cs = layer(x, states)
# atol of 1e-6 required, as exposed by seed 2124685726
assert_almost_equal(go.asnumpy(), co.asnumpy(), rtol=1e-2, atol=1e-6)
for g, c in zip(gs, cs):
assert_almost_equal(g.asnumpy(), c.asnumpy(), rtol=1e-2, atol=1e-6)
def check_rnn_layer_w_rand_inputs(layer):
layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])
x = mx.nd.uniform(shape=(10, 16, 30))
with mx.gpu(0):
x = x.copyto(mx.gpu(0))
states = layer.begin_state(16)
go, gs = layer(x, states)
with mx.cpu(0):
x = x.copyto(mx.cpu(0))
states = layer.begin_state(16)
co, cs = layer(x, states)
assert_almost_equal(go.asnumpy(), co.asnumpy(), rtol=1e-2, atol=1e-6)
for g, c in zip(gs, cs):
assert_almost_equal(g.asnumpy(), c.asnumpy(), rtol=1e-2, atol=1e-6)
@with_seed()
def test_sequence_reverse():
check_sequence_reverse(mx.gpu(0))
@with_seed()
def test_autograd_save_memory():
x = mx.nd.zeros((128, 512, 512), ctx=mx.gpu(0))
x.attach_grad()
with mx.autograd.record():
for i in range(200):
x = x + 1
x.wait_to_read()
x.backward()
@with_seed()
def test_cuda_rtc():
source = r'''
extern "C" __global__ void axpy(const float *x, float *y, float alpha) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
y[i] += alpha * x[i];
}
extern "C" __global__ void saxpy(const float *x, float *y, float alpha) {
extern __shared__ float smem[];
int i = threadIdx.x + blockIdx.x * blockDim.x;
smem[threadIdx.x] = x[i];
y[i] += alpha * smem[threadIdx.x];
}
'''
module = mx.rtc.CudaModule(source)
axpy = module.get_kernel("axpy", "const float *x, float *y, float alpha")
x = mx.nd.ones((10,), ctx=mx.gpu(0))
y = mx.nd.zeros((10,), ctx=mx.gpu(0))
axpy.launch([x, y, 3.0], mx.gpu(0), (1, 1, 1), (10, 1, 1))
assert (y.asnumpy() == 3).all()
saxpy = module.get_kernel("saxpy", "const float *x, float *y, float alpha")
saxpy.launch([x, y, 4.0], mx.gpu(0), (1, 1, 1), (10, 1, 1), 10)
assert (y.asnumpy() == 7).all()
saxpy.launch([x, y, 5.0], mx.gpu(0), (2, 1, 1), (5, 1, 1), 5)
assert (y.asnumpy() == 12).all()
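    # Note on the launch calls above (explanatory comment, not original): the positional
    # arguments are the kernel arguments, the target context, the grid dimensions and the
    # block dimensions; the extra trailing value on the saxpy launches sizes the dynamically
    # allocated __shared__ buffer declared with `extern __shared__`.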
@with_seed()
def test_cross_device_autograd():
x = mx.nd.random.uniform(shape=(10,))
x.attach_grad()
with mx.autograd.record():
y = mx.nd.tanh(x)
y = y.copyto(mx.gpu(0))
y = mx.nd.tanh(y)
y = y.copyto(mx.cpu(0))
y = mx.nd.tanh(y)
y = y.copyto(mx.gpu(0))
y = y.copyto(mx.gpu(0))
y.backward()
dx = x.grad.asnumpy()
x.grad[:] = 0
with mx.autograd.record():
y = x
for i in range(3):
y = mx.nd.tanh(y)
y.backward()
assert_almost_equal(dx, x.grad.asnumpy())
@with_seed()
def test_multi_proposal_op():
    # parameters
feature_stride = 16
scales = (8, 16, 32)
ratios = (0.5, 1, 2)
rpn_pre_nms_top_n = 12000
rpn_post_nms_top_n = 2000
rpn_min_size = feature_stride
feat_len = (1000 + 15) // 16
H, W = feat_len, feat_len
num_anchors = len(scales) * len(ratios)
count_anchors = H * W * num_anchors
def get_new_data(batch_size, ctx):
'''
cls_prob: (batch_size, 2 * num_anchors, H, W)
bbox_pred: (batch_size, 4 * num_anchors, H, W)
im_info: (batch_size, 3)
'''
dtype = np.float32
cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = dtype, ctx = ctx)
bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = dtype, ctx = ctx)
im_info = mx.nd.empty((batch_size, 3), dtype = dtype, ctx = ctx)
cls = [1.0 * (i + 1) / cls_prob.size for i in range(cls_prob.size)]
np.random.shuffle(cls)
cls_prob = mx.nd.reshape(mx.nd.array(cls, dtype = dtype, ctx = ctx), shape = cls_prob.shape)
bbox_pred = mx.nd.array(np.random.randint(-2, 3, size = bbox_pred.shape), dtype = dtype, ctx = ctx)
for i in range(batch_size):
im_size = np.random.randint(600, feat_len * feature_stride, size = (2,))
im_scale = np.random.randint(80, 100) / 100.0
im_info[i, :] = [im_size[0], im_size[1], im_scale]
return cls_prob, bbox_pred, im_info
def check_proposal_consistency(op, batch_size, with_nms=False):
'''
op is mx.nd.contrib.Proposal or mx.nd.contrib.MultiProposal
'''
cls_prob, bbox_pred, im_info = get_new_data(batch_size, mx.cpu(0))
rois_cpu, score_cpu = op(
cls_prob = cls_prob,
bbox_pred = bbox_pred,
im_info = im_info,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = 0.7 if with_nms else 1.0,
rpn_min_size = rpn_min_size, output_score = True)
gpu_ctx = mx.gpu(0)
# copy data to gpu from cpu
cls_prob_gpu = cls_prob.as_in_context(gpu_ctx)
bbox_pred_gpu = bbox_pred.as_in_context(gpu_ctx)
im_info_gpu = im_info.as_in_context(gpu_ctx)
rois_gpu, score_gpu = op(
cls_prob = cls_prob_gpu,
bbox_pred = bbox_pred_gpu,
im_info = im_info_gpu,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = 0.7 if with_nms else 1.0,
rpn_min_size = rpn_min_size, output_score = True)
rois_cpu_np = rois_cpu.asnumpy()
rois_gpu_np = rois_gpu.asnumpy()
score_cpu_np = score_cpu.asnumpy()
score_gpu_np = score_gpu.asnumpy()
if not with_nms:
assert_almost_equal(score_cpu_np, score_gpu_np, atol = 1e-3, rtol = 1e-3)
assert_almost_equal(rois_cpu_np, rois_gpu_np, atol = 1e-3, rtol = 1e-3)
else:
            # no 100% guarantee with nms
assert(np.sum(np.abs(score_cpu_np - score_gpu_np) < 1e-3) >= 10)
assert(np.sum(np.abs(rois_cpu_np - rois_gpu_np) < 1e-3) >= 40)
check_proposal_consistency(mx.nd.contrib.Proposal, 1)
check_proposal_consistency(mx.nd.contrib.MultiProposal, 5)
check_proposal_consistency(mx.nd.contrib.Proposal, 1, with_nms=True)
check_proposal_consistency(mx.nd.contrib.MultiProposal, 5, with_nms=True)
# The following 2 functions launch 0-thread kernels, an error that should be caught and signaled.
def kernel_error_check_imperative():
os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'
a = mx.nd.array([1,2,3],ctx=mx.gpu(0))
b = mx.nd.array([],ctx=mx.gpu(0))
c = (a / b).asnumpy()
def kernel_error_check_symbolic():
os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
c = a / b
f = c.bind(mx.gpu(0), { 'a':mx.nd.array([1,2,3],ctx=mx.gpu(0)),
'b':mx.nd.array([],ctx=mx.gpu(0))})
f.forward()
g = f.outputs[0].asnumpy()
def test_kernel_error_checking():
# Running tests that may throw exceptions out of worker threads will stop CI testing
# if not run in a separate process (with its own address space for CUDA compatibility).
try:
mpctx = mp.get_context('spawn')
except:
print('SKIP: python%s.%s lacks the required process fork-exec support ... ' %
sys.version_info[0:2], file=sys.stderr, end='')
else:
with discard_stderr():
for f in [kernel_error_check_imperative, kernel_error_check_symbolic]:
p = mpctx.Process(target=f)
p.start()
p.join()
assert p.exitcode != 0,\
"Expected a synchronous kernel error from %s(), none seen." % f.__name__
def test_incorrect_gpu():
# Try setting dev_id to a really big number
assert_raises(MXNetError, mx.nd.ones, (2,2), ctx=mx.gpu(100001))
@with_seed()
def test_batchnorm_backwards_notrain():
for ctx in [mx.cpu(0), mx.gpu(0)]:
for cudnn_o in [False, True]:
B,C,H,W = 4,3,2,2
x = mx.nd.random.poisson(1,shape=(B,C,H,W)).as_in_context(ctx)
gamma = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
beta = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
mean = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
std = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
x.attach_grad()
with autograd.record(False):
y = mx.ndarray.BatchNorm(x, gamma, beta, mean, std.square(),
fix_gamma=False, cudnn_off=cudnn_o)
loss=y.square().sum()
loss.backward(train_mode=False)
@with_seed()
def test_create_sparse_ndarray_gpu_to_cpu():
dim0 = 10
dim1 = 5
densities = [0, 0.5, 1]
for density in densities:
shape = rand_shape_2d(dim0, dim1)
matrix = rand_ndarray(shape, 'row_sparse', density)
data = matrix.data
indices = matrix.indices
rsp_created = mx.nd.sparse.row_sparse_array((data, indices), shape=shape, ctx=mx.cpu())
assert rsp_created.stype == 'row_sparse'
assert same(rsp_created.data.asnumpy(), data.asnumpy())
assert same(rsp_created.indices.asnumpy(), indices.asnumpy())
rsp_copy = mx.nd.array(rsp_created)
assert(same(rsp_copy.asnumpy(), rsp_created.asnumpy()))
@with_seed()
def test_softmax_activation():
gpu_a = mx.nd.array([[3., 0.5, -0.5, 2., 7.],
[2., -.4, 7., 3., 0.2]], ctx=mx.gpu(0))
cpu_a = mx.nd.array([[3., 0.5, -0.5, 2., 7.],
[2., -.4, 7., 3., 0.2]], ctx=mx.cpu())
cpu_a.attach_grad()
gpu_a.attach_grad()
with mx.autograd.record():
gpu_y = mx.nd.SoftmaxActivation(data = gpu_a)
cpu_y = mx.nd.SoftmaxActivation(data = cpu_a)
assert_almost_equal(cpu_y.asnumpy(), gpu_y.asnumpy(), atol = 1e-3, rtol = 1e-3)
gpu_y.backward()
cpu_y.backward()
assert_almost_equal(cpu_a.grad.asnumpy(), gpu_a.grad.asnumpy(),
atol = 1e-3, rtol = 1e-3)
@with_seed()
def test_bilinear_sampler_versions():
data = mx.sym.Variable('data')
grid = mx.sym.Variable('grid')
sym1 = mx.sym.BilinearSampler(data=data, grid=grid)
sym2 = mx.sym.BilinearSampler(data=data, grid=grid, cudnn_off=True)
sym3 = mx.sym.BilinearSampler(data=data, grid=grid)
test_cases = [[(1,3,15,16),(1,2,10,10)],
[(1,6,7,16),(1,2,10,4)],
[(1,7,3,16),(1,2,8,11)],
[(1,9,50,50),(1,2,50,50)]]
for item in test_cases:
data_shape, grid_shape = item
# kWriteTo
exe_cpu = sym1.simple_bind(data=data_shape, grid=grid_shape, ctx=mx.cpu(), grad_req='write')
exe_gpu = sym2.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='write')
exe_cudnn = sym3.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='write')
exe_list = [exe_cpu, exe_gpu, exe_cudnn]
ref_idx = 0
test_data = np.random.uniform(low=-0.1, high=0.1,size=data_shape).astype(np.float32)
test_grid = np.random.uniform(low=-2, high=2, size=grid_shape).astype(np.float32)
for exe in exe_list:
exe.arg_dict['data'][:] = test_data
exe.arg_dict['grid'][:] = test_grid
exe.forward(is_train=True)
assert_almost_equal(exe_list[ref_idx].outputs[0].asnumpy(), exe.outputs[0].asnumpy(), rtol=1e-3, atol=1e-5)
out_grad = np.random.uniform(low=-0.01, high=0.01,size=data_shape[:2] + grid_shape[2:]).astype(np.float32)
for exe in exe_list:
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['data'].asnumpy(), exe_list[ref_idx].grad_dict['data'].asnumpy(), rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_dict['grid'].asnumpy(), exe_list[ref_idx].grad_dict['grid'].asnumpy(), rtol=1e-3, atol=1e-5)
data_grad = exe_list[ref_idx].grad_dict['data'].asnumpy()
grid_grad = exe_list[ref_idx].grad_dict['grid'].asnumpy()
# kAddTo
exe_cpu_addto = sym1.simple_bind(data=data_shape, grid=grid_shape, ctx=mx.cpu(), grad_req='add')
exe_gpu_addto = sym2.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='add')
exe_cudnn_addto = sym3.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='add')
exe_list = [exe_cpu_addto, exe_gpu_addto, exe_cudnn_addto]
data_initial_grad = np.random.normal(size=exe_list[ref_idx].grad_dict['data'].shape).astype(np.float32)
grid_initial_grad = np.random.normal(size=exe_list[ref_idx].grad_dict['grid'].shape).astype(np.float32)
for exe in exe_list:
exe.arg_dict['data'][:] = test_data
exe.arg_dict['grid'][:] = test_grid
exe.grad_dict['data'][:] = data_initial_grad
exe.grad_dict['grid'][:] = grid_initial_grad
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['data'].asnumpy(), exe_list[ref_idx].grad_dict['data'].asnumpy(), rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_dict['grid'].asnumpy(), exe_list[ref_idx].grad_dict['grid'].asnumpy(), rtol=1e-3, atol=1e-5)
assert_almost_equal(exe_list[ref_idx].grad_dict['data'].asnumpy(), data_grad + data_initial_grad, rtol=1e-3, atol=1e-5)
assert_almost_equal(exe_list[ref_idx].grad_dict['grid'].asnumpy(), grid_grad + grid_initial_grad, rtol=1e-3, atol=1e-5)
for req_dict in [{'data' : 'null', 'grid' : 'write'}, {'data' : 'write', 'grid' : 'null'}]:
# Mixture of kWriteTo and kNullOp
exe_cpu_mix = sym1.simple_bind(data=data_shape, grid=grid_shape, ctx=mx.cpu(), grad_req=req_dict)
exe_gpu_mix = sym2.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req=req_dict)
exe_cudnn_mix = sym3.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req=req_dict)
exe_list = [exe_cpu_mix, exe_gpu_mix, exe_cudnn_mix]
for exe in exe_list:
exe.arg_dict['data'][:] = test_data
exe.arg_dict['grid'][:] = test_grid
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
                if req_dict['data'] == 'write':
assert_almost_equal(exe.grad_dict['data'].asnumpy(), exe_list[ref_idx].grad_dict['data'].asnumpy(), rtol=1e-3, atol=1e-5)
                if req_dict['grid'] == 'write':
assert_almost_equal(exe.grad_dict['grid'].asnumpy(), exe_list[ref_idx].grad_dict['grid'].asnumpy(), rtol=1e-3, atol=1e-5)
def test_context_num_gpus():
# Test that num_gpus reports at least one GPU, as the test is run on a GPU host.
assert mx.context.num_gpus() > 0
if __name__ == '__main__':
import nose
nose.runmodule()
|
listen.py
|
# Script based on the model proposed on the Black Hat Python page
# http://bt3gl.github.io/black-hat-python-networking-the-socket-module.html
import socket
import threading

BIND_IP = '0.0.0.0'
BIND_PORT = 80

def handle_client(client_socket):
    # Read up to 1 KiB from the client, acknowledge it, and close the connection.
    request = client_socket.recv(1024)
    print("[*] Received: " + request.decode(errors='replace'))
    client_socket.send(b'ACK')
    client_socket.close()

def tcp_server():
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((BIND_IP, BIND_PORT))
    server.listen(5)
    print("[*] Listening on %s:%d" % (BIND_IP, BIND_PORT))
    while True:
        client, addr = server.accept()
        print("[*] Accepted connection from: %s:%d" % (addr[0], addr[1]))
        # Handle each client in its own thread so the accept loop stays responsive.
        client_handler = threading.Thread(target=handle_client, args=(client,))
        client_handler.start()

if __name__ == '__main__':
    tcp_server()
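# A minimal client sketch for manual testing (illustrative only; assumes the server above is
# running locally and that the port matches BIND_PORT):
#
#   import socket
#   client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   client.connect(('127.0.0.1', 80))   # 80 == BIND_PORT above
#   client.send(b'hello server')
#   print(client.recv(4096))
#   client.close()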
|
_test_multiprocessing.py
|
#
# Unit tests for the multiprocessing package
#
import unittest
import unittest.mock
import queue as pyqueue
import time
import io
import itertools
import sys
import os
import gc
import errno
import signal
import array
import socket
import random
import logging
import subprocess
import struct
import operator
import pickle
import weakref
import warnings
import test.support
import test.support.script_helper
from test import support
from test.support import hashlib_helper
from test.support import import_helper
from test.support import os_helper
from test.support import socket_helper
from test.support import threading_helper
from test.support import warnings_helper
# Skip tests if _multiprocessing wasn't built.
_multiprocessing = import_helper.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
support.skip_if_broken_multiprocessing_synchronize()
import threading
import multiprocessing.connection
import multiprocessing.dummy
import multiprocessing.heap
import multiprocessing.managers
import multiprocessing.pool
import multiprocessing.queues
from multiprocessing import util
try:
from multiprocessing import reduction
HAS_REDUCTION = reduction.HAVE_SEND_HANDLE
except ImportError:
HAS_REDUCTION = False
try:
from multiprocessing.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
from multiprocessing import shared_memory
HAS_SHMEM = True
except ImportError:
HAS_SHMEM = False
try:
import msvcrt
except ImportError:
msvcrt = None
def latin(s):
return s.encode('latin')
def close_queue(queue):
if isinstance(queue, multiprocessing.queues.Queue):
queue.close()
queue.join_thread()
def join_process(process):
    # Since multiprocessing.Process has the same API as threading.Thread
    # (join() and is_alive()), the threading_helper support function can be reused.
threading_helper.join_thread(process)
if os.name == "posix":
from multiprocessing import resource_tracker
def _resource_unlink(name, rtype):
resource_tracker._CLEANUP_FUNCS[rtype](name)
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1
CHECK_TIMINGS = False     # setting this to True makes tests take a lot longer
# and can sometimes cause some non-serious
# failures because some calls block a bit
# longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
from multiprocessing.connection import wait
def wait_for_handle(handle, timeout):
if timeout is not None and timeout < 0.0:
timeout = None
return wait([handle], timeout)
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
# To speed up tests when using the forkserver, we can preload these:
PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver']
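# Usage sketch (added comment, not exercised in this file): a forkserver-based test runner
# could call multiprocessing.set_forkserver_preload(PRELOAD) before creating any context so
# that these modules are imported once in the fork server instead of in every child process.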
#
# Some tests require ctypes
#
try:
from ctypes import Structure, c_int, c_double, c_longlong
except ImportError:
Structure = object
c_int = c_double = c_longlong = None
def check_enough_semaphores():
"""Check that the system supports enough semaphores to run the test."""
# minimum number of semaphores available according to POSIX
nsems_min = 256
try:
nsems = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems == -1 or nsems >= nsems_min:
return
raise unittest.SkipTest("The OS doesn't support enough semaphores "
"to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.monotonic()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.monotonic() - t
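# Illustrative example (comment only): wrapped = TimingWrapper(time.sleep); wrapped(0.2)
# returns None and leaves wrapped.elapsed at roughly 0.2 seconds, which is the value that
# assertTimingAlmostEqual() compares against when CHECK_TIMINGS is enabled.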
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
    # For the sanity of Windows users: refuse to pickle a test case rather than
    # crashing or freezing in multiple ways.
def __reduce__(self, *args):
raise NotImplementedError("shouldn't try to pickle a test case")
__reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
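# Clarifying note (added): despite the `self` parameter this is a module-level helper, not a
# method; `self` is the semaphore-like object under test, so self.get_value() dispatches to
# that object's own bound method rather than recursing into this function.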
def get_value(self):
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
#
# Testcases
#
class DummyCallable:
def __call__(self, q, c):
assert isinstance(c, DummyCallable)
q.put(5)
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
def test_daemon_argument(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# By default uses the current process's daemon flag.
proc0 = self.Process(target=self._test)
self.assertEqual(proc0.daemon, self.current_process().daemon)
proc1 = self.Process(target=self._test, daemon=True)
self.assertTrue(proc1.daemon)
proc2 = self.Process(target=self._test, daemon=False)
self.assertFalse(proc2.daemon)
@classmethod
def _test(cls, q, *args, **kwds):
current = cls.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if cls.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_parent_process_attributes(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
self.assertIsNone(self.parent_process())
rconn, wconn = self.Pipe(duplex=False)
p = self.Process(target=self._test_send_parent_process, args=(wconn,))
p.start()
p.join()
parent_pid, parent_name = rconn.recv()
self.assertEqual(parent_pid, self.current_process().pid)
self.assertEqual(parent_pid, os.getpid())
self.assertEqual(parent_name, self.current_process().name)
@classmethod
def _test_send_parent_process(cls, wconn):
from multiprocessing.process import parent_process
wconn.send([parent_process().pid, parent_process().name])
def test_parent_process(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# Launch a child process. Make it launch a grandchild process. Kill the
# child process and make sure that the grandchild notices the death of
        # its parent (a.k.a. the child process).
rconn, wconn = self.Pipe(duplex=False)
p = self.Process(
target=self._test_create_grandchild_process, args=(wconn, ))
p.start()
if not rconn.poll(timeout=support.LONG_TIMEOUT):
raise AssertionError("Could not communicate with child process")
parent_process_status = rconn.recv()
self.assertEqual(parent_process_status, "alive")
p.terminate()
p.join()
if not rconn.poll(timeout=support.LONG_TIMEOUT):
raise AssertionError("Could not communicate with child process")
parent_process_status = rconn.recv()
self.assertEqual(parent_process_status, "not alive")
@classmethod
def _test_create_grandchild_process(cls, wconn):
p = cls.Process(target=cls._test_report_parent_status, args=(wconn, ))
p.start()
time.sleep(300)
@classmethod
def _test_report_parent_status(cls, wconn):
from multiprocessing.process import parent_process
wconn.send("alive" if parent_process().is_alive() else "not alive")
parent_process().join(timeout=support.SHORT_TIMEOUT)
wconn.send("alive" if parent_process().is_alive() else "not alive")
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEqual(p.authkey, current.authkey)
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.daemon, True)
self.assertNotIn(p, self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEqual(p.exitcode, None)
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(q.get(), args[1:])
self.assertEqual(q.get(), kwargs)
self.assertEqual(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEqual(q.get(), current.authkey)
self.assertEqual(q.get(), p.pid)
p.join()
self.assertEqual(p.exitcode, 0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
close_queue(q)
@unittest.skipUnless(threading._HAVE_THREAD_NATIVE_ID, "needs native_id")
def test_process_mainthread_native_id(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current_mainthread_native_id = threading.main_thread().native_id
q = self.Queue(1)
p = self.Process(target=self._test_process_mainthread_native_id, args=(q,))
p.start()
child_mainthread_native_id = q.get()
p.join()
close_queue(q)
self.assertNotEqual(current_mainthread_native_id, child_mainthread_native_id)
@classmethod
def _test_process_mainthread_native_id(cls, q):
mainthread_native_id = threading.main_thread().native_id
q.put(mainthread_native_id)
@classmethod
def _sleep_some(cls):
time.sleep(100)
@classmethod
def _test_sleep(cls, delay):
time.sleep(delay)
def _kill_process(self, meth):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
p = self.Process(target=self._sleep_some)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
join = TimingWrapper(p.join)
self.assertEqual(join(0), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
self.assertEqual(join(-1), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
# XXX maybe terminating too soon causes the problems on Gentoo...
time.sleep(1)
meth(p)
if hasattr(signal, 'alarm'):
# On the Gentoo buildbot waitpid() often seems to block forever.
# We use alarm() to interrupt it if it blocks for too long.
def handler(*args):
raise RuntimeError('join took too long: %s' % p)
old_handler = signal.signal(signal.SIGALRM, handler)
try:
signal.alarm(10)
self.assertEqual(join(), None)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_handler)
else:
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
return p.exitcode
def test_terminate(self):
exitcode = self._kill_process(multiprocessing.Process.terminate)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGTERM)
def test_kill(self):
exitcode = self._kill_process(multiprocessing.Process.kill)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGKILL)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.daemon = True
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
@classmethod
def _test_recursion(cls, wconn, id):
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = cls.Process(
target=cls._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
@classmethod
def _test_sentinel(cls, event):
event.wait(10.0)
def test_sentinel(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
event = self.Event()
p = self.Process(target=self._test_sentinel, args=(event,))
with self.assertRaises(ValueError):
p.sentinel
p.start()
self.addCleanup(p.join)
sentinel = p.sentinel
self.assertIsInstance(sentinel, int)
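        # Process.sentinel is a handle that becomes ready only when the
        # process ends; for example, it can be passed to
        # multiprocessing.connection.wait() to wait on several processes.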
self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
event.set()
p.join()
self.assertTrue(wait_for_handle(sentinel, timeout=1))
@classmethod
def _test_close(cls, rc=0, q=None):
if q is not None:
q.get()
sys.exit(rc)
def test_close(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
q = self.Queue()
p = self.Process(target=self._test_close, kwargs={'q': q})
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
# Child is still alive, cannot close
with self.assertRaises(ValueError):
p.close()
q.put(None)
p.join()
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.exitcode, 0)
p.close()
with self.assertRaises(ValueError):
p.is_alive()
with self.assertRaises(ValueError):
p.join()
with self.assertRaises(ValueError):
p.terminate()
p.close()
wr = weakref.ref(p)
del p
gc.collect()
self.assertIs(wr(), None)
close_queue(q)
def test_many_processes(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
N = 5 if sm == 'spawn' else 100
# Try to overwhelm the forkserver loop with events
procs = [self.Process(target=self._test_sleep, args=(0.01,))
for i in range(N)]
for p in procs:
p.start()
for p in procs:
join_process(p)
for p in procs:
self.assertEqual(p.exitcode, 0)
procs = [self.Process(target=self._sleep_some)
for i in range(N)]
for p in procs:
p.start()
time.sleep(0.001) # let the children start...
for p in procs:
p.terminate()
for p in procs:
join_process(p)
if os.name != 'nt':
exitcodes = [-signal.SIGTERM]
if sys.platform == 'darwin':
# bpo-31510: On macOS, killing a freshly started process with
# SIGTERM sometimes kills the process with SIGKILL.
exitcodes.append(-signal.SIGKILL)
for p in procs:
self.assertIn(p.exitcode, exitcodes)
def test_lose_target_ref(self):
c = DummyCallable()
wr = weakref.ref(c)
q = self.Queue()
p = self.Process(target=c, args=(q, c))
del c
p.start()
p.join()
gc.collect() # For PyPy or other GCs.
self.assertIs(wr(), None)
self.assertEqual(q.get(), 5)
close_queue(q)
@classmethod
    def _test_child_fd_inflation(cls, evt, q):
q.put(os_helper.fd_count())
evt.wait()
def test_child_fd_inflation(self):
# Number of fds in child processes should not grow with the
# number of running children.
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm == 'fork':
            # The fork method by design inherits all fds from the parent;
            # trying to go against that is a lost battle
self.skipTest('test not appropriate for {}'.format(sm))
N = 5
evt = self.Event()
q = self.Queue()
procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q))
for i in range(N)]
for p in procs:
p.start()
try:
fd_counts = [q.get() for i in range(N)]
self.assertEqual(len(set(fd_counts)), 1, fd_counts)
finally:
evt.set()
for p in procs:
p.join()
close_queue(q)
@classmethod
    def _test_wait_for_threads(cls, evt):
def func1():
time.sleep(0.5)
evt.set()
def func2():
time.sleep(20)
evt.clear()
threading.Thread(target=func1).start()
threading.Thread(target=func2, daemon=True).start()
def test_wait_for_threads(self):
# A child process should wait for non-daemonic threads to end
# before exiting
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
evt = self.Event()
proc = self.Process(target=self._test_wait_for_threads, args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
@classmethod
    def _test_error_on_stdio_flush(cls, evt, break_std_streams=None):
        for stream_name, action in (break_std_streams or {}).items():
if action == 'close':
stream = io.StringIO()
stream.close()
else:
assert action == 'remove'
stream = None
setattr(sys, stream_name, None)
evt.set()
def test_error_on_stdio_flush_1(self):
# Check that Process works with broken standard streams
streams = [io.StringIO(), None]
streams[0].close()
for stream_name in ('stdout', 'stderr'):
for stream in streams:
old_stream = getattr(sys, stream_name)
setattr(sys, stream_name, stream)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
def test_error_on_stdio_flush_2(self):
# Same as test_error_on_stdio_flush_1(), but standard streams are
# broken by the child process
for stream_name in ('stdout', 'stderr'):
for action in ('close', 'remove'):
old_stream = getattr(sys, stream_name)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt, {stream_name: action}))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
@classmethod
    def _sleep_and_set_event(cls, evt, delay=0.0):
time.sleep(delay)
evt.set()
def check_forkserver_death(self, signum):
# bpo-31308: if the forkserver process has died, we should still
# be able to create and run new Process instances (the forkserver
# is implicitly restarted).
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm != 'forkserver':
            # This test is specific to the forkserver start method
self.skipTest('test not appropriate for {}'.format(sm))
from multiprocessing.forkserver import _forkserver
_forkserver.ensure_running()
# First process sleeps 500 ms
delay = 0.5
evt = self.Event()
proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay))
proc.start()
pid = _forkserver._forkserver_pid
os.kill(pid, signum)
        # give the forkserver time to die and give proc time to complete
time.sleep(delay * 2.0)
evt2 = self.Event()
proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,))
proc2.start()
proc2.join()
self.assertTrue(evt2.is_set())
self.assertEqual(proc2.exitcode, 0)
proc.join()
self.assertTrue(evt.is_set())
self.assertIn(proc.exitcode, (0, 255))
def test_forkserver_sigint(self):
# Catchable signal
self.check_forkserver_death(signal.SIGINT)
def test_forkserver_sigkill(self):
# Uncatchable signal
if os.name != 'nt':
self.check_forkserver_death(signal.SIGKILL)
#
#
#
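# A Process subclass used by _TestSubclassingProcess below: the parent
# submits strings over a Pipe and the child echoes them back upper-cased.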
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.daemon = True
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
def test_stderr_flush(self):
# sys.stderr is flushed at process shutdown (issue #13812)
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = os_helper.TESTFN
self.addCleanup(os_helper.unlink, testfn)
proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
proc.start()
proc.join()
with open(testfn, encoding="utf-8") as f:
err = f.read()
# The whole traceback was printed
self.assertIn("ZeroDivisionError", err)
self.assertIn("test_multiprocessing.py", err)
self.assertIn("1/0 # MARKER", err)
@classmethod
def _test_stderr_flush(cls, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', encoding="utf-8", closefd=False)
1/0 # MARKER
@classmethod
def _test_sys_exit(cls, reason, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', encoding="utf-8", closefd=False)
sys.exit(reason)
def test_sys_exit(self):
# See Issue 13854
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = os_helper.TESTFN
self.addCleanup(os_helper.unlink, testfn)
for reason in (
[1, 2, 3],
'ignore this',
):
p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, 1)
with open(testfn, encoding="utf-8") as f:
content = f.read()
self.assertEqual(content.rstrip(), str(reason))
os.unlink(testfn)
cases = [
((True,), 1),
((False,), 0),
((8,), 8),
((None,), 0),
((), 0),
]
for args, expected in cases:
with self.subTest(args=args):
p = self.Process(target=sys.exit, args=args)
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, expected)
#
#
#
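# Helpers for _TestQueue: fall back to qsize() for queue types that do not
# implement empty()/full().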
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
@classmethod
def _test_put(cls, queue, child_can_start, parent_can_continue):
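        # Child side of test_put(): once allowed to start, drain the six
        # queued items and then let the parent verify the queue is empty.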
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
        # the values may be in the buffer but not yet in the pipe, so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(pyqueue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
close_queue(queue)
@classmethod
def _test_get(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# Hangs unexpectedly, remove for now
#self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(pyqueue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
close_queue(queue)
@classmethod
def _test_fork(cls, queue):
for i in range(10, 20):
queue.put(i)
        # note that at this point the items may only be buffered, so the
        # process cannot shut down until the feeder thread has finished
        # pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.daemon = True
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(pyqueue.Empty, queue.get, False)
p.join()
close_queue(queue)
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
self.skipTest('qsize method not implemented')
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
close_queue(q)
@classmethod
def _test_task_done(cls, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in range(4)]
for p in workers:
p.daemon = True
p.start()
for i in range(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
close_queue(queue)
def test_no_import_lock_contention(self):
with os_helper.temp_cwd():
module_name = 'imported_by_an_imported_module'
with open(module_name + '.py', 'w', encoding="utf-8") as f:
f.write("""if 1:
import multiprocessing
q = multiprocessing.Queue()
q.put('knock knock')
q.get(timeout=3)
q.close()
del q
""")
with import_helper.DirsOnSysPath(os.getcwd()):
try:
__import__(module_name)
except pyqueue.Empty:
self.fail("Probable regression on import lock contention;"
" see Issue #22853")
def test_timeout(self):
q = multiprocessing.Queue()
start = time.monotonic()
self.assertRaises(pyqueue.Empty, q.get, True, 0.200)
delta = time.monotonic() - start
# bpo-30317: Tolerate a delta of 100 ms because of the bad clock
# resolution on Windows (usually 15.6 ms). x86 Windows7 3.x once
# failed because the delta was only 135.8 ms.
self.assertGreaterEqual(delta, 0.100)
close_queue(q)
def test_queue_feeder_donot_stop_onexc(self):
# bpo-30414: verify feeder handles exceptions correctly
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
def __reduce__(self):
raise AttributeError
with test.support.captured_stderr():
q = self.Queue()
q.put(NotSerializable())
q.put(True)
self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
close_queue(q)
with test.support.captured_stderr():
# bpo-33078: verify that the queue size is correctly handled
# on errors.
q = self.Queue(maxsize=1)
q.put(NotSerializable())
q.put(True)
try:
self.assertEqual(q.qsize(), 1)
except NotImplementedError:
                # qsize is not available on all platforms as it
                # relies on sem_getvalue()
pass
self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
# Check that the size of the queue is correct
self.assertTrue(q.empty())
close_queue(q)
def test_queue_feeder_on_queue_feeder_error(self):
# bpo-30006: verify feeder handles exceptions using the
# _on_queue_feeder_error hook.
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
"""Mock unserializable object"""
def __init__(self):
self.reduce_was_called = False
self.on_queue_feeder_error_was_called = False
def __reduce__(self):
self.reduce_was_called = True
raise AttributeError
class SafeQueue(multiprocessing.queues.Queue):
"""Queue with overloaded _on_queue_feeder_error hook"""
@staticmethod
def _on_queue_feeder_error(e, obj):
if (isinstance(e, AttributeError) and
isinstance(obj, NotSerializable)):
obj.on_queue_feeder_error_was_called = True
not_serializable_obj = NotSerializable()
# The captured_stderr reduces the noise in the test report
with test.support.captured_stderr():
q = SafeQueue(ctx=multiprocessing.get_context())
q.put(not_serializable_obj)
# Verify that q is still functioning correctly
q.put(True)
self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
# Assert that the serialization and the hook have been called correctly
self.assertTrue(not_serializable_obj.reduce_was_called)
self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called)
def test_closed_queue_put_get_exceptions(self):
for q in multiprocessing.Queue(), multiprocessing.JoinableQueue():
q.close()
with self.assertRaisesRegex(ValueError, 'is closed'):
q.put('foo')
with self.assertRaisesRegex(ValueError, 'is closed'):
q.get()
#
#
#
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
def test_lock_context(self):
with self.Lock():
pass
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
# Currently fails on OS/X
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
@classmethod
def f(cls, cond, sleeping, woken, timeout=None):
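        # Worker for the condition tests: announce via `sleeping` that it is
        # about to wait on the condition, wait (optionally with a timeout),
        # then announce via `woken` that it resumed.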
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def assertReachesEventually(self, func, value):
for i in range(10):
try:
if func() == value:
break
except NotImplementedError:
break
time.sleep(DELTA)
time.sleep(DELTA)
self.assertReturnsIfImplemented(value, func)
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
        # start some threads/processes which will time out
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them all to sleep
for i in range(6):
sleeping.acquire()
# check they have all timed out
for i in range(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
self.assertReachesEventually(lambda: get_value(woken), 6)
# check state is not mucked up
self.check_invariant(cond)
def test_notify_n(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake some of them up
cond.acquire()
cond.notify(n=2)
cond.release()
# check 2 have woken
self.assertReachesEventually(lambda: get_value(woken), 2)
# wake the rest of them
cond.acquire()
cond.notify(n=4)
cond.release()
self.assertReachesEventually(lambda: get_value(woken), 6)
# doesn't do anything more
cond.acquire()
cond.notify(n=3)
cond.release()
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
@classmethod
def _test_waitfor_f(cls, cond, state):
with cond:
state.value = 0
cond.notify()
result = cond.wait_for(lambda : state.value==4)
if not result or state.value != 4:
sys.exit(1)
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', -1)
p = self.Process(target=self._test_waitfor_f, args=(cond, state))
p.daemon = True
p.start()
with cond:
result = cond.wait_for(lambda : state.value==0)
self.assertTrue(result)
self.assertEqual(state.value, 0)
for i in range(4):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertEqual(p.exitcode, 0)
@classmethod
def _test_waitfor_timeout_f(cls, cond, state, success, sem):
sem.release()
with cond:
expected = 0.1
dt = time.monotonic()
result = cond.wait_for(lambda : state.value==4, timeout=expected)
dt = time.monotonic() - dt
# borrow logic in assertTimeout() from test/lock_tests.py
if not result and expected * 0.6 < dt < expected * 10.0:
success.value = True
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor_timeout(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', 0)
success = self.Value('i', False)
sem = self.Semaphore(0)
p = self.Process(target=self._test_waitfor_timeout_f,
args=(cond, state, success, sem))
p.daemon = True
p.start()
self.assertTrue(sem.acquire(timeout=support.LONG_TIMEOUT))
# Only increment 3 times, so state == 4 is never reached.
for i in range(3):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertTrue(success.value)
@classmethod
def _test_wait_result(cls, c, pid):
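        # Child side of test_wait_result(): notify the parent waiting on the
        # condition, then (when a pid is given, i.e. on POSIX) send SIGINT so
        # the parent's second wait() raises KeyboardInterrupt.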
with c:
c.notify()
time.sleep(1)
if pid is not None:
os.kill(pid, signal.SIGINT)
def test_wait_result(self):
if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
pid = os.getpid()
else:
pid = None
c = self.Condition()
with c:
self.assertFalse(c.wait(0))
self.assertFalse(c.wait(0.1))
p = self.Process(target=self._test_wait_result, args=(c, pid))
p.start()
self.assertTrue(c.wait(60))
if pid is not None:
self.assertRaises(KeyboardInterrupt, c.wait, 60)
p.join()
class _TestEvent(BaseTestCase):
@classmethod
def _test_event(cls, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
        # Removed temporarily due to API shear: this does not
        # work with threading._Event objects.  is_set == isSet
self.assertEqual(event.is_set(), False)
        # Removed: threading.Event.wait() returns the value of the __flag
        # instead of None.  API shear with the semaphore-backed mp.Event.
self.assertEqual(wait(0.0), False)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
#self.assertEqual(event.is_set(), False)
p = self.Process(target=self._test_event, args=(event,))
p.daemon = True
p.start()
self.assertEqual(wait(), True)
p.join()
#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#
# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value.  We use the class _DummyList
# for the same purpose.
class _DummyList(object):
def __init__(self):
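        # A single shared 'i' slot allocated from multiprocessing's heap acts
        # as the counter; __getstate__/__setstate__ keep the wrapper and lock
        # picklable so the "list" can be shared between processes.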
wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i'))
lock = multiprocessing.Lock()
self.__setstate__((wrapper, lock))
self._lengthbuf[0] = 0
def __setstate__(self, state):
(self._wrapper, self._lock) = state
self._lengthbuf = self._wrapper.create_memoryview().cast('i')
def __getstate__(self):
return (self._wrapper, self._lock)
def append(self, _):
with self._lock:
self._lengthbuf[0] += 1
def __len__(self):
with self._lock:
return self._lengthbuf[0]
def _wait():
# A crude wait/yield function not relying on synchronization primitives.
time.sleep(0.01)
class Bunch(object):
"""
A bunch of threads.
"""
def __init__(self, namespace, f, args, n, wait_before_exit=False):
"""
Construct a bunch of `n` threads running the same function `f`.
If `wait_before_exit` is True, the threads won't terminate until
do_finish() is called.
"""
self.f = f
self.args = args
self.n = n
self.started = namespace.DummyList()
self.finished = namespace.DummyList()
self._can_exit = namespace.Event()
if not wait_before_exit:
self._can_exit.set()
threads = []
for i in range(n):
p = namespace.Process(target=self.task)
p.daemon = True
p.start()
threads.append(p)
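        # Join every worker when close() is called or when the Bunch itself
        # is garbage collected.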
def finalize(threads):
for p in threads:
p.join()
self._finalizer = weakref.finalize(self, finalize, threads)
def task(self):
pid = os.getpid()
self.started.append(pid)
try:
self.f(*self.args)
finally:
self.finished.append(pid)
self._can_exit.wait(30)
assert self._can_exit.is_set()
def wait_for_started(self):
while len(self.started) < self.n:
_wait()
def wait_for_finished(self):
while len(self.finished) < self.n:
_wait()
def do_finish(self):
self._can_exit.set()
def close(self):
self._finalizer()
class AppendTrue(object):
def __init__(self, obj):
self.obj = obj
def __call__(self):
self.obj.append(True)
class _TestBarrier(BaseTestCase):
"""
Tests for Barrier objects.
"""
N = 5
defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout
def setUp(self):
self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)
def tearDown(self):
self.barrier.abort()
self.barrier = None
def DummyList(self):
if self.TYPE == 'threads':
return []
elif self.TYPE == 'manager':
return self.manager.list()
else:
return _DummyList()
def run_threads(self, f, args):
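        # Run f in N-1 helper processes/threads via Bunch and once in the
        # calling thread, so that all N barrier parties take part.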
b = Bunch(self, f, args, self.N-1)
try:
f(*args)
b.wait_for_finished()
finally:
b.close()
@classmethod
def multipass(cls, barrier, results, n):
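        # Each party appends to results[0] before the first wait and to
        # results[1] after it; the length checks verify that every party
        # crosses the barrier in lockstep on each pass.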
m = barrier.parties
assert m == cls.N
for i in range(n):
results[0].append(True)
assert len(results[1]) == i * m
barrier.wait()
results[1].append(True)
assert len(results[0]) == (i + 1) * m
barrier.wait()
try:
assert barrier.n_waiting == 0
except NotImplementedError:
pass
assert not barrier.broken
def test_barrier(self, passes=1):
"""
Test that a barrier is passed in lockstep
"""
results = [self.DummyList(), self.DummyList()]
self.run_threads(self.multipass, (self.barrier, results, passes))
def test_barrier_10(self):
"""
Test that a barrier works for 10 consecutive runs
"""
return self.test_barrier(10)
@classmethod
def _test_wait_return_f(cls, barrier, queue):
res = barrier.wait()
queue.put(res)
def test_wait_return(self):
"""
test the return value from barrier.wait
"""
queue = self.Queue()
self.run_threads(self._test_wait_return_f, (self.barrier, queue))
results = [queue.get() for i in range(self.N)]
self.assertEqual(results.count(0), 1)
close_queue(queue)
@classmethod
def _test_action_f(cls, barrier, results):
barrier.wait()
if len(results) != 1:
raise RuntimeError
def test_action(self):
"""
Test the 'action' callback
"""
results = self.DummyList()
barrier = self.Barrier(self.N, action=AppendTrue(results))
self.run_threads(self._test_action_f, (barrier, results))
self.assertEqual(len(results), 1)
@classmethod
def _test_abort_f(cls, barrier, results1, results2):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
def test_abort(self):
"""
Test that an abort will put the barrier in a broken state
"""
results1 = self.DummyList()
results2 = self.DummyList()
self.run_threads(self._test_abort_f,
(self.barrier, results1, results2))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertTrue(self.barrier.broken)
@classmethod
def _test_reset_f(cls, barrier, results1, results2, results3):
i = barrier.wait()
if i == cls.N//2:
# Wait until the other threads are all in the barrier.
while barrier.n_waiting < cls.N-1:
time.sleep(0.001)
barrier.reset()
else:
try:
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
# Now, pass the barrier again
barrier.wait()
results3.append(True)
def test_reset(self):
"""
Test that a 'reset' on a barrier frees the waiting threads
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
self.run_threads(self._test_reset_f,
(self.barrier, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_abort_and_reset_f(cls, barrier, barrier2,
results1, results2, results3):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
# Synchronize and reset the barrier. Must synchronize first so
# that everyone has left it when we reset, and after so that no
# one enters it before the reset.
if barrier2.wait() == cls.N//2:
barrier.reset()
barrier2.wait()
barrier.wait()
results3.append(True)
def test_abort_and_reset(self):
"""
Test that a barrier can be reset after being broken.
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
barrier2 = self.Barrier(self.N)
self.run_threads(self._test_abort_and_reset_f,
(self.barrier, barrier2, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_timeout_f(cls, barrier, results):
i = barrier.wait()
if i == cls.N//2:
# One thread is late!
time.sleep(1.0)
try:
barrier.wait(0.5)
except threading.BrokenBarrierError:
results.append(True)
def test_timeout(self):
"""
Test wait(timeout)
"""
results = self.DummyList()
self.run_threads(self._test_timeout_f, (self.barrier, results))
self.assertEqual(len(results), self.barrier.parties)
@classmethod
def _test_default_timeout_f(cls, barrier, results):
i = barrier.wait(cls.defaultTimeout)
if i == cls.N//2:
# One thread is later than the default timeout
time.sleep(1.0)
try:
barrier.wait()
except threading.BrokenBarrierError:
results.append(True)
def test_default_timeout(self):
"""
Test the barrier's default timeout
"""
barrier = self.Barrier(self.N, timeout=0.5)
results = self.DummyList()
self.run_threads(self._test_default_timeout_f, (barrier, results))
self.assertEqual(len(results), barrier.parties)
def test_single_thread(self):
b = self.Barrier(1)
b.wait()
b.wait()
@classmethod
def _test_thousand_f(cls, barrier, passes, conn, lock):
for i in range(passes):
barrier.wait()
with lock:
conn.send(i)
def test_thousand(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
passes = 1000
lock = self.Lock()
conn, child_conn = self.Pipe(False)
for j in range(self.N):
p = self.Process(target=self._test_thousand_f,
args=(self.barrier, passes, child_conn, lock))
p.start()
self.addCleanup(p.join)
for i in range(passes):
for j in range(self.N):
self.assertEqual(conn.recv(), i)
#
#
#
class _TestValue(BaseTestCase):
ALLOWED_TYPES = ('processes',)
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('q', 2 ** 33, 2 ** 34),
('c', latin('x'), latin('y'))
]
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _test(cls, values):
for sv, cv in zip(values, cls.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.daemon = True
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def f(cls, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array(self, raw=False):
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.daemon = True
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array_from_size(self):
size = 10
# Test for zeroing (see issue #11675).
# The repetition below strengthens the test by increasing the chances
# of previously allocated non-zero memory being used for the new array
# on the 2nd and 3rd loops.
for _ in range(3):
arr = self.Array('i', size)
self.assertEqual(len(arr), size)
self.assertEqual(list(arr), [0] * size)
arr[:] = range(10)
self.assertEqual(list(arr), list(range(10)))
del arr
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawarray(self):
self.test_array(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock_obj(self):
arr1 = self.Array('i', list(range(10)))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', list(range(10)), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', list(range(10)), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(list(range(10)))
self.assertEqual(a[:], list(range(10)))
b = self.list()
self.assertEqual(b[:], [])
b.extend(list(range(5)))
self.assertEqual(b[:], list(range(5)))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], list(range(10)))
d = [a, b]
e = self.list(d)
self.assertEqual(
[element[:] for element in e],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello'])
def test_list_iter(self):
a = self.list(list(range(10)))
it = iter(a)
self.assertEqual(list(it), list(range(10)))
self.assertEqual(list(it), []) # exhausted
# list modified during iteration
it = iter(a)
a[0] = 100
self.assertEqual(next(it), 100)
def test_list_proxy_in_list(self):
a = self.list([self.list(range(3)) for _i in range(3)])
self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3)
a[0][-1] = 55
self.assertEqual(a[0][:], [0, 1, 55])
for i in range(1, 3):
self.assertEqual(a[i][:], [0, 1, 2])
self.assertEqual(a[1].pop(), 2)
self.assertEqual(len(a[1]), 2)
for i in range(0, 3, 2):
self.assertEqual(len(a[i]), 3)
del a
b = self.list()
b.append(b)
del b
def test_dict(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_dict_iter(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
it = iter(d)
self.assertEqual(list(it), indices)
self.assertEqual(list(it), []) # exhausted
# dictionary changed size during iteration
it = iter(d)
d.clear()
self.assertRaises(RuntimeError, next, it)
def test_dict_proxy_nested(self):
pets = self.dict(ferrets=2, hamsters=4)
supplies = self.dict(water=10, feed=3)
d = self.dict(pets=pets, supplies=supplies)
self.assertEqual(supplies['water'], 10)
self.assertEqual(d['supplies']['water'], 10)
d['supplies']['blankets'] = 5
self.assertEqual(supplies['blankets'], 5)
self.assertEqual(d['supplies']['blankets'], 5)
d['supplies']['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
del pets
del supplies
self.assertEqual(d['pets']['ferrets'], 2)
d['supplies']['blankets'] = 11
self.assertEqual(d['supplies']['blankets'], 11)
pets = d['pets']
supplies = d['supplies']
supplies['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
d.clear()
self.assertEqual(len(d), 0)
self.assertEqual(supplies['water'], 7)
self.assertEqual(pets['hamsters'], 4)
l = self.list([pets, supplies])
l[0]['marmots'] = 1
self.assertEqual(pets['marmots'], 1)
self.assertEqual(l[0]['marmots'], 1)
del pets
del supplies
self.assertEqual(l[0]['marmots'], 1)
outer = self.list([[88, 99], l])
self.assertIsInstance(outer[0], list) # Not a ListProxy
self.assertEqual(outer[-1][-1]['feed'], 3)
def test_nested_queue(self):
a = self.list() # Test queue inside list
a.append(self.Queue())
a[0].put(123)
self.assertEqual(a[0].get(), 123)
b = self.dict() # Test queue inside dict
b[0] = self.Queue()
b[0].put(456)
self.assertEqual(b[0].get(), 456)
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
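# Module-level helpers for the Pool tests below; they must live at module
# level so that process-based pools can pickle them.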
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
def mul(x, y):
return x*y
def raise_large_valuerror(wait):
time.sleep(wait)
raise ValueError("x" * 1024**2)
def identity(x):
return x
class CountedObject(object):
n_instances = 0
def __new__(cls):
cls.n_instances += 1
return object.__new__(cls)
def __del__(self):
type(self).n_instances -= 1
class SayWhenError(ValueError): pass
def exception_throwing_generator(total, when):
if when == -1:
raise SayWhenError("Somebody said when")
for i in range(total):
if i == when:
raise SayWhenError("Somebody said when")
yield i
class _TestPool(BaseTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.pool = cls.Pool(4)
@classmethod
def tearDownClass(cls):
cls.pool.terminate()
cls.pool.join()
cls.pool = None
super().tearDownClass()
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10)))))
self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
list(map(sqr, list(range(100)))))
def test_starmap(self):
psmap = self.pool.starmap
tuples = list(zip(range(10), range(9,-1, -1)))
self.assertEqual(psmap(mul, tuples),
list(itertools.starmap(mul, tuples)))
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(psmap(mul, tuples, chunksize=20),
list(itertools.starmap(mul, tuples)))
def test_starmap_async(self):
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
list(itertools.starmap(mul, tuples)))
def test_map_async(self):
self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
list(map(sqr, list(range(10)))))
def test_map_async_callbacks(self):
call_args = self.manager.list() if self.TYPE == 'manager' else []
self.pool.map_async(int, ['1'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(1, len(call_args))
self.assertEqual([1], call_args[0])
self.pool.map_async(int, ['a'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(2, len(call_args))
self.assertIsInstance(call_args[1], ValueError)
    def test_map_unpicklable(self):
# Issue #19425 -- failure to pickle should not cause a hang
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class A(object):
def __reduce__(self):
raise RuntimeError('cannot pickle')
with self.assertRaises(RuntimeError):
self.pool.map(sqr, [A()]*10)
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
except multiprocessing.TimeoutError:
self.fail("pool.map_async with chunksize stalled on null list")
def test_map_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very first of the iterable
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
# again, make sure it's reentrant
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(10, 3), 1)
class SpecialIterable:
def __iter__(self):
return self
def __next__(self):
raise SayWhenError
def __len__(self):
return 1
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, list(range(10)))
self.assertEqual(list(it), list(map(sqr, list(range(10)))))
it = self.pool.imap(sqr, list(range(10)))
for i in range(10):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
for i in range(1000):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
def test_imap_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very first of the iterable
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1)
for i in range(3):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
# SayWhenError seen at start of problematic chunk's results
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2)
for i in range(6):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4)
for i in range(4):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, list(range(10)))
self.assertEqual(sorted(it), list(map(sqr, list(range(10)))))
it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100)
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
def test_imap_unordered_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very first of the iterable
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(10, 3),
1)
expected_values = list(map(sqr, list(range(10))))
with self.assertRaises(SayWhenError):
# imap_unordered makes it difficult to anticipate the SayWhenError
for i in range(10):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(20, 7),
2)
expected_values = list(map(sqr, list(range(20))))
with self.assertRaises(SayWhenError):
for i in range(20):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
def test_make_pool(self):
expected_error = (RemoteError if self.TYPE == 'manager'
else ValueError)
self.assertRaises(expected_error, self.Pool, -1)
self.assertRaises(expected_error, self.Pool, 0)
if self.TYPE != 'manager':
p = self.Pool(3)
try:
self.assertEqual(3, len(p._pool))
finally:
p.close()
p.join()
def test_terminate(self):
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
# Sanity check the pool didn't wait for all tasks to finish
self.assertLess(join.elapsed, 2.0)
def test_empty_iterable(self):
# See Issue 12157
p = self.Pool(1)
self.assertEqual(p.map(sqr, []), [])
self.assertEqual(list(p.imap(sqr, [])), [])
self.assertEqual(list(p.imap_unordered(sqr, [])), [])
self.assertEqual(p.map_async(sqr, []).get(), [])
p.close()
p.join()
def test_context(self):
if self.TYPE == 'processes':
L = list(range(10))
expected = [sqr(i) for i in L]
with self.Pool(2) as p:
r = p.map_async(sqr, L)
self.assertEqual(r.get(), expected)
p.join()
self.assertRaises(ValueError, p.map_async, sqr, L)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
        # We want to ensure that the traceback from the child process is
        # contained in the traceback raised in the main process.
if self.TYPE == 'processes':
with self.Pool(1) as p:
try:
p.apply(self._test_traceback)
except Exception as e:
exc = e
else:
self.fail('expected RuntimeError')
p.join()
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
# _helper_reraises_exception should not make the error
# a remote exception
with self.Pool(1) as p:
try:
p.map(sqr, exception_throwing_generator(1, -1), 1)
except Exception as e:
exc = e
else:
self.fail('expected SayWhenError')
self.assertIs(type(exc), SayWhenError)
self.assertIs(exc.__cause__, None)
p.join()
@classmethod
def _test_wrapped_exception(cls):
raise RuntimeError('foo')
def test_wrapped_exception(self):
# Issue #20980: Should not wrap exception when using thread pool
with self.Pool(1) as p:
with self.assertRaises(RuntimeError):
p.apply(self._test_wrapped_exception)
p.join()
def test_map_no_failfast(self):
# Issue #23992: the fail-fast behaviour when an exception is raised
# during map() would make Pool.join() deadlock, because a worker
# process would fill the result queue (after the result handler thread
# terminated, hence not draining it anymore).
t_start = time.monotonic()
with self.assertRaises(ValueError):
with self.Pool(2) as p:
try:
p.map(raise_large_valuerror, [0, 1])
finally:
time.sleep(0.5)
p.close()
p.join()
# check that we indeed waited for all jobs
self.assertGreater(time.monotonic() - t_start, 0.9)
def test_release_task_refs(self):
# Issue #29861: task arguments and results should not be kept
# alive after we are done with them.
objs = [CountedObject() for i in range(10)]
refs = [weakref.ref(o) for o in objs]
self.pool.map(identity, objs)
del objs
gc.collect() # For PyPy or other GCs.
time.sleep(DELTA) # let threaded cleanup code run
self.assertEqual(set(wr() for wr in refs), {None})
# With a process pool, copies of the objects are returned, check
# they were released too.
self.assertEqual(CountedObject.n_instances, 0)
def test_enter(self):
if self.TYPE == 'manager':
self.skipTest("test not applicable to manager")
pool = self.Pool(1)
with pool:
pass
        # leaving the "with" block called pool.terminate(),
        # so the pool is no longer running
with self.assertRaises(ValueError):
# bpo-35477: pool.__enter__() fails if the pool is not running
with pool:
pass
pool.join()
def test_resource_warning(self):
if self.TYPE == 'manager':
self.skipTest("test not applicable to manager")
pool = self.Pool(1)
pool.terminate()
pool.join()
# force state to RUN to emit ResourceWarning in __del__()
pool._state = multiprocessing.pool.RUN
with warnings_helper.check_warnings(
('unclosed running multiprocessing pool', ResourceWarning)):
pool = None
support.gc_collect()
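# Helpers for _TestPoolWorkerErrors: raising() fails inside the worker,
# while unpickleable_result() returns a lambda that cannot be pickled back
# to the parent process.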
def raising():
raise KeyError("key")
def unpickleable_result():
return lambda: 42
class _TestPoolWorkerErrors(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_async_error_callback(self):
p = multiprocessing.Pool(2)
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(raising, error_callback=errback)
self.assertRaises(KeyError, res.get)
self.assertTrue(scratchpad[0])
self.assertIsInstance(scratchpad[0], KeyError)
p.close()
p.join()
def test_unpickleable_result(self):
from multiprocessing.pool import MaybeEncodingError
p = multiprocessing.Pool(2)
# Make sure we don't lose pool processes because of encoding errors.
for iteration in range(20):
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(unpickleable_result, error_callback=errback)
self.assertRaises(MaybeEncodingError, res.get)
wrapped = scratchpad[0]
self.assertTrue(wrapped)
self.assertIsInstance(scratchpad[0], MaybeEncodingError)
self.assertIsNotNone(wrapped.exc)
self.assertIsNotNone(wrapped.value)
p.close()
p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_pool_worker_lifetime(self):
p = multiprocessing.Pool(3, maxtasksperchild=10)
self.assertEqual(3, len(p._pool))
origworkerpids = [w.pid for w in p._pool]
# Run many tasks so each worker gets replaced (hopefully)
results = []
for i in range(100):
results.append(p.apply_async(sqr, (i, )))
# Fetch the results and verify we got the right answers,
# also ensuring all the tasks have completed.
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
# Refill the pool
p._repopulate_pool()
# Wait until all workers are alive
# (countdown * DELTA = 5 seconds max startup process time)
countdown = 50
while countdown and not all(w.is_alive() for w in p._pool):
countdown -= 1
time.sleep(DELTA)
finalworkerpids = [w.pid for w in p._pool]
# All pids should be assigned. See issue #7805.
self.assertNotIn(None, origworkerpids)
self.assertNotIn(None, finalworkerpids)
# Finally, check that the worker pids have changed
self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
p.close()
p.join()
def test_pool_worker_lifetime_early_close(self):
# Issue #10332: closing a pool whose workers have limited lifetimes
# before all the tasks completed would make join() hang.
p = multiprocessing.Pool(3, maxtasksperchild=1)
results = []
for i in range(6):
results.append(p.apply_async(sqr, (i, 0.3)))
p.close()
p.join()
# check the results
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
def test_worker_finalization_via_atexit_handler_of_multiprocessing(self):
# tests cases against bpo-38744 and bpo-39360
cmd = '''if 1:
from multiprocessing import Pool
problem = None
class A:
def __init__(self):
self.pool = Pool(processes=1)
def test():
global problem
problem = A()
problem.pool.map(float, tuple(range(10)))
if __name__ == "__main__":
test()
'''
rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd)
self.assertEqual(rc, 0)
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in range(10):
yield i*i
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__',)
def __iter__(self):
return self
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
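# Minimal usage sketch (the helper name is illustrative and it is not invoked by
# the tests): how the registrations above are typically used.
def _example_custom_manager_usage():
    manager = MyManager()
    manager.start()
    try:
        foo = manager.Foo()                # full proxy: public f() and g() exposed
        assert foo.f() == 'f()'
        squares = list(manager.baz())      # IteratorProxy forwards __next__()
        assert squares == [i * i for i in range(10)]
    finally:
        manager.shutdown()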
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
self.common(manager)
manager.shutdown()
# bpo-30356: BaseManager._finalize_manager() sends SIGTERM
# to the manager process if it takes longer than 1 second to stop,
# which happens on slow buildbots.
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))
def test_mymanager_context(self):
with MyManager() as manager:
self.common(manager)
# bpo-30356: BaseManager._finalize_manager() sends SIGTERM
# to the manager process if it takes longer than 1 second to stop,
# which happens on slow buildbots.
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))
def test_mymanager_context_prestarted(self):
manager = MyManager()
manager.start()
with manager:
self.common(manager)
self.assertEqual(manager._process.exitcode, 0)
def common(self, manager):
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
_queue = pyqueue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
SERIALIZER = 'xmlrpclib'
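# Illustrative note: BaseManager accepts serializer='pickle' (the default) or
# serializer='xmlrpclib'.  The xmlrpclib transport can only marshal basic types
# (strings, numbers, lists, ...), which is why test_remote below expects
# queue.put(time.sleep) to fail and why tuples come back as lists.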
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
values = ['hello world', None, True, 2.25,
'hall\xe5 v\xe4rlden',
'\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442',
b'hall\xe5 v\xe4rlden',
]
result = values[:]
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
        # Note that xmlrpclib will deserialize the object as a list, not a tuple
queue.put(tuple(cls.values))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
self.addCleanup(manager.shutdown)
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
self.assertEqual(queue.get(), self.result)
# Because we are using xmlrpclib for serialization instead of
# pickle this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
@hashlib_helper.requires_hashdigest('md5')
class _TestManagerRestart(BaseTestCase):
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager(
address=address, authkey=authkey, serializer=SERIALIZER)
manager.connect()
queue = manager.get_queue()
queue.put('hello world')
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER)
try:
srvr = manager.get_server()
addr = srvr.address
# Close the connection.Listener socket which gets opened as a part
# of manager.get_server(). It's not needed for the test.
srvr.listener.close()
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.start()
p.join()
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
finally:
if hasattr(manager, "shutdown"):
manager.shutdown()
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
try:
manager.start()
self.addCleanup(manager.shutdown)
except OSError as e:
if e.errno != errno.EADDRINUSE:
raise
# Retry after some time, in case the old socket was lingering
# (sporadic failure on buildbots)
time.sleep(1.0)
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
if hasattr(manager, "shutdown"):
self.addCleanup(manager.shutdown)
#
#
#
SENTINEL = latin('')
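# Minimal usage sketch (the helper name is illustrative and it is not invoked by
# the tests) of the Connection API exercised by _TestConnection below:
# send()/recv() pickle Python objects, send_bytes()/recv_bytes() move raw bytes.
def _example_pipe_roundtrip():
    a, b = multiprocessing.Pipe()
    with a, b:
        a.send({'answer': 42})             # pickled object
        assert b.recv() == {'answer': 42}
        b.send_bytes(b'raw')               # raw bytes, no pickling
        assert a.recv_bytes() == b'raw'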
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _echo(cls, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', list(range(4)))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort as e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(-1), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
time.sleep(.1)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
        really_big_msg = latin('X') * (1024 * 1024 * 16)   # 16 MiB
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(OSError, reader.send, 2)
self.assertRaises(OSError, writer.recv)
self.assertRaises(OSError, writer.poll)
def test_spawn_close(self):
        # We test that a pipe connection can be closed by the parent
        # process immediately after the child is spawned.  On Windows
        # this sometimes failed in old versions because child_conn
        # could be closed before the child got a chance to duplicate it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
@classmethod
def _is_fd_assigned(cls, fd):
try:
os.fstat(fd)
except OSError as e:
if e.errno == errno.EBADF:
return False
raise
else:
return True
@classmethod
def _writefd(cls, conn, data, create_dummy_fds=False):
if create_dummy_fds:
for i in range(0, 256):
if not cls._is_fd_assigned(i):
os.dup2(conn.fileno(), i)
fd = reduction.recv_handle(conn)
if msvcrt:
fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
os.write(fd, data)
os.close(fd)
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
def test_fd_transfer(self):
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
p.daemon = True
p.start()
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with open(os_helper.TESTFN, "wb") as f:
fd = f.fileno()
if msvcrt:
fd = msvcrt.get_osfhandle(fd)
reduction.send_handle(conn, fd, p.pid)
p.join()
with open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"foo")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
@unittest.skipIf(MAXFD <= 256,
"largest assignable fd number is too small")
@unittest.skipUnless(hasattr(os, "dup2"),
"test needs os.dup2()")
def test_large_fd_transfer(self):
# With fd > 256 (issue #11657)
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
p.daemon = True
p.start()
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with open(os_helper.TESTFN, "wb") as f:
fd = f.fileno()
for newfd in range(256, MAXFD):
if not self._is_fd_assigned(newfd):
break
else:
self.fail("could not find an unassigned large file descriptor")
os.dup2(fd, newfd)
try:
reduction.send_handle(conn, newfd, p.pid)
finally:
os.close(newfd)
p.join()
with open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"bar")
@classmethod
    def _send_data_without_fd(cls, conn):
os.write(conn.fileno(), b"\0")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
def test_missing_fd_transfer(self):
# Check that exception is raised when received data is not
# accompanied by a file descriptor in ancillary data.
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
p.daemon = True
p.start()
self.assertRaises(RuntimeError, reduction.recv_handle, conn)
p.join()
def test_context(self):
a, b = self.Pipe()
with a, b:
a.send(1729)
self.assertEqual(b.recv(), 1729)
if self.TYPE == 'processes':
self.assertFalse(a.closed)
self.assertFalse(b.closed)
if self.TYPE == 'processes':
self.assertTrue(a.closed)
self.assertTrue(b.closed)
self.assertRaises(OSError, a.recv)
self.assertRaises(OSError, b.recv)
class _TestListener(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_multiple_bind(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
self.addCleanup(l.close)
self.assertRaises(OSError, self.connection.Listener,
l.address, family)
def test_context(self):
with self.connection.Listener() as l:
with self.connection.Client(l.address) as c:
with l.accept() as d:
c.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, l.accept)
@unittest.skipUnless(util.abstract_sockets_supported,
"test needs abstract socket support")
def test_abstract_socket(self):
with self.connection.Listener("\0something") as listener:
with self.connection.Client(listener.address) as client:
with listener.accept() as d:
client.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, listener.accept)
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _test(cls, address):
conn = cls.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
def test_issue14725(self):
l = self.connection.Listener()
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
time.sleep(1)
        # On Windows the client process should by now have connected,
        # written its data and closed the pipe handle.  This causes
        # ConnectNamedPipe() to fail with ERROR_NO_DATA.  See Issue
        # 14725.
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
conn.close()
p.join()
l.close()
def test_issue16955(self):
for fam in self.connection.families:
l = self.connection.Listener(family=fam)
c = self.connection.Client(l.address)
a = l.accept()
a.send_bytes(b"hello")
self.assertTrue(c.poll(1))
a.close()
c.close()
l.close()
class _TestPoll(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_empty_string(self):
a, b = self.Pipe()
self.assertEqual(a.poll(), False)
b.send_bytes(b'')
self.assertEqual(a.poll(), True)
self.assertEqual(a.poll(), True)
@classmethod
def _child_strings(cls, conn, strings):
for s in strings:
time.sleep(0.1)
conn.send_bytes(s)
conn.close()
def test_strings(self):
strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop')
a, b = self.Pipe()
p = self.Process(target=self._child_strings, args=(b, strings))
p.start()
for s in strings:
for i in range(200):
if a.poll(0.01):
break
x = a.recv_bytes()
self.assertEqual(s, x)
p.join()
@classmethod
def _child_boundaries(cls, r):
# Polling may "pull" a message in to the child process, but we
# don't want it to pull only part of a message, as that would
# corrupt the pipe for any other processes which might later
# read from it.
r.poll(5)
def test_boundaries(self):
r, w = self.Pipe(False)
p = self.Process(target=self._child_boundaries, args=(r,))
p.start()
time.sleep(2)
L = [b"first", b"second"]
for obj in L:
w.send_bytes(obj)
w.close()
p.join()
self.assertIn(r.recv_bytes(), L)
@classmethod
def _child_dont_merge(cls, b):
b.send_bytes(b'a')
b.send_bytes(b'b')
b.send_bytes(b'cd')
def test_dont_merge(self):
a, b = self.Pipe()
self.assertEqual(a.poll(0.0), False)
self.assertEqual(a.poll(0.1), False)
p = self.Process(target=self._child_dont_merge, args=(b,))
p.start()
self.assertEqual(a.recv_bytes(), b'a')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.recv_bytes(), b'b')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(0.0), True)
self.assertEqual(a.recv_bytes(), b'cd')
p.join()
#
# Test of sending connection and socket objects between processes
#
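# Illustrative note: pickling a Connection or socket for another process relies
# on multiprocessing.reduction, which duplicates the underlying handle or file
# descriptor (DuplicateHandle() on Windows, fd passing over a Unix-domain socket
# elsewhere); that is why the tests below are guarded by HAS_REDUCTION.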
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@hashlib_helper.requires_hashdigest('md5')
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def tearDownClass(cls):
from multiprocessing import resource_sharer
resource_sharer.stop(timeout=support.LONG_TIMEOUT)
@classmethod
def _listener(cls, conn, families):
for fam in families:
l = cls.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
l = socket.create_server((socket_helper.HOST, 0))
conn.send(l.getsockname())
new_conn, addr = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
conn.recv()
@classmethod
def _remote(cls, conn):
for (address, msg) in iter(conn.recv, None):
client = cls.connection.Client(address)
client.send(msg.upper())
client.close()
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.daemon = True
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.daemon = True
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
buf = []
while True:
s = new_conn.recv(100)
if not s:
break
buf.append(s)
buf = b''.join(buf)
self.assertEqual(buf, msg.upper())
new_conn.close()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
@classmethod
def child_access(cls, conn):
w = conn.recv()
w.send('all is well')
w.close()
r = conn.recv()
msg = r.recv()
conn.send(msg*2)
conn.close()
def test_access(self):
# On Windows, if we do not specify a destination pid when
# using DupHandle then we need to be careful to use the
# correct access flags for DuplicateHandle(), or else
# DupHandle.detach() will raise PermissionError. For example,
# for a read only pipe handle we should use
# access=FILE_GENERIC_READ. (Unfortunately
# DUPLICATE_SAME_ACCESS does not work.)
conn, child_conn = self.Pipe()
p = self.Process(target=self.child_access, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
r, w = self.Pipe(duplex=False)
conn.send(w)
w.close()
self.assertEqual(r.recv(), 'all is well')
r.close()
r, w = self.Pipe(duplex=False)
conn.send(r)
r.close()
w.send('foobar')
w.close()
self.assertEqual(conn.recv(), 'foobar'*2)
p.join()
#
#
#
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
super().setUp()
# Make pristine heap for these tests
self.old_heap = multiprocessing.heap.BufferWrapper._heap
multiprocessing.heap.BufferWrapper._heap = multiprocessing.heap.Heap()
def tearDown(self):
multiprocessing.heap.BufferWrapper._heap = self.old_heap
super().tearDown()
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
heap._DISCARD_FREE_SPACE_LARGER_THAN = 0
# create and destroy lots of blocks of different sizes
for i in range(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
del b
# verify the state of the heap
with heap._lock:
all = []
free = 0
occupied = 0
for L in list(heap._len_to_seq.values()):
# count all free blocks in arenas
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
free += (stop-start)
for arena, arena_blocks in heap._allocated_blocks.items():
# count all allocated blocks in arenas
for start, stop in arena_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
self.assertEqual(free + occupied,
sum(arena.size for arena in heap._arenas))
all.sort()
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
if arena != narena:
# Two different arenas
self.assertEqual(stop, heap._arenas[arena].size) # last block
self.assertEqual(nstart, 0) # first block
else:
# Same arena: two adjacent blocks
self.assertEqual(stop, nstart)
        # test freeing all blocks
random.shuffle(blocks)
while blocks:
blocks.pop()
self.assertEqual(heap._n_frees, heap._n_mallocs)
self.assertEqual(len(heap._pending_free_blocks), 0)
self.assertEqual(len(heap._arenas), 0)
self.assertEqual(len(heap._allocated_blocks), 0, heap._allocated_blocks)
self.assertEqual(len(heap._len_to_seq), 0)
def test_free_from_gc(self):
# Check that freeing of blocks by the garbage collector doesn't deadlock
# (issue #12352).
# Make sure the GC is enabled, and set lower collection thresholds to
# make collections more frequent (and increase the probability of
# deadlock).
if not gc.isenabled():
gc.enable()
self.addCleanup(gc.disable)
thresholds = gc.get_threshold()
self.addCleanup(gc.set_threshold, *thresholds)
gc.set_threshold(10)
# perform numerous block allocations, with cyclic references to make
# sure objects are collected asynchronously by the gc
for i in range(5000):
a = multiprocessing.heap.BufferWrapper(1)
b = multiprocessing.heap.BufferWrapper(1)
# circular references
a.buddy = b
b.buddy = a
#
#
#
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double),
('z', c_longlong,)
]
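# Minimal usage sketch (helper names are illustrative and not invoked by the
# tests) of the sharedctypes API exercised by _TestSharedCTypes below.  Assumes
# multiprocessing.sharedctypes is importable (HAS_SHAREDCTYPES).
def _example_double_in_child(value, arr):
    with value.get_lock():                 # Value() is synchronized by default
        value.value *= 2
    for i in range(len(arr)):
        arr[i] *= 2
def _example_shared_ctypes():
    value = multiprocessing.Value('i', 21)
    arr = multiprocessing.Array('d', [0.0, 1.0, 2.0])
    p = multiprocessing.Process(target=_example_double_in_child, args=(value, arr))
    p.start()
    p.join()
    return value.value, list(arr)          # (42, [0.0, 2.0, 4.0])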
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _double(cls, x, y, z, foo, arr, string):
x.value *= 2
y.value *= 2
z.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
x = Value('i', 7, lock=lock)
y = Value(c_double, 1.0/3.0, lock=lock)
z = Value(c_longlong, 2 ** 33, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = self.Array('d', list(range(10)), lock=lock)
string = self.Array('c', 20, lock=lock)
string.value = latin('hello')
p = self.Process(target=self._double, args=(x, y, z, foo, arr, string))
p.daemon = True
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(z.value, 2 ** 34)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
def test_synchronize(self):
self.test_sharedctypes(lock=True)
def test_copy(self):
foo = _Foo(2, 5.0, 2 ** 33)
bar = copy(foo)
foo.x = 0
foo.y = 0
foo.z = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
self.assertEqual(bar.z, 2 ** 33)
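# Minimal usage sketch (the helper name is illustrative and it is not invoked by
# the tests) of the shared_memory API exercised by _TestSharedMemory below.
# Assumes multiprocessing.shared_memory is importable (HAS_SHMEM).
def _example_shared_memory_roundtrip():
    creator = shared_memory.SharedMemory(create=True, size=16)
    try:
        creator.buf[:5] = b'hello'
        attacher = shared_memory.SharedMemory(creator.name)  # attach by name
        data = bytes(attacher.buf[:5])                       # b'hello'
        attacher.close()
        return data
    finally:
        creator.close()
        creator.unlink()                   # release the block (no-op on Windows)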
@unittest.skipUnless(HAS_SHMEM, "requires multiprocessing.shared_memory")
@hashlib_helper.requires_hashdigest('md5')
class _TestSharedMemory(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@staticmethod
def _attach_existing_shmem_then_write(shmem_name_or_obj, binary_data):
if isinstance(shmem_name_or_obj, str):
local_sms = shared_memory.SharedMemory(shmem_name_or_obj)
else:
local_sms = shmem_name_or_obj
local_sms.buf[:len(binary_data)] = binary_data
local_sms.close()
def test_shared_memory_basics(self):
sms = shared_memory.SharedMemory('test01_tsmb', create=True, size=512)
self.addCleanup(sms.unlink)
# Verify attributes are readable.
self.assertEqual(sms.name, 'test01_tsmb')
self.assertGreaterEqual(sms.size, 512)
self.assertGreaterEqual(len(sms.buf), sms.size)
# Verify __repr__
self.assertIn(sms.name, str(sms))
self.assertIn(str(sms.size), str(sms))
# Test pickling
sms.buf[0:6] = b'pickle'
pickled_sms = pickle.dumps(sms)
sms2 = pickle.loads(pickled_sms)
self.assertEqual(sms.name, sms2.name)
        self.assertEqual(bytes(sms.buf[0:6]), b'pickle')
        self.assertEqual(bytes(sms2.buf[0:6]), b'pickle')
# Modify contents of shared memory segment through memoryview.
sms.buf[0] = 42
self.assertEqual(sms.buf[0], 42)
# Attach to existing shared memory segment.
also_sms = shared_memory.SharedMemory('test01_tsmb')
self.assertEqual(also_sms.buf[0], 42)
also_sms.close()
# Attach to existing shared memory segment but specify a new size.
same_sms = shared_memory.SharedMemory('test01_tsmb', size=20*sms.size)
self.assertLess(same_sms.size, 20*sms.size) # Size was ignored.
same_sms.close()
        # Creating a shared memory segment with a negative size must fail.
        with self.assertRaises(ValueError):
            shared_memory.SharedMemory(create=True, size=-2)
        # Attaching to a shared memory segment without a name must fail.
        with self.assertRaises(ValueError):
            shared_memory.SharedMemory(create=False)
        # Test that the shared memory segment is created properly when
        # _make_filename returns the name of an existing segment.
with unittest.mock.patch(
'multiprocessing.shared_memory._make_filename') as mock_make_filename:
NAME_PREFIX = shared_memory._SHM_NAME_PREFIX
names = ['test01_fn', 'test02_fn']
            # Prepend NAME_PREFIX (either '/psm_' or 'wnsm_'); some
            # POSIX-compliant systems require the name to start with '/'.
names = [NAME_PREFIX + name for name in names]
mock_make_filename.side_effect = names
shm1 = shared_memory.SharedMemory(create=True, size=1)
self.addCleanup(shm1.unlink)
self.assertEqual(shm1._name, names[0])
mock_make_filename.side_effect = names
shm2 = shared_memory.SharedMemory(create=True, size=1)
self.addCleanup(shm2.unlink)
self.assertEqual(shm2._name, names[1])
if shared_memory._USE_POSIX:
# Posix Shared Memory can only be unlinked once. Here we
# test an implementation detail that is not observed across
# all supported platforms (since WindowsNamedSharedMemory
# manages unlinking on its own and unlink() does nothing).
# True release of shared memory segment does not necessarily
# happen until process exits, depending on the OS platform.
with self.assertRaises(FileNotFoundError):
sms_uno = shared_memory.SharedMemory(
'test01_dblunlink',
create=True,
size=5000
)
try:
self.assertGreaterEqual(sms_uno.size, 5000)
sms_duo = shared_memory.SharedMemory('test01_dblunlink')
sms_duo.unlink() # First shm_unlink() call.
sms_duo.close()
sms_uno.close()
finally:
sms_uno.unlink() # A second shm_unlink() call is bad.
with self.assertRaises(FileExistsError):
# Attempting to create a new shared memory segment with a
# name that is already in use triggers an exception.
there_can_only_be_one_sms = shared_memory.SharedMemory(
'test01_tsmb',
create=True,
size=512
)
if shared_memory._USE_POSIX:
# Requesting creation of a shared memory segment with the option
# to attach to an existing segment, if that name is currently in
# use, should not trigger an exception.
# Note: Using a smaller size could possibly cause truncation of
# the existing segment but is OS platform dependent. In the
# case of MacOS/darwin, requesting a smaller size is disallowed.
class OptionalAttachSharedMemory(shared_memory.SharedMemory):
_flags = os.O_CREAT | os.O_RDWR
ok_if_exists_sms = OptionalAttachSharedMemory('test01_tsmb')
self.assertEqual(ok_if_exists_sms.size, sms.size)
ok_if_exists_sms.close()
# Attempting to attach to an existing shared memory segment when
# no segment exists with the supplied name triggers an exception.
with self.assertRaises(FileNotFoundError):
nonexisting_sms = shared_memory.SharedMemory('test01_notthere')
nonexisting_sms.unlink() # Error should occur on prior line.
sms.close()
# Test creating a shared memory segment with negative size
with self.assertRaises(ValueError):
sms_invalid = shared_memory.SharedMemory(create=True, size=-1)
# Test creating a shared memory segment with size 0
with self.assertRaises(ValueError):
sms_invalid = shared_memory.SharedMemory(create=True, size=0)
# Test creating a shared memory segment without size argument
with self.assertRaises(ValueError):
sms_invalid = shared_memory.SharedMemory(create=True)
def test_shared_memory_across_processes(self):
# bpo-40135: don't define shared memory block's name in case of
# the failure when we run multiprocessing tests in parallel.
sms = shared_memory.SharedMemory(create=True, size=512)
self.addCleanup(sms.unlink)
# Verify remote attachment to existing block by name is working.
p = self.Process(
target=self._attach_existing_shmem_then_write,
args=(sms.name, b'howdy')
)
p.daemon = True
p.start()
p.join()
self.assertEqual(bytes(sms.buf[:5]), b'howdy')
# Verify pickling of SharedMemory instance also works.
p = self.Process(
target=self._attach_existing_shmem_then_write,
args=(sms, b'HELLO')
)
p.daemon = True
p.start()
p.join()
self.assertEqual(bytes(sms.buf[:5]), b'HELLO')
sms.close()
@unittest.skipIf(os.name != "posix", "not feasible in non-posix platforms")
def test_shared_memory_SharedMemoryServer_ignores_sigint(self):
# bpo-36368: protect SharedMemoryManager server process from
# KeyboardInterrupt signals.
smm = multiprocessing.managers.SharedMemoryManager()
smm.start()
# make sure the manager works properly at the beginning
sl = smm.ShareableList(range(10))
        # The manager's server should ignore KeyboardInterrupt signals,
        # maintain its connection with the current process, and succeed
        # when asked to deliver memory segments.
os.kill(smm._process.pid, signal.SIGINT)
sl2 = smm.ShareableList(range(10))
# test that the custom signal handler registered in the Manager does
# not affect signal handling in the parent process.
with self.assertRaises(KeyboardInterrupt):
os.kill(os.getpid(), signal.SIGINT)
smm.shutdown()
@unittest.skipIf(os.name != "posix", "resource_tracker is posix only")
def test_shared_memory_SharedMemoryManager_reuses_resource_tracker(self):
# bpo-36867: test that a SharedMemoryManager uses the
# same resource_tracker process as its parent.
cmd = '''if 1:
from multiprocessing.managers import SharedMemoryManager
smm = SharedMemoryManager()
smm.start()
sl = smm.ShareableList(range(10))
smm.shutdown()
'''
rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd)
# Before bpo-36867 was fixed, a SharedMemoryManager not using the same
# resource_tracker process as its parent would make the parent's
# tracker complain about sl being leaked even though smm.shutdown()
# properly released sl.
self.assertFalse(err)
def test_shared_memory_SharedMemoryManager_basics(self):
smm1 = multiprocessing.managers.SharedMemoryManager()
with self.assertRaises(ValueError):
smm1.SharedMemory(size=9) # Fails if SharedMemoryServer not started
smm1.start()
lol = [ smm1.ShareableList(range(i)) for i in range(5, 10) ]
lom = [ smm1.SharedMemory(size=j) for j in range(32, 128, 16) ]
        doppelganger_list0 = shared_memory.ShareableList(name=lol[0].shm.name)
        self.assertEqual(len(doppelganger_list0), 5)
        doppelganger_shm0 = shared_memory.SharedMemory(name=lom[0].name)
        self.assertGreaterEqual(len(doppelganger_shm0.buf), 32)
held_name = lom[0].name
smm1.shutdown()
if sys.platform != "win32":
            # Calls to unlink() have no effect on Windows; shared memory
            # is only released once the final process exits.
with self.assertRaises(FileNotFoundError):
# No longer there to be attached to again.
absent_shm = shared_memory.SharedMemory(name=held_name)
with multiprocessing.managers.SharedMemoryManager() as smm2:
sl = smm2.ShareableList("howdy")
shm = smm2.SharedMemory(size=128)
held_name = sl.shm.name
if sys.platform != "win32":
with self.assertRaises(FileNotFoundError):
# No longer there to be attached to again.
absent_sl = shared_memory.ShareableList(name=held_name)
def test_shared_memory_ShareableList_basics(self):
sl = shared_memory.ShareableList(
['howdy', b'HoWdY', -273.154, 100, None, True, 42]
)
self.addCleanup(sl.shm.unlink)
# Verify __repr__
self.assertIn(sl.shm.name, str(sl))
self.assertIn(str(list(sl)), str(sl))
# Index Out of Range (get)
with self.assertRaises(IndexError):
sl[7]
# Index Out of Range (set)
with self.assertRaises(IndexError):
sl[7] = 2
# Assign value without format change (str -> str)
current_format = sl._get_packing_format(0)
sl[0] = 'howdy'
self.assertEqual(current_format, sl._get_packing_format(0))
# Verify attributes are readable.
self.assertEqual(sl.format, '8s8sdqxxxxxx?xxxxxxxx?q')
# Exercise len().
self.assertEqual(len(sl), 7)
# Exercise index().
with warnings.catch_warnings():
# Suppress BytesWarning when comparing against b'HoWdY'.
warnings.simplefilter('ignore')
with self.assertRaises(ValueError):
sl.index('100')
self.assertEqual(sl.index(100), 3)
# Exercise retrieving individual values.
self.assertEqual(sl[0], 'howdy')
self.assertEqual(sl[-2], True)
# Exercise iterability.
self.assertEqual(
tuple(sl),
('howdy', b'HoWdY', -273.154, 100, None, True, 42)
)
# Exercise modifying individual values.
sl[3] = 42
self.assertEqual(sl[3], 42)
sl[4] = 'some' # Change type at a given position.
self.assertEqual(sl[4], 'some')
self.assertEqual(sl.format, '8s8sdq8sxxxxxxx?q')
with self.assertRaisesRegex(ValueError,
"exceeds available storage"):
sl[4] = 'far too many'
self.assertEqual(sl[4], 'some')
sl[0] = 'encodés' # Exactly 8 bytes of UTF-8 data
self.assertEqual(sl[0], 'encodés')
self.assertEqual(sl[1], b'HoWdY') # no spillage
with self.assertRaisesRegex(ValueError,
"exceeds available storage"):
sl[0] = 'encodées' # Exactly 9 bytes of UTF-8 data
self.assertEqual(sl[1], b'HoWdY')
with self.assertRaisesRegex(ValueError,
"exceeds available storage"):
sl[1] = b'123456789'
self.assertEqual(sl[1], b'HoWdY')
# Exercise count().
with warnings.catch_warnings():
# Suppress BytesWarning when comparing against b'HoWdY'.
warnings.simplefilter('ignore')
self.assertEqual(sl.count(42), 2)
self.assertEqual(sl.count(b'HoWdY'), 1)
self.assertEqual(sl.count(b'adios'), 0)
# Exercise creating a duplicate.
sl_copy = shared_memory.ShareableList(sl, name='test03_duplicate')
try:
self.assertNotEqual(sl.shm.name, sl_copy.shm.name)
self.assertEqual('test03_duplicate', sl_copy.shm.name)
self.assertEqual(list(sl), list(sl_copy))
self.assertEqual(sl.format, sl_copy.format)
sl_copy[-1] = 77
self.assertEqual(sl_copy[-1], 77)
self.assertNotEqual(sl[-1], 77)
sl_copy.shm.close()
finally:
sl_copy.shm.unlink()
# Obtain a second handle on the same ShareableList.
sl_tethered = shared_memory.ShareableList(name=sl.shm.name)
self.assertEqual(sl.shm.name, sl_tethered.shm.name)
sl_tethered[-1] = 880
self.assertEqual(sl[-1], 880)
sl_tethered.shm.close()
sl.shm.close()
# Exercise creating an empty ShareableList.
empty_sl = shared_memory.ShareableList()
try:
self.assertEqual(len(empty_sl), 0)
self.assertEqual(empty_sl.format, '')
self.assertEqual(empty_sl.count('any'), 0)
with self.assertRaises(ValueError):
empty_sl.index(None)
empty_sl.shm.close()
finally:
empty_sl.shm.unlink()
def test_shared_memory_ShareableList_pickling(self):
sl = shared_memory.ShareableList(range(10))
self.addCleanup(sl.shm.unlink)
serialized_sl = pickle.dumps(sl)
deserialized_sl = pickle.loads(serialized_sl)
        self.assertIsInstance(deserialized_sl, shared_memory.ShareableList)
        self.assertEqual(deserialized_sl[-1], 9)
        self.assertIsNot(sl, deserialized_sl)
deserialized_sl[4] = "changed"
self.assertEqual(sl[4], "changed")
# Verify data is not being put into the pickled representation.
name = 'a' * len(sl.shm.name)
larger_sl = shared_memory.ShareableList(range(400))
self.addCleanup(larger_sl.shm.unlink)
serialized_larger_sl = pickle.dumps(larger_sl)
        self.assertEqual(len(serialized_sl), len(serialized_larger_sl))
larger_sl.shm.close()
deserialized_sl.shm.close()
sl.shm.close()
def test_shared_memory_cleaned_after_process_termination(self):
cmd = '''if 1:
import os, time, sys
from multiprocessing import shared_memory
# Create a shared_memory segment, and send the segment name
sm = shared_memory.SharedMemory(create=True, size=10)
sys.stdout.write(sm.name + '\\n')
sys.stdout.flush()
time.sleep(100)
'''
with subprocess.Popen([sys.executable, '-E', '-c', cmd],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as p:
name = p.stdout.readline().strip().decode()
            # Abruptly killing a process that holds a reference to a shared
            # memory segment should not leak that segment.
p.terminate()
p.wait()
deadline = time.monotonic() + support.LONG_TIMEOUT
t = 0.1
while time.monotonic() < deadline:
time.sleep(t)
t = min(t*2, 5)
try:
smm = shared_memory.SharedMemory(name, create=False)
except FileNotFoundError:
break
else:
raise AssertionError("A SharedMemory segment was leaked after"
" a process was abruptly terminated.")
if os.name == 'posix':
# A warning was emitted by the subprocess' own
# resource_tracker (on Windows, shared memory segments
# are released automatically by the OS).
err = p.stderr.read().decode()
self.assertIn(
"resource_tracker: There appear to be 1 leaked "
"shared_memory objects to clean up at shutdown", err)
#
#
#
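# Illustrative note: util.Finalize callbacks are keyed by (exitpriority,
# registration order).  _run_finalizers()/_exit_function() call higher
# exitpriority entries first and, for equal priorities, the most recently
# registered first, which is why test_finalize below expects 'd10' before
# 'd03', 'd02' and 'd01'.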
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
self.registry_backup = util._finalizer_registry.copy()
util._finalizer_registry.clear()
def tearDown(self):
gc.collect() # For PyPy or other GCs.
self.assertFalse(util._finalizer_registry)
util._finalizer_registry.update(self.registry_backup)
@classmethod
def _test_finalize(cls, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
gc.collect() # For PyPy or other GCs.
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
gc.collect() # For PyPy or other GCs.
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
# call multiprocessing's cleanup function then exit process without
# garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.daemon = True
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
def test_thread_safety(self):
# bpo-24484: _run_finalizers() should be thread-safe
def cb():
pass
class Foo(object):
def __init__(self):
self.ref = self # create reference cycle
# insert finalizer at random key
util.Finalize(self, cb, exitpriority=random.randint(1, 100))
finish = False
exc = None
def run_finalizers():
nonlocal exc
while not finish:
time.sleep(random.random() * 1e-1)
try:
# A GC run will eventually happen during this,
# collecting stale Foo's and mutating the registry
util._run_finalizers()
except Exception as e:
exc = e
def make_finalizers():
nonlocal exc
d = {}
while not finish:
try:
# Old Foo's get gradually replaced and later
# collected by the GC (because of the cyclic ref)
d[random.getrandbits(5)] = {Foo() for i in range(10)}
except Exception as e:
exc = e
d.clear()
old_interval = sys.getswitchinterval()
old_threshold = gc.get_threshold()
try:
sys.setswitchinterval(1e-6)
gc.set_threshold(5, 5, 5)
threads = [threading.Thread(target=run_finalizers),
threading.Thread(target=make_finalizers)]
with threading_helper.start_threads(threads):
time.sleep(4.0) # Wait a bit to trigger race condition
finish = True
if exc is not None:
raise exc
finally:
sys.setswitchinterval(old_interval)
gc.set_threshold(*old_threshold)
gc.collect() # Collect remaining Foo's
#
# Test that from ... import * works for each module
#
class _TestImportStar(unittest.TestCase):
def get_module_names(self):
import glob
folder = os.path.dirname(multiprocessing.__file__)
pattern = os.path.join(glob.escape(folder), '*.py')
files = glob.glob(pattern)
modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files]
modules = ['multiprocessing.' + m for m in modules]
modules.remove('multiprocessing.__init__')
modules.append('multiprocessing')
return modules
def test_import(self):
modules = self.get_module_names()
if sys.platform == 'win32':
modules.remove('multiprocessing.popen_fork')
modules.remove('multiprocessing.popen_forkserver')
modules.remove('multiprocessing.popen_spawn_posix')
else:
modules.remove('multiprocessing.popen_spawn_win32')
if not HAS_REDUCTION:
modules.remove('multiprocessing.popen_forkserver')
if c_int is None:
# This module requires _ctypes
modules.remove('multiprocessing.sharedctypes')
for name in modules:
__import__(name)
mod = sys.modules[name]
self.assertTrue(hasattr(mod, '__all__'), name)
for attr in mod.__all__:
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
        self.assertIsNotNone(logger)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
@classmethod
def _test_level(cls, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL1, reader.recv())
p.join()
p.close()
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL2, reader.recv())
p.join()
p.close()
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Check that Process.join() retries if os.waitpid() fails with EINTR
#
class _TestPollEintr(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _killer(cls, pid):
time.sleep(0.1)
os.kill(pid, signal.SIGUSR1)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_poll_eintr(self):
got_signal = [False]
def record(*args):
got_signal[0] = True
pid = os.getpid()
oldhandler = signal.signal(signal.SIGUSR1, record)
try:
killer = self.Process(target=self._killer, args=(pid,))
killer.start()
try:
p = self.Process(target=time.sleep, args=(2,))
p.start()
p.join()
finally:
killer.join()
self.assertTrue(got_signal[0])
self.assertEqual(p.exitcode, 0)
finally:
signal.signal(signal.SIGUSR1, oldhandler)
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_handles(self):
conn = multiprocessing.connection.Connection(44977608)
# check that poll() doesn't crash
try:
conn.poll()
except (ValueError, OSError):
pass
finally:
# Hack private attribute _handle to avoid printing an error
# in conn.__del__
conn._handle = None
self.assertRaises((ValueError, OSError),
multiprocessing.connection.Connection, -1)
@hashlib_helper.requires_hashdigest('md5')
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return b'something bogus'
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.deliver_challenge,
_FakeConnection(), b'abc')
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return multiprocessing.connection.CHALLENGE
elif self.count == 2:
return b'something bogus'
return b''
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.answer_challenge,
_FakeConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
ns.test += 1
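# Illustrative note: the initializer/initargs pair tested below is invoked once
# in each worker or manager process before it starts serving, e.g. (the
# 'shared_ns' name is hypothetical):
#     pool = multiprocessing.Pool(2, initializer, (shared_ns,))
# TestInitializers verifies the same signature for Manager.start() and
# Pool.__init__().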
@hashlib_helper.requires_hashdigest('md5')
class TestInitializers(unittest.TestCase):
def setUp(self):
self.mgr = multiprocessing.Manager()
self.ns = self.mgr.Namespace()
self.ns.test = 0
def tearDown(self):
self.mgr.shutdown()
self.mgr.join()
def test_manager_initializer(self):
m = multiprocessing.managers.SyncManager()
self.assertRaises(TypeError, m.start, 1)
m.start(initializer, (self.ns,))
self.assertEqual(self.ns.test, 1)
m.shutdown()
m.join()
def test_pool_initializer(self):
self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
p = multiprocessing.Pool(1, initializer, (self.ns,))
p.close()
p.join()
self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno()) vs. sys.stdin.close() behavior
#
def _this_sub_process(q):
try:
item = q.get(block=False)
except pyqueue.Empty:
pass
def _test_process():
queue = multiprocessing.Queue()
subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,))
subProc.daemon = True
subProc.start()
subProc.join()
def _afunc(x):
return x*x
def pool_in_process():
pool = multiprocessing.Pool(processes=4)
x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
pool.close()
pool.join()
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
        # No race conditions here: after fork() only the calling thread survives in the child
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
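# Illustrative note: _file_like caches writes per pid, so a child created by
# fork() starts with an empty cache instead of re-flushing data buffered in the
# parent; test_flushing below exercises flush() against the parent's cache.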
class TestStdinBadfiledescriptor(unittest.TestCase):
def test_queue_in_process(self):
proc = multiprocessing.Process(target=_test_process)
proc.start()
proc.join()
def test_pool_in_process(self):
p = multiprocessing.Process(target=pool_in_process)
p.start()
p.join()
def test_flushing(self):
sio = io.StringIO()
flike = _file_like(sio)
flike.write('foo')
proc = multiprocessing.Process(target=lambda: flike.flush())
flike.flush()
        self.assertEqual(sio.getvalue(), 'foo')
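# Minimal usage sketch (the helper name is illustrative and it is not invoked by
# the tests) of multiprocessing.connection.wait(), exercised by TestWait below:
# wait() blocks until at least one of the given connections/sockets/sentinels is
# ready, or the optional timeout expires.
def _example_wait_on_pipe(timeout=0.1):
    from multiprocessing.connection import wait
    r, w = multiprocessing.Pipe(duplex=False)
    assert wait([r], timeout) == []        # nothing to read yet
    w.send('ready')
    assert wait([r], timeout) == [r]       # readable now
    r.close()
    w.close()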
class TestWait(unittest.TestCase):
@classmethod
def _child_test_wait(cls, w, slow):
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
w.send((i, os.getpid()))
w.close()
def test_wait(self, slow=False):
from multiprocessing.connection import wait
readers = []
procs = []
messages = []
for i in range(4):
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
p.daemon = True
p.start()
w.close()
readers.append(r)
procs.append(p)
self.addCleanup(p.join)
while readers:
for r in wait(readers):
try:
msg = r.recv()
except EOFError:
readers.remove(r)
r.close()
else:
messages.append(msg)
messages.sort()
expected = sorted((i, p.pid) for i in range(10) for p in procs)
self.assertEqual(messages, expected)
@classmethod
def _child_test_wait_socket(cls, address, slow):
s = socket.socket()
s.connect(address)
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
s.sendall(('%s\n' % i).encode('ascii'))
s.close()
def test_wait_socket(self, slow=False):
from multiprocessing.connection import wait
l = socket.create_server((socket_helper.HOST, 0))
addr = l.getsockname()
readers = []
procs = []
dic = {}
for i in range(4):
p = multiprocessing.Process(target=self._child_test_wait_socket,
args=(addr, slow))
p.daemon = True
p.start()
procs.append(p)
self.addCleanup(p.join)
for i in range(4):
r, _ = l.accept()
readers.append(r)
dic[r] = []
l.close()
while readers:
for r in wait(readers):
msg = r.recv(32)
if not msg:
readers.remove(r)
r.close()
else:
dic[r].append(msg)
expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
for v in dic.values():
self.assertEqual(b''.join(v), expected)
def test_wait_slow(self):
self.test_wait(True)
def test_wait_socket_slow(self):
self.test_wait_socket(True)
def test_wait_timeout(self):
from multiprocessing.connection import wait
expected = 5
a, b = multiprocessing.Pipe()
start = time.monotonic()
res = wait([a, b], expected)
delta = time.monotonic() - start
self.assertEqual(res, [])
self.assertLess(delta, expected * 2)
self.assertGreater(delta, expected * 0.5)
b.send(None)
start = time.monotonic()
res = wait([a, b], 20)
delta = time.monotonic() - start
self.assertEqual(res, [a])
self.assertLess(delta, 0.4)
@classmethod
def signal_and_sleep(cls, sem, period):
sem.release()
time.sleep(period)
def test_wait_integer(self):
from multiprocessing.connection import wait
expected = 3
sorted_ = lambda l: sorted(l, key=lambda x: id(x))
sem = multiprocessing.Semaphore(0)
a, b = multiprocessing.Pipe()
p = multiprocessing.Process(target=self.signal_and_sleep,
args=(sem, expected))
p.start()
self.assertIsInstance(p.sentinel, int)
self.assertTrue(sem.acquire(timeout=20))
start = time.monotonic()
res = wait([a, p.sentinel, b], expected + 20)
delta = time.monotonic() - start
self.assertEqual(res, [p.sentinel])
self.assertLess(delta, expected + 2)
self.assertGreater(delta, expected - 2)
a.send(None)
start = time.monotonic()
res = wait([a, p.sentinel, b], 20)
delta = time.monotonic() - start
self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
self.assertLess(delta, 0.4)
b.send(None)
start = time.monotonic()
res = wait([a, p.sentinel, b], 20)
delta = time.monotonic() - start
self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
self.assertLess(delta, 0.4)
p.terminate()
p.join()
def test_neg_timeout(self):
from multiprocessing.connection import wait
a, b = multiprocessing.Pipe()
t = time.monotonic()
res = wait([a], timeout=-1)
t = time.monotonic() - t
self.assertEqual(res, [])
self.assertLess(t, 1)
a.close()
b.close()
#
# Issue 14151: Test that an address whose family is invalid for the platform raises ValueError
#
class TestInvalidFamily(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_family(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener(r'\\.\test')
@unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
def test_invalid_family_win32(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener('/var/test.pipe')
#
# Issue 12098: check sys.flags of child matches that for parent
#
class TestFlags(unittest.TestCase):
@classmethod
def run_in_grandchild(cls, conn):
conn.send(tuple(sys.flags))
@classmethod
def run_in_child(cls):
import json
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
p.start()
grandchild_flags = r.recv()
p.join()
r.close()
w.close()
flags = (tuple(sys.flags), grandchild_flags)
print(json.dumps(flags))
def test_flags(self):
import json
# start child process using unusual flags
prog = ('from test._test_multiprocessing import TestFlags; ' +
'TestFlags.run_in_child()')
data = subprocess.check_output(
[sys.executable, '-E', '-S', '-O', '-c', prog])
child_flags, grandchild_flags = json.loads(data.decode('ascii'))
self.assertEqual(child_flags, grandchild_flags)
#
# Test interaction with socket timeouts - see Issue #6056
#
class TestTimeouts(unittest.TestCase):
@classmethod
def _test_timeout(cls, child, address):
time.sleep(1)
child.send(123)
child.close()
conn = multiprocessing.connection.Client(address)
conn.send(456)
conn.close()
def test_timeout(self):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(0.1)
parent, child = multiprocessing.Pipe(duplex=True)
l = multiprocessing.connection.Listener(family='AF_INET')
p = multiprocessing.Process(target=self._test_timeout,
args=(child, l.address))
p.start()
child.close()
self.assertEqual(parent.recv(), 123)
parent.close()
conn = l.accept()
self.assertEqual(conn.recv(), 456)
conn.close()
l.close()
join_process(p)
finally:
socket.setdefaulttimeout(old_timeout)
#
# Test what happens with no "if __name__ == '__main__'"
#
class TestNoForkBomb(unittest.TestCase):
def test_noforkbomb(self):
sm = multiprocessing.get_start_method()
name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
if sm != 'fork':
rc, out, err = test.support.script_helper.assert_python_failure(name, sm)
self.assertEqual(out, b'')
self.assertIn(b'RuntimeError', err)
else:
rc, out, err = test.support.script_helper.assert_python_ok(name, sm)
self.assertEqual(out.rstrip(), b'123')
self.assertEqual(err, b'')
#
# Issue #17555: ForkAwareThreadLock
#
class TestForkAwareThreadLock(unittest.TestCase):
# We recursively start processes. Issue #17555 meant that the
# after fork registry would get duplicate entries for the same
# lock. The size of the registry at generation n was ~2**n.
@classmethod
def child(cls, n, conn):
if n > 1:
p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
p.start()
conn.close()
join_process(p)
else:
conn.send(len(util._afterfork_registry))
conn.close()
def test_lock(self):
r, w = multiprocessing.Pipe(False)
l = util.ForkAwareThreadLock()
old_size = len(util._afterfork_registry)
p = multiprocessing.Process(target=self.child, args=(5, w))
p.start()
w.close()
new_size = r.recv()
join_process(p)
self.assertLessEqual(new_size, old_size)
#
# Check that non-forked child processes do not inherit unneeded fds/handles
#
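# Illustrative note: with the 'spawn' and 'forkserver' start methods the child
# is a fresh interpreter, so an fd that was merely open in the parent (and not
# explicitly passed) should be invalid in the child; with 'fork' the child
# inherits it.  That distinction is what test_closefd checks.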
class TestCloseFds(unittest.TestCase):
def get_high_socket_fd(self):
if WIN32:
# The child process will not have any socket handles, so
# calling socket.fromfd() should produce WSAENOTSOCK even
# if there is a handle of the same number.
return socket.socket().detach()
else:
# We want to produce a socket with an fd high enough that a
# freshly created child process will not have any fds as high.
fd = socket.socket().detach()
to_close = []
while fd < 50:
to_close.append(fd)
fd = os.dup(fd)
for x in to_close:
os.close(x)
return fd
def close(self, fd):
if WIN32:
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close()
else:
os.close(fd)
@classmethod
def _test_closefds(cls, conn, fd):
try:
s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
except Exception as e:
conn.send(e)
else:
s.close()
conn.send(None)
def test_closefd(self):
if not HAS_REDUCTION:
raise unittest.SkipTest('requires fd pickling')
reader, writer = multiprocessing.Pipe()
fd = self.get_high_socket_fd()
try:
p = multiprocessing.Process(target=self._test_closefds,
args=(writer, fd))
p.start()
writer.close()
e = reader.recv()
join_process(p)
finally:
self.close(fd)
writer.close()
reader.close()
if multiprocessing.get_start_method() == 'fork':
self.assertIs(e, None)
else:
WSAENOTSOCK = 10038
self.assertIsInstance(e, OSError)
self.assertTrue(e.errno == errno.EBADF or
e.winerror == WSAENOTSOCK, e)
#
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
#
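# Illustrative note: since PEP 475 the low-level reads and writes behind
# Connection retry automatically when a signal handler returns, so a SIGUSR1
# delivered in the middle of recv()/send()/accept() must not surface as an
# InterruptedError; that is what the tests below verify.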
class TestIgnoreEINTR(unittest.TestCase):
# Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block
CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE)
@classmethod
def _test_ignore(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
conn.send('ready')
x = conn.recv()
conn.send(x)
conn.send_bytes(b'x' * cls.CONN_MAX_SIZE)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
self.assertEqual(conn.recv(), 'ready')
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
conn.send(1234)
self.assertEqual(conn.recv(), 1234)
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE)
time.sleep(0.1)
p.join()
finally:
conn.close()
@classmethod
def _test_ignore_listener(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
with multiprocessing.connection.Listener() as l:
conn.send(l.address)
a = l.accept()
a.send('welcome')
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore_listener(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore_listener,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
address = conn.recv()
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
client = multiprocessing.connection.Client(address)
self.assertEqual(client.recv(), 'welcome')
p.join()
finally:
conn.close()
class TestStartMethod(unittest.TestCase):
@classmethod
def _check_context(cls, conn):
conn.send(multiprocessing.get_start_method())
def check_context(self, ctx):
r, w = ctx.Pipe(duplex=False)
p = ctx.Process(target=self._check_context, args=(w,))
p.start()
w.close()
child_method = r.recv()
r.close()
p.join()
self.assertEqual(child_method, ctx.get_start_method())
def test_context(self):
for method in ('fork', 'spawn', 'forkserver'):
try:
ctx = multiprocessing.get_context(method)
except ValueError:
continue
self.assertEqual(ctx.get_start_method(), method)
self.assertIs(ctx.get_context(), ctx)
self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
self.assertRaises(ValueError, ctx.set_start_method, None)
self.check_context(ctx)
def test_set_get(self):
multiprocessing.set_forkserver_preload(PRELOAD)
count = 0
old_method = multiprocessing.get_start_method()
try:
for method in ('fork', 'spawn', 'forkserver'):
try:
multiprocessing.set_start_method(method, force=True)
except ValueError:
continue
self.assertEqual(multiprocessing.get_start_method(), method)
ctx = multiprocessing.get_context()
self.assertEqual(ctx.get_start_method(), method)
self.assertTrue(type(ctx).__name__.lower().startswith(method))
self.assertTrue(
ctx.Process.__name__.lower().startswith(method))
self.check_context(multiprocessing)
count += 1
finally:
multiprocessing.set_start_method(old_method, force=True)
self.assertGreaterEqual(count, 1)
def test_get_all(self):
methods = multiprocessing.get_all_start_methods()
if sys.platform == 'win32':
self.assertEqual(methods, ['spawn'])
else:
self.assertTrue(methods == ['fork', 'spawn'] or
methods == ['spawn', 'fork'] or
methods == ['fork', 'spawn', 'forkserver'] or
methods == ['spawn', 'fork', 'forkserver'])
def test_preload_resources(self):
if multiprocessing.get_start_method() != 'forkserver':
self.skipTest("test only relevant for 'forkserver' method")
name = os.path.join(os.path.dirname(__file__), 'mp_preload.py')
rc, out, err = test.support.script_helper.assert_python_ok(name)
out = out.decode()
err = err.decode()
if out.rstrip() != 'ok' or err != '':
print(out)
print(err)
self.fail("failed spawning forkserver or grandchild")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
class TestResourceTracker(unittest.TestCase):
def test_resource_tracker(self):
#
# Check that killing process does not leak named semaphores
#
cmd = '''if 1:
import time, os, tempfile
import multiprocessing as mp
from multiprocessing import resource_tracker
from multiprocessing.shared_memory import SharedMemory
mp.set_start_method("spawn")
rand = tempfile._RandomNameSequence()
def create_and_register_resource(rtype):
if rtype == "semaphore":
lock = mp.Lock()
return lock, lock._semlock.name
elif rtype == "shared_memory":
sm = SharedMemory(create=True, size=10)
return sm, sm._name
else:
raise ValueError(
"Resource type {{}} not understood".format(rtype))
resource1, rname1 = create_and_register_resource("{rtype}")
resource2, rname2 = create_and_register_resource("{rtype}")
os.write({w}, rname1.encode("ascii") + b"\\n")
os.write({w}, rname2.encode("ascii") + b"\\n")
time.sleep(10)
'''
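        # Note: the template above is filled twice. {w} and {rtype} are
        # substituted by the .format() call below, while the doubled braces in
        # "Resource type {{}}" survive as literal braces for the child's own
        # str.format() at runtime.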
for rtype in resource_tracker._CLEANUP_FUNCS:
with self.subTest(rtype=rtype):
if rtype == "noop":
# Artefact resource type used by the resource_tracker
continue
r, w = os.pipe()
p = subprocess.Popen([sys.executable,
'-E', '-c', cmd.format(w=w, rtype=rtype)],
pass_fds=[w],
stderr=subprocess.PIPE)
os.close(w)
with open(r, 'rb', closefd=True) as f:
name1 = f.readline().rstrip().decode('ascii')
name2 = f.readline().rstrip().decode('ascii')
_resource_unlink(name1, rtype)
p.terminate()
p.wait()
deadline = time.monotonic() + support.LONG_TIMEOUT
while time.monotonic() < deadline:
time.sleep(.5)
try:
_resource_unlink(name2, rtype)
except OSError as e:
# docs say it should be ENOENT, but OSX seems to give
# EINVAL
self.assertIn(e.errno, (errno.ENOENT, errno.EINVAL))
break
else:
raise AssertionError(
f"A {rtype} resource was leaked after a process was "
f"abruptly terminated.")
err = p.stderr.read().decode('utf-8')
p.stderr.close()
expected = ('resource_tracker: There appear to be 2 leaked {} '
'objects'.format(
rtype))
self.assertRegex(err, expected)
self.assertRegex(err, r'resource_tracker: %r: \[Errno' % name1)
def check_resource_tracker_death(self, signum, should_die):
# bpo-31310: if the semaphore tracker process has died, it should
# be restarted implicitly.
from multiprocessing.resource_tracker import _resource_tracker
pid = _resource_tracker._pid
if pid is not None:
os.kill(pid, signal.SIGKILL)
support.wait_process(pid, exitcode=-signal.SIGKILL)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
_resource_tracker.ensure_running()
pid = _resource_tracker._pid
os.kill(pid, signum)
time.sleep(1.0) # give it time to die
ctx = multiprocessing.get_context("spawn")
with warnings.catch_warnings(record=True) as all_warn:
warnings.simplefilter("always")
sem = ctx.Semaphore()
sem.acquire()
sem.release()
wr = weakref.ref(sem)
# ensure `sem` gets collected, which triggers communication with
# the semaphore tracker
del sem
gc.collect()
self.assertIsNone(wr())
if should_die:
self.assertEqual(len(all_warn), 1)
the_warn = all_warn[0]
self.assertTrue(issubclass(the_warn.category, UserWarning))
self.assertTrue("resource_tracker: process died"
in str(the_warn.message))
else:
self.assertEqual(len(all_warn), 0)
def test_resource_tracker_sigint(self):
# Catchable signal (ignored by semaphore tracker)
self.check_resource_tracker_death(signal.SIGINT, False)
def test_resource_tracker_sigterm(self):
# Catchable signal (ignored by semaphore tracker)
self.check_resource_tracker_death(signal.SIGTERM, False)
def test_resource_tracker_sigkill(self):
# Uncatchable signal.
self.check_resource_tracker_death(signal.SIGKILL, True)
@staticmethod
def _is_resource_tracker_reused(conn, pid):
from multiprocessing.resource_tracker import _resource_tracker
_resource_tracker.ensure_running()
        # The pid should be None in the child process, except for the fork
# context. It should not be a new value.
reused = _resource_tracker._pid in (None, pid)
reused &= _resource_tracker._check_alive()
conn.send(reused)
def test_resource_tracker_reused(self):
from multiprocessing.resource_tracker import _resource_tracker
_resource_tracker.ensure_running()
pid = _resource_tracker._pid
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._is_resource_tracker_reused,
args=(w, pid))
p.start()
is_resource_tracker_reused = r.recv()
# Clean up
p.join()
w.close()
r.close()
self.assertTrue(is_resource_tracker_reused)
class TestSimpleQueue(unittest.TestCase):
@classmethod
def _test_empty(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
# issue 30301, could fail under spawn and forkserver
try:
queue.put(queue.empty())
queue.put(queue.empty())
finally:
parent_can_continue.set()
def test_empty(self):
queue = multiprocessing.SimpleQueue()
child_can_start = multiprocessing.Event()
parent_can_continue = multiprocessing.Event()
proc = multiprocessing.Process(
target=self._test_empty,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertTrue(queue.empty())
child_can_start.set()
parent_can_continue.wait()
self.assertFalse(queue.empty())
self.assertEqual(queue.get(), True)
self.assertEqual(queue.get(), False)
self.assertTrue(queue.empty())
proc.join()
def test_close(self):
queue = multiprocessing.SimpleQueue()
queue.close()
# closing a queue twice should not fail
queue.close()
# Test specific to CPython since it tests private attributes
@test.support.cpython_only
def test_closed(self):
queue = multiprocessing.SimpleQueue()
queue.close()
self.assertTrue(queue._reader.closed)
self.assertTrue(queue._writer.closed)
class TestPoolNotLeakOnFailure(unittest.TestCase):
def test_release_unused_processes(self):
# Issue #19675: During pool creation, if we can't create a process,
# don't leak already created ones.
will_fail_in = 3
forked_processes = []
class FailingForkProcess:
def __init__(self, **kwargs):
self.name = 'Fake Process'
self.exitcode = None
self.state = None
forked_processes.append(self)
def start(self):
nonlocal will_fail_in
if will_fail_in <= 0:
raise OSError("Manually induced OSError")
will_fail_in -= 1
self.state = 'started'
def terminate(self):
self.state = 'stopping'
def join(self):
if self.state == 'stopping':
self.state = 'stopped'
def is_alive(self):
return self.state == 'started' or self.state == 'stopping'
with self.assertRaisesRegex(OSError, 'Manually induced OSError'):
p = multiprocessing.pool.Pool(5, context=unittest.mock.MagicMock(
Process=FailingForkProcess))
p.close()
p.join()
self.assertFalse(
any(process.is_alive() for process in forked_processes))
@hashlib_helper.requires_hashdigest('md5')
class TestSyncManagerTypes(unittest.TestCase):
"""Test all the types which can be shared between a parent and a
child process by using a manager which acts as an intermediary
between them.
    In the following unit-tests the base type is created in the parent
    process, the @classmethod represents the worker process, and the
    shared object is readable and writable by both.
# The child.
@classmethod
def _test_list(cls, obj):
assert obj[0] == 5
assert obj.append(6)
# The parent.
def test_list(self):
o = self.manager.list()
o.append(5)
self.run_worker(self._test_list, o)
assert o[1] == 6
"""
manager_class = multiprocessing.managers.SyncManager
def setUp(self):
self.manager = self.manager_class()
self.manager.start()
self.proc = None
def tearDown(self):
if self.proc is not None and self.proc.is_alive():
self.proc.terminate()
self.proc.join()
self.manager.shutdown()
self.manager = None
self.proc = None
@classmethod
def setUpClass(cls):
support.reap_children()
tearDownClass = setUpClass
def wait_proc_exit(self):
# Only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395).
join_process(self.proc)
start_time = time.monotonic()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = time.monotonic() - start_time
if dt >= 5.0:
test.support.environment_altered = True
support.print_warning(f"multiprocessing.Manager still has "
f"{multiprocessing.active_children()} "
f"active children after {dt} seconds")
break
def run_worker(self, worker, obj):
self.proc = multiprocessing.Process(target=worker, args=(obj, ))
self.proc.daemon = True
self.proc.start()
self.wait_proc_exit()
self.assertEqual(self.proc.exitcode, 0)
@classmethod
def _test_event(cls, obj):
assert obj.is_set()
obj.wait()
obj.clear()
obj.wait(0.001)
def test_event(self):
o = self.manager.Event()
o.set()
self.run_worker(self._test_event, o)
assert not o.is_set()
o.wait(0.001)
@classmethod
def _test_lock(cls, obj):
obj.acquire()
def test_lock(self, lname="Lock"):
o = getattr(self.manager, lname)()
self.run_worker(self._test_lock, o)
o.release()
self.assertRaises(RuntimeError, o.release) # already released
@classmethod
def _test_rlock(cls, obj):
obj.acquire()
obj.release()
def test_rlock(self, lname="Lock"):
o = getattr(self.manager, lname)()
self.run_worker(self._test_rlock, o)
@classmethod
def _test_semaphore(cls, obj):
obj.acquire()
def test_semaphore(self, sname="Semaphore"):
o = getattr(self.manager, sname)()
self.run_worker(self._test_semaphore, o)
o.release()
def test_bounded_semaphore(self):
self.test_semaphore(sname="BoundedSemaphore")
@classmethod
def _test_condition(cls, obj):
obj.acquire()
obj.release()
def test_condition(self):
o = self.manager.Condition()
self.run_worker(self._test_condition, o)
@classmethod
def _test_barrier(cls, obj):
assert obj.parties == 5
obj.reset()
def test_barrier(self):
o = self.manager.Barrier(5)
self.run_worker(self._test_barrier, o)
@classmethod
def _test_pool(cls, obj):
# TODO: fix https://bugs.python.org/issue35919
with obj:
pass
def test_pool(self):
o = self.manager.Pool(processes=4)
self.run_worker(self._test_pool, o)
@classmethod
def _test_queue(cls, obj):
assert obj.qsize() == 2
assert obj.full()
assert not obj.empty()
assert obj.get() == 5
assert not obj.empty()
assert obj.get() == 6
assert obj.empty()
def test_queue(self, qname="Queue"):
o = getattr(self.manager, qname)(2)
o.put(5)
o.put(6)
self.run_worker(self._test_queue, o)
assert o.empty()
assert not o.full()
def test_joinable_queue(self):
self.test_queue("JoinableQueue")
@classmethod
def _test_list(cls, obj):
assert obj[0] == 5
assert obj.count(5) == 1
assert obj.index(5) == 0
obj.sort()
obj.reverse()
for x in obj:
pass
assert len(obj) == 1
assert obj.pop(0) == 5
def test_list(self):
o = self.manager.list()
o.append(5)
self.run_worker(self._test_list, o)
assert not o
self.assertEqual(len(o), 0)
@classmethod
def _test_dict(cls, obj):
assert len(obj) == 1
assert obj['foo'] == 5
assert obj.get('foo') == 5
assert list(obj.items()) == [('foo', 5)]
assert list(obj.keys()) == ['foo']
assert list(obj.values()) == [5]
assert obj.copy() == {'foo': 5}
assert obj.popitem() == ('foo', 5)
def test_dict(self):
o = self.manager.dict()
o['foo'] = 5
self.run_worker(self._test_dict, o)
assert not o
self.assertEqual(len(o), 0)
@classmethod
def _test_value(cls, obj):
assert obj.value == 1
assert obj.get() == 1
obj.set(2)
def test_value(self):
o = self.manager.Value('i', 1)
self.run_worker(self._test_value, o)
self.assertEqual(o.value, 2)
self.assertEqual(o.get(), 2)
@classmethod
def _test_array(cls, obj):
assert obj[0] == 0
assert obj[1] == 1
assert len(obj) == 2
assert list(obj) == [0, 1]
def test_array(self):
o = self.manager.Array('i', [0, 1])
self.run_worker(self._test_array, o)
@classmethod
def _test_namespace(cls, obj):
assert obj.x == 0
assert obj.y == 1
def test_namespace(self):
o = self.manager.Namespace()
o.x = 0
o.y = 1
self.run_worker(self._test_namespace, o)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
# Just make sure names in not_exported are excluded
support.check__all__(self, multiprocessing, extra=multiprocessing.__all__,
not_exported=['SUBDEBUG', 'SUBWARNING'])
#
# Mixins
#
class BaseMixin(object):
@classmethod
def setUpClass(cls):
cls.dangling = (multiprocessing.process._dangling.copy(),
threading._dangling.copy())
@classmethod
def tearDownClass(cls):
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
processes = set(multiprocessing.process._dangling) - set(cls.dangling[0])
if processes:
test.support.environment_altered = True
support.print_warning(f'Dangling processes: {processes}')
processes = None
threads = set(threading._dangling) - set(cls.dangling[1])
if threads:
test.support.environment_altered = True
support.print_warning(f'Dangling threads: {threads}')
threads = None
class ProcessesMixin(BaseMixin):
TYPE = 'processes'
Process = multiprocessing.Process
connection = multiprocessing.connection
current_process = staticmethod(multiprocessing.current_process)
parent_process = staticmethod(multiprocessing.parent_process)
active_children = staticmethod(multiprocessing.active_children)
Pool = staticmethod(multiprocessing.Pool)
Pipe = staticmethod(multiprocessing.Pipe)
Queue = staticmethod(multiprocessing.Queue)
JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
Lock = staticmethod(multiprocessing.Lock)
RLock = staticmethod(multiprocessing.RLock)
Semaphore = staticmethod(multiprocessing.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
Condition = staticmethod(multiprocessing.Condition)
Event = staticmethod(multiprocessing.Event)
Barrier = staticmethod(multiprocessing.Barrier)
Value = staticmethod(multiprocessing.Value)
Array = staticmethod(multiprocessing.Array)
RawValue = staticmethod(multiprocessing.RawValue)
RawArray = staticmethod(multiprocessing.RawArray)
class ManagerMixin(BaseMixin):
TYPE = 'manager'
Process = multiprocessing.Process
Queue = property(operator.attrgetter('manager.Queue'))
JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
Lock = property(operator.attrgetter('manager.Lock'))
RLock = property(operator.attrgetter('manager.RLock'))
Semaphore = property(operator.attrgetter('manager.Semaphore'))
BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
Condition = property(operator.attrgetter('manager.Condition'))
Event = property(operator.attrgetter('manager.Event'))
Barrier = property(operator.attrgetter('manager.Barrier'))
Value = property(operator.attrgetter('manager.Value'))
Array = property(operator.attrgetter('manager.Array'))
list = property(operator.attrgetter('manager.list'))
dict = property(operator.attrgetter('manager.dict'))
Namespace = property(operator.attrgetter('manager.Namespace'))
@classmethod
def Pool(cls, *args, **kwds):
return cls.manager.Pool(*args, **kwds)
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.manager = multiprocessing.Manager()
@classmethod
def tearDownClass(cls):
# only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395)
start_time = time.monotonic()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = time.monotonic() - start_time
if dt >= 5.0:
test.support.environment_altered = True
support.print_warning(f"multiprocessing.Manager still has "
f"{multiprocessing.active_children()} "
f"active children after {dt} seconds")
break
gc.collect() # do garbage collection
if cls.manager._number_of_objects() != 0:
# This is not really an error since some tests do not
# ensure that all processes which hold a reference to a
# managed object have been joined.
test.support.environment_altered = True
support.print_warning('Shared objects which still exist '
'at manager shutdown:')
support.print_warning(cls.manager._debug_info())
cls.manager.shutdown()
cls.manager.join()
cls.manager = None
super().tearDownClass()
class ThreadsMixin(BaseMixin):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
connection = multiprocessing.dummy.connection
current_process = staticmethod(multiprocessing.dummy.current_process)
active_children = staticmethod(multiprocessing.dummy.active_children)
Pool = staticmethod(multiprocessing.dummy.Pool)
Pipe = staticmethod(multiprocessing.dummy.Pipe)
Queue = staticmethod(multiprocessing.dummy.Queue)
JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
Lock = staticmethod(multiprocessing.dummy.Lock)
RLock = staticmethod(multiprocessing.dummy.RLock)
Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
Condition = staticmethod(multiprocessing.dummy.Condition)
Event = staticmethod(multiprocessing.dummy.Event)
Barrier = staticmethod(multiprocessing.dummy.Barrier)
Value = staticmethod(multiprocessing.dummy.Value)
Array = staticmethod(multiprocessing.dummy.Array)
#
# Functions used to create test cases from the base ones in this module
#
def install_tests_in_module_dict(remote_globs, start_method):
__module__ = remote_globs['__name__']
local_globs = globals()
ALL_TYPES = {'processes', 'threads', 'manager'}
for name, base in local_globs.items():
if not isinstance(base, type):
continue
if issubclass(base, BaseTestCase):
if base is BaseTestCase:
continue
assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
for type_ in base.ALLOWED_TYPES:
newname = 'With' + type_.capitalize() + name[1:]
Mixin = local_globs[type_.capitalize() + 'Mixin']
class Temp(base, Mixin, unittest.TestCase):
pass
if type_ == 'manager':
Temp = hashlib_helper.requires_hashdigest('md5')(Temp)
Temp.__name__ = Temp.__qualname__ = newname
Temp.__module__ = __module__
remote_globs[newname] = Temp
elif issubclass(base, unittest.TestCase):
class Temp(base, object):
pass
Temp.__name__ = Temp.__qualname__ = name
Temp.__module__ = __module__
remote_globs[name] = Temp
dangling = [None, None]
old_start_method = [None]
def setUpModule():
multiprocessing.set_forkserver_preload(PRELOAD)
multiprocessing.process._cleanup()
dangling[0] = multiprocessing.process._dangling.copy()
dangling[1] = threading._dangling.copy()
old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
try:
multiprocessing.set_start_method(start_method, force=True)
except ValueError:
raise unittest.SkipTest(start_method +
' start method not supported')
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
raise unittest.SkipTest("OSError raises on RLock creation, "
"see issue 3111!")
check_enough_semaphores()
util.get_temp_dir() # creates temp directory
multiprocessing.get_logger().setLevel(LOG_LEVEL)
def tearDownModule():
need_sleep = False
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
multiprocessing.set_start_method(old_start_method[0], force=True)
# pause a bit so we don't get warning about dangling threads/processes
processes = set(multiprocessing.process._dangling) - set(dangling[0])
if processes:
need_sleep = True
test.support.environment_altered = True
support.print_warning(f'Dangling processes: {processes}')
processes = None
threads = set(threading._dangling) - set(dangling[1])
if threads:
need_sleep = True
test.support.environment_altered = True
support.print_warning(f'Dangling threads: {threads}')
threads = None
# Sleep 500 ms to give time to child processes to complete.
if need_sleep:
time.sleep(0.5)
multiprocessing.util._cleanup_tests()
remote_globs['setUpModule'] = setUpModule
remote_globs['tearDownModule'] = tearDownModule
|
led_master.py
|
#!/usr/bin/env python2.7
from time import strftime
from blinkybase import BlinkyBase
from rgbmatrix import graphics
import time
from multiprocessing import Process, Lock
from multiprocessing.sharedctypes import Value, Array
import datetime
# import feedparser, bitly_api
import urllib2
import json
import os
import threading
import random
import pickle
# from workers import *
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('MASTER')
logger.setLevel(logging.INFO)
class RunText(BlinkyBase):
def __init__(self, *args, **kwargs):
super(RunText, self).__init__(*args, **kwargs)
logger.warning('Init: LED Loop')
def Run(self):
offscreenCanvas = self.matrix.CreateFrameCanvas()
#Format
time_font = graphics.Font()
time_font.LoadFont("./fonts/4x6.bdf")
time_color = graphics.Color(157, 31, 186)
count_font = graphics.Font()
count_font.LoadFont("./fonts/4x6.bdf")
count_color = graphics.Color(198, 29, 41)
line_a_color = graphics.Color(27, 93, 198)
count_label_font = graphics.Font()
count_label_font.LoadFont("./fonts/4x6.bdf")
count_label_color = graphics.Color(198, 29, 41)
count_wx_font = graphics.Font()
count_wx_font.LoadFont("./fonts/4x6.bdf")
count_wx_color = graphics.Color(204, 171, 40)
ticker_font = graphics.Font()
ticker_font.LoadFont("./fonts/8x13.bdf")
ticker_color = graphics.Color(99, 127, 115)
self.pos = offscreenCanvas.width
led_width = offscreenCanvas.width
self.curr_temp = ''
self.curr_tweet = ''
while True:
# try:
# self.weather = pickle.load(open('weather.pickle', 'rb'))
# logger.debug('Weather Pickle: %s', self.weather)
# self.curr_temp = self.weather['curr_temp']
# self.twitter = pickle.load(open('twitter.pickle', 'rb'))
# logger.debug('Twitter Pickle: %s', self.twitter)
# self.curr_tweet = self.twitter["curr_tweet"]
# except Exception as err:
# logger.error('Pickle Error: %s', err)
offscreenCanvas.Clear()
self.clock = time_now.value
self.count_down = count_down.value
graphics.DrawText(offscreenCanvas, count_font, 1, 31, count_color, self.count_down)
graphics.DrawLine(offscreenCanvas, 0, 18, 128, 18, line_a_color)
graphics.DrawLine(offscreenCanvas, 68, 19, 68, 32, line_a_color)
graphics.DrawText(offscreenCanvas, time_font, 71, 31, time_color, self.clock)
# graphics.DrawText(offscreenCanvas, count_label_font, 1, 25, count_label_color, 'NOT MY PRESIDENT!')
# graphics.DrawText(offscreenCanvas, count_wx_font, 104, 25, count_wx_color, self.curr_temp)
# Top Twitter Ticker
# len = graphics.DrawText(offscreenCanvas, ticker_font, self.pos, 14, ticker_color, self.curr_tweet)
# self.pos -= 1
# if (self.pos + len < 0):
# self.pos = offscreenCanvas.width
time.sleep(0.025)
offscreenCanvas = self.matrix.SwapOnVSync(offscreenCanvas)
def led_update():
parser = RunText()
if (not parser.process()):
parser.print_help()
class led_clock():
def __init__(self, *args, **kwargs):
while True:
dt = datetime.datetime
t = dt.now()
time_now.value = t.strftime('%m/%d/%y %H:%M')
#print(time_now.value)
time.sleep(5)
class countdown_clock():
def __init__(self, *args, **kwargs):
while True:
dt = datetime.datetime
count = dt(2021,1,21,9) - dt.now()
count_down.value = '%dDays %dH %dM' % (count.days, count.seconds/3600, count.seconds%3600/60)
#print(count_down.value)
time.sleep(5)
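# Worked example for the countdown format above: a timedelta of 3 days and
# 7500 seconds renders as '3Days 2H 5M', since 7500/3600 == 2 and
# 7500 % 3600 / 60 == 5 under Python 2 integer division.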
def main():
pass
if __name__ == "__main__":
try:
print 'Work Started: PID %d' % os.getpid()
jobs = []
lock = Lock()
#Process Variables
time_now = Array('c', b'88/88/88 88:88' ,lock=lock)
count_down = Array('c', b'8888Days 88H 88M' ,lock=lock)
#Start TWITTER WORKER
# rt = Process(target=twitter.tweet_query)
# jobs.append(rt)
# rt.start()
#Start WEATHER WORKER
# rt = Process(target=weather.get_temp)
# jobs.append(rt)
# rt.start()
#Start LED_CLOCK LOOP
rt = Process(target=led_clock)
jobs.append(rt)
rt.start()
        #Start COUNTDOWN_CLOCK LOOP
rt = Process(target=countdown_clock)
jobs.append(rt)
rt.start()
#Start LED UPDATE LOOP
rt = Process(target=led_update)
jobs.append(rt)
rt.start()
#JOIN ALL JOBS
for j in jobs:
j.join()
print(j)
except KeyboardInterrupt:
for j in jobs:
j.terminate()
time.sleep(2)
print(j, j.is_alive())
|
test_dynamic_routing.py
|
import logging
import threading
import time
import pytest
from google.protobuf import json_format
from jina.helper import random_identity
from jina.parsers import set_pea_parser
from jina.peapods.zmq import Zmqlet, AsyncZmqlet, ZmqStreamlet
from jina.proto import jina_pb2
from jina.types.message import Message
from jina.types.routing.table import RoutingTable
def get_args():
"""Calculates a fresh set of ports with every call implicitly."""
return set_pea_parser().parse_args(
[
'--host-in',
'0.0.0.0',
'--host-out',
'0.0.0.0',
'--socket-in',
'ROUTER_BIND',
'--socket-out',
'DEALER_CONNECT',
'--timeout-ctrl',
'-1',
'--dynamic-routing-out',
]
)
def callback(msg):
pass
def test_simple_dynamic_routing_zmqlet():
args1 = get_args()
args2 = get_args()
logger = logging.getLogger('zmq-test')
with Zmqlet(args1, logger) as z1, Zmqlet(args2, logger) as z2:
assert z1.msg_sent == 0
assert z1.msg_recv == 0
assert z2.msg_sent == 0
assert z2.msg_recv == 0
req = jina_pb2.RequestProto()
req.request_id = random_identity()
d = req.data.docs.add()
d.tags['id'] = 2
msg = Message(None, req, 'tmp', '')
routing_pb = jina_pb2.RoutingTableProto()
routing_table = {
'active_pod': 'pod1',
'pods': {
'pod1': {
'host': '0.0.0.0',
'port': args1.port_in,
'expected_parts': 0,
'out_edges': [{'pod': 'pod2'}],
},
'pod2': {
'host': '0.0.0.0',
'port': args2.port_in,
'expected_parts': 1,
'out_edges': [],
},
},
}
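        # Topology encoded above: pod1 is the entry pod (expects no incoming
        # parts) and forwards every message to pod2, which expects exactly one
        # part per request; z1 plays pod1 and z2 plays pod2 in this test.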
json_format.ParseDict(routing_table, routing_pb)
msg.envelope.routing_table.CopyFrom(routing_pb)
z2.recv_message(callback)
assert z2.msg_sent == 0
assert z2.msg_recv == 0
z1.send_message(msg)
z2.recv_message(callback)
assert z1.msg_sent == 1
assert z1.msg_recv == 0
assert z2.msg_sent == 0
assert z2.msg_recv == 1
def test_double_dynamic_routing_zmqlet():
args1 = get_args()
args2 = get_args()
args3 = get_args()
logger = logging.getLogger('zmq-test')
with Zmqlet(args1, logger) as z1, Zmqlet(args2, logger) as z2, Zmqlet(
args3, logger
) as z3:
assert z1.msg_sent == 0
assert z2.msg_sent == 0
assert z3.msg_sent == 0
req = jina_pb2.RequestProto()
req.request_id = random_identity()
d = req.data.docs.add()
d.tags['id'] = 2
msg = Message(None, req, 'tmp', '')
routing_table = {
'active_pod': 'pod1',
'pods': {
'pod1': {
'host': '0.0.0.0',
'port': args1.port_in,
'expected_parts': 0,
'out_edges': [{'pod': 'pod2'}, {'pod': 'pod3'}],
},
'pod2': {
'host': '0.0.0.0',
'port': args2.port_in,
'expected_parts': 1,
'out_edges': [],
},
'pod3': {
'host': '0.0.0.0',
'port': args3.port_in,
'expected_parts': 1,
'out_edges': [],
},
},
}
msg.envelope.routing_table.CopyFrom(RoutingTable(routing_table).proto)
number_messages = 100
trips = 10
for i in range(trips):
for j in range(number_messages):
z1.send_message(msg)
time.sleep(1)
for i in range(number_messages):
z2.recv_message(callback)
z3.recv_message(callback)
total_number_messages = number_messages * trips
assert z1.msg_sent == 2 * total_number_messages
assert z2.msg_sent == 0
assert z2.msg_recv == total_number_messages
assert z3.msg_sent == 0
assert z3.msg_recv == total_number_messages
async def send_msg(zmqlet, msg):
await zmqlet.send_message(msg)
@pytest.mark.asyncio
async def test_double_dynamic_routing_async_zmqlet():
args1 = get_args()
args2 = get_args()
args3 = get_args()
logger = logging.getLogger('zmq-test')
with AsyncZmqlet(args1, logger) as z1, AsyncZmqlet(
args2, logger
) as z2, AsyncZmqlet(args3, logger) as z3:
assert z1.msg_sent == 0
assert z2.msg_sent == 0
assert z3.msg_sent == 0
req = jina_pb2.RequestProto()
req.request_id = random_identity()
d = req.data.docs.add()
d.tags['id'] = 2
msg = Message(None, req, 'tmp', '')
routing_pb = jina_pb2.RoutingTableProto()
routing_table = {
'active_pod': 'pod1',
'pods': {
'pod1': {
'host': '0.0.0.0',
'port': args1.port_in,
'expected_parts': 0,
'out_edges': [{'pod': 'pod2'}, {'pod': 'pod3'}],
},
'pod2': {
'host': '0.0.0.0',
'port': args2.port_in,
'expected_parts': 1,
'out_edges': [],
},
'pod3': {
'host': '0.0.0.0',
'port': args3.port_in,
'expected_parts': 1,
'out_edges': [],
},
},
}
json_format.ParseDict(routing_table, routing_pb)
msg.envelope.routing_table.CopyFrom(routing_pb)
await send_msg(z1, msg)
await z2.recv_message(callback)
await z3.recv_message(callback)
assert z1.msg_sent == 2
assert z1.msg_recv == 0
assert z2.msg_sent == 0
assert z2.msg_recv == 1
assert z3.msg_sent == 0
assert z3.msg_recv == 1
def test_double_dynamic_routing_zmqstreamlet():
args1 = get_args()
args2 = get_args()
args3 = get_args()
logger = logging.getLogger('zmq-test')
with ZmqStreamlet(args1, logger) as z1, ZmqStreamlet(
args2, logger
) as z2, ZmqStreamlet(args3, logger) as z3:
assert z1.msg_sent == 0
assert z2.msg_sent == 0
assert z3.msg_sent == 0
req = jina_pb2.RequestProto()
req.request_id = random_identity()
d = req.data.docs.add()
d.tags['id'] = 2
msg = Message(None, req, 'tmp', '')
routing_pb = jina_pb2.RoutingTableProto()
routing_table = {
'active_pod': 'pod1',
'pods': {
'pod1': {
'host': '0.0.0.0',
'port': args1.port_in,
'expected_parts': 0,
'out_edges': [{'pod': 'pod2'}, {'pod': 'pod3'}],
},
'pod2': {
'host': '0.0.0.0',
'port': args2.port_in,
'expected_parts': 1,
'out_edges': [],
},
'pod3': {
'host': '0.0.0.0',
'port': args3.port_in,
'expected_parts': 1,
'out_edges': [],
},
},
}
json_format.ParseDict(routing_table, routing_pb)
msg.envelope.routing_table.CopyFrom(routing_pb)
for pea in [z1, z2, z3]:
thread = threading.Thread(target=pea.start, args=(callback,))
thread.daemon = True
thread.start()
number_messages = 1000
for i in range(number_messages):
z1.send_message(msg)
time.sleep(5)
assert z1.msg_sent == 2 * number_messages
assert z1.msg_recv == 0
assert z2.msg_sent == 0
assert z2.msg_recv == number_messages
assert z3.msg_sent == 0
assert z3.msg_recv == number_messages
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
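# Quick illustration of the byte-order helpers above: for the 32-bit word
# 0x12345678, bytereverse() yields 0x78563412 (bytes reversed); bufreverse()
# applies that swap to every 4-byte word of a buffer, and wordreverse()
# reverses the order of the 4-byte words themselves.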
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
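    # Note: work() is the classic getwork inner loop. The first 76 bytes of the
    # 80-byte block header are pre-hashed once, each candidate nonce is appended
    # and double-SHA256'd, and a hash below the 256-bit target counts as a
    # solution. The early '\0\0\0\0' check only passes hashes whose high 32 bits
    # are zero, which every valid solution must satisfy.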
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 13077
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
test_text_messages.py
|
def test_text_message_of_length_1(session):
conn, server = session
server.send_message_to_all('$')
assert conn.recv() == '$'
def test_text_message_of_length_125B(session):
conn, server = session
msg = 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz'\
'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz'\
'abcdefghijklmnopqr125'
server.send_message_to_all(msg)
assert conn.recv() == msg
def test_text_message_of_length_126B(session):
conn, server = session
msg = 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz'\
'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz'\
'abcdefghijklmnopqrs126'
server.send_message_to_all(msg)
assert conn.recv() == msg
def test_text_message_of_length_127B(session):
conn, server = session
msg = 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz'\
'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz'\
'abcdefghijklmnopqrst127'
server.send_message_to_all(msg)
assert conn.recv() == msg
def test_text_message_of_length_208B(session):
conn, server = session
msg = 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz'\
'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz'\
'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz'\
'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvw208'
server.send_message_to_all(msg)
assert conn.recv() == msg
def test_text_message_of_length_1251B(session):
conn, server = session
msg = ('abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz'\
'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz'\
'abcdefghijklmnopqr125'*10)+'1'
server.send_message_to_all(msg)
assert conn.recv() == msg
def test_text_message_of_length_68KB(session):
conn, server = session
msg = '$'+('a'*67993)+'68000'+'^'
assert len(msg) == 68000
server.send_message_to_all(msg)
assert conn.recv() == msg
def test_text_message_of_length_1500KB(session):
""" An enormous message (well beyond 65K) """
conn, server = session
msg = '$'+('a'*1499991)+'1500000'+'^'
assert len(msg) == 1500000
server.send_message_to_all(msg)
assert conn.recv() == msg
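# The sizes above are chosen around WebSocket framing boundaries (RFC 6455):
# payloads up to 125 bytes fit the 7-bit length field, payloads of 126-65535
# bytes use the 2-byte extended length, and anything larger (the 68 KB and
# 1.5 MB cases) needs the 8-byte extended length.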
def test_text_message_with_unicode_characters(session):
conn, server = session
msg = '$äüö^'
server.send_message_to_all(msg)
assert conn.recv() == msg
def test_text_message_stress_bursts(session):
""" Scenario: server sends multiple different message to the same conn
at once """
from threading import Thread
NUM_THREADS = 100
MESSAGE_LEN = 1000
conn, server = session
messages_received = []
    # Threads receiving
threads_receiving = []
for i in range(NUM_THREADS):
th = Thread(
target=lambda fn: messages_received.append(fn()),
args=(conn.recv,)
)
th.daemon = True
threads_receiving.append(th)
    # Threads sending, each with a different character
threads_sending = []
for i in range(NUM_THREADS):
message = chr(i)*MESSAGE_LEN
th = Thread(
target=server.send_message_to_all,
args=(message,)
)
th.daemon = True
threads_sending.append(th)
# Run scenario
for th in threads_receiving:
th.start()
for th in threads_sending:
th.start()
# Wait for all threads to finish
print('WAITING FOR THREADS TO FINISH')
for th in threads_receiving:
th.join()
for th in threads_sending:
th.join()
for message in messages_received:
first_char = message[0]
assert message.count(first_char) == len(message)
print()
|
util.py
|
import os
import shutil
import sys
import ctypes
from pathlib import Path
if sys.version_info < (3, 6):
print("\nPlease restart with Python 3.6+\n")
print("Current Python version:", sys.version_info)
exit(-1)
tc_core = None
def in_docker():
if os.environ.get("TI_IN_DOCKER", "") == "":
return False
else:
return True
def import_tc_core(tmp_dir=None):
global tc_core
if get_os_name() != 'win':
old_flags = sys.getdlopenflags()
sys.setdlopenflags(258) # 258 = RTLD_NOW | RTLD_GLOBAL
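        # 258 == 0x102 == RTLD_GLOBAL (0x100) | RTLD_NOW (0x2) on Linux;
        # RTLD_GLOBAL exposes the core library's symbols to shared objects
        # loaded later (presumably so the LLVM JIT can resolve runtime symbols,
        # as noted further down).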
else:
pyddir = os.path.join(package_root(), 'lib')
os.environ['PATH'] += ';' + pyddir
try:
import taichi_core as core
except Exception as e:
if isinstance(e, ImportError):
print("Share object taichi_core import failed. If you are on Windows, please consider installing \"Microsoft Visual C++ Redistributable\" (https://aka.ms/vs/16/release/vc_redist.x64.exe)")
raise e
tc_core = core
if get_os_name() != 'win':
sys.setdlopenflags(old_flags)
lib_dir = os.path.join(package_root(), 'lib')
core.set_lib_dir(locale_encode(lib_dir))
if tmp_dir is not None:
core.set_tmp_dir(locale_encode(tmp_dir))
def locale_encode(s):
try:
import locale
encoding = locale.getdefaultlocale()[1]
except:
encoding = 'utf8'
return s.encode(encoding)
def is_ci():
return os.environ.get('TI_CI', '') == '1'
def package_root():
return os.path.join(os.path.dirname(os.path.realpath(__file__)), '../')
def is_release():
return os.environ.get('TAICHI_REPO_DIR', '') == ''
from colorama import Fore, Back, Style
def get_core_shared_object():
if is_release():
directory = os.path.join(package_root(), 'lib')
else:
directory = get_bin_directory()
return os.path.join(directory, 'libtaichi_core.so')
def get_repo():
from git import Repo
import taichi as tc
repo = Repo(tc.get_repo_directory())
return repo
def print_red_bold(*args, **kwargs):
print(Fore.RED + Style.BRIGHT, end='')
print(*args, **kwargs)
print(Style.RESET_ALL, end='')
def format(all=False):
import os
import taichi as tc
from yapf.yapflib.yapf_api import FormatFile
repo = get_repo()
print('Code formatting ...')
if all:
directories = ['taichi', 'tests', 'examples', 'misc', 'python']
files = []
for d in directories:
files += list(Path(os.path.join(tc.get_repo_directory(), d)).rglob('*'))
else:
files = repo.index.diff('HEAD')
files = list(
map(lambda x: os.path.join(tc.get_repo_directory(), x.a_path), files))
for fn in map(str, files):
if fn.endswith('.py'):
print(fn, '...')
FormatFile(
fn,
in_place=True,
style_config=os.path.join(tc.get_repo_directory(), 'misc',
'.style.yapf'))
if fn.endswith('.cpp') or fn.endswith('.h'):
print(fn, '...')
os.system('clang-format-6.0 -i -style=file {}'.format(fn))
print('Formatting done!')
from taichi.misc.settings import get_output_directory, get_build_directory, get_bin_directory, get_repo_directory, get_runtime_directory
from taichi.misc.util import get_os_name, get_unique_task_id
create_sand_box_on_windows = True
def build():
assert False
tmp_cwd = os.getcwd()
bin_dir = get_build_directory()
try:
os.mkdir(bin_dir)
except:
pass
os.chdir(bin_dir)
flags = ' -DPYTHON_EXECUTABLE:FILEPATH="{}"'.format(sys.executable)
print('Running cmake...')
if is_ci():
print(' Note: building for CI.')
if get_os_name() == 'win':
flags += ' -G "Visual Studio 15 Win64"'
cmake_ret = os.system('cmake .. ' + flags)
if cmake_ret != 0:
print(' Error: CMake failed.')
exit(-1)
import multiprocessing
print('Building taichi...')
num_make_threads = min(20, multiprocessing.cpu_count())
if get_os_name() == 'win':
make_ret = os.system(
"msbuild /p:Configuration=Release /p:Platform=x64 /m taichi.sln")
else:
make_ret = os.system('make -j {}'.format(num_make_threads))
if make_ret != 0:
print(' Error: Build failed.')
exit(-1)
os.chdir(tmp_cwd)
def prepare_sandbox(src):
global g_tmp_dir
assert os.path.exists(src)
import atexit
import shutil
from tempfile import mkdtemp
tmp_dir = mkdtemp(prefix='taichi-')
atexit.register(shutil.rmtree, tmp_dir)
print(f'[Taichi] preparing sandbox at {tmp_dir}')
dest = os.path.join(tmp_dir, 'taichi_core.so')
shutil.copy(src, dest)
os.mkdir(os.path.join(tmp_dir, 'runtime/'))
print(f'[Taichi] sandbox prepared')
return tmp_dir
if is_release():
print("[Taichi] mode=release")
sys.path.append(os.path.join(package_root(), 'lib'))
if get_os_name() != 'win':
link_src = os.path.join(package_root(), 'lib', 'taichi_core.so')
link_dst = os.path.join(package_root(), 'lib', 'libtaichi_core.so')
# For llvm jit to find the runtime symbols
if not os.path.exists(link_dst):
os.symlink(link_src, link_dst)
import_tc_core()
if get_os_name() != 'win':
dll = ctypes.CDLL(get_core_shared_object(), mode=ctypes.RTLD_GLOBAL)
tc_core.set_python_package_dir(package_root())
os.makedirs(tc_core.get_repo_dir(), exist_ok=True)
else:
print("[Taichi] mode=development")
if get_os_name() == 'osx':
bin_dir = get_bin_directory()
os.environ['DYLD_FALLBACK_LIBRARY_PATH'] = get_runtime_directory()
lib_path = os.path.join(bin_dir, 'libtaichi_core.dylib')
tmp_cwd = os.getcwd()
tmp_dir = prepare_sandbox(lib_path)
os.chdir(tmp_dir)
sys.path.append(tmp_dir)
import taichi_core as tc_core
os.chdir(tmp_cwd)
elif get_os_name() == 'linux':
bin_dir = get_bin_directory()
if 'LD_LIBRARY_PATH' in os.environ:
os.environ['LD_LIBRARY_PATH'] += ':/usr/lib64/'
else:
os.environ['LD_LIBRARY_PATH'] = '/usr/lib64/'
lib_path = os.path.join(bin_dir, 'libtaichi_core.so')
assert os.path.exists(lib_path)
tmp_cwd = os.getcwd()
tmp_dir = prepare_sandbox(lib_path)
os.chdir(tmp_dir)
sys.path.append(tmp_dir)
try:
import_tc_core(tmp_dir)
except Exception as e:
from colorama import Fore, Back, Style
print_red_bold("Taichi core import failed: ", end='')
print(e)
exit(-1)
os.chdir(tmp_cwd)
elif get_os_name() == 'win':
bin_dir = get_bin_directory()
dll_path1 = os.path.join(bin_dir, 'RelWithDebInfo', 'taichi_core.dll')
dll_path2 = os.path.join(bin_dir, 'libtaichi_core.dll')
assert os.path.exists(dll_path1) and not os.path.exists(dll_path2)
# On windows when an dll/pyd is loaded, we can not write to it any more
old_wd = os.getcwd()
os.chdir(bin_dir)
if create_sand_box_on_windows:
# Create a sandbox for separated core lib development and loading
dir = os.path.join(get_output_directory(), 'tmp', get_unique_task_id())
lib_dir = os.path.join(get_repo_directory(), 'external', 'lib')
os.environ['PATH'] += ';' + lib_dir
os.makedirs(dir)
if os.path.exists(dll_path1):
shutil.copy(dll_path1, os.path.join(dir, 'taichi_core.pyd'))
else:
shutil.copy(dll_path2, os.path.join(dir, 'taichi_core.pyd'))
os.environ['PATH'] += ';' + dir
sys.path.append(dir)
else:
            shutil.copy(dll_path1, os.path.join(bin_dir, 'taichi_core.pyd'))
sys.path.append(bin_dir)
try:
import taichi_core as tc_core
except Exception as e:
print(e)
print()
            print(r'Is taichi\external\lib correctly set to branch msvc or mingw?')
print()
raise e
os.chdir(old_wd)
log_level = os.environ.get('TI_LOG_LEVEL', '')
if log_level:
tc_core.set_logging_level(log_level)
def get_dll_name(name):
if get_os_name() == 'linux':
return 'libtaichi_%s.so' % name
elif get_os_name() == 'osx':
return 'libtaichi_%s.dylib' % name
elif get_os_name() == 'win':
return 'taichi_%s.dll' % name
else:
assert False, "Unknown OS"
def load_module(name, verbose=True):
if verbose:
print('Loading module', name)
try:
if get_os_name() == 'osx':
mode = ctypes.RTLD_LOCAL
else:
mode = ctypes.RTLD_GLOBAL
if '.so' in name:
ctypes.PyDLL(name, mode=mode)
else:
ctypes.PyDLL(
os.path.join(get_repo_directory(), 'build', get_dll_name(name)),
mode=mode)
except Exception as e:
print(Fore.YELLOW +
"Warning: module [{}] loading failed: {}".format(name, e) +
Style.RESET_ALL)
def at_startup():
if not is_release():
output_dir = get_output_directory()
if not os.path.exists(output_dir):
print('Making output directory')
os.mkdir(output_dir)
tc_core.set_core_state_python_imported(True)
def start_memory_monitoring(output_fn, pid=-1, interval=1):
# removing dependency on psutil
return
import os, psutil, time
if pid == -1:
pid = os.getpid()
import multiprocessing
def task():
with open(output_fn, 'w') as f:
process = psutil.Process(pid)
while True:
try:
mem = process.memory_info().rss
except:
mem = -1
time.sleep(interval)
print(time.time(), mem, file=f)
f.flush()
proc = multiprocessing.Process(target=task, daemon=True)
proc.start()
def require_version(major, minor=0, patch=0):
versions = [
int(tc_core.get_version_major()),
int(tc_core.get_version_minor()),
int(tc_core.get_version_patch()),
]
match = major == versions[0] and (minor < versions[1] or minor == versions[1] and patch <= versions[2])
if match:
return
else:
print("Taichi version mismatch. required >= {}.{}.{}".format(major, minor, patch))
print("Installed =", tc_core.get_version_string())
raise Exception("Taichi version mismatch")
at_startup()
device_string = 'cpu only' if not tc_core.with_cuda() else 'cuda {}'.format(
tc_core.cuda_version())
print(f'[Taichi] version {tc_core.get_version_string()}, {device_string}, commit {tc_core.get_commit_hash()[:8]}, python {sys.version_info[0]}.{sys.version_info[1]}.{sys.version_info[2]}')
if not is_release():
tc_core.set_core_trigger_gdb_when_crash(True)
|
threaded.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import sys
import time
import os.path
import math
import re
import argparse
import traceback
import json
import bz2
import gzip
from nltk.tokenize import TweetTokenizer
from flashtext import KeywordProcessor
import hashlib
def makedirs(fld):
if not os.path.exists(fld):
os.makedirs(fld)
PICKLE_MAX_LEN = 1e4
TAG_COMMENT = 't1_'
TAG_SUBMISSION = 't3_'
dontuse = '__dontuse__'
url_str = '__url__'
parser = argparse.ArgumentParser()
parser.add_argument("dump_name", help="YYYY-MM, dumped files to be loaded")
parser.add_argument("--bl_words", help="list of offensive words, to avoid in responses")
parser.add_argument("--ignore_keys", default=False, type=bool, help="If true ignore any keys provided as arguments")
parser.add_argument("--keep_keys", help="hashes of instances to keep")
parser.add_argument("--discard_tgt_keys", help="hashes of targets to discard")
parser.add_argument("--freq_words", help="words sorted by their corpus frequencies")
parser.add_argument("--bl_subreddits", help="blocklist of offensive subreddits")
parser.add_argument("--wl_subreddits", help="whitelist of relatively safe subreddits")
parser.add_argument("--reddit_input", default="d:/data/reddit/bz2/", help="Location of the input reddit data (bz2 files)")
parser.add_argument("--reddit_output", default="d:/data/reddit/", help="Location of the output reddit data (conversations)")
parser.add_argument("--max_len", default=30, type=int)
# 30 words means roughly 70 characters on average for Reddit
parser.add_argument("--max_len_type", default='w') # w for words, c for chars
parser.add_argument("--min_depth", default=2, type=int)
parser.add_argument("--max_depth", default=10, type=int)
parser.add_argument("--min_score", default=0, type=int)
parser.add_argument("--use_title", default=1, type=int)
parser.add_argument("--leaves_only", default=0, type=int)
parser.add_argument("--split_size", default=int(5e5), type=int)
parser.add_argument("--task", default='conv')
parser.add_argument("--parallel", default=False, type=bool)
parser.add_argument("--pre_tok", default=False, type=bool, help="whether to tokenize during the extract step")
parser.add_argument("--clean", default=False, type=bool, help="apply some filters to significantly reduce number of instances")
args = parser.parse_args()
print("Args: %s" % args, file=sys.stderr)
fields_subm = [ "id", "score", "num_comments", "domain", "permalink", "title" ]
fields_comm = [ "id", "author", "parent_id", "link_id", "score", "n_char", "body"]
bl_words = KeywordProcessor()
bl_subreddits = {}
wl_subreddits = {}
keys = {}
keys_rm = {}
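# NOTE: extract_submissions() below relies on a module-level `reddit` client
# that is not created in the code above; presumably a configured praw.Reddit
# instance, e.g.
#   import praw
#   reddit = praw.Reddit(client_id='...', client_secret='...', user_agent='...')
# is expected to be in scope before it is called.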
def get_submission_id(submission):
return TAG_SUBMISSION + submission["id"]
def get_comment_id(comment):
return TAG_COMMENT + comment["id"]
def norm_sentence(txt, is_extract):
if is_extract:
return minimal_norm_sentence(txt)
else:
return gpt_norm_sentence(txt)
def minimal_norm_sentence(txt):
txt = txt.replace(chr(92),'') # chr(92) = '\'. as twitter has 'b\/c' rather than 'b/c'
txt = txt.replace('\n', ' ')
txt = txt.replace('\r', ' ')
txt = txt.replace('\t', ' ')
#print ("Tokenized: [%s]" % txt, file=sys.stderr)
return txt
def gpt_norm_sentence(txt):
# url and tag
words = []
for word in txt.split():
if word[0] == '#': # don't allow tag
continue
i = word.lower().find('http')
if i >= 0:
word = word[:i] + ' ' + '__url__'
words.append(word.strip())
txt = ' '.join(words)
# remove illegal char
txt = txt.replace(chr(92),'') # chr(92) = '\'. as twitter has 'b\/c' rather than 'b/c'
txt = txt.replace("b/c","because").replace('j/k','just kidding').replace('w/o','without').replace('w/','with')
txt = re.sub('__mention__','MENTION',txt)
txt = re.sub('__url__','URL',txt)
txt = re.sub(r"[^A-Za-z0-9()\[\]:,.!?'“” ]", " ", txt)
txt = re.sub('MENTION','__mention__',txt)
txt = re.sub('URL','__url__',txt)
tokenizer = TweetTokenizer(preserve_case=True)
txt = ' ' + ' '.join(tokenizer.tokenize(txt)) + ' '
# remove un-necessary space
return ' '.join(txt.split())
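# Rough sketch of what gpt_norm_sentence() does to a line such as
# "check http://foo.com b/c it's #great": the '#great' token is dropped, the
# URL is masked to __url__, 'b/c' is expanded to 'because', characters outside
# the whitelist are stripped, and the result is re-joined from TweetTokenizer
# tokens (exact spacing depends on the NLTK tokenizer version).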
def extract_submissions(fld_bz2, fld_split, which, size=2e5):
path_in = fld_bz2 + '/RS_%s.bz2'%args.dump_name
n = 0
m = 0
n2 = 0
m2 = 0
sub = 0
sid2 = []
sids = []
lines = []
try:
submissions = dict()
subreddit = reddit.subreddit(which)
for submission2 in subreddit.top(limit=100000):
try:
n += 1
#if n%1e4 == 0:
#print('[%s] selected %.3fM from %.2fM submissions'%(
#args.dump_name, m/1e6, n/1e6))
try:
submission = {}
submission["id"] = submission2.id
submission["score"] = submission2.score
submission["domain"] = submission2.domain
submission["permalink"] = submission2.permalink
submission["title"] = submission2.title
submission["num_comments"] = submission2.num_comments
if int(submission['num_comments']) >= 2: # filter 1
submission['title'] = norm_sentence(submission['title'], True)
submission = submission
submissions[get_submission_id(submission)] = submission
lines.append('\t'.join([str(submission[k]) for k in fields_subm]))
m += 1
sid2.append(get_submission_id(submission))
if len(sid2) == size:
#print('writing submissions_sub%i'%sub)
sids.append(set(sid2))
with open(fld_split + '/rs_sub%i.tsv'%sub, 'w', encoding='utf-8') as f:
f.write('\n'.join(lines))
sid2 = []
lines = []
except Exception as e:
print(e)
traceback.print_exc()
continue
lines2 = []
#for sub in range(n_sub):
# open(fld_split + '/rc_sub%i.tsv'%sub, 'w')
comments = dict()
for top_level_comment in submission2.comments:
try:
n2 += 1
comment = {}
comment["id"] = top_level_comment.id
try:
if top_level_comment.author is not None:
comment["author"] = top_level_comment.author.name
else:
comment["author"] = "None"
except:
comment["author"] = "None"
comment["parent_id"] = top_level_comment.parent_id
try:
comment["link_id"] = top_level_comment.link_id
comment["score"] = top_level_comment.score
comment["body"] = top_level_comment.body
except:
comment["link_id"] = comment["parent_id"]
comment["score"] = 0
comment["body"] = ""
#if args.keep_keys:
# k = '\t'.join([comment['link_id'], get_comment_id(comment), 'dep'])
# if k not in keys.keys():
# continue
if comment['body'] != '[deleted]': # filter 1
#if '&gt;' in comment['body'] or '>' in comment['body']: # filter 3: '&gt;' means '>'
# continue
#sid = comment['link_id']
comment['n_char'] = len(comment['body'])
comment['body'] = norm_sentence(comment['body'], True)
#print(comment)
if len(comment['body'].split()) >= 2: # filter 2
comments[get_comment_id(comment)] = comment
lines2.append('\t'.join([str(comment[k]) for k in fields_comm]))
m2 += 1
#break
except Exception as e:
print(e)
traceback.print_exc()
sorted_id = sorted([(
comments[cid]['link_id'],
comments[cid]['parent_id'],
cid
) for cid in comments])
n = len(comments)
#print('total comments: %i'%n)
i = 0
m = 0
lines = []
sum_resp_len = 0
skip_id = {}
if args.leaves_only:
for _, pid, _ in sorted_id:
skip_id[pid] = 1
#print("leaves ratio : %f" % (len(skip_id) / len(sorted_id)), file=sys.stderr)
for sid, pid, cid in sorted_id:
i += 1
if i%1e5 == 0:
#print('selected %.2fM from %.1f/%.1fM comments'%(m/1e6, i/1e6, n/1e6), file=sys.stderr)
if len(lines) > 0:
# path_out is not defined in this function; flush to the same per-dump file used below
with open(fld_split + '/%s.tsv'%args.dump_name, 'a', encoding="utf-8") as f:
f.write('\n'.join(lines) + '\n')
lines = []
subreddit = ''
domain = ''
if sid in submissions.keys():
subreddit = submissions[sid]['permalink'].split('/')[2].lower()
domain = submissions[sid]['domain'].lower()
info = subreddit + '\t' + domain
#if args.bl_subreddits:
# if not subreddit:
#print("skip\tmissing\t%s\tN/A\tmissing submission: %s" % (info, sid), file=sys.stderr)
# continue
# if subreddit in bl_subreddits:
#print("skip\tbad_subreddit\t%s\tN/A\toffensive subreddit: %s" % (info, subreddit), file=sys.stderr)
# continue
comment = comments[cid]
if comment['score'] == 'None':
score = 0
else:
score = int(comment['score'])
if score < args.min_score: # filter 1
#print("skip\tlow_score\t%s\t%s\tscore %d < %d" % (info, comment['body'], score, args.min_score), file=sys.stderr)
continue
txts = []
for c in comments:
txts.append(comments[c]['body'])
#print(txts)
#txts = get_convo(sid, cid, cid, submissions, comments) # filter 2
#print(len(txts))
if len(txts) < args.min_depth: # filter 3
#print("skip\tmin_depth\t%s\t%s\tdepth %d < %d: %s" % (info, comment['body'], len(txts), args.min_depth, "|".join(txts)), file=sys.stderr)
continue
for i in range(len(txts)):
txts[i] = norm_sentence(txts[i], False)
if args.leaves_only and args.clean:
sc = '1.0'
skip_target = False
if args.discard_tgt_keys:
tgt_h = hashlib.sha224(txts[i].encode("utf-8")).hexdigest()
if tgt_h in keys_rm.keys():
skip_target = True
if bl_words.extract_keywords(txts[i]) or skip_target:
sc = '0.0'
txts[i] = sc + ' ' + txts[i]
src = ' EOS '.join(txts[:-1])
tgt = txts[-1]
header = ','.join([sid, pid, cid])
lines.append(header + '\t' + src + '\t' + tgt)
sum_resp_len += len(tgt.split())
m += 1
#avg_len = sum_resp_len/m
with open(fld_split + '/%s.tsv'%args.dump_name, 'a', encoding="utf-8") as f:
f.write('\n'.join(lines) + '\n')
#print('finally selected %i/%i'%(m, n))#, avg_len))
with open(fld_split + '/rc_sub%i.tsv'%sub, 'a', encoding='utf-8') as f:
#print(lines2[sub])
f.write('\n'.join(lines2))
except Exception as e:
print(e)
traceback.print_exc()
#sids, ms, ns, mc, ns = extract_submissions(fld_root_in, fld_split, size=args.split_size)
#mc, nc = extract_comments(fld_root_in, fld_split, sids)
#with open(fld_split + '/stat.tsv', 'a') as f:
# f.write('\t'.join(map(str, [args.dump_name, m2, n2, m, n])) + '\n')
#print('extract_comments done.\n')
#return m, n
#print('writing submissions_sub%i'%sub)
sids.append(set(sid2))
with open(fld_split + '/rs_sub%i.tsv'%sub, 'a', encoding='utf-8') as f:
f.write('\n'.join(lines))
lines = []
sub += 1
except Exception as e:
print(e)
print('extract_submissions done.\n')
return sids, m, n, m2, n2
def extract_comments(fld_bz2, fld_split, sids):
path_in = fld_bz2 + '/RC_%s.bz2'%args.dump_name
n = 0
m = 0
n_sub = len(sids)
lines = [[] for i in range(n_sub)]
#for sub in range(n_sub):
# open(fld_split + '/rc_sub%i.tsv'%sub, 'w')
try:
subreddit = reddit.subreddit(subreddits[0])
for submission2 in subreddit.top(limit=5000):
try:
submission = {}
submission["id"] = submission2.id
submission["score"] = submission2.score
submission["domain"] = submission2.domain
submission["permalink"] = submission2.permalink
submission["title"] = submission2.title
submission["num_comments"] = submission2.num_comments
n += 1
if n%1e4 == 0:
print('[%s] selected %.3fM from %.2fM comments'%(
args.dump_name, m/1e6, n/1e6))
for sub in range(n_sub):
print(' sub %i: %i'%(sub, len(lines[sub])))
if len(lines[sub]) > 0:
with open(fld_split + '/rc_sub%i.tsv'%sub, 'a', encoding='utf-8') as f:
f.write('\n'.join(lines[sub]) + '\n')
lines[sub] = []
for top_level_comment in submission2.comments:
try:
comment = {}
comment["id"] = top_level_comment.id
if top_level_comment.author is not None:
comment["author"] = top_level_comment.author.name
else:
comment["author"] = "None"
comment["parent_id"] = top_level_comment.parent_id
comment["link_id"] = top_level_comment.link_id
comment["score"] = top_level_comment.score
comment["body"] = top_level_comment.body
if args.keep_keys:
k = '\t'.join([comment['link_id'], get_comment_id(comment), 'dep'])
if k not in keys.keys():
continue
if comment['body'] == '[deleted]': # filter 1
continue
if '&gt;' in comment['body'] or '>' in comment['body']: # filter 3: '&gt;' means '>'
continue
sid = comment['link_id']
for sub in range(n_sub):
if sid in sids[sub]:
comment['n_char'] = len(comment['body'])
comment['body'] = norm_sentence(comment['body'], True)
if len(comment['body'].split()) < 2: # filter 2
break
lines[sub].append('\t'.join([str(comment[k]) for k in fields_comm]))
m += 1
break
except Exception:
traceback.print_exc()
except Exception as e:
print(e)
except Exception as e:
print(e)
print('the rest...')
for sub in range(n_sub):
print(' sub %i: %i'%(sub, len(lines[sub])))
with open(fld_split + '/rc_sub%i.tsv'%sub, 'a', encoding='utf-8') as f:
f.write('\n'.join(lines[sub]))
print('extract_comments done.\n')
return m, n
def get_convo(sid, rootid, cid, submissions, comments, depth=10, txts2=None):
#print(depth)
if txts2 is None: # avoid a shared mutable default list that would accumulate across calls
txts2 = []
if depth == 0:
return []
comment = comments[cid]
pid = comment['link_id']
txts2.append(comment['body'])
#print(txts2)
for c in comments:
if pid == comments[c]['link_id']:
txts2.append(comments[c]['body'])
print(comments[c]['body'])
#print(txts2)
#if args.max_len_type == 'w' and len(c['body'].split()) > args.max_len: # len filter
#return []
#if args.max_len_type == 'c' and int(c['n_char']) > args.max_len:
#return []
return txts2
def filter_instance(src, tgt, info):
# Remove offensive words:
if args.bl_words and not args.leaves_only:
bad_words = bl_words.extract_keywords(tgt)
if bad_words:
print("skip\toffensive\t%s\t%s\tbad word(s): %s" % (info, tgt, bad_words), file=sys.stderr)
return True
# Remove empty targets:
tgttoks = tgt.split()
if len(tgttoks) <= 1: # 1 means there is only a weight, and 0 means there's a bug..
print("skip\temptytarget\t%s\t%s" % (info, tgt), file=sys.stderr)
return True
# Skip if word too long:
toolong = False
for w in tgttoks:
if len(w) > 30:
toolong = True
break
if toolong:
print("skip\tlongword\t%s\t%s\tword too long" % (info, tgt), file=sys.stderr)
return True
srctoks = src.split()
# Remove empty sources: (should probably uncomment, but left for reproducibility)
#if len(srctoks) <= 1: # 1 means there is only a weight, and 0 means there's a bug..
# print("skip\temptysource\t%s\t%s" % (info, src), file=sys.stderr)
# return True
# Remove too long turns:
nsrctgt = len(srctoks) + len(tgttoks)
if nsrctgt > 200:
print("skip\ttoolong\t%s\t%s\tsrc+tgt too long, src=[%s]" % (info, tgt, src), file=sys.stderr)
return True
# Skip turns with URLs:
srctgt = src + " " + tgt
if "__url__" in srctgt:
print("skip\turl\t%s\t%s\turl in tgt, or src =[%s]" % (info, tgt, src), file=sys.stderr)
return True
# Skip responses with meta data:
if re.search("[\[\]\(\)]", srctgt) != None:
print("skip\ttags\t%s\t%s\ttag in tgt (or src: [%s])" % (info, tgt, src), file=sys.stderr)
return True
# Skip yelling:
if re.search("[A-Z]{5,}", srctgt) != None:
print("skip\tallcaps\t%s\t%s\tall caps in tgt (or src: [%s])" % (info, tgt, src), file=sys.stderr)
return True
# Skip word repetitions:
reps = False
for i in range(2, len(tgttoks)):
if tgttoks[i-2] == tgttoks[i] and tgttoks[i-1] == tgttoks[i]:
reps = True
break
if reps:
print("skip\trepetitions\t%s\t%s\ttoo many repetitions" % (info, tgt), file=sys.stderr)
return True
return False
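# Usage sketch (assumption; filter_instance is defined here but not called in this section):
# it returns True when a (src, tgt) pair should be dropped, so a caller would typically do
#   if filter_instance(src, tgt, info):
#       continue
# where src is the ' EOS '-joined context, tgt the candidate response, and info a
# "subreddit\tdomain" string used only for the skip log messages.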
import praw
import codecs
import os
subreddits = [os.environ['sub']]
reddit = praw.Reddit(
client_id="tc1xRzCUpCBQNg",
client_secret="YSzJ2wK4mFyhnquUEH_ILxtkxSc",
user_agent="my user agent"
)
f = codecs.open('./redditnew.txt', "a", "utf-8")
import re
def save_convo(path_rs, path_rc, path_out):
print(path_rc)
#print('reading submissions...')
submissions = dict()
with gzip.open(path_rs, mode='rt', encoding='utf-8') as f:
for line in f:
cells = line.strip('\n').strip().split('\t')
try:
submission = dict([(fields_subm[i], cells[i]) for i in range(len(fields_subm))])
except Exception:
#traceback.print_exc()
continue
submissions[get_submission_id(submission)] = submission
#print('reading comments...')
comments = dict()
with gzip.open(path_rc, mode='rt', encoding='utf-8') as f:
for line in f:
cells = line.strip('\n').strip().split('\t')
try:
comment = dict([(fields_comm[i], cells[i]) for i in range(len(fields_comm))])
except Exception:
traceback.print_exc()
continue
comments[get_comment_id(comment)] = comment
sorted_id = sorted([(
comments[cid]['link_id'],
comments[cid]['parent_id'],
cid
) for cid in comments])
n = len(comments)
print('total comments: %i'%n)
i = 0
m = 0
lines = []
sum_resp_len = 0
skip_id = {}
if args.leaves_only:
for _, pid, _ in sorted_id:
skip_id[pid] = 1
#print("leaves ratio : %f" % (len(skip_id) / len(sorted_id)), file=sys.stderr)
for sid, pid, cid in sorted_id:
i += 1
if i%1e5 == 0:
#print('selected %.2fM from %.1f/%.1fM comments'%(m/1e6, i/1e6, n/1e6), file=sys.stderr)
if len(lines) > 0:
with open(path_out, 'a', encoding="utf-8") as f:
f.write('\n'.join(lines) + '\n')
lines = []
subreddit = ''
domain = ''
if sid in submissions.keys():
subreddit = submissions[sid]['permalink'].split('/')[2].lower()
domain = submissions[sid]['domain'].lower()
info = subreddit + '\t' + domain
#if args.bl_subreddits:
# if not subreddit:
#print("skip\tmissing\t%s\tN/A\tmissing submission: %s" % (info, sid), file=sys.stderr)
# continue
# if subreddit in bl_subreddits:
#print("skip\tbad_subreddit\t%s\tN/A\toffensive subreddit: %s" % (info, subreddit), file=sys.stderr)
# continue
comment = comments[cid]
if comment['score'] == 'None':
score = 0
else:
score = int(comment['score'])
if score < args.min_score: # filter 1
#print("skip\tlow_score\t%s\t%s\tscore %d < %d" % (info, comment['body'], score, args.min_score), file=sys.stderr)
continue
txts = get_convo(sid, cid, cid, submissions, comments) # filter 2
#print(len(txts))
if len(txts) < args.min_depth: # filter 3
#print("skip\tmin_depth\t%s\t%s\tdepth %d < %d: %s" % (info, comment['body'], len(txts), args.min_depth, "|".join(txts)), file=sys.stderr)
continue
for j in range(len(txts)): # use j so the outer comment counter i is not clobbered
txts[j] = norm_sentence(txts[j], False)
if args.leaves_only and args.clean:
sc = '1.0'
skip_target = False
if args.discard_tgt_keys:
tgt_h = hashlib.sha224(txts[j].encode("utf-8")).hexdigest()
if tgt_h in keys_rm.keys():
skip_target = True
if bl_words.extract_keywords(txts[j]) or skip_target:
sc = '0.0'
txts[j] = sc + ' ' + txts[j]
src = ' EOS '.join(txts[:-1])
tgt = txts[-1]
header = ','.join([sid, pid, cid])
lines.append(header + '\t' + src + '\t' + tgt)
sum_resp_len += len(tgt.split())
m += 1
#avg_len = sum_resp_len/m
with open(path_out, 'a', encoding="utf-8") as f:
f.write('\n'.join(lines) + '\n')
print('finally selected %i/%i'%(m, n))#, avg_len))
return m, n, 1
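# Output format written by save_convo (derived from the code above), one instance per line:
#   <link_id>,<parent_id>,<comment_id> \t <turn_1> EOS <turn_2> EOS ... \t <response>
# i.e. a comma-joined id header, the source context joined with ' EOS ', then the target turn.
# With --leaves_only and --clean, each turn is additionally prefixed by a '1.0 ' or '0.0 '
# weight depending on the blocklist / discarded-target-key checks.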
import random
import threading
from time import sleep
def extract():
makedirs(fld_split)
print(threading.active_count())
for sub in subreddits:
#sleep(random.randint(0,1))
t = threading.Thread(target=extract_submissions, args=(fld_root_in, fld_split, sub,))
t.daemon = True
t.start()
done = False
while done == False:
sleep(1)
print(threading.active_count())
if threading.active_count() == 1:
done = True
#sids, ms, ns, mc, nc = extract_submissions(fld_root_in, fld_split, size=args.split_size)
#mc, nc = extract_comments(fld_root_in, fld_split, sids)
#with open(fld_split + '/stat.tsv', 'a') as f:
#f.write('\t'.join(map(str, [args.dump_name, mc, nc, ms, ns])) + '\n')
def build_conv(fld_out):
makedirs(fld_out)
path_out = fld_out + '/%s.tsv'%args.dump_name
print(path_out)
if args.parallel:
fs = open(fld_out + '/' + args.dump_name + '.stat.tsv', 'w')
else:
fs = open(fld_out + '/stat.tsv', 'a')
sub = 0
sum_m = 0
sum_n = 0
while True:
path_rs = fld_split + '/rs_sub%i.tsv.gz'%sub
if not os.path.exists(path_rs):
if sub == 0:
print('no such file: '+path_rs)
break
print('-'*10 + ' sub%i '%sub + '-'*10)
path_rc = path_rs.replace('/rs_', '/rc_')
m, n, avg_len = save_convo(path_rs, path_rc, path_out)
fs.write('\t'.join([args.dump_name, str(sub), str(m), str(n), '%.2f'%avg_len]) + '\n')
sum_m += m
sum_n += n
sub += 1
fs.write('\t'.join([args.dump_name, 'all', str(sum_m), str(sum_n), '']) + '\n')
fs.close()
def load_keys(key_file):
d = {}
with gzip.open(key_file, 'rt', encoding="utf-8") as f:
for line in f:
k = line.rstrip()
if args.task == 'conv' and k.endswith('\tdep'):
continue
d[k] = 1
return d
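# Key file format (inferred from the keep_keys lookup in extract_comments and this loader):
# one key per line in a gzip file, e.g. "<link_id>\t<comment_id>\tdep"; for the 'conv' task,
# lines ending in "\tdep" are skipped.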
if args.freq_words:
# Load the frequent-word list into a dict (word -> rank); indexing into the path string itself would raise a TypeError.
freq_words = {}
with open(args.freq_words, 'rt', encoding="utf-8") as f:
n = 0
for line in f:
n += 1
w = line.rstrip().lower()
freq_words[w] = n
args.freq_words = freq_words
if args.bl_words:
with open(args.bl_words, 'rt', encoding="utf-8") as f:
for line in f:
if line[0] == '#':
continue
w = line.rstrip()
bl_words.add_keyword(w)
if args.bl_subreddits:
with open(args.bl_subreddits, 'rt', encoding="utf-8") as f:
for line in f:
if line[0] == '#':
continue
s = line.rstrip().lower()
bl_subreddits[s] = 1
if args.ignore_keys:
args.keep_keys = None
args.discard_tgt_keys = None
else:
if args.keep_keys:
keys = load_keys(args.keep_keys)
if args.discard_tgt_keys:
keys_rm = load_keys(args.discard_tgt_keys)
fld_root_in = args.reddit_input
fld_root_out = args.reddit_output
fld_split = fld_root_out + '/extract/%s'%(args.dump_name)
if args.task == 'extract':
extract()
elif args.task == 'conv':
fld_out = fld_root_out + '/conv'
build_conv(fld_out)
else:
print("Unknown task: %s" % args.task, file=sys.stderr)
|
run_status.py
|
from __future__ import print_function
__docformat__ = "restructuredtext en"
import glob
import re
import os
import sys
import time
import threading
s_comment = re.compile('^#')
s_general_read_len = re.compile('^READ_LENGTH ')
s_read_len = re.compile('^[1-8]+:READ_LENGTH ')
s_firecrest = None
# FIRECREST PATTERNS
# _p2f(<pattern>, lane, tile, cycle)
PATTERN_FIRECREST_QCM = 's_%s_%s_%s_qcm.xml'
# _p2f(<pattern>, lane, tile)
PATTERN_FIRECREST_INT = 's_%s_%s_02_int.txt'
PATTERN_FIRECREST_NSE = 's_%s_%s_nse.txt.gz'
PATTERN_FIRECREST_POS = 's_%s_%s_pos.txt'
PATTERN_FIRECREST_IDX = 's_%s_%s_idx.txt'
PATTERN_FIRECREST_CLU1 = 's_%s_%s_01_1_clu.txt'
PATTERN_FIRECREST_CLU2 = 's_%s_%s_01_2_clu.txt'
PATTERN_FIRECREST_CLU3 = 's_%s_%s_01_3_clu.txt'
PATTERN_FIRECREST_CLU4 = 's_%s_%s_01_4_clu.txt'
# BUSTARD PATTERNS
# _p2f(<pattern>, lane, tile)
PATTERN_BUSTARD_SIG2 = 's_%s_%s_sig2.txt'
PATTERN_BUSTARD_PRB = 's_%s_%s_prb.txt'
# GERALD PATTERNS
# _p2f(<pattern>, lane, tile)
PATTERN_GERALD_ALLTMP = 's_%s_%s_all.txt.tmp'
PATTERN_GERALD_QRAWTMP = 's_%s_%s_qraw.txt.tmp'
PATTERN_GERALD_ALLPNGTMP = 's_%s_%s_all.tmp.png'
PATTERN_GERALD_ALIGNTMP = 's_%s_%s_align.txt.tmp'
PATTERN_GERALD_QVALTMP = 's_%s_%s_qval.txt.tmp'
PATTERN_GERALD_SCORETMP = 's_%s_%s_score.txt.tmp'
PATTERN_GERALD_PREALIGNTMP = 's_%s_%s_prealign.txt.tmp'
PATTERN_GERALD_REALIGNTMP = 's_%s_%s_realign.txt.tmp'
PATTERN_GERALD_RESCORETMP = 's_%s_%s_rescore.txt.tmp'
PATTERN_GERALD_RESCOREPNG = 's_%s_%s_rescore.png'
PATTERN_GERALD_ERRORSTMPPNG = 's_%s_%s_errors.tmp.png'
PATTERN_GERALD_QCALTMP = 's_%s_%s_qcal.txt.tmp'
PATTERN_GERALD_QVAL = 's_%s_%s_qval.txt'
# _p2f(<pattern>, lane)
PATTERN_GERALD_SEQPRETMP = 's_%s_seqpre.txt.tmp'
PATTERN_GERALD_RESULTTMP = 's_%s_eland_result.txt.tmp'
PATTERN_GERALD_SIGMEANSTMP = 's_%s_Signal_Means.txt.tmp'
PATTERN_GERALD_CALLPNG = 's_%s_call.png'
PATTERN_GERALD_ALLPNG = 's_%s_all.png'
PATTERN_GERALD_PERCENTALLPNG = 's_%s_percent_all.png'
PATTERN_GERALD_PERCENTCALLPNG = 's_%s_percent_call.png'
PATTERN_GERALD_PERCENTBASEPNG = 's_%s_percent_base.png'
PATTERN_GERALD_FILTTMP = 's_%s_filt.txt.tmp'
PATTERN_GERALD_FRAGTMP = 's_%s_frag.txt.tmp'
PATTERN_GERALD_QREPORTTMP = 's_%s_qreport.txt.tmp'
PATTERN_GERALD_QTABLETMP = 's_%s_qtable.txt.tmp'
PATTERN_GERALD_QCALREPORTTMP = 's_%s_qcalreport.txt.tmp'
PATTERN_GERALD_SEQUENCETMP = 's_%s_sequence.txt.tmp'
PATTERN_GERALD_LANEFINISHED = 's_%s_finished.txt'
def _p2f(pattern, lane, tile=None, cycle=None):
"""
Converts a pattern plus info into file names
"""
# lane, and cycle provided (INVALID)
if tile is None and cycle is not None:
msg = "Handling of cycle without tile is not currently implemented."
raise ValueError(msg)
# lane, tile, cycle provided
elif cycle:
return pattern % (lane,
"%04d" % (tile,),
"%02d" % (cycle,))
# lane, tile provided
elif tile:
return pattern % (lane, "%04d" % (tile,))
# lane provided
else:
return pattern % (lane)
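# Examples of how _p2f expands the patterns above (values computed from the format strings):
#   _p2f(PATTERN_FIRECREST_QCM, 1, 5, 2)   -> 's_1_0005_02_qcm.xml'
#   _p2f(PATTERN_FIRECREST_INT, 1, 5)      -> 's_1_0005_02_int.txt'
#   _p2f(PATTERN_GERALD_LANEFINISHED, 3)   -> 's_3_finished.txt'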
class GARunStatus(object):
def __init__(self, conf_filepath):
"""
Given an eland config file in the top level directory
of a run, predicts the files that will be generated
during a run and provides methods for retrieving
(completed, total) for each step or entire run.
"""
#print('self._conf_filepath = %s' % (conf_filepath))
self._conf_filepath = conf_filepath
self._base_dir, junk = os.path.split(conf_filepath)
self._image_dir = os.path.join(self._base_dir, 'Images')
self.lanes = []
self.lane_read_length = {}
self.tiles = None
self.cycles = None
self.status = {}
self.status['firecrest'] = {}
self.status['bustard'] = {}
self.status['gerald'] = {}
self._process_config()
self._count_tiles()
self._count_cycles()
self._generate_expected()
def _process_config(self):
"""
Grabs info from self._conf_filepath
"""
f = open(self._conf_filepath, 'r')
for line in f:
#Skip comment lines for now.
if s_comment.search(line):
continue
mo = s_general_read_len.search(line)
if mo:
read_length = int(line[mo.end():])
#Handle general READ_LENGTH
for i in range(1,9):
self.lane_read_length[i] = read_length
mo = s_read_len.search(line)
if mo:
read_length = int(line[mo.end():])
lanes, junk = line.split(':')
#Convert lanes from string of lanes to list of lane #s.
lanes = [ int(i) for i in lanes ]
for lane in lanes:
#Keep track of which lanes are being run.
if lane not in self.lanes:
self.lanes.append(lane)
#Update with lane specific read lengths
self.lane_read_length[lane] = read_length
self.lanes.sort()
def _count_tiles(self):
"""
Count the number of tiles being used
"""
self.tiles = len(glob.glob(os.path.join(self._image_dir,
'L001',
'C1.1',
's_1_*_a.tif')))
def _count_cycles(self):
"""
Figures out the number of cycles that are available
"""
#print('self._image_dir = %s' % (self._image_dir))
cycle_dirs = glob.glob(os.path.join(self._image_dir, 'L001', 'C*.1'))
#print('cycle_dirs = %s' % (cycle_dirs))
cycle_list = []
for cycle_dir in cycle_dirs:
junk, c = os.path.split(cycle_dir)
cycle_list.append(int(c[1:c.find('.')]))
self.cycles = max(cycle_list)
def _generate_expected(self):
"""
generates a list of files we expect to find.
"""
firecrest = self.status['firecrest']
bustard = self.status['bustard']
gerald = self.status['gerald']
for lane in self.lanes:
for tile in range(1,self.tiles+1):
for cycle in range(1, self.cycles+1):
##########################
# LANE, TILE, CYCLE LAYER
# FIRECREST
firecrest[_p2f(PATTERN_FIRECREST_QCM, lane, tile, cycle)] = False
###################
# LANE, TILE LAYER
# FIRECREST
firecrest[_p2f(PATTERN_FIRECREST_INT, lane, tile)] = False
firecrest[_p2f(PATTERN_FIRECREST_NSE, lane, tile)] = False
firecrest[_p2f(PATTERN_FIRECREST_POS, lane, tile)] = False
firecrest[_p2f(PATTERN_FIRECREST_IDX, lane, tile)] = False
firecrest[_p2f(PATTERN_FIRECREST_CLU1, lane, tile)] = False
firecrest[_p2f(PATTERN_FIRECREST_CLU2, lane, tile)] = False
firecrest[_p2f(PATTERN_FIRECREST_CLU3, lane, tile)] = False
firecrest[_p2f(PATTERN_FIRECREST_CLU4, lane, tile)] = False
# BUSTARD
bustard[_p2f(PATTERN_BUSTARD_SIG2, lane, tile)] = False
bustard[_p2f(PATTERN_BUSTARD_PRB, lane, tile)] = False
# GERALD
#gerald[_p2f(PATTERN_GERALD_ALLTMP, lane, tile)] = False
#gerald[_p2f(PATTERN_GERALD_QRAWTMP, lane, tile)] = False
#gerald[_p2f(PATTERN_GERALD_ALLPNGTMP, lane, tile)] = False
#gerald[_p2f(PATTERN_GERALD_ALIGNTMP, lane, tile)] = False
#gerald[_p2f(PATTERN_GERALD_QVALTMP, lane, tile)] = False
#gerald[_p2f(PATTERN_GERALD_SCORETMP, lane, tile)] = False
#gerald[_p2f(PATTERN_GERALD_PREALIGNTMP, lane, tile)] = False
#gerald[_p2f(PATTERN_GERALD_REALIGNTMP, lane, tile)] = False
#gerald[_p2f(PATTERN_GERALD_RESCORETMP, lane, tile)] = False
gerald[_p2f(PATTERN_GERALD_RESCOREPNG, lane, tile)] = False
#gerald[_p2f(PATTERN_GERALD_ERRORSTMPPNG, lane, tile)] = False
#gerald[_p2f(PATTERN_GERALD_QCALTMP, lane, tile)] = False
#gerald[_p2f(PATTERN_GERALD_QVAL, lane, tile)] = False
###################
# LANE LAYER
# GERALD
#gerald[_p2f(PATTERN_GERALD_SEQPRETMP, lane)] = False
#gerald[_p2f(PATTERN_GERALD_RESULTTMP, lane)] = False
#gerald[_p2f(PATTERN_GERALD_SIGMEANSTMP, lane)] = False
gerald[_p2f(PATTERN_GERALD_CALLPNG, lane)] = False
gerald[_p2f(PATTERN_GERALD_ALLPNG, lane)] = False
gerald[_p2f(PATTERN_GERALD_PERCENTALLPNG, lane)] = False
gerald[_p2f(PATTERN_GERALD_PERCENTCALLPNG, lane)] = False
gerald[_p2f(PATTERN_GERALD_PERCENTBASEPNG, lane)] = False
#gerald[_p2f(PATTERN_GERALD_FILTTMP, lane)] = False
#gerald[_p2f(PATTERN_GERALD_FRAGTMP, lane)] = False
#gerald[_p2f(PATTERN_GERALD_QREPORTTMP, lane)] = False
#gerald[_p2f(PATTERN_GERALD_QTABLETMP, lane)] = False
#gerald[_p2f(PATTERN_GERALD_QCALREPORTTMP, lane)] = False
#gerald[_p2f(PATTERN_GERALD_SEQUENCETMP, lane)] = False
gerald[_p2f(PATTERN_GERALD_LANEFINISHED, lane)] = False
#################
# LOOPS FINISHED
# FIRECREST
firecrest['offsets_finished.txt'] = False
firecrest['finished.txt'] = False
# BUSTARD
bustard['finished.txt'] = False
# GERALD
gerald['tiles.txt'] = False
gerald['FullAll.htm'] = False
#gerald['All.htm.tmp'] = False
#gerald['Signal_Means.txt.tmp'] = False
#gerald['plotIntensity_for_IVC'] = False
#gerald['IVC.htm.tmp'] = False
gerald['FullError.htm'] = False
gerald['FullPerfect.htm'] = False
#gerald['Error.htm.tmp'] = False
#gerald['Perfect.htm.tmp'] = False
#gerald['Summary.htm.tmp'] = False
#gerald['Tile.htm.tmp'] = False
gerald['finished.txt'] = False
def statusFirecrest(self):
"""
returns (<completed>, <total>)
"""
firecrest = self.status['firecrest']
total = len(firecrest)
completed = firecrest.values().count(True)
return (completed, total)
def statusBustard(self):
"""
returns (<completed>, <total>)
"""
bustard = self.status['bustard']
total = len(bustard)
completed = bustard.values().count(True)
return (completed, total)
def statusGerald(self):
"""
returns (<completed>, <total>)
"""
gerald = self.status['gerald']
total = len(gerald)
completed = gerald.values().count(True)
return (completed, total)
def statusTotal(self):
"""
returns (<completed>, <total>)
"""
#f = firecrest c = completed
#b = bustard t = total
#g = gerald
fc, ft = self.statusFirecrest()
bc, bt = self.statusBustard()
gc, gt = self.statusGerald()
return (fc+bc+gc, ft+bt+gt)
def statusReport(self):
"""
Generate the basic percent complete report
"""
def _percentCompleted(completed, total):
"""
Returns percent completed as float
"""
return (completed / float(total)) * 100
fc, ft = self.statusFirecrest()
bc, bt = self.statusBustard()
gc, gt = self.statusGerald()
tc, tt = self.statusTotal()
fp = _percentCompleted(fc, ft)
bp = _percentCompleted(bc, bt)
gp = _percentCompleted(gc, gt)
tp = _percentCompleted(tc, tt)
report = ['Firecrest: %s%% (%s/%s)' % (fp, fc, ft),
' Bustard: %s%% (%s/%s)' % (bp, bc, bt),
' Gerald: %s%% (%s/%s)' % (gp, gc, gt),
'-----------------------',
' Total: %s%% (%s/%s)' % (tp, tc, tt),
]
return report
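# Example of the returned report (numbers illustrative; exact float formatting follows '%s%%'):
#   Firecrest: 25.0% (1/4)
#     Bustard: 50.0% (1/2)
#      Gerald: 25.0% (1/4)
#   -----------------------
#       Total: 30.0% (3/10)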
def updateFirecrest(self, filename):
"""
Marks firecrest filename as being completed.
"""
self.status['firecrest'][filename] = True
def updateBustard(self, filename):
"""
Marks bustard filename as being completed.
"""
self.status['bustard'][filename] = True
def updateGerald(self, filename):
"""
Marks gerald filename as being completed.
"""
self.status['gerald'][filename] = True
##################################################
# Functions to be called by Thread(target=<func>)
def _cmdLineStatusMonitorFunc(conf_info):
"""
Given a ConfigInfo object, provides status to stdout.
You should probably use startCmdLineStatusMonitor()
instead of this function.
.. python:
def example_launch():
t = threading.Thread(target=_cmdLineStatusMonitorFunc,
args=[conf_info])
t.setDaemon(True)
t.start()
"""
SLEEP_AMOUNT = 30
while 1:
if conf_info.status is None:
print("No status object yet.")
time.sleep(SLEEP_AMOUNT)
continue
report = conf_info.status.statusReport()
print( os.linesep.join(report))
print()
time.sleep(SLEEP_AMOUNT)
#############################################
# Start monitor thread convenience functions
def startCmdLineStatusMonitor(conf_info):
"""
Starts a command line status monitor given a conf_info object.
"""
t = threading.Thread(target=_cmdLineStatusMonitorFunc, args=[conf_info])
t.setDaemon(True)
t.start()
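# Example (hedged): a caller holding a conf_info object with a .status attribute can start the
# monitor in the background and continue with its pipeline; the monitor prints a report every
# 30 seconds until the process exits (the thread is a daemon):
#   startCmdLineStatusMonitor(conf_info)
#   run_pipeline(conf_info)   # run_pipeline is hypothetical, not part of this module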
from optparse import OptionParser
def make_parser():
usage = "%prog: config file"
parser = OptionParser()
return parser
def main(cmdline=None):
parser = make_parser()
opt, args = parser.parse_args(cmdline)
if len(args) != 1:
parser.error("need name of configuration file")
status = GARunStatus(args[0])
print(os.linesep.join(status.statusReport()))
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
|
tests.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for PySpark; additional tests are implemented as doctests in
individual modules.
"""
from array import array
from fileinput import input
from glob import glob
import os
import re
import shutil
import subprocess
import sys
import tempfile
import time
import zipfile
import random
from platform import python_implementation
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.files import SparkFiles
from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, \
CloudPickleSerializer
from pyspark.shuffle import Aggregator, InMemoryMerger, ExternalMerger, ExternalSorter
from pyspark.sql import SQLContext, IntegerType, Row
from pyspark import shuffle
_have_scipy = False
_have_numpy = False
try:
import scipy.sparse
_have_scipy = True
except:
# No SciPy, but that's okay, we'll skip those tests
pass
try:
import numpy as np
_have_numpy = True
except:
# No NumPy, but that's okay, we'll skip those tests
pass
SPARK_HOME = os.environ["SPARK_HOME"]
class MergerTests(unittest.TestCase):
def setUp(self):
self.N = 1 << 14
self.l = [i for i in xrange(self.N)]
self.data = zip(self.l, self.l)
self.agg = Aggregator(lambda x: [x],
lambda x, y: x.append(y) or x,
lambda x, y: x.extend(y) or x)
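# Note (assumption, matching pyspark.shuffle.Aggregator's createCombiner / mergeValue /
# mergeCombiners slots): the three callables wrap a value in a list, append a value to an
# existing list, and extend one list with another, so every key ends up with the full list
# of values seen for it.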
def test_in_memory(self):
m = InMemoryMerger(self.agg)
m.mergeValues(self.data)
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)))
m = InMemoryMerger(self.agg)
m.mergeCombiners(map(lambda (x, y): (x, [y]), self.data))
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)))
def test_small_dataset(self):
m = ExternalMerger(self.agg, 1000)
m.mergeValues(self.data)
self.assertEqual(m.spills, 0)
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)))
m = ExternalMerger(self.agg, 1000)
m.mergeCombiners(map(lambda (x, y): (x, [y]), self.data))
self.assertEqual(m.spills, 0)
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)))
def test_medium_dataset(self):
m = ExternalMerger(self.agg, 10)
m.mergeValues(self.data)
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)))
m = ExternalMerger(self.agg, 10)
m.mergeCombiners(map(lambda (x, y): (x, [y]), self.data * 3))
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)) * 3)
def test_huge_dataset(self):
m = ExternalMerger(self.agg, 10, partitions=3)
m.mergeCombiners(map(lambda (k, v): (k, [str(v)]), self.data * 10))
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(len(v) for k, v in m._recursive_merged_items(0)),
self.N * 10)
m._cleanup()
class SorterTests(unittest.TestCase):
def test_in_memory_sort(self):
l = range(1024)
random.shuffle(l)
sorter = ExternalSorter(1024)
self.assertEquals(sorted(l), list(sorter.sorted(l)))
self.assertEquals(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True)))
self.assertEquals(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x)))
self.assertEquals(sorted(l, key=lambda x: -x, reverse=True),
list(sorter.sorted(l, key=lambda x: -x, reverse=True)))
def test_external_sort(self):
l = range(1024)
random.shuffle(l)
sorter = ExternalSorter(1)
self.assertEquals(sorted(l), list(sorter.sorted(l)))
self.assertGreater(shuffle.DiskBytesSpilled, 0)
last = shuffle.DiskBytesSpilled
self.assertEquals(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
last = shuffle.DiskBytesSpilled
self.assertEquals(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
last = shuffle.DiskBytesSpilled
self.assertEquals(sorted(l, key=lambda x: -x, reverse=True),
list(sorter.sorted(l, key=lambda x: -x, reverse=True)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
def test_external_sort_in_rdd(self):
conf = SparkConf().set("spark.python.worker.memory", "1m")
sc = SparkContext(conf=conf)
l = range(10240)
random.shuffle(l)
rdd = sc.parallelize(l, 10)
self.assertEquals(sorted(l), rdd.sortBy(lambda x: x).collect())
sc.stop()
class SerializationTestCase(unittest.TestCase):
def test_namedtuple(self):
from collections import namedtuple
from cPickle import dumps, loads
P = namedtuple("P", "x y")
p1 = P(1, 3)
p2 = loads(dumps(p1, 2))
self.assertEquals(p1, p2)
def test_itemgetter(self):
from operator import itemgetter
ser = CloudPickleSerializer()
d = range(10)
getter = itemgetter(1)
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = itemgetter(0, 3)
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
def test_attrgetter(self):
from operator import attrgetter
ser = CloudPickleSerializer()
class C(object):
def __getattr__(self, item):
return item
d = C()
getter = attrgetter("a")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("a", "b")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
d.e = C()
getter = attrgetter("e.a")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("e.a", "e.b")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
# Regression test for SPARK-3415
def test_pickling_file_handles(self):
ser = CloudPickleSerializer()
out1 = sys.stderr
out2 = ser.loads(ser.dumps(out1))
self.assertEquals(out1, out2)
def test_func_globals(self):
class Unpicklable(object):
def __reduce__(self):
raise Exception("not picklable")
global exit
exit = Unpicklable()
ser = CloudPickleSerializer()
self.assertRaises(Exception, lambda: ser.dumps(exit))
def foo():
sys.exit(0)
self.assertTrue("exit" in foo.func_code.co_names)
ser.dumps(foo)
class PySparkTestCase(unittest.TestCase):
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
self.sc = SparkContext('local[4]', class_name, batchSize=2)
def tearDown(self):
self.sc.stop()
sys.path = self._old_sys_path
class ReusedPySparkTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.sc = SparkContext('local[4]', cls.__name__, batchSize=2)
@classmethod
def tearDownClass(cls):
cls.sc.stop()
class CheckpointTests(ReusedPySparkTestCase):
def setUp(self):
self.checkpointDir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.checkpointDir.name)
self.sc.setCheckpointDir(self.checkpointDir.name)
def tearDown(self):
shutil.rmtree(self.checkpointDir.name)
def test_basic_checkpointing(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: range(1, x + 1))
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
result = flatMappedRDD.collect()
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.isCheckpointed())
self.assertEqual(flatMappedRDD.collect(), result)
self.assertEqual("file:" + self.checkpointDir.name,
os.path.dirname(os.path.dirname(flatMappedRDD.getCheckpointFile())))
def test_checkpoint_and_restore(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: [x])
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
flatMappedRDD.count() # forces a checkpoint to be computed
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.getCheckpointFile() is not None)
recovered = self.sc._checkpointFile(flatMappedRDD.getCheckpointFile(),
flatMappedRDD._jrdd_deserializer)
self.assertEquals([1, 2, 3, 4], recovered.collect())
class AddFileTests(PySparkTestCase):
def test_add_py_file(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this job fails due to `userlibrary` not being on the Python path:
# disable logging in log4j temporarily
log4j = self.sc._jvm.org.apache.log4j
old_level = log4j.LogManager.getRootLogger().getLevel()
log4j.LogManager.getRootLogger().setLevel(log4j.Level.FATAL)
def func(x):
from userlibrary import UserClass
return UserClass().hello()
self.assertRaises(Exception,
self.sc.parallelize(range(2)).map(func).first)
log4j.LogManager.getRootLogger().setLevel(old_level)
# Add the file, so the job should now succeed:
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addPyFile(path)
res = self.sc.parallelize(range(2)).map(func).first()
self.assertEqual("Hello World!", res)
def test_add_file_locally(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
self.sc.addFile(path)
download_path = SparkFiles.get("hello.txt")
self.assertNotEqual(path, download_path)
with open(download_path) as test_file:
self.assertEquals("Hello World!\n", test_file.readline())
def test_add_py_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlibrary import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addFile(path)
from userlibrary import UserClass
self.assertEqual("Hello World!", UserClass().hello())
def test_add_egg_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlib import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlib-0.1-py2.7.egg")
self.sc.addPyFile(path)
from userlib import UserClass
self.assertEqual("Hello World from inside a package!", UserClass().hello())
def test_overwrite_system_module(self):
self.sc.addPyFile(os.path.join(SPARK_HOME, "python/test_support/SimpleHTTPServer.py"))
import SimpleHTTPServer
self.assertEqual("My Server", SimpleHTTPServer.__name__)
def func(x):
import SimpleHTTPServer
return SimpleHTTPServer.__name__
self.assertEqual(["My Server"], self.sc.parallelize(range(1)).map(func).collect())
class RDDTests(ReusedPySparkTestCase):
def test_id(self):
rdd = self.sc.parallelize(range(10))
id = rdd.id()
self.assertEqual(id, rdd.id())
rdd2 = rdd.map(str).filter(bool)
id2 = rdd2.id()
self.assertEqual(id + 1, id2)
self.assertEqual(id2, rdd2.id())
def test_save_as_textfile_with_unicode(self):
# Regression test for SPARK-970
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = ''.join(input(glob(tempFile.name + "/part-0000*")))
self.assertEqual(x, unicode(raw_contents.strip(), "utf-8"))
def test_save_as_textfile_with_utf8(self):
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x.encode("utf-8")])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = ''.join(input(glob(tempFile.name + "/part-0000*")))
self.assertEqual(x, unicode(raw_contents.strip(), "utf-8"))
def test_transforming_cartesian_result(self):
# Regression test for SPARK-1034
rdd1 = self.sc.parallelize([1, 2])
rdd2 = self.sc.parallelize([3, 4])
cart = rdd1.cartesian(rdd2)
result = cart.map(lambda (x, y): x + y).collect()
def test_transforming_pickle_file(self):
# Regression test for SPARK-2601
data = self.sc.parallelize(["Hello", "World!"])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsPickleFile(tempFile.name)
pickled_file = self.sc.pickleFile(tempFile.name)
pickled_file.map(lambda x: x).collect()
def test_cartesian_on_textfile(self):
# Regression test for
path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
a = self.sc.textFile(path)
result = a.cartesian(a).collect()
(x, y) = result[0]
self.assertEqual("Hello World!", x.strip())
self.assertEqual("Hello World!", y.strip())
def test_deleting_input_files(self):
# Regression test for SPARK-1025
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write("Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
self.assertRaises(Exception, lambda: filtered_data.count())
def testAggregateByKey(self):
data = self.sc.parallelize([(1, 1), (1, 1), (3, 2), (5, 1), (5, 3)], 2)
def seqOp(x, y):
x.add(y)
return x
def combOp(x, y):
x |= y
return x
sets = dict(data.aggregateByKey(set(), seqOp, combOp).collect())
self.assertEqual(3, len(sets))
self.assertEqual(set([1]), sets[1])
self.assertEqual(set([2]), sets[3])
self.assertEqual(set([1, 3]), sets[5])
def test_itemgetter(self):
rdd = self.sc.parallelize([range(10)])
from operator import itemgetter
self.assertEqual([1], rdd.map(itemgetter(1)).collect())
self.assertEqual([(2, 3)], rdd.map(itemgetter(2, 3)).collect())
def test_namedtuple_in_rdd(self):
from collections import namedtuple
Person = namedtuple("Person", "id firstName lastName")
jon = Person(1, "Jon", "Doe")
jane = Person(2, "Jane", "Doe")
theDoes = self.sc.parallelize([jon, jane])
self.assertEquals([jon, jane], theDoes.collect())
def test_large_broadcast(self):
N = 100000
data = [[float(i) for i in range(300)] for i in range(N)]
bdata = self.sc.broadcast(data) # 270MB
m = self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
self.assertEquals(N, m)
def test_large_closure(self):
N = 1000000
data = [float(i) for i in xrange(N)]
rdd = self.sc.parallelize(range(1), 1).map(lambda x: len(data))
self.assertEquals(N, rdd.first())
self.assertTrue(rdd._broadcast is not None)
rdd = self.sc.parallelize(range(1), 1).map(lambda x: 1)
self.assertEqual(1, rdd.first())
self.assertTrue(rdd._broadcast is None)
def test_zip_with_different_serializers(self):
a = self.sc.parallelize(range(5))
b = self.sc.parallelize(range(100, 105))
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
a = a._reserialize(BatchedSerializer(PickleSerializer(), 2))
b = b._reserialize(MarshalSerializer())
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
def test_zip_with_different_number_of_items(self):
a = self.sc.parallelize(range(5), 2)
# different number of partitions
b = self.sc.parallelize(range(100, 106), 3)
self.assertRaises(ValueError, lambda: a.zip(b))
# different number of batched items in JVM
b = self.sc.parallelize(range(100, 104), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# different number of items in one pair
b = self.sc.parallelize(range(100, 106), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# same total number of items, but different distributions
a = self.sc.parallelize([2, 3], 2).flatMap(range)
b = self.sc.parallelize([3, 2], 2).flatMap(range)
self.assertEquals(a.count(), b.count())
self.assertRaises(Exception, lambda: a.zip(b).count())
def test_count_approx_distinct(self):
rdd = self.sc.parallelize(range(1000))
self.assertTrue(950 < rdd.countApproxDistinct(0.04) < 1050)
self.assertTrue(950 < rdd.map(float).countApproxDistinct(0.04) < 1050)
self.assertTrue(950 < rdd.map(str).countApproxDistinct(0.04) < 1050)
self.assertTrue(950 < rdd.map(lambda x: (x, -x)).countApproxDistinct(0.04) < 1050)
rdd = self.sc.parallelize([i % 20 for i in range(1000)], 7)
self.assertTrue(18 < rdd.countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(float).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(str).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(lambda x: (x, -x)).countApproxDistinct() < 22)
self.assertRaises(ValueError, lambda: rdd.countApproxDistinct(0.00000001))
self.assertRaises(ValueError, lambda: rdd.countApproxDistinct(0.5))
def test_histogram(self):
# empty
rdd = self.sc.parallelize([])
self.assertEquals([0], rdd.histogram([0, 10])[1])
self.assertEquals([0, 0], rdd.histogram([0, 4, 10])[1])
self.assertRaises(ValueError, lambda: rdd.histogram(1))
# out of range
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEquals([0], rdd.histogram([0, 10])[1])
self.assertEquals([0, 0], rdd.histogram((0, 4, 10))[1])
# in range with one bucket
rdd = self.sc.parallelize(range(1, 5))
self.assertEquals([4], rdd.histogram([0, 10])[1])
self.assertEquals([3, 1], rdd.histogram([0, 4, 10])[1])
# in range with one bucket exact match
self.assertEquals([4], rdd.histogram([1, 4])[1])
# out of range with two buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEquals([0, 0], rdd.histogram([0, 5, 10])[1])
# out of range with two uneven buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEquals([0, 0], rdd.histogram([0, 4, 10])[1])
# in range with two buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEquals([3, 2], rdd.histogram([0, 5, 10])[1])
# in range with two bucket and None
rdd = self.sc.parallelize([1, 2, 3, 5, 6, None, float('nan')])
self.assertEquals([3, 2], rdd.histogram([0, 5, 10])[1])
# in range with two uneven buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEquals([3, 2], rdd.histogram([0, 5, 11])[1])
# mixed range with two uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.0, 11.01])
self.assertEquals([4, 3], rdd.histogram([0, 5, 11])[1])
# mixed range with four uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0, 199.0, 200.0, 200.1])
self.assertEquals([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# mixed range with uneven buckets and NaN
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0,
199.0, 200.0, 200.1, None, float('nan')])
self.assertEquals([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# out of range with infinite buckets
rdd = self.sc.parallelize([10.01, -0.01, float('nan'), float("inf")])
self.assertEquals([1, 2], rdd.histogram([float('-inf'), 0, float('inf')])[1])
# invalid buckets
self.assertRaises(ValueError, lambda: rdd.histogram([]))
self.assertRaises(ValueError, lambda: rdd.histogram([1]))
self.assertRaises(ValueError, lambda: rdd.histogram(0))
self.assertRaises(TypeError, lambda: rdd.histogram({}))
# without buckets
rdd = self.sc.parallelize(range(1, 5))
self.assertEquals(([1, 4], [4]), rdd.histogram(1))
# without buckets single element
rdd = self.sc.parallelize([1])
self.assertEquals(([1, 1], [1]), rdd.histogram(1))
# without bucket no range
rdd = self.sc.parallelize([1] * 4)
self.assertEquals(([1, 1], [4]), rdd.histogram(1))
# without buckets basic two
rdd = self.sc.parallelize(range(1, 5))
self.assertEquals(([1, 2.5, 4], [2, 2]), rdd.histogram(2))
# without buckets with more requested than elements
rdd = self.sc.parallelize([1, 2])
buckets = [1 + 0.2 * i for i in range(6)]
hist = [1, 0, 0, 0, 1]
self.assertEquals((buckets, hist), rdd.histogram(5))
# invalid RDDs
rdd = self.sc.parallelize([1, float('inf')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
rdd = self.sc.parallelize([float('nan')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
# string
rdd = self.sc.parallelize(["ab", "ac", "b", "bd", "ef"], 2)
self.assertEquals([2, 2], rdd.histogram(["a", "b", "c"])[1])
self.assertEquals((["ab", "ef"], [5]), rdd.histogram(1))
self.assertRaises(TypeError, lambda: rdd.histogram(2))
# mixed RDD
rdd = self.sc.parallelize([1, 4, "ab", "ac", "b"], 2)
self.assertEquals([1, 1], rdd.histogram([0, 4, 10])[1])
self.assertEquals([2, 1], rdd.histogram(["a", "b", "c"])[1])
self.assertEquals(([1, "b"], [5]), rdd.histogram(1))
self.assertRaises(TypeError, lambda: rdd.histogram(2))
def test_repartitionAndSortWithinPartitions(self):
rdd = self.sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)], 2)
repartitioned = rdd.repartitionAndSortWithinPartitions(2, lambda key: key % 2)
partitions = repartitioned.glom().collect()
self.assertEquals(partitions[0], [(0, 5), (0, 8), (2, 6)])
self.assertEquals(partitions[1], [(1, 3), (3, 8), (3, 8)])
def test_distinct(self):
rdd = self.sc.parallelize((1, 2, 3)*10, 10)
self.assertEquals(rdd.getNumPartitions(), 10)
self.assertEquals(rdd.distinct().count(), 3)
result = rdd.distinct(5)
self.assertEquals(result.getNumPartitions(), 5)
self.assertEquals(result.count(), 3)
class ProfilerTests(PySparkTestCase):
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
conf = SparkConf().set("spark.python.profile", "true")
self.sc = SparkContext('local[4]', class_name, batchSize=2, conf=conf)
def test_profiler(self):
def heavy_foo(x):
for i in range(1 << 20):
x = 1
rdd = self.sc.parallelize(range(100))
rdd.foreach(heavy_foo)
profiles = self.sc._profile_stats
self.assertEqual(1, len(profiles))
id, acc, _ = profiles[0]
stats = acc.value
self.assertTrue(stats is not None)
width, stat_list = stats.get_print_list([])
func_names = [func_name for fname, n, func_name in stat_list]
self.assertTrue("heavy_foo" in func_names)
self.sc.show_profiles()
d = tempfile.gettempdir()
self.sc.dump_profiles(d)
self.assertTrue("rdd_%d.pstats" % id in os.listdir(d))
class SQLTests(ReusedPySparkTestCase):
def setUp(self):
self.sqlCtx = SQLContext(self.sc)
def test_udf(self):
self.sqlCtx.registerFunction("twoArgs", lambda x, y: len(x) + y, IntegerType())
[row] = self.sqlCtx.sql("SELECT twoArgs('test', 1)").collect()
self.assertEqual(row[0], 5)
def test_broadcast_in_udf(self):
bar = {"a": "aa", "b": "bb", "c": "abc"}
foo = self.sc.broadcast(bar)
self.sqlCtx.registerFunction("MYUDF", lambda x: foo.value[x] if x else '')
[res] = self.sqlCtx.sql("SELECT MYUDF('c')").collect()
self.assertEqual("abc", res[0])
[res] = self.sqlCtx.sql("SELECT MYUDF('')").collect()
self.assertEqual("", res[0])
def test_basic_functions(self):
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
srdd = self.sqlCtx.jsonRDD(rdd)
srdd.count()
srdd.collect()
srdd.schemaString()
srdd.schema()
# cache and checkpoint
self.assertFalse(srdd.is_cached)
srdd.persist()
srdd.unpersist()
srdd.cache()
self.assertTrue(srdd.is_cached)
self.assertFalse(srdd.isCheckpointed())
self.assertEqual(None, srdd.getCheckpointFile())
srdd = srdd.coalesce(2, True)
srdd = srdd.repartition(3)
srdd = srdd.distinct()
srdd.intersection(srdd)
self.assertEqual(2, srdd.count())
srdd.registerTempTable("temp")
srdd = self.sqlCtx.sql("select foo from temp")
srdd.count()
srdd.collect()
def test_distinct(self):
rdd = self.sc.parallelize(['{"a": 1}', '{"b": 2}', '{"c": 3}']*10, 10)
srdd = self.sqlCtx.jsonRDD(rdd)
self.assertEquals(srdd.getNumPartitions(), 10)
self.assertEquals(srdd.distinct().count(), 3)
result = srdd.distinct(5)
self.assertEquals(result.getNumPartitions(), 5)
self.assertEquals(result.count(), 3)
def test_apply_schema_to_row(self):
srdd = self.sqlCtx.jsonRDD(self.sc.parallelize(["""{"a":2}"""]))
srdd2 = self.sqlCtx.applySchema(srdd.map(lambda x: x), srdd.schema())
self.assertEqual(srdd.collect(), srdd2.collect())
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x))
srdd3 = self.sqlCtx.applySchema(rdd, srdd.schema())
self.assertEqual(10, srdd3.count())
def test_serialize_nested_array_and_map(self):
d = [Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})]
rdd = self.sc.parallelize(d)
srdd = self.sqlCtx.inferSchema(rdd)
row = srdd.first()
self.assertEqual(1, len(row.l))
self.assertEqual(1, row.l[0].a)
self.assertEqual("2", row.d["key"].d)
l = srdd.map(lambda x: x.l).first()
self.assertEqual(1, len(l))
self.assertEqual('s', l[0].b)
d = srdd.map(lambda x: x.d).first()
self.assertEqual(1, len(d))
self.assertEqual(1.0, d["key"].c)
row = srdd.map(lambda x: x.d["key"]).first()
self.assertEqual(1.0, row.c)
self.assertEqual("2", row.d)
class InputFormatTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(cls.tempdir.name)
cls.sc._jvm.WriteInputFormatTestDataGenerator.generateData(cls.tempdir.name, cls.sc._jsc)
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
shutil.rmtree(cls.tempdir.name)
def test_sequencefiles(self):
basepath = self.tempdir.name
ints = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfint/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
doubles = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfdouble/",
"org.apache.hadoop.io.DoubleWritable",
"org.apache.hadoop.io.Text").collect())
ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
self.assertEqual(doubles, ed)
bytes = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbytes/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BytesWritable").collect())
ebs = [(1, bytearray('aa', 'utf-8')),
(1, bytearray('aa', 'utf-8')),
(2, bytearray('aa', 'utf-8')),
(2, bytearray('bb', 'utf-8')),
(2, bytearray('bb', 'utf-8')),
(3, bytearray('cc', 'utf-8'))]
self.assertEqual(bytes, ebs)
text = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sftext/",
"org.apache.hadoop.io.Text",
"org.apache.hadoop.io.Text").collect())
et = [(u'1', u'aa'),
(u'1', u'aa'),
(u'2', u'aa'),
(u'2', u'bb'),
(u'2', u'bb'),
(u'3', u'cc')]
self.assertEqual(text, et)
bools = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbool/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BooleanWritable").collect())
eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
self.assertEqual(bools, eb)
nulls = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfnull/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BooleanWritable").collect())
en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
self.assertEqual(nulls, en)
maps = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfmap/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable").collect())
em = [(1, {}),
(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(2, {1.0: u'cc'}),
(3, {2.0: u'dd'})]
self.assertEqual(maps, em)
# arrays get pickled to tuples by default
tuples = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfarray/",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable").collect())
et = [(1, ()),
(2, (3.0, 4.0, 5.0)),
(3, (4.0, 5.0, 6.0))]
self.assertEqual(tuples, et)
# with custom converters, primitive arrays can stay as arrays
arrays = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfarray/",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
ea = [(1, array('d')),
(2, array('d', [3.0, 4.0, 5.0])),
(3, array('d', [4.0, 5.0, 6.0]))]
self.assertEqual(arrays, ea)
clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
"org.apache.hadoop.io.Text",
"org.apache.spark.api.python.TestWritable").collect())
ec = (u'1',
{u'__class__': u'org.apache.spark.api.python.TestWritable',
u'double': 54.0, u'int': 123, u'str': u'test1'})
self.assertEqual(clazz[0], ec)
unbatched_clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
"org.apache.hadoop.io.Text",
"org.apache.spark.api.python.TestWritable",
batchSize=1).collect())
self.assertEqual(unbatched_clazz[0], ec)
def test_oldhadoop(self):
basepath = self.tempdir.name
ints = sorted(self.sc.hadoopFile(basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
hellopath = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
oldconf = {"mapred.input.dir": hellopath}
hello = self.sc.hadoopRDD("org.apache.hadoop.mapred.TextInputFormat",
"org.apache.hadoop.io.LongWritable",
"org.apache.hadoop.io.Text",
conf=oldconf).collect()
result = [(0, u'Hello World!')]
self.assertEqual(hello, result)
def test_newhadoop(self):
basepath = self.tempdir.name
ints = sorted(self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
hellopath = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
newconf = {"mapred.input.dir": hellopath}
hello = self.sc.newAPIHadoopRDD("org.apache.hadoop.mapreduce.lib.input.TextInputFormat",
"org.apache.hadoop.io.LongWritable",
"org.apache.hadoop.io.Text",
conf=newconf).collect()
result = [(0, u'Hello World!')]
self.assertEqual(hello, result)
def test_newolderror(self):
basepath = self.tempdir.name
self.assertRaises(Exception, lambda: self.sc.hadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
def test_bad_inputs(self):
basepath = self.tempdir.name
self.assertRaises(Exception, lambda: self.sc.sequenceFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.io.NotValidWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.hadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.NotValidInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.NotValidInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
def test_converters(self):
# use of custom converters
basepath = self.tempdir.name
maps = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfmap/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable",
keyConverter="org.apache.spark.api.python.TestInputKeyConverter",
valueConverter="org.apache.spark.api.python.TestInputValueConverter").collect())
em = [(u'\x01', []),
(u'\x01', [3.0]),
(u'\x02', [1.0]),
(u'\x02', [1.0]),
(u'\x03', [2.0])]
self.assertEqual(maps, em)
class OutputFormatTests(ReusedPySparkTestCase):
def setUp(self):
self.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.tempdir.name)
def tearDown(self):
shutil.rmtree(self.tempdir.name, ignore_errors=True)
def test_sequencefiles(self):
basepath = self.tempdir.name
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.sc.parallelize(ei).saveAsSequenceFile(basepath + "/sfint/")
ints = sorted(self.sc.sequenceFile(basepath + "/sfint/").collect())
self.assertEqual(ints, ei)
ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
self.sc.parallelize(ed).saveAsSequenceFile(basepath + "/sfdouble/")
doubles = sorted(self.sc.sequenceFile(basepath + "/sfdouble/").collect())
self.assertEqual(doubles, ed)
ebs = [(1, bytearray(b'\x00\x07spam\x08')), (2, bytearray(b'\x00\x07spam\x08'))]
self.sc.parallelize(ebs).saveAsSequenceFile(basepath + "/sfbytes/")
bytes = sorted(self.sc.sequenceFile(basepath + "/sfbytes/").collect())
self.assertEqual(bytes, ebs)
et = [(u'1', u'aa'),
(u'2', u'bb'),
(u'3', u'cc')]
self.sc.parallelize(et).saveAsSequenceFile(basepath + "/sftext/")
text = sorted(self.sc.sequenceFile(basepath + "/sftext/").collect())
self.assertEqual(text, et)
eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
self.sc.parallelize(eb).saveAsSequenceFile(basepath + "/sfbool/")
bools = sorted(self.sc.sequenceFile(basepath + "/sfbool/").collect())
self.assertEqual(bools, eb)
en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
self.sc.parallelize(en).saveAsSequenceFile(basepath + "/sfnull/")
nulls = sorted(self.sc.sequenceFile(basepath + "/sfnull/").collect())
self.assertEqual(nulls, en)
em = [(1, {}),
(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(2, {1.0: u'cc'}),
(3, {2.0: u'dd'})]
self.sc.parallelize(em).saveAsSequenceFile(basepath + "/sfmap/")
maps = sorted(self.sc.sequenceFile(basepath + "/sfmap/").collect())
self.assertEqual(maps, em)
def test_oldhadoop(self):
basepath = self.tempdir.name
dict_data = [(1, {}),
(1, {"row1": 1.0}),
(2, {"row2": 2.0})]
self.sc.parallelize(dict_data).saveAsHadoopFile(
basepath + "/oldhadoop/",
"org.apache.hadoop.mapred.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable")
result = sorted(self.sc.hadoopFile(
basepath + "/oldhadoop/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable").collect())
self.assertEqual(result, dict_data)
conf = {
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.MapWritable",
"mapred.output.dir": basepath + "/olddataset/"
}
self.sc.parallelize(dict_data).saveAsHadoopDataset(conf)
input_conf = {"mapred.input.dir": basepath + "/olddataset/"}
old_dataset = sorted(self.sc.hadoopRDD(
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable",
conf=input_conf).collect())
self.assertEqual(old_dataset, dict_data)
def test_newhadoop(self):
basepath = self.tempdir.name
data = [(1, ""),
(1, "a"),
(2, "bcdf")]
self.sc.parallelize(data).saveAsNewAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text")
result = sorted(self.sc.newAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
self.assertEqual(result, data)
conf = {
"mapreduce.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.Text",
"mapred.output.dir": basepath + "/newdataset/"
}
self.sc.parallelize(data).saveAsNewAPIHadoopDataset(conf)
input_conf = {"mapred.input.dir": basepath + "/newdataset/"}
new_dataset = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
conf=input_conf).collect())
self.assertEqual(new_dataset, data)
def test_newhadoop_with_array(self):
basepath = self.tempdir.name
# use custom ArrayWritable types and converters to handle arrays
array_data = [(1, array('d')),
(1, array('d', [1.0, 2.0, 3.0])),
(2, array('d', [3.0, 4.0, 5.0]))]
self.sc.parallelize(array_data).saveAsNewAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
result = sorted(self.sc.newAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
self.assertEqual(result, array_data)
conf = {
"mapreduce.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.spark.api.python.DoubleArrayWritable",
"mapred.output.dir": basepath + "/newdataset/"
}
self.sc.parallelize(array_data).saveAsNewAPIHadoopDataset(
conf,
valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
input_conf = {"mapred.input.dir": basepath + "/newdataset/"}
new_dataset = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter",
conf=input_conf).collect())
self.assertEqual(new_dataset, array_data)
def test_newolderror(self):
basepath = self.tempdir.name
rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
basepath + "/newolderror/saveAsHadoopFile/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat"))
self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
basepath + "/newolderror/saveAsNewAPIHadoopFile/",
"org.apache.hadoop.mapred.SequenceFileOutputFormat"))
def test_bad_inputs(self):
basepath = self.tempdir.name
rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
basepath + "/badinputs/saveAsHadoopFile/",
"org.apache.hadoop.mapred.NotValidOutputFormat"))
self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
basepath + "/badinputs/saveAsNewAPIHadoopFile/",
"org.apache.hadoop.mapreduce.lib.output.NotValidOutputFormat"))
def test_converters(self):
# use of custom converters
basepath = self.tempdir.name
data = [(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(3, {2.0: u'dd'})]
self.sc.parallelize(data).saveAsNewAPIHadoopFile(
basepath + "/converters/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
keyConverter="org.apache.spark.api.python.TestOutputKeyConverter",
valueConverter="org.apache.spark.api.python.TestOutputValueConverter")
converted = sorted(self.sc.sequenceFile(basepath + "/converters/").collect())
expected = [(u'1', 3.0),
(u'2', 1.0),
(u'3', 2.0)]
self.assertEqual(converted, expected)
def test_reserialization(self):
basepath = self.tempdir.name
x = range(1, 5)
y = range(1001, 1005)
data = zip(x, y)
rdd = self.sc.parallelize(x).zip(self.sc.parallelize(y))
rdd.saveAsSequenceFile(basepath + "/reserialize/sequence")
result1 = sorted(self.sc.sequenceFile(basepath + "/reserialize/sequence").collect())
self.assertEqual(result1, data)
rdd.saveAsHadoopFile(
basepath + "/reserialize/hadoop",
"org.apache.hadoop.mapred.SequenceFileOutputFormat")
result2 = sorted(self.sc.sequenceFile(basepath + "/reserialize/hadoop").collect())
self.assertEqual(result2, data)
rdd.saveAsNewAPIHadoopFile(
basepath + "/reserialize/newhadoop",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
result3 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newhadoop").collect())
self.assertEqual(result3, data)
conf4 = {
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.dir": basepath + "/reserialize/dataset"}
rdd.saveAsHadoopDataset(conf4)
result4 = sorted(self.sc.sequenceFile(basepath + "/reserialize/dataset").collect())
self.assertEqual(result4, data)
conf5 = {"mapreduce.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.dir": basepath + "/reserialize/newdataset"}
rdd.saveAsNewAPIHadoopDataset(conf5)
result5 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newdataset").collect())
self.assertEqual(result5, data)
def test_unbatched_save_and_read(self):
basepath = self.tempdir.name
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.sc.parallelize(ei, len(ei)).saveAsSequenceFile(
basepath + "/unbatched/")
unbatched_sequence = sorted(self.sc.sequenceFile(
basepath + "/unbatched/",
batchSize=1).collect())
self.assertEqual(unbatched_sequence, ei)
unbatched_hadoopFile = sorted(self.sc.hadoopFile(
basepath + "/unbatched/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
batchSize=1).collect())
self.assertEqual(unbatched_hadoopFile, ei)
unbatched_newAPIHadoopFile = sorted(self.sc.newAPIHadoopFile(
basepath + "/unbatched/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
batchSize=1).collect())
self.assertEqual(unbatched_newAPIHadoopFile, ei)
oldconf = {"mapred.input.dir": basepath + "/unbatched/"}
unbatched_hadoopRDD = sorted(self.sc.hadoopRDD(
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
conf=oldconf,
batchSize=1).collect())
self.assertEqual(unbatched_hadoopRDD, ei)
newconf = {"mapred.input.dir": basepath + "/unbatched/"}
unbatched_newAPIHadoopRDD = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
conf=newconf,
batchSize=1).collect())
self.assertEqual(unbatched_newAPIHadoopRDD, ei)
def test_malformed_RDD(self):
basepath = self.tempdir.name
# non-batch-serialized RDD[[(K, V)]] should be rejected
data = [[(1, "a")], [(2, "aa")], [(3, "aaa")]]
rdd = self.sc.parallelize(data, len(data))
self.assertRaises(Exception, lambda: rdd.saveAsSequenceFile(
basepath + "/malformed/sequence"))
class DaemonTests(unittest.TestCase):
def connect(self, port):
from socket import socket, AF_INET, SOCK_STREAM
sock = socket(AF_INET, SOCK_STREAM)
sock.connect(('127.0.0.1', port))
        # send a split index of -1 to shut down the worker
sock.send("\xFF\xFF\xFF\xFF")
sock.close()
return True
def do_termination_test(self, terminator):
from subprocess import Popen, PIPE
from errno import ECONNREFUSED
# start daemon
daemon_path = os.path.join(os.path.dirname(__file__), "daemon.py")
daemon = Popen([sys.executable, daemon_path], stdin=PIPE, stdout=PIPE)
# read the port number
port = read_int(daemon.stdout)
# daemon should accept connections
self.assertTrue(self.connect(port))
# request shutdown
terminator(daemon)
time.sleep(1)
# daemon should no longer accept connections
try:
self.connect(port)
except EnvironmentError as exception:
self.assertEqual(exception.errno, ECONNREFUSED)
else:
self.fail("Expected EnvironmentError to be raised")
def test_termination_stdin(self):
"""Ensure that daemon and workers terminate when stdin is closed."""
self.do_termination_test(lambda daemon: daemon.stdin.close())
def test_termination_sigterm(self):
"""Ensure that daemon and workers terminate on SIGTERM."""
from signal import SIGTERM
self.do_termination_test(lambda daemon: os.kill(daemon.pid, SIGTERM))
class WorkerTests(PySparkTestCase):
def test_cancel_task(self):
temp = tempfile.NamedTemporaryFile(delete=True)
temp.close()
path = temp.name
def sleep(x):
import os
import time
with open(path, 'w') as f:
f.write("%d %d" % (os.getppid(), os.getpid()))
time.sleep(100)
# start job in background thread
def run():
self.sc.parallelize(range(1)).foreach(sleep)
import threading
t = threading.Thread(target=run)
t.daemon = True
t.start()
daemon_pid, worker_pid = 0, 0
while True:
if os.path.exists(path):
data = open(path).read().split(' ')
daemon_pid, worker_pid = map(int, data)
break
time.sleep(0.1)
# cancel jobs
self.sc.cancelAllJobs()
t.join()
for i in range(50):
try:
os.kill(worker_pid, 0)
time.sleep(0.1)
except OSError:
break # worker was killed
else:
self.fail("worker has not been killed after 5 seconds")
try:
os.kill(daemon_pid, 0)
except OSError:
self.fail("daemon had been killed")
# run a normal job
rdd = self.sc.parallelize(range(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_after_exception(self):
def raise_exception(_):
raise Exception()
rdd = self.sc.parallelize(range(100), 1)
self.assertRaises(Exception, lambda: rdd.foreach(raise_exception))
self.assertEqual(100, rdd.map(str).count())
def test_after_jvm_exception(self):
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write("Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name, 1)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
self.assertRaises(Exception, lambda: filtered_data.count())
rdd = self.sc.parallelize(range(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_accumulator_when_reuse_worker(self):
from pyspark.accumulators import INT_ACCUMULATOR_PARAM
acc1 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(range(100), 20).foreach(lambda x: acc1.add(x))
self.assertEqual(sum(range(100)), acc1.value)
acc2 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(range(100), 20).foreach(lambda x: acc2.add(x))
self.assertEqual(sum(range(100)), acc2.value)
self.assertEqual(sum(range(100)), acc1.value)
class SparkSubmitTests(unittest.TestCase):
def setUp(self):
self.programDir = tempfile.mkdtemp()
self.sparkSubmit = os.path.join(os.environ.get("SPARK_HOME"), "bin", "spark-submit")
def tearDown(self):
shutil.rmtree(self.programDir)
def createTempFile(self, name, content):
"""
Create a temp file with the given name and content and return its path.
Strips leading spaces from content up to the first '|' in each line.
"""
pattern = re.compile(r'^ *\|', re.MULTILINE)
content = re.sub(pattern, '', content.strip())
path = os.path.join(self.programDir, name)
with open(path, "w") as f:
f.write(content)
return path
def createFileInZip(self, name, content):
"""
Create a zip archive containing a file with the given content and return its path.
Strips leading spaces from content up to the first '|' in each line.
"""
pattern = re.compile(r'^ *\|', re.MULTILINE)
content = re.sub(pattern, '', content.strip())
path = os.path.join(self.programDir, name + ".zip")
zip = zipfile.ZipFile(path, 'w')
zip.writestr(name, content)
zip.close()
return path
def test_single_script(self):
"""Submit and test a single script file"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(lambda x: x * 2).collect()
""")
proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 4, 6]", out)
def test_script_with_local_functions(self):
"""Submit and test a single script file calling a global function"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|def foo(x):
| return x * 3
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(foo).collect()
""")
proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[3, 6, 9]", out)
def test_module_dependency(self):
"""Submit and test a script with a dependency on another module"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(myfunc).collect()
""")
zip = self.createFileInZip("mylib.py", """
|def myfunc(x):
| return x + 1
""")
proc = subprocess.Popen([self.sparkSubmit, "--py-files", zip, script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out)
def test_module_dependency_on_cluster(self):
"""Submit and test a script with a dependency on another module on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(myfunc).collect()
""")
zip = self.createFileInZip("mylib.py", """
|def myfunc(x):
| return x + 1
""")
proc = subprocess.Popen([self.sparkSubmit, "--py-files", zip, "--master",
"local-cluster[1,1,512]", script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out)
def test_single_script_on_cluster(self):
"""Submit and test a single script on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|def foo(x):
| return x * 2
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(foo).collect()
""")
# this will fail if you have different spark.executor.memory
# in conf/spark-defaults.conf
proc = subprocess.Popen(
[self.sparkSubmit, "--master", "local-cluster[1,1,512]", script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 4, 6]", out)
class ContextTests(unittest.TestCase):
def test_failed_sparkcontext_creation(self):
# Regression test for SPARK-1550
self.assertRaises(Exception, lambda: SparkContext("an-invalid-master-name"))
def test_stop(self):
sc = SparkContext()
self.assertNotEqual(SparkContext._active_spark_context, None)
sc.stop()
self.assertEqual(SparkContext._active_spark_context, None)
def test_with(self):
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
self.assertEqual(SparkContext._active_spark_context, None)
def test_with_exception(self):
try:
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
raise Exception()
except:
pass
self.assertEqual(SparkContext._active_spark_context, None)
def test_with_stop(self):
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
sc.stop()
self.assertEqual(SparkContext._active_spark_context, None)
@unittest.skipIf(not _have_scipy, "SciPy not installed")
class SciPyTests(PySparkTestCase):
"""General PySpark tests that depend on scipy """
def test_serialize(self):
from scipy.special import gammaln
x = range(1, 5)
expected = map(gammaln, x)
observed = self.sc.parallelize(x).map(gammaln).collect()
self.assertEqual(expected, observed)
@unittest.skipIf(not _have_numpy, "NumPy not installed")
class NumPyTests(PySparkTestCase):
"""General PySpark tests that depend on numpy """
def test_statcounter_array(self):
x = self.sc.parallelize([np.array([1.0, 1.0]), np.array([2.0, 2.0]), np.array([3.0, 3.0])])
s = x.stats()
self.assertSequenceEqual([2.0, 2.0], s.mean().tolist())
self.assertSequenceEqual([1.0, 1.0], s.min().tolist())
self.assertSequenceEqual([3.0, 3.0], s.max().tolist())
self.assertSequenceEqual([1.0, 1.0], s.sampleStdev().tolist())
if __name__ == "__main__":
if not _have_scipy:
print "NOTE: Skipping SciPy tests as it does not seem to be installed"
if not _have_numpy:
print "NOTE: Skipping NumPy tests as it does not seem to be installed"
unittest.main()
if not _have_scipy:
print "NOTE: SciPy tests were skipped as it does not seem to be installed"
if not _have_numpy:
print "NOTE: NumPy tests were skipped as it does not seem to be installed"
|
view_script.pyw
|
#Konstantinos Routsis
import socket, threading, sys, os, time
import tkinter as tk
from tkinter import ttk
from PIL import ImageTk, Image
# office rooms image filenames
ROOM_FILES = os.listdir('./rooms')
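# room names are the filenames with the last four characters (the file extension, e.g. ".png") stripped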
ROOMS = [i[:-4] for i in ROOM_FILES]
connections = {}
client_list = []
input_window = None
room_choice = ROOMS[0]
window = tk.Tk()
img = []
img_size = []
def gui(room_img, change_room = False, start_server_thread = False):
rooms_client_list = []
i = ROOMS.index(room_img)
if start_server_thread:
#create server thread
server_thread = threading.Thread(target=start_server)
server_thread.daemon = True
server_thread.start()
window.title("LAN-Viewer SERVER "+HOST+" ("+str(PORT)+")")
else:
widget_count = len(window.winfo_children())
for child in window.winfo_children():
if widget_count < 4:
child.destroy()
widget_count -= 1
if change_room:
for child in window.winfo_children():
child.destroy()
global canvas
canvas = tk.Canvas(width=800, height=600)
canvas.create_image(0, 0, image=img[i], anchor=tk.NW)
def on_closing():
from tkinter import messagebox
if messagebox.askokcancel("Quit", "Do you want to quit?"):
window.destroy()
#sys.exit()
os._exit(0)
def create_ind(name, ip, pos, active):
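        # draw a status indicator on the canvas: a green (online) or red (offline) circle
        # plus a boxed label with the computer's name, IP and state, all tagged with the IP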
x, y = pos
text = " "+name+"\n IP: "+ip+"\n Active: "+str(active)
if active == True:
color_fill = "green"
else:
color_fill = "red"
oval = canvas.create_oval(x-8, y-8, x+8, y+8, fill=color_fill, tags=ip)
text_item = canvas.create_text(x+10, y-8, anchor="nw", text=text, tags=ip)
bbox = canvas.bbox(text_item)
rect_item = canvas.create_rectangle(bbox, outline="black", fill="white", tags=ip)
canvas.tag_raise(text_item,rect_item)
def scroll_start(event):
canvas.scan_mark(event.x, event.y)
def scroll_move(event):
canvas.scan_dragto(event.x, event.y, gain=1)
def choose_room(selection):
global room_choice
room_choice = selection
gui(room_choice, True)
def prev_room():
global room_choice
i = ROOMS.index(room_choice)
room_choice = ROOMS[i-1]
gui(room_choice, True)
def next_room():
global room_choice
i = ROOMS.index(room_choice)
if i == len(ROOMS)-1:
room_choice = ROOMS[0]
else:
room_choice = ROOMS[i+1]
gui(room_choice, True)
def help_b():
from tkinter import messagebox
messagebox.showinfo(title="Help", message="Press double right click to add a new computer.\nPress right click on existing computer to change its values or delete it.\nUse arrows or menu list to navigate through rooms.\nPress print to take a snapshot of current state of the room.")
def print_b():
t = time.strftime("%d-%m-%Y_%H-%M-%S", time.localtime())
file_name = room_choice+"_"+t+".eps"
canvas.yview_moveto('0.0')
canvas.xview_moveto('0.0')
img_info = "SERVER: "+HOST+" PORT: "+str(PORT)+"\n"+room_choice+"\n"+text_info_room+"\n"+time.strftime("%d-%m-%Y %H:%M:%S", time.localtime())
text_item = canvas.create_text(15, 15, anchor="nw", justify='center', text=img_info)
bbox = canvas.bbox(text_item)
rect_item = canvas.create_rectangle(bbox, outline="black", fill="white")
canvas.tag_raise(text_item,rect_item)
canvas.postscript(file=file_name, width=img_size[i][0], height=img_size[i][1])
canvas.delete(text_item,rect_item)
#img = Image.open("room.eps")
#img.save("room.jpg")
#Mouse events
for num,client in enumerate(client_list):
canvas.tag_bind(client[0],"<ButtonPress-3>", lambda event, num=num, client=client:create_input_window(event, client, num))
canvas.bind("<Double-ButtonPress-3>",lambda event:create_input_window(event))
canvas.bind("<ButtonPress-1>", scroll_start)
canvas.bind("<B1-Motion>", scroll_move)
#Manage clients
for client in client_list:
active = False
for addr in connections.keys():
if client[0] == addr[0]:
active = True
ip = client[0]
name = client[1]
pos = eval(client[2])
if client[3] == room_img:
rooms_client_list.append((client, active))
create_ind(name, ip, pos, active)
#Scrollbars
xsb = tk.Scrollbar(orient="horizontal", command=canvas.xview)
ysb = tk.Scrollbar(orient="vertical", command=canvas.yview)
canvas.configure(yscrollcommand=ysb.set, xscrollcommand=xsb.set)
canvas.configure(scrollregion=(0,0,img_size[i][0],img_size[i][1]))
xsb.grid(row=1, column=0, sticky="ew")
ysb.grid(row=0, column=1, sticky="ns")
window.grid_rowconfigure(0, weight=1)
window.grid_columnconfigure(0, weight=1)
#Info Frame
info_frame = tk.Frame(window, borderwidth=2, relief="raised")
variable = tk.StringVar()
variable.set(room_img) # default value
#Room selection
w = tk.OptionMenu(info_frame, variable, *ROOMS, command=choose_room)
w.pack()
#Room's info
label_frame_room = ttk.LabelFrame(info_frame, text = room_choice)
label_frame_room.pack(expand = 'yes', fill = 'both')
active_clients_in_room = 0
for rooms_client in rooms_client_list:
if rooms_client[1] == True:
active_clients_in_room +=1
text_info_room = str(len(rooms_client_list))+" Computers\n"+str(active_clients_in_room)+" Online\n"+str(len(rooms_client_list) - active_clients_in_room)+" Offline"
label_room = tk.Label(label_frame_room, text=text_info_room, anchor="e", justify=tk.LEFT)
label_room.pack()
#Total info
label_frame_all = ttk.LabelFrame(info_frame, text = "Total")
label_frame_all.pack(expand = True, fill = 'both')
text_info_all = str(len(ROOMS))+" Rooms\n"+str(len(client_list))+" Computers\n"+str(len(connections))+" Online\n"+str(len(client_list)-len(connections))+" Offline"
label_total = tk.Label(label_frame_all, text=text_info_all, anchor="e", justify=tk.LEFT)
label_total.pack()
info_frame.place(x = 10, y = 10)
#Prev Next Buttons
button_frame = tk.Frame(info_frame)
b_previous = tk.Button(button_frame, text =" << ", command=prev_room)
b_previous.pack(side=tk.LEFT)
b_next = tk.Button(button_frame, text =" >> ", command=next_room)
b_next.pack(side=tk.RIGHT)
button_frame.pack()
#Help Print Buttons
help_frame = tk.Frame(info_frame)
help_button = tk.Button(help_frame, text =" Help ", command=help_b)
print_button = tk.Button(help_frame, text =" Print ", command=print_b)
help_button.pack(side=tk.LEFT)
print_button.pack(side=tk.RIGHT)
help_frame.pack()
'''
#Server-Port info
info_frame.update()
lbl_frame = tk.Frame(window, borderwidth=2, relief="raised")
lbl_text = "SERVER",HOST,"(",PORT,")"
lbl = tk.Label(lbl_frame, text=lbl_text, font=("Arial Bold", 12))
lbl.pack()
lbl_frame.place(x = info_frame.winfo_width() + 30, y = 10)
'''
canvas.grid(row=0, column=0, sticky="nsew")
window.protocol("WM_DELETE_WINDOW", on_closing)
window.mainloop()
def create_input_window(event, client=None, num=None):
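    # open a small Toplevel form to add a new computer (double right-click on the canvas)
    # or to change/delete an existing one (right-click on its indicator)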
global input_window
if input_window is not None:
input_window.destroy()
input_window = tk.Toplevel(window)
input_window.resizable(width=False, height=False)
input_window.attributes('-toolwindow', True)
input_window.lift(aboveThis=window)
pos_text = tk.StringVar()
room_text = tk.StringVar()
name_text = tk.StringVar()
ip_text = tk.StringVar()
    if client is None:
delete_state = tk.DISABLED
input_window.title("New Computer")
x, y = canvas.canvasx(event.x), canvas.canvasy(event.y)
pos_text.set(str((x, y)))
room_text.set(room_choice)
else:
delete_state = tk.NORMAL
input_window.title("Change/Delete Computer")
ip_text.set(client[0])
name_text.set(client[1])
pos_text.set(client[2])
room_text.set(client[3])
def delete_computer():
f = open("client_list.txt", "r")
lines = f.readlines()
f.close()
del lines[num]
f = open("client_list.txt", "w")
last_line = len(lines)
for i,line in enumerate(lines):
if i+1 == last_line and num == last_line:
f.write(line[:-1])
else:
f.write(line)
f.close()
canvas.delete(client[0])
def save_input():
if (len(name_text.get()) != 0) and (len(ip_text.get()) != 0) and (len(room_text.get()) != 0):
input_window.destroy()
            if client is not None:
delete_computer()
with open("client_list.txt", "a") as f:
f.write("\n"+ip_text.get()+";"+name_text.get()+";"+pos_text.get()+";"+room_text.get())
client_list.clear()
read_client_list()
gui(room_choice)
else:
from tkinter import messagebox
messagebox.showwarning('Submit new computer','Write a computer name and IP.\n Then press Submit button.')
return False
tk.Label(input_window, text="Position").grid(row=0, pady=1)
tk.Label(input_window, text="Name").grid(row=1, pady=1)
tk.Label(input_window, text="IP").grid(row=2, pady=1)
tk.Label(input_window, text="Room").grid(row=3, pady=1)
e1 = tk.Entry(input_window, textvariable=pos_text)
e2 = tk.Entry(input_window, textvariable=name_text)
e3 = tk.Entry(input_window, textvariable=ip_text)
e4 = tk.Entry(input_window, textvariable=room_text)
e1.grid(row=0, column=1, padx=4, pady=1)
e2.grid(row=1, column=1, padx=4, pady=1)
e3.grid(row=2, column=1, padx=4, pady=1)
e4.grid(row=3, column=1, padx=4, pady=1)
quit_button = tk.Button(input_window, text=' Quit ', command=input_window.destroy)
quit_button.grid(row=4, column=1, sticky='ne', padx=4, pady=4)
clear_button = tk.Button(input_window, text=' Delete ', state=delete_state, command = lambda:[delete_computer(), input_window.destroy()])
clear_button.grid(row=4, column=1, sticky='nw', padx=4, pady=4)
submit_button = tk.Button(input_window, text=' Submit ', command=save_input)
submit_button.grid(row=4, column=0, padx=4, pady=4)
def manage_room_images():
global img
global img_size
for option in ROOM_FILES:
option = "rooms/"+option
raw_img = Image.open(option)
img_width, img_height = raw_img.size
img_size.append((img_width, img_height))
img.append(ImageTk.PhotoImage(raw_img))
def handle_client(conn, addr):
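    # one thread per client: keep the connection registered in connections until recv()
    # fails, then close it, remove it from the registry and schedule a GUI refresh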
connections[addr] = conn
while True:
try:
            # receive message from the client
sig = conn.recv(64)
except Exception as e:
print(addr, ":", e)
            # close the connection and drop it from the active connections
conn.close()
del connections[addr]
window.after(0, lambda:gui(room_choice))
# kill thread
sys.exit()
def start_server():
    # allow up to 4 pending connections in the listen backlog
s.listen(4)
while True:
        # block until a client connects
conn, addr = s.accept()
# create a thread to handle each connection
thread = threading.Thread(target=handle_client, args=(conn, addr))
thread.start()
window.after(0, lambda:gui(room_choice))
def read_client_list():
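    # client_list.txt layout: an optional short first line (<= 6 characters) holding the port
    # number, followed by one client per line in the form "ip;name;(x, y);room"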
global PORT
try:
with open("client_list.txt", "r") as f:
if len(f.readline()) <= 6:
f.seek(0)
PORT = int(next(f))
else:
f.seek(0)
PORT = 5055
for client in f.readlines():
client_list.append(client.strip().split(";"))
except FileNotFoundError:
PORT = 5055
if __name__ == "__main__":
read_client_list()
    # resolve the local host name to get the server IP (the port comes from client_list.txt)
HOST = socket.gethostbyname(socket.gethostname())
#PORT = 5055
# create a socket at server side using TCP / IP protocol
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# bind the socket with server and port number
s.bind((HOST, PORT))
manage_room_images()
gui(room_choice, True, True)
|
example_stream_buffer_extended.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# File: example_stream_buffer_extended.py
#
# Part of ‘UNICORN Binance WebSocket API’
# Project website: https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api
# Documentation: https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api
# PyPI: https://pypi.org/project/unicorn-binance-websocket-api/
#
# Author: Oliver Zehentleitner
# https://about.me/oliver-zehentleitner
#
# Copyright (c) 2019-2021, Oliver Zehentleitner
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from __future__ import print_function
from unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager import BinanceWebSocketApiManager
import logging
import time
import threading
import os
# https://docs.python.org/3/library/logging.html#logging-levels
logging.basicConfig(level=logging.DEBUG,
filename=os.path.basename(__file__) + '.log',
format="{asctime} [{levelname:8}] {process} {thread} {module}: {message}",
style="{")
# create instance of BinanceWebSocketApiManager
binance_websocket_api_manager = BinanceWebSocketApiManager()
markets = ['xrpbearbusd', 'zeceth', 'cndbtc', 'dashbtc', 'atompax', 'perlbtc', 'ardreth', 'zecbnb', 'bchabctusd',
'usdsbusdt', 'winbnb', 'xzcxrp', 'bchusdc', 'wavesbnb', 'kavausdt', 'btsusdt', 'chzbnb', 'tusdbnb',
'xtzbusd', 'bcptusdc', 'dogebnb', 'eosbearusdt', 'ambbnb', 'wrxbnb', 'poabtc', 'wanbtc', 'ardrbtc', 'icnbtc',
'tusdusdt', 'atombusd', 'nxseth', 'bnbusdt', 'trxxrp', 'erdpax', 'erdbtc', 'icxbusd', 'nulsbtc', 'hotusdt',
'wavespax', 'zilbnb', 'arnbtc', 'nulsusdt', 'wintrx', 'npxsbtc', 'busdtry', 'qtumbnb', 'eosbtc', 'xlmpax',
'tomobnb', 'eosbnb', 'engbtc', 'linketh', 'xrpbtc', 'fetbtc', 'stratusdt', 'navbnb', 'bcneth', 'yoyobtc',
'nanobnb', 'saltbtc', 'tfuelusdc', 'skybnb', 'fuelbtc', 'bnbusdc', 'inseth', 'btcpax', 'batbtc', 'rlceth',
'arketh', 'ltcpax', 'ltcbusd', 'duskbtc', 'mftusdt', 'bntusdt', 'mdabtc', 'enjbtc', 'poabnb', 'nanobusd',
'paxtusd', 'hotbtc', 'bcdbtc', 'beambnb', 'trxeth', 'omgbnb', 'cdtbtc', 'eosusdc', 'dashbusd', 'cocosbtc',
'dasheth', 'xrptusd', 'atomtusd', 'rcneth', 'rpxeth', 'xlmusdc', 'aionbusd', 'nxsbtc', 'chateth', 'repbtc',
'tctusdt', 'linkusdt', 'nasbtc', 'usdsusdc', 'xvgbtc', 'elfeth', 'ctxcbtc', 'cmteth', 'gnteth', 'usdspax',
'zilbtc', 'batpax', 'stratbtc', 'xzcbtc', 'iotausdt', 'etcbnb', 'ankrusdt', 'xlmeth', 'loombtc', 'erdusdc',
'rdnbnb', 'icneth', 'vetbtc', 'cvcusdt', 'ftmpax', 'ethbullusdt', 'edoeth', 'steemeth', 'gobnb', 'hsrbtc',
'ambbtc', 'bchabcbtc', 'dntbtc', 'btctusd', 'denteth', 'snglsbtc', 'eosbullusdt', 'xlmtusd', 'tnteth',
'sysbnb', 'renusdt', 'zrxusdt', 'xlmbtc', 'stormbtc', 'ncashbnb', 'omgusdt', 'troyusdt', 'venbtc', 'modbtc',
'dogepax', 'ontusdc', 'eurbusd', 'tctbnb', 'gxsbtc', 'celrbnb', 'adausdt', 'beambtc', 'elfbtc', 'celrbtc',
'rvnusdt', 'poaeth', 'wavesusdc', 'trxbnb', 'trxusdc', 'ethbearusdt', 'ethpax', 'bateth', 'kavabtc',
'paxbtc', 'trigbnb', 'btcusdc', 'oneusdc', 'xrptry', 'stxusdt', 'strateth', 'lendeth', 'neousdc',
'mithusdt', 'btcngn', 'blzeth', 'evxeth', 'dnteth', 'grsbtc', 'arneth', 'iotabnb', 'waneth', 'xtzbnb',
'subeth', 'btsbtc', 'cvceth', 'ethusdc', 'etctusd', 'cloakbtc', 'grseth', 'eospax', 'cdteth', 'bchusdt',
'lskusdt', 'enjbusd', 'drepbtc', 'manaeth', 'tomousdt', 'algobnb', 'wtceth', 'linkpax', 'batbnb', 'sceth',
'rvnbusd', 'cvcbnb', 'manabtc', 'gasbtc', 'stxbtc', 'cloaketh', 'neotusd', 'lrceth', 'thetabtc', 'dogeusdt',
'aionbnb', 'viabtc', 'keyeth', 'nanoeth', 'ncasheth', 'bgbpusdc', 'ltobnb', 'snmeth', 'adabtc', 'btseth',
'qtumbusd', 'wtcbnb', 'dcrbtc', 'fttbnb', 'paxbnb', 'insbtc', 'gntbnb', 'etheur', 'dashusdt', 'rcnbtc',
'btcusdt', 'wanusdt', 'powrbnb', 'xmrbnb', 'trigeth', 'xzceth', 'bchbtc', 'qspbnb', 'scbnb', 'mcoeth',
'powrbtc', 'algotusd', 'ankrbtc', 'tusdeth', 'keybtc', 'usdcusdt', 'ftmusdc', 'atombnb', 'zenbtc', 'dockbtc',
'neobtc', 'phbbnb', 'bnbpax', 'brdbnb', 'trxusdt', 'trxbusd', 'mtlbtc', 'ftmtusd', 'perlusdc', 'mithbnb',
'eosbullbusd', 'reqeth', 'bccbnb', 'veneth', 'loombnb', 'trxpax', 'usdcpax', 'stormusdt', 'ognbtc', 'gvtbtc',
'iotaeth', 'naseth', 'drepusdt', 'gvteth', 'wrxusdt', 'bchabcpax', 'ongbtc', 'usdcbnb', 'dgdeth', 'salteth',
'mtleth', 'bcnbnb', 'neblbnb', 'wanbnb', 'ontusdt', 'npxsusdt', 'mftbtc', 'eosbearbusd', 'bntbtc', 'gtoeth',
'modeth', 'etcusdc', 'veteth', 'bcptpax', 'atomusdc', 'duskpax', 'kavabnb', 'lunbtc', 'adxbtc', 'bnteth',
'funbtc', 'knceth', 'dogebtc', 'bchsvpax', 'bcpttusd', 'osteth', 'oaxeth', 'wabibtc', 'appcbtc', 'qkcbtc',
'nanousdt', 'wingsbtc', 'hbarusdt', 'eurusdt', 'waveseth', 'asteth', 'linkbusd', 'btttusd', 'zecusdc',
'bnbusds', 'linkbtc', 'venusdt', 'hotbnb', 'usdtrub', 'tctbtc', 'ankrpax', 'btctry', 'adabnb', 'polybtc',
'bcceth', 'enjeth', 'bnbbusd', 'repbnb', 'bullusdt', 'vitebtc', 'btgbtc', 'renbtc', 'thetausdt', 'troybtc',
'dentbtc', 'ostbtc', 'nxsbnb', 'mithbtc', 'xmrbtc', 'tomobtc', 'nulseth', 'phbbtc', 'duskbnb', 'yoyoeth',
'ontbusd', 'btgeth', 'etcusdt', 'atomusdt', 'hcbtc', 'brdbtc', 'fttbtc', 'celrusdt', 'lskbnb', 'phbpax',
'xtzbtc', 'batusdt', 'viteusdt', 'trxbtc', 'bchtusd', 'xtzusdt', 'ftmbtc', 'enjbnb', 'arkbtc', 'wavesusdt',
'ftmusdt', 'neobusd', 'stormbnb', 'luneth', 'gntbtc', 'gtousdt', 'chzusdt', 'sntbtc', 'bandbnb', 'hoteth',
'wingseth', 'mcobtc', 'docketh', 'drepbnb', 'eosusdt', 'eostusd', 'npxseth', 'thetaeth', 'iotxbtc', 'phxbnb',
'enjusdt', 'tfuelbnb', 'mcobnb', 'ontpax', 'dcrbnb', 'batusdc', 'snglseth', 'qlcbtc', 'qspeth', 'cndeth',
'appcbnb', 'wprbtc', 'sysbtc', 'iostusdt', 'btceur', 'mtlusdt', 'ethrub', 'tfuelpax', 'maticusdt', 'ftmbnb',
'xrpbusd', 'iotxusdt', 'tusdbtusd', 'trigbtc', 'atombtc', 'bchpax', 'eosbusd', 'zileth', 'gtotusd',
'xrpbullusdt', 'onetusd', 'algobtc', 'bchsvusdt', 'gtopax', 'etceth', 'vibebtc', 'bttusdt', 'repeth',
'iostbnb', 'usdttry', 'btsbnb', 'ankrbnb', 'dltbnb', 'snteth', 'linktusd', 'nknusdt', 'rpxbtc', 'rdneth',
'cocosusdt', 'etcbusd', 'btttrx', 'bandbtc', 'steembnb', 'zecpax', 'viabnb', 'cosbnb', 'mtheth', 'xrpusdc',
'xemeth', 'pivxbnb', 'phxbtc', 'zilusdt', 'poeeth', 'bnbeur', 'bandusdt', 'vetbnb', 'lendbtc', 'xlmbnb',
'duskusdt', 'mfteth', 'funusdt', 'adabusd', 'perlbnb', 'btcbusd', 'ltobtc', 'nasbnb', 'algousdt', 'zeneth',
'bchsvusdc', 'mcousdt', 'venbnb', 'hceth', 'fetusdt', 'edobtc', 'mftbnb', 'cosusdt', 'arpausdt', 'xmrusdt',
'ctxcusdt', 'bqxbtc', 'npxsusdc', 'icxbnb', 'bchbnb', 'phbusdc', 'tomousdc', 'nulsbnb', 'rcnbnb', 'arpabnb',
'qtumbtc', 'keyusdt', 'agibtc', 'mblbtc', 'eoseth', 'tusdbtc', 'aioneth', 'storjbtc', 'lsketh', 'bchsvbtc',
'bntbusd', 'ncashbtc', 'mblbnb', 'polybnb', 'aebnb', 'ltceth', 'dogeusdc', 'wpreth', 'syseth', 'bcnbtc',
'ognusdt', 'nanobtc', 'astbtc', 'zrxeth', 'adxeth', 'gxseth', 'ethbearbusd', 'onepax', 'scbtc', 'icxbtc',
'ontbnb', 'qlceth', 'btsbusd', 'rlcbtc', 'chatbtc', 'wabibnb', 'renbnb', 'xrpbullbusd', 'wavesbtc', 'funeth',
'rlcbnb', 'phxeth', 'winbtc', 'storjeth', 'wavesbusd', 'iostbtc', 'icxeth', 'adatusd', 'nknbnb', 'btcrub',
'pivxbtc', 'perlusdt', 'bullbusd', 'bttusdc', 'bcptbtc', 'aebtc', 'ethusdt', 'ltousdt', 'subbtc', 'thetabnb',
'blzbtc', 'tfuelusdt', 'evxbtc', 'hbarbtc', 'ambeth', 'winusdt', 'qtumeth', 'dgdbtc', 'adaeth', 'busdusdt',
'xrpbnb', 'adapax', 'usdsbusds', 'cocosbnb', 'navbtc', 'rvnbtc', 'tnbbtc', 'bnbbtc', 'neopax', 'bearusdt',
'usdstusd', 'snmbtc', 'rvnbnb', 'gtobnb', 'phbtusd', 'hcusdt', 'btcusds', 'reqbtc', 'ognbnb', 'lrcbtc',
'xrpeth', 'loometh', 'zectusd', 'vibeeth', 'gobtc', 'bnbtry', 'bcdeth', 'qkceth', 'neoeth', 'paxusdt',
'bchsvtusd', 'fetbnb', 'yoyobnb', 'xlmbusd', 'skyeth', 'paxeth', 'ltcbtc', 'xvgeth', 'tnbeth', 'stratbusd',
'agieth', 'xlmusdt', 'lskbtc', 'bearbusd', 'hsreth', 'ctxcbnb', 'oaxbtc', 'qspbtc', 'iotxeth', 'qlcbnb',
'algousdc', 'etcpax', 'fueleth', 'aionusdt', 'xmreth', 'maticbtc', 'dashbnb', 'oneusdt', 'brdeth', 'viaeth',
'omgeth', 'ankrtusd', 'usdsusdt', 'ethtusd', 'wavestusd', 'iosteth', 'cmtbnb', 'ostbnb', 'ltcusdt', 'ethtry',
'zrxbtc', 'bchabcusdt', 'onebnb', 'beamusdt', 'nebleth', 'bcptbnb', 'adxbnb', 'ontbtc', 'bttbnb', 'dockusdt',
'bccbtc', 'omgbtc', 'algopax', 'neousdt', 'xrprub', 'busdngn', 'appceth', 'dentusdt', 'xzcbnb', 'tfueltusd',
'xembnb', 'arpabtc', 'ankrusdc', 'adausdc', 'kmdeth', 'troybnb', 'bnbeth', 'ltcusdc', 'databtc', 'blzbnb',
'naveth', 'btcbbtc', 'battusd', 'bnbngn', 'bchbusd', 'busdrub', 'ltctusd', 'vetbusd', 'ongbnb', 'fttusdt',
'bccusdt', 'ongusdt', 'engeth', 'usdctusd', 'etcbtc', 'gtousdc', 'mdaeth', 'vitebnb', 'erdusdt', 'dltbtc',
'bnbtusd', 'wtcbtc', 'xrpusdt', 'xrpeur', 'agibnb', 'trxtusd', 'ethbullbusd', 'iotabtc', 'xembtc',
'bchabcusdc', 'duskusdc', 'xrppax', 'mblusdt', 'kmdbtc', 'neblbtc', 'maticbnb', 'bnbrub', 'bcpteth',
'bttbtc', 'stxbnb', 'dlteth', 'onteth', 'vetusdt', 'ppteth', 'ethbtc', 'onebtc', 'ethbusd', 'zecbtc',
'erdbnb', 'xrpbearusdt', 'stratbnb', 'cmtbtc', 'cvcbtc', 'kncbtc', 'rpxbnb', 'zenbnb', 'cndbnb', 'ardrbnb',
'bchabcbusd', 'ltcbnb', 'pivxeth', 'skybtc', 'tntbtc', 'poebtc', 'steembtc', 'icxusdt', 'tfuelbtc', 'chzbtc',
'vibeth', 'winusdc', 'gtobtc', 'linkusdc', 'batbusd', 'rdnbtc', 'dataeth', 'bttpax', 'zrxbnb', 'vibbtc',
'neobnb', 'cosbtc', 'powreth', 'rlcusdt', 'hbarbnb', 'wabieth', 'bqxeth', 'aionbtc', 'aeeth', 'mthbtc',
'wrxbtc', 'pptbtc', 'nknbtc', 'zecusdt', 'stormeth', 'qtumusdt']
channels = ['kline_1m', 'kline_5m', 'kline_15m', 'kline_30m', 'kline_1h', 'kline_12h', 'kline_1w', 'trade',
'miniTicker', 'depth20']
for channel in channels:
binance_websocket_api_manager.create_stream(channel, markets, stream_buffer_name=channel)
def print_stream_data_from_stream_buffer(binance_websocket_api_manager):
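    # worker loop: drain the dedicated "trade" stream_buffer and print each record;
    # records that cannot be processed are written back to the stream_buffer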
print("print trades only")
time.sleep(10)
while True:
if binance_websocket_api_manager.is_manager_stopping():
exit(0)
oldest_stream_data_from_stream_buffer = binance_websocket_api_manager.pop_stream_data_from_stream_buffer("trade")
if oldest_stream_data_from_stream_buffer is False:
time.sleep(0.01)
else:
try:
print(oldest_stream_data_from_stream_buffer)
except Exception:
# not able to process the data? write it back to the stream_buffer
binance_websocket_api_manager.add_to_stream_buffer(oldest_stream_data_from_stream_buffer)
# start a worker thread to move the received stream_data from the stream_buffer to a print function
worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(binance_websocket_api_manager,))
worker_thread.start()
|
client.py
|
import pygame
from game import Game
import constants as c
import socket
import time
import pickle
from sprite import Sprite
import threading
def network_data_handle():
""" The method handles the in/out data when playing over a network or internet
It is used as a thread to decouple the pygame loop from the data handling
so the conection won't affect the fps of the game
"""
global client, game
while not game.done:
try:
data = client.recv(2048)
sprites_dict_data, death_info, message, scoreboard_data = pickle.loads(data)
if message:
game.message = message
for key, sprites in sprites_dict_data.items():
for sprite in sprites:
if sprite.is_player:
sprite.image = game.player_image
else:
sprite.image = game.projectile_image
if death_info[0] and death_info[1]:
game.posX, game.posY = death_info
game.sprites = sprites_dict_data.copy()
game.scoreboard_data = scoreboard_data.copy()
except Exception as e:
print(e)
try:
projectile_data = 0
if game.shoot:
game.shoot = False
projectile_data = Sprite(game.posX, game.posY, game.dirX, game.dirY, 0, 0.2)
send_data = pickle.dumps((game.posX, game.posY, projectile_data))
client.send(send_data)
except Exception as e:
print(e)
time.sleep(0.001)
client.close()
pygame.init()
screen = pygame.display.set_mode((c.SCREEN_WIDTH, c.SCREEN_HEIGHT))
clock = pygame.time.Clock()
game = Game()
# Hide the mouse pointer and grab the input
# This enables pygame's relative virtual mouse movement, used for mouse-driven camera movement
pygame.mouse.set_visible(False)
pygame.event.set_grab(True)
# Read txt config file to check if game is local or over network/internet
f = open("ip_port_to_connect.txt", "r")
CONDITION, PLAYER_NAME, IP, PORT = f.read().splitlines()
print("1: {} 2: {} 3: {} 4: {}".format(CONDITION, PLAYER_NAME, IP, PORT))
if CONDITION == "YES" or CONDITION == "Yes" or CONDITION == "yes":
game.is_connected = True
print("Connected? {}".format(game.is_connected))
PORT = int(PORT)
f.close()
# If connected, create a connection with the server, send the player name and receive the client ID
if game.is_connected:
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
addr = (IP, PORT)
client.connect(addr)
client.send(str.encode(PLAYER_NAME))
val = client.recv(8)
print("Received id: {}".format(val.decode()))
game.my_id = int(val)
t = threading.Thread(target=network_data_handle)
t.start()
# Main loop
while not game.done:
events = pygame.event.get()
game.draw(screen)
game.input_handle()
pygame.display.flip()
clock.tick()
pygame.quit()
|
simulation_master_heat_datamanager.py
|
import HEAT as heat
import elinkmanager
from datamanager import DataManager
from counterdown import CounterDown
import threading
from logger import InfoLogger, DataLogger, AdcsLogger
from time import sleep
import sys
#import RPi.GPIO as GPIO
import json
import Paths as paths
import Pins as pins
class Master:
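    # singleton that wires up the e-link manager, data manager and HEAT subsystem,
    # each in its own thread, and polls the command vector until KILL is set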
__instance = None
def __init__(self, ground_ip):
self.status_vector = dict()
self.command_vector = dict()
self.ground_ip = ground_ip
self.info_logger = InfoLogger()
self.data_logger = DataLogger()
self.adcs_logger = AdcsLogger()
self.elink = elinkmanager.ELinkManager(self,self.ground_ip)
self.thread_elink = None
self.data_manager = DataManager(self, self.info_logger, self.data_logger)
self.thread_data_manager = None
self.heat = heat.HEAT(self)
self.thread_heat = None
self.counterdown = CounterDown(self)
self.paths = paths.Paths()
self.pin_powerB = pins.Pins().pin_powerB # @TODO change it in boot/config.txt
#GPIO.setmode(GPIO.BOARD)
#GPIO.setup(self.pin_powerB, GPIO.OUT)
Master.__instance = self
@staticmethod
def get_instance():
if Master.__instance is None:
Master()
return Master.__instance
def start(self):
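        # main loop: poll the command vector until KILL, handle REBOOT_SLAVE/REBOOT
        # requests, then persist the status vector and terminate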
self.init_experiment()
while not self.get_command('KILL'):
sleep(self.counterdown.master_time_runs)
if self.get_command('REBOOT_SLAVE'):
self.command_vector['REBOOT_SLAVE'] = 0
self.reboot_slave()
if self.get_command('REBOOT'):
pass
json.dump(self.status_vector, open(self.paths.file_status_vector, 'w'))
# kill threads
self.status_vector['KILL'] = 1
self.info_logger.write_warning('MASTER_ESRANGE: SHADE IS TERMINATED')
print('shade is terminated')
# @TODO RESTART SHADE n REBOOT
def init_experiment(self):
self.status_vector = json.load(open(self.paths.file_status_vector))
self.command_vector = json.load(open(self.paths.file_command_vector))
self.init_elink()
self.init_data_manager()
self.init_subsystems()
def init_elink(self):
self.thread_elink = threading.Thread(target=self.elink.start).start()
def init_data_manager(self):
pass
# self.thread_data_manager = threading.Thread(target=self.data_manager.start).start()
def init_subsystems(self):
self.thread_heat = threading.Thread(target=self.heat.start).start()
def get_command(self, command):
try:
return self.command_vector[command]
        except KeyError:
return 0
def reboot_slave(self):
pass
        # power off and power on the other Raspberry Pi (slave)
#GPIO.output(self.pin_powerB, GPIO.LOW)
#GPIO.output(self.pin_powerB, GPIO.HIGH)
if __name__ == "__main__":
if len(sys.argv) != 2:
print("""
[+] Run master program with one argument.
[+] The argument indicates the ground IP
[+] e.g python master_esrange.py 195.168.0.1
[+] For Testing purposes use 'local' as argument
[+] to simulate a connection locally
[+] e.g python master_esrange.py local
""")
else:
print("""
This is a program to test only heating control.
Use commands:
[+] HEAT_SLEEP #to force close the heating control
[+] HEAT_AWAKE #to recall auto mode of heating control
[+] KILL #to kill program
Choose where to collect data (random or from data_manager) via the HEAT class
- threaded_function_data()
""")
ground_ip = sys.argv[1]
Master(ground_ip).start()
|
exporter.py
|
#!/usr/bin/python
# vim: tabstop=4 expandtab shiftwidth=4
import argparse
import requests
import re
import time
import threading
from datetime import datetime
from os import environ
from . import lib
try:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from SocketServer import ThreadingMixIn
except ImportError:
# Python 3
unicode = str
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
parser = argparse.ArgumentParser(description='simple stellar-core Prometheus exporter/scraper')
parser.add_argument('--stellar-core-address', type=str,
help='Stellar core address. Defaults to STELLAR_CORE_ADDRESS environment '
'variable or if not set to http://127.0.0.1:11626',
default=environ.get('STELLAR_CORE_ADDRESS', 'http://127.0.0.1:11626'))
parser.add_argument('--port', type=int,
help='HTTP bind port. Defaults to PORT environment variable '
'or if not set to 9473',
default=int(environ.get('PORT', '9473')))
args = parser.parse_args()
class _ThreadingSimpleServer(ThreadingMixIn, HTTPServer):
"""Thread per request HTTP server."""
# Copied from prometheus client_python
daemon_threads = True
class StellarCoreHandler(BaseHTTPRequestHandler):
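    # on every scrape, query stellar-core's /metrics, /info and /getcursor endpoints
    # and re-expose the data in the Prometheus text exposition format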
def log_message(self, format, *args):
return
def get_labels(self):
try:
response = requests.get(self.info_url)
json = response.json()
build = json['info']['build']
network = json['info']['network']
except Exception:
return ['unknown', 'unknown', 'unknown', 'unknown', 'unknown']
match = self.build_regex.match(build)
        build = re.sub(r'\s', '_', build).lower()
        build = re.sub(r'\(|\)', '', build)
if not match:
return ['unknown', 'unknown', 'unknown', build, network]
labels = [
match.group(2),
match.group(3),
match.group(4),
build,
network,
]
return labels
def buckets_to_metrics(self, metric_name, buckets):
        # Convert a raw libmedida bucket metric into Prometheus Histogram and Summary samples
unit = buckets['boundary_unit']
description = 'libmedida metric type: ' + buckets['type']
measurements = []
for bucket in buckets['buckets']:
measurements.append({
'boundary': lib.duration_to_seconds(bucket['boundary'], unit),
'count': bucket['count'],
'sum': bucket['sum']
}
)
count_value = 0
sum_value = 0
for m in sorted(measurements, key=lambda i: i['boundary']):
# Buckets from core contain only values from their respective ranges.
            # Prometheus expects "le" buckets to be cumulative so we need some extra math
count_value += m['count']
sum_value += lib.duration_to_seconds(m['sum'], unit)
# Treat buckets larger than 30d as infinity
if float(m['boundary']) > 30 * 86400:
bucket = '+Inf'
else:
bucket = m['boundary']
self.registry.Histogram(metric_name, description,
bucket=bucket,
value=count_value,
)
self.registry.Summary(metric_name, description,
count_value=count_value,
sum_value=sum_value,
)
def set_vars(self):
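        # per-request setup: endpoint URLs, expected info keys, metric name mappings,
        # version labels and a fresh metric registry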
self.info_url = args.stellar_core_address + '/info'
self.metrics_url = args.stellar_core_address + '/metrics'
self.cursors_url = args.stellar_core_address + '/getcursor'
self.info_keys = ['ledger', 'network', 'peers', 'protocol_version', 'quorum', 'startedOn', 'state']
self.state_metrics = ['booting', 'joining scp', 'connected', 'catching up', 'synced', 'stopping']
self.ledger_metrics = {'age': 'age', 'baseFee': 'base_fee', 'baseReserve': 'base_reserve',
'closeTime': 'close_time', 'maxTxSetSize': 'max_tx_set_size',
'num': 'num', 'version': 'version'}
self.quorum_metrics = ['agree', 'delayed', 'disagree', 'fail_at', 'missing']
self.quorum_phase_metrics = ['unknown', 'prepare', 'confirm', 'externalize']
# Examples:
# "stellar-core 11.1.0-unstablerc2 (324c1bd61b0e9bada63e0d696d799421b00a7950)"
# "stellar-core 11.1.0 (324c1bd61b0e9bada63e0d696d799421b00a7950)"
# "v11.1.0"
        self.build_regex = re.compile(r'(stellar-core|v) ?(\d+)\.(\d+)\.(\d+).*$')
self.label_names = ["ver_major", "ver_minor", "ver_patch", "build", "network"]
self.labels = self.get_labels()
self.registry = lib.Registry(default_labels=tuple(zip(self.label_names, self.labels)))
self.content_type = str('text/plain; version=0.0.4; charset=utf-8')
def error(self, code, msg):
self.send_response(code)
self.send_header('Content-Type', self.content_type)
self.end_headers()
self.wfile.write('{}\n'.format(msg).encode('utf-8'))
def do_GET(self):
self.set_vars()
###########################################
# Export metrics from the /metrics endpoint
###########################################
try:
response = requests.get(self.metrics_url)
except requests.ConnectionError:
self.error(504, 'Error retrieving data from {}'.format(self.metrics_url))
return
if not response.ok:
self.error(504, 'Error retrieving data from {}'.format(self.metrics_url))
return
try:
metrics = response.json()['metrics']
except ValueError:
self.error(500, 'Error parsing metrics JSON data')
return
# iterate over all metrics
for k in metrics:
            metric_name = re.sub(r'\.|-|\s', '_', k).lower()
metric_name = 'stellar_core_' + metric_name
if metrics[k]['type'] == 'timer':
# we have a timer, expose as a Prometheus Summary
# we convert stellar-core time units to seconds, as per Prometheus best practices
metric_name = metric_name + '_seconds'
if 'sum' in metrics[k]:
# use libmedida sum value
total_duration = metrics[k]['sum']
else:
# compute sum value
total_duration = (metrics[k]['mean'] * metrics[k]['count'])
self.registry.Summary(metric_name, 'libmedida metric type: ' + metrics[k]['type'],
count_value=metrics[k]['count'],
sum_value=lib.duration_to_seconds(total_duration, metrics[k]['duration_unit']),
)
# add stellar-core calculated quantiles to our summary
self.registry.Gauge(metric_name, 'libmedida metric type: ' + metrics[k]['type'],
labels=tuple(zip(self.label_names+['quantile'], self.labels+[0.75])),
value=lib.duration_to_seconds(metrics[k]['75%'], metrics[k]['duration_unit']),
)
self.registry.Gauge(metric_name, 'libmedida metric type: ' + metrics[k]['type'],
labels=tuple(zip(self.label_names+['quantile'], self.labels+[0.99])),
value=lib.duration_to_seconds(metrics[k]['99%'], metrics[k]['duration_unit']),
)
elif metrics[k]['type'] == 'histogram':
if 'count' not in metrics[k]:
# Stellar-core version too old, we don't have required data
continue
self.registry.Summary(metric_name, 'libmedida metric type: ' + metrics[k]['type'],
count_value=metrics[k]['count'],
sum_value=metrics[k]['sum'],
)
# add stellar-core calculated quantiles to our summary
self.registry.Gauge(metric_name, 'libmedida metric type: ' + metrics[k]['type'],
labels=tuple(zip(self.label_names+['quantile'], self.labels+[0.75])),
value=metrics[k]['75%'],
)
self.registry.Gauge(metric_name, 'libmedida metric type: ' + metrics[k]['type'],
labels=tuple(zip(self.label_names+['quantile'], self.labels+[0.99])),
value=metrics[k]['99%'],
)
elif metrics[k]['type'] == 'counter':
# we have a counter, this is a Prometheus Gauge
self.registry.Gauge(metric_name, 'libmedida metric type: ' + metrics[k]['type'],
value=metrics[k]['count']
)
elif metrics[k]['type'] == 'meter':
# we have a meter, this is a Prometheus Counter
self.registry.Counter(metric_name, 'libmedida metric type: ' + metrics[k]['type'],
value=metrics[k]['count']
)
elif metrics[k]['type'] == 'buckets':
# We have a bucket, this is a Prometheus Histogram
self.buckets_to_metrics(metric_name, metrics[k])
#######################################
# Export metrics from the info endpoint
#######################################
try:
response = requests.get(self.info_url)
except requests.ConnectionError:
self.error(504, 'Error retrieving data from {}'.format(self.info_url))
return
if not response.ok:
self.error(504, 'Error retrieving data from {}'.format(self.info_url))
return
try:
info = response.json()['info']
except ValueError:
self.error(500, 'Error parsing info JSON data')
return
if not all([i in info for i in self.info_keys]):
self.error(500, 'Error - info endpoint did not return all required fields')
return
# Ledger metrics
for core_name, prom_name in self.ledger_metrics.items():
self.registry.Gauge('stellar_core_ledger_{}'.format(prom_name),
'Stellar core ledger metric name: {}'.format(core_name),
value=info['ledger'][core_name],
)
# Version 11.2.0 and later report quorum metrics in the following format:
# "quorum" : {
# "qset" : {
# "agree": 3
#
# Older versions use this format:
# "quorum" : {
# "758110" : {
# "agree" : 3,
if 'qset' in info['quorum']:
tmp = info['quorum']['qset']
else:
tmp = list(info['quorum'].values())[0]
if not tmp:
self.error(500, 'Error - missing quorum data')
return
for metric in self.quorum_metrics:
self.registry.Gauge('stellar_core_quorum_{}'.format(metric),
'Stellar core quorum metric: {}'.format(metric),
tmp[metric]
)
for metric in self.quorum_phase_metrics:
if tmp['phase'].lower() == metric:
value = 1
else:
value = 0
self.registry.Gauge('stellar_core_quorum_phase_{}'.format(metric),
'Stellar core quorum phase {}'.format(metric),
value=value,
)
# Versions >=11.2.0 expose more info about quorum
if 'transitive' in info['quorum']:
if info['quorum']['transitive']['intersection']:
value = 1
else:
value = 0
self.registry.Gauge('stellar_core_quorum_transitive_intersection',
'Stellar core quorum transitive intersection',
value=value,
)
self.registry.Gauge('stellar_core_quorum_transitive_last_check_ledger',
'Stellar core quorum transitive last_check_ledger',
value=info['quorum']['transitive']['last_check_ledger'],
)
self.registry.Gauge('stellar_core_quorum_transitive_node_count',
'Stellar core quorum transitive node_count',
value=info['quorum']['transitive']['node_count'],
)
# Versions >=11.3.0 expose "critical" key
if 'critical' in info['quorum']['transitive']:
if info['quorum']['transitive']['critical']:
for peer_list in info['quorum']['transitive']['critical']:
critical_peers = ','.join(sorted(peer_list)) # label value is a comma-separated list of peers
self.registry.Gauge('stellar_core_quorum_transitive_critical',
'Stellar core quorum transitive critical',
labels=tuple(zip(self.label_names+['critical_validators'],
self.labels+[critical_peers])),
value=1,
)
else:
self.registry.Gauge('stellar_core_quorum_transitive_critical',
'Stellar core quorum transitive critical',
labels=tuple(zip(self.label_names+['critical_validators'], self.labels+['null'])),
value=0,
)
# Peers metrics
self.registry.Gauge('stellar_core_peers_authenticated_count',
'Stellar core authenticated_count count',
value=info['peers']['authenticated_count'],
)
self.registry.Gauge('stellar_core_peers_pending_count',
'Stellar core pending_count count',
value=info['peers']['pending_count'],
)
self.registry.Gauge('stellar_core_protocol_version',
'Stellar core protocol_version',
value=info['protocol_version'],
)
for metric in self.state_metrics:
name = re.sub(r'\s', '_', metric)
if info['state'].lower().startswith(metric): # Use startswith to work around "!"
value = 1
else:
value = 0
self.registry.Gauge('stellar_core_{}'.format(name),
'Stellar core state {}'.format(metric),
value=value,
)
date = datetime.strptime(info['startedOn'], "%Y-%m-%dT%H:%M:%SZ")
self.registry.Gauge('stellar_core_started_on', 'Stellar core start time in epoch',
value=int(date.strftime('%s')),
)
#######################################
# Export cursor metrics
#######################################
try:
response = requests.get(self.cursors_url)
except requests.ConnectionError:
self.error(504, 'Error retrieving data from {}'.format(self.cursors_url))
return
# Some server modes we want to scrape do not support the 'getcursors' command at all.
# They respond with a 404 and a non-JSON listing of the supported commands.
if not response.ok and response.status_code != 404:
self.error(504, 'Error retrieving data from {}'.format(self.cursors_url))
return
if "Supported HTTP commands" not in str(response.content):
try:
cursors = response.json()['cursors']
except ValueError:
self.error(500, 'Error parsing cursor JSON data')
return
for cursor in cursors:
if not cursor:
continue
cursor_name = cursor.get('id').strip()
self.registry.Gauge('stellar_core_active_cursors',
'Stellar core active cursors',
labels=tuple(zip(self.label_names+['cursor_name'], self.labels+[cursor_name])),
value=cursor['cursor'],
)
#######################################
# Render output
#######################################
output = self.registry.render()
if not output:
self.error(500, 'Error - no metrics were generated')
return
self.send_response(200)
self.send_header('Content-Type', self.content_type)
self.end_headers()
self.wfile.write(output)
def main():
httpd = _ThreadingSimpleServer(("", args.port), StellarCoreHandler)
t = threading.Thread(target=httpd.serve_forever)
t.daemon = True
t.start()
while True:
time.sleep(1)
if __name__ == "__main__":
main()
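# --- Illustrative sketch (not part of the exporter above) ---------------------
# The handler converts libmedida timer durations to seconds before exposing
# them as Prometheus Summaries. The helper lib.duration_to_seconds comes from
# an accompanying module that is not shown in this excerpt; a minimal version
# could plausibly look like this sketch (the unit names are assumptions):
def _duration_to_seconds_sketch(value, duration_unit):
    """Convert a libmedida duration value to seconds (assumed unit names)."""
    divisors = {
        'nanoseconds': 1e9,
        'microseconds': 1e6,
        'milliseconds': 1e3,
        'seconds': 1.0,
    }
    return value / divisors.get(duration_unit, 1.0)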
|
milvus_bootcamp.py
|
# -*- coding: UTF-8 -*-
import datetime
import time
import os
import sys, getopt
import random
from collections import defaultdict
import numpy as np
from milvus import Milvus, Prepare, IndexType, Status
from multiprocessing import Process
from functools import reduce
import struct
import psycopg2
from enum import Enum
MILVUS = Milvus()
SERVER_ADDR = "0.0.0.0"
SERVER_PORT = 19530
TABLE_DIMENSION = 128
FILE_PREFIX = "binary_"
INSERT_BATCH = 10000
FILE_GT = 'ground_truth_all'
FILE_GT_T = 'ground_truth.txt'
file_index = 0
A_results = 'accuracy_results'
P_results = 'performance_results'
NQ = 0
TOPK = 0
ALL = False
host="127.0.0.1"
port=5432
user="zilliz_support"
password="zilliz123"
database="postgres"
FOLDER_NAME ='/data/lcl/ann/100_ann_test/bvecs_data'
PG_FLAG = False
nq_scope = [1,50,100,150,200,250,300,350,400,450,500,550,600,650,700,750,800]
topk_scope = [1,20,50,100,300,500,800,1000]
# load the first nq query vectors from the query file
def load_nq_vec(nq):
file_query = 'query.npy'
data = np.load(file_query)
vectors = data.tolist()
vec_list = []
for i in range(nq):
vec_list.append(vectors[i])
return vec_list
# load vectors from file_name; num limits how many vectors are returned (0 means all)
def load_vec_list(file_name,num=0):
data = np.load(file_name)
vec_list = []
nb = len(data)
if(num!=0):
for i in range(num):
vec_list.append(data[i].tolist())
return vec_list
for i in range(nb):
vec_list.append(data[i].tolist())
return vec_list
# Calculate the Euclidean distance
def calEuclideanDistance(vec1,vec2):
vec1 = np.array(vec1)
vec2 = np.array(vec2)
dist = np.sqrt(np.sum(np.square(vec1 - vec2)))
return dist
# generate the ground truth file
def get_ground_truth(nq, topk ,idx,vct_nq,nb):
filenames = os.listdir(FOLDER_NAME)  # get all file names in the folder
filenames.sort()
no_dist = {}
re = []
k = 0
for filename in filenames:
vet_list = load_vec_list_from_file(FOLDER_NAME+'/'+filename)
for j in range(len(vet_list)):
dist = calEuclideanDistance(vct_nq,vet_list[j])
j += k*nb
if(j<topk):
no_dist[j] = dist
else:
# replace the current farthest neighbor if this one is closer
max_key = max(no_dist,key=no_dist.get)
max_value = no_dist[max_key]
if(dist < max_value):
m = no_dist.pop(max_key)
no_dist[j] = dist
k = k+1
no_dist = sorted(no_dist.items(), key=lambda x: x[1])
for n in no_dist:
num = "%03d%06d\n" % (n[0]//nb,n[0]%nb)
re.append(num)
save_gt_file(re,idx)
def get_ground_truth_txt(file):
filenames = os.listdir(FILE_GT)
filenames.sort()
write_file = open(file,'w+')
for f in filenames:
f = './'+FILE_GT+'/'+f
for line in open(f,'r'):
write_file.write(line)
def ground_truth_process(nq=NQ,topk=TOPK):
import os
try:
os.mkdir('./'+ FILE_GT)
except:
print('There already exists a folder named', FILE_GT, '!')
else:
vectors = load_nq_vec(nq)
filenames = os.listdir(FOLDER_NAME)  # get all file names in the folder
filenames.sort()
vet_nb = load_vec_list_from_file(FOLDER_NAME+'/'+filenames[0])
nb = len(vet_nb)
processes = []
process_num = 2
loops = nq // process_num
time_start = time.time()
for loop in range(loops):
base = loop * process_num
print('base:',base)
for i in range(process_num):
print('nq_index:', base+i)
# seed = np.random.RandomState(base+i)
process = Process(target=get_ground_truth, args=(nq, topk, base+i ,vectors[base+i],nb))
processes.append(process)
process.start()
for p in processes:
p.join()
time_end = time.time()
time_cost = time_end - time_start
get_ground_truth_txt(FILE_GT_T)
print("time = ",round(time_cost,6),"\nGet the ground truth successfully!")
# save the id to the file
def save_gt_file(results,idx):
s = "%05d" % idx
fname = './'+FILE_GT+'/'+ s + 'ground_truth.txt'
with open(fname,'a+') as f:
for re in results:
f.write(re)
f.write('\n')
# connect to the milvus server
def connect_server():
print("connect to milvus")
status = MILVUS.connect(host=SERVER_ADDR, port=SERVER_PORT,timeout = 1000 * 1000 * 20 )
# handle_status(status=status)
return status
def connect_postgres(ids_idmap):
conn = psycopg2.connect(host=host,port=port,user=user,password=password,database=database)
cur = conn.cursor()
sql = "select idoffset from idmap_ann_100m where ids=" + str(ids_idmap)
cur.execute(sql)
rows=cur.fetchall()
for row in rows:
location=str(row[0])
conn.close()
return location
# map the returned milvus ids to vector locations via PostgreSQL and save them to file
def save_id_to_file_pg(results,table_name,gt = False):
filename_id = table_name+"_idmap.txt"
if gt == True:
filename = table_name + '_gt_output.txt'
else:
filename = table_name + '_search_output.txt'
with open(filename,'w') as f:
for r in results:
for score in r:
index = None
index = connect_postgres(score.id)
if index != None:
f.write(index + '\n')
f.write('\n')
# map the returned milvus ids to vector locations via get_id.sh and save them to file
def save_id_to_file_txt(results,table_name,gt = False):
filename_id = table_name+"_idmap.txt"
if gt == True:
filename = table_name + '_gt_output.txt'
else:
filename = table_name + '_search_output.txt'
with open(filename,'w') as f:
for r in results:
for score in r:
index = None
linen = str(score.id)
output = os.popen('./get_id.sh'+ ' '+linen +' '+ filename_id)
index = output.read()
if index != None:
f.write(index)
index = None
f.write('\n')
# get the recall and write the results to file
def compare(table_name,results,nq,topk,rand,time_cost,topk_ground_truth,all_out=ALL):
filename = table_name + '_gt_output.txt'
num=[]
for line in open(filename):
if line != "\n":
line=line.strip()
num.append(line)
com_vec=[]
for line in open('ground_truth.txt'):
if line != "\n":
line=line.strip()
com_vec.append(line)
accuracy, accuracy_all=compare_correct(nq,topk,num,com_vec,rand,topk_ground_truth)
if all_out == True:
result_output_all(nq,topk,com_vec,rand,num,accuracy,time_cost,results,topk_ground_truth,accuracy_all)
else:
result_output(nq,topk,com_vec,rand,num,accuracy,time_cost,results,accuracy_all)
# get the recall
def compare_correct(nq,topk,num,com_vec,rand,topk_ground_truth):
correct=[]
correct_all = 0
i=0
while i<nq:
j=0
count=0
results = []
ground_truth = []
while j<topk:
# if num[i*topk+j] == com_vec[rand[i]*topk_ground_truth+j]:
# count=count+1
results.append(num[i*topk+j])
ground_truth.append(com_vec[rand[i]*topk_ground_truth+j])
j=j+1
union = list(set(results).intersection(set(ground_truth)))
count = len(union)
correct.append(count/topk)
correct_all += count
i=i+1
correct_all = correct_all/nq/topk
return correct,correct_all
# write the detailed per-query results to a CSV file
def result_output_all(nq,topk,com_vec,rand,num,accuracy,time_cost,results,topk_ground_truth,accuracy_all):
filename = str(nq)+"_"+str(topk) + '_result_all.csv'
count=0
with open(filename,'w') as f:
f.write('topk,remote ID,ground truth ID,search result,distance,time,recall' + '\n')
i=0
while i<nq:
j=0
while j<topk:
line=str(topk) + ',' + str(com_vec[rand[i]*topk_ground_truth]) + ',' + str(com_vec[rand[i]*topk_ground_truth+j]) + ',' + str(num[i*topk+j]) + ',' + str(round(results[i][j].distance,3)) + ',' + str(round(time_cost/nq,5)) + ',' + str(accuracy[i]*100) + '%' + '\n'
f.write(line)
j=j+1
i=i+1
f.write('total accuracy,'+str(accuracy_all*100)+'%')
f.close()
# write the summary results to a CSV file
def result_output(nq,topk,com_vec,rand,num,accuracy,time_cost,results,accuracy_all):
if not os.path.exists(A_results):
os.mkdir(A_results)
filename = './' + A_results + '/' + str(nq)+"_"+str(topk) + '_result.csv'
count=0
with open(filename,'w') as f:
f.write('nq,topk,total_time,avg_time,recall' + '\n')
i=0
while i<nq:
line=str(i+1) + ','+ str(topk) + ',' + str(round(time_cost,4))+ ',' + str(round(time_cost/nq,5))+ ',' + str(accuracy[i]*100) + '%' + '\n'
f.write(line)
i=i+1
f.write('average accuracy:'+str(accuracy_all*100)+'%'+'\n'+'max accuracy:'+str(max(accuracy)*100)+'%'+'\n'+'min accuracy:'+str(min(accuracy)*100)+'%')
f.close()
# get the nq_ground_truth and topk_ground_truth
def get_nq_topk(filename):
nq = 0
topk = 0
for line in open(filename):
if line == "\n":
nq += 1
elif nq<1:
topk += 1
return nq,topk
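# Note on the ground_truth.txt layout assumed by get_nq_topk(): for each query,
# save_gt_file() writes topk location ids (one per line, formatted as
# "%03d%06d" = file index + row index) followed by a single blank line, so the
# number of blank lines equals nq and the number of lines before the first
# blank line equals topk.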
# -s
# search the vectors from milvus and write the results
def search_vec_list(table_name,nq=0,topk=0,all_out=ALL):
query_list = []
if nq!=0 and topk!=0:
if NQ==0 and TOPK==0:
nq_ground_truth,topk_ground_truth = get_nq_topk('ground_truth.txt')
else:
nq_ground_truth = NQ
topk_ground_truth = TOPK
# print(nq_ground_truth)
vectors = load_nq_vec(nq_ground_truth)
rand = sorted(random.sample(range(0,nq_ground_truth),nq))
for i in rand:
query_list.append(vectors[i])
print("searching table name:", table_name, "\nnum of query list:", len(query_list), "top_k:", topk)
time_start = time.time()
status, results = MILVUS.search_vectors(table_name=table_name, query_records=query_list, top_k=topk)
time_end = time.time()
time_cost=time_end - time_start
print("time_search=", time_end - time_start)
time_start = time.time()
if PG_FLAG:
save_id_to_file_pg(results, table_name, gt=True)
else:
save_id_to_file_txt(results, table_name, gt=True)
time_end = time.time()
time_cost=time_end - time_start
print("time_save=", time_end - time_start)
compare(table_name,results,nq,topk,rand,time_cost,topk_ground_truth,all_out)
else:
random1 = datetime.datetime.now().strftime("%m%d%H%M")
if not os.path.exists(P_results):
os.mkdir(P_results)
filename = './' + P_results + '/' + str(random1) + '_results.csv'
file = open(filename,"w+")
file.write('nq,topk,total_time,avg_time' + '\n')
for nq in nq_scope:
query_list = load_nq_vec(nq)
print(len(query_list))
for k in topk_scope:
time_start = time.time()
status, results = MILVUS.search_vectors(table_name=table_name, query_records=query_list, top_k=k)
time_end = time.time()
time_cost = time_end - time_start
line=str(nq) + ',' + str(k) + ',' + str(round(time_cost,4)) + ',' + str(round(time_cost/nq,4)) + '\n'
file.write(line)
print(nq, k, time_cost)
file.write('\n')
file.close()
print("search_vec_list done !")
# -b
def search_binary_vec_list(table_name,nq,k):
query_list = load_nq_vec(nq)
time_start = time.time()
status, results = MILVUS.search_vectors(table_name=table_name, query_records=query_list, top_k=k)
time_end = time.time()
if PG_FLAG:
save_id_to_file_pg(results, table_name, gt=True)
else:
save_id_to_file_txt(results, table_name, gt=True)
print(k, nq, 'time = ',time_end - time_start)
print("search_binary_vec_list done !")
# -p
def compare_binary(file01, file02):
file1 = file01 + '_search_output.txt'
file2 = file02 + '_search_output.txt'
files = [file1, file2]
list1 = []
list2 = []
print('begin to compare')
fname = file01 + "_"+ file02 + '_result.csv'
for filename in files:
with open(filename,'r') as f:
ids = []
for line in f.readlines():
line = line.split('\n')[0]
# print(line)
if (len(line) == 0) or (line == '####') or (line==None):
if filename == file1:
list1.append(ids)
else:
list2.append(ids)
ids = []
else:
ids.append(line)
res = []
match_total = 0
with open(fname,'w') as f:
f.write('nq,topk,recall' + '\n')
for nq in range(len(list1)):
union = [i for i in list1[nq] if i in list2[nq]]
line=str(nq) + ','+ str(len(list1[0])) + ','+ str(len(union)/len(list1[0]) * 100) + '%' + '\n'
f.write(line)
match_total += len(union)
overall_acc =match_total / len(list1[0]) / len(list1)
f.write('overall_acc,'+str(overall_acc * 100)+'%')
print('overall acc =', overall_acc * 100, '%')
# generate nb normalized random vectors
def gen_vec_list(nb, seed=np.random.RandomState(1234)):
xb = seed.rand(nb, TABLE_DIMENSION).astype("float32")
vec_list = xb.tolist()
for i in range(len(vec_list)):
vec = vec_list[i]
square_sum = reduce(lambda x,y:x+y, map(lambda x:x*x ,vec))
sqrt_square_sum = np.sqrt(square_sum)
coef = 1/sqrt_square_sum
vec = list(map(lambda x:x*coef, vec))
vec_list[i] = vec
return vec_list
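# Illustrative alternative (not used above): the per-vector normalization in
# gen_vec_list() can also be written with vectorized numpy operations. This is
# only a sketch for comparison; nothing in this script calls it.
def gen_vec_list_np_sketch(nb, seed=None):
    """Generate nb random vectors scaled to unit L2 norm (numpy sketch)."""
    rs = seed if seed is not None else np.random.RandomState(1234)
    xb = rs.rand(nb, TABLE_DIMENSION).astype("float32")
    norms = np.linalg.norm(xb, axis=1, keepdims=True)
    return (xb / norms).tolist()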
# define the file name
def gen_file_name(idx):
s = "%05d" % idx
fname = FILE_PREFIX + str(TABLE_DIMENSION) + "d_" + s
fname = './'+FOLDER_NAME+'/'+fname
return fname
# save the list
def save_vec_list_to_file(nb, idx, seed):
time_start = time.time()
vec_list = gen_vec_list(nb, seed)
fname = gen_file_name(idx)
np.save(fname, vec_list)
time_end = time.time()
print("generate file:", fname, " time cost:", time_end - time_start)
# -g
def generate_files(nfiles, nrows):
# mkdir
import os
try:
os.mkdir('./'+FOLDER_NAME)
except:
print('There already exists a folder named', FOLDER_NAME)
else:
processes = []
process_num = 1
loops = nfiles // process_num
for loop in range(loops):
base = loop * process_num
# print('base:',base)
for i in range(process_num):
# print('file_index:', base+i)
seed = np.random.RandomState(base+i)
process = Process(target=save_vec_list_to_file, args=(nrows, base + i, seed))
processes.append(process)
process.start()
for p in processes:
p.join()
# get the table's rows
def table_rows(table_name):
print(table_name,'has',MILVUS.get_table_row_count(table_name)[1],'rows')
def table_show():
print(MILVUS.show_tables()[1])
def has_table(table_name):
return MILVUS.has_table(table_name)
# load vectors from the file (all of them when nb == 0)
def load_vec_list_from_file(file_name, nb = 0):
import numpy as np
data = np.load(file_name)
data = (data + 0.5) / 255
vec_list = []
if nb == 0:
nb = len(data)
for i in range(nb):
vec_list.append(data[i].tolist())
return vec_list
# add vectors to table_name with milvus
def add_vec_to_milvus(vec_list,table_name):
time_start = time.time()
batch_begine = 0
batch_end = INSERT_BATCH*TABLE_DIMENSION
while(True):
if batch_begine >= len(vec_list):
break
if batch_end > len(vec_list):
batch_end = len(vec_list)
batch_vectors = vec_list[batch_begine:batch_end]
vectors = batch_vectors
status, ids = MILVUS.add_vectors(table_name=table_name, records=vectors)
record_id_vecid(ids,table_name = table_name)
handle_status(status=status)
batch_end += INSERT_BATCH*TABLE_DIMENSION
batch_begine += INSERT_BATCH*TABLE_DIMENSION
time_end = time.time()
print("insert vectors:", len(vec_list), " time cost:", time_end - time_start)
# record the id map (milvus id -> vector location code)
def record_id_vecid(ids,table_name):
global file_index
filename = table_name+'_idmap.txt'
with open(filename,'a') as f:
for i in range(len(ids)):
line = str(ids[i]) + " %03d%06d\n" % (file_index,i)
f.write(line)
file_index += 1
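# Note: each line written above pairs a milvus-assigned id with a location code
# "%03d%06d" % (file_index, i), i.e. insert-batch index plus row index. The
# ground-truth generator uses the same encoding (file number plus row number),
# which is what allows compare() to match search results against the ground
# truth by location code (these line up when each data file is inserted as a
# single batch).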
# check the milvus status and exit on failure
def handle_status(status):
if status.code != Status.SUCCESS:
print(status)
sys.exit(2)
# add the vectors from the data files to milvus
def add_somefiles_vec_to_milvus(nfiles = 0, table_name= ''):
import os
filenames = os.listdir(FOLDER_NAME)  # get all file names in the folder
filenames.sort(key=lambda x: int(x.split('.')[0][-5:]))
if nfiles > 0 and nfiles < len(filenames):
filenames = filenames[:nfiles]
for filename in filenames:
vec_list = load_vec_list_from_file(FOLDER_NAME+'/'+filename)
add_vec_to_milvus(vec_list,table_name)
#-t
# create the table with milvus
def create_table(table_name, index_type):
if(index_type == 'flat'):
tt = IndexType.FLAT
elif(index_type == 'ivf'):
tt = IndexType.IVFLAT
elif(index_type == 'ivfsq8'):
tt = IndexType.IVF_SQ8
param = {'table_name':table_name, 'dimension':TABLE_DIMENSION, 'index_type':tt, 'store_raw_vector':False}
print("create table: ", table_name, " dimension:", TABLE_DIMENSION," index_type:",tt)
return MILVUS.create_table(param)
# delete the table with milvus
def delete_table(table_name ):
print("delete table:", table_name)
import os
return MILVUS.delete_table(table_name= table_name)
def build_index(table_name):
print("build index with table:", table_name)
return MILVUS.build_index(table_name)
def main():
try:
opts, args = getopt.getopt(
sys.argv[1:],
"hlsgtan:m:q:k:bdp",
["help", "list", "search","generate","transform","delete","nb=","ivf=","flat=","table=","num=","nq=","topk=","index=","rows","show","compare","add","build"],
)
except getopt.GetoptError:
print("Usage: test.py -q <nq> -k <topk> -t <table> -l -s")
sys.exit(2)
num = None
all_out = False
nq = 0
topk = 0
for opt_name, opt_value in opts:
if opt_name in ("-h", "--help"):
print("test.py -q <nq> -k <topk> -t <table> -l -s")
sys.exit()
elif opt_name == "--table":
table_name = opt_value
elif opt_name in ("-q", "--nq"):
nq = int(opt_value)
elif opt_name in ("-k", "--topk"):
topk = int(opt_value)
elif opt_name in ("-n", "--nb"):
nb = int(opt_value)
elif opt_name in ("-m", "--num"):
num = int(opt_value)
elif opt_name in ("-a", "--all"):
all_out = True
elif opt_name in ("-g", "--generate"): #test.py -m <num> -n <nb> --g
generate_files(num, nb)
elif opt_name == "--ivf":
ivf_table_name = opt_value
elif opt_name == "--flat":
flat_table_name = opt_value
elif opt_name == "--index":
indextype = opt_value
elif opt_name in ("-t", "--transfer"): #test.py -m <num> --table <tablename> --index <index> -t
connect_server()
if num == None:
num = 0
create_table(table_name,indextype)
add_somefiles_vec_to_milvus(nfiles=num, table_name=table_name)
elif opt_name == "--add": #test.py -m <num> --table <tablename> -add
connect_server()
if num == None:
num = 0
if has_table(table_name) == True:
add_somefiles_vec_to_milvus(nfiles=num, table_name=table_name)
else:
print("please create the table first!")
elif opt_name == "--rows": #test.py --table <tablename> --rows
connect_server()
table_rows(table_name)
elif opt_name in ("-d", "--delete"): #test.py --table <tablename> -d
connect_server()
delete_table(table_name=table_name)
import os
if os.path.exists(table_name + '_idmap.txt'):
os.remove(table_name + '_idmap.txt')
elif opt_name in ("-l", "--list"): #test.py -q <nq> -k <topk> -l
ground_truth_process(nq,topk)
sys.exit()
elif opt_name == "-s":
connect_server()
search_vec_list(table_name,nq,topk,all_out) #test.py --table <tablename> -q <nq> -k <topk> [-a] -s
sys.exit()
elif opt_name == "-b":
connect_server()
search_binary_vec_list(table_name,nq,topk) #test.py --table <tablename> -q <nq> -k <topk> -b
elif opt_name in ("-p","--compare"):
compare_binary(ivf_table_name, flat_table_name) #test.py --ivf <ivf_tbname> --flat <flat_tbname> -p
elif opt_name == "--show":
connect_server() #test.py --show
table_show()
elif opt_name == "--build":
connect_server() #test.py --table <table_name> --build
build_index(table_name)
if __name__ == '__main__':
main()
|
connection.py
|
import sched
from threading import Thread
from collections import defaultdict
import logging
import time
import json
from liquidtap import websocket
class Connection(Thread):
def __init__(self, event_handler, url, reconnect_handler=None, log_level=None,
daemon=True, reconnect_interval=10, socket_kwargs=None, **thread_kwargs):
self.event_handler = event_handler
self.url = url
self.reconnect_handler = reconnect_handler or (lambda: None)
self.socket = None
self.socket_id = ""
self.event_callbacks = defaultdict(list)
self.disconnect_called = False
self.needs_reconnect = False
self.default_reconnect_interval = reconnect_interval
self.reconnect_interval = reconnect_interval
self.socket_kwargs = socket_kwargs or dict()
self.pong_timer = None
self.pong_received = False
self.pong_timeout = 30
self.bind("pusher:connection_established", self._connect_handler)
self.bind("pusher:connection_failed", self._failed_handler)
self.bind("pusher:pong", self._pong_handler)
self.bind("pusher:ping", self._ping_handler)
self.bind("pusher:error", self._pusher_error_handler)
self.state = "initialized"
self.logger = logging.getLogger(self.__module__) # create a new logger
if log_level:
self.logger.setLevel(log_level)
if log_level == logging.DEBUG:
websocket.enableTrace(True)
# From Martyn's comment at:
# https://pusher.tenderapp.com/discussions/problems/36-no-messages-received-after-1-idle-minute-heartbeat
# "We send a ping every 5 minutes in an attempt to keep connections
# alive..."
# This is why we set the connection timeout to 5 minutes, since we can
# expect a pusher heartbeat message every 5 minutes. Adding 5 sec to
# account for small timing delays which may cause messages to not be
# received in exact 5 minute intervals.
self.connection_timeout = 305
self.connection_timer = None
self.ping_interval = 120
self.ping_timer = None
self.timeout_scheduler = sched.scheduler(
time.time,
sleep_max_n(min([self.pong_timeout, self.connection_timeout, self.ping_interval]))
)
self.timeout_scheduler_thread = None
Thread.__init__(self, **thread_kwargs)
self.daemon = daemon
self.name = "PysherEventLoop"
def bind(self, event_name, callback, *args, **kwargs):
"""Bind an event to a callback
:param event_name: The name of the event to bind to.
:type event_name: str
:param callback: The callback to notify of this event.
"""
self.event_callbacks[event_name].append((callback, args, kwargs))
def disconnect(self, timeout=None):
self.needs_reconnect = False
self.disconnect_called = True
if self.socket:
self.socket.close()
self.join(timeout)
def reconnect(self, reconnect_interval=None):
if reconnect_interval is None:
reconnect_interval = self.default_reconnect_interval
self.logger.info("Connection: Reconnect in %s" % reconnect_interval)
self.reconnect_interval = reconnect_interval
self.needs_reconnect = True
if self.socket:
self.socket.close()
def run(self):
self._connect()
def _connect(self):
self.state = "connecting"
self.socket = websocket.WebSocketApp(
self.url,
on_open=self._on_open,
on_message=self._on_message,
on_error=self._on_error,
on_close=self._on_close
)
self.socket.run_forever(**self.socket_kwargs)
while self.needs_reconnect and not self.disconnect_called:
self.logger.info("Attempting to connect again in %s seconds."
% self.reconnect_interval)
self.state = "unavailable"
time.sleep(self.reconnect_interval)
# We need to set this flag since closing the socket will set it to
# false
self.socket.keep_running = True
self.socket.run_forever(**self.socket_kwargs)
def _on_open(self):
self.logger.info("Connection: Connection opened")
# Send a ping right away to show that the connection is alive. If we
# don't, it takes a full ping interval before we can subscribe to channels
# and events.
self.send_ping()
self._start_timers()
def _on_error(self, error):
self.logger.info("Connection: Error - %s" % error)
self.state = "failed"
self.needs_reconnect = True
def _on_message(self, message):
self.logger.info("Connection: Message - %s" % message)
# Stop our timeout timer, since we got some data
self._stop_timers()
params = self._parse(message)
if 'event' in params.keys():
if 'channel' not in params.keys():
# We've got a connection event. Let's handle it.
if params['event'] in self.event_callbacks.keys():
for func, args, kwargs in self.event_callbacks[params['event']]:
try:
func(params['data'], *args, **kwargs)
except Exception:
self.logger.exception("Callback raised unhandled")
else:
self.logger.info("Connection: Unhandled event")
else:
# We've got a channel event. Let's pass it up to the pusher
# so it can be handled by the appropriate channel.
self.event_handler(
params['event'],
params['data'],
params['channel']
)
# We've handled our data, so restart our connection timeout handler
self._start_timers()
def _on_close(self, *args):
self.logger.info("Connection: Connection closed")
self.state = "disconnected"
self._stop_timers()
@staticmethod
def _parse(message):
return json.loads(message)
def _stop_timers(self):
for event in self.timeout_scheduler.queue:
self._cancel_scheduler_event(event)
def _start_timers(self):
self._stop_timers()
self.ping_timer = self.timeout_scheduler.enter(self.ping_interval, 1, self.send_ping)
self.connection_timer = self.timeout_scheduler.enter(self.connection_timeout, 2, self._connection_timed_out)
if not self.timeout_scheduler_thread:
self.timeout_scheduler_thread = Thread(target=self.timeout_scheduler.run, daemon=True, name="PysherScheduler")
self.timeout_scheduler_thread.start()
elif not self.timeout_scheduler_thread.is_alive():
self.timeout_scheduler_thread = Thread(target=self.timeout_scheduler.run, daemon=True, name="PysherScheduler")
self.timeout_scheduler_thread.start()
def _cancel_scheduler_event(self, event):
try:
self.timeout_scheduler.cancel(event)
except ValueError:
self.logger.info('Connection: Scheduling event already cancelled')
def send_event(self, event_name, data, channel_name=None):
"""Send an event to the Pusher server.
:param str event_name:
:param Any data:
:param str channel_name:
"""
event = {'event': event_name, 'data': data}
if channel_name:
event['channel'] = channel_name
self.logger.info("Connection: Sending event - %s" % json.dumps(event))
try:
self.socket.send(json.dumps(event))
except Exception as e:
self.logger.error("Failed send event: %s" % e)
def send_ping(self):
self.logger.info("Connection: ping to pusher")
try:
self.socket.send(json.dumps({'event': 'pusher:ping', 'data': ''}))
except Exception as e:
self.logger.error("Failed send ping: %s" % e)
self.pong_timer = self.timeout_scheduler.enter(self.pong_timeout, 3, self._check_pong)
def send_pong(self):
self.logger.info("Connection: pong to pusher")
try:
self.socket.send(json.dumps({'event': 'pusher:pong', 'data': ''}))
except Exception as e:
self.logger.error("Failed send pong: %s" % e)
def _check_pong(self):
self._cancel_scheduler_event(self.pong_timer)
if self.pong_received:
self.pong_received = False
else:
self.logger.info("Did not receive pong in time. Will attempt to reconnect.")
self.state = "failed"
self.reconnect()
def _connect_handler(self, data):
parsed = data
if isinstance(data, str):
parsed = json.loads(data)
self.socket_id = parsed['socket_id']
self.state = "connected"
if self.needs_reconnect:
# Since we've opened a connection, we don't need to try to reconnect
self.needs_reconnect = False
self.reconnect_handler()
self.logger.debug('Connection: Established reconnection')
else:
self.logger.debug('Connection: Established first connection')
def _failed_handler(self, data):
self.state = "failed"
def _ping_handler(self, data):
self.send_pong()
# Restart our timers since we received something on the connection
self._start_timers()
def _pong_handler(self, data):
self.logger.info("Connection: pong from pusher")
self.pong_received = True
def _pusher_error_handler(self, data):
if 'code' in data:
try:
error_code = int(data['code'])
except (TypeError, ValueError):
error_code = None
if error_code is not None:
self.logger.error("Connection: Received error %s" % error_code)
if (error_code >= 4000) and (error_code <= 4099):
# The connection SHOULD NOT be re-established unchanged
self.logger.info("Connection: Error is unrecoverable. Disconnecting")
self.disconnect()
elif (error_code >= 4100) and (error_code <= 4199):
# The connection SHOULD be re-established after backing off
self.reconnect()
elif (error_code >= 4200) and (error_code <= 4299):
# The connection SHOULD be re-established immediately
self.reconnect(0)
else:
pass
else:
self.logger.error("Connection: Unknown error code")
else:
self.logger.error("Connection: No error code supplied")
def _connection_timed_out(self):
self.logger.info("Did not receive any data in time. Reconnecting.")
self.state = "failed"
self.reconnect()
def sleep_max_n(max_sleep_time):
def sleep(time_to_sleep):
time.sleep(min(max_sleep_time, time_to_sleep))
return sleep
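# --- Usage sketch (illustrative only) ------------------------------------------
# A minimal way this Connection class might be driven: register an event handler
# for channel events and start the thread. The URL below is a placeholder, not a
# real endpoint.
if __name__ == "__main__":
    def print_event(event, data, channel):
        print("event=%s channel=%s data=%s" % (event, channel, data))

    conn = Connection(print_event,
                      "wss://example.invalid/app/APP_KEY?protocol=7",  # placeholder URL
                      log_level=logging.INFO)
    conn.bind("pusher:connection_established", lambda data: print("connected:", data))
    conn.start()          # runs the websocket loop in a daemon thread
    time.sleep(5)         # keep the main thread alive briefly for the demo
    conn.disconnect()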
|
profiling_executor.py
|
#!/usr/bin/python3
"""
profExecutor encapsulates the complexity of driving the nvprof profiler
tool.
"""
import os
import subprocess
import sys
import time
from threading import Thread
class profException(Exception):
pass
class profExecutor:
_NAME = "nvprof"
def __init__(self, nvprofpath=None):
self.nvprof = self._seeknvprof() if nvprofpath is None else nvprofpath
if self.nvprof is None:
raise profException('Profiler (%s) not found!' % (profExecutor._NAME))
def _isexecutable(self, path):
try:
profExecutor._execute([path])
except FileNotFoundError as e:
return False
return True
def _seeknvprof(self):
profiler = profExecutor._NAME
if( self._isexecutable(profiler) ):
return profiler
if 'CUDA_PATH' in os.environ:
cuda_install_path = os.environ['CUDA_PATH']
if( self._isexecutable(cuda_install_path+"/bin/"+profiler) ):
return cuda_install_path+"/bin/"+profiler
DEFAULT_CUDA_PATH_LIN = '/usr/local/cuda/'
if( self._isexecutable(DEFAULT_CUDA_PATH_LIN+profiler) ):
return DEFAULT_CUDA_PATH_LIN+profiler
DEFAULT_CUDA_PATH_WIN = '%CUDA_PATH%/bin/'
if( self._isexecutable(DEFAULT_CUDA_PATH_WIN+profiler) ):
return DEFAULT_CUDA_PATH_WIN+profiler
return None
def _progresswait(self, proc):
CHARS = r'/|-\|'
counter = 0
while proc.poll() is None:
print("%c\b" % (CHARS[counter]), end='', flush=True)
time.sleep(0.1)
counter = (counter + 1) % len(CHARS)
def execute(self, arguments, message=None, device=None):
envvars = {}
if device is not None:
envvars['CUDA_VISIBLE_DEVICES'] = str(device)
proc = profExecutor._execute( [self.nvprof]+arguments, envvars )
(stdout, stderr) = (proc.stdout, proc.stderr)
if message is not None:
print("%s... " % (message), end='', flush=True)
wait_thread = Thread(target=profExecutor._progresswait, args=(self, proc))
wait_thread.start()
(output, errors) = proc.communicate()
wait_thread.join()
lines_out = output.splitlines()
lines_err = errors.splitlines()
if message is not None:
if proc.returncode==0:
print("Done")
else:
print("Error code: %d" % (proc.returncode))
raise profException("Profiling returned non zero error code. Profiler error output follows:\n%s" % (errors))
return (lines_out, lines_err)
def query_driver(self):
"""Query the running NVidia GPU driver via nvidia-smi tool."""
try:
proc = profExecutor._execute( ['nvidia-smi', '-q'] )
(stdout, stderr) = (proc.stdout, proc.stderr)
print("Querying GPU driver version (via nvidia-smi)... ", end='', flush=True)
wait_thread = Thread(target=profExecutor._progresswait, args=(self, proc))
wait_thread.start()
(output, errors) = proc.communicate()
wait_thread.join()
lines_out = output.splitlines()
#lines_err = errors.splitlines()
if proc.returncode==0:
print("Done")
else:
print("Error code: %d" % (proc.returncode))
raise profException("Profiling returned non zero error code. Profiler error output follows:\n%s" % (errors))
ver_line = filter(lambda x: x.startswith('Driver Version'), lines_out)
return next(ver_line).split()[-1]
except Exception as e:
print("Warning: nvidia-smi {}".format(str(e)))
return '-'
@staticmethod
def _execute(arguments, envvars=None):
#nvprof --devices 0 --query-metrics
#print("DEBUG: executing:'%s'" % (' '.join(arguments)))
myenv = os.environ.copy()
if envvars is not None:
myenv.update(envvars)
#if device is not None:
# myenv["CUDA_VISIBLE_DEVICES"] = str(device)
proc = subprocess.Popen(arguments, env=myenv, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
return proc
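# NOTE: the statements below are unreachable because of the return above; they
# look like leftover debug code for inspecting the process output directly.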
(proc_out, proc_err) = (proc.stdout, proc.stderr)
print('proc_out:',proc_out)
errors = proc_err.read()
print(errors)
if len(errors)>0:
print( 'Error: '+errors)
lines_out = proc_out.read()
if len(lines_out)>0:
print( 'stdout: '+lines_out)
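# --- Usage sketch (illustrative only) ------------------------------------------
# How this executor might be driven from a calling script. The nvprof arguments
# and the application path are placeholders, not values taken from this module.
if __name__ == "__main__":
    try:
        executor = profExecutor()                       # locates nvprof automatically
        print("Driver version:", executor.query_driver())
        out_lines, err_lines = executor.execute(
            ["--print-gpu-summary", "./my_cuda_app"],   # placeholder arguments
            message="Profiling my_cuda_app",
            device=0,                                   # restrict to GPU 0 via CUDA_VISIBLE_DEVICES
        )
        for line in out_lines:
            print(line)
    except profException as exc:
        print("Profiling failed:", exc)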
|
test_oddball.py
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Oddball cases for testing coverage.py"""
import os.path
import re
import sys
from flaky import flaky
import pytest
import coverage
from coverage import env
from coverage.files import abs_file
from coverage.misc import import_local_file
from tests.coveragetest import CoverageTest
from tests import osinfo
class ThreadingTest(CoverageTest):
"""Tests of the threading support."""
def test_threading(self):
self.check_coverage("""\
import threading
def fromMainThread():
return "called from main thread"
def fromOtherThread():
return "called from other thread"
def neverCalled():
return "no one calls me"
other = threading.Thread(target=fromOtherThread)
other.start()
fromMainThread()
other.join()
""",
[1, 3, 4, 6, 7, 9, 10, 12, 13, 14, 15], "10")
def test_thread_run(self):
self.check_coverage("""\
import threading
class TestThread(threading.Thread):
def run(self):
self.a = 5
self.do_work()
self.a = 7
def do_work(self):
self.a = 10
thd = TestThread()
thd.start()
thd.join()
""",
[1, 3, 4, 5, 6, 7, 9, 10, 12, 13, 14], "")
class RecursionTest(CoverageTest):
"""Check what happens when recursive code gets near limits."""
def test_short_recursion(self):
# We can definitely get close to 500 stack frames.
self.check_coverage("""\
def recur(n):
if n == 0:
return 0
else:
return recur(n-1)+1
recur(495) # We can get at least this many stack frames.
i = 8 # and this line will be traced
""",
[1, 2, 3, 5, 7, 8], "")
def test_long_recursion(self):
# We can't finish a very deep recursion, but we don't crash.
with pytest.raises(RuntimeError):
with pytest.warns(None):
self.check_coverage("""\
def recur(n):
if n == 0:
return 0
else:
return recur(n-1)+1
recur(100000) # This is definitely too many frames.
""",
[1, 2, 3, 5, 7], ""
)
def test_long_recursion_recovery(self):
# Test the core of bug 93: https://github.com/nedbat/coveragepy/issues/93
# When recovering from a stack overflow, the Python trace function is
# disabled, but the C trace function is not. So if we're using a
# Python trace function, we won't trace anything after the stack
# overflow, and there should be a warning about it. If we're using
# the C trace function, only line 3 will be missing, and all else
# will be traced.
self.make_file("recur.py", """\
def recur(n):
if n == 0:
return 0 # never hit
else:
return recur(n-1)+1
try:
recur(100000) # This is definitely too many frames.
except RuntimeError:
i = 10
i = 11
""")
cov = coverage.Coverage()
with pytest.warns(None):
self.start_import_stop(cov, "recur")
pytrace = (cov._collector.tracer_name() == "PyTracer")
expected_missing = [3]
if pytrace: # pragma: no metacov
expected_missing += [9, 10, 11]
_, statements, missing, _ = cov.analysis("recur.py")
assert statements == [1, 2, 3, 5, 7, 8, 9, 10, 11]
assert expected_missing == missing
# Get a warning about the stackoverflow effect on the tracing function.
if pytrace: # pragma: no metacov
assert len(cov._warnings) == 1
assert re.fullmatch(
r"Trace function changed, data is likely wrong: None != " +
r"<bound method PyTracer._trace of " +
"<PyTracer at 0x[0-9a-fA-F]+: 5 lines in 1 files>>",
cov._warnings[0],
)
else:
assert not cov._warnings
class MemoryLeakTest(CoverageTest):
"""Attempt the impossible: test that memory doesn't leak.
Note: this test is truly unusual, and has had a colorful history. See
for example: https://github.com/nedbat/coveragepy/issues/186
It may still fail occasionally, especially on PyPy.
"""
@flaky
@pytest.mark.skipif(env.JYTHON, reason="Don't bother on Jython")
@pytest.mark.skipif(not env.C_TRACER, reason="Only the C tracer has refcounting issues")
def test_for_leaks(self):
# Our original bad memory leak only happened on line numbers > 255, so
# make a code object with more lines than that. Ugly string mumbo
# jumbo to get 300 blank lines at the beginning.
code = """\
# blank line\n""" * 300 + """\
def once(x): # line 301
if x % 100 == 0:
raise Exception("100!")
elif x % 2:
return 10
else: # line 306
return 11
i = 0 # Portable loop without alloc'ing memory.
while i < ITERS:
try:
once(i)
except:
pass
i += 1 # line 315
"""
lines = list(range(301, 315))
lines.remove(306) # Line 306 is the "else".
# This is a non-deterministic test, so try it a few times, and fail it
# only if it predominantly fails.
fails = 0
for _ in range(10):
ram_0 = osinfo.process_ram()
self.check_coverage(code.replace("ITERS", "10"), lines, "")
ram_10 = osinfo.process_ram()
self.check_coverage(code.replace("ITERS", "10000"), lines, "")
ram_10k = osinfo.process_ram()
# Running the code 10k times shouldn't grow the ram much more than
# running it 10 times.
ram_growth = (ram_10k - ram_10) - (ram_10 - ram_0)
if ram_growth > 100000:
fails += 1 # pragma: only failure
if fails > 8:
pytest.fail("RAM grew by %d" % (ram_growth)) # pragma: only failure
class MemoryFumblingTest(CoverageTest):
"""Test that we properly manage the None refcount."""
@pytest.mark.skipif(not env.C_TRACER, reason="Only the C tracer has refcounting issues")
def test_dropping_none(self): # pragma: not covered
# TODO: Mark this so it will only be run sometimes.
pytest.skip("This is too expensive for now (30s)")
# Start and stop coverage thousands of times to flush out bad
# reference counting, maybe.
self.make_file("the_code.py", """\
import random
def f():
if random.random() > .5:
x = 1
else:
x = 2
""")
self.make_file("main.py", """\
import coverage
import sys
from the_code import f
for i in range(10000):
cov = coverage.Coverage(branch=True)
cov.start()
f()
cov.stop()
cov.erase()
print("Final None refcount: %d" % (sys.getrefcount(None)))
""")
status, out = self.run_command_status("python main.py")
assert status == 0
assert "Final None refcount" in out
assert "Fatal" not in out
@pytest.mark.skipif(env.JYTHON, reason="Pyexpat isn't a problem on Jython")
class PyexpatTest(CoverageTest):
"""Pyexpat screws up tracing. Make sure we've counter-defended properly."""
def test_pyexpat(self):
# pyexpat calls the trace function explicitly (inexplicably), and does
# it wrong for exceptions. Parsing a DOCTYPE for some reason throws
# an exception internally, and triggers its wrong behavior. This test
# checks that our fake PyTrace_RETURN hack in tracer.c works. It will
# also detect if the pyexpat bug is fixed unbeknownst to us, meaning
# we'd see two RETURNs where there should only be one.
self.make_file("trydom.py", """\
import xml.dom.minidom
XML = '''\\
<!DOCTYPE fooey SYSTEM "http://www.example.com/example.dtd">
<root><child/><child/></root>
'''
def foo():
dom = xml.dom.minidom.parseString(XML)
assert len(dom.getElementsByTagName('child')) == 2
a = 11
foo()
""")
self.make_file("outer.py", "\n"*100 + "import trydom\na = 102\n")
cov = coverage.Coverage()
cov.erase()
# Import the Python file, executing it.
self.start_import_stop(cov, "outer")
_, statements, missing, _ = cov.analysis("trydom.py")
assert statements == [1, 3, 8, 9, 10, 11, 13]
assert missing == []
_, statements, missing, _ = cov.analysis("outer.py")
assert statements == [101, 102]
assert missing == []
# Make sure pyexpat isn't recorded as a source file.
# https://github.com/nedbat/coveragepy/issues/419
files = cov.get_data().measured_files()
msg = f"Pyexpat.c is in the measured files!: {files!r}:"
assert not any(f.endswith("pyexpat.c") for f in files), msg
class ExceptionTest(CoverageTest):
"""I suspect different versions of Python deal with exceptions differently
in the trace function.
"""
def test_exception(self):
# Python 2.3's trace function doesn't get called with "return" if the
# scope is exiting due to an exception. This confounds our trace
# function which relies on scope announcements to track which files to
# trace.
#
# This test is designed to sniff this out. Each function in the call
# stack is in a different file, to try to trip up the tracer. Each
# file has active lines in a different range so we'll see if the lines
# get attributed to the wrong file.
self.make_file("oops.py", """\
def oops(args):
a = 2
raise Exception("oops")
a = 4
""")
self.make_file("fly.py", "\n"*100 + """\
def fly(calls):
a = 2
calls[0](calls[1:])
a = 4
""")
self.make_file("catch.py", "\n"*200 + """\
def catch(calls):
try:
a = 3
calls[0](calls[1:])
a = 5
except:
a = 7
""")
self.make_file("doit.py", "\n"*300 + """\
def doit(calls):
try:
calls[0](calls[1:])
except:
a = 5
""")
# Import all the modules before starting coverage, so the def lines
# won't be in all the results.
for mod in "oops fly catch doit".split():
import_local_file(mod)
# Each run nests the functions differently to get different
# combinations of catching exceptions and letting them fly.
runs = [
("doit fly oops", {
'doit.py': [302, 303, 304, 305],
'fly.py': [102, 103],
'oops.py': [2, 3],
}),
("doit catch oops", {
'doit.py': [302, 303],
'catch.py': [202, 203, 204, 206, 207],
'oops.py': [2, 3],
}),
("doit fly catch oops", {
'doit.py': [302, 303],
'fly.py': [102, 103, 104],
'catch.py': [202, 203, 204, 206, 207],
'oops.py': [2, 3],
}),
("doit catch fly oops", {
'doit.py': [302, 303],
'catch.py': [202, 203, 204, 206, 207],
'fly.py': [102, 103],
'oops.py': [2, 3],
}),
]
for callnames, lines_expected in runs:
# Make the list of functions we'll call for this test.
callnames = callnames.split()
calls = [getattr(sys.modules[cn], cn) for cn in callnames]
cov = coverage.Coverage()
cov.start()
# Call our list of functions: invoke the first, with the rest as
# an argument.
calls[0](calls[1:]) # pragma: nested
cov.stop() # pragma: nested
# Clean the line data and compare to expected results.
# The file names are absolute, so keep just the base.
clean_lines = {}
data = cov.get_data()
for callname in callnames:
filename = callname + ".py"
lines = data.lines(abs_file(filename))
clean_lines[filename] = sorted(lines)
if env.JYTHON: # pragma: only jython
# Jython doesn't report on try or except lines, so take those
# out of the expected lines.
invisible = [202, 206, 302, 304]
for lines in lines_expected.values():
lines[:] = [l for l in lines if l not in invisible]
assert clean_lines == lines_expected
class DoctestTest(CoverageTest):
"""Tests invoked with doctest should measure properly."""
def test_doctest(self):
# Doctests used to be traced, with their line numbers credited to the
# file they were in. Below, one of the doctests has four lines (1-4),
# which would incorrectly claim that lines 1-4 of the file were
# executed. In this file, line 2 is not executed.
self.make_file("the_doctest.py", '''\
if "x" in "abc":
print("hello")
def return_arg_or_void(arg):
"""If <arg> is None, return "Void"; otherwise return <arg>
>>> return_arg_or_void(None)
'Void'
>>> return_arg_or_void("arg")
'arg'
>>> return_arg_or_void("None")
'None'
>>> if "x" in "xyz": # line 1
... if "a" in "aswed": # line 2
... if "a" in "abc": # line 3
... return_arg_or_void(12) # line 4
12
"""
if arg is None:
return "Void"
else:
return arg
import doctest, sys
doctest.testmod(sys.modules[__name__]) # we're not __main__ :(
''')
cov = coverage.Coverage()
self.start_import_stop(cov, "the_doctest")
data = cov.get_data()
assert len(data.measured_files()) == 1
lines = data.lines(data.measured_files().pop())
assert lines == [1, 3, 18, 19, 21, 23, 24]
class GettraceTest(CoverageTest):
"""Tests that we work properly with `sys.gettrace()`."""
def test_round_trip_in_untraced_function(self):
# https://github.com/nedbat/coveragepy/issues/575
self.make_file("main.py", """import sample""")
self.make_file("sample.py", """\
from swap import swap_it
def doit():
print(3)
swap_it()
print(5)
def doit_soon():
print(7)
doit()
print(9)
print(10)
doit_soon()
print(12)
""")
self.make_file("swap.py", """\
import sys
def swap_it():
sys.settrace(sys.gettrace())
""")
# Use --source=sample to prevent measurement of swap.py.
cov = coverage.Coverage(source=["sample"])
self.start_import_stop(cov, "main")
assert self.stdout() == "10\n7\n3\n5\n9\n12\n"
_, statements, missing, _ = cov.analysis("sample.py")
assert statements == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
assert missing == []
def test_setting_new_trace_function(self):
# https://github.com/nedbat/coveragepy/issues/436
self.check_coverage('''\
import os.path
import sys
def tracer(frame, event, arg):
filename = os.path.basename(frame.f_code.co_filename)
print(f"{event}: {filename} @ {frame.f_lineno}")
return tracer
def begin():
sys.settrace(tracer)
def collect():
t = sys.gettrace()
assert t is tracer, t
def test_unsets_trace():
begin()
collect()
old = sys.gettrace()
test_unsets_trace()
sys.settrace(old)
a = 21
b = 22
''',
lines=[1, 2, 4, 5, 6, 7, 9, 10, 12, 13, 14, 16, 17, 18, 20, 21, 22, 23, 24],
missing="5-7, 13-14",
)
out = self.stdout().replace(self.last_module_name, "coverage_test")
expected = (
"call: coverage_test.py @ 12\n" +
"line: coverage_test.py @ 13\n" +
"line: coverage_test.py @ 14\n" +
"return: coverage_test.py @ 14\n"
)
assert expected == out
@pytest.mark.expensive
@pytest.mark.skipif(env.METACOV, reason="Can't set trace functions during meta-coverage")
def test_atexit_gettrace(self):
# This is not a test of coverage at all, but of our understanding
# of this edge-case behavior in various Pythons.
self.make_file("atexit_gettrace.py", """\
import atexit, sys
def trace_function(frame, event, arg):
return trace_function
sys.settrace(trace_function)
def show_trace_function():
tfn = sys.gettrace()
if tfn is not None:
tfn = tfn.__name__
print(tfn)
atexit.register(show_trace_function)
# This will show what the trace function is at the end of the program.
""")
status, out = self.run_command_status("python atexit_gettrace.py")
assert status == 0
if env.PYPY and env.PYPYVERSION >= (5, 4):
# Newer PyPy clears the trace function before atexit runs.
assert out == "None\n"
else:
# Other Pythons leave the trace function in place.
assert out == "trace_function\n"
class ExecTest(CoverageTest):
"""Tests of exec."""
def test_correct_filename(self):
# https://github.com/nedbat/coveragepy/issues/380
# Bug was that exec'd files would have their lines attributed to the
# calling file. Make two files, both with ~30 lines, but no lines in
# common. Line 30 in to_exec.py was recorded as line 30 in main.py,
# but now it's fixed. :)
self.make_file("to_exec.py", """\
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
print("var is {}".format(var)) # line 31
""")
self.make_file("main.py", """\
namespace = {'var': 17}
with open("to_exec.py") as to_exec_py:
code = compile(to_exec_py.read(), 'to_exec.py', 'exec')
exec(code, globals(), namespace)
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
print("done") # line 35
""")
cov = coverage.Coverage()
self.start_import_stop(cov, "main")
_, statements, missing, _ = cov.analysis("main.py")
assert statements == [1, 2, 3, 4, 35]
assert missing == []
_, statements, missing, _ = cov.analysis("to_exec.py")
assert statements == [31]
assert missing == []
def test_unencodable_filename(self):
# https://github.com/nedbat/coveragepy/issues/891
self.make_file("bug891.py", r"""exec(compile("pass", "\udcff.py", "exec"))""")
cov = coverage.Coverage()
self.start_import_stop(cov, "bug891")
# Saving would fail trying to encode \udcff.py
cov.save()
files = [os.path.basename(f) for f in cov.get_data().measured_files()]
assert "bug891.py" in files
class MockingProtectionTest(CoverageTest):
"""Tests about protecting ourselves from aggressive mocking.
https://github.com/nedbat/coveragepy/issues/416
"""
def test_os_path_exists(self):
# To see if this test still detects the problem, change isolate_module
# in misc.py to simply return its argument. It should fail with a
# StopIteration error.
self.make_file("bug416.py", """\
import os.path
from unittest import mock
@mock.patch('os.path.exists')
def test_path_exists(mock_exists):
mock_exists.side_effect = [17]
print("in test")
import bug416a
print(bug416a.foo)
print(os.path.exists("."))
test_path_exists()
""")
self.make_file("bug416a.py", """\
print("bug416a.py")
foo = 23
""")
import py_compile
py_compile.compile("bug416a.py")
out = self.run_command("coverage run bug416.py")
assert out == "in test\nbug416a.py\n23\n17\n"
|
connection_manager.py
|
import socket
import threading
import pickle
import codecs
from concurrent.futures import ThreadPoolExecutor
from .core_node_list import CoreNodeList
from .edge_node_list import EdgeNodeList
from .message_manager import (
MessageManager,
MSG_ADD,
MSG_REMOVE,
MSG_CORE_LIST,
MSG_REQUEST_CORE_LIST,
MSG_PING,
MSG_ADD_AS_EDGE,
MSG_REMOVE_EDGE,
MSG_NEW_TRANSACTION,
MSG_NEW_BLOCK,
MSG_REQUEST_FULL_CHAIN,
RSP_FULL_CHAIN,
MSG_ENHANCED,
ERR_PROTOCOL_UNMATCH,
ERR_VERSION_UNMATCH,
OK_WITH_PAYLOAD,
OK_WITHOUT_PAYLOAD,
)
# Value used for testing; in production roughly 30 minutes (1800) would probably be better
PING_INTERVAL = 10
class ConnectionManager:
def __init__(self, host, my_port, callback):
"""
Initialization.
params:
host : this node's own IP address (manual entry is left possible because
Google's DNS may not be reachable, e.g. on an intranet)
my_port : port number used by this node's server socket
callback : externally registered function that processes received messages
"""
print('Initializing ConnectionManager...')
self.host = host
self.port = my_port
self.my_c_host = None
self.my_c_port = None
self.core_node_set = CoreNodeList()
self.edge_node_set = EdgeNodeList()
self.__add_peer((host, my_port))
self.mm = MessageManager()
self.callback = callback
def start(self):
"""
Called when the initial listening loop is started (for ServerCore).
"""
t = threading.Thread(target=self.__wait_for_access)
t.start()
self.ping_timer_p = threading.Timer(PING_INTERVAL, self.__check_peers_connection)
self.ping_timer_p.start()
self.ping_timer_e = threading.Timer(PING_INTERVAL, self.__check_edges_connection)
self.ping_timer_e.start()
# Connect to a known Core node specified by the user (for ServerCore)
def join_network(self, host, port):
"""
Connect to a known Core node specified by the user (for ServerCore).
params:
host : IP address of the node to connect to
port : port number of the node to connect to
"""
self.my_c_host = host
self.my_c_port = port
self.__connect_to_P2PNW(host, port)
def get_message_text(self, msg_type, payload = None):
"""
Build and return a protocol message of the given message type.
params:
msg_type : type of the message to build, as defined by MessageManager
payload : specify this when data should be stored in the message
return:
msgtxt : JSON-formatted message generated by MessageManager's build_message
"""
msgtxt = self.mm.build(msg_type, self.port, payload)
return msgtxt
# Send a message to the specified node
def send_msg(self, peer, msg):
"""
Send a message to the specified node.
params:
peer : tuple holding the destination IP address and port number
msg : message to send (expected to be JSON)
"""
print('send_msg called', msg)
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((peer))
s.sendall(msg.encode('utf-8'))
s.close()
except OSError:
print('Connection failed for peer : ', peer)
self.__remove_peer(peer)
def send_msg_to_all_peer(self, msg):
"""
Broadcast the same message to every node registered in the Core node list.
Param:
msg: message to send (expected to be JSON)
"""
print('send_msg_to_all_peer was called!')
current_list = self.core_node_set.get_list()
for peer in current_list:
if peer != (self.host, self.port):
print("message will be sent to ... ", peer)
self.send_msg(peer, msg)
def send_msg_to_all_edge(self, msg):
"""
Broadcast the same message to every node registered in the Edge node list.
Params:
msg: message to send (expected to be JSON)
"""
print('send_msg_to_all_edge was called! ')
current_list = self.edge_node_set.get_list()
for edge in current_list:
print("message will be sent to ... " ,edge)
self.send_msg(edge, msg)
def connection_close(self):
"""
Close the sockets as part of the shutdown procedure.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((self.host, self.port))
self.socket.close()
s.close()
self.ping_timer_p.cancel()
self.ping_timer_e.cancel()
# Send a leave request
if self.my_c_host is not None:
msg = self.mm.build(MSG_REMOVE, self.port)
self.send_msg((self.my_c_host, self.my_c_port), msg)
def has_this_edge(self, pubky_address):
return self.edge_node_set.has_this_edge(pubky_address)
def __connect_to_P2PNW(self, host, port):
"""
Send a connection request message to the specified Core node.
params:
host : IP address of the target Core node
port : port number of the target Core node
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
m_type = MSG_ADD
msg = self.mm.build(m_type, self.port)
s.sendall(msg.encode('utf-8'))
s.close()
def __wait_for_access(self):
"""
Open the server socket and start waiting for connections.
"""
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.bind((self.host, self.port))
self.socket.listen(0)
executor = ThreadPoolExecutor(max_workers=10)
while True:
print('Waiting for the connection ...')
soc, addr = self.socket.accept()
print('Connected by .. ', addr)
data_sum = ''
params = (soc, addr, data_sum)
executor.submit(self.__handle_message, params)
def __is_in_core_set(self, peer):
"""
Check whether the given node is included in the list of Core nodes.
param:
peer : tuple of IP address and port number
return:
True or False
"""
return self.core_node_set.has_this_peer(peer)
def __handle_message(self, params):
"""
Inspect a received message and handle it according to its content. Not intended to be used from outside the class.
params :
a 3-element tuple of
soc : connection of the receiving socket
addr : address information of the sender
data_sum : empty string used as the base for concatenating received data
"""
soc, addr, data_sum = params
while True:
data = soc.recv(1024)
data_sum = data_sum + data.decode('utf-8')
if not data:
break
if not data_sum:
return
result, reason, cmd, peer_port, payload = self.mm.parse(data_sum)
print(result, reason, cmd, peer_port, payload)
status = (result, reason)
if status == ('error', ERR_PROTOCOL_UNMATCH):
print('Error: Protocol name is not matched')
return
elif status == ('error', ERR_VERSION_UNMATCH):
print('Error: Protocol version is not matched')
return
elif status == ('ok', OK_WITHOUT_PAYLOAD):
if cmd == MSG_ADD:
print('ADD node request was received!!')
self.__add_peer((addr[0], peer_port))
if(addr[0], peer_port) == (self.host, self.port):
return
else:
cl = pickle.dumps(self.core_node_set.get_list(), 0).decode()
msg = self.mm.build(MSG_CORE_LIST, self.port, cl)
self.send_msg_to_all_peer(msg)
self.send_msg_to_all_edge(msg)
elif cmd == MSG_REMOVE:
print('REMOVE request was received!! from', addr[0], peer_port)
self.__remove_peer((addr[0], peer_port))
cl = pickle.dumps(self.core_node_set.get_list(), 0).decode()
msg = self.mm.build(MSG_CORE_LIST, self.port, cl)
self.send_msg_to_all_peer(msg)
self.send_msg_to_all_edge(msg)
elif cmd == MSG_PING:
# Nothing in particular to do here
pass
elif cmd == MSG_REQUEST_CORE_LIST:
print('List for Core nodes was requested!!')
cl = pickle.dumps(self.core_node_set.get_list(), 0).decode()
msg = self.mm.build(MSG_CORE_LIST, self.port, cl)
self.send_msg((addr[0], peer_port), msg)
elif cmd == MSG_ADD_AS_EDGE:
print('ADD request for Edge node was received!!')
self.__add_edge_node((addr[0], peer_port))
cl = pickle.dumps(self.core_node_set.get_list(), 0).decode()
msg = self.mm.build(MSG_CORE_LIST, self.port, cl)
self.send_msg((addr[0], peer_port), msg)
elif cmd == MSG_REMOVE_EDGE:
print('REMOVE EDGE request was received!! from', addr[0], peer_port)
self.__remove_edge_node((addr[0], peer_port))
else:
is_core = self.__is_in_core_set((addr[0], peer_port))
self.callback((result, reason, cmd, peer_port, payload), is_core, (addr[0], peer_port))
elif status == ('ok', OK_WITH_PAYLOAD):
if cmd == MSG_CORE_LIST:
# TODO: simply overwriting the local list with the received one is not ideal from a security standpoint;
# we may need to register keys of trusted nodes, for example.
# This discussion is covered in more detail in Chapter 6.
print('Refresh the core node list...')
new_core_set = pickle.loads(payload.encode('utf8'))
print('latest core node list: ', new_core_set)
self.core_node_set.overwrite(new_core_set)
else:
is_core = self.__is_in_core_set((addr[0], peer_port))
self.callback((result, reason, cmd, peer_port, payload), is_core, None)
else:
print('Unexpected status', status)
def __add_peer(self, peer):
"""
Add a Core node to the list. Not intended to be used from outside the class.
param:
peer : connection information (IP address and port number) of the node to store as a Core node
"""
self.core_node_set.add((peer))
def __add_edge_node(self, edge):
"""
Add an Edge node to the list. Not intended to be used from outside the class.
param:
edge : connection information (IP address and port number) of the node to store as an Edge node
"""
self.edge_node_set.add((edge))
def __remove_peer(self, peer):
"""
Remove a Core node that is considered to have left the network. Not intended to be used from outside the class.
param:
peer : connection information (IP address and port number) of the node to remove
"""
self.core_node_set.remove(peer)
def __remove_edge_node(self, edge):
"""
Remove an Edge node that is considered to have left the network. Not intended to be used from outside the class.
param:
edge : connection information (IP address and port number) of the node to remove
"""
self.edge_node_set.remove(edge)
def __check_peers_connection(self):
"""
Check that every connected Core node is still alive. Not intended to be used from outside the class.
This check is executed periodically.
"""
print('check_peers_connection was called')
current_core_list = self.core_node_set.get_list()
changed = False
dead_c_node_set = list(filter(lambda p: not self.__is_alive(p), current_core_list))
if dead_c_node_set:
changed = True
print('Removing ', dead_c_node_set)
current_core_list = current_core_list - set(dead_c_node_set)
self.core_node_set.overwrite(current_core_list)
current_core_list = self.core_node_set.get_list()
print('current core node list:', current_core_list)
# Broadcast the updated list only when it has actually changed
if changed:
cl = pickle.dumps(current_core_list, 0).decode()
msg = self.mm.build(MSG_CORE_LIST, self.port, cl)
self.send_msg_to_all_peer(msg)
self.send_msg_to_all_edge(msg)
self.ping_timer_p = threading.Timer(PING_INTERVAL, self.__check_peers_connection)
self.ping_timer_p.start()
def __check_edges_connection(self):
"""
Check that every connected Edge node is still alive. Not intended to be used from outside the class.
This check is executed periodically.
"""
print('check_edges_connection was called')
current_edge_list = self.edge_node_set.get_list()
dead_e_node_set = list(filter(lambda p: not self.__is_alive(p), current_edge_list))
if dead_e_node_set:
print('Removing ', dead_e_node_set)
current_edge_list = current_edge_list - set(dead_e_node_set)
self.edge_node_set.overwrite(current_edge_list)
current_edge_list = self.edge_node_set.get_list()
print('current edge node list:', current_edge_list)
self.ping_timer_e = threading.Timer(PING_INTERVAL, self.__check_edges_connection)
self.ping_timer_e.start()
def __is_alive(self, target):
"""
Send a liveness-check message to a node.
param:
target : connection information (IP address and port number) of the node to send the liveness-check message to
"""
print('target', target)
if target == (self.host, self.port):
return True
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((target))
msg = self.mm.build(MSG_PING)
s.sendall(msg.encode('utf-8'))
s.close()
return True
except Exception as e:
print(e)
print('Connection failed for peer : ', target)
return False
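# --- Illustrative sketch, not part of the original module ---
# __is_alive() above treats a node as alive if a short-lived TCP connection can
# be opened and a PING message delivered. Stripped of the class context, the
# probe boils down to the standalone helper below. The function name and the
# pre-built ping_msg string are assumptions made only for illustration; it
# relies on the module-level socket import already used throughout this file.
def _example_is_alive(target, ping_msg):
    """Return True if target (a (host, port) tuple) accepts a connection and the ping."""
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect(target)
        s.sendall(ping_msg.encode('utf-8'))
        s.close()
        return True
    except Exception:
        return False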
|
__init__.py
|
from threading import Thread
from platypush.backend import Backend
from platypush.context import get_plugin
from platypush.message.event.light import LightStatusChangeEvent
class LightHueBackend(Backend):
"""
This backend will periodically check for the status of your configured
Philips Hue light devices and trigger events when the status of a device
(power, saturation, brightness or hue) changes.
Triggers:
* :class:`platypush.message.event.light.LightStatusChangeEvent` when the
status of a lightbulb changes
Requires:
* The :class:`platypush.plugins.light.hue.LightHuePlugin` plugin to be
active and configured.
"""
_DEFAULT_POLL_SECONDS = 10
def __init__(self, poll_seconds=_DEFAULT_POLL_SECONDS, *args, **kwargs):
"""
:param poll_seconds: How often the backend will poll the Hue plugin for
status updates. Default: 10 seconds
:type poll_seconds: float
"""
super().__init__(*args, **kwargs)
self.poll_seconds = poll_seconds
@staticmethod
def _get_lights():
plugin = get_plugin('light.hue')
if not plugin:
plugin = get_plugin('light.hue', reload=True)
return plugin.get_lights().output
def _listener(self):
def _thread():
lights = self._get_lights()
while not self.should_stop():
try:
lights_new = self._get_lights()
for light_id, light in lights_new.items():
event_args = {}
state = light.get('state')
prev_state = lights.get(light_id, {}).get('state', {})
if 'on' in state and state.get('on') != prev_state.get('on'):
event_args['on'] = state.get('on')
if 'bri' in state and state.get('bri') != prev_state.get('bri'):
event_args['bri'] = state.get('bri')
if 'sat' in state and state.get('sat') != prev_state.get('sat'):
event_args['sat'] = state.get('sat')
if 'hue' in state and state.get('hue') != prev_state.get('hue'):
event_args['hue'] = state.get('hue')
if 'ct' in state and state.get('ct') != prev_state.get('ct'):
event_args['ct'] = state.get('ct')
if 'xy' in state and state.get('xy') != prev_state.get('xy'):
event_args['xy'] = state.get('xy')
if event_args:
event_args['plugin_name'] = 'light.hue'
event_args['light_id'] = light_id
event_args['light_name'] = light.get('name')
self.bus.post(LightStatusChangeEvent(**event_args))
lights = lights_new
except Exception as e:
self.logger.exception(e)
finally:
self.wait_stop(self.poll_seconds)
return _thread
def run(self):
super().run()
self.logger.info('Starting Hue lights backend')
while not self.should_stop():
try:
poll_thread = Thread(target=self._listener())
poll_thread.start()
poll_thread.join()
except Exception as e:
self.logger.exception(e)
self.wait_stop(self.poll_seconds)
self.logger.info('Stopped Hue lights backend')
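# --- Illustrative sketch, an assumption added for clarity (not part of the platypush backend) ---
# The listener above fires one LightStatusChangeEvent per light whose state
# attributes changed between two polls. The per-attribute diff it performs can
# be expressed as the standalone helper below; the function name and the
# attribute tuple are illustrative only.
def _example_state_diff(prev_state, new_state,
                        attrs=('on', 'bri', 'sat', 'hue', 'ct', 'xy')):
    """Return the attributes of new_state that differ from prev_state."""
    return {
        attr: new_state.get(attr)
        for attr in attrs
        if attr in new_state and new_state.get(attr) != prev_state.get(attr)
    }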
# vim:sw=4:ts=4:et:
|
mission_test.py
|
#!/usr/bin/env python2
#***************************************************************************
#
# Copyright (c) 2015-2016 PX4 Development Team. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. Neither the name PX4 nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#***************************************************************************/
#
# @author Andreas Antener <andreas@uaventure.com>
#
# The shebang of this file is currently Python2 because some
# dependencies such as pymavlink don't play well with Python3 yet.
from __future__ import division
PKG = 'px4'
import unittest
import rospy
import glob
import json
import math
import os
import px4tools
import sys
from mavros import mavlink
from mavros.mission import QGroundControlWP
from pymavlink import mavutil
from threading import Thread
from mavros_msgs.msg import Altitude, ExtendedState, HomePosition, Mavlink, \
State, Waypoint
from mavros_msgs.srv import CommandBool, SetMode, WaypointPush
from sensor_msgs.msg import NavSatFix
def get_last_log():
try:
log_path = os.environ['PX4_LOG_DIR']
except KeyError:
log_path = os.path.join(os.environ['HOME'],
'.ros/rootfs/fs/microsd/log')
last_log_dir = sorted(glob.glob(os.path.join(log_path, '*')))[-1]
last_log = sorted(glob.glob(os.path.join(last_log_dir, '*.ulg')))[-1]
return last_log
def read_new_mission(f):
d = json.load(f)
current = True
for wp in d['items']:
yield Waypoint(
is_current=current,
frame=int(wp['frame']),
command=int(wp['command']),
param1=float(wp['param1']),
param2=float(wp['param2']),
param3=float(wp['param3']),
param4=float(wp['param4']),
x_lat=float(wp['coordinate'][0]),
y_long=float(wp['coordinate'][1]),
z_alt=float(wp['coordinate'][2]),
autocontinue=bool(wp['autoContinue']))
if current:
current = False
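# Illustrative note (the example values below are assumptions): read_new_mission()
# expects a JSON document with an "items" list, where each item carries frame,
# command, param1..param4, coordinate and autoContinue, roughly like:
#   {"items": [{"frame": 3, "command": 22,
#               "param1": 0.0, "param2": 0.0, "param3": 0.0, "param4": 0.0,
#               "coordinate": [47.397742, 8.545594, 15.0],
#               "autoContinue": true}]}
# Only the first waypoint yielded is marked is_current=True.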
class MavrosMissionTest(unittest.TestCase):
"""
Run a mission
"""
# dictionaries correspond to mavros ExtendedState msg
LAND_STATES = {
0: 'UNDEFINED',
1: 'ON_GROUND',
2: 'IN_AIR',
3: 'TAKEOFF',
4: 'LANDING'
}
VTOL_STATES = {
0: 'VTOL UNDEFINED',
1: 'VTOL MC->FW',
2: 'VTOL FW->MC',
3: 'VTOL MC',
4: 'VTOL FW'
}
def setUp(self):
self.rate = rospy.Rate(10) # 10hz
self.has_global_pos = False
self.global_position = NavSatFix()
self.extended_state = ExtendedState()
self.altitude = Altitude()
self.state = State()
self.mc_rad = 5
self.fw_rad = 60
self.fw_alt_rad = 10
self.last_alt_d = None
self.last_pos_d = None
self.mission_name = ""
self.sub_topics_ready = {
key: False
for key in ['global_pos', 'home_pos', 'ext_state', 'alt', 'state']
}
# setup ROS topics and services
try:
rospy.wait_for_service('mavros/mission/push', 30)
rospy.wait_for_service('mavros/cmd/arming', 30)
rospy.wait_for_service('mavros/set_mode', 30)
except rospy.ROSException:
self.fail("failed to connect to mavros services")
self.wp_push_srv = rospy.ServiceProxy('mavros/mission/push',
WaypointPush)
self.set_arming_srv = rospy.ServiceProxy('/mavros/cmd/arming',
CommandBool)
self.set_mode_srv = rospy.ServiceProxy('/mavros/set_mode', SetMode)
self.global_pos_sub = rospy.Subscriber('mavros/global_position/global',
NavSatFix,
self.global_position_callback)
self.home_pos_sub = rospy.Subscriber('mavros/home_position/home',
HomePosition,
self.home_position_callback)
self.ext_state_sub = rospy.Subscriber('mavros/extended_state',
ExtendedState,
self.extended_state_callback)
self.alt_sub = rospy.Subscriber('mavros/altitude', Altitude,
self.altitude_callback)
self.state_sub = rospy.Subscriber('mavros/state', State,
self.state_callback)
self.mavlink_pub = rospy.Publisher('mavlink/to', Mavlink, queue_size=1)
# need to simulate heartbeat to prevent datalink loss detection
self.hb_mav_msg = mavutil.mavlink.MAVLink_heartbeat_message(
mavutil.mavlink.MAV_TYPE_GCS, 0, 0, 0, 0, 0)
self.hb_mav_msg.pack(mavutil.mavlink.MAVLink('', 2, 1))
self.hb_ros_msg = mavlink.convert_to_rosmsg(self.hb_mav_msg)
self.hb_thread = Thread(target=self.send_heartbeat, args=())
self.hb_thread.daemon = True
self.hb_thread.start()
def tearDown(self):
pass
#
# Callback functions
#
def global_position_callback(self, data):
self.global_position = data
if not self.sub_topics_ready['global_pos']:
self.sub_topics_ready['global_pos'] = True
def home_position_callback(self, data):
# publishing on this topic seems to be a better indicator that the sim
# is ready; the message content itself is not actually needed
self.home_pos_sub.unregister()
if not self.sub_topics_ready['home_pos']:
self.sub_topics_ready['home_pos'] = True
def extended_state_callback(self, data):
if self.extended_state.vtol_state != data.vtol_state:
rospy.loginfo("VTOL state changed from {0} to {1}".format(
self.VTOL_STATES.get(self.extended_state.vtol_state),
self.VTOL_STATES.get(data.vtol_state)))
if self.extended_state.landed_state != data.landed_state:
rospy.loginfo("landed state changed from {0} to {1}".format(
self.LAND_STATES.get(self.extended_state.landed_state),
self.LAND_STATES.get(data.landed_state)))
self.extended_state = data
if not self.sub_topics_ready['ext_state']:
self.sub_topics_ready['ext_state'] = True
def state_callback(self, data):
if self.state.armed != data.armed:
rospy.loginfo("armed state changed from {0} to {1}".format(
self.state.armed, data.armed))
if self.state.mode != data.mode:
rospy.loginfo("mode changed from {0} to {1}".format(
self.state.mode, data.mode))
self.state = data
# mavros publishes a disconnected state message on init
if not self.sub_topics_ready['state'] and data.connected:
self.sub_topics_ready['state'] = True
def altitude_callback(self, data):
self.altitude = data
# amsl has been observed to be nan while other fields are valid
if not self.sub_topics_ready['alt'] and not math.isnan(data.amsl):
self.sub_topics_ready['alt'] = True
#
# Helper methods
#
def send_heartbeat(self):
rate = rospy.Rate(2) # Hz
while not rospy.is_shutdown():
self.mavlink_pub.publish(self.hb_ros_msg)
try: # prevent garbage in console output when thread is killed
rate.sleep()
except rospy.ROSInterruptException:
pass
def set_mode(self, mode, timeout):
"""mode: PX4 mode string, timeout(int): seconds"""
old_mode = self.state.mode
loop_freq = 1 # Hz
rate = rospy.Rate(loop_freq)
mode_set = False
for i in xrange(timeout * loop_freq):
if self.state.mode == mode:
mode_set = True
rospy.loginfo(
"set mode success | new mode: {0}, old mode: {1} | seconds: {2} of {3}".
format(mode, old_mode, i / loop_freq, timeout))
break
else:
try:
res = self.set_mode_srv(0, mode) # 0 is custom mode
if not res.mode_sent:
rospy.logerr("failed to send mode command")
except rospy.ServiceException as e:
rospy.logerr(e)
rate.sleep()
self.assertTrue(mode_set, (
"failed to set mode | new mode: {0}, old mode: {1} | timeout(seconds): {2}".
format(mode, old_mode, timeout)))
def set_arm(self, arm, timeout):
"""arm: True to arm or False to disarm, timeout(int): seconds"""
old_arm = self.state.armed
loop_freq = 1 # Hz
rate = rospy.Rate(loop_freq)
arm_set = False
for i in xrange(timeout * loop_freq):
if self.state.armed == arm:
arm_set = True
rospy.loginfo(
"set arm success | new arm: {0}, old arm: {1} | seconds: {2} of {3}".
format(arm, old_arm, i / loop_freq, timeout))
break
else:
try:
res = self.set_arming_srv(arm)
if not res.success:
rospy.logerr("failed to send arm command")
except rospy.ServiceException as e:
rospy.logerr(e)
rate.sleep()
self.assertTrue(arm_set, (
"failed to set arm | new arm: {0}, old arm: {1} | timeout(seconds): {2}".
format(arm, old_arm, timeout)))
def is_at_position(self, lat, lon, alt, xy_offset, z_offset):
"""alt(amsl), xy_offset, z_offset: meters"""
R = 6371000 # metres
rlat1 = math.radians(lat)
rlat2 = math.radians(self.global_position.latitude)
rlat_d = math.radians(self.global_position.latitude - lat)
rlon_d = math.radians(self.global_position.longitude - lon)
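# Great-circle (haversine) distance between the target (lat, lon) and the
# current global position:
#   a = sin^2(dlat/2) + cos(lat1)*cos(lat2)*sin^2(dlon/2)
#   d = 2 * R * atan2(sqrt(a), sqrt(1 - a))
# with R ~ 6371 km, so d is in metres and is compared against xy_offset below.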
a = (math.sin(rlat_d / 2) * math.sin(rlat_d / 2) + math.cos(rlat1) *
math.cos(rlat2) * math.sin(rlon_d / 2) * math.sin(rlon_d / 2))
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
d = R * c
alt_d = abs(alt - self.altitude.amsl)
# remember best distances
if not self.last_pos_d or self.last_pos_d > d:
self.last_pos_d = d
if not self.last_alt_d or self.last_alt_d > alt_d:
self.last_alt_d = alt_d
rospy.logdebug("d: {0}, alt_d: {1}".format(d, alt_d))
return d < xy_offset and alt_d < z_offset
def reach_position(self, lat, lon, alt, timeout, index):
"""alt(amsl): meters, timeout(int): seconds"""
# reset best distances
self.last_alt_d = None
self.last_pos_d = None
rospy.loginfo(
"trying to reach waypoint | lat: {0:13.9f}, lon: {1:13.9f}, alt: {2:6.2f}, index: {3}".
format(lat, lon, alt, index))
# does it reach the position in 'timeout' seconds?
loop_freq = 10 # Hz
rate = rospy.Rate(loop_freq)
reached = False
for i in xrange(timeout * loop_freq):
# use MC radius by default
# FIXME: also check MAV_TYPE from system status, otherwise pure fixed-wing won't work
xy_radius = self.mc_rad
z_radius = self.mc_rad
# use FW radius if in FW or in transition
if (self.extended_state.vtol_state == ExtendedState.VTOL_STATE_FW
or self.extended_state.vtol_state ==
ExtendedState.VTOL_STATE_TRANSITION_TO_MC or
self.extended_state.vtol_state ==
ExtendedState.VTOL_STATE_TRANSITION_TO_FW):
xy_radius = self.fw_rad
z_radius = self.fw_alt_rad
if self.is_at_position(lat, lon, alt, xy_radius, z_radius):
reached = True
rospy.loginfo(
"position reached | pos_d: {0:.2f}, alt_d: {1:.2f}, index: {2} | seconds: {3} of {4}".
format(self.last_pos_d, self.last_alt_d, index, i /
loop_freq, timeout))
break
rate.sleep()
self.assertTrue(reached, (
"({0}) took too long to get to position | lat: {1:13.9f}, lon: {2:13.9f}, alt: {3:6.2f}, xy off: {4}, z off: {5}, pos_d: {6:.2f}, alt_d: {7:.2f}, VTOL state: {8}, index: {9} | timeout(seconds): {10}".
format(self.mission_name, lat, lon, alt, xy_radius, z_radius,
self.last_pos_d, self.last_alt_d,
self.VTOL_STATES.get(self.extended_state.vtol_state), index,
timeout)))
def wait_for_topics(self, timeout):
"""wait for simulation to be ready, make sure we're getting topic info
from all topics by checking dictionary of flag values set in callbacks,
timeout(int): seconds"""
rospy.loginfo("waiting for simulation topics to be ready")
loop_freq = 1 # Hz
rate = rospy.Rate(loop_freq)
simulation_ready = False
for i in xrange(timeout * loop_freq):
if all(value for value in self.sub_topics_ready.values()):
simulation_ready = True
rospy.loginfo("simulation topics ready | seconds: {0} of {1}".
format(i / loop_freq, timeout))
break
rate.sleep()
self.assertTrue(simulation_ready, (
"failed to hear from all subscribed simulation topics | topic ready flags: {0} | timeout(seconds): {1}".
format(self.sub_topics_ready, timeout)))
def wait_on_landed_state(self, desired_landed_state, timeout, index):
rospy.loginfo(
"waiting for landed state | state: {0}, index: {1}".format(
self.LAND_STATES.get(desired_landed_state), index))
loop_freq = 10 # Hz
rate = rospy.Rate(loop_freq)
landed_state_confirmed = False
for i in xrange(timeout * loop_freq):
if self.extended_state.landed_state == desired_landed_state:
landed_state_confirmed = True
rospy.loginfo(
"landed state confirmed | state: {0}, index: {1}".format(
self.LAND_STATES.get(desired_landed_state), index))
break
rate.sleep()
self.assertTrue(landed_state_confirmed, (
"({0}) landed state not detected | desired: {1}, current: {2} | index: {3}, timeout(seconds): {4}".
format(self.mission_name,
self.LAND_STATES.get(desired_landed_state),
self.LAND_STATES.get(self.extended_state.landed_state),
index, timeout)))
def wait_on_transition(self, transition, timeout, index):
"""Wait for VTOL transition, timeout(int): seconds"""
rospy.loginfo(
"waiting for VTOL transition | transition: {0}, index: {1}".format(
self.VTOL_STATES.get(transition), index))
loop_freq = 10 # Hz
rate = rospy.Rate(loop_freq)
transitioned = False
for i in xrange(timeout * loop_freq):
if transition == self.extended_state.vtol_state:
rospy.loginfo(
"transitioned | index: {0} | seconds: {1} of {2}".format(
index, i / loop_freq, timeout))
transitioned = True
break
rate.sleep()
self.assertTrue(transitioned, (
"({0}) transition not detected | index: {1} | timeout(seconds): {2}, ".
format(self.mission_name, index, timeout)))
#
# Test method
#
def test_mission(self):
"""Test mission"""
if len(sys.argv) < 2:
self.fail("usage: mission_test.py mission_file")
return
self.mission_name = sys.argv[1]
mission_file = os.path.dirname(
os.path.realpath(__file__)) + "/" + sys.argv[1]
rospy.loginfo("reading mission {0}".format(mission_file))
wps = []
with open(mission_file, 'r') as f:
mission_ext = os.path.splitext(mission_file)[1]
if mission_ext == '.mission':
rospy.loginfo("new style mission file detected")
for waypoint in read_new_mission(f):
wps.append(waypoint)
rospy.logdebug(waypoint)
elif mission_ext == '.txt':
rospy.loginfo("old style mission file detected")
mission = QGroundControlWP()
for waypoint in mission.read(f):
wps.append(waypoint)
rospy.logdebug(waypoint)
else:
raise IOError('unknown mission file extension', mission_ext)
rospy.loginfo("send mission")
result = False
try:
res = self.wp_push_srv(start_index=0, waypoints=wps)
result = res.success
except rospy.ServiceException as e:
rospy.logerr(e)
self.assertTrue(
result,
"({0}) mission could not be transferred".format(self.mission_name))
# delay starting the mission
self.wait_for_topics(30)
# make sure the simulation is ready to start the mission
self.wait_on_landed_state(ExtendedState.LANDED_STATE_ON_GROUND, 10, -1)
rospy.loginfo("seting mission mode")
self.set_mode("AUTO.MISSION", 5)
rospy.loginfo("arming")
self.set_arm(True, 5)
rospy.loginfo("run mission")
for index, waypoint in enumerate(wps):
# only check position for waypoints where this makes sense
if waypoint.frame == Waypoint.FRAME_GLOBAL_REL_ALT or waypoint.frame == Waypoint.FRAME_GLOBAL:
alt = waypoint.z_alt
if waypoint.frame == Waypoint.FRAME_GLOBAL_REL_ALT:
alt += self.altitude.amsl - self.altitude.relative
self.reach_position(waypoint.x_lat, waypoint.y_long, alt, 60,
index)
# check if VTOL transition happens if applicable
if waypoint.command == 84 or waypoint.command == 85 or waypoint.command == 3000:
transition = waypoint.param1
if waypoint.command == 84: # VTOL takeoff implies transition to FW
transition = ExtendedState.VTOL_STATE_FW
if waypoint.command == 85: # VTOL land implies transition to MC
transition = ExtendedState.VTOL_STATE_MC
self.wait_on_transition(transition, 60, index)
# after reaching position, wait for landing detection if applicable
if waypoint.command == 85 or waypoint.command == 21:
self.wait_on_landed_state(ExtendedState.LANDED_STATE_ON_GROUND,
60, index)
rospy.loginfo("disarming")
self.set_arm(False, 5)
rospy.loginfo("mission done, calculating performance metrics")
last_log = get_last_log()
rospy.loginfo("log file {0}".format(last_log))
data = px4tools.read_ulog(last_log).concat(dt=0.1)
data = px4tools.compute_data(data)
res = px4tools.estimator_analysis(data, False)
# enforce performance
self.assertTrue(abs(res['roll_error_mean']) < 5.0, str(res))
self.assertTrue(abs(res['pitch_error_mean']) < 5.0, str(res))
self.assertTrue(abs(res['yaw_error_mean']) < 5.0, str(res))
self.assertTrue(res['roll_error_std'] < 5.0, str(res))
self.assertTrue(res['pitch_error_std'] < 5.0, str(res))
self.assertTrue(res['yaw_error_std'] < 5.0, str(res))
if __name__ == '__main__':
import rostest
rospy.init_node('test_node', anonymous=True)
name = "mavros_mission_test"
if len(sys.argv) > 1:
name += "-%s" % sys.argv[1]
rostest.rosrun(PKG, name, MavrosMissionTest)
|
MainGui.py
|
import traceback
from threading import Thread
import wx
from compress import runCompressionRound, saveBlobsInformation, saveJustBlobs
from decompress import loadBlobsInformation, loadBlobsPixels
from image_manipulation import show_image_from_numpy_array, save_image_from_numpy_array
from structure.Blobs import Blobs
from structure.Image import Image
from structure.Vector import Vector2
class MyFileDropTarget(wx.FileDropTarget):
onDrop = None
def __init__(self, window):
wx.FileDropTarget.__init__(self)
self.window = window
def BindOnDrop(self, handler):
self.onDrop = handler
def OnDropFiles(self, x, y, filenames):
if self.onDrop:
self.onDrop(filenames[0])
return True
class Title(wx.StaticText):
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
titleFont = wx.Font(14, wx.DEFAULT, wx.NORMAL, wx.NORMAL)
self.SetFont(titleFont)
class MainGui(wx.Frame):
selectedFileToCompress: str = None
selectedBlobToDecompress: str = None
selectedPcfToDecompress: str = None
def __init__(self, ):
super().__init__(parent=None, title="PIC - Patrikovo komprese obrázků", size=(400, 620))
self._setup_gui()
self.Show()
def _setup_gui(self):
self.panel = wx.Panel(self, size=(400, 600))
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.panel.SetSizer(self.sizer)
self.__setupCompressMenu()
self.sizer.Add(wx.StaticLine(self.panel, style=wx.LI_HORIZONTAL, size=(5000, 2)), 0, wx.ALL, 5)
self.__setupDecompressMenu()
def __setupCompressMenu(self):
text = Title(self.panel, label="Komprimovat")
self.sizer.Add(text, 0, wx.ALL | wx.ALIGN_CENTER, 5)
#######################################################
self.compress_drop = wx.StaticText(self.panel, -1, "Přetáhni soubor ke kompresi nebo vyber kliknutím",
size=(5000, 200),
style=wx.ST_NO_AUTORESIZE | wx.BORDER_SIMPLE)
self.compress_drop.SetWindowVariant(wx.WINDOW_VARIANT_NORMAL)
self.compress_drop.SetCursor(wx.Cursor(wx.CURSOR_HAND))
dropTarget = MyFileDropTarget(self.panel)
self.compress_drop.SetDropTarget(dropTarget)
dropTarget.BindOnDrop(self.selectFileToCompress)
self.compress_drop.Bind(wx.EVT_LEFT_DOWN, self.selectFileToCompress)
self.sizer.Add(self.compress_drop, 0, wx.ALL, 5)
#######################################################
self.compressButton = wx.Button(self.panel, label="Začít kompresi")
self.compressButton.Disable()
self.compressButton.Bind(wx.EVT_LEFT_DOWN, self.startCompress)
self.sizer.Add(self.compressButton, 0, wx.ALL | wx.ALIGN_CENTER, 5)
def __setupDecompressMenu(self):
text = Title(self.panel, label="Dekomprimovat")
self.sizer.Add(text, 0, wx.ALL | wx.ALIGN_CENTER, 5)
#######################################################
dropPanel = wx.Panel(self.panel)
dropSizer = wx.BoxSizer(wx.HORIZONTAL)
dropPanel.SetSizer(dropSizer)
self.sizer.Add(dropPanel, 0, wx.ALL | wx.EXPAND, 5)
#######################################################
self.decompress_drop_blob = wx.StaticText(dropPanel, -1,
"Přetáhni soubor .blob k dekompresi nebo vyber kliknutím",
size=(180, 200),
style=wx.ST_NO_AUTORESIZE | wx.BORDER_SIMPLE)
self.decompress_drop_blob.SetWindowVariant(wx.WINDOW_VARIANT_NORMAL)
self.decompress_drop_blob.SetCursor(wx.Cursor(wx.CURSOR_HAND))
BlobDropTarget = MyFileDropTarget(dropPanel)
self.decompress_drop_blob.SetDropTarget(BlobDropTarget)
BlobDropTarget.BindOnDrop(self.selectBlobToDecompress)
self.decompress_drop_blob.Bind(wx.EVT_LEFT_DOWN, self.selectBlobToDecompress)
dropSizer.Add(self.decompress_drop_blob, 0, wx.ALL, 5)
#######################################################
self.decompress_drop_pcf = wx.StaticText(dropPanel, -1,
"Přetáhni soubor .pcf k dekompresi nebo vyber kliknutím",
size=(180, 200),
style=wx.ST_NO_AUTORESIZE | wx.BORDER_SIMPLE)
self.decompress_drop_pcf.SetWindowVariant(wx.WINDOW_VARIANT_NORMAL)
self.decompress_drop_pcf.SetCursor(wx.Cursor(wx.CURSOR_HAND))
pcfDropTarget = MyFileDropTarget(dropPanel)
self.decompress_drop_pcf.SetDropTarget(pcfDropTarget)
pcfDropTarget.BindOnDrop(self.selectPcfToDecompress)
self.decompress_drop_pcf.Bind(wx.EVT_LEFT_DOWN, self.selectPcfToDecompress)
dropSizer.Add(self.decompress_drop_pcf, 0, wx.ALL, 5)
#######################################################
self.decompressButton = wx.Button(self.panel, label="Začít dekompresi")
self.decompressButton.Disable()
self.decompressButton.Bind(wx.EVT_LEFT_UP, self.startDecompress)
self.sizer.Add(self.decompressButton, 0, wx.ALL | wx.ALIGN_CENTER, 5)
def selectFileToCompress(self, filepath: str = None):
if not filepath or type(filepath) != str:
dlg = wx.FileDialog(self, message="Zvol obrázek ke kompresi")
if dlg.ShowModal() == wx.ID_OK:
filepath = dlg.GetPath()
else:
return
self.selectedFileToCompress = filepath
self.compress_drop.SetLabelText('Zvolil jsi soubor: {}'.format(filepath.split("\\")[-1]))
self.compressButton.Enable()
def selectBlobToDecompress(self, filepath: str = None):
if not filepath or type(filepath) != str:
dlg = wx.FileDialog(self, message="Zvol .blob část obrázku k dekompresi",
wildcard="Blob files (*.blob)|*.blob", )
if dlg.ShowModal() == wx.ID_OK:
filepath = dlg.GetPath()
else:
return
self.selectedBlobToDecompress = filepath
self.decompress_drop_blob.SetLabelText('Zvolil jsi blob soubor: {}'.format(filepath.split("\\")[-1]))
if self.selectedPcfToDecompress:
self.decompressButton.Enable()
def selectPcfToDecompress(self, filepath: str = None):
if not filepath or type(filepath) != str:
dlg = wx.FileDialog(self, message="Zvol .pcf část obrázku k dekompresi",
wildcard="Patriks Compressed Files (*.pcf)|*.pcf")
if dlg.ShowModal() == wx.ID_OK:
filepath = dlg.GetPath()
else:
return
self.selectedPcfToDecompress = filepath
self.decompress_drop_pcf.SetLabelText('Zvolil jsi pcf soubor: {}'.format(filepath.split("\\")[-1]))
if self.selectedBlobToDecompress:
self.decompressButton.Enable()
def startCompress(self, event=None):
try:
image: Image = Image.fromFile(self.selectedFileToCompress, Vector2(8, 8))
print(f"blobs size: {image.blobs.size}")
print(f"blobs count: {image.blobs.getBlobsCount()}")
if image.blobs.getBlobsCount() > 1024:
dlg = wx.MessageBox(f"Obrázek je velký, komprese zabere dlouho (cca {image.blobs.getBlobsCount() // 750} minut). Chceš pokračovat?", "Varování",
wx.ICON_WARNING | wx.YES_NO)
if dlg != wx.YES:
return
print("Starting compression")
image.showFromBlobs("Tohle budu komprimovat")
allBlobs = image.getFlattenedBlobsArray()
dlg = wx.ProgressDialog("Komprimuji", f'',
maximum=len(allBlobs),
parent=self,
style=wx.PD_APP_MODAL | wx.PD_ESTIMATED_TIME | wx.PD_REMAINING_TIME | wx.PD_CAN_ABORT)
progressObject = {"count": 0, "max": len(allBlobs), "cancel": False}
Thread(target=runCompressionRound, args=(allBlobs, progressObject)).start()
while progressObject["count"] < progressObject["max"]:
wx.MilliSleep(100)
wx.Yield()
if dlg.WasCancelled():
print("Cancelled")
progressObject["cancel"] = True
wx.MessageBox("Komprese byla přerušena uživatelem.", "Zrušeno", wx.ICON_INFORMATION)
dlg.Update(progressObject["max"])
dlg.Destroy()
return
dlg.Update(progressObject["count"])
print("Compression done")
dlg.Destroy()
image.showFromBlobs("Zkomprimovaný obrázek")
blobSaveDlg = wx.FileDialog(None, "Kam chceš uložit část obrázku .blob ?",
wildcard="Blob files (*.blob)|*.blob",
style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
if blobSaveDlg.ShowModal() != wx.ID_OK:
wx.MessageBox("Zrušeno uživatelem.", "Zrušeno", wx.ICON_INFORMATION)
return
pcfSaveDlg = wx.FileDialog(None, "Kam chceš uložit část obrázku .pcf ?",
wildcard="Patriks Compressed Files (*.pcf)|*.pcf",
style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
if pcfSaveDlg.ShowModal() != wx.ID_OK:
wx.MessageBox("Zrušeno uživatelem.", "Zrušeno", wx.ICON_INFORMATION)
return
saveBlobsInformation(image.blobs, blobSaveDlg.GetPath())
saveJustBlobs(allBlobs, pcfSaveDlg.GetPath())
wx.MessageBox("Uloženo.", "Úspěch", wx.ICON_INFORMATION)
except Exception as e:
print("Compression error: ", e)
traceback.print_exc()
wx.MessageBox("Nepodařilo se zkomprimovat obrázek! Zkuste jiný.", "Chyba", wx.ICON_ERROR)
def startDecompress(self, event=None):
try:
blobs: Blobs = loadBlobsInformation(self.selectedBlobToDecompress)
loadBlobsPixels(blobs, self.selectedPcfToDecompress)
imageArray = blobs.toPixels()
# dlg = wx.ProgressDialog("Komprimuji", f'',
# maximum=len(allBlobs),
# parent=self,
# style=wx.PD_APP_MODAL | wx.PD_ESTIMATED_TIME | wx.PD_REMAINING_TIME | wx.PD_CAN_ABORT)
#
# progressObject = {"count": 0, "max": len(allBlobs), "cancel": False}
# Thread(target=runCompressionRound, args=(allBlobs, progressObject)).start()
#
# while progressObject["count"] < progressObject["max"]:
# wx.MilliSleep(100)
# wx.Yield()
# if dlg.WasCancelled():
# print("Cancelled")
# progressObject["cancel"] = True
# wx.MessageBox("Komprese byla přerušena uživatelem.", "Zrušeno", wx.ICON_INFORMATION)
# dlg.Update(progressObject["max"])
# dlg.Destroy()
# return
#
# dlg.Update(progressObject["count"])
print("Decompression done")
# dlg.Destroy()
show_image_from_numpy_array(imageArray, "Dekomprimovaný obrázek")
saveDlg = wx.FileDialog(None, "Kam chceš uložit dekomprimovaný obrázek?",
wildcard="Patriks Compressed Files (*.jpg)|*.jpg",
style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
if saveDlg.ShowModal() != wx.ID_OK:
wx.MessageBox("Zrušeno uživatelem.", "Zrušeno", wx.ICON_INFORMATION)
return
save_image_from_numpy_array(saveDlg.GetPath(), imageArray)
wx.MessageBox("Uloženo.", "Úspěch", wx.ICON_INFORMATION)
except Exception as e:
print("Compression error: ", e)
traceback.print_exc()
wx.MessageBox("Nepodařilo se zkomprimovat obrázek! Zkuste jiný.", "Chyba", wx.ICON_ERROR)
|
substructure_search.py
|
import os
import sys
import time, datetime
import json
import flask
import atexit
import hashlib
import ConfigParser
import multiprocessing, Queue
import pygly.alignment
from pygly.GlycanFormatter import WURCS20Format, GlycoCTFormat
# Default Configuration
flask_API_port = 10980
flask_API_host = "localhost" # "0.0.0.0"
worker_num = 1
max_motif_size = 10
structure_file_path = ""
result_file_path = ""
# Error classes
class SubstructureSearchError(RuntimeError):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class ParameterError(SubstructureSearchError):
pass
# Handle parameters and configuration file
if len(sys.argv) > 1:
kvpara = {}
for k,v in zip(sys.argv[1::2], sys.argv[2::2]):
if not k.startswith("-"):
raise ParameterError("Unknown parameter %s" % k)
k = k.lstrip("-")
kvpara[k] = v
if "c" not in kvpara:
raise ParameterError("No config file provided")
else:
currentdir = os.path.dirname(sys.argv[0])
currentdirabs = os.path.abspath(currentdir)
configpath = os.path.join(currentdirabs, kvpara["c"])
config = ConfigParser.SafeConfigParser()
config.readfp(open(configpath))
worker_num = config.get("substructure_search", "cpu_core")
max_motif_size = config.get("substructure_search", "max_motif_size")
structure_file_path = config.get("substructure_search", "glycan_set")
structure_file_path = os.path.join(currentdirabs, structure_file_path)
flask_API_host = config.get("service", "host")
flask_API_port = config.get("service", "port")
# result_file_path = config.get("service", "result_file_log")
max_motif_size = int(max_motif_size)
worker_num = int(worker_num)
flask_API_port = int(flask_API_port)
else:
raise ParameterError("No config file provided")
# Define functions for flask process
def flask_API_init(shared_resources, flask_API_host, flask_API_port):
task_queue, result_queue = shared_resources
results = {}
app = flask.Flask(__name__)
# app.config["DEBUG"] = True
@app.route('/', methods=['GET', 'POST'])
def home():
return open(os.path.join(currentdirabs, "index.html")).read()
@app.route('/date', methods=['GET', 'POST'])
def getdate():
return flask.jsonify(datetime.datetime.now())
@app.route('/queue', methods=['GET', 'POST'])
def getqueuelength():
update_results()
n = len(filter(lambda x: not x["finished"], results.values()))
return flask.jsonify(n)
@app.route('/submit', methods=['GET', 'POST'])
def submit():
motif_match_position = "anywhere"
#additional_subst = False
#loose_root_match = False
query_sequence = ""
if flask.request.method == "GET":
para = flask.request.args
elif flask.request.method == "POST":
para = flask.request.form
else:
return flask.jsonify("METHOD %s is not suppoted" % flask.request.method)
if "seq" not in para:
flask.jsonify("Please provide a valid sequence")
query_sequence = str(para["seq"])
if "motif_match_position" in para:
motif_match_position = para["motif_match_position"].lower()
if motif_match_position not in ["anywhere", "reo"]:
raise ParameterError("motif match position is not recognized")
"""
if "additional_subst" in para:
if para["additional_subst"] == 'true':
additional_subst = True
"""
tmp = query_sequence + "_" + str(motif_match_position)  # + "_" + str(additional_subst)
list_id = hashlib.sha256(tmp).hexdigest()
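# The job id is the SHA-256 of the query sequence plus its options, so an
# identical re-submission maps to the same id and reuses the cached entry below
# instead of queueing a duplicate task.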
task = {
"id": list_id,
"seq": query_sequence,
"motif_match_position": motif_match_position,
#"additional_subst": additional_subst
}
print >> sys.stderr, "Job received by API: %s" % (task)
status = {
"id": list_id,
"submission_detail": task,
"finished": False,
"result": {}
}
if list_id in results:
pass
else:
task_queue.put(task)
results[list_id] = status
return flask.jsonify({
"list_id": list_id
})
def update_results():
for i in range(5):
try:
res = result_queue.get_nowait()
results[res["id"]]['finished'] = True
results[res["id"]]["result"] = res
except Queue.Empty:
break
except KeyError:
print >> sys.stderr, "Job ID %s is not present" % res["id"]
@app.route('/retrieve', methods=['GET', 'POST'])
def retrieve():
update_results()
rjson = {"error": "Please provide valid list_id"}
if flask.request.method == "GET":
para = flask.request.args
elif flask.request.method == "POST":
para = flask.request.form
else:
return flask.jsonify({"error": "METHOD %s is not suppoted" % flask.request.method})
if "list_id" in para:
list_id = para["list_id"]
rjson = results.get(list_id, {"error": list_id + " not found"})
return flask.jsonify(rjson)
print >> sys.stderr, "Running FLASK at http://%s:%s" % (flask_API_host, flask_API_port)
app.run(host=flask_API_host, port=flask_API_port, threaded=False)
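# Hedged usage sketch (endpoint paths and parameter names are taken from the
# handlers above; host, port and the example id are placeholders):
#   GET http://localhost:10980/submit?seq=<GlycoCT or WURCS>&motif_match_position=anywhere
#     -> {"list_id": "<sha256 digest>"}
#   GET http://localhost:10980/retrieve?list_id=<sha256 digest>
#     -> {"id": ..., "finished": true|false, "submission_detail": {...}, "result": {...}}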
# Define functions for computing process
def substructure_search_init(shared_resources, structure_list_file_path, PPID):
print >> sys.stderr, "Computing Processor%s is starting" % PPID
task_queue, result_queue = shared_resources
gp = GlycoCTFormat()
wp = WURCS20Format()
motif_match_connected_nodes_cache = pygly.alignment.ConnectedNodesCache()
mm1 = pygly.alignment.GlyTouCanMotif(connected_nodes_cache=motif_match_connected_nodes_cache)
# mm2 = pygly.alignment.MotifAllowOptionalSub(connected_nodes_cache=motif_match_connected_nodes_cache)
glycans = {}
for line in open(structure_list_file_path):
acc, s = line.strip().split()
glycans[acc] = wp.toGlycan(s)
print >> sys.stderr, "Processor-%s: finishes loading %s glycans" % (PPID, len(glycans))
while True:
task_detail = task_queue.get(block=True)
print >> sys.stderr, "Processor-%s: Job %s received." % (PPID, task_detail["id"])
seq = task_detail["seq"]
jobid = task_detail["id"]
#loose_root_match = task_detail["loose_root_match"]
#additional_subst = task_detail["additional_subst"]
motif_match_position = task_detail["motif_match_position"]
motif_matcher = mm1
"""
if loose_root_match:
motif_matcher = mm3
"""
#fullstructure = False
rootOnly = False
anywhereExceptRoot = False
if motif_match_position == "anywhere":
pass
elif motif_match_position == "reo":
rootOnly = True
else:
pass
"""
elif motif_match_position == "notre":
anywhereExceptRoot = True
elif motif_match_position == "fullstructure":
rootOnly = True
fullstructure = True
"""
matches = []
error = []
calculation_start_time = time.time()
try:
if "RES" in seq:
motif = gp.toGlycan(seq)
elif "WURCS" in seq:
motif = wp.toGlycan(seq)
else:
raise RuntimeError
except:
error.append("Unable to parse")
if len(error) == 0:
motif_node_num = len(list(motif.all_nodes()))
if motif_node_num > max_motif_size:
error.append("Motif is too big")
# TODO time out mechanism to avoid running for too long
for acc, glycan in glycans.items():
if len(error) != 0:
for e in error:
print >> sys.stderr, "Processor-%s: Issues (%s) is found with task %s" % (PPID, e, task_detail["id"])
break
#if fullstructure:
# if motif_node_num != len(list(glycan.all_nodes())):
# continue
if motif_matcher.leq(motif, glycan, rootOnly=rootOnly, anywhereExceptRoot=anywhereExceptRoot):
matches.append(acc)
calculation_end_time = time.time()
calculation_time_cost = calculation_end_time - calculation_start_time
res = {
"id": jobid,
"start time": calculation_start_time,
"end time": calculation_end_time,
"alignment calculation time": calculation_time_cost,
"matches": matches,
"error": error
}
print >> sys.stderr, "Processor-%s: Job %s finished within %ss" % (PPID, task_detail["id"], calculation_time_cost)
result_queue.put(res)
def cleanup():
for p in worker_processor_pool:
p.terminate()
front_end_API_process.terminate()
if __name__ == "__main__":
task_queue = multiprocessing.Queue()
result_queue = multiprocessing.Queue()
shared_resources = [task_queue, result_queue, ]
front_end_API_process = multiprocessing.Process(target=flask_API_init, args=(shared_resources, flask_API_host, flask_API_port))
front_end_API_process.start()
worker_processor_pool = []
for i in range(worker_num):
worker_processor = multiprocessing.Process(target=substructure_search_init, args=(shared_resources, structure_file_path, i))
worker_processor.start()
worker_processor_pool.append(worker_processor)
atexit.register(cleanup)
while True:
goodbye = not front_end_API_process.is_alive()
for p in worker_processor_pool:
if not p.is_alive():
goodbye = True
if goodbye:
cleanup()
break
time.sleep(1)
|